| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| libo/Enigma2 | lib/python/Screens/TimerEdit.py | 1 | 14240 |
from Components.ActionMap import ActionMap
from Components.Button import Button
from Components.config import config
from Components.MenuList import MenuList
from Components.TimerList import TimerList
from Components.TimerSanityCheck import TimerSanityCheck
from Components.UsageConfig import preferredTimerPath
from RecordTimer import RecordTimerEntry, parseEvent, AFTEREVENT
from Screen import Screen
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from ServiceReference import ServiceReference
from TimerEntry import TimerEntry, TimerLog
from Tools.BoundFunction import boundFunction
from time import time
class TimerEditList(Screen):
EMPTY = 0
ENABLE = 1
DISABLE = 2
CLEANUP = 3
DELETE = 4
def __init__(self, session):
Screen.__init__(self, session)
list = [ ]
self.list = list
self.fillTimerList()
self["timerlist"] = TimerList(list)
self.key_red_choice = self.EMPTY
self.key_yellow_choice = self.EMPTY
self.key_blue_choice = self.EMPTY
self["key_red"] = Button(" ")
self["key_green"] = Button(_("Add"))
self["key_yellow"] = Button(" ")
self["key_blue"] = Button(" ")
print "key_red_choice:",self.key_red_choice
self["actions"] = ActionMap(["OkCancelActions", "DirectionActions", "ShortcutActions", "TimerEditActions"],
{
"ok": self.openEdit,
"cancel": self.leave,
"green": self.addCurrentTimer,
"log": self.showLog,
"left": self.left,
"right": self.right,
"up": self.up,
"down": self.down
}, -1)
self.session.nav.RecordTimer.on_state_change.append(self.onStateChange)
self.onShown.append(self.updateState)
def up(self):
self["timerlist"].instance.moveSelection(self["timerlist"].instance.moveUp)
self.updateState()
def down(self):
self["timerlist"].instance.moveSelection(self["timerlist"].instance.moveDown)
self.updateState()
def left(self):
self["timerlist"].instance.moveSelection(self["timerlist"].instance.pageUp)
self.updateState()
def right(self):
self["timerlist"].instance.moveSelection(self["timerlist"].instance.pageDown)
self.updateState()
def toggleDisabledState(self):
cur=self["timerlist"].getCurrent()
if cur:
t = cur
if t.disabled:
print "try to ENABLE timer"
t.enable()
timersanitycheck = TimerSanityCheck(self.session.nav.RecordTimer.timer_list, cur)
if not timersanitycheck.check():
t.disable()
print "Sanity check failed"
self.session.openWithCallback(self.finishedEdit, TimerSanityConflict, timersanitycheck.getSimulTimerList())
else:
print "Sanity check passed"
if timersanitycheck.doubleCheck():
t.disable()
else:
if t.isRunning():
if t.repeated:
list = (
(_("Stop current event but not coming events"), "stoponlycurrent"),
(_("Stop current event and disable coming events"), "stopall"),
(_("Don't stop current event but disable coming events"), "stoponlycoming")
)
self.session.openWithCallback(boundFunction(self.runningEventCallback, t), ChoiceBox, title=_("Repeating event currently recording... What do you want to do?"), list = list)
else:
t.disable()
self.session.nav.RecordTimer.timeChanged(t)
self.refill()
self.updateState()
def runningEventCallback(self, t, result):
if result is not None:
if result[1] == "stoponlycurrent" or result[1] == "stopall":
t.enable()
t.processRepeated(findRunningEvent = False)
self.session.nav.RecordTimer.doActivate(t)
if result[1] == "stoponlycoming" or result[1] == "stopall":
t.disable()
self.session.nav.RecordTimer.timeChanged(t)
self.refill()
self.updateState()
def removeAction(self, descr):
actions = self["actions"].actions
if descr in actions:
del actions[descr]
def updateState(self):
cur = self["timerlist"].getCurrent()
if cur:
if self.key_red_choice != self.DELETE:
self["actions"].actions.update({"red":self.removeTimerQuestion})
self["key_red"].setText(_("Delete"))
self.key_red_choice = self.DELETE
if cur.disabled and (self.key_yellow_choice != self.ENABLE):
self["actions"].actions.update({"yellow":self.toggleDisabledState})
self["key_yellow"].setText(_("Enable"))
self.key_yellow_choice = self.ENABLE
elif cur.isRunning() and not cur.repeated and (self.key_yellow_choice != self.EMPTY):
self.removeAction("yellow")
self["key_yellow"].setText(" ")
self.key_yellow_choice = self.EMPTY
elif ((not cur.isRunning())or cur.repeated ) and (not cur.disabled) and (self.key_yellow_choice != self.DISABLE):
self["actions"].actions.update({"yellow":self.toggleDisabledState})
self["key_yellow"].setText(_("Disable"))
self.key_yellow_choice = self.DISABLE
else:
if self.key_red_choice != self.EMPTY:
self.removeAction("red")
self["key_red"].setText(" ")
self.key_red_choice = self.EMPTY
if self.key_yellow_choice != self.EMPTY:
self.removeAction("yellow")
self["key_yellow"].setText(" ")
self.key_yellow_choice = self.EMPTY
showCleanup = True
for x in self.list:
if (not x[0].disabled) and (x[1] == True):
break
else:
showCleanup = False
if showCleanup and (self.key_blue_choice != self.CLEANUP):
self["actions"].actions.update({"blue":self.cleanupQuestion})
self["key_blue"].setText(_("Cleanup"))
self.key_blue_choice = self.CLEANUP
elif (not showCleanup) and (self.key_blue_choice != self.EMPTY):
self.removeAction("blue")
self["key_blue"].setText(" ")
self.key_blue_choice = self.EMPTY
def fillTimerList(self):
list = self.list
del list[:]
list.extend([(timer, False) for timer in self.session.nav.RecordTimer.timer_list])
list.extend([(timer, True) for timer in self.session.nav.RecordTimer.processed_timers])
list.sort(key=lambda x: x[0].begin)
def showLog(self):
cur=self["timerlist"].getCurrent()
if cur:
self.session.openWithCallback(self.finishedEdit, TimerLog, cur)
def openEdit(self):
cur=self["timerlist"].getCurrent()
if cur:
self.session.openWithCallback(self.finishedEdit, TimerEntry, cur)
def cleanupQuestion(self):
self.session.openWithCallback(self.cleanupTimer, MessageBox, _("Really delete done timers?"))
def cleanupTimer(self, delete):
if delete:
self.session.nav.RecordTimer.cleanup()
self.refill()
self.updateState()
def removeTimerQuestion(self):
cur = self["timerlist"].getCurrent()
if not cur:
return
self.session.openWithCallback(self.removeTimer, MessageBox, _("Do you really want to delete %s?") % (cur.name))
def removeTimer(self, result):
if not result:
return
list = self["timerlist"]
cur = list.getCurrent()
if cur:
timer = cur
timer.afterEvent = AFTEREVENT.NONE
self.session.nav.RecordTimer.removeEntry(timer)
self.refill()
self.updateState()
def refill(self):
oldsize = len(self.list)
self.fillTimerList()
lst = self["timerlist"]
newsize = len(self.list)
if oldsize and oldsize != newsize:
idx = lst.getCurrentIndex()
lst.entryRemoved(idx)
else:
lst.invalidate()
def addCurrentTimer(self):
event = None
service = self.session.nav.getCurrentService()
if service is not None:
info = service.info()
if info is not None:
event = info.getEvent(0)
# FIXME only works if already playing a service
serviceref = ServiceReference(self.session.nav.getCurrentlyPlayingServiceReference())
if event is None:
data = (int(time()), int(time() + 60), "", "", None)
else:
data = parseEvent(event, description = False)
self.addTimer(RecordTimerEntry(serviceref, checkOldTimers = True, dirname = preferredTimerPath(), *data))
def addTimer(self, timer):
self.session.openWithCallback(self.finishedAdd, TimerEntry, timer)
def finishedEdit(self, answer):
print "finished edit"
if answer[0]:
print "Edited timer"
entry = answer[1]
timersanitycheck = TimerSanityCheck(self.session.nav.RecordTimer.timer_list, entry)
success = False
if not timersanitycheck.check():
simulTimerList = timersanitycheck.getSimulTimerList()
if simulTimerList is not None:
for x in simulTimerList:
if x.setAutoincreaseEnd(entry):
self.session.nav.RecordTimer.timeChanged(x)
if not timersanitycheck.check():
simulTimerList = timersanitycheck.getSimulTimerList()
if simulTimerList is not None:
self.session.openWithCallback(self.finishedEdit, TimerSanityConflict, timersanitycheck.getSimulTimerList())
else:
success = True
else:
success = True
if success:
print "Sanity check passed"
self.session.nav.RecordTimer.timeChanged(entry)
self.fillTimerList()
self.updateState()
else:
print "Timeredit aborted"
def finishedAdd(self, answer):
print "finished add"
if answer[0]:
entry = answer[1]
simulTimerList = self.session.nav.RecordTimer.record(entry)
if simulTimerList is not None:
for x in simulTimerList:
if x.setAutoincreaseEnd(entry):
self.session.nav.RecordTimer.timeChanged(x)
simulTimerList = self.session.nav.RecordTimer.record(entry)
if simulTimerList is not None:
self.session.openWithCallback(self.finishSanityCorrection, TimerSanityConflict, simulTimerList)
self.fillTimerList()
self.updateState()
else:
print "Timeredit aborted"
def finishSanityCorrection(self, answer):
self.finishedAdd(answer)
def leave(self):
self.session.nav.RecordTimer.on_state_change.remove(self.onStateChange)
self.close()
def onStateChange(self, entry):
self.refill()
self.updateState()
class TimerSanityConflict(Screen):
EMPTY = 0
ENABLE = 1
DISABLE = 2
EDIT = 3
def __init__(self, session, timer):
Screen.__init__(self, session)
self.timer = timer
print "TimerSanityConflict"
self["timer1"] = TimerList(self.getTimerList(timer[0]))
self.list = []
self.list2 = []
count = 0
for x in timer:
if count != 0:
self.list.append((_("Conflicting timer") + " " + str(count), x))
self.list2.append((timer[count], False))
count += 1
if count == 1:
self.list.append((_("Channel not in services list")))
self["list"] = MenuList(self.list)
self["timer2"] = TimerList(self.list2)
self["key_red"] = Button("Edit")
self["key_green"] = Button(" ")
self["key_yellow"] = Button(" ")
self["key_blue"] = Button(" ")
self.key_green_choice = self.EMPTY
self.key_yellow_choice = self.EMPTY
self.key_blue_choice = self.EMPTY
self["actions"] = ActionMap(["OkCancelActions", "DirectionActions", "ShortcutActions", "TimerEditActions"],
{
"ok": self.leave_ok,
"cancel": self.leave_cancel,
"red": self.editTimer1,
"up": self.up,
"down": self.down
}, -1)
self.onShown.append(self.updateState)
def getTimerList(self, timer):
return [(timer, False)]
def editTimer1(self):
self.session.openWithCallback(self.finishedEdit, TimerEntry, self["timer1"].getCurrent())
def toggleTimer1(self):
if self.timer[0].disabled:
self.timer[0].disabled = False
else:
if not self.timer[0].isRunning():
self.timer[0].disabled = True
self.finishedEdit((True, self.timer[0]))
def editTimer2(self):
self.session.openWithCallback(self.finishedEdit, TimerEntry, self["timer2"].getCurrent())
def toggleTimer2(self):
x = self["list"].getSelectedIndex() + 1 # the first is the new timer so we do +1 here
if self.timer[x].disabled:
self.timer[x].disabled = False
elif not self.timer[x].isRunning():
self.timer[x].disabled = True
self.finishedEdit((True, self.timer[0]))
def finishedEdit(self, answer):
self.leave_ok()
def leave_ok(self):
self.close((True, self.timer[0]))
def leave_cancel(self):
self.close((False, self.timer[0]))
def up(self):
self["list"].instance.moveSelection(self["list"].instance.moveUp)
self["timer2"].moveToIndex(self["list"].getSelectedIndex())
def down(self):
self["list"].instance.moveSelection(self["list"].instance.moveDown)
self["timer2"].moveToIndex(self["list"].getSelectedIndex())
def removeAction(self, descr):
actions = self["actions"].actions
if descr in actions:
del actions[descr]
def updateState(self):
if self.timer[0] is not None:
if self.timer[0].disabled and self.key_green_choice != self.ENABLE:
self["actions"].actions.update({"green":self.toggleTimer1})
self["key_green"].setText(_("Enable"))
self.key_green_choice = self.ENABLE
elif self.timer[0].isRunning() and not self.timer[0].repeated and self.key_green_choice != self.EMPTY:
self.removeAction("green")
self["key_green"].setText(" ")
self.key_green_choice = self.EMPTY
elif (not self.timer[0].isRunning() or self.timer[0].repeated ) and self.key_green_choice != self.DISABLE:
self["actions"].actions.update({"green":self.toggleTimer1})
self["key_green"].setText(_("Disable"))
self.key_green_choice = self.DISABLE
if len(self.timer) > 1:
x = self["list"].getSelectedIndex()
if self.timer[x] is not None:
if self.key_yellow_choice == self.EMPTY:
self["actions"].actions.update({"yellow":self.editTimer2})
self["key_yellow"].setText(_("Edit"))
self.key_yellow_choice = self.EDIT
if self.timer[x].disabled and self.key_blue_choice != self.ENABLE:
self["actions"].actions.update({"blue":self.toggleTimer2})
self["key_blue"].setText(_("Enable"))
self.key_blue_choice = self.ENABLE
elif self.timer[x].isRunning() and not self.timer[x].repeated and self.key_blue_choice != self.EMPTY:
self.removeAction("blue")
self["key_blue"].setText(" ")
self.key_blue_choice = self.EMPTY
elif (not self.timer[x].isRunning() or self.timer[x].repeated ) and self.key_blue_choice != self.DISABLE:
self["actions"].actions.update({"blue":self.toggleTimer2})
self["key_blue"].setText(_("Disable"))
self.key_blue_choice = self.DISABLE
else:
#FIXME.... this doesnt hide the buttons self.... just the text
if self.key_yellow_choice != self.EMPTY:
self.removeAction("yellow")
self["key_yellow"].setText(" ")
self.key_yellow_choice = self.EMPTY
if self.key_blue_choice != self.EMPTY:
self.removeAction("blue")
self["key_blue"].setText(" ")
self.key_blue_choice = self.EMPTY
| gpl-2.0 | 2,415,394,993,166,281,700 | 31.290249 | 179 | 0.692275 | false | 3.236364 | true | false | false |
| treww/counters | server/performance_server.py | 1 | 4745 |
__author__ = 'treww'
import json
import MySQLdb as mysql
import http.client
import tornado.ioloop
import tornado.web
class _DatabaseRows:
_cursor = None
def __init__(self, cursor):
self._cursor = cursor
def __iter__(self):
return self
def __next__(self):
row = self._cursor.fetchone()
if row:
return row
raise StopIteration
class _CountersDB:
_db = None
def __init__(self):
host = 'rykovanov.com'
user ='counters'
password = 'qwerty'
database='counters'
self._db = mysql.connect(host=host, user=user, passwd=password, db=database)
def select_group(self, group_id):
c = self._db.cursor()
if group_id:
c.execute('select * from groups where groupid = {}'.format(group_id))
else:
c.execute('select * from groups where groupid = parentid')
row = c.fetchone()
return row
def select_sub_groups(self, parent_id):
c = self._db.cursor()
if parent_id:
c.execute('select * from groups where parentid = {} and not groupid = parentid'.format(parent_id))
else:
c.execute('select * from groups where not groupid = parentid and parentid in '
'(select groupid from groups where groupid = parentid)')
return _DatabaseRows(c)
def add_group(self, parent_id, name):
c = self._db.cursor()
c.execute('insert into groups (parentid, name) values (%s, %s)', (parent_id, name))
self._db.commit()
c.execute("select last_insert_id()")
row = c.fetchone()
return row[0]
def delete_group(self, group_id):
c = self._db.cursor()
c.execute('delete from groups where groupid={}'.format(group_id))
self._db.commit()
class _CountersRequest(tornado.web.RequestHandler):
def _convert_to_group(self, row):
group = {
"id" : int(row[0]),
"parent_id" : int(row[1]),
"name": str(row[2])
}
return group
class _GroupInfo(_CountersRequest):
def get(self, group_id=None):
db = _CountersDB()
row = db.select_group(group_id)
if not row:
self.set_status(http.client.NOT_FOUND)
return
self.set_header('Content-Type', 'application/json; charset=utf-8')
groups = [ self._convert_to_group(row) ]
self.write(json.dumps(groups))
def post(self, group_id):
group_id = int(group_id) #ensure passed parameter is a number
if len(self.request.body) > 512:
self.send_error(http.client.BAD_REQUEST)
return
group = json.loads(self.request.body.decode('utf-8'))
if not isinstance(group, list) or len(group) != 1:
self.send_error(http.client.BAD_REQUEST)
return
db = _CountersDB()
new_group_id = db.add_group(group_id, group[0]['name'])
self.set_header("Location", '/api/v1/groups/{}'.format(new_group_id))
self.set_status(http.client.CREATED)
def delete(self, group_id):
group_id = int(group_id) #ensure passed parameter is a number
db = _CountersDB()
db.delete_group(group_id)
#TODO Support deletion of multiple groups inside the group specified in the request.
#TODO HTTP/1.1 DELETE /api/v1/groups/6
#TODO [
#TODO {"id": 12}
#TODO {"id": 14}
#TODO ]
class _BrowseGroup(_CountersRequest):
def get(self, group_id=None):
self.set_header('Content-Type', 'application/json; charset=utf-8')
db = _CountersDB()
rows = db.select_sub_groups(int(group_id) if group_id else None)
self.write("[")
first = True
for row in rows:
group = self._convert_to_group(row)
if not first:
self.write(",")
self.write(json.dumps(group))
first = False
self.write("]")
class _ListCounters(_CountersRequest):
def get(self):
self.set_header('Content-Type', 'application/json; charset=utf-8')
self.write(json.dumps({ "name" : "hello"}))
class _MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
class PerformanceCountersServer:
def run(self):
application = tornado.web.Application([
(r'/', _MainHandler),
(r'/api/v1/groups$', _GroupInfo),
(r'/api/v1/groups[/]?$', _BrowseGroup),
(r'/api/v1/groups/([0-9]+)$', _GroupInfo),
(r'/api/v1/groups/([0-9]+)/$', _BrowseGroup),
])
application.listen(8888)
tornado.ioloop.IOLoop.current().start()
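# Added usage sketch (not part of the original module; values are assumptions).
# With the server started via PerformanceCountersServer().run() on port 8888,
# the routes registered above can be exercised like this:
#
#     import http.client, json
#     conn = http.client.HTTPConnection("localhost", 8888)
#     conn.request("GET", "/api/v1/groups")             # root group info (_GroupInfo)
#     print(json.loads(conn.getresponse().read()))
#     conn.request("POST", "/api/v1/groups/1",          # add a sub-group (_GroupInfo.post)
#                  body=json.dumps([{"name": "cpu"}]))
#     print(conn.getresponse().getheader("Location"))   # e.g. /api/v1/groups/<new id>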
| mit | 6,965,204,526,205,543,000 | 29.22293 | 110 | 0.567545 | false | 3.641596 | false | false | false |
| nkmk/python-snippets | notebook/numpy_nan_replace.py | 1 | 1889 |
import numpy as np
a = np.genfromtxt('data/src/sample_nan.csv', delimiter=',')
print(a)
# [[11. 12. nan 14.]
# [21. nan nan 24.]
# [31. 32. 33. 34.]]
a_nan = np.array([0, 1, np.nan, float('nan')])
print(a_nan)
# [ 0. 1. nan nan]
print(np.nan == np.nan)
# False
print(np.isnan(np.nan))
# True
print(a_nan == np.nan)
# [False False False False]
print(np.isnan(a_nan))
# [False False True True]
a_fill = np.genfromtxt('data/src/sample_nan.csv', delimiter=',', filling_values=0)
print(a_fill)
# [[11. 12. 0. 14.]
# [21. 0. 0. 24.]
# [31. 32. 33. 34.]]
a = np.genfromtxt('data/src/sample_nan.csv', delimiter=',')
print(np.nan_to_num(a))
# [[11. 12. 0. 14.]
# [21. 0. 0. 24.]
# [31. 32. 33. 34.]]
print(a)
# [[11. 12. nan 14.]
# [21. nan nan 24.]
# [31. 32. 33. 34.]]
print(np.nan_to_num(a, copy=False))
# [[11. 12. 0. 14.]
# [21. 0. 0. 24.]
# [31. 32. 33. 34.]]
print(a)
# [[11. 12. 0. 14.]
# [21. 0. 0. 24.]
# [31. 32. 33. 34.]]
a = np.genfromtxt('data/src/sample_nan.csv', delimiter=',')
print(np.nan_to_num(a, nan=-1))
# [[11. 12. -1. 14.]
# [21. -1. -1. 24.]
# [31. 32. 33. 34.]]
print(np.nanmean(a))
# 23.555555555555557
print(np.nan_to_num(a, nan=np.nanmean(a)))
# [[11. 12. 23.55555556 14. ]
# [21. 23.55555556 23.55555556 24. ]
# [31. 32. 33. 34. ]]
a = np.genfromtxt('data/src/sample_nan.csv', delimiter=',')
print(np.isnan(a))
# [[False False True False]
# [False True True False]
# [False False False False]]
a[np.isnan(a)] = 0
print(a)
# [[11. 12. 0. 14.]
# [21. 0. 0. 24.]
# [31. 32. 33. 34.]]
a = np.genfromtxt('data/src/sample_nan.csv', delimiter=',')
a[np.isnan(a)] = np.nanmean(a)
print(a)
# [[11. 12. 23.55555556 14. ]
# [21. 23.55555556 23.55555556 24. ]
# [31. 32. 33. 34. ]]
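# Added sketch (not in the original snippet): np.where() gives the same replacement
# as the boolean-indexing approach above, but returns a new array instead of
# modifying a in place.
a = np.genfromtxt('data/src/sample_nan.csv', delimiter=',')
print(np.where(np.isnan(a), 0, a))
# [[11. 12. 0. 14.]
# [21. 0. 0. 24.]
# [31. 32. 33. 34.]]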
| mit | 8,838,509,263,677,381,000 | 21.759036 | 82 | 0.51244 | false | 2.211944 | false | true | false |
| NotBobTheBuilder/robogrid | robogrid/robot.py | 1 | 2297 |
from .grids import Simple_Grid
class Robot(object):
def __init__(self, name, grid=None):
self.name = name
if grid is None:
grid = Simple_Grid(20)
self.grid = grid
start_pos = self.grid.free_position()
if start_pos is None:
raise ValueError("No space in proposed grid")
self._heading = 0
self._x, self._y = start_pos
def __repr__(self):
summary = {
"name": self.name,
"grid": repr(self.grid)
}
return 'Robot("{name}", {grid})'.format(**summary)
def __str__(self):
arrow = "^>v<"[self.heading]
result = ""
for row_i, row in enumerate(self.grid):
for col_i, cell in enumerate(row):
if (col_i, row_i) == self.pos:
result += arrow
else:
result += self.grid.char(cell)
result += "\n"
return result
def forward(self):
if not self.can_move_forward():
return
if self.heading == 0:
self._y -= 1
elif self.heading == 1:
self._x += 1
elif self.heading == 2:
self._y += 1
elif self.heading == 3:
self._x -= 1
def can_move_forward(self):
return not self.cell_at_heading_blocked()
def cell_at_heading_blocked(self, heading=None):
return {
0: self.grid[self.x, self.y-1],
1: self.grid[self.x+1, self.y],
2: self.grid[self.x, self.y+1],
3: self.grid[self.x-1, self.y],
}[self.heading if heading is None else heading]
def backward(self):
self.right()
self.right()
self.forward()
self.right()
self.right()
def right(self):
self.heading += 1
def left(self):
self.heading -= 1
@property
def heading(self):
return self._heading
@heading.setter
def heading(self, val):
self._heading = val % 4
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def pos(self):
return self.x, self.y
def is_finished(self):
return (self.x, self.y) == (self.grid.width - 2, self.grid.height - 2)
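# Added usage sketch (argument values are assumptions, not part of the original module):
#
#     bot = Robot("walle")            # uses a default Simple_Grid(20)
#     bot.right()                     # headings: 0=up, 1=right, 2=down, 3=left
#     if bot.can_move_forward():
#         bot.forward()
#     print(bot)                      # grid rendered with ^>v< marking the robot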
| mit | 6,702,044,922,090,397,000 | 23.178947 | 74 | 0.491946 | false | 3.784185 | false | false | false |
| ppyordanov/HCI_4_Future_Cities | Server/src/virtualenv/Lib/encodings/iso8859_16.py | 1 | 12859 |
""" Python Character Mapping Codec iso8859_16 generated from 'MAPPINGS/ISO8859/8859-16.TXT' with gencodec.py.
""" # "
import codecs
# ## Codec APIs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors='strict'):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-16',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\u0105' # 0xA2 -> LATIN SMALL LETTER A WITH OGONEK
u'\u0141' # 0xA3 -> LATIN CAPITAL LETTER L WITH STROKE
u'\u20ac' # 0xA4 -> EURO SIGN
u'\u201e' # 0xA5 -> DOUBLE LOW-9 QUOTATION MARK
u'\u0160' # 0xA6 -> LATIN CAPITAL LETTER S WITH CARON
u'\xa7' # 0xA7 -> SECTION SIGN
u'\u0161' # 0xA8 -> LATIN SMALL LETTER S WITH CARON
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u0218' # 0xAA -> LATIN CAPITAL LETTER S WITH COMMA BELOW
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u0179' # 0xAC -> LATIN CAPITAL LETTER Z WITH ACUTE
u'\xad' # 0xAD -> SOFT HYPHEN
u'\u017a' # 0xAE -> LATIN SMALL LETTER Z WITH ACUTE
u'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u010c' # 0xB2 -> LATIN CAPITAL LETTER C WITH CARON
u'\u0142' # 0xB3 -> LATIN SMALL LETTER L WITH STROKE
u'\u017d' # 0xB4 -> LATIN CAPITAL LETTER Z WITH CARON
u'\u201d' # 0xB5 -> RIGHT DOUBLE QUOTATION MARK
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\u017e' # 0xB8 -> LATIN SMALL LETTER Z WITH CARON
u'\u010d' # 0xB9 -> LATIN SMALL LETTER C WITH CARON
u'\u0219' # 0xBA -> LATIN SMALL LETTER S WITH COMMA BELOW
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u0152' # 0xBC -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xBD -> LATIN SMALL LIGATURE OE
u'\u0178' # 0xBE -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\u0106' # 0xC5 -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE
u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u0150' # 0xD5 -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\u015a' # 0xD7 -> LATIN CAPITAL LETTER S WITH ACUTE
u'\u0170' # 0xD8 -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u0118' # 0xDD -> LATIN CAPITAL LETTER E WITH OGONEK
u'\u021a' # 0xDE -> LATIN CAPITAL LETTER T WITH COMMA BELOW
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u0107' # 0xE5 -> LATIN SMALL LETTER C WITH ACUTE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
u'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE
u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\u0151' # 0xF5 -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\u015b' # 0xF7 -> LATIN SMALL LETTER S WITH ACUTE
u'\u0171' # 0xF8 -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u0119' # 0xFD -> LATIN SMALL LETTER E WITH OGONEK
u'\u021b' # 0xFE -> LATIN SMALL LETTER T WITH COMMA BELOW
u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table = codecs.charmap_build(decoding_table)
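# Added usage sketch (not part of the generated codec): the table-driven codec
# above maps ISO 8859-16 byte 0xAA to U+0218 and back, e.g.:
#
#     data, _ = Codec().encode(u'\u0218tefan')   # -> '\xaatefan'
#     text, _ = Codec().decode(data)             # -> u'\u0218tefan'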
| mit | -2,447,403,365,782,937,600 | 40.347267 | 109 | 0.578505 | false | 2.791187 | false | false | false |
| alsoicode/django-maintenancemode-2 | testproject/testproject/urls.py | 1 | 1160 |
"""testproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from distutils.version import StrictVersion
from django.conf.urls import include
from django.contrib import admin
from maintenancemode.utils.settings import DJANGO_VERSION
if DJANGO_VERSION >= StrictVersion('2.0'):
from django.urls import path
urlpatterns = [
path(r'admin/', admin.site.urls),
path(r'', include('app.urls')),
]
else:
from django.conf.urls import url
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('app.urls', namespace='app')),
]
| apache-2.0 | 1,782,432,068,853,554,000 | 34.151515 | 77 | 0.67931 | false | 3.613707 | false | false | false |
| Grumbel/scatterbackup | scatterbackup/format.py | 1 | 7129 |
# ScatterBackup - A chaotic backup solution
# Copyright (C) 2016 Ingo Ruhnke <grumbel@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import stat
import datetime
from pwd import getpwuid
from grp import getgrgid
from scatterbackup.units import bytes2human_decimal, bytes2human_binary, units
from scatterbackup.time import format_time
class Bytes:
def __init__(self, count):
self.count = count
def __format__(self, spec):
r = spec.rsplit(":", maxsplit=1)
str_spec, unit = r if len(r) == 2 else (r[0], "h")
return format(self.as_str(unit), str_spec)
def as_str(self, unit):
if unit == "h":
return bytes2human_decimal(self.count)
elif unit == "H":
return bytes2human_binary(self.count)
elif unit == "r":
return "{}".format(self.count)
elif unit == "B":
return "{}{}".format(self.count, unit)
elif unit in units:
return "{:.2f}{}".format(self.count / units[unit], unit)
else:
raise Exception("unknown unit: {}".format(unit))
class Checksum:
def __init__(self, checksum):
self.checksum = checksum
def __format__(self, spec):
r = spec.rsplit(":", maxsplit=1)
if len(r) == 2:
str_spec = r[0]
cut = int(r[1])
else:
str_spec = r[0]
cut = None
return format(self.checksum[0:cut], str_spec)
class Time:
def __init__(self, time):
self.time = time
def __format__(self, spec):
r = spec.rsplit(":", maxsplit=1)
str_spec, time_spec = r if len(r) == 2 else (r[0], "h")
return format(self.as_str(time_spec), str_spec)
def as_str(self, spec):
if spec == 'r':
return str(self.time)
elif spec == 'iso' or spec == 'i':
return format_time(self.time)
elif spec == 'h':
if self.time is None:
return " <unknown> "
else:
dt = datetime.datetime.fromtimestamp(self.time / 1000**3)
return dt.strftime("%F %T")
else:
if self.time is None:
return "<unknown>"
else:
dt = datetime.datetime.fromtimestamp(self.time / 1000**3)
return dt.strftime(spec)
class Mode:
def __init__(self, mode):
self.mode = mode
def __format__(self, spec):
r = spec.rsplit(":", maxsplit=1)
str_spec, spec = r if len(r) == 2 else (r[0], "h")
return format(self.as_str(spec), str_spec)
def as_str(self, spec):
if spec == 'h':
return self.as_str_human()
else:
return str(self.mode)
def as_str_human(self):
mode = self.mode
s = ""
if stat.S_ISDIR(mode):
s += "d"
elif stat.S_ISCHR(mode):
s += "c"
elif stat.S_ISBLK(mode):
s += "b"
elif stat.S_ISREG(mode):
s += "-"
elif stat.S_ISFIFO(mode):
s += "p"
elif stat.S_ISLNK(mode):
s += "l"
elif stat.S_ISSOCK(mode):
s += "s"
else:
s += "?"
if mode & stat.S_IRUSR:
s += "r"
else:
s += "-"
if mode & stat.S_IWUSR:
s += "w"
else:
s += "-"
if mode & stat.S_IXUSR:
s += "s" if mode & stat.S_ISUID else "x"  # setuid bit governs the user-execute column
else:
s += "S" if mode & stat.S_ISUID else "-"
if mode & stat.S_IRGRP:
s += "r"
else:
s += "-"
if mode & stat.S_IWGRP:
s += "w"
else:
s += "-"
if mode & stat.S_IXGRP:
s += "s" if mode & stat.S_ISGID else "x"
else:
s += "S" if mode & stat.S_ISGID else "-"
if mode & stat.S_IROTH:
s += "r"
else:
s += "-"
if mode & stat.S_IWOTH:
s += "w"
else:
s += "-"
if mode & stat.S_IXOTH:
s += "t" if mode & stat.S_ISVTX else "x"  # sticky bit governs the other-execute column
else:
s += "T" if mode & stat.S_ISVTX else "-"
return s
class RelPath:
def __init__(self, path):
self.path = path
def __format__(self, spec):
r = spec.rsplit(":", maxsplit=1)
str_spec, spec = r if len(r) == 2 else (r[0], "")
return format(self.as_str(spec), str_spec)
def as_str(self, spec):
return os.path.relpath(self.path, spec)
class FileInfoFormatter:
def __init__(self, fileinfo):
self.fileinfo = fileinfo
def __getitem__(self, key):
# FIXME: potential security hole
return self.__getattribute__(key)()
def path(self):
return self.fileinfo.path
def relpath(self):
return RelPath(self.fileinfo.path)
def dev(self):
return self.fileinfo.dev
def ino(self):
return self.fileinfo.ino
def mode(self):
return Mode(self.fileinfo.mode)
def nlink(self):
return self.fileinfo.nlink
def uid(self):
return self.fileinfo.uid
def gid(self):
return self.fileinfo.gid
def owner(self):
try:
return getpwuid(self.fileinfo.uid).pw_name # FIXME: maybe cache this?
except KeyError as err:
return str(self.fileinfo.uid)
def group(self):
try:
return getgrgid(self.fileinfo.gid).gr_name # FIXME: maybe cache this?
except KeyError as err:
return str(self.fileinfo.gid)
def rdev(self):
return self.fileinfo.rdev
def size(self):
return Bytes(self.fileinfo.size)
def blksize(self):
return self.fileinfo.blksize
def blocks(self):
return self.fileinfo.blocks
def atime(self):
return Time(self.fileinfo.atime)
def ctime(self):
return Time(self.fileinfo.ctime)
def mtime(self):
return Time(self.fileinfo.mtime)
def time(self): return Time(self.fileinfo.time)
def birth(self):
return self.fileinfo.birth
def death(self):
return self.fileinfo.death
def sha1(self):
return Checksum(self.fileinfo.blob.sha1 if self.fileinfo.blob else "<sha1:unknown>")
def md5(self):
return Checksum(self.fileinfo.blob.md5 if self.fileinfo.blob else "<md5:unknown>")
def target(self):
return self.fileinfo.target
# EOF #
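# Added usage sketch (values are assumptions, not part of the original module).
# The helper classes above accept a two-part format spec "<str-spec>:<unit or time
# spec>" split with rsplit(":", 1), which lets FileInfoFormatter drive str.format():
#
#     fmt = FileInfoFormatter(fileinfo)            # `fileinfo` is assumed to exist
#     print("{mtime::h} {size:>9:h} {mode::h} {path}".format_map(fmt))
#     format(Bytes(1500000), ">10:H")              # binary units, right-aligned
#     format(Mode(0o100644), ":h")                 # "-rw-r--r--"-style string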
| gpl-3.0 | -175,609,349,884,586,240 | 24.280142 | 92 | 0.534016 | false | 3.578815 | false | false | false |
| carefree0910/MachineLearning | _Dist/NeuralNetworks/_Tests/_UnitTests/b_Advanced.py | 1 | 2339 |
import os
import sys
root_path = os.path.abspath("../../../../")
if root_path not in sys.path:
sys.path.append(root_path)
import unittest
import numpy as np
from Util.Util import DataUtil
from _Dist.NeuralNetworks.e_AdvancedNN.NN import Advanced
from _Dist.NeuralNetworks._Tests._UnitTests.UnitTestUtil import clear_cache
base_params = {
"name": "UnitTest",
"data_info": {
"numerical_idx": [True] * 6 + [False],
"categorical_columns": []
},
"model_param_settings": {"n_epoch": 3, "max_epoch": 5}
}
nn = Advanced(**base_params)
train_set, cv_set, test_set = DataUtil.gen_special_linear(1000, 2, 2, 2, one_hot=False)
class TestAdvancedNN(unittest.TestCase):
def test_00_train(self):
self.assertIsInstance(
nn.fit(*train_set, *cv_set, verbose=0), Advanced,
msg="Train failed"
)
def test_01_predict(self):
self.assertIs(nn.predict(train_set[0]).dtype, np.dtype("float32"), "Predict failed")
self.assertIs(nn.predict_classes(cv_set[0]).dtype, np.dtype("int32"), "Predict classes failed")
self.assertIs(nn.predict_classes(test_set[0]).dtype, np.dtype("int32"), "Predict classes failed")
def test_02_evaluate(self):
self.assertEqual(len(nn.evaluate(*train_set, *cv_set, *test_set)), 3, "Evaluation failed")
def test_03_save(self):
self.assertIsInstance(nn.save(), Advanced, msg="Save failed")
def test_04_load(self):
global nn
nn = Advanced(**base_params).load()
self.assertIsInstance(nn, Advanced, "Load failed")
def test_05_re_predict(self):
self.assertIs(nn.predict(train_set[0]).dtype, np.dtype("float32"), "Re-Predict failed")
self.assertIs(nn.predict_classes(cv_set[0]).dtype, np.dtype("int32"), "Re-Predict classes failed")
self.assertIs(nn.predict_classes(test_set[0]).dtype, np.dtype("int32"), "Re-Predict classes failed")
def test_06_re_evaluate(self):
self.assertEqual(len(nn.evaluate(*train_set, *cv_set, *test_set)), 3, "Re-Evaluation failed")
def test_07_re_train(self):
self.assertIsInstance(
nn.fit(*train_set, *cv_set, verbose=0), Advanced,
msg="Re-Train failed"
)
def test_99_clear_cache(self):
clear_cache()
if __name__ == '__main__':
unittest.main()
| mit | -1,695,884,148,561,220,400 | 32.898551 | 108 | 0.637024 | false | 3.331909 | true | false | false |
| SasView/sasmodels | sasmodels/models/polymer_micelle.py | 1 | 5987 |
r"""
This model provides the form factor, $P(q)$, for a micelle with a spherical
core and Gaussian polymer chains attached to the surface, thus may be applied
to block copolymer micelles. To work well the Gaussian chains must be much
smaller than the core, which is often not the case. Please study the
reference carefully.
Definition
----------
The 1D scattering intensity for this model is calculated according to
the equations given by Pedersen (Pedersen, 2000), summarised briefly here.
The micelle core is imagined as $N$ = *n_aggreg* polymer heads, each of volume
$V_\text{core}$, which then defines a micelle core of radius $r$ = *r_core*,
which is a separate parameter even though it could be directly determined.
The Gaussian random coil tails, of gyration radius $R_g$, are imagined
uniformly distributed around the spherical core, centred at a distance
$r + d \cdot R_g$ from the micelle centre, where $d$ = *d_penetration* is
of order unity. A volume $V_\text{corona}$ is defined for each coil. The
model in detail seems to separately parameterize the terms for the shape
of $I(Q)$ and the relative intensity of each term, so use with caution
and check parameters for consistency. The spherical core is monodisperse,
so its intensity and the cross terms may have sharp oscillations (use $q$
resolution smearing if needs be to help remove them).
.. math::
P(q) &= N^2\beta^2_s\Phi(qr)^2 + N\beta^2_cP_c(q)
+ 2N^2\beta_s\beta_cS_{sc}(q) + N(N-1)\beta_c^2S_{cc}(q) \\
\beta_s &= V_\text{core}(\rho_\text{core} - \rho_\text{solvent}) \\
\beta_c &= V_\text{corona}(\rho_\text{corona} - \rho_\text{solvent})
where $\rho_\text{core}$, $\rho_\text{corona}$ and $\rho_\text{solvent}$ are
the scattering length densities *sld_core*, *sld_corona* and *sld_solvent*.
For the spherical core of radius $r$
.. math::
\Phi(qr)= \frac{\sin(qr) - qr\cos(qr)}{(qr)^3}
whilst for the Gaussian coils
.. math::
P_c(q) &= 2 [\exp(-Z) + Z - 1] / Z^2 \\
Z &= (q R_g)^2
The sphere to coil (core to corona) and coil to coil (corona to corona) cross
terms are approximated by:
.. math::
S_{sc}(q) &= \Phi(qr)\psi(Z)
\frac{\sin(q(r+d \cdot R_g))}{q(r+d \cdot R_g)} \\
S_{cc}(q) &= \psi(Z)^2
\left[\frac{\sin(q(r+d \cdot R_g))}{q(r+d \cdot R_g)} \right]^2 \\
\psi(Z) &= \frac{[1-\exp^{-Z}]}{Z}
Validation
----------
$P(q)$ above is multiplied by *ndensity*, and a units conversion of $10^{-13}$,
so *scale* is likely 1.0 if the scattering data is in absolute units. This
model has not yet been independently validated.
References
----------
#. J Pedersen, *J. Appl. Cryst.*, 33 (2000) 637-640
Authorship and Verification
----------------------------
* **Translated by :** Richard Heenan **Date:** March 20, 2016
* **Last modified by:** Paul Kienzle **Date:** November 29, 2017
* **Last reviewed by:** Steve King **Date:** November 30, 2017
"""
import numpy as np
from numpy import inf, pi
name = "polymer_micelle"
title = "Polymer micelle model"
description = """
This model provides the form factor, $P(q)$, for a micelle with a spherical
core and Gaussian polymer chains attached to the surface, thus may be applied
to block copolymer micelles. To work well the Gaussian chains must be much
smaller than the core, which is often not the case. Please study the
reference to Pedersen and full documentation carefully.
"""
category = "shape:sphere"
# pylint: disable=bad-whitespace, line-too-long
# ["name", "units", default, [lower, upper], "type","description"],
parameters = [
["ndensity", "1e15/cm^3", 8.94, [0.0, inf], "", "Number density of micelles"],
["v_core", "Ang^3", 62624.0, [0.0, inf], "", "Core volume "],
["v_corona", "Ang^3", 61940.0, [0.0, inf], "", "Corona volume"],
["sld_solvent", "1e-6/Ang^2", 6.4, [0.0, inf], "sld", "Solvent scattering length density"],
["sld_core", "1e-6/Ang^2", 0.34, [0.0, inf], "sld", "Core scattering length density"],
["sld_corona", "1e-6/Ang^2", 0.8, [0.0, inf], "sld", "Corona scattering length density"],
["radius_core", "Ang", 45.0, [0.0, inf], "", "Radius of core ( must be >> rg )"],
["rg", "Ang", 20.0, [0.0, inf], "", "Radius of gyration of chains in corona"],
["d_penetration", "", 1.0, [-inf, inf], "", "Factor to mimic non-penetration of Gaussian chains"],
["n_aggreg", "", 6.0, [-inf, inf], "", "Aggregation number of the micelle"],
]
# pylint: enable=bad-whitespace, line-too-long
single = False
source = ["lib/sas_3j1x_x.c", "polymer_micelle.c"]
def random():
"""Return a random parameter set for the model."""
radius_core = 10**np.random.uniform(1, 3)
rg = radius_core * 10**np.random.uniform(-2, -0.3)
d_penetration = np.random.randn()*0.05 + 1
n_aggreg = np.random.randint(3, 30)
# volume of head groups is the core volume over the number of groups,
# with a correction for packing fraction of the head groups.
v_core = 4*pi/3*radius_core**3/n_aggreg * 0.68
# Rg^2 for gaussian coil is a^2n/6 => a^2 = 6 Rg^2/n
# a=2r => r = Rg sqrt(3/2n)
# v = 4/3 pi r^3 n => v = 4/3 pi Rg^3 (3/2n)^(3/2) n = pi Rg^3 sqrt(6/n)
tail_segments = np.random.randint(6, 30)
v_corona = pi * rg**3 * np.sqrt(6/tail_segments)
V = 4*pi/3*(radius_core + rg)**3
pars = dict(
background=0,
scale=1e7/V,
ndensity=8.94,
v_core=v_core,
v_corona=v_corona,
radius_core=radius_core,
rg=rg,
d_penetration=d_penetration,
n_aggreg=n_aggreg,
)
return pars
tests = [
[{}, 0.01, 15.3532],
]
# RKH 20Mar2016 - need to check whether the core & corona volumes are per
# monomer ??? and how aggregation number works!
# renamed from micelle_spherical_core to polymer_micelle,
# moved from shape-independent to spheres section.
# Ought to be able to add polydisp to core? And add ability to x by S(Q) ?
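# Added sketch (not part of the original model): a minimal numpy rendering of the
# two building blocks documented above, useful for sanity-testing; the compiled C
# sources remain the authoritative implementation.
def _sphere_amplitude(qr):
    """Phi(qr) = [sin(qr) - qr*cos(qr)] / (qr)^3 for the monodisperse core."""
    return (np.sin(qr) - qr*np.cos(qr))/qr**3

def _gauss_coil(q, rg):
    """P_c(q) = 2[exp(-Z) + Z - 1]/Z^2 with Z = (q*Rg)^2 for the corona chains."""
    z = (q*rg)**2
    return 2.0*(np.exp(-z) + z - 1.0)/z**2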
| bsd-3-clause | 991,960,999,830,574,600 | 38.649007 | 113 | 0.632036 | false | 2.809479 | false | false | false |
| oldm/OldMan | oldman/resource/manager.py | 1 | 7765 |
import logging
from oldman.resource.resource import ClientResource
from oldman.store.selector import DataStoreSelector
from oldman.model.manager import ClientModelManager
DEFAULT_MODEL_NAME = "Default_Client"
class ClientResourceManager:
"""
TODO: describe
"""
def __init__(self, data_stores, schema_graph=None, attr_extractor=None, oper_extractor=None,
declare_default_operation_functions=True):
self._model_manager = ClientModelManager(self, schema_graph=schema_graph, attr_extractor=attr_extractor,
oper_extractor=oper_extractor,
declare_default_operation_functions=declare_default_operation_functions)
self._store_selector = DataStoreSelector(data_stores)
self._logger = logging.getLogger(__name__)
# Default model
self._model_manager.create_model(DEFAULT_MODEL_NAME, {u"@context": {}}, self, untyped=True,
iri_prefix=u"http://localhost/.well-known/genid/client/",
is_default=True)
@property
def model_manager(self):
return self._model_manager
def declare_method(self, method, name, class_iri):
"""Attaches a method to the :class:`~oldman.resource.Resource` objects that are instances of a given RDFS class.
Like in Object-Oriented Programming, this method can be overwritten by attaching a homonymous
method to a class that has a higher inheritance priority (such as a sub-class).
To benefit from this method (or an overwritten one), :class:`~oldman.resource.Resource` objects
must be associated to a :class:`~oldman.model.Model` that corresponds to the RDFS class or to one of its
subclasses.
:param method: Python function that takes as first argument a :class:`~oldman.resource.Resource` object.
:param name: Name assigned to this method.
:param class_iri: Targeted RDFS class. If not overwritten, all the instances
(:class:`~oldman.resource.Resource` objects) should inherit this method.
"""
models = self._model_manager.find_descendant_models(class_iri)
for model in models:
if model.class_iri is None:
continue
model.declare_method(method, name, class_iri)
def new(self, id=None, types=None, hashless_iri=None, collection_iri=None, **kwargs):
"""Creates a new :class:`~oldman.resource.Resource` object **without saving it** in the `data_store`.
The `kwargs` dict can contains regular attribute key-values that will be assigned to
:class:`~oldman.attribute.OMAttribute` objects.
:param id: IRI of the new resource. Defaults to `None`.
If not given, the IRI is generated by the IRI generator of the main model.
:param types: IRIs of RDFS classes the resource is instance of. Defaults to `None`.
Note that these IRIs are used to find the models of the resource
(see :func:`~oldman.resource.manager.ResourceManager.find_models_and_types` for more details).
:param hashless_iri: hash-less IRI that MAY be considered when generating an IRI for the new resource.
Defaults to `None`. Ignored if `id` is given. Must be `None` if `collection_iri` is given.
:param collection_iri: IRI of the controller to which this resource belongs. This information
is used to generate a new IRI if no `id` is given. The IRI generator may ignore it.
Defaults to `None`. Must be `None` if `hashless_iri` is given.
:return: A new :class:`~oldman.resource.Resource` object.
"""
if (types is None or len(types) == 0) and len(kwargs) == 0:
name = id if id is not None else ""
self._logger.info(u"""New resource %s has no type nor attribute.
As such, nothing is stored in the data graph.""" % name)
# Store of the resource
store = self._store_selector.select_store(id=id, types=types, hashless_iri=hashless_iri,
collection_iri=collection_iri, **kwargs)
return ClientResource(self, self._model_manager, store, id=id, types=types, hashless_iri=hashless_iri,
collection_iri=collection_iri, **kwargs)
def create(self, id=None, types=None, hashless_iri=None, collection_iri=None, **kwargs):
"""Creates a new resource and save it in the `data_store`.
See :func:`~oldman.resource.manager.ResourceManager.new` for more details.
"""
return self.new(id=id, types=types, hashless_iri=hashless_iri,
collection_iri=collection_iri, **kwargs).save()
def get(self, id=None, types=None, hashless_iri=None, eager_with_reversed_attributes=True, **kwargs):
"""See :func:`oldman.store.datastore.DataStore.get`."""
#TODO: consider parallelism
store_resources = [store.get(id=id, types=types, hashless_iri=hashless_iri,
eager_with_reversed_attributes=eager_with_reversed_attributes, **kwargs)
for store in self._store_selector.select_stores(id=id, types=types,
hashless_iri=hashless_iri, **kwargs)]
returned_store_resources = filter(lambda x: x, store_resources)
resources = self._model_manager.convert_store_resources(returned_store_resources)
resource_count = len(resources)
if resource_count == 1:
return resources[0]
elif resource_count == 0:
return None
#TODO: find a better exception and explain better
#TODO: see if relevant
raise Exception("Non unique object")
def filter(self, types=None, hashless_iri=None, limit=None, eager=False, pre_cache_properties=None, **kwargs):
"""See :func:`oldman.store.datastore.DataStore.filter`."""
#TODO: support again generator. Find a way to aggregate them.
resources = [r for store in self._store_selector.select_stores(types=types, hashless_iri=hashless_iri,
pre_cache_properties=pre_cache_properties,
**kwargs)
for r in store.filter(types=types, hashless_iri=hashless_iri, limit=limit, eager=eager,
pre_cache_properties=pre_cache_properties, **kwargs)]
return self._model_manager.convert_store_resources(resources)
def sparql_filter(self, query):
"""See :func:`oldman.store.datastore.DataStore.sparql_filter`."""
#TODO: support again generator. Find a way to aggregate them.
resources = [r for store in self._store_selector.select_sparql_stores(query)
for r in store.sparql_filter(query)]
return self._model_manager.convert_store_resources(resources)
def use_store_model(self, class_iri, data_store=None):
raise NotImplementedError("TODO: implement me here")
def import_store_models(self):
"""TODO: check possible conflicts with local models."""
for store in self._store_selector.data_stores:
for store_model in store.model_manager.models:
is_default = (store_model.class_iri is None)
self._model_manager.import_model(store_model, store,
is_default=is_default)
def get_model(self, class_name_or_iri):
return self._model_manager.get_model(class_name_or_iri)
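# Added usage sketch (API details are assumptions drawn from the docstrings above,
# not part of the original module):
#
#     manager = ClientResourceManager([some_data_store])
#     manager.import_store_models()
#     alice = manager.new(types=[u"http://example.org/Person"], name=u"Alice")
#     alice.save()                         # or: manager.create(types=[...], ...)
#     found = manager.filter(types=[u"http://example.org/Person"])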
| bsd-3-clause | 4,425,027,235,086,407,700 | 55.678832 | 121 | 0.616098 | false | 4.28532 | false | false | false |
| decodio/l10n_hr | l10n_hr_vat/wizard/wizard_pdv_knjiga.py | 1 | 11523 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Author:
# mail:
# Copyright:
# Contributions:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import os
import uuid
from lxml import etree, objectify
from openerp.osv import orm, fields
from openerp.tools.translate import _
from openerp.exceptions import Warning
class pdv_knjiga(orm.TransientModel):
_name = 'pdv.knjiga'
_inherit = 'account.common.report'
_columns = {
'chart_tax_id': fields.many2one('account.tax.code', 'Chart of Tax',
help='Select Charts of Taxes', required=True,
domain=[('parent_id', '=', False)]),
'knjiga_id': fields.many2one('l10n_hr_pdv.knjiga', 'Porezna knjiga',
help='Odaberite poreznu knjigu za ispis', required=True),
'date_start': fields.date('Od datuma'),
'date_stop': fields.date('Do datuma'),
'journal_ids': fields.many2many('account.journal', 'pdv_knjiga_journal_rel', 'pdv_knjiga_id', 'journal_id',
'Journals'),
'data': fields.binary('File', readonly=True),
'name': fields.char('Filename', size=128, readonly=True),
'state': fields.selection((('choose', 'choose'), ('get', 'get'),)),
}
def _get_tax(self, cr, uid, context=None):
taxes = self.pool.get('account.tax.code').search(cr, uid, [('parent_id', '=', False)], limit=1)
return taxes and taxes[0] or False
_defaults = {
'chart_tax_id': _get_tax,
'journal_ids': [],
'state': 'choose',
}
# def export_vat(self, cr, uid, ids, context=None):
# """
# Kopiram logiku iz parsera bez potezanja parsera
# """
# if context is None:
# context = {}
def create_vat(self, cr, uid, ids, context=None):
if context is None:
context = {}
datas = {'ids': context.get('active_ids', [])}
datas['form'] = self.read(cr, uid, ids)[0]
if not datas['form'].get('journal_ids', False):
sql = """SELECT id FROM account_journal"""
cr.execute(sql)
datas['form']['journal_ids'] = [a for (a,) in cr.fetchall()]
for field in datas['form'].keys():
if isinstance(datas['form'][field], tuple):
datas['form'][field] = datas['form'][field][0]
if datas['form']['knjiga_id']:
knjiga_type = self.pool.get('l10n_hr_pdv.knjiga').browse(cr, uid, datas['form']['knjiga_id']).type
else:
raise orm.except_orm(_('Knjiga nije upisana!'),
_("Knjiga je obavezan podatak kod ovog ispisa!"))
# if (datas['form']['period_from'] and not datas['form']['period_to']) or \
# (not datas['form']['period_from'] and datas['form']['period_to']):
# raise orm.except_orm(_('Krivi periodi!'),_("Potrebno je upisati oba perioda za ispis po periodima!"))
#
# if (datas['form']['date_start'] and not datas['form']['date_stop']) or \
# (not datas['form']['date_start'] and datas['form']['date_stop']):
# raise orm.except_orm(_('Krivo razdoblje!'),_("Potrebno je upisati oba datuma za ispis po datumima!"))
report_name = None
if knjiga_type == 'ira':
# report_name = 'knjiga.ira'
# report_name = 'knjiga.ira.eu.2014'
report_name = 'knjiga_ira_ods'
elif knjiga_type in ('ura', 'ura_uvoz'):
# report_name = 'knjiga.ura'
# report_name = 'knjiga.ura.eu.2014'
report_name = 'knjiga_ura_ods'
elif knjiga_type in ('ura_tu', 'ura_st', 'ura_nerezident'):
report_name = 'knjiga_ura_prijenos'
if context.get('xml'):
return self.create_xml(cr, uid, ids, context, datas, report_name)
return {
'type': 'ir.actions.report.xml',
'report_name': report_name,
'datas': datas,
}
def create_xml(self, cr, uid, ids, context=None, datas=False, report_name=False):
form = self.browse(cr, uid, ids)[0]
if not form.company_id.podrucje_djelatnosti:
raise Warning(_('Warning'),
_('Please set company data : Area of activity'))
if form.knjiga_id.type != 'ura':
raise Warning(_('Warning'),
_('Only URA is for XML export!'))
try:
from ..report import knjiga_ura as URA
from ..report.vat_book_report_common import get_vat_book_report_common
from . import xml_common
except:
            raise Warning(_('Warning'),
                          _('Important libraries missing!'))
        def decimal_num(num):
            # The source data sometimes comes back rounded to 3 decimal places,
            # so normalise the amount to a string with two decimals.
            num = str(round(num, 2))
            dec = num.split('.')[1]
            if dec == '0':
                # pad a bare '.0' to two decimals, e.g. '5.0' -> '5.00'
                num += '0'
            elif len(dec) > 2:
                # drop the surplus third decimal digit
                num = num[:-1]
            return num
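        # Worked examples for decimal_num above (illustrative values only,
        # not taken from the source data):
        #   decimal_num(5.0)   -> '5.00'  (padded to two decimals)
        #   decimal_num(5.678) -> '5.68'  (rounded to two decimals)
        #   decimal_num(5.5)   -> '5.5'   (a single non-zero decimal is left as-is)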
parser = URA.Parser(cr, uid, report_name, context=context)
parser.set_context(objects=[], data=datas, ids=[])
parser_ctx = parser.localcontext
lines = parser_ctx['get_lines'](datas)
total = parser_ctx['get_totals']()
total = total and total[0] or False
metadata, identifier = xml_common.create_xml_metadata(self, {
'naslov': u'Knjiga primljenih (ulaznih) računa',
'autor': ' '.join((
form.company_id.responsible_fname,
form.company_id.responsible_lname)),
'format': 'text/xml',
'jezik': 'hr-HR',
'uskladjenost': 'ObrazacURA-v1-0',
'tip': u'Elektronički obrazac',
'adresant': 'Ministarstvo Financija, Porezna uprava, Zagreb'
})
EM = objectify.ElementMaker(annotate=False)
date_start = form.date_start and form.date_start or \
form.period_from.date_start
date_stop = form.date_stop and form.date_stop or \
form.period_to.date_stop
zaglavlje = EM.Zaglavlje(
EM.Razdoblje(
EM.DatumOd(date_start),
EM.DatumDo(date_stop)),
EM.Obveznik(
EM.OIB(form.company_id.vat[2:]),
EM.Naziv(form.company_id.name),
EM.Adresa(
EM.Mjesto(form.company_id.city),
EM.Ulica(form.company_id.ulica),
EM.Broj(form.company_id.kbr),
EM.DodatakKucnomBroju(form.company_id.kbr_dodatak and \
form.company_id.kbr_dodatak or '')
),
EM.PodrucjeDjelatnosti(form.company_id.podrucje_djelatnosti),
EM.SifraDjelatnosti(form.company_id.l10n_hr_base_nkd_id.code),),
EM.ObracunSastavio(
EM.Ime(form.company_id.responsible_fname),
EM.Prezime(form.company_id.responsible_lname),
),
)
racuni = []
errors = []
for line in lines:
partner = line['partner_name'].split(', ')
partner_r4 = partner[0]
partner_r5 = ', '.join((partner[1], partner[2]))
if line['partner_oib'] == '':
errors.append(line)
continue
racuni.append(EM.R(
EM.R1(line['rbr'].replace('.', '')),
EM.R2(line['invoice_number']),
EM.R3(line['invoice_date']),
EM.R4(partner_r4),
EM.R5(partner_r5),
EM.R6(line['vat_type']),
EM.R7(line['partner_oib'].lstrip().rstrip()),
EM.R8(decimal_num(line['stupac6'])),
EM.R9(decimal_num(line['stupac7'])),
EM.R10(decimal_num(line['stupac8'])),
EM.R11(decimal_num(line['stupac9'])),
EM.R12(decimal_num(line['stupac10'])),
EM.R13(decimal_num(line['stupac11'])),
EM.R14(decimal_num(line['stupac12'])),
EM.R15(decimal_num(line['stupac13'])),
EM.R16(decimal_num(line['stupac14'])),
EM.R17(decimal_num(line['stupac15'])),
EM.R18(decimal_num(line['stupac16'])),
))
Racuni = EM.Racuni(EM.R)
Racuni.R = racuni
Ukupno = EM.Ukupno(
EM.U8(decimal_num(total['stupac6'])),
EM.U9(decimal_num(total['stupac7'])),
EM.U10(decimal_num(total['stupac8'])),
EM.U11(decimal_num(total['stupac9'])),
EM.U12(decimal_num(total['stupac10'])),
EM.U13(decimal_num(total['stupac11'])),
EM.U14(decimal_num(total['stupac12'])),
EM.U15(decimal_num(total['stupac13'])),
EM.U16(decimal_num(total['stupac14'])),
EM.U17(decimal_num(total['stupac15'])),
EM.U18(decimal_num(total['stupac16'])),
)
tijelo = EM.Tijelo(Racuni, Ukupno)
PDV = objectify.ElementMaker(
namespace='http://e-porezna.porezna-uprava.hr/sheme/zahtjevi/ObrazacURA/v1-0',
)
obrazac = PDV.ObrazacURA(metadata, zaglavlje, tijelo, verzijaSheme='1.0')
pdv_xml = xml_common.etree_tostring(self, obrazac)
pdv_xml = '<?xml version="1.0" encoding="UTF-8"?>\n' + pdv_xml
# print pdv_xml
# TODO: validate xml
# xml_common.validate_xml
file_path = os.path.dirname(os.path.abspath(__file__))
xml = {
'path': file_path,
'xsd_path': 'shema/URA',
'xsd_name': 'ObrazacURA-v1-0.xsd',
'xml': pdv_xml
}
valid = xml_common.validate_xml(self, xml)
data64 = base64.encodestring(pdv_xml)
xml_name = 'PDV_Obrazac_%s_%s.XML' % (date_start.replace('-', ''),
date_stop.replace('-',''))
form.write({'state': 'get',
'data': data64,
'name': xml_name
})
if errors:
            msg = "Errors\n"
for e in errors:
msg += "%s - %s\n" % (e['rbr'], e['partner_name'])
raise Warning('Nedostaje OIB', msg)
return {
'type': 'ir.actions.act_window',
'res_model': 'pdv.knjiga',
'view_mode': 'form',
'view_type': 'form',
'res_id': ids[0],
'views': [(False, 'form')],
'target': 'new',
}
|
agpl-3.0
| 8,604,639,130,252,183,000
| 39.566901
| 115
| 0.512369
| false
| 3.454573
| false
| false
| false
|
dreadrel/UWF_2014_spring_COP3990C-2507
|
notebooks/scripts/book_code/code/getattribute-person.py
|
1
|
1434
|
class Person(object):                       # Portable: 2.X or 3.X (object needed in 2.X for __getattribute__)
def __init__(self, name): # On [Person()]
self._name = name # Triggers __setattr__!
def __getattribute__(self, attr): # On [obj.any]
print('get: ' + attr)
if attr == 'name': # Intercept all names
attr = '_name' # Map to internal name
return object.__getattribute__(self, attr) # Avoid looping here
def __setattr__(self, attr, value): # On [obj.any = value]
print('set: ' + attr)
if attr == 'name':
attr = '_name' # Set internal name
self.__dict__[attr] = value # Avoid looping here
def __delattr__(self, attr): # On [del obj.any]
print('del: ' + attr)
if attr == 'name':
attr = '_name' # Avoid looping here too
del self.__dict__[attr] # but much less common
bob = Person('Bob Smith') # bob has a managed attribute
print(bob.name)                             # Runs __getattribute__
bob.name = 'Robert Smith' # Runs __setattr__
print(bob.name)
del bob.name # Runs __delattr__
print('-'*20)
sue = Person('Sue Jones')                   # sue gets a managed attribute too
print(sue.name)
#print(Person.name.__doc__) # No equivalent here
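# A sketch of the expected console trace when this script is run; note that the
# self.__dict__ fetches inside __setattr__ and __delattr__ are themselves routed
# through __getattribute__, which is why the extra 'get: __dict__' lines appear:
#
#   set: _name
#   get: __dict__
#   get: name
#   Bob Smith
#   set: name
#   get: __dict__
#   get: name
#   Robert Smith
#   del: name
#   get: __dict__
#   --------------------
#   set: _name
#   get: __dict__
#   get: name
#   Sue Jones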
|
apache-2.0
| 6,289,622,435,151,963,000
| 43.8125
| 76
| 0.459554
| false
| 4.156522
| false
| false
| false
|
kissgyorgy/Womanager
|
mainwindow.py
|
1
|
47297
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created: Thu Oct 10 19:57:03 2013
# by: PyQt4 UI code generator 4.10.3
#
# WARNING! All changes made in this file will be lost!
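# To change the UI, edit 'mainwindow.ui' in Qt Designer and regenerate this
# module (typically with something like: pyuic4 mainwindow.ui -o mainwindow.py).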
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(756, 515)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.centralwidget)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.groupBox = QtGui.QGroupBox(self.centralwidget)
self.groupBox.setEnabled(True)
self.groupBox.setMinimumSize(QtCore.QSize(300, 75))
self.groupBox.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
self.groupBox.setVisible(False)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.label_4 = QtGui.QLabel(self.groupBox)
self.label_4.setGeometry(QtCore.QRect(30, 30, 62, 16))
self.label_4.setMargin(0)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.label_5 = QtGui.QLabel(self.groupBox)
self.label_5.setGeometry(QtCore.QRect(30, 50, 151, 16))
self.label_5.setStyleSheet(_fromUtf8(""))
self.label_5.setObjectName(_fromUtf8("label_5"))
self.label = QtGui.QLabel(self.groupBox)
self.label.setGeometry(QtCore.QRect(10, 30, 16, 16))
self.label.setStyleSheet(_fromUtf8("background-color: rgb(255, 155, 103);"))
self.label.setText(_fromUtf8(""))
self.label.setObjectName(_fromUtf8("label"))
self.label_2 = QtGui.QLabel(self.groupBox)
self.label_2.setGeometry(QtCore.QRect(10, 50, 16, 16))
self.label_2.setStyleSheet(_fromUtf8("background-color: rgb(168, 255, 171);"))
self.label_2.setText(_fromUtf8(""))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.label_3 = QtGui.QLabel(self.groupBox)
self.label_3.setGeometry(QtCore.QRect(90, 30, 16, 16))
self.label_3.setStyleSheet(_fromUtf8("background-color: rgb(255,255, 255);"))
self.label_3.setText(_fromUtf8(""))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.label_7 = QtGui.QLabel(self.groupBox)
self.label_7.setGeometry(QtCore.QRect(110, 30, 71, 16))
self.label_7.setObjectName(_fromUtf8("label_7"))
self.label_8 = QtGui.QLabel(self.groupBox)
self.label_8.setGeometry(QtCore.QRect(200, 30, 16, 16))
self.label_8.setAutoFillBackground(True)
self.label_8.setAlignment(QtCore.Qt.AlignCenter)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.label_9 = QtGui.QLabel(self.groupBox)
self.label_9.setGeometry(QtCore.QRect(220, 30, 71, 16))
self.label_9.setObjectName(_fromUtf8("label_9"))
self.horizontalLayout.addWidget(self.groupBox)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.dateEdit = QtGui.QDateEdit(self.centralwidget)
self.dateEdit.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.dateEdit.setAlignment(QtCore.Qt.AlignCenter)
self.dateEdit.setDate(QtCore.QDate(2013, 8, 1))
self.dateEdit.setMinimumDate(QtCore.QDate(1900, 8, 1))
self.dateEdit.setCurrentSection(QtGui.QDateTimeEdit.MonthSection)
self.dateEdit.setCalendarPopup(False)
self.dateEdit.setCurrentSectionIndex(1)
self.dateEdit.setObjectName(_fromUtf8("dateEdit"))
self.gridLayout.addWidget(self.dateEdit, 1, 1, 1, 1)
self.label_6 = QtGui.QLabel(self.centralwidget)
self.label_6.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.gridLayout.addWidget(self.label_6, 0, 1, 1, 1)
self.horizontalLayout.addLayout(self.gridLayout)
self.pushButton = QtGui.QPushButton(self.centralwidget)
self.pushButton.setMinimumSize(QtCore.QSize(0, 75))
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.horizontalLayout.addWidget(self.pushButton)
self.verticalLayout_2.addLayout(self.horizontalLayout)
self.first_month_label = QtGui.QLabel(self.centralwidget)
self.first_month_label.setText(_fromUtf8(""))
self.first_month_label.setAlignment(QtCore.Qt.AlignCenter)
self.first_month_label.setObjectName(_fromUtf8("first_month_label"))
self.verticalLayout_2.addWidget(self.first_month_label)
self.first_month_table = QtGui.QTableWidget(self.centralwidget)
self.first_month_table.setMinimumSize(QtCore.QSize(0, 0))
self.first_month_table.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.first_month_table.setObjectName(_fromUtf8("first_month_table"))
self.first_month_table.setColumnCount(31)
self.first_month_table.setRowCount(3)
item = QtGui.QTableWidgetItem()
self.first_month_table.setVerticalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setVerticalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setVerticalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
font = QtGui.QFont()
font.setPointSize(13)
item.setFont(font)
self.first_month_table.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(3, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(4, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(5, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(6, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(7, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(8, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(9, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(10, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(11, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(12, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(13, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(14, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(15, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(16, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(17, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(18, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(19, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(20, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(21, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(22, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(23, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(24, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(25, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(26, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(27, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(28, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(29, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setHorizontalHeaderItem(30, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(0, 0, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(0, 1, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(0, 2, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(0, 3, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(0, 4, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(0, 5, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(0, 6, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(0, 7, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(0, 8, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(0, 9, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(0, 10, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(0, 11, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(0, 12, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 0, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 1, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 2, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 3, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 4, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 5, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 6, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 7, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 8, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 9, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 10, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 11, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 12, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 13, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 14, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 15, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 16, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 17, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 18, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 19, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 20, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 21, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 22, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 23, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 24, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 25, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 26, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 27, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 28, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 29, item)
item = QtGui.QTableWidgetItem()
self.first_month_table.setItem(1, 30, item)
self.first_month_table.horizontalHeader().setDefaultSectionSize(22)
self.first_month_table.horizontalHeader().setMinimumSectionSize(5)
self.first_month_table.verticalHeader().setDefaultSectionSize(22)
self.first_month_table.verticalHeader().setMinimumSectionSize(5)
self.verticalLayout_2.addWidget(self.first_month_table)
self.second_month_label = QtGui.QLabel(self.centralwidget)
self.second_month_label.setText(_fromUtf8(""))
self.second_month_label.setAlignment(QtCore.Qt.AlignCenter)
self.second_month_label.setObjectName(_fromUtf8("second_month_label"))
self.verticalLayout_2.addWidget(self.second_month_label)
self.second_month_table = QtGui.QTableWidget(self.centralwidget)
self.second_month_table.setMinimumSize(QtCore.QSize(0, 0))
self.second_month_table.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.second_month_table.setLayoutDirection(QtCore.Qt.LeftToRight)
self.second_month_table.setObjectName(_fromUtf8("second_month_table"))
self.second_month_table.setColumnCount(31)
self.second_month_table.setRowCount(3)
item = QtGui.QTableWidgetItem()
self.second_month_table.setVerticalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setVerticalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setVerticalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
font = QtGui.QFont()
font.setPointSize(13)
item.setFont(font)
self.second_month_table.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(3, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(4, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(5, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(6, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(7, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(8, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(9, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(10, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(11, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(12, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(13, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(14, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(15, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(16, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(17, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(18, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(19, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(20, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(21, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(22, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(23, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(24, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(25, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(26, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(27, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(28, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(29, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setHorizontalHeaderItem(30, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 0, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 1, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 2, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 3, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 4, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 5, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 6, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 7, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 8, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 9, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 10, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 11, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 12, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(0, 13, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 0, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 1, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 2, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 3, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 4, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 5, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 6, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 7, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 8, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 9, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 10, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 11, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 12, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 13, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 14, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 15, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 16, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 17, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 18, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 19, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 20, item)
item = QtGui.QTableWidgetItem()
self.second_month_table.setItem(1, 21, item)
self.second_month_table.horizontalHeader().setDefaultSectionSize(22)
self.second_month_table.horizontalHeader().setMinimumSectionSize(5)
self.second_month_table.verticalHeader().setDefaultSectionSize(22)
self.second_month_table.verticalHeader().setMinimumSectionSize(5)
self.verticalLayout_2.addWidget(self.second_month_table)
self.third_month_label = QtGui.QLabel(self.centralwidget)
self.third_month_label.setText(_fromUtf8(""))
self.third_month_label.setAlignment(QtCore.Qt.AlignCenter)
self.third_month_label.setObjectName(_fromUtf8("third_month_label"))
self.verticalLayout_2.addWidget(self.third_month_label)
self.third_month_table = QtGui.QTableWidget(self.centralwidget)
self.third_month_table.setMinimumSize(QtCore.QSize(0, 0))
self.third_month_table.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.third_month_table.setObjectName(_fromUtf8("third_month_table"))
self.third_month_table.setColumnCount(31)
self.third_month_table.setRowCount(3)
item = QtGui.QTableWidgetItem()
self.third_month_table.setVerticalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setVerticalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setVerticalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
font = QtGui.QFont()
font.setPointSize(13)
item.setFont(font)
self.third_month_table.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(3, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(4, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(5, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(6, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(7, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(8, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(9, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(10, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(11, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(12, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(13, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(14, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(15, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(16, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(17, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(18, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(19, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(20, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(21, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(22, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(23, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(24, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(25, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(26, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(27, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(28, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(29, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setHorizontalHeaderItem(30, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 0, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 1, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 2, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 3, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 4, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 5, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 6, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 7, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 8, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 9, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 10, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 11, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 12, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 13, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 14, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 15, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 16, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 17, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(0, 18, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 0, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 1, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 2, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 3, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 4, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 5, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 6, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 7, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 8, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 9, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 10, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 11, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 12, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 13, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 14, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 15, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 16, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 17, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 18, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 19, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 20, item)
item = QtGui.QTableWidgetItem()
self.third_month_table.setItem(1, 21, item)
self.third_month_table.horizontalHeader().setDefaultSectionSize(22)
self.third_month_table.horizontalHeader().setMinimumSectionSize(5)
self.third_month_table.verticalHeader().setDefaultSectionSize(22)
self.third_month_table.verticalHeader().setMinimumSectionSize(5)
self.verticalLayout_2.addWidget(self.third_month_table)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 756, 22))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.fileMenu = QtGui.QMenu(self.menubar)
self.fileMenu.setObjectName(_fromUtf8("fileMenu"))
self.helpMenu = QtGui.QMenu(self.menubar)
self.helpMenu.setObjectName(_fromUtf8("helpMenu"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.actionSave = QtGui.QAction(MainWindow)
self.actionSave.setObjectName(_fromUtf8("actionSave"))
self.actionQuit = QtGui.QAction(MainWindow)
self.actionQuit.setObjectName(_fromUtf8("actionQuit"))
self.actionLoad = QtGui.QAction(MainWindow)
self.actionLoad.setObjectName(_fromUtf8("actionLoad"))
self.actionAbout = QtGui.QAction(MainWindow)
self.actionAbout.setObjectName(_fromUtf8("actionAbout"))
self.actionHelp = QtGui.QAction(MainWindow)
self.actionHelp.setObjectName(_fromUtf8("actionHelp"))
self.actionExport = QtGui.QAction(MainWindow)
self.actionExport.setObjectName(_fromUtf8("actionExport"))
self.fileMenu.addAction(self.actionSave)
self.fileMenu.addAction(self.actionLoad)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.actionExport)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.actionQuit)
self.helpMenu.addAction(self.actionHelp)
self.helpMenu.addSeparator()
self.helpMenu.addAction(self.actionAbout)
self.menubar.addAction(self.fileMenu.menuAction())
self.menubar.addAction(self.helpMenu.menuAction())
self.retranslateUi(MainWindow)
QtCore.QObject.connect(self.pushButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.groupBox.show)
QtCore.QObject.connect(self.actionQuit, QtCore.SIGNAL(_fromUtf8("triggered()")), MainWindow.close)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.pushButton, self.dateEdit)
MainWindow.setTabOrder(self.dateEdit, self.first_month_table)
MainWindow.setTabOrder(self.first_month_table, self.second_month_table)
MainWindow.setTabOrder(self.second_month_table, self.third_month_table)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "Womanager", None))
self.groupBox.setTitle(_translate("MainWindow", "Legend", None))
self.label_4.setText(_translate("MainWindow", "Friday", None))
self.label_5.setText(_translate("MainWindow", "3rd week free weekday", None))
self.label_7.setText(_translate("MainWindow", "free day", None))
self.label_8.setText(_translate("MainWindow", "X", None))
self.label_9.setText(_translate("MainWindow", "workday", None))
self.dateEdit.setDisplayFormat(_translate("MainWindow", "yyyy.MM.", None))
self.label_6.setText(_translate("MainWindow", "first month:", None))
self.pushButton.setText(_translate("MainWindow", "Manage!", None))
item = self.first_month_table.verticalHeaderItem(0)
item.setText(_translate("MainWindow", "Jack", None))
item = self.first_month_table.verticalHeaderItem(1)
item.setText(_translate("MainWindow", "Jane", None))
item = self.first_month_table.verticalHeaderItem(2)
item.setText(_translate("MainWindow", "Joe", None))
item = self.first_month_table.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "1", None))
item = self.first_month_table.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "2", None))
item = self.first_month_table.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "3", None))
item = self.first_month_table.horizontalHeaderItem(3)
item.setText(_translate("MainWindow", "4", None))
item = self.first_month_table.horizontalHeaderItem(4)
item.setText(_translate("MainWindow", "5", None))
item = self.first_month_table.horizontalHeaderItem(5)
item.setText(_translate("MainWindow", "6", None))
item = self.first_month_table.horizontalHeaderItem(6)
item.setText(_translate("MainWindow", "7", None))
item = self.first_month_table.horizontalHeaderItem(7)
item.setText(_translate("MainWindow", "8", None))
item = self.first_month_table.horizontalHeaderItem(8)
item.setText(_translate("MainWindow", "9", None))
item = self.first_month_table.horizontalHeaderItem(9)
item.setText(_translate("MainWindow", "10", None))
item = self.first_month_table.horizontalHeaderItem(10)
item.setText(_translate("MainWindow", "11", None))
item = self.first_month_table.horizontalHeaderItem(11)
item.setText(_translate("MainWindow", "12", None))
item = self.first_month_table.horizontalHeaderItem(12)
item.setText(_translate("MainWindow", "13", None))
item = self.first_month_table.horizontalHeaderItem(13)
item.setText(_translate("MainWindow", "14", None))
item = self.first_month_table.horizontalHeaderItem(14)
item.setText(_translate("MainWindow", "15", None))
item = self.first_month_table.horizontalHeaderItem(15)
item.setText(_translate("MainWindow", "16", None))
item = self.first_month_table.horizontalHeaderItem(16)
item.setText(_translate("MainWindow", "17", None))
item = self.first_month_table.horizontalHeaderItem(17)
item.setText(_translate("MainWindow", "18", None))
item = self.first_month_table.horizontalHeaderItem(18)
item.setText(_translate("MainWindow", "19", None))
item = self.first_month_table.horizontalHeaderItem(19)
item.setText(_translate("MainWindow", "20", None))
item = self.first_month_table.horizontalHeaderItem(20)
item.setText(_translate("MainWindow", "21", None))
item = self.first_month_table.horizontalHeaderItem(21)
item.setText(_translate("MainWindow", "22", None))
item = self.first_month_table.horizontalHeaderItem(22)
item.setText(_translate("MainWindow", "23", None))
item = self.first_month_table.horizontalHeaderItem(23)
item.setText(_translate("MainWindow", "24", None))
item = self.first_month_table.horizontalHeaderItem(24)
item.setText(_translate("MainWindow", "25", None))
item = self.first_month_table.horizontalHeaderItem(25)
item.setText(_translate("MainWindow", "26", None))
item = self.first_month_table.horizontalHeaderItem(26)
item.setText(_translate("MainWindow", "27", None))
item = self.first_month_table.horizontalHeaderItem(27)
item.setText(_translate("MainWindow", "28", None))
item = self.first_month_table.horizontalHeaderItem(28)
item.setText(_translate("MainWindow", "29", None))
item = self.first_month_table.horizontalHeaderItem(29)
item.setText(_translate("MainWindow", "30", None))
item = self.first_month_table.horizontalHeaderItem(30)
item.setText(_translate("MainWindow", "31", None))
__sortingEnabled = self.first_month_table.isSortingEnabled()
self.first_month_table.setSortingEnabled(False)
self.first_month_table.setSortingEnabled(__sortingEnabled)
item = self.second_month_table.verticalHeaderItem(0)
item.setText(_translate("MainWindow", "Jack", None))
item = self.second_month_table.verticalHeaderItem(1)
item.setText(_translate("MainWindow", "Jane", None))
item = self.second_month_table.verticalHeaderItem(2)
item.setText(_translate("MainWindow", "Joe", None))
item = self.second_month_table.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "1", None))
item = self.second_month_table.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "2", None))
item = self.second_month_table.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "3", None))
item = self.second_month_table.horizontalHeaderItem(3)
item.setText(_translate("MainWindow", "4", None))
item = self.second_month_table.horizontalHeaderItem(4)
item.setText(_translate("MainWindow", "5", None))
item = self.second_month_table.horizontalHeaderItem(5)
item.setText(_translate("MainWindow", "6", None))
item = self.second_month_table.horizontalHeaderItem(6)
item.setText(_translate("MainWindow", "7", None))
item = self.second_month_table.horizontalHeaderItem(7)
item.setText(_translate("MainWindow", "8", None))
item = self.second_month_table.horizontalHeaderItem(8)
item.setText(_translate("MainWindow", "9", None))
item = self.second_month_table.horizontalHeaderItem(9)
item.setText(_translate("MainWindow", "10", None))
item = self.second_month_table.horizontalHeaderItem(10)
item.setText(_translate("MainWindow", "11", None))
item = self.second_month_table.horizontalHeaderItem(11)
item.setText(_translate("MainWindow", "12", None))
item = self.second_month_table.horizontalHeaderItem(12)
item.setText(_translate("MainWindow", "13", None))
item = self.second_month_table.horizontalHeaderItem(13)
item.setText(_translate("MainWindow", "14", None))
item = self.second_month_table.horizontalHeaderItem(14)
item.setText(_translate("MainWindow", "15", None))
item = self.second_month_table.horizontalHeaderItem(15)
item.setText(_translate("MainWindow", "16", None))
item = self.second_month_table.horizontalHeaderItem(16)
item.setText(_translate("MainWindow", "17", None))
item = self.second_month_table.horizontalHeaderItem(17)
item.setText(_translate("MainWindow", "18", None))
item = self.second_month_table.horizontalHeaderItem(18)
item.setText(_translate("MainWindow", "19", None))
item = self.second_month_table.horizontalHeaderItem(19)
item.setText(_translate("MainWindow", "20", None))
item = self.second_month_table.horizontalHeaderItem(20)
item.setText(_translate("MainWindow", "21", None))
item = self.second_month_table.horizontalHeaderItem(21)
item.setText(_translate("MainWindow", "22", None))
item = self.second_month_table.horizontalHeaderItem(22)
item.setText(_translate("MainWindow", "23", None))
item = self.second_month_table.horizontalHeaderItem(23)
item.setText(_translate("MainWindow", "24", None))
item = self.second_month_table.horizontalHeaderItem(24)
item.setText(_translate("MainWindow", "25", None))
item = self.second_month_table.horizontalHeaderItem(25)
item.setText(_translate("MainWindow", "26", None))
item = self.second_month_table.horizontalHeaderItem(26)
item.setText(_translate("MainWindow", "27", None))
item = self.second_month_table.horizontalHeaderItem(27)
item.setText(_translate("MainWindow", "28", None))
item = self.second_month_table.horizontalHeaderItem(28)
item.setText(_translate("MainWindow", "29", None))
item = self.second_month_table.horizontalHeaderItem(29)
item.setText(_translate("MainWindow", "30", None))
item = self.second_month_table.horizontalHeaderItem(30)
item.setText(_translate("MainWindow", "31", None))
__sortingEnabled = self.second_month_table.isSortingEnabled()
self.second_month_table.setSortingEnabled(False)
self.second_month_table.setSortingEnabled(__sortingEnabled)
item = self.third_month_table.verticalHeaderItem(0)
item.setText(_translate("MainWindow", "Jack", None))
item = self.third_month_table.verticalHeaderItem(1)
item.setText(_translate("MainWindow", "Jane", None))
item = self.third_month_table.verticalHeaderItem(2)
item.setText(_translate("MainWindow", "Joe", None))
item = self.third_month_table.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "1", None))
item = self.third_month_table.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "2", None))
item = self.third_month_table.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "3", None))
item = self.third_month_table.horizontalHeaderItem(3)
item.setText(_translate("MainWindow", "4", None))
item = self.third_month_table.horizontalHeaderItem(4)
item.setText(_translate("MainWindow", "5", None))
item = self.third_month_table.horizontalHeaderItem(5)
item.setText(_translate("MainWindow", "6", None))
item = self.third_month_table.horizontalHeaderItem(6)
item.setText(_translate("MainWindow", "7", None))
item = self.third_month_table.horizontalHeaderItem(7)
item.setText(_translate("MainWindow", "8", None))
item = self.third_month_table.horizontalHeaderItem(8)
item.setText(_translate("MainWindow", "9", None))
item = self.third_month_table.horizontalHeaderItem(9)
item.setText(_translate("MainWindow", "10", None))
item = self.third_month_table.horizontalHeaderItem(10)
item.setText(_translate("MainWindow", "11", None))
item = self.third_month_table.horizontalHeaderItem(11)
item.setText(_translate("MainWindow", "12", None))
item = self.third_month_table.horizontalHeaderItem(12)
item.setText(_translate("MainWindow", "13", None))
item = self.third_month_table.horizontalHeaderItem(13)
item.setText(_translate("MainWindow", "14", None))
item = self.third_month_table.horizontalHeaderItem(14)
item.setText(_translate("MainWindow", "15", None))
item = self.third_month_table.horizontalHeaderItem(15)
item.setText(_translate("MainWindow", "16", None))
item = self.third_month_table.horizontalHeaderItem(16)
item.setText(_translate("MainWindow", "17", None))
item = self.third_month_table.horizontalHeaderItem(17)
item.setText(_translate("MainWindow", "18", None))
item = self.third_month_table.horizontalHeaderItem(18)
item.setText(_translate("MainWindow", "19", None))
item = self.third_month_table.horizontalHeaderItem(19)
item.setText(_translate("MainWindow", "20", None))
item = self.third_month_table.horizontalHeaderItem(20)
item.setText(_translate("MainWindow", "21", None))
item = self.third_month_table.horizontalHeaderItem(21)
item.setText(_translate("MainWindow", "22", None))
item = self.third_month_table.horizontalHeaderItem(22)
item.setText(_translate("MainWindow", "23", None))
item = self.third_month_table.horizontalHeaderItem(23)
item.setText(_translate("MainWindow", "24", None))
item = self.third_month_table.horizontalHeaderItem(24)
item.setText(_translate("MainWindow", "25", None))
item = self.third_month_table.horizontalHeaderItem(25)
item.setText(_translate("MainWindow", "26", None))
item = self.third_month_table.horizontalHeaderItem(26)
item.setText(_translate("MainWindow", "27", None))
item = self.third_month_table.horizontalHeaderItem(27)
item.setText(_translate("MainWindow", "28", None))
item = self.third_month_table.horizontalHeaderItem(28)
item.setText(_translate("MainWindow", "29", None))
item = self.third_month_table.horizontalHeaderItem(29)
item.setText(_translate("MainWindow", "30", None))
item = self.third_month_table.horizontalHeaderItem(30)
item.setText(_translate("MainWindow", "31", None))
__sortingEnabled = self.third_month_table.isSortingEnabled()
self.third_month_table.setSortingEnabled(False)
self.third_month_table.setSortingEnabled(__sortingEnabled)
self.fileMenu.setTitle(_translate("MainWindow", "File", None))
self.helpMenu.setTitle(_translate("MainWindow", "Help", None))
self.actionSave.setText(_translate("MainWindow", "Save", None))
self.actionQuit.setText(_translate("MainWindow", "Quit", None))
self.actionLoad.setText(_translate("MainWindow", "Load", None))
self.actionAbout.setText(_translate("MainWindow", "About", None))
self.actionHelp.setText(_translate("MainWindow", "Help", None))
self.actionExport.setText(_translate("MainWindow", "Export...", None))
|
gpl-2.0
| -7,005,826,944,289,512,000
| 52.624717
| 106
| 0.672495
| false
| 3.771691
| false
| false
| false
|
smilebin818/wx-cqwdt
|
DBDATA/fileServer.py
|
1
|
1270
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import SocketServer
import struct
import os
# Format: name_len  --- one byte
#         name      --- name_len bytes
#         file_size --- four bytes, big-endian ('>l')
#         data      --- file_size bytes
# The received data is saved as `name` in the current directory.
# Refer to: http://blog.csdn.net/g__gle/article/details/8144968
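# A minimal client sketch for the protocol above (hypothetical, not part of
# this repo): send one length byte, the file name, a 4-byte big-endian size,
# then the raw file body; the server replies with 4-byte progress counters.
#
#   import socket, struct, os
#
#   def send_file(host, path):
#       name = os.path.basename(path)
#       size = os.path.getsize(path)
#       sock = socket.create_connection((host, 7819))
#       sock.sendall(chr(len(name)) + name + struct.pack('>l', size))
#       with open(path, 'rb') as src:
#           chunk = src.read(4096)
#           while chunk:
#               sock.sendall(chunk)
#               chunk = src.read(4096)
#       sock.recv(4)  # final progress acknowledgement
#       sock.close()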
addr = ('', 7819)
class MyTCPHandler (SocketServer.StreamRequestHandler):
def handle(self):
name_len = ord(self.rfile.read(1))
name = self.rfile.read(name_len)
print "Get request: %s" % name
file_size = struct.unpack(">l", self.rfile.read(4))[0]
restfile = file_size
fd = open(name, 'wb')
package_cnt = 0
while restfile > 4096:
package_cnt += 1
cont = self.rfile.read(4096)
fd.write(cont)
restfile -= 4096
if package_cnt >= 5:
self.request.send(struct.pack('>l', file_size - restfile))
package_cnt = 0
self.request.send(struct.pack('>l', file_size - restfile))
fd.write(self.rfile.read(restfile))
fd.close()
print "Out: %s\n" % name
server = SocketServer.TCPServer(addr, MyTCPHandler)
print "Serving on port %s ..." % addr[1]
server.serve_forever()
|
gpl-3.0
| -6,804,128,155,356,753,000
| 28.534884
| 74
| 0.570866
| false
| 3.527778
| false
| false
| false
|
blaze225/zulip
|
zerver/views/users.py
|
1
|
16558
|
from __future__ import absolute_import
from typing import Text, Union, Optional, Dict, Any, List, Tuple
import os
import simplejson as json
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from django.shortcuts import redirect
from django.conf import settings
from six.moves import map
from zerver.decorator import has_request_variables, REQ, JsonableError, \
require_realm_admin
from zerver.forms import CreateUserForm
from zerver.lib.actions import do_change_avatar_fields, do_change_bot_owner, \
do_change_is_admin, do_change_default_all_public_streams, \
do_change_default_events_register_stream, do_change_default_sending_stream, \
do_create_user, do_deactivate_user, do_reactivate_user, do_regenerate_api_key
from zerver.lib.avatar import avatar_url, get_avatar_url
from zerver.lib.response import json_error, json_success
from zerver.lib.streams import access_stream_by_name
from zerver.lib.upload import upload_avatar_image
from zerver.lib.validator import check_bool, check_string
from zerver.lib.users import check_change_full_name, check_full_name
from zerver.lib.utils import generate_random_token
from zerver.models import UserProfile, Stream, Realm, Message, get_user_profile_by_email, \
email_allowed_for_realm, get_user_profile_by_id
from zproject.jinja2 import render_to_response
def deactivate_user_backend(request, user_profile, email):
# type: (HttpRequest, UserProfile, Text) -> HttpResponse
try:
target = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
return json_error(_('No such user'))
if target.is_bot:
return json_error(_('No such user'))
if check_last_admin(target):
return json_error(_('Cannot deactivate the only organization administrator'))
return _deactivate_user_profile_backend(request, user_profile, target)
def deactivate_user_own_backend(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
if user_profile.is_realm_admin and check_last_admin(user_profile):
return json_error(_('Cannot deactivate the only organization administrator'))
do_deactivate_user(user_profile)
return json_success()
def check_last_admin(user_profile):
# type: (UserProfile) -> bool
admins = set(user_profile.realm.get_admin_users())
return user_profile.is_realm_admin and len(admins) == 1
def deactivate_bot_backend(request, user_profile, email):
# type: (HttpRequest, UserProfile, Text) -> HttpResponse
try:
target = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
return json_error(_('No such bot'))
if not target.is_bot:
return json_error(_('No such bot'))
return _deactivate_user_profile_backend(request, user_profile, target)
def _deactivate_user_profile_backend(request, user_profile, target):
# type: (HttpRequest, UserProfile, UserProfile) -> HttpResponse
if not user_profile.can_admin_user(target):
return json_error(_('Insufficient permission'))
do_deactivate_user(target)
return json_success()
def reactivate_user_backend(request, user_profile, email):
# type: (HttpRequest, UserProfile, Text) -> HttpResponse
try:
target = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
return json_error(_('No such user'))
if not user_profile.can_admin_user(target):
return json_error(_('Insufficient permission'))
do_reactivate_user(target)
return json_success()
@has_request_variables
def update_user_backend(request, user_profile, email,
full_name=REQ(default="", validator=check_string),
is_admin=REQ(default=None, validator=check_bool)):
# type: (HttpRequest, UserProfile, Text, Optional[Text], Optional[bool]) -> HttpResponse
try:
target = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
return json_error(_('No such user'))
if not user_profile.can_admin_user(target):
return json_error(_('Insufficient permission'))
if is_admin is not None:
if not is_admin and check_last_admin(user_profile):
return json_error(_('Cannot remove the only organization administrator'))
do_change_is_admin(target, is_admin)
if (full_name is not None and target.full_name != full_name and
full_name.strip() != ""):
# We don't respect `name_changes_disabled` here because the request
# is on behalf of the administrator.
check_change_full_name(target, full_name)
return json_success()
# TODO: Since eventually we want to support using the same email with
# different organizations, we'll eventually want this to be a
# logged-in endpoint so that we can access the realm_id.
def avatar(request, email_or_id, medium=None):
# type: (HttpRequest, str, bool) -> HttpResponse
"""Accepts an email address or user ID and returns the avatar"""
try:
int(email_or_id)
except ValueError:
get_user_func = get_user_profile_by_email
else:
get_user_func = get_user_profile_by_id
try:
# If there is a valid user account passed in, use its avatar
user_profile = get_user_func(email_or_id)
url = avatar_url(user_profile, medium=medium)
except UserProfile.DoesNotExist:
# If there is no such user, treat it as a new gravatar
email = email_or_id
avatar_source = 'G'
avatar_version = 1
url = get_avatar_url(avatar_source, email, avatar_version, medium=medium)
# We can rely on the url already having query parameters. Because
# our templates depend on being able to use the ampersand to
# add query parameters to our url, get_avatar_url does '?x=x'
# hacks to prevent us from having to jump through decode/encode hoops.
assert '?' in url
url += '&' + request.META['QUERY_STRING']
return redirect(url)
def get_stream_name(stream):
# type: (Optional[Stream]) -> Optional[Text]
if stream:
return stream.name
return None
@has_request_variables
def patch_bot_backend(request, user_profile, email,
full_name=REQ(default=None),
bot_owner=REQ(default=None),
default_sending_stream=REQ(default=None),
default_events_register_stream=REQ(default=None),
default_all_public_streams=REQ(default=None, validator=check_bool)):
# type: (HttpRequest, UserProfile, Text, Optional[Text], Optional[Text], Optional[Text], Optional[Text], Optional[bool]) -> HttpResponse
try:
bot = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
return json_error(_('No such user'))
if not user_profile.can_admin_user(bot):
return json_error(_('Insufficient permission'))
if full_name is not None:
check_change_full_name(bot, full_name)
if bot_owner is not None:
owner = get_user_profile_by_email(bot_owner)
do_change_bot_owner(bot, owner)
if default_sending_stream is not None:
if default_sending_stream == "":
stream = None # type: Optional[Stream]
else:
(stream, recipient, sub) = access_stream_by_name(
user_profile, default_sending_stream)
do_change_default_sending_stream(bot, stream)
if default_events_register_stream is not None:
if default_events_register_stream == "":
stream = None
else:
(stream, recipient, sub) = access_stream_by_name(
user_profile, default_events_register_stream)
do_change_default_events_register_stream(bot, stream)
if default_all_public_streams is not None:
do_change_default_all_public_streams(bot, default_all_public_streams)
if len(request.FILES) == 0:
pass
elif len(request.FILES) == 1:
user_file = list(request.FILES.values())[0]
upload_avatar_image(user_file, user_profile, bot)
avatar_source = UserProfile.AVATAR_FROM_USER
do_change_avatar_fields(bot, avatar_source)
else:
return json_error(_("You may only upload one file at a time"))
json_result = dict(
full_name=bot.full_name,
avatar_url=avatar_url(bot),
default_sending_stream=get_stream_name(bot.default_sending_stream),
default_events_register_stream=get_stream_name(bot.default_events_register_stream),
default_all_public_streams=bot.default_all_public_streams,
)
# Don't include the bot owner in case it is not set.
# Default bots have no owner.
if bot.bot_owner is not None:
json_result['bot_owner'] = bot.bot_owner.email
return json_success(json_result)
@has_request_variables
def regenerate_bot_api_key(request, user_profile, email):
# type: (HttpRequest, UserProfile, Text) -> HttpResponse
try:
bot = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
return json_error(_('No such user'))
if not user_profile.can_admin_user(bot):
return json_error(_('Insufficient permission'))
do_regenerate_api_key(bot)
json_result = dict(
api_key = bot.api_key
)
return json_success(json_result)
@has_request_variables
def add_bot_backend(request, user_profile, full_name_raw=REQ("full_name"), short_name=REQ(),
default_sending_stream_name=REQ('default_sending_stream', default=None),
default_events_register_stream_name=REQ('default_events_register_stream', default=None),
default_all_public_streams=REQ(validator=check_bool, default=None)):
# type: (HttpRequest, UserProfile, Text, Text, Optional[Text], Optional[Text], Optional[bool]) -> HttpResponse
short_name += "-bot"
full_name = check_full_name(full_name_raw)
email = '%s@%s' % (short_name, user_profile.realm.get_bot_domain())
form = CreateUserForm({'full_name': full_name, 'email': email})
if not form.is_valid():
# We validate client-side as well
return json_error(_('Bad name or username'))
try:
get_user_profile_by_email(email)
return json_error(_("Username already in use"))
except UserProfile.DoesNotExist:
pass
if len(request.FILES) == 0:
avatar_source = UserProfile.AVATAR_FROM_GRAVATAR
elif len(request.FILES) != 1:
return json_error(_("You may only upload one file at a time"))
else:
avatar_source = UserProfile.AVATAR_FROM_USER
default_sending_stream = None
if default_sending_stream_name is not None:
(default_sending_stream, ignored_rec, ignored_sub) = access_stream_by_name(
user_profile, default_sending_stream_name)
default_events_register_stream = None
if default_events_register_stream_name is not None:
(default_events_register_stream, ignored_rec, ignored_sub) = access_stream_by_name(
user_profile, default_events_register_stream_name)
bot_profile = do_create_user(email=email, password='',
realm=user_profile.realm, full_name=full_name,
short_name=short_name, active=True,
bot_type=UserProfile.DEFAULT_BOT,
bot_owner=user_profile,
avatar_source=avatar_source,
default_sending_stream=default_sending_stream,
default_events_register_stream=default_events_register_stream,
default_all_public_streams=default_all_public_streams)
if len(request.FILES) == 1:
user_file = list(request.FILES.values())[0]
upload_avatar_image(user_file, user_profile, bot_profile)
json_result = dict(
api_key=bot_profile.api_key,
avatar_url=avatar_url(bot_profile),
default_sending_stream=get_stream_name(bot_profile.default_sending_stream),
default_events_register_stream=get_stream_name(bot_profile.default_events_register_stream),
default_all_public_streams=bot_profile.default_all_public_streams,
)
return json_success(json_result)
def get_bots_backend(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
bot_profiles = UserProfile.objects.filter(is_bot=True, is_active=True,
bot_owner=user_profile)
bot_profiles = bot_profiles.select_related('default_sending_stream', 'default_events_register_stream')
bot_profiles = bot_profiles.order_by('date_joined')
def bot_info(bot_profile):
# type: (UserProfile) -> Dict[str, Any]
default_sending_stream = get_stream_name(bot_profile.default_sending_stream)
default_events_register_stream = get_stream_name(bot_profile.default_events_register_stream)
return dict(
username=bot_profile.email,
full_name=bot_profile.full_name,
api_key=bot_profile.api_key,
avatar_url=avatar_url(bot_profile),
default_sending_stream=default_sending_stream,
default_events_register_stream=default_events_register_stream,
default_all_public_streams=bot_profile.default_all_public_streams,
)
return json_success({'bots': list(map(bot_info, bot_profiles))})
def get_members_backend(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
realm = user_profile.realm
admins = set(user_profile.realm.get_admin_users())
members = []
for profile in UserProfile.objects.select_related().filter(realm=realm):
member = {"full_name": profile.full_name,
"is_bot": profile.is_bot,
"is_active": profile.is_active,
"is_admin": (profile in admins),
"email": profile.email,
"user_id": profile.id,
"avatar_url": avatar_url(profile)}
if profile.is_bot and profile.bot_owner is not None:
member["bot_owner"] = profile.bot_owner.email
members.append(member)
return json_success({'members': members})
@require_realm_admin
@has_request_variables
def create_user_backend(request, user_profile, email=REQ(), password=REQ(),
full_name_raw=REQ("full_name"), short_name=REQ()):
# type: (HttpRequest, UserProfile, Text, Text, Text, Text) -> HttpResponse
full_name = check_full_name(full_name_raw)
form = CreateUserForm({'full_name': full_name, 'email': email})
if not form.is_valid():
return json_error(_('Bad name or username'))
# Check that the new user's email address belongs to the admin's realm
# (Since this is an admin API, we don't require the user to have been
# invited first.)
realm = user_profile.realm
if not email_allowed_for_realm(email, user_profile.realm):
return json_error(_("Email '%(email)s' does not belong to domain '%(domain)s'") %
{'email': email, 'domain': realm.domain})
try:
get_user_profile_by_email(email)
return json_error(_("Email '%s' already in use") % (email,))
except UserProfile.DoesNotExist:
pass
do_create_user(email, password, realm, full_name, short_name)
return json_success()
def generate_client_id():
# type: () -> Text
return generate_random_token(32)
def get_profile_backend(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
result = dict(pointer = user_profile.pointer,
client_id = generate_client_id(),
max_message_id = -1,
user_id = user_profile.id,
full_name = user_profile.full_name,
email = user_profile.email,
is_bot = user_profile.is_bot,
is_admin = user_profile.is_realm_admin,
short_name = user_profile.short_name)
messages = Message.objects.filter(usermessage__user_profile=user_profile).order_by('-id')[:1]
if messages:
result['max_message_id'] = messages[0].id
return json_success(result)
def authors_view(request):
# type: (HttpRequest) -> HttpResponse
with open(settings.CONTRIBUTORS_DATA) as f:
data = json.load(f)
return render_to_response(
'zerver/authors.html',
data,
request=request
)
|
apache-2.0
| -2,537,478,890,875,718,000
| 41.239796
| 140
| 0.650803
| false
| 3.877752
| false
| false
| false
|
dnbaker/emp
|
python/master_map.py
|
1
|
1618
|
#!/usr/bin/env python
import sys
import multiprocessing
import gzip
import os
from subprocess import check_call as cc, CalledProcessError
from download_genomes import is_valid_gzip, xfirstline
argv = sys.argv
def getopts():
import argparse
a = argparse.ArgumentParser()
a.add_argument("paths", nargs="+", help="Paths to as files.")
a.add_argument("--threads", "-p",
help="Number of threads to use while downloading.",
type=int, default=1)
a.add_argument("-o", "--out",
help="Path to write output. Default: stdout")
return a.parse_args()
def as2dict(path):
ret = {}
folder = "/".join(path.split("/")[:-1]) + "/"
for line in open(path):
if line.startswith("#"):
continue
toks = line.split("\t")
taxid = int(toks[5])
fn = folder + toks[19].split("/")[-1] + "_genomic.fna.gz"
if not os.path.isfile(fn):
sys.stderr.write("%s not a file. Continuing.\n" % fn)
continue
ret[xfirstline(fn)[1:].split()[0].decode()] = taxid
return ret
FTP_BASENAME = "ftp://ftp.ncbi.nlm.nih.gov/genomes/refseq/"
def main():
args = getopts()
master = {}
mini_dicts = (multiprocessing.Pool(args.threads).map if args.threads > 1
else map)(as2dict, args.paths)
with open(args.out, "w") if args.out else sys.stdout as outfile:
of = outfile.write
for mini in mini_dicts:
for k, v in mini.items():
of("%s\t%i\n" % (k, v))
return 0
if __name__ == "__main__":
sys.exit(main())
|
gpl-3.0
| 451,841,040,786,476,540
| 27.892857
| 76
| 0.567367
| false
| 3.509761
| false
| false
| false
|
kseetharam/genPolaron
|
xanalysis_groundstate_paper.py
|
1
|
26133
|
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from matplotlib.lines import Line2D
import matplotlib.colors as colors
from matplotlib.animation import writers
import os
import itertools
import pf_dynamic_cart as pfc
import pf_dynamic_sph as pfs
import Grid
from scipy import interpolate
from timeit import default_timer as timer
if __name__ == "__main__":
# # Initialization
# fm = matplotlib.font_manager.json_load(os.path.expanduser("~/.cache/matplotlib/fontlist-v310.json"))
# fm.findfont("serif", rebuild_if_missing=False)
matplotlib.rcParams['font.family'] = 'serif'
matplotlib.rcParams['font.serif'] = ['Adobe Garamond Pro']
# matplotlib.rcParams.update({'font.size': 12, 'text.usetex': True})
mpegWriter = writers['ffmpeg'](fps=0.75, bitrate=1800)
# ---- INITIALIZE GRIDS ----
(Lx, Ly, Lz) = (21, 21, 21)
(dx, dy, dz) = (0.375, 0.375, 0.375)
# (Lx, Ly, Lz) = (105, 105, 105)
# (dx, dy, dz) = (0.375, 0.375, 0.375)
NGridPoints_cart = (1 + 2 * Lx / dx) * (1 + 2 * Ly / dy) * (1 + 2 * Lz / dz)
# Toggle parameters
toggleDict = {'Dynamics': 'imaginary', 'Interaction': 'on', 'Grid': 'spherical', 'Coupling': 'twophonon', 'IRcuts': 'false', 'ReducedInterp': 'false', 'kGrid_ext': 'false'}
# ---- SET OUTPUT DATA FOLDER ----
datapath = '/Users/kis/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}/massRatio={:.1f}'.format(NGridPoints_cart, 1)
animpath = '/Users/kis/Dropbox/VariationalResearch/DataAnalysis/figs'
if toggleDict['Dynamics'] == 'real':
innerdatapath = datapath + '/redyn'
animpath = animpath + '/rdyn'
elif toggleDict['Dynamics'] == 'imaginary':
innerdatapath = datapath + '/imdyn'
animpath = animpath + '/idyn'
if toggleDict['Grid'] == 'cartesian':
innerdatapath = innerdatapath + '_cart'
elif toggleDict['Grid'] == 'spherical':
innerdatapath = innerdatapath + '_spherical'
if toggleDict['Coupling'] == 'frohlich':
innerdatapath = innerdatapath + '_froh'
animpath = animpath + '_frohlich'
elif toggleDict['Coupling'] == 'twophonon':
innerdatapath = innerdatapath
animpath = animpath + '_twophonon'
if toggleDict['IRcuts'] == 'true':
innerdatapath = innerdatapath + '_IRcuts'
elif toggleDict['IRcuts'] == 'false':
innerdatapath = innerdatapath
print(innerdatapath)
# # # Concatenate Individual Datasets (aIBi specific)
# aIBi_List = [-15.0, -12.5, -10.0, -9.0, -8.0, -7.0, -6.0, -5.0, -3.5, -2.0, -1.0, -0.75, -0.5, -0.1]
# for aIBi in aIBi_List:
# ds_list = []; P_list = []; mI_list = []
# for ind, filename in enumerate(os.listdir(innerdatapath)):
# if filename[0:14] == 'quench_Dataset':
# continue
# if filename[0:6] == 'interp':
# continue
# if filename[0:2] == 'mm':
# continue
# if float(filename[13:-3]) != aIBi:
# continue
# print(filename)
# ds = xr.open_dataset(innerdatapath + '/' + filename)
# ds_list.append(ds)
# P_list.append(ds.attrs['P'])
# mI_list.append(ds.attrs['mI'])
# s = sorted(zip(P_list, ds_list))
# g = itertools.groupby(s, key=lambda x: x[0])
# P_keys = []; P_ds_list = []; aIBi_ds_list = []
# for key, group in g:
# P_temp_list, ds_temp_list = zip(*list(group))
# P_keys.append(key) # note that key = P_temp_list[0]
# P_ds_list.append(ds_temp_list[0])
# with xr.concat(P_ds_list, pd.Index(P_keys, name='P')) as ds_tot:
# # ds_tot = xr.concat(P_ds_list, pd.Index(P_keys, name='P'))
# del(ds_tot.attrs['P']); del(ds_tot.attrs['nu']); del(ds_tot.attrs['gIB'])
# ds_tot.to_netcdf(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# # # Concatenate Individual Datasets (aIBi specific, IRcuts)
# IRrat_Vals = [1, 2, 5, 10, 50, 1e2, 5e2, 1e3, 5e3, 1e4]
# aIBi_List = [-10.0, -5.0, -2.0, -0.5]
# for IRrat in IRrat_Vals:
# IRdatapath = innerdatapath + '/IRratio_{:.1E}'.format(IRrat)
# for aIBi in aIBi_List:
# ds_list = []; P_list = []; mI_list = []
# for ind, filename in enumerate(os.listdir(IRdatapath)):
# if filename[0:14] == 'quench_Dataset':
# continue
# if filename[0:6] == 'interp':
# continue
# if filename[0:2] == 'mm':
# continue
# if float(filename[13:-3]) != aIBi:
# continue
# print(filename)
# ds = xr.open_dataset(IRdatapath + '/' + filename)
# ds_list.append(ds)
# P_list.append(ds.attrs['P'])
# mI_list.append(ds.attrs['mI'])
# s = sorted(zip(P_list, ds_list))
# g = itertools.groupby(s, key=lambda x: x[0])
# P_keys = []; P_ds_list = []; aIBi_ds_list = []
# for key, group in g:
# P_temp_list, ds_temp_list = zip(*list(group))
# P_keys.append(key) # note that key = P_temp_list[0]
# P_ds_list.append(ds_temp_list[0])
# with xr.concat(P_ds_list, pd.Index(P_keys, name='P')) as ds_tot:
# # ds_tot = xr.concat(P_ds_list, pd.Index(P_keys, name='P'))
# del(ds_tot.attrs['P']); del(ds_tot.attrs['nu']); del(ds_tot.attrs['gIB'])
# ds_tot.to_netcdf(IRdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# # Analysis of Total Dataset
aIBi = -2
# qds = xr.open_dataset(innerdatapath + '/quench_Dataset.nc')
# qds_aIBi = qds.sel(aIBi=aIBi)
qds = xr.open_dataset(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
qds_aIBi = qds
PVals = qds['P'].values
tVals = qds['t'].values
n0 = qds.attrs['n0']
gBB = qds.attrs['gBB']
mI = qds.attrs['mI']
mB = qds.attrs['mB']
nu = np.sqrt(n0 * gBB / mB)
aBB = (mB / (4 * np.pi)) * gBB
xi = (8 * np.pi * n0 * aBB)**(-1 / 2)
print(qds.attrs['k_mag_cutoff'] * xi)
aIBi_Vals = np.array([-12.5, -10.0, -9.0, -8.0, -7.0, -5.0, -3.5, -2.0, -1.0, -0.75, -0.5, -0.1]) # used by many plots (spherical)
# # PHASE DIAGRAM (SPHERICAL)
Pnormdes = 0.5
Pind = np.abs(PVals / (mI * nu) - Pnormdes).argmin()
P = PVals[Pind]
ZVals = np.zeros(aIBi_Vals.size)
for aind, aIBi in enumerate(aIBi_Vals):
qds_aIBi = xr.open_dataset(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
ZVals[aind] = np.exp(-1 * qds_aIBi.isel(P=Pind, t=-1)['Nph'].values)
xmin = np.min(aIBi_Vals)
xmax = 1.01 * np.max(aIBi_Vals)
fig, ax = plt.subplots()
ax.plot(aIBi_Vals, ZVals, 'g-')
ax.set_title('Quasiparticle Residue (' + r'$\frac{P}{m_{I}c_{BEC}}=$' + '{:.2f})'.format(P / (mI * nu)))
ax.set_xlabel(r'$a_{IB}^{-1}$')
ax.set_ylabel(r'$Z=e^{-N_{ph}}$')
ax.set_xlim([xmin, xmax])
ax.set_ylim([0, 1.1])
plt.show()
# # # # BOGOLIUBOV DISPERSION (SPHERICAL)
# kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', qds.coords['k'].values); kgrid.initArray_premade('th', qds.coords['th'].values)
# kVals = kgrid.getArray('k')
# wk_Vals = pfs.omegak(kVals, mB, n0, gBB)
# fig, ax = plt.subplots()
# ax.plot(kVals, wk_Vals, 'k-', label='')
# ax.plot(kVals, nu * kVals, 'b--', label=r'$c_{BEC}|k|$')
# ax.set_title('Bogoliubov Phonon Dispersion')
# ax.set_xlabel(r'$|k|$')
# ax.set_ylabel(r'$\omega_{|k|}$')
# ax.set_xlim([0, 2])
# ax.set_ylim([0, 3])
# ax.legend(loc=2, fontsize='x-large')
# plt.show()
# # # PHASE DIAGRAM (SPHERICAL)
# Pcrit = np.zeros(aIBi_Vals.size)
# for aind, aIBi in enumerate(aIBi_Vals):
# qds_aIBi = xr.open_dataset(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# CSAmp_ds = qds_aIBi['Real_CSAmp'] + 1j * qds_aIBi['Imag_CSAmp']
# kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', CSAmp_ds.coords['k'].values); kgrid.initArray_premade('th', CSAmp_ds.coords['th'].values)
# Energy_Vals_inf = np.zeros(PVals.size)
# for Pind, P in enumerate(PVals):
# CSAmp = CSAmp_ds.sel(P=P).isel(t=-1).values
# Energy_Vals_inf[Pind] = pfs.Energy(CSAmp, kgrid, P, aIBi, mI, mB, n0, gBB)
# Einf_tck = interpolate.splrep(PVals, Energy_Vals_inf, s=0)
# Pinf_Vals = np.linspace(np.min(PVals), np.max(PVals), 2 * PVals.size)
# Einf_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=0)
# Einf_2ndderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=2)
# # Pcrit[aind] = Pinf_Vals[np.argwhere(Einf_2ndderiv_Vals < 0)[-2][0] + 3]
# Pcrit[aind] = Pinf_Vals[np.argmin(np.gradient(Einf_2ndderiv_Vals)) - 0] # there is a little bit of fudging with the -3 here so that aIBi=-10 gives me Pcrit/(mI*c) = 1 -> I can also just generate data for weaker interactions and see if it's better
# Pcrit_norm = Pcrit / (mI * nu)
# Pcrit_tck = interpolate.splrep(aIBi_Vals, Pcrit_norm, s=0, k=3)
# aIBi_interpVals = np.linspace(np.min(aIBi_Vals), np.max(aIBi_Vals), 5 * aIBi_Vals.size)
# Pcrit_interpVals = 1 * interpolate.splev(aIBi_interpVals, Pcrit_tck, der=0)
# print(Pcrit_norm)
# print(Pcrit_norm[1], Pcrit_norm[5], Pcrit_norm[-5])
# scalefac = 1.0
# # scalefac = 0.95 # just to align weakly interacting case slightly to 1 (it's pretty much there, would just need higher resolution data)
# Pcrit_norm = scalefac * Pcrit_norm
# Pcrit_interpVals = scalefac * Pcrit_interpVals
# xmin = np.min(aIBi_interpVals)
# xmax = 1.01 * np.max(aIBi_interpVals)
# ymin = 0
# ymax = 1.01 * np.max(Pcrit_interpVals)
# font = {'family': 'serif', 'color': 'black', 'size': 14}
# sfont = {'family': 'serif', 'color': 'black', 'size': 13}
# fig, ax = plt.subplots()
# ax.plot(aIBi_Vals, Pcrit_norm, 'kx')
# ax.plot(aIBi_interpVals, Pcrit_interpVals, 'k-')
# # f1 = interpolate.interp1d(aIBi_Vals, Pcrit_norm, kind='cubic')
# # ax.plot(aIBi_interpVals, f1(aIBi_interpVals), 'k-')
# ax.set_title('Ground State Phase Diagram')
# ax.set_xlabel(r'$a_{IB}^{-1}$')
# ax.set_ylabel(r'$\frac{P}{m_{I}c_{BEC}}$')
# ax.set_xlim([xmin, xmax])
# ax.set_ylim([ymin, ymax])
# ax.fill_between(aIBi_interpVals, Pcrit_interpVals, ymax, facecolor='b', alpha=0.25)
# ax.fill_between(aIBi_interpVals, ymin, Pcrit_interpVals, facecolor='g', alpha=0.25)
# ax.text(-3.0, ymin + 0.175 * (ymax - ymin), 'Polaron', fontdict=font)
# ax.text(-2.9, ymin + 0.1 * (ymax - ymin), '(' + r'$Z>0$' + ')', fontdict=sfont)
# ax.text(-6.5, ymin + 0.6 * (ymax - ymin), 'Cherenkov', fontdict=font)
# ax.text(-6.35, ymin + 0.525 * (ymax - ymin), '(' + r'$Z=0$' + ')', fontdict=sfont)
# plt.show()
# # # ENERGY DERIVATIVES (SPHERICAL)
# CSAmp_ds = qds_aIBi['Real_CSAmp'] + 1j * qds_aIBi['Imag_CSAmp']
# kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', CSAmp_ds.coords['k'].values); kgrid.initArray_premade('th', CSAmp_ds.coords['th'].values)
# Energy_Vals = np.zeros((PVals.size, tVals.size))
# for Pind, P in enumerate(PVals):
# for tind, t in enumerate(tVals):
# CSAmp = CSAmp_ds.sel(P=P, t=t).values
# Energy_Vals[Pind, tind] = pfs.Energy(CSAmp, kgrid, P, aIBi, mI, mB, n0, gBB)
# Energy_Vals_inf = Energy_Vals[:, -1]
# Einf_tck = interpolate.splrep(PVals, Energy_Vals_inf, s=0)
# Pinf_Vals = np.linspace(np.min(PVals), np.max(PVals), 5 * PVals.size)
# Einf_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=0)
# Einf_1stderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=1)
# Einf_2ndderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=2)
# sound_mask = np.abs(Einf_2ndderiv_Vals) <= 5e-3
# Einf_sound = Einf_Vals[sound_mask]
# Pinf_sound = Pinf_Vals[sound_mask]
# [vsound, vs_const] = np.polyfit(Pinf_sound, Einf_sound, deg=1)
# ms_mask = Pinf_Vals <= 0.5
# Einf_1stderiv_ms = Einf_1stderiv_Vals[ms_mask]
# Pinf_ms = Pinf_Vals[ms_mask]
# [ms, ms_const] = np.polyfit(Pinf_ms, Einf_1stderiv_ms, deg=1)
# fig, axes = plt.subplots(nrows=3, ncols=1)
# axes[0].plot(Pinf_Vals, Einf_Vals, 'k-')
# axes[0].set_title('Ground State Energy (' + r'$a_{IB}^{-1}=$' + '{0})'.format(aIBi))
# axes[0].set_xlabel('P')
# axes[0].set_ylim([1.1 * np.min(Einf_Vals), -0.5])
# axes[0].set_xlim([0, 2.0])
# axes[1].plot(Pinf_Vals, Einf_1stderiv_Vals, 'k-')
# axes[1].set_title('First Derivative of Energy')
# axes[1].set_xlabel('P')
# axes[1].plot(Pinf_Vals, vsound * np.ones(Pinf_Vals.size), 'r--', linewidth=2.0)
# axes[1].set_ylim([0, 1.2 * np.max(Einf_1stderiv_Vals)])
# axes[1].set_xlim([0, 2.0])
# axes[2].plot(Pinf_Vals[::2], Einf_2ndderiv_Vals[::2], 'ko')
# axes[2].set_title('Second Derivative of Energy')
# axes[2].set_xlabel('P')
# axes[2].plot(Pinf_Vals, ms * np.ones(Pinf_Vals.size), 'c--', linewidth=2.0)
# axes[2].set_ylim([0, 1.2 * np.max(Einf_2ndderiv_Vals)])
# axes[2].set_xlim([0, 2.0])
# # # This plot below is for saturation/convergence of the energy with imaginary time
# # fig3, ax3 = plt.subplots()
# # Pind = 8
# # ax3.plot(tVals, np.abs(Energy_Vals[Pind, :]), 'k-')
# # ax3.set_yscale('log')
# # ax3.set_xscale('log')
# # ax3.set_title('Ground State Energy (' + r'$a_{IB}^{-1}=$' + '{0}, '.format(aIBi) + r'$P=$' + '{:.2f})'.format(PVals[Pind]))
# # ax3.set_xlabel('Imaginary time')
# fig.tight_layout()
# plt.show()
# # # POLARON SOUND VELOCITY (SPHERICAL)
# # Check to see if linear part of polaron (total system) energy spectrum has slope equal to sound velocity
# vsound_Vals = np.zeros(aIBi_Vals.size)
# vI_Vals = np.zeros(aIBi_Vals.size)
# for aind, aIBi in enumerate(aIBi_Vals):
# qds = xr.open_dataset(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# qds_aIBi = qds.isel(t=-1)
# CSAmp_ds = qds_aIBi['Real_CSAmp'] + 1j * qds_aIBi['Imag_CSAmp']
# kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', CSAmp_ds.coords['k'].values); kgrid.initArray_premade('th', CSAmp_ds.coords['th'].values)
# Energy_Vals_inf = np.zeros(PVals.size)
# PI_Vals = np.zeros(PVals.size)
# for Pind, P in enumerate(PVals):
# CSAmp = CSAmp_ds.sel(P=P).values
# Energy_Vals_inf[Pind] = pfs.Energy(CSAmp, kgrid, P, aIBi, mI, mB, n0, gBB)
# PI_Vals[Pind] = P - qds_aIBi.sel(P=P)['Pph'].values
# Einf_tck = interpolate.splrep(PVals, Energy_Vals_inf, s=0)
# Pinf_Vals = np.linspace(np.min(PVals), np.max(PVals), 2 * PVals.size)
# Einf_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=0)
# Einf_2ndderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=2)
# sound_mask = np.abs(Einf_2ndderiv_Vals) <= 5e-3
# Einf_sound = Einf_Vals[sound_mask]
# Pinf_sound = Pinf_Vals[sound_mask]
# [vsound_Vals[aind], vs_const] = np.polyfit(Pinf_sound, Einf_sound, deg=1)
# vI_inf_tck = interpolate.splrep(PVals, PI_Vals / mI, s=0)
# vI_inf_Vals = 1 * interpolate.splev(Pinf_Vals, vI_inf_tck, der=0)
# vI_Vals[aind] = np.polyfit(Pinf_sound, vI_inf_Vals[sound_mask], deg=0)
# print(vsound_Vals)
# print(100 * (vsound_Vals - nu) / nu)
# fig, ax = plt.subplots()
# ax.plot(aIBi_Vals, vsound_Vals, 'rx', mew=1, ms=10, label='Post-Transition Polaron Sound Velocity (' + r'$\frac{\partial E}{\partial P}$' + ')')
# ax.plot(aIBi_Vals, vI_Vals, 'ko', mew=1, ms=10, markerfacecolor='none', label='Post-Transition Impurity Velocity (' + r'$\frac{P-<P_{ph}>}{m_{I}}$' + ')')
# ax.plot(aIBi_Vals, nu * np.ones(aIBi_Vals.size), 'g--', linewidth=3.0, label='BEC Sound Speed')
# ax.set_ylim([0, 1.2])
# ax.legend(loc=(0.25, 0.1))
# ax.set_title('Velocity Comparison')
# ax.set_xlabel(r'$a_{IB}^{-1}$')
# ax.set_ylabel('Velocity')
# plt.show()
# # # POLARON EFFECTIVE MASS (SPHERICAL)
# ms_Vals = np.zeros(aIBi_Vals.size)
# for aind, aIBi in enumerate(aIBi_Vals):
# qds = xr.open_dataset(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# qds_aIBi = qds.isel(t=-1)
# CSAmp_ds = qds_aIBi['Real_CSAmp'] + 1j * qds_aIBi['Imag_CSAmp']
# kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', CSAmp_ds.coords['k'].values); kgrid.initArray_premade('th', CSAmp_ds.coords['th'].values)
# Energy_Vals_inf = np.zeros(PVals.size)
# PI_Vals = np.zeros(PVals.size)
# for Pind, P in enumerate(PVals):
# CSAmp = CSAmp_ds.sel(P=P).values
# Energy_Vals_inf[Pind] = pfs.Energy(CSAmp, kgrid, P, aIBi, mI, mB, n0, gBB)
# PI_Vals[Pind] = P - qds_aIBi.sel(P=P)['Pph'].values
# Einf_tck = interpolate.splrep(PVals, Energy_Vals_inf, s=0)
# Pinf_Vals = np.linspace(np.min(PVals), np.max(PVals), 2 * PVals.size)
# Einf_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=0)
# Einf_1stderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=1)
# Einf_2ndderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=2)
# ms_mask = Pinf_Vals < 0.3
# Einf_1stderiv_ms = Einf_1stderiv_Vals[ms_mask]
# Pinf_ms = Pinf_Vals[ms_mask]
# [ms_Vals[aind], ms_const] = np.polyfit(Pinf_ms, Einf_1stderiv_ms, deg=1)
# massEnhancement_Vals = (1 / ms_Vals) / mI
# mE_tck = interpolate.splrep(aIBi_Vals, massEnhancement_Vals, s=0)
# aIBi_interpVals = np.linspace(np.min(aIBi_Vals), np.max(aIBi_Vals), 5 * aIBi_Vals.size)
# mE_interpVals = 1 * interpolate.splev(aIBi_interpVals, mE_tck, der=0)
# fig, ax = plt.subplots()
# ax.plot(aIBi_Vals, massEnhancement_Vals, 'cD', mew=1, ms=10)
# ax.plot(aIBi_interpVals, mE_interpVals, 'c-')
# ax.set_title('Mass Enhancement')
# ax.set_xlabel(r'$a_{IB}^{-1}$')
# ax.set_ylabel(r'$\frac{m^{*}}{m_{I}} = \frac{1}{m_{I}}\frac{\partial^{2} E}{\partial P^{2}}$')
# plt.show()
# # # POLARON EFFECTIVE MASS VS CRITICAL MOMENTUM (SPHERICAL)
# ms_Vals = np.zeros(aIBi_Vals.size)
# Pcrit = np.zeros(aIBi_Vals.size)
# for aind, aIBi in enumerate(aIBi_Vals):
# qds = xr.open_dataset(innerdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))
# qds_aIBi = qds.isel(t=-1)
# CSAmp_ds = qds_aIBi['Real_CSAmp'] + 1j * qds_aIBi['Imag_CSAmp']
# kgrid = Grid.Grid("SPHERICAL_2D"); kgrid.initArray_premade('k', CSAmp_ds.coords['k'].values); kgrid.initArray_premade('th', CSAmp_ds.coords['th'].values)
# Energy_Vals_inf = np.zeros(PVals.size)
# PI_Vals = np.zeros(PVals.size)
# for Pind, P in enumerate(PVals):
# CSAmp = CSAmp_ds.sel(P=P).values
# Energy_Vals_inf[Pind] = pfs.Energy(CSAmp, kgrid, P, aIBi, mI, mB, n0, gBB)
# PI_Vals[Pind] = P - qds_aIBi.sel(P=P)['Pph'].values
# Einf_tck = interpolate.splrep(PVals, Energy_Vals_inf, s=0)
# Pinf_Vals = np.linspace(np.min(PVals), np.max(PVals), 2 * PVals.size)
# Einf_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=0)
# Einf_1stderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=1)
# Einf_2ndderiv_Vals = 1 * interpolate.splev(Pinf_Vals, Einf_tck, der=2)
# ms_mask = Pinf_Vals < 0.3
# Einf_1stderiv_ms = Einf_1stderiv_Vals[ms_mask]
# Pinf_ms = Pinf_Vals[ms_mask]
# [ms_Vals[aind], ms_const] = np.polyfit(Pinf_ms, Einf_1stderiv_ms, deg=1)
# Pcrit[aind] = Pinf_Vals[np.argmin(np.gradient(Einf_2ndderiv_Vals)) - 0]
# massEnhancement_Vals = (1 / ms_Vals) / mI
# Pcrit_norm = Pcrit / (mI * nu)
# print(massEnhancement_Vals)
# print(Pcrit_norm)
# print(100 * np.abs(massEnhancement_Vals - Pcrit_norm) / Pcrit_norm)
# fig, ax = plt.subplots()
# ax.plot(aIBi_Vals, massEnhancement_Vals, 'co', mew=1, ms=10, markerfacecolor='none', label='Mass Enhancement (' + r'$\frac{m^{*}}{m_{I}}$' + ')')
# ax.plot(aIBi_Vals, Pcrit_norm, 'kx', mew=1, ms=10, label='Normalized Critical Momentum (' + r'$\frac{P_{crit}}{m_{I}c_{BEC}}$' + ')')
# ax.legend(loc=2)
# ax.set_title('Mass Enhancement vs Critical Momentum')
# ax.set_xlabel(r'$a_{IB}^{-1}$')
# plt.show()
# # # Nph (SPHERICAL)
# # IRrat_Vals = np.array([1, 2, 5, 10, 50, 1e2, 5e2, 1e3, 5e3, 1e4])
# IRrat_Vals = np.array([1, 2, 5, 10, 50, 1e2])
# aIBi_List = [-10.0, -5.0, -2.0, -0.5]
# aIBi = aIBi_List[1]
# IRrat = IRrat_Vals[0]
# IRdatapath = innerdatapath + '/IRratio_{:.1E}'.format(IRrat)
# qds_aIBi = (xr.open_dataset(IRdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))).isel(t=-1)
# PVals = qds_aIBi['P'].values
# n0 = qds_aIBi.attrs['n0']
# gBB = qds_aIBi.attrs['gBB']
# mI = qds_aIBi.attrs['mI']
# mB = qds_aIBi.attrs['mB']
# nu = np.sqrt(n0 * gBB / mB)
# Nph_ds = qds_aIBi['Nph']
# Nph_Vals = Nph_ds.values
# Pind = np.argmin(np.abs(PVals - 3.0 * mI * nu))
# Nph_IRcuts = np.zeros(IRrat_Vals.size)
# for ind, IRrat in enumerate(IRrat_Vals):
# IRdatapath = innerdatapath + '/IRratio_{:.1E}'.format(IRrat)
# qds_IRrat = (xr.open_dataset(IRdatapath + '/quench_Dataset_aIBi_{:.2f}.nc'.format(aIBi))).isel(t=-1)
# kmin = np.min(qds_IRrat.coords['k'].values)
# Nph_ds_IRrat = qds_IRrat['Nph']
# Nph_IRcuts[ind] = Nph_ds_IRrat.values[Pind]
# fig, axes = plt.subplots(nrows=1, ncols=2)
# axes[0].plot(PVals / (mI * nu), Nph_Vals, 'k-')
# axes[0].set_title('Phonon Number (' + r'$aIB^{-1}=$' + '{0})'.format(aIBi))
# axes[0].set_xlabel(r'$\frac{P}{m_{I}c_{BEC}}$')
# axes[0].set_ylabel(r'$N_{ph}$')
# axes[1].plot(IRrat_Vals, Nph_IRcuts, 'g-')
# axes[1].set_xlabel('IR Cutoff Increase Ratio')
# axes[1].set_ylabel(r'$N_{ph}$')
# axes[1].set_title('Phonon Number (' + r'$aIB^{-1}=$' + '{0}, '.format(aIBi) + r'$\frac{P}{m_{I}c_{BEC}}=$' + '{:.1f})'.format(PVals[Pind] / (mI * nu)))
# fig.tight_layout()
# plt.show()
# # IMPURITY DISTRIBUTION ANIMATION WITH CHARACTERIZATION (CARTESIAN)
# nPIm_FWHM_indices = []
# nPIm_distPeak_index = np.zeros(PVals.size, dtype=int)
# nPIm_FWHM_Vals = np.zeros(PVals.size)
# nPIm_distPeak_Vals = np.zeros(PVals.size)
# nPIm_deltaPeak_Vals = np.zeros(PVals.size)
# nPIm_Tot_Vals = np.zeros(PVals.size)
# nPIm_Vec = np.empty(PVals.size, dtype=np.object)
# PIm_Vec = np.empty(PVals.size, dtype=np.object)
# for ind, P in enumerate(PVals):
# qds_nPIm_inf = qds_aIBi['nPI_mag'].sel(P=P).isel(t=-1).dropna('PI_mag')
# PIm_Vals = qds_nPIm_inf.coords['PI_mag'].values
# dPIm = PIm_Vals[1] - PIm_Vals[0]
# # # Plot nPIm(t=inf)
# # qds_nPIm_inf.plot(ax=ax, label='P: {:.1f}'.format(P))
# nPIm_Vec[ind] = qds_nPIm_inf.values
# PIm_Vec[ind] = PIm_Vals
# # # Calculate nPIm(t=inf) normalization
# nPIm_Tot_Vals[ind] = np.sum(qds_nPIm_inf.values * dPIm) + qds_aIBi.sel(P=P).isel(t=-1)['mom_deltapeak'].values
# # Calculate FWHM, distribution peak, and delta peak
# nPIm_FWHM_Vals[ind] = pfc.FWHM(PIm_Vals, qds_nPIm_inf.values)
# nPIm_distPeak_Vals[ind] = np.max(qds_nPIm_inf.values)
# nPIm_deltaPeak_Vals[ind] = qds_aIBi.sel(P=P).isel(t=-1)['mom_deltapeak'].values
# D = qds_nPIm_inf.values - np.max(qds_nPIm_inf.values) / 2
# indices = np.where(D > 0)[0]
# nPIm_FWHM_indices.append((indices[0], indices[-1]))
# nPIm_distPeak_index[ind] = np.argmax(qds_nPIm_inf.values)
# Pratio = 1.4
# Pnorm = PVals / (mI * nu)
# Pind = np.abs(Pnorm - Pratio).argmin()
# print(Pnorm[Pind])
# print(nPIm_deltaPeak_Vals[Pind])
# fig1, ax = plt.subplots()
# ax.plot(mI * nu * np.ones(PIm_Vals.size), np.linspace(0, 1, PIm_Vals.size), 'y--', label=r'$m_{I}c$')
# curve = ax.plot(PIm_Vec[Pind], nPIm_Vec[Pind], color='k', lw=3, label='')
# ind_s, ind_f = nPIm_FWHM_indices[Pind]
# FWHMcurve = ax.plot(np.linspace(PIm_Vec[Pind][ind_s], PIm_Vec[Pind][ind_f], 100), nPIm_Vec[Pind][ind_s] * np.ones(100), 'b-', linewidth=3.0, label='Incoherent Part FWHM')
# FWHMmarkers = ax.plot(np.linspace(PIm_Vec[Pind][ind_s], PIm_Vec[Pind][ind_f], 2), nPIm_Vec[Pind][ind_s] * np.ones(2), 'bD', mew=0.75, ms=7.5, label='')
# Zline = ax.plot(PVals[Pind] * np.ones(PIm_Vals.size), np.linspace(0, nPIm_deltaPeak_Vals[Pind], PIm_Vals.size), 'r-', linewidth=3.0, label='Delta Peak (Z-factor)')
# Zmarker = ax.plot(PVals[Pind], nPIm_deltaPeak_Vals[Pind], 'rx', mew=0.75, ms=7.5, label='')
# norm_text = ax.text(0.7, 0.65, r'$\int n_{|\vec{P_{I}}|} d|\vec{P_{I}}| = $' + '{:.2f}'.format(nPIm_Tot_Vals[Pind]), transform=ax.transAxes, color='k')
# ax.legend()
# ax.set_xlim([-0.01, np.max(PIm_Vec[Pind])])
# # ax.set_xlim([-0.01, 8])
# ax.set_ylim([0, 1.05])
# ax.set_title('Impurity Momentum Magnitude Distribution (' + r'$aIB^{-1}=$' + '{0}, '.format(aIBi) + r'$\frac{P}{m_{I}c_{BEC}}=$' + '{:.2f})'.format(Pnorm[Pind]))
# ax.set_ylabel(r'$n_{|\vec{P_{I}}|}$')
# ax.set_xlabel(r'$|\vec{P_{I}}|$')
# # Plot characterization of nPIm(t=inf)
# fig2, axes = plt.subplots(nrows=1, ncols=2)
# axes[0].plot(PVals, nPIm_deltaPeak_Vals, 'r-')
# axes[0].set_xlabel('$P$')
# # axes[0].set_ylabel(r'$Z$')
# axes[0].set_title('Delta Peak (Z-factor)')
# axes[1].plot(PVals, nPIm_FWHM_Vals, 'b-')
# axes[1].set_xlabel('$P$')
# # axes[1].set_ylabel('FWHM')
# axes[1].set_title('Incoherent Part FWHM')
# fig2.tight_layout()
# plt.show()
|
mit
| 5,256,427,196,833,747,000
| 44.60733
| 257
| 0.576895
| false
| 2.424886
| false
| false
| false
|
adamnovak/hgvm-builder
|
src/hgvmbuilder/toilvgfacade.py
|
1
|
14356
|
# hgvm-builder toilvgfacade.py: Provide a function-argument-based toil-vg API
"""
toil-vg currently has lots of cases where low-level functions depend on
command-line arguments in the options object. To make toil-vg targets callable
on arbitrary Toil file IDs, we need wrappers.
To use this facade, run add_options() on your argparse parser before parsing
arguments, call initialize() on the resulting options namespace on the master,
and then run the various _job functions as Toil jobs to do the actual work.
"""
import os
import os.path
import logging
import urlparse
import shutil
import argparse
import timeit
import toil_vg.vg_common
import toil_vg.vg_config
import toil_vg.vg_index
import toil_vg.vg_call
from toil.realtimeLogger import RealtimeLogger
from .toilpromise import ToilPromise
Logger = logging.getLogger("toilvgfacade")
class OptionFilter(object):
"""
Can wrap an ArgumentParser or other such class and drop options on a
blacklist/accept only options on a whitelist.
"""
def __init__(self, real_parser, blacklist=[]):
"""
Wrap the given actual parser with a filtering add_argument method.
"""
# Save the parser
self.real_parser = real_parser
# Save the blacklist
self.blacklist = set(blacklist)
def add_argument(self, name, *args, **kwargs):
"""
Add the given argument, if its name passes the filters.
"""
if name.strip("-") not in self.blacklist:
# Add it!
return self.real_parser.add_argument(name, *args, **kwargs)
# What options don't we want to pass through to/from the command line? Don't add
# the leading dashes. Holds a dict from toil vg operation type to the options
# that should be removed.
option_blacklist = {
"wrapperscript": {"out_store", "tool"},
"common": {"force_outstore"},
"index": {"index_name", "graphs", "chroms"},
# TODO: we can't use most of the toil-vg call logic because it's too tied to
# chunking and having a single reference path.
"call": {"overlap", "call_chunk_size", "genotype", "genotype_opts",
"filter_opts"}
}
# TODO: Un-blacklist --config and add logic to import the config file and send
# it via the file store to the nodes that actually use toil-vg. Or otherwise
# require special prep code to be run on the master to use this library.
def add_options(parser):
"""
Given an argparse parser or option group, add all the toil-vg configuration
options (for extra vg command flags, Docker containers, and so on).
"""
# Add all the non-blacklisted toil-vg common options
common_group = parser.add_argument_group("Toil VG configuration",
"options to configure the Toil VG wrapper")
toil_vg.vg_common.add_container_tool_parse_args(OptionFilter(common_group,
option_blacklist["common"]))
toil_vg.vg_common.add_common_vg_parse_args(OptionFilter(common_group,
option_blacklist["common"]))
# Add all the non-blacklisted vg index options to this group
index_group = parser.add_argument_group("VG Indexing",
"options to configure involations of `vg index`")
toil_vg.vg_index.index_parse_args(OptionFilter(index_group,
option_blacklist["index"]))
# And the call options
call_group = parser.add_argument_group("VG Calling",
"options to configure involations of `vg call`")
toil_vg.vg_call.chunked_call_parse_args(OptionFilter(call_group,
option_blacklist["call"]))
def initialize(options):
"""
Start up the Toil VG system on the master. Imports a bunch of config file
defaults into the options.
"""
logging.info("Using Toil VG from {}".format(toil_vg.__file__))
# Apply the config file
processed_options = toil_vg.vg_config.apply_config_file_args(options)
# Apply the changes back to the original options
options.__dict__ = processed_options.__dict__
# Make a command runner that uses Docker (or Singularity)
options.drunner = toil_vg.vg_common.ContainerRunner(
container_tool_map = toil_vg.vg_common.get_container_tool_map(options))
def sanitize_options(cli_options):
"""
Since Toil VG uses the command-line options namespace object as a sort of
general context, we need to feed one into every call.
However, since we removed some command-line options, our caller might feed
us an options object with those set (because it re-used those option names).
So we have to strip them out.
"""
# We'll fill this in
sanitized = argparse.Namespace()
# We compute a global blacklist of options that some toil vg function
# shouldn't get.
global_blacklist = set()
for local_blacklist in option_blacklist.itervalues():
for item in local_blacklist:
# We should strip this out
global_blacklist.add(item)
for key, value in vars(cli_options).iteritems():
# For everything we got fed
if key.strip("-") in global_blacklist:
# Blacklisted options get skipped
continue
# Copy everything else over
vars(sanitized)[key] = value
return sanitized
def xg_index_job(job, options, vg_ids):
"""
Index the given VG graphs into an XG file. Returns the ID of the XG file.
Automatically sets the correct resource requirements based on the config
passed via options.
Internally uses toil_vg to perform the indexing.
"""
# Do any options manipulation we need to do
# Strip out stuff we don't want and apply config defaults
options = sanitize_options(options)
# Add the outstore, which we have sort of disabled. It insists on writing
# stuff, so just drop it in the current directory. It doesn't read it back.
options.out_store = "file:."
# Don't use outstore instead of the file store
options.force_outstore = False
# Pretend we're the pipeline tool
options.tool = "pipeline"
# Add stuff that toil vg index uses
# options.chroms has to have a name for every graph, to save it under in the
# local temp dir.
options.chroms = ["graph{}".format(i) for i in xrange(len(vg_ids))]
# options.index_name has to have the basename for the .xg in the local temp
# dir.
options.index_name = "xgindex"
return job.addChildJobFn(toil_vg.vg_index.run_xg_indexing, options,
vg_ids, cores=options.xg_index_cores, memory=options.xg_index_mem,
disk=options.xg_index_disk).rv()
def gcsa_index_job(job, options, vg_ids, primary_path_names=None):
"""
Index the given graphs into a GCSA/LCP index, and return a pair of file IDs
for the GCSA and the LCP files.
Will prune the graph before indexing unless options.prune_opts is explicitly
set as an empty list.
"""
# Do any options manipulation we need to do
# Strip out stuff we don't want and apply config defaults
options = sanitize_options(options)
# Add the outstore, which we have sort of disabled. It insists on writing
# stuff, so just drop it in the current directory. It doesn't read it back.
options.out_store = "file:."
# Don't use outstore instead of the file store
options.force_outstore = False
# Pretend we're the pipeline tool
options.tool = "pipeline"
# Add stuff that toil vg index uses
# options.graphs has to have a name for every graph, to save it under in the
# local temp dir.
options.graphs = ["graph{}".format(i) for i in xrange(len(vg_ids))]
# We also need a "chroms" giving the primary path for each graph. It's OK if
# the path doesn't exist in a given graph, but if it does it will be added
# to the index.
# We have primary path names to use. We can just try and retain all the
# paths in all graphs.
RealtimeLogger.info("Want to GCSA-index {} with paths {}".format(
vg_ids, primary_path_names))
# Fake path names
options.chroms = ["fake{}".format(i) for i in xrange(len(vg_ids))]
# options.index_name has to have the basename for the .gcsa in the local
# temp dir.
options.index_name = "gcsaindex"
return job.addChildJobFn(toil_vg.vg_index.run_gcsa_prep, options, vg_ids,
primary_path_override=primary_path_names,
cores=options.misc_cores, memory=options.misc_mem,
disk=options.misc_disk).rv()
def vg_call_job(job, options, vg_id, pileup_id, vcf=False, primary_paths=[]):
"""
Given a vg file ID and a pileup file ID, produce variant calls in Locus or
VCF format. Returns a pair of the VCF or locus file ID and the augmented
graph file ID.
If vcf is true, returns VCF format. Otherwise, returns Locus format. If
primary_paths is non-empty, passes those primary path names to vg call to
override its autodetection logic.
Handles requirements itself.
TODO: change toil-vg to allow not forcing a single contig and actually use
it.
"""
return job.addChildJobFn(run_vg_call, options, vg_id, pileup_id, vcf,
primary_paths,
cores=options.calling_cores, memory="100G", disk="50G").rv()
def run_vg_call(job, options, vg_id, pileup_id, vcf, primary_paths):
"""
Actually run vg call on a given pileup. Separate toil-vg-style job to make
requirement abstraction work.
"""
# Define a work_dir so Docker can work
work_dir = job.fileStore.getLocalTempDir()
# Download the vg
vg_filename = "hgvm.vg"
job.fileStore.readGlobalFile(vg_id, os.path.join(work_dir, vg_filename))
# Download the pileup
pileup_filename = "pileup.vgpu"
job.fileStore.readGlobalFile(pileup_id, os.path.join(work_dir,
pileup_filename))
# Pick an augmented graph filename
augmented_filename = "augmented.vg"
# Make arguments to annotate all the reference paths
ref_args = []
for ref_path in primary_paths:
# For every primary path we have defined, tell the caller to use it as a
# reference path.
ref_args.append("--ref")
ref_args.append(ref_path)
# Set up the VG run. Make sure to pass any user-defined call options that
# configure thresholds and filters.
vg_args = (["vg", "call", "-t", str(options.calling_cores), "--aug-graph",
augmented_filename, vg_filename, pileup_filename] + ref_args +
options.call_opts)
if not vcf:
# Don't make a VCF
vg_args.append("--no-vcf")
with job.fileStore.writeGlobalFileStream() as (output_handle, output_id):
# Call and stream the Locus or VCF data to the file store
options.drunner.call(job, [vg_args], outfile=output_handle,
work_dir=work_dir)
# Upload the augmented graph
augmented_id = job.fileStore.writeGlobalFile(
os.path.join(work_dir, augmented_filename))
# Return the file IDs
return output_id, augmented_id
def id_range_job(job, options, vg_id):
"""
Find the first and last ID in the given VG file and return them as a tuple.
"""
# Strip out stuff we don't want and apply config defaults
options = sanitize_options(options)
# We need an options.chroms, because the job we're running returns an entry
# from it.
options.chroms = [None]
# Don't use outstore instead of the file store
options.force_outstore = False
# Run the job and return the start and end IDs as a pair of ints (dropping
# the chrom name)
return ToilPromise.wrap(job.addChildJobFn(toil_vg.vg_index.run_id_range,
options, 0, vg_id,
cores=options.misc_cores, memory=options.misc_mem,
disk=options.misc_disk)
).then(lambda (name, start, end): (int(start), int(end))
).unwrap_result()
def id_increment_job(job, options, vg_id, distance):
"""
Increment all the node IDs in the given vg graph by the given distance.
Return a new vg graph file ID.
Not actually in toil-vg, but we put it here so all the vg-touching functions
can live in one place.
"""
if distance == 0:
# No need to shift at all
return vg_id
# Strip out stuff we don't want and apply config defaults
options = sanitize_options(options)
# We need an options.chroms, because the job we're running uses it for local
# filenames
options.chroms = ["increment"]
# Don't use outstore instead of the file store
options.force_outstore = False
return job.addChildJobFn(run_id_increment, options, 0, vg_id, distance,
cores=options.misc_cores, memory=options.misc_mem,
disk=options.misc_disk).rv()
def run_id_increment(job, options, graph_i, graph_id, distance):
"""
Actually do the ID incrementing. Is a separate, toil-vg-style job so it
can be added to toil-vg and so we can set the correct resource requirements.
"""
RealtimeLogger.info("Starting graph shift...")
start_time = timeit.default_timer()
work_dir = job.fileStore.getLocalTempDir()
# download graph
graph_filename = os.path.join(work_dir, '{}.vg'.format(
options.chroms[graph_i]))
toil_vg.vg_common.read_from_store(job, options, graph_id, graph_filename)
# Output
output_graph_filename = graph_filename + '.shifted.vg'
RealtimeLogger.info("Moving {} up by {} to {}".format(
graph_filename, distance, output_graph_filename))
with open(output_graph_filename, "w") as out_file:
command = ['vg', 'ids', '--increment', str(distance),
os.path.basename(graph_filename)]
options.drunner.call(job, command, work_dir=work_dir, outfile=out_file)
# Back to store
output_graph_id = toil_vg.vg_common.write_to_store(job, options,
output_graph_filename)
end_time = timeit.default_timer()
run_time = end_time - start_time
RealtimeLogger.info("Finished graph shift. Process took {} seconds.".format(
run_time))
return output_graph_id
|
apache-2.0
| -7,453,239,172,026,143,000
| 32.231481
| 80
| 0.654012
| false
| 3.827246
| true
| false
| false
|
mmeslab/linux-nctusde
|
compute_nctuss_related_function_size.py
|
1
|
1314
|
#!/usr/bin/python
nctuss_symbol_names = ["nctuss_poll_emacps",
"nctuss_xemacps_tx_poll",
"xemacps_tx_hwtstamp",
"nctuss_xemacps_rx",
"nctuss_xemacps_send_skb",
"nctuss_xemacps_start_xmit",
"xemacps_clear_csum",
"nctuss_xemacps_return_skb",
"nctuss_skb_pool_return",
"nctuss_gt_stop",
"nctuss_gt_resume",
"nctuss_gt_get_gt_counter_base",
"nctuss_gt_get_counter_value",
"nctuss_ttc_stop",
"nctuss_ttc_resume",
"nctuss_smp_invoke_function",
#"wakeup_softirqd",
"skb_push",
"skb_reset_transport_header", # inline function
"udp_hdr", # inline function
"skb_reset_network_header", # inline function
"skb_reset_mac_header", # inline function
"skb_reserve", # inline function
]
if __name__ == '__main__':
f = open("System.map", "r")
totalSize = 0
symbolNamesFound = 0
lines = f.readlines()
for i in range(0, len(lines)):
line = lines[i]
symbol_name = line[11:-1]
if symbol_name in nctuss_symbol_names:
print symbol_name
address = int(line[0:8], 16)
addressNext = int(lines[i+1][0:8], 16)
size = addressNext - address
totalSize += size
symbolNamesFound += 1
print "totalSize: %s" % (totalSize)
print "symbolNamesFound: %s" % (symbolNamesFound)
f.close()
|
gpl-2.0
| -1,664,718,970,826,183,700
| 22.464286
| 52
| 0.616438
| false
| 2.622754
| false
| false
| false
|
bxlab/HiFive_Paper
|
Scripts/hifive-1.1.3/hifive/hic_binning.py
|
1
|
92093
|
#!/usr/bin/env python
"""
This module contains scripts for generating compact, upper-triangle, and full matrices of HiC interaction data.
Concepts
--------
These functions rely on the :class:`HiC` class in conjunction with the :class:`Fend` and :class:`HiCData` classes.
Data can either be arranged in compact, complete, or flattened (row-major) upper-triangle arrays. Compact arrays are N x M, where N is the number of fends or bins, and M is the maximum distance between fends or bins. This is useful for working with sets of short interactions. Data can be raw, fend-corrected, distance-dependence removed, or enrichment values. Arrays are 3-dimensional with observed values in the first layer of d3, expected values in the second layer of d3. The exception to this is upper-triangle arrays, which are 2d, dividing observed and expected along the second axis.
API Documentation
-----------------
"""
import os
import sys
import subprocess
import numpy
import h5py
try:
from mpi4py import MPI
except:
pass
from libraries._hic_interactions import find_max_fend
import libraries._hic_binning as _hic_binning
def find_cis_signal(hic, chrom, binsize=10000, binbounds=None, start=None, stop=None, startfend=None, stopfend=None,
datatype='enrichment', arraytype='compact', maxdistance=0, skipfiltered=False, returnmapping=False,
proportional=False, **kwargs):
"""
Create an array of format 'arraytype' and fill with data requested in 'datatype'.
:param hic: A :class:`HiC <hifive.hic.HiC>` class object containing fend and count data.
:type hic: :class:`HiC <hifive.hic.HiC>`
:param chrom: The name of a chromosome contained in 'hic'.
:type chrom: str.
:param binsize: This is the coordinate width of each bin. A value of zero indicates unbinned. If binbounds is not None, this value is ignored.
:type binsize: int.
:param binbounds: An array containing start and stop coordinates for a set of user-defined bins. Any fend not falling in a bin is ignored.
:type binbounds: numpy array
:param start: The smallest coordinate to include in the array, measured from fend midpoints or the start of the first bin. If 'binbounds' is given, this value is ignored. If both 'start' and 'startfend' are given, 'start' will override 'startfend'. If unspecified, this will be set to the midpoint of the first fend for 'chrom', adjusted to the first multiple of 'binsize' if not zero. Optional.
:type start: int.
:param stop: The largest coordinate to include in the array, measured from fend midpoints or the end of the last bin. If 'binbounds' is given, this value is ignored. If both 'stop' and 'stopfend' are given, 'stop' will override 'stopfend'. If unspecified, this will be set to the midpoint of the last fend plus one for 'chrom', adjusted to the last multiple of 'start' + 'binsize' if not zero. Optional.
:type stop: int.
:param startfend: The first fend to include in the array. If 'binbounds' is given, this value is ignored. If unspecified and 'start' is not given, this is set to the first valid fend in 'chrom'. In cases where 'start' is specified and conflicts with 'startfend', 'start' is given preference. Optional
:type startfend: int.
:param stopfend: The first fend not to include in the array. If 'binbounds' is given, this value is ignored. If unspecified and 'stop' is not given, this is set to the last valid fend in 'chrom' plus one. In cases where 'stop' is specified and conflicts with 'stopfend', 'stop' is given preference. Optional.
:type stopfend: int.
:param datatype: This specifies the type of data that is processed and returned. Options are 'raw', 'distance', 'fend', 'enrichment', and 'expected'. Observed values are always in the first index along the last axis, except when 'datatype' is 'expected'. In this case, filter values replace counts. Conversely, if 'raw' is specified, unfiltered fends return value of one. Expected values are returned for 'distance', 'fend', 'enrichment', and 'expected' values of 'datatype'. 'distance' uses only the expected signal given distance for calculating the expected values, 'fend' uses only fend correction values, and both 'enrichment' and 'expected' use both correction and distance mean values.
:type datatype: str.
:param arraytype: This determines what shape of array data are returned in. Acceptable values are 'compact', 'full', and 'upper'. 'compact' means data are arranged in a N x M x 2 array where N is the number of bins, M is the maximum number of steps between included bin pairs, and data are stored such that bin n,m contains the interaction values between n and n + m + 1. 'full' returns a square, symmetric array of size N x N x 2. 'upper' returns only the flattened upper triangle of a full array, excluding the diagonal of size (N * (N - 1) / 2) x 2.
:type arraytype: str.
:param maxdistance: This specifies the maximum coordinate distance between bins that will be included in the array. If set to zero, all distances are included.
:type maxdistance: int.
:param skipfiltered: If 'True', all interaction bins for filtered out fends are removed and a reduced-size array is returned.
:type skipfiltered: bool.
:param returnmapping: If 'True', a list containing the data array and a 2d array containing first coordinate included and excluded from each bin, and the first fend included and excluded from each bin is returned. Otherwise only the data array is returned.
:type returnmapping: bool.
:param proportional: Indicates whether interactions should proportionally contribute to bins based on the amount of overlap instead of being attributed solely based on midpoint. Only valid for binned heatmaps.
:type proportional: bool.
:returns: Array in format requested with 'arraytype' containing data requested with 'datatype'.
"""
if 'silent' in kwargs:
silent = kwargs['silent']
else:
silent = False
# check that all values are acceptable
if datatype not in ['raw', 'fend', 'distance', 'enrichment', 'expected']:
if not silent:
print >> sys.stderr, ("Datatype given is not recognized. No data returned\n"),
return None
elif datatype in ['fend', 'enrichment'] and hic.normalization == 'none':
if not silent:
print >> sys.stderr, ("Normalization has not been performed yet on this project. Select either 'raw' or 'distance' for datatype. No data returned\n"),
return None
elif datatype in ['distance', 'enrichment'] and hic.distance_parameters is None:
if not silent:
print >> sys.stderr, ("No distance-dependence relationship has been calculated for this project yet. Select either 'raw' or 'fend' for datatype. No data returned\n"),
return None
if arraytype not in ['full', 'compact', 'upper']:
if not silent:
print >> sys.stderr, ("Unrecognized array type. No data returned.\n"),
return None
# Determine start, stop, startfend, and stopfend
chrint = hic.chr2int[chrom.strip('chr')]
if not binbounds is None:
start = binbounds[0, 0]
stop = binbounds[-1, 1]
startfend = _find_fend_from_coord(hic, chrint, start)
stopfend = _find_fend_from_coord(hic, chrint, stop) + 1
else:
if start is None and startfend is None:
startfend = hic.fends['chr_indices'][chrint]
while startfend < hic.fends['chr_indices'][chrint + 1] and hic.filter[startfend] == 0:
startfend += 1
if startfend == hic.fends['chr_indices'][chrint + 1]:
if not silent:
print >> sys.stderr, ("Insufficient data.\n"),
return None
start = hic.fends['fends']['mid'][startfend]
if binsize > 0:
start = (start / binsize) * binsize
elif start is None:
start = hic.fends['fends']['mid'][startfend]
if binsize > 0:
start = (start / binsize) * binsize
else:
startfend = _find_fend_from_coord(hic, chrint, start)
if (stop is None or stop == 0) and stopfend is None:
stopfend = hic.fends['chr_indices'][chrint + 1]
while stopfend > hic.fends['chr_indices'][chrint] and hic.filter[stopfend - 1] == 0:
stopfend -= 1
stop = hic.fends['fends']['mid'][stopfend - 1]
if binsize > 0:
stop = ((stop - 1 - start) / binsize + 1) * binsize + start
elif stop is None or stop == 0:
stop = hic.fends['fends']['mid'][stopfend - 1]
if binsize > 0:
stop = ((stop - 1 - start) / binsize + 1) * binsize + start
else:
if binsize > 0:
stop = ((stop - 1 - start) / binsize + 1) * binsize + start
stopfend = _find_fend_from_coord(hic, chrint, stop) + 1
if not silent:
print >> sys.stderr, ("Finding %s %s array for %s:%i-%i...") % (datatype, arraytype, chrom, start, stop),
# If datatype is not 'expected', pull the needed slice of data
if datatype != 'expected':
start_index = hic.data['cis_indices'][startfend]
stop_index = hic.data['cis_indices'][stopfend]
if start_index == stop_index:
if not silent:
print >> sys.stderr, ("Insufficient data\n"),
return None
data_indices = hic.data['cis_indices'][startfend:(stopfend + 1)]
data_indices -= data_indices[0]
data = hic.data['cis_data'][start_index:stop_index, :]
data[:, :2] -= startfend
else:
data_indices = None
data = None
# Determine mapping of valid fends to bins
mapping = numpy.zeros(stopfend - startfend, dtype=numpy.int32) - 1
valid = numpy.where(hic.filter[startfend:stopfend] > 0)[0]
mids = hic.fends['fends']['mid'][startfend:stopfend]
if binsize == 0 and binbounds is None:
if skipfiltered:
mapping[valid] = numpy.arange(valid.shape[0])
num_bins = valid.shape[0]
else:
mapping[valid] = valid
num_bins = mapping.shape[0]
elif not binbounds is None:
start_indices = numpy.searchsorted(binbounds[:, 0], mids[valid], side='right') - 1
stop_indices = numpy.searchsorted(binbounds[:, 1], mids[valid], side='right')
where = numpy.where(start_indices == stop_indices)[0]
valid = valid[where]
mapping[valid] = start_indices[where]
num_bins = binbounds.shape[0]
else:
mapping[valid] = (mids[valid] - start) / binsize
num_bins = (stop - start) / binsize
# Find maximum interaction partner for each fend
if num_bins < 2:
if not silent:
print >> sys.stderr, ("Insufficient data\n"),
return None
max_fend = numpy.zeros(mapping.shape[0], dtype=numpy.int32)
find_max_fend(max_fend, mids, hic.fends['fends']['chr'][startfend:stopfend],
hic.fends['chr_indices'][...], startfend, maxdistance)
max_fend = numpy.minimum(max_fend, mapping.shape[0])
if binsize == 0:
max_bin = numpy.amax(max_fend - numpy.arange(mapping.shape[0]))
if max_bin <= 0:
if not silent:
print >> sys.stderr, ("Insufficient data.\n"),
return None
else:
if maxdistance == 0:
max_bin = num_bins - 1
else:
max_bin = maxdistance / binsize
# If correction is required, determine what type and get appropriate data
if 'binning' not in hic.normalization and datatype != 'raw':
corrections = hic.corrections[startfend:stopfend]
elif datatype == 'raw':
corrections = numpy.ones(stopfend - startfend, dtype=numpy.float32)
else:
corrections = None
if ((hic.normalization in ['express', 'probability'] and
datatype == 'fend') or datatype == 'raw') and maxdistance == 0:
if datatype == 'fend':
correction_sums = numpy.bincount(mapping[valid], weights=corrections[valid],
minlength=num_bins).astype(numpy.float64)
else:
correction_sums = numpy.bincount(mapping[valid], minlength=num_bins).astype(numpy.float64)
else:
correction_sums = None
if 'binning' in hic.normalization and datatype not in ['raw', 'distance']:
binning_corrections = hic.binning_corrections
binning_num_bins = hic.binning_num_bins
fend_indices = hic.binning_fend_indices
else:
binning_corrections = None
binning_num_bins = None
fend_indices = None
if datatype in ['distance', 'enrichment', 'expected']:
distance_parameters = hic.distance_parameters
chrom_mean = hic.chromosome_means[chrint]
else:
distance_parameters = None
chrom_mean = 0.0
# If proportional is requested, find bin ranges
if proportional and binsize > 0:
fends = hic.fends['fends'][startfend:stopfend]
ranges = numpy.zeros((mapping.shape[0], 2), dtype=numpy.int32)
overlap = numpy.zeros(ranges.shape, dtype=numpy.float32)
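        # 'ranges' records the first and last bin each fend overlaps; 'overlap' records the portion of the fend
        # falling in those two bins, expressed as a fraction of the bin width, for proportional assignment.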
positions = numpy.arange(1, 1 + num_bins) * binsize + start
ranges[:, 0] = numpy.searchsorted(positions, fends['start'])
ranges[:, 1] = numpy.searchsorted(positions[:-1], fends['stop'])
where = numpy.where(ranges[:, 0] < ranges[:, 1])[0]
overlap[where, 0] = numpy.minimum(positions[ranges[where, 0]] - fends['start'][where],
binsize) / float(binsize)
overlap[where, 1] = numpy.minimum(fends['stop'][where] - positions[ranges[where, 1]] + binsize,
binsize) / float(binsize)
where = numpy.where(ranges[:, 0] == ranges[:, 1])[0]
overlap[where, 0] = (fends['stop'][where] - fends['start'][where]) / float(binsize)
else:
ranges = None
overlap = None
# Create requested array
if arraytype == 'compact':
data_array = numpy.zeros((num_bins, max_bin, 2), dtype=numpy.float32)
else:
data_array = numpy.zeros((num_bins * (num_bins - 1) / 2, 2), dtype=numpy.float32)
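    # In both layouts, observed values go in the first slot of the last axis and expected values in the second.
    # The 'upper' layout stores bin pairs in the same order as numpy.triu_indices(num_bins, 1).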
# Fill in data values
if arraytype == 'compact':
_hic_binning.find_cis_compact_expected(mapping, corrections, binning_corrections,
binning_num_bins, fend_indices, mids, distance_parameters,
max_fend, data_array, correction_sums, ranges, overlap,
chrom_mean, startfend)
if datatype != 'expected':
_hic_binning.find_cis_compact_observed(data, data_indices, mapping, max_fend, data_array, ranges, overlap)
else:
data_array[:, :, 0] = data_array[:, :, 1]
data_array[:, :, 1].fill(0)
correction_sums = numpy.bincount(mapping[valid], minlength=num_bins).astype(numpy.float64)
corrections.fill(1)
_hic_binning.find_cis_compact_expected(mapping, corrections, None, None, None, mids, None,
max_fend, data_array, correction_sums, ranges, overlap,
chrom_mean, startfend)
data_array = data_array[:, :, ::-1]
else:
_hic_binning.find_cis_upper_expected(mapping, corrections, binning_corrections,
binning_num_bins, fend_indices, mids, distance_parameters,
max_fend, data_array, correction_sums, ranges, overlap,
chrom_mean, startfend)
if datatype != 'expected':
_hic_binning.find_cis_upper_observed(data, data_indices, mapping, max_fend, data_array, ranges, overlap)
else:
data_array[:, 0] = data_array[:, 1]
data_array[:, 1].fill(0)
correction_sums = numpy.bincount(mapping[valid], minlength=num_bins).astype(numpy.float64)
corrections.fill(1)
_hic_binning.find_cis_upper_expected(mapping, corrections, None, None, None, mids, None,
max_fend, data_array, correction_sums, ranges, overlap,
chrom_mean, startfend)
data_array = data_array[:, ::-1]
# If requesting 'full' array, convert 'upper' array type to 'full'
if arraytype == 'full':
indices = numpy.triu_indices(num_bins, 1)
full_data_array = numpy.zeros((num_bins, num_bins, 2), dtype=numpy.float32)
full_data_array[indices[1], indices[0], :] = data_array
full_data_array[indices[0], indices[1], :] = data_array
del data_array
data_array = full_data_array
if returnmapping:
bin_mapping = numpy.zeros((num_bins, 4), dtype=numpy.int32)
if binsize == 0 and binbounds is None:
if skipfiltered:
bin_mapping[:, 2] = valid + startfend
else:
bin_mapping[:, 2] = numpy.arange(startfend, stopfend)
bin_mapping[:, 3] = bin_mapping[:, 2] + 1
bin_mapping[:, 0] = hic.fends['fends']['start'][bin_mapping[:, 2]]
bin_mapping[:, 1] = hic.fends['fends']['stop'][bin_mapping[:, 2]]
else:
if binbounds is None:
bin_mapping[:, 0] = start + binsize * numpy.arange(num_bins)
bin_mapping[:, 1] = bin_mapping[:, 0] + binsize
else:
bin_mapping[:, :2] = binbounds
bin_mapping[:, 2] = numpy.searchsorted(mids, bin_mapping[:, 0]) + startfend
bin_mapping[:, 3] = numpy.searchsorted(mids, bin_mapping[:, 1]) + startfend
if not silent:
print >> sys.stderr, ("Done\n"),
return [data_array, bin_mapping]
else:
if not silent:
print >> sys.stderr, ("Done\n"),
return data_array
def _find_fend_from_coord(hic, chrint, coord):
"""Find the next fend after the coordinate on chromosome 'chrint'."""
first_fend = hic.fends['chr_indices'][chrint]
last_fend = hic.fends['chr_indices'][chrint + 1]
return numpy.searchsorted(hic.fends['fends']['mid'][first_fend:last_fend], coord) + first_fend
def bin_cis_array(data_array, data_mapping, binsize=10000, binbounds=None, start=None, stop=None, arraytype='full',
returnmapping=False, **kwargs):
"""
Create an array of format 'arraytype' and fill 'binsize' bins or bins defined by 'binbounds' with data provided in the array passed by 'data_array'.
:param data_array: A 2d (upper) or 3d (compact) array containing data to be binned. Array format will be determined from the number of dimensions.
:type data_array: numpy array
:param data_mapping: An N x 4 2d integer array containing the start and stop coordinates, and start and stop fends for each of the N bin ranges in 'data_array'.
:type data_mapping: numpy array
:param binsize: This is the coordinate width of each bin. If binbounds is not None, this value is ignored.
:type binsize: int.
    :param binbounds: An array containing start and stop coordinates for a set of user-defined bins. Any bin from 'data_array' whose midpoint does not fall within one of these bins is ignored.
:type binbounds: numpy array
:param start: The coordinate at the beginning of the first bin of the binned data. If unspecified, 'start' will be the first multiple of 'binsize' below the first coordinate from 'data_mapping'. If 'binbounds' is given, 'start' is ignored. Optional.
:type start: int.
:param stop: The coordinate at the end of the last bin of the binned data. If unspecified, 'stop' will be the first multiple of 'binsize' after the last coordinate from 'data_mapping'. If needed, 'stop' is adjusted upward to create a complete last bin. If 'binbounds' is given, 'stop' is ignored. Optional.
:type stop: int.
:param arraytype: This determines what shape of array data are returned in. Acceptable values are 'compact', 'full', and 'upper'. 'compact' means data are arranged in a N x M x 2 array where N is the number of bins, M is the maximum number of steps between included bin pairs, and data are stored such that bin n,m contains the interaction values between n and n + m + 1. 'full' returns a square, symmetric array of size N x N x 2. 'upper' returns only the flattened upper triangle of a full array, excluding the diagonal of size (N * (N - 1) / 2) x 2.
:type arraytype: str.
:param returnmapping: If 'True', a list containing the data array and a 2d array containing first coordinate included and excluded from each bin, and the first fend included and excluded from each bin is returned. Otherwise only the data array is returned.
:type returnmapping: bool.
    :returns: Array in the format requested with 'arraytype' containing binned data pulled from 'data_array', or a list of the binned data array and mapping array if 'returnmapping' is True.
"""
if 'silent' in kwargs and kwargs['silent']:
silent = True
else:
silent = False
# check that arraytype value is acceptable
if arraytype not in ['full', 'compact', 'upper']:
if not silent:
print >> sys.stderr, ("Unrecognized array type. No data returned.\n"),
return None
# Determine input array type
if len(data_array.shape) == 2 and data_mapping.shape[0] * (data_mapping.shape[0] - 1) / 2 == data_array.shape[0]:
input_type = 'upper'
elif len(data_array.shape) == 3 and data_array.shape[0] == data_mapping.shape[0]:
input_type = 'compact'
else:
if not silent:
print >> sys.stderr, ("Unrecognized input array type. No data returned.\n"),
return None
# Determine start and stop, if necessary
if binbounds is None:
if start is None:
start = (data_mapping[0, 0] / binsize) * binsize
if stop is None:
stop = ((data_mapping[-1, 1] - 1) / binsize + 1) * binsize
else:
stop = ((stop - 1 - start) / binsize + 1) * binsize + start
num_bins = (stop - start) / binsize
binbounds = numpy.zeros((num_bins, 2), dtype=numpy.int32)
binbounds[:, 0] = numpy.arange(num_bins) * binsize + start
binbounds[:, 1] = binbounds[:, 0] + binsize
else:
num_bins = binbounds.shape[0]
start = binbounds[0, 0]
        stop = binbounds[-1, 1]
mids = (data_mapping[:, 0] + data_mapping[:, 1]) / 2
if not silent:
print >> sys.stderr, ("Finding binned %s array...") % (arraytype),
# Find bin mapping for each fend
mapping = numpy.zeros(mids.shape[0], dtype=numpy.int32) - 1
fend_ranges = numpy.zeros((binbounds.shape[0], 2), dtype=numpy.int32)
for i in range(binbounds.shape[0]):
firstbin = numpy.searchsorted(mids, binbounds[i, 0])
lastbin = numpy.searchsorted(mids, binbounds[i, 1])
mapping[firstbin:lastbin] = i
fend_ranges[i, 0] = data_mapping[firstbin, 2]
fend_ranges[i, 1] = data_mapping[lastbin, 3]
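    # Bins from 'data_array' whose midpoints fall outside every target bin keep a mapping of -1 and are ignored.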
# Create requested array
if arraytype == 'compact':
max_bin = (stop - start) / binsize + 1
binned_array = numpy.zeros((num_bins, max_bin, 2), dtype=numpy.float32)
else:
binned_array = numpy.zeros((num_bins * (num_bins - 1) / 2, 2), dtype=numpy.float32)
# Fill in binned data values
if arraytype == 'compact':
if input_type == 'compact':
_hic_binning.bin_compact_to_compact(binned_array, data_array, mapping)
else:
_hic_binning.bin_upper_to_compact(binned_array, data_array, mapping)
# Trim unused bins
valid = numpy.where(numpy.sum(binned_array[:, :, 1] > 0, axis=0) > 0)[0][-1]
binned_array = binned_array[:, :(valid + 1), :]
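        # Trim the compact array down to the largest inter-bin distance that actually holds data.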
else:
if input_type == 'compact':
_hic_binning.bin_compact_to_upper(binned_array, data_array, mapping, num_bins)
else:
_hic_binning.bin_upper_to_upper(binned_array, data_array, mapping, num_bins)
# If requesting 'full' array, convert 'upper' array type to 'full'
if arraytype == 'full':
indices = numpy.triu_indices(num_bins, 1)
full_binned_array = numpy.zeros((num_bins, num_bins, 2), dtype=numpy.float32)
full_binned_array[indices[1], indices[0], :] = binned_array
full_binned_array[indices[0], indices[1], :] = binned_array
del binned_array
binned_array = full_binned_array
# If mapping requested, calculate bin bounds
if returnmapping:
mapping = numpy.zeros((num_bins, 4), dtype=numpy.int32)
mapping[:, 0] = binbounds[:, 0]
mapping[:, 1] = binbounds[:, 1]
mapping[:, 2:4] = fend_ranges
if not silent:
print >> sys.stderr, ("Done\n"),
return [binned_array, mapping]
else:
if not silent:
print >> sys.stderr, ("Done\n"),
return binned_array
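# Example usage (sketch only, not part of the library API; the chromosome name and bin size are hypothetical):
#
#     unbinned, fend_map = find_cis_signal(hic, '1', binsize=0, datatype='fend',
#                                          arraytype='upper', returnmapping=True)
#     binned, bin_map = bin_cis_array(unbinned, fend_map, binsize=40000,
#                                     arraytype='upper', returnmapping=True)
#
# 'hic' is assumed to be an already-loaded hifive HiC project object.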
def dynamically_bin_cis_array(unbinned, unbinnedpositions, binned, binbounds, minobservations=10,
searchdistance=0, removefailed=True, **kwargs):
"""
    Expand bins in 'binned' to include additional data provided in 'unbinned' as necessary to meet the 'minobservations' criterion within the 'searchdistance' limit.
:param unbinned: A 2d or 3d array containing data in either compact or upper format to be used for filling expanding bins. Array format will be determined from the number of dimensions.
:type unbinned: numpy array
:param unbinnedpositions: A 2d integer array indicating the first and last coordinate of each bin in 'unbinned' array.
:type unbinnedpositions: numpy array
:param binned: A 2d or 3d array containing binned data in either compact or upper format to be dynamically binned. Array format will be determined from the number of dimensions. Data in this array will be altered by this function.
:type binned: numpy array
:param binbounds: An integer array indicating the start and end position of each bin in 'binned' array. This array should be N x 2, where N is the number of intervals in 'binned'.
:type binbounds: numpy array
    :param minobservations: The fewest number of observed reads needed for a bin to be counted as valid and stop expanding.
    :type minobservations: int.
    :param searchdistance: The furthest distance from the bin midpoint that bounds may be expanded. If this is set to zero, there is no limit on expansion distance.
    :type searchdistance: int.
    :param removefailed: If a non-zero 'searchdistance' is given, it is possible for a bin not to meet the 'minobservations' criterion before the search limit is reached. If this occurs and 'removefailed' is True, the observed and expected values for that bin are set to zero.
:type removefailed: bool.
:returns: None
"""
if 'silent' in kwargs and kwargs['silent']:
silent = True
else:
silent = False
# Determine unbinned array type
if len(unbinned.shape) == 2 and (unbinnedpositions.shape[0] * (unbinnedpositions.shape[0] - 1) / 2 ==
unbinned.shape[0]):
unbinned_type = 'upper'
elif len(unbinned.shape) == 3 and unbinned.shape[0] == unbinnedpositions.shape[0]:
unbinned_type = 'compact'
else:
if not silent:
print >> sys.stderr, ("Unrecognized unbinned array type. No data returned.\n"),
return None
# Determine binned array type
if len(binned.shape) == 2 and binbounds.shape[0] * (binbounds.shape[0] - 1) / 2 == binned.shape[0]:
binned_type = 'upper'
elif len(binned.shape) == 3 and binned.shape[0] == binbounds.shape[0]:
binned_type = 'compact'
else:
if not silent:
print >> sys.stderr, ("Unrecognized binned array type. No data returned.\n"),
return None
if not silent:
print >> sys.stderr, ("Dynamically binning data..."),
# Determine bin edges relative to unbinned positions
unbinnedmids = (unbinnedpositions[:, 0] + unbinnedpositions[:, 1]) / 2
binedges = numpy.zeros(binbounds.shape, dtype=numpy.int32)
binedges[:, 0] = numpy.searchsorted(unbinnedmids, binbounds[:, 0])
binedges[:, 1] = numpy.searchsorted(unbinnedmids, binbounds[:, 1])
# Determine bin midpoints
mids = (binbounds[:, 0] + binbounds[:, 1]) / 2
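    # Each bin is expanded outward from its midpoint, pulling in 'unbinned' data until 'minobservations'
    # reads are accumulated or the 'searchdistance' limit is reached.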
# Dynamically bin using appropriate array type combination
if unbinned_type == 'upper':
if binned_type == 'upper':
_hic_binning.dynamically_bin_upper_from_upper(unbinned, unbinnedmids, binned, binedges,
mids, minobservations, searchdistance, int(removefailed))
else:
_hic_binning.dynamically_bin_compact_from_upper(unbinned, unbinnedmids, binned, binedges,
mids, minobservations, searchdistance, int(removefailed))
else:
if binned_type == 'upper':
_hic_binning.dynamically_bin_upper_from_compact(unbinned, unbinnedmids, binned, binedges,
mids, minobservations, searchdistance, int(removefailed))
else:
_hic_binning.dynamically_bin_compact_from_compact(unbinned, unbinnedmids, binned, binedges,
mids, minobservations, searchdistance, int(removefailed))
if not silent:
print >> sys.stderr, ("Done\n"),
return None
def find_trans_signal(hic, chrom1, chrom2, binsize=10000, binbounds1=None, binbounds2=None, start1=None, stop1=None,
startfend1=None, stopfend1=None, start2=None, stop2=None, startfend2=None, stopfend2=None,
datatype='enrichment', skipfiltered=False, returnmapping=False, **kwargs):
"""
    Create a 2d array of trans interaction data between 'chrom1' and 'chrom2' and fill with data requested in 'datatype'.
:param hic: A :class:`HiC <hifive.hic.HiC>` class object containing fend and count data.
:type hic: :class:`HiC <hifive.hic.HiC>`
    :param chrom1: The name of the first chromosome contained in 'hic'.
    :type chrom1: str.
    :param chrom2: The name of the second chromosome contained in 'hic'.
    :type chrom2: str.
    :param binsize: This is the coordinate width of each bin. A value of zero indicates unbinned. If 'binbounds1' or 'binbounds2' is not None, this value is ignored for that axis.
    :type binsize: int.
    :param binbounds1: An array containing start and stop coordinates for a set of user-defined bins along 'chrom1'. Any fend not falling in a bin is ignored.
    :type binbounds1: numpy array
    :param binbounds2: An array containing start and stop coordinates for a set of user-defined bins along 'chrom2'. Any fend not falling in a bin is ignored.
    :type binbounds2: numpy array
    :param start1: The smallest coordinate to include along 'chrom1', measured from fend midpoints or the start of the first bin. If 'binbounds1' is given, this value is ignored; if both 'start1' and 'startfend1' are given, 'start1' overrides 'startfend1'. Optional.
    :type start1: int.
    :param stop1: The largest coordinate to include along 'chrom1', measured from fend midpoints or the end of the last bin. If 'binbounds1' is given, this value is ignored; if both 'stop1' and 'stopfend1' are given, 'stop1' overrides 'stopfend1'. Optional.
    :type stop1: int.
    :param startfend1: The first fend to include along 'chrom1'. If 'binbounds1' is given, this value is ignored. Optional.
    :type startfend1: int.
    :param stopfend1: The first fend not to include along 'chrom1'. If 'binbounds1' is given, this value is ignored. Optional.
    :type stopfend1: int.
    :param start2: The smallest coordinate to include along 'chrom2', interpreted the same way as 'start1'. Optional.
    :type start2: int.
    :param stop2: The largest coordinate to include along 'chrom2', interpreted the same way as 'stop1'. Optional.
    :type stop2: int.
    :param startfend2: The first fend to include along 'chrom2', interpreted the same way as 'startfend1'. Optional.
    :type startfend2: int.
    :param stopfend2: The first fend not to include along 'chrom2', interpreted the same way as 'stopfend1'. Optional.
    :type stopfend2: int.
:param datatype: This specifies the type of data that is processed and returned. Options are 'raw', 'distance', 'fend', 'enrichment', and 'expected'. Observed values are always in the first index along the last axis, except when 'datatype' is 'expected'. In this case, filter values replace counts. Conversely, if 'raw' is specified, unfiltered fends return value of one. Expected values are returned for 'distance', 'fend', 'enrichment', and 'expected' values of 'datatype'. 'distance' uses only the expected signal given distance for calculating the expected values, 'fend' uses only fend correction values, and both 'enrichment' and 'expected' use both correction and distance mean values.
:type datatype: str.
:param skipfiltered: If 'True', all interaction bins for filtered out fends are removed and a reduced-size array is returned.
:type skipfiltered: bool.
    :param returnmapping: If 'True', a list containing the data array and two 2d arrays containing the first coordinate included and excluded from each bin, and the first fend included and excluded from each bin for the first and second axes is returned. Otherwise only the data array is returned.
    :type returnmapping: bool.
    :returns: 3d array containing data requested with 'datatype', or a list of the data array and the two bin mapping arrays if 'returnmapping' is True.
"""
if 'silent' in kwargs and kwargs['silent']:
silent = True
else:
silent = False
# check that all values are acceptable
if datatype not in ['raw', 'fend', 'distance', 'enrichment', 'expected']:
if not silent:
print >> sys.stderr, ("Datatype given is not recognized. No data returned\n"),
return None
elif datatype in ['fend', 'enrichment'] and hic.normalization == 'none':
if not silent:
print >> sys.stderr, ("Normalization has not been performed yet on this project. Select either 'raw' or 'distance' for datatype. No data returned\n"),
return None
# Determine start, stop, startfend, and stopfend
chrint1 = hic.chr2int[chrom1.strip('chr')]
chrint2 = hic.chr2int[chrom2.strip('chr')]
if not binbounds1 is None:
start1 = binbounds1[0, 0]
stop1 = binbounds1[-1, 1]
startfend1 = _find_fend_from_coord(hic, chrint1, start1)
stopfend1 = _find_fend_from_coord(hic, chrint1, stop1) + 1
else:
if start1 is None and startfend1 is None:
startfend1 = hic.fends['chr_indices'][chrint1]
while startfend1 < hic.fends['chr_indices'][chrint1 + 1] and hic.filter[startfend1] == 0:
startfend1 += 1
if startfend1 == hic.fends['chr_indices'][chrint1 + 1]:
if not silent:
print >> sys.stderr, ("Insufficient data.\n"),
return None
start1 = hic.fends['fends']['mid'][startfend1]
if binsize > 0:
start1 = (start1 / binsize) * binsize
elif start1 is None:
start1 = hic.fends['fends']['mid'][startfend1]
if binsize > 0:
start1 = (start1 / binsize) * binsize
else:
startfend1 = _find_fend_from_coord(hic, chrint1, start1)
if (stop1 is None or stop1 == 0) and stopfend1 is None:
stopfend1 = hic.fends['chr_indices'][chrint1 + 1]
while stopfend1 > hic.fends['chr_indices'][chrint1] and hic.filter[stopfend1 - 1] == 0:
stopfend1 -= 1
stop1 = hic.fends['fends']['mid'][stopfend1 - 1]
if binsize > 0:
stop1 = ((stop1 - 1 - start1) / binsize + 1) * binsize + start1
elif stop1 is None or stop1 == 0:
stop1 = hic.fends['fends']['mid'][stopfend1 - 1]
if binsize > 0:
stop1 = ((stop1 - 1 - start1) / binsize + 1) * binsize + start1
else:
if binsize > 0:
stop1 = ((stop1 - 1 - start1) / binsize + 1) * binsize + start1
stopfend1 = _find_fend_from_coord(hic, chrint1, stop1) + 1
    if not binbounds2 is None:
        start2 = binbounds2[0, 0]
        stop2 = binbounds2[-1, 1]
startfend2 = _find_fend_from_coord(hic, chrint2, start2)
stopfend2 = _find_fend_from_coord(hic, chrint2, stop2) + 1
else:
if start2 is None and startfend2 is None:
startfend2 = hic.fends['chr_indices'][chrint2]
while startfend2 < hic.fends['chr_indices'][chrint2 + 1] and hic.filter[startfend2] == 0:
startfend2 += 1
if startfend2 == hic.fends['chr_indices'][chrint2 + 1]:
if not silent:
print >> sys.stderr, ("Insufficient data.\n"),
return None
start2 = hic.fends['fends']['mid'][startfend2]
if binsize > 0:
start2 = (start2 / binsize) * binsize
elif start2 is None:
start2 = hic.fends['fends']['mid'][startfend2]
if binsize > 0:
start2 = (start2 / binsize) * binsize
else:
startfend2 = _find_fend_from_coord(hic, chrint2, start2)
if (stop2 is None or stop2 == 0) and stopfend2 is None:
stopfend2 = hic.fends['chr_indices'][chrint2 + 1]
while stopfend2 > hic.fends['chr_indices'][chrint2] and hic.filter[stopfend2 - 1] == 0:
stopfend2 -= 1
stop2 = hic.fends['fends']['mid'][stopfend2 - 1]
if binsize > 0:
stop2 = ((stop2 - 1 - start2) / binsize + 1) * binsize + start2
elif stop2 is None or stop2 == 0:
stop2 = hic.fends['fends']['mid'][stopfend2 - 1]
if binsize > 0:
stop2 = ((stop2 - 1 - start2) / binsize + 1) * binsize + start2
else:
if binsize > 0:
stop2 = ((stop2 - 1 - start2) / binsize + 1) * binsize + start2
stopfend2 = _find_fend_from_coord(hic, chrint2, stop2) + 1
if not silent:
print >> sys.stderr, ("Finding %s array for %s:%i-%i by %s:%i-%i...") % (datatype, chrom1,
start1, stop1, chrom2, start2,
stop2),
# If datatype is not 'expected', pull the needed slice of data
if datatype != 'expected':
if chrint1 < chrint2:
start_index = hic.data['trans_indices'][startfend1]
stop_index = hic.data['trans_indices'][stopfend1]
else:
start_index = hic.data['trans_indices'][startfend2]
stop_index = hic.data['trans_indices'][stopfend2]
if start_index == stop_index:
if not silent:
print >> sys.stderr, ("Insufficient data\n"),
return None
if chrint1 < chrint2:
data_indices = hic.data['trans_indices'][startfend1:(stopfend1 + 1)]
else:
data_indices = hic.data['trans_indices'][startfend2:(stopfend2 + 1)]
data_indices -= data_indices[0]
data = hic.data['trans_data'][start_index:stop_index, :]
if chrint1 < chrint2:
data[:, 0] -= startfend1
data[:, 1] -= startfend2
else:
data[:, 0] -= startfend2
data[:, 1] -= startfend1
else:
data_indices = None
data = None
# Determine mapping of valid fends to bins
mapping1 = numpy.zeros(stopfend1 - startfend1, dtype=numpy.int32) - 1
mapping2 = numpy.zeros(stopfend2 - startfend2, dtype=numpy.int32) - 1
valid1 = numpy.where(hic.filter[startfend1:stopfend1] > 0)[0].astype(numpy.int32)
valid2 = numpy.where(hic.filter[startfend2:stopfend2] > 0)[0].astype(numpy.int32)
mids1 = hic.fends['fends']['mid'][startfend1:stopfend1]
mids2 = hic.fends['fends']['mid'][startfend2:stopfend2]
if binsize == 0 and binbounds1 is None:
if skipfiltered:
mapping1[valid1] = numpy.arange(valid1.shape[0])
num_bins1 = valid1.shape[0]
else:
mapping1[valid1] = valid1
num_bins1 = mapping1.shape[0]
elif not binbounds1 is None:
start_indices = numpy.searchsorted(binbounds1[:, 0], mids1[valid1], side='right') - 1
stop_indices = numpy.searchsorted(binbounds1[:, 1], mids1[valid1], side='right')
where = numpy.where(start_indices == stop_indices)[0]
valid1 = valid1[where]
mapping1[valid1] = start_indices[where]
num_bins1 = binbounds1.shape[0]
else:
mapping1[valid1] = (mids1[valid1] - start1) / binsize
num_bins1 = (stop1 - start1) / binsize
if binsize == 0 and binbounds2 is None:
if skipfiltered:
mapping2[valid2] = numpy.arange(valid2.shape[0])
num_bins2 = valid2.shape[0]
else:
mapping2[valid2] = valid2
num_bins2 = mapping2.shape[0]
elif not binbounds2 is None:
start_indices = numpy.searchsorted(binbounds2[:, 0], mids2[valid2], side='right') - 1
stop_indices = numpy.searchsorted(binbounds2[:, 1], mids2[valid2], side='right')
where = numpy.where(start_indices == stop_indices)[0]
valid2 = valid2[where]
mapping2[valid2] = start_indices[where]
num_bins2 = binbounds2.shape[0]
else:
mapping2[valid2] = (mids2[valid2] - start2) / binsize
num_bins2 = (stop2 - start2) / binsize
# Find maximum interaction partner for each fend
if num_bins1 < 1 or num_bins2 < 1:
if not silent:
print >> sys.stderr, ("Insufficient data\n"),
return None
# If correction is required, determine what type and get appropriate data
if hic.normalization != 'binning' and datatype != 'raw':
corrections1 = hic.corrections[startfend1:stopfend1]
corrections2 = hic.corrections[startfend2:stopfend2]
elif datatype == 'raw':
corrections1 = numpy.ones(stopfend1 - startfend1, dtype=numpy.float32)
corrections2 = numpy.ones(stopfend2 - startfend2, dtype=numpy.float32)
else:
corrections1 = None
corrections2 = None
if ((hic.normalization in ['express', 'probability'] and
datatype == 'fend') or datatype == 'raw'):
correction_sums1 = numpy.zeros(num_bins1, dtype=numpy.float64)
correction_sums2 = numpy.zeros(num_bins2, dtype=numpy.float64)
if datatype == 'fend':
correction_sums1[:] = numpy.bincount(mapping1[valid1], weights=corrections1[valid1], minlength=num_bins1)
correction_sums2[:] = numpy.bincount(mapping2[valid2], weights=corrections2[valid2], minlength=num_bins2)
else:
correction_sums1[:] = numpy.bincount(mapping1[valid1], minlength=num_bins1)
correction_sums2[:] = numpy.bincount(mapping2[valid2], minlength=num_bins2)
else:
correction_sums1 = None
correction_sums2 = None
if (hic.normalization in ['binning', 'binning-express', 'binning-probability'] and
datatype not in ['raw', 'distance']):
binning_corrections = hic.binning_corrections
binning_num_bins = hic.binning_num_bins
fend_indices = hic.binning_fend_indices
else:
binning_corrections = None
binning_num_bins = None
fend_indices = None
if datatype in ['distance', 'enrichment', 'expected']:
if 'trans_means' not in hic.__dict__.keys():
hic.find_trans_means()
if chrint1 < chrint2:
index = chrint1 * (hic.fends['chromosomes'].shape[0] - 1) - chrint1 * (chrint1 + 1) / 2 - 1 + chrint2
else:
index = chrint2 * (hic.fends['chromosomes'].shape[0] - 1) - chrint2 * (chrint2 + 1) / 2 - 1 + chrint1
trans_mean = hic.trans_means[index]
else:
trans_mean = 1.0
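    # 'trans_mean' is the precomputed mean trans signal for this chromosome pair and serves as the
    # distance-independent expected baseline; it is left at 1.0 when no expected correction is needed.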
# Create data array
if chrint1 < chrint2:
data_array = numpy.zeros((num_bins1, num_bins2, 2), dtype=numpy.float32)
else:
data_array = numpy.zeros((num_bins2, num_bins1, 2), dtype=numpy.float32)
# Fill in data values
if chrint1 < chrint2:
_hic_binning.find_trans_expected(mapping1, mapping2, corrections1, corrections2, binning_corrections,
binning_num_bins, fend_indices, data_array,
correction_sums1, correction_sums2, trans_mean, startfend1, startfend2)
if datatype != 'expected':
_hic_binning.find_trans_observed(data, data_indices, mapping1, mapping2, data_array)
else:
data_array[:, :, 0] = data_array[:, :, 1]
data_array[:, :, 1].fill(0)
corrections1.fill(1.0)
corrections2.fill(1.0)
correction_sums1 = numpy.bincount(mapping1[valid1], minlength=num_bins1).astype(numpy.float64)
correction_sums2 = numpy.bincount(mapping2[valid2], minlength=num_bins2).astype(numpy.float64)
_hic_binning.find_trans_expected(mapping1, mapping2, corrections1, corrections2, None, None, None,
data_array, correction_sums1, correction_sums2, 1.0, startfend1,
startfend2)
temp = data_array[:, :, 0]
data_array[:, :, 0] = data_array[:, :, 1]
data_array[:, :, 1] = temp
else:
_hic_binning.find_trans_expected(mapping2, mapping1, corrections2, corrections1, binning_corrections,
binning_num_bins, fend_indices, data_array,
correction_sums2, correction_sums1, trans_mean, startfend2, startfend1)
if datatype != 'expected':
_hic_binning.find_trans_observed(data, data_indices, mapping2, mapping1, data_array)
else:
data_array[:, :, 0] = data_array[:, :, 1]
data_array[:, :, 1].fill(0)
corrections1.fill(1.0)
corrections2.fill(1.0)
correction_sums1 = numpy.bincount(mapping1[valid1], minlength=num_bins1).astype(numpy.float64)
correction_sums2 = numpy.bincount(mapping2[valid2], minlength=num_bins2).astype(numpy.float64)
_hic_binning.find_trans_expected(mapping2, mapping1, corrections2, corrections1, None, None, None,
data_array, correction_sums2, correction_sums1, 1.0, startfend2,
startfend1)
temp = data_array[:, :, 0]
data_array[:, :, 0] = data_array[:, :, 1]
data_array[:, :, 1] = temp
if chrint2 < chrint1:
data_array = numpy.transpose(data_array, (1, 0, 2))
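    # After this transpose, the first axis of 'data_array' always corresponds to 'chrom1' and the second to
    # 'chrom2', regardless of the chromosomes' internal ordering.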
if returnmapping:
bin_mapping1 = numpy.zeros((num_bins1, 4), dtype=numpy.int32)
if binsize == 0 and binbounds1 is None:
if skipfiltered:
bin_mapping1[:, 2] = valid1 + startfend1
else:
bin_mapping1[:, 2] = numpy.arange(startfend1, stopfend1)
bin_mapping1[:, 3] = bin_mapping1[:, 2] + 1
bin_mapping1[:, 0] = hic.fends['fends']['start'][bin_mapping1[:, 2]]
bin_mapping1[:, 1] = hic.fends['fends']['stop'][bin_mapping1[:, 2]]
else:
if binbounds1 is None:
bin_mapping1[:, 0] = start1 + binsize * numpy.arange(num_bins1)
bin_mapping1[:, 1] = bin_mapping1[:, 0] + binsize
else:
bin_mapping1[:, :2] = binbounds1
bin_mapping1[:, 2] = numpy.searchsorted(mids1, bin_mapping1[:, 0]) + startfend1
bin_mapping1[:, 3] = numpy.searchsorted(mids1, bin_mapping1[:, 1]) + startfend1
bin_mapping2 = numpy.zeros((num_bins2, 4), dtype=numpy.int32)
if binsize == 0 and binbounds2 is None:
if skipfiltered:
bin_mapping2[:, 2] = valid2 + startfend2
else:
bin_mapping2[:, 2] = numpy.arange(startfend2, stopfend2)
bin_mapping2[:, 3] = bin_mapping2[:, 2] + 1
bin_mapping2[:, 0] = hic.fends['fends']['start'][bin_mapping2[:, 2]]
bin_mapping2[:, 1] = hic.fends['fends']['stop'][bin_mapping2[:, 2]]
else:
if binbounds2 is None:
bin_mapping2[:, 0] = start2 + binsize * numpy.arange(num_bins2)
bin_mapping2[:, 1] = bin_mapping2[:, 0] + binsize
else:
bin_mapping2[:, :2] = binbounds2
bin_mapping2[:, 2] = numpy.searchsorted(mids2, bin_mapping2[:, 0]) + startfend2
bin_mapping2[:, 3] = numpy.searchsorted(mids2, bin_mapping2[:, 1]) + startfend2
if not silent:
print >> sys.stderr, ("Done\n"),
return [data_array, bin_mapping1, bin_mapping2]
else:
if not silent:
print >> sys.stderr, ("Done\n"),
return data_array
def bin_trans_array(data_array, data_mapping1, data_mapping2, binsize=10000, binbounds1=None, start1=None, stop1=None,
binbounds2=None, start2=None, stop2=None, returnmapping=False, **kwargs):
"""
    Fill 'binsize' bins, or bins defined by 'binbounds1' and 'binbounds2', with data provided in the array passed by 'data_array'.
:param data_array: A 3d array containing data to be binned.
:type data_array: numpy array
:param data_mapping1: An N x 4 2d integer array containing the start and stop coordinates, and start and stop fends for each of the N bin ranges along the first axis in 'data_array'.
:type data_mapping1: numpy array
:param data_mapping2: An N x 4 2d integer array containing the start and stop coordinates, and start and stop fends for each of the N bin ranges along the second axis in 'data_array'.
:type data_mapping2: numpy array
:param binsize: This is the coordinate width of each bin. If binbounds is not None, this value is ignored.
:type binsize: int.
    :param binbounds1: An array containing start and stop coordinates for a set of user-defined bins along the first axis. Any bin from 'data_array' whose midpoint does not fall within one of these bins is ignored.
:type binbounds1: numpy array
:param start1: The coordinate at the beginning of the first bin for the first axis of the binned data. If unspecified, 'start1' will be the first multiple of 'binsize' below the first coordinate from 'data_mapping1'. If 'binbounds1' is given, 'start1' is ignored. Optional.
:type start1: int.
:param stop1: The coordinate at the end of the last bin for the first axis of the binned data. If unspecified, 'stop1' will be the first multiple of 'binsize' after the last coordinate from 'data_mapping1'. If needed, 'stop1' is adjusted upward to create a complete last bin. If 'binbounds1' is given, 'stop1' is ignored. Optional.
:type stop1: int.
    :param binbounds2: An array containing start and stop coordinates for a set of user-defined bins along the second axis. Any bin from 'data_array' whose midpoint does not fall within one of these bins is ignored.
:type binbounds2: numpy array
:param start2: The coordinate at the beginning of the first bin for the second axis of the binned data. If unspecified, 'start2' will be the first multiple of 'binsize' below the first coordinate from 'data_mapping2'. If 'binbounds2' is given, 'start2' is ignored. Optional.
:type start2: int.
:param stop2: The coordinate at the end of the last bin for the second axis of the binned data. If unspecified, 'stop2' will be the first multiple of 'binsize' after the last coordinate from 'data_mapping2'. If needed, 'stop2' is adjusted upward to create a complete last bin. If 'binbounds2' is given, 'stop2' is ignored. Optional.
:type stop2: int.
    :param returnmapping: If 'True', a list containing the data array and two 2d arrays containing the first coordinate included and excluded from each bin, and the first fend included and excluded from each bin for each axis is returned. Otherwise only the data array is returned.
    :type returnmapping: bool.
    :returns: A 3d array containing binned data pulled from 'data_array', or a list of the binned array and the two bin mapping arrays if 'returnmapping' is True.
"""
if 'silent' in kwargs and kwargs['silent']:
silent = True
else:
silent = False
# Determine start and stop, if necessary
if binbounds1 is None:
if start1 is None:
start1 = (data_mapping1[0, 0] / binsize) * binsize
if stop1 is None:
stop1 = ((data_mapping1[-1, 1] - 1) / binsize + 1) * binsize
else:
stop1 = ((stop1 - 1 - start1) / binsize + 1) * binsize + start1
num_bins1 = (stop1 - start1) / binsize
binbounds1 = numpy.zeros((num_bins1, 2), dtype=numpy.int32)
binbounds1[:, 0] = numpy.arange(num_bins1) * binsize + start1
binbounds1[:, 1] = binbounds1[:, 0] + binsize
else:
num_bins1 = binbounds1.shape[0]
start1 = binbounds1[0, 0]
        stop1 = binbounds1[-1, 1]
if binbounds2 is None:
if start2 is None:
start2 = (data_mapping2[0, 0] / binsize) * binsize
if stop2 is None:
stop2 = ((data_mapping2[-1, 1] - 1) / binsize + 1) * binsize
else:
stop2 = ((stop2 - 1 - start2) / binsize + 1) * binsize + start2
num_bins2 = (stop2 - start2) / binsize
binbounds2 = numpy.zeros((num_bins2, 2), dtype=numpy.int32)
binbounds2[:, 0] = numpy.arange(num_bins2) * binsize + start2
binbounds2[:, 1] = binbounds2[:, 0] + binsize
else:
num_bins2 = binbounds2.shape[0]
start2 = binbounds2[0, 0]
        stop2 = binbounds2[-1, 1]
mids1 = (data_mapping1[:, 0] + data_mapping1[:, 1]) / 2
mids2 = (data_mapping2[:, 0] + data_mapping2[:, 1]) / 2
if not silent:
print >> sys.stderr, ("Finding binned trans array..."),
# Find bin mapping for each fend
mapping1 = numpy.zeros(mids1.shape[0], dtype=numpy.int32) - 1
fend_ranges1 = numpy.zeros((binbounds1.shape[0], 2), dtype=numpy.int32)
for i in range(binbounds1.shape[0]):
firstbin = numpy.searchsorted(mids1, binbounds1[i, 0])
lastbin = numpy.searchsorted(mids1, binbounds1[i, 1])
mapping1[firstbin:lastbin] = i
fend_ranges1[i, 0] = data_mapping1[firstbin, 2]
fend_ranges1[i, 1] = data_mapping1[lastbin, 3]
valid1 = numpy.where(mapping1 >= 0)[0]
mapping2 = numpy.zeros(mids2.shape[0], dtype=numpy.int32) - 1
fend_ranges2 = numpy.zeros((binbounds2.shape[0], 2), dtype=numpy.int32)
for i in range(binbounds2.shape[0]):
firstbin = numpy.searchsorted(mids2, binbounds2[i, 0])
lastbin = numpy.searchsorted(mids2, binbounds2[i, 1])
mapping2[firstbin:lastbin] = i
fend_ranges2[i, 0] = data_mapping2[firstbin, 2]
fend_ranges2[i, 1] = data_mapping2[lastbin, 3]
valid2 = numpy.where(mapping2 >= 0)[0]
# Create requested array
binned_array = numpy.zeros((num_bins1, num_bins2, 2), dtype=numpy.float32)
# Fill in binned data values
    for i in range(valid1.shape[0]):
        binned_array[mapping1[valid1[i]], :, 0] += numpy.bincount(mapping2[valid2],
                                                                  weights=data_array[valid1[i], valid2, 0],
                                                                  minlength=num_bins2)
        binned_array[mapping1[valid1[i]], :, 1] += numpy.bincount(mapping2[valid2],
                                                                  weights=data_array[valid1[i], valid2, 1],
                                                                  minlength=num_bins2)
# If mapping requested, calculate bin bounds
if returnmapping:
mapping1 = numpy.zeros((num_bins1, 4), dtype=numpy.int32)
mapping1[:, 0] = binbounds1[:, 0]
mapping1[:, 1] = binbounds1[:, 1]
mapping1[:, 2:4] = fend_ranges1
mapping2 = numpy.zeros((num_bins2, 4), dtype=numpy.int32)
mapping2[:, 0] = binbounds2[:, 0]
mapping2[:, 1] = binbounds2[:, 1]
mapping2[:, 2:4] = fend_ranges2
if not silent:
print >> sys.stderr, ("Done\n"),
return [binned_array, mapping1, mapping2]
else:
if not silent:
print >> sys.stderr, ("Done\n"),
return binned_array
def dynamically_bin_trans_array(unbinned, unbinnedpositions1, unbinnedpositions2, binned, binbounds1, binbounds2,
minobservations=10, searchdistance=0, removefailed=False, **kwargs):
"""
    Expand bins in 'binned' to include additional data provided in 'unbinned' as necessary to meet the 'minobservations' criterion within the 'searchdistance' limit.
:param unbinned: A 3d array containing data to be used for filling expanding bins. This array should be N x M x 2, where N is the number of bins or fends from the first chromosome and M is the number of bins or fends from the second chromosome.
:type unbinned: numpy array
:param unbinnedpositions1: A 2d integer array indicating the first and last coordinate of each bin along the first axis in 'unbinned' array.
:type unbinnedpositions1: numpy array
    :param unbinnedpositions2: A 2d integer array indicating the first and last coordinate of each bin along the second axis in 'unbinned' array.
:type unbinnedpositions2: numpy array
:param binned: A 3d array containing binned data to be dynamically binned. This array should be N x M x 2, where N is the number of bins from the first chromosome and M is the number of bins from the second chromosome. Data in this array will be altered by this function.
:type binned: numpy array
:param binbounds1: An integer array indicating the start and end position of each bin from the first chromosome in the 'binned' array. This array should be N x 2, where N is the size of the first dimension of 'binned'.
:type binbounds1: numpy array
:param binbounds2: An integer array indicating the start and end position of each bin from the second chromosome in the 'binned' array. This array should be N x 2, where N is the size of the second dimension of 'binned'.
:type binbounds2: numpy array
    :param minobservations: The fewest number of observed reads needed for a bin to be counted as valid and stop expanding.
    :type minobservations: int.
    :param searchdistance: The furthest distance from the bin midpoint that bounds may be expanded. If this is set to zero, there is no limit on expansion distance.
    :type searchdistance: int.
    :param removefailed: If a non-zero 'searchdistance' is given, it is possible for a bin not to meet the 'minobservations' criterion before the search limit is reached. If this occurs and 'removefailed' is True, the observed and expected values for that bin are set to zero.
:type removefailed: bool.
:returns: None
"""
if 'silent' in kwargs and kwargs['silent']:
silent = True
else:
silent = False
if not silent:
print >> sys.stderr, ("Dynamically binning data..."),
# Determine bin edges relative to unbinned positions
unbinnedmids1 = (unbinnedpositions1[:, 0] + unbinnedpositions1[:, 1]) / 2
unbinnedmids2 = (unbinnedpositions2[:, 0] + unbinnedpositions2[:, 1]) / 2
binedges1 = numpy.zeros(binbounds1.shape, dtype=numpy.int32)
binedges1[:, 0] = numpy.searchsorted(unbinnedmids1, binbounds1[:, 0])
binedges1[:, 1] = numpy.searchsorted(unbinnedmids1, binbounds1[:, 1])
binedges2 = numpy.zeros(binbounds2.shape, dtype=numpy.int32)
binedges2[:, 0] = numpy.searchsorted(unbinnedmids2, binbounds2[:, 0])
binedges2[:, 1] = numpy.searchsorted(unbinnedmids2, binbounds2[:, 1])
# Determine bin midpoints
mids1 = (binbounds1[:, 0] + binbounds1[:, 1]) / 2
mids2 = (binbounds2[:, 0] + binbounds2[:, 1]) / 2
# Dynamically bin using appropriate array type combination
_hic_binning.dynamically_bin_trans(unbinned, unbinnedmids1, unbinnedmids2, binned, binedges1,
binedges2, mids1, mids2, minobservations, searchdistance, int(removefailed))
if not silent:
print >> sys.stderr, ("Done\n"),
return None
def write_heatmap_dict(hic, filename, binsize, includetrans=True, datatype='enrichment', chroms=[],
dynamically_binned=False, minobservations=0, searchdistance=0, expansion_binsize=0,
removefailed=False, **kwargs):
"""
Create an h5dict file containing binned interaction arrays, bin positions, and an index of included chromosomes. This function is MPI compatible.
:param hic: A :class:`HiC <hifive.hic.HiC>` class object containing fend and count data.
:type hic: :class:`HiC <hifive.hic.HiC>`
:param filename: Location to write h5dict object to.
:type filename: str.
:param binsize: Size of bins for interaction arrays.
:type binsize: int.
:param includetrans: Indicates whether trans interaction arrays should be calculated and saved.
:type includetrans: bool.
:param datatype: This specifies the type of data that is processed and returned. Options are 'raw', 'distance', 'fend', 'enrichment', and 'expected'. Observed values are always in the first index along the last axis, except when 'datatype' is 'expected'. In this case, filter values replace counts. Conversely, if 'raw' is specified, unfiltered fends return value of one. Expected values are returned for 'distance', 'fend', 'enrichment', and 'expected' values of 'datatype'. 'distance' uses only the expected signal given distance for calculating the expected values, 'fend' uses only fend correction values, and both 'enrichment' and 'expected' use both correction and distance mean values.
:type datatype: str.
:param chroms: A list of chromosome names indicating which chromosomes should be included. If left empty, all chromosomes are included. Optional.
:type chroms: list
:param dynamically_binned: If 'True', return dynamically binned data.
:type dynamically_binned: bool.
    :param minobservations: The fewest number of observed reads needed for a bin to be counted as valid and stop expanding.
    :type minobservations: int.
    :param searchdistance: The furthest distance from the bin midpoint that bounds may be expanded. If this is set to zero, there is no limit on expansion distance.
:type searchdistance: int.
:param expansion_binsize: The size of bins to use for data to pull from when expanding dynamic bins. If set to zero, unbinned data is used.
:type expansion_binsize: int.
    :param removefailed: If a non-zero 'searchdistance' is given, it is possible for a bin not to meet the 'minobservations' criterion before the search limit is reached. If this occurs and 'removefailed' is True, the observed and expected values for that bin are set to zero.
:type removefailed: bool.
:returns: None
"""
# check if MPI is available
if 'mpi4py' in sys.modules.keys():
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
num_procs = comm.Get_size()
else:
comm = None
rank = 0
num_procs = 1
if ('silent' in kwargs and kwargs['silent']) or rank > 0:
silent = True
else:
silent = False
# Check if trans mean is needed and calculate if not already done
    if includetrans and datatype in ['distance', 'enrichment'] and 'trans_means' not in hic.__dict__.keys():
hic.find_trans_means()
# Check if filename already exists, and remove if it does
if rank == 0:
if os.path.exists(filename):
if not silent:
print >> sys.stderr, ("%s already exists, overwriting.") % filename
subprocess.call('rm %s' % filename, shell=True)
if not silent:
print >> sys.stderr, ("Creating binned heatmap...\n"),
output = h5py.File(filename, 'w')
output.attrs['resolution'] = binsize
# If chromosomes not specified, fill list
if len(chroms) == 0:
chroms = list(hic.fends['chromosomes'][...])
# Assemble list of requested arrays
needed = []
chr_indices = hic.fends['chr_indices'][...]
for i in range(len(chroms))[::-1]:
chrom = chroms[i]
chrint = hic.chr2int[chrom]
if numpy.sum(hic.filter[chr_indices[chrint]:chr_indices[chrint + 1]]) > 0:
needed.append((chrom,))
else:
del chroms[i]
if includetrans:
for i in range(len(chroms)-1):
for j in range(i + 1, len(chroms)):
needed.append((chroms[i],chroms[j]))
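    # Trans pairs are only queued for chromosomes that passed the cis data check above.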
if num_procs == 1:
node_needed = needed
else:
node_ranges = numpy.round(numpy.linspace(0, len(needed), num_procs + 1)).astype(numpy.int32)
for i in range(1, num_procs):
comm.send(needed[node_ranges[i]:node_ranges[i + 1]], dest=i, tag=11)
node_needed = needed[node_ranges[0]:node_ranges[1]]
else:
node_needed = comm.recv(source=0, tag=11)
heatmaps = {}
# Find heatmaps
for chrom in node_needed:
if len(chrom) == 1:
# Find cis heatmap
# determine if data is to be dynamically binned
if not dynamically_binned:
heatmaps[chrom] = find_cis_signal(hic, chrom[0], binsize=binsize, datatype=datatype,
arraytype='upper', returnmapping=True, silent=silent,
skipfiltered=True)
else:
temp = find_cis_signal(hic, chrom[0], binsize=expansion_binsize, datatype=datatype, arraytype='upper',
returnmapping=True, silent=silent)
if temp is None:
continue
expansion, exp_mapping = temp
binned, mapping = find_cis_signal(hic, chrom[0], binsize=binsize, datatype=datatype,
arraytype='upper', returnmapping=True, silent=silent)
dynamically_bin_cis_array(expansion, exp_mapping, binned, mapping, minobservations=minobservations,
searchdistance=searchdistance, removefailed=removefailed, silent=silent)
heatmaps[chrom] = [binned, mapping]
else:
# Find trans heatmap
# determine if data is to be dynamically binned
if not dynamically_binned:
heatmaps[chrom] = find_trans_signal(hic, chrom[0], chrom[1], binsize=binsize, datatype=datatype,
returnmapping=False, silent=silent, skipfiltered=True)
else:
temp = find_trans_signal(hic, chrom[0], chrom[1], binsize=expansion_binsize, datatype=datatype,
returnmapping=True, silent=silent)
if temp is None:
continue
expansion, exp_mapping1, exp_mapping2 = temp
binned, mapping1, mapping2 = find_trans_signal(hic, chrom[0], chrom[1], binsize=binsize,
datatype=datatype, returnmapping=True, silent=silent)
dynamically_bin_trans_array(expansion, exp_mapping1, exp_mapping2, binned, mapping1, mapping2,
minobservations=minobservations, searchdistance=searchdistance,
removefailed=removefailed, silent=silent)
heatmaps[chrom] = binned
# Check if array contains data
if heatmaps[chrom] is None or heatmaps[chrom][0].shape[0] == 0:
del heatmaps[chrom]
# Collect heatmaps at node 0 and write to h5dict
if rank == 0:
if num_procs > 1:
for i in range(1, num_procs):
if node_ranges[i + 1] - node_ranges[i] > 0:
temp = comm.recv(source=i, tag=11)
heatmaps.update(temp)
del temp
for chrom in heatmaps.keys():
if len(chrom) == 1:
output.create_dataset('%s.counts' % chrom[0], data=heatmaps[chrom][0][:, 0])
output.create_dataset('%s.expected' % chrom[0], data=heatmaps[chrom][0][:, 1])
output.create_dataset('%s.positions' % chrom[0], data=heatmaps[chrom][1][:, :2])
else:
output.create_dataset('%s_by_%s.counts' % (chrom[0], chrom[1]), data=heatmaps[chrom][:, :, 0])
output.create_dataset('%s_by_%s.expected' % (chrom[0], chrom[1]), data=heatmaps[chrom][:, :, 1])
output.create_dataset('chromosomes', data=numpy.array(chroms))
if 'history' in kwargs:
output.attrs['history'] = kwargs['history']
output.close()
if not silent:
print >> sys.stderr, ("Creating binned heatmap...Done\n"),
else:
if len(heatmaps) > 0:
comm.send(heatmaps, dest=0, tag=11)
del heatmaps
return None
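# Example usage (sketch only; the project and output file names are hypothetical):
#
#     hic = hifive.HiC('project.hcp')
#     write_heatmap_dict(hic, 'heatmap.hdf5', binsize=1000000, includetrans=True,
#                        datatype='fend', chroms=['1', '2'])
#
# This assumes 'hifive' has been imported and that the project has been normalized if 'fend' or
# 'enrichment' data are requested.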
def find_multiresolution_heatmap(hic, chrom, start, stop, chrom2=None, start2=None, stop2=None, minbinsize=5000,
maxbinsize=12800000, minobservations=5, datatype='fend', midbinsize=40000,
silent=True):
"""
Create a multi-resolution data and index heatmap array for a chromosome or chromosome pair.
:param hic: A :class:`HiC <hifive.hic.HiC>` class object containing fend and count data.
:type hic: :class:`HiC <hifive.hic.HiC>`
:param chrom: The first (or only) chromosome to find the multi-resolution heatmap for.
:type chrom: str.
:param start: The first bin start coordinate.
:type start: int.
:param stop: The last bin stop coordinate. The difference between start and stop must be a multiple of maxbinsize.
:type stop: int.
:param chrom2: The second chromosome to find the multi-resolution heatmap for. If None, an intra-chromosomal multi-resolution heatmap is returned for chrom.
:type chrom2: str.
:param start2: The first bin start coordinate for the second chromosome.
:type start2: int.
:param stop2: The last bin stop coordinate for the second chromosome. The difference between start and stop must be a multiple of maxbinsize.
:type stop2: int.
:param maxbinsize: The maximum sized bin (lowest resolution) heatmap to be produced for each chromosome.
:type maxbinsize: int.
:param minbinsize: The minimum sized bin (highest resolution) heatmap to be produced for each chromosome.
:type minbinsize: int.
:param minobservations: The minimum number of reads needed for a bin to be considered valid and be included in the heatmap.
:type minobservations: int.
:param datatype: This specifies the type of data that is processed and returned. Options are 'raw', 'distance', 'fend', and 'enrichment'. Observed values are always in the first index along the last axis. If 'raw' is specified, unfiltered fends return value of one. Expected values are returned for 'distance', 'fend', 'enrichment', and 'expected' values of 'datatype'. 'distance' uses only the expected signal given distance for calculating the expected values, 'fend' uses only fend correction values, and 'enrichment' uses both correction and distance mean values.
:type datatype: str.
:param midbinsize: This is used to determine the smallest bin size (highest resolution) complete heatmap to generate in producing the multi-resolution heatmap. It does not affect the resulting output but can be used to limit the total memory usage, with higher values using less memory but more time.
:type midbinsize: int.
:param silent: Indicates whether to display messages or not.
:type silent: bool.
"""
# check that all values are acceptable
if datatype not in ['raw', 'fend', 'distance', 'enrichment']:
if not silent:
print >> sys.stderr, ("Datatype given is not recognized. No data returned\n"),
return None
if not chrom2 is None and (start2 is None or stop2 is None):
if not silent:
print >> sys.stderr, ("Need values for start2 and stop2. No data returned\n"),
return None
if (stop - start) % maxbinsize != 0 or (not chrom2 is None and (stop2 - start2) % maxbinsize != 0):
if not silent:
print >> sys.stderr, ("Genomic intervals must be multiples of maxbinsize. No data returned\n"),
return None
res_levels = numpy.round(numpy.log(maxbinsize / minbinsize) / numpy.log(2.0)).astype(numpy.int32)
if maxbinsize != minbinsize * 2 ** res_levels:
if not silent:
            print >> sys.stderr, ("Maxbinsize must equal minbinsize times a power of two (minbinsize * 2^N for some integer N). No data returned\n"),
return None
if not silent:
if chrom2 is None:
target = chrom
else:
target = '%s by %s' % (chrom, chrom2)
print >> sys.stderr, ("\r%s\rFinding multi-resolution heatmap for %s...") % (' ' * 80, target),
# determine if finding cis or trans multi-resolution heatmap
chrint = hic.chr2int[chrom]
chrint2 = None
span = stop - start
startfend = _find_fend_from_coord(hic, chrint, start)
stopfend = _find_fend_from_coord(hic, chrint, stop)
if chrom2 is None:
trans = False
else:
span2 = stop2 - start2
chrint2 = hic.chr2int[chrom2]
trans = True
startfend2 = _find_fend_from_coord(hic, chrint2, start2)
stopfend2 = _find_fend_from_coord(hic, chrint2, stop2)
# determine actual midresolution limit
temp = maxbinsize
while temp / 2 >= max(midbinsize, minbinsize):
temp /= 2
midbinsize = temp
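    # Snap midbinsize down so that it equals maxbinsize / 2^k; every resolution level then nests evenly
    # inside the level above it.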
# pull relevant data
n = span / midbinsize
valid = numpy.where(hic.filter[startfend:stopfend])[0].astype(numpy.int32)
fend_nums = valid + startfend
mids = hic.fends['fends']['mid'][fend_nums] - start
binbounds = numpy.round(numpy.linspace(0, span, n + 1)).astype(numpy.int32)
bin_mids = (binbounds[:-1] + binbounds[1:]) / 2
mapping = numpy.empty(stopfend - startfend, dtype=numpy.int32)
mapping.fill(-1)
mapping[valid] = numpy.arange(valid.shape[0])
binmapping = mids / midbinsize
obs_indices = numpy.searchsorted(mids, binbounds).astype(numpy.int32)
if hic.normalization in ['express', 'probability', 'binning-express', 'binning-probability']:
corrections = hic.corrections[fend_nums]
correction_sums = numpy.bincount(binmapping, weights=corrections, minlength=n).astype(numpy.float32)
else:
corrections = None
correction_sums = None
if hic.normalization in ['binning', 'binning-express', 'binning-probability']:
binning_corrections = hic.binning_corrections
fend_indices = hic.binning_fend_indices[fend_nums, :, :]
else:
binning_corrections = None
fend_indices = None
if datatype in ['distance', 'enrichment']:
distance_parameters = hic.distance_parameters
chrom_mean = hic.chromosome_means[chrint]
else:
distance_parameters = None
chrom_mean = 0.0
if trans:
m = span2 / midbinsize
valid2 = numpy.where(hic.filter[startfend2:stopfend2])[0]
fend_nums2 = valid2 + startfend2
mids2 = hic.fends['fends']['mid'][fend_nums2] - start2
binbounds2 = numpy.round(numpy.linspace(0, span2, m + 1)).astype(numpy.int32)
bin_mids2 = (binbounds2[:-1] + binbounds2[1:]) / 2
obs_indices2 = numpy.searchsorted(mids2, binbounds2).astype(numpy.int32)
mapping2 = numpy.empty(stopfend2 - startfend2, dtype=numpy.int32)
mapping2.fill(-1)
mapping2[valid2] = numpy.arange(valid2.shape[0])
binmapping2 = mids2 / midbinsize
if hic.normalization in ['express', 'probability', 'binning-express', 'binning-probability']:
corrections2 = hic.corrections[fend_nums2]
correction_sums2 = numpy.bincount(binmapping2, weights=corrections2, minlength=m).astype(numpy.float32)
else:
corrections2 = None
correction_sums2 = None
if hic.normalization in ['binning', 'binning-express', 'binning-probability']:
fend_indices2 = hic.binning_fend_indices[fend_nums2, :, :]
else:
fend_indices2 = None
if datatype in ['distance', 'enrichment']:
if 'trans_means' not in hic.__dict__.keys():
hic.find_trans_means()
if chrint < chrint2:
index = chrint * (hic.fends['chromosomes'].shape[0] - 1) - chrint * (chrint + 1) / 2 - 1 + chrint2
else:
index = chrint2 * (hic.fends['chromosomes'].shape[0] - 1) - chrint2 * (chrint2 + 1) / 2 - 1 + chrint
chrom_mean = hic.trans_means[index]
# pull relevant trans observations and remap
if chrint2 < chrint:
start_index = hic.data['trans_indices'][startfend2]
stop_index = hic.data['trans_indices'][stopfend2]
data = hic.data['trans_data'][start_index:stop_index, :]
data_indices = hic.data['trans_indices'][startfend2:(stopfend2 + 1)]
data_indices -= data_indices[0]
num_data = _hic_binning.remap_mrh_data(
data,
data_indices,
mapping2,
mapping,
startfend,
stopfend,
startfend2,
stopfend2 - startfend2,
1)
else:
start_index = hic.data['trans_indices'][startfend]
stop_index = hic.data['trans_indices'][stopfend]
data = hic.data['trans_data'][start_index:stop_index, :]
data_indices = hic.data['trans_indices'][startfend:(stopfend + 1)]
data_indices -= data_indices[0]
num_data = _hic_binning.remap_mrh_data(
data,
data_indices,
mapping,
mapping2,
startfend2,
stopfend2,
startfend,
stopfend - startfend,
0)
else:
# pull relevant cis observations
start_index = hic.data['cis_indices'][startfend]
stop_index = hic.data['cis_indices'][stopfend]
data = hic.data['cis_data'][start_index:stop_index, :]
data_indices = hic.data['cis_indices'][startfend:(stopfend + 1)]
data_indices -= data_indices[0]
num_data = _hic_binning.remap_mrh_data(
data,
data_indices,
mapping,
None,
startfend,
stopfend,
startfend,
stopfend - startfend,
0)
if trans and chrint2 < chrint:
data = data[numpy.lexsort((data[:, 1], data[:, 0])), :]
data_indices = numpy.r_[0, numpy.bincount(data[:num_data, 0], minlength=valid.shape[0])].astype(numpy.int64)
for i in range(1, data_indices.shape[0]):
data_indices[i] += data_indices[i - 1]
data = data[:data_indices[-1], 1:]
# convert observations into binned matrix
if trans:
observed = numpy.zeros((n, m), dtype=numpy.int32)
else:
observed = numpy.zeros((n, n), dtype=numpy.int32)
binmapping2 = None
_hic_binning.find_mrh_observed(
data,
data_indices,
observed,
binmapping,
binmapping2)
expected = numpy.zeros(observed.shape, dtype=numpy.float32)
datatype_int = {'raw':0, 'fend':1, 'distance':2, 'enrichment':3}
dt_int = datatype_int[datatype]
if trans:
_hic_binning.find_mrh_trans_expected(
expected,
binmapping,
binmapping2,
obs_indices,
obs_indices2,
corrections,
corrections2,
correction_sums,
correction_sums2,
binning_corrections,
fend_indices,
fend_indices2,
chrom_mean,
dt_int)
else:
_hic_binning.find_mrh_cis_expected(
expected,
fend_nums,
binmapping,
mapping,
mids,
obs_indices,
corrections,
correction_sums,
binning_corrections,
fend_indices,
distance_parameters,
chrom_mean,
dt_int)
# find features for largest binned data array
n_bins = span / maxbinsize
m_bins = 0
binbounds = numpy.linspace(0, span, n_bins + 1)
if trans:
m_bins = span2 / maxbinsize
binbounds2 = numpy.linspace(0, span2, m_bins + 1)
# find fend assignments for largest bin sizes
obs_indices = numpy.searchsorted(bin_mids, binbounds).astype(numpy.int32)
if trans:
obs_indices2 = numpy.searchsorted(bin_mids2, binbounds2).astype(numpy.int32)
else:
obs_indices2 = None
# make data arrays to hold output
if trans:
current_level_data = numpy.zeros(n_bins * m_bins, dtype=numpy.float32)
else:
current_level_data = numpy.zeros((n_bins * (n_bins + 1)) / 2, dtype=numpy.float32)
current_level_indices = numpy.empty(current_level_data.shape, dtype=numpy.int32)
current_level_indices.fill(-1)
current_level_shapes = numpy.zeros(current_level_data.shape, dtype=numpy.int32)
bin_position = numpy.empty(current_level_data.shape, dtype=numpy.int32)
bin_position.fill(-1)
# find largest binned data array
if trans:
_hic_binning.make_trans_mrh_toplevel(observed,
expected,
current_level_data,
obs_indices,
obs_indices2,
bin_position,
minobservations)
else:
_hic_binning.make_cis_mrh_toplevel(observed,
expected,
current_level_data,
obs_indices,
bin_position,
minobservations)
all_data = [current_level_data]
all_indices = [current_level_indices]
all_shapes = [current_level_shapes]
# find subpartitioning for all valid bins for each resolution level
resolution = maxbinsize / 2
if trans:
pos = n_bins * m_bins
else:
pos = (n_bins * (n_bins + 1)) / 2
# find levels below the first but above or equal to midbinsize
while resolution >= midbinsize:
prev_bin_position = bin_position
bin_position = numpy.empty(prev_bin_position.shape[0] * 4, dtype=numpy.int32)
bin_position.fill(-1)
prev_level_data = all_data[-1]
current_level_data = numpy.empty(prev_level_data.shape[0] * 4, dtype=numpy.float32)
current_level_data.fill(numpy.nan)
prev_level_indices = all_indices[-1]
prev_level_shapes = all_shapes[-1]
prev_n_bins = n_bins
prev_m_bins = 0
n_bins = span / resolution
binbounds = numpy.linspace(0, span, n_bins + 1)
obs_indices = numpy.searchsorted(bin_mids, binbounds).astype(numpy.int32)
if trans:
prev_m_bins = m_bins
m_bins = span2 / resolution
binbounds2 = numpy.linspace(0, span2, m_bins + 1)
obs_indices2 = numpy.searchsorted(bin_mids2, binbounds2).astype(numpy.int32)
if trans:
_hic_binning.make_trans_mrh_midlevel(observed,
expected,
current_level_data,
prev_level_data,
prev_level_indices,
prev_level_shapes,
obs_indices,
obs_indices2,
prev_bin_position,
bin_position,
prev_m_bins,
m_bins,
minobservations,
pos)
else:
_hic_binning.make_cis_mrh_midlevel(observed,
expected,
current_level_data,
prev_level_data,
prev_level_indices,
prev_level_shapes,
obs_indices,
prev_bin_position,
bin_position,
prev_n_bins,
n_bins,
minobservations,
pos)
where = numpy.where(bin_position >= 0)[0]
pos += where.shape[0]
bin_position = bin_position[where]
all_data.append(current_level_data[where])
if resolution > minbinsize:
all_indices.append(numpy.empty(all_data[-1].shape[0], dtype=numpy.int32))
all_indices[-1].fill(-1)
all_shapes.append(numpy.zeros(all_data[-1].shape[0], dtype=numpy.int32))
resolution /= 2
# find levels below midbinsize
if midbinsize > minbinsize:
while resolution >= minbinsize:
prev_bin_position = bin_position
bin_position = numpy.empty(prev_bin_position.shape[0] * 4, dtype=numpy.int32)
bin_position.fill(-1)
prev_level_data = all_data[-1]
current_level_data = numpy.empty(prev_level_data.shape[0] * 4, dtype=numpy.float32)
current_level_data.fill(numpy.nan)
prev_level_indices = all_indices[-1]
prev_level_shapes = all_shapes[-1]
prev_n_bins = n_bins
prev_m_bins = 0
n_bins = span / resolution
binbounds = numpy.linspace(0, span, n_bins + 1)
obs_indices = numpy.searchsorted(mids, binbounds).astype(numpy.int32)
correction_sums = numpy.zeros(n_bins, dtype=numpy.float32)
for i in range(n_bins):
correction_sums[i] = numpy.sum(corrections[obs_indices[i]:obs_indices[i + 1]])
if trans:
prev_m_bins = m_bins
m_bins = span2 / resolution
binbounds2 = numpy.linspace(0, span2, m_bins + 1)
obs_indices2 = numpy.searchsorted(mids2, binbounds2).astype(numpy.int32)
correction_sums2 = numpy.zeros(m_bins, dtype=numpy.float32)
for i in range(m_bins):
correction_sums2[i] = numpy.sum(corrections2[obs_indices2[i]:obs_indices2[i + 1]])
if trans:
_hic_binning.make_trans_mrh_lowerlevel(data,
data_indices,
correction_sums,
correction_sums2,
current_level_data,
prev_level_data,
prev_level_indices,
prev_level_shapes,
obs_indices,
obs_indices2,
prev_bin_position,
bin_position,
prev_m_bins,
m_bins,
minobservations,
pos)
else:
_hic_binning.make_cis_mrh_lowerlevel(data,
data_indices,
corrections,
correction_sums,
fend_nums,
current_level_data,
prev_level_data,
prev_level_indices,
prev_level_shapes,
obs_indices,
prev_bin_position,
bin_position,
prev_n_bins,
n_bins,
minobservations,
pos)
where = numpy.where(bin_position >= 0)[0]
pos += where.shape[0]
bin_position = bin_position[where]
all_data.append(current_level_data[where])
if resolution > minbinsize:
all_indices.append(numpy.empty(all_data[-1].shape[0], dtype=numpy.int32))
all_indices[-1].fill(-1)
all_shapes.append(numpy.zeros(all_data[-1].shape[0], dtype=numpy.int32))
resolution /= 2
data = all_data[0]
for i in range(1, len(all_data)):
where = numpy.where(numpy.logical_not(numpy.isnan(all_data[i])))
data = numpy.hstack((data, all_data[i]))
all_data[i] = None
indices = all_indices[0]
for i in range(1, len(all_indices)):
indices = numpy.hstack((indices, all_indices[i]))
all_indices[i] = None
shapes = all_shapes[0]
for i in range(1, len(all_shapes)):
shapes = numpy.hstack((shapes, all_shapes[i]))
all_shapes[i] = None
if not silent:
print >> sys.stderr, ("Done\n"),
return [data, indices, shapes]
|
bsd-3-clause
| 5,788,391,827,346,598,000
| 57.249842
| 696
| 0.601859
| false
| 3.805653
| false
| false
| false
|
CroissanceCommune/autonomie
|
autonomie/alembic/versions/3_0_migrate_task_lines_2192101f133b.py
|
1
|
1508
|
"""3.0 : Migrate task lines
Revision ID: 2192101f133b
Revises: 465776bbb019
Create Date: 2015-06-29 11:57:26.726124
"""
# revision identifiers, used by Alembic.
revision = '2192101f133b'
down_revision = '36b1d9c38c43'
from alembic import op
import sqlalchemy as sa
def upgrade():
from autonomie.models.task import (
TaskLine,
TaskLineGroup,
Task,
Estimation,
CancelInvoice,
Invoice,
)
from autonomie_base.models.base import (
DBSESSION,
)
session = DBSESSION()
index = 0
query = Task.query()
query = query.with_polymorphic([Invoice, CancelInvoice, Estimation])
query = query.filter(
Task.type_.in_(['invoice', 'estimation', 'cancelinvoice'])
)
for task in query:
group = TaskLineGroup(task_id=task.id, order=0)
for line in task.lines:
tline = TaskLine(
group=group,
order=line.rowIndex,
description=line.description,
cost=line.cost,
tva=line.tva,
quantity=line.quantity,
)
if hasattr(line, 'product_id'):
tline.product_id = line.product_id
session.add(tline)
index += 1
if index % 100 == 0:
session.flush()
op.alter_column(
table_name='estimation_payment',
column_name='rowIndex',
new_column_name='order',
type_=sa.Integer,
)
def downgrade():
pass
|
gpl-3.0
| -5,757,862,300,584,383,000
| 21.176471
| 72
| 0.566313
| false
| 3.732673
| false
| false
| false
|
niosus/EasyClangComplete
|
tests/test_makefile.py
|
1
|
3170
|
"""Tests for Makefile flags extraction."""
import imp
import platform
from os import path
from unittest import TestCase
from EasyClangComplete.plugin.utils import flag
from EasyClangComplete.plugin.utils import search_scope
from EasyClangComplete.plugin.flags_sources import makefile
imp.reload(makefile)
imp.reload(flag)
imp.reload(search_scope)
SearchScope = search_scope.TreeSearchScope
Makefile = makefile.Makefile
Flag = flag.Flag
class TestMakefile(object):
"""Test finding and generating flags from Makeifles."""
def test_init(self):
"""Initialization test."""
self.assertEqual(Makefile._FILE_NAME, 'Makefile')
def _get_project_root(self):
return path.join(path.dirname(__file__), 'makefile_files')
def _check_include(self, flags, include):
expected = path.join(self._get_project_root(), include)
self.assertIn(Flag('-I', expected), flags)
def _check_define(self, flags, define):
self.assertIn(Flag('', '-D' + define), flags)
def _check_makefile(self, cache, flags, test_path, makefile_path):
expected = path.join(self._get_project_root(), makefile_path)
self.assertEqual(expected, cache[test_path])
self.assertEqual(flags, cache[expected])
def _check_cache(self, cache, flags, makefile_path):
key = path.join(self._get_project_root(), makefile_path)
self.assertEqual(flags, cache[key])
def test_makefile_root(self):
"""Test finding and parsing root Makefile."""
test_path = path.join(self._get_project_root(), 'main.c')
mfile = Makefile(['-I', '-isystem'])
flags = mfile.get_flags(test_path)
self._check_include(flags, "inc")
self._check_define(flags, "REQUIRED_DEFINE")
self._check_makefile(mfile._cache, flags, test_path, "Makefile")
def test_makefile_lib(self):
"""Test finding and parsing library Makefile."""
test_path = path.join(self._get_project_root(), 'lib', 'bar.c')
mfile = Makefile(['-I', '-isystem'])
flags = mfile.get_flags(test_path)
self._check_include(flags, path.join("lib", "foo"))
self._check_makefile(mfile._cache, flags, test_path,
path.join("lib", "Makefile"))
def test_makefile_sub(self):
"""Test finding and parsing Makefile for library subdir."""
test_path = path.join(self._get_project_root(), 'lib', 'foo', 'foo.c')
mfile = Makefile(['-I', '-isystem'])
flags = mfile.get_flags(test_path)
self._check_include(flags, path.join("lib", "foo"))
self._check_makefile(mfile._cache, flags, test_path,
path.join("lib", "Makefile"))
def test_makefile_fail(self):
"""Test behavior when no Makefile found."""
test_path = path.join(path.dirname(__file__), 'test_files', 'test.cpp')
mfile = Makefile(['-I', '-isystem'])
flags = mfile.get_flags(test_path)
self.assertTrue(flags is None)
if platform.system() != "Windows":
class MakefileTestRunner(TestMakefile, TestCase):
"""Run make only if we are not on windows."""
pass
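# Note: TestMakefile intentionally does not inherit from TestCase, so on its own
# it is never collected by the test runner; the MakefileTestRunner subclass above
# mixes it with TestCase only when the platform check passes, which is a simple
# way to gate an entire test class on a runtime condition.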
|
mit
| -5,041,730,860,606,384,000
| 34.222222
| 79
| 0.633438
| false
| 3.782816
| true
| false
| false
|
jyi/ITSP
|
prophet-gpl/tools/httpd-build.py
|
1
|
3243
|
# Copyright (C) 2016 Fan Long, Martin Rinard and MIT CSAIL
# Prophet
#
# This file is part of Prophet.
#
# Prophet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Prophet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Prophet. If not, see <http://www.gnu.org/licenses/>.
#!/usr/bin/env python
from sys import argv
from os import path, chdir, getcwd, environ
from tester_common import extract_arguments
import subprocess
import getopt
def compileit(out_dir, compile_only = False, config_only = False, paraj = 0):
ori_dir = getcwd();
my_env = environ;
chdir(out_dir);
if not compile_only:
ret = subprocess.call(["./buildconf --with-apr=" + deps_dir +"/apr-src --with-apr-util=" + deps_dir + "/apr-util-src"], shell = True, env = my_env);
if (ret != 0):
print "Failed to run buildconf!";
chdir(ori_dir);
exit(1);
cmd = "./configure --with-apr=" + deps_dir + "/apr-build --with-apr-util=" + deps_dir + "/apr-util-build";
ret = subprocess.call([cmd], shell = True, env = my_env);
if (ret != 0):
print "Failed to run configure!";
chdir(ori_dir);
print "Executed: cmd";
print cmd;
exit(1);
subprocess.call(["make", "clean"], shell = True, env = my_env);
if not config_only:
if paraj == 0:
ret = subprocess.call(["make"], env = my_env);
else:
ret = subprocess.call(["make", "-j", str(paraj)], env = my_env);
if ret != 0:
print "Failed to make!";
chdir(ori_dir);
exit(1);
chdir(ori_dir);
if __name__ == "__main__":
deps_dir = getcwd() + "/apache-deps";
compile_only = False;
config_only = False;
paraj = 0;
dryrun_src = "";
opts, args = getopt.getopt(argv[1:], 'cd:j:p:');
for o, a in opts:
if o == "-d":
dryrun_src = a;
elif o == "-j":
paraj = int(a);
elif o == "-p":
if a[0] == "/":
deps_dir = a;
else:
deps_dir = getcwd() + "/" + a;
elif o == "-c":
compile_only = True;
print deps_dir;
out_dir = args[0];
if (path.exists(out_dir)):
print "Working with existing directory: " + out_dir;
else:
print "Non-exist directory";
exit(1);
compileit(out_dir, compile_only, config_only, paraj);
if dryrun_src != "":
(builddir, buildargs) = extract_arguments(out_dir, dryrun_src);
if len(args) > 1:
out_file = open(args[1], "w");
print >> out_file, builddir;
print >> out_file, buildargs;
out_file.close();
else:
print builddir;
print buildargs;
|
mit
| 2,086,195,526,570,263,600
| 31.757576
| 156
| 0.5572
| false
| 3.521173
| true
| false
| false
|
praekelt/jmbo-contact
|
contact/models.py
|
1
|
1051
|
from django.contrib.auth.models import User
from django.db import models
from preferences.models import Preferences
class ContactPreferences(Preferences):
__module__ = 'preferences.models'
telephone = models.CharField(
max_length=24,
blank=True,
null=True,
)
fax = models.CharField(
max_length=24,
blank=True,
null=True,
)
physical_address = models.TextField(
blank=True,
null=True,
)
postal_address = models.TextField(
blank=True,
null=True,
)
email = models.EmailField(
blank=True,
null=True,
)
sms = models.CharField(
max_length=24,
blank=True,
null=True,
)
email_recipients = models.ManyToManyField(
User,
blank=True,
null=True,
help_text='Select users who will receive emails sent via the \
general contact form.'
)
class Meta:
verbose_name = 'Contact preferences'
verbose_name_plural = 'Contact preferences'
|
bsd-3-clause
| 1,976,256,016,569,341,200
| 21.361702
| 70
| 0.596575
| false
| 4.073643
| false
| false
| false
|
nealtodd/wagtail
|
wagtail/images/views/serve.py
|
3
|
3400
|
import base64
import hashlib
import hmac
import imghdr
from wsgiref.util import FileWrapper
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.http import HttpResponse, HttpResponsePermanentRedirect, StreamingHttpResponse
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.decorators import classonlymethod
from django.utils.encoding import force_text
from django.views.generic import View
from wagtail.images import get_image_model
from wagtail.images.exceptions import InvalidFilterSpecError
from wagtail.images.models import SourceImageIOError
from wagtail.utils.sendfile import sendfile
def generate_signature(image_id, filter_spec, key=None):
if key is None:
key = settings.SECRET_KEY
# Key must be a bytes object
if isinstance(key, str):
key = key.encode()
# Based on libthumbor hmac generation
# https://github.com/thumbor/libthumbor/blob/b19dc58cf84787e08c8e397ab322e86268bb4345/libthumbor/crypto.py#L50
url = '{}/{}/'.format(image_id, filter_spec)
return force_text(base64.urlsafe_b64encode(hmac.new(key, url.encode(), hashlib.sha1).digest()))
def verify_signature(signature, image_id, filter_spec, key=None):
return force_text(signature) == generate_signature(image_id, filter_spec, key=key)
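# A minimal round-trip sketch of the signing helpers above.  The key is passed
# explicitly so the example does not rely on settings.SECRET_KEY; everything else
# uses only the functions defined in this module.
def _signature_roundtrip_example():
    """Return (True, False): a valid check followed by a tampered one."""
    sig = generate_signature(42, 'fill-200x200', key='example-key')
    ok = verify_signature(sig, 42, 'fill-200x200', key='example-key')
    tampered = verify_signature(sig, 43, 'fill-200x200', key='example-key')
    return ok, tampered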
def generate_image_url(image, filter_spec, viewname='wagtailimages_serve', key=None):
signature = generate_signature(image.id, filter_spec, key)
url = reverse(viewname, args=(signature, image.id, filter_spec))
url += image.file.name[len('original_images/'):]
return url
class ServeView(View):
model = get_image_model()
action = 'serve'
key = None
@classonlymethod
def as_view(cls, **initkwargs):
if 'action' in initkwargs:
if initkwargs['action'] not in ['serve', 'redirect']:
raise ImproperlyConfigured("ServeView action must be either 'serve' or 'redirect'")
return super(ServeView, cls).as_view(**initkwargs)
def get(self, request, signature, image_id, filter_spec):
if not verify_signature(signature.encode(), image_id, filter_spec, key=self.key):
raise PermissionDenied
image = get_object_or_404(self.model, id=image_id)
# Get/generate the rendition
try:
rendition = image.get_rendition(filter_spec)
except SourceImageIOError:
return HttpResponse("Source image file not found", content_type='text/plain', status=410)
except InvalidFilterSpecError:
return HttpResponse("Invalid filter spec: " + filter_spec, content_type='text/plain', status=400)
return getattr(self, self.action)(rendition)
def serve(self, rendition):
# Open and serve the file
rendition.file.open('rb')
image_format = imghdr.what(rendition.file)
return StreamingHttpResponse(FileWrapper(rendition.file),
content_type='image/' + image_format)
def redirect(self, rendition):
# Redirect to the file's public location
return HttpResponsePermanentRedirect(rendition.url)
serve = ServeView.as_view()
class SendFileView(ServeView):
backend = None
def serve(self, rendition):
return sendfile(self.request, rendition.file.path, backend=self.backend)
|
bsd-3-clause
| -6,663,978,026,707,857,000
| 34.789474
| 114
| 0.707353
| false
| 3.894616
| false
| false
| false
|
toros-astro/ProperImage
|
properimage/utils.py
|
1
|
10039
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# utils.py
#
# Copyright 2016 Bruno S <bruno@oac.unc.edu.ar>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
"""utils module from ProperImage,
for coadding astronomical images.
Written by Bruno SANCHEZ
PhD of Astronomy - UNC
bruno@oac.unc.edu.ar
Instituto de Astronomia Teorica y Experimental (IATE) UNC
Cordoba - Argentina
Of 301
"""
import os
import astroalign as aa
import numpy as np
import scipy.ndimage as ndimage
from astropy.io import fits
from astropy.modeling import fitting, models
from astropy.stats import sigma_clipped_stats
from numpy.lib.recfunctions import append_fields
from scipy import sparse
from scipy.spatial import cKDTree
aa.PIXEL_TOL = 0.3
aa.NUM_NEAREST_NEIGHBORS = 5
aa.MIN_MATCHES_FRACTION = 0.6
def store_img(img, path=None):
if isinstance(img[0, 0], np.complex):
img = img.real
if isinstance(img, np.ma.core.MaskedArray):
mask = img.mask.astype("int")
data = img.data
hdu_data = fits.PrimaryHDU(data)
hdu_data.scale(type="float32")
hdu_mask = fits.ImageHDU(mask, uint="uint8")
hdu_mask.header["IMG_TYPE"] = "BAD_PIXEL_MASK"
hdu = fits.HDUList([hdu_data, hdu_mask])
else:
hdu = fits.PrimaryHDU(img)
if path is not None:
hdu.writeto(path, overwrite=True)
else:
return hdu
def crossmatch(X1, X2, max_distance=np.inf):
"""Cross-match the values between X1 and X2
By default, this uses a KD Tree for speed.
Parameters
----------
X1 : array_like
first dataset, shape(N1, D)
X2 : array_like
second dataset, shape(N2, D)
max_distance : float (optional)
maximum radius of search. If no point is within the given radius,
then inf will be returned.
Returns
-------
dist, ind: ndarrays
The distance and index of the closest point in X2 to each point in X1
Both arrays are length N1.
Locations with no match are indicated by
dist[i] = inf, ind[i] = N2
"""
X1 = np.asarray(X1, dtype=float)
X2 = np.asarray(X2, dtype=float)
N1, D = X1.shape
N2, D2 = X2.shape
if D != D2:
raise ValueError("Arrays must have the same second dimension")
kdt = cKDTree(X2)
dist, ind = kdt.query(X1, k=1, distance_upper_bound=max_distance)
return dist, ind
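# A minimal usage sketch of crossmatch(): points in X1 with no neighbour inside
# max_distance come back with dist = inf and ind = len(X2).
def _crossmatch_example():
    x1 = np.array([[0.0, 0.0], [5.0, 5.0]])
    x2 = np.array([[0.1, 0.0], [0.0, 0.2]])
    dist, ind = crossmatch(x1, x2, max_distance=1.0)
    # dist -> array([0.1, inf]); ind -> array([0, 2])
    return dist, ind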
def _matching(master, cat, masteridskey=None, radius=1.5, masked=False):
"""Function to match stars between frames."""
if masteridskey is None:
masterids = np.arange(len(master))
master["masterindex"] = masterids
idkey = "masterindex"
else:
idkey = masteridskey
masterXY = np.empty((len(master), 2), dtype=np.float64)
masterXY[:, 0] = master["x"]
masterXY[:, 1] = master["y"]
imXY = np.empty((len(cat), 2), dtype=np.float64)
imXY[:, 0] = cat["x"]
imXY[:, 1] = cat["y"]
dist, ind = crossmatch(masterXY, imXY, max_distance=radius)
dist_, ind_ = crossmatch(imXY, masterXY, max_distance=radius)
IDs = np.zeros_like(ind_) - 13133
for i in range(len(ind_)):
if dist_[i] != np.inf:
ind_o = ind_[i]
if dist[ind_o] != np.inf:
ind_s = ind[ind_o]
if ind_s == i:
IDs[i] = master[idkey][ind_o]
if masked:
mask = IDs > 0
return (IDs, mask)
return IDs
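# Note on _matching(): a catalogue star i is accepted only when the match is
# mutual within `radius` -- star i's nearest master star must also have i as its
# own nearest neighbour.  Accepted entries of IDs carry the master's id key;
# everything else keeps the negative sentinel it was initialised with.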
def transparency(images, master=None):
"""Transparency calculator, using Ofek method."""
if master is None:
p = len(images)
master = images[0]
imglist = images[1:]
else:
# master is a separated file
p = len(images) + 1
imglist = images
mastercat = master.best_sources
try:
mastercat = append_fields(
mastercat,
"sourceid",
np.arange(len(mastercat)),
usemask=False,
dtypes=int,
)
except ValueError:
pass
detect = np.repeat(True, len(mastercat))
# Matching the sources
for img in imglist:
newcat = img.best_sources
ids, mask = _matching(
mastercat,
newcat,
masteridskey="sourceid",
radius=2.0,
masked=True,
)
try:
newcat = append_fields(newcat, "sourceid", ids, usemask=False)
except ValueError:
newcat["sourceid"] = ids
for i in range(len(mastercat)):
if mastercat[i]["sourceid"] not in ids:
detect[i] = False
newcat.sort(order="sourceid")
img.update_sources(newcat)
try:
mastercat = append_fields(
mastercat, "detected", detect, usemask=False, dtypes=bool
)
except ValueError:
mastercat["detected"] = detect
# Now populating the vector of magnitudes
q = sum(mastercat["detected"])
if q != 0:
m = np.zeros(p * q)
# here 20 is a common value for a zp, and is only for weighting
m[:q] = (
-2.5 * np.log10(mastercat[mastercat["detected"]]["flux"]) + 20.0
)
j = 0
for row in mastercat[mastercat["detected"]]:
for img in imglist:
cat = img.best_sources
imgrow = cat[cat["sourceid"] == row["sourceid"]]
m[q + j] = -2.5 * np.log10(imgrow["flux"]) + 20.0
j += 1
master.update_sources(mastercat)
ident = sparse.identity(q)
col = np.repeat(1.0, q)
sparses = []
for j in range(p):
ones_col = np.zeros((q, p))
ones_col[:, j] = col
sparses.append([sparse.csc_matrix(ones_col), ident])
H = sparse.bmat(sparses)
P = sparse.linalg.lsqr(H, m)
zps = P[0][:p]
meanmags = P[0][p:]
return np.asarray(zps), np.asarray(meanmags)
else:
return np.ones(p), np.nan
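# Note on the linear system above: each matched star i observed in image j
# contributes one equation  m_ij ~= zp_j + M_i,  where zp_j is the per-image
# zeropoint and M_i the star's mean instrumental magnitude.  For every image j,
# H stacks a column selecting zp_j next to an identity block over the q stars,
# and sparse.linalg.lsqr solves the whole system at once; the first p entries of
# the solution are the zeropoints and the remaining q entries the mean magnitudes.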
def _align_for_diff(refpath, newpath, newmask=None):
"""Function to align two images using their paths,
and returning newpaths for differencing.
We will allways rotate and align the new image to the reference,
so it is easier to compare differences along time series.
"""
ref = np.ma.masked_invalid(fits.getdata(refpath))
new = fits.getdata(newpath)
hdr = fits.getheader(newpath)
if newmask is not None:
new = np.ma.masked_array(new, mask=fits.getdata(newmask))
else:
new = np.ma.masked_invalid(new)
dest_file = "aligned_" + os.path.basename(newpath)
dest_file = os.path.join(os.path.dirname(newpath), dest_file)
try:
new2 = aa.register(ref, new.filled(np.median(new)))
except ValueError:
ref = ref.astype(float)
new = new.astype(float)
new2 = aa.register(ref, new)
hdr.set("comment", "aligned img " + newpath + " to " + refpath)
if isinstance(new2, np.ma.masked_array):
hdu = fits.HDUList(
[
fits.PrimaryHDU(new2.data, header=hdr),
fits.ImageHDU(new2.mask.astype("uint8")),
]
)
hdu.writeto(dest_file, overwrite=True)
else:
fits.writeto(dest_file, new2, hdr, overwrite=True)
return dest_file
def _align_for_coadd(imglist):
"""
Function to align a group of images for coadding, it uses
the astroalign `align_image` tool.
"""
ref = imglist[0]
new_list = [ref]
for animg in imglist[1:]:
registrd, registrd_mask = aa.register(
animg.data, ref.data, propagate_mask=True
)
# [: ref.data.shape[0], : ref.data.shape[1]], Deprecated
new_list.append(
type(animg)(registrd, mask=registrd_mask, borders=False)
)
return new_list
def find_S_local_maxima(S_image, threshold=2.5, neighborhood_size=5):
mean, median, std = sigma_clipped_stats(S_image, maxiters=3)
labeled, num_objects = ndimage.label((S_image - mean) / std > threshold)
xy = np.array(
ndimage.center_of_mass(S_image, labeled, range(1, num_objects + 1))
)
cat = []
for x, y in xy:
cat.append((y, x, (S_image[int(x), int(y)] - mean) / std))
return cat
def chunk_it(seq, num):
"""Creates chunks of a sequence suitable for data parallelism using
multiprocessing.
Parameters
----------
seq: list, array or sequence like object. (indexable)
data to separate in chunks
num: int
number of chunks required
Returns
-------
Sorted list.
List of chunks containing the data splited in num parts.
"""
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last) : int(last + avg)])
last += avg
try:
return sorted(out, reverse=True)
except TypeError:
return out
except ValueError:
return out
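# Usage sketch: chunk_it() splits a sequence into `num` roughly equal pieces and,
# when the chunks are mutually comparable, returns them in descending order, e.g.
#
#   chunk_it(list(range(6)), 3)  ->  [[4, 5], [2, 3], [0, 1]]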
def fit_gaussian2d(b, fitter=None):
if fitter is None:
fitter = fitting.LevMarLSQFitter()
y2, x2 = np.mgrid[: b.shape[0], : b.shape[1]]
ampl = b.max() - b.min()
p = models.Gaussian2D(
x_mean=b.shape[1] / 2.0,
y_mean=b.shape[0] / 2.0,
x_stddev=1.0,
y_stddev=1.0,
theta=np.pi / 4.0,
amplitude=ampl,
)
p += models.Const2D(amplitude=b.min())
out = fitter(p, x2, y2, b, maxiter=1000)
return out
|
bsd-3-clause
| 2,111,238,457,958,437,600
| 27.278873
| 77
| 0.591892
| false
| 3.363149
| false
| false
| false
|
seung-lab/cloud-volume
|
cloudvolume/datasource/precomputed/image/rx.py
|
1
|
11090
|
from functools import partial
import itertools
import math
import os
import threading
import numpy as np
from six.moves import range
from tqdm import tqdm
from cloudfiles import reset_connection_pools, CloudFiles, compression
import fastremap
from ....exceptions import EmptyVolumeException, EmptyFileException
from ....lib import (
mkdir, clamp, xyzrange, Vec,
Bbox, min2, max2, check_bounds,
jsonify, red
)
from .... import chunks
from cloudvolume.scheduler import schedule_jobs
from cloudvolume.threaded_queue import DEFAULT_THREADS
from cloudvolume.volumecutout import VolumeCutout
import cloudvolume.sharedmemory as shm
from ..common import should_compress, content_type
from .common import (
fs_lock, parallel_execution,
chunknames, shade, gridpoints,
compressed_morton_code
)
from .. import sharding
def download_sharded(
requested_bbox, mip,
meta, cache, spec,
compress, progress,
fill_missing,
order
):
full_bbox = requested_bbox.expand_to_chunk_size(
meta.chunk_size(mip), offset=meta.voxel_offset(mip)
)
full_bbox = Bbox.clamp(full_bbox, meta.bounds(mip))
shape = list(requested_bbox.size3()) + [ meta.num_channels ]
compress_cache = should_compress(meta.encoding(mip), compress, cache, iscache=True)
chunk_size = meta.chunk_size(mip)
grid_size = np.ceil(meta.bounds(mip).size3() / chunk_size).astype(np.uint32)
reader = sharding.ShardReader(meta, cache, spec)
bounds = meta.bounds(mip)
renderbuffer = np.zeros(shape=shape, dtype=meta.dtype, order=order)
gpts = list(gridpoints(full_bbox, bounds, chunk_size))
code_map = {}
morton_codes = compressed_morton_code(gpts, grid_size)
for gridpoint, morton_code in zip(gpts, morton_codes):
cutout_bbox = Bbox(
bounds.minpt + gridpoint * chunk_size,
min2(bounds.minpt + (gridpoint + 1) * chunk_size, bounds.maxpt)
)
code_map[morton_code] = cutout_bbox
all_chunkdata = reader.get_data(list(code_map.keys()), meta.key(mip), progress=progress)
for zcode, chunkdata in all_chunkdata.items():
cutout_bbox = code_map[zcode]
if chunkdata is None:
if fill_missing:
chunkdata = None
else:
raise EmptyVolumeException(cutout_bbox)
img3d = decode(
meta, cutout_bbox,
chunkdata, fill_missing, mip
)
shade(renderbuffer, requested_bbox, img3d, cutout_bbox)
return VolumeCutout.from_volume(
meta, mip, renderbuffer,
requested_bbox
)
def download(
requested_bbox, mip,
meta, cache,
fill_missing, progress,
parallel, location,
retain, use_shared_memory,
use_file, compress, order='F',
green=False, secrets=None,
renumber=False, background_color=0
):
"""Cutout a requested bounding box from storage and return it as a numpy array."""
full_bbox = requested_bbox.expand_to_chunk_size(
meta.chunk_size(mip), offset=meta.voxel_offset(mip)
)
full_bbox = Bbox.clamp(full_bbox, meta.bounds(mip))
cloudpaths = list(chunknames(
full_bbox, meta.bounds(mip),
meta.key(mip), meta.chunk_size(mip),
protocol=meta.path.protocol
))
shape = list(requested_bbox.size3()) + [ meta.num_channels ]
compress_cache = should_compress(meta.encoding(mip), compress, cache, iscache=True)
handle = None
if renumber and (parallel != 1):
raise ValueError("renumber is not supported for parallel operation.")
if use_shared_memory and use_file:
raise ValueError("use_shared_memory and use_file are mutually exclusive arguments.")
dtype = np.uint16 if renumber else meta.dtype
if parallel == 1:
if use_shared_memory: # write to shared memory
handle, renderbuffer = shm.ndarray(
shape, dtype=dtype, order=order,
location=location, lock=fs_lock
)
if not retain:
shm.unlink(location)
elif use_file: # write to ordinary file
handle, renderbuffer = shm.ndarray_fs(
shape, dtype=dtype, order=order,
location=location, lock=fs_lock,
emulate_shm=False
)
if not retain:
os.unlink(location)
else:
renderbuffer = np.full(shape=shape, fill_value=background_color,
dtype=dtype, order=order)
def process(img3d, bbox):
shade(renderbuffer, requested_bbox, img3d, bbox)
remap = { background_color: background_color }
lock = threading.Lock()
N = 1
def process_renumber(img3d, bbox):
nonlocal N
nonlocal lock
nonlocal remap
nonlocal renderbuffer
img_labels = fastremap.unique(img3d)
with lock:
for lbl in img_labels:
if lbl not in remap:
remap[lbl] = N
N += 1
if N > np.iinfo(renderbuffer.dtype).max:
renderbuffer = fastremap.refit(renderbuffer, value=N, increase_only=True)
fastremap.remap(img3d, remap, in_place=True)
shade(renderbuffer, requested_bbox, img3d, bbox)
fn = process
if renumber and not (use_file or use_shared_memory):
fn = process_renumber
download_chunks_threaded(
meta, cache, mip, cloudpaths,
fn=fn, fill_missing=fill_missing,
progress=progress, compress_cache=compress_cache,
green=green, secrets=secrets, background_color=background_color
)
else:
handle, renderbuffer = multiprocess_download(
requested_bbox, mip, cloudpaths,
meta, cache, compress_cache,
fill_missing, progress,
parallel, location, retain,
use_shared_memory=(use_file == False),
order=order,
green=green,
secrets=secrets,
background_color=background_color
)
out = VolumeCutout.from_volume(
meta, mip, renderbuffer,
requested_bbox, handle=handle
)
if renumber:
return (out, remap)
return out
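# Note: download() is usually reached indirectly.  A typical public entry point
# is CloudVolume slicing, which for precomputed volumes delegates to this module;
# the cloudpath below is made up purely for illustration:
#
#   from cloudvolume import CloudVolume
#   vol = CloudVolume('gs://example-bucket/example-dataset', mip=0)
#   cutout = vol[0:64, 0:64, 0:16]   # a VolumeCutout (ndarray subclass)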
def multiprocess_download(
requested_bbox, mip, cloudpaths,
meta, cache, compress_cache,
fill_missing, progress,
parallel, location,
retain, use_shared_memory, order,
green, secrets=None, background_color=0
):
cloudpaths_by_process = []
length = int(math.ceil(len(cloudpaths) / float(parallel)) or 1)
for i in range(0, len(cloudpaths), length):
cloudpaths_by_process.append(
cloudpaths[i:i+length]
)
cpd = partial(child_process_download,
meta, cache, mip, compress_cache,
requested_bbox,
fill_missing, progress,
location, use_shared_memory,
green, secrets, background_color
)
parallel_execution(cpd, cloudpaths_by_process, parallel, cleanup_shm=location)
shape = list(requested_bbox.size3()) + [ meta.num_channels ]
if use_shared_memory:
mmap_handle, renderbuffer = shm.ndarray(
shape, dtype=meta.dtype, order=order,
location=location, lock=fs_lock
)
else:
handle, renderbuffer = shm.ndarray_fs(
shape, dtype=meta.dtype, order=order,
location=location, lock=fs_lock,
emulate_shm=False
)
if not retain:
if use_shared_memory:
shm.unlink(location)
else:
os.unlink(location)
return mmap_handle, renderbuffer
def child_process_download(
meta, cache, mip, compress_cache,
dest_bbox,
fill_missing, progress,
location, use_shared_memory, green,
secrets, background_color, cloudpaths
):
reset_connection_pools() # otherwise multi-process hangs
shape = list(dest_bbox.size3()) + [ meta.num_channels ]
if use_shared_memory:
array_like, dest_img = shm.ndarray(
shape, dtype=meta.dtype,
location=location, lock=fs_lock
)
else:
array_like, dest_img = shm.ndarray_fs(
shape, dtype=meta.dtype,
location=location, emulate_shm=False,
lock=fs_lock
)
if background_color != 0:
dest_img[dest_bbox.to_slices()] = background_color
def process(src_img, src_bbox):
shade(dest_img, dest_bbox, src_img, src_bbox)
download_chunks_threaded(
meta, cache, mip, cloudpaths,
fn=process, fill_missing=fill_missing,
progress=progress, compress_cache=compress_cache,
green=green, secrets=secrets, background_color=background_color
)
array_like.close()
def download_chunk(
meta, cache,
cloudpath, mip,
filename, fill_missing,
enable_cache, compress_cache,
secrets, background_color
):
(file,) = CloudFiles(cloudpath, secrets=secrets).get([ filename ], raw=True)
content = file['content']
if enable_cache:
cache_content = next(compression.transcode(file, compress_cache))['content']
CloudFiles('file://' + cache.path).put(
path=filename,
content=(cache_content or b''),
content_type=content_type(meta.encoding(mip)),
compress=compress_cache,
raw=bool(cache_content),
)
del cache_content
if content is not None:
content = compression.decompress(content, file['compress'])
bbox = Bbox.from_filename(filename) # possible off by one error w/ exclusive bounds
img3d = decode(meta, filename, content, fill_missing, mip,
background_color=background_color)
return img3d, bbox
def download_chunks_threaded(
meta, cache, mip, cloudpaths, fn,
fill_missing, progress, compress_cache,
green=False, secrets=None, background_color=0
):
locations = cache.compute_data_locations(cloudpaths)
cachedir = 'file://' + os.path.join(cache.path, meta.key(mip))
def process(cloudpath, filename, enable_cache):
img3d, bbox = download_chunk(
meta, cache, cloudpath, mip,
filename, fill_missing,
enable_cache, compress_cache,
secrets, background_color
)
fn(img3d, bbox)
local_downloads = (
partial(process, cachedir, os.path.basename(filename), False) for filename in locations['local']
)
remote_downloads = (
partial(process, meta.cloudpath, filename, cache.enabled) for filename in locations['remote']
)
downloads = itertools.chain( local_downloads, remote_downloads )
if progress and not isinstance(progress, str):
progress = "Downloading"
schedule_jobs(
fns=downloads,
concurrency=DEFAULT_THREADS,
progress=progress,
total=len(cloudpaths),
green=green,
)
def decode(meta, input_bbox, content, fill_missing, mip, background_color=0):
"""
Decode content from bytes into a numpy array using the
dataset metadata.
If fill_missing is True, return an array filled with background_color
if content is empty. Otherwise, raise an EmptyVolumeException
in that case.
Returns: ndarray
"""
bbox = Bbox.create(input_bbox)
content_len = len(content) if content is not None else 0
if not content:
if fill_missing:
content = b''
else:
raise EmptyVolumeException(input_bbox)
shape = list(bbox.size3()) + [ meta.num_channels ]
try:
return chunks.decode(
content,
encoding=meta.encoding(mip),
shape=shape,
dtype=meta.dtype,
block_size=meta.compressed_segmentation_block_size(mip),
background_color=background_color
)
except Exception as error:
print(red('File Read Error: {} bytes, {}, {}, errors: {}'.format(
content_len, bbox, input_bbox, error)))
raise
|
bsd-3-clause
| 5,789,913,100,231,406,000
| 27.656331
| 101
| 0.67358
| false
| 3.570509
| false
| false
| false
|
tangentlabs/tangent-deployer
|
src/tangentdeployer/aws/elb.py
|
1
|
4028
|
import json
import utils
import boto.ec2.elb
from fabconfig import env
from fabric.api import local
def get_or_create_load_balancer():
utils.status("Getting %s load balancer" % env.environment)
load_balancer = get(load_balancer_name=env.load_balancer_name)
if not load_balancer:
return create_load_balancer()
return load_balancer
def create_load_balancer():
load_balancer = env.connections.elb.create_load_balancer(
name=env.load_balancer_name,
zones=env.zones,
security_groups=utils.security_groups(),
complex_listeners=[('80', '80', 'http', 'http')]
)
utils.success('Finished creating load balancer')
health_check = create_health_check()
load_balancer.configure_health_check(health_check=health_check)
return load_balancer
def create_health_check():
utils.status('Creating health check for load balancer')
health_check = boto.ec2.elb.HealthCheck(
interval=10,
healthy_threshold=2,
unhealthy_threshold=3,
target='HTTP:80/health')
utils.success('Finished creating health check for load balancer')
return health_check
def register_instances(load_balancer, autoscaling_group):
instances = [
instance.instance_id
for instance in autoscaling_group.instances
]
env.connections.elb.register_instances(
load_balancer_name=load_balancer.name, instances=instances)
def deregister_instances(load_balancer, autoscaling_group):
instances = [
instance.instance_id
for instance in autoscaling_group.instances
]
env.connections.elb.deregister_instances(
load_balancer_name=load_balancer.name, instances=instances)
def get(load_balancer_name):
utils.status('Getting %s load balancer' % env.environment)
try:
load_balancers = env.connections.elb.get_all_load_balancers(
load_balancer_names=[env.load_balancer_name])
except boto.exception.BotoServerError:
return None
return load_balancers[0]
def has_tag(load_balancer_name, key, value):
"""
We fall back to using the AWS CLI tool here because boto doesn't
support load balancer tags yet.
As soon as https://github.com/boto/boto/issues/2549 is merged we're good
to change this to use boto.
"""
response = json.loads(local(
'aws elb describe-tags '
'--load-balancer-names %s '
'--region=%s --profile=%s' % (load_balancer_name,
env.region,
env.profile_name),
capture=True))
in_env = False
if 'TagDescriptions' in response:
for tag_description in response['TagDescriptions']:
for tag in tag_description['Tags']:
if tag['Key'] == 'env' and tag['Value'] == env.environment:
in_env = True
for tag in tag_description['Tags']:
if tag['Key'] == 'type' and tag['Value'] == value and in_env:
return True
return False
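# The JSON parsed above has roughly this shape (reconstructed from the parsing
# code; the values are placeholders):
#
#   {
#     "TagDescriptions": [
#       {"LoadBalancerName": "my-lb",
#        "Tags": [{"Key": "env", "Value": "prod"},
#                 {"Key": "type", "Value": "web"}]}
#     ]
#   }
#
# has_tag() walks these descriptions and returns True once an `env` tag matching
# env.environment has been seen and a `type` tag equal to `value` is found.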
def tag(load_balancer, tags):
"""
We fall back to using the AWS CLI tool here because boto doesn't
support adding tags to load balancers yet.
As soon as https://github.com/boto/boto/issues/2549 is merged we're good
to change this to use boto
"""
utils.status('Tagging load balancer')
tags = make_tags(tags=tags)
local('aws elb add-tags '
'--load-balancer-names {lb_name} '
'--tags {tags} '
'--region={region} '
'--profile={profile_name}'.format(lb_name=load_balancer.name,
tags=tags,
region=env.region,
profile_name=env.profile_name)
)
utils.success('Finished tagging load balancer')
def make_tags(tags):
return ' '.join(
'Key={key},Value={value}'.format(key=key, value=value)
for key, value in tags.iteritems()
)
|
mit
| -4,524,741,475,131,346,400
| 31.483871
| 77
| 0.617428
| false
| 4.081054
| false
| false
| false
|
Blazemeter/taurus
|
tests/resources/apiritif/test_codegen.py
|
1
|
2578
|
# coding=utf-8
import logging
import random
import string
import sys
import unittest
from time import time, sleep
import apiritif
log = logging.getLogger('apiritif.http')
log.addHandler(logging.StreamHandler(sys.stdout))
log.setLevel(logging.DEBUG)
class TestWithExtractors(unittest.TestCase):
def setUp(self):
self.vars = {}
timeout = 5.0
self.target = apiritif.http.target('https://jsonplaceholder.typicode.com')
self.target.keep_alive(True)
self.target.auto_assert_ok(True)
self.target.use_cookies(True)
self.target.allow_redirects(True)
self.target.timeout(5.0)
apiritif.put_into_thread_store(timeout=timeout, func_mode=False, scenario_name='with-extractors')
def _1_just_get(self):
with apiritif.smart_transaction('just get'):
response = self.target.get('/')
def _2_get_posts(self):
with apiritif.smart_transaction('get posts'):
response = self.target.get('/posts')
response.assert_jsonpath('$.[0].userId', expected_value=1)
self.vars['userID'] = response.extract_jsonpath('$.[5].userId', 'NOT_FOUND')
def _3_get_posts_of_certain_user(self):
with apiritif.smart_transaction('get posts of certain user'):
response = self.target.get('/posts?userId={}'.format(self.vars['userID']))
self.vars['postID'] = response.extract_jsonpath('$.[0].id', 'NOT_FOUND')
def _4_get_comments_on_post(self):
with apiritif.smart_transaction('get comments on post'):
response = self.target.get('/posts/{}/comments'.format(self.vars['postID']))
response.assert_jsonpath('$[0].email', expected_value=None)
def _5_add_into_posts(self):
with apiritif.smart_transaction('add into posts'):
response = self.target.post('/posts', headers={
'content-type': 'application/json',
}, json={
'body': 'bar',
'title': 'foo',
'userId': self.vars['userID'],
})
self.vars['addedID'] = response.extract_jsonpath('$.id', 'NOT_FOUND')
def _6_delete_from_posts(self):
with apiritif.smart_transaction('delete from posts'):
response = self.target.delete('/posts/{}'.format(self.vars['postID']))
def test_with_extractors(self):
self._1_just_get()
self._2_get_posts()
self._3_get_posts_of_certain_user()
self._4_get_comments_on_post()
self._5_add_into_posts()
self._6_delete_from_posts()
|
apache-2.0
| 2,061,219,892,558,965,800
| 34.315068
| 105
| 0.611715
| false
| 3.600559
| false
| false
| false
|
bengosney/romrescue.org
|
team/migrations/0005_auto_20161029_1857.py
|
1
|
1667
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-10-29 18:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import image_cropping.fields
class Migration(migrations.Migration):
dependencies = [
('team', '0004_teammember_job'),
]
operations = [
migrations.CreateModel(
name='DogPhoto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='uploads/teamdogs')),
('thumbnail', image_cropping.fields.ImageRatioField('image', '150x150', adapt_rotation=False, allow_fullsize=False, free_crop=False, help_text=None, hide_image_field=False, size_warning=False, verbose_name='thumbnail')),
('position', models.PositiveIntegerField(default=0)),
],
options={
'ordering': ('position',),
'verbose_name': 'Photo',
'verbose_name_plural': 'Photos',
},
),
migrations.AlterField(
model_name='teammember',
name='cropped',
field=image_cropping.fields.ImageRatioField('image', '400x400', adapt_rotation=False, allow_fullsize=False, free_crop=False, help_text=None, hide_image_field=False, size_warning=False, verbose_name='cropped'),
),
migrations.AddField(
model_name='dogphoto',
name='TeamMember',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='team.TeamMember'),
),
]
|
gpl-3.0
| 2,396,542,858,550,056,400
| 39.658537
| 236
| 0.604079
| false
| 4.03632
| false
| false
| false
|
stvstnfrd/edx-platform
|
common/lib/xmodule/xmodule/modulestore/split_mongo/definition_lazy_loader.py
|
1
|
1619
|
# lint-amnesty, pylint: disable=missing-module-docstring
import copy
from opaque_keys.edx.locator import DefinitionLocator
class DefinitionLazyLoader(object):
"""
A placeholder to put into an xblock in place of its definition which
when accessed knows how to get its content. Only useful if the containing
object doesn't force access during init but waits until client wants the
definition. Only works if the modulestore is a split mongo store.
"""
def __init__(self, modulestore, course_key, block_type, definition_id, field_converter):
"""
Simple placeholder for yet-to-be-fetched data
:param modulestore: the pymongo db connection with the definitions
:param definition_locator: the id of the record in the above to fetch
"""
self.modulestore = modulestore
self.course_key = course_key
self.definition_locator = DefinitionLocator(block_type, definition_id)
self.field_converter = field_converter
def fetch(self):
"""
Fetch the definition. Note, the caller should replace this lazy
loader pointer with the result so as not to fetch more than once
"""
# get_definition may return a cached value perhaps from another course or code path
# so, we copy the result here so that updates don't cross-pollinate nor change the cached
# value in such a way that we can't tell that the definition's been updated.
definition = self.modulestore.get_definition(self.course_key, self.definition_locator.definition_id)
return copy.deepcopy(definition)
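# Sketch of the intended calling pattern described in the docstrings above.  The
# attribute name `_definition` is hypothetical and only stands in for wherever
# the caller keeps the loader:
#
#   definition = block._definition            # may be a DefinitionLazyLoader
#   if isinstance(definition, DefinitionLazyLoader):
#       definition = definition.fetch()
#       block._definition = definition        # swap in the result; fetch once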
|
agpl-3.0
| -1,342,814,386,339,033,600
| 45.257143
| 108
| 0.704756
| false
| 4.625714
| false
| false
| false
|
Antergos/Cnchi
|
src/widgets/webcam_widget.py
|
1
|
5550
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# webcam_widget.py
#
# Copyright © 2013-2018 Antergos
#
# This file is part of Cnchi.
#
# Cnchi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Cnchi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The following additional terms are in effect as per Section 7 of the license:
#
# The preservation of all legal notices and author attributions in
# the material or in the Appropriate Legal Notices displayed
# by works containing it is required.
#
# You should have received a copy of the GNU General Public License
# along with Cnchi; If not, see <http://www.gnu.org/licenses/>.
""" Widget that shows a web camera feed """
import logging
import os
import gi
gi.require_version('Gst', '1.0')
gi.require_version('Gtk', '3.0')
from gi.repository import GObject, Gst, Gtk
# Needed for window.get_xid(), xvimagesink.set_window_handle(), respectively:
gi.require_version('GstVideo', '1.0')
from gi.repository import GdkX11, GstVideo
class WebcamWidget(Gtk.DrawingArea):
""" Webcam widget """
__gtype_name__ = 'WebcamWidget'
def __init__(self, width=160, height=90):
Gtk.DrawingArea.__init__(self)
self.pipeline = None
self.xid = None
self.bus = None
self.error = False
if not os.path.exists("/dev/video0"):
logging.warning("Cannot find any camera. Camera widget won't be used")
self.error = True
self.destroy()
return
self.set_size_request(width, height)
# Initialize GStreamer
Gst.init(None)
self.create_video_pipeline(width, height)
self.connect('destroy', self.on_destroy)
def create_video_pipeline(self, width, height):
""" Create GStreamer pipeline """
# Create pipeline
self.pipeline = Gst.Pipeline.new()
# Create bus to get events from GStreamer pipeline
self.bus = self.pipeline.get_bus()
self.bus.add_signal_watch()
self.bus.connect('message::error', self.on_error)
# This is needed to make the video output in our DrawingArea:
self.bus.enable_sync_message_emission()
self.bus.connect('sync-message::element', self.on_sync_message)
# Create GStreamer elements
self.source = Gst.ElementFactory.make('autovideosrc', 'source')
self.sink = Gst.ElementFactory.make('autovideosink', 'sink')
if self.source and self.sink:
#fmt_str = 'video/x-raw, format=(string)YV12, '
fmt_str = 'video/x-raw, '
fmt_str += 'width=(int){0}, height=(int){1}, '.format(width, height)
fmt_str += 'pixel-aspect-ratio=(fraction)1/1, '
fmt_str += 'interlace-mode=(string)progressive, '
fmt_str += 'framerate=(fraction){ 30/1, 24/1, 20/1, 15/1, 10/1, 15/2, 5/1 }'
caps = Gst.caps_from_string(fmt_str)
# Add elements to the pipeline
self.pipeline.add(self.source)
self.pipeline.add(self.sink)
self.source.link_filtered(self.sink, caps)
logging.debug("Camera found. Video pipeline created.")
else:
logging.debug("Cannot initialize camera.")
self.error = True
def show_all(self):
""" You need to get the XID after window.show_all(). You shouldn't get it
in the on_sync_message() handler because threading issues will cause
segfaults there. """
self.xid = self.get_property('window').get_xid()
if self.pipeline:
# Start playing
self.pipeline.set_state(Gst.State.PLAYING)
def on_destroy(self, _data):
""" Widget is destroyed. Stop playing """
if self.pipeline:
# Stop playing
self.pipeline.set_state(Gst.State.NULL)
self.destroy()
def on_sync_message(self, _bus, msg):
""" This is needed to make the video output in our DrawingArea """
if msg.get_structure().get_name() == 'prepare-window-handle':
msg.src.set_property('force-aspect-ratio', True)
msg.src.set_window_handle(self.xid)
@staticmethod
def on_error(_bus, msg):
""" A gst error has occurred """
logging.error(msg.parse_error())
def clicked(self, _event_box, _event_button):
""" User clicks on camera widget """
pass
GObject.type_register(WebcamWidget)
def test_module():
""" function to test this module """
window = Gtk.Window(Gtk.WindowType.TOPLEVEL)
window.set_title("Webcam test")
window.set_default_size(160, 90)
window.connect("destroy", Gtk.main_quit, "WM destroy")
vbox = Gtk.VBox()
window.add(vbox)
overlay = Gtk.Overlay.new()
overlay.show()
webcam = WebcamWidget()
event_box = Gtk.EventBox.new()
event_box.connect(
'button-press-event',
webcam.clicked)
overlay.add_overlay(event_box)
event_box.add(webcam)
webcam.set_halign(Gtk.Align.START)
webcam.set_valign(Gtk.Align.START)
vbox.add(overlay)
window.show_all()
webcam.show_all()
GObject.threads_init()
Gtk.main()
if __name__ == '__main__':
test_module()
|
gpl-3.0
| -5,490,614,462,187,439,000
| 30.890805
| 88
| 0.631465
| false
| 3.614984
| false
| false
| false
|
NPPC-UK/wireless_sensors
|
setup_logger/setup_logger.py
|
1
|
7621
|
#!/usr/bin/env python
# Copyright (C) 2017 Aberystwyth University
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# setup_logger.py
# Author: Katie Awty-Carroll (klh5@aber.ac.uk)
#
import serial
import MySQLdb
import sys
from datetime import datetime
from ConfigParser import SafeConfigParser
"""
setup_node.py - Python 2.7.6 script for setting up nodes.
This script is designed to fetch OneWire sensor ID's from a node, and add them to the database. The node must be running setup_node.ino.
This script has very little error checking at the moment so be careful.
"""
print "Setting up..."
#The directory where the config file is stored
parser.read("config")
#Fetch database details from config file
db_host = parser.get("db_connect", "host")
db_user = parser.get("db_connect", "user")
db_pass = parser.get("db_connect", "password")
db_schema = parser.get("db_connect", "schema")
#Clear input buffer, because menu will print
def clear_menu(serial_conn):
for i in range(0, 6):
serial_conn.readline()
#Set up the SQL statement to add a sensor location
def get_location_data(node_id, sensor_id, sensor_order):
loc_statement = None
print "Sensor X location: "
x_loc = raw_input()
if x_loc.isdigit():
print "X location is OK"
print "Sensor Y location: "
y_loc = raw_input()
if y_loc.isdigit():
print "Y location is OK"
print "Sensor Z location: "
z_loc = raw_input()
if z_loc.isdigit():
print "Z location is OK"
print "Sensor compartment: "
comp = raw_input()
if comp.isdigit():
print "Compartment number is OK"
print "Sensor network ID: "
network_id = raw_input()
if network_id.isdigit():
print "Network ID is OK"
loc_statement = "INSERT INTO locations (location_sensor_id, x_location, y_location, z_location, compartment, node_id, node_order, network) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s');" % (sensor_id, x_loc, y_loc, z_loc, comp, node_id, sensor_order, network_id)
else:
print "Network ID is not numeric"
else:
print "Compartment number is not numeric"
else:
print "Z location is not numeric"
else:
print "Y location is not numeric"
else:
print "X location is not numeric"
return loc_statement
#Set up the SQL statement to add a sensor calibration record
def get_calibration_data(sensor_id):
cal_statement = None
print "Calibration date (YYYY-MM-DD): "
cal_date = raw_input()
try:
val_date = datetime.strptime(cal_date, '%Y-%m-%d')
print "Equation (e.g. x*2/(12-1.5)): "
cal_equation = raw_input()
try:
x = 1
equation_res = eval(cal_equation)
print "Instrument used to calibrate: "
cal_instrument = raw_input()
print "Who performed the calibration: "
cal_person = raw_input()
cal_statement = "INSERT INTO calibration_data VALUES (equation_sensor_id, calibration_date, equation, calibration_instrument, who_calibrated) VALUES ('%s', '%s', '%s', '%s', '%s')" % (sensor_id, cal_date, cal_equation, cal_instrument, cal_person)
except SyntaxError:
print "Equation cannot be evaluated - check your syntax"
except ValueError:
print "Date needs to be in YYYY-MM-DD format"
return cal_statement
def main():
calibration_flag = "NO"
#Connect to serial port so that we can communicate with the Moteino
try:
serial_conn = serial.Serial("/dev/ttyUSB0", 115200)
print "Connected to serial port"
clear_menu(serial_conn)
except OSError as e:
print "ERROR: Could not open serial port: %s" % (e)
sys.exit(1)
try:
#Connect to database
db = MySQLdb.connect(db_host, db_user, db_pass, db_schema)
#Set up cursor to fetch data
cursor = db.cursor()
print "Connected to database"
print "Fetching Node ID..."
serial_conn.write('6')
node_id = serial_conn.readline()
print "Node ID is " + node_id
#Check that the node ID is within range
if int(node_id) > 1 and int(node_id) < 255:
clear_menu(serial_conn)
print "Fetching OneWire sensors from node..."
#Send instruction to get OneWire sensor data
serial_conn.write('7')
#Fetch reply
num_sensors = serial_conn.read(1)
print "Number of sensors in EEPROM: " + num_sensors
#Check that the number of sensors is within range
if int(num_sensors) > 0 and int(num_sensors) <= 5:
for i in range(1, int(num_sensors)+1):
sensor_addr = serial_conn.readline()
print "Received address of device " + str(i)
sensor_addr = sensor_addr.strip()
print sensor_addr
print "Date of purchase for this sensor (YYYY-MM-DD): "
dop = raw_input()
print "Has this sensor been calibrated? (y/n) "
if_calibrated = raw_input()
if if_calibrated == "y":
calibration_flag = "YES"
print "Preparing sensor SQL statement..."
add_sensor = "INSERT INTO sensors (manufacturer, calibrated, sensor_type, measurement_unit, date_purchased, serial_number) VALUES ('Dallas OneWire', '%s', 'temperature', 'degrees celsius', '%s', '%s');" % (calibration_flag, dop, sensor_addr)
cursor.execute(add_sensor)
#Commit the change so that we can then fetch the sensor ID
db.commit()
#The ID of the sensor we just added will be the highest value in the auto incrementing column
cursor.execute("SELECT sensor_id FROM sensors ORDER BY sensor_id DESC LIMIT 1;")
sensor_id = cursor.fetchone()
#Add location data
print "Add location data? (y/n) "
if_location = raw_input()
if if_location == "y":
#Add location data
location_record = get_location_data(node_id, sensor_id[0], i)
if location_record != None:
print "Adding location data"
cursor.execute(location_record)
else:
print "Invalid location data"
if calibration_flag == "YES":
#Calibration flag has been set to YES, so add calibration data
"Calibration data needs to be added for this sensor"
calibration_record = get_calibration_data(sensor_id[0])
if calibration_record != None:
print "Adding calibration data"
cursor.execute(calibration_record)
else:
#User entered values are probably incorrect. Check if the user wants to change the calibration flag to NO
print "Invalid calibration data. Set calibrated field to NO? (y/n) "
if_reset = raw_input()
if if_reset == "y":
update_cal = "UPDATE sensors SET calibrated = 'NO' WHERE sensor_id = '%s'" % (sensor_id)
cursor.execute(update_cal)
else:
print "Warning: Calibrated flag is set to YES, but no calibration data has been added"
#Commit calibration and location data
db.commit()
print "Changes to database have been committed"
print "Done"
else:
print "Invalid number of sensors"
else:
print "Node ID is invalid or has not been set"
#Catch any errors associated with accessing the database
except MySQLdb.Error as e:
print "***ERROR***: Database error: {} {}" % (e[0], e[1])
db.rollback()
finally:
db.close()
main()
|
gpl-3.0
| -9,138,549,356,838,000,000
| 27.436567
| 278
| 0.668023
| false
| 3.363195
| false
| false
| false
|
googleads/googleads-python-lib
|
examples/ad_manager/v202105/custom_field_service/deactivate_all_line_item_custom_fields.py
|
1
|
2682
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example deactivates all active line items custom fields.
To determine which custom fields exist, run get_all_custom_fields.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
custom_field_service = client.GetService(
'CustomFieldService', version='v202105')
# Create statement to select only active custom fields that apply to
# line items.
statement = (ad_manager.StatementBuilder(version='v202105')
.Where('entityType = :entityType and isActive = :isActive')
.WithBindVariable('entityType', 'LINE_ITEM')
.WithBindVariable('isActive', True))
custom_fields_deactivated = 0
# Get custom fields by statement.
while True:
response = custom_field_service.getCustomFieldsByStatement(
statement.ToStatement())
if 'results' in response and len(response['results']):
# Display results.
for custom_field in response['results']:
print('Custom field with ID "%s" and name "%s" will'
' be deactivated.' % (custom_field['id'], custom_field['name']))
result = custom_field_service.performCustomFieldAction(
{'xsi_type': 'DeactivateCustomFields'}, statement.ToStatement())
if result and int(result['numChanges']) > 0:
custom_fields_deactivated += int(result['numChanges'])
statement.offset += statement.limit
else:
break
if custom_fields_deactivated > 0:
print('Number of custom fields deactivated: %s' % custom_fields_deactivated)
else:
print('No custom fields were deactivated.')
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
|
apache-2.0
| 8,414,907,480,595,389,000
| 35.739726
| 80
| 0.709918
| false
| 4.119816
| false
| false
| false
|
Rogentos/argent-anaconda
|
installclasses/awesome.py
|
1
|
2687
|
#
# awesome.py
#
# Copyright (C) 2010 Fabio Erculiani
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from installclass import BaseInstallClass
from constants import *
from product import *
from flags import flags
import os, types
import iutil
import gettext
_ = lambda x: gettext.ldgettext("anaconda", x)
import installmethod
from sabayon import Entropy
from sabayon.livecd import LiveCDCopyBackend
class InstallClass(BaseInstallClass):
id = "sabayon_awesome"
name = N_("Rogentos Awesome")
_pixmap_dirs = os.getenv("PIXMAPPATH", "/usr/share/pixmaps").split(":")
for _pix_dir in _pixmap_dirs:
_pix_path = os.path.join(_pix_dir, "awesome.png")
if os.path.isfile(_pix_path):
pixmap = _pix_path
dmrc = "awesome"
if Entropy().is_sabayon_steambox():
dmrc = "steambox"
_description = N_("Select this installation type for a default installation "
"with the Awesome desktop environment. "
"A small lightweight and functional working environment at your service.")
_descriptionFields = (productName,)
sortPriority = 10000
if not Entropy().is_installed("x11-wm/awesome"):
hidden = 1
def configure(self, anaconda):
BaseInstallClass.configure(self, anaconda)
BaseInstallClass.setDefaultPartitioning(self,
anaconda.storage, anaconda.platform)
def setSteps(self, anaconda):
BaseInstallClass.setSteps(self, anaconda)
anaconda.dispatch.skipStep("welcome", skip = 1)
#anaconda.dispatch.skipStep("network", skip = 1)
def getBackend(self):
return LiveCDCopyBackend
def productMatches(self, oldprod):
if oldprod is None:
return False
if oldprod.startswith(productName):
return True
return False
def versionMatches(self, oldver):
try:
oldVer = float(oldver)
newVer = float(productVersion)
except ValueError:
return True
return newVer >= oldVer
def __init__(self):
BaseInstallClass.__init__(self)
|
gpl-2.0
| 2,908,679,965,218,311,700
| 28.527473
| 83
| 0.679196
| false
| 3.866187
| false
| false
| false
|
Jazende/ProjectEuler
|
problem_026.py
|
1
|
2481
|
import math
def go_until_repeat_remainder(nom, den, cur_max=1000):
remainders = []
cycles = 0
while True:
if nom < den:
nom*=10
cycles += 1
if nom == den:
break
if nom > den:
remainder = nom%den
if remainder in remainders:
cycles += 1
break
remainders.append(remainder)
nom*=10
cycles += 1
return cycles
def problem_026(max_=1000):
cur_max = 0
cur_value = 0
for x in range(2, max_)[::-1]:
new_value = go_until_repeat_remainder(1, x, cur_max)
if new_value > cur_max:
cur_max = new_value
cur_value = x
return cur_value
print(problem_026(1000))
def long_division(nom, den, max_count=100000000):
result = "0."
nom *= 10
count = 0
while True:
if find_recurring(result):
temp = float(result)
if den*0.9 < int(1 / temp) < den *1.1:
break
if nom % den == 0:
result += str(nom//den)
break
elif nom > den:
result += str(nom//den)
nom = nom%den
nom *= 10
continue
elif nom < den:
result += "0"
nom *= 10
continue
count += 1
if count == max_count:
break
return result
def find_recurring(text):
rev_text = text[::-1]
for i in range(1, len(text)//2+1)[::-1]:
if rev_text[:i] == rev_text[i:i*2] == rev_text[i*2:i*3] == rev_text[i*3:i*4] and not int(rev_text[:i]) == 0:
return True
return False
def get_recurring(text):
rev_text = text[::-1]
for i in range(1, len(text)//2+1)[::-1]:
if rev_text[:i] == rev_text[i:i*2] == rev_text[i*2:i*3] == rev_text[i*3:i*4] and not int(rev_text[:i]) == 0:
return rev_text[:i]
def get_recurring_length(nom, den):
division = long_division(nom, den)
if find_recurring(division):
return len(get_recurring(division))
else:
return 0
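# Rough sanity checks (comments only, not part of the original solution):
# 1/7 = 0.142857142857... repeats with period 6, so get_recurring_length(1, 7) should give 6,
# while a terminating fraction such as 1/8 = 0.125 should give 0.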
def problem_26(target):
# fractions = {x: get_recurring_length(1, x) for x in range(2, target+1)}
fractions = []
for x in range(2, target+1):
fractions.append([x, get_recurring_length(1, x)])
fractions = sorted(fractions, key=lambda x: x[1], reverse=True)
print(fractions[:10])
return fractions[0]
problem_26(1000)
#print(long_division(1, 261))
|
gpl-3.0
| -6,844,096,892,728,083,000
| 26.566667
| 116
| 0.512696
| false
| 3.321285
| false
| false
| false
|
leewinder/tslerp
|
automation/prepare_distribution_package.py
|
1
|
2629
|
""" Builds up a release package ready to be built or distributed by NPM. The distributable content
is taken from the development folder to make it easier to strip out unneeded package content. """
#!/usr/bin/python
# Imports
import os
import shutil
import fnmatch
import distutils.dir_util
import cli
#
# Finds all files with a specific extension
#
def remove_all_files(directory, extension):
""" Finds all files with a specific extension """
# Delete everything in the source folders
for root, _, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, extension):
file_path = os.path.join(root, filename)
os.remove(file_path)
#
# Removes all the build files so we can do a clean build
#
def clean_build_files():
""" Removes all the build files so we can do a clean build """
# Get our path
source_folder = cli.get_project_root() + '/development/src'
remove_all_files(source_folder, '*.js')
remove_all_files(source_folder, '*.js.map')
remove_all_files(source_folder, '*.d.ts')
#
# Builds the Typescript project
#
def build_project():
""" Builds the Typescript project """
config_root = cli.get_project_root() + '/development/'
return_code, _, _ = cli.run_command_line(config_root, "tsc", ['-p', 'tsconfig-ci.json'])
if return_code != 0:
exit(return_code)
#
# Gets the main package folder
#
def create_package_folder():
""" Gets the main package folder """
# Get the path to the distribution package
root_path = cli.get_project_root() + '/release'
if os.path.exists(root_path):
shutil.rmtree(root_path)
distribution_folder = '/{}/package'.format(root_path)
os.makedirs(distribution_folder)
# Send it back with the root folder
return cli.get_project_root() + '/', distribution_folder + '/'
#
# Main entry function
#
def main():
""" Main entry function """
# Clean up our current build files
clean_build_files()
# Build the project
build_project()
# Get our folder
root_folder, distribution_folder = create_package_folder()
# Copy over the root content
shutil.copyfile(root_folder + 'LICENSE', distribution_folder + 'LICENSE')
shutil.copyfile(root_folder + 'README.md', distribution_folder + 'README.md')
# Package content
shutil.copyfile(root_folder + 'development/package.json', distribution_folder + 'package.json')
# Copy over all the source files
distutils.dir_util.copy_tree(root_folder + 'development/src/lib', distribution_folder)
#
# Main entry point
#
if __name__ == "__main__":
main()
|
mit
| 491,645,362,754,421,000
| 26.103093
| 99
| 0.669836
| false
| 3.766476
| false
| false
| false
|
zigazupancic/sat-solver
|
boolean.py
|
1
|
4960
|
class Formula:
def __ne__(self, other):
return not (self == other)
def flatten(self):
return self
def getVariable(self, mapping):
if self not in mapping:
mapping[self] = freshVariable()
return mapping[self]
class Variable(Formula):
def __init__(self, x):
self.x = x
def __str__(self, parentheses=False):
return str(self.x)
def __hash__(self):
return hash(self.x)
def __eq__(self, other):
if isinstance(other, Formula):
return isinstance(other, Variable) and self.x == other.x
else:
return self.x == other
def __lt__(self, other):
return self.__str__() < other.__str__()
def evaluate(self, values):
return values[self.x]
def simplify(self):
return self
def tseytin(self, mapping):
return self
def equiv(self, variable):
return And(Or(variable, Not(self)), Or(Not(variable), self))
def listing(self):
return [self.x]
class Not(Formula):
def __init__(self, x):
self.x = makeFormula(x)
def __str__(self, parentheses=False):
return "!" + self.x.__str__(True)
def __hash__(self):
return hash(("!", self.x))
def __eq__(self, other):
return isinstance(other, Not) and self.x == other.x
def __lt__(self, other):
return self.__str__() < other.__str__()
def evaluate(self, values):
return not self.x.evaluate(values)
def flatten(self):
if isinstance(self.x, Not):
return self.x.x
else:
return self
def simplify(self):
if isinstance(self.x, And):
return Or(*(Not(y) for y in self.x.terms)).simplify()
elif isinstance(self.x, Or):
return And(*(Not(y) for y in self.x.terms)).simplify()
elif isinstance(self.x, Variable):
return self
else:
return self.flatten().simplify()
def tseytin(self, mapping):
return Not(self.x.tseytin(mapping)).getVariable(mapping)
def equiv(self, variable):
return And(Or(variable, self.x), Or(Not(variable), self))
def listing(self):
return [self.flatten().simplify()]
class Multi(Formula):
def __init__(self, *args):
self.terms = frozenset(makeFormula(x) for x in args)
def __str__(self, parentheses = False):
if len(self.terms) == 0:
return self.empty
elif len(self.terms) == 1:
return next(iter(self.terms)).__str__(parentheses)
out = self.connective.join(x.__str__(True) for x in self.terms)
if parentheses:
return "(%s)" % out
else:
return out
def __hash__(self):
return hash((self.connective, self.terms))
def __eq__(self, other):
return isinstance(other, self.getClass()) \
and self.terms == other.terms
def evaluate(self, values):
return self.fun(x.evaluate(values) for x in self.terms)
def flatten(self):
this = self.getClass()
terms = (x.flatten() for x in self.terms)
out = this(*sum([list(x.terms) if isinstance(x, this)
else [x] for x in terms], []))
if len(out.terms) == 1:
return next(iter(out.terms))
else:
return out
def simplify(self):
terms = [x.simplify() for x in self.terms]
const = self.getDualClass()()
if const in terms:
return const
if len(terms) == 1:
return terms[0]
return self.getClass()(*terms).flatten()
def tseytin(self, mapping):
return self.getClass()(*(x.tseytin(mapping)
for x in self.terms)).getVariable(mapping)
def listing(self):
return [y.flatten().simplify() for y in self.terms]
class And(Multi):
empty = "T"
connective = r" & "
fun = all
def getClass(self):
return And
def getDualClass(self):
return Or
def equiv(self, variable):
return And(Or(variable, *(Not(x).flatten() for x in self.terms)),
*(Or(Not(variable), x) for x in self.terms))
class Or(Multi):
empty = "F"
connective = r" | "
fun = any
def getClass(self):
return Or
def getDualClass(self):
return And
def equiv(self, variable):
return And(Or(Not(variable), *self.terms),
*(Or(variable, Not(x)) for x in self.terms))
T = And()
F = Or()
def makeFormula(x):
if isinstance(x, Formula):
return x
else:
return Variable(x)
counter = 0
def freshVariable():
global counter
counter += 1
return Variable("x{}".format(counter))
def tseytin(formula, mapping=None):
if mapping is None:
mapping = {}
f = formula.tseytin(mapping)
return And(f, *(k.equiv(v) for k, v in mapping.items())).flatten()
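# Illustrative self-test (added sketch, not part of the original module): guarded so that
# importing boolean.py stays side-effect free; the names p, q, r are arbitrary examples.
if __name__ == "__main__":
demo = And(Or("p", Not("q")), "r")
print(demo)
print(demo.evaluate({"p": True, "q": False, "r": True}))
print(tseytin(demo))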
|
mit
| 3,495,161,264,798,552,000
| 23.554455
| 73
| 0.55
| false
| 3.757576
| false
| false
| false
|
mozillazg/lark
|
lark/lark/settings.py
|
1
|
1098
|
"""
Django settings for lark project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import os
from .base import *
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')i4@2vfr##+zd3cn8ckw#!lebya1mk2sg@yq9boog+=ofi@hf9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['127.0.0.1', 'fm.3sd.me']
SECRET_KEY = os.environ['LARK_SECRET_KEY']
ADMIN_URL = os.environ['LARK_ADMIN_URL']
DB_NAME = os.environ['LARK_DB_NAME']
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, DB_NAME),
}
}
|
mit
| 7,931,855,443,845,683,000
| 27.153846
| 71
| 0.716758
| false
| 3.041551
| false
| false
| false
|
bi4o4ek/yandex-loganalytics
|
loganalytics.py
|
1
|
3271
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This script calculates the 95th percentile of request time and shows the top 10 request IDs with the longest send time to customers
#
# Start example:
# ./loganalytics.py /path-to-log/input.txt > /path-to-some-dir/output.txt
# then you can complete analysis by running 2nd script
# ./granalytics.py /path-to-log/input.txt >> /path-to-some-dir/output.txt
#
# If you do not set path to log, then default location will be used (default_log_path)
import sys
import math
import heapq
__author__ = 'Vladimir Bykanov'
default_log_path = '/home/bi4o4ek/yaTest/input.txt'
# Pure python func for percentile calculation
# http://code.activestate.com/recipes/511478-finding-the-percentile-of-the-values/
def percentile(N, percent, key=lambda x: x):
"""
Find the percentile of a list of values.
@parameter N - is a list of values. Note N MUST BE already sorted.
@parameter percent - a float value from 0.0 to 1.0.
@parameter key - optional key function to compute value from each element of N.
@return - the percentile of the values
"""
if not N:
return None
k = (len(N) - 1) * percent
f = math.floor(k)
c = math.ceil(k)
if f == c:
return key(N[int(k)])
d0 = key(N[int(f)]) * (c - k)
d1 = key(N[int(c)]) * (k - f)
return d0 + d1
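# Worked example (comment only): for the pre-sorted list [10, 20, 30, 40, 50] and
# percent = 0.95, k = 4 * 0.95 = 3.8 falls between indices 3 and 4, so the result
# is 40 * 0.2 + 50 * 0.8 = 48.0.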
# Dict with id:timestamp of type StartRequest
start_stamps = {}
# Dict with id:timestamp of type StartSendResult
send_stamps = {}
# Dict with id:timestamp of type FinishRequest
finish_stamps = {}
# Dict with id:send time of each request
send_times = {}
# List with full time of each request
req_times = []
# Read log path from 1st script parameter
if len(sys.argv) > 1:
log_path = sys.argv[1]
else:
log_path = default_log_path
# Parse log and harvest info into start_stamps, send_stamps and finish_stamps
with open(log_path) as log_handler:
for line in log_handler:
line_elements = line.split()
req_stamp, req_id, req_type = line_elements[:3]
if req_type == 'StartRequest':
start_stamps[req_id] = int(req_stamp)
elif req_type == 'StartSendResult':
send_stamps[req_id] = int(req_stamp)
elif req_type == 'FinishRequest':
finish_stamps[req_id] = int(req_stamp)
# Numbers of StartRequest, StartSendResult and FinishRequest must be equal
if len(start_stamps) != len(finish_stamps) or len(finish_stamps) != len(send_stamps) :
print 'Numbers of StartRequest, StartSendResult and FinishRequest are not equal to each other'
exit(3)
# Compute full times of requests and send times to customer
for req_id in start_stamps:
# Full times
req_time = int(finish_stamps[req_id]) - int(start_stamps[req_id])
req_times.append(req_time)
# Send times
send_time = int(finish_stamps[req_id]) - int(send_stamps[req_id])
send_times[req_id] = send_time
req_times.sort()
print "95-й перцентиль времени работы:", percentile(req_times, 0.95)
send_times_top10 = heapq.nlargest(10, send_times, key = send_times.get)
print "Идентификаторы запросов с самой долгой фазой отправки результатов пользователю:"
print ' ', ', '.join(map(str, send_times_top10))
|
apache-2.0
| -2,882,829,045,544,299,000
| 32.442105
| 118
| 0.678628
| false
| 2.971936
| false
| false
| false
|
e-gun/HipparchiaBuilder
|
builder/parsers/regexsubstitutions.py
|
1
|
29163
|
# -*- coding: utf-8 -*-
"""
HipparchiaBuilder: compile a database of Greek and Latin texts
Copyright: E Gunderson 2016-21
License: GNU GENERAL PUBLIC LICENSE 3
(see LICENSE in the top level directory of the distribution)
"""
from string import punctuation
from typing import List
import configparser
import re
from builder.parsers.betacodeescapedcharacters import percentsubstitutes, quotesubstitutesa, quotesubstitutesb
from builder.parsers.betacodefontshifts import latinauthorandshiftparser
from builder.parsers.citationbuilder import citationbuilder
from builder.parsers.swappers import bitswapchars, hextohighunicode, highunicodetohex, hutohxgrouper
config = configparser.ConfigParser()
config.read('config.ini', encoding='utf8')
if config['buildoptions']['warnings'] == 'y':
warnings = True
else:
warnings = False
# [nb: some regex happens in dbloading.py as prep for loading]
def earlybirdsubstitutions(texttoclean):
"""
try to get out in front of some of the trickiest bits
note that you can't use quotation marks in here
:param texttoclean:
:return:
"""
if config['buildoptions']['smartsinglequotes'] == 'y':
# 'smart' single quotes; but this produces an initial elision problem for something like ’κείνων which will be ‘κείνων instead
supplement = [
(r'\s\'', r' ‘'),
(r'\'( |\.|,|;)', r'’\1')
]
else:
# single quotes are a problem because, on the one hand, we have elision at the first letter of the word and, on the other, we have plain old quotes
# full width variant for now
supplement = [(r'\'', r''')]
betacodetuples = [
(r'<(?!\d)', r'‹'), # '<': this one is super-dangerous: triple-check
(r'>(?!\d)', u'›'), # '>': this one is super-dangerous: triple-check
(r'_', u' \u2014 '), # doing this without spaces was producing problems with giant 'hyphenated' line ends
(r'\\\{', r'❴'),
(r'\\\}', r'❵'),
# the papyri exposed an interesting problem with '?'
# let's try to deal with this at earlybirdsubstitutions() because if you let '?' turn into '\u0323' it seems impossible to undo that
#
# many papyrus lines start like: '[ &c ? ]$' (cf. '[ &c ? $TO\ PRA=]GMA')
# this will end up as: '[ <hmu_latin_normal>c ̣ ]</hmu_latin_normal>'
# the space after '?' is not always there
# '[ &c ?]$! KEKEI/NHKA DI/KH PERI\ U(/BREWS [4!!!!!!!!!![ &c ?]4 ]$'
# also get a version of the pattern that does not have '[' early because we are not starting a line:
# '&{10m4}10 [ c ? ]$IASNI#80 *)EZIKEH\ M[ARTURW= &c ? ]$'
# this one also fails to have '&c' because the '&' came earlier
# here's hoping there is no other way to achieve this pattern...
(r'&c\s\?(.*?)\$', r'&c ﹖\1$'), # the question mark needs to be preserved, so we substitute a small question mark
(r'\[\sc\s\?(.*?)\$', r'[ c ﹖\1$'), # try to catch '&{10m4}10 [ c ? ]$I' without doing any damage
(r'&\?(.*?)\](.*?)\$', r'&﹖\1]\2$') # some stray lonely '?' cases remain
]
betacodetuples += supplement
for i in range(0, len(betacodetuples)):
texttoclean = re.sub(betacodetuples[i][0], betacodetuples[i][1], texttoclean)
return texttoclean
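# Small illustration (comment only): '<' and '>' not followed by a digit become '‹' and '›',
# and '_' becomes u'\u2014' padded with spaces, so a fragment like '<GNW/MH>_A' would come
# out as '‹GNW/MH›' + ' \u2014 ' + 'A' after this pass.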
def replacequotationmarks(texttoclean):
"""
purge " markup
:param texttoclean:
:return:
"""
quotes = re.compile(r'\"(\d{1,2})')
texttoclean = re.sub(quotes, quotesubstitutesa, texttoclean)
texttoclean = re.sub(r'\"(.*?)\"', r'“\1”', texttoclean)
quotes = re.compile(r'QUOTE(\d)(.*?)QUOTE(\d)')
texttoclean = re.sub(quotes, quotesubstitutesb, texttoclean)
return texttoclean
def lastsecondsubsitutions(texttoclean):
"""
regex work that for some reason or other needs to be put off until the very last second
:param texttoclean:
:return:
"""
# gr2762 and chr0012 will fail the COPY TO command because of '\\'
texttoclean = texttoclean.replace('\\', '')
betacodetuples = (
# a format shift code like '[3' if followed by a number that is supposed to print has an intervening ` to stop the TLG parser
# if you do this prematurely you will generate spurious codes by joining numbers that should be kept apart
(r'`(\d)', r'\1'),
(r'\\\(', r'('),
(r'\\\)', r')'),
)
for i in range(0, len(betacodetuples)):
texttoclean = re.sub(betacodetuples[i][0], betacodetuples[i][1], texttoclean)
if config['buildoptions']['simplifybrackets'] != 'n':
tosimplify = re.compile(r'[❨❩❴❵⟦⟧⟪⟫《》‹›⦅⦆₍₎]')
texttoclean = re.sub(tosimplify, bracketsimplifier, texttoclean)
# change:
# <span class="latin smallerthannormal">Gnom. Vatic. 743 [</span>
# into:
# <span class="latin smallerthannormal">Gnom. Vatic. 743 </span>[
bracketandspan = re.compile(r'([❨❩❴❵⟦⟧⟪⟫《》‹›⦅⦆₍₎⟨⟩\[\](){}])(</span>)')
texttoclean = re.sub(bracketandspan, r'\2\1', texttoclean)
spanandbracket = re.compile(r'(<span class="[^"]*?">)([❨❩❴❵⟦⟧⟪⟫《》‹›⦅⦆₍₎⟨⟩\[\](){}])')
texttoclean = re.sub(spanandbracket, r'\2\1', texttoclean)
# be careful not to delete whole lines: [^"]*? vs .*?
voidspan = re.compile(r'<span class="[^"]*?"></span> ')
texttoclean = re.sub(voidspan, r'', texttoclean)
# combining double inverted breve is misplaced: <3 >3
# combining breve below is misplaced: <4 >4
# combining breve (035d) ?: <5 >5
swaps = re.compile(u'(.)([\u035c\u035d\u0361])')
texttoclean = re.sub(swaps, r'\2\1', texttoclean)
# mismatched punctuation in something like ’αὐλῶνεϲ‘: a trivial issue that will add a lot of time to builds if you do all of the variants
# easy enough to turn this off
if config['buildoptions']['smartsinglequotes'] == 'y':
# if you enable the next a problem arises with initial elision: ‘κείνων instead of ’κείνων
texttoclean = re.sub(r'(\W)’(\w)', r'\1‘\2', texttoclean)
# now we try to undo the mess we just created by looking for vowel+space+quote+char
# the assumption is that an actual quotation will have a punctuation mark that will invalidate this check
# Latin is a mess, and you will get too many bad matches: De uerbo ’quiesco’
# but the following will still be wrong: τὰ ϲπέρματα· ‘κείνων γὰρ
# is it unfixable? how do I know that a proper quote did not just start?
previousendswithvowel = re.compile(r'([aeiouαειουηωᾳῃῳᾶῖῦῆῶάέίόύήώὰὲὶὸὺὴὼἂἒἲὂὒἢὢᾃᾓᾣᾂᾒᾢ]\s)‘(\w)')
texttoclean = re.sub(previousendswithvowel, r'\1’\2', texttoclean)
resized = re.compile(r'[﹖﹡/﹗│﹦﹢﹪﹠﹕']')
texttoclean = re.sub(resized, makepunctuationnormalsized, texttoclean)
texttoclean = re.sub(r'([\w.,;])‘([\W])', r'\1’\2', texttoclean)
texttoclean = re.sub(r'(\W)”(\w)', r'\1“\2', texttoclean)
texttoclean = re.sub(r'([\w.,;])“([\W])', r'\1”\2', texttoclean)
# ['‵', '′'], # reversed prime and prime (for later fixing)
texttoclean = re.sub(r'([\w.,])‵([\W])', r'\1′\2', texttoclean)
texttoclean = re.sub(r'(\W)′(\w)', r'\1‵\2', texttoclean)
texttoclean = re.sub(r'‵', r'‘', texttoclean)
texttoclean = re.sub(r'′', r'’', texttoclean)
return texttoclean
def makepunctuationnormalsized(match):
"""
swap a normal and (﹠) for a little one (&), etc.
:param match:
:return:
"""
val = match.group(0)
substitutions = {
'﹖': '?',
'﹡': '*',
'/': '/',
'﹗': '!',
'│': '|',
'﹦': '=',
'﹢': '+',
'﹪': '%',
'﹠': '&',
'﹕': ':',
''': u'\u0027', # simple apostrophe
}
try:
substitute = substitutions[val]
except KeyError:
substitute = ''
return substitute
def makepunctuationsmall(val):
"""
swap a little and (﹠) for a big one (&), etc.
:param val:
:return:
"""
substitutions = {
'?': '﹖',
'*': '﹡',
'/': '/',
'!': '﹗',
'|': '│',
'=': '﹦',
'+': '﹢',
'%': '﹪',
'&': '﹠',
':': '﹕',
u'\u0027': ''' # simple apostrophe
}
try:
substitute = substitutions[val]
except KeyError:
substitute = ''
return substitute
def bracketsimplifier(match):
"""
lots of brackets are out there; converge upon a smaller set
note that most of them were chosen to avoid confusing the parser, so restoring these puts us
more in line with the betacode manual
comment some of these out to restore biodiversity
:param match:
:return:
"""
val = match.group(0)
substitutions = {
'❨': '(',
'❩': ')',
'❴': '{',
'❵': '}',
'⟦': '[',
'⟧': ']',
'⦅': '(',
'⦆': ')',
'⸨': '(',
'⸩': ')',
# '₍': '(', # '[11' (enclose missing letter dots (!), expressing doubt whether there is a letter there at all)
# '₎': ')', # '11]'
# various angled brackets all set to 'mathematical left/right angle bracket' (u+27e8, u+27e9)
# alternately one could consider small versions instead of the full-sized versions (u+fe64, u+fe65)
# the main issue is that '<' and '>' are being kept out of the text data because of the HTML problem
# '⟪': '⟨', # but these are all asserted in the betacode
# '⟫': '⟩', # but these are all asserted in the betacode
'《': '⟨',
'》': '⟩',
'‹': '⟨',
'›': '⟩'
}
try:
substitute = substitutions[val]
except KeyError:
substitute = val
return substitute
def swapregexbrackets(val):
"""
get rid of [](){}
insert safe substitutes
currently unused
:param val:
:return:
"""
substitutions = {
'(': '❨',
')': '❩',
'{': '❴',
'}': '❵',
'[': '⟦',
']': '⟧',
}
try:
substitute = substitutions[val]
except KeyError:
substitute = val
return substitute
def debughostilesubstitutions(texttoclean):
"""
all sorts of things will be hard to figure out if you run this suite
but it does make many things 'look better' even if there are underlying problems.
see latinfontlinemarkupparser() for notes on what the problems are/look like
if the $ is part of an irrational 'on-without-off' Greek font toggle, then we don't care
it is anything that does not fit that pattern that is the problem
the hard part is churning through lots of texts looking for ones that do not fit that pattern
at the moment few texts seem to have even the benign toggle issue; still looking for places
where there is a genuine problem
:param texttoclean:
:return:
"""
if config['buildoptions']['hideknownblemishes'] != 'y':
return texttoclean
betacodetuples = [(r'[\$]', r''),]
# note that '&' will return to the text via the hexrunner: it can be embedded in the annotations
# and you will want it later in order to format that material when it hits HipparchiaServer:
# in 'Gel. &3N.A.& 20.3.2' the '&3' turns on italics and stripping & leaves you with 3N.A. (which is hard to deal with)
# $ is still a problem:
# e.g., 0085:
# Der Antiatt. p. 115, 3 Bekk.: ‘ὑδρηλοὺϲ’ $πίθουϲ καὶ ‘οἰνηροὺϲ’
# @&Der Antiatt. p. 115, 3 Bekk.%10 $8’U(DRHLOU\S‘ $PI/QOUS KAI\ $8’OI)NHROU\S‘$
for i in range(0, len(betacodetuples)):
texttoclean = re.sub(betacodetuples[i][0], betacodetuples[i][1], texttoclean)
return texttoclean
def cleanuplingeringmesses(texttoclean):
"""
we've made it to the bitter end but there is something ugly in the results
here we can clean things up that we are too lazy/stupid/afraid-of-worse to prevent from ending up at this end
:param texttoclean:
:return:
"""
return texttoclean
def bracketspacer(matchgroup):
"""
this is not good:
'[ <hmu_latin_normal>c 27 </hmu_latin_normal>π]όλεωϲ χ⦅αίρειν⦆. ὁμολογῶ'
it should be:
'[(spaces)c 27(spaces)π]όλεωϲ χ⦅αίρειν⦆. ὁμολογῶ'
not too hard to get the spaces right; but plain spaces will be collapsed when displayed,
so you should substitute u'\u00a0' (no-break space)
:param matchgroup:
:return:
"""
grpone = re.sub(r'\s', u'\u00a0', matchgroup.group(1))
grptwo = re.sub(r'\s', u'\u00a0', matchgroup.group(2))
grpthree = re.sub(r'\s', u'\u00a0', matchgroup.group(3))
substitute = '[{x}c{y}]{z}'.format(x=grpone, y=grptwo, z=grpthree)
return substitute
#
# fix problems with the original data
#
def fixhmuoragnizationlinebyline(txt: List[str]) -> List[str]:
"""
the original data has improper nesting of some tags; try to fix that
this is meaningless if you have set htmlifydatabase to 'y' since the 'spanning' will hide the phenomenon
:param txt:
:return:
"""
try:
htmlify = config['buildoptions']['htmlifydatabase']
except KeyError:
htmlify = 'y'
try:
rationalizetags = config['buildoptions']['rationalizetags']
except KeyError:
rationalizetags = 'n'
if htmlify == 'y' or rationalizetags == 'n':
pass
else:
txt = [fixhmuirrationaloragnization(x) for x in txt]
return txt
def fixhmuirrationaloragnization(worlkine: str):
"""
Note the irrationality (for HTML) of the following (which is masked by the 'spanner'):
[have 'EX_ON' + 'SM_ON' + 'EX_OFF' + 'SM_OFF']
[need 'EX_ON' + 'SM_ON' + 'SM_OFF' + 'EX_OFF' + 'SM_ON' + 'SM_OFF' ]
hipparchiaDB=# SELECT index, marked_up_line FROM gr0085 where index = 14697;
index | marked_up_line
-------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------
14697 | <hmu_span_expanded_text><hmu_fontshift_greek_smallerthannormal>τίϲ ἡ τάραξιϲ</hmu_span_expanded_text> τοῦ βίου; τί βάρβιτοϲ</hmu_fontshift_greek_smallerthannormal>
(1 row)
hipparchiaDB=> SELECT index, marked_up_line FROM gr0085 where index = 14697;
index | marked_up_line
-------+---------------------------------------------------------------------------------------------------------------
14697 | <span class="expanded_text"><span class="smallerthannormal">τίϲ ἡ τάραξιϲ</span> τοῦ βίου; τί βάρβιτοϲ</span>
(1 row)
fixing this is an interesting question; it seems likely that I have missed some way of doing it wrong...
but note 'b' below: this is pretty mangled and the output is roughly right...
invalidline = '<hmu_span_expanded_text><hmu_fontshift_greek_smallerthannormal>τίϲ ἡ τάραξιϲ</hmu_span_expanded_text> τοῦ βίου; τί βάρβιτοϲ</hmu_fontshift_greek_smallerthannormal>'
openspans {0: 'span_expanded_text', 24: 'fontshift_greek_smallerthannormal'}
closedspans {76: 'span_expanded_text', 123: 'fontshift_greek_smallerthannormal'}
balancetest [(False, False, True)]
validline = ' <hmu_fontshift_latin_smallcapitals>errantes</hmu_fontshift_latin_smallcapitals><hmu_fontshift_latin_normal> pascentes, ut alibi “mille meae Siculis</hmu_fontshift_latin_normal>'
openspans {36: 'fontshift_latin_smallcapitals', 115: 'fontshift_latin_normal'}
closedspans {79: 'fontshift_latin_smallcapitals', 183: 'fontshift_latin_normal'}
balancetest [(False, True, False)]
# need a third check: or not (open[okeys[x]] == closed[ckeys[x]])
z = ' <hmu_fontshift_latin_normal>II 47.</hmu_fontshift_latin_normal><hmu_fontshift_latin_italic> prognosticorum causas persecuti sunt et <hmu_span_latin_expanded_text>Boëthus Stoicus</hmu_span_latin_expanded_text>,</hmu_fontshift_latin_italic>'
openspans {18: 'fontshift_latin_normal', 81: 'fontshift_latin_italic', 150: 'span_latin_expanded_text'}
closedspans {52: 'fontshift_latin_normal', 195: 'span_latin_expanded_text', 227: 'fontshift_latin_italic'}
balancetest [(False, True, False), (True, False, True)]
a = '[]κακ<hmu_span_superscript>η</hmu_span_superscript> βου<hmu_span_superscript>λ</hmu_span_superscript>'
openspans {5: 'span_superscript', 55: 'span_superscript'}
closedspans {28: 'span_superscript', 78: 'span_superscript'}
balancetest [(False, True, False)]
b = []κακ<hmu_span_superscript>η</hmu_span_superscript> β<hmu_span_x>ο<hmu_span_y>υab</hmu_span_x>c<hmu_span_superscript>λ</hmu_span_y></hmu_span_superscript>
testresult (False, True, False)
testresult (False, False, True)
testresult (False, False, True)
balanced to:
[]κακ<hmu_span_superscript>η</hmu_span_superscript> β<hmu_span_x>ο<hmu_span_y>υab</hmu_span_y></hmu_span_x><hmu_span_y>c<hmu_span_superscript>λ</hmu_span_superscript></hmu_span_y><hmu_span_superscript></hmu_span_superscript>
"""
opener = re.compile(r'<hmu_(span|fontshift)_(.*?)>')
closer = re.compile(r'</hmu_(span|fontshift)_(.*?)>')
openings = list(re.finditer(opener, worlkine))
openspans = {x.span()[0]: '{a}_{b}'.format(a=x.group(1), b=x.group(2)) for x in openings}
closings = list(re.finditer(closer, worlkine))
closedspans = {x.span()[0]: '{a}_{b}'.format(a=x.group(1), b=x.group(2)) for x in closings}
balancetest = list()
invalidpattern = (False, False, True)
if len(openspans) == len(closedspans) and len(openspans) > 1:
# print('openspans', openspans)
# print('closedspans', closedspans)
rng = range(len(openspans) - 1)
okeys = sorted(openspans.keys())
ckeys = sorted(closedspans.keys())
# test 1: a problem if the next open ≠ this close and next open position comes before this close position
# open: {0: 'span_expanded_text', 24: 'fontshift_greek_smallerthannormal'}
# closed: {76: 'span_expanded_text', 123: 'fontshift_greek_smallerthannormal'}
# test 2: succeed if the next open comes after the this close AND the this set of tags match
# open {18: 'fontshift_latin_normal', 81: 'fontshift_latin_italic', 150: 'span_latin_expanded_text'}
# closed {52: 'fontshift_latin_normal', 195: 'span_latin_expanded_text', 227: 'fontshift_latin_italic'}
# test 3: succeed if the next open comes before the previous close
testone = [not (openspans[okeys[x + 1]] != closedspans[ckeys[x]]) and (okeys[x + 1] < ckeys[x]) for x in rng]
testtwo = [okeys[x + 1] > ckeys[x] and openspans[okeys[x]] == closedspans[ckeys[x]] for x in rng]
testthree = [okeys[x + 1] < ckeys[x] for x in rng]
balancetest = [(testone[x], testtwo[x], testthree[x]) for x in rng]
# print('balancetest', balancetest)
if invalidpattern in balancetest:
# print('{a} needs balancing:\n\t{b}'.format(a=str(), b=worlkine))
modifications = list()
balancetest.reverse()
itemnumber = 0
while balancetest:
testresult = balancetest.pop()
if testresult == invalidpattern:
needinsertionat = ckeys[itemnumber]
insertionreopentag = openings[itemnumber + 1].group(0)
insertionclosetag = re.sub(r'<', r'</', openings[itemnumber + 1].group(0))
modifications.append({'item': itemnumber,
'position': needinsertionat,
'closetag': insertionclosetag,
'opentag': insertionreopentag})
itemnumber += 1
newline = str()
placeholder = 0
for m in modifications:
item = m['item']
newline += worlkine[placeholder:m['position']]
newline += m['closetag']
newline += closings[item].group(0)
newline += m['opentag']
placeholder = m['position'] + len(closings[item].group(0))
newline += worlkine[placeholder:]
# print('{a} balanced to:\n\t{b}'.format(a=str(), b=newline))
worlkine = newline
return worlkine
#
# cleanup of the cleaned up: generative citeable texts
#
def totallemmatization(parsedtextfile: List[str]) -> List[str]:
"""
will use decoded hex commands to build a citation value for every line in the text file
can produce a formatted line+citation, but really priming us for the move to the db
note the potential gotcha: some authors have a first work that is not 001 but instead 002+
:param parsedtextfile:
:return: tuples that levelmap+the line
"""
levelmapper = {
# be careful about re returning '1' and not 1
0: 1,
1: 1,
2: 1,
3: 1,
4: 1,
5: 1
}
dbready = list()
work = 1
setter = re.compile(r'<hmu_set_level_(\d)_to_(.*?)\s/>')
adder = re.compile(r'<hmu_increment_level_(\d)_by_1\s')
wnv = re.compile(r'<hmu_cd_assert_work_number betacodeval="(\d{1,3})')
for line in parsedtextfile:
gotwork = re.search(wnv, line)
if gotwork:
work = int(gotwork.group(1))
for l in range(0, 6):
levelmapper[l] = 1
gotsetting = re.search(setter, line)
if gotsetting:
level = int(gotsetting.group(1))
setting = gotsetting.group(2)
# Euripides (0006) has <hmu_set_level_0_to_post 961 /> after πῶς οὖν ἔτ’ ἂν θνήισκοιμ’ ἂν ἐνδίκως, πόσι,
# 'post 961' becomes a problem: you need to add one to 961, but you will fail 'str(int(setting)'
# slicing at the whitespace will fix this (sort of)
# but then you get a new problem: UPZ (DDP0155) and its new documents '<hmu_set_level_5_to_2 rp />'
# the not so pretty solution of the hour is to build a quasi-condition that is seldom met
# it is almost never true that the split will yield anything other than the original item
# it also is not clear how many other similar cases are out there: 'after 1001', etc.
levelmapper[level] = setting.split('post ')[-1]
if level > 0:
for l in range(0, level):
levelmapper[l] = 1
gotincrement = re.search(adder, line)
# if you don't reset the lower counters, then you will get something like 'line 10' when you first initialize a new section
if gotincrement:
level = int(gotincrement.group(1))
setting = 1
try:
# are we adding integers?
levelmapper[level] = str(int(setting) + int(levelmapper[level]))
except ValueError:
# ok, we are incrementing a letter; hope it's not z+1
# can handle multicharacter strings, but how often is it not "a --> b"?
lastchar = levelmapper[level][-1]
newlastchar = chr(ord(lastchar) + setting)
levelmapper[level] = levelmapper[level][:-1] + newlastchar
# if you increment lvl 1, you need to reset lvl 0
# this is a bit scary because sometimes you get an 0x81 and sometimes you don't
if level > 0:
for l in range(0, level):
levelmapper[l] = 1
# db version: list of tuples + the line
tups = [('0', str(levelmapper[0])), ('1', str(levelmapper[1])), ('2', str(levelmapper[2])), ('3', str(levelmapper[3])), ('4', str(levelmapper[4])), ('5', str(levelmapper[5]))]
dbready.append([str(work), tups, line])
return dbready
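# Illustrative shape of a dbready entry (comment only, hypothetical values):
# ['1', [('0', '12'), ('1', '3'), ('2', '1'), ('3', '1'), ('4', '1'), ('5', '1')], '<line markup>']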
def addcdlabels(texttoclean, authornumber):
"""
not totally necessary and a potential source of problems
emerged before hexrunner worked right and not always in agreement with it?
the CD would re-initialize values every block; this turns that info into human-readable info
:param texttoclean:
:param authornumber:
:return:
"""
# cd blocks end 0xf3 + 0x0
# the newline lets you reset levels right?
search = r'(█ⓕⓔ\s(█⓪\s){1,})'
replace = '\n<hmu_end_of_cd_block_re-initialize_key_variables />'
texttoclean = re.sub(search, replace, texttoclean)
template = '█ⓔⓕ █⑧⓪ █ⓑ{one} █ⓑ{two} █ⓑ{three} █ⓑ{four} █ⓕⓕ '
authornumber = hextohighunicode(authornumber)
digits = re.match(r'(.)(.)(.)(.)', authornumber)
search = template.format(one=digits.group(1), two=digits.group(2), three=digits.group(3), four=digits.group(4))
replace = '<hmu_cd_assert_author_number value=\"{v}\"/>'.format(v=highunicodetohex(authornumber))
texttoclean = re.sub(search, replace, texttoclean)
# 'primary level (81)' info stored in a run of 6 bytes:
# 0xef 0x81 0xb0 0xb0 0xb1 0xff
# the NEWLINE here has subtle implications: might need to play with it...
# if you do not then you can include the last line of one work in the next...
search = r'(█ⓔⓕ █⑧① █ⓑ(.) █ⓑ(.) █ⓑ(.) █ⓕⓕ )'
replace = r'\n<hmu_cd_assert_work_number betacodeval="\2\3\4"/>'
texttoclean = re.sub(search, replace, texttoclean)
# 'secondary level (82)' info stored in a run of bytes whose length varies: add 127 to them and you get an ascii value
# compare geasciistring() in idt file reader: '& int('7f',16))'
# 0xef 0x82 0xc1 0xf0 0xef 0xec 0xff
# 0xef 0x82 0xcd 0xf5 0xee 0xff
search = r'(█ⓔⓕ\s█⑧②\s((█..\s){1,}?)█ⓕⓕ) '
replace = r'<hmu_cd_assert_work_abbreviation betacodeval="\2"/>'
texttoclean = re.sub(search, replace, texttoclean)
# 'tertiary level (83)' info stored in a run of bytes whose length varies: add 127 to them and you get an ascii value
# 0xef 0x83 0xc1 0xf0 0xf5 0xec 0xff
search = r'(█ⓔⓕ\s█⑧③\s((█..\s){1,}?)█ⓕⓕ) '
replace = r'<hmu_cd_assert_author_abbrev betacodeval="\2"/>'
texttoclean = re.sub(search, replace, texttoclean)
# now reparse
search = r'<hmu_cd_assert_work_number betacodeval="..."/>'
texttoclean = re.sub(search, hutohxgrouper, texttoclean)
search = r'(<hmu_cd_assert_work_abbreviation betacodeval=")(.*?)\s("/>)'
texttoclean = re.sub(search, converthextoascii, texttoclean)
search = r'(<hmu_cd_assert_author_abbrev betacodeval=")(.*?)\s("/>)'
texttoclean = re.sub(search, converthextoascii, texttoclean)
# next comes something terrifying: after the author_abbrev we get 4 - 6 hex values
# try to handle it with the citationbuilder
search = r'(<hmu_cd_assert_author_abbrev betacodeval="(.*?)" />)((█[⓪①②③④⑤⑥⑦⑧⑨ⓐⓑⓒⓓⓔⓕ]{1,2}\s){2,})'
texttoclean = re.sub(search, citationbuilder, texttoclean)
return texttoclean
def hexrunner(texttoclean):
"""
First you find the hex runs.
Then you send these to the citation builder to be read/decoded
All of the heavy lifting happens there
:param texttoclean:
:return: texttoclean
"""
# re.sub documentation: if repl is a function, it is called for every non-overlapping occurrence of pattern. The function takes a single match object argument, and returns the replacement string
search = r'((█[⓪①②③④⑤⑥⑦⑧⑨ⓐⓑⓒⓓⓔⓕ]{1,2}\s){1,})'
texttoclean = re.sub(search, citationbuilder, texttoclean)
return texttoclean
#
# misc little tools
#
# some of these functions done similarly in idtfiles parsing
# refactor to consolidate if you care
#
def converthextoascii(hextoasciimatch):
"""
undo the human readability stuff so you can decode the raw data
:param hextoasciimatch:
:return:
"""
asciilevel = ''
hexlevel = hextoasciimatch.group(2)
hexlevel = highunicodetohex(hexlevel)
hexvals = re.split(r'█', hexlevel)
del hexvals[0]
asciilevel = bitswapchars(hexvals)
a = hextoasciimatch.group(1) + asciilevel + hextoasciimatch.group(3)
return a
def cleanworkname(betacodeworkname):
"""
turn a betacode workname into a 'proper' workname
:param betacodeworkname:
:return:
"""
if '*' in betacodeworkname and '$' not in betacodeworkname:
betacodeworkname = re.sub(r'\*', r'$*', betacodeworkname)
percents = re.compile(r'%(\d{1,3})')
workname = re.sub(percents, percentsubstitutes, betacodeworkname)
ands = re.compile(r'&(\d+)(.*?)')
workname = re.sub(ands, latinauthorandshiftparser, workname)
workname = re.sub(r'\[2(.*?)]2', r'⟨\1⟩', workname)
workname = re.sub(r'<.*?>', '', workname)
workname = re.sub(r'&\d+(`|)', '', workname) # e.g.: IG I&4`2&
workname = re.sub(r'&', '', workname)
workname = re.sub(r'`', '', workname)
# nb latin diacriticals still potentially here
return workname
def colonshift(txt):
"""
colon to middot
:param txt:
:return:
"""
return re.sub(r':', '·', txt)
def insertnewlines(txt):
"""
break up the file into something you can walk through line-by-line
:param txt:
:return:
"""
txt = re.sub(r'(<hmu_set_level)', r'\n\1', txt)
txt = txt.split('\n')
return txt
def tidyupterm(word: str, punct=None) -> str:
"""
remove gunk that should not be present in a cleaned line
pass punct if you do not feel like compiling it 100k times
:param word:
:param punct:
:return:
"""
if not punct:
elidedextrapunct = '\′‵‘·̆́“”„—†⌈⌋⌊⟫⟪❵❴⟧⟦(«»›‹⟨⟩⸐„⸏⸖⸎⸑–⏑–⏒⏓⏔⏕⏖⌐∙×⁚̄⁝͜‖͡⸓͝'
extrapunct = elidedextrapunct + '’'
punct = re.compile('[{s}]'.format(s=re.escape(punctuation + extrapunct)))
# hard to know whether or not to do the editorial insertions stuff: ⟫⟪⌈⌋⌊
# word = re.sub(r'\[.*?\]','', word) # '[o]missa' should be 'missa'
word = re.sub(r'[0-9]', '', word)
word = re.sub(punct, '', word)
invals = u'jv'
outvals = u'iu'
word = word.translate(str.maketrans(invals, outvals))
return word
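# Quick illustration (comment only): tidyupterm('iu[v]at·') strips the brackets, the middle
# dot and any digits, then applies the j/v -> i/u swap, yielding 'iuuat'.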
def capitalvforcapitalu(thetext: str) -> str:
"""
Latin texts have "Ubi" instead of "Vbi"
Livy and Justinian even have Ualerius instead of Valerius
you need to do this right away before any markup, etc appears
a problem: Greek inside a Roman author will get mangled: "PARADOXON II: Ὅτι αϝ)τάρκηϲ ἡ ἀρετὴ πρὸϲ εϝ)δαιμονίαν."
This arises from: $*(/OTI AV)TA/RKHS H( A)RETH\ PRO\S EV)DAIMONI/AN.&}1
:param thetext:
:return:
"""
# print('applying U -> V transformation to {a}'.format(a=thisauthor))
thetext = re.sub(r'U', 'V', thetext)
lookingfor = re.compile(r'\$(.*?)&')
uswap = lambda x: '$' + re.sub(r'V', r'U', x.group(1)) + '&'
thetext = re.sub(lookingfor, uswap, thetext)
return thetext
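# Quick illustration (comment only): capitalvforcapitalu('Ubi Ualerius fuit') returns
# 'Vbi Valerius fuit'; lower-case letters are untouched, and any V produced inside a
# $...& Greek span is swapped back to U afterwards.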
|
gpl-3.0
| 2,887,905,583,091,091,000
| 32.451843
| 264
| 0.656619
| false
| 2.613284
| true
| false
| false
|
rspavel/spack
|
var/spack/repos/builtin/packages/ibm-java/package.py
|
1
|
3917
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import platform
import os
class IbmJava(Package):
"""Binary distribution of the IBM Java Software Development Kit
for big and little-endian powerpc (power7, 8 and 9). Note: IBM
is fairly aggressive about taking down old versions, so old
(and even current) versions may not be available."""
homepage = "https://developer.ibm.com/javasdk/"
maintainers = ['mwkrentel']
# Note: IBM is fairly aggressive about taking down old versions,
# so we may need to update this frequently. Also, old revs may
# not be available for download.
version_list = [
('8.0.6.11', 'ppc64', '6fd17a6b9a34bb66e0db37f6402dc1b7612d54084c94b859f4a42f445fd174d4'),
('8.0.6.11', 'ppc64le', 'd69ff7519e32e89db88a9a4d4d88d1881524073ac940f35d3860db2c6647be2e'),
('8.0.6.10', 'ppc64', 'ff5151ead88f891624eefe33d80d56c325ca0aa4b93bd96c135cad326993eda2'),
('8.0.6.10', 'ppc64le', 'ea99ab28dd300b08940882d178247e99aafe5a998b1621cf288dfb247394e067'),
('8.0.6.7', 'ppc64', 'a1accb461a039af4587ea86511e317fea1d423e7f781459a17ed3947afed2982'),
('8.0.6.7', 'ppc64le', '9ede76a597af28c7f10c6f8a68788cc2dcd39fdab178c9bac56df8b3766ac717'),
('8.0.6.0', 'ppc64', 'e142746a83e47ab91d71839d5776f112ed154ae180d0628e3f10886151dad710'),
('8.0.6.0', 'ppc64le', '18c2eccf99225e6e7643141d8da4110cacc39f2fa00149fc26341d2272cc0102'),
('8.0.5.30', 'ppc64', 'd39ce321bdadd2b2b829637cacf9c1c0d90235a83ff6e7dcfa7078faca2f212f'),
('8.0.5.30', 'ppc64le', 'dec6434d926861366c135aac6234fc28b3e7685917015aa3a3089c06c3b3d8f0'),
]
# There are separate tar files for big and little-endian machine
# types. And no, this won't work cross platform.
for (ver, mach, sha) in version_list:
if mach == platform.machine():
version(ver, sha256=sha, expand=False)
provides('java@8')
conflicts('target=x86_64:', msg='ibm-java is only available for ppc64 and ppc64le')
# This assumes version numbers are 4-tuples: 8.0.5.30
def url_for_version(self, version):
# Convert 8.0.5.30 to 8.0-5.30 for the file name.
dash = '{0}.{1}-{2}.{3}'.format(*(str(version).split('.')))
url = ('http://public.dhe.ibm.com/ibmdl/export/pub/systems/cloud'
'/runtimes/java/{0}/linux/{1}/ibm-java-sdk-{2}-{1}'
'-archive.bin').format(version, platform.machine(), dash)
return url
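# For instance (illustrative only), version 8.0.5.30 on a ppc64le host maps to:
# http://public.dhe.ibm.com/ibmdl/export/pub/systems/cloud/runtimes/java/8.0.5.30/linux/ppc64le/ibm-java-sdk-8.0-5.30-ppc64le-archive.bin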
@property
def home(self):
return self.prefix
@property
def libs(self):
return find_libraries(['libjvm'], root=self.home, recursive=True)
def setup_run_environment(self, env):
env.set('JAVA_HOME', self.home)
def setup_dependent_build_environment(self, env, dependent_spec):
env.set('JAVA_HOME', self.home)
def setup_dependent_package(self, module, dependent_spec):
self.spec.home = self.home
def install(self, spec, prefix):
archive = os.path.basename(self.stage.archive_file)
# The archive.bin file is quite fussy and doesn't work as a
# symlink.
if os.path.islink(archive):
targ = os.readlink(archive)
os.unlink(archive)
copy(targ, archive)
# The properties file is how we avoid an interactive install.
prop = 'properties'
with open(prop, 'w') as file:
file.write('INSTALLER_UI=silent\n')
file.write('USER_INSTALL_DIR=%s\n' % prefix)
file.write('LICENSE_ACCEPTED=TRUE\n')
# Running the archive file installs everything.
set_executable(archive)
inst = Executable(join_path('.', archive))
inst('-f', prop)
return
|
lgpl-2.1
| -2,857,033,170,495,520,000
| 38.969388
| 100
| 0.66071
| false
| 2.985518
| false
| false
| false
|
EmanueleCannizzaro/scons
|
test/option-b.py
|
1
|
1515
|
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/option-b.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', "")
test.run(arguments = '-b .',
stderr = "Warning: ignoring -b option\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
mit
| 2,320,631,516,134,854,700
| 34.232558
| 90
| 0.747855
| false
| 3.778055
| true
| false
| false
|
gt-ros-pkg/hrl_autobed_dev
|
autobed_pose_estimator/src/visualize_pressure_mat_3d.py
|
1
|
1438
|
import numpy as np
import roslib; roslib.load_manifest('hrl_msgs')
import rospy
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from hrl_msgs.msg import FloatArrayBare
class Visualize3D():
def __init__(self):
rospy.Subscriber("/fsascan", FloatArrayBare, self.pressure_map_callback)
self.fig = plt.figure()
#self.ax = self.fig.add_subplot(111, projection='3d')
#self.ax = self.fig.gca(projection='3d')
self.ax = self.fig.add_subplot(111, projection='3d')
a=np.linspace(0, 3.14, 64)
b=np.linspace(0, 3.14, 27)
self.physical_pressure_map = np.zeros((64, 27))
def pressure_map_callback(self, data):
'''This callback accepts incoming pressure map from
the Vista Medical Pressure Mat and sends it out.
Remember, this array needs to be binarized to be used'''
self.physical_pressure_map = np.resize(np.asarray(data.data), (64, 27))
def run(self):
x, y=np.meshgrid(np.linspace(0, 63, 64), np.linspace(0, 26, 27));
z=self.physical_pressure_map
self.ax.plot_wireframe(x, y, z, rstride=10, cstride=10)
plt.show()
if __name__ == "__main__":
a=Visualize3D()
rospy.init_node('visualize_pressure_3D', anonymous=True)
rate=rospy.Rate(5)
while not rospy.is_shutdown():
a.run()
rate.sleep()
|
mit
| 3,793,219,297,732,556,300
| 30.955556
| 80
| 0.625869
| false
| 3.231461
| false
| false
| false
|
jeffmarcom/checkbox
|
plainbox/plainbox/abc.py
|
1
|
6650
|
# This file is part of Checkbox.
#
# Copyright 2012 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <zygmunt.krynicki@canonical.com>
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
"""
:mod:`plainbox.abc` -- abstract base classes
============================================
Those classes are actually implemented in the plainbox.impl package. This
module is here so that the essential API concepts are in a single spot and are
easier to understand (by not being mixed with additional source code).
.. note::
This module has API stability guarantees. We are not going to break or
introduce backwards incompatible interfaces here without following our API
deprecation policy. All existing features will be retained for at least
three releases. All deprecated symbols will warn when they will cease to be
available.
"""
from abc import ABCMeta, abstractproperty, abstractmethod
class IJobDefinition(metaclass=ABCMeta):
"""
Job definition that contains a mixture of meta-data and executable
information that can be consumed by the job runner to produce results.
"""
# XXX: All IO methods to save/load this would be in a helper class/function
# that would also handle format detection, serialization and validation.
@abstractproperty
def plugin(self):
"""
Name of the job interpreter.
Various interpreters are provided by the job runner.
"""
@abstractproperty
def name(self):
"""
Name of the job
"""
@abstractproperty
def requires(self):
"""
List of expressions that need to be true for this job to be available
This value can be None
"""
@abstractproperty
def command(self):
"""
The shell command to execute to perform the job.
The return code, standard output and standard error streams are
automatically recorded and processed, depending on the plugin type.
This value can be None
"""
@abstractproperty
def description(self):
"""
Human-readable description of the job.
This field is typically used to include execution and verification
steps for manual and human-assisted tests.
This value can be None
"""
@abstractproperty
def depends(self):
"""
Comma-delimited dependency expression
This field can be used to express job dependencies. If a job depends on
another job it can only start if the other job has run and succeeded.
This is the original data as provided when constructed. Use
get_direct_dependencies() to obtain the parsed equivalent.
This value can be None
"""
class IJobResult(metaclass=ABCMeta):
"""
Class for representing results from a single job
"""
# XXX: We could also store stuff like job duration and other meta-data but
# I wanted to avoid polluting this proposal with mundane details
@abstractproperty
def job(self):
"""
Definition of the job
The object implements IJobDefinition
"""
@abstractproperty
def outcome(self):
"""
Outcome of the test.
The result of either automatic or manual verification. Depending on the
plugin (test type). Available values are defined as class properties
above.
"""
@abstractproperty
def comments(self):
"""
The comment that was added by the user, if any
"""
@abstractproperty
def io_log(self):
"""
A sequence of tuples (delay, stream-name, data) where delay is the
delay since the previous message in seconds (typically a fractional
number), stream name is either 'stdout' or 'stderr' and data is the
bytes object that was obtained from that stream.
"""
# XXX: it could also encode 'stdin' if the user was presented with a
# console to type in and we sent that to the process.
# XXX: This interface is low-level but captures everything that has
# occurred and is text-safe. You can call an utility function to
# convert that to a text string that most closely represents what a
# user would see, having run this command in the terminal.
@abstractproperty
def return_code(self):
"""
Command return code.
This is the return code of the process started to execute the command
from the job definition. It can also encode the signal that the
process was killed with, if any.
"""
class IJobRunner(metaclass=ABCMeta):
"""
Something that can run a job definition and produce results.
    You can run many jobs with one runner; each time you'll get an additional
    result object. Typically you will need to connect the runner to a user
interface but headless mode is also possible.
"""
@abstractmethod
def run_job(self, job):
"""
Run the specified job.
        Calling this method may block for an arbitrary amount of time. User
interfaces should ensure that it runs in a separate thread.
The return value is a JobResult object that contains all the data that
was captured during the execution of the job. Some jobs may not return
a JobResult value.
"""
# XXX: threads suck, could we make this fully asynchronous? The only
# thing that we really want is to know when the command has stopped
# executing. We could expose the underlying process mechanics so that
# QT/GTK applications could tie that directly into their event loop.
class IUserInterfaceIO(metaclass=ABCMeta):
"""
Base class that allows job runner to interact with the user interface.
"""
@abstractmethod
def get_manual_verification_outcome(self):
"""
        Get the outcome of the manual verification, according to the user.
May raise NotImplementedError if the user interface cannot provide this
answer.
"""
|
gpl-3.0
| -5,495,530,179,601,678,000
| 31.281553
| 79
| 0.671278
| false
| 4.808388
| false
| false
| false
|
kriberg/stationspinner
|
stationspinner/accounting/urls.py
|
1
|
1078
|
from django.conf.urls import url, include
from rest_framework import routers
from stationspinner.accounting.views import APIKeyViewset, LoginView, \
CapsulerViewset, LogoutView, MissingTrainingViewset, RevalidateKeyView, \
ObtainAuthTokenView, CheckAuthTokenView, RefreshAuthTokenView
router = routers.DefaultRouter()
router.register(r'capsuler', CapsulerViewset, 'capsuler')
router.register(r'missing-training', MissingTrainingViewset, 'missing-training')
router.register(r'apikeys', APIKeyViewset, 'apikeys')
urlpatterns = [
url(r'^obtaintoken/$', ObtainAuthTokenView.as_view(), name='accounting_obtaintoken'),
url(r'^checktoken/$', CheckAuthTokenView.as_view(), name='accounting_checktoken'),
url(r'^refreshtoken/$', RefreshAuthTokenView.as_view(), name='accounting_refreshtoken'),
url(r'^logout/$', LogoutView.as_view(), name='accounting_logout'),
url(r'^login/$', LoginView.as_view(), name='accounting_login'),
url(r'^revalidate-key/$', RevalidateKeyView.as_view(), name='accounting_revalidate_key'),
url(r'^', include(router.urls)),
]
|
agpl-3.0
| -8,888,957,358,530,266,000
| 52.95
| 93
| 0.751391
| false
| 3.488673
| false
| true
| false
|
robert-7/gambit
|
src/python/gambit/tests/test_games/personal_test_games/read_and_solve.py
|
1
|
1668
|
import gambit, time, os, sys
from utils import compute_time_of
'''
Read GAME_FILE in SAVED_GAMES_DIRECTORY and create a tree from it.
'''
def create_tree(args):
os.chdir(SAVED_GAMES_DIRECTORY)
g = gambit.Game.read_game(GAME_FILE)
os.chdir(PARENT_DIRECTORY)
return g
'''
Solve the game.
'''
def solve_game(args):
# create solver
solver = gambit.nash.ExternalEnumMixedSolver()
# solve game
solutions = solver.solve(g)
return solutions
'''
Create a solutions directory and save the solutions there.
'''
def print_solutions(args):
# create directory and cd in
os.mkdir(SOLUTIONS_DIRECTORY)
os.chdir(SOLUTIONS_DIRECTORY)
# create file
file_name = "{}-PSP.nfg".format(time.strftime("%Y-%m-%d %H:%M:%S"))
target_file = open(file_name, 'w')
# print solutions
for solution in solutions:
target_file.write("{}\n".format(str(solution)))
# go back out
os.chdir(PARENT_DIRECTORY)
if __name__ == '__main__':
# directory names
PARENT_DIRECTORY = ".."
SAVED_GAMES_DIRECTORY = "saved"
SOLUTIONS_DIRECTORY = "Solutions-for-PSP-Games-{}".format(time.strftime("%Y-%m-%d %H:%M:%S"))
# get file name
if len(sys.argv) != 2:
print("ERROR: Please supply a filename in the {} directory".format(SAVED_GAMES_DIRECTORY))
sys.exit(2)
else:
GAME_FILE = sys.argv[1]
# read file and create game tree
g = compute_time_of(1, "Creating Tree", create_tree)
# solve the game
solutions = compute_time_of(2, "Solving Game", solve_game)
# print the solutions to a file
compute_time_of(3, "Printing Solutions", print_solutions)
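# ------------------------------------------------------------------------------
# Illustrative note (not from this repository): compute_time_of is imported from
# a local utils module that is not shown here. One plausible shape, inferred
# only from how it is called above, purely for illustration:
#
#   def compute_time_of(step, description, func, *args):
#       start = time.time()
#       result = func(args)
#       print("({}) {}: took {:.2f} seconds".format(step, description, time.time() - start))
#       return result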
|
gpl-2.0
| 2,718,975,047,332,783,600
| 24.272727
| 98
| 0.642086
| false
| 3.264188
| false
| false
| false
|
Stargrazer82301/CAAPR
|
CAAPR/CAAPR_AstroMagic/PTS/pts/core/basics/map.py
|
1
|
3291
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.core.basics.map Contains the Map class, a dictionary-like object that provides
# access to its values by using the 'dot' notation.
# Import standard modules
import warnings
# -----------------------------------------------------------------
class Map(dict):
"""
    With this class you can use the Map object like an ordinary dictionary (including json serialization) or with the dot notation.
Credit: 'epool' (http://stackoverflow.com/questions/2352181/how-to-use-a-dot-to-access-members-of-dictionary)
Example:
m = Map({'first_name': 'Eduardo'}, last_name='Pool', age=24, sports=['Soccer'])
"""
def __init__(self, *args, **kwargs):
super(Map, self).__init__(*args, **kwargs)
for arg in args:
if isinstance(arg, dict):
for k, v in arg.iteritems():
self[k] = v
if kwargs:
for k, v in kwargs.iteritems():
self[k] = v
# -----------------------------------------------------------------
def __getattr__(self, attr):
if attr.startswith("__") and attr.endswith("__"): raise AttributeError
return self.get(attr)
# -----------------------------------------------------------------
def __setattr__(self, key, value):
if key.startswith("__") and key.endswith("__"): super(Map, self).__setattr__(key, value)
self.__setitem__(key, value)
# -----------------------------------------------------------------
def __setitem__(self, key, value):
super(Map, self).__setitem__(key, value)
self.__dict__.update({key: value})
# -----------------------------------------------------------------
def __delattr__(self, item):
self.__delitem__(item)
# -----------------------------------------------------------------
def __delitem__(self, key):
super(Map, self).__delitem__(key)
del self.__dict__[key]
# -----------------------------------------------------------------
def set_items(self, items):
"""
This function allows setting multiple items in the Map from a dictionary (or dictionary-like)
:param items:
:return:
"""
# Loop over all the items in the 'items' dictionary
for key in items:
# Check whether an item with this key exists in this Map
if key in self:
# Check if the item is composed of other options (i.e. this is a nested Map), or if it is just a simple variable
if isinstance(self[key], Map): self[key].set_items(items[key])
# If it is a simple variable, just set the corresponding item of this Map
else: self[key] = items[key]
# If the key is not present, show a warning
else: warnings.warn("An item with the key '" + key + "' is not present in the Map")
# -----------------------------------------------------------------
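# Illustrative usage (not part of the original module), showing the dot notation
# and set_items() merging values into a nested Map; the keys and values are invented:
#
#   settings = Map(output=Map(path="out.fits", overwrite=False), level="info")
#   settings.level              # -> "info"   (same as settings["level"])
#   settings.output.path        # -> "out.fits"
#   settings.set_items({"output": {"overwrite": True}, "level": "debug"})
#   settings.output.overwrite   # -> True
# -----------------------------------------------------------------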
|
mit
| 8,885,819,028,416,556,000
| 35.555556
| 128
| 0.452888
| false
| 4.747475
| false
| false
| false
|
legalsylvain/oca-custom
|
__unported__/oca_freestore/models/github_organization.py
|
1
|
5227
|
# -*- coding: utf-8 -*-
# Copyright (C) 2016-Today: Odoo Community Association (OCA)
# @author: Sylvain LE GAL (https://twitter.com/legalsylvain)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import models, fields, api
class GithubOrganization(models.Model):
_name = 'github.organization'
_inherit = ['github.connector']
_order = 'name'
# Columns Section
name = fields.Char(string='Organization Name', required=True)
billing_email = fields.Char(string='Billing Email', readonly=True)
image = fields.Binary(string='Image', readonly=True)
github_login = fields.Char(
string='Github Name', required=True, help="The technical name of"
" your organization on github.\n\nShould be organization_name if the"
" url of your organization is https://github.com/organization_name/")
description = fields.Char(string='Description', readonly=True)
email = fields.Char(string='Email', readonly=True)
website_url = fields.Char(string='Website URL', readonly=True)
location = fields.Char(string='Location', readonly=True)
public_member_ids = fields.Many2many(
string='Members', comodel_name='res.partner',
relation='github_organization_partner_rel', column1='organization_id',
column2='partner_id', readonly=True)
public_member_qty = fields.Integer(
string='Members Quantity', compute='compute_public_member_qty',
store=True)
repository_ids = fields.Many2many(
string='Repositories', comodel_name='github.repository',
relation='github_organization_repository_rel',
column1='organization_id', column2='repository_id', readonly=True)
repository_qty = fields.Integer(
string='Repositories Quantity', compute='compute_repository_qty',
store=True)
organization_serie_ids = fields.One2many(
string='Organization Series',
comodel_name='github.organization.serie',
inverse_name='organization_id')
ignore_repository_names = fields.Text(
        string='Ignored Repositories', help="Set here repository names you"
        " want to ignore. One repository per line. Example:\n"
"odoo-community.org\n"
"OpenUpgrade\n")
# Compute Section
@api.multi
@api.depends('public_member_ids')
def compute_public_member_qty(self):
for organization in self:
organization.public_member_qty =\
len(organization.public_member_ids)
@api.multi
@api.depends('repository_ids', 'repository_ids.organization_id')
def compute_repository_qty(self):
for organization in self:
organization.repository_qty =\
len(organization.repository_ids)
# Custom Section
def github_2_odoo(self, data):
return {
'name': data['name'],
'description': data['description'],
'location': data['location'],
'website_url': data['blog'],
'email': data['email'],
'billing_email': data['billing_email'],
'image': self.get_base64_image_from_github(data['avatar_url']),
}
# Action Section
@api.multi
def button_full_synchronize(self):
return self.button_synchronize(True)
@api.multi
def button_light_synchronize(self):
return self.button_synchronize(False)
@api.multi
def button_synchronize(self, full):
partner_obj = self.env['res.partner']
repository_obj = self.env['github.repository']
team_obj = self.env['github.team']
for organization in self:
# Get organization data
data = self.get_data_from_github(
'organization', [organization.github_login])
organization.write(self.github_2_odoo(data))
# Get Members datas
member_ids = []
for data in self.get_datalist_from_github(
'organization_members', [organization.github_login]):
partner = partner_obj.create_or_update_from_github(data, full)
member_ids.append(partner.id)
organization.public_member_ids = member_ids
# Get Repositories datas
repository_ids = []
ignored_list = organization.ignore_repository_names and\
organization.ignore_repository_names.split("\n") or []
for data in self.get_datalist_from_github(
'organization_repositories', [organization.github_login]):
if data['name'] not in ignored_list:
repository = repository_obj.create_or_update_from_github(
organization.id, data, full)
repository_ids.append(repository.id)
organization.repository_ids = repository_ids
# Get Teams datas
team_ids = []
for data in self.get_datalist_from_github(
'organization_teams', [organization.github_login]):
team = team_obj.create_or_update_from_github(
organization.id, data, full)
team_ids.append(team.id)
organization.team_ids = team_ids
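# ------------------------------------------------------------------------------
# Illustrative note (not part of the original model): github_2_odoo() above maps
# the organization payload returned by the GitHub API to Odoo field values. A
# hypothetical input and its result (addresses and URLs are invented):
#
#   data = {'name': 'OCA', 'description': 'Odoo Community Association',
#           'location': 'World', 'blog': 'https://odoo-community.org',
#           'email': 'info@example.org', 'billing_email': 'billing@example.org',
#           'avatar_url': 'https://example.org/avatar.png'}
#   vals = organization.github_2_odoo(data)
#   # -> {'name': 'OCA', ..., 'website_url': 'https://odoo-community.org',
#   #     'image': <base64 content fetched from avatar_url>}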
|
agpl-3.0
| 4,811,223,040,879,998,000
| 36.604317
| 78
| 0.62005
| false
| 4.178257
| false
| false
| false
|
Edern76/MatchsticksGame
|
name.py
|
1
|
11395
|
#!/usr/bin/env python3
# -*- coding: utf8 -*-
import threading, time
from tkinter import *
from tkinter.messagebox import *
status = '' #Variable used both to indicate whether program execution can continue (i.e. whether a valid name (or several, depending on the situation) has been entered and OK clicked) and to retrieve the entered name(s).
class simpleGUI(threading.Thread):
'''
    Class associated with the interface asking for a single name.
    See the description of the GUI class in the game.py file for the explanation about the Thread (this is indeed a similar situation, where another program has to run in parallel with the interface)
'''
def __init__(self, attachTo = None, rootConsole = None):
threading.Thread.__init__(self)
self.attachTo = attachTo
self.rootConsole = None
def run(self):
if self.attachTo is None:
mainCon = Tk()
else:
            mainCon = self.attachTo #Same remark as for the game.py interface: only one window created with the Tk() function is allowed per application
global status
nom = StringVar("") #Les variables associées à des entrées Tkinter ne sont pas du même type que les variables Python traditionnelles. Pour les chaînes de caractères, ce sont des instances de la classe StringVar (définie dans le module Tkinter)
status = '' #On réinitialise la variable status
titleFrame = Frame(mainCon)
title = Label(titleFrame, text = 'Veuillez entrer votre nom.', justify = CENTER) #On crée le message demandant le nom
title.pack(fill = X, expand = Y, anchor = CENTER) #On fait en sorte que le label remplisse tout l'espace horizontal de la fenêtre, afin que le texte soit centré
titleFrame.grid(row = 0, column = 0, columnspan = 8) #La méthode grid permet de placer les éléments selon un tableau, dont le fonctionnement peut rappeler celui d'un tableur. Ici, on place titleFrame dans la 'cellule' (0,0), et on lui fait occuper 8 colonnes.
field = Entry(mainCon, textvariable = nom, justify = CENTER) #On crée le champ de texte dans lequel on va entrer le nom
field.grid(row = 1, column = 2, columnspan = 4)
def cancel():
'''
            Function called when the Cancel button is clicked
'''
global status
status = None
mainCon.destroy()
def confirm():
'''
            Function called when the OK button is clicked
'''
global status
            #NB: To convert a StringVar into a 'classic' character string, the get() method has to be called. Otherwise, reading the value of the StringVar directly gives 'PY_VAR0'
if nom.get() == "" or " " in nom.get():
showerror('Saisie invalide', "Le nom ne doit pas contenir d'espace ou être vide")
status = ''
elif len(nom.get()) > 12:
showerror('Saisie invalide', 'Le nom ne doit pas excéder 12 caractères')
status = ''
            elif nom.get() == "None": #Development artefact: older versions used the character string 'None' rather than the special None 'symbol' for cancellation, because of a problem in the askSimpleName function. That problem has since been solved, so in theory having the string 'None' as a name should no longer cause any problem, but we preferred to keep this check just in case.
showerror('Saisie invalide', 'Le nom ne doit pas être "None"')
status = ''
            elif '\n' in nom.get() or chr(92) in nom.get(): #The chr(92) function returns the 92nd character of the ASCII table, i.e. the backslash (\). It cannot be written directly, because it is a symbol reserved by Python (combined with a letter it modifies character strings, for example by adding line breaks, and we do not want such things in our names, so as not to cause display errors)
showerror('Saisie invalide', "Le nom ne doit pas contenir d'antislash")
            else: #If no anomaly was encountered
                status = nom.get() #The status variable takes the value entered in the text field
mainCon.destroy()
buttonOk = Button(mainCon, text = 'OK', width = 5, command = confirm)
buttonCancel = Button(mainCon, text = 'Annuler', width = 5, command = cancel)
buttonOk.grid(row = 5, column = 1, columnspan = 3)
buttonCancel.grid(row = 5, column = 4, columnspan = 3)
        colNum, rowNum = mainCon.grid_size() #Get the size of the console in which the interface is displayed, in terms of rows and columns (as used by the grid() method)
for x in range(colNum):
            mainCon.grid_columnconfigure(x, minsize = 25) #Make all columns, even empty ones, at least 25 pixels wide. This prevents empty columns from being invisible.
for y in range(rowNum):
            mainCon.grid_rowconfigure(y, minsize = 5) #Same principle as above, but with rows and a minimum height of 5 pixels
if self.attachTo is None:
mainCon.mainloop()
else:
            mainCon.update() #Development artefact: should no longer be needed in theory (it dates from when we tried to have several windows created with Tk(), which creates a huge number of errors and problems, even after adding this line), but kept just in case.
if self.rootConsole is not None:
            self.rootConsole.update() #Same remark as for mainCon.update().
class multiGUI(threading.Thread):
'''
    Class associated with the interface asking for two names.
    Since it has a lot in common with the simpleGUI class, most of the redundant lines are not commented again in this class.
'''
def __init__(self, attachTo = None, rootConsole = None):
threading.Thread.__init__(self)
self.attachTo = attachTo
self.rootConsole = None
def run(self):
if self.attachTo is None:
mainCon = Tk()
else:
mainCon = self.attachTo
global status
nom1 = StringVar("")
nom2 = StringVar("")
status = ''
titleFrame = Frame(mainCon)
title = Label(titleFrame, text = 'Veuillez entrer vos noms.', justify = CENTER)
title.pack(fill = X, expand = Y, anchor = CENTER)
titleFrame.grid(row = 0, column = 0, columnspan = 8)
        P1label = Label(mainCon, text = "Joueur 1 :") #Label to the left of the text field for player 1's name
        P1field = Entry(mainCon, textvariable = nom1, justify = CENTER) #Text field for player 1's name
P1label.grid(row = 2, column = 0, columnspan = 2)
P1field.grid(row = 2, column = 2, columnspan = 6)
        P2label = Label(mainCon, text = "Joueur 2 :") #Label to the left of the text field for player 2's name
        P2field = Entry(mainCon, textvariable = nom2, justify = CENTER) #Text field for player 2's name
P2label.grid(row = 3, column = 0, columnspan = 2)
P2field.grid(row = 3, column = 2, columnspan = 6)
def cancel():
global status
status = None
mainCon.destroy()
def confirm():
global status
if (nom1.get() == "" or " " in nom1.get()) or (nom2.get() == "" or " " in nom2.get()):
showerror('Saisie invalide', "Le nom ne doit pas contenir d'espace ou être vide")
status = ''
elif (len(nom1.get()) > 12) or (len(nom2.get()) > 12) :
showerror('Saisie invalide', 'Le nom ne doit pas excéder 12 caractères')
status = ''
elif (nom1.get() == "None") or (nom2.get() == "None"):
showerror('Saisie invalide', 'Le nom ne doit pas être "None"')
status = ''
elif nom1.get() == nom2.get():
showerror('Saisie invalide', 'Les deux noms ne doivent pas être identiques')
            elif ('\n' in nom1.get() or chr(92) in nom1.get()) or ('\n' in nom2.get() or chr(92) in nom2.get()): #The chr(92) function returns the 92nd character of the ASCII table, i.e. the backslash (\). It cannot be written directly, because it is a symbol reserved by Python (combined with a letter it modifies character strings, for example by adding line breaks, and we do not want such things in our names, so as not to cause display errors)
showerror('Saisie invalide', "Le nom ne doit pas contenir d'antislash")
else:
                status = (nom1.get(), nom2.get()) #The status variable takes the value of a tuple (an object similar to a list, but immutable) containing player 1's name and player 2's name
mainCon.destroy()
buttonOk = Button(mainCon, text = 'OK', width = 5, command = confirm)
buttonCancel = Button(mainCon, text = 'Annuler', width = 5, command = cancel)
buttonOk.grid(row = 5, column = 1, columnspan = 3)
buttonCancel.grid(row = 5, column = 4, columnspan = 3)
colNum, rowNum = mainCon.grid_size()
for x in range(colNum):
mainCon.grid_columnconfigure(x, minsize = 25)
for y in range(rowNum):
mainCon.grid_rowconfigure(y, minsize = 5)
if self.attachTo is None:
mainCon.mainloop()
else:
mainCon.update()
if self.rootConsole is not None:
self.rootConsole.update()
def askSimpleName(attachTo = None, rootConsole = None):
'''
    Function used to ask for a single name
'''
global status
status = ''
    asker = simpleGUI(attachTo, rootConsole) #Create a simpleGUI instance
    asker.start() #Start the interface Thread
    time.sleep(3) #Give the interface time to load
    while True:
        if status == '': #If no name has been entered yet
            continue #Keep looping (i.e. the function does nothing until the interface has returned a name)
        else:
            break #Otherwise, exit the loop
    return status #Return the entered name
def askMultipleNames(attachTo = None, rootConsole = None):
'''
    Function used to ask for several names
    Nearly identical to the askSimpleName function, so the redundant parts have not been commented again
'''
global status
status = ''
asker = multiGUI(attachTo, rootConsole)
asker.start()
time.sleep(3)
while True:
if status == "":
continue
else:
break
    return status #Return the names. Note that this is no longer a character string as in askSimpleName, but a tuple made of two character strings.
if __name__ == '__main__':
from tkinter.messagebox import *
showerror('Erreur', 'Veuillez lancer main.pyw pour démarrer le programme')
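# --- Illustrative usage (not part of the original module) ---------------------
# From another module, assuming this file is importable as 'name':
#
#   from name import askSimpleName, askMultipleNames
#   player = askSimpleName()        # a string, or None if the user cancelled
#   names = askMultipleNames()      # a (name1, name2) tuple, or None if cancelled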
|
mit
| -1,213,387,413,839,428,000
| 59.473118
| 523
| 0.639459
| false
| 3.196078
| false
| false
| false
|
DataONEorg/d1_python
|
gmn/src/d1_gmn/app/management/commands/diag-restore-sciobj.py
|
1
|
8217
|
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Attempt to restore missing local Science Objects from replicas.
A DataONE Science Object is a block of bytes with an associated System Metadata XML
doc.
GMN stores the bytes in a directory hierarchy on the local filesystem and the System
Metadata in a Postgres database.
This will attempt to restore objects that have been lost or damaged due to data
corruption or loss in the filesystem or database.
This procedure should be able to always restore system metadata. However, restore of
object bytes depends on a valid replica being available on the CN or another MN.
The procedure is as follows:
- For the purposes of this command, "damaged" and "lost" data are equivalents. Both are
handled with the same software procedure, where an attempt is made to completely
replace the data with a recovered version. So this documentation uses "lost" to
describe both lost and damaged data.
- The CN is queried for a list of PIDs of objects for which this GMN is registered as
either the authoritative source, or holder of a replica.
- For each PID, both the System Metadata and the object bytes are checked to be
available and undamaged on this GMN.
- System Metadata is checked by fully generating the System Metadata document from
the database, then validating it against the XMLSchema for the DataONE types. The
System metadata is considered to be lost if any step of the procedure cannot be
completed.
- Object bytes are checked by recalculating the checksum from the currently stored
bytes (if any) and comparing it with the correct checksum, stored in the System
Metadata. The object is considered to be lost if unable to generate a checksum or
if the checksum does not match the checksum stored for the object.
- Proxy objects are checked in the same way, except that the checksum is recalculated
on the object bytes as streamed from its location on the 3rd party server.
- Lost System Metadata is always restored from the CN, which holds a copy of system
metadata for all objects that are known to the CN, which will include the objects for
which the CN returned the PIDs in the initial query that this procedure performed.
- For lost object bytes, the restore process depends on the type of storage used for
the object bytes, which is either local filesystem or proxy from 3rd party server.
- The bytes for objects stored in the filesystem, which is the most common situation,
are restored by querying the CN for a list of known locations for the object. If this
GMN, where the object bytes are known to be lost, is the only available location
listed, the object bytes cannot be restored by this command. If the object bytes are
not available elsewhere, the object will have to be considered as lost by DataONE. It
should be set as archived in the CN system metadata, so that it is not listed in any
further search results. To help prevent this from happening, make sure that all
objects on this GMN have a replication policy allowing replicas to be distributed to
other MNs in the DataONE federation.
- Proxy objects are objects where the bytes are stored on a 3rd party server instead of
on the local filesystem, and GMN stores only a URL reference to the location. Support
for proxy objects is a vendor specific GMN feature, so the URL is not part of the
official system metadata. As the URL is stored together with the system metadata in
the database, lost system metadata will mean lost object reference URL as well. Since
the URL is not in the system metadata, restoring the system metadata from the CN will
not restore the URL and so will not recover the actual location.
- Since object bytes for proxy objects are not stored locally, lost object bytes will
either have been caused by lost URL reference, which is handled as described above,
or by the 3rd party server no longer returning the object bytes at the URL reference
  location. In both cases, the only remaining option for a fully automated restore of
the object depends on a valid replica being available on the CN or another MN, in
which case GMN can restore the object as a regular local object from the replica.
However, this converts the object from a proxy object to a local object. Depending on
the available hardware vs. the added storage space that will be required, this may
not be desirable, so the option to convert proxy objects to local if required for
automated restore is disabled by default. See --help for how to set this option.
- See the documentation for ``audit-proxy-sciobj`` for information on how to repair
proxy objects that could not be restored automatically by this command.
"""
import d1_gmn.app.did
import d1_gmn.app.mgmt_base
import d1_gmn.app.sysmeta
class Command(d1_gmn.app.mgmt_base.GMNCommandBase):
def __init__(self, *args, **kwargs):
super().__init__(__doc__, __name__, *args, **kwargs)
def add_components(self, parser):
self.using_single_instance(parser)
def handle_serial(self):
pass
# async def fix(self, async_client, url):
# self.log.info("Processing: {}".format(url))
# proxy_tracker = self.tracker("Create missing proxy objects")
#
# sysmeta_pyxb = await async_client.get_system_metadata(url)
#
# sysmeta_checksum_pyxb = sysmeta_pyxb.checksum
# # print(d1_common.checksum.format_checksum(calculated_checksum_pyxb))
# calculated_checksum_pyxb = await self.calculate_checksum(
# async_client, url, sysmeta_checksum_pyxb.algorithm
# )
# # print(d1_common.checksum.format_checksum(sysmeta_checksum_pyxb))
# if not d1_common.checksum.are_checksums_equal(
# sysmeta_checksum_pyxb, calculated_checksum_pyxb
# ):
# proxy_tracker.event(
# "Skipped: Checksum mismatch", f'url="{url}"', is_error=True
# )
#
# d1_gmn.app.sysmeta.create_or_update(sysmeta_pyxb, url)
#
# proxy_tracker.event("Fixed", f'url="{url}"')
#
# async def is_online(self, async_client, url):
# try:
# async with await async_client.session.head(url) as response:
# # Handle redirect responses as well, as redirects are not followed for
# # HEAD requests.
# return response.status in (200, 300, 301, 302, 303, 307, 308)
# except aiohttp.ClientError:
# return False
#
# async def calculate_checksum(self, async_client: t.D1Client, url: str, checksum_algo: str) -> t.Checksum:
# """Calculate the checksum on proxy object stored on a 3rd party server.
#
# The objected is calculated on the stream, without bytes being buffered in memory
# or stored locally.
#
# Returns:
# A DataONE Checksum PyXB type.
#
# """
# checksum_calculator = d1_common.checksum.get_checksum_calculator_by_dataone_designator(
# checksum_algo
# )
# async with await async_client.session.get(url) as response:
# async for chunk_str, _ in response.content.iter_chunks():
# checksum_calculator.update(chunk_str)
#
# checksum_pyxb = d1_common.types.dataoneTypes.checksum(
# checksum_calculator.hexdigest()
# )
# checksum_pyxb.algorithm = checksum_algo
# return checksum_pyxb
|
apache-2.0
| -1,802,804,323,378,339,300
| 48.203593
| 111
| 0.720579
| false
| 4.098254
| false
| false
| false
|
Russell-Jones/django-wiki
|
wiki/migrations/0001_initial.py
|
1
|
18236
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Article'
db.create_table('wiki_article', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('current_revision', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='current_set', unique=True, null=True, to=orm['wiki.ArticleRevision'])),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_orm_label], null=True, blank=True)),
('group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.Group'], null=True, blank=True)),
('group_read', self.gf('django.db.models.fields.BooleanField')(default=True)),
('group_write', self.gf('django.db.models.fields.BooleanField')(default=True)),
('other_read', self.gf('django.db.models.fields.BooleanField')(default=True)),
('other_write', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('wiki', ['Article'])
# Adding model 'ArticleForObject'
db.create_table('wiki_articleforobject', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('article', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wiki.Article'])),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='content_type_set_for_articleforobject', to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('is_mptt', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('wiki', ['ArticleForObject'])
# Adding unique constraint on 'ArticleForObject', fields ['content_type', 'object_id']
db.create_unique('wiki_articleforobject', ['content_type_id', 'object_id'])
# Adding model 'ArticleRevision'
db.create_table('wiki_articlerevision', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('revision_number', self.gf('django.db.models.fields.IntegerField')()),
('user_message', self.gf('django.db.models.fields.TextField')(blank=True)),
('automatic_log', self.gf('django.db.models.fields.TextField')(blank=True)),
('ip_address', self.gf('django.db.models.fields.IPAddressField')(max_length=15, null=True, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_orm_label], null=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('previous_revision', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wiki.ArticleRevision'], null=True, blank=True)),
('deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
('locked', self.gf('django.db.models.fields.BooleanField')(default=False)),
('article', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wiki.Article'])),
('content', self.gf('django.db.models.fields.TextField')(blank=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=512)),
('redirect', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='redirect_set', null=True, to=orm['wiki.Article'])),
))
db.send_create_signal('wiki', ['ArticleRevision'])
# Adding unique constraint on 'ArticleRevision', fields ['article', 'revision_number']
db.create_unique('wiki_articlerevision', ['article_id', 'revision_number'])
# Adding model 'URLPath'
db.create_table('wiki_urlpath', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, null=True, blank=True)),
('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'])),
('parent', self.gf('mptt.fields.TreeForeignKey')(blank=True, related_name='children', null=True, to=orm['wiki.URLPath'])),
('lft', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('rght', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('tree_id', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('level', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
))
db.send_create_signal('wiki', ['URLPath'])
# Adding unique constraint on 'URLPath', fields ['site', 'parent', 'slug']
db.create_unique('wiki_urlpath', ['site_id', 'parent_id', 'slug'])
# Adding model 'ArticlePlugin'
db.create_table('wiki_articleplugin', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('article', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wiki.Article'])),
('deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('wiki', ['ArticlePlugin'])
# Adding model 'ReusablePlugin'
db.create_table('wiki_reusableplugin', (
('articleplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['wiki.ArticlePlugin'], unique=True, primary_key=True)),
))
db.send_create_signal('wiki', ['ReusablePlugin'])
# Adding M2M table for field articles on 'ReusablePlugin'
db.create_table('wiki_reusableplugin_articles', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('reusableplugin', models.ForeignKey(orm['wiki.reusableplugin'], null=False)),
('article', models.ForeignKey(orm['wiki.article'], null=False))
))
db.create_unique('wiki_reusableplugin_articles', ['reusableplugin_id', 'article_id'])
# Adding model 'RevisionPlugin'
db.create_table('wiki_revisionplugin', (
('articleplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['wiki.ArticlePlugin'], unique=True, primary_key=True)),
('revision', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wiki.ArticleRevision'])),
))
db.send_create_signal('wiki', ['RevisionPlugin'])
def backwards(self, orm):
# Removing unique constraint on 'URLPath', fields ['site', 'parent', 'slug']
db.delete_unique('wiki_urlpath', ['site_id', 'parent_id', 'slug'])
# Removing unique constraint on 'ArticleRevision', fields ['article', 'revision_number']
db.delete_unique('wiki_articlerevision', ['article_id', 'revision_number'])
# Removing unique constraint on 'ArticleForObject', fields ['content_type', 'object_id']
db.delete_unique('wiki_articleforobject', ['content_type_id', 'object_id'])
# Deleting model 'Article'
db.delete_table('wiki_article')
# Deleting model 'ArticleForObject'
db.delete_table('wiki_articleforobject')
# Deleting model 'ArticleRevision'
db.delete_table('wiki_articlerevision')
# Deleting model 'URLPath'
db.delete_table('wiki_urlpath')
# Deleting model 'ArticlePlugin'
db.delete_table('wiki_articleplugin')
# Deleting model 'ReusablePlugin'
db.delete_table('wiki_reusableplugin')
# Removing M2M table for field articles on 'ReusablePlugin'
db.delete_table('wiki_reusableplugin_articles')
# Deleting model 'RevisionPlugin'
db.delete_table('wiki_revisionplugin')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'wiki.article': {
'Meta': {'object_name': 'Article'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_revision': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'current_set'", 'unique': 'True', 'null': 'True', 'to': "orm['wiki.ArticleRevision']"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'group_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'group_write': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'other_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'other_write': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'wiki.articleforobject': {
'Meta': {'unique_together': "(('content_type', 'object_id'),)", 'object_name': 'ArticleForObject'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Article']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_articleforobject'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_mptt': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'wiki.articleplugin': {
'Meta': {'object_name': 'ArticlePlugin'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Article']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'wiki.articlerevision': {
'Meta': {'ordering': "('created',)", 'unique_together': "(('article', 'revision_number'),)", 'object_name': 'ArticleRevision'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Article']"}),
'automatic_log': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'previous_revision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.ArticleRevision']", 'null': 'True', 'blank': 'True'}),
'redirect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'redirect_set'", 'null': 'True', 'to': "orm['wiki.Article']"}),
'revision_number': ('django.db.models.fields.IntegerField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'}),
'user_message': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'wiki.reusableplugin': {
'Meta': {'object_name': 'ReusablePlugin', '_ormbases': ['wiki.ArticlePlugin']},
'articleplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.ArticlePlugin']", 'unique': 'True', 'primary_key': 'True'}),
'articles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'shared_plugins_set'", 'symmetrical': 'False', 'to': "orm['wiki.Article']"})
},
'wiki.revisionplugin': {
'Meta': {'object_name': 'RevisionPlugin', '_ormbases': ['wiki.ArticlePlugin']},
'articleplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.ArticlePlugin']", 'unique': 'True', 'primary_key': 'True'}),
'revision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.ArticleRevision']"})
},
'wiki.urlpath': {
'Meta': {'unique_together': "(('site', 'parent', 'slug'),)", 'object_name': 'URLPath'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['wiki.URLPath']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
}
}
complete_apps = ['wiki']
|
gpl-3.0
| -3,276,954,538,703,390,000
| 67.556391
| 209
| 0.592948
| false
| 3.736885
| false
| false
| false
|
MrYsLab/razmq
|
razmq/motors/motors.py
|
1
|
5067
|
"""
Copyright (c) 2016 Alan Yorinks All right reserved.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import time
import signal
import sys
import argparse
from razmq.razbase.razbase import Razbase
class Motors(Razbase):
"""
This is the user side interface for motor control
Move left motor forward with a speed of 100:
{'command': 'left_motor_forward', 'speed': 100 }
Move left motor reverse with a speed of 100:
{'command': 'left_motor_reverse', 'speed': 100 }
    Move right motor forward with a speed of 100:
    {'command': 'right_motor_forward', 'speed': 100 }
    Move right motor reverse with a speed of 100:
    {'command': 'right_motor_reverse', 'speed': 100 }
    Brake left motor
    {'command': 'left_motor_brake' }
    Brake both motors
    {'command': 'brake both'}
    Coast both motors
    {'command': 'coast both'}
    Coast left motor
    {'command': 'left_motor_coast' }
    Brake right motor
    {'command': 'right_motor_brake' }
    Coast right motor
    {'command': 'right_motor_coast' }
"""
def __init__(self, back_plane_ip_address=None, subscriber_port='43125', publisher_port='43124', process_name=None):
"""
:param back_plane_ip_address:
:param subscriber_port:
:param publisher_port:
"""
# initialize the base class
super().__init__(back_plane_ip_address, subscriber_port, publisher_port, process_name=process_name)
# allow time for connection
time.sleep(.03)
self.set_subscriber_topic('user_motor_command')
self.publisher_topic = 'system_motor_command'
# receive loop is defined in the base class
self.receive_loop()
# noinspection PyMethodMayBeStatic
def incoming_message_processing(self, topic, payload):
"""
Override this method with a message processor for the application
:param topic: Message Topic string
:param payload: Message Data
:return:
"""
try:
command = payload['command']
if command == 'left_motor_forward':
speed = payload['speed']
payload = {'command': 'left_motor_forward', 'speed': speed}
elif command == 'left_motor_reverse':
speed = payload['speed']
payload = {'command': 'left_motor_reverse', 'speed': speed}
elif command == 'left_motor_brake':
payload = {'command': 'left_motor_brake'}
elif command == 'left_motor_coast':
payload = {'command': 'left_motor_coast'}
elif command == 'right_motor_forward':
speed = payload['speed']
payload = {'command': 'right_motor_forward', 'speed': speed}
elif command == 'right_motor_reverse':
speed = payload['speed']
payload = {'command': 'right_motor_reverse', 'speed': speed}
elif command == 'right_motor_brake':
payload = {'command': 'right_motor_brake'}
elif command == 'right_motor_coast':
payload = {'command': 'right_motor_coast'}
else:
raise ValueError
self.publish_payload(payload, self.publisher_topic)
except ValueError:
            print('motors topic: ' + topic + ' payload: ' + str(payload))
raise
def motors():
# noinspection PyShadowingNames
parser = argparse.ArgumentParser()
parser.add_argument("-b", dest="back_plane_ip_address", default="None",
help="None or IP address used by Back Plane")
parser.add_argument("-n", dest="process_name", default="Motors Front End", help="Set process name in banner")
args = parser.parse_args()
kw_options = {}
if args.back_plane_ip_address != 'None':
kw_options['back_plane_ip_address'] = args.back_plane_ip_address
kw_options['process_name'] = args.process_name
my_motors = Motors(**kw_options)
# signal handler function called when Control-C occurs
# noinspection PyShadowingNames,PyUnusedLocal,PyUnusedLocal
def signal_handler(signal, frame):
print('Control-C detected. See you soon.')
my_motors.clean_up()
sys.exit(0)
# listen for SIGINT
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
if __name__ == '__main__':
motors()
|
gpl-3.0
| 8,621,895,000,938,363,000
| 31.273885
| 119
| 0.623051
| false
| 4.073151
| false
| false
| false
|
kyhau/reko
|
reko/speechreko.py
|
1
|
3825
|
"""
NOTE: this example requires PyAudio because it uses the Microphone class
"""
import os
import speech_recognition as sr
import time
from playsound import playsound
from reko.polly import Polly
from reko.reko import Reko
class SpeechReko(Reko):
def __init__(self, profile, collection_id, audio_on=False):
Reko.__init__(self, profile, collection_id)
self._audio_on = audio_on
self._polly = Polly(profile)
def signin(self, id=None):
"""
:param id: (optional) external_image_id
:return: external_image_id or None if not found
"""
ret_id = super(SpeechReko, self).signin(id)
if self._audio_on is True:
self.speak("Hello {}!".format(ret_id) \
if ret_id is not None else "Sorry! I do not recognise you.")
return ret_id
def signup(self, id):
"""
:param id: external_image_id
:return:
"""
succeeded = super(SpeechReko, self).signup(id)
if self._audio_on is True:
self.speak("Hello {}!".format(id) if succeeded is True else "Sorry {}! I have problem remembering you!".format(id))
return succeeded
def take_picture(self):
"""Connect to the webcam and capture an image and save to the give file.
"""
succeeded = super(SpeechReko, self).take_picture()
if succeeded is False and self._audio_on:
self.speak("Sorry! I'm unable to connect to the camera.")
return succeeded
def speak(self, msg):
"""Create an audio file for the given msg and play it.
"""
if self._audio_on is False:
print(msg)
return True
filename = self._cache.get_filename(msg, "mp3")
filepath = self._cache.get_filepath(filename)
if os.path.exists(filepath):
SpeechReko.play_audio(filepath)
return True
if self._polly.synthesize_speech(text_message=msg, output_file=filepath) is True:
SpeechReko.play_audio(filepath)
return True
return False
@staticmethod
def play_audio(audio_file):
"""
Play sound
"""
playsound(audio_file)
def watching(self, interval_sec=30):
"""
"""
while True:
print("Watching ...")
try:
ret_id = super(SpeechReko, self).signin()
if ret_id and self._audio_on is True:
self.speak("Hello {}!".format(ret_id))
except Exception as e:
print("Error: {0}".format(e))
time.sleep(interval_sec)
def listening(self):
"""Obtain audio from the microphone
"""
while True:
recognizer = sr.Recognizer()
with sr.Microphone() as source:
print("Listening ...")
audio = recognizer.listen(source)
try:
input_msg = recognizer.recognize_google(audio)
if self.process_message(input_msg) is False:
break
except sr.UnknownValueError:
self.speak("Please say it again")
except sr.RequestError as e:
self.speak("I have problem listening to you")
print("Error: {0}".format(e))
def process_message(self, input_msg):
"""Process message and return False if stop listening
"""
print("You said " + input_msg)
# TODO still in progress, this part is tmp code
if 'bye' in input_msg or 'goodbye' in input_msg or 'good bye' in input_msg:
self.speak("Goodbye")
return False
if 'sign in' in input_msg or 'sign-in' in input_msg:
self.signin()
return True
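# --- Illustrative usage (not part of the original module) ---------------------
# 'default' and 'my-faces' below are invented placeholder values:
#
#   reko = SpeechReko(profile='default', collection_id='my-faces', audio_on=True)
#   reko.signin()       # try to recognise the current user and greet them aloud
#   reko.listening()    # keep listening for spoken commands until told goodbye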
|
mit
| -4,202,663,694,091,020,300
| 31.692308
| 127
| 0.554248
| false
| 4.056204
| false
| false
| false
|
travistang/late_fyt
|
ntm.py
|
1
|
1191
|
import tensorflow as tf
import numpy as np
class NTM(object):
def __init__(self,session, mem_size, mem_dim,controller):
self.sess = session
self.memory_dim = mem_dim
self.memory_length = mem_size
# construct memory variables
self.memory = [tf.Variable(np.zeros(self.memory_dim).astype(np.float32)) for _ in range(mem_size)]
self.controller = controller
self.write_vector = [tf.Variable(np.random.rand()) for _ in range(mem_size)]
# operations
self.read_op = tf.reduce_sum([a * b for (a,b) in zip(self.write_vector,self.memory)],0)
# finally initialize all variables
self.sess.run(tf.global_variables_initializer())
def read_vector(self):
self._normalize(self.write_vector)
return self.sess.run(self.read_op)
    def write(self, erase_v, add_v):
        # The original source left this method without a colon or body; the
        # standard NTM erase/add update (M_i <- M_i * (1 - w_i * e) + w_i * a)
        # is assumed here as a best-effort completion.
        self._normalize(self.write_vector)
        update_ops = [m.assign(m * (1. - w * erase_v) + w * add_v)
                      for m, w in zip(self.memory, self.write_vector)]
        return self.sess.run(update_ops)
# normalize a list of tf.Variable and return the new values
def _normalize(self,vec):
        # evaluate the L1 norm as a plain number so the zero check below is meaningful
        total = self.sess.run(tf.reduce_sum([tf.abs(v) for v in vec]))
        # check if everything is 0
        if total == 0.:
            return self.sess.run([v.assign(0.) for v in vec])
        else:
            return self.sess.run([v.assign(v / total) for v in vec])
if __name__ == '__main__':
with tf.Session() as sess:
ntm = NTM(sess,10,6,None)
print ntm.read_vector()
|
mit
| -5,308,125,148,231,301,000
| 28.073171
| 100
| 0.690176
| false
| 2.862981
| false
| false
| false
|
hp-storage/python-lefthandclient
|
hpelefthandclient/__init__.py
|
2
|
1185
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# (c) Copyright 2013-2016 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
HPE LeftHand REST Client
:Author: Kurt Martin
:Author: Walter A. Boring IV
:Copyright: Copyright 2013-2015 Hewlett Packard Enterprise Development LP
:License: Apache v2.0
"""
version_tuple = (2, 1, 0)
def get_version_string():
if isinstance(version_tuple[-1], str):
return '.'.join(map(str, version_tuple[:-1])) + version_tuple[-1]
return '.'.join(map(str, version_tuple))
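# For illustration (not in the original): a version_tuple of (2, 1, 0) yields
# '2.1.0', while a tuple ending in a string, e.g. (2, 1, 0, 'b1'), yields '2.1.0b1'.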
version = get_version_string()
"""Current version of HPELeftHandClient."""
|
apache-2.0
| 7,481,040,198,847,530,000
| 31.916667
| 78
| 0.714768
| false
| 3.703125
| false
| false
| false
|
migimigi/bme280-1
|
docs/conf.py
|
2
|
8446
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# bme280 documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import bme280
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'BME280 Python Driver'
copyright = u'2015, Kieran Brownlees'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = bme280.__version__
# The full version, including alpha/beta/rc tags.
release = bme280.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bme280doc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'bme280.tex',
u'BME280 Python Driver Documentation',
u'Kieran Brownlees', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bme280',
u'BME280 Python Driver Documentation',
[u'Kieran Brownlees'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'bme280',
u'BME280 Python Driver Documentation',
u'Kieran Brownlees',
'bme280',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
bsd-3-clause
| 7,603,664,539,946,308,000
| 29.712727
| 76
| 0.705186
| false
| 3.709267
| true
| false
| false
|
jeremiedecock/snippets
|
python/tkinter/python3/geometry_manager_pack_test_with_1_widget.py
|
1
|
6263
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# See: http://effbot.org/tkinterbook/frame.htm
import tkinter as tk
# WINDOW 1 (there should be only one "Tk" object) #########################
window1 = tk.Tk()
window1.title("Result Window")
widget1 = tk.Canvas(window1, bg="red", width=200, height=200)
widget1.create_text((100, 100), text="Widget 1", font="sans 16 bold", fill="white", anchor="c")
widget1.pack()
frame1_pack_info = widget1.pack_info()
# WINDOW 2 (Toplevel object) ##############################################
window2 = tk.Toplevel()
window2.title("Control Window")
window2.geometry("+200+200")
# Widget 1 frame ##################
frame_widget1 = tk.LabelFrame(window2, text="Widget 1", padx=5, pady=5)
frame_widget1.pack(fill=tk.X, padx=10, pady=5)
# Fill ########
# Must be none, x, y, or both
var_fill = tk.StringVar()
var_fill.set(frame1_pack_info['fill'])
def fill_callback():
widget1.pack_configure(fill=var_fill.get())
print("Widget 1:", widget1.pack_info())
rb_fill_none = tk.Radiobutton(frame_widget1, text="fill = none", variable=var_fill, value="none", command=fill_callback)
rb_fill_x = tk.Radiobutton(frame_widget1, text="fill = x", variable=var_fill, value="x", command=fill_callback)
rb_fill_y = tk.Radiobutton(frame_widget1, text="fill = y", variable=var_fill, value="y", command=fill_callback)
rb_fill_both = tk.Radiobutton(frame_widget1, text="fill = both", variable=var_fill, value="both", command=fill_callback)
rb_fill_none.pack(anchor=tk.W)
rb_fill_x.pack(anchor=tk.W)
rb_fill_y.pack(anchor=tk.W)
rb_fill_both.pack(anchor=tk.W)
# Separator
tk.Frame(frame_widget1, height=1, bd=1, relief=tk.SUNKEN).pack(fill=tk.X, padx=5, pady=5)
# Expand ######
var_expand = tk.IntVar()
var_expand.set(frame1_pack_info['expand'])
def expand_callback():
print(var_expand.get())
widget1.pack_configure(expand=var_expand.get())
print("Widget 1:", widget1.pack_info())
cb_expand = tk.Checkbutton(frame_widget1, text="expand", variable=var_expand, command=expand_callback)
cb_expand.pack(anchor=tk.W)
# Separator
tk.Frame(frame_widget1, height=1, bd=1, relief=tk.SUNKEN).pack(fill=tk.X, padx=5, pady=5)
# Side ########
# Must be top, bottom, left, or right
var_side = tk.StringVar()
var_side.set(frame1_pack_info['side'])
def side_callback():
widget1.pack_configure(side=var_side.get())
print("Widget 1:", widget1.pack_info())
rb_side_top = tk.Radiobutton(frame_widget1, text="side = top", variable=var_side, value="top", command=side_callback)
rb_side_bottom = tk.Radiobutton(frame_widget1, text="side = bottom", variable=var_side, value="bottom", command=side_callback)
rb_side_left = tk.Radiobutton(frame_widget1, text="side = left", variable=var_side, value="left", command=side_callback)
rb_side_right = tk.Radiobutton(frame_widget1, text="side = right", variable=var_side, value="right", command=side_callback)
rb_side_top.pack(anchor=tk.W)
rb_side_bottom.pack(anchor=tk.W)
rb_side_left.pack(anchor=tk.W)
rb_side_right.pack(anchor=tk.W)
# Separator
tk.Frame(frame_widget1, height=1, bd=1, relief=tk.SUNKEN).pack(fill=tk.X, padx=5, pady=5)
# Anchor ######
# Must be n, ne, e, se, s, sw, w, nw, or center
var_anchor = tk.StringVar()
var_anchor.set(frame1_pack_info['anchor'])
def anchor_callback():
widget1.pack_configure(anchor=var_anchor.get())
print("Widget 1:", widget1.pack_info())
rb_anchor_n = tk.Radiobutton(frame_widget1, text="anchor = n", variable=var_anchor, value="n", command=anchor_callback)
rb_anchor_s = tk.Radiobutton(frame_widget1, text="anchor = s", variable=var_anchor, value="s", command=anchor_callback)
rb_anchor_e = tk.Radiobutton(frame_widget1, text="anchor = e", variable=var_anchor, value="e", command=anchor_callback)
rb_anchor_w = tk.Radiobutton(frame_widget1, text="anchor = w", variable=var_anchor, value="w", command=anchor_callback)
rb_anchor_ne = tk.Radiobutton(frame_widget1, text="anchor = ne", variable=var_anchor, value="ne", command=anchor_callback)
rb_anchor_nw = tk.Radiobutton(frame_widget1, text="anchor = nw", variable=var_anchor, value="nw", command=anchor_callback)
rb_anchor_se = tk.Radiobutton(frame_widget1, text="anchor = se", variable=var_anchor, value="se", command=anchor_callback)
rb_anchor_sw = tk.Radiobutton(frame_widget1, text="anchor = sw", variable=var_anchor, value="sw", command=anchor_callback)
rb_anchor_center = tk.Radiobutton(frame_widget1, text="anchor = center", variable=var_anchor, value="center", command=anchor_callback)
rb_anchor_n.pack(anchor=tk.W)
rb_anchor_s.pack(anchor=tk.W)
rb_anchor_e.pack(anchor=tk.W)
rb_anchor_w.pack(anchor=tk.W)
rb_anchor_ne.pack(anchor=tk.W)
rb_anchor_nw.pack(anchor=tk.W)
rb_anchor_se.pack(anchor=tk.W)
rb_anchor_sw.pack(anchor=tk.W)
rb_anchor_center.pack(anchor=tk.W)
# Setup close button ##############
# Let window2's close button quit the application
window2.protocol("WM_DELETE_WINDOW", window1.quit)
# MAIN LOOP ("Tk" object) #################################################
window1.mainloop()
|
mit
| 4,548,765,682,759,385,600
| 40.190789
| 134
| 0.691902
| false
| 3.030494
| false
| false
| false
|
fernandog/Medusa
|
medusa/providers/torrent/html/zooqle.py
|
1
|
5673
|
# coding=utf-8
"""Provider code for Zooqle."""
from __future__ import unicode_literals
import logging
from medusa import tv
from medusa.bs4_parser import BS4Parser
from medusa.helper.common import (
convert_size,
try_int,
)
from medusa.logger.adapters.style import BraceAdapter
from medusa.providers.torrent.torrent_provider import TorrentProvider
from requests.compat import urljoin
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
class ZooqleProvider(TorrentProvider):
"""Zooqle Torrent provider."""
def __init__(self):
"""Initialize the class."""
super(ZooqleProvider, self).__init__('Zooqle')
# Credentials
self.public = True
# URLs
self.url = 'https://zooqle.com'
self.urls = {
'search': urljoin(self.url, '/search'),
}
# Proper Strings
self.proper_strings = ['PROPER', 'REPACK', 'REAL']
# Miscellaneous Options
# Torrent Stats
self.minseed = None
self.minleech = None
# Cache
self.cache = tv.Cache(self, min_time=15)
def search(self, search_strings, age=0, ep_obj=None, **kwargs):
"""
Search a provider and parse the results.
:param search_strings: A dict with mode (key) and the search value (value)
:param age: Not used
:param ep_obj: Not used
:returns: A list of search results (structure)
"""
results = []
# Search Params
search_params = {
'q': '* category:TV',
's': 'dt',
'v': 't',
'sd': 'd',
}
for mode in search_strings:
log.debug('Search mode: {0}', mode)
for search_string in search_strings[mode]:
if mode != 'RSS':
log.debug('Search string: {search}',
{'search': search_string})
search_params = {'q': '{0} category:TV'.format(search_string)}
response = self.session.get(self.urls['search'], params=search_params)
if not response or not response.text:
log.debug('No data returned from provider')
continue
results += self.parse(response.text, mode)
return results
def parse(self, data, mode):
"""
Parse search results for items.
:param data: The raw response from a search
:param mode: The current mode used to search, e.g. RSS
:return: A list of items found
"""
items = []
with BS4Parser(data, 'html5lib') as html:
torrent_table = html.find('div', class_='panel-body')
torrent_rows = torrent_table('tr') if torrent_table else []
# Continue only if at least one release is found
if len(torrent_rows) < 2:
log.debug('Data returned from provider does not contain any torrents')
return items
# Skip column headers
for row in torrent_rows[1:]:
cells = row('td')
try:
title = cells[1].find('a').get_text()
magnet = cells[2].find('a', title='Magnet link')['href']
download_url = '{magnet}{trackers}'.format(magnet=magnet,
trackers=self._custom_trackers)
if not all([title, download_url]):
continue
seeders = 1
leechers = 0
if len(cells) > 5:
peers = cells[5].find('div')
if peers and peers.get('title'):
peers = peers['title'].replace(',', '').split(' | ', 1)
# Removes 'Seeders: '
seeders = try_int(peers[0][9:])
# Removes 'Leechers: '
leechers = try_int(peers[1][10:])
# Filter unseeded torrent
if seeders < min(self.minseed, 1):
if mode != 'RSS':
log.debug("Discarding torrent because it doesn't meet the"
" minimum seeders: {0}. Seeders: {1}",
title, seeders)
continue
torrent_size = cells[3].get_text().replace(',', '')
size = convert_size(torrent_size) or -1
pubdate_raw = cells[4].get_text().replace('yesterday', '24 hours')
# "long ago" can't be translated to a date
if pubdate_raw == 'long ago':
pubdate_raw = None
pubdate = self.parse_pubdate(pubdate_raw, human_time=True)
item = {
'title': title,
'link': download_url,
'size': size,
'seeders': seeders,
'leechers': leechers,
'pubdate': pubdate,
}
if mode != 'RSS':
log.debug('Found result: {0} with {1} seeders and {2} leechers',
title, seeders, leechers)
items.append(item)
except (AttributeError, TypeError, KeyError, ValueError, IndexError):
log.exception('Failed parsing provider.')
return items
provider = ZooqleProvider()
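# Minimal usage sketch (the mode key and search string below are illustrative
# assumptions; real calls go through Medusa's provider machinery):
#
#   provider.minseed, provider.minleech = 1, 0
#   results = provider.search({'Episode': ['Show Name S01E01']})
#   for result in results:
#       print(result['title'], result['seeders'], result['leechers'])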
|
gpl-3.0
| 7,063,182,022,490,597,000
| 32.568047
| 94
| 0.482285
| false
| 4.513126
| false
| false
| false
|
ncareol/qmicromap
|
tool_qmicromap.py
|
1
|
1595
|
import os
import sys
tools = ['qt5','spatialdb','doxygen','prefixoptions']
env = Environment(tools = ['default'] + tools)
# qt modules
qtModules = Split('QtCore QtGui QtSvg')
env.EnableQtModules(qtModules)
def win_qt_setup(env):
# Windows needs an extra include path for Qt modules.
qt5include = env['QT5DIR'] + '/include'
env.AppendUnique(CPPPATH=[qt5include,])
env.EnableQtModules(qtModules)
def mac_qt_setup(env):
# Mac OS setup
# Qt configuration:
# the following uses the frameworks scheme available for gcc on Mac OS
# to provide libraries and library paths
#frameworkpath='/usr/local/lib'
#env.AppendUnique(FRAMEWORKPATH=[frameworkpath,])
#env.AppendUnique(FRAMEWORKS=qt4Modules)
pass
libsources = Split("""
QMicroMap.cpp
QStationModelGraphicsItem.cpp
""")
headers = Split("""
QMicroMap.h
QStationModelGraphicsItem.h
MicroMapOverview.h
""")
if env['PLATFORM'] == 'darwin':
mac_qt_setup(env)
if env['PLATFORM'] == 'win32':
win_qt_setup(env)
libqmicromap = env.Library('qmicromap', libsources)
env.Default(libqmicromap)
html = env.Apidocs(libsources + headers, DOXYFILE_DICT={'PROJECT_NAME':'QMicroMap', 'PROJECT_NUMBER':'1.0'})
thisdir = env.Dir('.').srcnode().abspath
def qmicromap(env):
env.AppendLibrary('qmicromap')
env.Require(tools)
env.EnableQtModules(qtModules)
env.AppendUnique(CPPPATH =[thisdir,])
env.AppendDoxref('QMicroMap')
if env['PLATFORM'] == 'darwin':
mac_qt_setup(env)
if env['PLATFORM'] == 'win32':
win_qt_setup(env)
Export('qmicromap')
|
bsd-3-clause
| -6,514,323,080,600,594,000
| 24.725806
| 109
| 0.685266
| false
| 3.235294
| false
| false
| false
|
ermongroup/a-nice-mc
|
a_nice_mc/objectives/expression/mog2.py
|
1
|
1170
|
import numpy as np
import tensorflow as tf
from a_nice_mc.objectives.expression import Expression
from a_nice_mc.utils.logger import create_logger
logger = create_logger(__name__)
class MixtureOfGaussians(Expression):
def __init__(self, name='mog2', display=True):
super(MixtureOfGaussians, self).__init__(name=name, display=display)
self.z = tf.placeholder(tf.float32, [None, 2], name='z')
def __call__(self, z):
z1 = tf.reshape(tf.slice(z, [0, 0], [-1, 1]), [-1])
z2 = tf.reshape(tf.slice(z, [0, 1], [-1, 1]), [-1])
v1 = tf.sqrt((z1 - 5) * (z1 - 5) + z2 * z2) * 2
v2 = tf.sqrt((z1 + 5) * (z1 + 5) + z2 * z2) * 2
pdf1 = tf.exp(-0.5 * v1 * v1) / tf.sqrt(2 * np.pi * 0.25)
pdf2 = tf.exp(-0.5 * v2 * v2) / tf.sqrt(2 * np.pi * 0.25)
return -tf.log(0.5 * pdf1 + 0.5 * pdf2)
@staticmethod
def mean():
return np.array([0.0, 0.0])
@staticmethod
def std():
return np.array([5.0, 0.5])
@staticmethod
def statistics(z):
return z
@staticmethod
def xlim():
return [-8, 8]
@staticmethod
def ylim():
return [-8, 8]
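# Minimal evaluation sketch (feed values are illustrative assumptions): the
# expression returns the negative log-density of a two-component mixture with
# modes near (5, 0) and (-5, 0).
#
#   mog = MixtureOfGaussians()
#   energy = mog(mog.z)
#   with tf.Session() as sess:
#       print(sess.run(energy, feed_dict={mog.z: np.array([[5., 0.], [0., 0.]])}))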
|
mit
| 8,517,003,602,130,317,000
| 27.536585
| 76
| 0.54188
| false
| 2.805755
| false
| false
| false
|
LynnCo/toolkit
|
graphVis/interface.py
|
1
|
3989
|
#Basic
'''
import tkinter as tk
class GUI(tk.Frame):
def __init__(self, master=None):
tk.Frame.__init__(self, master)
self.grid()
self.create()
def create(self):
w = tk.Canvas(self,width=600,height=400)
# w.create_image = (image=vp)
w.pack()
run = GUI()
run.mainloop()
'''
#Buttons
'''
import tkinter
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
class GUI ():
def __init__(self, master):
# Create a container
frame = tkinter.Frame(master)
# Create 2 buttons
self.button_left = tkinter.Button(frame,text="< Decrease Slope",command=self.decrease)
self.button_left.pack(side="left")
self.button_right = tkinter.Button(frame,text="Increase Slope >",command=self.increase)
self.button_right.pack(side="left")
fig = Figure()
ax = fig.add_subplot(111)
self.line, = ax.plot(range(10))
self.canvas = FigureCanvasTkAgg(fig,master=master)
self.canvas.show()
self.canvas.get_tk_widget().pack(side='top', fill='both', expand=1)
frame.pack()
def decrease(self):
x, y = self.line.get_data()
self.line.set_ydata(y - 0.2 * x)
self.canvas.draw()
def increase(self):
x, y = self.line.get_data()
self.line.set_ydata(y + 0.2 * x)
self.canvas.draw()
root = tkinter.Tk()
run = GUI(root)
root.mainloop()
'''
#Click event handler and annotation
'''
import math
import pylab
import matplotlib
class AnnoteFinder:
"""
callback for matplotlib to display an annotation when points are clicked on. The
point which is closest to the click and within xtol and ytol is identified.
Register this function like this:
scatter(xdata, ydata)
af = AnnoteFinder(xdata, ydata, annotes)
connect('button_press_event', af)
"""
def __init__(self, xdata, ydata, annotes, axis=None, xtol=None, ytol=None):
self.data = zip(xdata, ydata, annotes)
if xtol is None:
xtol = ((max(xdata) - min(xdata))/float(len(xdata)))/2
if ytol is None:
ytol = ((max(ydata) - min(ydata))/float(len(ydata)))/2
self.xtol = xtol
self.ytol = ytol
if axis is None:
self.axis = pylab.gca()
else:
self.axis= axis
self.drawnAnnotations = dict()
self.links = list()
def distance(self, x1, x2, y1, y2):
"""
return the distance between two points
"""
return math.hypot(x1 - x2, y1 - y2)
def __call__(self, event):
if event.inaxes:
clickX = event.xdata
clickY = event.ydata
if self.axis is None or self.axis==event.inaxes:
annotes = list()
for x,y,a in self.data:
if clickX-self.xtol < x < clickX+self.xtol and clickY-self.ytol < y < clickY+self.ytol :
annotes.append((self.distance(x,clickX,y,clickY),x,y, a) )
if annotes:
annotes.sort()
distance, x, y, annote = annotes[0]
self.drawAnnote(event.inaxes, x, y, annote)
for l in self.links:
l.drawSpecificAnnote(annote)
def drawAnnote(self, axis, x, y, annote):
"""
Draw the annotation on the plot
"""
if (x,y) in self.drawnAnnotations:
markers = self.drawnAnnotations[(x,y)]
for m in markers:
m.set_visible(not m.get_visible())
self.axis.figure.canvas.draw()
else:
t = axis.text(x,y, "(%3.2f, %3.2f) - %s"%(x,y,annote), )
m = axis.scatter([x],[y], marker='d', c='r', zorder=100)
self.drawnAnnotations[(x,y)] =(t,m)
self.axis.figure.canvas.draw()
def drawSpecificAnnote(self, annote):
annotesToDraw = [(x,y,a) for x,y,a in self.data if a==annote]
for x,y,a in annotesToDraw:
self.drawAnnote(self.axis, x, y, a)
x = range(2)
y = range(2)
annotes = ["point 1","point 2"]
pylab.scatter(x,y)
af = AnnoteFinder(x, y, annotes)
pylab.connect('button_press_event', af)
pylab.show()
'''
|
mit
| -7,537,670,444,090,344,000
| 26.142857
| 100
| 0.604161
| false
| 3.116406
| false
| false
| false
|
rsms/smisk
|
admin/old_examples/testbed/process.py
|
1
|
3051
|
#!/usr/bin/env python
# encoding: utf-8
import sys, os, platform
from smisk import Application, Request, Response, request
class MyRequest(Request):
def accepts_charsets(self):
'''Return a list of charsets which the client can handle, ordered by priority and appearing order.'''
vv = []
if not 'HTTP_ACCEPT_CHARSET' in self.env:
return vv
for cs in self.env['HTTP_ACCEPT_CHARSET'].split(','):
p = cs.find(';')
if p != -1:
pp = cs.find('q=', p)
if pp != -1:
vv.append([cs[:p], int(float(cs[pp+2:])*100)])
continue
vv.append([cs, 100])
vv.sort(lambda a,b: b[1] - a[1])
return [v[0] for v in vv]
class MyResponse(Response):
def redirect_to_path(self, path):
url = request.url
include_port = True
if url.port == 80:
include_port = False
url = url.to_s(port=include_port, path=False, query=False, fragment=False)
self.headers += ['Status: 302 Found', 'Location: %s%s' % (url, path)]
class MyApp(Application):
chunk = '.'*8000
def __init__(self):
self.request_class = MyRequest
self.response_class = MyResponse
Application.__init__(self)
def service(self):
# Test sending alot of data with content length
#self.response.out.write("Content-Length: 8000\r\n\r\n")
#self.response.out.write(self.chunk)
# Test sending alot of data with chunked content
#self.response.write(self.chunk)
if self.request.url.path == "/go-away":
self.response.redirect_to_path("/redirected/away")
return
if 'CONTENT_LENGTH' in self.request.env:
# Test smisk_Request___iter__
for line in self.request:
self.response.write(line)
self.response.headers = ["Content-Type: text/plain"]
self.response.write("self.request.url = %s\n" % self.request.url)
self.response.write("self.request.env.get('HTTP_ACCEPT_CHARSET') => %s\n" % self.request.env.get('HTTP_ACCEPT_CHARSET'))
self.response.write("self.request.acceptsCharsets() = %s\n" % self.request.accepts_charsets())
# Test smisk_Response___call__
self.response(
"__call__ Line1\n",
"__call__ Line2\n",
"__call__ Line3\n",
"__call__ Line4\n",
)
# Test smisk_Response_writelines and at the same time test smisk_Stream_perform_writelines
self.response.writelines((
"writelines Line1\n",
"writelines Line2\n",
"writelines Line3\n",
"writelines Line4\n",
))
#self.response.write(self.chunk)
#self.response.write("<h1>Hello World!</h1>"
# "request.env = <tt>%s</tt>\n" % self.request.env)
#self.response.headers = ["Content-Type: text/html"]
#err1()
# test exception response
def err1(): err2()
def err2(): err3()
def err3(): err4()
def err4(): err5()
def err5(): raise IOError("Kabooom!")
try:
MyApp().run()
except KeyboardInterrupt:
pass
except:
import traceback
traceback.print_exc(1000, open(os.path.abspath(os.path.dirname(__file__)) + "/process-error.log", "a"))
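# Illustrative parse (the header value is an assumption): with
# HTTP_ACCEPT_CHARSET set to 'iso-8859-5, unicode-1-1;q=0.8',
# accepts_charsets() returns ['iso-8859-5', ' unicode-1-1']; note that the
# whitespace after the comma is not stripped by the parser above.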
|
mit
| -5,327,384,261,450,466,000
| 28.621359
| 124
| 0.620125
| false
| 3.273605
| true
| false
| false
|
eamontoyaa/pyCSS
|
validations/validation03-comparisonZhao.etal.,2014.py
|
1
|
5013
|
'''
# Description.
This is a minimal module in order to perform a circular arc slope stability
analysis by the limit equilibrium method, using the Fellenius and Bishop simplified
methods.
'''
#------------------------------------------------------------------------------
## Add functions directory
import sys
sys.path += ['../functions']
#------------------------------------------------------------------------------
## Modules/Functions import
import numpy as np
import time
from automaticslipcircles import automaticslipcircles
from onlyonecircle import onlyonecircle
#------------------------------------------------------------------------------
## Poject data
projectName = 'Validation-03'
projectAuthor = 'Exneyder A. Montoya Araque'
projectDate = time.strftime("%d/%m/%y")
#------------------------------------------------------------------------------
## Define inputs
# The slope geometry
slopeHeight = [10, 'm']
slopeDip = np.array([2, 1])
crownDist = [5, 'm']
toeDist = [5, 'm']
wantAutomaticToeDepth = False
if wantAutomaticToeDepth == True:
toeDepth = ['automatic toe Depth']
else:
toeDepth = [3, 'm']
# The slip arc-circle
wantEvaluateOnlyOneSurface = True
if wantEvaluateOnlyOneSurface == True:
hztDistPointAtCrownFromCrown = [-2, 'm']
hztDistPointAtToeFromCrown = [20, 'm']
slipRadius = [34.95, 'm']
else:
numCircles = 2000
radiusIncrement = [2, 'm']
numberIncrements = 40
maxFsValueCont = 2
# Watertable
wantWatertable = False
if wantWatertable == True:
wtDepthAtCrown = [0, 'm']
else:
wtDepthAtCrown = ['No watertable']
toeUnderWatertable = False
# Materials properties.
waterUnitWeight = [0, 'kN/m3']
materialUnitWeight = [20, 'kN/m3']
frictionAngleGrad = [19.6, 'degrees']
cohesion = [3, 'kPa']
## Advanced inputs
# Want divide the slip surface in constant width slices?
wantConstSliceWidthTrue = False
# Number of discretizations of slip surface.
numSlices = 15
# Number of discretizations of circular arcs.
nDivs = numSlices
# Select the method to calculate the safety factor ['Flns', 'Bshp' or 'Allm'].
methodString = 'Allm'
# Select the output format image #['.eps', '.jpeg', '.jpg', '.pdf', '.pgf', \
# '.png', '.ps', '.raw', '.rgba', '.svg', '.svgz', '.tif', '.tiff'].
outputFormatImg = '.svg'
#------------------------------------------------------------------------------
# Operations for only one slip surface
if wantEvaluateOnlyOneSurface == True:
msg = onlyonecircle(projectName, projectAuthor, projectDate, slopeHeight, \
slopeDip, crownDist, toeDist, wantAutomaticToeDepth, toeDepth, \
hztDistPointAtCrownFromCrown, hztDistPointAtToeFromCrown, \
slipRadius, wantWatertable, wtDepthAtCrown, toeUnderWatertable, \
waterUnitWeight, materialUnitWeight, frictionAngleGrad, cohesion, \
wantConstSliceWidthTrue, numSlices, nDivs, methodString, \
outputFormatImg)
#------------------------------------------------------------------------------
# Operations for multiple slip surface
else:
automaticslipcircles(projectName, projectAuthor, projectDate, slopeHeight,\
slopeDip, crownDist, toeDist, wantAutomaticToeDepth, toeDepth, \
numCircles, radiusIncrement, numberIncrements, maxFsValueCont, \
wantWatertable, wtDepthAtCrown, toeUnderWatertable, waterUnitWeight, \
materialUnitWeight, frictionAngleGrad, cohesion, \
wantConstSliceWidthTrue, numSlices, nDivs, methodString, \
outputFormatImg)
'''
BSD 2 license.
Copyright (c) 2016, Universidad Nacional de Colombia, Ludger O.
Suarez-Burgoa and Exneyder Andrés Montoya Araque.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
|
bsd-2-clause
| 6,933,199,822,277,709,000
| 37.777778
| 79
| 0.656425
| false
| 3.723626
| false
| false
| false
|
viewportvr/daysinvr
|
backend/remixvr/database.py
|
1
|
1343
|
# -*- coding: utf-8 -*-
"""Database module, including the SQLAlchemy database object and DB-related utilities."""
from sqlalchemy.orm import relationship
from .compat import basestring
from .extensions import db
# Alias common SQLAlchemy names
Column = db.Column
relationship = relationship
Model = db.Model
# From Mike Bayer's "Building the app" talk
# https://speakerdeck.com/zzzeek/building-the-app
class SurrogatePK(object):
"""A mixin that adds a surrogate integer 'primary key' column named ``id`` \
to any declarative-mapped class.
"""
__table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True)
@classmethod
def get_by_id(cls, record_id):
"""Get record by ID."""
if any(
(isinstance(record_id, basestring) and record_id.isdigit(),
isinstance(record_id, (int, float))),
):
return cls.query.get(int(record_id))
def reference_col(tablename, nullable=False, pk_name='id', **kwargs):
"""Column that adds primary key foreign key reference.
Usage: ::
category_id = reference_col('category')
category = relationship('Category', backref='categories')
"""
return db.Column(
db.ForeignKey('{0}.{1}'.format(tablename, pk_name)),
nullable=nullable, **kwargs)
|
mit
| 9,041,744,562,209,402,000
| 28.195652
| 89
| 0.647059
| false
| 3.997024
| false
| false
| false
|
super-goose/orbit
|
space/planet.py
|
1
|
3201
|
import pygame
import math
class Planet:
def __init__(self, surface, color, position, radius, center):
self.radius = radius
self.surface = surface
self.color = color
self.setPosition(position)
self.center = center
self.setOrbitOffset(0)
self.setOrbitPeriod(1)
self.setOrbitRadius(0)
self.year = 0
self.mass = 0
self.velocity = 0
self.angle = 0
self.name = ''
def drawPlanet(self):
x = int(self.position[0])
y = int(self.position[1])
pygame.draw.circle(self.surface, self.color, (x, y), self.radius)
def getRadius(self): return self.radius
def setPosition(self, newPos):
self.position = newPos
return self
def getPosition(self): return self.position
def setVelocity(self, vel):
self.velocity = vel
return self
def getVelocity(self): return self.velocity
def setAngle(self, angle):
self.angle = angle
return self
def getAngle(self): return self.angle
def setName(self, name):
self.name = name
return self
def getName(self): return self.name
def setGravity(self, gravity):
self.gravity = gravity
return self
def getGravity(self): return self.gravity
def setOrbitRadius(self, radius):
self.orbitRadius = radius
return self
def getOrbitRadius(self): return self.orbitRadius
def setOrbitOffset(self, offset):
self.orbitOffset = offset
return self
def getOrbitOffset(self): return self.orbitOffset
def setOrbitPeriod(self, period):
self.orbitPeriod = period
return self
def getOrbitPeriod(self): return self.orbitPeriod
def advancePosition(self, sun):
x, y = self.position
# get new point with no gravity
v = self.velocity
angle = self.angle
vx = v * math.sin(angle)
vy = v * math.cos(angle)
        # get the pull from the sun
        gravitationalConstant = 14 # this is the number that made it work well
# i don't know why this number and not another
sunX, sunY = sun.getPosition()
sunX -= x
sunY -= y
d = math.sqrt(sunX**2 + sunY**2)
        g = sun.getGravity() * gravitationalConstant / (d ** 2)
ax = (g * sunX) / d
ay = (g * sunY) / d
# add these vectors together
dx = vx + ax
dy = vy + ay
newV = math.sqrt(dx**2 + dy**2)
# using law of cosines to get the angle
# by getting the cosine first, then using arccos to find the angle
ac = (g**2 - v**2 - newV**2)/(-2 * v * newV)
A = math.acos(ac)
#update attributes
self.angle += A
self.velocity = newV
x += newV * math.sin(self.angle)
y += newV * math.cos(self.angle)
self.setPosition((x, y))
return self
def distanceFrom(self, pos):
x1 = self.position[0]
y1 = self.position[1]
x2 = pos[0]
y2 = pos[1]
return math.sqrt((x1 - x2)**2 + (y1 - y2)**2)
# EOF for planets
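# Hypothetical two-body setup (the surface, colours and numbers below are
# illustrative assumptions, not part of this module):
#
#   screen = pygame.display.set_mode((800, 600))
#   sun = Planet(screen, (255, 255, 0), (400, 300), 20, (400, 300)).setGravity(100)
#   earth = Planet(screen, (0, 0, 255), (400, 100), 5, (400, 300)).setVelocity(3).setAngle(math.pi / 2)
#   earth.advancePosition(sun)
#   earth.drawPlanet()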
|
mit
| -2,533,718,706,393,433,600
| 24.616
| 80
| 0.564199
| false
| 3.709154
| false
| false
| false
|
krasnoperov/django-formalizr
|
formalizr/views.py
|
1
|
3769
|
import json
from django.contrib import messages
from django.http import HttpResponse
from django.views.generic.edit import CreateView, UpdateView, FormView
from django.core.serializers.json import DjangoJSONEncoder
class AjaxFormMixin(object):
"""
Mixin which adds support of AJAX requests to the form.
Can be used with any view which has FormMixin.
"""
json_dumps_kwargs = None
success_message = ''
def get_success_message(self, cleaned_data):
return self.success_message % cleaned_data
def get_json_dumps_kwargs(self):
if self.json_dumps_kwargs is None:
self.json_dumps_kwargs = {}
self.json_dumps_kwargs.setdefault('ensure_ascii', False)
return self.json_dumps_kwargs
def render_to_json_response(self, context, **response_kwargs):
data = json.dumps(context, cls=DjangoJSONEncoder, **self.get_json_dumps_kwargs())
response_kwargs['content_type'] = 'application/json'
return HttpResponse(data, **response_kwargs)
def form_valid(self, form):
success_message = self.get_success_message(form.cleaned_data)
if success_message:
messages.info(self.request, success_message)
if self.request.is_ajax():
context = self.get_json_context(form)
return self.render_to_json_response(context)
else:
return super(AjaxFormMixin, self).form_valid(form)
def form_invalid(self, form):
if self.request.is_ajax():
context = {
'status': 'error',
'error': 'Bad Request'
}
errors = self.get_json_errors(form)
if errors:
context["errors"] = errors
return self.render_to_json_response(context, status=400)
else:
return super(AjaxFormMixin, self).form_invalid(form)
def is_result_requested(self):
return self.request.POST.get("_return", "redirect") == "result"
def get_json_context(self, form):
if self.request.POST.get("_return", "redirect") == "result":
context = {
"status": "success"
}
msgs = self.get_json_messages()
if msgs:
context["messages"] = msgs
obj = self.get_json_object(form)
if obj:
context["object"] = obj
else:
context = {
"status": "redirect",
"location": self.get_success_url()
}
return context
def get_json_messages(self):
msgs = []
for message in messages.get_messages(self.request):
msgs.append({
"level": message.tags,
"message": message.message,
})
return msgs
def get_json_errors(self, form):
errors = {}
for error in form.errors.iteritems():
errors.update({
form.prefix + "-" + error[0] if form.prefix else error[0]: [unicode(msg) for msg in error[1]]
})
return errors
def get_json_object(self, form):
"""
        Method returns a dict representation of the result (self.object or form.instance, etc.)
"""
return None
class AjaxModelFormMixin(AjaxFormMixin):
"""
This mixin adds AJAX handling of model form.
Can be used with any view which has ModelFormMixin.
"""
def form_valid(self, form):
if self.request.is_ajax():
self.object = form.save()
return super(AjaxModelFormMixin, self).form_valid(form)
class AjaxFormView(AjaxFormMixin, FormView):
pass
class AjaxUpdateView(AjaxModelFormMixin, UpdateView):
pass
class AjaxCreateView(AjaxModelFormMixin, CreateView):
pass
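# Hypothetical usage sketch (ContactForm and the URL below are assumptions, not
# part of this module). An AJAX POST with _return=result gets a JSON body back,
# while a plain POST falls through to the normal redirect handling.
#
#   class ContactView(AjaxFormView):
#       form_class = ContactForm
#       success_url = '/thanks/'
#       success_message = 'Thanks, %(name)s!'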
|
bsd-3-clause
| 5,784,175,414,557,377,000
| 30.14876
| 109
| 0.59273
| false
| 4.119126
| false
| false
| false
|
moelius/async-task-processor
|
async_task_processor/primitives/tarantool_task.py
|
1
|
1986
|
import asyncio
from async_task_processor.primitives.base_task import BaseTask
class TarantoolTask(BaseTask):
conn_max_retries = None
conn_retries = None
conn_retry_countdown = None
ack = True # Using to prevent tarantool ack task
connection = None
data = None
queue_name = None
_task = None
_tube = None
def __init__(self, loop, connection, tube, foo, args, bind, timeout, max_retries, retry_countdown,
conn_max_retries, conn_retry_countdown, name):
"""
:type connection: asynctnt.Connection
:type tube: asynctnt_queue.tube.Tube
"""
self.conn_max_retries = conn_max_retries or 0
self.conn_retry_countdown = conn_retry_countdown or 1
self.conn_retries = 0
self.queue_name = tube.name
self.connection = connection
self._tube = tube
super().__init__(loop, type(self).__name__, foo, args, bind, timeout, max_retries, retry_countdown, name)
def set_tube(self, task):
"""
:type task: asynctnt_queue.Task
:return:
"""
self._task = task
self.data = task.data
def reset(self):
self._task, self.data = None, None
async def _tnt_call(self, func_name, args, timeout):
return await self.connection.call(func_name, args, timeout=timeout)
def __call__(self, func_name, args, timeout=-1):
"""Tarantool command execute. You may use self(<tarantool_command>) in task function.
Use this method if you want to redeclare ack method for example or bury, release task manually.
:type func_name: str
:type args: list
:type timeout: int
:return:
"""
future = asyncio.run_coroutine_threadsafe(self._tnt_call(func_name, args, timeout=timeout), self.app.loop)
return future.result()
@property
def tube(self):
return self._tube
@property
def task(self):
return self._task
|
mit
| -3,077,405,517,272,907,000
| 29.553846
| 114
| 0.6143
| false
| 3.726079
| false
| false
| false
|
uncled1023/pygments
|
Pygments/pygments-lib/pygments/lexers/php.py
|
1
|
10730
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.php
~~~~~~~~~~~~~~~~~~~
Lexers for PHP and related languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, default, using, \
this, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Other
from pygments.util import get_bool_opt, get_list_opt, iteritems
__all__ = ['ZephirLexer', 'PhpLexer']
class ZephirLexer(RegexLexer):
"""
For `Zephir language <http://zephir-lang.com/>`_ source code.
Zephir is a compiled high level language aimed
to the creation of C-extensions for PHP.
.. versionadded:: 2.0
"""
name = 'Zephir'
aliases = ['zephir']
filenames = ['*.zep']
zephir_keywords = ['fetch', 'echo', 'isset', 'empty']
zephir_type = ['bit', 'bits', 'string']
flags = re.DOTALL | re.MULTILINE
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
default('#pop')
],
'badregex': [
(r'\n', Text, '#pop')
],
'root': [
(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|->|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(for|in|while|do|break|return|continue|switch|case|default|if|else|loop|'
r'require|inline|throw|try|catch|finally|new|delete|typeof|instanceof|void|'
r'namespace|use|extends|this|fetch|isset|unset|echo|fetch|likely|unlikely|'
r'empty)\b', Keyword, 'slashstartsregex'),
(r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(abstract|boolean|bool|char|class|const|double|enum|export|extends|final|'
r'native|goto|implements|import|int|string|interface|long|ulong|char|uchar|'
r'float|unsigned|private|protected|public|short|static|self|throws|reverse|'
r'transient|volatile)\b', Keyword.Reserved),
(r'(true|false|null|undefined)\b', Keyword.Constant),
(r'(Array|Boolean|Date|_REQUEST|_COOKIE|_SESSION|'
r'_GET|_POST|_SERVER|this|stdClass|range|count|iterator|'
r'window)\b', Name.Builtin),
(r'[$a-zA-Z_][\w\\]*', Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
class PhpLexer(RegexLexer):
"""
For `PHP <http://www.php.net/>`_ source code.
For PHP embedded in HTML, use the `HtmlPhpLexer`.
Additional options accepted:
`startinline`
If given and ``True`` the lexer starts highlighting with
php code (i.e.: no starting ``<?php`` required). The default
is ``False``.
`funcnamehighlighting`
If given and ``True``, highlight builtin function names
(default: ``True``).
`disabledmodules`
If given, must be a list of module names whose function names
should not be highlighted. By default all modules are highlighted
except the special ``'unknown'`` module that includes functions
that are known to php but are undocumented.
To get a list of allowed modules have a look into the
`_php_builtins` module:
.. sourcecode:: pycon
>>> from pygments.lexers._php_builtins import MODULES
>>> MODULES.keys()
['PHP Options/Info', 'Zip', 'dba', ...]
In fact the names of those modules match the module names from
the php documentation.
"""
name = 'PHP'
aliases = ['php', 'php3', 'php4', 'php5']
filenames = ['*.php', '*.php[345]', '*.inc']
mimetypes = ['text/x-php']
# Note that a backslash is included in the following two patterns
# PHP uses a backslash as a namespace separator
_ident_char = r'[\\\w]|[^\x00-\x7f]'
_ident_begin = r'(?:[\\_a-z]|[^\x00-\x7f])'
_ident_end = r'(?:' + _ident_char + ')*'
_ident_inner = _ident_begin + _ident_end
flags = re.IGNORECASE | re.DOTALL | re.MULTILINE
tokens = {
'root': [
(r'<\?(php)?', Comment.Preproc, 'php'),
(r'[^<]+', Other),
(r'<', Other)
],
'php': [
(r'\?>', Comment.Preproc, '#pop'),
(r'(<<<)([\'"]?)(' + _ident_inner + r')(\2\n.*?\n\s*)(\3)(;?)(\n)',
bygroups(String, String, String.Delimiter, String, String.Delimiter,
Punctuation, Text)),
(r'\s+', Text),
(r'#.*?\n', Comment.Single),
(r'//.*?\n', Comment.Single),
# put the empty comment here, it is otherwise seen as
# the start of a docstring
(r'/\*\*/', Comment.Multiline),
(r'/\*\*.*?\*/', String.Doc),
(r'/\*.*?\*/', Comment.Multiline),
(r'(->|::)(\s*)(' + _ident_inner + ')',
bygroups(Operator, Text, Name.Attribute)),
(r'[~!%^&*+=|:.<>/@-]+', Operator),
(r'\?', Operator), # don't add to the charclass above!
(r'[\[\]{}();,]+', Punctuation),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'(function)(\s*)(?=\()', bygroups(Keyword, Text)),
(r'(function)(\s+)(&?)(\s*)',
bygroups(Keyword, Text, Operator, Text), 'functionname'),
(r'(const)(\s+)(' + _ident_inner + ')',
bygroups(Keyword, Text, Name.Constant)),
(r'(and|E_PARSE|old_function|E_ERROR|or|as|E_WARNING|parent|'
r'eval|PHP_OS|break|exit|case|extends|PHP_VERSION|cfunction|'
r'FALSE|print|for|require|continue|foreach|require_once|'
r'declare|return|default|static|do|switch|die|stdClass|'
r'echo|else|TRUE|elseif|var|empty|if|xor|enddeclare|include|'
r'virtual|endfor|include_once|while|endforeach|global|'
r'endif|list|endswitch|new|endwhile|not|'
r'array|E_ALL|NULL|final|php_user_filter|interface|'
r'implements|public|private|protected|abstract|clone|try|'
r'catch|throw|this|use|namespace|trait|yield|'
r'finally)\b', Keyword),
(r'(true|false|null)\b', Keyword.Constant),
include('magicconstants'),
(r'\$\{\$+' + _ident_inner + '\}', Name.Variable),
(r'\$+' + _ident_inner, Name.Variable),
(_ident_inner, Name.Other),
(r'(\d+\.\d*|\d*\.\d+)(e[+-]?[0-9]+)?', Number.Float),
(r'\d+e[+-]?[0-9]+', Number.Float),
(r'0[0-7]+', Number.Oct),
(r'0x[a-f0-9]+', Number.Hex),
(r'\d+', Number.Integer),
(r'0b[01]+', Number.Bin),
(r"'([^'\\]*(?:\\.[^'\\]*)*)'", String.Single),
(r'`([^`\\]*(?:\\.[^`\\]*)*)`', String.Backtick),
(r'"', String.Double, 'string'),
],
'magicfuncs': [
# source: http://php.net/manual/en/language.oop5.magic.php
(words((
'__construct', '__destruct', '__call', '__callStatic', '__get', '__set',
'__isset', '__unset', '__sleep', '__wakeup', '__toString', '__invoke',
'__set_state', '__clone', '__debugInfo',), suffix=r'\b'),
Name.Function.Magic),
],
'magicconstants': [
# source: http://php.net/manual/en/language.constants.predefined.php
(words((
'__LINE__', '__FILE__', '__DIR__', '__FUNCTION__', '__CLASS__',
'__TRAIT__', '__METHOD__', '__NAMESPACE__',),
suffix=r'\b'),
Name.Constant),
],
'classname': [
(_ident_inner, Name.Class, '#pop')
],
'functionname': [
include('magicfuncs'),
(_ident_inner, Name.Function, '#pop'),
default('#pop')
],
'string': [
(r'"', String.Double, '#pop'),
(r'[^{$"\\]+', String.Double),
(r'\\([nrt"$\\]|[0-7]{1,3}|x[0-9a-f]{1,2})', String.Escape),
(r'\$' + _ident_inner + '(\[\S+?\]|->' + _ident_inner + ')?',
String.Interpol),
(r'(\{\$\{)(.*?)(\}\})',
bygroups(String.Interpol, using(this, _startinline=True),
String.Interpol)),
(r'(\{)(\$.*?)(\})',
bygroups(String.Interpol, using(this, _startinline=True),
String.Interpol)),
(r'(\$\{)(\S+)(\})',
bygroups(String.Interpol, Name.Variable, String.Interpol)),
(r'[${\\]', String.Double)
],
}
def __init__(self, **options):
self.funcnamehighlighting = get_bool_opt(
options, 'funcnamehighlighting', True)
self.disabledmodules = get_list_opt(
options, 'disabledmodules', ['unknown'])
self.startinline = get_bool_opt(options, 'startinline', False)
# private option argument for the lexer itself
if '_startinline' in options:
self.startinline = options.pop('_startinline')
# collect activated functions in a set
self._functions = set()
if self.funcnamehighlighting:
from pygments.lexers._php_builtins import MODULES
for key, value in iteritems(MODULES):
if key not in self.disabledmodules:
self._functions.update(value)
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
stack = ['root']
if self.startinline:
stack.append('php')
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text, stack):
if token is Name.Other:
if value in self._functions:
yield index, Name.Builtin, value
continue
yield index, token, value
def analyse_text(text):
rv = 0.0
if re.search(r'<\?(?!xml)', text):
rv += 0.3
return rv
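# Minimal highlighting sketch (assumes the rest of Pygments is importable; the
# snippet is illustrative). startinline=True makes the lexer start in PHP mode
# without a leading '<?php'.
#
#   from pygments import highlight
#   from pygments.formatters import HtmlFormatter
#   print(highlight('$greeting = "hi";', PhpLexer(startinline=True), HtmlFormatter()))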
|
bsd-2-clause
| -9,095,089,548,564,730,000
| 39.187266
| 89
| 0.502703
| false
| 3.721818
| false
| false
| false
|
mateor/pants
|
src/python/pants/engine/engine.py
|
1
|
5985
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from abc import abstractmethod
from twitter.common.collections import maybe_list
from pants.base.exceptions import TaskError
from pants.engine.nodes import Return, State, Throw
from pants.engine.storage import Cache, Storage
from pants.util.meta import AbstractClass
from pants.util.objects import datatype
logger = logging.getLogger(__name__)
class ExecutionError(Exception):
pass
class Engine(AbstractClass):
"""An engine for running a pants command line."""
class Result(datatype('Result', ['error', 'root_products'])):
"""Represents the result of a single engine run."""
@classmethod
def finished(cls, root_products):
"""Create a success or partial success result from a finished run.
Runs can either finish with no errors, satisfying all promises, or they can partially finish
if run in fail-slow mode producing as many products as possible.
:param root_products: Mapping of root SelectNodes to their State values.
:rtype: `Engine.Result`
"""
return cls(error=None, root_products=root_products)
@classmethod
def failure(cls, error):
"""Create a failure result.
A failure result represent a run with a fatal error. It presents the error but no
products.
:param error: The execution error encountered.
:type error: :class:`pants.base.exceptions.TaskError`
:rtype: `Engine.Result`
"""
return cls(error=error, root_products=None)
def __init__(self, scheduler, storage=None, cache=None, use_cache=True):
"""
:param scheduler: The local scheduler for creating execution graphs.
:type scheduler: :class:`pants.engine.scheduler.LocalScheduler`
:param storage: The storage instance for serializables keyed by their hashes.
:type storage: :class:`pants.engine.storage.Storage`
:param cache: The cache instance for storing execution results, by default it uses the same
Storage instance if not specified.
:type cache: :class:`pants.engine.storage.Cache`
:param use_cache: True to enable usage of the cache. The cache incurs a large amount of
overhead for small tasks, and needs TODO: further improvement.
:type use_cache: bool
"""
self._scheduler = scheduler
self._storage = storage or Storage.create()
self._cache = cache or Cache.create(storage)
self._use_cache = use_cache
def execute(self, execution_request):
"""Executes the requested build.
:param execution_request: The description of the goals to achieve.
:type execution_request: :class:`ExecutionRequest`
:returns: The result of the run.
:rtype: :class:`Engine.Result`
"""
try:
self.reduce(execution_request)
return self.Result.finished(self._scheduler.root_entries(execution_request))
except TaskError as e:
return self.Result.failure(e)
def product_request(self, product, subjects):
"""Executes a request for a singular product type from the scheduler for one or more subjects
and yields the products.
:param class product: A product type for the request.
:param list subjects: A list of subjects for the request.
:yields: The requested products.
"""
request = self._scheduler.execution_request([product], subjects)
result = self.execute(request)
if result.error:
raise result.error
result_items = self._scheduler.root_entries(request).items()
# State validation.
unknown_state_types = tuple(
type(state) for _, state in result_items if type(state) not in (Throw, Return)
)
if unknown_state_types:
State.raise_unrecognized(unknown_state_types)
# Throw handling.
# TODO: See https://github.com/pantsbuild/pants/issues/3912
throw_roots = tuple(root for root, state in result_items if type(state) is Throw)
if throw_roots:
cumulative_trace = '\n'.join(self._scheduler.trace())
raise ExecutionError('Received unexpected Throw state(s):\n{}'.format(cumulative_trace))
# Return handling.
returns = tuple(state.value for _, state in result_items if type(state) is Return)
for return_value in returns:
for computed_product in maybe_list(return_value, expected_type=product):
yield computed_product
def close(self):
"""Shutdown this engine instance, releasing resources it was using."""
self._storage.close()
self._cache.close()
def cache_stats(self):
"""Returns cache stats for the engine."""
return self._cache.get_stats()
def _maybe_cache_get(self, node_entry, runnable):
"""If caching is enabled for the given Entry, create a key and perform a lookup.
The sole purpose of a keyed request is to get a stable cache key, so we can sort
keyed_request.dependencies by keys as opposed to requiring dep nodes to support compare.
:returns: A tuple of a key and result, either of which may be None.
"""
if not self._use_cache or not runnable.cacheable:
return None, None
return self._cache.get(runnable)
def _maybe_cache_put(self, key, result):
if key is not None:
self._cache.put(key, result)
@abstractmethod
def reduce(self, execution_request):
"""Reduce the given execution graph returning its root products.
:param execution_request: The description of the goals to achieve.
:type execution_request: :class:`ExecutionRequest`
:returns: The root products promised by the execution graph.
:rtype: dict of (:class:`Promise`, product)
"""
class LocalSerialEngine(Engine):
"""An engine that runs tasks locally and serially in-process."""
def reduce(self, execution_request):
self._scheduler.schedule(execution_request)
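# Minimal driver sketch (the scheduler, product type and spec below are
# illustrative assumptions). LocalSerialEngine reduces the request in-process
# and product_request yields each computed product.
#
#   engine = LocalSerialEngine(scheduler)
#   try:
#     for addr in engine.product_request(Address, ['src/python/myproj::']):
#       print(addr)
#   finally:
#     engine.close()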
|
apache-2.0
| -4,252,687,880,089,084,000
| 35.717791
| 98
| 0.705263
| false
| 4.139004
| false
| false
| false
|
maas/maas
|
src/maasserver/websockets/handlers/domain.py
|
1
|
8142
|
# Copyright 2016-2019 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""The domain handler for the WebSocket connection."""
from django.core.exceptions import ValidationError
from maasserver.forms.dnsdata import DNSDataForm
from maasserver.forms.dnsresource import DNSResourceForm
from maasserver.forms.domain import DomainForm
from maasserver.models import DNSData, DNSResource, GlobalDefault
from maasserver.models.domain import Domain
from maasserver.permissions import NodePermission
from maasserver.websockets.base import (
AdminOnlyMixin,
HandlerPermissionError,
HandlerValidationError,
)
from maasserver.websockets.handlers.timestampedmodel import (
TimestampedModelHandler,
)
class DomainHandler(TimestampedModelHandler, AdminOnlyMixin):
class Meta:
queryset = Domain.objects.all()
pk = "id"
form = DomainForm
form_requires_request = False
allowed_methods = [
"list",
"get",
"create",
"update",
"delete",
"set_active",
"set_default",
"create_dnsresource",
"update_dnsresource",
"delete_dnsresource",
"create_address_record",
"update_address_record",
"delete_address_record",
"create_dnsdata",
"update_dnsdata",
"delete_dnsdata",
]
listen_channels = ["domain"]
def dehydrate(self, domain, data, for_list=False):
rrsets = domain.render_json_for_related_rrdata(
for_list=for_list, user=self.user
)
if not for_list:
data["rrsets"] = rrsets
data["hosts"] = len(
{rr["system_id"] for rr in rrsets if rr["system_id"] is not None}
)
data["resource_count"] = len(rrsets)
if domain.is_default():
data["displayname"] = "%s (default)" % data["name"]
data["is_default"] = True
else:
data["displayname"] = data["name"]
data["is_default"] = False
return data
def _get_domain_or_permission_error(self, params):
domain = params.get("domain")
if domain is None:
raise HandlerValidationError(
{"domain": ["This field is required"]}
)
domain = self.get_object({"id": domain})
if not self.user.has_perm(NodePermission.admin, domain):
raise HandlerPermissionError()
return domain
def create_dnsresource(self, params):
self._get_domain_or_permission_error(params)
form = DNSResourceForm(data=params, user=self.user)
if form.is_valid():
form.save()
else:
raise ValidationError(form.errors)
def update_dnsresource(self, params):
domain = self._get_domain_or_permission_error(params)
dnsresource = DNSResource.objects.get(
domain=domain, id=params["dnsresource_id"]
)
form = DNSResourceForm(
instance=dnsresource, data=params, user=self.user
)
if form.is_valid():
form.save()
else:
raise ValidationError(form.errors)
return self.full_dehydrate(domain)
def delete_dnsresource(self, params):
domain = self._get_domain_or_permission_error(params)
dnsresource = DNSResource.objects.get(
domain=domain, id=params["dnsresource_id"]
)
dnsresource.delete()
def create_address_record(self, params):
domain = self._get_domain_or_permission_error(params)
if params["ip_addresses"] == [""]:
raise ValidationError(
"Data field is required when creating an %s record."
% params["rrtype"]
)
dnsresource, created = DNSResource.objects.get_or_create(
domain=domain, name=params["name"]
)
if created:
ip_addresses = []
else:
ip_addresses = dnsresource.get_addresses()
ip_addresses.extend(params["ip_addresses"])
params["ip_addresses"] = " ".join(ip_addresses)
form = DNSResourceForm(
data=params, user=self.user, instance=dnsresource
)
if form.is_valid():
form.save()
else:
raise ValidationError(form.errors)
def update_address_record(self, params):
domain = self._get_domain_or_permission_error(params)
dnsresource, created = DNSResource.objects.get_or_create(
domain=domain, name=params["name"]
)
if created:
# If we ended up creating a record, that's because the name
# was changed, so we'll start with an empty list. But that also
# means we need to edit the record with the original name.
ip_addresses = []
previous_dnsresource = DNSResource.objects.get(
domain=domain, name=params["previous_name"]
)
            previous_ip_addresses = previous_dnsresource.get_addresses()
            previous_ip_addresses.remove(params["previous_rrdata"])
            modified_addresses = " ".join(previous_ip_addresses)
form = DNSResourceForm(
data=dict(ip_addresses=modified_addresses),
user=self.user,
instance=previous_dnsresource,
)
if form.is_valid():
form.save()
else:
raise ValidationError(form.errors)
else:
ip_addresses = dnsresource.get_addresses()
# Remove the previous address for the record being edited.
# The previous_rrdata field will contain the original value
# for the IP address in the edited row.
ip_addresses.remove(params["previous_rrdata"])
ip_addresses.extend(params["ip_addresses"])
params["ip_addresses"] = " ".join(ip_addresses)
form = DNSResourceForm(
data=params, user=self.user, instance=dnsresource
)
if form.is_valid():
form.save()
else:
raise ValidationError(form.errors)
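    # Illustrative sketch (not part of the original handler): the shape of the
    # params dict that update_address_record expects; all values below are
    # hypothetical and shown for demonstration only.
    #
    #   {
    #       "domain": 1,                       # id of the domain being edited
    #       "name": "www",                     # current resource name
    #       "previous_name": "web",            # name before the edit
    #       "previous_rrdata": "10.0.0.1",     # IP shown in the edited row
    #       "ip_addresses": ["10.0.0.2"],      # replacement address(es)
    #   }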
def delete_address_record(self, params):
domain = self._get_domain_or_permission_error(params)
dnsresource = DNSResource.objects.get(
domain=domain, id=params["dnsresource_id"]
)
ip_addresses = dnsresource.get_addresses()
ip_addresses.remove(params["rrdata"])
params["ip_addresses"] = " ".join(ip_addresses)
form = DNSResourceForm(
data=params, user=self.user, instance=dnsresource
)
if form.is_valid():
form.save()
else:
raise ValidationError(form.errors)
def create_dnsdata(self, params):
domain = self._get_domain_or_permission_error(params)
dnsresource, _ = DNSResource.objects.get_or_create(
domain=domain, name=params["name"]
)
params["dnsresource"] = dnsresource.id
form = DNSDataForm(data=params)
if form.is_valid():
form.save()
else:
raise ValidationError(form.errors)
def update_dnsdata(self, params):
domain = self._get_domain_or_permission_error(params)
dnsdata = DNSData.objects.get(
id=params["dnsdata_id"], dnsresource_id=params["dnsresource_id"]
)
form = DNSDataForm(data=params, instance=dnsdata)
if form.is_valid():
form.save()
else:
raise ValidationError(form.errors)
return self.full_dehydrate(domain)
def delete_dnsdata(self, params):
self._get_domain_or_permission_error(params)
dnsdata = DNSData.objects.get(id=params["dnsdata_id"])
dnsdata.delete()
def set_default(self, params):
domain = self._get_domain_or_permission_error(params)
global_defaults = GlobalDefault.objects.instance()
global_defaults.domain = domain
global_defaults.save()
return self.full_dehydrate(domain)
|
agpl-3.0
| -6,462,518,231,539,437,000
| 35.348214
| 77
| 0.594203
| false
| 4.205579
| false
| false
| false
|
h2oloopan/easymerge
|
EasyMerge/tests/reddit/r2/r2/controllers/error.py
|
1
|
8526
|
# The contents of this file are subject to the Common Public Attribution
# License Version 1.0. (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
# License Version 1.1, but Sections 14 and 15 have been added to cover use of
# software over a computer network and provide for limited attribution for the
# Original Developer. In addition, Exhibit A has been modified to be consistent
# with Exhibit B.
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
# the specific language governing rights and limitations under the License.
#
# The Original Code is reddit.
#
# The Original Developer is the Initial Developer. The Initial Developer of
# the Original Code is reddit Inc.
#
# All portions of the code written by reddit are Copyright (c) 2006-2013 reddit
# Inc. All Rights Reserved.
###############################################################################
import json
import os
import random
import pylons
from webob.exc import HTTPFound, HTTPMovedPermanently
from pylons.i18n import _
from pylons import c, g, request, response
try:
# place all r2 specific imports in here. If there is a code error, it'll
# get caught and the stack trace won't be presented to the user in
# production
from r2.config import extensions
from r2.controllers.reddit_base import RedditController, Cookies
from r2.lib.errors import ErrorSet
from r2.lib.filters import websafe_json
from r2.lib import log, pages
from r2.lib.strings import rand_strings
from r2.lib.template_helpers import static
from r2.models.link import Link
from r2.models.subreddit import DefaultSR, Subreddit
except Exception, e:
if g.debug:
# if debug mode, let the error filter up to pylons to be handled
raise e
else:
# production environment: protect the code integrity!
print "HuffmanEncodingError: make sure your python compiles before deploying, stupid!"
# kill this app
os._exit(1)
redditbroke = \
'''<html>
<head>
<title>reddit broke!</title>
</head>
<body>
<div style="margin: auto; text-align: center">
<p>
<a href="/">
<img border="0" src="%s" alt="you broke reddit" />
</a>
</p>
<p>
%s
</p>
</body>
</html>
'''
FAILIEN_COUNT = 3
def make_failien_url():
failien_number = random.randint(1, FAILIEN_COUNT)
failien_name = "youbrokeit%d.png" % failien_number
return static(failien_name)
class ErrorController(RedditController):
"""Generates error documents as and when they are required.
The ErrorDocuments middleware forwards to ErrorController when error
related status codes are returned from the application.
This behaviour can be altered by changing the parameters to the
ErrorDocuments middleware in your config/middleware.py file.
"""
def check_for_bearer_token(self):
pass
allowed_render_styles = ('html', 'xml', 'js', 'embed', '', "compact", 'api')
# List of admins to blame (skip the first admin, "reddit")
# If list is empty, just blame "an admin"
admins = g.admins[1:] or ["an admin"]
def __before__(self):
try:
c.error_page = True
RedditController.__before__(self)
except (HTTPMovedPermanently, HTTPFound):
# ignore an attempt to redirect from an error page
pass
except Exception as e:
handle_awful_failure("ErrorController.__before__: %r" % e)
def __after__(self):
try:
RedditController.__after__(self)
except Exception as e:
handle_awful_failure("ErrorController.__after__: %r" % e)
def __call__(self, environ, start_response):
try:
return RedditController.__call__(self, environ, start_response)
except Exception as e:
return handle_awful_failure("ErrorController.__call__: %r" % e)
def send403(self):
c.site = DefaultSR()
if 'usable_error_content' in request.environ:
return request.environ['usable_error_content']
else:
res = pages.RedditError(
title=_("forbidden (%(domain)s)") % dict(domain=g.domain),
message=_("you are not allowed to do that"),
explanation=request.GET.get('explanation'))
return res.render()
def send404(self):
if 'usable_error_content' in request.environ:
return request.environ['usable_error_content']
return pages.RedditError(_("page not found"),
_("the page you requested does not exist")).render()
def send429(self):
retry_after = request.environ.get("retry_after")
if retry_after:
response.headers["Retry-After"] = str(retry_after)
template_name = '/ratelimit_toofast.html'
else:
template_name = '/ratelimit_throttled.html'
template = g.mako_lookup.get_template(template_name)
return template.render(logo_url=static(g.default_header_url))
def send503(self):
retry_after = request.environ.get("retry_after")
if retry_after:
response.headers["Retry-After"] = str(retry_after)
return request.environ['usable_error_content']
def GET_document(self):
try:
c.errors = c.errors or ErrorSet()
# clear cookies the old fashioned way
c.cookies = Cookies()
code = request.GET.get('code', '')
try:
code = int(code)
except ValueError:
code = 404
srname = request.GET.get('srname', '')
takedown = request.GET.get('takedown', "")
# StatusBasedRedirect will override this anyway, but we need this
# here for pagecache to see.
response.status_int = code
if srname:
c.site = Subreddit._by_name(srname)
if code in (204, 304):
# NEVER return a content body on 204/304 or downstream
# caches may become very confused.
if request.GET.has_key('x-sup-id'):
x_sup_id = request.GET.get('x-sup-id')
if '\r\n' not in x_sup_id:
response.headers['x-sup-id'] = x_sup_id
return ""
elif c.render_style not in self.allowed_render_styles:
return str(code)
elif c.render_style in extensions.API_TYPES:
data = request.environ.get('extra_error_data', {'error': code})
return websafe_json(json.dumps(data))
elif takedown and code == 404:
link = Link._by_fullname(takedown)
return pages.TakedownPage(link).render()
elif code == 403:
return self.send403()
elif code == 429:
return self.send429()
elif code == 500:
randmin = {'admin': random.choice(self.admins)}
failien_url = make_failien_url()
return redditbroke % (failien_url, rand_strings.sadmessages % randmin)
elif code == 503:
return self.send503()
elif c.site:
return self.send404()
else:
return "page not found"
except Exception as e:
return handle_awful_failure("ErrorController.GET_document: %r" % e)
POST_document = PUT_document = DELETE_document = GET_document
def handle_awful_failure(fail_text):
"""
Makes sure that no errors generated in the error handler percolate
up to the user unless debug is enabled.
"""
if g.debug:
import sys
s = sys.exc_info()
# reraise the original error with the original stack trace
raise s[1], None, s[2]
try:
# log the traceback, and flag the "path" as the error location
import traceback
log.write_error_summary(fail_text)
for line in traceback.format_exc().splitlines():
g.log.error(line)
return redditbroke % (make_failien_url(), fail_text)
except:
# we are doomed. Admit defeat
return "This is an error that should never occur. You win."
|
mit
| 9,156,844,164,749,541,000
| 35.75
| 94
| 0.604621
| false
| 4.050356
| false
| false
| false
|
PnEcrins/GeoNature
|
backend/geonature/core/gn_permissions/tools.py
|
1
|
9278
|
import logging, json
from flask import current_app, redirect, Response
from itsdangerous import (TimedJSONWebSignatureSerializer as Serializer,
SignatureExpired, BadSignature)
import sqlalchemy as sa
from sqlalchemy.sql.expression import func
from pypnusershub.db.tools import (
InsufficientRightsError,
AccessRightsExpiredError,
UnreadableAccessRightsError
)
from geonature.core.gn_permissions.models import VUsersPermissions, TFilters
from geonature.utils.env import DB
log = logging.getLogger(__name__)
def user_from_token(token, secret_key=None):
secret_key = secret_key or current_app.config['SECRET_KEY']
try:
s = Serializer(current_app.config['SECRET_KEY'])
user = s.loads(token)
return user
except SignatureExpired:
raise AccessRightsExpiredError("Token expired")
except BadSignature:
raise UnreadableAccessRightsError('Token BadSignature', 403)
def get_user_from_token_and_raise(
request,
secret_key=None,
redirect_on_expiration=None,
redirect_on_invalid_token=None
):
"""
Deserialize the token
    catch exceptions and return an appropriate Response (403, 302, ...)
"""
try:
token = request.cookies['token']
return user_from_token(token, secret_key)
except AccessRightsExpiredError:
if redirect_on_expiration:
res = redirect(redirect_on_expiration, code=302)
else:
res = Response('Token Expired', 403)
res.set_cookie('token', expires=0)
return res
except InsufficientRightsError as e:
log.info(e)
if redirect_on_expiration:
res = redirect(redirect_on_expiration, code=302)
else:
res = Response('Forbidden', 403)
return res
except KeyError as e:
if redirect_on_expiration:
return redirect(redirect_on_expiration, code=302)
return Response('No token', 403)
except UnreadableAccessRightsError:
log.info('Invalid Token : BadSignature')
# invalid token
if redirect_on_invalid_token:
res = redirect(redirect_on_invalid_token, code=302)
else:
res = Response('Token BadSignature', 403)
res.set_cookie('token', expires=0)
return res
except Exception as e:
trap_all_exceptions = current_app.config.get(
'TRAP_ALL_EXCEPTIONS',
True
)
if not trap_all_exceptions:
raise
log.critical(e)
msg = json.dumps({'type': 'Exception', 'msg': repr(e)})
return Response(msg, 403)
def get_user_permissions(user, code_action, code_filter_type, module_code=None, code_object=None):
"""
Get all the permissions of a user for an action, a module (or an object) and a filter_type
    A user may have multiple permissions because of group membership. The view mapped by VUsersPermissions does not take the
    max because some filter types are not quantitative.
Parameters:
user(dict)
code_action(str): <C,R,U,V,E,D>
code_filter_type(str): <SCOPE, GEOGRAPHIC ...>
module_code(str): 'GEONATURE', 'OCCTAX'
code_object(str): 'PERMISSIONS', 'DATASET' (table gn_permissions.t_oject)
Return:
Array<VUsersPermissions>
"""
id_role = user['id_role']
ors = [VUsersPermissions.module_code.ilike('GEONATURE')]
q = (
VUsersPermissions
.query
.filter(VUsersPermissions.id_role == id_role)
.filter(VUsersPermissions.code_action == code_action)
.filter(VUsersPermissions.code_filter_type == code_filter_type)
)
    # if code_object is given, we take only the authorization of this object
    # with no inheritance from GeoNature
if code_object:
user_cruved = q.filter(VUsersPermissions.code_object == code_object).all()
object_for_error = code_object
# else: heritage cruved of the module or from GeoNature
else:
object_for_error = 'GEONATURE'
if module_code:
ors.append(VUsersPermissions.module_code.ilike(module_code))
object_for_error = module_code
user_cruved = q.filter(sa.or_(*ors)).all()
try:
assert len(user_cruved) > 0
return user_cruved
except AssertionError:
raise InsufficientRightsError(
'User "{}" cannot "{}" in module/app/object "{}"'.format(
id_role, code_action, object_for_error
)
)
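# Illustrative sketch (added for clarity, not part of the original module): a typical
# call to get_user_permissions from a route; the user dict and module code below are
# hypothetical values for demonstration only.
def _example_get_user_permissions():
    user = {'id_role': 1}
    try:
        permissions = get_user_permissions(
            user,
            code_action='R',
            code_filter_type='SCOPE',
            module_code='OCCTAX'
        )
        # each item is a VUsersPermissions row, e.g. permissions[0].value_filter
        return permissions
    except InsufficientRightsError:
        return []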
def build_cruved_dict(cruved, get_id):
'''
    Utility function to build a dict like {'C': '3', 'R': '2', ...}
    from Array<VUsersPermissions>
'''
cruved_dict = {}
for action_scope in cruved:
if get_id:
cruved_dict[action_scope[0]] = action_scope[2]
else:
cruved_dict[action_scope[0]] = action_scope[1]
return cruved_dict
def beautifulize_cruved(actions, cruved):
"""
    Build a more readable cruved dict using the action labels
Params:
actions: dict action {'C': 'Action de créer'}
cruved: dict of cruved
Return:
Array<dict> [{'label': 'Action de Lire', 'value': '3'}]
"""
cruved_beautiful = []
for key, value in cruved.items():
temp = {}
temp['label'] = actions.get(key)
temp['value'] = value
cruved_beautiful.append(temp)
return cruved_beautiful
def cruved_scope_for_user_in_module(
id_role=None,
module_code=None,
object_code='ALL',
get_id=False
):
"""
get the user cruved for a module
    if there is no cruved for a module, the cruved of the parent module is taken.
    A child app cruved always overrides the parent module cruved.
Params:
- id_role(int)
- module_code(str)
- get_id(bool): if true return the id_scope for each action
if false return the filter_value for each action
Return a tuple
- index 0: the cruved as a dict : {'C': 0, 'R': 2 ...}
- index 1: a boolean which say if its an herited cruved
"""
q = DB.session.query(
VUsersPermissions.code_action,
func.max(VUsersPermissions.value_filter),
func.max(VUsersPermissions.id_filter)
).distinct(VUsersPermissions.code_action).filter(
VUsersPermissions.id_role == id_role
).filter(
VUsersPermissions.code_filter_type == 'SCOPE'
).filter(
VUsersPermissions.code_object == object_code
).group_by(VUsersPermissions.code_action)
cruved_actions = ['C', 'R', 'U', 'V', 'E', 'D']
# if object not ALL, no heritage
if object_code != 'ALL':
object_cruved = q.all()
cruved_dict = build_cruved_dict(object_cruved, get_id)
update_cruved = {}
for action in cruved_actions:
if action in cruved_dict:
update_cruved[action] = cruved_dict[action]
else:
update_cruved[action] = '0'
return update_cruved, False
# get max scope cruved for module GEONATURE
parent_cruved_data = q.filter(VUsersPermissions.module_code.ilike('GEONATURE')).all()
parent_cruved = {}
# build a dict like {'C':'0', 'R':'2' ...} if get_id = False or
# {'C': 1, 'R':3 ...} if get_id = True
parent_cruved = build_cruved_dict(parent_cruved_data, get_id)
# get max scope cruved for module passed in parameter
module_cruved = {}
if module_code:
module_cruved_data = q.filter(VUsersPermissions.module_code.ilike(module_code)).all()
module_cruved = build_cruved_dict(module_cruved_data, get_id)
# for the module
for action_scope in module_cruved_data:
if get_id:
module_cruved[action_scope[0]] = action_scope[2]
else:
module_cruved[action_scope[0]] = action_scope[1]
# get the id for code 0
if get_id:
id_scope_no_data = DB.session.query(TFilters.id_filter).filter(TFilters.value_filter == '0').one()[0]
# update cruved with child module if action exist, otherwise take geonature cruved
update_cruved = {}
herited = False
for action in cruved_actions:
if action in module_cruved:
update_cruved[action] = module_cruved[action]
elif action in parent_cruved:
update_cruved[action] = parent_cruved[action]
herited = True
else:
if get_id:
update_cruved[action] = id_scope_no_data
else:
update_cruved[action] = '0'
return update_cruved, herited
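# Illustrative sketch (added for clarity, not part of the original module): the tuple
# returned by cruved_scope_for_user_in_module; id_role and module_code are
# hypothetical values and the resulting dict is only an example.
def _example_cruved_scope():
    cruved, herited = cruved_scope_for_user_in_module(
        id_role=1,
        module_code='OCCTAX',
        object_code='ALL',
        get_id=False
    )
    # e.g. cruved == {'C': '2', 'R': '3', 'U': '1', 'V': '0', 'E': '2', 'D': '1'}
    # herited is True when at least one action falls back to the GEONATURE cruved
    return cruved, herited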
def get_or_fetch_user_cruved(
session=None,
id_role=None,
module_code=None,
        object_code='ALL'
):
"""
Check if the cruved is in the session
    if not, get the cruved from the DB with
    cruved_scope_for_user_in_module()
"""
if module_code in session and 'user_cruved' in session[module_code]:
return session[module_code]['user_cruved']
else:
user_cruved = cruved_scope_for_user_in_module(
id_role=id_role,
module_code=module_code,
object_code=object_code
)[0]
session[module_code] = {}
session[module_code]['user_cruved'] = user_cruved
return user_cruved
|
bsd-2-clause
| 3,092,783,275,424,942,600
| 32.014235
| 124
| 0.612267
| false
| 3.59156
| false
| false
| false
|
allanliebold/data-structures
|
src/test_linked_list.py
|
1
|
2515
|
"""Tests for singly-linked list."""
import pytest
def test_node_attributes():
"""Test that node object has expected attributes."""
from linked_list import Node
n = Node('test')
assert n.data == 'test' and n.next_node is None
def test_list_push():
"""Test that linked_list has node pushed to it."""
from linked_list import LinkedList
linked_list = LinkedList()
linked_list.push(1)
assert linked_list.head.data == 1
def test_list_push_next():
"""Test push with second node.
Head should be new node, next attribute should point to previous head.
"""
from linked_list import LinkedList
linked_list = LinkedList()
linked_list.push('first')
linked_list.push('second')
assert linked_list.head.data == 'second' and linked_list.head.next_node.data == 'first'
def test_list_push_iterable():
"""."""
from linked_list import LinkedList
datas = [1, 2, 3, 4, 5]
linked_list = LinkedList(datas)
for i in datas:
assert linked_list.search(i).data == i
def test_list_pop():
"""Test that pop returns the data of the deleted node."""
from linked_list import LinkedList
linked_list = LinkedList()
linked_list.push(5)
assert linked_list.pop() == 5
def test_list_pop_empty():
"""Test pop called on an empty linked list."""
from linked_list import LinkedList
linked_list = LinkedList()
assert linked_list.pop() is None
def test_list_search():
"""Test that search method returns the node with the data passed."""
from linked_list import LinkedList
linked_list = LinkedList()
linked_list.push(1)
linked_list.push('target')
linked_list.push(3)
assert linked_list.search('target').data == 'target'
def test_list_search_invalid():
"""Test that search for node not in list raises dataError."""
from linked_list import LinkedList
linked_list = LinkedList()
linked_list.push(1)
linked_list.push(2)
linked_list.push(3)
with pytest.raises(ValueError):
linked_list.search('target')
def test_list_size():
"""Test that size method returns correct number."""
from linked_list import LinkedList
linked_list = LinkedList()
for i in range(10):
linked_list.push(i)
assert linked_list.size() == 10
def test_string_not_iterated_upon_init():
"""Test that strings passed on init are not split."""
from linked_list import LinkedList
linked_list = LinkedList('68')
assert linked_list.head.data == '68'
|
mit
| 2,257,836,508,669,859,300
| 26.944444
| 91
| 0.662028
| false
| 3.608321
| true
| false
| false
|
fcbond/OMW
|
omw/__init__.py
|
1
|
28562
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os, sys, sqlite3, datetime, urllib, gzip, requests
from time import sleep
from flask import Flask, render_template, g, request, redirect, url_for, send_from_directory, session, flash, jsonify, make_response, Markup, Response
from flask_login import LoginManager, UserMixin, login_required, login_user, logout_user, current_user, wraps
from itsdangerous import URLSafeTimedSerializer # for safe session cookies
from collections import defaultdict as dd
from collections import OrderedDict as od
from hashlib import md5
from werkzeug import secure_filename
from lxml import etree
from packaging.version import Version
## profiler
#from werkzeug.contrib.profiler import ProfilerMiddleware
from common_login import *
from common_sql import *
from omw_sql import *
from wn_syntax import *
from math import log
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.secret_key = "!$flhgSgngNO%$#SOET!$!"
app.config["REMEMBER_COOKIE_DURATION"] = datetime.timedelta(minutes=30)
## profiler
#app.config['PROFILE'] = True
#app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])
#app.run(debug = True)
################################################################################
# LOGIN
################################################################################
login_manager.init_app(app)
@app.route("/login", methods=["GET", "POST"])
def login():
""" This login function checks if the username & password
match the admin.db; if the authentication is successful,
it passes the id of the user into login_user() """
if request.method == "POST" and \
"username" in request.form and \
"password" in request.form:
username = request.form["username"]
password = request.form["password"]
user = User.get(username)
# If we found a user based on username then compare that the submitted
        # password matches the password in the database. The password is stored
        # in a salted hash format, so you must hash the password before comparing it.
if user and hash_pass(password) == user.password:
login_user(user, remember=True)
# FIXME! Get this to work properly...
# return redirect(request.args.get("next") or url_for("index"))
return redirect(url_for("index"))
else:
flash(u"Invalid username, please try again.")
return render_template("login.html")
@app.route("/logout")
@login_required(role=0, group='open')
def logout():
logout_user()
return redirect(url_for("index"))
################################################################################
################################################################################
# SET UP CONNECTION WITH DATABASES
################################################################################
@app.before_request
def before_request():
g.admin = connect_admin()
g.omw = connect_omw()
@app.teardown_request
def teardown_request(exception):
if hasattr(g, 'db'):
g.admin.close()
g.omw.close()
################################################################################
################################################################################
# AJAX REQUESTS
################################################################################
@app.route('/_thumb_up_id')
def thumb_up_id():
user = fetch_id_from_userid(current_user.id)
ili_id = request.args.get('ili_id', None)
rate = 1
r = rate_ili_id(ili_id, rate, user)
counts, up_who, down_who = f_rate_summary([ili_id])
html = """ <span style="color:green" title="{}">+{}</span><br>
<span style="color:red" title="{}">-{}</span>
""".format(up_who[int(ili_id)], counts[int(ili_id)]['up'],
down_who[int(ili_id)], counts[int(ili_id)]['down'])
return jsonify(result=html)
@app.route('/_thumb_down_id')
def thumb_down_id():
user = fetch_id_from_userid(current_user.id)
ili_id = request.args.get('ili_id', None)
rate = -1
r = rate_ili_id(ili_id, rate, user)
counts, up_who, down_who = f_rate_summary([ili_id])
html = """ <span style="color:green" title="{}">+{}</span><br>
<span style="color:red" title="{}">-{}</span>
""".format(up_who[int(ili_id)], counts[int(ili_id)]['up'],
down_who[int(ili_id)], counts[int(ili_id)]['down'])
return jsonify(result=html)
@app.route('/_comment_id')
def comment_id():
user = fetch_id_from_userid(current_user.id)
ili_id = request.args.get('ili_id', None)
comment = request.args.get('comment', None)
comment = str(Markup.escape(comment))
dbinsert = comment_ili_id(ili_id, comment, user)
return jsonify(result=dbinsert)
@app.route('/_detailed_id')
def detailed_id():
ili_id = request.args.get('ili_id', None)
rate_hist = fetch_rate_id([ili_id])
comm_hist = fetch_comment_id([ili_id])
users = fetch_allusers()
r_html = ""
for r, u, t in rate_hist[int(ili_id)]:
r_html += '{} ({}): {} <br>'.format(users[u]['userID'], t, r)
c_html = ""
for c, u, t in comm_hist[int(ili_id)]:
c_html += '{} ({}): {} <br>'.format(users[u]['userID'], t, c)
html = """
<td colspan="9">
<div style="width: 49%; float:left;">
<h6>Ratings</h6>
{}</div>
<div style="width: 49%; float:right;">
<h6>Comments</h6>
{}</div>
</td>""".format(r_html, c_html)
return jsonify(result=html)
@app.route('/_confirm_wn_upload')
def confirm_wn_upload_id():
user = fetch_id_from_userid(current_user.id)
fn = request.args.get('fn', None)
upload = confirmUpload(fn, user)
labels = updateLabels()
return jsonify(result=upload)
@app.route('/_add_new_project')
def add_new_project():
user = fetch_id_from_userid(current_user.id)
proj = request.args.get('proj_code', None)
proj = str(Markup.escape(proj))
if user and proj:
dbinsert = insert_new_project(proj, user)
return jsonify(result=dbinsert)
else:
return jsonify(result=False)
@app.route("/_load_lang_selector",methods=["GET"])
def omw_lang_selector():
selected_lang = request.cookies.get('selected_lang')
selected_lang2 = request.cookies.get('selected_lang2')
lang_id, lang_code = fetch_langs()
html = '<select name="lang" style="font-size: 85%; width: 9em" required>'
for lid in lang_id.keys():
if selected_lang == str(lid):
html += """<option value="{}" selected>{}</option>
""".format(lid, lang_id[lid][1])
else:
html += """<option value="{}">{}</option>
""".format(lid, lang_id[lid][1])
html += '</select>'
html += '<select name="lang2" style="font-size: 85%; width: 9em" required>'
for lid in lang_id.keys():
if selected_lang2 == str(lid):
html += """<option value="{}" selected>{}</option>
""".format(lid, lang_id[lid][1])
else:
html += """<option value="{}">{}</option>
""".format(lid, lang_id[lid][1])
html += '</select>'
return jsonify(result=html)
@app.route('/_add_new_language')
def add_new_language():
user = fetch_id_from_userid(current_user.id)
bcp = request.args.get('bcp', None)
bcp = str(Markup.escape(bcp))
iso = request.args.get('iso', None)
iso = str(Markup.escape(iso))
name = request.args.get('name', None)
name = str(Markup.escape(name))
if bcp and name:
dbinsert = insert_new_language(bcp, iso, name, user)
return jsonify(result=dbinsert)
else:
return jsonify(result=False)
@app.route('/_load_proj_details')
def load_proj_details():
proj_id = request.args.get('proj', 0)
if proj_id:
proj_id = int(proj_id)
else:
proj_id = None
projs = fetch_proj()
srcs = fetch_src()
srcs_meta = fetch_src_meta()
html = str()
if proj_id:
i = 0
for src_id in srcs.keys():
if srcs[src_id][0] == projs[proj_id]:
i += 1
html += "<br><p><b>Source {}: {}-{}</b></p>".format(i,
projs[proj_id],srcs[src_id][1])
for attr, val in srcs_meta[src_id].items():
html += "<p style='margin-left: 40px'>"
html += attr + ": " + val
html += "</p>"
return jsonify(result=html)
@app.route('/_load_min_omw_concept/<ss>')
@app.route('/_load_min_omw_concept_ili/<ili_id>')
def min_omw_concepts(ss=None, ili_id=None):
if ili_id:
ss_ids = f_ss_id_by_ili_id(ili_id)
else:
ss_ids = [ss]
pos = fetch_pos()
langs_id, langs_code = fetch_langs()
ss, senses, defs, exes, links = fetch_ss_basic(ss_ids)
ssrels = fetch_ssrel()
return jsonify(result=render_template('min_omw_concept.html',
pos = pos,
langs = langs_id,
senses=senses,
ss=ss,
links=links,
ssrels=ssrels,
defs=defs,
exes=exes))
@app.route('/_load_min_omw_sense/<sID>')
def min_omw_sense(sID=None):
if sID:
s_id=int(sID)
langs_id, langs_code = fetch_langs()
pos = fetch_pos()
sense = fetch_sense(s_id)
forms=fetch_forms(sense[3])
selected_lang = int(request.cookies.get('selected_lang'))
labels= fetch_labels(selected_lang,[sense[4]])
src_meta= fetch_src_meta()
src_sid=fetch_src_for_s_id([s_id])
sdefs = fetch_defs_by_sense([s_id])
sdef = ''
if selected_lang in sdefs[s_id]:
sdef = sdefs[s_id][selected_lang] ## requested language
else:
sdef = sdefs[min(sdefs[s_id].keys())] ## a language
# return jsonify(result=render_template('omw_sense.html',
return jsonify(result=render_template('min_omw_sense.html',
s_id = s_id,
sdef=sdef,
sense = sense,
forms=forms,
langs = langs_id,
pos = pos,
labels = labels,
src_sid = src_sid,
src_meta = src_meta))
# l=lambda:dd(l)
# vr = l() # wn-lmf validation report
# @app.route('/_report_val1')
# def report_val1():
# filename = request.args.get('fn', None)
# if filename:
# vr1 = val1_DTD(current_user, filename)
# vr.update(vr1)
# if vr1['dtd_val'] == True:
# html = "DTD PASSED"
# return jsonify(result=html)
# else:
# html = "DTD FAILED" + '<br>' + vr['dtd_val_errors']
# return jsonify(result=html)
# else:
# return jsonify(result="ERROR")
@app.route('/_report_val2', methods=['GET', 'POST'])
@login_required(role=0, group='open')
def report_val2():
filename = request.args.get('fn', None)
vr, filename, wn, wn_dtls = validateFile(current_user.id, filename)
return jsonify(result=render_template('validation-report.html',
vr=vr, wn=wn, wn_dtls=wn_dtls, filename=filename))
# validateFile()
# filename = request.args.get('fn', None)
# if filename:
# vr = val1_DTD(current_user, filename)
# if vr['dtd_val'] == True:
# html = "DTD PASSED"
# return jsonify(result=html)
# else:
# html = "DTD FAILED" + '<br>' + vr['dtd_val_errors']
# return jsonify(result=html)
# else:
# return jsonify(result="ERROR")
# return jsonify(result="TEST_VAL2")
################################################################################
################################################################################
# VIEWS
################################################################################
@app.route('/', methods=['GET', 'POST'])
def index():
return render_template('index.html')
@app.route('/ili', methods=['GET', 'POST'])
def ili_welcome(name=None):
return render_template('ili_welcome.html')
@app.route('/omw', methods=['GET', 'POST'])
def omw_welcome(name=None):
projects = request.args.get('projects','current')
#print(projects)
lang_id, lang_code = fetch_langs()
src_meta=fetch_src_meta()
### sort by language, project version (Newest first)
src_sort=od()
keys=list(src_meta.keys())
keys.sort(key=lambda x: Version(src_meta[x]['version']),reverse=True) #Version
keys.sort(key=lambda x: src_meta[x]['id']) #id
keys.sort(key=lambda x: lang_id[lang_code['code'][src_meta[x]['language']]][1]) #Language
for k in keys:
if projects=='current': # only get the latest version
if src_meta[k]['version'] != max((src_meta[i]['version'] for i in src_meta
if src_meta[i]['id'] == src_meta[k]['id']),
key=lambda x: Version(x)):
continue
src_sort[k] = src_meta[k]
return render_template('omw_welcome.html',
src_meta=src_sort,
lang_id=lang_id,
lang_code=lang_code,
licenses=licenses)
@app.route('/wordnet', methods=['GET', 'POST'])
def wordnet_license(name=None):
return render_template('wordnet_license.html')
@app.route('/omw_wns', methods=['GET', 'POST'])
def omw_wns(name=None):
projects = request.args.get('projects','current')
src_meta=fetch_src_meta()
stats = []
lang_id, lang_code = fetch_langs()
### sort by language name (1), id, version (FIXME -- reverse version)
src_sort=od()
keys=list(src_meta.keys())
keys.sort(key=lambda x: Version(src_meta[x]['version']),reverse=True) #Version
keys.sort(key=lambda x: src_meta[x]['id']) #id
keys.sort(key=lambda x: lang_id[lang_code['code'][src_meta[x]['language']]][1]) #Language
for k in keys:
if projects=='current': # only get the latest version
if src_meta[k]['version'] != max((src_meta[i]['version'] for i in src_meta
if src_meta[i]['id'] == src_meta[k]['id']),
key=lambda x: Version(x)):
continue
stats.append((src_meta[k], fetch_src_id_stats(k)))
return render_template('omw_wns.html',
stats=stats,
lang_id=lang_id,
lang_code=lang_code,
licenses=licenses)
@app.route("/useradmin",methods=["GET"])
@login_required(role=99, group='admin')
def useradmin():
users = fetch_allusers()
return render_template("useradmin.html", users=users)
@app.route("/langadmin",methods=["GET"])
@login_required(role=99, group='admin')
def langadmin():
lang_id, lang_code = fetch_langs()
return render_template("langadmin.html", langs=lang_id)
@app.route("/projectadmin",methods=["GET"])
@login_required(role=99, group='admin')
def projectadmin():
projs = fetch_proj()
return render_template("projectadmin.html", projs=projs)
@app.route('/allconcepts', methods=['GET', 'POST'])
def allconcepts():
ili, ili_defs = fetch_ili()
rsumm, up_who, down_who = f_rate_summary(list(ili.keys()))
return render_template('concept-list.html', ili=ili,
rsumm=rsumm, up_who=up_who, down_who=down_who)
@app.route('/temporary', methods=['GET', 'POST'])
def temporary():
ili = fetch_ili_status(2)
rsumm, up_who, down_who = f_rate_summary(list(ili.keys()))
return render_template('concept-list.html', ili=ili,
rsumm=rsumm, up_who=up_who, down_who=down_who)
@app.route('/deprecated', methods=['GET', 'POST'])
def deprecated():
ili = fetch_ili_status(0)
rsumm, up_who, down_who = f_rate_summary(list(ili.keys()))
return render_template('concept-list.html', ili=ili,
rsumm=rsumm, up_who=up_who, down_who=down_who)
@app.route('/ili/concepts/<c>', methods=['GET', 'POST'])
def concepts_ili(c=None):
c = c.split(',')
ili, ili_defs = fetch_ili(c)
rsumm, up_who, down_who = f_rate_summary(list(ili.keys()))
return render_template('concept-list.html', ili=ili,
rsumm=rsumm, up_who=up_who, down_who=down_who)
@app.route('/ili/search', methods=['GET', 'POST'])
@app.route('/ili/search/<q>', methods=['GET', 'POST'])
def search_ili(q=None):
if q:
query = q
else:
query = request.form['query']
src_id = fetch_src()
kind_id = fetch_kind()
status_id = fetch_status()
ili = dict()
for c in query_omw("""SELECT * FROM ili WHERE def GLOB ?
""", [query]):
ili[c['id']] = (kind_id[c['kind_id']], c['def'],
src_id[c['origin_src_id']], c['src_key'],
status_id[c['status_id']], c['superseded_by_id'],
c['t'])
rsumm, up_who, down_who = f_rate_summary(list(ili.keys()))
return render_template('concept-list.html', ili=ili,
rsumm=rsumm, up_who=up_who, down_who=down_who)
@app.route('/upload', methods=['GET', 'POST'])
@login_required(role=0, group='open')
def upload():
return render_template('upload.html')
@app.route('/metadata', methods=['GET', 'POST'])
def metadata():
return render_template('metadata.html')
@app.route('/join', methods=['GET', 'POST'])
def join():
return render_template('join.html')
@app.route('/omw/uploads/<filename>')
def download_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'],
filename, as_attachment=True)
@app.route('/ili/validation-report', methods=['GET', 'POST'])
@login_required(role=0, group='open')
def validationReport():
vr, filename, wn, wn_dtls = validateFile(current_user.id)
return render_template('validation-report.html',
vr=vr, wn=wn, wn_dtls=wn_dtls,
filename=filename)
@app.route('/ili/report', methods=['GET', 'POST'])
@login_required(role=0, group='open')
def report():
passed, filename = uploadFile(current_user.id)
return render_template('report.html',
passed=passed,
filename=filename)
# return render_template('report.html')
@app.route('/omw/search', methods=['GET', 'POST'])
@app.route('/omw/search/<lang>,<lang2>/<q>', methods=['GET', 'POST'])
def search_omw(lang=None, q=None):
if lang and q:
lang_id = lang
lang_id2 = lang2
query = q
else:
lang_id = request.form['lang']
lang_id2 = request.form['lang2']
query = request.form['query']
query = query.strip()
sense = dd(list)
lang_sense = dd(lambda: dd(list))
# GO FROM FORM TO SENSE
for s in query_omw("""
SELECT s.id as s_id, ss_id, wid, fid, lang_id, pos_id, lemma
FROM (SELECT w_id as wid, form.id as fid, lang_id, pos_id, lemma
FROM (SELECT id, lang_id, pos_id, lemma
FROM f WHERE lemma GLOB ? AND lang_id in (?,?)) as form
JOIN wf_link ON form.id = wf_link.f_id) word
JOIN s ON wid=w_id
""", ['['+query[0].upper() + query[0].lower()+']'+query[1:],
lang_id,
lang_id2]):
sense[s['ss_id']] = [s['s_id'], s['wid'], s['fid'],
s['lang_id'], s['pos_id'], s['lemma']]
lang_sense[s['lang_id']][s['ss_id']] = [s['s_id'], s['wid'], s['fid'],
s['pos_id'], s['lemma']]
pos = fetch_pos()
lang_dct, lang_code = fetch_langs()
ss, senses, defs, exes, links = fetch_ss_basic(sense.keys())
labels = fetch_labels(lang_id, set(senses.keys()))
resp = make_response(render_template('omw_results.html',
langsel = int(lang_id),
langsel2 = int(lang_id2),
pos = pos,
lang_dct = lang_dct,
sense=sense,
senses=senses,
ss=ss,
links=links,
defs=defs,
exes=exes,
labels=labels))
resp.set_cookie('selected_lang', lang_id)
resp.set_cookie('selected_lang2', lang_id2)
return resp
@app.route('/omw/core', methods=['GET', 'POST'])
def omw_core(): ### FIXME add lang as a parameter?
return render_template('omw_core.html')
@app.route('/omw/concepts/<ssID>', methods=['GET', 'POST'])
@app.route('/omw/concepts/ili/<iliID>', methods=['GET', 'POST'])
def concepts_omw(ssID=None, iliID=None):
if iliID:
ss_ids = f_ss_id_by_ili_id(iliID)
ili, ilidefs = fetch_ili([iliID])
else:
ss_ids = [ssID]
ili, ili_defs = dict(), dict()
pos = fetch_pos()
langs_id, langs_code = fetch_langs()
ss, senses, defs, exes, links = fetch_ss_basic(ss_ids)
if (not iliID) and int(ssID) in ss:
iliID = ss[int(ssID)][0]
ili, ilidefs = fetch_ili([iliID])
sss = list(ss.keys())
for s in links:
for l in links[s]:
sss.extend(links[s][l])
selected_lang = request.cookies.get('selected_lang')
labels = fetch_labels(selected_lang, set(sss))
ssrels = fetch_ssrel()
ss_srcs=fetch_src_for_ss_id(ss_ids)
src_meta=fetch_src_meta()
core_ss, core_ili = fetch_core()
s_ids = []
for x in senses:
for y in senses[x]:
for (s_id, lemma, freq) in senses[x][y]:
s_ids.append(s_id)
slinks = fetch_sense_links(s_ids)
return render_template('omw_concept.html',
ssID=ssID,
iliID=iliID,
pos = pos,
langs = langs_id,
senses=senses,
slinks=slinks,
ss=ss,
links=links,
ssrels=ssrels,
defs=defs,
exes=exes,
ili=ili,
selected_lang = selected_lang,
selected_lang2 = request.cookies.get('selected_lang2'),
labels=labels,
ss_srcs=ss_srcs,
src_meta=src_meta,
core=core_ss)
@app.route('/omw/senses/<sID>', methods=['GET', 'POST'])
def omw_sense(sID=None):
"""display a single sense (and its variants)"""
if sID:
langs_id, langs_code = fetch_langs()
pos = fetch_pos()
s_id=int(sID)
sense = fetch_sense(s_id)
slinks = fetch_sense_links([s_id])
forms=fetch_forms(sense[3])
selected_lang = int(request.cookies.get('selected_lang'))
labels= fetch_labels(selected_lang,[sense[4]])
src_meta= fetch_src_meta()
src_sid=fetch_src_for_s_id([s_id])
srel = fetch_srel()
## get the canonical form for each linked sense
slabel=fetch_sense_labels([x for v in slinks[int(s_id)].values() for x in v])
sdefs = fetch_defs_by_sense([s_id])
sdef = ''
if selected_lang in sdefs[s_id]:
sdef = sdefs[s_id][selected_lang] ## requested language
else:
sdef = sdefs[min(sdefs[s_id].keys())] ## a language
return render_template('omw_sense.html',
s_id = sID,
sdef = sdef,
sense = sense,
slinks = slinks[s_id],
srel = srel,
forms=forms,
langs = langs_id,
pos = pos,
labels = labels,
slabel = slabel,
src_sid = src_sid,
src_meta = src_meta)
# URIs FOR ORIGINAL CONCEPT KEYS, BY INDIVIDUAL SOURCES
@app.route('/omw/src/<src>/<originalkey>', methods=['GET', 'POST'])
def src_omw(src=None, originalkey=None):
try:
proj = src[:src.index('-')]
ver = src[src.index('-')+1:]
src_id = f_src_id_by_proj_ver(proj, ver)
except:
src_id = None
if src_id:
ss = fetch_ss_id_by_src_orginalkey(src_id, originalkey)
else:
ss = None
return concepts_omw(ss)
## show wn statistics
##
##
@app.route('/omw/src/<src>', methods=['GET', 'POST'])
def omw_wn(src=None):
if src:
try:
proj = src[:src.index('-')]
ver = src[src.index('-')+1:]
src_id = f_src_id_by_proj_ver(proj, ver)
except:
src_id = None
srcs_meta = fetch_src_meta()
src_info = srcs_meta[src_id]
return render_template('omw_wn.html',
wn = src,
src_id=src_id,
src_info=src_info,
ssrel_stats=fetch_ssrel_stats(src_id),
pos_stats= fetch_src_id_pos_stats(src_id),
src_stats=fetch_src_id_stats(src_id),
licenses=licenses)
@app.route('/omw/src-latex/<src>', methods=['GET', 'POST'])
def omw_wn_latex(src=None):
if src:
try:
proj = src[:src.index('-')]
ver = src[src.index('-')+1:]
src_id = f_src_id_by_proj_ver(proj, ver)
except:
src_id = None
srcs_meta = fetch_src_meta()
src_info = srcs_meta[src_id]
return render_template('omw_wn_latex.html',
wn = src,
src_id=src_id,
src_info=src_info,
ssrel_stats=fetch_ssrel_stats(src_id),
pos_stats= fetch_src_id_pos_stats(src_id),
src_stats=fetch_src_id_stats(src_id))
@app.route('/cili.tsv')
def generate_cili_tsv():
tsv="""# omw_id ili_id projects\n"""
srcs = fetch_src()
ss =dict()
r = query_omw_direct("SELECT id, ili_id from ss")
for (ss_id, ili_id) in r:
ss[ss_id] = [ili_id]
src = dd(list)
r = query_omw_direct("SELECT ss_id, src_id, src_key from ss_src")
for (ss_id, src_id, src_key) in r:
src[ss_id].append("{}-{}:{}".format(srcs[src_id][0],
srcs[src_id][1],
src_key))
for ss_id in ss:
ili = 'i' + str(ss[ss_id][0]) if ss[ss_id][0] else 'None'
tsv += "{}\t{}\t{}\n".format(ss_id, ili, ";".join(src[ss_id]))
return Response(tsv, mimetype='text/tab-separated-values')
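# Illustrative sketch (not part of the original file): a data line of the TSV
# produced above, with made-up identifiers; fields are tab-separated and the
# projects column joins "<project>-<version>:<key>" entries with ";".
#   "12345\ti67890\tpwn-3.0:dog.n.01;wikt-1.0:dog"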
@app.context_processor
def utility_processor():
def scale_freq(f, maxfreq=1000):
if f > 0:
return 100 + 100 * log(f)/log(maxfreq)
else:
return 100
return dict(scale_freq=scale_freq)
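# Illustrative note (added for clarity, not in the original file): with the default
# maxfreq of 1000 the Jinja helper maps frequencies onto font sizes between 100% and
# 200%, e.g. scale_freq(1) -> 100.0 and scale_freq(1000) -> 200.0.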
# def style_sense(freq, conf, lang):
# """show confidence as opacity, show freq as size
# opacity is the square of the confidence
# freq is scaled as a % of maxfreq for that language
# TODO: highlight a word if searched for?"""
# style = ''
# if conf and conf < 1.0: ## should not be more than 1.0
# style += 'opacity: {f};'.format(conf*conf) ## degrade quicker
# if freq:
# ### should I be using a log here?
# maxfreq=1000 #(should do per lang)
# style += 'font-size: {f}%;'.format(100*(1+ log(freq)/log(maxfreq)))
# if style:
# style = "style='{}'".format(style)
## show proj statistics
#for proj in fetch_proj/
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', threaded=True)
|
mit
| -2,032,441,808,575,662,300
| 33.165072
| 150
| 0.51418
| false
| 3.507552
| false
| false
| false
|
schef/schef.github.io
|
source/07/mc-7-4-tp-cde-md.py
|
1
|
2572
|
#!/usr/bin/python
# Written by Stjepan Horvat
# ( zvanstefan@gmail.com )
# by the exercises from David Lucal Burge - Perfect Pitch Ear Traning Supercourse
# Thanks to Wojciech M. Zabolotny ( wzab@ise.pw.edu.pl ) for snd-virmidi example
# ( wzab@ise.pw.edu.pl )
import random
import time
import sys
import re
fname="/dev/snd/midiC2D0"
#fname=sys.argv[1]
fin=open(fname,"rb")
fout=open(fname,"wb")
#keymin=int(sys.argv[2])
#keymax=int(sys.argv[3])
#keymin=int(60)
#keymax=int(72)
#c major scale
print ("Exercise 7-4:")
print ("C D and E. Harmonic and melodic pitch indentification. Melodic doubles.")
#from c to c'' white tones
#c major scale
#notes = [ 36, 38, 40, 41, 43, 45, 47, 48, 50, 52, 53, 55, 57, 59, 60, 62, 64, 65, 67, 69, 71, 72, 74, 76, 77, 79, 81, 83, 84, 86, 88, 89, 91, 93, 95, 96 ]
notes = [ 36, 38, 40, 48, 50, 52, 60, 62, 64, 72, 74, 76, 84, 86, 88, 96 ]
noteC = [ 36, 48, 60, 72, 84, 96 ]
def playNote(note):
    # send a MIDI note-on (0x90) at full velocity, hold it briefly,
    # then send the matching note-off (0x80)
    fout.write((chr(0x90)+chr(note)+chr(127)).encode('utf-8'))
    fout.flush()
    time.sleep(0.7)
    fout.write((chr(0x80)+chr(note)+chr(127)).encode('utf-8'))
    fout.flush()
def nameNote(note):
if note in noteC:
return("C")
elif note-2 in noteC:
return("D")
elif note-4 in noteC:
return("E")
elif note-5 in noteC:
return("F")
elif note-7 in noteC:
return("G")
elif note-9 in noteC:
return("A")
elif note-11 in noteC:
return("H")
def name2Note(name):
if name == "c":
return(60)
if name == "d":
return(62)
if name == "e":
return(64)
usage = "Usage: 1-repeat, <note> <note> \"c d\", ?-usage."
round = 1
a = re.compile("^[c-e] [c-e]$")
try:
print(usage)
while True:
noteOne = random.choice(notes)
while True:
noteTwo = random.choice(notes)
if nameNote(noteOne) != nameNote(noteTwo):
break
match = False
while not match:
done = False
playNote(noteOne)
playNote(noteTwo)
while not done:
n = input("? ")
if n == "1":
playNote(noteOne)
playNote(noteTwo)
if n == "?":
print(usage)
                #TODO: bug, it accepts anything instead of only note names
elif a.match(n):
splitNote = n.split()
if splitNote[0] == nameNote(noteOne).lower() and splitNote[1] == nameNote(noteTwo).lower():
round += 1
print("Correct. Next round. " + str(round) + ".:")
done = True
match = True
else:
playNote(name2Note(splitNote[0]))
playNote(name2Note(splitNote[1]))
except KeyboardInterrupt:
pass
|
mit
| -7,023,858,542,194,064,000
| 24.979798
| 155
| 0.588647
| false
| 2.786566
| false
| false
| false
|
fortesg/fortrantestgenerator
|
config_fortrantestgenerator.py
|
1
|
1665
|
import os
ftgDir = os.path.dirname(os.path.realpath(__file__))
# Directory where FortranCallGraph is located
# REQUIRED
FCG_DIR = ftgDir + '/../fortrancallgraph'
# Configuration file to be used by FortranCallGraph
# OPTIONAL: When omitted or None, config variables required by FortranCallGraph must be assigned here
# When empty string (''), FortranCallGraph's default (config_fortrancallgraph.py) will be used.
# Variables can always be overwritten here
FCG_CONFIG_FILE = 'config_fortrancallgraph.py'
# Path of the templates to be used
# REQUIRED
TEMPLATE = ftgDir + '/templates/Standalone/Standalone.tmpl'
# Directory where the test drivers generated by -r will be placed
# REQUIRED
TEST_SOURCE_DIR = ''
# List containing locations of the source files that will be modified by -c
# OPTIONAL: When omitted or None same as SOURCE_DIRS from FortranCallGraph's configuration
# Can be used, for example, when preprocessed source files are analysed but the original source files shall be modified.
# Handle with care! -c creates backups (see BACKUP_SUFFIX) of the unmodified source files for later analyses,
# but those are only used when SOURCE_DIRS and MODIFY_SOURCE_DIRS are the same.
MODIFY_SOURCE_DIRS = None
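# Illustrative example (not part of the original configuration): hypothetical paths
# for the case where preprocessed sources are analysed but the originals are modified.
# MODIFY_SOURCE_DIRS = ['/path/to/original/src', '/path/to/original/support']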
# Suffix for backuped source files
# Will be used by later analyses, for example when you first run -c and later -r
# OPTIONAL, default: 'ftg-backup'
BACKUP_SUFFIX = 'ftg-backup'
# Prefix of subroutines generated by -c, will be excluded from later analyses
# Must fit to subroutine names defined in the template
# OPTIONAL, default: 'ftg_'
FTG_PREFIX = 'ftg_'
# Value of the template variable ${dataDir}
# OPTIONAL, default: '.'
TEST_DATA_BASE_DIR = '.'
|
gpl-3.0
| -4,443,048,437,855,805,400
| 38.642857
| 111
| 0.763363
| false
| 3.708241
| true
| false
| false
|
ethereum/pyethapp
|
pyethapp/app.py
|
1
|
30125
|
# -*- coding: utf8 -*-
from __future__ import print_function
from __future__ import absolute_import
from builtins import zip
from builtins import next
from builtins import range
import copy
import json
import os
import signal
import sys
from logging import StreamHandler
from uuid import uuid4
import click
import ethereum.slogging as slogging
import gevent
import rlp
from click import BadParameter
from devp2p.app import BaseApp
from devp2p.discovery import NodeDiscovery
from devp2p.peermanager import PeerManager
from devp2p.service import BaseService
from ethereum import config as eth_config
from ethereum.block import Block
from ethereum.snapshot import create_snapshot, load_snapshot as _load_snapshot
from ethereum.utils import (
encode_hex,
decode_hex,
to_string,
)
from gevent.event import Event
from . import config as app_config
from . import eth_protocol
from . import utils
from .accounts import AccountsService, Account
from .console_service import Console
from .db_service import DBService
from .eth_service import ChainService
from .jsonrpc import JSONRPCServer, IPCRPCServer
from .pow_service import PoWService
from pyethapp import __version__
from pyethapp.profiles import PROFILES, DEFAULT_PROFILE
from pyethapp.utils import merge_dict, load_contrib_services, FallbackChoice, \
enable_greenlet_debugger
log = slogging.get_logger('app')
services = [DBService, AccountsService, NodeDiscovery, PeerManager, ChainService,
PoWService, JSONRPCServer, IPCRPCServer, Console]
class EthApp(BaseApp):
client_name = 'pyethapp'
client_version = '%s/%s/%s' % (__version__, sys.platform,
'py%d.%d.%d' % sys.version_info[:3])
client_version_string = '%s/v%s' % (client_name, client_version)
start_console = False
default_config = dict(BaseApp.default_config)
default_config['client_version_string'] = client_version_string
default_config['post_app_start_callback'] = None
script_globals = {}
# TODO: Remove `profile` fallbacks in 1.4 or so
# Separators should be underscore!
@click.group(help='Welcome to {} {}'.format(EthApp.client_name, EthApp.client_version))
@click.option('--profile', type=FallbackChoice(
list(PROFILES.keys()),
{'frontier': 'livenet', 'morden': 'testnet'},
"PyEthApp's configuration profiles have been renamed to "
"'livenet' and 'testnet'. The previous values 'frontier' and "
"'morden' will be removed in a future update."),
default=DEFAULT_PROFILE, help="Configuration profile.", show_default=True)
@click.option('alt_config', '--Config', '-C', type=str, callback=app_config.validate_alt_config_file,
help='Alternative config file')
@click.option('config_values', '-c', multiple=True, type=str,
help='Single configuration parameters (<param>=<value>)')
@click.option('alt_data_dir', '-d', '--data-dir', multiple=False, type=str,
help='data directory', default=app_config.default_data_dir, show_default=True)
@click.option('-l', '--log_config', multiple=False, type=str, default=":info",
help='log_config string: e.g. ":info,eth:debug', show_default=True)
@click.option('--log-json/--log-no-json', default=False,
help='log as structured json output')
@click.option('--log-file', type=click.Path(dir_okay=False, writable=True, resolve_path=True),
help="Log to file instead of stderr.")
@click.option('-b', '--bootstrap_node', multiple=False, type=str,
help='single bootstrap_node as enode://pubkey@host:port')
@click.option('-m', '--mining_pct', multiple=False, type=int, default=0,
help='pct cpu used for mining')
@click.option('--unlock', multiple=True, type=str,
help='Unlock an account (prompts for password)')
@click.option('--password', type=click.File(), help='path to a password file')
@click.pass_context
def app(ctx, profile, alt_config, config_values, alt_data_dir, log_config,
bootstrap_node, log_json, mining_pct, unlock, password, log_file):
# configure logging
slogging.configure(log_config, log_json=log_json, log_file=log_file)
# data dir default or from cli option
alt_data_dir = os.path.expanduser(alt_data_dir)
data_dir = alt_data_dir or app_config.default_data_dir
app_config.setup_data_dir(data_dir) # if not available, sets up data_dir and required config
log.info('using data in', path=data_dir)
# prepare configuration
# config files only contain required config (privkeys) and config different from the default
if alt_config: # specified config file
config = app_config.load_config(alt_config)
if not config:
log.warning('empty config given. default config values will be used')
else: # load config from default or set data_dir
config = app_config.load_config(data_dir)
config['data_dir'] = data_dir
# Store custom genesis to restore if overridden by profile value
genesis_from_config_file = config.get('eth', {}).get('genesis')
# Store custom network_id to restore if overridden by profile value
network_id_from_config_file = config.get('eth', {}).get('network_id')
    # Store custom bootstrap_nodes to restore them if overridden by profile value
bootstrap_nodes_from_config_file = config.get('discovery', {}).get('bootstrap_nodes')
# add default config
app_config.update_config_with_defaults(config, app_config.get_default_config([EthApp] + services))
app_config.update_config_with_defaults(config, {'eth': {'block': eth_config.default_config}})
# Set config values based on profile selection
merge_dict(config, PROFILES[profile])
if genesis_from_config_file:
# Fixed genesis_hash taken from profile must be deleted as custom genesis loaded
del config['eth']['genesis_hash']
config['eth']['genesis'] = genesis_from_config_file
if network_id_from_config_file:
del config['eth']['network_id']
config['eth']['network_id'] = network_id_from_config_file
if bootstrap_nodes_from_config_file:
# Fixed bootstrap_nodes taken from profile must be deleted as custom bootstrap_nodes loaded
del config['discovery']['bootstrap_nodes']
config['discovery']['bootstrap_nodes'] = bootstrap_nodes_from_config_file
pre_cmd_line_config_genesis = config.get('eth', {}).get('genesis')
# override values with values from cmd line
for config_value in config_values:
try:
app_config.set_config_param(config, config_value)
except ValueError:
raise BadParameter('Config parameter must be of the form "a.b.c=d" where "a.b.c" '
'specifies the parameter to set and d is a valid yaml value '
'(example: "-c jsonrpc.port=5000")')
if pre_cmd_line_config_genesis != config.get('eth', {}).get('genesis'):
        # Fixed genesis_hash taken from profile must be deleted as custom genesis loaded
if 'genesis_hash' in config['eth']:
del config['eth']['genesis_hash']
# Load genesis config
app_config.update_config_from_genesis_json(config,
genesis_json_filename_or_dict=config['eth']['genesis'])
if bootstrap_node:
# [NOTE]: check it
config['discovery']['bootstrap_nodes'] = [to_string(bootstrap_node)]
if mining_pct > 0:
config['pow']['activated'] = True
config['pow']['cpu_pct'] = int(min(100, mining_pct))
if not config.get('pow', {}).get('activated'):
config['deactivated_services'].append(PoWService.name)
ctx.obj = {'config': config,
'unlock': unlock,
'password': password.read().rstrip() if password else None,
'log_file': log_file}
assert (password and ctx.obj['password'] is not None and len(
ctx.obj['password'])) or not password, "empty password file"
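# Illustrative sketch (not part of the original module): a typical invocation of the
# CLI group defined above; the command name, profile, data directory and config
# override below are hypothetical example values.
#   pyethapp --profile testnet -d ~/.pyethapp-testnet -c jsonrpc.port=5000 run --console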
@app.command()
@click.option('--dev/--nodev', default=False,
help='Drop into interactive debugger on unhandled exceptions.')
@click.option('--nodial/--dial', default=False, help='Do not dial nodes.')
@click.option('--fake/--nofake', default=False, help='Fake genesis difficulty.')
@click.option('--console', is_flag=True, help='Immediately drop into interactive console.')
@click.pass_context
def run(ctx, dev, nodial, fake, console):
"""Start the client ( --dev to stop on error)"""
config = ctx.obj['config']
if nodial:
# config['deactivated_services'].append(PeerManager.name)
# config['deactivated_services'].append(NodeDiscovery.name)
config['discovery']['bootstrap_nodes'] = []
config['discovery']['listen_port'] = 29873
config['p2p']['listen_port'] = 29873
config['p2p']['min_peers'] = 0
if fake:
config['eth']['block']['GENESIS_DIFFICULTY'] = 1024
config['eth']['block']['BLOCK_DIFF_FACTOR'] = 16
# create app
app = EthApp(config)
# development mode
if dev:
enable_greenlet_debugger()
try:
config['client_version'] += '/' + os.getlogin()
except:
log.warn("can't get and add login name to client_version")
pass
# dump config
if log.is_active('debug'):
dump_config(config)
# init and unlock accounts first to check coinbase
if AccountsService in services:
AccountsService.register_with_app(app)
unlock_accounts(ctx.obj['unlock'], app.services.accounts, password=ctx.obj['password'])
try:
app.services.accounts.coinbase
except ValueError as e:
log.fatal('invalid coinbase', coinbase=config.get('pow', {}).get('coinbase_hex'),
error=e.message)
sys.exit()
app.start_console = console
# register services
contrib_services = load_contrib_services(config)
for service in services + contrib_services:
assert issubclass(service, BaseService)
if service.name not in app.config['deactivated_services'] + [AccountsService.name]:
assert service.name not in app.services
service.register_with_app(app)
assert hasattr(app.services, service.name)
# start app
log.info('starting')
app.start()
if ctx.obj['log_file']:
log.info("Logging to file %s", ctx.obj['log_file'])
# User requested file logging - remove stderr handler
root_logger = slogging.getLogger()
for hndlr in root_logger.handlers:
if isinstance(hndlr, StreamHandler) and hndlr.stream == sys.stderr:
root_logger.removeHandler(hndlr)
break
if config['post_app_start_callback'] is not None:
config['post_app_start_callback'](app)
# wait for interrupt
evt = Event()
gevent.signal(signal.SIGQUIT, evt.set)
gevent.signal(signal.SIGTERM, evt.set)
evt.wait()
# finally stop
app.stop()
def dump_config(config):
cfg = copy.deepcopy(config)
alloc = cfg.get('eth', {}).get('block', {}).get('GENESIS_INITIAL_ALLOC', {})
if len(alloc) > 100:
log.debug('omitting reporting of %d accounts in genesis' % len(alloc))
del cfg['eth']['block']['GENESIS_INITIAL_ALLOC']
app_config.dump_config(cfg)
@app.command()
@click.pass_context
def config(ctx):
"""Show the config"""
dump_config(ctx.obj['config'])
@app.command()
@click.argument('file', type=click.File(), required=True)
@click.argument('name', type=str, required=True)
@click.pass_context
def blocktest(ctx, file, name):
"""Start after importing blocks from a file.
In order to prevent replacement of the local test chain by the main chain from the network, the
peermanager, if registered, is stopped before importing any blocks.
    Also, for block tests an in-memory database is used, so an already persisted chain stays in
    place.
"""
app = EthApp(ctx.obj['config'])
app.config['db']['implementation'] = 'EphemDB'
# register services
for service in services:
assert issubclass(service, BaseService)
if service.name not in app.config['deactivated_services']:
assert service.name not in app.services
service.register_with_app(app)
assert hasattr(app.services, service.name)
if ChainService.name not in app.services:
log.fatal('No chainmanager registered')
ctx.abort()
if DBService.name not in app.services:
log.fatal('No db registered')
ctx.abort()
log.info('loading block file', path=file.name)
try:
data = json.load(file)
    except ValueError:
        log.fatal('Invalid JSON file')
        ctx.abort()
if name not in data:
log.fatal('Name not found in file')
ctx.abort()
try:
blocks = utils.load_block_tests(list(data.values())[0], app.services.chain.chain.db)
except ValueError:
log.fatal('Invalid blocks encountered')
ctx.abort()
# start app
app.start()
if 'peermanager' in app.services:
app.services.peermanager.stop()
log.info('building blockchain')
Block.is_genesis = lambda self: self.number == 0
app.services.chain.chain._initialize_blockchain(genesis=blocks[0])
for block in blocks[1:]:
app.services.chain.chain.add_block(block)
# wait for interrupt
evt = Event()
gevent.signal(signal.SIGQUIT, evt.set)
gevent.signal(signal.SIGTERM, evt.set)
gevent.signal(signal.SIGINT, evt.set)
evt.wait()
# finally stop
app.stop()
@app.command('snapshot')
@click.option('-r', '--recent', type=int, default=1024,
              help='Number of recent blocks. The state before these blocks, plus the blocks themselves, will be dumped. On recovery these blocks are re-applied on top of the restored state. (default: 1024)')
@click.option('-f', '--filename', type=str, default=None,
              help='Output file name. (default: auto-generated file prefixed with snapshot-)')
@click.pass_context
def snapshot(ctx, recent, filename):
"""Take a snapshot of current world state.
The snapshot will be saved in JSON format, including data like chain configurations and accounts.
    It will overwrite the existing file if it already exists.
"""
app = EthApp(ctx.obj['config'])
DBService.register_with_app(app)
AccountsService.register_with_app(app)
ChainService.register_with_app(app)
if not filename:
import time
filename = 'snapshot-%d.json' % int(time.time()*1000)
s = create_snapshot(app.services.chain.chain, recent)
with open(filename, 'w') as f:
json.dump(s, f, sort_keys=False, indent=4, separators=(',', ': '), encoding='ascii')
print('snapshot saved to %s' % filename)
@app.command('load_snapshot')
@click.argument('filename', type=str)
@click.pass_context
def load_snapshot(ctx, filename):
"""Load snapshot FILE into local node database.
This process will OVERWRITE data in current database!!!
"""
app = EthApp(ctx.obj['config'])
DBService.register_with_app(app)
AccountsService.register_with_app(app)
ChainService.register_with_app(app)
with open(filename, 'r') as f:
s = json.load(f, encoding='ascii')
_load_snapshot(app.services.chain.chain, s)
print('snapshot %s loaded.' % filename)
@app.command('export')
@click.option('--from', 'from_', type=int, help='Number of the first block (default: genesis)')
@click.option('--to', type=int, help='Number of the last block (default: latest)')
@click.argument('file', type=click.File('ab'))
@click.pass_context
def export_blocks(ctx, from_, to, file):
"""Export the blockchain to FILE.
The chain will be stored in binary format, i.e. as a concatenated list of RLP encoded blocks,
starting with the earliest block.
If the file already exists, the additional blocks are appended. Otherwise, a new file is
created.
Use - to write to stdout.
"""
app = EthApp(ctx.obj['config'])
DBService.register_with_app(app)
AccountsService.register_with_app(app)
ChainService.register_with_app(app)
if from_ is None:
from_ = 0
head_number = app.services.chain.chain.head.number
if to is None:
to = head_number
if from_ < 0:
log.fatal('block numbers must not be negative')
sys.exit(1)
if to < from_:
log.fatal('"to" block must be newer than "from" block')
sys.exit(1)
if to > head_number:
log.fatal('"to" block not known (current head: {})'.format(head_number))
sys.exit(1)
log.info('Starting export')
for n in range(from_, to + 1):
log.debug('Exporting block {}'.format(n))
if (n - from_) % 50000 == 0:
log.info('Exporting block {} to {}'.format(n, min(n + 50000, to)))
block_hash = app.services.chain.chain.get_blockhash_by_number(n)
# bypass slow block decoding by directly accessing db
block_rlp = app.services.db.get(block_hash)
file.write(block_rlp)
log.info('Export complete')
@app.command('import')
@click.argument('file', type=click.File('rb'))
@click.pass_context
def import_blocks(ctx, file):
"""Import blocks from FILE.
Blocks are expected to be in binary format, i.e. as a concatenated list of RLP encoded blocks.
    Blocks are imported sequentially. If a block cannot be imported (e.g. because it is badly
    encoded, it is already in the chain, or its parent is not in the chain), it will be ignored, but
the process will continue. Sole exception: If neither the first block nor its parent is known,
importing will end right away.
Use - to read from stdin.
"""
app = EthApp(ctx.obj['config'])
DBService.register_with_app(app)
AccountsService.register_with_app(app)
ChainService.register_with_app(app)
chain = app.services.chain
assert chain.block_queue.empty()
data = file.read()
app.start()
def blocks():
"""Generator for blocks encoded in `data`."""
i = 0
while i < len(data):
try:
block_data, next_i = rlp.codec.consume_item(data, i)
except rlp.DecodingError:
log.fatal('invalid RLP encoding', byte_index=i)
sys.exit(1) # have to abort as we don't know where to continue
try:
if not isinstance(block_data, list) or len(block_data) != 3:
raise rlp.DeserializationError('', block_data)
yield eth_protocol.TransientBlock.init_from_rlp(block_data)
except (IndexError, rlp.DeserializationError):
log.warning('not a valid block', byte_index=i) # we can still continue
yield None
i = next_i
log.info('importing blocks')
# check if it makes sense to go through all blocks
first_block = next(blocks())
if first_block is None:
log.fatal('first block invalid')
sys.exit(1)
if not (chain.knows_block(first_block.header.hash) or
chain.knows_block(first_block.header.prevhash)):
log.fatal('unlinked chains', newest_known_block=chain.chain.head.number,
first_unknown_block=first_block.header.number)
sys.exit(1)
# import all blocks
for n, block in enumerate(blocks()):
if block is None:
log.warning('skipping block', number_in_file=n)
continue
log.debug('adding block to queue', number_in_file=n, number_in_chain=block.header.number)
app.services.chain.add_block(block, None) # None for proto
# let block processing finish
while not app.services.chain.block_queue.empty():
gevent.sleep()
app.stop()
log.info('import finished', head_number=app.services.chain.chain.head.number)
@app.group()
@click.pass_context
def account(ctx):
"""Manage accounts.
For accounts to be accessible by pyethapp, their keys must be stored in the keystore directory.
Its path can be configured through "accounts.keystore_dir".
"""
app = EthApp(ctx.obj['config'])
ctx.obj['app'] = app
AccountsService.register_with_app(app)
unlock_accounts(ctx.obj['unlock'], app.services.accounts, password=ctx.obj['password'])
@account.command('new')
@click.option('--uuid', '-i', help='equip the account with a random UUID', is_flag=True)
@click.pass_context
def new_account(ctx, uuid):
"""Create a new account.
This will generate a random private key and store it in encrypted form in the keystore
directory. You are prompted for the password that is employed (if no password file is
specified). If desired the private key can be associated with a random UUID (version 4) using
the --uuid flag.
"""
app = ctx.obj['app']
if uuid:
id_ = uuid4()
else:
id_ = None
password = ctx.obj['password']
if password is None:
password = click.prompt('Password to encrypt private key', default='', hide_input=True,
confirmation_prompt=True, show_default=False)
account = Account.new(password, uuid=id_)
account.path = os.path.join(app.services.accounts.keystore_dir, encode_hex(account.address))
try:
app.services.accounts.add_account(account)
except IOError:
click.echo('Could not write keystore file. Make sure you have write permission in the '
'configured directory and check the log for further information.')
sys.exit(1)
else:
click.echo('Account creation successful')
click.echo(' Address: {}'.format(encode_hex(account.address)))
click.echo(' Id: {}'.format(account.uuid))
@account.command('list')
@click.pass_context
def list_accounts(ctx):
"""List accounts with addresses and ids.
This prints a table of all accounts, numbered consecutively, along with their addresses and
ids. Note that some accounts do not have an id, and some addresses might be hidden (i.e. are
not present in the keystore file). In the latter case, you have to unlock the accounts (e.g.
via "pyethapp --unlock <account> account list") to display the address anyway.
"""
accounts = ctx.obj['app'].services.accounts
if len(accounts) == 0:
click.echo('no accounts found')
else:
fmt = '{i:>4} {address:<40} {id:<36} {locked:<1}'
click.echo(' {address:<40} {id:<36} {locked}'.format(address='Address (if known)',
id='Id (if any)',
locked='Locked'))
for i, account in enumerate(accounts):
click.echo(fmt.format(i='#' + to_string(i + 1),
address=encode_hex(account.address or ''),
id=account.uuid or '',
locked='yes' if account.locked else 'no'))
@account.command('import')
@click.argument('f', type=click.File(), metavar='FILE')
@click.option('--uuid', '-i', help='equip the new account with a random UUID', is_flag=True)
@click.pass_context
def import_account(ctx, f, uuid):
"""Import a private key from FILE.
FILE is the path to the file in which the private key is stored. The key is assumed to be hex
encoded, surrounding whitespace is stripped. A new account is created for the private key, as
if it was created with "pyethapp account new", and stored in the keystore directory. You will
be prompted for a password to encrypt the key (if no password file is specified). If desired a
random UUID (version 4) can be generated using the --uuid flag in order to identify the new
account later.
"""
app = ctx.obj['app']
if uuid:
id_ = uuid4()
else:
id_ = None
privkey_hex = f.read()
try:
privkey = decode_hex(privkey_hex.strip())
except TypeError:
click.echo('Could not decode private key from file (should be hex encoded)')
sys.exit(1)
password = ctx.obj['password']
if password is None:
password = click.prompt('Password to encrypt private key', default='', hide_input=True,
confirmation_prompt=True, show_default=False)
account = Account.new(password, privkey, uuid=id_)
account.path = os.path.join(app.services.accounts.keystore_dir, encode_hex(account.address))
try:
app.services.accounts.add_account(account)
except IOError:
click.echo('Could not write keystore file. Make sure you have write permission in the '
'configured directory and check the log for further information.')
sys.exit(1)
else:
click.echo('Account creation successful')
click.echo(' Address: {}'.format(encode_hex(account.address)))
click.echo(' Id: {}'.format(account.uuid))
@account.command('update')
@click.argument('account', type=str)
@click.pass_context
def update_account(ctx, account):
"""
Change the password of an account.
ACCOUNT identifies the account: It can be one of the following: an address, a uuid, or a
number corresponding to an entry in "pyethapp account list" (one based).
"update" first prompts for the current password to unlock the account. Next, the new password
must be entered.
    The password replacement procedure backs up the original keystore file in the keystore
directory, creates the new file, and finally deletes the backup. If something goes wrong, an
attempt will be made to restore the keystore file from the backup. In the event that this does
not work, it is possible to recover from the backup manually by simply renaming it. The backup
shares the same name as the original file, but with an appended "~" plus a number if necessary
to avoid name clashes.
As this command tampers with your keystore directory, it is advisable to perform a manual
backup in advance.
If a password is provided via the "--password" option (on the "pyethapp" base command), it will
be used to unlock the account, but not as the new password (as distinguished from
"pyethapp account new").
"""
app = ctx.obj['app']
unlock_accounts([account], app.services.accounts, password=ctx.obj['password'])
old_account = app.services.accounts.find(account)
if old_account.locked:
click.echo('Account needs to be unlocked in order to update its password')
sys.exit(1)
click.echo('Updating account')
click.echo('Address: {}'.format(encode_hex(old_account.address)))
click.echo(' Id: {}'.format(old_account.uuid))
new_password = click.prompt('New password', default='', hide_input=True,
confirmation_prompt=True, show_default=False)
try:
app.services.accounts.update_account(old_account, new_password)
except:
click.echo('Account update failed. Make sure that the keystore file has been restored '
'correctly (e.g. with "pyethapp --unlock <acct> account list"). If not, look '
'for automatic backup files in the keystore directory (suffix "~" or '
'"~<number>"). Check the log for further information.')
raise
click.echo('Account update successful')
def unlock_accounts(account_ids, account_service, max_attempts=3, password=None):
"""Unlock a list of accounts, prompting for passwords one by one if not given.
If a password is specified, it will be used to unlock all accounts. If not, the user is
prompted for one password per account.
    If an account cannot be identified or unlocked, an error message is logged and the program
    exits.
    :param account_ids: a list of account identifiers accepted by :meth:`AccountsService.find`
:param account_service: the account service managing the given accounts
:param max_attempts: maximum number of attempts per account before the unlocking process is
aborted (>= 1), or `None` to allow an arbitrary number of tries
:param password: optional password which will be used to unlock the accounts
"""
accounts = []
for account_id in account_ids:
try:
account = account_service.find(account_id)
except KeyError:
log.fatal('could not find account', identifier=account_id)
sys.exit(1)
accounts.append(account)
if password is not None:
for identifier, account in zip(account_ids, accounts):
try:
account.unlock(password)
except ValueError:
log.fatal('Could not unlock account with password from file',
account_id=identifier)
sys.exit(1)
return
max_attempts_str = to_string(max_attempts) if max_attempts else 'oo'
attempt_fmt = '(attempt {{attempt}}/{})'.format(max_attempts_str)
first_attempt_fmt = 'Password for account {id} ' + attempt_fmt
further_attempts_fmt = 'Wrong password. Please try again ' + attempt_fmt
for identifier, account in zip(account_ids, accounts):
attempt = 1
pw = click.prompt(first_attempt_fmt.format(id=identifier, attempt=1), hide_input=True,
default='', show_default=False)
while True:
attempt += 1
try:
account.unlock(pw)
except ValueError:
if max_attempts and attempt > max_attempts:
log.fatal('Too many unlock attempts', attempts=attempt, account_id=identifier)
sys.exit(1)
else:
pw = click.prompt(further_attempts_fmt.format(attempt=attempt),
hide_input=True, default='', show_default=False)
else:
break
assert not account.locked
if __name__ == '__main__':
# python app.py 2>&1 | less +F
app()
|
mit
| 1,139,392,548,384,149,500
| 39.220294
| 180
| 0.645378
| false
| 3.923037
| true
| false
| false
|
kirbyfan64/cppexpat
|
doc/source/conf.py
|
1
|
8300
|
# -*- coding: utf-8 -*-
#
# CppExpat documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 13 19:00:49 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['breathe']
breathe_projects_source = {'cppexpat': ('../..', ['cppexpat.hpp'])}
breathe_doxygen_config_options = {'EXCLUDE_SYMBOLS': 'CPPEXPAT_TO_PBASE'}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CppExpat'
copyright = u'2015, Ryan Gonzalez'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CppExpatdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'CppExpat.tex', u'CppExpat Documentation',
u'Ryan Gonzalez', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cppexpat', u'CppExpat Documentation',
[u'Ryan Gonzalez'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'CppExpat', u'CppExpat Documentation',
u'Ryan Gonzalez', 'CppExpat', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
mit
| -6,357,192,428,893,004,000
| 30.679389
| 79
| 0.707229
| false
| 3.661226
| true
| false
| false
|
ehashman/oh-mainline
|
vendor/packages/django-http-proxy/httpproxy/migrations/0001_initial.py
|
1
|
2970
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Request',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('method', models.CharField(max_length=20, verbose_name='method')),
('domain', models.CharField(max_length=100, verbose_name='domain')),
('port', models.PositiveSmallIntegerField(default=80)),
('path', models.CharField(max_length=250, verbose_name='path')),
('date', models.DateTimeField(auto_now=True)),
('querykey', models.CharField(verbose_name='query key', max_length=255, editable=False)),
],
options={
'get_latest_by': 'date',
'verbose_name': 'request',
'verbose_name_plural': 'requests',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='RequestParameter',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('type', models.CharField(default=b'G', max_length=1, choices=[(b'G', b'GET'), (b'P', b'POST')])),
('order', models.PositiveSmallIntegerField(default=1)),
('name', models.CharField(max_length=100, verbose_name='naam')),
('value', models.CharField(max_length=250, null=True, verbose_name='value', blank=True)),
('request', models.ForeignKey(related_name='parameters', verbose_name='request', to='httpproxy.Request')),
],
options={
'ordering': ('order',),
'verbose_name': 'request parameter',
'verbose_name_plural': 'request parameters',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Response',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.PositiveSmallIntegerField(default=200)),
('content_type', models.CharField(max_length=200, verbose_name='inhoudstype')),
('content', models.TextField(verbose_name='inhoud')),
('request', models.OneToOneField(verbose_name='request', to='httpproxy.Request')),
],
options={
'verbose_name': 'response',
'verbose_name_plural': 'responses',
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='request',
unique_together=set([('method', 'domain', 'port', 'path', 'querykey')]),
),
]
|
agpl-3.0
| 5,437,310,444,514,051,000
| 43.328358
| 122
| 0.537037
| false
| 4.513678
| false
| false
| false
|
Banbury/cartwheel-3d
|
Python/UI/MainWindow.py
|
1
|
5484
|
'''
Created on 2009-08-24
This module contains the main OpenGL application window that is used by all SNM applications
@author: beaudoin
'''
import wx
import UI
class MainWindow(wx.Frame):
"""The class for the main window."""
MIN_TOOLPANEL_WIDTH = 200
MIN_CONSOLE_HEIGHT = 100
def __init__(self, parent, id, title, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE,
name='frame', fps=30, glCanvasSize=wx.DefaultSize,
showConsole=True,
consoleEnvironment={} ):
# Check if a fixed glWindow was asked
fixedGlWindow = glCanvasSize != wx.DefaultSize
self._glCanvasSize = glCanvasSize
#
# Forcing a specific style on the window.
# Should this include styles passed?
style |= wx.NO_FULL_REPAINT_ON_RESIZE
# Not resizable if GL canvas is fixed size
if fixedGlWindow :
style &= ~wx.RESIZE_BORDER & ~wx.MAXIMIZE_BOX
super(MainWindow, self).__init__(parent, id, title, pos, size, style, name)
#
# Create the menu
self._menuBar = wx.MenuBar()
self._fileMenu = wx.Menu()
self._fileMenu.Append( wx.ID_OPEN, "&Open" )
self._fileMenu.Append( wx.ID_SAVE, "&Save" )
self._fileMenu.AppendSeparator()
self._fileMenu.Append( wx.ID_EXIT, "&Quit" )
self._menuBar.Append(self._fileMenu, "&File" )
self._helpMenu = wx.Menu()
self._helpMenu.Append( wx.ID_ABOUT, "&About" )
self._menuBar.Append(self._helpMenu, "&Help" )
self.SetMenuBar( self._menuBar )
#
# Create the GL canvas
attribList = (wx.glcanvas.WX_GL_RGBA, # RGBA
wx.glcanvas.WX_GL_DOUBLEBUFFER, # Double Buffered
wx.glcanvas.WX_GL_DEPTH_SIZE, 24, # 24 bit depth
wx.glcanvas.WX_GL_STENCIL_SIZE, 8 ) # 8 bit stencil
self._glCanvas = UI.GLPanel(self, fps = fps, size = glCanvasSize, attribList = attribList)
# Create the right window (sashed) where the tool panel will be
self._rightWindow = wx.SashLayoutWindow(self)
self._rightWindow.SetDefaultSize((MainWindow.MIN_TOOLPANEL_WIDTH * 1.3,-1))
self._rightWindow.SetMinimumSizeX(MainWindow.MIN_TOOLPANEL_WIDTH)
self._rightWindow.SetOrientation( wx.LAYOUT_VERTICAL )
self._rightWindow.SetAlignment( wx.LAYOUT_RIGHT )
if not fixedGlWindow:
self._rightWindow.SetSashVisible( wx.SASH_LEFT, True )
self._rightWindow.Bind( wx.EVT_SASH_DRAGGED, self.onSashDragRightWindow )
#
# Create the tool panel
self._toolPanel = UI.ToolPanel(self._rightWindow)
# Create the bottom window (sashed) where the console will be
self._bottomWindow = wx.SashLayoutWindow(self)
self._bottomWindow.SetDefaultSize((-1,MainWindow.MIN_CONSOLE_HEIGHT*2))
self._bottomWindow.SetMinimumSizeY(MainWindow.MIN_CONSOLE_HEIGHT)
self._bottomWindow.SetOrientation( wx.LAYOUT_HORIZONTAL )
self._bottomWindow.SetAlignment( wx.LAYOUT_BOTTOM )
if not fixedGlWindow:
self._bottomWindow.SetSashVisible( wx.SASH_TOP, True )
self._bottomWindow.Bind( wx.EVT_SASH_DRAGGED, self.onSashDragBottomWindow )
#
# Create the console window
self._console = UI.PythonConsole(self._bottomWindow, size=(-1,220), consoleEnvironment = consoleEnvironment )
if not showConsole:
self._bottomWindow.Hide()
self.Bind( wx.EVT_SIZE, self.onSize )
#
# Private methods
def _layoutFrame(self):
"""Private. Perform frame layout"""
wx.LayoutAlgorithm().LayoutFrame(self, self._glCanvas)
#
# Event handlers
def onSize(self, event):
self._layoutFrame()
if self._glCanvasSize != wx.DefaultSize :
currGlCanvasSize = self._glCanvas.GetSize()
diff = ( currGlCanvasSize[0] - self._glCanvasSize[0], currGlCanvasSize[1] - self._glCanvasSize[1] )
if diff == (0,0) :
return
currentSize = event.GetSize()
newSize= ( currentSize[0] - diff[0], currentSize[1] - diff[1] )
if newSize == currentSize :
return
self.SetSize( newSize )
self.SendSizeEvent()
def onSashDragRightWindow(self, event):
if event.GetDragStatus() == wx.SASH_STATUS_OUT_OF_RANGE:
return
self._rightWindow.SetDefaultSize((event.GetDragRect().width,-1))
self._layoutFrame()
def onSashDragBottomWindow(self, event):
if event.GetDragStatus() == wx.SASH_STATUS_OUT_OF_RANGE:
return
self._bottomWindow.SetDefaultSize((-1,event.GetDragRect().height))
self._layoutFrame()
#
# Accessors
def getGLCanvas(self):
"""Return the associated GL canvas."""
return self._glCanvas
def getToolPanel(self):
"""Return the associated tool panel."""
return self._toolPanel
def getFps(self):
"""Return the desired frame per second for this window."""
return self._glCanvas.getFps()
|
apache-2.0
| -7,808,601,954,536,289,000
| 34.317881
| 117
| 0.585157
| false
| 3.925555
| false
| false
| false
|
serkanaltuntas/yavst
|
yavst/prepare_gpf4.py
|
1
|
4894
|
#!/usr/bin/env python
#
#
#
# $Header: /opt/cvs/python/packages/share1.5/AutoDockTools/Utilities24/prepare_gpf4.py,v 1.10.4.3 2009/03/23 21:54:28 rhuey Exp $
#
import string
import os.path
import glob
from MolKit import Read
from AutoDockTools.GridParameters import GridParameters, grid_parameter_list4
from AutoDockTools.GridParameters import GridParameter4FileMaker
from AutoDockTools.atomTypeTools import AutoDock4_AtomTyper
def usage():
print "Usage: prepare_gpf4.py -l pdbqt_file -r pdbqt_file "
print " -l ligand_filename"
print " -r receptor_filename"
print
print "Optional parameters:"
print " [-i reference_gpf_filename]"
print " [-o output_gpf_filename]"
print " [-x flexres_filename]"
print " [-p parameter=newvalue. For example: -p ligand_types='HD,Br,A,C,OA' ]"
print " [-d directory of ligands to use to set types]"
print " [-y boolean to center grids on center of ligand]"
print " [-n boolean to NOT size_box_to_include_ligand]"
print " [-v]"
print
print "Prepare a grid parameter file (GPF) for AutoDock4."
print
print " The GPF will by default be <receptor>.gpf. This"
print "may be overridden using the -o flag."
if __name__ == '__main__':
import getopt
import sys
try:
opt_list, args = getopt.getopt(sys.argv[1:], 'vl:r:i:x:o:p:d:yn')
except getopt.GetoptError, msg:
print 'prepare_gpf4.py: %s' % msg
usage()
sys.exit(2)
receptor_filename = ligand_filename = None
    list_filename = gpf_filename = None
output_gpf_filename = None
flexres_filename = None
directory = None
parameters = []
verbose = None
center_on_ligand = False
size_box_to_include_ligand = True
for o, a in opt_list:
if o in ('-v', '--v'):
verbose = 1
if o in ('-l', '--l'):
ligand_filename = a
if verbose: print 'ligand_filename=', ligand_filename
if o in ('-r', '--r'):
receptor_filename = a
if verbose: print 'receptor_filename=', receptor_filename
if o in ('-i', '--i'):
gpf_filename = a
if verbose: print 'reference_gpf_filename=', gpf_filename
if o in ('-x', '--x'):
flexres_filename = a
if verbose: print 'flexres_filename=', flexres_filename
if o in ('-o', '--o'):
output_gpf_filename = a
if verbose: print 'output_gpf_filename=', output_gpf_filename
if o in ('-p', '--p'):
parameters.append(a)
if verbose: print 'parameters=', parameters
if o in ('-d', '--d'):
directory = a
if verbose: print 'directory=', directory
if o in ('-y', '--y'):
center_on_ligand = True
if verbose: print 'set center_on_ligand to ', center_on_ligand
if o in ('-n', '--n'):
size_box_to_include_ligand = False
if verbose: print 'set size_box_to_include_ligand to ', size_box_to_include_ligand
if o in ('-h', '--'):
usage()
sys.exit()
if (not receptor_filename) or (ligand_filename is None and directory is None):
print "prepare_gpf4.py: ligand and receptor filenames"
print " must be specified."
usage()
sys.exit()
gpfm = GridParameter4FileMaker(size_box_to_include_ligand=size_box_to_include_ligand,verbose=verbose)
if gpf_filename is not None:
gpfm.read_reference(gpf_filename)
if ligand_filename is not None:
gpfm.set_ligand(ligand_filename)
gpfm.set_receptor(receptor_filename)
if directory is not None:
gpfm.set_types_from_directory(directory)
if flexres_filename is not None:
flexmol = Read(flexres_filename)[0]
flexres_types = flexmol.allAtoms.autodock_element
lig_types = gpfm.gpo['ligand_types']['value'].split()
all_types = lig_types
for t in flexres_types:
if t not in all_types:
all_types.append(t)
all_types_string = all_types[0]
if len(all_types)>1:
for t in all_types[1:]:
all_types_string = all_types_string + " " + t
gpfm.gpo['ligand_types']['value'] = all_types_string
for p in parameters:
key,newvalue = string.split(p, '=')
kw = {key:newvalue}
apply(gpfm.set_grid_parameters, (), kw)
#gpfm.set_grid_parameters(spacing=1.0)
if center_on_ligand is True:
gpfm.gpo['gridcenterAuto']['value'] = 0
cenx,ceny,cenz = gpfm.ligand.getCenter()
gpfm.gpo['gridcenter']['value'] = "%.3f %.3f %.3f" %(cenx,ceny,cenz)
gpfm.write_gpf(output_gpf_filename)
#prepare_gpf4.py -l 1ebg_lig.pdbqt -r 1ebg_rec.pdbqt -p spacing=0.4 -p npts=[60,60,60] -i ref.gpf -o testing.gpf
|
mit
| -1,222,868,990,191,267,000
| 35.796992
| 129
| 0.592154
| false
| 3.28677
| false
| false
| false
|
wy182000/gyp
|
pylib/gyp/generator/ninja.py
|
1
|
80265
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import hashlib
import multiprocessing
import os.path
import re
import signal
import subprocess
import sys
import gyp
import gyp.common
import gyp.msvs_emulation
import gyp.MSVSUtil as MSVSUtil
import gyp.xcode_emulation
from gyp.common import GetEnvironFallback
import gyp.ninja_syntax as ninja_syntax
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
# Gyp expects the following variables to be expandable by the build
# system to the appropriate locations. Ninja prefers paths to be
# known at gyp time. To resolve this, introduce special
  # variables starting with $! and $| (which begin with a $ so gyp knows they
  # should be treated specially, but are otherwise invalid
  # ninja/shell variables) that are passed to gyp here but expanded
# before writing out into the target .ninja files; see
# ExpandSpecial.
# $! is used for variables that represent a path and that can only appear at
# the start of a string, while $| is used for variables that can appear
# anywhere in a string.
'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
'PRODUCT_DIR': '$!PRODUCT_DIR',
'CONFIGURATION_NAME': '$|CONFIGURATION_NAME',
# Special variables that may be used by gyp 'rule' targets.
# We generate definitions for these variables on the fly when processing a
# rule.
'RULE_INPUT_ROOT': '${root}',
'RULE_INPUT_DIRNAME': '${dirname}',
'RULE_INPUT_PATH': '${source}',
'RULE_INPUT_EXT': '${ext}',
'RULE_INPUT_NAME': '${name}',
}
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
# TODO: figure out how to not build extra host objects in the non-cross-compile
# case when this is enabled, and enable unconditionally.
generator_supports_multiple_toolsets = (
os.environ.get('GYP_CROSSCOMPILE') or
os.environ.get('AR_host') or
os.environ.get('CC_host') or
os.environ.get('CXX_host') or
os.environ.get('AR_target') or
os.environ.get('CC_target') or
os.environ.get('CXX_target'))
def StripPrefix(arg, prefix):
if arg.startswith(prefix):
return arg[len(prefix):]
return arg
def QuoteShellArgument(arg, flavor):
"""Quote a string such that it will be interpreted as a single argument
by the shell."""
# Rather than attempting to enumerate the bad shell characters, just
# whitelist common OK ones and quote anything else.
if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
return arg # No quoting necessary.
if flavor == 'win':
return gyp.msvs_emulation.QuoteForRspFile(arg)
return "'" + arg.replace("'", "'" + '"\'"' + "'") + "'"
def Define(d, flavor):
"""Takes a preprocessor define and returns a -D parameter that's ninja- and
shell-escaped."""
if flavor == 'win':
    # cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
d = d.replace('#', '\\%03o' % ord('#'))
return QuoteShellArgument(ninja_syntax.escape('-D' + d), flavor)
class Target:
"""Target represents the paths used within a single gyp target.
Conceptually, building a single target A is a series of steps:
1) actions/rules/copies generates source/resources/etc.
2) compiles generates .o files
3) link generates a binary (library/executable)
4) bundle merges the above in a mac bundle
(Any of these steps can be optional.)
From a build ordering perspective, a dependent target B could just
depend on the last output of this series of steps.
But some dependent commands sometimes need to reach inside the box.
For example, when linking B it needs to get the path to the static
library generated by A.
This object stores those paths. To keep things simple, member
variables only store concrete paths to single files, while methods
compute derived values like "the last output of the target".
"""
def __init__(self, type):
# Gyp type ("static_library", etc.) of this target.
self.type = type
# File representing whether any input dependencies necessary for
# dependent actions have completed.
self.preaction_stamp = None
# File representing whether any input dependencies necessary for
# dependent compiles have completed.
self.precompile_stamp = None
# File representing the completion of actions/rules/copies, if any.
self.actions_stamp = None
# Path to the output of the link step, if any.
self.binary = None
# Path to the file representing the completion of building the bundle,
# if any.
self.bundle = None
# On Windows, incremental linking requires linking against all the .objs
# that compose a .lib (rather than the .lib itself). That list is stored
# here.
self.component_objs = None
# Windows only. The import .lib is the output of a build step, but
# because dependents only link against the lib (not both the lib and the
# dll) we keep track of the import library here.
self.import_lib = None
def Linkable(self):
"""Return true if this is a target that can be linked against."""
return self.type in ('static_library', 'shared_library')
def UsesToc(self, flavor):
"""Return true if the target should produce a restat rule based on a TOC
file."""
# For bundles, the .TOC should be produced for the binary, not for
# FinalOutput(). But the naive approach would put the TOC file into the
# bundle, so don't do this for bundles for now.
if flavor == 'win' or self.bundle:
return False
return self.type in ('shared_library', 'loadable_module')
def PreActionInput(self, flavor):
"""Return the path, if any, that should be used as a dependency of
any dependent action step."""
if self.UsesToc(flavor):
return self.FinalOutput() + '.TOC'
return self.FinalOutput() or self.preaction_stamp
def PreCompileInput(self):
"""Return the path, if any, that should be used as a dependency of
any dependent compile step."""
return self.actions_stamp or self.precompile_stamp
def FinalOutput(self):
"""Return the last output of the target, which depends on all prior
steps."""
return self.bundle or self.binary or self.actions_stamp
# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file. Call these "gyp paths". This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from. We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory. Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
# into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
# an output file; the result can be namespaced such that it is unique
# to the input file name as well as the output target name.
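# A small worked example (paths are illustrative): for a gyp file at foo/bar.gyp built into
# out/Debug, GypPathToNinja('baz/quux.cc') yields '../../foo/baz/quux.cc' (relative to the build
# directory), while GypPathToUniqueOutput('baz/quux.o') for a target named 'targ' yields
# 'obj/foo/baz/targ.quux.o', matching the scheme documented in GypPathToUniqueOutput below.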
class NinjaWriter:
def __init__(self, qualified_target, target_outputs, base_dir, build_dir,
output_file, flavor, toplevel_dir=None):
"""
base_dir: path from source root to directory containing this gyp file,
by gyp semantics, all input paths are relative to this
build_dir: path from source root to build output
toplevel_dir: path to the toplevel directory
"""
self.qualified_target = qualified_target
self.target_outputs = target_outputs
self.base_dir = base_dir
self.build_dir = build_dir
self.ninja = ninja_syntax.Writer(output_file)
self.flavor = flavor
self.abs_build_dir = None
if toplevel_dir is not None:
self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir,
build_dir))
self.obj_ext = '.obj' if flavor == 'win' else '.o'
if flavor == 'win':
# See docstring of msvs_emulation.GenerateEnvironmentFiles().
self.win_env = {}
for arch in ('x86', 'x64'):
self.win_env[arch] = 'environment.' + arch
# Relative path from build output dir to base dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir)
self.build_to_base = os.path.join(build_to_top, base_dir)
# Relative path from base dir to build dir.
base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir)
self.base_to_build = os.path.join(base_to_top, build_dir)
def ExpandSpecial(self, path, product_dir=None):
"""Expand specials like $!PRODUCT_DIR in |path|.
If |product_dir| is None, assumes the cwd is already the product
dir. Otherwise, |product_dir| is the relative path to the product
dir.
"""
PRODUCT_DIR = '$!PRODUCT_DIR'
if PRODUCT_DIR in path:
if product_dir:
path = path.replace(PRODUCT_DIR, product_dir)
else:
path = path.replace(PRODUCT_DIR + '/', '')
path = path.replace(PRODUCT_DIR + '\\', '')
path = path.replace(PRODUCT_DIR, '.')
INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR'
if INTERMEDIATE_DIR in path:
int_dir = self.GypPathToUniqueOutput('gen')
# GypPathToUniqueOutput generates a path relative to the product dir,
# so insert product_dir in front if it is provided.
path = path.replace(INTERMEDIATE_DIR,
os.path.join(product_dir or '', int_dir))
CONFIGURATION_NAME = '$|CONFIGURATION_NAME'
path = path.replace(CONFIGURATION_NAME, self.config_name)
return path
def ExpandRuleVariables(self, path, root, dirname, source, ext, name):
if self.flavor == 'win':
path = self.msvs_settings.ConvertVSMacros(
path, config=self.config_name)
path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root)
path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'],
dirname)
path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source)
path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext)
path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name)
return path
def GypPathToNinja(self, path, env=None):
"""Translate a gyp path to a ninja path, optionally expanding environment
variable references in |path| with |env|.
See the above discourse on path conversions."""
if env:
if self.flavor == 'mac':
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
elif self.flavor == 'win':
path = gyp.msvs_emulation.ExpandMacros(path, env)
if path.startswith('$!'):
expanded = self.ExpandSpecial(path)
if self.flavor == 'win':
expanded = os.path.normpath(expanded)
return expanded
if '$|' in path:
path = self.ExpandSpecial(path)
assert '$' not in path, path
return os.path.normpath(os.path.join(self.build_to_base, path))
def GypPathToUniqueOutput(self, path, qualified=True):
"""Translate a gyp path to a ninja path for writing output.
If qualified is True, qualify the resulting filename with the name
of the target. This is necessary when e.g. compiling the same
path twice for two separate output targets.
See the above discourse on path conversions."""
path = self.ExpandSpecial(path)
assert not path.startswith('$'), path
# Translate the path following this scheme:
# Input: foo/bar.gyp, target targ, references baz/out.o
# Output: obj/foo/baz/targ.out.o (if qualified)
# obj/foo/baz/out.o (otherwise)
# (and obj.host instead of obj for cross-compiles)
#
# Why this scheme and not some other one?
# 1) for a given input, you can compute all derived outputs by matching
# its path, even if the input is brought via a gyp file with '..'.
# 2) simple files like libraries and stamps have a simple filename.
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
path_dir, path_basename = os.path.split(path)
if qualified:
path_basename = self.name + '.' + path_basename
return os.path.normpath(os.path.join(obj, self.base_dir, path_dir,
path_basename))
def WriteCollapsedDependencies(self, name, targets):
"""Given a list of targets, return a path for a single file
representing the result of building all the targets or None.
Uses a stamp file if necessary."""
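    # Illustrative: with two or more targets this emits a single 'stamp' build edge whose output
    # is '<name>.stamp' under the unique-output directory and returns that path; with exactly one
    # target the target itself is returned, and with an empty list the result is None.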
assert targets == filter(None, targets), targets
if len(targets) == 0:
return None
if len(targets) > 1:
stamp = self.GypPathToUniqueOutput(name + '.stamp')
targets = self.ninja.build(stamp, 'stamp', targets)
self.ninja.newline()
return targets[0]
def WriteSpec(self, spec, config_name, generator_flags,
case_sensitive_filesystem):
"""The main entry point for NinjaWriter: write the build rules for a spec.
Returns a Target object, which represents the output paths for this spec.
Returns None if there are no outputs (e.g. a settings-only 'none' type
target)."""
self.config_name = config_name
self.name = spec['target_name']
self.toolset = spec['toolset']
config = spec['configurations'][config_name]
self.target = Target(spec['type'])
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
self.xcode_settings = self.msvs_settings = None
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
if self.flavor == 'win':
self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec,
generator_flags)
arch = self.msvs_settings.GetArch(config_name)
self.ninja.variable('arch', self.win_env[arch])
self.ninja.variable('cc', '$cl_' + arch)
self.ninja.variable('cxx', '$cl_' + arch)
# Compute predepends for all rules.
# actions_depends is the dependencies this target depends on before running
# any of its action/rule/copy steps.
# compile_depends is the dependencies this target depends on before running
# any of its compile steps.
actions_depends = []
compile_depends = []
# TODO(evan): it is rather confusing which things are lists and which
# are strings. Fix these.
if 'dependencies' in spec:
for dep in spec['dependencies']:
if dep in self.target_outputs:
target = self.target_outputs[dep]
actions_depends.append(target.PreActionInput(self.flavor))
compile_depends.append(target.PreCompileInput())
actions_depends = filter(None, actions_depends)
compile_depends = filter(None, compile_depends)
actions_depends = self.WriteCollapsedDependencies('actions_depends',
actions_depends)
compile_depends = self.WriteCollapsedDependencies('compile_depends',
compile_depends)
self.target.preaction_stamp = actions_depends
self.target.precompile_stamp = compile_depends
# Write out actions, rules, and copies. These must happen before we
# compile any sources, so compute a list of predependencies for sources
# while we do it.
extra_sources = []
mac_bundle_depends = []
self.target.actions_stamp = self.WriteActionsRulesCopies(
spec, extra_sources, actions_depends, mac_bundle_depends)
# If we have actions/rules/copies, we depend directly on those, but
# otherwise we depend on dependent target's actions/rules/copies etc.
# We never need to explicitly depend on previous target's link steps,
# because no compile ever depends on them.
compile_depends_stamp = (self.target.actions_stamp or compile_depends)
# Write out the compilation steps, if any.
link_deps = []
sources = spec.get('sources', []) + extra_sources
if sources:
pch = None
if self.flavor == 'win':
gyp.msvs_emulation.VerifyMissingSources(
sources, self.abs_build_dir, generator_flags, self.GypPathToNinja)
pch = gyp.msvs_emulation.PrecompiledHeader(
self.msvs_settings, config_name, self.GypPathToNinja,
self.GypPathToUniqueOutput, self.obj_ext)
else:
pch = gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, self.GypPathToNinja,
lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang))
link_deps = self.WriteSources(
config_name, config, sources, compile_depends_stamp, pch,
case_sensitive_filesystem, spec)
# Some actions/rules output 'sources' that are already object files.
link_deps += [self.GypPathToNinja(f)
for f in sources if f.endswith(self.obj_ext)]
if self.flavor == 'win' and self.target.type == 'static_library':
self.target.component_objs = link_deps
# Write out a link step, if needed.
output = None
if link_deps or self.target.actions_stamp or actions_depends:
output = self.WriteTarget(spec, config_name, config, link_deps,
self.target.actions_stamp or actions_depends)
if self.is_mac_bundle:
mac_bundle_depends.append(output)
# Bundle all of the above together, if needed.
if self.is_mac_bundle:
output = self.WriteMacBundle(spec, mac_bundle_depends)
if not output:
return None
assert self.target.FinalOutput(), output
return self.target
def _WinIdlRule(self, source, prebuild, outputs):
"""Handle the implicit VS .idl rule for one source file. Fills |outputs|
with files that are generated."""
outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData(
source, self.config_name)
outdir = self.GypPathToNinja(outdir)
def fix_path(path, rel=None):
path = os.path.join(outdir, path)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
path = self.ExpandRuleVariables(
path, root, dirname, source, ext, basename)
if rel:
path = os.path.relpath(path, rel)
return path
vars = [(name, fix_path(value, outdir)) for name, value in vars]
output = [fix_path(p) for p in output]
vars.append(('outdir', outdir))
vars.append(('idlflags', flags))
input = self.GypPathToNinja(source)
self.ninja.build(output, 'idl', input,
variables=vars, order_only=prebuild)
outputs.extend(output)
def WriteWinIdlFiles(self, spec, prebuild):
"""Writes rules to match MSVS's implicit idl handling."""
assert self.flavor == 'win'
if self.msvs_settings.HasExplicitIdlRules(spec):
return []
outputs = []
for source in filter(lambda x: x.endswith('.idl'), spec['sources']):
self._WinIdlRule(source, prebuild, outputs)
return outputs
def WriteActionsRulesCopies(self, spec, extra_sources, prebuild,
mac_bundle_depends):
"""Write out the Actions, Rules, and Copies steps. Return a path
representing the outputs of these steps."""
outputs = []
extra_mac_bundle_resources = []
if 'actions' in spec:
outputs += self.WriteActions(spec['actions'], extra_sources, prebuild,
extra_mac_bundle_resources)
if 'rules' in spec:
outputs += self.WriteRules(spec['rules'], extra_sources, prebuild,
extra_mac_bundle_resources)
if 'copies' in spec:
outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends)
if 'sources' in spec and self.flavor == 'win':
outputs += self.WriteWinIdlFiles(spec, prebuild)
stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs)
if self.is_mac_bundle:
mac_bundle_resources = spec.get('mac_bundle_resources', []) + \
extra_mac_bundle_resources
self.WriteMacBundleResources(mac_bundle_resources, mac_bundle_depends)
self.WriteMacInfoPlist(mac_bundle_depends)
return stamp
def GenerateDescription(self, verb, message, fallback):
"""Generate and return a description of a build step.
|verb| is the short summary, e.g. ACTION or RULE.
|message| is a hand-written description, or None if not available.
|fallback| is the gyp-level name of the step, usable as a fallback.
"""
if self.toolset != 'target':
verb += '(%s)' % self.toolset
if message:
return '%s %s' % (verb, self.ExpandSpecial(message))
else:
return '%s %s: %s' % (verb, self.name, fallback)
def WriteActions(self, actions, extra_sources, prebuild,
extra_mac_bundle_resources):
# Actions cd into the base directory.
env = self.GetSortedXcodeEnv()
if self.flavor == 'win':
env = self.msvs_settings.GetVSMacroEnv(
'$!PRODUCT_DIR', config=self.config_name)
all_outputs = []
for action in actions:
# First write out a rule for the action.
name = '%s_%s' % (action['action_name'],
hashlib.md5(self.qualified_target).hexdigest())
description = self.GenerateDescription('ACTION',
action.get('message', None),
name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action)
if self.flavor == 'win' else False)
args = action['action']
rule_name, _ = self.WriteNewNinjaRule(name, args, description,
is_cygwin, env=env)
inputs = [self.GypPathToNinja(i, env) for i in action['inputs']]
if int(action.get('process_outputs_as_sources', False)):
extra_sources += action['outputs']
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += action['outputs']
outputs = [self.GypPathToNinja(o, env) for o in action['outputs']]
# Then write out an edge using the rule.
self.ninja.build(outputs, rule_name, inputs,
order_only=prebuild)
all_outputs += outputs
self.ninja.newline()
return all_outputs
def WriteRules(self, rules, extra_sources, prebuild,
extra_mac_bundle_resources):
env = self.GetSortedXcodeEnv()
all_outputs = []
for rule in rules:
# First write out a rule for the rule action.
name = '%s_%s' % (rule['rule_name'],
hashlib.md5(self.qualified_target).hexdigest())
# Skip a rule with no action and no inputs.
if 'action' not in rule and not rule.get('rule_sources', []):
continue
args = rule['action']
description = self.GenerateDescription(
'RULE',
rule.get('message', None),
('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule)
if self.flavor == 'win' else False)
rule_name, args = self.WriteNewNinjaRule(
name, args, description, is_cygwin, env=env)
# TODO: if the command references the outputs directly, we should
# simplify it to just use $out.
# Rules can potentially make use of some special variables which
# must vary per source file.
# Compute the list of variables we'll need to provide.
special_locals = ('source', 'root', 'dirname', 'ext', 'name')
needed_variables = set(['source'])
for argument in args:
for var in special_locals:
if ('${%s}' % var) in argument:
needed_variables.add(var)
def cygwin_munge(path):
if is_cygwin:
return path.replace('\\', '/')
return path
# For each source file, write an edge that generates all the outputs.
for source in rule.get('rule_sources', []):
source = os.path.normpath(source)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
# Gather the list of inputs and outputs, expanding $vars if possible.
outputs = [self.ExpandRuleVariables(o, root, dirname,
source, ext, basename)
for o in rule['outputs']]
inputs = [self.ExpandRuleVariables(i, root, dirname,
source, ext, basename)
for i in rule.get('inputs', [])]
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
extra_bindings = []
for var in needed_variables:
if var == 'root':
extra_bindings.append(('root', cygwin_munge(root)))
elif var == 'dirname':
# '$dirname' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build)
extra_bindings.append(('dirname', cygwin_munge(dirname_expanded)))
elif var == 'source':
# '$source' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
source_expanded = self.ExpandSpecial(source, self.base_to_build)
extra_bindings.append(('source', cygwin_munge(source_expanded)))
elif var == 'ext':
extra_bindings.append(('ext', ext))
elif var == 'name':
extra_bindings.append(('name', cygwin_munge(basename)))
else:
            assert var is None, repr(var)
inputs = [self.GypPathToNinja(i, env) for i in inputs]
outputs = [self.GypPathToNinja(o, env) for o in outputs]
extra_bindings.append(('unique_name',
hashlib.md5(outputs[0]).hexdigest()))
self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
implicit=inputs,
order_only=prebuild,
variables=extra_bindings)
all_outputs.extend(outputs)
return all_outputs
def WriteCopies(self, copies, prebuild, mac_bundle_depends):
outputs = []
env = self.GetSortedXcodeEnv()
for copy in copies:
for path in copy['files']:
# Normalize the path so trailing slashes don't confuse us.
path = os.path.normpath(path)
basename = os.path.split(path)[1]
src = self.GypPathToNinja(path, env)
dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
env)
outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
if self.is_mac_bundle:
# gyp has mac_bundle_resources to copy things into a bundle's
# Resources folder, but there's no built-in way to copy files to other
# places in the bundle. Hence, some targets use copies for this. Check
# if this file is copied into the current bundle, and if so add it to
# the bundle depends so that dependent targets get rebuilt if the copy
# input changes.
if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()):
mac_bundle_depends.append(dst)
return outputs
def WriteMacBundleResources(self, resources, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources'."""
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, map(self.GypPathToNinja, resources)):
output = self.ExpandSpecial(output)
self.ninja.build(output, 'mac_tool', res,
variables=[('mactool_cmd', 'copy-bundle-resource')])
bundle_depends.append(output)
def WriteMacInfoPlist(self, bundle_depends):
"""Write build rules for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, self.GypPathToNinja)
if not info_plist:
return
out = self.ExpandSpecial(out)
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = self.GypPathToUniqueOutput(
os.path.basename(info_plist))
defines = ' '.join([Define(d, self.flavor) for d in defines])
info_plist = self.ninja.build(intermediate_plist, 'infoplist', info_plist,
variables=[('defines',defines)])
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
self.ninja.build(out, 'mac_tool', info_plist,
variables=[('mactool_cmd', 'copy-info-plist'),
('env', env)])
bundle_depends.append(out)
def WriteSources(self, config_name, config, sources, predepends,
precompiled_header, case_sensitive_filesystem, spec):
"""Write build rules to compile all of |sources|."""
if self.toolset == 'host':
self.ninja.variable('ar', '$ar_host')
self.ninja.variable('cc', '$cc_host')
self.ninja.variable('cxx', '$cxx_host')
self.ninja.variable('ld', '$ld_host')
extra_defines = []
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(config_name)
cflags_c = self.xcode_settings.GetCflagsC(config_name)
cflags_cc = self.xcode_settings.GetCflagsCC(config_name)
cflags_objc = ['$cflags_c'] + \
self.xcode_settings.GetCflagsObjC(config_name)
cflags_objcc = ['$cflags_cc'] + \
self.xcode_settings.GetCflagsObjCC(config_name)
elif self.flavor == 'win':
cflags = self.msvs_settings.GetCflags(config_name)
cflags_c = self.msvs_settings.GetCflagsC(config_name)
cflags_cc = self.msvs_settings.GetCflagsCC(config_name)
extra_defines = self.msvs_settings.GetComputedDefines(config_name)
pdbpath = self.msvs_settings.GetCompilerPdbName(
config_name, self.ExpandSpecial)
if not pdbpath:
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
pdbpath = os.path.normpath(os.path.join(obj, self.base_dir,
self.name + '.pdb'))
self.WriteVariableList('pdbname', [pdbpath])
self.WriteVariableList('pchprefix', [self.name])
else:
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cc = config.get('cflags_cc', [])
defines = config.get('defines', []) + extra_defines
self.WriteVariableList('defines', [Define(d, self.flavor) for d in defines])
if self.flavor == 'win':
self.WriteVariableList('rcflags',
[QuoteShellArgument(self.ExpandSpecial(f), self.flavor)
for f in self.msvs_settings.GetRcflags(config_name,
self.GypPathToNinja)])
include_dirs = config.get('include_dirs', [])
if self.flavor == 'win':
include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs,
config_name)
env = self.GetSortedXcodeEnv()
self.WriteVariableList('includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in include_dirs])
pch_commands = precompiled_header.GetPchBuildCommands()
if self.flavor == 'mac':
# Most targets use no precompiled headers, so only write these if needed.
for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'),
('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]:
include = precompiled_header.GetInclude(ext)
if include: self.ninja.variable(var, include)
self.WriteVariableList('cflags', map(self.ExpandSpecial, cflags))
self.WriteVariableList('cflags_c', map(self.ExpandSpecial, cflags_c))
self.WriteVariableList('cflags_cc', map(self.ExpandSpecial, cflags_cc))
if self.flavor == 'mac':
self.WriteVariableList('cflags_objc', map(self.ExpandSpecial,
cflags_objc))
self.WriteVariableList('cflags_objcc', map(self.ExpandSpecial,
cflags_objcc))
self.ninja.newline()
outputs = []
for source in sources:
filename, ext = os.path.splitext(source)
ext = ext[1:]
obj_ext = self.obj_ext
if ext in ('cc', 'cpp', 'cxx'):
command = 'cxx'
elif ext == 'c' or (ext == 'S' and self.flavor != 'win'):
command = 'cc'
elif ext == 's' and self.flavor != 'win': # Doesn't generate .o.d files.
command = 'cc_s'
elif (self.flavor == 'win' and ext == 'asm' and
self.msvs_settings.GetArch(config_name) == 'x86' and
not self.msvs_settings.HasExplicitAsmRules(spec)):
# Asm files only get auto assembled for x86 (not x64).
command = 'asm'
        # Use the separate _asm suffix so a .asm file does not produce an
        # object that collides with a .cc file of the same basename; MSVS
        # handles that case natively, so gyp inputs may rely on it.
obj_ext = '_asm.obj'
elif self.flavor == 'mac' and ext == 'm':
command = 'objc'
elif self.flavor == 'mac' and ext == 'mm':
command = 'objcxx'
elif self.flavor == 'win' and ext == 'rc':
command = 'rc'
obj_ext = '.res'
else:
# Ignore unhandled extensions.
continue
input = self.GypPathToNinja(source)
output = self.GypPathToUniqueOutput(filename + obj_ext)
# Ninja's depfile handling gets confused when the case of a filename
# changes on a case-insensitive file system. To work around that, always
# convert .o filenames to lowercase on such file systems. See
# https://github.com/martine/ninja/issues/402 for details.
if not case_sensitive_filesystem:
output = output.lower()
implicit = precompiled_header.GetObjDependencies([input], [output])
variables = []
if self.flavor == 'win':
variables, output, implicit = precompiled_header.GetFlagsModifications(
input, output, implicit, command, cflags_c, cflags_cc,
self.ExpandSpecial)
self.ninja.build(output, command, input,
implicit=[gch for _, _, gch in implicit],
order_only=predepends, variables=variables)
outputs.append(output)
self.WritePchTargets(pch_commands)
self.ninja.newline()
return outputs
def WritePchTargets(self, pch_commands):
"""Writes ninja rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
var_name = {
'c': 'cflags_pch_c',
'cc': 'cflags_pch_cc',
'm': 'cflags_pch_objc',
'mm': 'cflags_pch_objcc',
}[lang]
      lang_to_rule = { 'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx', }
      cmd = lang_to_rule.get(lang)
self.ninja.build(gch, cmd, input, variables=[(var_name, lang_flag)])
def WriteLink(self, spec, config_name, config, link_deps):
"""Write out a link step. Fills out target.binary. """
command = {
'executable': 'link',
'loadable_module': 'solink_module',
'shared_library': 'solink',
}[spec['type']]
command_suffix = ''
implicit_deps = set()
solibs = set()
if 'dependencies' in spec:
# Two kinds of dependencies:
# - Linkable dependencies (like a .a or a .so): add them to the link line.
# - Non-linkable dependencies (like a rule that generates a file
# and writes a stamp file): add them to implicit_deps
extra_link_deps = set()
for dep in spec['dependencies']:
target = self.target_outputs.get(dep)
if not target:
continue
linkable = target.Linkable()
if linkable:
if (self.flavor == 'win' and
target.component_objs and
self.msvs_settings.IsUseLibraryDependencyInputs(config_name)):
extra_link_deps |= set(target.component_objs)
elif self.flavor == 'win' and target.import_lib:
extra_link_deps.add(target.import_lib)
elif target.UsesToc(self.flavor):
solibs.add(target.binary)
implicit_deps.add(target.binary + '.TOC')
else:
extra_link_deps.add(target.binary)
final_output = target.FinalOutput()
if not linkable or final_output != target.binary:
implicit_deps.add(final_output)
link_deps.extend(list(extra_link_deps))
extra_bindings = []
if self.is_mac_bundle:
output = self.ComputeMacBundleBinaryOutput()
else:
output = self.ComputeOutput(spec)
extra_bindings.append(('postbuilds',
self.GetPostbuildCommand(spec, output, output)))
is_executable = spec['type'] == 'executable'
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(config_name,
self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
self.GypPathToNinja)
elif self.flavor == 'win':
manifest_name = self.GypPathToUniqueOutput(
self.ComputeOutputFileName(spec))
ldflags, manifest_files = self.msvs_settings.GetLdflags(config_name,
self.GypPathToNinja, self.ExpandSpecial, manifest_name, is_executable)
self.WriteVariableList('manifests', manifest_files)
command_suffix = _GetWinLinkRuleNameSuffix(
self.msvs_settings.IsEmbedManifest(config_name),
self.msvs_settings.IsLinkIncremental(config_name))
def_file = self.msvs_settings.GetDefFile(self.GypPathToNinja)
if def_file:
implicit_deps.add(def_file)
else:
ldflags = config.get('ldflags', [])
if is_executable and len(solibs):
rpath = 'lib/'
if self.toolset != 'target':
rpath += self.toolset
ldflags.append('-Wl,-rpath=\$$ORIGIN/%s' % rpath)
ldflags.append('-Wl,-rpath-link=%s' % rpath)
self.WriteVariableList('ldflags',
gyp.common.uniquer(map(self.ExpandSpecial,
ldflags)))
library_dirs = config.get('library_dirs', [])
if self.flavor == 'win':
library_dirs = [self.msvs_settings.ConvertVSMacros(l, config_name)
for l in library_dirs]
library_dirs = [QuoteShellArgument('-LIBPATH:' + self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
else:
library_dirs = [QuoteShellArgument('-L' + self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
libraries = gyp.common.uniquer(map(self.ExpandSpecial,
spec.get('libraries', [])))
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries)
elif self.flavor == 'win':
libraries = self.msvs_settings.AdjustLibraries(libraries)
self.WriteVariableList('libs', library_dirs + libraries)
self.target.binary = output
if command in ('solink', 'solink_module'):
extra_bindings.append(('soname', os.path.split(output)[1]))
extra_bindings.append(('lib',
gyp.common.EncodePOSIXShellArgument(output)))
if self.flavor == 'win':
extra_bindings.append(('dll', output))
if '/NOENTRY' not in ldflags:
self.target.import_lib = output + '.lib'
extra_bindings.append(('implibflag',
'/IMPLIB:%s' % self.target.import_lib))
output = [output, self.target.import_lib]
else:
output = [output, output + '.TOC']
if len(solibs):
extra_bindings.append(('solibs', gyp.common.EncodePOSIXShellList(solibs)))
self.ninja.build(output, command + command_suffix, link_deps,
implicit=list(implicit_deps),
variables=extra_bindings)
def WriteTarget(self, spec, config_name, config, link_deps, compile_deps):
if spec['type'] == 'none':
# TODO(evan): don't call this function for 'none' target types, as
# it doesn't do anything, and we fake out a 'binary' with a stamp file.
self.target.binary = compile_deps
elif spec['type'] == 'static_library':
self.target.binary = self.ComputeOutput(spec)
variables = []
postbuild = self.GetPostbuildCommand(
spec, self.target.binary, self.target.binary)
if postbuild:
variables.append(('postbuilds', postbuild))
if self.xcode_settings:
libtool_flags = self.xcode_settings.GetLibtoolflags(config_name)
if libtool_flags:
variables.append(('libtool_flags', libtool_flags))
if (self.flavor not in ('mac', 'openbsd', 'win') and not
self.is_standalone_static_library):
self.ninja.build(self.target.binary, 'alink_thin', link_deps,
order_only=compile_deps, variables=variables)
else:
if self.msvs_settings:
libflags = self.msvs_settings.GetLibFlags(config_name,
self.GypPathToNinja)
variables.append(('libflags', libflags))
self.ninja.build(self.target.binary, 'alink', link_deps,
order_only=compile_deps, variables=variables)
else:
self.WriteLink(spec, config_name, config, link_deps)
return self.target.binary
def WriteMacBundle(self, spec, mac_bundle_depends):
assert self.is_mac_bundle
package_framework = spec['type'] in ('shared_library', 'loadable_module')
output = self.ComputeMacBundleOutput()
postbuild = self.GetPostbuildCommand(spec, output, self.target.binary,
is_command_start=not package_framework)
variables = []
if postbuild:
variables.append(('postbuilds', postbuild))
if package_framework:
variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
self.ninja.build(output, 'package_framework', mac_bundle_depends,
variables=variables)
else:
self.ninja.build(output, 'stamp', mac_bundle_depends,
variables=variables)
self.target.bundle = output
return output
def GetSortedXcodeEnv(self, additional_settings=None):
"""Returns the variables Xcode would set for build steps."""
assert self.abs_build_dir
abs_build_dir = self.abs_build_dir
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, abs_build_dir,
os.path.join(abs_build_dir, self.build_to_base), self.config_name,
additional_settings)
def GetSortedXcodePostbuildEnv(self):
"""Returns the variables Xcode would set for postbuild steps."""
postbuild_settings = {}
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE')
if strip_save_file:
postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file
return self.GetSortedXcodeEnv(additional_settings=postbuild_settings)
def GetPostbuildCommand(self, spec, output, output_binary,
is_command_start=False):
"""Returns a shell command that runs all the postbuilds, and removes
|output| if any of them fails. If |is_command_start| is False, then the
returned string will start with ' && '."""
if not self.xcode_settings or spec['type'] == 'none' or not output:
return ''
output = QuoteShellArgument(output, self.flavor)
target_postbuilds = []
if output_binary is not None:
target_postbuilds = self.xcode_settings.GetTargetPostbuilds(
self.config_name,
os.path.normpath(os.path.join(self.base_to_build, output)),
QuoteShellArgument(
os.path.normpath(os.path.join(self.base_to_build, output_binary)),
self.flavor),
quiet=True)
postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True)
postbuilds = target_postbuilds + postbuilds
if not postbuilds:
return ''
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(
['cd', self.build_to_base]))
env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv())
    # G will be non-zero if any postbuild fails. Run all postbuilds in a
    # subshell.
commands = env + ' (' + \
' && '.join([ninja_syntax.escape(command) for command in postbuilds])
command_string = (commands + '); G=$$?; '
# Remove the final output if any postbuild failed.
'((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)')
if is_command_start:
return '(' + command_string + ' && '
else:
return '$ && (' + command_string
def ComputeExportEnvString(self, env):
"""Given an environment, returns a string looking like
    'export FOO=foo; export BAR="${FOO} bar";'
that exports |env| to the shell."""
export_str = []
for k, v in env:
export_str.append('export %s=%s;' %
(k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(v))))
return ' '.join(export_str)
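  # Editor's note (illustrative, not part of the original source): given
  # env = [('FOO', 'foo'), ('BAR', 'bar baz')], the result is roughly
  #   export FOO=foo; export BAR="bar baz";
  # which GetPostbuildCommand prepends to the postbuild command string.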
def ComputeMacBundleOutput(self):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return self.ExpandSpecial(
os.path.join(path, self.xcode_settings.GetWrapperName()))
def ComputeMacBundleBinaryOutput(self):
"""Return the 'output' (full output path) to the binary in a bundle."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return self.ExpandSpecial(
os.path.join(path, self.xcode_settings.GetExecutablePath()))
def ComputeOutputFileName(self, spec, type=None):
"""Compute the filename of the final output for the current target."""
if not type:
type = spec['type']
default_variables = copy.copy(generator_default_variables)
CalculateVariables(default_variables, {'flavor': self.flavor})
# Compute filename prefix: the product prefix, or a default for
# the product type.
DEFAULT_PREFIX = {
'loadable_module': default_variables['SHARED_LIB_PREFIX'],
'shared_library': default_variables['SHARED_LIB_PREFIX'],
'static_library': default_variables['STATIC_LIB_PREFIX'],
'executable': default_variables['EXECUTABLE_PREFIX'],
}
prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))
# Compute filename extension: the product extension, or a default
# for the product type.
DEFAULT_EXTENSION = {
'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
'shared_library': default_variables['SHARED_LIB_SUFFIX'],
'static_library': default_variables['STATIC_LIB_SUFFIX'],
'executable': default_variables['EXECUTABLE_SUFFIX'],
}
extension = spec.get('product_extension')
if extension:
extension = '.' + extension
else:
extension = DEFAULT_EXTENSION.get(type, '')
if 'product_name' in spec:
# If we were given an explicit name, use that.
target = spec['product_name']
else:
# Otherwise, derive a name from the target name.
target = spec['target_name']
if prefix == 'lib':
# Snip out an extra 'lib' from libs if appropriate.
target = StripPrefix(target, 'lib')
if type in ('static_library', 'loadable_module', 'shared_library',
'executable'):
return '%s%s%s' % (prefix, target, extension)
elif type == 'none':
return '%s.stamp' % target
else:
raise Exception('Unhandled output type %s' % type)
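  # Editor's note (illustrative, not part of the original source): with the
  # default prefixes/suffixes computed above, a Linux-flavored target named
  # 'foo' maps roughly to
  #   'executable'     -> 'foo'
  #   'shared_library' -> 'libfoo.so'
  #   'static_library' -> 'libfoo.a'
  #   'none'           -> 'foo.stamp'
  # and a target already named 'libfoo' has the duplicate 'lib' stripped.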
def ComputeOutput(self, spec, type=None):
"""Compute the path for the final output of the spec."""
assert not self.is_mac_bundle or type
if not type:
type = spec['type']
if self.flavor == 'win':
override = self.msvs_settings.GetOutputName(self.config_name,
self.ExpandSpecial)
if override:
return override
if self.flavor == 'mac' and type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
filename = self.xcode_settings.GetExecutablePath()
else:
filename = self.ComputeOutputFileName(spec, type)
if 'product_dir' in spec:
path = os.path.join(spec['product_dir'], filename)
return self.ExpandSpecial(path)
# Some products go into the output root, libraries go into shared library
# dir, and everything else goes into the normal place.
type_in_output_root = ['executable', 'loadable_module']
if self.flavor == 'mac' and self.toolset == 'target':
type_in_output_root += ['shared_library', 'static_library']
elif self.flavor == 'win' and self.toolset == 'target':
type_in_output_root += ['shared_library']
if type in type_in_output_root or self.is_standalone_static_library:
return filename
elif type == 'shared_library':
libdir = 'lib'
if self.toolset != 'target':
libdir = os.path.join('lib', '%s' % self.toolset)
return os.path.join(libdir, filename)
else:
return self.GypPathToUniqueOutput(filename, qualified=False)
def WriteVariableList(self, var, values):
assert not isinstance(values, str)
if values is None:
values = []
self.ninja.variable(var, ' '.join(values))
def WriteNewNinjaRule(self, name, args, description, is_cygwin, env):
"""Write out a new ninja "rule" statement for a given command.
Returns the name of the new rule, and a copy of |args| with variables
expanded."""
if self.flavor == 'win':
args = [self.msvs_settings.ConvertVSMacros(
arg, self.base_to_build, config=self.config_name)
for arg in args]
description = self.msvs_settings.ConvertVSMacros(
description, config=self.config_name)
elif self.flavor == 'mac':
# |env| is an empty list on non-mac.
args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args]
description = gyp.xcode_emulation.ExpandEnvVars(description, env)
# TODO: we shouldn't need to qualify names; we do it because
# currently the ninja rule namespace is global, but it really
# should be scoped to the subninja.
rule_name = self.name
if self.toolset == 'target':
rule_name += '.' + self.toolset
rule_name += '.' + name
rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name)
# Remove variable references, but not if they refer to the magic rule
# variables. This is not quite right, as it also protects these for
# actions, not just for rules where they are valid. Good enough.
protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ]
protect = '(?!' + '|'.join(map(re.escape, protect)) + ')'
description = re.sub(protect + r'\$', '_', description)
# gyp dictates that commands are run from the base directory.
# cd into the directory before running, and adjust paths in
# the arguments to point to the proper locations.
rspfile = None
rspfile_content = None
args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
if self.flavor == 'win':
rspfile = rule_name + '.$unique_name.rsp'
# The cygwin case handles this inside the bash sub-shell.
run_in = '' if is_cygwin else ' ' + self.build_to_base
if is_cygwin:
rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine(
args, self.build_to_base)
else:
rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args)
command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable +
rspfile + run_in)
else:
env = self.ComputeExportEnvString(env)
command = gyp.common.EncodePOSIXShellList(args)
command = 'cd %s; ' % self.build_to_base + env + command
# GYP rules/actions express being no-ops by not touching their outputs.
# Avoid executing downstream dependencies in this case by specifying
# restat=1 to ninja.
self.ninja.rule(rule_name, command, description, restat=True,
rspfile=rspfile, rspfile_content=rspfile_content)
self.ninja.newline()
return rule_name, args
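  # Editor's note (illustrative, not part of the original source): rule names
  # are qualified and then sanitized, so a rule named 'process_idl_<md5>' on a
  # target 'my-lib' with toolset 'target' comes out as something like
  # 'my_lib_target_process_idl_<md5>'; everything outside [a-zA-Z0-9_] is
  # replaced by '_'.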
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
global generator_additional_non_configuration_keys
global generator_additional_path_sections
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Ninja generator.
import gyp.generator.xcode as xcode_generator
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
elif flavor == 'win':
default_variables.setdefault('OS', 'win')
default_variables['EXECUTABLE_SUFFIX'] = '.exe'
default_variables['STATIC_LIB_PREFIX'] = ''
default_variables['STATIC_LIB_SUFFIX'] = '.lib'
default_variables['SHARED_LIB_PREFIX'] = ''
default_variables['SHARED_LIB_SUFFIX'] = '.dll'
generator_flags = params.get('generator_flags', {})
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
# Set a variable so conditions can be based on msvs_version.
msvs_version = gyp.msvs_emulation.GetVSVersion(generator_flags)
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
# process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
  # contains the actual word size of the system when running through WOW64).
if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'lib'))
default_variables.setdefault('LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'obj'))
def OpenOutput(path, mode='w'):
"""Open |path| for writing, creating directories if necessary."""
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
return open(path, mode)
def CommandWithWrapper(cmd, wrappers, prog):
wrapper = wrappers.get(cmd, '')
if wrapper:
return wrapper + ' ' + prog
return prog
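# Editor's note (illustrative, not part of the original source): wrappers come
# from '*_wrapper' settings or environment variables collected below in
# GenerateOutputForConfig, so with wrappers == {'CC': 'ccache'},
# CommandWithWrapper('CC', wrappers, 'gcc') yields 'ccache gcc', and an
# unknown key falls through to the bare program name.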
def GetDefaultConcurrentLinks():
"""Returns a best-guess for a number of concurrent links."""
if sys.platform in ('win32', 'cygwin'):
import ctypes
class MEMORYSTATUSEX(ctypes.Structure):
_fields_ = [
("dwLength", ctypes.c_ulong),
("dwMemoryLoad", ctypes.c_ulong),
("ullTotalPhys", ctypes.c_ulonglong),
("ullAvailPhys", ctypes.c_ulonglong),
("ullTotalPageFile", ctypes.c_ulonglong),
("ullAvailPageFile", ctypes.c_ulonglong),
("ullTotalVirtual", ctypes.c_ulonglong),
("ullAvailVirtual", ctypes.c_ulonglong),
("sullAvailExtendedVirtual", ctypes.c_ulonglong),
]
stat = MEMORYSTATUSEX()
stat.dwLength = ctypes.sizeof(stat)
ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
return max(1, stat.ullTotalPhys / (4 * (2 ** 30))) # total / 4GB
else:
# TODO(scottmg): Implement this for other platforms.
return 1
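# Editor's illustrative sketch (not part of the original generator): the
# Windows branch above allows roughly one concurrent link per 4 GiB of
# physical memory, with a floor of one. The helper below is hypothetical and
# only restates that arithmetic for a given memory size in bytes.
def _example_concurrent_links(total_phys_bytes):
  """Hypothetical: mirrors the 'total RAM / 4 GiB, at least 1' heuristic."""
  return max(1, total_phys_bytes // (4 * (2 ** 30)))
# e.g. _example_concurrent_links(16 * 2 ** 30) == 4 on a 16 GiB machine.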
def _GetWinLinkRuleNameSuffix(embed_manifest, link_incremental):
"""Returns the suffix used to select an appropriate linking rule depending on
whether the manifest embedding and/or incremental linking is enabled."""
suffix = ''
if embed_manifest:
suffix += '_embed'
if link_incremental:
suffix += '_inc'
return suffix
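# Editor's note (illustrative, not part of the original source): the suffix
# composes with the base rule names registered by _AddWinLinkRules, e.g.
#   (embed_manifest=True,  link_incremental=True)  -> '_embed_inc' -> 'link_embed_inc'
#   (embed_manifest=True,  link_incremental=False) -> '_embed'     -> 'solink_embed'
#   (embed_manifest=False, link_incremental=False) -> ''           -> 'link'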
def _AddWinLinkRules(master_ninja, embed_manifest, link_incremental):
"""Adds link rules for Windows platform to |master_ninja|."""
def FullLinkCommand(ldcmd, out, binary_type):
cmd = ('cmd /c %(ldcmd)s'
' && %(python)s gyp-win-tool manifest-wrapper $arch'
' cmd /c if exist %(out)s.manifest del %(out)s.manifest'
' && %(python)s gyp-win-tool manifest-wrapper $arch'
' $mt -nologo -manifest $manifests')
if embed_manifest and not link_incremental:
# Embed manifest into a binary. If incremental linking is enabled,
# embedding is postponed to the re-linking stage (see below).
cmd += ' -outputresource:%(out)s;%(resname)s'
else:
# Save manifest as an external file.
cmd += ' -out:%(out)s.manifest'
if link_incremental:
# There is no point in generating separate rule for the case when
# incremental linking is enabled, but manifest embedding is disabled.
# In that case the basic rule should be used (e.g. 'link').
# See also implementation of _GetWinLinkRuleNameSuffix().
assert embed_manifest
# Make .rc file out of manifest, compile it to .res file and re-link.
cmd += (' && %(python)s gyp-win-tool manifest-to-rc $arch'
' %(out)s.manifest %(out)s.manifest.rc %(resname)s'
' && %(python)s gyp-win-tool rc-wrapper $arch $rc'
' %(out)s.manifest.rc'
' && %(ldcmd)s %(out)s.manifest.res')
resource_name = {
'exe': '1',
'dll': '2',
}[binary_type]
return cmd % {'python': sys.executable,
'out': out,
'ldcmd': ldcmd,
'resname': resource_name}
rule_name_suffix = _GetWinLinkRuleNameSuffix(embed_manifest, link_incremental)
dlldesc = 'LINK%s(DLL) $dll' % rule_name_suffix.upper()
dllcmd = ('%s gyp-win-tool link-wrapper $arch '
'$ld /nologo $implibflag /DLL /OUT:$dll '
'/PDB:$dll.pdb @$dll.rsp' % sys.executable)
dllcmd = FullLinkCommand(dllcmd, '$dll', 'dll')
master_ninja.rule('solink' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$dll.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True)
master_ninja.rule('solink_module' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$dll.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True)
# Note that ldflags goes at the end so that it has the option of
# overriding default settings earlier in the command line.
exe_cmd = ('%s gyp-win-tool link-wrapper $arch '
'$ld /nologo /OUT:$out /PDB:$out.pdb @$out.rsp' %
sys.executable)
exe_cmd = FullLinkCommand(exe_cmd, '$out', 'exe')
master_ninja.rule('link' + rule_name_suffix,
description='LINK%s $out' % rule_name_suffix.upper(),
command=exe_cmd,
rspfile='$out.rsp',
rspfile_content='$in_newline $libs $ldflags')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
# generator_dir: relative path from pwd to where make puts build files.
  # Makes migrating from make to ninja easier; ninja doesn't put anything here.
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(os.path.join(generator_dir,
output_dir,
config_name))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
master_ninja = ninja_syntax.Writer(
OpenOutput(os.path.join(toplevel_build, 'build.ninja')),
width=120)
  # Probe for a case-insensitive filesystem: if a previously written
  # 'build.ninja' is also visible as 'BUILD.NINJA', the filesystem folds case
  # and object file paths are lowercased in WriteSources to keep ninja's
  # depfile handling consistent.
  case_sensitive_filesystem = not os.path.exists(
      os.path.join(toplevel_build, 'BUILD.NINJA'))
# Put build-time support tools in out/{config_name}.
gyp.common.CopyTool(flavor, toplevel_build)
# Grab make settings for CC/CXX.
  # The rules are:
  # - Priority, from lowest to highest: the gcc/g++ defaults, then
  #   'make_global_settings' in gyp, then the environment variables.
  # - If neither 'make_global_settings' for CC.host/CXX.host nor the
  #   'CC_host'/'CXX_host' environment variables are set, cc_host/cxx_host
  #   fall back to cc/cxx.
if flavor == 'win':
# Overridden by local arch choice in the use_deps case.
# Chromium's ffmpeg c99conv.py currently looks for a 'cc =' line in
# build.ninja so needs something valid here. http://crbug.com/233985
cc = 'cl.exe'
cxx = 'cl.exe'
ld = 'link.exe'
ld_host = '$ld'
else:
cc = 'gcc'
cxx = 'g++'
ld = '$cxx'
ld_host = '$cxx_host'
cc_host = None
cxx_host = None
cc_host_global_setting = None
cxx_host_global_setting = None
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings = data[build_file].get('make_global_settings', [])
build_to_root = gyp.common.InvertRelativePath(build_dir,
options.toplevel_dir)
wrappers = {}
for key, value in make_global_settings:
if key == 'CC':
cc = os.path.join(build_to_root, value)
if key == 'CXX':
cxx = os.path.join(build_to_root, value)
if key == 'LD':
ld = os.path.join(build_to_root, value)
if key == 'CC.host':
cc_host = os.path.join(build_to_root, value)
cc_host_global_setting = value
if key == 'CXX.host':
cxx_host = os.path.join(build_to_root, value)
cxx_host_global_setting = value
if key == 'LD.host':
ld_host = os.path.join(build_to_root, value)
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value)
# Support wrappers from environment variables too.
for key, value in os.environ.iteritems():
if key.lower().endswith('_wrapper'):
key_prefix = key[:-len('_wrapper')]
key_prefix = re.sub(r'\.HOST$', '.host', key_prefix)
wrappers[key_prefix] = os.path.join(build_to_root, value)
if flavor == 'win':
cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles(
toplevel_build, generator_flags, OpenOutput)
for arch, path in cl_paths.iteritems():
master_ninja.variable(
'cl_' + arch, CommandWithWrapper('CC', wrappers,
QuoteShellArgument(path, flavor)))
cc = GetEnvironFallback(['CC_target', 'CC'], cc)
master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc))
cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx)
master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx))
ld = GetEnvironFallback(['LD_target', 'LD'], ld)
if flavor == 'win':
master_ninja.variable('ld', ld)
master_ninja.variable('idl', 'midl.exe')
master_ninja.variable('ar', 'lib.exe')
master_ninja.variable('rc', 'rc.exe')
master_ninja.variable('asm', 'ml.exe')
master_ninja.variable('mt', 'mt.exe')
else:
master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld))
master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], 'ar'))
if generator_supports_multiple_toolsets:
if not cc_host:
cc_host = cc
if not cxx_host:
cxx_host = cxx
master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], 'ar'))
cc_host = GetEnvironFallback(['CC_host'], cc_host)
cxx_host = GetEnvironFallback(['CXX_host'], cxx_host)
ld_host = GetEnvironFallback(['LD_host'], ld_host)
# The environment variable could be used in 'make_global_settings', like
# ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)'], transform them here.
if '$(CC)' in cc_host and cc_host_global_setting:
cc_host = cc_host_global_setting.replace('$(CC)', cc)
if '$(CXX)' in cxx_host and cxx_host_global_setting:
cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx)
master_ninja.variable('cc_host',
CommandWithWrapper('CC.host', wrappers, cc_host))
master_ninja.variable('cxx_host',
CommandWithWrapper('CXX.host', wrappers, cxx_host))
if flavor == 'win':
master_ninja.variable('ld_host', ld_host)
else:
master_ninja.variable('ld_host', CommandWithWrapper(
'LINK', wrappers, ld_host))
master_ninja.newline()
master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks())
master_ninja.newline()
deps = 'msvc' if flavor == 'win' else 'gcc'
if flavor != 'win':
master_ninja.rule(
'cc',
description='CC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'cc_s',
description='CC $out',
command=('$cc $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'))
master_ninja.rule(
'cxx',
description='CXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
'$cflags_pch_cc -c $in -o $out'),
depfile='$out.d',
deps=deps)
else:
cc_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cc /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname ')
cxx_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cxx /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname ')
master_ninja.rule(
'cc',
description='CC $out',
command=cc_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_c',
deps=deps)
master_ninja.rule(
'cxx',
description='CXX $out',
command=cxx_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_cc',
deps=deps)
master_ninja.rule(
'idl',
description='IDL $in',
command=('%s gyp-win-tool midl-wrapper $arch $outdir '
'$tlb $h $dlldata $iid $proxy $in '
'$idlflags' % sys.executable))
master_ninja.rule(
'rc',
description='RC $in',
# Note: $in must be last otherwise rc.exe complains.
command=('%s gyp-win-tool rc-wrapper '
'$arch $rc $defines $includes $rcflags /fo$out $in' %
sys.executable))
master_ninja.rule(
'asm',
description='ASM $in',
command=('%s gyp-win-tool asm-wrapper '
'$arch $asm $defines $includes /c /Fo $out $in' %
sys.executable))
if flavor != 'mac' and flavor != 'win':
master_ninja.rule(
'alink',
description='AR $out',
command='rm -f $out && $ar rcs $out $in')
master_ninja.rule(
'alink_thin',
description='AR $out',
command='rm -f $out && $ar rcsT $out $in')
# This allows targets that only need to depend on $lib's API to declare an
# order-only dependency on $lib.TOC and avoid relinking such downstream
# dependencies when $lib changes only in non-public ways.
# The resulting string leaves an uninterpolated %{suffix} which
# is used in the final substitution below.
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e ${lib}.TOC ]; then '
'%(solink)s && %(extract_toc)s > ${lib}.TOC; else '
'%(solink)s && %(extract_toc)s > ${lib}.tmp && '
'if ! cmp -s ${lib}.tmp ${lib}.TOC; then mv ${lib}.tmp ${lib}.TOC ; '
'fi; fi'
% { 'solink':
'$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
'extract_toc':
('{ readelf -d ${lib} | grep SONAME ; '
'nm -gD -f p ${lib} | cut -f1-2 -d\' \'; }')})
master_ninja.rule(
'solink',
description='SOLINK $lib',
restat=True,
command=(mtime_preserving_solink_base % {
'suffix': '-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive '
'$libs'}),
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib',
restat=True,
command=(mtime_preserving_solink_base % {
'suffix': '-Wl,--start-group $in $solibs -Wl,--end-group '
'$libs'}),
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out',
command=('$ld $ldflags -o $out '
'-Wl,--start-group $in $solibs -Wl,--end-group $libs'),
pool='link_pool')
elif flavor == 'win':
master_ninja.rule(
'alink',
description='LIB $out',
command=('%s gyp-win-tool link-wrapper $arch '
'$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' %
sys.executable),
rspfile='$out.rsp',
rspfile_content='$in_newline $libflags')
_AddWinLinkRules(master_ninja, embed_manifest=True, link_incremental=True)
_AddWinLinkRules(master_ninja, embed_manifest=True, link_incremental=False)
_AddWinLinkRules(master_ninja, embed_manifest=False, link_incremental=False)
# Do not generate rules for embed_manifest=False and link_incremental=True
# because in that case rules for (False, False) should be used (see
# implementation of _GetWinLinkRuleNameSuffix()).
else:
master_ninja.rule(
'objc',
description='OBJC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
'$cflags_pch_objc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'objcxx',
description='OBJCXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
'$cflags_pch_objcc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'alink',
description='LIBTOOL-STATIC $out, POSTBUILDS',
command='rm -f $out && '
'./gyp-mac-tool filter-libtool libtool $libtool_flags '
'-static -o $out $in'
'$postbuilds')
# Record the public interface of $lib in $lib.TOC. See the corresponding
# comment in the posix section above for details.
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e ${lib}.TOC ] || '
# Always force dependent targets to relink if this library
# reexports something. Handling this correctly would require
# recursive TOC dumping but this is rare in practice, so punt.
'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then '
'%(solink)s && %(extract_toc)s > ${lib}.TOC; '
'else '
'%(solink)s && %(extract_toc)s > ${lib}.tmp && '
'if ! cmp -s ${lib}.tmp ${lib}.TOC; then '
'mv ${lib}.tmp ${lib}.TOC ; '
'fi; '
'fi'
% { 'solink': '$ld -shared $ldflags -o $lib %(suffix)s',
'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})
# TODO(thakis): The solink_module rule is likely wrong. Xcode seems to pass
# -bundle -single_module here (for osmesa.so).
master_ninja.rule(
'solink',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=(mtime_preserving_solink_base % {
'suffix': '$in $solibs $libs$postbuilds'}),
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=(mtime_preserving_solink_base % {
'suffix': '$in $solibs $libs$postbuilds'}),
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out, POSTBUILDS',
command=('$ld $ldflags -o $out '
'$in $solibs $libs$postbuilds'),
pool='link_pool')
master_ninja.rule(
'infoplist',
description='INFOPLIST $out',
command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
'plutil -convert xml1 $out $out'))
master_ninja.rule(
'mac_tool',
description='MACTOOL $mactool_cmd $in',
command='$env ./gyp-mac-tool $mactool_cmd $in $out')
master_ninja.rule(
'package_framework',
description='PACKAGE FRAMEWORK $out, POSTBUILDS',
command='./gyp-mac-tool package-framework $out $version$postbuilds '
'&& touch $out')
if flavor == 'win':
master_ninja.rule(
'stamp',
description='STAMP $out',
command='%s gyp-win-tool stamp $out' % sys.executable)
master_ninja.rule(
'copy',
description='COPY $in $out',
command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable)
else:
master_ninja.rule(
'stamp',
description='STAMP $out',
command='${postbuilds}touch $out')
master_ninja.rule(
'copy',
description='COPY $in $out',
command='ln -f $in $out 2>/dev/null || (rm -rf $out && cp -af $in $out)')
master_ninja.newline()
all_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_targets.add(target)
all_outputs = set()
# target_outputs is a map from qualified target name to a Target object.
target_outputs = {}
# target_short_names is a map from target short name to a list of Target
# objects.
target_short_names = {}
for qualified_target in target_list:
# qualified_target is like: third_party/icu/icu.gyp:icui18n#target
build_file, name, toolset = \
gyp.common.ParseQualifiedTarget(qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings == this_make_global_settings, (
"make_global_settings needs to be the same for all targets.")
spec = target_dicts[qualified_target]
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
build_file = gyp.common.RelativePath(build_file, options.toplevel_dir)
base_path = os.path.dirname(build_file)
obj = 'obj'
if toolset != 'target':
obj += '.' + toolset
output_file = os.path.join(obj, base_path, name + '.ninja')
writer = NinjaWriter(qualified_target, target_outputs, base_path, build_dir,
OpenOutput(os.path.join(toplevel_build, output_file)),
flavor, toplevel_dir=options.toplevel_dir)
master_ninja.subninja(output_file)
target = writer.WriteSpec(
spec, config_name, generator_flags, case_sensitive_filesystem)
if target:
if name != target.FinalOutput() and spec['toolset'] == 'target':
target_short_names.setdefault(name, []).append(target)
target_outputs[qualified_target] = target
if qualified_target in all_targets:
all_outputs.add(target.FinalOutput())
if target_short_names:
# Write a short name to build this target. This benefits both the
# "build chrome" case as well as the gyp tests, which expect to be
# able to run actions and build libraries by their short name.
master_ninja.newline()
master_ninja.comment('Short names for targets.')
for short_name in target_short_names:
master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in
target_short_names[short_name]])
if all_outputs:
master_ninja.newline()
master_ninja.build('all', 'phony', list(all_outputs))
master_ninja.default(generator_flags.get('default_target', 'all'))
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
builddir = os.path.join(options.toplevel_dir, 'out', config)
arguments = ['ninja', '-C', builddir]
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
(target_list, target_dicts, data, params, config_name) = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
user_config = params.get('generator_flags', {}).get('config', None)
if gyp.common.GetFlavor(params) == 'win':
target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts)
target_list, target_dicts = MSVSUtil.InsertLargePdbShims(
target_list, target_dicts, generator_default_variables)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append(
(target_list, target_dicts, data, params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt, e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
| bsd-3-clause | -8,764,016,464,242,874,000 | 40.203799 | 80 | 0.626624 | false | 3.876974 | true | false | false |
| myd7349/DiveIntoPython3Practices | chapter_09_UnitTesting/roman2.py | 1 | 4834 |
# -*- coding: utf-8 -*-
# 2014-11-18T22:48+08:00
import unittest
class OutOfRangeError(ValueError): pass
roman_numeral_map = (('M', 1000),
('CM', 900),
('D', 500),
('CD', 400),
('C', 100),
('XC', 90),
('L', 50),
('XL', 40),
('X', 10),
('IX', 9),
('V', 5),
('IV', 4),
('I', 1))
def to_roman(n):
'''Convert integer to Roman numeral'''
if n > 3999:
# The unit test does not check the human-readable string that accompanies the exception
raise OutOfRangeError('number out of range (must be less than 4000)')
result = ''
for numeral, integer in roman_numeral_map:
while n >= integer:
result += numeral
n -= integer
return result
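# Editor's illustrative sketch (not part of the original exercise): the greedy
# loop above always consumes the largest numeral that still fits, so for 1994
# it takes 'M' (1000), 'CM' (900), 'XC' (90) and 'IV' (4), giving 'MCMXCIV'.
# The helper below is hypothetical and only records that trace.
def _example_to_roman_trace(n=1994):
    '''Return the (numeral, integer) pairs to_roman would consume for n.'''
    steps = []
    for numeral, integer in roman_numeral_map:
        while n >= integer:
            steps.append((numeral, integer))
            n -= integer
    return steps  # for 1994: [('M', 1000), ('CM', 900), ('XC', 90), ('IV', 4)]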
class KnownValues(unittest.TestCase):
known_values = ((1, 'I'),
(2, 'II'),
(3, 'III'),
(4, 'IV'),
(5, 'V'),
(6, 'VI'),
(7, 'VII'),
(8, 'VIII'),
(9, 'IX'),
(10, 'X'),
(50, 'L'),
(100, 'C'),
(500, 'D'),
(1000, 'M'),
(31, 'XXXI'),
(148, 'CXLVIII'),
(294, 'CCXCIV'),
(312, 'CCCXII'),
(421, 'CDXXI'),
(528, 'DXXVIII'),
(621, 'DCXXI'),
(782, 'DCCLXXXII'),
(870, 'DCCCLXX'),
(941, 'CMXLI'),
(1043, 'MXLIII'),
(1110, 'MCX'),
(1226, 'MCCXXVI'),
(1301, 'MCCCI'),
(1485, 'MCDLXXXV'),
(1509, 'MDIX'),
(1607, 'MDCVII'),
(1754, 'MDCCLIV'),
(1832, 'MDCCCXXXII'),
(1993, 'MCMXCIII'),
(2074, 'MMLXXIV'),
(2152, 'MMCLII'),
(2212, 'MMCCXII'),
(2343, 'MMCCCXLIII'),
(2499, 'MMCDXCIX'),
(2574, 'MMDLXXIV'),
(2646, 'MMDCXLVI'),
(2723, 'MMDCCXXIII'),
(2892, 'MMDCCCXCII'),
(2975, 'MMCMLXXV'),
(3051, 'MMMLI'),
(3185, 'MMMCLXXXV'),
(3250, 'MMMCCL'),
(3313, 'MMMCCCXIII'),
(3408, 'MMMCDVIII'),
(3501, 'MMMDI'),
(3610, 'MMMDCX'),
(3743, 'MMMDCCXLIII'),
(3844, 'MMMDCCCXLIV'),
(3888, 'MMMDCCCLXXXVIII'),
(3940, 'MMMCMXL'),
(3999, 'MMMCMXCIX'))
def test_to_roman_known_values(self):
'''to_roman should give known result with known input'''
for integer, numeral in self.known_values:
self.assertEqual(numeral, to_roman(integer))
class ToRomanBadInput(unittest.TestCase):
# PASS
def test_too_large(self):
'''to_roman should fail with large input'''
# 3. The unittest.TestCase class provides the assertRaises method,
# which takes the following arguments: the exception you’re expecting,
# the function you’re testing, and the arguments you’re passing to that
# function. (If the function you’re testing takes more than one argument,
# pass them all to assertRaises, in order, and it will pass them right along
# to the function you’re testing.)
# assertRaises(exception, callable, *args, **kwds)
# assertRaises(exception, msg=None)
self.assertRaises(OutOfRangeError, to_roman, 4000)
if __name__ == '__main__':
    # By default, unittest.main calls sys.exit after running the tests, so any
    # code placed after the call would never run. Passing exit=False changes
    # that behavior and lets the demonstration below execute.
unittest.main(exit = False)
# It is not enough to test that functions succeed when given good input; you must
# also test that they fail when given bad input. And not just any sort of failure;
# they must fail in the way you expect.
try:
print(to_roman(5000))
except OutOfRangeError:
print('5000 is too large')
# Along with testing numbers that are too large,
# you need to test numbers that are too small.
print('{!r}'.format(to_roman(0)))
print('{!r}'.format(to_roman(-1)))
| lgpl-3.0 | 2,665,887,912,295,815,000 | 36.984252 | 95 | 0.439884 | false | 3.980198 | true | false | false |
| troup-system/troup | troup/observer.py | 1 | 1190 |
# Copyright 2016 Pavle Jonoski
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Observable:
    """Minimal synchronous event dispatcher: handlers registered for an event
    name via on() are invoked in registration order by trigger()."""
def __init__(self):
self.listeners = {}
def on(self, event, handler):
listeners = self.listeners.get(event)
if not listeners:
listeners = self.listeners[event] = []
listeners.append(handler)
def trigger(self, event, *args):
listeners = self.listeners.get(event)
if listeners:
for listener in listeners:
listener(*args)
def remove_listener(self, event, listener):
listeners = self.listeners.get(event)
if listeners:
listeners.remove(listener)
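# Editor's usage sketch (illustrative, not part of the original module): shows
# how on()/trigger()/remove_listener() interact. The event name 'joined' and
# the collecting list are arbitrary choices for the demo.
def _example_observable_usage():
    """Hypothetical demo: returns the payloads a handler saw before removal."""
    seen = []
    def handler(payload):
        seen.append(payload)
    bus = Observable()
    bus.on('joined', handler)
    bus.trigger('joined', 'node-1')      # handler runs -> seen == ['node-1']
    bus.remove_listener('joined', handler)
    bus.trigger('joined', 'node-2')      # no listener left, seen unchanged
    return seen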
| apache-2.0 | -3,194,647,551,165,012,500 | 33 | 74 | 0.663866 | false | 4.311594 | false | false | false |
| MaxTyutyunnikov/lino | obsolete/tests/74.py | 1 | 1459 |
## Copyright 2003-2006 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, write to the Free Software Foundation,
## Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import win32ui
import win32con
from lino.misc import tsttools
from lino import config
class Case(tsttools.TestCase):
""
def test01(self):
spoolFile = self.addTempFile("74.ps",showOutput=True)
dc = win32ui.CreateDC()
dc.CreatePrinterDC(config.win32.get('postscript_printer'))
dc.StartDoc("my print job",spoolFile)
dc.SetMapMode(win32con.MM_TWIPS)
dc.StartPage()
minx, miny = dc.GetWindowOrg()
maxx,maxy = dc.GetWindowExt()
for x in range(minx,maxx,1440):
for y in range(miny,maxy,1440):
dc.TextOut(x,-y,repr((x,y)))
dc.EndDoc()
if __name__ == '__main__':
tsttools.main()
| gpl-3.0 | -2,120,210,230,867,551,700 | 30.717391 | 70 | 0.681974 | false | 3.558537 | false | false | false |
| gem/oq-engine | openquake/hmtk/seismicity/declusterer/dec_gardner_knopoff.py | 1 | 6271 |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# LICENSE
#
# Copyright (C) 2010-2021 GEM Foundation, G. Weatherill, M. Pagani,
# D. Monelli.
#
# The Hazard Modeller's Toolkit is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>
#
# DISCLAIMER
#
# The software Hazard Modeller's Toolkit (openquake.hmtk) provided herein
# is released as a prototype implementation on behalf of
# scientists and engineers working within the GEM Foundation (Global
# Earthquake Model).
#
# It is distributed for the purpose of open collaboration and in the
# hope that it will be useful to the scientific, engineering, disaster
# risk and software design communities.
#
# The software is NOT distributed as part of GEM’s OpenQuake suite
# (https://www.globalquakemodel.org/tools-products) and must be considered as a
# separate entity. The software provided herein is designed and implemented
# by scientific staff. It is not developed to the design standards, nor
# subject to same level of critical review by professional software
# developers, as GEM’s OpenQuake software suite.
#
# Feedback and contribution to the software is welcome, and can be
# directed to the hazard scientific staff of the GEM Model Facility
# (hazard@globalquakemodel.org).
#
# The Hazard Modeller's Toolkit (openquake.hmtk) is therefore distributed
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# The GEM Foundation, and the authors of the software, assume no
# liability for use of the software.
"""
Module :mod:`openquake.hmtk.seismicity.declusterer.dec_gardner_knopoff`
defines the Gardner and Knopoff declustering algorithm
"""
import numpy as np
from openquake.hmtk.seismicity.declusterer.base import (
BaseCatalogueDecluster, DECLUSTERER_METHODS)
from openquake.hmtk.seismicity.utils import decimal_year, haversine
from openquake.hmtk.seismicity.declusterer.distance_time_windows import (
TIME_DISTANCE_WINDOW_FUNCTIONS)
@DECLUSTERER_METHODS.add(
"decluster",
time_distance_window=TIME_DISTANCE_WINDOW_FUNCTIONS,
fs_time_prop=np.float)
class GardnerKnopoffType1(BaseCatalogueDecluster):
"""
This class implements the Gardner Knopoff algorithm as described in
this paper:
Gardner, J. K. and Knopoff, L. (1974). Is the sequence of aftershocks
in Southern California, with aftershocks removed, poissonian?. Bull.
Seism. Soc. Am., 64(5): 1363-1367.
"""
def decluster(self, catalogue, config):
"""
The configuration of this declustering algorithm requires two
objects:
- A time-distance window object (key is 'time_distance_window')
        - A value in the interval [0, 1] expressing the fraction of the
          aftershock time window that is also searched for foreshocks
          (key is 'fs_time_prop')
:param catalogue:
Catalogue of earthquakes
:type catalogue: Dictionary
:param config:
Configuration parameters
:type config: Dictionary
:returns:
**vcl vector** indicating cluster number,
**flagvector** indicating which eq events belong to a cluster
:rtype: numpy.ndarray
"""
# Get relevant parameters
neq = len(catalogue.data['magnitude']) # Number of earthquakes
# Get decimal year (needed for time windows)
year_dec = decimal_year(
catalogue.data['year'], catalogue.data['month'],
catalogue.data['day'])
# Get space and time windows corresponding to each event
# Initial Position Identifier
sw_space, sw_time = (
config['time_distance_window'].calc(
catalogue.data['magnitude'], config.get('time_cutoff')))
eqid = np.arange(0, neq, 1)
# Pre-allocate cluster index vectors
vcl = np.zeros(neq, dtype=int)
# Sort magnitudes into descending order
id0 = np.flipud(np.argsort(catalogue.data['magnitude'],
kind='heapsort'))
longitude = catalogue.data['longitude'][id0]
latitude = catalogue.data['latitude'][id0]
sw_space = sw_space[id0]
sw_time = sw_time[id0]
year_dec = year_dec[id0]
eqid = eqid[id0]
flagvector = np.zeros(neq, dtype=int)
# Begin cluster identification
clust_index = 0
for i in range(0, neq - 1):
if vcl[i] == 0:
# Find Events inside both fore- and aftershock time windows
dt = year_dec - year_dec[i]
vsel = np.logical_and(
vcl == 0,
np.logical_and(
dt >= (-sw_time[i] * config['fs_time_prop']),
dt <= sw_time[i]))
# Of those events inside time window,
# find those inside distance window
vsel1 = haversine(longitude[vsel],
latitude[vsel],
longitude[i],
latitude[i]) <= sw_space[i]
vsel[vsel] = vsel1[:, 0]
temp_vsel = np.copy(vsel)
temp_vsel[i] = False
if any(temp_vsel):
# Allocate a cluster number
vcl[vsel] = clust_index + 1
flagvector[vsel] = 1
# For those events in the cluster before the main event,
# flagvector is equal to -1
temp_vsel[dt >= 0.0] = False
flagvector[temp_vsel] = -1
flagvector[i] = 0
clust_index += 1
# Re-sort the catalog_matrix into original order
id1 = np.argsort(eqid, kind='heapsort')
eqid = eqid[id1]
vcl = vcl[id1]
flagvector = flagvector[id1]
return vcl, flagvector
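

# Hedged usage sketch: a minimal, self-contained way to drive ``decluster``.
# ``_DemoWindow`` and ``_DemoCatalogue`` are hypothetical stand-ins that expose
# only what the method actually touches; real runs would pass an hmtk
# catalogue and a window object registered in TIME_DISTANCE_WINDOW_FUNCTIONS.
if __name__ == "__main__":
    class _DemoWindow(object):
        """Stand-in window: fixed 30 km / 0.1 yr for every event."""
        def calc(self, magnitude, time_cutoff=None):
            n = len(magnitude)
            return np.full(n, 30.0), np.full(n, 0.1)

    class _DemoCatalogue(object):
        """Stand-in catalogue exposing only the ``data`` dictionary."""
        def __init__(self, data):
            self.data = data

    demo_catalogue = _DemoCatalogue({
        'year': np.array([2000, 2000, 2001]),
        'month': np.array([1, 1, 6]),
        'day': np.array([1, 2, 1]),
        'longitude': np.array([0.00, 0.01, 5.00]),
        'latitude': np.array([0.00, 0.01, 5.00]),
        'magnitude': np.array([6.0, 4.0, 5.0]),
    })
    demo_config = {'time_distance_window': _DemoWindow(), 'fs_time_prop': 1.0}
    vcl, flagvector = GardnerKnopoffType1().decluster(
        demo_catalogue, demo_config)
    # The small next-day event nearby is expected to join the mainshock's
    # cluster as an aftershock (flag 1); the distant 2001 event stays alone.
    print(vcl, flagvector)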
|
agpl-3.0
| -5,080,174,637,922,129,000
| 39.173077
| 79
| 0.629807
| false
| 3.826007
| true
| false
| false
|
tommyod/KDEpy
|
KDEpy/tests/test_estimator_vs_estimator.py
|
1
|
2239
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Test the implemented estimators against each other on simple data sets.
"""
import numpy as np
from KDEpy.NaiveKDE import NaiveKDE
from KDEpy.TreeKDE import TreeKDE
from KDEpy.FFTKDE import FFTKDE
import itertools
import pytest
N = 2 ** 5
estimators = [NaiveKDE, TreeKDE, FFTKDE]
estimators_2 = list(itertools.combinations(estimators, 2))
kernels = list(NaiveKDE._available_kernels.keys())
@pytest.mark.parametrize("est1, est2", estimators_2)
def test_vs_simple(est1, est2):
"""
Test that mean error is low on default parameters.
"""
np.random.seed(12)
data = np.random.randn(N)
x1, y1 = est1().fit(data)()
x1, y2 = est2().fit(data)()
assert np.sqrt(np.mean((y1 - y2) ** 2)) < 0.0001
@pytest.mark.parametrize("est1, est2", estimators_2)
def test_vs_simple_weighted(est1, est2):
"""
Test that mean error is low on default parameters with weighted data.
"""
np.random.seed(12)
data = np.random.randn(N) * 10
weights = np.random.randn(N) ** 2 + 1
x1, y1 = est1().fit(data, weights)()
x1, y2 = est2().fit(data, weights)()
assert np.sqrt(np.mean((y1 - y2) ** 2)) < 0.0001
@pytest.mark.parametrize("estimators, kernel, bw", list(itertools.product(estimators_2, kernels, [0.1, 5])))
def test_vs_simple_weighted_kernels(estimators, kernel, bw):
"""
Test every kernel function over every implementation.
"""
est1, est2 = estimators
np.random.seed(13)
data = np.random.randn(N) * 10
weights = np.random.randn(N) ** 2 + 1
x1, y1 = est1(kernel, bw=bw).fit(data, weights)()
x1, y2 = est2(kernel, bw=bw).fit(data, weights)()
assert np.sqrt(np.mean((y1 - y2) ** 2)) < 0.01
# TODO: Examine why error increases when bw -> 0
if __name__ == "__main__":
# --durations=10 <- May be used to show potentially slow tests
# pytest.main(args=['.', '--doctest-modules', '-v'])
est1, est2 = NaiveKDE, TreeKDE
np.random.seed(13)
data = np.random.randn(2 ** 8) * 10
weights = np.random.randn(2 ** 8) ** 2 + 1
x1, y1 = est1(bw=100).fit(data, weights)()
x1, y2 = est2(bw=100).fit(data, weights)()
import matplotlib.pyplot as plt
    plt.plot(x1, y1 - y2)
    plt.show()
|
gpl-3.0
| 7,116,459,819,006,918,000
| 28.460526
| 108
| 0.634658
| false
| 2.946053
| true
| false
| false
|
BenDoan/unomaha_utils
|
course_scraper/scraper.py
|
1
|
5095
|
#!/usr/bin/env python2
"""
Usage:
./scraper.py [options]
Options:
-h, --help Prints this help message
-o FILE, --output FILE Specifies output file
-c COLLEGE, --college COLLEGE Specifies a specific college
    -l, --last-term-only  Only outputs the last term
-u URL, --url URL Specify an alternate class-search url
-v, --verbose Turns on verbose logging
"""
import datetime
import itertools
import json
import logging
import time
from collections import OrderedDict
from multiprocessing import Pool, cpu_count
from os import path
import requests
from BeautifulSoup import BeautifulSoup
from docopt import docopt
BASE_URL = "https://www.unomaha.edu/registrar/students/before-you-enroll/class-search/"
terms = [1158]
def get_college_data((college, term)):
"""Returns a dictionary containing all classes within college and term"""
logging.info("Processing college {}".format(college))
time.sleep(1)
page = requests.get("{}?term={}&session=&subject={}&catalog_nbr=&career=&instructor=&class_start_time=&class_end_time=&location=&special=&instruction_mode=".format(BASE_URL, term, college))
soup = BeautifulSoup(page.text)
if len(soup.findAll("div", {'class': 'dotted-bottom'})) == 0:
logging.error("No classes for college {}, term {}".format(college, term))
classes = OrderedDict()
#loop through each class in the college
for dotted in soup.findAll("div", {'class': 'dotted-bottom'}):
cls = OrderedDict()
number = dotted.find("h2")
if number:
class_number = number.text.split(" ")[-1]
else:
class_number = "-"
title = dotted.find("p")
if title:
cls['title'] = title.text
else:
cls['title'] = "-"
desc = dotted.findAll("p")
if len(desc) > 1:
cls['desc'] = desc[1].text
else:
cls['desc'] = "-"
if len(desc) > 2:
cls['prereq'] = desc[2].text
else:
cls['prereq'] = "-"
sections = OrderedDict()
tables = dotted.findAll("table")
if tables:
# loop through each section in the class
for table in tables:
section = OrderedDict()
rows = table.findAll("tr")
for tr in rows:
tds = tr.findAll("td")
if tds:
if len(tds) > 1 and tds[1].text != "Date": # remove weird field
section[tds[0].text] = tds[1].text
section_name = table.find("th")
if section_name:
section_num = section_name.text.split(" ")[-1]
sections[section_num] = section
cls['sections'] = sections
if class_number != "-":
classes[class_number] = cls
return classes
def get_full_term_listing(college=None):
"""Returns a dictionary containing the uno classes
for every listed term and college"""
pool = Pool(cpu_count()*2)
term_data = OrderedDict()
for term in terms:
logging.info("Processing term {}".format(term))
if college is None:
colleges = get_colleges(term)
else:
colleges = [college]
results = pool.map(get_college_data, zip(colleges, itertools.repeat(term)))
term_data[term] = OrderedDict(zip(colleges, results))
stats = {
"num_terms": len(term_data)
}
for term, colleges in term_data.items():
stats[term] = {
"num_colleges": len(colleges)
}
out_dict = {
"meta" : {
"time": int(datetime.datetime.utcnow().strftime("%s")),
"time_str": str(datetime.datetime.utcnow()),
"url": BASE_URL,
"stats": stats,
},
"term_data": term_data
}
return out_dict
def _main():
args = docopt(__doc__, version="1")
# process arguments
if args['--college']:
college = args['--college']
else:
college = None
if args['--last-term-only']:
global terms
terms = [terms[-1]]
if args['--url']:
global BASE_URL
BASE_URL = args['--url']
if args['--verbose']:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.WARNING)
terms = get_terms()
term_data = get_full_term_listing(college)
# output class data as json
json_data = json.dumps(term_data)
if args['--output'] is not None:
with open(path.abspath(args['--output']), 'w') as f:
f.write(json_data)
else:
print json_data
def get_colleges(term):
return [x['value'] for x in requests.get("{}subjects.load.php?term={}".format(BASE_URL, term)).json()]
def get_terms():
page = requests.get(BASE_URL)
soup = BeautifulSoup(page.text)
return [int(dict(x.attrs)['value']) for x in soup.find("select").findAll("option")]
if __name__ == "__main__":
_main()
|
mit
| -6,599,031,899,051,639,000
| 27.463687
| 193
| 0.559961
| false
| 3.776872
| false
| false
| false
|
admk/soap
|
soap/semantics/schedule/ii.py
|
1
|
2125
|
import itertools
import math
import numpy
from soap.context import context
neg_inf = -float('inf')
def rec_init_int_check(graph, ii):
"""
Checks if the target II is valid. Runs a modified Floyd-Warshall
algorithm to test the absence of positive cycles.
Input ii must be greater or equal to 1.
"""
nodes = graph.nodes()
len_nodes = len(nodes)
dist_shape = [len_nodes] * 2
dist = numpy.full(dist_shape, neg_inf)
iterer = itertools.product(enumerate(nodes), repeat=2)
for (from_idx, from_node), (to_idx, to_node) in iterer:
try:
edge = graph[from_node][to_node]
except KeyError:
continue
dist[from_idx, to_idx] = edge['latency'] - ii * edge['distance']
iterer = itertools.product(range(len_nodes), repeat=3)
for mid_idx, from_idx, to_idx in iterer:
dist_val = dist[from_idx, mid_idx] + dist[mid_idx, to_idx]
if dist_val > dist[from_idx, to_idx]:
if from_idx == to_idx and dist_val > 0:
return False
dist[from_idx, to_idx] = dist_val
return True
def rec_init_int_search(graph, init_ii=1, prec=None, round_values=False):
"""
Performs a binary search of the recurrence-based minimum initiation
interval (RecMII).
"""
prec = prec or context.ii_precision
min_ii = max_ii = init_ii
incr = prec = 2 ** -prec
# find an upper-bound on MII
while not rec_init_int_check(graph, max_ii):
max_ii += incr
incr *= 2
# binary search for the optimal MII
last_ii = max_ii
while max_ii - min_ii > prec:
mid_ii = (min_ii + max_ii) / 2
if rec_init_int_check(graph, mid_ii):
max_ii = last_ii = mid_ii
else:
min_ii = mid_ii
if round_values:
return int(math.ceil(last_ii - (max_ii - min_ii) / 2))
return last_ii
def res_init_int(memory_access_map):
if not memory_access_map:
return 1
port_count = context.port_count
return max(1, max(
access_count / port_count
for access_count in memory_access_map.values()))
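

# Hedged usage sketch, assuming a networkx-style DiGraph whose edges carry
# the 'latency' and 'distance' attributes read by the functions above.
if __name__ == "__main__":
    import networkx

    graph = networkx.DiGraph()
    # A two-node recurrence a -> b -> a: total latency 3 carried over a loop
    # distance of 1, so the recurrence-constrained MII of this cycle is 3.
    graph.add_edge('a', 'b', latency=2, distance=0)
    graph.add_edge('b', 'a', latency=1, distance=1)
    print(rec_init_int_check(graph, 3))  # True: II = 3 leaves no positive cycle
    print(rec_init_int_check(graph, 2))  # False: a positive cycle remains
    print(rec_init_int_search(graph))    # converges to approximately 3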
|
mit
| 3,456,767,238,910,983,000
| 26.24359
| 73
| 0.596706
| false
| 3.289474
| false
| false
| false
|
shivamvats/graphSearch
|
heuristicSearch/planners/island_astar.py
|
1
|
2665
|
from astar import *
class IslandAstar(Astar):
def __init__(self, env, inflation=10):
super(IslandAstar, self).__init__(env, inflation)
#@profile
def plan(self, startNode, goalNode, viz=None):
self.startNode = startNode
self.goalNode = goalNode
print(goalNode.getNodeId())
# Ordered List of expanded sates and their timestamps.
stateTimeStamps = collections.OrderedDict()
self.startNode.setG(0)
heuristicCost = self.env.heuristic(startNode, goalNode)
startNode.setH(heuristicCost)
openQ = Q.PriorityQueue()
# Using a dictionary 'cos list has slow lookup.
closed = {}
openQ.put((startNode.getH() + startNode.getG(), startNode))
currNode = startNode
startTime = time.time()
while(not openQ.empty() and currNode.getNodeId() !=
self.goalNode.getNodeId()):
priority, currNode = openQ.get()
nodeId = currNode.getNodeId()
if nodeId in closed:
continue
stateTimeStamps[nodeId] = (time.time(), currNode.getH())
closed[nodeId] = 1
if viz.incrementalDisplay:
viz.markPoint(self.env.getPointFromId(currNode.getNodeId()), 0)
viz.displayImage(1)
children, edgeCosts = \
self.env.getChildrenAndCosts(currNode)
for child, edgeCost in zip(children, edgeCosts):
if child.getNodeId() in closed:
continue
updated = self.updateG(child, currNode.getG() + edgeCost)
if updated:
child.setParent(currNode)
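                    # Record whether the best-known path to this child passes
                    # through one of the environment's island nodes: set the
                    # flag when the parent is an island, otherwise inherit the
                    # parent's flag.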
if currNode.getNodeId() in self.env.islandNodeIds:
child.viaIsland = True
else:
child.viaIsland = currNode.viaIsland
#XXX What if this node is already in the open list?
openQ.put((child.getG() +
self.inflation*self.env.heuristic(child, goalNode), child))
self.stateTimeStamps = stateTimeStamps
endTime = time.time()
timeTaken = endTime - startTime
print("Total time taken for planning is %f", timeTaken)
#print(self.stateTimeStamps)
print("Nodes expanded", len(closed))
closedNodeIds = list(closed.keys())
points = map(self.env.getPointFromId, closedNodeIds)
viz.markPoints(points, 90)
viz.displayImage(1)
if currNode.getNodeId() == self.goalNode.getNodeId():
return 1
else:
return 0
|
mit
| -3,337,239,640,545,486,300
| 31.901235
| 83
| 0.566604
| false
| 4.074924
| false
| false
| false
|
amadeusproject/amadeuslms
|
reports/forms.py
|
1
|
3623
|
"""
Copyright 2016, 2017 UFPE - Universidade Federal de Pernambuco
This file is part of the Amadeus Sistema de Gestão de Aprendizagem program, or simply Amadeus LMS
Amadeus LMS is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation (FSF), version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License, under the title "LICENSE", along with this program; if not, write to the Free Software Foundation (FSF) Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from django import forms
from django.utils.translation import ugettext_lazy as _
import datetime
from django.forms.formsets import BaseFormSet
class BaseResourceAndTagFormset(BaseFormSet):
def clean(self):
"""
        Hook for cross-form validation: currently it only logs the per-form
        errors and skips further checks when any individual form has errors.
"""
print(self.errors)
if any(self.errors):
return
for form in self.forms:
pass
class ResourceAndTagForm(forms.Form):
resource = forms.ChoiceField(label=_("Kind Of Resource"), required=True)
tag = forms.ChoiceField(label=_('Tag'))
def __init__(self, *args, **kwargs):
super(ResourceAndTagForm, self).__init__(*args, **kwargs)
if kwargs.get('initial'):
initial = kwargs['initial']
self.fields['resource'].choices = [(classes.__name__.lower(), classes.__name__.lower()) for classes in initial['class_name']]
self.fields['tag'].choices = [(tag.id, tag.name) for tag in initial['tag']]
class CreateInteractionReportForm(forms.Form):
topic = forms.ChoiceField( label= _("Topics"), required=True)
init_date = forms.DateField(required=True, label= _("Initial Date"))
end_date = forms.DateField(required=True, label= _("Final Date"))
from_mural = forms.BooleanField(required=False, label=_("From Mural"))
from_messages = forms.BooleanField(required=False, label=_("Messages"))
class Meta:
fields = ('topic', 'init_date', 'end_date', 'from_mural' , 'from_messages')
def __init__(self, *args, **kwargs):
super(CreateInteractionReportForm, self).__init__(*args, **kwargs)
initial = kwargs['initial']
topics = list(initial['topic'])
self.subject = initial['subject'] #so we can check date cleaned data
self.fields['topic'].choices = [(topic.id, topic.name) for topic in topics]
self.fields['topic'].choices.append((_("All"), _("All")))
def clean(self):
cleaned_data = super(CreateInteractionReportForm, self).clean()
init_date = cleaned_data.get("init_date")
end_date = cleaned_data.get("end_date")
if init_date and end_date:
if init_date > end_date:
raise forms.ValidationError(_("The initial date can't be after the end one."))
def clean_init_date(self):
init_date = self.cleaned_data['init_date']
if init_date < self.subject.init_date:
self._errors['init_date'] = [_('This date should be right or after %s, which is when the subject started. ') % str(self.subject.init_date)]
return init_date
def clean_end_date(self):
end_date = self.cleaned_data['end_date']
if end_date > self.subject.end_date:
self._errors['end_date'] = [_('This date should be right or before %s, which is when the subject finishes. ') % str(self.subject.end_date)]
return end_date
|
gpl-2.0
| -1,449,295,162,794,327,800
| 41.797619
| 231
| 0.705064
| false
| 3.169312
| false
| false
| false
|
bernardhu/whlianjia
|
crawler.py
|
1
|
59560
|
# -*- coding: utf-8 -*-
import pickle
import math
import os.path
import shutil
import datetime
import time
import random
import json
import re
import chardet
import string
import base64
import requests
from bs4 import BeautifulSoup
from model import TradedHouse, DistricHouse, BidHouse, RentHouse, create_table, clear_table
grabedPool = {}
gz_district = ['jiangan', 'jianghan', 'qiaokou', 'dongxihu', 'wuchang', 'qingshan', 'hongshan', 'hanyang', 'donghugaoxin', 'jiangxia']
gz_district_name = {"jiangan":u"江岸", "jianghan":u"江汉", "qiaokou":u"硚口", "dongxihu":u"东西湖",
"wuchang":u"武昌", "qingshan":u"青山", "hongshan":u"洪山", "hanyang": u"汉阳", "donghugaoxing": u"东湖高新",
"jiangxia":u"江夏"}
global start_offset
start_offset = 1
user_agent_list = [
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10",
"Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12",
"Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_8; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.302.2 Safari/532.8",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_4; en-US) AppleWebKit/534.3 (KHTML, like Gecko) Chrome/6.0.464.0 Safari/534.3",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_5; en-US) AppleWebKit/534.13 (KHTML, like Gecko) Chrome/9.0.597.15 Safari/534.13",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.186 Safari/535.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.54 Safari/535.2",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
"Mozilla/5.0 (Macintosh; U; Mac OS X Mach-O; en-US; rv:2.0a) Gecko/20040614 Firefox/3.0.0 ",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.0.3) Gecko/2008092414 Firefox/3.0.3",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.1) Gecko/20090624 Firefox/3.5",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.2.14) Gecko/20110218 AlexaToolbar/alxf-2.0 Firefox/3.6.14",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/601.1.17 (KHTML, like Gecko) Version/8.0 Mobile/13A175 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/601.1.39 (KHTML, like Gecko) Version/9.0 Mobile/13A4305g Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13A344 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/45.0.2454.89 Mobile/13A344 Safari/600.1.4 (000205)",
"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/8.0.57838 Mobile/13A344 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_0_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13A404 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/631.1.17 (KHTML, like Gecko) Version/8.0 Mobile/13A171 Safari/637.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_0_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/6.0.51363 Mobile/13A404 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/8.0.57838 Mobile/13B5110e Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_0_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/45.0.2454.89 Mobile/13A404 Safari/600.1.4 (000994)",
"Mozilla/5.0 (iPad; CPU OS 9_0_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/45.0.2454.89 Mobile/13A404 Safari/600.1.4 (000862)",
"Mozilla/5.0 (iPad; CPU OS 9_0_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/45.0.2454.89 Mobile/13A404 Safari/600.1.4 (000065)",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/5.2.43972 Mobile/13A452 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13A452 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B5130b Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_0_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/45.0.2454.89 Mobile/13A404 Safari/600.1.4 (000539)",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/45.0.2454.89 Mobile/13A452 Safari/600.1.4 (000549)",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/45.0.2454.89 Mobile/13A452 Safari/600.1.4 (000570)",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/44.0.2403.67 Mobile/13A452 Safari/600.1.4 (000693)",
"Mozilla/5.0 (iPad; CPU OS 9_0_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/9.0.60246 Mobile/13A404 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/45.0.2454.89 Mobile/13A452 Safari/600.1.4 (000292)",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/9.0.60246 Mobile/13A452 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B137 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/45.0.2454.89 Mobile/13A452 Safari/600.1.4 (000996)",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/46.0.2490.73 Mobile/13B143 Safari/600.1.4 (000648)",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/46.0.2490.73 Mobile/13B143 Safari/600.1.4 (000119)",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/9.0.60246 Mobile/13B143 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/46.0.2490.73 Mobile/13B143 Safari/600.1.4 (000923)",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) FxiOS/1.2 Mobile/13B143 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13A340 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13B143",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/10.0.63022 Mobile/13B143 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/601.1.17 (KHTML, like Gecko) Version/8.0 Mobile/13A175 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OS X) AppleWebKit/601.1.56 (KHTML, like Gecko) Version/9.0 Mobile/13c75 Safari/601.1.56",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B144 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.70 Mobile/13C75 Safari/601.1.46 (000144)",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.70 Mobile/13C75 Safari/601.1.46 (000042)",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13C75 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 7_1_1 like Mac OS X) AppleWebKit/537.51.2 (KHTML, like Gecko) CriOS/38.0.2125.59 Mobile/11D201 Safari/9537.53",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/11.0.65374 Mobile/13B143 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.70 Mobile/13C75 Safari/601.1.46 (000468)",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/11.0.65374 Mobile/13C75 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/601.1.16 (KHTML, like Gecko) Version/8.0 Mobile/13A171a Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/11.1.66360 Mobile/13C75 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.83 Mobile/13C75 Safari/601.1.46 (000468)",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.107 Mobile/13C75 Safari/601.1.46 (000702)",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/10A403 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B14 3 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13D15 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.107 Mobile/13A452 Safari/601.1.46 (000412)",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.107 Mobile/13D15 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/12.0.68608 Mobile/13D15 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/48.0.2564.87 Mobile/13A452 Safari/601.1.46 (000715)",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/48.0.2564.87 Mobile/13D15 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/45.0.2454.89 Mobile/13B143 Safari/600.1.4 (000381)",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E5200d Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E5200d Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/11.1.66360 Mobile/13D15 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/48.0.2564.104 Mobile/13B143 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/48.0.2564.104 Mobile/13D15 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E5200d Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/48.0.2564.104 Mobile/13E5200d Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.83 Mobile/13C75 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/47.0.2526.83 Mobile/13C75 Safari/601.1.46 (000381)",
"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13A344 Shelter/1.0.0 (YmqLQeAh3Z-nBdz2i87Rf) ",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/46.0.2490.73 Mobile/13C143 Safari/600.1.4 (000718)",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13A143 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) FxiOS/1.4 Mobile/13E5181f Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/49.0.2623.73 Mobile/13D15 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/49.0.2623.73 Mobile/13A15 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E233 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/13.1.72140 Mobile/13E233 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/49.0.2623.73 Mobile/13E233 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E238 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/49.0.2623.109 Mobile/13E238 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) FxiOS/1.4 Mobile/13A452 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) CriOS/44.0.2403.67 Mobile/13B143 Safari/600.1.4 (000073)",
"Mozilla/5.0 (iPad; CPU OS 9_3_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) FxiOS/3.0 Mobile/13E238 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/14.1.119979954 Mobile/13E238 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/50.0.2661.95 Mobile/13E238 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E234 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13F69 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E237 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/50.0.2661.95 Mobile/13F69 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/15.1.122860578 Mobile/13F69 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/51.0.2704.64 Mobile/13F69 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13F72 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/51.0.2704.104 Mobile/13E238 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/50.0.2661.77 Mobile/13D15 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) FxiOS/4.0 Mobile/13F69 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/51.0.2704.104 Mobile/13F69 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/16.0.124986583 Mobile/13F69 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) FxiOS/2.0 Mobile/13E5200d Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13G34 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/52.0.2743.84 Mobile/13F69 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E188a Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_4 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/17.0.128207670 Mobile/13G35 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3_3 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/50.0.2661.95 Mobile/13G34 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_4 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13G35 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_4 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13G35",
"Mozilla/5.0 (iPad; CPU OS 9_3_4 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/52.0.2743.84 Mobile/13G35 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_4 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) FxiOS/5.0 Mobile/13G35 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69 iPadApp",
"Mozilla/5.0 (iPad; CPU OS 9_3_4 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13G35 Safari/601.1 MXiOS/4.9.0.60",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69",
"Mozilla/5.0 (iPad; CPU OS 9_3_4 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/18.0.130791545 Mobile/13G35 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13G36 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/18.0.130791545 Mobile/13G36 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 7_1 like Mac OS X) AppleWebKit/537.51.3 (KHTML, like Gecko) Version/7.0 Mobile/11A4149 Safari/9537.72",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36",
"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/601.1.17 (KHTML, like Gecko) Version/8.0 Mobile/13A175 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/18.1.132077863 Mobile/13G36 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/53.0.2785.86 Mobile/13G36 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/53.0.2785.109 Mobile/13F69 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/53.0.2785.109 Mobile/13G36 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OSX) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13A452 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13D11",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13G36 Safari/601.1.46 Sleipnir/4.3.0m",
"Mozilla/5.0 (iPad; CPU OS 9_0_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/53.0.2785.86 Mobile/13A452 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1.46.140 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/54.0.2840.66 Mobile/13G36 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/54.0.2840.91 Mobile/13G36 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69 Safari/601.1.46 Sleipnir/4.3.2m",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13G36",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/5.3.48993 Mobile/13D15 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/54.0.2840.66 Mobile/13E238 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/50.0.2661.77 Mobile/13E238 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/55.0.2883.79 Mobile/13G36 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_2 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/55.0.2883.79 Mobile/13F69 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) FxiOS/5.3 Mobile/13G36 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) GSA/22.0.141836113 Mobile/13G36 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/56.0.2924.79 Mobile/13D15 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/56.0.2924.79 Mobile/13G36 Safari/601.1.46",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/56.0.2924.79 Mobile/13G36 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/57.0.2987.100 Mobile/13G36 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) FxiOS/6.1 Mobile/13D15 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13BC75 Safari/601.1",
"Mozilla/5.0 (iPad; CPU OS 9_3_3 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/56.0.2924.79 Mobile/13G34 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/57.0.2987.137 Mobile/13G36 Safari/601.1.46",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1.46(KHTML, like Gecko) FxiOS/6.1 Mobile/13G36 Safari/601.1.46",
"Mozilla/6.0 (iPhone; CPU iPhone OS 8_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/8.0 Mobile/10A5376e Safari/8536.25",
"Mozilla/5.0 (iPad; CPU OS 9_0 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Version/9.0 Mobile/13A340 Safari/600.1.4",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/537.51.2 (KHTML, like Gecko) CriOS/36.0.1985.49 Mobile/13G36 Safari/9537.53",
"Mozilla/5.0 (iPad; CPU OS 9_3_5 like Mac OS X) AppleWebKit/601.1 (KHTML, like Gecko) CriOS/59.0.3071.102 Mobile/13G36 Safari/601.1.46"
]
def get_header():
i = random.randint(0,len(user_agent_list)-1)
headers = {
'User-Agent': user_agent_list[i],
        'x-forwarded-for': "1.2.3.4"
}
return headers
def get_multipart_formdata(data, bondary):
post_data = []
for key, value in data.iteritems():
if value is None:
continue
post_data.append('--' + bondary )
post_data.append('Content-Disposition: form-data; name="{0}"'.format(key))
post_data.append('')
if isinstance(value, int):
value = str(value)
post_data.append(value)
post_data.append('--' + bondary + '--')
post_data.append('')
body = '\r\n'.join(post_data)
return body.encode('utf-8')
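# Illustration of the generated body, using hypothetical values: a call such as
#   get_multipart_formdata({'uuid': 'abc', 'bitvalue': 5}, 'XBOUNDARY')
# produces a CRLF-joined multipart/form-data payload shaped like
#   --XBOUNDARY
#   Content-Disposition: form-data; name="uuid"
#   (blank line)
#   abc
#   --XBOUNDARY
#   Content-Disposition: form-data; name="bitvalue"
#   (blank line)
#   5
#   --XBOUNDARY--
# (None values are skipped, ints are stringified, and field order follows dict
# iteration order); verify_captcha() below posts a body built this way.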
def verify_captcha():
url = "http://captcha.lianjia.com"
r = requests.get(url, headers= get_header(), timeout= 30)
soup = BeautifulSoup(r.content, "lxml")
pages = soup.find("form", class_="human").find_all("input")
print pages[2]['value'], pages[2]['name']
csrf = pages[2]['value']
time.sleep(1)
url = "http://captcha.lianjia.com/human"
r = requests.get(url, headers= get_header(), timeout= 30)
cookie = r.headers['Set-Cookie']
soup = BeautifulSoup(r.content, "lxml")
images = json.loads(r.content)['images']
uuid = json.loads(r.content)['uuid']
#print images
for idx in xrange(0, len(images)):
fh = open("%d.jpg"%idx, "wb")
data = images['%d'%idx].split(',', 1)
fh.write(base64.b64decode(data[1]))
fh.close()
step = 0
mask = 0
while 1:
if step == 0:
val = raw_input("check 0.jpg reverse,(y/n):\t")
if val == 'y' or val == 'Y':
mask = mask + 1
step = 1
elif step == 1:
val = raw_input("check 1.jpg reverse,(y/n):\t")
if val == 'y' or val == 'Y':
mask = mask + 2
step = 2
elif step == 2:
val = raw_input("check 2.jpg reverse,(y/n):\t")
if val == 'y' or val == 'Y':
mask = mask + 4
step = 3
elif step == 3:
val = raw_input("check 3.jpg reverse,(y/n):\t")
if val == 'y' or val == 'Y':
mask = mask + 8
break
print mask
boundary='----WebKitFormBoundary7MA4YWxkTrZu0gW'
headers = get_header()
headers['content-type'] = "multipart/form-data; boundary={0}".format(boundary)
headers['Cookie'] = cookie
print get_multipart_formdata({'uuid':uuid, 'bitvalue': mask, '_csrf': csrf}, boundary)
print headers
r = requests.post(url, headers=headers, data=get_multipart_formdata({'uuid':uuid, 'bitvalue': mask, '_csrf': csrf}, boundary))
print r.request
print r.content
def get_distric_rent_cnt(distric):
print "try to grab %s community rent cnt "%distric
url = "http://wh.lianjia.com/zufang/%s/"%distric
r = requests.get(url, headers= get_header(), timeout= 30)
#print r.text.encode("utf-8")
soup = BeautifulSoup(r.content, "lxml")
pages = soup.find("div", class_="page-box house-lst-page-box")
time.sleep(random.randint(5,10))
try:
pageStr = pages["page-data"]
except Exception, e:
print e,r.content
os._exit(0)
jo = json.loads(pageStr)
return jo['totalPage']
def get_distric_community_cnt(distric):
print "try to grab %s community cnt "%distric
url = "http://wh.lianjia.com/xiaoqu/%s/"%distric
r = requests.get(url, headers= get_header(), timeout= 30)
#print r.text.encode("utf-8")
soup = BeautifulSoup(r.content, "lxml")
pages = soup.find("div", class_="page-box house-lst-page-box")
time.sleep(random.randint(5,10))
try:
pageStr = pages["page-data"]
except Exception, e:
print e,r.content,r.text
os._exit(0)
jo = json.loads(pageStr)
return jo['totalPage']
def grab_distric(url):
print "try to grab distric page ", url
r = requests.get(url, headers= get_header(), timeout= 30)
soup = BeautifulSoup(r.content, "lxml")
try:
districList = soup.find("ul", class_="listContent").find_all('li')
except Exception, e:
print e,r.content
os._exit(0)
if not districList:
return
for item in districList:
        # Link to the house detail page, used as the unique identifier
distUrl = item.a["href"] or ''
#if distUrl in grabedPool["data"]:
# print distUrl, "already exits,skip"
# continue
print "start to crawl" , distUrl
        # Scrape the historical transaction summary
title = item.find("div", class_="title").a.string.encode("utf-8").rstrip()
historyList = item.find("div", class_="houseInfo").find_all('a')
history = historyList[0].string.encode("utf-8")
m = re.match(r"(\d+)天成交(\d+)套", history)
print m, history
historyRange = 0
historySell = 0
if m:
historyRange = m.group(1)
historySell = m.group(2)
print title, history, historyRange, historySell
        # Scrape the district & business circle
pos = item.find("div", class_="positionInfo").find_all('a')
dis = pos[0].string.encode("utf-8")
bizcircle = pos[1].string.encode("utf-8")
print dis, bizcircle
        # Scrape the average transaction price
avgStr = item.find("div", class_="totalPrice").span.string.encode("utf-8")
m = re.match(r"(\d+)", avgStr)
if m:
avg = int(avgStr)
else:
avg = 0
print avg
        # Scrape the number of listings currently for sale
onSell = int(item.find("div", class_="xiaoquListItemSellCount").a.span.string)
print onSell
        # Store into sqlite through the ORM
distItem = DistricHouse(
name = title,
district = dis,
bizcircle = bizcircle,
historyRange = historyRange,
historySell = historySell,
ref = distUrl,
avgpx = avg,
onsell = onSell,
)
distItem.save()
        # Add to the pool of already-scraped entries
#grabedPool["data"].add(distUrl)
        # After scraping, sleep a few seconds to avoid putting heavy load on the remote server
time.sleep(random.randint(1,3))
def get_distric_chengjiao_cnt(distric, proxy):
print "try to grab %s chengjiao cnt "%distric
url = "http://wh.lianjia.com/chengjiao/%s/"%distric
r = requests.get(url, headers= get_header(), timeout= 30)
#print r.text.encode("utf-8")
soup = BeautifulSoup(r.content, "lxml")
try:
pages = soup.find("div", class_="page-box house-lst-page-box")
time.sleep(random.randint(5,10))
pageStr = pages["page-data"]
jo = json.loads(pageStr)
return jo['totalPage']
except Exception, e:
print e,r.content
os._exit(0)
def get_distric_bid_cnt(distric, proxy):
print "try to grab %s bid cnt "%distric
url = "http://wh.lianjia.com/ershoufang/%s/"%distric
r = requests.get(url, headers= get_header(), timeout= 30)
#print r.text.encode("utf-8")
soup = BeautifulSoup(r.content, "lxml")
try:
pages = soup.find("div", class_="page-box house-lst-page-box")
time.sleep(random.randint(5,10))
pageStr = pages["page-data"]
jo = json.loads(pageStr)
return jo['totalPage']
except Exception, e:
print e,r.content
os._exit(0)
#i = random.randint(0,len(proxy)-1)
#proxies = {
# "http": proxy[i]
# }
#print "try proxy", proxy[i]
#r = requests.get(url, headers= get_header(), proxies=proxies, timeout= 30)
#soup = BeautifulSoup(r.content, "lxml")
#pages = soup.find("div", class_="page-box house-lst-page-box")
#time.sleep(random.randint(5,10))
#pageStr = pages["page-data"]
#jo = json.loads(pageStr)
#return jo['totalPage']
def get_xici_proxy(url, proxys):
print "get proxy", url
r = requests.get(url, headers= get_header(), timeout= 10)
soup = BeautifulSoup(r.content, "lxml")
pages = soup.find_all("tr", class_="odd")
for page in pages:
items = page.find_all("td")
proxy ="http://%s:%s"%(items[1].string, items[2].string)
url = "http://wh.lianjia.com/chengjiao/tianhe/"
proxies = {
"http": proxy
}
try:
r = requests.get(url, headers= get_header(), proxies=proxies, timeout= 3)
soup = BeautifulSoup(r.content, "lxml")
tradedHoustList = soup.find("ul", class_="listContent")
if not tradedHoustList:
continue
proxys.append(proxy)
print proxy, proxys
except Exception, e:
#print Exception,":",e
continue
def get_kuaidaili_proxy(url, proxys):
print "get proxy", url
r = requests.get(url, headers= get_header(), timeout= 10)
soup = BeautifulSoup(r.content, "lxml")
pages = soup.find("tbody").find_all("tr")
for page in pages:
items = page.find_all("td")
proxy ="http://%s:%s"%(items[0].string, items[1].string)
print proxy
url = "http://wh.lianjia.com/chengjiao/tianhe/"
proxies = {
"http": proxy
}
try:
r = requests.get(url, headers= get_header(), proxies=proxies, timeout= 3)
soup = BeautifulSoup(r.content, "lxml")
tradedHoustList = soup.find("ul", class_="listContent")
if not tradedHoustList:
continue
proxys.append(proxy)
print proxy, proxys
except Exception, e:
#print Exception,":",e
continue
def get_youdaili_proxy(url, proxys):
print "get proxy", url
r = requests.get(url, headers= get_header(), timeout= 10)
soup = BeautifulSoup(r.content, "lxml")
pages = soup.find("div", class_="chunlist").find_all("a")
page = pages[0]
u = page["href"]
html = requests.get(u, headers= get_header(), timeout= 3).content
proxy_list = re.findall(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}', html)
for proxy in proxy_list:
url = "http://wh.lianjia.com/chengjiao/tianhe/"
proxies = {
"http": proxy
}
try:
r = requests.get(url, headers= get_header(), proxies=proxies, timeout= 3)
soup = BeautifulSoup(r.content, "lxml")
tradedHoustList = soup.find("ul", class_="listContent")
if not tradedHoustList:
continue
proxys.append(proxy)
print proxy, proxys
except Exception, e:
#print Exception,":",e
continue
def build_proxy():
proxys = []
#get_xici_proxy("http://www.xicidaili.com/nn/1", proxys)
#get_xici_proxy("http://www.xicidaili.com/nn/2", proxys)
#get_kuaidaili_proxy("http://www.kuaidaili.com/proxylist/1", proxys)
#get_kuaidaili_proxy("http://www.kuaidaili.com/proxylist/2", proxys)
#get_kuaidaili_proxy("http://www.kuaidaili.com/proxylist/3", proxys)
#get_kuaidaili_proxy("http://www.kuaidaili.com/proxylist/4", proxys)
#get_youdaili_proxy("http://www.youdaili.net/Daili/http", proxys)
r = requests.get("http://127.0.0.1:5000/get_all/", headers= get_header(), timeout= 10)
print r.content
proxys= json.loads(r.content)
print proxys
return proxys
def grabRent(url, proxy, disName, priceDic, bizDic):
print "try to grab page ", url
r = requests.get(url, headers= get_header(), timeout= 30)
soup = BeautifulSoup(r.content, "lxml")
try:
bidHoustList = soup.find("ul", class_="house-lst").find_all('li')
except Exception, e:
print e,r.content
os._exit(0)
if not bidHoustList:
return
storge = []
for item in bidHoustList:
        # Link to the house detail page, used as the unique identifier
houseUrl = item.a["href"] or ''
#if houseUrl in grabedPool["data"]:
# print houseUrl, "already exit, skip"
# continue
print 'start to crawl' , houseUrl
        # Scrape the community, layout, area, orientation, decoration and elevator fields
xiaoqu = item.find("div", class_="where").a.string.rstrip().encode("utf-8")
houseType = item.find("span", class_="zone").span.string.rstrip().encode("utf-8")
squareStr = item.find("span", class_="meters").string.rstrip().encode("utf-8")
orientation = item.find("div", class_="where").findAll("span")[4].string.encode("utf-8").rstrip()
print xiaoqu, houseType, squareStr, orientation
m = re.match(r"\b[0-9]+(\.[0-9]+)?", squareStr)
square = 0
if m:
square = string.atof(m.group(0))
print squareStr, square
        # Floor level and building age
posInfo = item.find("div", class_="con").contents[2]
m = re.match(ur"(.*)楼层\(共(\d+)层\)", posInfo)
floorLevel = 'Nav'
floorTotal = -1
if m:
floorLevel = m.group(1)
floorTotal = m.group(2)
print m.group(1).encode("utf-8"), m.group(2)
print floorLevel.encode("utf-8"), floorTotal
        # Listing (asking) price
priceInfo = item.find("div", class_="price").span
if priceInfo:
price = string.atof(priceInfo.string)
else :
price = 0
print price
pricePre = item.find("div", class_="price-pre").string
priceUpdate, misc = ([x.strip() for x in pricePre.split(" ")])
print priceUpdate
        # Followers, viewings and days since listing
seenStr = item.find("div", class_="square").find("span", class_="num").string
seen = 0
if m:
seen = string.atoi(seenStr)
print seen
try:
avg = priceDic[xiaoqu]
except Exception, e:
print e
avg = 0
print "avg", avg
try:
biz = bizDic[xiaoqu]
except Exception, e:
print e
biz = ""
print "biz", biz
loan = 0
loan = square*avg -1500000
loanRet = 0
yearRate = 0.049
monthRate = 0.049/12
loanYear = 30
loanMonth = loanYear*12
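        # Equal-installment (annuity) repayment formula used below:
        #   loanRet = P * r * (1 + r)**n / ((1 + r)**n - 1)
        # where P is the financed amount (total price square*avg minus the
        # assumed 1,500,000 of own funds), r the monthly rate 0.049/12 and
        # n the number of monthly installments (30 * 12).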
if loan < 0 :
loan = 0
loanRet = 0
else:
loanRet = loan*monthRate*((1+monthRate)**loanMonth)/(((1+monthRate)**loanMonth)-1)
loan = round(loan/10000)
print loan, loanRet
        # Store into sqlite through the ORM
BidItem = RentHouse(
xiaoqu = xiaoqu,
houseType = houseType,
square = square,
houseUrl = houseUrl,
orientation = orientation,
floorLevel = floorLevel,
floorTotal = floorTotal,
price = price,
avg = avg,
loan = loan,
loanRet = loanRet,
seen = seen,
bizcircle = biz,
district = disName,
)
storge.append(BidItem)
for s in storge:
s.save()
        # Add to the pool of already-scraped entries
#grabedPool["data"].add(s.houseUrl)
        # After scraping, sleep a few seconds to avoid putting heavy load on the remote server
time.sleep(random.randint(1,3))
def grabBid(url, proxy, disName, priceDic):
print "try to grabbid page ", url
r = requests.get(url, headers= get_header(), timeout= 30)
soup = BeautifulSoup(r.content, "lxml")
try:
bidHoustList = soup.find("ul", class_="sellListContent").find_all('li')
except Exception, e:
print e,r.content
os._exit(0)
i = random.randint(0,len(proxy)-1)
proxies = {
"http": proxy[i]
}
print "try proxy", proxy[i]
r = requests.get(url, headers= get_header(), proxies=proxies, timeout= 30)
soup = BeautifulSoup(r.content, "lxml")
bidHoustList = soup.find("ul", class_="sellListContent").find_all('li')
if not bidHoustList:
return
storge = []
for item in bidHoustList:
        # Link to the house detail page, used as the unique identifier
houseUrl = item.a["href"] or ''
#if houseUrl in grabedPool["data"]:
# print houseUrl, "already exit, skip"
# continue
print 'start to crawl' , houseUrl
        # Scrape the community, layout, area, orientation, decoration and elevator fields
houseInfo = item.find("div", class_="houseInfo").contents[2]
xiaoqu = item.find("div", class_="houseInfo").a.string.encode("utf-8").rstrip()
if houseInfo:
if len(houseInfo.split("|")) == 5:
null, houseType, squareStr, orientation, decoration = ([x.strip() for x in houseInfo.split("|")])
elevator = 'Nav'
if len(houseInfo.split("|")) == 6:
null, houseType, squareStr, orientation, decoration, elevator = ([x.strip() for x in houseInfo.split("|")])
print xiaoqu, houseType.encode("utf-8"), orientation.encode("utf-8"), decoration.encode("utf-8"), elevator.encode("utf-8")
m = re.match(ur"\b[0-9]+(\.[0-9]+)?", squareStr)
square = 0
if m:
square = string.atof(m.group(0))
print squareStr.encode("utf-8"), square
        # Floor level and building age
posInfo = item.find("div", class_="positionInfo").contents[1]
print posInfo.encode("utf-8")
m = re.match(ur"(.*)楼层\(共(\d+)层\)(\d+)年建", posInfo)
floorLevel = 'Nav'
floorTotal = -1
build = -1
if m:
floorLevel = m.group(1)
floorTotal = m.group(2)
build = int(m.group(3))
print m.group(1).encode("utf-8"), m.group(2), m.group(3)
print floorLevel.encode("utf-8"), floorTotal, build
biz = item.find("div", class_="positionInfo").a.string
print biz
        # Listing (asking) price
priceInfo = item.find("div", class_="totalPrice").span
if priceInfo:
bid = string.atof(priceInfo.string)
else :
bid = 0
print bid
        # Average unit price
priceInfo = item.find("div", class_="unitPrice").span
priceStr = ""
if priceInfo:
priceStr = priceInfo.string
m = re.match(ur"单价(\d+)元", priceStr)
price = 0
if m:
price = m.group(1)
print price, priceStr.encode("utf-8")
        # Followers, viewings and days since listing
followInfo = item.find("div", class_="followInfo").contents[1]
if followInfo:
watchStr, seenStr, releaseStr = ([x.strip() for x in followInfo.split("/")])
print watchStr.encode("utf-8"), seenStr.encode("utf-8"), releaseStr.encode("utf-8")
m = re.match(ur"(\d+)人", watchStr)
watch = 0
if m:
watch = m.group(1)
m = re.match(ur"共(\d+)次", seenStr)
seen = 0
if m:
seen = m.group(1)
m = re.match(ur"(\d+)天", releaseStr)
release = 0
if m:
release = int(m.group(1))
else:
m = re.match(ur"(\d+)个月", releaseStr)
if m:
release = int(m.group(1))*30
else:
m = re.match(ur"(.*)年", releaseStr)
if m:
release = m.group(1)
if release == u"一":
release = 365
try:
avg = priceDic[xiaoqu]
except Exception, e:
avg = 0
print watch, seen, release, avg
        # Store into sqlite through the ORM
BidItem = BidHouse(
xiaoqu = xiaoqu,
houseType = houseType,
square = square,
houseUrl = houseUrl,
orientation = orientation,
decoration = decoration,
elevator = elevator,
floorLevel = floorLevel,
floorTotal = floorTotal,
build = build,
price = price,
avg = avg,
bid = bid,
watch = watch,
seen = seen,
release = release,
bizcircle = biz,
district = disName,
)
storge.append(BidItem)
for s in storge:
s.save()
        # Add to the pool of already-scraped entries
#grabedPool["data"].add(s.houseUrl)
        # After scraping, sleep a few seconds to avoid putting heavy load on the remote server
time.sleep(random.randint(1,3))
def grab(url, proxy, disName, bizDic, lastMarkTrade):
print "try to grab page ", url
r = requests.get(url, headers= get_header(), timeout= 30)
soup = BeautifulSoup(r.content, "lxml")
try:
tradedHoustList = soup.find("ul", class_="listContent").find_all('li')
except Exception, e:
print e,r.content
#os._exit(0)
tradedHoustList = soup.find("li", class_="pictext")
if not tradedHoustList:
tradedHoustList = soup.find("ul", class_="listContent").find_all('li')
else:
i = random.randint(0,len(proxy)-1)
proxies = {
"http": proxy[i]
}
print "try proxy", proxy[i]
r = requests.get(url, headers= get_header(), proxies=proxies, timeout= 30)
soup = BeautifulSoup(r.content, "lxml")
tradedHoustList = soup.find("ul", class_="listContent").find_all('li')
if not tradedHoustList:
return
storge = []
stop = False
for item in tradedHoustList:
# House detail page link, used as the unique identifier
houseUrl = item.a["href"] or ''
#if houseUrl in grabedPool["data"]:
# print houseUrl, "already exit, skip"
# continue
print 'start to crawl' , houseUrl
# Scrape neighborhood, layout, area
title = item.find("div", class_="title")
if title:
print title
xiaoqu, houseType, square = (title.string.replace(u"\xa0", " ").split(" "))  # normalize the separator (assumed to be a non-breaking space; the original character was garbled to a plain space)
m = re.match(ur"\b[0-9]+(\.[0-9]+)?", square)
if m:
square = string.atof(m.group(0))
else:
xiaoqu, houseType, square = ('Nav', 'Nav', 0)
xiaoqu = xiaoqu.encode("utf-8").rstrip()
houseType = houseType.encode("utf-8")
print xiaoqu, houseType, square
dealInfo = item.find("div", class_="totalPrice").span
try:
deal = string.atof(dealInfo.string.encode("utf-8"))
except Exception, e:
deal = -1
print deal
# Orientation, decoration, elevator
houseInfo = item.find("div", class_="houseInfo").contents[1]
if houseInfo:
if len(houseInfo.split("|")) == 2:
orientation, decoration = ([x.strip() for x in houseInfo.split("|")])
elevator = 'Nav'
if len(houseInfo.split("|")) == 3:
orientation, decoration, elevator = ([x.strip() for x in houseInfo.split("|")])
print orientation.encode("utf-8"), decoration.encode("utf-8"), elevator.encode("utf-8")
# Deal (transaction) date
dealDate = item.find("div", class_="dealDate")
if dealDate:
tradeDate = datetime.datetime.strptime(dealDate.string, '%Y.%m.%d') or datetime.datetime(1990, 1, 1)
print tradeDate
if lastMarkTrade >= tradeDate:
print 'break for time'
stop = True
break
# Floor level and building year
posInfo = item.find("div", class_="positionInfo").contents[1]
if posInfo:
floor, buildStr = ([x.strip() for x in posInfo.split(" ")])
print floor.encode("utf-8"), buildStr.encode("utf-8")
m = re.match(ur"(.*)楼层\(共(\d+)层\)", floor)
floorLevel = 'Nav'
floorTotal = -1
if m:
floorLevel = m.group(1)
floorTotal = m.group(2)
print m.group(1).encode("utf-8"), m.group(2)
m = re.match(ur"(\d+)年建", buildStr)
build = -1
if m:
build = m.group(1)
print floorLevel.encode("utf-8"), floorTotal, build
# Average price per square meter
priceInfo = item.find("div", class_="unitPrice").span
if priceInfo:
price = int(priceInfo.string)
else :
price = 0
print price
# Asking price and deal cycle (days to close)
dealCycle = item.find("span", class_="dealCycleTxt").find_all('span')
bid = -1
cycle = -1
if dealCycle:
if len(dealCycle) == 1:
bidStr = dealCycle[0].string
cycleStr = ""
if len(dealCycle) == 2:
bidStr = dealCycle[0].string
cycleStr = dealCycle[1].string
print bidStr.encode("utf-8"), cycleStr.encode("utf-8")
m = re.match(ur"挂牌(\d+)万", bidStr)
if m:
bid = m.group(1)
m = re.match(ur"成交周期(\d+)天", cycleStr)
if m:
cycle = m.group(1)
try:
biz = bizDic[xiaoqu]
except Exception, e:
biz = "unknown"
#print bid, cycle, disName, biz
# Persist to sqlite through the ORM
tradeItem = TradedHouse(
xiaoqu = xiaoqu,
houseType = houseType,
square = square,
houseUrl = houseUrl,
orientation = orientation,
decoration = decoration,
elevator = elevator,
floorLevel = floorLevel,
floorTotal = floorTotal,
build = build,
price = price,
tradeDate = tradeDate,
bid = bid,
deal = deal,
cycle = cycle,
district = disName,
bizcircle = biz,
)
storge.append(tradeItem)
for s in storge:
s.save()
# Add to the pool of already-scraped URLs
#grabedPool["data"].add(s.houseUrl)
# After scraping a page, sleep a few seconds to avoid putting too much load on the remote server
time.sleep(random.randint(1,3))
return stop
step_context = {"phase":0, "cnt":0, "offset":0, "pgoffset":1, "date":"20170705"}
def save_context():
global step_context
print "save", step_context, type(step_context)
json.dump(step_context, open('context','w'))
def load_context():
global step_context
step_context = json.load(open('context','r'))
print "load", step_context, type(step_context)
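# Checkpoint sketch (comments added for clarity; not part of the original script):
# the crawler persists its position after every page so that a crash or IP ban can
# be resumed from the same phase/district/page instead of restarting from scratch.
# A fresh run could be seeded with something like:
#
#   step_context = {"phase": 0, "cnt": 0, "offset": 0, "pgoffset": 1,
#                   "date": time.strftime("%Y%m%d", time.localtime())}
#   save_context()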
def crawl_district():
global step_context
for dis_offset in xrange(step_context['offset'], len(gz_district)):
dis = gz_district[dis_offset]
step_context['offset'] = dis_offset
save_context()
cnt = step_context['cnt']
if cnt == 0:
cnt = get_distric_community_cnt(dis)
print "get_distric_info", dis, cnt
step_context['cnt'] = cnt
save_context()
for i in xrange(step_context['pgoffset'], cnt+1):
step_context['pgoffset'] = i
save_context()
url = "http://wh.lianjia.com/xiaoqu/%s/pg%s/"%(dis, format(str(i)))
grab_distric(url)
step_context['pgoffset'] = 1
step_context['cnt'] = 0
save_context()
def crawl_district_chengjiao():
global step_context
for dis_offset in xrange(step_context['offset'], len(gz_district)):
dis = gz_district[dis_offset]
step_context['offset'] = dis_offset
save_context()
distric = DistricHouse.select(DistricHouse.name, DistricHouse.bizcircle, DistricHouse.avgpx).where(DistricHouse.district == gz_district_name[dis])
print distric
bizDic = {}
priceDic = {}
for item in distric:
name = item.name.rstrip().encode("utf-8")
biz = item.bizcircle.encode("utf-8")
bizDic[name] = biz
price = item.avgpx
priceDic[name] = price
#print name
cnt = step_context['cnt']
if cnt == 0:
cnt = get_distric_chengjiao_cnt(dis, [])
step_context['cnt'] = cnt
save_context()
lastMarkTrade = datetime.datetime(1990, 1, 1)
ts = TradedHouse.select(TradedHouse.tradeDate).where(TradedHouse.district == gz_district_name[dis]).order_by(TradedHouse.tradeDate.desc()).limit(1)
print ts
for item in ts:
print item.tradeDate, type(item.tradeDate)
lastMarkTrade = item.tradeDate
for i in xrange(step_context['pgoffset'], cnt+1):
step_context['pgoffset'] = i
save_context()
page = "http://wh.lianjia.com/chengjiao/%s/pg%s/"%(dis, format(str(i)))
stop = grab(page, [], gz_district_name[dis], bizDic, lastMarkTrade)
if stop == True:
break
step_context['pgoffset'] = 1
step_context['cnt'] = 0
save_context()
def crawl_district_bid():
global step_context
#proxy = build_proxy()
for dis_offset in xrange(step_context['offset'], len(gz_district)):
dis = gz_district[dis_offset]
distric = DistricHouse.select(DistricHouse.name, DistricHouse.bizcircle, DistricHouse.avgpx).where(DistricHouse.district == gz_district_name[dis])
print distric
bizDic = {}
priceDic = {}
for item in distric:
name = item.name.rstrip().encode("utf-8")
biz = item.bizcircle.encode("utf-8")
bizDic[name] = biz
price = item.avgpx
priceDic[name] = price
#print name
step_context['offset'] = dis_offset
save_context()
cnt = step_context['cnt']
if cnt == 0:
cnt = get_distric_bid_cnt(dis, [])
step_context['cnt'] = cnt
save_context()
for i in xrange(step_context['pgoffset'], cnt+1):
step_context['pgoffset'] = i
save_context()
page = "http://wh.lianjia.com/ershoufang/%s/pg%s/"%(dis, format(str(i)))
grabBid(page, [], gz_district_name[dis], priceDic)
step_context['pgoffset'] = 1
step_context['cnt'] = 0
save_context()
def crawl_district_rent():
global step_context
for dis_offset in xrange(step_context['offset'], len(gz_district)):
dis = gz_district[dis_offset]
distric = DistricHouse.select(DistricHouse.name, DistricHouse.bizcircle, DistricHouse.avgpx).where(DistricHouse.district == gz_district_name[dis])
print distric
bizDic = {}
priceDic = {}
for item in distric:
name = item.name.rstrip().encode("utf-8")
biz = item.bizcircle.encode("utf-8")
bizDic[name] = biz
price = item.avgpx
priceDic[name] = price
#print name
step_context['offset'] = dis_offset
save_context()
cnt = step_context['cnt']
if cnt == 0:
cnt = get_distric_rent_cnt(dis)
step_context['cnt'] = cnt
save_context()
for i in xrange(step_context['pgoffset'], cnt+1):
step_context['pgoffset'] = i
save_context()
page = "http://wh.lianjia.com/zufang/%s/pg%s/"%(dis, format(str(i)))
grabRent(page, [], gz_district_name[dis], priceDic, bizDic)
step_context['pgoffset'] = 1
step_context['cnt'] = 0
save_context()
def process_context():
#global step_context
print step_context['phase']
if step_context['phase'] == 0:
crawl_district()
step_context['phase'] = 1
step_context['cnt'] = 0
step_context['offset'] = 0
step_context['pgoffset'] = 1
step_context['date'] = time.strftime("%Y%m%d", time.localtime())
save_context()
elif step_context['phase'] == 1:
crawl_district_chengjiao()
step_context['phase'] = 2
step_context['cnt'] = 0
step_context['offset'] = 0
step_context['pgoffset'] = 1
save_context()
elif step_context['phase'] == 2:
crawl_district_bid()
step_context['phase'] = 3
step_context['cnt'] = 0
step_context['offset'] = 0
step_context['pgoffset'] = 1
save_context()
elif step_context['phase'] == 3:
crawl_district_rent()
step_context['phase'] = -1
step_context['cnt'] = 0
step_context['offset'] = 0
step_context['pgoffset'] = 1
save_context()
elif step_context['phase'] == -1:
#shutil.copy('houseprice.db', time.strftime("houseprice_%Y%m%d.db", time.localtime()))
clear_table()
step_context['phase'] = 0
if __name__== "__main__":
#save_context()
load_context()
#verify_captcha()
if step_context['phase'] == -1:
process_context()
while step_context['phase'] != -1:
process_context()
|
mit
| -7,020,224,055,126,833,000
| 47.170492
| 155
| 0.585199
| false
| 2.855588
| false
| false
| false
|
andres53016/domotica
|
software/alarma.py
|
1
|
1989
|
import zmq,json,time
import pygame
import RPi.GPIO as GPIO
pygame.mixer.init()
GPIO.setmode(GPIO.BCM)
GPIO.setup(25,GPIO.OUT)
GPIO.setup(8,GPIO.OUT)
GPIO.output(25,0)
GPIO.output(8,0)
entradas={"puerta":17,"ventanaCocina":27,"ventanaDormitorio":22}
for entrada in entradas.values():
GPIO.setup(entrada,GPIO.IN,pull_up_down=GPIO.PUD_UP)
def reproducir2(archivo):
s = pygame.mixer.Sound(archivo)
s.play()
cont = zmq.Context()
s = cont.socket(zmq.REP)
s.bind("tcp://127.0.0.1:5001")
tags=["alarma","focoCocina","focoDormitorio"]
alarma="true"
irrumpieron=0
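# Protocol sketch (comments added for clarity, inferred from the handlers below):
# the REP socket expects a JSON-encoded list. Write requests look like
#   ["w", "focoCocina", "true"]  -> switches GPIO 25 on and replies "ok"
#   ["w", "alarma", "false"]     -> disarms the alarm and plays "desactivada.wav"
# and a read request
#   ["r"]                        -> replies with a JSON dict of every door/window
#                                   input, the alarm flag and both light outputs.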
while True:
try:
msg = s.recv(zmq.NOBLOCK)
vector=json.loads(msg.decode())
print(vector)
if vector[0]=="w":
if vector[1]=="focoCocina":
if vector[2]=="true":
GPIO.output(25,1)
elif vector[2]=="false":
GPIO.output(25,0)
elif vector[1]=="focoDormitorio":
if vector[2]=="true":
GPIO.output(8,1)
elif vector[2]=="false":
GPIO.output(8,0)
elif vector[1]=="alarma":
alarma=vector[2]
if alarma=="true":
reproducir2("activada.wav")
elif alarma=="false":
reproducir2("desactivada.wav")
irrumpieron=0
s.send("ok".encode())
elif vector[0]=="r":
msg={}
for entrada in entradas.keys():
msg[entrada]=GPIO.input(entradas[entrada])
msg["alarma"]=alarma
msg["focoCocina"]=GPIO.input(25)
msg["focoDormitorio"]=GPIO.input(8)
s.send(json.dumps(msg).encode())
except:
pass
if alarma=="true":
for entrada in entradas.values():
irrumpieron+=GPIO.input(entrada)
if irrumpieron:
reproducir2("alarma.wav")
pass
time.sleep(0.1)
|
gpl-3.0
| -7,158,794,117,130,000,000
| 29.136364
| 64
| 0.529915
| false
| 3.132283
| false
| false
| false
|
2016-Capstone/PythonController
|
src/Bybop_Commands.py
|
1
|
8092
|
import os
import sys
import struct
MY_PATH, _ = os.path.split(os.path.realpath(__file__))
ARSDK_PATH=os.path.join(MY_PATH,'..', 'arsdk-xml')
ARCOMMANDS_PATH=os.path.join(ARSDK_PATH, 'xml')
sys.path.append(ARSDK_PATH)
import arsdkparser
_ctx = arsdkparser.ArParserCtx()
arsdkparser.parse_xml(_ctx, os.path.join(ARCOMMANDS_PATH, 'generic.xml'))
for f in sorted(os.listdir(ARCOMMANDS_PATH)):
if not f.endswith('.xml') or f == 'generic.xml':
continue
arsdkparser.parse_xml(_ctx, os.path.join(ARCOMMANDS_PATH, f))
arsdkparser.finalize_ftrs(_ctx)
class CommandError(Exception):
def __init__(self, msg):
self.value = msg
def __str__(self):
return repr(self.value)
_struct_fmt_for_type = {
'u8' : 'B',
'i8' : 'b',
'u16' : 'H',
'i16' : 'h',
'u32' : 'I',
'i32' : 'i',
'u64' : 'Q',
'i64' : 'q',
'float' : 'f',
'double' : 'd',
'string' : 'z',
'enum' : 'i',
}
def _format_string_for_cmd(cmd):
ret = '<'
for arg in cmd.args:
if isinstance(arg.argType, arsdkparser.ArMultiSetting):
raise Exception('Multisettings not supported !')
elif isinstance(arg.argType, arsdkparser.ArBitfield):
arg_str_type = arsdkparser.ArArgType.TO_STRING[arg.argType.btfType]
elif isinstance(arg.argType, arsdkparser.ArEnum):
arg_str_type = 'i32'
else:
arg_str_type = arsdkparser.ArArgType.TO_STRING[arg.argType]
ret += _struct_fmt_for_type[arg_str_type]
return ret, bool(cmd.args)
def _struct_pack(fmt, *args):
"""
like struct.pack(fmt, *args)
except that a 'z' format is supported to include null terminated strings
"""
nbarg = 0
real_fmt = ''
for c in fmt:
if c == 'z':
real_fmt += '%ds' % (len(args[nbarg])+1)
nbarg += 1
else:
real_fmt += c
if c in 'cbB?hHiIlLqQfdspP':
nbarg += 1
return struct.pack(real_fmt, *args)
def _struct_unpack(fmt, string):
"""
like struct.unpack(fmt, string)
except that a 'z' format is supported to read a null terminated string
"""
real_fmt=''
null_idx=[]
nbarg = 0
for i in range(len(fmt)):
c = fmt[i]
if c == 'z':
start = struct.calcsize(real_fmt)
strlen = string[start:].find('\0')
if strlen < 0:
raise CommandError('No null char in string')
real_fmt += '%dsB' % strlen
nbarg += 1
null_idx.append(nbarg)
nbarg += 1
else:
real_fmt += c
if c in 'cbB?hHiIlLqQfdspP':
nbarg += 1
content = struct.unpack(real_fmt, string)
ret = tuple([content[i] for i in range(len(content)) if i not in null_idx])
return ret
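# Illustrative round trip (not part of the original module): the custom 'z'
# format code handles a null-terminated string, so packing and unpacking are
# symmetric:
#
#   packed = _struct_pack('<zB', 'hello', 3)   # -> 'hello\x00\x03'
#   _struct_unpack('<zB', packed)              # -> ('hello', 3)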
def pack_command(s_proj, s_cls, s_cmd, *args):
"""
Pack a command into a string.
Arguments:
- s_proj : Name of the project
- s_cls : Name of the class within the project (ignored for features)
- s_cmd : Name of the command within the class
- *args : Arguments of the command.
If the project, the class or the command can not be found in the command table,
a CommandError will be raised.
If the number and type of arguments in *arg do not match the expected ones, a
CommandError will be raised.
Return the command string, the command recommanded buffer and the command
recommanded timeout policy.
"""
proj = None
feat = None
projid = 0
cls = None
clsid = 0
cmd = None
# Let an exception be raised if we do not know the command or if the format is bad
# Find the project
if s_proj in _ctx.projectsByName:
proj = _ctx.projectsByName[s_proj]
elif s_proj in _ctx.featuresByName:
feat = _ctx.featuresByName[s_proj]
if proj is None and feat is None:
raise CommandError('Unknown project ' + s_proj)
if proj: # Project
projid = proj.projectId
# Find the class
if s_cls in proj.classesByName:
cls = proj.classesByName[s_cls]
if cls is None:
raise CommandError('Unknown class ' + s_cls + ' in project ' + s_proj)
clsid = cls.classId
# Find the command
if s_cmd in cls.cmdsByName:
cmd = cls.cmdsByName[s_cmd]
if cmd is None:
raise CommandError('Unknown command ' + s_cmd + ' in class ' + s_cls + ' of project ' + s_proj)
elif feat: # Feature
projid = feat.featureId
# Find the command
if s_cmd in feat.cmdsByName:
cmd = feat.cmdsByName[s_cmd]
if cmd is None:
raise CommandError('Unknown command ' + s_cmd + ' in feature ' + s_proj)
ret = struct.pack('<BBH', projid, clsid, cmd.cmdId)
argsfmt, needed = _format_string_for_cmd(cmd)
if needed:
try:
ret += _struct_pack(argsfmt, *args)
except IndexError:
raise CommandError('Missing arguments')
except TypeError:
raise CommandError('Bad type for arguments')
except struct.error:
raise CommandError('Bad type for arguments')
return ret, cmd.bufferType, cmd.timeoutPolicy
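# Usage sketch (the command name below is only an example and assumes it exists
# in the loaded ARSDK tables):
#
#   payload, buffer_type, timeout_policy = pack_command('ardrone3', 'Piloting', 'TakeOff')
#
# 'payload' starts with the packed <BBH> project/class/command header, followed
# by the packed arguments when the command defines any.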
def unpack_command(buf):
"""
Unpack a command string into a dictionnary of arguments
Arguments:
- buf : The packed command
Return a dictionnary describing the command, and a boolean indicating whether the
command is known. If the boolean is False, then the dictionnary is {}
Return dictionnary format:
{
'name' : full name of the command (project.class.command)
'project' : project of the command
'class' : class of the command
'cmd' : command name
'listtype' : list type (none/list/map) of the command
'args' : arguments in the commands, in the form { 'name':value, ... }
'arg0' : value of the first argument ('' if no arguments)
this is useful for map commands, as this will be the key.
}
A CommandError is raised if the command is in a bad format.
"""
# Read the project/cls/cmd from the buffer
try:
(i_proj, i_cls, i_cmd) = struct.unpack('<BBH', buf[:4])
except struct.error:
raise CommandError('Bad input buffer (not an ARCommand)')
proj = None
feat = None
cls = None
cmd = None
# Let an exception be raised if we do not know the command or if the format is bad
# Find the project
if i_proj in _ctx.projectsById:
proj = _ctx.projectsById[i_proj]
# Or the feature
if i_proj in _ctx.featuresById:
feat = _ctx.featuresById[i_proj]
# If project, Find the class
if proj:
if i_cls in proj.classesById:
cls = proj.classesById[i_cls]
else:
return {}, False
if i_cmd in cls.cmdsById:
cmd = cls.cmdsById[i_cmd]
else:
return {}, False
# If feature, find directly the command
elif feat:
if i_cmd in feat.cmdsById:
cmd = feat.cmdsById[i_cmd]
elif i_cmd in feat.evtsById:
cmd = feat.evtsById[i_cmd]
else:
return {}, False
else:
return {}, False
args = ()
argsfmt, needed = _format_string_for_cmd(cmd)
if needed:
try:
args = _struct_unpack(argsfmt, buf[4:])
except struct.error:
raise CommandError('Bad input buffers (arguments do not match the command)')
ret = {
'name' : '%s.%s.%s' % (proj.name if proj else feat.name, cls.name if cls else '', cmd.name),
'proj' : proj.name if proj else feat.name,
'class' : cls.name if cls else '',
'cmd' : cmd.name,
'listtype' : cmd.listType,
'listtype_str' : arsdkparser.ArCmdListType.TO_STRING[cmd.listType],
'args' : {},
'arg0' : '',
}
for i in range(len(args)):
if i == 0:
ret['arg0'] = args[0]
ret['args'][cmd.args[i].name] = args[i]
return ret, True
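# Round-trip sketch (same assumption as above about the loaded command tables):
#
#   packed, _, _ = pack_command('ardrone3', 'Piloting', 'TakeOff')
#   info, known = unpack_command(packed)
#   # known is True and info['name'] == 'ardrone3.Piloting.TakeOff'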
|
bsd-3-clause
| -1,871,689,588,491,917,000
| 29.885496
| 107
| 0.57699
| false
| 3.546012
| false
| false
| false
|
juanchodepisa/sbtk
|
SBTK_League_Helper/src/security/key_handling.py
|
1
|
4752
|
from urllib.parse import quote
import json
import os
from src import log_entry
from .obfuscation import transform
from .exceptions import KeysDirectoryNotFound, KeysFileNotFound
user_index = os.path.join(os.path.dirname(__file__), "keys_loc.json")
default_context = "OGS"
obfuscated = "_obfuscated_"
plaintext = "_plaintext_"
no_directory_default = lambda usr: ""
def reset_index():
with open (user_index, 'w') as f:
json.dump({}, f)
log_entry (user_index, "file reset to empty value.")
def get_keys_directory(user, on_fail = no_directory_default):
with open(user_index, 'r+') as f:
index_data = json.load(f)
update = False
ref = log_entry("Searching %s's keys location from %s...." % (user, user_index))
if user in index_data:
dir = index_data[user]
else:
log_entry(ref, "Location not found.")
dir = False
if not (dir and os.path.isdir(dir)):
if dir:
log_entry (ref, "Location invalid.")
index_data.pop(user)
update = True
ref = log_entry("Getting %s's keys location from backup method...." % user)
dir = on_fail(user)
try:
if os.path.isdir(dir):
index_data[user] = dir
update = True
else:
log_entry(ref, "Location not found or invalid.")
raise KeysDirectoryNotFound(user)
finally:
if update:
ref = log_entry ("Updating %s...." % user_index)
f.seek(0)
json.dump(index_data, f, sort_keys=True, indent=4)
f.truncate()
log_entry (ref, "Updated!")
log_entry (ref, "Location found!")
return dir
def set_keys_directory(user, directory):
with open(user_index, 'r+') as f:
ref = log_entry ("Updating %s's keys location at %s...." % (user, user_index))
index_data = json.load(f)
index_data[user] = directory
f.seek(0)
json.dump(index_data, f, sort_keys=True, indent=4)
f.truncate()
log_entry (ref, "Updated!")
def remove_keys_directory(user):
with open(user_index, 'r+') as f:
ref = log_entry ("Removing %s's keys location at %s...." % (user, user_index))
index_data = json.load(f)
index_data.pop(user)
f.seek(0)
json.dump(index_data, f, sort_keys=True, indent=4)
f.truncate()
log_entry (ref, "Removed!")
def store_keys (user, keys, password="", context=default_context, if_no_directory = no_directory_default):
directory = get_keys_directory(user, if_no_directory)
if password:
ref = log_entry ("Encrypting %s's keys...." % user)
keys = transform(keys, password)
log_entry (ref, "Encrypted!")
else:
log_entry ("WARNING: No password provided to encrypt %s's keys. This is unsafe, as keys will be stored in plain text." % user)
filename = standard_filename(user, password, directory, context)
with open(filename, 'w') as f:
ref = log_entry("Storing %s's keys at %s...." % (user, filename))
json.dump(keys, f, sort_keys=True, indent=4)
log_entry(ref, "Stored!")
def retrieve_keys (user, password="", context=default_context, return_location=False):
directory = get_keys_directory(user)
filename = standard_filename(user, password, directory, context)
if os.path.isfile(filename):
with open(filename, 'r') as f:
ref = log_entry("Retrieving %s's keys from %s...." % (user, filename))
keys = json.load(f)
log_entry(ref, "Retrieved!")
else:
raise KeysFileNotFound(user, filename)
if password:
ref = log_entry ("Decrypting %s's keys...." % user)
keys = transform(keys, password)
log_entry (ref, "Decrypted!")
if return_location:
return (keys, filename)
else:
return keys
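# Usage sketch (illustrative values only; assumes a keys directory has been
# registered for the user beforehand):
#
#   set_keys_directory('alice', '/home/alice/.sbtk_keys')
#   store_keys('alice', {'api_key': 'secret'}, password='hunter2')
#   keys = retrieve_keys('alice', password='hunter2')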
def standard_filename(user, password, directory, context):
filename = context+(obfuscated if password else plaintext)+quote(user, safe='')+".json"
return os.path.join(directory, filename)
###########################
## ##
## INITIALIZATION CODE ##
## ##
###########################
if not os.path.isfile(user_index):
log_entry (user_index, "file does not exist.")
__ref = log_entry ("Creating file %s...." % user_index)
reset_index()
log_entry(__ref, "File created. Ready!")
del __ref
else:
log_entry (user_index, "file exists. Ready!")
|
mit
| -288,971,447,259,933,760
| 32.471831
| 134
| 0.555766
| false
| 3.774424
| false
| false
| false
|
xirdneh/oposum
|
oPOSum/apps/client/models.py
|
1
|
3379
|
from django.db import models
from oPOSum.libs import utils as pos_utils
from django.utils.translation import ugettext as _
from decimal import Decimal
from django.core.validators import RegexValidator
# Create your models here.
class Client(models.Model):
first_name = models.CharField(_("First Name"), max_length=100, blank=False)
last_name = models.CharField(_("Last Name"), max_length = 512, blank=False)
phonenumber = models.CharField(_("Phone Number"),
max_length=512, blank=True, unique=False, null=True,
validators = [
RegexValidator(r'[0-9]{3}\-?[0-9]{3}\-?[0-9]{4}',
'Format: 834-117-1086',
'phone_format_error')
]
)
address = models.TextField(_("Address"), max_length=1024, blank=True)
id_type = models.CharField(_("ID Type"), max_length=50, blank=True, default='IFE',
choices=(
('IFE', 'IFE (Credencial de Elector'),
('LICENCIA', 'Licencia de conducir'),
('PASAPORTE', 'Pasaporte'),
('OTRO', 'Otro'),
))
id_number = models.CharField(_("Identification Number"), max_length=255, blank=True)
email = models.EmailField(_("Email"), max_length = 255, blank=True, unique=False)
class Meta:
unique_together = (('first_name', 'last_name', 'phonenumber', 'email'))
verbose_name = "client"
verbose_name_plural = "clients"
def __unicode__(self):
return u"{0} {1}. {2}, {3}".format(self.first_name, self.last_name, self.phonenumber, self.email)
def as_json(self):
return dict(
id = self.id,
first_name = self.first_name.encode('latin-1'),
last_name = self.last_name.encode('latin-1'),
phonenumber = self.phonenumber,
address = self.address.encode('latin-1'),
id_type = self.id_type,
id_number = self.id_number,
email = self.email
)
def get_total_debt(self):
apps = pos_utils.get_installed_oposum_apps()
ret = Decimal(0)
if 'layaway' in apps:
from oPOSum.apps.layaway.models import Layaway
ret += Layaway.objects.get_total_debt_amount(self)
if 'repairshop' in apps:
from oPOSum.apps.repairshop.models import RepairTicket
ret += RepairTicket.objects.get_total_debt_amount(self)
return ret
def get_layaway_debt(self):
apps = pos_utils.get_installed_oposum_apps()
ret = Decimal(0)
if 'layaway' in apps:
from oPOSum.apps.layaway.models import Layaway
ret += Layaway.objects.get_total_debt_amount(self)
return ret
def get_repairshop_debt(self):
apps = pos_utils.get_installed_oposum_apps()
ret = Decimal(0)
if 'repairshop' in apps:
from oPOSum.apps.repairshop.models import RepairTicket
ret += RepairTicket.objects.get_total_debt_amount(self)
return ret
def get_repairshop_pending_tickets(self):
#TODO get pending tickets
return 0
def save(self, *args, **kwargs):
if not self.pk:
self.phonenumber = self.phonenumber.replace("-", "")
super(Client, self).save(*args, **kwargs)
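# Usage sketch (illustrative, not part of the original module):
#
#   c = Client.objects.create(first_name='Ana', last_name='Diaz',
#                             phonenumber='834-117-1086')
#   c.get_total_debt()  # sums layaway and repair-shop debt for installed apps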
|
mit
| -6,659,084,490,970,760,000
| 39.22619
| 105
| 0.576798
| false
| 3.668838
| false
| false
| false
|
avanzosc/avanzosc6.1
|
avanzosc_crm_call_ext/crm_opportunity.py
|
1
|
7613
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Avanzosc - Avanced Open Source Consulting
# Copyright (C) 2011 - 2012 Avanzosc <http://www.avanzosc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import time
from crm import crm
from osv import fields, osv
from tools.translate import _
class crm_opportunity(osv.osv):
_inherit = 'crm.lead'
_columns = {
'credit': fields.float('Total Receivable'),
'invoice2pay': fields.integer('Invoices to pay'),
'last_invoice': fields.date('Last Invoice'),
'last_payment': fields.date('Last Payment'),
}
def onchange_partner_id(self, cr, uid, ids, part, email=False):
invoice_obj = self.pool.get('account.invoice')
voucher_obj = self.pool.get('account.voucher')
res = super(crm_opportunity, self).onchange_partner_id(cr, uid, ids, part, email)
if part:
partner = self.pool.get('res.partner').browse(cr, uid, part)
unpaid_invoice_ids = invoice_obj.search(cr, uid, [('partner_id', '=', part), ('state', '=', 'open')])
invoice_ids = invoice_obj.search(cr, uid, [('partner_id', '=', part)])
voucher_ids = voucher_obj.search(cr, uid, [('partner_id', '=', part)])
if invoice_ids:
last_invoice = invoice_obj.browse(cr, uid, invoice_ids[0])
for invoice in invoice_obj.browse(cr, uid, invoice_ids):
if invoice.date_invoice > last_invoice.date_invoice and invoice.date_invoice != False:
last_invoice = invoice
elif last_invoice.date_invoice == False:
last_invoice = invoice
res['value'].update({
'last_invoice': last_invoice.date_invoice,
})
if voucher_ids:
last_voucher = voucher_obj.browse(cr, uid, voucher_ids[0])
for voucher in voucher_obj.browse(cr, uid, voucher_ids):
if voucher.date > last_voucher.date and voucher.date != False:
last_voucher = voucher
elif last_voucher.date == False:
last_voucher = voucher
res['value'].update({
'last_payment': last_voucher.date,
})
res['value'].update({
'credit': partner.credit,
'invoice2pay': int(len(unpaid_invoice_ids)),
})
return res
crm_opportunity()
class crm_make_sale(osv.osv_memory):
_inherit = "crm.make.sale"
def makeOrder(self, cr, uid, ids, context=None):
"""
This function create Quotation on given case.
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm make sales' ids
@param context: A standard dictionary for contextual values
@return: Dictionary value of created sales order.
"""
if context is None:
context = {}
case_obj = self.pool.get('crm.lead')
sale_obj = self.pool.get('sale.order')
partner_obj = self.pool.get('res.partner')
address_obj = self.pool.get('res.partner.address')
data = context and context.get('active_ids', []) or []
for make in self.browse(cr, uid, ids, context=context):
partner = make.partner_id
partner_addr = partner_obj.address_get(cr, uid, [partner.id],
['default', 'invoice', 'delivery', 'contact'])
pricelist = partner.property_product_pricelist.id
fpos = partner.property_account_position and partner.property_account_position.id or False
new_ids = []
for case in case_obj.browse(cr, uid, data, context=context):
if not partner and case.partner_id:
partner = case.partner_id
fpos = partner.property_account_position and partner.property_account_position.id or False
partner_addr = partner_obj.address_get(cr, uid, [partner.id],
['default', 'invoice', 'delivery', 'contact'])
pricelist = partner.property_product_pricelist.id
if False in partner_addr.values():
raise osv.except_osv(_('Data Insufficient!'), _('Customer has no addresses defined!'))
def_address = address_obj.browse(cr, uid, partner_addr['default'])
vals = {
'origin': _('Opportunity: %s') % str(case.id),
'section_id': case.section_id and case.section_id.id or False,
'shop_id': make.shop_id.id,
'partner_id': partner.id,
'pricelist_id': pricelist,
'partner_invoice_id': partner_addr['invoice'],
'partner_order_id': partner_addr['contact'],
'partner_shipping_id': partner_addr['delivery'],
'date_order': time.strftime('%Y-%m-%d'),
'fiscal_position': fpos,
}
if partner.id:
vals['user_id'] = partner.user_id and partner.user_id.id or uid
new_id = sale_obj.create(cr, uid, vals)
case_obj.write(cr, uid, [case.id], {'ref': 'sale.order,%s' % new_id})
new_ids.append(new_id)
message = _('Opportunity ') + " '" + case.name + "' "+ _("is converted to Quotation.")
self.log(cr, uid, case.id, message)
case_obj.message_append(cr, uid, [case], _("Converted to Sales Quotation(id: %s).") % (new_id))
if make.close:
case_obj.case_close(cr, uid, data)
if not new_ids:
return {'type': 'ir.actions.act_window_close'}
if len(new_ids)<=1:
value = {
'domain': str([('id', 'in', new_ids)]),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'sale.order',
'view_id': False,
'type': 'ir.actions.act_window',
'res_id': new_ids and new_ids[0]
}
else:
value = {
'domain': str([('id', 'in', new_ids)]),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'sale.order',
'view_id': False,
'type': 'ir.actions.act_window',
'res_id': new_ids
}
return value
crm_make_sale()
|
agpl-3.0
| -5,189,564,213,395,655,000
| 45.408537
| 113
| 0.517935
| false
| 4.20265
| false
| false
| false
|
weirdgiraffe/plugin.video.giraffe.seasonvar
|
resources/site-packages/plugin_video/screen.py
|
1
|
7083
|
# coding: utf-8
#
# Copyright © 2017 weirdgiraffe <giraffe@cyberzoo.xyz>
#
# Distributed under terms of the MIT license.
#
from kodi import logger, Plugin
import seasonvar
from datetime import datetime, timedelta
def week(plugin):
date = datetime.today()
for date_offset in range(7):
datestr = date.strftime('%d.%m.%Y')
dayurl = plugin.make_url({
'screen': 'day',
'date': datestr,
})
plugin.add_screen_directory(datestr, dayurl)
date -= timedelta(days=1)
searchurl = plugin.make_url({'screen': 'search'})
plugin.add_screen_directory('[COLOR FFFFD700]поиск[/COLOR]', searchurl)
plugin.publish_screen(True)
def day(plugin):
date = plugin.args.get('date')
if date is None:
logger.error('{0}: "date" arg is missing or malformed: {1}'.format(
'screen "day"', plugin.args))
plugin.publish_screen(False)
return
for i in seasonvar.day_items(date):
url = plugin.make_url({
'screen': 'episodes',
'url': i['url'],
})
name = '{0} [COLOR FFFFD700]{1}[/COLOR]'.format(
i['name'], i['changes'])
plugin.add_screen_directory(name, url,
thumb=seasonvar.thumb_url(i['url']))
plugin.publish_screen(True)
def direct_search(plugin):
term = plugin.args.get('q')
if term is None:
logger.error('{0}: "q" arg is missing or malformed: {1}'.format(
'screen "direct_search"', plugin.args))
plugin.publish_screen(False)
return
for i in seasonvar.search(term):
if i['url'] is not None:
season_url = i['url'].encode('utf-8')
url = plugin.make_url({
'screen': 'episodes',
'url': season_url,
})
plugin.add_screen_directory(
i['name'],
url,
thumb=seasonvar.thumb_url(season_url)
)
plugin.publish_screen(True)
def search(plugin):
term = plugin.read_input('Что искать?')
plugin.args["q"] = term
direct_search(plugin)
def episodes(plugin):
season_url = plugin.args.get('url')
if season_url is None:
logger.error('{0}: "url" arg is missing or malformed: {1}'.format(
'screen "episodes"', plugin.args))
plugin.publish_screen(False)
return
tr = plugin.args.get('tr')
thumb = seasonvar.thumb_url(season_url)
season = seasonvar.season_info(season_url)
if season is None or len(season) == 0:
logger.error('{0}: failed to get season info: {1}'.format(
'screen "episodes"', plugin.args))
plugin.show_notification(
'Content is blocked',
'Or external player is being used')
plugin.publish_screen(False)
return
if season.get('total', 0) > 1:
url = plugin.make_url({
'screen': 'seasons',
'url': season_url,
})
name = '[COLOR FFFFD700]сезон[/COLOR]: {0} / {1}'.format(
season['number'], season['total'])
plugin.add_screen_directory(name, url)
if len(season.get('playlist', [])) > 1:
url = plugin.make_url({
'screen': 'translations',
'url': season_url,
'tr': tr,
})
name = '[COLOR FFFFD700]озвучка[/COLOR]: {0}'.format(
tr if tr is not None else 'Стандартная')
plugin.add_screen_directory(name, url)
pl_url = (x['url'] for x in season.get('playlist', []) if x['tr'] == tr)
for e in (x for url in pl_url for x in seasonvar.episodes(url)):
url = plugin.make_url({'play': e['url']})
plugin.add_screen_item(e['name'], url, thumb=thumb)
plugin.publish_screen(True)
def seasons(plugin):
season_url = plugin.args.get('url')
if season_url is None:
logger.error('{0}: "url" arg is missing or malformed: {1}'.format(
'screen "seasons"', plugin.args))
plugin.publish_screen(False)
return
num, seasons = seasonvar.seasons(season_url)
if seasons is None:
logger.error('{0}: failed to get season info: {1}'.format(
'screen "seasons"', plugin.args))
plugin.publish_screen(False)
return
for n, s in enumerate(seasons, 1):
prefix = '* ' if n == num else ''
name = '{0}сезон {1}'.format(prefix, n)
url = plugin.make_url({
'screen': 'episodes',
'url': s,
})
plugin.add_screen_directory(name, url, thumb=seasonvar.thumb_url(s))
plugin.publish_screen(True)
def translations(plugin):
season_url = plugin.args.get('url')
if season_url is None:
logger.error('{0}: "url" arg is missing or malformed: {1}'.format(
'screen "translations"', plugin.args))
plugin.publish_screen(False)
return
tr = plugin.args.get('tr')
thumb = seasonvar.thumb_url(season_url)
season = seasonvar.season_info(season_url)
if season is None:
logger.error('{0}: failed to get season info: {1}'.format(
'screen "translations"', plugin.args))
plugin.publish_screen(False)
return
for n, pl in enumerate(season['playlist']):
if tr is None and n == 0 or pl['tr'] == tr:
prefix = '* '
else:
prefix = ''
url = plugin.make_url({
'screen': 'episodes',
'url': season_url,
'tr': pl['tr'],
})
name = '{0}{1}'.format(
prefix,
pl['tr'] if pl['tr'] is not None else 'Стандартная')
plugin.add_screen_directory(name, url, thumb=thumb)
plugin.publish_screen(True)
def play(plugin):
play_url = plugin.args.get('play')
if play_url is None:
logger.error('{0}: "play" arg is missing or malformed: {1}'.format(
'play', plugin.args))
plugin.publish_screen(False)
return
plugin.play(play_url)
def render(plugin):
screen = plugin.args.get('screen')
if screen is None:
screen = 'week'
try:
if 'play' in plugin.args:
play(plugin)
return
if 'q' in plugin.args:
direct_search(plugin)
return
{'week': week,
'day': day,
'episodes': episodes,
'seasons': seasons,
'translations': translations,
'search': search,
}[screen](plugin)
except KeyError:
logger.error('unexpected screen "{0}"'.format(screen))
except seasonvar.NetworkError:
logger.error('NetworkError')
plugin.show_notification(
'Network error',
'Check your connection')
except seasonvar.HTTPError:
logger.error('HTTPError')
plugin.show_notification(
'HTTP error',
'Something goes wrong. Please, send your logs to addon author')
if __name__ == "__main__":
import sys
render(Plugin(*sys.argv))
|
mit
| 7,216,715,170,337,933,000
| 31.391705
| 76
| 0.551999
| false
| 3.717081
| false
| false
| false
|
bagage/cadastre-conflation
|
back/batimap/bbox.py
|
1
|
1181
|
import re
from math import sqrt
class Bbox(object):
def __init__(self, xmin, ymin, xmax, ymax):
self.coords = [xmin, ymin, xmax, ymax]
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
def __repr__(self):
return f"{self.xmin},{self.ymin},{self.xmax},{self.ymax}"
def max_distance(self):
"""
Maximum distance from the center of the screen that this bbox may reach
"""
return sqrt((self.xmax - self.xmin) ** 2 + (self.ymax - self.ymin) ** 2) / 2
@staticmethod
def from_pg(bbox_string):
# cf https://docs.python.org/3/library/re.html#simulating-scanf
# need to handle 10e3 notation too
float_re = r"([-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?)"
box_re = re.compile(
r"BOX\("
+ float_re
+ " "
+ float_re
+ ","
+ float_re
+ " "
+ float_re
+ r"\)"
)
groups = box_re.match(bbox_string).groups()
return Bbox(
float(groups[0]), float(groups[4]), float(groups[8]), float(groups[12])
)
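# Usage sketch (illustrative values):
#
#   bbox = Bbox.from_pg("BOX(1.5 2.5,3.5 4.5)")
#   bbox.max_distance()   # -> sqrt(2**2 + 2**2) / 2, about 1.414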
|
mit
| -6,715,307,017,670,733,000
| 26.465116
| 84
| 0.485182
| false
| 3.611621
| false
| false
| false
|