repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
streitho/spinmob | _pylab_colorslider_frame.py | <filename>_pylab_colorslider_frame.py
#Boa:Frame:ColorSliderFrame
import _pylab_colorslider as _pc
import wx
from spinmob import _app
def create(parent):
    """Convenience factory: return a new ColorSliderFrame owned by *parent*."""
    return ColorSliderFrame(parent)
# Boa-generated wx control IDs, one per widget created in _init_ctrls.
[wxID_COLORSLIDERFRAME, wxID_COLORSLIDERFRAMEBUTTONCOLOR,
 wxID_COLORSLIDERFRAMEBUTTONCOLOR2, wxID_COLORSLIDERFRAMEBUTTONINSERT,
 wxID_COLORSLIDERFRAMEBUTTONKILL, wxID_COLORSLIDERFRAMECHECKBOXLOCKED,
 wxID_COLORSLIDERFRAMEPANEL1, wxID_COLORSLIDERFRAMESLIDERPOSITION,
 wxID_COLORSLIDERFRAMETEXTLOWERBOUND, wxID_COLORSLIDERFRAMETEXTUPPERBOUND,
] = [wx.NewId() for _init_ctrls in range(10)]

# Generated wx ID for the update timer created in _init_utils.
[wxID_COLORSLIDERFRAMETIMERUPDATE] = [wx.NewId() for _init_utils in range(1)]
class ColorSliderFrame(wx.Frame):
    # This thing's purpose is to pop into creation when the parent color line
    # object needs gui modification, get modified, and then get destroyed once
    # things are edited to the user's liking

    # Shared colour-picker dialogs, created once at class definition time and
    # reused by every slider frame.
    dialog = wx.ColourDialog(_app.GetTopWindow())
    dialog2 = wx.ColourDialog(_app.GetTopWindow())

    # The ColorPoint instance this frame edits (assigned in __init__).
    daddy = None
    def _init_utils(self):
        # generated method, don't edit
        # Timer that (when its handler is enabled) periodically refreshes the
        # parent image; disabled by default.
        self.timerUpdate = wx.Timer(id=wxID_COLORSLIDERFRAMETIMERUPDATE,
              owner=self)
        self.timerUpdate.SetEvtHandlerEnabled(False)
        self.Bind(wx.EVT_TIMER, self.OnTimerUpdateTimer,
              id=wxID_COLORSLIDERFRAMETIMERUPDATE)
    def _init_ctrls(self, prnt, style=0, size=wx.Size(351, 38), position=wx.Point(0,0)):
        # generated method, don't edit
        # Boa-constructor output: builds the frame, its panel, the two color
        # buttons, position slider, lower/upper bound text boxes, lock
        # checkbox and the +/- buttons, binding each to its On* handler.
        wx.Frame.__init__(self, id=wxID_COLORSLIDERFRAME,
              name=u'ColorSliderFrame', parent=prnt, pos=position,
              size=size, style=style,
              title=u'Color Slider')
        self._init_utils()
        self.SetClientSize(size)
        self.Bind(wx.EVT_CLOSE, self.OnColorSliderFrameClose)
        self.Bind(wx.EVT_LEFT_UP, self.OnColorSliderFrameLeftUp)
        self.Bind(wx.EVT_MOVE, self.OnColorSliderFrameMove)
        self.Bind(wx.EVT_ICONIZE, self.OnColorSliderFrameLower)

        self.panel1 = wx.Panel(id=wxID_COLORSLIDERFRAMEPANEL1, name='panel1',
              parent=self, pos=wx.Point(0, 0), size=wx.Size(351, 38),
              style=wx.TAB_TRAVERSAL)

        # primary color swatch button
        self.buttonColor = wx.Button(id=wxID_COLORSLIDERFRAMEBUTTONCOLOR,
              label=u'', name=u'buttonColor', parent=self.panel1,
              pos=wx.Point(8, 8), size=wx.Size(24, 23), style=0)
        self.buttonColor.Bind(wx.EVT_BUTTON, self.OnButtonColorButton,
              id=wxID_COLORSLIDERFRAMEBUTTONCOLOR)

        # position slider: widget range 0-1000 maps onto [daddy.min, daddy.max]
        self.sliderPosition = wx.Slider(id=wxID_COLORSLIDERFRAMESLIDERPOSITION,
              maxValue=1000, minValue=0, name=u'sliderPosition',
              parent=self.panel1, pos=wx.Point(128, 8), size=wx.Size(128, 24),
              style=wx.SL_HORIZONTAL, value=500)
        self.sliderPosition.SetLabel(u'')
        self.sliderPosition.SetToolTipString(u'sliderPosition')
        self.sliderPosition.Bind(wx.EVT_COMMAND_SCROLL,
              self.OnSliderPositionCommandScroll,
              id=wxID_COLORSLIDERFRAMESLIDERPOSITION)
        self.sliderPosition.Bind(wx.EVT_LEFT_DOWN,
              self.OnSliderPositionLeftDown)
        self.sliderPosition.Bind(wx.EVT_LEFT_UP, self.OnSliderPositionLeftUp)

        self.textLowerBound = wx.TextCtrl(id=wxID_COLORSLIDERFRAMETEXTLOWERBOUND,
              name=u'textLowerBound', parent=self.panel1, pos=wx.Point(94, 8),
              size=wx.Size(34, 22), style=wx.PROCESS_ENTER, value=u'0.0')
        self.textLowerBound.SetHelpText(u'')
        self.textLowerBound.SetInsertionPoint(0)
        self.textLowerBound.Bind(wx.EVT_TEXT_ENTER,
              self.OnTextLowerBoundTextEnter,
              id=wxID_COLORSLIDERFRAMETEXTLOWERBOUND)

        self.textUpperBound = wx.TextCtrl(id=wxID_COLORSLIDERFRAMETEXTUPPERBOUND,
              name=u'textUpperBound', parent=self.panel1, pos=wx.Point(256, 8),
              size=wx.Size(32, 22), style=wx.PROCESS_ENTER, value=u'1.0')
        self.textUpperBound.Bind(wx.EVT_TEXT_ENTER,
              self.OnTextUpperBoundTextEnter,
              id=wxID_COLORSLIDERFRAMETEXTUPPERBOUND)

        # secondary color swatch button
        self.buttonColor2 = wx.Button(id=wxID_COLORSLIDERFRAMEBUTTONCOLOR2,
              label='', name='buttonColor2', parent=self.panel1,
              pos=wx.Point(32, 8), size=wx.Size(24, 23), style=0)
        self.buttonColor2.Bind(wx.EVT_BUTTON, self.OnButtonColor2Button,
              id=wxID_COLORSLIDERFRAMEBUTTONCOLOR2)

        # "lock" keeps color and color2 in sync
        self.checkBoxLocked = wx.CheckBox(id=wxID_COLORSLIDERFRAMECHECKBOXLOCKED,
              label='lock', name='checkBoxLocked', parent=self.panel1,
              pos=wx.Point(56, 13), size=wx.Size(34, 13), style=0)
        self.checkBoxLocked.SetValue(True)
        self.checkBoxLocked.Bind(wx.EVT_CHECKBOX, self.OnCheckBoxLockedCheckbox,
              id=wxID_COLORSLIDERFRAMECHECKBOXLOCKED)

        # "-" removes this color point
        self.buttonKill = wx.Button(id=wxID_COLORSLIDERFRAMEBUTTONKILL,
              label='-', name='buttonKill', parent=self.panel1,
              pos=wx.Point(320, 8), size=wx.Size(24, 23), style=0)
        self.buttonKill.Bind(wx.EVT_BUTTON, self.OnButtonKillButton,
              id=wxID_COLORSLIDERFRAMEBUTTONKILL)

        # "+" inserts a duplicate color point before this one
        self.buttonInsert = wx.Button(id=wxID_COLORSLIDERFRAMEBUTTONINSERT,
              label='+', name='buttonInsert', parent=self.panel1,
              pos=wx.Point(296, 8), size=wx.Size(24, 23), style=0)
        self.buttonInsert.Bind(wx.EVT_BUTTON, self.OnButtonInsertButton,
              id=wxID_COLORSLIDERFRAMEBUTTONINSERT)
def __init__(self, parent_window, parent_ColorPoint, style=0, size=wx.Size(359, 72), position=wx.Point(0,0)):
#wx.Frame.__init__(self, parent_window, style=wx.FRAME_NO_TASKBAR|wx.DEFAULT_FRAME_STYLE)
# This is something that wx needs to do. Don't mess with it.
self._init_ctrls(prnt=parent_window, style=style, size=size, position=position)
# Set the variables and mess with the controls
self.daddy = parent_ColorPoint
# should we skip an event?
self.shhhh = 0
# update the controls based on these new values
self.UpdateControls()
#
# EVENTS AND FUNCTIONS
#
def CheckIfEndPoint(self):
# just look at where we are in the list
if self.FindSelf() == 0 or self.FindSelf() == len(self.daddy.parent.colorpoints)-1:
self.EnableStuff(False)
else:
self.EnableStuff(True)
def EnableStuff(self, enable):
self.buttonKill.Enable(enable)
self.textLowerBound.Show(enable)
self.textUpperBound.Show(enable)
self.sliderPosition.Show(enable)
    def OnSliderPositionCommandScroll(self, event):
        # Slider dragged: push the new position into the ColorPoint and redraw.
        # store the 0-1 position
        self.daddy.position = self.GetSliderPosition()

        # now update the graph!
        self.daddy.parent.UpdateImage()

        # let the mouse timer update the image
        self.CheckIfEndPoint()
    def OnButtonColorButton(self, event):
        # Pick a new primary color; when "lock" is checked, color2 follows.
        #self.timerUpdate.Stop()
        # pop up the color dialog
        self.dialog.ShowModal()

        # update the data variables
        # NOTE(review): wx.ColourDialog normally exposes GetColourData();
        # this relies on a ColourData attribute/property -- confirm against
        # the wxPython version in use.
        self.daddy.color = self.dialog.ColourData.GetColour()
        if self.checkBoxLocked.GetValue():
            self.daddy.color2 = self.daddy.color
            self.dialog2.ColourData.SetColour(self.daddy.color)

        # update the gui
        self.daddy.parent.UpdateImage()
        self.UpdateControls()
    def OnButtonColor2Button(self, event):
        # Pick a new secondary color; when "lock" is checked, color follows.
        #self.timerUpdate.Stop()
        # pop up the color dialog
        self.dialog2.ShowModal()

        # update the data variables
        self.daddy.color2 = self.dialog2.ColourData.GetColour()
        if self.checkBoxLocked.GetValue():
            self.daddy.color = self.daddy.color2
            self.dialog.ColourData.SetColour(self.daddy.color2)

        # update the gui
        self.daddy.parent.UpdateImage()
        self.UpdateControls()
    def OnCheckBoxLockedCheckbox(self, event):
        # Switching the lock on forces color2 to match color immediately.
        #self.timerUpdate.Stop()
        # set colors to uniform
        if self.checkBoxLocked.GetValue():
            self.daddy.color2 = self.daddy.color
            self.dialog2.ColourData.SetColour(self.daddy.color)
        # NOTE(review): original indentation was lost; the two updates below
        # are assumed to run unconditionally -- confirm.
        self.daddy.parent.UpdateImage()
        self.UpdateControls()
def OnTextLowerBoundTextEnter(self, event):
#self.timerUpdate.Stop()
# try to update our lower bound value
self.daddy.min = self.SafeFloat(self.daddy.min, self.textLowerBound.GetValue(), 0.0, self.daddy.max-0.001)
self.textLowerBound.SetValue(str(self.daddy.min))
self.UpdateControls()
def OnTextUpperBoundTextEnter(self, event):
#self.timerUpdate.Stop()
# try to update our max value
self.daddy.max = self.SafeFloat(self.daddy.max, self.textUpperBound.GetValue(), self.daddy.min+0.001, 1.0)
self.textUpperBound.SetValue(str(self.daddy.max))
self.UpdateControls()
    def UpdateControls(self):
        """Push the ColorPoint's state (colors, bounds, position) into the
        widgets."""
        # set the controls based on internal variables
        self.buttonColor.SetBackgroundColour(self.daddy.color)
        self.buttonColor2.SetBackgroundColour(self.daddy.color2)
        self.textLowerBound.SetValue(str(self.daddy.min))
        self.textUpperBound.SetValue(str(self.daddy.max))
        self.SetSliderPosition(self.daddy.position)

        # if we're the end points, gray out the delete button
        self.CheckIfEndPoint()
def GetSliderPosition(self):
x0 = self.sliderPosition.GetMin()
x1 = self.sliderPosition.GetMax()
return 1.0*(self.sliderPosition.GetValue()-x0)/(x1-x0)*(self.daddy.max-self.daddy.min)+self.daddy.min
def SetSliderPosition(self, position):
x0 = self.sliderPosition.GetMin()
x1 = self.sliderPosition.GetMax()
self.sliderPosition.SetValue((position-self.daddy.min)/(self.daddy.max-self.daddy.min)*(x1-x0)+x0)
def SafeFloat(self, old_float, new_value, min=0.0, max=1.0):
"""
This is for modifying min, max or the value. It makes sure new_value (string or float)
is a valid float between min and max
"""
try:
x = float(new_value)
if x <= max and x >= min: return x
else: return old_float
except:
return old_float
    def OnButtonKillButton(self, event):
        # Remove this color point (unless it is an end point) and rebuild
        # the slider stack.
        #self.timerUpdate.Stop()
        # don't delete the end points!
        if self.daddy.position in [0.0,1.0]:
            return
        self.Hide()

        # remove this guy from the list
        self.daddy.parent.colorpoints.pop(self.FindSelf())

        # now redraw everything
        self.daddy.parent.ShowSliders()
        self.daddy.parent.UpdateImage()
def FindSelf(self):
#self.timerUpdate.Stop()
# loop over the boss's list and pop this one out
for n in range(len(self.daddy.parent.colorpoints)):
if self.daddy.parent.colorpoints[n].slider == self:
return n
    def OnButtonInsertButton(self, event):
        # Insert a duplicate color point just before this one so the user
        # can split a color band.
        #self.timerUpdate.Stop()
        # insert a new color point in before this one
        self.daddy.parent.colorpoints.insert(self.FindSelf(),
            _pc.ColorPoint(self.daddy.parent, self.daddy.position,
                           self.daddy.color.Red(),
                           self.daddy.color.Green(),
                           self.daddy.color.Blue(),
                           self.daddy.color2.Red(),
                           self.daddy.color2.Green(),
                           self.daddy.color2.Blue()))

        # now redraw everything
        self.daddy.parent.ShowSliders()
        self.daddy.parent.UpdateImage()
def OnColorSliderFrameClose(self, event):
#self.timerUpdate.Stop()
# close all the slider windows
self.daddy.parent.HideSliders()
    def OnSliderPositionLeftDown(self, event):
        # Timer-based live updating is disabled; just pass the event on.
        #self.timerUpdate.Start(1000)
        event.Skip()

    def OnSliderPositionLeftUp(self, event):
        #self.timerUpdate.Stop()
        #self.daddy.parent.ShowSliders()
        event.Skip()

    def OnTimerUpdateTimer(self, event):
        # Periodic refresh of the parent image (timer is normally disabled).
        self.daddy.parent.UpdateImage()

    def OnColorSliderFrameLeftUp(self, event):
        # now redraw everything
        #self.daddy.parent.ShowSliders()
        event.Skip()
def OnColorSliderFrameMove(self, event):
# if we're supposed to skip this event or we're NOT the boss slider
if not self.FindSelf() == len(self.daddy.parent.colorpoints)-1:
return
# get the list of windows
l = self.daddy.parent.colorpoints
# get the coordinates of this one
x = self.Position.x
y = self.Position.y
# windows minimizing means "move the window way negative off screen"
if x < -31000 or y < -31000: return
# find out which one we are in the list
n = self.FindSelf()
# if we didn't find one, better quit!
if n==None:
print "couldn't find this window in list"
event.Skip()
# set the positions of all the other frames
# the only frame that does the moving is the last one
for i in range(len(l)-1):
# only update coordinates of guys that aren't us
# only mess with it if the colorpoint has made a slider
if l[i].slider:
l[i].slider.SetPosition([x+3,y+65+35*(len(l)-i-2)])
    def OnColorSliderFrameLower(self, event):
        # Iconizing/restoring one slider frame iconizes/restores them all.
        # NOTE(review): source indentation was lost; event.Skip() is assumed
        # to belong inside the shhhh branch, and it looks like a `return`
        # may have been intended there as well -- confirm.
        if self.shhhh:
            self.shhhh=self.shhhh-1
            event.Skip()

        # figure out our current state
        i = self.IsIconized()

        # iconize everything
        for guy in self.daddy.parent.colorpoints:
            if not guy.slider == self:
                guy.slider.Show(not i)
|
streitho/spinmob | _pylab_tweaks.py | <filename>_pylab_tweaks.py
import pylab as _pylab
import numpy as _numpy
import matplotlib as _mpl
import wx as _wx
import time as _time
from matplotlib.font_manager import FontProperties as _FontProperties
import os as _os
import _dialogs
import _functions as _fun
import _pylab_colorslider as _pc
import _plot
# Line2D properties that the tweaking helpers know how to edit.
line_attributes = ["linestyle","linewidth","color","marker","markersize","markerfacecolor","markeredgewidth","markeredgecolor"]

# History of (image, data) pairs for image_undo(); capped at 10 entries.
image_undo_list = []
def add_text(text, x=0.01, y=0.01, axes="gca", draw=True, **kwargs):
    """
    Adds text to the axes at the specified position.

    **kwargs go to the axes.text() function.
    """
    if axes=="gca": axes = _pylab.gca()
    # axes-fraction coordinates, so (0.01, 0.01) is just inside the
    # lower-left corner
    axes.text(x, y, text, transform=axes.transAxes, **kwargs)
    if draw: _pylab.draw()
def auto_zoom(zoomx=1, zoomy=1, axes="gca", x_space=0.04, y_space=0.04, draw=True):
    """
    Rescale the axes limits to fit the visible line data.

    zoomx, zoomy       set to 0 to leave that axis's limits alone
    x_space, y_space   fractional white-space padding around the data
    axes               axes to operate on ("gca" = current axes)
    draw               redraw the figure when done
    """
    if axes=="gca": axes = _pylab.gca()
    a = axes

    # get all the lines
    lines = a.get_lines()

    # get the current limits, in case we're not zooming one of the axes.
    x1, x2 = a.get_xlim()
    y1, y2 = a.get_ylim()

    xdata = []
    ydata = []
    for line in lines:
        # build up a huge data array from every Line2D
        if isinstance(line, _mpl.lines.Line2D):
            x, y = line.get_data()

            # BUGFIX: the inner loop previously reused the outer loop's
            # index variable `n`; use a distinct index here.
            for k in range(len(x)):
                # if we're not zooming x and we're in range, append
                if not zoomx and x1 <= x[k] <= x2:
                    xdata.append(x[k])
                    ydata.append(y[k])
                elif not zoomy and y1 <= y[k] <= y2:
                    xdata.append(x[k])
                    ydata.append(y[k])
                elif zoomy and zoomx:
                    xdata.append(x[k])
                    ydata.append(y[k])

    # nothing visible: leave the limits alone
    if len(xdata):
        xmin = min(xdata)
        xmax = max(xdata)
        ymin = min(ydata)
        ymax = max(ydata)

        # we want a small white space boundary surrounding the data in our
        # plot, so set the range accordingly
        if zoomx: a.set_xlim(xmin-x_space*(xmax-xmin), xmax+x_space*(xmax-xmin))
        if zoomy: a.set_ylim(ymin-y_space*(ymax-ymin), ymax+y_space*(ymax-ymin))

        if draw: _pylab.draw()
def click_estimate_slope():
    """
    Takes two clicks and returns the slope.

    Right-click aborts.
    """
    clicks = []
    for _ in range(2):
        c = ginput()
        # empty click list means the user right-clicked to abort
        if not len(c):
            raise_pyshell()
            return None
        clicks.append(c[0])

    raise_pyshell()
    (x1, y1), (x2, y2) = clicks
    return (y1-y2)/(x1-x2)
def click_estimate_curvature():
    """
    Takes two clicks and returns the curvature, assuming the first click
    was the minimum of a parabola and the second was some other point.
    Returns the second derivative of the function giving this parabola.

    Right-click aborts.
    """
    c1 = ginput()
    if not len(c1):
        raise_pyshell()
        return None

    c2 = ginput()
    if not len(c2):
        raise_pyshell()
        return None

    raise_pyshell()

    # y = ymin + 0.5*f''*(x-xmin)**2  =>  f'' = 2*dy/dx**2
    dx = c2[0][0]-c1[0][0]
    dy = c2[0][1]-c1[0][1]
    return 2*dy/dx**2
def click_estimate_difference():
    """
    Takes two clicks and returns the difference vector [dx, dy].

    Right-click aborts.
    """
    points = []
    for _ in range(2):
        c = ginput()
        # empty click list means the user right-clicked to abort
        if not len(c):
            raise_pyshell()
            return None
        points.append(c[0])

    raise_pyshell()
    return [points[1][0]-points[0][0], points[1][1]-points[0][1]]
def close_sliders():
    """Close every open ColorSliderFrame window."""
    # compare by class name so wx proxy objects match too
    target = _pc._pcf.ColorSliderFrame.__name__
    for window in _wx.GetTopLevelWindows():
        if window.__class__.__name__ == target:
            window.Close()
def differentiate_shown_data(neighbors=1, fyname=1, **kwargs):
    """
    Differentiates the data visible on the specified axes using
    fun.derivative_fit() (if neighbors > 0), and derivative() otherwise.
    Modifies the visible data using manipulate_shown_data(**kwargs)
    """
    # pick the derivative estimator
    if neighbors:
        D = lambda x, y: _fun.derivative_fit(x, y, neighbors)
    else:
        D = lambda x, y: _fun.derivative(x, y)

    if fyname==1: fyname = str(neighbors)+'-neighbor D'

    manipulate_shown_data(D, fxname=None, fyname=fyname, **kwargs)
def integrate_shown_data(scale=1, fyname=1, autozero=0, **kwargs):
    """
    Numerically integrates the data visible on the current/specified axes using
    scale*fun.integrate_data(x,y). Modifies the visible data using
    manipulate_shown_data(**kwargs)

    autozero is the number of data points used to estimate the background
    for subtraction. If autozero = 0, no background subtraction is performed.
    """
    # integrand wrapper handed to manipulate_shown_data()
    def I(x,y):
        xout, iout = _fun.integrate_data(x,y, autozero=autozero)
        # Python-2 print statement (module predates Python 3)
        print "Total =", scale*iout[-1]
        return xout, scale*iout

    if fyname==1: fyname = str(scale)+" * Integral"
    manipulate_shown_data(I, fxname=None, fyname=fyname, **kwargs)
def image_sliders(image="top", colormap="_last"):
    """Open the colormap slider GUI for *image*, first closing any stale
    slider windows."""
    close_sliders()
    _pc.GuiColorMap(image, colormap)
def _old_format_figure(figure='gcf', tall=False, draw=True, figheight=10.5, figwidth=8.0, **kwargs):
    """
    This formats the figure in a compact way with (hopefully) enough useful
    information for printing large data sets. Used mostly for line and scatter
    plots with long, information-filled titles.

    Chances are somewhat slim this will be ideal for you but it very well might
    and is at least a good starting point.

    (Superseded by format_figure(); kept for reference.)
    """
    # Python-2 print statement: warn about unused keyword options
    for k in kwargs.keys(): print "NOTE: '"+k+"' is not an option used by spinmob.tweaks.format_figure()"

    if figure == 'gcf': figure = _pylab.gcf()

    # get the window of the figure
    figure_window = get_figure_window(figure)
    #figure_window.SetPosition([0,0])

    # assume two axes means twinx
    window_width=645
    legend_position=1.01

    # set the size of the window (fixed pixel sizes, unlike format_figure)
    if(tall): figure_window.SetSize([window_width,680])
    else:     figure_window.SetSize([window_width,520])
    figure.set_figwidth(figwidth)
    figure.set_figheight(figheight)

    # first, find overall bounds of the figure.
    ymin = 1.0
    ymax = 0.0
    xmin = 1.0
    xmax = 0.0
    for axes in figure.get_axes():
        (x,y,dx,dy) = axes.get_position().bounds
        if y    < ymin: ymin = y
        if y+dy > ymax: ymax = y+dy
        if x    < xmin: xmin = x
        if x+dx > xmax: xmax = x+dx

    # Fraction of the figure's height to use for all the plots.
    if tall: h = 0.7
    else:    h = 0.5

    # buffers around edges
    bt = 0.07
    bb = 0.05
    w  = 0.55
    bl = 0.20

    xscale = w / (xmax-xmin)
    yscale = (h-bt-bb) / (ymax-ymin)

    # rescale every axes into the target region
    for axes in figure.get_axes():
        (x,y,dx,dy) = axes.get_position().bounds
        y  = 1-h+bb + (y-ymin)*yscale
        dy = dy * yscale
        x  = bl
        dx = dx * xscale
        axes.set_position([x,y,dx,dy])

        # set the position of the legend
        _pylab.axes(axes) # set the current axes
        if len(axes.lines)>0:
            _pylab.legend(loc=[legend_position, 0], borderpad=0.02, prop=_FontProperties(size=7))

        # set the label spacing in the legend
        if axes.get_legend():
            if tall: axes.get_legend().labelsep = 0.007
            else:    axes.get_legend().labelsep = 0.01
            axes.get_legend().set_visible(1)

        # set up the title label
        axes.title.set_horizontalalignment('right')
        axes.title.set_size(8)
        axes.title.set_position([1.4,1.02])
        axes.title.set_visible(1)
        #axes.yaxis.label.set_horizontalalignment('center')
        #axes.xaxis.label.set_horizontalalignment('center')

    # get the shell window and bring figure + shell to the front
    if draw:
        shell_window = get_pyshell()
        figure_window.Raise()
        shell_window.Raise()
def format_figure(figure='gcf', tall=False, draw=True, **kwargs):
    """
    This formats the figure in a compact way with (hopefully) enough useful
    information for printing large data sets. Used mostly for line and scatter
    plots with long, information-filled titles.

    Chances are somewhat slim this will be ideal for you but it very well might
    and is at least a good starting point.
    """
    # Python-2 print statement: warn about unused keyword options
    for k in kwargs.keys(): print "NOTE: '"+k+"' is not an option used by spinmob.tweaks.format_figure()"

    if figure == 'gcf': figure = _pylab.gcf()

    # get the window of the figure
    figure_window = get_figure_window(figure)
    #figure_window.SetPosition([0,0])

    # assume two axes means twinx
    window_width=figure_window.GetSize()[0]
    legend_position=1.01

    # set the size of the window, preserving the current width
    if(tall): figure_window.SetSize([window_width,window_width*680./645.])
    else:     figure_window.SetSize([window_width,window_width*520./645.])

    # first, find overall bounds of all axes.
    ymin = 1.0
    ymax = 0.0
    xmin = 1.0
    xmax = 0.0
    for axes in figure.get_axes():
        (x,y,dx,dy) = axes.get_position().bounds
        if y    < ymin: ymin = y
        if y+dy > ymax: ymax = y+dy
        if x    < xmin: xmin = x
        if x+dx > xmax: xmax = x+dx

    # Fraction of the figure's width and height to use for all the plots.
    w = 0.55
    if tall: h = 0.77
    else:    h = 0.75

    # buffers on left and bottom edges
    bb = 0.12
    bl = 0.12

    xscale = w / (xmax-xmin)
    yscale = h / (ymax-ymin)

    # remember which axes were current so we can restore them afterwards
    current_axes = _pylab.gca()

    for axes in figure.get_axes():
        (x,y,dx,dy) = axes.get_position().bounds
        y  = bb + (y-ymin)*yscale
        dy = dy * yscale
        x  = bl + (x-xmin)*xscale
        dx = dx * xscale
        axes.set_position([x,y,dx,dy])

        # set the position of the legend
        _pylab.axes(axes) # set the current axes
        if len(axes.lines)>0:
            _pylab.legend(loc=[legend_position, 0], borderpad=0.02, prop=_FontProperties(size=7))

        # set the label spacing in the legend
        if axes.get_legend():
            if tall: axes.get_legend().labelsep = 0.007
            else:    axes.get_legend().labelsep = 0.01
            axes.get_legend().set_visible(1)

        # set up the title label
        axes.title.set_horizontalalignment('right')
        axes.title.set_size(8)
        axes.title.set_position([1.5,1.02])
        axes.title.set_visible(1)
        #axes.yaxis.label.set_horizontalalignment('center')
        #axes.xaxis.label.set_horizontalalignment('center')

    _pylab.axes(current_axes)

    # get the shell window and bring figure + shell to the front
    if draw:
        shell_window = get_pyshell()
        figure_window.Raise()
        shell_window.Raise()
def impose_legend_limit(limit=30, axes="gca", **kwargs):
    """
    This will erase all but, say, 30 of the legend entries and remake the legend.
    You'll probably have to move it back into your favorite position at this point.
    """
    if axes=="gca": axes = _pylab.gca()

    # make these axes current
    _pylab.axes(axes)

    # relabel everything past the limit, always keeping the final line's label
    last = len(axes.lines)-1
    for n, line in enumerate(axes.lines):
        if n == last:
            continue
        if n > limit-1:
            line.set_label("_nolegend_")
        elif n == limit-1:
            line.set_label("...")

    _pylab.legend(**kwargs)
def image_autozoom(axes="gca"):
    """Zoom the axes out to the full extent of their first image."""
    if axes=="gca": axes = _pylab.gca()

    # extent is (xmin, xmax, ymin, ymax)
    xmin, xmax, ymin, ymax = axes.images[0].get_extent()
    axes.set_xlim(xmin, xmax)
    axes.set_ylim(ymin, ymax)

    _pylab.draw()
def image_coarsen(xlevel=0, ylevel=0, image="auto", method='average'):
    """
    This will coarsen the image data by binning each xlevel+1 along the x-axis
    and each ylevel+1 points along the y-axis

    type can be 'average', 'min', or 'max'
    """
    if image == "auto": image = _pylab.gca().images[0]

    Z = _numpy.array(image.get_array())

    # store this image in the undo list
    global image_undo_list
    image_undo_list.append([image, Z])
    # bound the undo history at 10 entries
    if len(image_undo_list) > 10: image_undo_list.pop(0)

    # images have transposed data, hence (ylevel, xlevel) argument order
    image.set_array(_fun.coarsen_matrix(Z, ylevel, xlevel, method))

    # update the plot
    _pylab.draw()
def image_neighbor_smooth(xlevel=0.2, ylevel=0.2, image="auto"):
    """
    This will bleed nearest neighbor pixels into each other with
    the specified weight factors.
    """
    if image == "auto": image = _pylab.gca().images[0]

    Z = _numpy.array(image.get_array())

    # store this image in the undo list
    global image_undo_list
    image_undo_list.append([image, Z])
    # bound the undo history at 10 entries
    if len(image_undo_list) > 10: image_undo_list.pop(0)

    # get the diagonal smoothing level (eliptical, and scaled down by distance)
    dlevel = ((xlevel**2+ylevel**2)/2.0)**(0.5)

    # don't touch the first column
    new_Z = [Z[0]*1.0]

    # weighted average of each interior pixel with its 8 neighbors;
    # edge rows/columns are left untouched
    for m in range(1,len(Z)-1):
        new_Z.append(Z[m]*1.0)
        for n in range(1,len(Z[0])-1):
            new_Z[-1][n] = (Z[m,n] + xlevel*(Z[m+1,n]+Z[m-1,n]) + ylevel*(Z[m,n+1]+Z[m,n-1]) \
                                   + dlevel*(Z[m+1,n+1]+Z[m-1,n+1]+Z[m+1,n-1]+Z[m-1,n-1]) ) \
                                   / (1.0+xlevel*2+ylevel*2 + dlevel*4)

    # don't touch the last column
    new_Z.append(Z[-1]*1.0)

    # images have transposed data
    image.set_array(_numpy.array(new_Z))

    # update the plot
    _pylab.draw()
def image_undo():
    """
    Undoes the last coarsen or smooth command.
    """
    if len(image_undo_list) <= 0:
        # Python-2 print statement
        print "no undos in memory"
        return

    # restore the most recently saved (image, data) pair
    [image, Z] = image_undo_list.pop(-1)
    image.set_array(Z)
    _pylab.draw()
def image_set_aspect(aspect=1.0, axes="gca"):
    """
    sets the aspect ratio of the current zoom level of the imshow image
    """
    # BUGFIX: compare strings with ==, not identity -- `axes is "gca"`
    # only worked by accident of CPython string interning.
    if axes == "gca": axes = _pylab.gca()

    # make sure it's not in "auto" mode
    if type(axes.get_aspect()) == str: axes.set_aspect(1.0)

    _pylab.draw() # this makes sure the window_extent is okay
    axes.set_aspect(aspect*axes.get_aspect()*axes.get_window_extent().width/axes.get_window_extent().height)
    _pylab.draw()
def image_set_extent(x=None, y=None, axes="gca"):
    """
    Set's the first image's extent, then redraws.

    Examples:
    x = [1,4]
    y = [33.3, 22]

    Passing None for x or y leaves that axis's extent alone; the current
    zoom window is preserved as a fraction of the new extent.
    """
    if axes == "gca": axes = _pylab.gca()

    # get the current plot limits
    xlim = axes.get_xlim()
    ylim = axes.get_ylim()

    # get the old extent; copy into a list so the element assignments
    # below work even when the backend returns a tuple
    extent = list(axes.images[0].get_extent())

    # calculate the fractional extents
    x0     = extent[0]
    y0     = extent[2]
    xwidth = extent[1]-x0
    ywidth = extent[3]-y0
    frac_x1 = (xlim[0]-x0)/xwidth
    frac_x2 = (xlim[1]-x0)/xwidth
    frac_y1 = (ylim[0]-y0)/ywidth
    frac_y2 = (ylim[1]-y0)/ywidth

    # set the new extent (identity test: "== None" misbehaves on arrays)
    if x is not None:
        extent[0] = x[0]
        extent[1] = x[1]
    if y is not None:
        extent[2] = y[0]
        extent[3] = y[1]

    # get the new zoom window
    x0     = extent[0]
    y0     = extent[2]
    xwidth = extent[1]-x0
    ywidth = extent[3]-y0

    x1 = x0 + xwidth*frac_x1
    x2 = x0 + xwidth*frac_x2
    y1 = y0 + ywidth*frac_y1
    y2 = y0 + ywidth*frac_y2

    # set the extent
    axes.images[0].set_extent(extent)

    # rezoom us
    axes.set_xlim(x1,x2)
    axes.set_ylim(y1,y2)

    # draw
    image_set_aspect(1.0)
def image_scale(xscale=1.0, yscale=1.0, axes="gca"):
    """
    Scales the image extent.
    """
    if axes == "gca": axes = _pylab.gca()

    # extent is (xmin, xmax, ymin, ymax)
    xmin, xmax, ymin, ymax = axes.images[0].get_extent()
    image_set_extent([xmin*xscale, xmax*xscale],
                     [ymin*yscale, ymax*yscale], axes)
def image_click_xshift(axes = "gca"):
    """
    Takes a starting and ending point, then shifts the image y by this amount
    """
    # NOTE(review): docstring says "y" but this shifts along x.
    if axes == "gca": axes = _pylab.gca()

    try:
        p1 = ginput()
        p2 = ginput()

        xshift = p2[0][0]-p1[0][0]

        e = axes.images[0].get_extent()

        e[0] = e[0] + xshift
        e[1] = e[1] + xshift

        axes.images[0].set_extent(e)

        _pylab.draw()

    # best-effort: aborted clicks / missing image are silently swallowed
    except:
        print "whoops"
def image_click_yshift(axes = "gca"):
    """
    Takes a starting and ending point, then shifts the image y by this amount
    """
    if axes == "gca": axes = _pylab.gca()

    try:
        p1 = ginput()
        p2 = ginput()

        yshift = p2[0][1]-p1[0][1]

        e = axes.images[0].get_extent()

        e[2] = e[2] + yshift
        e[3] = e[3] + yshift

        axes.images[0].set_extent(e)

        _pylab.draw()

    # best-effort: aborted clicks / missing image are silently swallowed
    except:
        print "whoops"
def image_shift(xshift=0, yshift=0, axes="gca"):
    """
    This will shift an image to a new location on x and y.
    """
    if axes=="gca": axes = _pylab.gca()

    # extent is (xmin, xmax, ymin, ymax); offset both axes
    e = axes.images[0].get_extent()
    shifted = [e[0]+xshift, e[1]+xshift, e[2]+yshift, e[3]+yshift]
    axes.images[0].set_extent(shifted)

    _pylab.draw()
def image_set_clim(zmin=None, zmax=None, axes="gca"):
    """
    This will set the clim (range) of the colorbar.

    Setting zmin or zmax to None will not change them.
    Setting zmin or zmax to "auto" will auto-scale them to include all the data.
    """
    if axes=="gca": axes=_pylab.gca()

    image = axes.images[0]

    if zmin=='auto': zmin = _numpy.min(image.get_array())
    if zmax=='auto': zmax = _numpy.max(image.get_array())
    # identity comparison for None: "== None" can misbehave on array types
    if zmin is None: zmin = image.get_clim()[0]
    if zmax is None: zmax = image.get_clim()[1]

    image.set_clim(zmin, zmax)
    _pylab.draw()
def image_ubertidy(figure="gcf", aspect=1.0, fontsize=18, fontweight='bold', fontname='Arial', ylabel_pad=0.007, xlabel_pad=0.010, colorlabel_pad=0.1, borderwidth=3.0, tickwidth=2.0, window_size=(550,500)):
    """
    Strip labels and thicken borders/ticks on an imshow figure (main axes +
    colorbar) to make it publication-ready.

    Assumes figure.axes[0] is the image axes and figure.axes[1] is the
    colorbar axes.
    """
    if figure=="gcf": figure = _pylab.gcf()

    # do this to both axes
    for a in figure.axes:
        _pylab.axes(a)

        # remove the labels
        a.set_title("")
        a.set_xlabel("")
        a.set_ylabel("")

        # thicken the border
        # we want thick axis lines
        a.spines['top'].set_linewidth(borderwidth)
        a.spines['left'].set_linewidth(borderwidth)
        a.spines['bottom'].set_linewidth(borderwidth)
        a.spines['right'].set_linewidth(borderwidth)
        a.set_frame_on(True) # adds a thick border to the colorbar

        # these two cover the main plot
        _pylab.xticks(fontsize=fontsize, fontweight=fontweight, fontname=fontname)
        _pylab.yticks(fontsize=fontsize, fontweight=fontweight, fontname=fontname)

        # thicken the tick lines
        for l in a.get_xticklines(): l.set_markeredgewidth(tickwidth)
        for l in a.get_yticklines(): l.set_markeredgewidth(tickwidth)

    # set the aspect and window size
    _pylab.axes(figure.axes[0])
    image_set_aspect(aspect)
    get_figure_window().SetSize(window_size)

    # we want to give the labels some breathing room (1% of the data range)
    for label in _pylab.xticks()[1]: label.set_y(-xlabel_pad)
    for label in _pylab.yticks()[1]: label.set_x(-ylabel_pad)

    # need to draw to commit the changes up to this point. Annoying.
    _pylab.draw()

    # get the bounds of the first axes and come up with corresponding bounds
    # for the colorbar
    a1 = _pylab.gca()
    b  = a1.get_position()
    aspect = figure.axes[1].get_aspect()

    pos = []
    pos.append(b.x0+b.width+0.02)   # lower left x
    pos.append(b.y0)                # lower left y
    pos.append(b.height/aspect)     # width
    pos.append(b.height)            # height

    # switch to the colorbar axes
    _pylab.axes(figure.axes[1])
    _pylab.gca().set_position(pos)

    for label in _pylab.yticks()[1]: label.set_x(1+colorlabel_pad)

    # switch back to the main axes
    _pylab.axes(figure.axes[0])

    _pylab.draw()
def is_a_number(s):
    """
    Return 1 if *s* evaluates without error (e.g. "3.5", "1+2"), 0 otherwise.

    SECURITY NOTE: this uses eval() on its argument, so it will execute
    arbitrary code if *s* comes from an untrusted source.
    """
    try:
        eval(s)
        return 1
    # narrowed from a bare except so KeyboardInterrupt/SystemExit propagate
    except Exception:
        return 0
def manipulate_shown_data(f, input_axes="gca", output_axes=None, fxname=1, fyname=1, clear=1, pause=False, **kwargs):
    """
    Loops over the visible data on the specified axes and modifies it based on
    the function f(xdata, ydata), which must return new_xdata, new_ydata

    input_axes      which axes to pull the data from
    output_axes     which axes to dump the manipulated data (None for new figure)
    fxname          the name of the function on x
    fyname          the name of the function on y
                    1 means "use f.__name__"
                    0 or None means no change.
                    otherwise specify a string

    **kwargs are sent to axes.plot
    """
    # get the axes
    if input_axes == "gca": a1 = _pylab.gca()
    else:                   a1 = input_axes

    # get the xlimits
    xmin, xmax = a1.get_xlim()

    # get the name to stick on the x and y labels
    if fxname==1: fxname = f.__name__
    if fyname==1: fyname = f.__name__

    # get the output axes (new figure numbered after the input's)
    if output_axes == None:
        _pylab.figure(a1.figure.number+1)
        a2 = _pylab.axes()
    else:
        a2 = output_axes

    if clear: a2.clear()

    # loop over the data, trimming each line to the visible x-range
    for line in a1.get_lines():
        if isinstance(line, _mpl.lines.Line2D):
            x, y = line.get_data()
            x, y, e = _fun.trim_data(x,y,None,[xmin,xmax])
            new_x, new_y = f(x,y)
            _plot.xy.data(new_x,new_y, clear=0, label=line.get_label(), draw=pause, **kwargs)
            # pause mode waits for the user between lines (Python-2 raw_input)
            if pause:
                format_figure()
                raise_pyshell()
                raw_input("<enter> ")

    # set the labels and title.
    if fxname in [0,None]: a2.set_xlabel(a1.get_xlabel())
    else:                  a2.set_xlabel(fxname+"[ "+a1.get_xlabel()+" ]")
    if fyname in [0,None]: a2.set_ylabel(a1.get_ylabel())
    else:                  a2.set_ylabel(fyname+"[ "+a1.get_ylabel()+" ]")

    # Python-2 dict.has_key
    if not kwargs.has_key('title'):
        a2.title.set_text(get_pyshell_command() + "\n" + a1.title.get_text())

    _pylab.draw()
def manipulate_shown_xdata(fx, fxname=1, **kwargs):
    """
    This defines a function f(xdata,ydata) returning fx(xdata), ydata and
    runs manipulate_shown_data() with **kwargs sent to this. See
    manipulate_shown_data() for more info.
    """
    def wrapped(x, y):
        return fx(x), y
    # keep the user's function name for axis labeling
    wrapped.__name__ = fx.__name__

    manipulate_shown_data(wrapped, fxname=fxname, fyname=None, **kwargs)
def manipulate_shown_ydata(fy, fyname=1, **kwargs):
    """
    This defines a function f(xdata,ydata) returning xdata, fy(ydata) and
    runs manipulate_shown_data() with **kwargs sent to this. See
    manipulate_shown_data() for more info.
    """
    def wrapped(x, y):
        return x, fy(y)
    # keep the user's function name for axis labeling
    wrapped.__name__ = fy.__name__

    manipulate_shown_data(wrapped, fxname=None, fyname=fyname, **kwargs)
def shift(xshift=0, yshift=0, progressive=0, axes="gca"):
    """
    This function adds an artificial offset to the lines.

    yshift          amount to shift vertically
    xshift          amount to shift horizontally
    axes="gca"      axes to do this on, "gca" means "get current axes"
    progressive=0   progressive means each line gets more offset
                    set to 0 to shift EVERYTHING
    """
    if axes=="gca": axes = _pylab.gca()

    # offset the data of every Line2D on the plot
    for m, line in enumerate(axes.get_lines()):
        if not isinstance(line, _mpl.lines.Line2D):
            continue

        xdata = _numpy.array(line.get_xdata())
        ydata = _numpy.array(line.get_ydata())

        # progressive mode scales the offset by the line's index
        factor = m if progressive else 1
        line.set_data(xdata + factor*xshift, ydata + factor*yshift)

    # zoom to surround the data properly
    auto_zoom()
def reverse_draw_order(axes="current"):
    """
    This function takes the graph and reverses the draw order.
    """
    if axes=="current": axes = _pylab.gca()

    # get the lines from the plot
    lines = axes.get_lines()

    # reverse the order
    lines.reverse()

    # write the reversed Line2D objects back into the axes' line list
    # NOTE(review): non-Line2D entries keep their original slots, so the
    # reversal is only exact when every entry is a Line2D -- confirm.
    for n in range(0, len(lines)):
        if isinstance(lines[n], _mpl.lines.Line2D):
            axes.lines[n]=lines[n]

    _pylab.draw()
def scale_x(scale, axes="current"):
    """
    This function scales lines horizontally.
    """
    if axes=="current": axes = _pylab.gca()

    # rescale the x-data of every Line2D
    for line in axes.get_lines():
        if isinstance(line, _mpl.lines.Line2D):
            line.set_xdata(_pylab.array(line.get_xdata())*scale)

    # note the scaling in the title
    axes.title.set_text(axes.title.get_text() + ", x_scale="+str(scale))

    # zoom to surround the data properly
    auto_zoom()
def scale_y(scale, axes="current", lines="all"):
    """
    Multiply the y-data of lines in the axes by the given factor.

    scale           multiplicative factor applied to the y-data
    axes="current"  axes to operate on ("current" means the current axes)
    lines="all"     which lines to scale: "all", a single integer index,
                    or a list of indices such as lines=[1,2,4]

    BUGFIX: the `lines` argument was previously ignored (the local name was
    immediately overwritten with the full line list); the index selection is
    now honored. The default "all" preserves the old behavior.
    """
    if axes=="current": axes = _pylab.gca()

    # pick the lines to operate on: everything, or the requested indices
    all_lines = axes.get_lines()
    if lines == "all":
        targets = all_lines
    else:
        if not _fun.is_iterable(lines): lines = [lines]
        targets = [all_lines[i] for i in lines]

    # loop over the selected lines and scale the data
    for line in targets:
        if isinstance(line, _mpl.lines.Line2D):
            line.set_ydata(_pylab.array(line.get_ydata())*scale)

    # update the title (only append when there is a title)
    title = axes.title.get_text()
    if not title == "":
        title += ", y_scale="+str(scale)
    axes.title.set_text(title)

    # zoom to surround the data properly
    auto_zoom()
def scale_y_universal(average=[1,10], axes="current"):
    """
    Rescale every line so that the average of its points with indices
    average[0]..average[1] (inclusive) equals 1, making the curves overlap
    at that "universal" region.

    average=[1,10]  index range of the points averaged for each line's
                    scaling factor
    axes="current"  axes to operate on ("current" means the current axes)
    """
    if axes=="current": axes = _pylab.gca()

    # get the lines from the plot
    lines = axes.get_lines()

    # loop over the lines and scale the data
    for m in range(0,len(lines)):
        if isinstance(lines[m], _mpl.lines.Line2D):
            # get the actual data values
            xdata = lines[m].get_xdata()
            ydata = lines[m].get_ydata()

            # figure out the scaling factor: mean of the chosen index range
            s=0
            for n in range(average[0], average[1]+1): s += ydata[n]
            scale = 1.0*s/(average[1]-average[0]+1.0)

            # loop over the ydata to scale it
            # NOTE(review): assumes ydata supports item assignment and has
            # more than average[1] points -- confirm with the callers.
            for n in range(0,len(ydata)): ydata[n] = ydata[n]/scale

            # update the data for this line
            lines[m].set_data(xdata, ydata)

    # record the rescaling in the title
    title = axes.title.get_text()
    title += ", universal scale"
    axes.title.set_text(title)

    # zoom to surround the data properly
    auto_zoom()
def set_title(axes="current", title=""):
    """Set the title text of the given axes and redraw."""
    target = _pylab.gca() if axes == "current" else axes
    target.title.set_text(title)
    _pylab.draw()
def set_xrange(xmin="same", xmax="same", axes="gca"):
    """Set the x-limits of the axes; "same" keeps the current bound."""
    if axes == "gca": axes = _pylab.gca()

    current = axes.get_xlim()
    lower = current[0] if xmin == "same" else xmin
    upper = current[1] if xmax == "same" else xmax

    axes.set_xlim(lower, upper)
    _pylab.draw()
def set_yrange(ymin="same", ymax="same", axes="gca"):
    """Set the y-limits of the axes; "same" keeps the current bound."""
    if axes == "gca": axes = _pylab.gca()

    current = axes.get_ylim()
    lower = current[0] if ymin == "same" else ymin
    upper = current[1] if ymax == "same" else ymax

    axes.set_ylim(lower, upper)
    _pylab.draw()
def set_yticks(start, step, axes="gca"):
    """
    Generate evenly spaced y-ticks (start + n*step) covering the current
    y-range and apply them to the axes.
    """
    if axes == "gca": axes = _pylab.gca()

    # remember the x-position of the existing tick labels so we can restore it
    xposition = axes.yaxis.get_ticklabels()[0].get_position()[0]

    # tick indices spanning the current y-limits
    ymin, ymax = axes.get_ylim()
    nstart = int(_pylab.floor((ymin-start)/step))
    nstop  = int(_pylab.ceil((ymax-start)/step))
    axes.set_yticks([start + n*step for n in range(nstart, nstop+1)])

    # restore the horizontal position of each tick label
    for label in axes.yaxis.get_ticklabels():
        x, y = label.get_position()
        label.set_position((xposition, y))

    _pylab.draw()
def set_xticks(start, step, axes="gca"):
    """
    Generate evenly spaced x-ticks (start + n*step) covering the current
    x-range and apply them to the axes.
    """
    if axes == "gca": axes = _pylab.gca()

    # remember the y-position of the existing tick labels so we can restore it
    yposition = axes.xaxis.get_ticklabels()[0].get_position()[1]

    # tick indices spanning the current x-limits
    xmin, xmax = axes.get_xlim()
    nstart = int(_pylab.floor((xmin-start)/step))
    nstop  = int(_pylab.ceil((xmax-start)/step))
    axes.set_xticks([start + n*step for n in range(nstart, nstop+1)])

    # restore the vertical position of each tick label
    for label in axes.xaxis.get_ticklabels():
        x, y = label.get_position()
        label.set_position((x, yposition))

    _pylab.draw()
def invert(axes="current"):
    """
    Flip the plot upside down by scaling all y-data by -1.
    """
    if axes == "current": axes = _pylab.gca()
    scale_y(-1, axes=axes)
def set_markers(marker="o", axes="current"):
    """Set the marker style of every line in the given axes."""
    target = _pylab.gca() if axes == "current" else axes
    set_all_line_attributes("marker", marker, target)
def set_all_line_attributes(attribute="lw", value=2, axes="current", refresh=True):
    """
    Set the given matplotlib line property to the given value on every
    Line2D in the axes.

    refresh=True  redraw the canvas afterwards
    """
    if axes == "current": axes = _pylab.gca()

    # apply the property to each Line2D
    for l in axes.get_lines():
        if isinstance(l, _mpl.lines.Line2D):
            _pylab.setp(l, attribute, value)

    # update the plot
    if refresh: _pylab.draw()
def set_line_attribute(line=-1, attribute="lw", value=2, axes="current", refresh=True):
    """
    Set the given matplotlib line property on a single line of the axes.

    line=-1        index of the line to modify (default: the last line added)
    attribute="lw" matplotlib line property name
    value=2        value to assign
    axes="current" axes to operate on ("current" means the current axes)
    refresh=True   redraw the canvas afterwards

    BUGFIX: the `line` argument was previously ignored and the last line was
    always modified; the index is now honored (the default -1 preserves the
    old behavior).
    """
    if axes=="current": axes = _pylab.gca()

    # look up the requested line by index (was hard-coded to [-1])
    target = axes.get_lines()[line]
    _pylab.setp(target, attribute, value)

    # update the plot
    if refresh: _pylab.draw()
def smooth_line(line, smoothing=1, trim=True, draw=True):
    """
    Smooth a Line2D's y-data with nearest-neighbor averaging.

    line         the Line2D instance to modify
    smoothing=1  smoothing level (also the number of points trimmed from
                 each end when trim is True)
    trim=True    drop `smoothing` points from each boundary of the data
    draw=True    redraw the canvas afterwards
    """

    # get the actual data values
    xdata = list(line.get_xdata())
    ydata = list(line.get_ydata())

    # NOTE(review): assumes _fun.smooth_array modifies ydata in place (its
    # return value is ignored here) -- confirm against _fun's implementation.
    _fun.smooth_array(ydata, smoothing)

    if trim:
        # drop the boundary points affected by the smoothing window
        for n in range(0, smoothing):
            xdata.pop(0); xdata.pop(-1)
            ydata.pop(0); ydata.pop(-1)

    # don't do anything if we don't have any data left
    if len(ydata) == 0:
        print "There's nothing left in "+str(line)+"!"
    else:
        # otherwise set the data with the new arrays
        line.set_data(xdata, ydata)

    # we refresh in real time for giggles
    if draw: _pylab.draw()
def coarsen_line(line, coarsen=1, draw=True):
    """
    Coarsen a Line2D's data via _fun.coarsen_array (neighbor averaging).
    """
    # run both coordinate arrays through the coarsening routine
    new_x = _fun.coarsen_array(line.get_xdata(), coarsen)
    new_y = _fun.coarsen_array(line.get_ydata(), coarsen)

    # only update the line if there is data left
    if len(new_y) == 0:
        print("There's nothing left in " + str(line) + "!")
    else:
        line.set_data(new_x, new_y)

    # refresh in real time if requested
    if draw: _pylab.draw()
def smooth_selected_trace(trim=True, axes="gca"):
"""
This cycles through all the lines in a set of axes, highlighting them,
and asking for how much you want to smooth by (0 or <enter> is valid)
"""
if axes=="gca": axes = _pylab.gca()
# get all the lines
lines = axes.get_lines()
for line in lines:
if isinstance(line, _mpl.lines.Line2D):
# first highlight it
fatten_line(line)
raise_figure_window()
raise_pyshell()
# get the smoothing factor
ready = 0
while not ready:
response = raw_input("Smoothing Factor (<enter> to skip): ")
try:
int(response)
ready=1
except:
if response=="\n": ready = 1
else: print "No!"
if not response == "\n":
smooth_line(line, int(response), trim)
# return the line to normal
unfatten_line(line)
def smooth_all_traces(smoothing=1, trim=True, axes="gca"):
    """
    Apply nearest-neighbor smoothing to every Line2D in the axes.
    """
    if axes == "gca": axes = _pylab.gca()

    # smooth each line without redrawing, then redraw once at the end
    for trace in axes.get_lines():
        if isinstance(trace, _mpl.lines.Line2D):
            smooth_line(trace, smoothing, trim, draw=False)

    _pylab.draw()
def coarsen_all_traces(coarsen=1, axes="all", figure=None):
    """
    Coarsen the data of every Line2D in the given axes.

    coarsen=1   coarsening level passed to coarsen_line()
    axes="all"  "all" for every axes of the figure, "gca" for the current
                axes, or an axes instance / list of axes instances
    figure=None figure to use when axes=="all" (None means the current figure)

    BUGFIX: a supplied `figure` previously caused a NameError (the local `f`
    was only assigned when figure was None); the argument is now honored.
    """
    if axes=="gca": axes=_pylab.gca()
    if axes=="all":
        # use the supplied figure, falling back to the current one
        f = figure if figure else _pylab.gcf()
        axes = f.axes

    if not _fun.is_iterable(axes): axes = [axes]

    for a in axes:
        # coarsen each line of this axes without redrawing
        for line in a.get_lines():
            if isinstance(line, _mpl.lines.Line2D):
                coarsen_line(line, coarsen, draw=False)

    _pylab.draw()
def line_math(fx=None, fy=None, axes='gca'):
    """
    Apply fx to all x-data and fy to all y-data of the lines in the axes.
    Either function may be None to leave that coordinate untouched.
    """
    if axes == 'gca': axes = _pylab.gca()

    for l in axes.get_lines():
        if isinstance(l, _mpl.lines.Line2D):
            x, y = l.get_data()
            if fx is not None: x = fx(x)
            if fy is not None: y = fy(y)
            l.set_data(x, y)

    _pylab.draw()
def trim(xmin="auto", xmax="auto", ymin="auto", ymax="auto", axes="current"):
    """
    This function just removes all data from the plots that
    is outside of the [xmin,xmax,ymin,ymax] range.

    "auto" means "determine from the current axes's range"
    """
    if axes=="current": axes = _pylab.gca()

    # "auto" bounds come from the axes' current view limits
    if xmin=="auto": (xmin, dummy) = axes.get_xlim()
    if xmax=="auto": (dummy, xmax) = axes.get_xlim()
    if ymin=="auto": (ymin, dummy) = axes.get_ylim()
    if ymax=="auto": (dummy, ymax) = axes.get_ylim()

    # get the lines from the plot
    lines = axes.get_lines()

    # loop over the lines and trim the data
    for line in lines:
        # get the actual data values
        old_xdata = line.get_xdata()
        old_ydata = line.get_ydata()

        # loop over the xdata and trim if it's outside the range
        new_xdata = []
        new_ydata = []
        for n in range(0, len(old_xdata)):
            # keep the point only if it's inside the box on both axes
            if old_xdata[n] >= xmin and old_xdata[n] <= xmax \
            and old_ydata[n] >= ymin and old_ydata[n] <= ymax:
                # append it to the new x and y data set
                new_xdata.append(old_xdata[n])
                new_ydata.append(old_ydata[n])

        # don't do anything if we don't have any data left
        if len(new_xdata) == 0:
            print "There's nothing left in "+str(line)+"!"
        else:
            # otherwise set the data with the new arrays
            line.set_data(new_xdata, new_ydata)

    # loop over the collections, where the vertical parts of the error bars are stored
    # NOTE(review): c._paths is a private matplotlib attribute -- this relies
    # on an older mpl internal representation; confirm against the mpl
    # version in use.
    for c in axes.collections:
        # loop over the paths backwards so popping doesn't shift later indices
        for n in range(len(c._paths)-1,-1,-1):
            # check every vertex of this path against the trim box
            naughty = False
            for v in c._paths[n].vertices:
                # if the path contains any vertices outside the trim box, kill it!
                if v[0] < xmin or v[0] > xmax or v[1] < ymin or v[1] > ymax:
                    naughty=True

            # BOOM
            if naughty: c._paths.pop(n)

    # zoom to surround the data properly
    auto_zoom()
def xscale(scale='log'):
    """Set the x-axis scale ('log' or 'linear') of the current axes and redraw."""
    _pylab.xscale(scale)
    _pylab.draw()
def yscale(scale='log'):
    """Set the y-axis scale ('log' or 'linear') of the current axes and redraw."""
    _pylab.yscale(scale)
    _pylab.draw()
def ubertidy(figure="gcf", zoom=True, width=None, height=None, fontsize=15, fontweight='normal', fontname='Arial',
             borderwidth=1.5, tickwidth=1, ticks_point="in", xlabel_pad=0.013, ylabel_pad=0.010, window_size=[550,550]):
    """
    This guy performs the ubertidy from the helper on the first window.
    Currently assumes there is only one set of axes in the window!

    figure="gcf"          figure to tidy ("gcf" means the current figure)
    zoom=True             auto-zoom each axes when done
    width, height         if given, resize each axes (see the position math below)
    fontsize/fontweight/fontname   tick label font settings
    borderwidth=1.5       line width of the four axes spines
    tickwidth=1           marker edge width of the tick marks
    ticks_point="in"      "out" makes the ticks point away from the plot area
    xlabel_pad/ylabel_pad breathing room for the tick labels (axes fraction)
    window_size           window size in pixels

    NOTE(review): uses f.canvas.Parent.SetSize, which assumes the wx
    backend -- confirm the backend before calling.
    """
    if figure=="gcf": f = _pylab.gcf()
    else: f = figure

    # first set the size of the window
    f.canvas.Parent.SetSize(window_size)

    for n in range(len(f.axes)):
        # get the axes
        a = f.axes[n]

        # set the current axes
        _pylab.axes(a)

        # we want thick axis lines
        a.spines['top'].set_linewidth(borderwidth)
        a.spines['left'].set_linewidth(borderwidth)
        a.spines['bottom'].set_linewidth(borderwidth)
        a.spines['right'].set_linewidth(borderwidth)

        # get the tick lines in one big list
        xticklines = a.get_xticklines()
        yticklines = a.get_yticklines()

        # set their marker edge width
        _pylab.setp(xticklines+yticklines, mew=tickwidth)

        # set what kind of tickline they are (outside axes)
        if ticks_point=="out":
            for l in xticklines: l.set_marker(_mpl.lines.TICKDOWN)
            for l in yticklines: l.set_marker(_mpl.lines.TICKLEFT)

        # get rid of the top and right ticks
        a.xaxis.tick_bottom()
        a.yaxis.tick_left()

        # we want bold fonts
        _pylab.xticks(fontsize=fontsize, fontweight=fontweight, fontname=fontname)
        _pylab.yticks(fontsize=fontsize, fontweight=fontweight, fontname=fontname)

        # we want to give the labels some breathing room (1% of the data range)
        for label in _pylab.xticks()[1]: label.set_y(-xlabel_pad)
        for label in _pylab.yticks()[1]: label.set_x(-ylabel_pad)

        # get rid of tick label offsets
        #a.ticklabel_format(style='plain')

        # set the position/size of the axis in the window (axes fraction);
        # width/height enter as 0.15+width*0.5 and 0.17+height*0.5
        p = a.get_position().bounds
        if width: a.set_position([0.15,p[1],0.15+width*0.5,p[3]])
        p = a.get_position().bounds
        if height: a.set_position([p[0],0.17,p[2],0.17+height*0.5])

        # set the axis labels to empty (so we can add them with a drawing program)
        a.set_title('')
        a.set_xlabel('')
        a.set_ylabel('')

        # kill the legend
        a.legend_ = None

        # zoom!
        if zoom: auto_zoom(axes=a)
def make_inset(figure="current", width=1, height=1):
    """
    This guy makes the figure thick and small, like an inset.
    Currently assumes there is only one set of axes in the window!

    figure="current"  figure to modify ("current" means the current figure)
    width, height     axes extent entering the position as 0.1+0.7*width etc.

    NOTE(review): uses figure.canvas.GetParent() and axes.get_frame(), which
    assume the wx backend and an old matplotlib API -- confirm versions.
    """
    # get the current figure if we're not supplied with one
    if figure == "current": figure = _pylab.gcf()

    # get the window
    w = figure.canvas.GetParent()

    # first set the size of the window
    w.SetSize([220,300])

    # we want thick axis lines
    figure.axes[0].get_frame().set_linewidth(3.0)

    # get the tick lines in one big list
    xticklines = figure.axes[0].get_xticklines()
    yticklines = figure.axes[0].get_yticklines()

    # set their marker edge width
    _pylab.setp(xticklines+yticklines, mew=2.0)

    # set what kind of tickline they are (outside axes)
    for l in xticklines: l.set_marker(_mpl.lines.TICKDOWN)
    for l in yticklines: l.set_marker(_mpl.lines.TICKLEFT)

    # get rid of the top and right ticks
    figure.axes[0].xaxis.tick_bottom()
    figure.axes[0].yaxis.tick_left()

    # we want bold fonts
    _pylab.xticks(fontsize=20, fontweight='bold', fontname='Arial')
    _pylab.yticks(fontsize=20, fontweight='bold', fontname='Arial')

    # hide the tick labels entirely (an inset carries no numbers)
    figure.axes[0].xaxis.set_ticklabels([])
    figure.axes[0].yaxis.set_ticklabels([])

    # set the position/size of the axis in the window
    figure.axes[0].set_position([0.1,0.1,0.1+0.7*width,0.1+0.7*height])

    # set the axis labels to empty (so we can add them with a drawing program)
    figure.axes[0].set_title('')
    figure.axes[0].set_xlabel('')
    figure.axes[0].set_ylabel('')

    # set the position of the legend far away
    figure.axes[0].legend=None

    # zoom!
    auto_zoom(figure.axes[0], 0.07, 0.07)
def export_figure(dpi=200, figure="gcf", path="ask"):
    """
    Render the figure to an image/postscript file via matplotlib's savefig.
    """
    if figure == "gcf": figure = _pylab.gcf()

    # prompt for a destination unless one was supplied
    if path == "ask":
        path = _dialogs.Save("*.*", default_directory="save_plot_default_directory")

    if path == "":
        print("aborted.")
        return

    figure.savefig(path, dpi=dpi)
def save_plot(axes="gca", path="ask"):
    """
    Saves the figure in my own ascii format

    axes="gca"  axes to save ("gca" means the current axes)
    path="ask"  destination file; "ask" pops a save dialog. A ".plot"
                extension is appended when missing.

    The format is line-oriented: title/xlabel/ylabel headers, then for each
    trace a "trace=new" marker, its legend, one "attr=value" line per entry
    of the module-level `line_attributes` list, and finally "x y" data rows.
    """
    global line_attributes

    # choose a path to save to
    if path=="ask": path = _dialogs.Save("*.plot", default_directory="save_plot_default_directory")
    if path=="":
        print "aborted."
        return

    # enforce the .plot extension
    if not path.split(".")[-1] == "plot": path = path+".plot"

    # NOTE(review): file() is the Python-2 builtin; this module is py2-era code.
    f = file(path, "w")

    # if no argument was given, get the current axes
    if axes=="gca": axes=_pylab.gca()

    # write the figure-level headers (newlines escaped to keep one header per line)
    f.write("title=" +axes.title.get_text().replace('\n', '\\n')+'\n')
    f.write("xlabel="+axes.xaxis.label.get_text().replace('\n','\\n')+'\n')
    f.write("ylabel="+axes.yaxis.label.get_text().replace('\n','\\n')+'\n')

    # now loop over the available lines
    for l in axes.lines:
        # write the data header
        f.write("trace=new\n")
        f.write("legend="+l.get_label().replace('\n', '\\n')+"\n")

        # one "attribute=value" line per saved line attribute
        for a in line_attributes: f.write(a+"="+str(_pylab.getp(l, a)).replace('\n','')+"\n")

        # get the data
        x = l.get_xdata()
        y = l.get_ydata()

        # loop over the data
        for n in range(0, len(x)): f.write(str(float(x[n])) + " " + str(float(y[n])) + "\n")

    f.close()
def save_figure_raw_data(figure="gcf", **kwargs):
    """
    Dump an ascii file (one "x<tab>y" row per point) for every line of
    every axes in the shown figure. **kwargs are sent to dialogs.Save().
    """
    # choose a path to save to
    path = _dialogs.Save(**kwargs)
    if path == "": return "aborted."

    # if no argument was given, get the current figure
    if figure == "gcf": figure = _pylab.gcf()

    for n, a in enumerate(figure.axes):
        for m, l in enumerate(a.lines):
            x = l.get_xdata()
            y = l.get_ydata()

            # build a per-line filename next to the chosen path
            directory, filename = _os.path.split(path)
            p = _os.path.join(directory, "axes" + str(n) + " line" + str(m) + " " + filename)
            print(p)

            # write one x<tab>y row per data point
            f = open(p, 'w')
            for j in range(0, len(x)):
                f.write(str(x[j]) + "\t" + str(y[j]) + "\n")
            f.close()
def load_plot(clear=1, offset=0, axes="gca"):
    """
    Load a plot saved by save_plot() (chosen via a file dialog) into the
    given axes.

    clear=1     clear the figure before loading
    offset=0    constant added to every y value as it is read in
    axes="gca"  axes to plot into ("gca" means the current axes)
    """
    # choose a path to load the file from
    path = _dialogs.SingleFile("*.*", default_directory="save_plot_default_directory")
    if path=="": return

    # read the file in
    lines = _fun.read_lines(path)

    # if no argument was given, get the current axes
    if axes=="gca": axes=_pylab.gca()

    # if we're supposed to, clear the plot
    if clear:
        axes.figure.clear()
        _pylab.gca()

    # parser state: per-trace data arrays, per-trace attributes, and
    # figure-level headers (with fallbacks if the file lacks them)
    xdata = []
    ydata = []
    line_stuff = []
    legend = []
    title = 'reloaded plot with no title'
    xlabel = 'x-data with no label'
    ylabel = 'y-data with no label'

    for line in lines:
        # split by "=": header lines look like "key=value"
        s = line.strip().split('=')
        if len(s) > 1: # header stuff
            if s[0].strip() == 'title':
                # set the title of the plot (restore escaped newlines)
                title = ""
                for n in range(1,len(s)): title += " "+s[n].replace('\\n', '\n')
            elif s[0].strip() == 'xlabel':
                # set the x-axis label of the plot
                xlabel = ""
                for n in range(1,len(s)): xlabel += " "+s[n].replace('\\n', '\n')
            elif s[0].strip() == 'ylabel':
                # set the y-axis label of the plot
                ylabel = ""
                for n in range(1,len(s)): ylabel += " "+s[n].replace('\\n', '\n')
            elif s[0].strip() == 'legend':
                l=""
                for n in range(1,len(s)): l += " " + s[n].replace('\\n', '\n')
                legend.append(l)
            elif s[0].strip() == 'trace':
                # if we're on a new plot, start fresh data/attribute arrays
                xdata.append([])
                ydata.append([])
                line_stuff.append({})
            elif s[0].strip() in line_attributes:
                # remember this line attribute for the current trace
                line_stuff[-1][s[0].strip()] = s[1].strip()

        else: # data
            # data rows are "x y" pairs separated by a space
            s = line.strip().split(' ')
            try:
                float(s[0])
                float(s[1])
                xdata[-1].append(float(s[0]))
                ydata[-1].append(float(s[1])+offset)
            except:
                print "error s=" + str(s)

    # rebuild each trace and re-apply its saved label and attributes
    for n in range(0, len(xdata)):
        axes.plot(xdata[n], ydata[n])
        l = axes.get_lines()[-1]
        l.set_label(legend[n])
        for key in line_stuff[n]:
            # attributes are stored as strings; restore numbers where possible
            try: _pylab.setp(l, key, float(line_stuff[n][key]))
            except: _pylab.setp(l, key, line_stuff[n][key])

    # restore the figure-level headers and reformat
    axes.set_title(title)
    axes.set_xlabel(xlabel)
    axes.set_ylabel(ylabel)

    format_figure(axes.figure)
def get_figure_window(figure='gcf'):
    """
    Return the wx window containing the given figure ('gcf' = current figure).
    """
    target = _pylab.gcf() if figure == 'gcf' else figure
    return target.canvas.GetParent()
def get_pyshell():
    """
    Search through the top-level wx windows and return the PyShell or
    PyCrust frame, or False if none is found.
    """
    for frame in _wx.GetTopLevelWindows():
        # exact type match, as in the original (subclasses not accepted)
        if type(frame) in (_wx.py.shell.ShellFrame, _wx.py.crust.CrustFrame):
            return frame
    return False
def get_pyshell_command(n=0):
    """
    Return a string of the n'th previous pyshell command (n=0 means the
    command currently on the prompt line).
    """
    shell = get_pyshell().shell
    if n:
        return str(shell.history[n-1])
    # current line: text after the last ">>> " prompt, up to the newline
    return str(shell.GetText().split('\n>>> ')[-1].split('\n')[0].strip())
def raise_figure_window(figure='gcf'):
    """Bring the wx window containing the given figure to the front."""
    get_figure_window(figure).Raise()
def raise_pyshell():
    """Bring the wx PyShell/PyCrust window to the front."""
    get_pyshell().Raise()
def modify_legend(axes="gca"):
# get the axes
if axes=="gca": axes = _pylab.gca()
# get the lines
lines = axes.get_lines()
# loop over the lines
for line in lines:
if isinstance(line, _mpl.lines.Line2D):
# highlight the line
fatten_line(line)
# get the label (from the legend)
label = line.get_label()
print label
new_label = raw_input("New Label: ")
if new_label == "q" or new_label == "quit":
unfatten_line(line)
return
if not new_label == "\n": line.set_label(new_label)
unfatten_line(line)
format_figure()
def fatten_line(line, william_fatner=2.0):
    """Scale a line's marker size and line width by the given factor (default 2x)."""
    line.set_markersize(line.get_markersize()*william_fatner)
    line.set_linewidth(line.get_linewidth()*william_fatner)
    _pylab.draw()
def unfatten_line(line, william_fatner=0.5):
    """Undo fatten_line() by scaling the marker size and line width by 0.5."""
    fatten_line(line, william_fatner)
def legend(location='best', fontsize=16, axes="gca"):
    """Show the legend on the given axes with the specified location and font size."""
    if axes=="gca": axes = _pylab.gca()
    axes.legend(loc=location, prop=_mpl.font_manager.FontProperties(size=fontsize))
    _pylab.draw()
class GaelInput(object):
"""
Class that create a callable object to retrieve mouse click in a
blocking way, a la MatLab. Based on <NAME>'s almost-working
object. Thanks Gael! I've wanted to get this working for years!
-Jack
"""
debug = False
cid = None # event connection object
clicks = [] # list of click coordinates
n = 1 # number of clicks we're waiting for
lines = False # if we should draw the lines
def on_click(self, event):
"""
Event handler that will be passed to the current figure to
retrieve clicks.
"""
# write the debug information if we're supposed to
if self.debug: print "button "+str(event.button)+": "+str(event.xdata)+", "+str(event.ydata)
# if this event's a right click we're done
if event.button == 3:
self.done = True
return
# if it's a valid click (and this isn't an extra event
# in the queue), append the coordinates to the list
if event.inaxes and not self.done:
self.clicks.append([event.xdata, event.ydata])
# now if we're supposed to draw lines, do so
if self.lines and len(self.clicks) > 1:
event.inaxes.plot([self.clicks[-1][0], self.clicks[-2][0]],
[self.clicks[-1][1], self.clicks[-2][1]],
color='w', linewidth=2.0, scalex=False, scaley=False)
event.inaxes.plot([self.clicks[-1][0], self.clicks[-2][0]],
[self.clicks[-1][1], self.clicks[-2][1]],
color='k', linewidth=1.0, scalex=False, scaley=False)
_pylab.draw()
# if we have n data points, we're done
if len(self.clicks) >= self.n and self.n is not 0:
self.done = True
return
def __call__(self, n=1, timeout=0, debug=False, lines=False):
"""
Blocking call to retrieve n coordinate pairs through mouse clicks.
n=1 number of clicks to collect. Set n=0 to keep collecting
points until you click with the right mouse button.
timeout=30 maximum number of seconds to wait for clicks before giving up.
timeout=0 to disable
debug=False show each click event coordinates
lines=False draw lines between clicks
"""
# just for printing the coordinates
self.debug = debug
# for drawing lines
self.lines = lines
# connect the click events to the on_click function call
self.cid = _pylab.connect('button_press_event', self.on_click)
# initialize the list of click coordinates
self.clicks = []
# wait for n clicks
self.n = n
self.done = False
t = 0.0
while not self.done:
# key step: yield the processor to other threads
_wx.Yield();
_time.sleep(0.05)
# check for a timeout
t += 0.02
if timeout and t > timeout: print "ginput timeout"; break;
# All done! Disconnect the event and return what we have
_pylab.disconnect(self.cid)
self.cid = None
return _numpy.array(self.clicks)
def ginput(n=1, timeout=0, show=True, lines=False):
    """
    Simple functional call for physicists. Waits for n clicks from the user
    and returns a list of the coordinates of each click.

    n=1          number of clicks to collect, n=0 for "wait until right click"
    timeout=0    maximum number of seconds to wait for clicks before giving
                 up (0 disables the timeout)
    show=True    print the clicks as they are received
    lines=False  draw lines between clicks

    This is my original implementation, kept because it behaves a little
    differently than the version eventually added to matplotlib; prefer the
    official one when you can!
    """
    grabber = GaelInput()
    return grabber(n, timeout, show, lines)
#
# Style cycle, available for use in plotting
#
class style_cycle:
    """
    Rotating cycles of linestyles, markers, and colors for plotting.
    Each get_* method returns the current item of its cycle and then
    advances that cycle's index, wrapping around at the end of the list.

    IMPROVEMENT: the five identical advance-and-wrap code paths are now a
    single private helper (_cycle); behavior and interface are unchanged.
    """

    def __init__(self, linestyles=['-'], markers=['s','^','o'], colors=['k','r','b','g','m'], line_colors=None, face_colors=None, edge_colors=None):
        """
        Set up the line/marker rotation cycles.

        linestyles, markers, and colors need to be lists, and you can override
        using line_colors, and face_colors, and edge_colors (markeredgecolor) by
        setting them to a list instead of None.
        """
        # initial setup, assuming all the overrides are None
        self.linestyles = linestyles
        self.markers = markers
        self.line_colors = colors
        self.face_colors = colors
        self.edge_colors = colors

        # Apply the override colors
        if not line_colors == None: self.line_colors = line_colors
        if not face_colors == None: self.face_colors = face_colors
        if not edge_colors == None: self.edge_colors = edge_colors

        # start all five cycles at the beginning
        self.reset()

    # binding for the user to easily re-initialize
    initialize = __init__

    def reset(self):
        """Rewind all five rotation indices to the beginning."""
        self.line_colors_index = 0
        self.markers_index = 0
        self.linestyles_index = 0
        self.face_colors_index = 0
        self.edge_colors_index = 0

    def _cycle(self, items, index, increment):
        """
        Return (items[index], new_index) where new_index has advanced by
        `increment`, wrapping around the end of the list exactly as the
        original per-method logic did.
        """
        current = items[index]
        index += increment
        if index >= len(items):
            index = index - len(items)
            if index >= len(items): index = 0  # to be safe
        return current, index

    def get_line_color(self, increment=1):
        """
        Returns the current color, then increments the color by what's specified
        """
        color, self.line_colors_index = self._cycle(self.line_colors, self.line_colors_index, increment)
        return color

    def set_all_colors(self, colors=['k','k','r','r','b','b','g','g','m','m']):
        """Use the same color list for line, face, and edge colors; reset."""
        self.line_colors=colors
        self.face_colors=colors
        self.edge_colors=colors
        self.reset()

    def get_marker(self, increment=1):
        """
        Returns the current marker, then increments the marker by what's specified
        """
        marker, self.markers_index = self._cycle(self.markers, self.markers_index, increment)
        return marker

    def set_markers(self, markers=['o']):
        """Replace the marker cycle and reset all indices."""
        self.markers=markers
        self.reset()

    def get_linestyle(self, increment=1):
        """
        Returns the current linestyle, then increments it by what's specified
        """
        linestyle, self.linestyles_index = self._cycle(self.linestyles, self.linestyles_index, increment)
        return linestyle

    def set_linestyles(self, linestyles=['-']):
        """Replace the linestyle cycle and reset all indices."""
        self.linestyles=linestyles
        self.reset()

    def get_face_color(self, increment=1):
        """
        Returns the current face color, then increments it by what's specified
        """
        color, self.face_colors_index = self._cycle(self.face_colors, self.face_colors_index, increment)
        return color

    def set_face_colors(self, colors=['k','none','r','none','b','none','g','none','m','none']):
        """Replace the face-color cycle and reset all indices."""
        self.face_colors=colors
        self.reset()

    def get_edge_color(self, increment=1):
        """
        Returns the current edge color, then increments it by what's specified
        """
        color, self.edge_colors_index = self._cycle(self.edge_colors, self.edge_colors_index, increment)
        return color

    def set_edge_colors(self, colors=['k','none','r','none','b','none','g','none','m','none']):
        """Replace the edge-color cycle and reset all indices."""
        self.edge_colors=colors
        self.reset()

    def apply(self, axes="gca"):
        """
        Applies the style cycle (from the start) to the lines in the axes specified
        """
        if axes == "gca": axes = _pylab.gca()
        self.reset()
        for l in axes.get_lines():
            l.set_color(self.get_line_color(1))
            l.set_mfc(self.get_face_color(1))
            l.set_marker(self.get_marker(1))
            l.set_mec(self.get_edge_color(1))
            l.set_linestyle(self.get_linestyle(1))
        _pylab.draw()

    # calling the object directly returns the next line color
    def __call__(self, increment=1):
        return self.get_line_color(increment)
# this is the guy in charge of keeping track of the rotation of colors and symbols for plotting
# (module-level singleton shared by the plotting routines in this module)
style = style_cycle(colors = ['k','r','b','g','m'],
                    markers = ['o', '^', 's'],
                    linestyles = ['-'])
|
streitho/spinmob | _plot.py | <filename>_plot.py
import spinmob as _s
import numpy as _n
import _plot_complex as complex_plane; reload(complex_plane)
import _plot_magphase as mag_phase; reload(mag_phase)
import _plot_realimag as real_imag; reload(real_imag)
import _plot_image as image; reload(image)
import _plot_xy as xy; reload(xy)
import _plot_parametric as parametric; reload(parametric)
import _plotting_mess
import _pylab_tweaks as tweaks
style = _plotting_mess.plot_style_cycle |
Ablesius/Codewars | list_filtering.py | #! /usr/bin/env python
# In this kata you will create a function that takes a list of non-negative
# integers and strings and returns a new list with the strings filtered out.
# Example
#
# filter_list([1,2,'a','b']) == [1,2]
# filter_list([1,'a','b',0,15]) == [1,0,15]
# filter_list([1,2,'aasf','1','123',123]) == [1,2,123]
def filter_list(input_list):
    """Return a new list with only the (non-negative) integers kept and
    all strings filtered out."""
    return [element for element in input_list if isinstance(element, int)]
|
Ablesius/Codewars | find_odd_int.py | <gh_stars>0
#! /usr/bin/env python
# Given an array, find the int that appears an odd number of times.
# There will always be only one integer that appears an odd number of times.
def find_odd(int_list):
    """return None if no odd int was found, the odd one otherwise"""
    return next((value for value in int_list if int_list.count(value) % 2), None)
|
Ablesius/Codewars | Title_Case.py | #! /usr/bin/env python
# A string is considered to be in title case if each word in the string is either (a) capitalised (that is, only the first letter of the word is in upper case) or (b) considered to be an exception and put entirely into lower case unless it is the first word, which is always capitalised.
#
# Write a function that will convert a string into title case, given an optional list of exceptions (minor words). The list of minor words will be given as a string with each word separated by a space. Your function should ignore the case of the minor words string -- it should behave in the same way even if the case of the minor word string is changed.
#
# ###Arguments
#
# First argument (required): the original string to be converted.
# Second argument (optional): space-delimited list of minor words that must always be lowercase except for the first word in the string.
def convert_to_title_case(s: str, exceptions="") -> str:
    """Convert s to title case; words listed in `exceptions`
    (space-delimited, case-insensitive) stay lowercase unless they are
    the first word of the string."""
    minor_words = exceptions.lower().split()
    result = []
    for position, word in enumerate(s.split()):
        keep_lower = position != 0 and word.lower() in minor_words
        result.append(word.lower() if keep_lower else word.title())
    return ' '.join(result)
def test_convert_to_title_case():
    """Pytest cases: minor words stay lowercase except at sentence start."""
    assert convert_to_title_case("this is a sentence with exceptions", "is a") == "This is a Sentence With Exceptions"
    assert convert_to_title_case("in a hole in the ground there lived a hobbit", "in a the") == "In a Hole in the Ground There Lived a Hobbit"
|
Ablesius/Codewars | count_duplicates.py | <reponame>Ablesius/Codewars
#! /usr/bin/env python
# Count the number of Duplicates
#
# Write a function that will return the count of distinct case-insensitive alphabetic characters and numeric digits that occur more than once in the input string. The input string can be assumed to contain only alphabets (both uppercase and lowercase) and numeric digits.
# Example
#
# "abcde" -> 0 # no characters repeats more than once
# "aabbcde" -> 2 # 'a' and 'b'
# "aabBcde" -> 2 # 'a' occurs twice and 'b' twice (`b` and `B`)
# "indivisibility" -> 1 # 'i' occurs six times
# "Indivisibilities" -> 2 # 'i' occurs seven times and 's' occurs twice
# "aA11" -> 2 # 'a' and '1'
# "ABBA" -> 2 # 'A' and 'B' each occur twice
def count_duplicate_characters(input_string):
    """Count distinct case-insensitive characters occurring more than once."""
    normalized = input_string.lower()
    return sum(1 for ch in set(normalized) if normalized.count(ch) > 1)
def count_duplicates(input_string):
    """More concise version.

    Counts distinct case-insensitive characters occurring more than once.

    IMPROVEMENT: the original lowered and rescanned the whole string for
    every distinct character (O(n^2) overall); this lowers once and counts
    every character in a single Counter pass (O(n)).
    """
    from collections import Counter
    return len([char for char, freq in Counter(input_string.lower()).items() if freq > 1])
|
Ablesius/Codewars | find_divisors.py | <filename>find_divisors.py
#! /usr/bin/env python
# Create a function named divisors/Divisors that takes an integer n > 1 and
# returns an array with all of the integer's divisors(except for 1 and the
# number itself), from smallest to largest. If the number is prime return the
# string '(integer) is prime' (null in C#) (use Either String a in Haskell and
# Result<Vec<u32>, String> in Rust).
# Example:
#
# divisors(12); // should return [2,3,4,6]
# divisors(25); // should return [5]
# divisors(13); // should return "13 is prime"
def divisors(number):
    """Return the divisors of `number` (excluding 1 and itself) from
    smallest to largest, or the string "<number> is prime" when there
    are none."""
    found = [candidate for candidate in range(2, number) if number % candidate == 0]
    if found:
        return found
    return str(number) + " is prime"
|
waripoornima/temeva_rest_client | setup.py | import os, sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def main():
    """Package py_temeva_rest_client for distribution (setuptools/distutils)."""
    setup(
        name='py_temeva_rest_client',
        version= '0.1',
        author='PoornimaWari',
        author_email='<EMAIL>',
        url='https://github.com/waripoornima/temeva_rest_client',
        description='temeva_rest_client: Front end for Spirent Temeva service ReST API',
        long_description = 'See https://github.com/waripoornima/temeva_rest_client',
        license='http://www.opensource.org/licenses/mit-license.php',
        keywords='Spirent Temeva Service ReST API',
        classifiers=['License :: OSI Approved :: MIT License',
                     'Operating System :: POSIX',
                     'Operating System :: Microsoft :: Windows',
                     'Operating System :: MacOS :: MacOS X',
                     'Topic :: Software Development :: Libraries',
                     'Topic :: Utilities',
                     'Programming Language :: Python',
                     'Programming Language :: Python :: 2.7',
                     'Programming Language :: Python :: 3'],
        packages=['py_temeva_rest_client'],
        install_requires=['requests>=2.7'],
        zip_safe=True,
    )
# Allow `python setup.py ...` to run the packaging entry point directly.
if __name__ == '__main__':
    main()
|
waripoornima/temeva_rest_client | py_temeva_rest_client/SpirentTemeva.py | """
Temeva Rest client is front end for Spirent Temeva licensing server ReST API
Supports 2.7 + to python 3+
"""
__Version__ = '0.1'
__Author__ = '<NAME>'
from datetime import datetime # we need it to create log file based on current time and date
"""
Modification history
====================
0.1 : 12/14/2021
- Initial code
"""
import requests # we need it for http session
import json # we need it to parse json data
import logging # we need it to create logger
import platform # we need it to print environment - python version
import sys # we need it to trace runtime error
import os # we need it to create log file
import functools # we need it to decorate the logs
# helper functions
# helps trace the runtime error
def trace_error():
    """Log the currently-handled exception's type, value, and source line number."""
    exc_type, exc_value, exc_traceback = sys.exc_info()
    logging.error('Error: {}.{},line:{}'.format(exc_type, exc_value,
                                                exc_traceback.tb_lineno))
# get the default organization id
def get_default_id():
    """Fetch and return the default Spirent organization id from the Temeva IAM API."""
    url = 'https://spirent.temeva.com/api/iam/organizations/default'
    response = requests.Session().get(url)
    if not response.ok:
        try:
            error_content = response.content
            response.raise_for_status()
        except Exception as error_massage:
            logging.critical('Failed to get the organization id {} {}'.format(error_massage, error_content))
            raise requests.HTTPError(error_massage, error_content)
    return response.json()['id']
# define wrapper class , we will call our function in that
def log_decorator(function):
    """
    Decorator that logs each call's arguments and its return value.

    :param function: function object to wrap
    :return: wrapped function with entry/exit logging
    """
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        logging.info('Calling function {} arguments {} {}'.format(function.__name__, args, kwargs))
        try:
            result = function(*args, **kwargs)
        except:
            # Record the traceback details, then propagate unchanged.
            trace_error()
            raise
        logging.info('Response of {} is {}'.format(function.__name__, result))
        return result
    return wrapper
# process the response
def process_response(raw_response):
    """
    Convert a requests response into text, JSON, or raw bytes by content type.

    :param raw_response: a requests.Response-like object
    :return: .text for text/*, .json() for application/json, .content otherwise
    """
    # Robustness fix: headers.get('content-type') can be None when the server
    # sends no content-type header; the original then crashed on `'text' in None`.
    content_type = raw_response.headers.get('content-type') or ''
    if 'text' in content_type:
        return raw_response.text
    if 'application/json' in content_type:
        return raw_response.json()
    return raw_response.content
class SpirentTemeva:
    """
    ReST client for the Spirent Temeva license server.

    Supports the HTTP verbs GET, PUT, POST and DELETE.

    Command syntax:
        temeva_object = SpirentTemeva(username, password, orgnization_id)

        1: Get the build version
            temeva_object.get('/lic/version')
        2: List users
            temeva_object.get('/iam/users')
        3: Get application id
            temeva_object.get('/inv/applications')
        4: License checkout
            user_params = {
                'orgnization_id': 'org id provided by spirent',
                'application_id': 'your application id ex: stc',
            }
            temeva_object.get('/lic/checkouts', params=user_params)
    """
    def __init__(self, username, password, organization_id='', base_url='', log_level='INFO', log_path=None):
        """
        :param username: temeva license server username
        :param password: temeva license server password
        :param organization_id: spirent organization id; fetched automatically
            via get_default_id() when omitted
        :param base_url: defaults to 'https://temeva.com'
        :param log_level: DEBUG/INFO/WARNING/ERROR/CRITICAL (default INFO)
        :param log_path: log directory; defaults to ./logs with a timestamped file
        :raises requests.HTTPError: when authorization fails
        """
        self.username = username
        self.password = password
        self.log_level = log_level
        self.log_path = log_path
        self.organization_id = organization_id
        # if base url is empty, use the temeva default
        self.__url = base_url or 'https://temeva.com'
        # if log path is not defined create one at abspath
        if self.log_path:
            self.log_path = os.path.join(self.log_path, 'logs')
        else:
            self.log_path = os.path.abspath('logs')
        # timestamp used in the log file name
        now = datetime.now()
        # BUG FIX: the original format 'H%M%S%m%d%Y' emitted a literal 'H'
        # instead of the hour; '%H' is the hour directive.
        current_time_date = now.strftime('%H%M%S%m%d%Y')
        # creating the log folder if it doesnt exist
        if not os.path.exists(self.log_path):
            os.mkdir(self.log_path)
        self.log_path = os.path.expanduser(self.log_path)
        # creating log file with current time and date
        self.log_file = os.path.join(self.log_path + '/temeva_rest_client' + current_time_date + '.log')
        # normalize the log level (anything unrecognized falls back to INFO)
        level_map = {'debug': 'DEBUG', 'error': 'ERROR',
                     'critical': 'CRITICAL', 'warning': 'WARNING'}
        self.log_level = level_map.get(log_level.lower(), 'INFO')
        # set the logger format
        logging.basicConfig(filename=self.log_file, filemode='w', level=self.log_level,
                            format='%(asctime)s %(levelname)-8s %(message)s')
        # print the python version
        logging.info('Python Version :{}'.format(platform.python_version()))
        logging.info('Executing SpirentTemeva __init__ ')
        logging.info('Temeva URL :{}'.format(self.__url))
        # suppress deeply nested module messages below CRITICAL
        # BUG FIX: the HTTP library's logger is named 'requests', not 'request'
        logging.getLogger('requests').setLevel(logging.CRITICAL)
        # getting default organization id, if not passed
        if not self.organization_id:
            self.organization_id = get_default_id()
        # Authorizing temeva license server
        logging.info('Authorizing Tevema license Server with USERNAME:{} PASSWORD:{} and ORGANIZATION_ID:{}'
                     .format(self.username, self.password, self.organization_id))
        # bearer token is filled in after a successful OAuth2 password grant
        # BUG FIX: was misspelled '__beare_token' (dead assignment)
        self.__bearer_token = None
        # set the header
        self.__header = {
            'Content-Type': 'application/json',
            'Accept': 'application/json'
        }
        # OAuth2 password-grant request body
        # BUG FIX: the password field held a redacted placeholder; send the
        # credential passed to the constructor.
        self.__data = json.dumps({
            'grant_type': 'password',
            'username': self.username,
            'password': self.password,
            'scope': self.organization_id
        })
        # append end point to the path
        path = '{}/api/iam/oauth2/token'.format(self.__url)
        # storing the session handle
        self.__session = requests.Session()
        # authorizing the session
        response = self.__session.post(path, headers=self.__header, data=self.__data)
        # error handling in case of failure
        if not response.ok:
            try:
                error_content = response.content
                response.raise_for_status()
            except Exception as error_massage:
                logging.critical('Failed to Authorize {} {}'.format(error_massage, error_content))
                raise requests.HTTPError(error_massage, error_content)
        if response.status_code == 200:
            logging.info('Successfully Authorized :) ')
        # extract the authentication token and attach it to every request
        self.__bearer_token = response.json()['access_token']
        self.__session.headers.update({'Authorization': 'Bearer {}'.format(self.__bearer_token)})
        # BUG FIX: use the configured base url rather than a hard-coded host
        version_api = '{}/api/lic/version'.format(self.__url)
        license_version = self.__session.get(version_api)
        logging.info("Platform Version:{}".format(license_version.json()['build_number']))
    def execute_request(self, httpverb, endpoints, **kwargs):
        """
        Execute one HTTP request and return the processed response.

        :param httpverb: 'get', 'put', 'post' or 'delete'
        :param endpoints: url endpoint, with or without a leading '/' or '/api'
        :param kwargs: params=dict, payload=dict / data=dict, or file=path
        :return: parsed response (json/text/bytes)
        :raises requests.HTTPError: on any non-2xx response
        """
        # append / if the endpoints doesn't start with '/'
        if not endpoints.startswith('/'):
            endpoints = '/' + endpoints
        # append /api if the endpoints doesn't start with '/api'
        if not endpoints.startswith('/api'):
            endpoints = '/api' + endpoints
        # add endpoints to the base url
        url = self.__url + endpoints
        payload = {}
        params = {}
        file_data = None
        raw_response = None
        # check for the payload and files in kwargs
        if kwargs:
            for key1 in kwargs.keys():
                if 'params' in key1:
                    # NOTE(review): requests normally takes a dict here; the
                    # JSON string is passed through as a raw query string —
                    # confirm against the server before changing.
                    params = json.dumps(kwargs[key1])
                if 'payload' in key1 or 'data' in key1:
                    # convert python object to json string
                    payload = json.dumps(kwargs[key1])
                elif 'file' in key1:
                    # in case you want to post the file
                    file_name = kwargs[key1]
                    file_data = [('mapFileFormFile', (file_name, open(file_name, 'rb'), 'application/json'))]
        # process the http verb
        if httpverb.lower() == 'get':
            raw_response = self.__session.get(url, params=params)
        elif httpverb.lower() == 'put':
            # BUG FIX: was self.__headers — an undefined attribute that raised
            # AttributeError on every PUT.
            self.__session.headers.update(self.__header)
            raw_response = self.__session.put(url, data=payload)
        elif httpverb.lower() == 'post':
            raw_response = self.__session.post(url, data=payload, files=file_data)
        elif httpverb.lower() == 'delete':
            raw_response = self.__session.delete(url)
        if not raw_response.ok:
            # ERROR handling
            try:
                raw_response.raise_for_status()
            except Exception as error_massage:
                error_content = raw_response.content
                logging.critical(str(error_massage) + ' ' + str(error_content))
                raise requests.HTTPError(error_massage, error_content)
        # process the response
        return process_response(raw_response)
    @log_decorator
    def get(self, end_points, **kwargs):
        """
        :param end_points: end points
        :param kwargs: e.g. params=dict
        :return: result in json or text
        """
        return self.execute_request('get', end_points, **kwargs)
    @log_decorator
    def post(self, end_points, **kwargs):
        """
        :param end_points: end point
        :param kwargs: e.g. file=filename or payload=dictionary
        :return: result in json or text
        """
        return self.execute_request('post', end_points, **kwargs)
    @log_decorator
    def put(self, end_points, **kwargs):
        """
        :param end_points: end point
        :param kwargs: e.g. payload=dictionary
        :return: result in json or text
        """
        return self.execute_request('put', end_points, **kwargs)
    @log_decorator
    def delete(self, end_points, **kwargs):
        """
        :param end_points: end points
        :return: result in json or text
        """
        return self.execute_request('delete', end_points, **kwargs)
def main():
    """Smoke test: authenticate and return the license server build number."""
    # user credentials (redacted in this copy)
    user = '<EMAIL>'
    password = '******'
    # license server version end point
    end_points = '/lic/version'
    # creating temeva object (authenticates against the server on construction)
    temeva_object = SpirentTemeva(user, password)
    # calling get method
    license_version = temeva_object.get(end_points)
    return license_version['build_number']
if __name__ == '__main__':
    print(main())
|
waripoornima/temeva_rest_client | examples/sample_temeva_rest_client_example.py | <gh_stars>0
"""
This is sample example for temeva license checkouts
Using temeva rest client
"""
from py_temeva_rest_client.SpirentTemeva import SpirentTemeva
# user credentials (redacted in this copy)
user = '<EMAIL>'
password = '****'
# creating temeva object (authenticates against the server on construction)
temeva_object = SpirentTemeva(user, password)
# get the license server version
end_points = '/lic/version'
# calling get method
license_version = temeva_object.get(end_points)
print('License Server Version: {}'.format(license_version['build_number']))
# get the organization id
end_points = '/iam/organizations'
organization_list = temeva_object.get(end_points)
organization_id = organization_list[0]['id']  # assumes at least one organization is visible
print('Organization Id: {}'.format(organization_id))
# get the application id for Spirent TestCenter
end_points ='/inv/applications'
applications_list = temeva_object.get(end_points)
print('Applications : {}'.format(applications_list))
|
amrufathy/Secure-Chat | utils.py | <filename>utils.py
import curses
import os
import signal
def showDialog(screen, title, message, is_error):
    """Draw a centered modal dialog on *screen*.

    Error dialogs block until a key is pressed, then raise SIGINT against the
    current process; informational dialogs return the dialog window.
    """
    screen_height, screen_width = screen.getmaxyx()
    footer = "Press Ctrl^c to exit" if is_error else ""
    dialog_width = max(len(title), len(message), len(footer)) + 2
    dialog_height = 8 if message else 3
    top = int(screen_height / 2) - int(dialog_height / 2)
    left = int(screen_width / 2) - int(dialog_width / 2)
    dialog_window = screen.subwin(dialog_height, dialog_width, top, left)
    dialog_window.border(0)
    title_attr = curses.color_pair(2) if is_error else curses.color_pair(1)
    dialog_window.addstr(1, 1, title, title_attr)
    if message:
        dialog_window.hline(2, 1, 0, dialog_width - 2)
        dialog_window.addstr(3, 1, message)
        if is_error:
            dialog_window.addstr(6, 1, footer)
    curses.curs_set(0)
    dialog_window.refresh()
    if not is_error:
        return dialog_window
    dialog_window.getch()
    os.kill(os.getpid(), signal.SIGINT)
|
amrufathy/Secure-Chat | parallel.py | <filename>parallel.py<gh_stars>0
import curses
import curses.ascii
from threading import Thread, Lock
import utils
mutex = Lock()
class CursesSendThread(Thread):
    """Daemon thread that gathers chat input from the textbox and sends it.

    The curses calls between mutex.acquire()/release() are order-sensitive:
    clear the input line, scroll the chat pane, echo the message locally,
    send it over the socket, then refresh both windows.
    """
    def __init__(self, sock, screen, chat_window, text_box_window, textbox):
        # sock: connected peer object exposing send()
        self.sock = sock
        self.screen = screen
        self.chat_window = chat_window
        self.text_box_window = text_box_window
        self.textbox = textbox
        Thread.__init__(self)
        self.daemon = True
    def run(self):
        height, width = self.chat_window.getmaxyx()
        while True:
            # Blocks until the validator returns BEL (Enter key, see below).
            chat_input = self.textbox.edit(self.input_validator)
            mutex.acquire()
            self.text_box_window.deleteln()
            self.text_box_window.move(0, 0)
            self.text_box_window.deleteln()
            self.chat_window.scroll(1)
            # chat_input[:-1] drops the last gathered character — presumably
            # the trailing terminator appended by edit(); confirm before changing.
            self.chat_window.addstr(height - 1, 0, chat_input[:-1], curses.color_pair(2))
            self.sock.send(chat_input[:-1])
            self.text_box_window.move(0, 0)
            self.chat_window.refresh()
            self.text_box_window.refresh()
            mutex.release()
    @staticmethod
    def input_validator(char):
        # Map navigation keys to textpad control codes; BEL terminates edit().
        if char == curses.KEY_HOME:
            return curses.ascii.SOH
        elif char == curses.KEY_END:
            return curses.ascii.ENQ
        elif char == curses.KEY_ENTER or char == ord('\n'):
            return curses.ascii.BEL
        return char
class CursesRecvThread(Thread):
    """Daemon thread that receives messages and appends them to the chat pane.

    A "__END__" payload means the peer requested to end the connection.
    """
    def __init__(self, sock, screen, chat_window, text_box_window):
        # sock: connected peer object exposing recv() and disconnect()
        self.sock = sock
        self.screen = screen
        self.chat_window = chat_window
        self.text_box_window = text_box_window
        Thread.__init__(self)
        self.daemon = True
    def run(self):
        height, width = self.chat_window.getmaxyx()
        while True:
            response = self.sock.recv()  # blocks until a full message arrives
            mutex.acquire()
            if response == "__END__":
                self.sock.disconnect()
                # showDialog(..., is_error=True) blocks on a keypress and then
                # raises SIGINT against this process, ending the program.
                utils.showDialog(self.chat_window, "Connection Terminated",
                                 "The client requested to end the connection",
                                 True)
            self.chat_window.scroll(1)
            self.chat_window.addstr(height - 1, 0, response, curses.color_pair(3))
            self.text_box_window.move(0, 0)
            self.chat_window.refresh()
            self.text_box_window.refresh()
            mutex.release()
|
amrufathy/Secure-Chat | DES.py | import random
import string
from Crypto.Cipher import DES
class myDES:
    """Thin wrapper around PyCrypto DES in ECB mode with a file-shared key.

    NOTE(review): ECB mode leaks plaintext patterns and the 8-char key is
    stored unencrypted in key.txt next to the program — acceptable for a
    class demo, not for real secrecy.
    """
    def __init__(self, TYPE):
        # TYPE == 1: generate a fresh random 8-char key and write it to key.txt;
        # any other value: load the key previously written by the peer.
        letters = string.ascii_letters + string.digits
        # NOTE(review): these replace() calls are no-ops — ascii_letters and
        # digits contain no backslashes or quotes.
        letters = letters.replace('\\', '').replace('\'', '').replace('\"', '')
        # self._key = '12345678'
        if TYPE == 1:
            self._key = ''.join(random.SystemRandom().choice(letters) for _ in range(8))
            with open('key.txt', 'w') as f:
                f.write(self._key)
        else:
            with open('key.txt', 'r') as f:
                self._key = f.read()
        self.__des = DES.new(self._key, DES.MODE_ECB)
    def encrypt(self, text):
        # Input length must be a multiple of the 8-byte DES block size.
        return self.__des.encrypt(text)
    def decrypt(self, text):
        # Inverse of encrypt(); same block-size requirement applies.
        return self.__des.decrypt(text)
|
amrufathy/Secure-Chat | server_driver.py | <gh_stars>0
import sys
from Server import Server
# Console demo: start the chat server on port 9000, accept one client, and
# relay console input to it, alternating send/recv each round.
server = Server()
server.start(9000)
print('Server is running...')
client = server.accept()  # blocks until a client connects
print('Received connection from', client.get_host())
while True:
    sys.stdout.write('>> ')
    client.send(str(input()))
    print('Client:', client.recv())
|
amrufathy/Secure-Chat | gui.py | #! /usr/bin/env python
import curses
import curses.ascii
import curses.textpad
import time
import parallel
from Client import Client
from Server import Server
SERVER = 0
CLIENT = 1
def main(screen):
    """Curses entry point: build the UI, connect as server or client, start I/O threads."""
    set_colors(screen)
    screen.clear()
    screen.border(0)
    chat_window = make_chat_window(screen)
    text_box_window, textbox = make_chat_input_window(screen)
    _type = show_options_window(screen)
    if _type == SERVER:
        server = start_server()
        # NOTE(review): 'global sock' is only declared in this branch; the
        # CLIENT branch assigns a local of the same name instead.
        global sock
        sock = server.accept()
    elif _type == CLIENT:
        sock = Client(('127.0.0.1', 9000), _type=1)
        sock.connect()
    screen.refresh()
    # Both threads are daemons; they do all the work while we idle below.
    parallel.CursesSendThread(sock, screen, chat_window, text_box_window, textbox).start()
    parallel.CursesRecvThread(sock, screen, chat_window, text_box_window).start()
    while True:
        # Keep the main thread alive without busy-spinning flat out.
        time.sleep(0)
def start_server():
    """Create a Server listening on port 9000 and return it."""
    server = Server()
    server.start(9000)
    return server
def set_colors(screen):
    """Register the UI color pairs and paint the background (no-op without color support)."""
    if curses.has_colors():
        palette = (
            (1, curses.COLOR_GREEN, curses.COLOR_BLACK),
            (2, curses.COLOR_RED, curses.COLOR_BLACK),
            (3, curses.COLOR_BLUE, curses.COLOR_BLACK),
            (4, curses.COLOR_BLACK, curses.COLOR_GREEN),
        )
        for pair_id, foreground, background in palette:
            curses.init_pair(pair_id, foreground, background)
        screen.bkgd(curses.color_pair(1))
def get_host(screen):
    """Prompt for a host name in a small centered window and return it.

    NOTE(review): getstr() returns bytes on Python 3 — callers should decode.
    """
    height, width = screen.getmaxyx()
    host_window = screen.subwin(3, 26, int(height / 2) - 1, int(width / 2) - 13)
    host_window.border(0)
    host_window.addstr(1, 1, "Host: ")
    host_window.refresh()
    # Temporarily enable echo / line buffering so the user sees what they type.
    curses.echo()
    curses.nocbreak()
    host = host_window.getstr(1, 7)
    curses.cbreak()
    curses.noecho()
    host_window.clear()
    screen.refresh()
    return host
def make_chat_window(screen):
    """Create and return the scrolling message pane inside the screen border."""
    rows, cols = screen.getmaxyx()
    window = screen.subwin(rows - 4, cols - 2, 1, 1)
    window.scrollok(True)
    return window
def make_chat_input_window(screen):
    """Create the single-line input textbox; return (subwindow, Textbox)."""
    height, width = screen.getmaxyx()
    text_box_window = screen.subwin(1, width - 25, height - 2, 1)
    textbox = curses.textpad.Textbox(text_box_window, insert_mode=True)
    # The rectangle is drawn on the parent screen to frame the 1-line subwindow.
    curses.textpad.rectangle(screen, height - 3, 0, height - 1, width - 24)
    text_box_window.move(0, 0)
    return text_box_window, textbox
def show_options_window(screen):
    """Modal menu: arrow keys toggle Server/Client, Enter confirms.

    Returns SERVER or CLIENT.
    """
    height, width = screen.getmaxyx()
    options_window = screen.subwin(6, 11, int(height / 2) - 3, int(width / 2) - 6)
    options_window.border(0)
    options_window.keypad(True)  # translate arrow keys into curses.KEY_* codes
    curses.curs_set(0)  # hide the cursor while the menu is up
    options_window.addstr(1, 1, "Run as:")
    pos = SERVER
    while True:
        # Highlight the currently selected row with color pair 4.
        if pos == SERVER:
            options_window.addstr(3, 2, "Server", curses.color_pair(4))
            options_window.addstr(4, 2, "Client")
        else:
            options_window.addstr(3, 2, "Server")
            options_window.addstr(4, 2, "Client", curses.color_pair(4))
        screen.refresh()
        key = options_window.getch()
        if key == curses.KEY_DOWN and pos == SERVER:
            pos = CLIENT
        elif key == curses.KEY_UP and pos == CLIENT:
            pos = SERVER
        # Enter key
        elif key == ord('\n'):
            break
    curses.curs_set(2)  # restore a visible cursor for the chat input
    options_window.clear()
    options_window.refresh()
    return pos
# Entry point: curses.wrapper sets up and restores the terminal around main(),
# even if main() raises.
curses.wrapper(main)
|
amrufathy/Secure-Chat | Client.py | import socket
from DES import myDES
class Client:
    """Encrypted TCP peer: each message is framed as an 8-char decimal length
    prefix followed by a DES-encrypted, space-padded payload."""
    def __init__(self, addr, _socket=None, _type=0):
        # addr: (host, port) tuple; _socket: already-connected socket (server
        # side) or None to create a fresh one (client side).
        # _type is forwarded to myDES: 1 generates the key file, 0 loads it.
        self.addr = addr
        if _socket is not None:
            self._socket = _socket
        else:
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.encryption = myDES(_type)
    def connect(self):
        """Connect the underlying socket to self.addr; errors are printed, not raised."""
        try:
            self._socket.connect(self.addr)
        except socket.error as e:
            print("Error connecting to address", str(e))
    def disconnect(self):
        """Shut down and close the socket."""
        self._socket.shutdown(socket.SHUT_RDWR)  # shutdown both reading and writing
        self._socket.close()
    def send(self, data):
        """Send a string: pad to a DES block multiple, send the 8-char length, then the payload."""
        assert type(data) == str
        data = self.__pad_text_for_encryption(data)
        data_length = len(data)
        data_length_str = self.__pad_number_to_eight_chars(data_length)
        # Send data length to be expected
        self.__send(data_length_str, len(data_length_str))
        # Send data itself
        self.__send(data, data_length)
    def recv(self):
        """Receive one framed message; returns the payload with padding stripped."""
        # First, receive message length
        length = self.__recv(8)
        # return message itself
        return self.__recv(length)
    def __send(self, data, length):
        # NOTE(review): on a partial send this re-encrypts the plaintext tail
        # and counts *ciphertext* bytes against the *plaintext* length —
        # correct only if send() always transmits everything in one call.
        data_sent_length = 0
        while data_sent_length < length:
            encrypted_data = self.encryption.encrypt(data[data_sent_length:])
            data_sent_partial = self._socket.send(encrypted_data)
            if data_sent_partial == 0:
                print("Connection closed")
                break
            data_sent_length += data_sent_partial
    def __recv(self, length):
        # NOTE(review): recv() may return chunks not aligned to the 8-byte DES
        # block size, which ECB decryption cannot handle; also the `== ''`
        # comparison can never match a bytes object.
        received_data, formatted_data = '', ''
        length = int(length)
        while len(received_data) < length:
            received_chunk = self._socket.recv(length - len(received_data))
            decrypted_data = self.encryption.decrypt(received_chunk).decode('utf-8')
            if not received_chunk or received_chunk == '':
                print("Connection closed")
                break
            received_data += decrypted_data
            formatted_data += decrypted_data.strip()
        return formatted_data
    @staticmethod
    def __pad_number_to_eight_chars(length):
        # Zero-pad the decimal length to exactly 8 characters.
        length_str = str(length)
        while len(length_str) < 8:
            length_str = '0' + length_str
        return length_str
    @staticmethod
    def __pad_text_for_encryption(text):
        # Pad with spaces up to a multiple of the 8-byte DES block size.
        while len(text) % 8 != 0:
            text += ' '
        return text
    def get_host(self):
        """Return the peer's host address."""
        return self.addr[0]
|
amrufathy/Secure-Chat | Server.py | import socket
from Client import Client
class Server:
    """Accept side of the chat demo: binds, listens, and wraps peers in Client objects."""
    def __init__(self):
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Allows port to be reused (avoids TIME_WAIT bind failures on restart)
        self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    def start(self, port):
        """Bind to 127.0.0.1:port and start listening."""
        self._socket.bind(('127.0.0.1', int(port)))
        self._socket.listen(10)  # 10 connections in queue
    def accept(self):
        """Block until a peer connects; return it wrapped in a Client (key loader side)."""
        client_socket, client_addr = self._socket.accept()
        return Client(client_addr, client_socket, _type=0)
|
amrufathy/Secure-Chat | client_driver.py | <filename>client_driver.py
import sys
from Client import Client
# Console demo: connect to the local chat server and relay console input,
# alternating recv/send each round (server speaks first).
_socket = Client(('127.0.0.1', 9000), _type=1)
_socket.connect()
print('Client connected to', _socket.get_host())
while True:
    print('Server:', _socket.recv())
    sys.stdout.write('>> ')
    _socket.send(str(input()))
|
MAliHassnain/ExtractTextFromImage | Main/urls.py | <gh_stars>0
from django.urls import path
from Main import views
# URL routes for the Main app: landing page plus the two OCR endpoints.
urlpatterns = [
    path('', views.Index, name='Index'),
    path('Main/ProceedImage', views.ProceedImage, name='ProceedImage'),
    path('Main/ProceedImageUrl', views.ProceedImageUrl, name='ProceedImageUrl'),
]
|
MAliHassnain/ExtractTextFromImage | Main/views.py | <reponame>MAliHassnain/ExtractTextFromImage<gh_stars>0
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
import TextFromImage.settings as Settings
from PIL import Image
import pytesseract
from django.core.files.storage import FileSystemStorage
import os
import glob
import requests
from io import BytesIO
import re
# Create your views here.
def Index(request):
    """Render the landing page."""
    return render(request, 'Index.html', {})
def ProceedImage(request):
    """AJAX endpoint: OCR an uploaded image and return the extracted text.

    Expects a POST with the file under 'Image'. Returns JSON
    {'status': True, 'Text': ...} on success, {'status': False} otherwise.
    """
    if request.method == 'POST':
        if request.is_ajax():
            # Clear previously uploaded files from the media folder.
            files = glob.glob('media/*')
            for f in files:
                os.remove(f)
            pytesseract.pytesseract.tesseract_cmd = Settings.Tesseract_OCR_Path
            image = request.FILES.get('Image')
            fs = FileSystemStorage()
            # BUG FIX: open the name FileSystemStorage actually saved under —
            # it can differ from image.name when a collision occurs.
            saved_name = fs.save(image.name, image)
            savedImage = Image.open(os.path.join('media', saved_name))
            text = pytesseract.image_to_string(savedImage)
            return JsonResponse({'status': True, 'Text': text})
    # BUG FIX: the fall-through response key was misspelled 'staus'.
    return JsonResponse({'status': False})
def ProceedImageUrl(request):
    """AJAX endpoint: fetch an image from a URL, OCR it, and return the text.

    Returns JSON {'status': True, 'Text': ...} on success and
    {'status': False} on a missing/unreachable URL or an OCR failure.
    """
    if request.method == 'POST':
        if request.is_ajax():
            url = request.POST['ImageUrl']
            if url:
                try:
                    # Normalize scheme-less and protocol-relative URLs.
                    if not re.match('(?:http|ftp|https)', url):
                        if not re.match('(?://)', url):
                            url = 'http://{}'.format(url)
                        else:
                            url = 'http:{}'.format(url)
                    response = requests.get(url)
                    if response.status_code != 404:
                        imageFile = Image.open(BytesIO(response.content))
                        text = pytesseract.image_to_string(imageFile)
                        return JsonResponse({'status': True, 'Text': text})
                    # BUG FIX: these failure responses were constructed but
                    # never returned, so the view silently returned None.
                    return JsonResponse({'status': False})
                except Exception:
                    return JsonResponse({'status': False})
            return JsonResponse({'status': False})
    # BUG FIX: key was misspelled 'staus'.
    return JsonResponse({'status': False})
|
FerreiraWalter/OBI-E-MARATONAS | OBI-2018-1/Xadrez.py | <filename>OBI-2018-1/Xadrez.py
#Xadrez
L = int(input())
C = int(input())
# The corner squares share a color exactly when L and C have the same parity.
print('1' if L % 2 == C % 2 else '0')
FerreiraWalter/OBI-E-MARATONAS | OBI-2018-1/Escadinha.py | #Escadinha
N = int(input())
num = input().split()
for i in range(len(num)):
num[i] = int(num[i])
resp = 1
i = 2
while i < len(num):
if (num[i] - num[i-1]) != (num[i-1] - num[i-2]):
resp += 1
i += 1
print(resp) |
FerreiraWalter/OBI-E-MARATONAS | OBI-2018-JR/Basquete_de_Robôs.py | <reponame>FerreiraWalter/OBI-E-MARATONAS
#Basquete de Robôs
# Score is worth more the farther the shot: <=800 -> 1, <=1400 -> 2, <=2000 -> 3.
D = int(input())
if D <= 800:
    print('1')
elif D <= 1400:
    print('2')
elif D <= 2000:
    print('3')
|
FerreiraWalter/OBI-E-MARATONAS | OBI-2017-2/Game-10.py | <filename>OBI-2017-2/Game-10.py
n = int(input())
d = int(input())
a = int(input())
# Distance from a to d moving forward on a circular track of n positions.
if a == d:
    steps = 0
elif a < d:
    steps = d - a
else:
    steps = (n - a) + 1 + (d - 1)
print('{}'.format(steps))
FerreiraWalter/OBI-E-MARATONAS | OBI-2017-JR/Drone_de_Entrega.py | <gh_stars>1-10
# Input: three box dimensions followed by two window dimensions.
box = sorted(int(input()) for _ in range(3))
window = sorted(int(input()) for _ in range(2))
# The box fits if its two smallest faces fit through the sorted window sides.
if box[0] <= window[0] and box[1] <= window[1]:
    print('S')
else:
    print('N')
FerreiraWalter/OBI-E-MARATONAS | OBI-2017-1/Teleférico.py | <filename>OBI-2017-1/Teleférico.py<gh_stars>1-10
C = int(input())
A = int(input())
# Each trip carries C - 1 people; -(-A // x) is ceiling division for positive ints.
print(-(-A // (C - 1)))
FerreiraWalter/OBI-E-MARATONAS | OBI-2018-JR/Álbum_da_Copa.py | #Álbum da Copa
N = int(input())
M = int(input())
album = []
for c in range(0, M):
x = int(input())
if x not in album:
album.append(x)
restante = len(album)
total = N - restante
print(total)
|
aleccunningham/streampy | messenger_with_files.py | #!/usr/bin/python
import sys
import socket
import getopt
def dial_socket(port, host='localhost'):
    """
    Bind a listening socket on (host, port) and block until one peer connects.

    Returns the accepted connection socket.

    BUG FIX: the original signature `(host='localhost', port)` placed a
    non-default parameter after a default one, which is a SyntaxError.
    Reordering keeps the existing positional calls `dial_socket(port)` working.
    NOTE(review): despite its name, this listens rather than dialing out.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind((host, port))
    s.listen(1)
    conn, addr = s.accept()
    # Initializes stream to socket and returns that connection
    return conn
def send_message(port):
    """Prompt for messages on stdin and send each over the socket until blank input.

    BUG FIX: the original had an unconditional `break` after the first
    successful send, so the loop never looped and `close(conn)` was
    unreachable on that path. The bare `except` is narrowed to OSError so
    KeyboardInterrupt/SystemExit are not swallowed.
    """
    conn = dial_socket(port)
    while True:
        print('Enter your message:')
        sys.stdout.flush()
        message = sys.stdin.readline().rstrip()
        if not message:
            break  # blank line ends the session
        try:
            conn.send(message.encode())
        except OSError:
            sys.exit()
    close(conn)
def send_file_request(port):
    """Ask the user for a filename and send the request over the socket."""
    conn = dial_socket(port)
    # BUG FIX: raw_input is Python 2 only; the rest of this script targets
    # Python 3, so use input().
    req = input('Which file do you want?')
    # BUG FIX: the original sent `req.encode` (the bound method object, never
    # called) instead of the encoded bytes.
    conn.send(req.encode())
    close(conn)
def request_handler(port, req=None):
    """Serve one file request: receive a filename, send back its first 1024 bytes.

    The `req` parameter is kept (with a default) for backward compatibility;
    its value is overwritten by the filename received from the peer.
    """
    conn = dial_socket(port)
    req = conn.recv(1024)
    # BUG FIX: the original opened `data` and sent via `sock`, both undefined
    # names; use the received filename and the accepted connection.
    with open(req, 'rb') as f:
        resp = f.read(1024)
    conn.send(resp)
    close(conn)
def close(conn):
    """Shut down the write side of *conn* and close it."""
    # Shutdown open socket connection
    conn.shutdown(socket.SHUT_WR)
    conn.close()
def process(command):
    """Dispatch a single-letter command ('M', 'F', 'X') to its handler thread.

    BUG FIX: the original mapped commands to already-constructed Thread
    objects and then *called* them — Thread instances are not callable, so
    every dispatch raised TypeError. It also referenced the undefined name
    `request_file` (the function here is send_file_request). Map to the
    target functions instead and start a fresh thread per request.
    """
    import threading  # local import: this module does not import threading at the top
    actions = {
        'M': send_message,
        'F': send_file_request,
        'X': close}
    threading.Thread(target=actions[command]).start()
def usage(script_name):
    """Print command-line help for this script."""
    # Adjacent string literals concatenate; the first line has no explicit '+'.
    print('Usage: py ' + script_name + '\n'
          ' [-l] Listen on <port> for events\n' +
          ' [-s] Connect to <port> as client\n' +
          ' [-p <port>\n' )
#store command line argument
if __name__ == "__main__":
    # BUG FIX: threading was never imported at module level.
    import threading

    # Background daemon that serves incoming file requests.
    file_listen = threading.Thread(name='listener', target=request_handler)
    file_listen.daemon = True
    file_listen.start()

    port = None
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'lp:')
    except getopt.GetoptError:
        # BUG FIX: usage() requires the script name argument.
        usage(sys.argv[0])
        sys.exit(2)
    for o, a in opts:
        if o in ('-p', '--port'):
            port = a
        else:
            usage(sys.argv[0])
            sys.exit(2)
    while True:
        print('Enter an option(\'m\', \'f\', \'x\'):\n' +
              '  (M)essage (send)\n' +
              '  (F)ile (request)\n' +
              '  e(X)it')
        cmd = sys.stdin.readline().rstrip().upper()
        # BUG FIXES: the original branched on an undefined name `option`, and
        # pre-built Thread objects (one with a positional 'send_file' that
        # landed in Thread's `group` parameter). process() now does all the
        # per-command thread dispatch.
        process(cmd)
|
aleccunningham/streampy | recv_messages.py | <filename>recv_messages.py
import threading
import os, sys
class RecvMessages(threading.Thread):
    """Thread that prints incoming messages until the peer disconnects."""
    def __init__(self, client_socket):
        threading.Thread.__init__(self)
        # client_socket: connected socket to read from
        self.client_socket = client_socket
    def run(self):
        while True:
            try:
                msg_bytes = self.client_socket.recv(1024)
            except:
                # Any receive failure ends this thread via SystemExit.
                sys.exit()
            if len(msg_bytes):
                print(msg_bytes.decode())
            else:
                # Zero-length read means the peer closed the connection;
                # os._exit hard-terminates the whole process, not just the thread.
                self.client_socket.close()
                os._exit(0)
|
aleccunningham/streampy | python_handler.py | #!/usr/bin/python
# Copyright 2017.
# Author: <NAME>
# Server usage:
# ./stream.py -s | --serve <port>
# Server is run in a daemonized thread, while clients
# spawn a new thread; the server will not close
# and exit until all client threads are closed
#
# Client usage:
# ./stream.py -c | --client -p | --port <port>
# Passing the -c flag will specify the file should
# be run as a client, and not a server daemon
import getopt
import logging
import os
import socket
import sys
import threading
# BUG FIX: `host = socket.gethostname() = 'localhost'` was a SyntaxError
# (assignment to a call); the intent per the comment was the local machine.
host = 'localhost'  # Local machine
buff = 1024  # Buffer size for data
def usage(stream):
    """Print command-line help; *stream* is the script name shown in the banner."""
    print('Usage: py ' + stream +
          ' [-h] | [--help]' +
          ' [-p] | [--port]' + '<connect server port>' +
          ' [-l] | [--listen]' + ' <port number>' +
          ' [-c | --client] Create a client instance, defaults to server')
def send_message(port, data):
    """
    Connect to the server socket and send *data*, then exit this thread.

    BUG FIX: the original hard-coded `dial_socket(port=3033)`, silently
    ignoring the `port` parameter.
    """
    sock = dial_socket(port)
    sock.send(data.encode())
    sys.exit()
def request_file(port):
    """
    Connects to a socket and writes a
    file to another thread that requested it
    """
    # NOTE(review): the `port` parameter is ignored — the port is hard-coded.
    sock = dial_socket(port=3033)
    print('Which file do you want?')
    sys.stdout.flush()
    data = sys.stdin.readline().rstrip()
    sock.send(data.encode())
    # NOTE(review): `condition` and `file_written` are not defined anywhere in
    # this module — reaching this wait-loop raises NameError. The TODO below
    # suggests this synchronization was never finished.
    with condition: #TODO
        while not file_written:
            condition.wait()
    _write_file()
def _write_file():
    """Receive a file in 1024-byte chunks and write it to disk.

    NOTE(review): this function cannot run as written — `resp` is used as the
    file name before it is assigned, `conn` is not defined in this scope, and
    the trailing `break` ends the loop after the first chunk.
    """
    f = open(resp, 'wb')
    while 1:
        print ("Receiving...")
        resp = conn.recv(1024)
        if not resp: break
        f.write(resp)
        f.close()
        print ("Done Receiving")
        break
    sys.exit()
def recieve_message(conn):
    """
    Listens to socket and returns
    any messages that are passed through it
    """
    # NOTE(review): the `conn` argument is immediately replaced by a new
    # listening connection, so the parameter is effectively unused. The
    # function name is misspelled ('recieve') but kept for compatibility.
    conn = listen_socket(port=3033)
    data = conn.recv(1024)
    print('RECIEVED: {}'.format(data.decode()))
def file_response(conn):
    """
    Specify a file to recieve from a
    producer thread on the socket, and save
    the bytes that are return from it
    """
    # NOTE(review): as with recieve_message, the parameter is overwritten.
    conn = listen_socket(port=3033)
    while 1:
        req = conn.recv(1023)
        if not req: break
        # NOTE(review): `req` is a bytes object and has no .read(); this was
        # presumably meant to open(req) first. The inner loop also never
        # updates `resp`, so it would spin forever once entered.
        resp = req.read(1024)
        while resp:
            print('Sending...')
            conn.send(resp)
        print('Done sending')
    conn.shutdown(socket.SHUT_WR)
def dial_socket(port, host='localhost'):
    """
    Connect to the server socket at (host, port) and return the socket.

    BUG FIX: the original signature `(host='localhost', port)` placed a
    non-default parameter after a default one — a SyntaxError. Reordering
    keeps positional calls `dial_socket(port)` working.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, port))
    return sock
def listen_socket(port, host='localhost'):
    """
    Create a socket, bind it on (host, port), and block until a peer connects.

    Returns the accepted connection.

    BUG FIX: same SyntaxError as dial_socket — a non-default parameter after
    a default one; reordered with `host` defaulted.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind((host, port))
    s.listen(1)
    conn, addr = s.accept()
    # Initializes stream to socket and returns that connection
    return conn
def exit(conn):
    """
    Close and exit the socket and script
    """
    # NOTE(review): shadows the builtin exit(); kept for interface compatibility.
    conn.shutdown(socket.SHUT_WR)
    conn.close()
    sys.exit(0)
def process(port):
    """
    Display the client menu in a loop and spawn a thread for each chosen event.

    BUG FIX: the original stored already-constructed Thread objects in the
    dict and then *called* them — Thread instances are not callable, so every
    dispatch raised TypeError. Map to the target functions and start a fresh
    thread per request instead.
    """
    targets = {'M': send_message,
               'F': request_file,
               'X': exit}
    while 1:
        print('Enter an option(\'m\', \'f\', \'x\'):')
        print(' (M)essage (send)')
        print(' (F)ile (request)')
        print(' e(X)it')
        event = sys.stdin.readline().rstrip().upper()
        threading.Thread(target=targets[event]).start()
def main():
    """Parse command-line options and run as a server daemon or as a client."""
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hscp:',
                                   ['help', 'serve', 'client', 'port='])
    except getopt.GetoptError as err:
        print(err)
        # BUG FIX: usage() requires the script name argument.
        usage(sys.argv[0])
        sys.exit(2)
    port = None
    # BUG FIX: `client, server = False` tried to unpack a bool (TypeError).
    client = server = False
    for o, a in opts:
        if o in ('-h', '--help'):
            usage(sys.argv[0])
            sys.exit()
        elif o in ('-p', '--port'):
            port = a
        # Take port value and start a server daemon on that address
        elif o in ('-s', '--serve'):
            server = True
            listen_socket(port)
        # Create a client and process it in order to connect to the socket
        # and transfer data
        elif o in ('-c', '--client'):
            client = True
            process(port)
        else:
            assert False, 'unhandled option'

if __name__ == "__main__":
    main()
|
aleccunningham/streampy | tests/file_compare.py | #! python3
# FileCompare
"""Compares files as either text files or binary files"""
class FileCompare():
    """Compare two files either as line-oriented text or as raw bytes."""

    def textFiles(self, file1, file2, verbose):
        """
        Compare two text files line by line (trailing whitespace
        ignored).  Return True when they differ, False when identical.

        Fixes vs. the original:
        - EOF was detected from the truthiness of the *rstripped* line,
          so a blank line in the middle of a file was mistaken for
          end-of-file and the remainders were never compared.  EOF is
          now detected on the raw readline() result ('' only at EOF).
        - Mismatching lines were printed with end='' even though their
          newline had been stripped, running the report together.
        - Files are closed via context managers even on error.
        """
        differ = False
        banner_printed = False
        line_num = 1
        with open(file1) as textfile1, open(file2) as textfile2:
            while True:
                raw1 = textfile1.readline()
                raw2 = textfile2.readline()
                if raw1 and raw2:
                    f1_line = raw1.rstrip()
                    f2_line = raw2.rstrip()
                    if f1_line != f2_line:
                        differ = True
                        if verbose:
                            if banner_printed is False:
                                print( 'Comparing ' + file1 + ' (f1) and ' + file2 + ' (f2)' )
                                banner_printed = True
                            print( "Mismatch in line " + str(line_num) + ":" )
                            print( " f1: " + f1_line )
                            print( " f2: " + f2_line )
                    line_num += 1
                else:
                    # At least one file hit EOF; any leftover content in
                    # the other means the files differ in length.
                    if raw1:
                        differ = True
                        if verbose:
                            print( file1 + " is larger than " + file2 )
                    elif raw2:
                        differ = True
                        if verbose:
                            print( file2 + " is larger than " + file1 )
                    break
        return differ

    def binFiles(self, file1, file2):
        """Return True when the files differ byte-wise, False otherwise."""
        with open(file1, 'rb') as binfile1, open(file2, 'rb') as binfile2:
            while True:
                f1_bytes = binfile1.read(4096)
                f2_bytes = binfile2.read(4096)
                if f1_bytes != f2_bytes:
                    # Covers both a content mismatch and one file ending
                    # before the other.
                    return True
                if not f1_bytes:
                    # Both streams exhausted simultaneously: identical.
                    return False
# TESTING!!!
# Manual smoke test: compare the two files named on the command line.
if __name__ == "__main__":
    # get the command line arguments
    import sys

    if len( sys.argv ) != 3 :
        print( 'Usage: py ' + sys.argv[0] + ' file1 file2' )
        sys.exit()
    comparator = FileCompare()
    # Text-mode comparison kept for reference:
    #result= comparator.textFiles( sys.argv[1], sys.argv[2], True )
    result = comparator.binFiles( sys.argv[1], sys.argv[2] )
    print( "(files are the same)" if result is False else "(files differ)" )
|
aleccunningham/streampy | tests/autograde-messenger-with-files-py.py | #! python3
"""
Automated testing
Assignment: Messenger with file transfers
1. Start the program with server command line args
2. Start the program with client command line args
3. Input data from standard input for the server and the client
4. Redirect standard output to files
5. Compare output files to input files
6. Compare transferred files to original files
14 points total
"""
import os, time
from file_compare import FileCompare
from within_file import WithinFile
points= 0
compare= FileCompare()
withinFile= WithinFile()
shell_command= 'cd server; rm one-liners.txt'
os.system( shell_command )
shell_command= 'cd client; rm Ameca_splendens.jpg'
os.system( shell_command )
print( 'Executing server and client with a single message...' )
shell_command= 'cd server; py ../input-writer.py 2 2 server-msg.txt 2>errors.txt | py -u ../messenger_with_files.py -l 6001 >server-recvd.txt &'
os.system( shell_command )
time.sleep( 1 )
shell_command= 'cd client; py ../input-writer.py 0 2 client-msg.txt 2>errors.txt | py -u ../messenger_with_files.py -l 6002 -p 6001 >client-recvd.txt'
os.system( shell_command )
print( 'execution completed; grading...' )
found= withinFile.searchText( 'client/client-recvd-single-ref.txt', 'client/client-recvd.txt' )
if found:
points+= 1
found= withinFile.searchText( 'server/server-recvd-single-ref.txt', 'server/server-recvd.txt' )
if found:
points+= 1
print( 'Executing server and client with multiple messages...' )
shell_command= 'cd server; py ../input-writer.py 2 2 server-msgs.txt 2>>errors.txt | py -u ../messenger_with_files.py -l 6001 >server-recvd.txt &'
os.system( shell_command )
time.sleep( 1 )
shell_command= 'cd client; py ../input-writer.py 0 2 client-msgs.txt 2>>errors.txt | py -u ../messenger_with_files.py -l 6002 -p 6001 >client-recvd.txt'
os.system( shell_command )
print( 'execution completed; grading...' )
found= withinFile.searchText( 'client/client-recvd-multiple-ref.txt', 'client/client-recvd.txt' )
if found:
points+= 2
found= withinFile.searchText( 'server/server-recvd-multiple-ref.txt', 'server/server-recvd.txt' )
if found:
points+= 2
print( 'Executing single file transfers between server and client...' )
shell_command= 'cd server; py ../input-writer.py 1 3 server-file.txt 2>>errors.txt | py -u ../messenger_with_files.py -l 6001 >server-recvd.txt &'
os.system( shell_command )
time.sleep( 1 )
shell_command= 'cd client; py ../input-writer.py 0 3 client-file.txt 2>>errors.txt | py -u ../messenger_with_files.py -l 6002 -p 6001 >client-recvd.txt'
os.system( shell_command )
print( 'execution completed; grading...' )
differ= compare.binFiles( 'client/Ameca_splendens.jpg', 'server/Ameca_splendens.jpg' )
if differ is False:
points+= 2
differ= compare.binFiles( 'client/one-liners.txt', 'server/one-liners.txt' )
if differ is False:
points+= 2
shell_command= 'cd server; rm one-liners.txt'
os.system( shell_command )
shell_command= 'cd client; rm Ameca_splendens.jpg'
os.system( shell_command )
print( 'Executing multiple transfers between server and client...' )
shell_command= 'cd server; py ../input-writer.py 1 3 server-all.txt 2>>errors.txt | py -u ../messenger_with_files.py -l 6001 >server-recvd.txt &'
os.system( shell_command )
time.sleep( 1 )
shell_command= 'cd client; py ../input-writer.py 0 3 client-all.txt 2>>errors.txt | py -u ../messenger_with_files.py -l 6002 -p 6001 >client-recvd.txt'
os.system( shell_command )
print( 'execution completed; grading...' )
differ= compare.binFiles( 'client/Ameca_splendens.jpg', 'server/Ameca_splendens.jpg' )
if differ is False:
points+= 1
differ= compare.binFiles( 'client/one-liners.txt', 'server/one-liners.txt' )
if differ is False:
points+= 1
found= withinFile.searchText( 'client/client-recvd-multiple-file-ref.txt', 'client/client-recvd.txt' )
if found:
points+= 1
found= withinFile.searchText( 'server/server-recvd-multiple-file-ref.txt', 'server/server-recvd.txt' )
if found:
points+= 1
print( 'Points: ' + str(points) );
|
aleccunningham/streampy | tests/within_file.py | #! python3
# WithinFile
"""Test whether the content of the first file is found within the content
of the second file"""
class WithinFile():
    """Test whether every line of the first file occurs, in order, within
    the second file (an ordered line-subsequence check)."""

    def searchText( self, file1, file2 ):
        """Return True if file1's lines appear in file2 in the same
        relative order while scanning file2 left to right, else False."""
        with open( file1 ) as f:
            needle_lines = f.readlines()
        with open( file2 ) as f:
            haystack_lines = f.readlines()
        total = len(haystack_lines)
        pos = 0
        for wanted in needle_lines:
            # Advance through the haystack until the current line matches.
            while pos < total and haystack_lines[pos] != wanted:
                pos += 1
            if pos == total:
                # Ran off the end of file2 without finding this line.
                return False
            # The pointer is deliberately NOT advanced past a match,
            # mirroring the original algorithm exactly.
        return True
# TESTING!!!
# Manual smoke test: check whether <text file> is contained in <within file>.
if __name__ == "__main__":
    # get the command line arguments
    import sys

    if len( sys.argv ) != 3 :
        print( 'Usage: py ' + sys.argv[0] + ' <text file> <within file>' )
        sys.exit()
    checker = WithinFile()
    found = checker.searchText( sys.argv[1], sys.argv[2] )
    print( "(content within file)" if found else "(content not found)" )
|
aleccunningham/streampy | retrieve_file.py | <gh_stars>0
import threading
import os, sys
class RetrieveFile(threading.Thread):
    """
    Worker thread that receives a file name over a socket and streams
    that file's bytes back over the same socket.

    Fixes vs. the original: ``file_name`` was an undefined name, a bare
    ``break`` appeared outside any loop (a SyntaxError), EOF was tested
    against the str ``""`` instead of empty bytes, and the first chunk
    was sent before entering the loop so empty files sent a stray b''.
    """

    def __init__(self, client_socket, data):
        threading.Thread.__init__(self)
        self.client_socket = client_socket
        # Request payload (the file name) supplied by the caller.
        # NOTE(review): run() re-reads the name from the socket and
        # never uses self.data — confirm which side of the protocol
        # is authoritative.
        self.data = data

    def run(self):
        # The peer sends the name of the file it wants.
        file_name = self.client_socket.recv(1024)
        if os.path.isfile(file_name):
            with open(file_name, 'rb') as f:
                send_bytes = f.read(1024)
                while send_bytes:
                    self.client_socket.send(send_bytes)
                    send_bytes = f.read(1024)
        self.client_socket.close()  # Close the connection
        # NOTE(review): terminates the whole process, not just this
        # thread — present in the original; confirm intended.
        os._exit(0)
|
aleccunningham/streampy | file_request_listener.py | import threading
import os, sys
class fileRequestListener(threading.Thread):
    # Thread that prompts the user for a file name, sends it to the
    # peer, and delegates the actual transfer to RetrieveFile.
    def __init__(self, client_socket):
        threading.Thread.__init__(self)
        self.client_socket= client_socket

    def run(self):
        # Ask on stdin which file to request from the remote side.
        filename = input('Which file do you want?')
        self.client_socket.send(filename.encode())
        # Deferred import — presumably to avoid a circular import at
        # module load time; verify against retrieve_file's imports.
        from retrieve_file import RetrieveFile
        t = RetrieveFile(self.client_socket, filename)
        t.start()
        t.join()
        self.client_socket.close() # Close the connection
        # NOTE(review): os._exit kills the whole process, not just this
        # thread — confirm this is intended.
        os._exit(0)
|
aleccunningham/streampy | tests/input-writer.py | # open a file
# read each line
# output the line to standard input
# pace the timing of output
#
# Feeds a file to stdout line by line with configurable delays, so it
# can be piped into an interactive program under test.
import sys
import time

argc= len( sys.argv )
if argc != 4 :
    print( 'Usage: py ' + sys.argv[0] + ' <initial delay> <pace> <input file>' )
    sys.exit()
# <initial delay>: seconds to wait before the first line;
# <pace>: seconds to wait between lines (0 disables pacing).
initial_delay= int(sys.argv[1])
pace= int(sys.argv[2])
input_file= open( sys.argv[3] )
if initial_delay:
    time.sleep( initial_delay )
for line in input_file:
    try:
        # Lines already carry their newline; suppress print's own.
        print( line, end= '' )
        sys.stdout.flush()
        #print( line.rstrip() )
    except BrokenPipeError: # process receiving input ended
        sys.exit()
    if pace:
        time.sleep( pace )
# Final grace period so the consumer can finish before stderr closes.
if pace:
    time.sleep( pace )
sys.stderr.close() # eliminate message at end
|
danicat/oxford-ai-gcp | 03_spark/als.py | <filename>03_spark/als.py
import logging
import sys
from pyspark.sql import SparkSession, SQLContext
from pyspark.mllib.recommendation import ALS
from pyspark.sql.types import StructType, StructField, StringType, FloatType

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# CloudSQL connection parameters are passed on the command line.
CLOUDSQL_INSTANCE_IP = sys.argv[1]
CLOUDSQL_DB_NAME = sys.argv[2]
CLOUDSQL_USER = sys.argv[3]
CLOUDSQL_PWD = sys.argv[4]

spark = SparkSession.builder.appName('ALS Recommender').getOrCreate()

# Load data from BigQuery.
df = spark.read \
    .format("bigquery") \
    .option("table", "danicat.oxford.pageviews") \
    .load()
df.createOrReplaceTempView("pageviews")

# Implicit-feedback ratings: every distinct (user, doc) pageview counts as 1.
df_pageviews = spark.sql("select distinct user_id, doc_id, 1 as rating from pageviews")
df_pageviews.show()

# ALS hyper-parameters.
rank = 8
seed = 5  # Fix: was `5L`, a Python 2 long literal (SyntaxError on Python 3).
iterations = 10
regularization_parameter = 0.1

logger.info("Training the ALS model...")
model = ALS.train(df_pageviews.rdd.map(lambda r: (int(r[0]), int(r[1]), r[2])).cache(),
                  rank=rank,
                  seed=seed,
                  iterations=iterations,
                  lambda_=regularization_parameter)
logger.info("ALS model built!")

# Calculate all predictions: top 10 products per user, flattened to
# (user, product, rating) triples.
predictions = model.recommendProductsForUsers(10) \
    .flatMap(lambda pair: pair[1]) \
    .map(lambda rating: (rating.user, rating.product, rating.rating))

TABLE_RECOMMENDATIONS = 'RECOMMENDATIONS'
jdbcUrl = 'jdbc:mysql://%s:3306/%s?user=%s&password=%s' % (CLOUDSQL_INSTANCE_IP, CLOUDSQL_DB_NAME, CLOUDSQL_USER, CLOUDSQL_PWD)

schema = StructType([StructField("user_id", StringType(), True), StructField("doc_id", StringType(), True), StructField("prediction", FloatType(), True)])
dfToSave = spark.createDataFrame(predictions, schema)
dfToSave.show()
# Overwrite the recommendations table in CloudSQL with the fresh batch.
dfToSave.write.jdbc(url=jdbcUrl, table=TABLE_RECOMMENDATIONS, mode='overwrite', properties={"useSSL": "false"})
|
danicat/oxford-ai-gcp | 04_cloud_function/main.py | from flask import jsonify
import sqlalchemy
def recommend(request):
    """
    HTTP Cloud Function: return the recommended doc_ids for a user.

    Expects a JSON body ``{"user_id": ...}`` and responds with
    ``{"items": [doc_id, ...]}`` ordered by descending prediction score
    (only predictions above 0.8 are returned).

    Fix: requests without a JSON body or without 'user_id' previously
    fell off the end of the function and returned None (a 500 in
    Flask); they now receive an empty item list.
    """
    db = sqlalchemy.create_engine(
        sqlalchemy.engine.url.URL(
            drivername="mysql+pymysql",
            username='rec-db',
            # NOTE(review): placeholder credential — inject via env var
            # or Secret Manager, never commit a real password.
            password='<PASSWORD>',
            database='rec-db',
            query={"unix_socket": "/cloudsql/danicat:us-west1:recsys-db"}
        ))
    request_json = request.get_json(silent=True)
    recs = []
    if request_json and 'user_id' in request_json:
        with db.connect() as conn:
            user_id = request_json['user_id']
            # Parameterized query: user_id is bound, not interpolated.
            stmt = sqlalchemy.text(
                "SELECT doc_id FROM RECOMMENDATIONS WHERE user_id=:user_id AND prediction > 0.8 ORDER BY prediction DESC"
            )
            rows = conn.execute(stmt, user_id=user_id).fetchall()
            for row in rows:
                recs.append(row[0])
    return jsonify({"items": recs})
|
danicat/oxford-ai-gcp | 02_dataflow/pageviews.py | #!/usr/bin/env python
from __future__ import absolute_import
import argparse
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
import json
def run(argv=None):
    """Build and run a streaming Beam pipeline that copies Pub/Sub
    messages (JSON objects with user_id/doc_id) into a BigQuery table.

    Args:
        argv: optional argument list; defaults to sys.argv.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--topic',
        type=str,
        help='Pub/Sub topic to read from',
        required=True)
    parser.add_argument(
        '--table',
        type=str,
        help='BigQuery table name',
        required=True)
    # Unrecognized args are forwarded to Beam as pipeline options.
    args, pipeline_args = parser.parse_known_args(argv)
    options = PipelineOptions(pipeline_args)
    options.view_as(SetupOptions).save_main_session=True
    # Unbounded source (Pub/Sub) requires streaming mode.
    options.view_as(StandardOptions).streaming=True
    p = beam.Pipeline(options=options)
    (p | 'Read from Pub/Sub' >> beam.io.ReadFromPubSub(topic=args.topic)
       | 'Convert to JSON' >> beam.Map(lambda message: json.loads(message))
       | 'Write to BigQuery' >> beam.io.WriteToBigQuery(
            args.table,
            schema='user_id:STRING,doc_id:STRING',
            create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
            write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND))
    result = p.run()
    # Streaming job: blocks until the job finishes or is cancelled.
    result.wait_until_finish()


if __name__ == "__main__":
    run()
|
amjadcp/bookingLine-grpA-miniProject | users/forms.py | <filename>users/forms.py
from django import forms
from django.contrib.auth.forms import UserCreationForm
from .models import *
class SignupForm(UserCreationForm):
    """User registration form styled with Bootstrap 'form-control' classes.

    Fix: ``Meta.fields`` and the password2 widget id contained the
    literal placeholder ``'<PASSWORD>'`` (an anonymization artifact);
    both are restored to ``'password2'`` so the confirmation field
    renders and validates.
    """

    class Meta:
        model = User
        fields = ('username', 'first_name', 'last_name', 'email', 'password1', 'password2')

    def __init__(self, *args, **kargs):
        super(SignupForm, self).__init__(*args, **kargs)
        # Apply consistent Bootstrap styling and placeholders.
        self.fields['username'].widget.attrs.update({
            'class' : 'form-control',
            'placeholder' : 'Username'
        })
        self.fields['first_name'].widget.attrs.update({
            'class' : 'form-control',
            'placeholder' : 'First Name',
            'name' : 'first_name',
            'id' : 'first_name'
        })
        self.fields['last_name'].widget.attrs.update({
            'class' : 'form-control',
            'placeholder' : 'Last Name',
            'name' : 'last_name',
            'id' : 'last_name',
        })
        self.fields['email'].widget.attrs.update({
            'class' : 'form-control',
            'placeholder' : 'Email',
            'name' : 'email',
            'id' : 'email',
        })
        self.fields['password1'].widget.attrs.update({
            'class' : 'form-control',
            'placeholder' : 'Password',
            'name' : 'password1',
            'id' : 'password1',
        })
        self.fields['password2'].widget.attrs.update({
            'class' : 'form-control',
            'placeholder' : 'Confirm Password',
            'name' : 'password2',
            'id' : 'password2',
        })
class ProfileForm(forms.ModelForm):
    """Service-provider profile form; every widget receives Bootstrap
    'form-control' styling plus matching name/id attributes."""

    class Meta:
        model = Profile
        fields = (
            'name', 'phone', 'email', 'adhar', 'pdf_adhar', 'pic', 'passbook',
            'address1', 'address2', 'district', 'state', 'pin'
        )

    # Placeholder text per field; the file/image upload fields
    # (pdf_adhar, pic, passbook) have no placeholder.
    _PLACEHOLDERS = {
        'name': 'Name',
        'phone': 'Phone Number',
        'email': 'Email',
        'adhar': 'Aadhar',
        'address1': 'Address1',
        'address2': 'Address2',
        'district': 'District',
        'state': 'State',
        'pin': 'PIN code',
    }

    def __init__(self, *args, **kargs):
        super(ProfileForm, self).__init__(*args, **kargs)
        # Apply the same attribute set to every field instead of
        # repeating one update() call per field.
        for field_name in self.Meta.fields:
            attrs = {
                'class': 'form-control',
                'name': field_name,
                'id': field_name,
            }
            placeholder = self._PLACEHOLDERS.get(field_name)
            if placeholder is not None:
                attrs['placeholder'] = placeholder
            self.fields[field_name].widget.attrs.update(attrs)
|
amjadcp/bookingLine-grpA-miniProject | serviceprovider/migrations/0004_book_message.py | # Generated by Django 4.0.2 on 2022-02-06 05:12
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the required `message` text field to Book.  default=0 only
    # backfills existing rows and is then dropped (preserve_default=False).

    dependencies = [
        ('serviceprovider', '0003_book_book_email_book_book_name_book_book_number'),
    ]

    operations = [
        migrations.AddField(
            model_name='book',
            name='message',
            field=models.TextField(default=0, max_length=200),
            preserve_default=False,
        ),
    ]
|
amjadcp/bookingLine-grpA-miniProject | serviceprovider/migrations/0006_remove_book_user.py | <reponame>amjadcp/bookingLine-grpA-miniProject<filename>serviceprovider/migrations/0006_remove_book_user.py<gh_stars>0
# Generated by Django 4.0.2 on 2022-02-06 05:55
from django.db import migrations
class Migration(migrations.Migration):
    # Removes the Book.user foreign key that migration 0005 added.

    dependencies = [
        ('serviceprovider', '0005_book_user'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='book',
            name='user',
        ),
    ]
|
amjadcp/bookingLine-grpA-miniProject | users/urls.py | <filename>users/urls.py
from django.urls import path
from .views import *
# Namespace for reversing these routes (e.g. 'users:dashboard').
app_name='users'

# Signup is split by account type; each type has its own dashboard.
urlpatterns = [
    path('signup-client', signup_client, name='signup-client'),
    path('signup-serviceprovider', signup_serviceprovider, name='signup-serviceprovider'),
    path('profile', profile, name='profile'),
    path('dashboard', dashboard, name='dashboard'),
    path('dashboard-client', dashboard_client, name='dashboard-client'),
]
amjadcp/bookingLine-grpA-miniProject | users/models.py | <filename>users/models.py
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.translation import gettext_lazy as _
from .utils import *
class User(AbstractUser):
    # Custom user model: authentication is by email address; the
    # username becomes optional (but still unique when present).
    username = models.CharField(
        max_length=50, blank=True, null=True, unique=True)
    email = models.EmailField(_('email address'), unique=True)

    # Log in with the email address instead of the username.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['username']

    def __str__(self):
        return f'{self.email}'
class AccType(models.Model):
    # Distinguishes 'client' accounts from 'serviceprovider' accounts;
    # set at signup and used to route users after login.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    type = models.CharField(max_length=20)

    def __str__(self):
        return f'{self.user}'
class Profile(models.Model):
    # Identity/contact details a service provider submits before
    # listing venues; reviewed by an admin (see ProfileStatus).
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=100)
    phone = models.CharField(max_length=10)
    email = models.EmailField()
    # 12-digit Aadhaar number plus an uploaded copy of the card.
    adhar = models.CharField(max_length=12)
    pdf_adhar = models.FileField(upload_to=get_aadhar_upload_to, null= False,)
    pic = models.ImageField(upload_to=get_pic_upload_to, null=False)
    # Bank passbook image — presumably for payout verification; confirm.
    passbook = models.ImageField(upload_to=get_passbook_upload_to, null=False)
    address1 = models.CharField(max_length=100)
    address2 = models.CharField(max_length=100)
    district = models.CharField(max_length=100)
    state = models.CharField(max_length=100)
    pin = models.CharField(max_length=10)

    def __str__(self):
        return f'{self.user}'
class ProfileStatus(models.Model):
    # Admin decision on a provider's submitted Profile.
    # NOTE(review): 'Deline' looks like a typo for 'Decline'; the views
    # only compare against 'Accepted', but correcting the spelling would
    # change stored values and requires a data migration — flagging only.
    choices = (
        ('Accepted', 'Accepted'),
        ('Deline', 'Deline')
    )
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    status = models.CharField(max_length=9, choices=choices)

    def __str__(self):
        return f'{self.user}'
amjadcp/bookingLine-grpA-miniProject | serviceprovider/forms.py | from dataclasses import field
from pyexpat import model
from django import forms
from .models import *
class DateForm(forms.ModelForm):
    # Booking form: contact details plus the requested date/time slot.
    # Placeholders double as input-format hints for the free-text fields.
    class Meta:
        model = Book
        fields = (
            'book_name',
            'book_number',
            'book_email',
            'date',
            'from_time',
            'to_time'
        )

    def __init__(self, *args, **kargs):
        super(DateForm, self).__init__(*args, **kargs)
        # Bootstrap styling for every widget.
        self.fields['book_name'].widget.attrs.update({
            'class' : 'form-control',
            'placeholder' : 'Your Name'
        })
        self.fields['book_number'].widget.attrs.update({
            'class' : 'form-control',
            'placeholder' : 'Your Phone Number'
        })
        self.fields['book_email'].widget.attrs.update({
            'class' : 'form-control',
            'placeholder' : 'Your Email'
        })
        self.fields['date'].widget.attrs.update({
            'class' : 'form-control',
            'placeholder' : 'Date (YYYY-MM-DD)'
        })
        self.fields['from_time'].widget.attrs.update({
            'class' : 'form-control',
            'placeholder' : 'From Time (HH:MM:SS)'
        })
        self.fields['to_time'].widget.attrs.update({
            'class' : 'form-control',
            'placeholder' : 'To Time (HH:MM:SS)'
        })
amjadcp/bookingLine-grpA-miniProject | serviceprovider/views.py | <gh_stars>0
from django.shortcuts import redirect, render
from django.contrib.auth.decorators import login_required
from .models import *
# Create your views here.
@login_required
def add_auditorium(request):
    """
    Create an Auditorium (plus its preview images) from the posted form;
    GET renders the empty form.

    Fixes vs. the original:
    - uses the instance returned by ``objects.create`` directly instead
      of re-querying all of the user's auditoriums and attaching the
      previews to *every* one with a matching name (which duplicated
      previews when two auditoriums shared a name);
    - drops the dead ``email = ''`` assignment.
    """
    if request.method == 'POST':
        user = request.user
        name = request.POST['name']
        catelog = request.FILES.get('catelog')
        about = request.POST['about']
        number = request.POST['number']
        email = request.POST['email']
        price = request.POST['price']
        street = request.POST['street']
        district = request.POST['district']
        state = request.POST['state']
        pin = request.POST['pin']
        post = request.POST['post']
        images = request.FILES.getlist('images')
        auditorium = Auditorium.objects.create(
            user = user,
            name = name,
            catelog = catelog,
            about = about,
            number = number,
            email = email,
            price = price,
            street = street,
            district = district,
            state = state,
            pin = pin,
            post = post
        )
        # Attach each uploaded gallery image to the new auditorium.
        for image in images:
            Preview.objects.create(
                auditorium = auditorium,
                images = image
            )
        return redirect('users:dashboard')
    return render(request, 'auditorium.html')
@login_required
def auditorium_dashboard(request, user, id):
    # Owner view of one auditorium and all bookings made against it.
    # `user` comes from the URL but the lookup is by auditorium id only.
    auditorium = Auditorium.objects.get(id=id)
    books = Book.objects.filter(auditorium=auditorium)
    context = {'auditorium':auditorium, 'books':books}
    return render(request, 'auditorium_dashboard.html', context=context)
@login_required
def update_auditorium(request,id):
    """
    Edit an existing Auditorium; a newly uploaded catalog image or
    preview gallery replaces the old one, otherwise the old files stay.

    Improvements vs. the original: removed the unused ``user`` local and
    replaced the row-by-row preview deletion loop with a single queryset
    ``delete()``.
    """
    if request.method == 'POST':
        auditorium = Auditorium.objects.get(id=id)
        auditorium.name = request.POST['name']
        catelog = request.FILES.get('catelog')
        if catelog != None:
            # Only replace the catalog image when a new file was uploaded.
            auditorium.catelog = catelog
        auditorium.about = request.POST['about']
        auditorium.number = request.POST['number']
        auditorium.email = request.POST['email']
        auditorium.price = request.POST['price']
        auditorium.street = request.POST['street']
        auditorium.district = request.POST['district']
        auditorium.state = request.POST['state']
        auditorium.pin = request.POST['pin']
        auditorium.post = request.POST['post']
        auditorium.save()
        images = request.FILES.getlist('images')
        if images != []:
            # A new gallery replaces the old one wholesale.
            Preview.objects.filter(auditorium=auditorium).delete()
            for image in images:
                Preview.objects.create(
                    auditorium = auditorium,
                    images = image
                )
        # NOTE(review): relative URL built by hand — consider reverse().
        return redirect(f'../auditorium-dashboard/{request.user}/{id}')
    data = Auditorium.objects.get(id=id)
    return render(request, 'update_auditorium.html', {'data' : data})
amjadcp/bookingLine-grpA-miniProject | serviceprovider/models.py | <reponame>amjadcp/bookingLine-grpA-miniProject
import re
from django.db import models
from users.models import *
from .utils import *
# Create your models here.
class Auditorium(models.Model):
    # A bookable venue owned by a service-provider User.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=30)
    # Cover image; stored under static/auditorium/<user>/<name>/catelog/.
    catelog = models.ImageField(upload_to=get_catelog_upload_to)
    about = models.TextField(max_length=300)
    number = models.CharField(max_length=15)
    email = models.EmailField(null=True, blank=True)
    price = models.DecimalField(max_digits=10, decimal_places=2)
    # Postal address, split into components.
    street = models.CharField(max_length=20)
    district = models.CharField(max_length=20)
    state = models.CharField(max_length=20)
    pin = models.CharField(max_length=10)
    post = models.CharField(max_length=20)

    def __str__(self) -> str:
        return f'{self.name}-{self.user}'
class Preview(models.Model):
    # One gallery image belonging to an Auditorium (an auditorium may
    # have several Preview rows).
    auditorium = models.ForeignKey(Auditorium, on_delete=models.CASCADE)
    images = models.ImageField(upload_to=get_preview_upload_to)

    def __str__(self) -> str:
        return f'{self.auditorium}'
class Book(models.Model):
    # A booking request for an auditorium placed by a client user.
    auditorium = models.ForeignKey(Auditorium, on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # Contact details entered on the booking form (may differ from the
    # account's own details).
    book_name = models.CharField(max_length=30)
    book_number = models.CharField(max_length=15)
    book_email = models.EmailField()
    # Requested day and time slot.
    date = models.DateField()
    from_time = models.TimeField()
    to_time = models.TimeField()
    message = models.TextField(max_length=200)
    # Presumably flags a confirmed/answered booking; not updated
    # anywhere in the visible code — confirm.
    connected = models.BooleanField(default=False)

    def __str__(self) -> str:
        return f'{self.auditorium}'
|
amjadcp/bookingLine-grpA-miniProject | users/views.py | <gh_stars>0
from django.http import HttpRequest
from django.shortcuts import render, redirect
# from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from datetime import date, datetime
from .forms import *
from .models import *
from serviceprovider.models import Auditorium, Book
# Create your views here.
#client
def signup_client(request):
    """
    Register a new client account and tag it with AccType 'client' so
    login can route to the client dashboard.

    Cleanup: removed the stray debug ``print("Success")``.
    """
    form = SignupForm()
    if request.method == 'POST':
        form = SignupForm(request.POST)
        if form.is_valid():
            email = form.cleaned_data['email']
            form.save()
            user = User.objects.get(email=email)
            acc = AccType.objects.create(user=user, type='client')
            acc.save()
            # NOTE(review): relative redirect target — consider a named URL.
            return redirect('accounts/login')
    return render(request, 'signup_client.html', {'form': form})
#service provider
def signup_serviceprovider(request):
    """
    Register a new service-provider account and tag it with AccType
    'serviceprovider' so login can route to the provider dashboard.

    Cleanup: removed the stray debug ``print("Success")`` (consistent
    with signup_client).
    """
    form = SignupForm()
    if request.method == 'POST':
        form = SignupForm(request.POST)
        if form.is_valid():
            email = form.cleaned_data['email']
            form.save()
            user = User.objects.get(email=email)
            acc = AccType.objects.create(user=user, type='serviceprovider')
            acc.save()
            # NOTE(review): relative redirect target — consider a named URL.
            return redirect('accounts/login')
    return render(request, 'signup_service.html', {'form': form})
@login_required(login_url='users:signup-serviceprovider')
def dashboard(request):
    """
    Service-provider dashboard: profile review status, payment flag and
    the provider's auditoriums.

    Improvements vs. the original: the bare ``except:`` is narrowed to
    ``ProfileStatus.DoesNotExist`` (the only expected failure of the
    ``get``), and the redundant ``abs()`` around the always-nonnegative
    membership age was dropped.
    """
    user = User.objects.get(email=request.user)
    join_date = user.date_joined.date()
    # First year of membership is free; the template consumes 'pay' as
    # a string flag.
    if (date.today() - join_date).days <= 365:
        pay = 'true'
    else:
        pay = 'false'
    auditoriums = Auditorium.objects.filter(user=user)
    try:
        profile_status = ProfileStatus.objects.get(user=user)
        if profile_status.status == "Accepted":
            status = 'accepted'
        else:
            status = 'deline'
    except ProfileStatus.DoesNotExist:
        # No admin decision has been recorded yet.
        status = 'pending'
    context = {
        'profile' : Profile.objects.get(user=user).pic,
        'first_name' : user.first_name,
        'last_name' : user.last_name,
        'username' : user.username,
        'email' : user.email,
        'status' : status,
        'pay' : pay,
        'auditoriums': auditoriums,
        'date' : date.today()
    }
    return render(request, 'dashboard_service.html', context=context)
@login_required(login_url='users:signup-serviceprovider')
def profile(request):
    """
    Collect the service provider's identity/bank/address details.

    Cleanup vs. the original: removed the unused
    ``form = ProfileForm(request.POST)`` (it was never validated or
    read) and the redundant ``profile.save()`` — ``objects.create``
    already persists the row.

    NOTE(review): fields are read straight from POST/FILES without form
    validation, so a missing key raises KeyError — confirm acceptable.
    """
    if request.method == 'POST':
        Profile.objects.create(
            user=request.user,
            name=request.POST['name'],
            phone=request.POST['phone'],
            email=request.POST['email'],
            adhar=request.POST['adhar'],
            pdf_adhar=request.FILES['pdf_adhar'],
            pic=request.FILES['pic'],
            passbook=request.FILES['passbook'],
            address1=request.POST['address1'],
            address2=request.POST['address2'],
            district=request.POST['district'],
            state=request.POST['state'],
            pin=request.POST['pin']
        )
        return redirect('users:dashboard')
    return render(request, 'profile.html', {'form': ProfileForm()})
@login_required(login_url='users:signup-client')
def dashboard_client(request):
    # Client dashboard: the user's bookings plus today's date so the
    # template can distinguish past from upcoming bookings.
    user = request.user
    books = Book.objects.filter(user=user)
    # Re-fetch the full User row for the profile fields shown on the page.
    user = User.objects.get(email=user)
    context = {
        'books':books,
        'user':user,
        'date' : date.today()
    }
    return render(request, 'dashboard_client.html', context=context)
amjadcp/bookingLine-grpA-miniProject | serviceprovider/urls.py | from unicodedata import name
from django.urls import path
from .views import *
# Namespace for reversing these routes (e.g. 'serviceprovider:add-auditorium').
app_name='serviceprovider'

urlpatterns = [
    path('add-auditorium', add_auditorium, name='add-auditorium'),
    path('update-auditorium/<int:id>', update_auditorium, name='update-auditorium'),
    path('auditorium-dashboard/<str:user>/<int:id>', auditorium_dashboard, name='auditorium-dashboard'),
]
amjadcp/bookingLine-grpA-miniProject | home/models.py | <gh_stars>0
from operator import mod
from django.db import models
# Create your models here.
class Contact(models.Model):
    # A message submitted through the public contact form.
    name = models.CharField(max_length=20)
    email = models.EmailField()
    subject = models.CharField(max_length=25)
    message = models.TextField(max_length=100)

    def __str__(self) -> str:
        return f'{self.message}'
|
amjadcp/bookingLine-grpA-miniProject | serviceprovider/migrations/0001_initial.py | <reponame>amjadcp/bookingLine-grpA-miniProject<gh_stars>0
# Generated by Django 4.0.2 on 2022-02-05 07:20
from django.db import migrations, models
import django.db.models.deletion
import serviceprovider.utils
class Migration(migrations.Migration):
    # Initial schema for the serviceprovider app: Auditorium (venue),
    # Preview (gallery image) and Book (booking request, pre-contact-fields).

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Auditorium',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('catelog', models.ImageField(upload_to=serviceprovider.utils.get_catelog_upload_to)),
                ('about', models.TextField(max_length=300)),
                ('number', models.CharField(max_length=15)),
                ('email', models.EmailField(blank=True, max_length=254, null=True)),
                ('price', models.DecimalField(decimal_places=2, max_digits=10)),
                ('street', models.CharField(max_length=20)),
                ('district', models.CharField(max_length=20)),
                ('state', models.CharField(max_length=20)),
                ('pin', models.CharField(max_length=10)),
                ('post', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='Preview',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('images', models.ImageField(upload_to=serviceprovider.utils.get_preview_upload_to)),
                ('auditorium', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='serviceprovider.auditorium')),
            ],
        ),
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField()),
                ('from_time', models.TimeField()),
                ('to_time', models.TimeField()),
                ('auditorium', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='serviceprovider.auditorium')),
            ],
        ),
    ]
|
amjadcp/bookingLine-grpA-miniProject | serviceprovider/utils.py | <reponame>amjadcp/bookingLine-grpA-miniProject
def get_catelog_upload_to(instance, filename):
    """Upload path for an auditorium's catalog image:
    static/auditorium/<user>/<auditorium name>/catelog/<filename>."""
    return f"static/auditorium/{instance.user}/{instance.name}/catelog/{filename}"
def get_preview_upload_to(instance, filename):
    """Upload path for an auditorium preview image:
    static/auditorium/<user>/<auditorium name>/preview/<filename>."""
    owner = instance.auditorium
    return f"static/auditorium/{owner.user}/{owner.name}/preview/{filename}"
|
amjadcp/bookingLine-grpA-miniProject | home/views.py | <gh_stars>0
from django.http import JsonResponse
from django.core.mail import send_mail
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from serviceprovider.models import Book, Preview
from users.models import *
from users.views import *
from .models import Contact
import random
# from serviceprovider.forms import *
# Create your views here.
def contact(request):
    """
    Persist a contact-form submission and return to the home page.

    Fix: non-POST requests previously fell off the end of the view and
    returned None, which Django rejects with a ValueError; every request
    now ends with the redirect.
    """
    if request.method == 'POST':
        Contact.objects.create(
            name=request.POST['name'],
            email=request.POST['email'],
            subject=request.POST['subject'],
            message=request.POST['message']
        )
    return redirect('home:home')
def index(request):
    """Render the public landing page; 'check' flags the anonymous view."""
    return render(request, 'index.html', context={'check': True})
@login_required(login_url='/')
def after_login(request):
    # Post-login router.  Service providers go to their dashboard (or to
    # profile completion if no Profile exists yet); clients land on a
    # home page showcasing one randomly chosen auditorium.
    users = User.objects.all()
    user = request.user
    acc_type = AccType.objects.get(user=user)
    if acc_type.type == 'serviceprovider':
        try:
            status = Profile.objects.get(user=user)
        # NOTE(review): bare except — intended for Profile.DoesNotExist
        # but swallows everything; consider narrowing.
        except:
            return redirect('users:profile')
        return redirect('users:dashboard')
    else:
        # Pick a random user, then a random auditorium of that user;
        # fall back to the full listing when either choice is empty.
        try:
            user = random.choice(users)
        except IndexError:
            return redirect('home:auditoriums')
        auditoriums_ = Auditorium.objects.filter(user=user)
        try:
            auditorium = random.choice(auditoriums_)
        except IndexError:
            return redirect('home:auditoriums')
        previews = Preview.objects.filter(auditorium=auditorium)
        # NOTE(review): previews[0] raises IndexError when the chosen
        # auditorium has no preview images — confirm acceptable.
        context = {
            'client':request.user,
            'check' : False,
            'auditorium' : auditorium,
            'previews' : previews,
            'preview0' : previews[0]
        }
        return render(request, 'home.html', context=context)
# @login_required
def auditoriums(request):
    # Public listing: auditoriums of providers whose profile status is
    # 'Accepted' and whose account is within the first (free) year.
    users = []
    auditorium_list = []
    profile_statuss = ProfileStatus.objects.filter(status='Accepted')
    for profile_status in profile_statuss:
        join_date = profile_status.user.date_joined.date()
        if abs((date.today() - join_date).days) <= 365:
            users.append(profile_status.user)
    for user in users:
        # One queryset per provider; the template flattens the nesting.
        auditorium_list.append(Auditorium.objects.filter(user=user))
    return render(request, 'auditoriums.html', {'auditorium_list': auditorium_list})
@login_required(login_url='users:signup-client')
def auditorium_info(request):
    """Show the detail/booking page for the auditorium chosen on the list page."""
    if request.method != "POST":
        # The original built `context` only under POST and then crashed
        # with a NameError on GET; send stray GETs back to the listing.
        return redirect('home:auditoriums')
    id = request.POST['btn']
    auditorium = Auditorium.objects.get(id=id)
    previews = Preview.objects.filter(auditorium=auditorium)
    context = {
        'auditorium' : auditorium,
        'client' : request.user,
        'previews' : previews,
        'preview0' : previews[0],  # assumes at least one preview exists -- TODO confirm
        # Selectable ranges for the booking form widgets.
        'years' : list(range(2022,2051)),
        'months' : list(range(1,13)),
        'days' : list(range(1,32)),
        'froms' : list(range(7, 23)),   # start hours 07..22
        'tos' : list(range(8, 24)),     # end hours 08..23
    }
    return render(request, 'auditorium_info.html', context=context)
@login_required(login_url='users:signup-client')
def auditorium_book(request):
    """Create a booking and email both parties.

    Responds with JSON {'message': '0'} when the requested slot collides
    with an existing booking on the same date, {'message': '1'} on success.
    """
    if request.method == 'POST':
        id = request.POST['id']
        auditorium = Auditorium.objects.get(id=id)
        book_name = request.POST['book_name']
        book_number = request.POST['book_number']
        book_email = request.POST['book_email']
        year = request.POST['year']
        month = request.POST['month']
        day = request.POST['day']
        date = f'{year}-{month}-{day}'
        # Zero-pad the hour: str() of a stored TimeField yields '07:00:00',
        # so the old unpadded '7:00:00' never compared equal, and unpadded
        # lexicographic comparison ordered '9:00:00' after '10:00:00'.
        from_time = f'{int(request.POST["from_time"]):02d}:00:00'
        to_time = f'{int(request.POST["to_time"]):02d}:00:00'
        message = request.POST['message']
        books = Book.objects.filter(auditorium=auditorium)
        for book in books:
            if str(book.date) == date:
                # Proper [from, to) interval-overlap test; the old check
                # (existing end > new start) also rejected disjoint later slots.
                if str(book.from_time) < to_time and str(book.to_time) > from_time:
                    print("Time slot not available")
                    return JsonResponse({'message' : '0'})
        Book.objects.create(
            auditorium=auditorium,
            user = request.user,
            book_name=book_name,
            book_number=book_number,
            book_email=book_email,
            date=date,
            from_time=from_time,
            to_time=to_time,
            message=message
        )
        # Notify the auditorium owner of the new request...
        send_mail(
            f'Booking Request from {request.user}(bookingLine)',
            f'''Name : {book_name}
            Contact Number : {book_number}
            Contact Email : {book_email}
            Date : {date}
            Time Slot : {from_time} - {to_time}
            Message : {message}''',
            '<EMAIL>',
            ['<EMAIL>', auditorium.user],
            fail_silently=False,
        )
        # ...and send the client a confirmation copy.
        send_mail(
            f'Booking Request Send (bookingLine)',
            f'''Auditorium : {auditorium.name}
            Name : {book_name}
            Contact Number : {book_number}
            Contact Email : {book_email}
            Date : {date}
            Time Slot : {from_time} - {to_time}
            Message : {message}
            Service Provider will be contact you soon
            (bookingLine)''',
            '<EMAIL>',
            [request.user, '<EMAIL>'],
            fail_silently=False,
        )
        return JsonResponse({'message' : '1'})
    # Non-POST requests previously returned None; answer "not booked".
    return JsonResponse({'message' : '0'})
@login_required(login_url='users:signup-client')
def auditorium_cancel(request,id):
    """Delete booking `id` and email a cancellation notice to both parties.

    NOTE(review): the row is deleted before either mail is sent, so a
    mail failure still loses the booking record -- confirm acceptable.
    """
    book = Book.objects.get(id=id)
    book.delete()
    # Tell the auditorium owner the slot is free again.
    send_mail(
        f'Booking Request Canceled by {request.user}(bookingLine)',
        f'''Name : {book.book_name}
        Contact Number : {book.book_number}
        Contact Email : {book.book_email}
        Date : {book.date}
        Time Slot : {book.from_time} - {book.to_time}
        Message : {book.message}
        {request.user} canceled their time slot
        (bookingLine)''',
        '<EMAIL>',
        [book.auditorium.user, '<EMAIL>'],
        fail_silently=False,
    )
    # Confirm the cancellation to the client.
    send_mail(
        f'Booking Cancel Request Send (bookingLine)',
        f'''Auditorium : {book.auditorium.name}
        Name : {book.book_name}
        Contact Number : {book.book_number}
        Contact Email : {book.book_email}
        Date : {book.date}
        Time Slot : {book.from_time} - {book.to_time}
        Message : {book.message}
        Your time slot canceled
        (bookingLine)''',
        '<EMAIL>',
        [request.user, '<EMAIL>'],
        fail_silently=False,
    )
    return redirect('users:dashboard-client')
@login_required(login_url='users:signup-client')
def connected(request, id):
    """Mark booking `id` as connected, then return to the provider dashboard."""
    booking = Book.objects.get(id=id)
    booking.connected = True
    booking.save()
    return redirect(f'../serviceprovider/auditorium-dashboard/{booking.auditorium.user}/{booking.auditorium.id}')
@login_required(login_url='users:signup-client')
def remove(request, id):
    """Delete booking `id`, then return to the provider dashboard."""
    booking = Book.objects.get(id=id)
    booking.delete()
    return redirect(f'../serviceprovider/auditorium-dashboard/{booking.auditorium.user}/{booking.auditorium.id}')
|
amjadcp/bookingLine-grpA-miniProject | serviceprovider/admin.py | <reponame>amjadcp/bookingLine-grpA-miniProject
from django.contrib import admin
from .models import *
# Register your models here.
# Expose the service-provider models in the Django admin site.
for _model in (Auditorium, Preview, Book):
    admin.site.register(_model)
amjadcp/bookingLine-grpA-miniProject | users/utils.py | <filename>users/utils.py
def get_pic_upload_to(instance, filename):
    """Build the upload path for a profile picture, namespaced by user."""
    return f"static/profile/{instance.user}/pic/{filename}"
def get_aadhar_upload_to(instance, filename):
    """Build the upload path for an Aadhaar scan; also remembers the filename."""
    instance.filename = filename
    return f"static/profile/{instance.user}/aadhar/{filename}"
def get_passbook_upload_to(instance, filename):
    """Build the upload path for a bank passbook scan; also remembers the filename."""
    instance.filename = filename
    return f"static/profile/{instance.user}/passbook/{filename}"
amjadcp/bookingLine-grpA-miniProject | serviceprovider/migrations/0003_book_book_email_book_book_name_book_book_number.py | # Generated by Django 4.0.2 on 2022-02-06 04:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds booker contact fields to the Book model.

    The `default=0` values were one-off backfills for existing rows;
    `preserve_default=False` drops them from the final field definition.
    """

    dependencies = [
        ('serviceprovider', '0002_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='book',
            name='book_email',
            field=models.EmailField(default=0, max_length=254),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='book',
            name='book_name',
            field=models.CharField(default=0, max_length=30),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='book',
            name='book_number',
            field=models.CharField(default=0, max_length=15),
            preserve_default=False,
        ),
    ]
|
amjadcp/bookingLine-grpA-miniProject | home/urls.py | <reponame>amjadcp/bookingLine-grpA-miniProject
from django.urls import path
from .views import *
app_name='home'
# Routes for the public-facing app; reverse with the 'home:' namespace.
urlpatterns = [
    path('', index, name='index'),
    path('home', after_login, name='home'),
    path('auditorium-info', auditorium_info, name='auditorium-info'),
    path('auditoriums', auditoriums, name='auditoriums'),
    path('auditorium-book', auditorium_book, name='auditorium-book'),
    path('auditorium-cancel/<int:id>', auditorium_cancel, name='auditorium-cancel'),
    path('connected/<int:id>', connected, name='connected'),
    path('remove/<int:id>', remove, name='remove'),
    path('contact', contact, name='contact'),
] |
avsavras/FileLock | filelock/filelock.py | # -*- coding: utf-8 -*-
import errno
import logging
import os
import time
logger = logging.getLogger(__name__)
class FileLockException(Exception):
    """Raised when the lock cannot be acquired before the timeout elapses."""
class FileLock(object):
    """File-based mutual exclusion usable as a context manager.

    The lock is the exclusive creation of ``<folder>/<filename>.lock``;
    only O_EXCL semantics are used, so no msvcrt/fcntl dependency and
    reasonable cross-platform behaviour.
    """

    def __init__(self, folder, filename, timeout_sec=300, retry_delay_sec=0.1):
        self.lockfile = os.path.join(folder, "{}.lock".format(filename))
        self.fd = None
        self.is_locked = False
        self.timeout_sec = timeout_sec
        self.retry_delay_sec = retry_delay_sec

    def acquire(self):
        """Block until the lock is obtained or ``timeout_sec`` elapses.

        Retries every ``retry_delay_sec`` seconds; raises
        FileLockException on timeout.
        """
        started = time.time()
        while True:
            try:
                # O_EXCL makes the open fail if the lockfile already exists.
                self.fd = os.open(self.lockfile, os.O_CREAT | os.O_RDWR | os.O_EXCL)
                self.is_locked = True
                return
            except OSError as err:
                if err.errno not in (errno.EEXIST, errno.EACCES):
                    raise
                if (time.time() - started) >= self.timeout_sec:
                    raise FileLockException("Timeout occurred for lockfile '%s'" % self.lockfile)
                logger.debug("Waiting for another worker. Retry after '%s' sec", self.retry_delay_sec)
                time.sleep(self.retry_delay_sec)

    def release(self):
        """Close and delete the lockfile, freeing the lock for other workers."""
        os.close(self.fd)
        os.unlink(self.lockfile)
        self.is_locked = False

    def __enter__(self):
        """Acquire on entering a ``with`` block (no-op when already held)."""
        if not self.is_locked:
            self.acquire()
            logger.debug("Worker '%s' lock acquire '%s'", os.getpid(), self.lockfile)
        return self

    def __exit__(self, *exc_info):
        """Release on leaving a ``with`` block (no-op when not held)."""
        if self.is_locked:
            self.release()
            logger.debug("Worker '%s' lock release '%s'", os.getpid(), self.lockfile)
|
sidoh/webpack_with_platformio | .build_web.py | <gh_stars>1-10
from shutil import copyfile
from subprocess import check_output, CalledProcessError
import sys
import os
import platform
import subprocess
Import("env")
def is_tool(name):
    """Return True when executable `name` is found on PATH.

    Uses `where` on Windows and `which` elsewhere; any lookup failure
    (non-zero exit, missing locator binary) means "not installed".
    """
    cmd = "where" if platform.system() == "Windows" else "which"
    try:
        check_output([cmd, name])
        return True
    # Narrowed from a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit.
    except (CalledProcessError, OSError):
        return False
def build_web():
    """Build the webpack bundle and copy the generated header into ../dist.

    Runs `npm install` + `npm run build` inside ./web (npm.cmd on
    Windows), then copies web/build/web_assets.h to ../dist/web_assets.h.
    Silently does nothing when npm is not installed.
    """
    if is_tool("npm"):
        os.chdir("web")
        print("Attempting to build webpage...")
        try:
            if platform.system() == "Windows":
                # NOTE(review): Windows installs with --only=dev while the
                # POSIX branch does a full install -- confirm this
                # asymmetry is intentional.
                print(check_output(["npm.cmd", "install", "--only=dev"]))
                print(check_output(["npm.cmd", "run", "build"]))
            else:
                print(check_output(["npm", "install"]))
                print(check_output(["npm", "run", "build"]))
            if not os.path.exists("../dist"):
                os.mkdir("../dist")
            copyfile("build/web_assets.h", "../dist/web_assets.h")
        finally:
            # Always restore the working directory for later build steps.
            os.chdir("..");

build_web()
|
jamiels/pysovryn | pysovryn/core.py | <filename>pysovryn/core.py
from web3 import Web3
import requests as requests
import matplotlib.pyplot as plt
from dateutil import parser
from prettytable import PrettyTable
class LiquidityPool:
    """Read-only client for Sovryn liquidity-pool statistics.

    All data comes from the public backend.sovryn.app HTTP API; nothing
    here signs transactions or talks to the chain directly.
    """

    def __init__(self,asset,contract_address='0x6E2fb26a60dA535732F8149b25018C9c0823a715'):
        self.__asset=asset
        self.__address = contract_address

    def lp_get(self):
        """Return the raw loan-token history JSON for this pool's contract."""
        headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
        result = requests.post('https://backend.sovryn.app/rpc',json={"method":"custom_getLoanTokenHistory","params":[{"address":self.__address}]},headers=headers)
        return result.json()

    def history(self):
        """Print the supply / APR history as an ASCII table."""
        result = self.lp_get()
        rates_table = PrettyTable(['Supply','Lend APR','Borrow APR','Timestamp'])
        for r in result:
            rates_table.add_row([r['supply'],r['supply_apr'],r['borrow_apr'],r['timestamp']])
        print(rates_table)

    def chart_history(self):
        """Plot supply (right axis) and lend/borrow APRs (left axis) over time."""
        result = self.lp_get()
        supply = []
        supply_apr = []
        borrow_apr = []
        timestamp = []
        for r in result:
            # Scale the API's raw fixed-point integers into display units.
            supply.append(int(r['supply']) / 100000000)
            supply_apr.append(int(r['supply_apr']) / 10000000000)
            borrow_apr.append(int(r['borrow_apr']) / 10000000000)
            timestamp.append(parser.isoparse(r['timestamp']))
        fig, ax1 = plt.subplots()
        ax2 = ax1.twinx()
        ax1.plot(timestamp,supply_apr,'b-')
        ax1.plot(timestamp,borrow_apr,'b-')
        ax2.plot(timestamp,supply,'r')
        ax1.set_xlabel('Time')
        ax1.set_ylabel('APRs', color='b')
        ax2.set_ylabel('Supply', color='r')
        plt.title('Liquidity Metrics for: ' + self.__address)
        plt.show()

    def balances(self,asset=None):
        """Print AMM pool balances for `asset`.

        Bug fix: the old default of 'SOV' ignored the asset this pool was
        constructed with; balances() now defaults to self.__asset, while
        an explicit argument still overrides it.
        """
        if asset is None:
            asset = self.__asset
        url = 'https://backend.sovryn.app/amm/pool-balance/' + asset
        r = requests.get(url).json()
        print('Contract Balance',r['contractBalanceToken'])
        print('Staked Balance',r['stakedBalanceToken'])
        print('rBTC Contract Balance',r['contractBalanceBtc'])
        print('rBTC Staked Balance',r['stakedBalanceBtc'])
        print()
class RSKNode:
    """Thin wrapper around a Web3 connection to an RSK node."""

    def __init__(self,url='https://public-node.rsk.co'):
        # Bug fix: the constructor ignored `url` and always connected to
        # the hard-coded public node.
        self.__w3 = Web3(Web3.HTTPProvider(url))

    def current_block(self):
        """Return the latest block number reported by the node."""
        return self.__w3.eth.block_number
|
jamiels/pysovryn | pysovryn/__init__.py | <reponame>jamiels/pysovryn<filename>pysovryn/__init__.py
from .core import LiquidityPool, RSKNode
|
jamiels/pysovryn | setup.py | from setuptools import setup
setup(
name='pysovryn',
version='0.02',
author='<NAME>',
packages=['pysovryn'],
install_requires=[
'matplotlib',
'web3',
'pandas',
'datetime',
'requests',
'prettytable'
],
include_package_data=True,
) |
jamiels/pysovryn | app.py | <gh_stars>0
from pysovryn import RSKNode, LiquidityPool
n = RSKNode()
print(n.current_block())
# rbtc_lp = LiquidityPool(contract_address='0x6E2fb26a60dA535732F8149b25018C9c0823a715')
# rbtc_lp.print()
# rbtc_lp.chart()
sov_lp = LiquidityPool('SOV')
sov_lp.balances()
sov_lp.history()
sov_lp.chart_history()
'''from pysovryn import RSKNode, LiquidityPools
n = RSKNode()
n.current_block()
rbtc_lp = LiquidityPools(contract_address='0x6E2fb26a60dA535732F8149b25018C9c0823a715')
rbtc_lp.print()
rbtc_lp.chart()'''
|
nfynt/Unity_ScriptingRef | Random/Python/module_test.py | import module
from module import GetVersion as gv
from module import *
print(module.GetNumSquared(3))
print(gv())
print(GetPow(10,2)) |
nfynt/Unity_ScriptingRef | Random/Python/python_refresh.py | ## This is a quick refresher for python functions and syntax ##
#print("Hello python!")
message = "This is a variable"
#print(message)
#print(message.title()) #This Is A Variable
#print(message.upper()) #THIS IS A VARIABLE
#print(message.lower()) #this is a variable
message2 = "This is a message"
#print(message + " " + message2)
language = "python "
language.rstrip()
#print(language) #python
#strip() to remove whitespace from left and right side of the string
print(3**2) #9
print(2+3*4)
print((2+3)*4)
#Python follows the standard order of operations (operator precedence)
num = 42
print(message2 + " for agent " + str(num))
print(None == []) #False
# if num != 42:
# raise AssertionError("num value not equal to 42")
# assert num > 45, "num less than 46"
########## LISTS #############################
items = ["tom","dick",'h',45,"harry"]
print(items[2])
items[2]="h_new"
items.append("newly added item")
print(items)
items.insert(2,"meddler")
del items[4]
print(items)
print (items.pop(2)) #pop 2nd element
items.remove("newly added item") # removing by value
print(items)
##### SORTING
print(sorted(items)) #without sorting the actual list
items.reverse() #simple reverse the list
print("reversed: " + str(items))
items.sort()
print(str(items) + " no of items: "+str(len(items)))
##### LOOPING
items.append(34)
items.append('d')
for ll in items:
print(ll)
for val in range(94,100,2): #start inclusive, end exclusive, and jump value
print(val)
digits = list(range(2,12))
print(digits)
print(sum(digits))
print(min(digits))
print(max(digits))
##### SLICING
sliceEnd = int(len(digits)/2)
print(digits[0:sliceEnd]) #digits[:sliceEnd]
print(digits[sliceEnd:])
#can not assign new list with = operator
digits_copy = digits[:] #copying list into new one...
#an immutable list is called a tuple
#tple = (100,40,60)
tple = tuple(range(45,52,2))
for tp in tple:
print(tp)
########## Conditional Statement
tple = tuple(range(79,98,3))
if tple[1]==82 or tple[1]==83:
print("tple[1] is 82 or 83 " + str(tple[1]))
else:
print("tple[1] not 82 or 83")
if 95 in tple:
print("tuple contains 95")
else:
print("tuple doesn't contain 95")
########### Dictionary
dictionary = {"key1":"val1","key2":2,"key3":"val3"}
print(dictionary)
dictionary["key4"] = 54
del dictionary["key2"]
for key,val in dictionary.items():
print("key: "+str(key)+"\tval: "+str(val))
#for key in dictionary.keys():
#for val in dictionary.values():
keys = list(dictionary.keys())
print(keys)
############ INPUTS
# name = input("username: ")
# print("Hi! "+name)
import random
def GetRandomInt():
    """Return a pseudo-random integer in the inclusive range [10, 100]."""
    return random.randint(10, 100)
print(GetRandomInt())
def UpdateList(items):
    """Increment every element of `items` in place by one."""
    for idx, value in enumerate(items):
        items[idx] = value + 1
def GetFormatedReport(name, age, percent):
    """Return a short multi-line score report addressed to `name`."""
    return f"Hi, {name}!\nAge: {age}\nYou have scored {percent}%"
def SwapVal(x,y):
    """Return the two arguments as a tuple in reversed order."""
    return (y, x)
def SwapGlobalVal():
    """Swap the module-level globals `x` and `y` in place."""
    global x, y
    x, y = y, x
itms = list(range(5,10,2))
UpdateList(itms)
print(itms)
print(GetFormatedReport("nfynt",24,95.8))
print(GetFormatedReport(percent=95.6,age=32,name="ransom"))
print(GetFormatedReport("nnfh",percent=34.7,age=55))
x,y = 5,10
#x,y = SwapVal(x,y)
SwapGlobalVal()
print("X "+str(x)+"\tY "+str(y))
#### Empty Tuple
cart = ("ford","merc","lexus")
*cars, = cart #starred assignment of tuple into list; same as cars = list(cart)
cars+= ['bmw']
print(cars)
|
nfynt/Unity_ScriptingRef | Random/Python/class_test.py | import classes
m1 = classes.Math(8,2)
m2 = classes.Math(5,6)
m1.ShowVals()
m2.ShowVals()
print("")
print(m2.GetPow(m1.val1,m1.val2))
print(m1.GetPow(m2.val1,m2.val2))
eq = classes.Equations(2,4,9)
print(eq.GetSquaredRoot(eq.val3))
print(eq.ShowVals()) |
nfynt/Unity_ScriptingRef | Random/Python/classes.py | <filename>Random/Python/classes.py
import random
import math
class Math():
    """Small demo class bundling a few arithmetic helpers around two values."""

    def __init__(self, val1, val2):
        self.val1 = val1
        self.val2 = val2

    def GetVersion(self):
        """Return the demo version string."""
        return "v0.1"

    def GetNumSquared(self, num):
        """Return num squared."""
        return num ** 2

    def GetAuthorName(self):
        """Return the author's handle."""
        return "Nfynt"

    def GetNumCubed(self, num):
        """Return num cubed."""
        return num ** 3

    def GetRandomNum(self):
        """Return a pseudo-random integer in [89, 8989]."""
        return random.randint(89, 8989)

    # Raise a to the power of b.
    def GetPow(self, a, b):
        return a ** b

    def ShowVals(self):
        """Print both stored values, one per line."""
        print(self.val1)
        print(self.val2)
#### Simple inheritance
class Equations(Math):
    """Math subclass that adds a third value and a square-root helper."""

    def __init__(self, val1, val2, val3):
        super().__init__(val1, val2)
        self.val3 = val3

    def GetSquaredRoot(self, num):
        """Return the (float) square root of num."""
        return math.sqrt(num)
nfynt/Unity_ScriptingRef | Random/Python/module.py | import random
def GetVersion():
    """Return the module's version string."""
    return "v0.1"
def GetNumSquared(num):
    """Return num squared."""
    return num ** 2
def GetAuthorName():
    """Return the author's handle."""
    return "Nfynt"
def GetNumCubed(num):
    """Return num cubed."""
    return num ** 3
def GetRandomNum():
    """Return a pseudo-random integer in the inclusive range [89, 8989]."""
    return random.randint(89, 8989)
# Raise a to the power of b.
def GetPow(a,b):
    """Return a ** b."""
    return a ** b
iGEM-SBU/lab-signin | mainapp/admin.py | from django.contrib import admin
from django.urls import path
from django.shortcuts import HttpResponseRedirect
from .constants import MEMBER_NAMES
from .models import Member
from .models import Member
class MemberAdmin(admin.ModelAdmin):
    """Admin for Member with a custom /populate/ seeding route.

    NOTE(review): populate_database is not idempotent -- each call
    creates a fresh Member row per MEMBER_NAMES entry.  Confirm this is
    only ever run once.
    """
    change_list_template = 'mainapp/admin_member_changelist.html'
    list_display = ('name', 'is_signed_in', 'total_time')

    def get_urls(self):
        # Prepend the custom route so it takes priority over the defaults.
        urls = super().get_urls()
        my_urls = [
            path('populate/', self.populate_database),
        ]
        return my_urls + urls

    def populate_database(self, request):
        # member_name is a ('username', 'verbose name') tuple; store the key.
        for member_name in MEMBER_NAMES:
            member = Member()
            member.name = member_name[0]
            member.save()
        return HttpResponseRedirect('../')

admin.site.register(Member, MemberAdmin)
|
iGEM-SBU/lab-signin | mainapp/migrations/0001_initial.py | <reponame>iGEM-SBU/lab-signin<gh_stars>0
# Generated by Django 2.0.5 on 2018-06-04 20:46
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Member',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(choices=[('paggarwal', '<NAME>'), ('sbudhan', '<NAME>'), ('wchiang', '<NAME>'), ('dkwasniak', '<NAME>'), ('kledalla', '<NAME>'), ('mlee', '<NAME>'), ('nlo', '<NAME>'), ('mmullin', '<NAME>'), ('lypan', '<NAME>'), ('jrakhimov', '<NAME>'), ('rruzic', '<NAME>'), ('mshah', '<NAME>'), ('lvelikov', '<NAME>'), ('svincent', '<NAME>')], max_length=9)),
('sign_in_time', models.DateTimeField(default=django.utils.timezone.now)),
('sign_out_time', models.DateTimeField(default=django.utils.timezone.now)),
('is_signed_in', models.BooleanField(default=False)),
('last_time_block', models.PositiveIntegerField(default=0)),
('total_time', models.PositiveIntegerField(default=0)),
],
),
]
|
iGEM-SBU/lab-signin | mainapp/urls.py | from django.contrib import admin
from django.urls import path, re_path
from . import views
urlpatterns = [
re_path(r'^$', views.index, name="index"),
path('member/<member_name>', views.view_member_profile, name="member_profile"),
path('member/<member_name>/signin', views.member_signin, name="member_signin"),
path('member/<member_name>/signout', views.member_signout, name="member_signout"),
path('member/<member_name>/correction/out', views.member_time_correction, name="member_time_correction"),
path('member/<member_name>/correction/in', views.member_time_correction_in, name="member_time_correction_in"),
path('member/<member_name>/correction', views.member_correction, name="member_correction"),
path('whos_in', views.whos_in, name="whos_in"),
path('group_timeline', views.group_timeline, name="group_timeline")
]
|
iGEM-SBU/lab-signin | mainapp/views.py | <reponame>iGEM-SBU/lab-signin
import threading
from django.shortcuts import render, get_object_or_404, reverse, HttpResponseRedirect
from django.utils import timezone
from .models import Member, TimelineBlock
from .forms import HoursForm, SignInTimeForm, BigTimeCorrectionForm, TimelineForm
from .constants import MEMBER_NAMES, SPREADSHEET_ID, SCOPE, START_DATE, END_DATE, G_SHEETS_ROW_SUM_COMMAND, GSPREAD_CREDS
from oauth2client.service_account import ServiceAccountCredentials
import datetime as dt
import gspread
import os
def index(request):
member_list = Member.objects.all()
form = request.POST
if request.method == 'POST':
selected_user = get_object_or_404(Member, name=request.POST.get('member_name'))
return HttpResponseRedirect(reverse('member_profile', args=[selected_user.name]))
return render(request, 'mainapp/index.html', {'member_list':member_list})
def view_member_profile(request, member_name):
member = get_object_or_404(Member, name=member_name)
if request.method == 'POST':
form = TimelineForm(request.POST)
if form.is_valid():
text = form.cleaned_data.get('text')
if len(text) == 0:
return member_signout(request, member_name)
t = threading.Thread(target=timeline_block_bg_thread, args=[member, text])
t.setDaemon(False)
t.start()
return member_signout(request, member_name)
form = TimelineForm()
timeline_list = list(reversed(TimelineBlock.objects.filter(member=member)))
return render(request, 'mainapp/member_profile.html', {'member': member, 'form': form, 'timeline_list': timeline_list})
def timeline_block_bg_thread(member, text):
newBlock = TimelineBlock()
newBlock.content = text
newBlock.member = member
member.sign_out(timezone.now())
newBlock.header = member.sign_in_time.strftime("%B %d, %Y")
newBlock.subtitle = timeblock_subtitle_format(member.sign_in_time, member.sign_out_time)
newBlock.save()
def member_signin(request, member_name):
member = get_object_or_404(Member, name=member_name)
member.sign_in(timezone.now())
member.save()
return HttpResponseRedirect(reverse('member_profile', args=[member.name]))
def member_signout(request, member_name):
member = get_object_or_404(Member, name=member_name)
member.sign_out(timezone.now())
member.save()
t = threading.Thread(target=member_signout_bg_thread, args=[member, member_name])
t.setDaemon(False)
t.start()
return HttpResponseRedirect(reverse('member_profile', args=[member.name]))
def member_signout_bg_thread(member, member_name):
update_spreadsheet(member_name, member.last_time_block / 60)
verbose_log(member.get_name_display(), member.sign_in_time, member.sign_out_time)
def member_time_correction(request, member_name):
member = get_object_or_404(Member, name=member_name)
if request.method == 'POST':
form = HoursForm(request.POST)
if form.is_valid():
t = threading.Thread(target=member_time_correction_bg_thread, args=[member, member_name, form])
t.setDaemon(False)
t.start()
return HttpResponseRedirect(reverse('member_profile', args=[member.name]))
else:
form = HoursForm()
return render(request, 'mainapp/member_time_correction.html', {'member':member, 'form':form})
def member_time_correction_in(request, member_name):
member = get_object_or_404(Member, name=member_name)
if request.method == 'POST':
form = SignInTimeForm(request.POST)
if form.is_valid():
t = threading.Thread(target=member_time_correction_in_bg_thread, args=[member, form])
t.setDaemon(False)
t.start()
return HttpResponseRedirect(reverse('member_profile', args=[member.name]))
else:
form = SignInTimeForm()
return render(request, 'mainapp/member_time_correction_in.html', {'member': member, 'form': form})
def member_correction(request, member_name):
member = get_object_or_404(Member, name=member_name)
if request.method == 'POST':
form = BigTimeCorrectionForm(request.POST)
if form.is_valid():
t = threading.Thread(target=member_correction_bg_thread, args=[member, form])
t.setDaemon(False)
t.start()
return HttpResponseRedirect(reverse('member_profile', args=[member.name]))
else:
form =BigTimeCorrectionForm
return render(request, 'mainapp/member_correction.html', {'member':member, 'form':form})
def whos_in(request):
member_list = Member.objects.all()
return render(request, 'mainapp/whos_in.html', {'member_list': member_list})
def group_timeline(request):
timelinelist = list(reversed(TimelineBlock.objects.all()))
return render(request, 'mainapp/group_timeline.html', {'timelinelist': timelinelist})
# -----------------NON-VIEW FUNCTIONS----------------------------------------------
def member_time_correction_bg_thread(member, member_name, form):
hours_to_add = float(form.cleaned_data.get('hours'))
print(hours_to_add)
new_signed_out_dt = member.sign_in_time + dt.timedelta(hours=hours_to_add)
print(new_signed_out_dt)
member.sign_out_time = new_signed_out_dt
member.total_time += int(hours_to_add * 60)
member.is_signed_in = False
member.save()
update_spreadsheet(member_name, hours_to_add)
verbose_log(member.get_name_display(), member.sign_in_time, member.sign_out_time, 'Manually Entered (Sign out)')
def member_time_correction_in_bg_thread(member, form):
new_time_in = form.cleaned_data.get('time_signed_in')
new_dt_in = dt.datetime.combine(dt.date.today(), new_time_in)
member.sign_in_time = new_dt_in
member.is_signed_in = True
member.save()
verbose_log(member.get_name_display(), member.sign_in_time, dt.datetime.now(), 'Manually Entered (Sign in)')
def member_correction_bg_thread(member, form):
date = form.cleaned_data.get('date')
hours = float(form.cleaned_data.get('hours'))
date = dt.datetime.combine(date, dt.datetime.now().time())
update_spreadsheet_by_day(member.name, hours, date)
member.total_time += int(hours * 60)
member.save()
verbose_log(member.get_name_display(), timezone.now(), timezone.now(), 'Manually Entered (Backlog)')
# get_day_one_week_from(dt.datetime(2018,6,3)) returns 2018-06-10 00:00:00 (datetime object)
def get_day_one_week_from(date):
    """Return the datetime exactly seven days after `date`."""
    return date + dt.timedelta(weeks=1)
# get_week_list_dates_from(dt.datetime(2018,6,3)) returns ['06/03/2018', '06/04/2018', '06/05/2018', '06/06/2018', '06/07/2018', '06/08/2018', '06/09/2018']
def get_week_list_dates_from(date):
    """Return seven consecutive 'MM/DD/YYYY' strings starting at `date`."""
    return [
        (date + dt.timedelta(days=offset)).strftime('%m/%d/%Y')
        for offset in range(7)
    ]
# Passing MEMBER_NAMES returns ['<NAME>', '<NAME>', ... , '<NAME>']
def verbose_list_from_choices(choices):
    """Return the human-readable names (second element) of each choice tuple."""
    return [verbose for _short, verbose in choices]
def short_list_from_choices(choices):
    """Return the short keys (first element) of each choice tuple."""
    return [short for short, _verbose in choices]
# You know what this does
def is_sunday(date):
    """Return True when `date` falls on a Sunday."""
    _SUNDAY = 6  # datetime.weekday(): Monday == 0 ... Sunday == 6
    return date.weekday() == _SUNDAY
def get_list_of_weeks(start_date, end_date):
    """Return week-date-string lists covering [start_date, end_date).

    Both bounds must be Sundays; each element is the 7-day list produced
    by get_week_list_dates_from.  Non-Sunday bounds return the original
    one-element explanatory message.
    """
    if not is_sunday(start_date) or not is_sunday(end_date):
        return ['One of the dates given is not a Sunday or is not a datetime.']
    weeks = []
    current = start_date
    # `<` instead of the original `==`-only exit: if end_date preceded
    # start_date (or was ever stepped over) the old loop never ended.
    while current < end_date:
        weeks.append(get_week_list_dates_from(current))
        current = get_day_one_week_from(current)
    return weeks
def timeblock_subtitle_format(time1, time2):
    """Format two datetimes as 'HH:MM - HH:MM' on a 12-hour clock."""
    return '{} - {}'.format(time1.strftime('%I:%M'), time2.strftime('%I:%M'))
def generate_spreadsheet_template():
list_of_weeks = get_list_of_weeks(START_DATE, END_DATE)
spreadsheet_template = []
for week in list_of_weeks:
header = ['Week of '+week[0]]
header += get_week_list_dates_from(dt.datetime.strptime(week[0], '%m/%d/%Y'))
header.append('Total Hours (Week)')
for member in verbose_list_from_choices(MEMBER_NAMES):
header.append(member)
header += [0]*7 # [0,0,0,0,0,0,0]
header.append(G_SHEETS_ROW_SUM_COMMAND)
spreadsheet_template += header
dirname = os.path.dirname(__file__)
credentials = ServiceAccountCredentials.from_json_keyfile_name(
os.path.join(dirname, GSPREAD_CREDS),
SCOPE
)
gc = gspread.authorize(credentials)
wks = gc.open_by_key(SPREADSHEET_ID)
worksheet = wks.sheet1
template_range = 'A1:I' + str(int(len(spreadsheet_template)/9))
cell_list = worksheet.range(template_range)
for cell, new_cell_value in zip(cell_list, spreadsheet_template):
cell.value = new_cell_value
worksheet.update_cells(cell_list, value_input_option='USER_ENTERED')
def update_spreadsheet_by_day(username, value, day):
    """Add `value` hours to `username`'s cell for `day` on the hours sheet.

    Locates the sheet cell holding the 'M/D' date header, then offsets
    down by the member's 1-based index within MEMBER_NAMES to reach
    their cell in that day's column.
    """
    dirname = os.path.dirname(__file__)
    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        os.path.join(dirname, GSPREAD_CREDS),
        SCOPE
    )
    gc = gspread.authorize(credentials)
    wks = gc.open_by_key(SPREADSHEET_ID)
    worksheet = wks.sheet1
    # Sheet dates are written without zero padding (e.g. '6/4').
    current_date = '{d.month}/{d.day}'.format(d=day)
    current_date_cell = worksheet.findall(current_date)[0]
    row_index = short_list_from_choices(MEMBER_NAMES).index(username) + 1
    user_cell_row = current_date_cell.row + row_index
    user_cell_col = current_date_cell.col
    user_cell_value = float(worksheet.cell(user_cell_row, user_cell_col).value)
    worksheet.update_cell(user_cell_row, user_cell_col, user_cell_value+value)
def update_spreadsheet(username, value):
    """Add `value` hours to `username`'s cell for today's date column.

    Delegates to update_spreadsheet_by_day with the current datetime;
    the original duplicated the whole gspread auth/lookup sequence.
    """
    update_spreadsheet_by_day(username, value, dt.datetime.now())
def verbose_log(member_name, signed_in, signed_out, notes='Successful with no errors'):
    """Append an audit row (name, in, out, duration, notes) to 'Verbose Log'."""
    dirname = os.path.dirname(__file__)
    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        os.path.join(dirname, GSPREAD_CREDS),
        SCOPE
    )
    gc = gspread.authorize(credentials)
    wks = gc.open_by_key(SPREADSHEET_ID)
    worksheet = wks.worksheet("Verbose Log")
    worksheet.append_row([member_name, str(signed_in), str(signed_out), str(signed_out - signed_in), notes], value_input_option='USER_ENTERED')
|
iGEM-SBU/lab-signin | mainapp/constants.py | <reponame>iGEM-SBU/lab-signin<gh_stars>0
import datetime as dt
# Member Names (('username', 'verbose name'), ('username2', 'verbose name2'))
MEMBER_NAMES = (
('paggarwal', '<NAME>'),
('sbudhan', '<NAME>'),
('wchiang', '<NAME>'),
('dkwasniak', '<NAME>'),
('kledalla', '<NAME>'),
('mlee', '<NAME>'),
('nlo', '<NAME>'),
('mmullin', '<NAME>'),
('lypan', '<NAME>'),
('jrakhimov', '<NAME>'),
('rruzic', '<NAME>'),
('mshah', '<NAME>'),
('lvelikov', '<NAME>'),
('svincent', '<NAME>')
)
# Lab Info
START_DATE = dt.datetime(2018, 5, 27)
END_DATE = dt.datetime(2018, 8, 12)
# Google Sheets API/gspread Info
SPREADSHEET_ID = '1WdJKTDyZWeEFwS2MT7nIvaqaRlMd9Sgut71cvNMYp_M'
SCOPE = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
GSPREAD_CREDS = 'gspread_creds.json'
G_SHEETS_ROW_SUM_COMMAND = '''=SUM(INDIRECT(CONCATENATE("B",ROW(),":H",ROW())))'''
|
iGEM-SBU/lab-signin | mainapp/forms.py | <gh_stars>0
from django import forms
from django.forms import DecimalField, TimeField, DateField, CharField
class HoursForm(forms.Form):
    """Manual sign-out correction: how many hours the session lasted (0-12)."""
    hours = DecimalField(min_value=0, max_value=12, max_digits=4, decimal_places=2)
class SignInTimeForm(forms.Form):
    """Manual sign-in correction: the time (today) the member actually arrived."""
    time_signed_in = TimeField(widget=forms.TimeInput(format="%H:%M"), label='What time did you come in (HH:MM)?')
class TimelineForm(forms.Form):
    """Optional timeline post text entered at sign-out; may be left empty."""
    text = CharField(max_length=280, required=False)
class BigTimeCorrectionForm(forms.Form):
    """Backlog entry: credit `hours` to a past `date` retroactively.

    Bug fix: the DateInput format was "%M/%D" -- %M is *minutes* and %D
    is not a strftime directive; month/day are %m/%d.
    """
    date = DateField(widget=forms.DateInput(format="%m/%d"), label="What day did you come in (MM/DD)?")
    hours = DecimalField(min_value=0, max_value=12, max_digits=4, decimal_places=2, label="How many hours did you do?")
|
iGEM-SBU/lab-signin | mainapp/models.py | from django.db import models
from django.utils import timezone
from . import constants
# Create your models here.
class Member(models.Model):
    """A lab member with sign-in/out state and cumulative time tracking.

    Durations (`last_time_block`, `total_time`) are stored in minutes.
    Callers are responsible for calling save() after sign_in/sign_out.
    """
    name = models.CharField(max_length=9, choices=constants.MEMBER_NAMES, blank=False)
    sign_in_time = models.DateTimeField(auto_now=False, default=timezone.now)
    sign_out_time = models.DateTimeField(auto_now=False, default=timezone.now)
    is_signed_in = models.BooleanField(default=False, editable=True)
    last_time_block = models.PositiveIntegerField(default=0)  # minutes
    total_time = models.PositiveIntegerField(default=0)  # minutes

    def __str__(self):
        return self.name

    def sign_in(self, time):
        """Record a sign-in at `time`; no-op when already signed in."""
        if self.is_signed_in:
            print('User tried to sign in but user is already signed in')
            return
        # Removed leftover debug print() calls from the original.
        self.sign_in_time = time
        self.is_signed_in = True

    def sign_out(self, time):
        """Record a sign-out at `time` and add the session to total_time."""
        if not self.is_signed_in:
            print('User tried to sign out but user is already signed out')
            return
        self.sign_out_time = time
        # .seconds ignores whole days; sessions are expected to be < 24h.
        self.last_time_block = int((self.sign_out_time - self.sign_in_time).seconds/60)
        self.total_time += self.last_time_block
        self.is_signed_in = False

    def get_hours(self):
        """Return total_time as hours, keeping roughly two fractional digits.

        The tiered %g widths grow with the integer part so precision
        stays constant while trailing zeros are trimmed.
        """
        hours = self.total_time/60
        if hours < 10:
            return '{0:.3g}'.format(hours)
        if hours < 100:
            return '{0:.4g}'.format(hours)
        if hours < 1000:
            return '{0:.5g}'.format(hours)
        return '{0:.6g}'.format(hours)

    def forgot_to_sign_out(self):
        """True when signed in for more than 12 hours (likely forgot)."""
        # Removed leftover debug print() calls from the original.
        if not self.is_signed_in:
            return False
        return ((timezone.now() - self.sign_in_time).total_seconds())//3600 > 12
class TimelineBlock(models.Model):
    """A short timeline post (header/subtitle/content) attached to a member."""
    member = models.ForeignKey(Member, on_delete=models.CASCADE)
    header = models.CharField(max_length=25, default='No Header')
    subtitle = models.CharField(max_length=30, default='No Subtitle')
    content = models.CharField(max_length=280, default='No Content')

    def __str__(self):
        # BUG FIX: self.member is a Member instance; 'By ' + self.member
        # raised TypeError. str(Member) yields the member's name.
        return 'By ' + str(self.member) + ': ' + self.header + ' ' + self.content
|
iGEM-SBU/lab-signin | labsignin/wsgi.py | <gh_stars>0
import os

from django.core.wsgi import get_wsgi_application
# NOTE(review): whitenoise.django.DjangoWhiteNoise was removed in
# whitenoise 4.x (replaced by WSGI/middleware wrapping) — pins whitenoise < 4.
from whitenoise.django import DjangoWhiteNoise

# Point Django at this project's settings module before building the app.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "labsignin.settings")

application = get_wsgi_application()
# Wrap the WSGI app so static files are served directly by WhiteNoise.
application = DjangoWhiteNoise(application)
iGEM-SBU/lab-signin | mainapp/migrations/0002_timelineblock.py | # Generated by Django 2.0.5 on 2018-06-07 16:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: adds the TimelineBlock model
    # (header/subtitle/content plus FK to Member). Do not hand-edit logic.

    dependencies = [
        ('mainapp', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='TimelineBlock',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('header', models.CharField(default='No Header', max_length=25)),
                ('subtitle', models.CharField(default='No Subtitle', max_length=30)),
                ('content', models.CharField(default='No Content', max_length=280)),
                ('member', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Member')),
            ],
        ),
    ]
|
MikeXydas/SiameseLSTM | SiameseLSTM/inference.py | import pickle
import pandas as pd
import numpy as np
from SiameseLSTM.SiamLSTM import create_malstm_model
from SiameseLSTM.utils import create_dict_datasets
def create_output_file(model, X_test, outfile, max_seq_length, embeddings, embedding_dims, from_path=False,
                       path_to_test='../storage/datasets/q2b/test_without_labels.csv'):
    """Write a submission CSV (Id, Predicted) for the test set.

    `model` is either a trained Keras model or, when `from_path` is True,
    a checkpoint path from which a fresh MaLSTM model is rebuilt and loaded.
    Predictions are rounded to hard 0/1 labels; Ids come from the raw test CSV.
    """
    if from_path:
        net = create_malstm_model(max_seq_length, embedding_dims=embedding_dims, embeddings=embeddings)
        net.load_weights(model).expect_partial()
    else:
        net = model

    # Round the sigmoid outputs to binary labels.
    predictions = np.round(net.predict(X_test))[:, 0].astype(int)

    test_ids_df = pd.read_csv(path_to_test, usecols=['Id'])
    submission = pd.DataFrame.from_dict({
        "Id": list(test_ids_df.Id),
        "Predicted": predictions,
    })
    submission.to_csv(outfile, index=False)
if __name__ == "__main__":
    # This main will work only in the case of not using the features
    # Read the texts
    print(">>> Reading the texts...", end='')
    clean_train_df = pd.read_csv('../storage/datasets/q2b/preprocessed/train_quora_clean.csv')
    print("Done")

    # Load the embeddings
    print(">>> Reading the embeddings...", end='')
    embeddings = np.load('../storage/datasets/q2b/word_embeddings/embeddings_matrix.npy', )
    with open('../storage/datasets/q2b/word_embeddings/numb_represantation_train.pkl', 'rb') as handle:
        numb_represantation_train = pickle.load(handle)
    with open('../storage/datasets/q2b/word_embeddings/numb_represantation_test.pkl', 'rb') as handle:
        numb_represantation_test = pickle.load(handle)
    print("Done")

    print(">>> Creating the datasets...", end='')
    # BUG FIX: create_dict_datasets takes (clean_train_df,
    # numb_represantation_train, numb_represantation_test); the old call also
    # passed the test dataframe, shifting every argument by one (and feeding
    # the test representations in as `seed`). The test dataframe is not
    # needed here, so its read was dropped as well.
    X_train, X_validation, X_test, Y_train, Y_validation, max_seq_length = \
        create_dict_datasets(clean_train_df, numb_represantation_train, numb_represantation_test)
    print("Done")

    embeddings_dim = len(embeddings[0])

    create_output_file(model='../checkpoints/epoch_0042/cp.ckpt',
                       X_test=[X_test['left'], X_test['right']],
                       outfile="../storage/datasets/q2b/results/test.csv",
                       max_seq_length=max_seq_length, embeddings=embeddings,
                       embedding_dims=embeddings_dim, from_path=True)
|
MikeXydas/SiameseLSTM | SiameseLSTM/SiamLSTMwithFeatures.py | <reponame>MikeXydas/SiameseLSTM
import tensorflow.keras.backend as K
from tensorflow.keras import layers
from tensorflow.keras.models import Model
from tensorflow_core.python.keras import regularizers
def create_malstm_features_model(max_seq_length, embedding_dims, embeddings, numb_engineered_features):
    """Build the Siamese LSTM that also consumes hand-engineered pair features.

    Two integer-encoded questions share one embedding layer and one LSTM;
    their encodings are concatenated with a dense projection of the
    engineered features and pushed through a regularized MLP ending in a
    sigmoid duplicate-probability.
    """
    # Hyper-parameters
    lstm_dropout = 0.23
    dense_dropout = 0.23
    l2_strength = 0.002
    lstm_units = 300

    # Inputs: two padded question sequences + the engineered-feature vector.
    left_input = layers.Input(shape=(max_seq_length,), dtype='int32')
    right_input = layers.Input(shape=(max_seq_length,), dtype='int32')
    engineered_features_input = layers.Input(shape=(numb_engineered_features,))

    # Frozen pretrained embeddings shared by both branches.
    embedding = layers.Embedding(len(embeddings), embedding_dims,
                                 weights=[embeddings], input_length=max_seq_length, trainable=False)
    encoded_left = embedding(left_input)
    encoded_right = embedding(right_input)

    # Siamese: one LSTM encodes both questions.
    shared_lstm = layers.LSTM(lstm_units, kernel_regularizer=regularizers.l2(l2_strength), dropout=lstm_dropout,
                              recurrent_dropout=lstm_dropout, name="Siamese_LSTM")
    left_output = shared_lstm(encoded_left)
    right_output = shared_lstm(encoded_right)

    # Project the engineered features before mixing them in.
    encoded_engineered = layers.Dense(70, activation='relu', name="FeatureDense")(engineered_features_input)

    # Classifier head: concat -> (dropout, batchnorm) -> three shrinking
    # dense blocks -> sigmoid.
    x = layers.Concatenate()([left_output, right_output, encoded_engineered])
    x = layers.Dropout(dense_dropout)(x)
    x = layers.BatchNormalization()(x)
    for units, dense_name, bn_name in ((150, "ConcatenatedDense_1", "BatchNorm1"),
                                       (70, "ConcatenatedDense_2", "BatchNorm2"),
                                       (35, "ConcatenatedDense_3", "BatchNorm3")):
        x = layers.Dense(units, kernel_regularizer=regularizers.l2(l2_strength), activation='relu',
                         name=dense_name)(x)
        x = layers.Dropout(dense_dropout)(x)
        x = layers.BatchNormalization(name=bn_name)(x)

    output = layers.Dense(1, activation='sigmoid', name="Sigmoid")(x)
    return Model([left_input, right_input, engineered_features_input], output)
|
MikeXydas/SiameseLSTM | SiameseLSTM/SiamLSTM.py | import tensorflow.keras.backend as K
from tensorflow.keras import layers
from tensorflow.keras.models import Model
from tensorflow_core.python.keras import regularizers
def exponent_neg_manhattan_distance(left, right):
    """Classic MaLSTM similarity in (0, 1]: exp(-L1(left, right)) per row.

    NOTE(review): not referenced by create_malstm_model below — appears to be
    kept from the original MaLSTM formulation.
    """
    l1_distance = K.sum(K.abs(left - right), axis=1, keepdims=True)
    return K.exp(-l1_distance)
def create_malstm_model(max_seq_length, embedding_dims, embeddings):
    """Build the text-only Siamese LSTM duplicate-question classifier.

    Both questions go through a shared frozen embedding and a shared LSTM;
    the two encodings are concatenated and classified by a regularized MLP
    with a sigmoid output.
    """
    # Hyper-parameters
    lstm_dropout = 0.23
    dense_dropout = 0.23
    l2_strength = 0.002
    lstm_units = 300

    # Two padded integer-encoded question inputs.
    left_input = layers.Input(shape=(max_seq_length,), dtype='int32')
    right_input = layers.Input(shape=(max_seq_length,), dtype='int32')

    # Frozen pretrained embedding shared by both branches.
    embedding = layers.Embedding(len(embeddings), embedding_dims,
                                 weights=[embeddings], input_length=max_seq_length, trainable=False)
    encoded_left = embedding(left_input)
    encoded_right = embedding(right_input)

    # Siamese: the same LSTM encodes both questions.
    shared_lstm = layers.LSTM(lstm_units, dropout=lstm_dropout, kernel_regularizer=regularizers.l2(l2_strength),
                              recurrent_dropout=lstm_dropout)
    left_output = shared_lstm(encoded_left)
    right_output = shared_lstm(encoded_right)

    # Classifier head: concat -> (dropout, batchnorm) -> shrinking dense blocks.
    x = layers.Concatenate()([left_output, right_output])
    x = layers.Dropout(dense_dropout)(x)
    x = layers.BatchNormalization()(x)
    for units in (150, 70, 35):
        x = layers.Dense(units, kernel_regularizer=regularizers.l2(l2_strength), activation='relu')(x)
        x = layers.Dropout(dense_dropout)(x)
        x = layers.BatchNormalization()(x)

    output = layers.Dense(1, activation='sigmoid')(x)
    return Model([left_input, right_input], output)
|
MikeXydas/SiameseLSTM | Trainer.py | import shutil
import time
import pickle
import pandas as pd
import numpy as np
from tqdm.keras import TqdmCallback
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from sklearn import preprocessing
from SiameseLSTM.SiamLSTM import create_malstm_model
from SiameseLSTM.SiamLSTMwithFeatures import create_malstm_features_model
from SiameseLSTM.utils import create_dict_datasets, check_validation_acc, split_engineered_features_dataset
from SiameseLSTM.inference import create_output_file
if __name__ == "__main__":
    # Model variables
    batch_size = 1024
    n_epoch = 500
    use_engineered_features = False
    # NOTE(review): Windows-style path; TensorBoard on other OSes would need
    # forward slashes.
    tensorboard_dir = 'storage\\logs\\'
    # model_checkpoint = "checkpoints/epoch_0470/cp.ckpt"
    model_checkpoint = ""
    delete_checkpoints_and_logs = True

    # CARE: To save time we have already transformed our texts from words to integers
    # and also created an embedding matrix (index -> embedding). In order to generate the
    # number representations you should use EmbeddingMatrix.ipynb and fix appropriately the
    # paths below
    submit_file = "storage/datasets/q2b/results/delete.csv"
    train_file = "storage/datasets/q2b/preprocessed/train_quora_clean.csv"
    test_file = "storage/datasets/q2b/preprocessed/test_quora_clean.csv"
    numb_representations_train_file = "storage/datasets/q2b/word_embeddings/numb_represantation_train.pkl"
    numb_representations_test_file = "storage/datasets/q2b/word_embeddings/numb_represantation_test.pkl"
    embedding_matrix_file = "storage/datasets/q2b/word_embeddings/embeddings_matrix.npy"
    engineered_features_train_file = "storage/datasets/q2b/features/train_features.csv"
    engineered_features_test_file = "storage/datasets/q2b/features/test_features.csv"

    # Deleting previous checkpoints and logs
    if delete_checkpoints_and_logs:
        try:
            shutil.rmtree('checkpoints/')
            print(">>> Deleted previous checkpoints")
            shutil.rmtree('storage/logs')
            print(">>> Deleted previous logs")
        except FileNotFoundError:
            print("No checkpoints or logs found")

    # Setting memory growth of GPU so as TF does not allocate all the available memory
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
    # NOTE(review): set_memory_growth returns None; `config` is never used.
    config = tf.config.experimental.set_memory_growth(physical_devices[0], True)

    # Read the texts
    print(">>> Reading the texts...", end='')
    clean_train_df = pd.read_csv(train_file)
    clean_test_df = pd.read_csv(test_file)
    print("Done")

    # Load the embeddings
    print(">>> Reading the embeddings...", end='')
    embeddings = np.load(embedding_matrix_file)
    with open(numb_representations_train_file, 'rb') as handle:
        numb_representation_train = pickle.load(handle)
    with open(numb_representations_test_file, 'rb') as handle:
        numb_representation_test = pickle.load(handle)
    print("Done")

    # Load the engineered features
    if use_engineered_features:
        print(">>> Reading the engineered features...", end='')
        engineered_features_train = np.array(pd.read_csv(engineered_features_train_file))
        X_feat_test = np.array(pd.read_csv(engineered_features_test_file))
        # Last column of the train features file is the label.
        X_feat_train = engineered_features_train[:, :-1]
        y_feat_train = engineered_features_train[:, -1]
        # Fit the normalizer on train only, then apply to both splits.
        normalizer = preprocessing.Normalizer().fit(X_feat_train)
        X_feat_train = normalizer.transform(X_feat_train)
        X_feat_test = normalizer.transform(X_feat_test)
        print("Done")
    else:
        X_feat_train, X_feat_test, y_feat_train = None, None, None

    embedding_dims = len(embeddings[0])

    print(">>> Creating the datasets...", end='')
    X_train, X_validation, X_test, Y_train, Y_validation, max_seq_length = \
        create_dict_datasets(clean_train_df, numb_representation_train, numb_representation_test)
    if X_feat_train is not None:
        # Same seed as create_dict_datasets so feature rows stay aligned
        # with the text rows after the split.
        X_features_train, X_features_val, Y_features_train, Y_features_validation, feat_size = \
            split_engineered_features_dataset(X_feat_train, y_feat_train)
    else:
        feat_size = 0
        X_features_train, X_features_val = None, None
    print("Done")

    print(">>> Starting training!")
    if use_engineered_features:
        malstm = create_malstm_features_model(max_seq_length, embedding_dims, embeddings, feat_size)
    else:
        malstm = create_malstm_model(max_seq_length, embedding_dims, embeddings)

    # Optionally resume from a checkpoint.
    if model_checkpoint != "":
        malstm.load_weights(model_checkpoint)

    optimizer = Adam()

    malstm.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])

    # Tensorboard logging
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=tensorboard_dir, write_graph=False,
                                                          histogram_freq=5)

    # Start training
    training_start_time = time.time()

    checkpoint_path = "checkpoints/epoch_{epoch:04d}/cp.ckpt"
    # checkpoint_dir = os.path.dirname(checkpoint_path)

    # Create a callback that saves the model's weights
    malstm.save_weights(checkpoint_path.format(epoch=0))
    # NOTE(review): `period` is deprecated in tf.keras in favor of
    # `save_freq`; kept as-is for the pinned TF version.
    cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path, period=5,
                                                     save_weights_only=True,
                                                     verbose=1)

    # patience (8000) exceeds n_epoch (500), so this never stops training
    # early — it is used only for restore_best_weights at the end.
    early_stop_callback = tf.keras.callbacks.EarlyStopping(
        monitor='val_accuracy', min_delta=1e-2, patience=8000, verbose=1, restore_best_weights=True,
    )

    if feat_size == 0:
        malstm_trained = malstm.fit([X_train['left'], X_train['right'], ], Y_train, batch_size=batch_size, epochs=n_epoch,
                                    validation_data=([X_validation['left'], X_validation['right']], Y_validation),
                                    callbacks=[cp_callback, early_stop_callback, TqdmCallback(verbose=1),
                                               tensorboard_callback],
                                    verbose=0)
    else:
        malstm_trained = malstm.fit(
            x=[X_train['left'], X_train['right'], X_features_train], y=Y_train,
            batch_size=batch_size, epochs=n_epoch,
            validation_data=([X_validation['left'], X_validation['right'], X_features_val],
                             Y_validation),
            callbacks=[cp_callback, early_stop_callback, TqdmCallback(verbose=1),
                       tensorboard_callback],
            verbose=0
        )

    print(">>> Training Finished!")

    # check_validation_acc(malstm, X_validation, Y_validation)

    print(">>> Predicting test results with the best validation model...", end='')
    if X_feat_test is None:
        create_output_file(malstm, [X_test['left'], X_test['right']], submit_file,
                           max_seq_length, embeddings, embedding_dims, from_path=False,
                           path_to_test=test_file)
    else:
        create_output_file(malstm, [X_test['left'], X_test['right'], X_feat_test], submit_file,
                           max_seq_length, embeddings, embedding_dims, from_path=False,
                           path_to_test=test_file)
    print("Done")
|
MikeXydas/SiameseLSTM | SiameseLSTM/utils.py | <filename>SiameseLSTM/utils.py
import pandas as pd
import numpy as np
import itertools
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from tensorflow.keras.preprocessing.sequence import pad_sequences
def create_dict_datasets(clean_train_df, numb_represantation_train, numb_represantation_test, seed=1212, val_ratio=0.2):
    """Build padded left/right question datasets for the Siamese model.

    Args:
        clean_train_df: train dataframe; its `IsDuplicate` column supplies labels.
        numb_represantation_train: pairs (q1, q2) of integer-encoded train
            questions — assumed aligned row-for-row with `clean_train_df`
            (TODO confirm against EmbeddingMatrix.ipynb).
        numb_represantation_test: pairs (q1, q2) for the test set.
        seed: RNG seed for the train/validation split.
        val_ratio: fraction of the train set held out for validation.

    Returns:
        (X_train, X_validation, X_test, Y_train, Y_validation, max_seq_length)
        where each X is a dict {'left': ..., 'right': ...} of sequences padded
        to `max_seq_length`.
    """
    # Fixed padding length for all questions.
    max_seq_length = 30

    # Split to train validation
    validation_size = int(val_ratio * len(clean_train_df))
    # (removed unused `training_size` local)

    X_train_Q1 = [t[0] for t in numb_represantation_train]
    X_train_Q2 = [t[1] for t in numb_represantation_train]

    X_test_Q1 = [t[0] for t in numb_represantation_test]
    X_test_Q2 = [t[1] for t in numb_represantation_test]

    results = {
        "Q1": X_train_Q1,
        "Q2": X_train_Q2
    }
    X = pd.DataFrame.from_dict(results)
    Y = clean_train_df[['IsDuplicate']]

    results = {
        "Q1": X_test_Q1,
        "Q2": X_test_Q2
    }
    X_test = pd.DataFrame.from_dict(results)

    X_train, X_validation, Y_train, Y_validation = train_test_split(X, Y, test_size=validation_size,
                                                                    random_state=seed)

    # Split to dicts
    X_train = {'left': X_train.Q1, 'right': X_train.Q2}
    X_validation = {'left': X_validation.Q1, 'right': X_validation.Q2}
    X_test = {'left': X_test.Q1, 'right': X_test.Q2}

    Y_train = Y_train.values
    Y_validation = Y_validation.values

    # Zero-pad every side of every split to the common length.
    for dataset, side in itertools.product([X_train, X_validation, X_test], ['left', 'right']):
        dataset[side] = pad_sequences(dataset[side], maxlen=max_seq_length)

    return X_train, X_validation, X_test, Y_train, Y_validation, max_seq_length
def split_engineered_features_dataset(X_feat, y_feat, seed=1212, val_ratio=0.2):
    """Split the engineered-feature matrix into train/validation parts.

    Uses the same default seed as create_dict_datasets so the feature split
    stays aligned with the text split. Returns the four split arrays plus
    the feature-vector width.
    """
    n_validation = int(val_ratio * len(X_feat))
    split = train_test_split(X_feat, y_feat, test_size=n_validation, random_state=seed)
    features_train, features_val, labels_train, labels_val = split
    return features_train, features_val, labels_train, labels_val, X_feat.shape[1]
def check_validation_acc(model, X_validation, y_validation):
    """Print the model's accuracy on the validation split (labels from
    rounding the sigmoid outputs to 0/1)."""
    raw_scores = model.predict([X_validation['left'], X_validation['right']])
    hard_labels = np.round(raw_scores)[:, 0].astype(int)
    print(accuracy_score(y_validation, hard_labels))
MikeXydas/SiameseLSTM | SiameseLSTM/plot_creation.py | import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# Load TensorBoard scalar exports (CSVs with a `Value` column) for the
# 500-epoch run.
train_acc = pd.read_csv('../storage/datasets/q2b/plots/epochs500/run-train-tag-epoch_accuracy.csv')
val_acc = pd.read_csv('../storage/datasets/q2b/plots/epochs500/run-validation-tag-epoch_accuracy.csv')
train_loss = pd.read_csv('../storage/datasets/q2b/plots/epochs500/run-train-tag-epoch_loss.csv')
val_loss = pd.read_csv('../storage/datasets/q2b/plots/epochs500/run-validation-tag-epoch_loss.csv')

fig = plt.figure(figsize=(12, 5))
epochs = 500

# Left panel: train/validation loss curves.
plt.subplot(1, 2, 1)
plt.plot(np.arange(epochs), train_loss[:epochs].Value, label="Train", linewidth=3) #, marker='o')
plt.plot(np.arange(epochs), val_loss[:epochs].Value, label="Validation", linewidth=3) #, marker='o')
plt.legend(prop={'size': 18}, markerscale=5)
plt.title('Loss vs. Epochs', fontsize=22)
plt.xlabel("Epoch", fontsize=19)
plt.ylabel("BCE Loss", fontsize=19)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.grid()

# Right panel: train/validation accuracy curves.
plt.subplot(1, 2, 2)
plt.plot(np.arange(epochs), train_acc[:epochs].Value, label="Train", linewidth=3)#, marker='o')
plt.plot(np.arange(epochs), val_acc[:epochs].Value, label="Validation", linewidth=3)#, marker='o')
plt.legend(prop={'size': 18}, markerscale=5)
plt.title('Accuracy vs. Epochs', fontsize=22)
plt.xlabel("Epoch", fontsize=19)
plt.ylabel("Accuracy", fontsize=19)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.grid()

fig.tight_layout()
# plt.show()
# Write the combined figure to disk instead of displaying it (Agg-friendly).
plt.savefig('../storage/datasets/q2b/plots/lstm_dense_features_curves_500.png', bbox_inches='tight')
|
AbbVie-ComputationalGenomics/genetic-evidence-approval | src/ParseMeSH.py | <reponame>AbbVie-ComputationalGenomics/genetic-evidence-approval
from __future__ import print_function # for python2 compatibility
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 20 14:02:43 2018
@author: kingea
"""
# This script takes a zipped mesh xml file of the format available from the
# ftp server and returns and creates a
# text file parsing the output to give all names associated with a UI and
# to create a file with mapped headings for supplementary concepts, if using
# supplementary concepts, and a file with tree number if using MeSH heading.
import xmltodict
import gzip
import sys
# data_dir = '/Users/kingea/Downloads/'
# mesh_file = 'desc2018.gz'
data_dir = sys.argv[1]
mesh_file = sys.argv[2]
# Determine if mesh descriptors or mesh supplementary concept records
mesh_type = mesh_file[0:4]
if not (mesh_type=='desc' or mesh_type=='supp'):
print('unrecognized mesh file name', file=sys.stderr)
sys.exit()
# Year is used to label output automatically
mesh_year = mesh_file[4:8]
# check if extension is .gz
mesh_extension = mesh_file[8:11]
if not mesh_extension=='.gz':
print('expect .gz extension', file=sys.stderr)
sys.exit()
with gzip.open(data_dir + mesh_file) as fd:
doc = xmltodict.parse(fd.read())
# Fields accessed differ systematically between descriptor and supplementary concept
if mesh_type=='desc':
type_prefix1 = 'Descriptor'
type_prefix2 = 'Descriptor'
else:
type_prefix1 = 'Supplemental'
type_prefix2 = 'SupplementalRecord'
record_list = doc[type_prefix1 + 'RecordSet'][type_prefix1 + 'Record']
sr_name_out = open(data_dir + 'MeSH_' + mesh_type + '_' + mesh_year + '.tsv', 'w')
sr_name_out.write('UI' + '\t' + 'Name' + '\t' + 'Preferred' + '\n')
for record in record_list:
UI = record[type_prefix2 + 'UI']
Name = record[type_prefix2 + 'Name']['String']
# sr_name_out.write(UI + '\t' + Name + '\t' + 'TRUE' + '\n')
if not len(record['ConceptList'])==1:
print("Unexpected length")
if not isinstance(record['ConceptList']['Concept'], list):
concept_list = [record['ConceptList']['Concept']]
else:
concept_list = record['ConceptList']['Concept']
for concept in concept_list:
concept_name = concept['ConceptName']['String']
concept_preferred = concept['@PreferredConceptYN']
if isinstance(concept['TermList']['Term'], list):
term_list = concept['TermList']['Term']
else:
term_list = [concept['TermList']['Term']]
for term in term_list:
term_name = term['String']
term_preferred = term['@RecordPreferredTermYN']
if term_preferred == 'Y' and concept_preferred=='Y':
preferred = 'Y'
else:
preferred = 'N'
sr_name_out.write(UI + '\t' + term_name + '\t' + preferred + '\n')
if mesh_type=='supp':
sr_mapped_out = open(data_dir + 'MeSH_' + mesh_type + 'mapped_' + mesh_year + '.tsv', 'w')
sr_mapped_out.write('UI' + '\t' + 'MappedUI' + '\n')
for record in record_list:
UI = record['SupplementalRecordUI']
if not len(record['HeadingMappedToList'])==1:
print("Unexpected length")
if not isinstance(record['HeadingMappedToList']['HeadingMappedTo'], list):
mapped_list = [record['HeadingMappedToList']['HeadingMappedTo']]
else:
mapped_list = record['HeadingMappedToList']['HeadingMappedTo']
for heading in mapped_list:
mapped_UI = heading['DescriptorReferredTo']['DescriptorUI']
sr_mapped_out.write(UI + '\t' + mapped_UI + '\n')
if mesh_type=='desc':
sr_mapped_out = open(data_dir + 'MeSH_' + mesh_type + 'tree_' + mesh_year + '.tsv', 'w')
sr_mapped_out.write('UI' + '\t' + 'TreeNumber' + '\n')
for record in record_list:
UI = record['DescriptorUI']
# there are a few special terms that do not have a tree number
if 'TreeNumberList' in record:
if not len(record['TreeNumberList'])==1:
print("Unexpected length")
if not isinstance(record['TreeNumberList']['TreeNumber'], list):
tree_list = [record['TreeNumberList']['TreeNumber']]
else:
tree_list = record['TreeNumberList']['TreeNumber']
for tree_num in tree_list:
sr_mapped_out.write(UI + '\t' + tree_num + '\n') |
hoffmangroup/cla | claweb/claweb/controller/groups.py | """
Created on Oct 7, 2013
@author: mmendez
"""
import os
from jinja2 import Environment, FileSystemLoader
from ..model import groups as groups_model
def group(config_file, group_and_comparisons, group_id):
    """Render one group's HTML page under <output>/groups/.

    The 'default.tpl' shell is rendered with the 'group.tpl' child template;
    when the site url is '.', it is rewritten to '..' because the page lives
    one directory deeper than the site root.
    """
    view_dir = os.path.join(os.path.dirname(__file__), '..', 'view')
    env = Environment(loader=FileSystemLoader(view_dir))
    template = env.get_template('default.tpl')
    child_template = 'group.tpl'

    site = config_file['website'].copy()
    if site['url'] == '.':
        site['url'] = '..'

    # load the results (renamed from `group`, which shadowed this function)
    group_data = groups_model.group(config_file, group_and_comparisons, group_id)
    output = template.render(cl=group_data, site=site, tpl=child_template)

    output_dir = os.path.join(config_file['website']['output'], "groups")
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # ':' is not filesystem-safe everywhere, so it becomes '_' in filenames.
    out_path = os.path.join(output_dir, group_data['print_id'].replace(":", "_") + ".html")
    with open(out_path, "wb") as f:
        f.write(output.encode("utf-8"))
    print(out_path)
    print('group html generated: {}'.format(group_data['print_name']))
def group_list(config_file, group_and_comparisons):
    """Render the index page listing all groups to <output>/group_list.html."""
    view_dir = os.path.join(os.path.dirname(__file__), '..', 'view')
    env = Environment(loader=FileSystemLoader(view_dir))
    template = env.get_template('default.tpl')
    child_template = 'group_list.tpl'

    groups = groups_model.group_list(config_file, group_and_comparisons)
    output = template.render(groups=groups, site=config_file['website'], datasets=config_file['datasets'], tpl=child_template)

    out_path = os.path.join(config_file['website']['output'], "group_list.html")
    with open(out_path, "wb") as f:
        f.write(output.encode("utf-8"))
    print('group_list.html generated')
|
hoffmangroup/cla | claweb/claweb/f5clonto_helper/up_down_neither_count.py | <gh_stars>0
import os
import sys
import argparse
import pandas as pd
import json
from .. extra import GAC
from collections import defaultdict
def get_up_down_count(df):
    """Count, per (gene, group) pair, the comparisons where the gene is up-
    vs down-regulated.

    A positive t-test statistic means the gene is up in `cl1` (and therefore
    down in `cl2`); zero or negative means the reverse. Returns a defaultdict
    mapping (gene, group_id) -> {"up": n, "down": m}.
    """
    counts = defaultdict(lambda: {"up": 0, "down": 0})
    for _, row in df.iterrows():
        if row["t-test"] > 0:
            up_in, down_in = row["cl1"], row["cl2"]
        else:
            up_in, down_in = row["cl2"], row["cl1"]
        counts[(row.gene, up_in)]["up"] += 1
        counts[(row.gene, down_in)]["down"] += 1
    return counts
def main(gac_filepath, summary_filepath, dataset_name, rob=10, acc=.9, outdir='.'):
    """Compute up/down/neither comparison counts per gene and cell-line group
    and dump them as JSON to `outdir`.

    `rob` and `acc` filter the summary rows (robustness == rob, accuracy > acc).
    """
    # load GAC config file
    gac = GAC(gac_filepath)

    # load summary dataset
    df = pd.read_csv(summary_filepath, sep="\t")
    df = df[(df.robustness == rob) & (df.accuracy > acc)]

    # get dict[(gene, cl_id)] -> {"up": x, "down": y}
    up_down_dict = get_up_down_count(df)

    # add neither count by subtracting up + down to number of comparison
    # (assumes GAC.get_n_comp_from_id gives the total comparisons for the
    # group — TODO confirm)
    for k, v in up_down_dict.items():
        n_comp = gac.get_n_comp_from_id(k[1])
        up_down_dict[k]['neither'] = n_comp - (v['up'] + v['down'])

    # convert cl_id to cl_name and unstack the dict keys:
    # up_down_dict[(gene, cl_id)]: up_down_count -> res[gene][cl_name]: up_down_count
    res = defaultdict(lambda: dict())
    for (gene, cl_id), up_down_count in up_down_dict.items():
        cl_name = gac.get_name_from_group_id(cl_id)
        res[gene][cl_name] = up_down_count

    # save res as json
    outfile_id = f"{dataset_name}_rob{rob}acc{acc * 100}"
    with open(f"{outdir}/up_down_neither_counts_{outfile_id}.json", "w") as outfile:
        json.dump(dict(res), outfile)
def parse_args():
    """Parse CLI arguments; fill a default dataset_name from the summary path.

    NOTE(review): the --dataset_name help text claims the '_summary' suffix is
    stripped, but the default below keeps the full file stem — confirm intent.
    """
    cli = argparse.ArgumentParser(description='Collect lineage scores for each gene.')
    cli.add_argument('gac_filepath', type=str,
                     help='path to the yaml file with group and comparison definitions.')
    cli.add_argument('summary_filepath', type=str, help='path to the summary file.')
    cli.add_argument('--dataset_name', type=str, help='prefix of the output filename. '
                                                      'By default, it is what precede the pattern "_summary" '
                                                      'from `summary_filepath`.')
    cli.add_argument('--rob', type=int, default=10, help="robustness threshold (int).")
    cli.add_argument('--acc', type=float, default=.9, help="accuracy threshold (float).")
    cli.add_argument('--outdir', type=str, default=os.getcwd(), help="path to the output directory.")
    parsed = cli.parse_args(sys.argv[1:])
    if parsed.dataset_name is None:
        stem = os.path.splitext(parsed.summary_filepath)[0]
        parsed.dataset_name = os.path.basename(stem)
    return parsed
def cli_make_updown_count():
    """Console entry point: parse CLI options and run `main`."""
    args = parse_args()
    main(args.gac_filepath, args.summary_filepath, args.dataset_name,
         rob=args.rob, acc=args.acc, outdir=args.outdir)


if __name__ == "__main__":
    cli_make_updown_count()
|
hoffmangroup/cla | claweb/claweb/f5clonto_helper/ontoviewer.py | import argparse
import json
import sys
from abc import ABC, abstractmethod
import matplotlib.pyplot as plt
from matplotlib.patches import FancyBboxPatch
import numpy as np
import pandas as pd
import matplotlib
# Headless (file-only) backend; must be selected before any figure is made.
matplotlib.use(backend="Agg")

# Base font size for node label text throughout this module.
FONT_SIZE = 15
class NodePlotter(ABC):
    """Strategy interface: subclasses draw one ontology node's mini-chart
    (and its surrounding box) onto the shared figure."""

    @abstractmethod
    def plot_node(self, node, t_bbox, graph_w_display,
                  graph_h_display, ax, fig, pad=2):
        # node: graph-node dict; t_bbox: display-space bbox of the node's
        # text; graph_*_display: chart size in display units.
        pass
class MedianDERankPlotter(NodePlotter):
    """Draws each node's median DE rank as a red/grey horizontal bar,
    normalized by the largest median rank in the input JSON."""

    def __init__(self, filename):
        # filename: JSON mapping node name -> median rank.
        with open(filename) as fh:
            self.median_ranks = json.load(fh)
        # (name, value) pair with the largest median rank; printed for info.
        self.max_term = max(self.median_ranks.items(), key=lambda x: x[1])
        print(self.max_term)
        self.max_median = max(self.median_ranks.values())

    def get_median_count(self, node):
        """Return [red, 0, grey] bar fractions for `node`; unknown nodes get
        an all-grey bar ([0, 0, 1])."""
        # Node display names are multi-line and may carry a leading '*'.
        node_name = node["node_print_name"].replace("\n", " ").lstrip("*")
        if node_name in self.median_ranks:
            red_value = self.median_ranks[node_name] / self.max_median
            grey_value = (self.max_median - self.median_ranks[node_name]) / self.max_median
            values = [red_value, 0., grey_value]
        else:
            values = [0., 0., 1.]
        return values

    def plot_node(self, node, t_bbox, graph_w_display, graph_h_display, ax, fig, pad=2):
        # NOTE(review): this body is duplicated in UpDownNeitherPlotter
        # (only the value getter differs) — candidate for hoisting into
        # NodePlotter.
        # get graph coordinates (display space), centered under the text bbox
        graph_x_display = t_bbox.p0[0] + (t_bbox.width - graph_w_display) / 2  # x0 + margin
        graph_y_display = t_bbox.p0[1] - graph_h_display
        graph_x1_display, graph_y1_display = (graph_x_display + graph_w_display, t_bbox.p0[1])
        # convert to figure-fraction coordinates for add_axes
        graph_x_fig, graph_y_fig = fig.transFigure.inverted().transform((graph_x_display, graph_y_display))
        graph_w_fig, graph_h_fig = fig.transFigure.inverted().transform((graph_w_display, graph_h_display))

        # plot graph: one stacked horizontal bar (red / blue / grey segments)
        ax1 = fig.add_axes([graph_x_fig, graph_y_fig, graph_w_fig, graph_h_fig])
        up, down, other = self.get_median_count(node)
        ax1.barh([1], [up], color="#fb9a99", edgecolor="whitesmoke")
        ax1.barh([1], [down], left=[up], color="#a6cee3", edgecolor="whitesmoke")
        ax1.barh([1], [other], left=[up + down], color="lightgray", edgecolor="whitesmoke")
        ax1.set_xlim(-0.05, 1.05)
        ax1.set_ylim(0.45, 1.55)
        ax1.set_yticks([])
        ax1.set_xticks([])
        ax1.axis("off")

        # plot contour of text and graph: bounding box around both, in data coords
        major_x_data, major_y_data = ax.transData.inverted().transform(
            (min(t_bbox.p0[0], graph_x_display), t_bbox.p0[1] - graph_h_display))
        major_x1_data, major_y1_data = ax.transData.inverted().transform(
            (max(t_bbox.p1[0], graph_x1_display), t_bbox.p1[1]))
        major_w_data = major_x1_data - major_x_data
        major_h_data = major_y1_data - major_y_data
        rect = FancyBboxPatch((major_x_data - 1, major_y_data - 1), major_w_data + pad, major_h_data + pad, linewidth=1,
                              edgecolor=node["color"], facecolor='none', boxstyle="round,pad=10")
        p = ax.add_patch(rect)
        return p
class UpDownNeitherPlotter(NodePlotter):
    """Draws each node's up/down/neither comparison fractions for one gene
    as a stacked red/blue/grey horizontal bar."""

    def __init__(self, filename, gene):
        # filename: JSON produced by up_down_neither_count.py
        # (gene -> cell-line name -> {"up", "down", "neither"}).
        with open(filename) as fh:
            up_down_dict = json.load(fh)
        # rows = cell-line names, columns = up/down/neither counts
        self.up_down_df = pd.DataFrame.from_dict(up_down_dict[gene]).T

    def get_up_down_neither_count(self, node):
        """Return [up, down, neither] fractions for `node`; nodes absent from
        the table get an all-grey bar ([0, 0, 1])."""
        columns = ["up", "down", "neither"]
        node_name = node["shared_name"]
        if node_name in self.up_down_df.index:
            values = self.up_down_df.loc[node_name, columns]
            values = values / values.sum()
            values = values.tolist()
        else:
            values = [0., 0., 1.]
        return values

    def plot_node(self, node, t_bbox, graph_w_display, graph_h_display, ax, fig, pad=2):
        # NOTE(review): body duplicated from MedianDERankPlotter.plot_node
        # (only the value getter differs) — candidate for hoisting into
        # NodePlotter.
        # get graph coordinates (display space), centered under the text bbox
        graph_x_display = t_bbox.p0[0] + (t_bbox.width - graph_w_display) / 2  # x0 + margin
        graph_y_display = t_bbox.p0[1] - graph_h_display
        graph_x1_display, graph_y1_display = (graph_x_display + graph_w_display, t_bbox.p0[1])
        # convert to figure-fraction coordinates for add_axes
        graph_x_fig, graph_y_fig = fig.transFigure.inverted().transform((graph_x_display, graph_y_display))
        graph_w_fig, graph_h_fig = fig.transFigure.inverted().transform((graph_w_display, graph_h_display))

        # plot graph: one stacked horizontal bar (red / blue / grey segments)
        ax1 = fig.add_axes([graph_x_fig, graph_y_fig, graph_w_fig, graph_h_fig])
        up, down, other = self.get_up_down_neither_count(node)
        ax1.barh([1], [up], color="#fb9a99", edgecolor="whitesmoke")
        ax1.barh([1], [down], left=[up], color="#a6cee3", edgecolor="whitesmoke")
        ax1.barh([1], [other], left=[up + down], color="lightgray", edgecolor="whitesmoke")
        ax1.set_xlim(-0.05, 1.05)
        ax1.set_ylim(0.45, 1.55)
        ax1.set_yticks([])
        ax1.set_xticks([])
        ax1.axis("off")

        # plot contour of text and graph: bounding box around both, in data coords
        major_x_data, major_y_data = ax.transData.inverted().transform(
            (min(t_bbox.p0[0], graph_x_display), t_bbox.p0[1] - graph_h_display))
        major_x1_data, major_y1_data = ax.transData.inverted().transform(
            (max(t_bbox.p1[0], graph_x1_display), t_bbox.p1[1]))
        major_w_data = major_x1_data - major_x_data
        major_h_data = major_y1_data - major_y_data
        rect = FancyBboxPatch((major_x_data - 1, major_y_data - 1), major_w_data + pad, major_h_data + pad, linewidth=1,
                              edgecolor=node["color"], facecolor='none', boxstyle="round,pad=10")
        p = ax.add_patch(rect)
        return p
def plot_figure(graph_filename, plotter, outfile="ontoviewer_plot.pdf"):
    """Plot the ontology graph described by *graph_filename* (JSON) to *outfile*.

    graph_filename: JSON with "nodes" and "edges" dicts; nodes of type
        "chart_node" get a bar chart via *plotter*.plot_node, nodes of type
        "short_node" are drawn as boxed labels.
    plotter: object exposing plot_node(node, t_bbox, w, h, ax, fig).
    Relies on module-level json, plt, np and FONT_SIZE.
    """
    with open(graph_filename) as fh:
        graph = json.load(fh)
    nodes, edges = (graph["nodes"], graph["edges"])
    fig, ax = plt.subplots(figsize=(35, 25))
    # fixed data limits sized for the cytoscape-exported coordinates
    ax.set_ylim(-100, 2700)
    ax.set_xlim(-150, 2500)
    node_to_text_display = {}
    node_to_patch = {}
    node_to_patch_center = {}
    chart_nodes = {n_id: n
                   for n_id, n in nodes.items()
                   if n["type"] == "chart_node"}
    short_nodes = {n_id: n
                   for n_id, n in nodes.items()
                   if n["type"] == "short_node"}
    # first pass: draw labels and record their display-space bounding boxes
    for node_id, node in chart_nodes.items():
        t = ax.text(node["x"], node["y"], node["node_print_name"],
                    ha="center", va="center", fontsize=FONT_SIZE)
        renderer = fig.canvas.get_renderer()
        # get display coord of the text (bbox not included)
        bbox_text = t.get_window_extent(renderer=renderer)
        node_to_text_display[node_id] = bbox_text
    # shared chart size: narrowest label width, scaled mean label height
    # (despite the name, wideleast_display is the *minimum* width)
    wideleast_display = min([node.width for node in node_to_text_display.values()])
    mean_height_display = np.mean([node.height for node in node_to_text_display.values()])
    mean_height_display *= .7  # TODO: why this number? make height smaller
    # plot chart nodes
    for node_id, node in chart_nodes.items():
        p = plotter.plot_node(node, node_to_text_display[node_id],
                              wideleast_display + 40, mean_height_display, ax, fig)
        node_to_patch[node_id] = p
        # patch centers are used as edge-arrow anchor points below
        patch_center = {
            "x": p.get_x() + p.get_width() / 2,
            "y": p.get_y() + p.get_height() / 2}
        node_to_patch_center[node_id] = patch_center
    # plot short nodes (sac and mc)
    for node_id, node in short_nodes.items():
        node_to_patch_center[node_id] = {"x": node["x"], "y": node["y"]}
        bbox = dict(facecolor='none', edgecolor=node["color"], alpha=1, boxstyle="round,pad=.3")
        t = ax.text(node["x"], node["y"], node["node_print_name"], ha="center", va="center", fontsize=FONT_SIZE, bbox=bbox)
        node_to_patch[node_id] = t
    # plot edges
    for edge_id, edge in edges.items():
        # arrows are drawn from the edge's target toward its source
        u, v = (edge["target"], edge["source"])
        pos_u, pos_v = (node_to_patch_center[u], node_to_patch_center[v])
        arrow_coord = (pos_u["x"], pos_u["y"], pos_v["x"], pos_v["y"])
        ux, uy, vx, vy = arrow_coord
        ax.annotate("", (vx, vy), xytext=(ux, uy), arrowprops=dict(
            facecolor='black', patchA=node_to_patch[u], patchB=node_to_patch[v],
            shrinkA=.1, shrinkB=20, lw=.01, ec="black", headwidth=5, headlength=6, width=.5))
    ax.axis("off")
    # outname = "cl_spi1_newline_script" # TODO: make parameter
    # exts = ["png", "pdf", "svg"]
    # exts = ["pdf"]
    # exts = ["png"]
    # for ext in exts:
    #     fig.savefig(f"{outname}.{ext}", bbox_inches='tight')
    fig.savefig(outfile, bbox_inches='tight')
    plt.close(fig)
def cli_plot_median_de_gene_rank(args=sys.argv[1:]):
    """CLI entry point: plot the ontology graph with median DE-gene ranks.

    Previously this called plot_figure(graph_json, lineage_scores, gene,
    outfile) — four arguments against plot_figure's three parameters, two of
    which (lineage_scores, gene) did not even exist on the parsed namespace —
    and discarded the plotter it had just built.  Every invocation crashed.
    """
    args = median_de_gene_rank_parse_args(args)
    node_plotter = MedianDERankPlotter(args.de_median_rank)
    plot_figure(args.graph_json, node_plotter, args.outfile_name)


def median_de_gene_rank_parse_args(args=None):
    """Parse CLI arguments for the median-DE-rank plot.

    args: argument list; None lets argparse fall back to sys.argv[1:],
    keeping the old no-argument call style working.
    """
    parser = argparse.ArgumentParser(description='Plot a graph with median rank of DE genes for best CLA genes in nodes.')
    parser.add_argument('graph_json', type=str,
                        help='path to the graph description in json format.')
    # renamed from `lineage_scores`: the entry point reads args.de_median_rank
    parser.add_argument('de_median_rank', type=str, help='path to the file containing median ranks in json format.')
    parser.add_argument('--outfile_name', type=str, help="path to the output file.")
    args = parser.parse_args(args)
    if args.outfile_name is None:
        # this plot has no `gene` argument (the old default read the
        # non-existent args.gene), so fall back to a fixed name
        args.outfile_name = "median_de_gene_rank.pdf"
    return args
def cli_plot_up_down_neither(args=sys.argv[1:]):
    """CLI entry point: plot the graph with per-node up/down/neither bars.

    Fixed to forward *args* to the parser — previously the parameter was
    accepted and then silently ignored in favor of sys.argv.
    """
    args = up_down_neither_parse_args(args)
    node_plotter = UpDownNeitherPlotter(args.lineage_scores, args.gene)
    plot_figure(args.graph_json, node_plotter, args.outfile_name)


def up_down_neither_parse_args(args=None):
    """Parse CLI arguments for the up/down/neither plot.

    args: argument list; None lets argparse fall back to sys.argv[1:], so
    existing no-argument callers keep working.  (The old body hard-coded
    sys.argv[1:], which made the function untestable.)
    """
    parser = argparse.ArgumentParser(description='Plot a graph with lineage scores in nodes.')
    parser.add_argument('graph_json', type=str,
                        help='path to the graph description in json format.')
    parser.add_argument('lineage_scores', type=str, help='path to the file containing lineage scores.')
    parser.add_argument('gene', type=str, help='plot the lineage scores of `gene`. '
                                               'The gene should be in the lineage score file.')
    parser.add_argument('--outfile_name', type=str, help="path to the output file.")
    args = parser.parse_args(args)
    if args.outfile_name is None:
        args.outfile_name = f"{args.gene}.pdf"
    return args
if __name__ == "__main__":
cli_plot_up_down_neither() |
hoffmangroup/cla | claweb/claweb/controller/genes.py | <filename>claweb/claweb/controller/genes.py
__author__ = 'mickael'
from jinja2 import Environment, FileSystemLoader
from ..model import genes as genes_model
# from ..model import gene_dist as gene_dist_model
import os
def gene_list(config_file, group_and_comparisons):
    """Render the gene-list page and write it into the website output folder."""
    view_dir = os.path.join(os.path.dirname(__file__), '..', 'view')
    jinja_env = Environment(loader=FileSystemLoader(view_dir))
    base_template = jinja_env.get_template('default.tpl')
    datasets_genes, datasets_name = genes_model.gene_list(config_file, group_and_comparisons)
    site = config_file['website']
    html = base_template.render(d=datasets_genes, site=site,
                                datasets_name=datasets_name, tpl='gene_list.tpl')
    page_path = os.path.join(site['output'], "gene_list.html")
    with open(page_path, "wb") as page:
        page.write(html.encode("utf-8"))
    print('gene_list generated')
def gene_card(config_file, group_and_comparisons, dataset, gene):
    """Render one gene's card page under website/genes/<dataset name>/."""
    view_dir = os.path.join(os.path.dirname(__file__), '..', 'view')
    jinja_env = Environment(loader=FileSystemLoader(view_dir))
    base_template = jinja_env.get_template('default.tpl')
    site = config_file['website'].copy()
    if site['url'] == '.':
        # gene pages live two directory levels below the site root
        site['url'] = '../..'
    d, gene_dist_url = genes_model.gene_card(config_file, group_and_comparisons, dataset, gene)
    html = base_template.render(gene=d, site=site, gene_dist=gene_dist_url, tpl='gene_card.tpl')
    output_dir = os.path.join(config_file['website']['output'], "genes", dataset['name'])
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    page_path = os.path.join(output_dir, gene + ".html")
    with open(page_path, "wb") as page:
        page.write(html.encode("utf-8"))
    print('gene_card generated', gene)
# def gene_dist(config_file, group_and_comparisons, dataset, gene):
# template_folder = os.path.join(os.path.dirname(__file__), '..', 'view')
# env = Environment(loader=FileSystemLoader(template_folder))
# template = env.get_template('default.tpl')
# child_template = 'gene_dist.tpl'
#
# my_plot = gene_dist_model.gene_dist(config_file, group_and_comparisons, dataset, gene)
#
# output = template.render(gene=gene, my_plot=my_plot, site=config_file['website'], tpl=child_template)
#
# with open(os.path.join(config_file['website']['output'], 'gene_distribution', dataset, gene + ".html"), "wb") as f:
# f.write(output.encode("utf-8"))
|
hoffmangroup/cla | skrrf/bin/run_test.py | from skrrf._forest import RegularizedRandomForestClassifier
import numpy as np
# Toy dataset: 56 samples x 3 features; the first two features mostly
# separate the classes, the third splits the majority class in half.
X = np.array([
    [0, 0, 0, 3, 3, 3] + [3] * 50,
    [0, 0, 4, 3, 3, 2] + [10] * 50,
    [0, 2, 4, 1, 3, 5] + [0] * 25 + [1] * 25,
]).transpose()
# Heavily imbalanced labels: 3 zeros vs 53 ones.
y = [0, 0, 0, 1, 1, 1] + [1] * 50
# size of the rarest class, used to cap the per-tree bootstrap sample count
min_sample_count = np.bincount(y).min()
clf = RegularizedRandomForestClassifier(
    n_estimators=20,
    random_state=0,
    n_jobs=1,
    class_weight="balanced",
    max_samples=min_sample_count * 2)
clf.fit(X, y)
|
hoffmangroup/cla | claweb/claweb/model/genes.py | __author__ = 'mickael'
import pandas as pd
import os
def gene_list(config_file, group_and_comparisons):
    """Per dataset, list the genes passing the robustness/accuracy filter.

    Returns (datasets_genes, datasets_name) where datasets_genes is a list of
    {'name', 'genes'} dicts and datasets_name a comma-joined dataset-name string.
    """
    datasets_genes = []
    datasets_name = ','.join(dataset['name'] for dataset in config_file['datasets'])
    for dataset in config_file['datasets']:
        summary = pd.read_csv(dataset['summary'], sep='\t')
        passing = summary[(summary.robustness == 10) & (summary.accuracy > .9)]
        gene_ids = passing.gene.value_counts().index.tolist()
        # sort alphabetically on the short gene name (last ':' segment)
        gene_ids.sort(key=lambda g: g.split(':')[-1].lower())
        genes = [{"name": g.split(':')[-1], "filename": g + '.html'}
                 for g in gene_ids]
        datasets_genes.append({'name': dataset['name'], 'genes': genes})
    return datasets_genes, datasets_name
def gene_card(config_file, group_and_comparisons, dataset, gene):
    """Collect per-group comparison results for one gene in one dataset.

    For every group, counts in how many successful comparisons the gene is
    expressed higher in that group ("up") vs in the compared group ("down").
    Returns (card_dict, gene_dist_url).
    """
    groupid_to_name = {group['id']: group['name'] for group in group_and_comparisons['group_definitions']}
    group_name_to_print_name = {group['name']: group['print_name'] for group in group_and_comparisons['group_definitions']}
    df = pd.read_csv(dataset['summary'], sep='\t')
    # total number of distinct cell types seen on either side of any comparison
    n_cl = len(set(df.cl1.tolist() + df.cl2.unique().tolist()))
    # ndf[(df.robustness == 10) & (df.accuracy > .9) & (df.gene == gene)]
    # df = df[(df.robustness == 10) & (df.accuracy > .9) & (df.gene == gene)]
    groups = []
    for group in group_and_comparisons['group_definitions']:
        # all comparisons this group participates in (either side)
        group_df = df[(df.cl1 == group['id']) | (df.cl2 == group['id'])]
        n_comp = group_df.id.unique().size
        # comparisons where this gene is a robust, accurate separator
        success_df = group_df[(group_df.robustness == 10) & (group_df.accuracy > .9) & (group_df.gene == gene)]
        if success_df.empty:
            continue
        rows = success_df[['cl1', 'cl2', 't-test', 'p-value']].values.tolist()
        # strongest separation first
        rows.sort(key=lambda x: abs(x[2]), reverse=True)
        final_rows = []
        higher_in_this = 0
        higher_in_compared = 0
        for group1, group2, ttest, pval in rows:
            name1 = groupid_to_name[group1]
            name2 = groupid_to_name[group2]
            # row_score is a one-hot [up, down] flag for the template
            row_score = [0, 0]
            if name1 == group["name"]:
                this_cl = name1
                comparison_cl = name2
            else:
                this_cl = name2
                comparison_cl = name1
            # assumes a positive t-statistic means higher expression in cl1
            # (group1) — TODO confirm against run_single_comp.ttest ordering
            if ttest >= 0:
                higher_in = name1
            else:
                higher_in = name2
            if higher_in == this_cl:
                higher_in_this += 1
                row_score[0] = 1
            else:
                higher_in_compared += 1
                row_score[1] = 1
            final_rows.append([group_name_to_print_name[comparison_cl]] + row_score)
        groups.append({'name': group['name'],
                       'print_name': group['print_name'],
                       'rows': final_rows,
                       'up_count': higher_in_this,
                       'down_count': higher_in_compared,
                       'neither': n_comp - higher_in_this - higher_in_compared},
                      )
    # most informative groups (most up+down calls) first
    groups.sort(key=lambda x: x['up_count'] + x['down_count'], reverse=True)
    d = {
        "name": gene.split(':')[-1],
        "infos": groups,
        "dataset": dataset['name'],
        "coordinates": gene,
        'n_cl': n_cl
    }
    gene_dist_url = os.path.join(config_file['website']['url'], 'gene_distribution', dataset['name'], gene)
    return d, gene_dist_url
|
hoffmangroup/cla | claweb/claweb/extra.py | <gh_stars>0
__author__ = 'mickael'
import os
import yaml
def load_config(config_path):
    """Load and normalize the claweb config.

    config_path: path to a yaml file, or an already-loaded config dict.
    Fills in defaults for 'basedir', per-dataset 'output'/'summary' paths and
    the 'website' section.  Returns the normalized config dict (mutated in
    place when a dict is passed).
    """
    assert isinstance(config_path, str) or isinstance(config_path, dict)
    if isinstance(config_path, str):
        with open(config_path) as yaml_file:
            cfg = yaml.safe_load(yaml_file)
    else:
        cfg = config_path
    assert 'datasets' in cfg
    if 'basedir' not in cfg:
        cfg['basedir'] = os.getcwd()
    elif cfg['basedir'] == 'config':
        # the literal value 'config' means "directory of the config file";
        # only meaningful when config_path is a path, not a dict
        cfg['basedir'] = os.path.dirname(config_path)
    for dataset in cfg['datasets']:
        assert 'name' in dataset
        assert 'n_tree' in dataset
        if 'output' not in dataset:
            output = '{}_{}'.format(dataset['name'], dataset['n_tree'])
            dataset['output'] = os.path.join(cfg['basedir'], output)
        if 'summary' not in dataset:
            # NOTE(review): dataset['output'] may already be an absolute path
            # containing basedir; the join then works only because
            # os.path.join discards basedir for absolute second arguments —
            # verify behavior when 'output' is given relative in the config.
            summary = '{}_summary.tsv'.format(dataset['output'])
            dataset['summary'] = os.path.join(cfg['basedir'], summary)
    if 'website' not in cfg:
        cfg['website'] = dict()
        cfg['website']['output'] = os.path.join(cfg['basedir'], 'website')
        cfg['website']['url'] = '.'
    return cfg
def load_gac(gac_file):
    """Load the group-and-comparisons config.

    gac_file: yaml path or an already-loaded dict; dicts are validated and
    passed through unchanged.
    """
    assert isinstance(gac_file, (str, dict))
    if isinstance(gac_file, str):
        with open(gac_file) as yaml_file:
            gac = yaml.safe_load(yaml_file)
    else:
        gac = gac_file
    for required in ('comparisons', 'group_definitions'):
        assert required in gac
    return gac
def load_configs(config_file, group_and_comparisons):
    """Load and normalize both configuration files in one call."""
    return load_config(config_file), load_gac(group_and_comparisons)
def get_dict_from_list_dict(l, key, value):
    """Return the first dict in *l* whose *key* maps to *value*, else None."""
    return next((d for d in l if d[key] == value), None)
class GAC:
    """Group And Comparison: thin accessor over the GAC yaml config."""

    def __init__(self, config_path):
        with open(config_path) as f:
            self.config = yaml.safe_load(f)

    @property
    def comps(self):
        """List of comparison definitions."""
        return self.config["comparisons"]

    @property
    def groups(self):
        """List of group definitions."""
        return self.config["group_definitions"]

    def get_dict_from_id(self, l, _id):
        return get_dict_from_list_dict(l, "id", _id)

    def get_comp_from_id(self, _id):
        """Comparison dict with the given id, or None."""
        return self.get_dict_from_id(self.comps, _id)

    def get_group_from_id(self, _id):
        """Group dict with the given id, or None."""
        return self.get_dict_from_id(self.groups, _id)

    def get_samples_from_group_id(self, _id):
        return self.get_group_from_id(_id)["samples"]

    def get_name_from_group_id(self, _id):
        return self.get_group_from_id(_id)["name"]

    def get_group_from_name(self, name):
        return get_dict_from_list_dict(self.groups, "name", name)

    def get_n_comp_from_id(self, _id):
        """Number of comparisons in which *_id* appears as any value."""
        return sum(1 for comp in self.comps if _id in comp.values())
# import numpy as np
# import pandas as pd
# from scipy.cluster.hierarchy import linkage, leaves_list
# def sort_samples_by_correlation(group_and_comparisons):
#
# groups = group_and_comparisons['group_definitions']
#
# samples_list = [group['samples'] for group in groups]
# samples_flatten = [e for samples in samples_list for e in samples]
# samples_index = pd.Index(set(samples_flatten))
#
# m = [[1 if samples in group['samples'] else 0 for samples in samples_index] for group in groups]
# m_t = np.transpose(m)
#
# columns = [group['name'] for group in groups]
#
# sample_cl_df = pd.DataFrame(m_t, columns=columns, index=samples_index)
# term_correlation = sample_cl_df.corr()
#
# ln = linkage(term_correlation)
# index_oder = leaves_list(ln)
#
# return term_correlation.iloc[:, index_oder].columns
|
hoffmangroup/cla | claweb/claweb/model/comparisons.py | __author__ = 'mickael'
import os
from collections import defaultdict
import pandas as pd
def comparison_url(cfg, comp_id):
    """Site-relative URL of the page for comparison *comp_id*."""
    page_name = '{}.html'.format(comp_id)
    return os.path.join(cfg['website']['url'], 'comparisons', page_name)
def gene_dist_url(cfg, dataset_name, gene):
    """Site-relative URL of the expression-distribution page for *gene*."""
    site_root = cfg['website']['url']
    return os.path.join(site_root, 'gene_distribution', dataset_name, gene)
def comparison_list(config_file, group_and_comparisons):
    """Group every comparison (in both directions) under its first group's name.

    Returns {group1_name: [{'url', 'group1', 'group2'}, ...]} with the outer
    dict ordered by group name.
    """
    group_to_name = {g['id']: g['name'] for g in group_and_comparisons['group_definitions']}
    forward = group_and_comparisons['comparisons']
    # each comparison also appears reversed, so both groups get an entry
    backward = [{'id': c['id'], 'group1': c['group2'], 'group2': c['group1']}
                for c in forward]
    results = defaultdict(list)
    for comp in forward + backward:
        entry = {
            'url': comparison_url(config_file, comp['id']),
            'group1': group_to_name[comp['group1']],
            'group2': group_to_name[comp['group2']]
        }
        results[entry['group1']].append(entry)
    return {name: results[name] for name in sorted(results)}
def comparison(config_file, group_and_comparisons, comp_id):
    """Per dataset, collect the genes that robustly separate the two groups
    of comparison *comp_id*, sorted by decreasing |t-statistic|.

    Returns (dataset_genes, group1_name, group2_name).
    """
    dataset_genes = []
    group_to_name = {group['id']: group['name'] for group in group_and_comparisons['group_definitions']}
    comp = [comp for comp in group_and_comparisons['comparisons'] if comp['id'] == comp_id][0]
    group1_name = group_to_name[comp['group1']]
    group2_name = group_to_name[comp['group2']]
    for dataset in config_file['datasets']:
        df = pd.read_csv(dataset['summary'], sep='\t')
        # keep only robust, accurate results for this comparison
        df = df[(df.robustness == 10) & (df.accuracy > .9) & (df.id == comp_id)]
        df['name1'] = df['cl1'].apply(lambda x: group_to_name[x])
        df['name2'] = df['cl2'].apply(lambda x: group_to_name[x])
        df['short_name'] = df['gene'].apply(lambda x: x.split(':')[-1])
        # df['gene_dist_url'] = [gene_dist_url(config_file, dataset['name'], gene) for gene in df['gene']]
        # order rows by the magnitude of the separation
        sort_by_abs_ttest_index = df['t-test'].abs().sort_values(ascending=False).index
        df = df.reindex(sort_by_abs_ttest_index)
        # one dict per row for the template
        rows = df.T.to_dict().values()
        dataset_genes.append({'name': dataset['name'], 'n_genes': len(rows), 'rows': rows})
    return dataset_genes, group1_name, group2_name
|
hoffmangroup/cla | claweb/claweb/model/summary.py | import pandas as pd
def get_number_of_cell_types_with_results(cfg):
    """Return the largest count of distinct cell types seen in any dataset's
    summary (cl1 and cl2 columns pooled together)."""
    counts = []
    for dataset in cfg['datasets']:
        summary = pd.read_csv(dataset['summary'], sep='\t')
        cell_types = pd.concat([summary.cl1, summary.cl2]).unique()
        counts.append(cell_types.size)
    return max(counts)
|
hoffmangroup/cla | claweb/claweb/run_single_comp.py | <reponame>hoffmangroup/cla
__author__ = 'mickael'
import argparse
import os
import sys
from collections import defaultdict
from itertools import combinations
import numpy as np
import pandas as pd
import sklearn
from scipy import stats
from skrrf._forest import RegularizedRandomForestClassifier
from . import extra
def hamming_distance(list1, list2):
    """calculate hamming distance between two Pandas.Series"""
    mismatches = list1 != list2
    return sum(mismatches)
def ttest(S, groups):
    """Two-sample t-test of *S* split by the 0/1 labels in *groups*.

    NaNs in S are replaced by 0 before testing.  Returns the
    scipy.stats.ttest_ind result (statistic, pvalue).
    """
    S = S.fillna(0.)
    sample_a = S[groups[groups == 0].index]
    sample_b = S[groups[groups == 1].index]
    return stats.ttest_ind(sample_a, sample_b)
def get_similar_genes(cur_expression_table, sample_mapper, n_rep=3):
    """Group genes whose expression induces identical orderings of sample classes.

    For each of *n_rep* passes, every gene's samples are shuffled (to break
    ties randomly), sorted by expression, and mapped to class labels via
    *sample_mapper*.  Two genes are "similar" when their label sequences match
    exactly (Hamming distance 0) in every pass.

    Returns (tracker, similar_genes): *tracker* is the set of all genes that
    belong to some similarity group; *similar_genes* maps a representative
    gene to the list of genes similar to it.
    NOTE(review): assumes the table is non-empty — genes[0] raises on an
    empty index.
    """
    genes = cur_expression_table.index
    # pre-sort classes based on expression
    gene_to_sorted_classes_list = []
    for i in range(n_rep):
        gene_to_sorted_classes = {}
        for gene, row in cur_expression_table.iterrows():
            # shuffle before sorting so equal values land in random order
            sorted_indexs = sklearn.utils.shuffle(row).sort_values().index
            sorted_classes = sorted_indexs.map(sample_mapper)
            gene_to_sorted_classes[gene] = sorted_classes
        gene_to_sorted_classes_list.append(gene_to_sorted_classes)
    similar_genes = defaultdict(list)
    tracker = set()
    first = genes[0]
    for gene_i, gene_j in combinations(genes, 2):
        # `first` caches the current gene_i so the tracker membership test
        # runs once per gene_i, not once per pair
        if not first == gene_i:
            if gene_i in tracker:
                continue
            first = gene_i
        # gene_j already absorbed into another group: skip the pair
        if gene_j in tracker:
            continue
        genes_are_similar = True
        for gene_to_sorted_classes in gene_to_sorted_classes_list:
            hamming_dist = hamming_distance(
                gene_to_sorted_classes[gene_i],
                gene_to_sorted_classes[gene_j])
            # any mismatch in any repetition disqualifies the pair
            if hamming_dist >= 1:
                genes_are_similar = False
                break
        if genes_are_similar:
            similar_genes[gene_i].append(gene_j)
            tracker.add(gene_i)
            tracker.add(gene_j)
    return tracker, similar_genes
def get_perfect_rows(df, samples1, samples2):
    """Perfect rows have a threshold value that can perfectly separate the samples.
    For example, a row with values above 10 in `samples1` and values below 10 in `samples2` is a perfect row.
    Returns the Index of all such rows (either group may be the higher one)."""
    group1 = df[samples1]
    group2 = df[samples2]
    group1_above = df.loc[group1.min(1) > group2.max(1)].index
    group2_above = df.loc[group2.min(1) > group1.max(1)].index
    return group1_above.union(group2_above)
def run_one(config_file, comp_file, comp_id):
    """Run the regularized random forest for one comparison on every dataset.

    For each dataset, fits 10 forests with different seeds, then writes
    <id>_fi.tsv (per-run feature importances) and <id>_comp.tsv (summary
    rows with robustness, mean OOB accuracy and t-test stats) into the
    dataset's output directory.  Datasets whose outputs already exist are
    skipped.
    """
    cfg = extra.load_config(config_file)
    gac = extra.GAC(comp_file)
    comparison = gac.get_comp_from_id(comp_id)
    for dataset in cfg['datasets']:
        output_basename = os.path.join(dataset['output'], str(comparison['id']))
        if os.path.exists(output_basename + "_fi.tsv") and os.path.exists(output_basename + "_comp.tsv"):
            continue
        if not os.path.exists(dataset['output']):
            os.makedirs(dataset['output'])
        expression_table = pd.read_csv(dataset['expression_table'], sep='\t', index_col=0)
        samples1 = gac.get_samples_from_group_id(comparison['group1'])
        samples2 = gac.get_samples_from_group_id(comparison['group2'])
        # map each sample name to the first expression-table column containing it
        try:
            samples1 = [[s for s in expression_table.columns if sample in s][0] for sample in samples1]
            samples2 = [[s for s in expression_table.columns if sample in s][0] for sample in samples2]
        except IndexError:
            # was a bare `except` printing `sample`, a name that does not
            # leak out of a Python-3 comprehension (NameError in the
            # handler); report every missing pattern explicitly instead
            missing = [sample for sample in samples1 + samples2
                       if not any(sample in s for s in expression_table.columns)]
            print("Can not find the following pattern in the expression table: ", ", ".join(missing))
        sample_mapper = {s: 0 for s in samples1}
        sample_mapper.update({s: 1 for s in samples2})
        cur_expression_table = expression_table[samples1 + samples2]
        # remove genes with no variance
        cur_expression_table = cur_expression_table[cur_expression_table.var(1) != 0]
        # find perfect rows (a single threshold separates the two groups)
        perfect_rows = get_perfect_rows(cur_expression_table, samples1, samples2)
        filtered_index = cur_expression_table.index.difference(perfect_rows)
        # find genes with same hamming distance
        tracker, similar_genes = get_similar_genes(cur_expression_table.loc[filtered_index], sample_mapper)
        # when genes are similar, only use one representative for training
        filtered_index = filtered_index.difference(
            list(tracker)).union(
            similar_genes.keys())
        expression_table_filtered = cur_expression_table.loc[filtered_index]
        y = pd.Series([sample_mapper[sample] for sample in cur_expression_table.columns])
        min_sample_count = min(len(samples1), len(samples2))
        results = pd.DataFrame(index=expression_table_filtered.index)
        accuracies = []
        expression_table_filtered = expression_table_filtered.T
        # 10 runs with different seeds; robustness = how often a gene is selected
        for i in range(10):
            rrf = RegularizedRandomForestClassifier(n_estimators=int(dataset['n_tree']), oob_score=True,
                                                    random_state=i, n_jobs=1, max_samples=min_sample_count * 2)
            rrf.stratified_down_sampling = True
            accuracies.append(rrf.fit(expression_table_filtered.values, y.values).oob_score_)
            results[i] = rrf.feature_importances_
        results.to_csv(os.path.join(dataset['output'], str(comparison['id']) + '_fi.tsv'), sep='\t')
        gene_count = (results > 0).sum(1).sort_values()
        rows = []
        # Series.iteritems() was removed in pandas 2.0; items() is equivalent
        for gene, count in gene_count[gene_count > 0].items():
            # propagate the representative's count to its similar genes
            for gene in [gene] + similar_genes[gene]:
                tstat, pvalue = ttest(cur_expression_table.loc[gene], y)
                rows.append([comparison['id'], comparison['group1'], comparison['group2'],
                             gene, count, np.mean(accuracies), tstat, pvalue, 0])
        # perfect rows get maximal robustness and the p_row flag set
        for gene in perfect_rows:
            tstat, pvalue = ttest(cur_expression_table.loc[gene], y)
            rows.append([comparison['id'], comparison['group1'], comparison['group2'],
                         gene, 10, np.mean(accuracies), tstat, pvalue, 1])
        header = ['id', 'cl1', 'cl2', 'gene', 'robustness', 'accuracy', 't-test', 'p-value', 'p_row']
        comp_file_output = os.path.join(dataset['output'], str(comparison['id'])) + '_comp.tsv'
        df = pd.DataFrame(rows, columns=header)
        df.to_csv(comp_file_output, index=False, sep='\t')
def parse_args(args):
    """Parse CLI arguments for a single-comparison run."""
    parser = argparse.ArgumentParser(description='Regularized Random Forest for one comparison.')
    parser.add_argument('config_file', type=str, help='full path to the config file')
    parser.add_argument('comp_file', type=str, help='full path to the comparison file')
    # fixed user-facing typo: "THe identifier"
    parser.add_argument('comp_id', type=int, help='The identifier of a comparison defined in the comp_file')
    return parser.parse_args(args)
def cli_run_single_comp(args=sys.argv[1:]):
    """Command-line entry point: run the forest for a single comparison."""
    parsed = parse_args(args)
    run_one(parsed.config_file, parsed.comp_file, parsed.comp_id)
if __name__ == '__main__':
    # allow running this module directly as a script
    cli_run_single_comp()
|
hoffmangroup/cla | claweb/claweb/model/gac.py | from claweb.extra import load_configs
def get_used_samples(cfg_filepath, gac_filepath):
    """Return the unique samples of every group used by at least one comparison."""
    cfg, gac = load_configs(cfg_filepath, gac_filepath)
    # collect the group ids referenced on either side of any comparison
    used_group_ids = set()
    for comparison in gac["comparisons"]:
        used_group_ids.add(comparison["group1"])
        used_group_ids.add(comparison["group2"])
    # pool the samples of those groups, deduplicated
    samples = set()
    for group in gac["group_definitions"]:
        if group["id"] in used_group_ids:
            samples.update(group["samples"])
    return list(samples)
#"../../../PycharmProjects/claweb/test_website/34/global_config.yaml", "../../../PycharmProjects/claweb/test_website/34/20170801_group_and_comparisons.yaml" |
hoffmangroup/cla | claweb/setup.py | <reponame>hoffmangroup/cla<filename>claweb/setup.py
from setuptools import find_packages
from setuptools import setup
def post_install_cleaning():
    """Remove dist, eggs, and build directory after install.

    Missing directories are ignored: the previous version raised
    FileNotFoundError (rmtree) or IndexError (empty glob) when any of
    dist/build/*.egg-info was absent, so the cleanup could not be re-run.
    """
    import shutil
    import glob
    shutil.rmtree('dist', ignore_errors=True)
    shutil.rmtree('build', ignore_errors=True)
    for egg_info in glob.glob('*.egg-info'):
        shutil.rmtree(egg_info, ignore_errors=True)
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="claweb",
version="0.0.1",
author="mmendez12",
author_email="<EMAIL>",
description="Generate website summary for the Cell Lineage Analysis",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/mmendez12/claweb",
packages=find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
scripts=['bin/claweb_make_website.py'], install_requires=['pandas'],
entry_points = {
'console_scripts': [
'claweb-make-gac=claweb.f5clonto_helper.parse_20170801:cli_make_gac',
'claweb-make-graph-coord=claweb.f5clonto_helper.make_ontoviewer_coords:cli_make_graph_coord',
'claweb-make-updown-count=claweb.f5clonto_helper.up_down_neither_count:cli_make_updown_count',
'claweb-plot-ontology=claweb.f5clonto_helper.ontoviewer:cli_plot_ontology',
'claweb-make-website=claweb.controller.master:cli_make_website',
'claweb-make-website-comparisons=claweb.controller.master:cli_make_website_comparisons',
]
},
include_package_data=True
)
post_install_cleaning()
|
hoffmangroup/cla | claweb/claweb/f5clonto_helper/cytoscape_utils.py | import xml.dom.minidom as minidom
def get_node_attr_from_cytoscape(xgmml_filename):
    """Get node attributes name, id, shared_name, x, y, w, id, and edges from
    an xgmml file from cytoscape.
    Return nodes and edges.

    Nodes without a <graphics> element are skipped; edges pointing at a
    skipped/unknown node are dropped.
    """
    assert "xgmml" in xgmml_filename
    doc = minidom.parse(xgmml_filename)
    nodes = {}
    for node in doc.getElementsByTagName("node"):
        # Reset per node: previously a node lacking a "shared name" att
        # inherited the value from the preceding node (or raised NameError
        # when the very first node lacked one).
        shared_name = ""
        for att in node.getElementsByTagName("att"):
            if att.getAttribute("name") == "shared name":
                shared_name = att.getAttribute("value")
        graphic = node.getElementsByTagName("graphics")
        if graphic:
            graphic = graphic[0]
        else:
            continue
        att = {
            "name": node.getAttribute("label"),
            "id": node.getAttribute("id"),
            "shared_name": shared_name,
            "x": float(graphic.getAttribute("x")),
            # in cytoscape y are negative at the top and positive at the bottom,
            # it's the opposite in matplotlib so we multiply by -1
            "y": -float(graphic.getAttribute("y")),
        }
        nodes[node.getAttribute("id")] = att
    # transform coord to remove negative values
    # NOTE(review): abs(min) also shifts when the minimum is already positive;
    # kept as-is to preserve existing layouts — confirm intent.
    min_x = min(v["x"] for v in nodes.values())
    min_y = min(v["y"] for v in nodes.values())
    for node in nodes:
        nodes[node]["x"] = nodes[node]["x"] + abs(min_x)
        nodes[node]["y"] = nodes[node]["y"] + abs(min_y)
    edges = {}
    for edge in doc.getElementsByTagName("edge"):
        source = edge.getAttribute("source")
        target = edge.getAttribute("target")
        if not all(e in nodes for e in [source, target]):
            continue
        att = {"source": source, "target": target}
        edges[edge.getAttribute("id")] = att
    return nodes, edges
|
hoffmangroup/cla | claweb/claweb/f5clonto_helper/terms.py | <reponame>hoffmangroup/cla<filename>claweb/claweb/f5clonto_helper/terms.py
import operator
MERGE_SEPARATOR = ';;'


class TermMerger:
    """Merge several Term objects into one combined term."""

    def __init__(self, terms, merge_separator=MERGE_SEPARATOR):
        self.terms = sorted(terms, key=operator.attrgetter('name'))
        self.term_ids = [t.term_id for t in self.terms]
        self.merge_separator = merge_separator

    def merge(self):
        """Build and return the merged Term (joined name/id, pooled parents
        and relationships that do not point back at a merged term)."""
        merged_name = self.merge_separator.join(t.name for t in self.terms)
        merged_id = self.merge_separator.join(self.term_ids)
        merged = Term(name=merged_name, term_id=merged_id,
                      merge_separator=self.merge_separator)
        merged.is_a = self.get_new_is_a()
        merged.relationship = self.get_new_relationships()
        return merged

    def get_new_is_a(self):
        """Union of all parents, minus the merged terms themselves."""
        parents = set()
        for t in self.terms:
            parents.update(t.is_a)
        return list(parents - frozenset(self.term_ids))

    def get_new_relationships(self):
        """Union of relationships whose target is not a merged term."""
        kept = {rel
                for t in self.terms
                for rel in t.relationship
                if rel[1] not in self.term_ids}
        return list(kept)
class Term(object):
    """
    Stores terms by turning term keys into object attributes.
    """

    def __init__(self, name='', term_id='', rows=(),
                 merge_separator=MERGE_SEPARATOR):
        """Build a term from explicit values and/or obo (tag, value) rows.

        rows: iterable of (tag, value) pairs from an obo stanza; recognized
        tags are id, name, is_a, relationship and subset.  Default changed
        from a shared mutable [] to an immutable () — behaviorally identical
        since rows is only iterated.
        """
        self.term_id = term_id
        self.name = name
        self.is_a = []
        self.relationship = []
        self.subset = []
        self.merge_separator = merge_separator
        for tag, value in rows:
            if tag == 'is_a':
                # keep only the id, drop the " ! description" suffix
                self.is_a.append(value.split(' ! ')[0])
            elif tag == 'id':
                self.term_id = value
            elif tag == 'name':
                # '/' would break filenames built from term names downstream
                self.name = value.replace('/', '-')
            elif tag == 'relationship':
                # e.g. "part_of X:1 ! desc" -> ('part_of', 'X:1')
                self.relationship.append(tuple(value.split(' ! ')[0].split(' ')[:2]))
            elif tag == 'subset':
                self.subset.append(value)

    @property
    def is_merged_term(self):
        """True when this term's id was produced by merging several terms."""
        return self.merge_separator in self.term_id

    def update_is_a(self, update_dict):
        """Remap parent ids through *update_dict*, deduplicating the result."""
        new_term_ids = set()
        for term_id in self.is_a:
            new_term_id = update_dict.get(term_id, term_id)
            new_term_ids.add(new_term_id)
        self.is_a = list(new_term_ids)

    def update_relationship(self, update_dict):
        """Remap relationship target ids through *update_dict*, deduplicating."""
        new_relationships = set()
        for rel_type, term_id in self.relationship:
            new_term_id = update_dict.get(term_id, term_id)
            new_relationships.add((rel_type, new_term_id))
        self.relationship = list(new_relationships)

    def __repr__(self):
        return (f'{self.__class__.__name__}('
                f'{self.term_id}, {self.name})')
class CLTerm(Term):
    """
    Inherits from Term. This object is used to store the cell lineage
    terms from an obo file. It combines term information and
    the list of samples associated to it.
    """

    def __init__(self, term, samples=None, merge_separator=MERGE_SEPARATOR):
        """Wrap *term* and attach *samples*.

        samples defaults to a fresh empty list: the previous `samples=[]`
        default was assigned directly to self.samples, so all default-built
        instances shared (and could mutate) the same list.
        """
        super().__init__(term.name, term.term_id, merge_separator=merge_separator)
        self.is_a = term.is_a
        self.relationship = term.relationship
        self.samples = [] if samples is None else samples

    @property
    def has_sample(self):
        return self.nb_of_sample > 0

    @property
    def nb_of_sample(self):
        return len(self.samples)

    @property
    def samples_to_str(self):
        # strip the 'FF:' prefix from sample ids for display
        return ",".join(self.samples).replace('FF:', '')

    @classmethod
    def from_clterms(cls, terms=()):
        """Merge several CLTerms into one, pooling their samples.

        terms default changed from a shared [] to () — iterated only.
        """
        new_term = TermMerger(terms).merge()
        samples = set()
        for term in terms:
            samples.update(term.samples)
        return cls(new_term, list(samples))

    def samples_in_common(self, term):
        return frozenset(self.samples).intersection(term.samples)

    def has_same_sample_set(self, term):
        return frozenset(self.samples) == frozenset(term.samples)

    def __repr__(self):
        return f'{self.__class__.__name__}({self.term_id}, {self.name})'
|
hoffmangroup/cla | claweb/claweb/controller/comparisons/comparisons.py | """
Created on Oct 7, 2013
@author: mmendez
"""
import os
from jinja2 import Environment, FileSystemLoader
from ...model import comparisons as comparison_model
def comparisons(cfg, group_and_comparisons, comp_id):
    """Render the page for one comparison and write it under website/comparisons."""
    view_dir = os.path.join(os.path.dirname(__file__), '..', '..', 'view')
    jinja_env = Environment(loader=FileSystemLoader(view_dir))
    base_template = jinja_env.get_template('default.tpl')
    site = cfg['website'].copy()
    if site['url'] == '.':
        site['url'] = '..'
    comparisons_dir = os.path.join(cfg['website']['output'], "comparisons")
    if not os.path.exists(comparisons_dir):
        os.makedirs(comparisons_dir)
    dataset_genes, g1_name, g2_name = comparison_model.comparison(cfg, group_and_comparisons, comp_id)
    # check if there is results for this comparison and choose the right template
    has_results = sum(dataset['n_genes'] for dataset in dataset_genes)
    child_template = 'comparisons.tpl' if has_results else 'comparison_empty.tpl'
    output = base_template.render(dataset_genes=dataset_genes, group1_name=g1_name, group2_name=g2_name,
                                  site=site, tpl=child_template)
    page_path = os.path.join(comparisons_dir, str(comp_id) + ".html")
    print(page_path, "generated")
    with open(page_path, "wb") as f:
        f.write(output.encode("utf-8"))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.