from panda3d.core import Point2, Vec3, Vec4, KeyboardButton, NodePath, LineSegs, MeshDrawer, BitMask32, Shader, Vec2
from panda3d.core import Geom, GeomNode, GeomVertexFormat, GeomLines, GeomVertexWriter, GeomVertexData, InternalName, Point3
from panda3d.core import TextNode
from .BaseTool import BaseTool, ToolUsage
from bsp.leveleditor.viewport.Viewport2D import Viewport2D
from bsp.leveleditor.viewport.ViewportType import *
from bsp.bspbase import BSPUtils
from bsp.leveleditor import RenderModes
from bsp.leveleditor.geometry.Handles import Handles, HandleType
from bsp.leveleditor.geometry.Box import Box
from bsp.leveleditor.geometry.Rect import Rect
from bsp.leveleditor.geometry.GeomView import GeomView
from bsp.leveleditor import LEGlobals
from bsp.leveleditor.menu import KeyBinds
from bsp.leveleditor.menu.KeyBind import KeyBind
from enum import IntEnum
import py_linq
from PyQt5 import QtCore
class BoxAction(IntEnum):
ReadyToDraw = 0
DownToDraw = 1
Drawing = 2
Drawn = 3
ReadyToResize = 4
DownToResize = 5
Resizing = 6
class ResizeHandle(IntEnum):
TopLeft = 0
Top = 1
TopRight = 2
Left = 3
Center = 4
Right = 5
BottomLeft = 6
Bottom = 7
BottomRight = 8
class BoxState:
def __init__(self):
self.activeViewport = None
self.action = BoxAction.ReadyToDraw
self.handle = ResizeHandle.Center
self.boxStart = None
self.boxEnd = None
self.moveStart = None
self.preTransformBoxStart = None
self.preTransformBoxEnd = None
self.clickStart = Point2(0, 0)
def cleanup(self):
self.activeViewport = None
self.action = None
self.handle = None
self.boxStart = None
self.boxEnd = None
self.moveStart = None
self.preTransformBoxEnd = None
self.preTransformBoxStart = None
self.clickStart = None
def isValidAndApplicable(self, vp):
return (self.action != BoxAction.DownToDraw and
self.action != BoxAction.Drawing and
self.action != BoxAction.DownToResize and
self.action != BoxAction.Resizing or
self.activeViewport == vp)
def fixBoxBounds(self):
if self.action != BoxAction.Drawing and self.action != BoxAction.Resizing:
return
if not isinstance(self.activeViewport, Viewport2D):
return
vp = self.activeViewport
assert len(self.boxStart) == len(self.boxEnd), "This literally should not happen. (BoxTool)"
for i in range(len(self.boxStart)):
start = self.boxStart[i]
end = self.boxEnd[i]
if start > end:
                tmp = start
                self.boxStart[i] = end
                self.boxEnd[i] = tmp
vec = Vec3(0, 0, 0)
vec[i] = 1
flat = vp.flatten(vec)
# FIXME: There has to be a better way of doing this.
if flat.x == 1:
self.swapHandle("Left", "Right")
if flat.z == 1:
self.swapHandle("Top", "Bottom")
def swapHandle(self, one, two):
if one in self.handle.name:
self.handle = ResizeHandle[self.handle.name.replace(one, two)]
elif two in self.handle.name:
self.handle = ResizeHandle[self.handle.name.replace(two, one)]
# We need this class for each 2D viewport since it has to render different things
class BoxToolViewport:
def __init__(self, tool, vp):
self.vp = vp
self.tool = tool
# Set up the resize handles for this 2d viewport
squareHandles = Handles(HandleType.Square)
squareHandles.np.setHpr(vp.getViewHpr())
squareHandles.addView(
GeomView.Triangles, vp.getViewportMask(),
renderState = RenderModes.DoubleSidedNoZ())
squareHandles.generateGeometry()
self.handlesList = {
HandleType.Square: squareHandles
}
self.handles = squareHandles
# Measurement text
ttext = TextNode("boxToolTopText")
ttext.setAlign(TextNode.ABoxedCenter)
self.topText = NodePath(ttext)
self.topText.setHpr(vp.getViewHpr())
self.topText.hide(~vp.getViewportMask())
self.topText.setBin("fixed", LEGlobals.WidgetSort)
self.topText.setDepthWrite(False)
self.topText.setDepthTest(False)
ltext = TextNode("boxToolLeftText")
ltext.setAlign(TextNode.ABoxedRight)
self.leftText = NodePath(ltext)
self.leftText.setHpr(vp.getViewHpr())
self.leftText.hide(~vp.getViewportMask())
self.leftText.setBin("fixed", LEGlobals.WidgetSort)
self.leftText.setDepthWrite(False)
self.leftText.setDepthTest(False)
def cleanup(self):
self.vp = None
self.tool = None
for handle in self.handlesList.values():
handle.cleanup()
self.handles = None
self.topText.removeNode()
self.topText = None
self.leftText.removeNode()
self.leftText = None
def updateHandles(self, handles):
self.handles.setHandles(handles)
def updateTextPosScale(self):
start = self.vp.flatten(self.tool.state.boxStart)
end = self.vp.flatten(self.tool.state.boxEnd)
center = (start + end) / 2
offset = 4 / self.vp.zoom
scale = 3.25 / self.vp.zoom
self.topText.setPos(self.vp.expand(Point3(center.x, 0, end.z + offset)))
self.topText.setScale(scale)
self.leftText.setPos(self.vp.expand(Point3(start.x - offset, 0, center.z)))
self.leftText.setScale(scale)
def updateText(self):
start = self.vp.flatten(self.tool.state.boxStart)
end = self.vp.flatten(self.tool.state.boxEnd)
width = abs(end.x - start.x)
height = abs(end.z - start.z)
self.topText.node().setText("%.1f" % width)
self.leftText.node().setText("%.1f" % height)
def showText(self):
self.topText.reparentTo(self.tool.doc.render)
self.leftText.reparentTo(self.tool.doc.render)
def hideText(self):
self.topText.reparentTo(NodePath())
self.leftText.reparentTo(NodePath())
def showHandles(self):
self.handles.np.reparentTo(self.tool.doc.render)
def hideHandles(self):
self.handles.np.reparentTo(NodePath())
class BoxTool(BaseTool):
Name = "Box Tool"
ToolTip = "Box Tool"
Usage = ToolUsage.Both
Draw3DBox = True
CursorHandles = {
ResizeHandle.TopLeft: QtCore.Qt.SizeFDiagCursor,
ResizeHandle.BottomRight: QtCore.Qt.SizeFDiagCursor,
ResizeHandle.TopRight: QtCore.Qt.SizeBDiagCursor,
ResizeHandle.BottomLeft: QtCore.Qt.SizeBDiagCursor,
ResizeHandle.Top: QtCore.Qt.SizeVerCursor,
ResizeHandle.Bottom: QtCore.Qt.SizeVerCursor,
ResizeHandle.Left: QtCore.Qt.SizeHorCursor,
ResizeHandle.Right: QtCore.Qt.SizeHorCursor,
ResizeHandle.Center: QtCore.Qt.SizeAllCursor
}
DrawActions = [
BoxAction.Drawing,
BoxAction.Drawn,
BoxAction.ReadyToResize,
BoxAction.DownToResize,
BoxAction.Resizing
]
@staticmethod
def getProperBoxCoordinates(start, end):
newStart = Point3(min(start[0], end[0]), min(start[1], end[1]), min(start[2], end[2]))
newEnd = Point3(max(start[0], end[0]), max(start[1], end[1]), max(start[2], end[2]))
return [newStart, newEnd]
@staticmethod
def handleHitTestPoint(hitX, hitY, testX, testY, hitbox):
return (hitX >= testX - hitbox and hitX <= testX + hitbox and
hitY >= testY - hitbox and hitY <= testY + hitbox)
@staticmethod
def getHandle(current, boxStart, boxEnd, hitbox, offset, zoom):
offset /= zoom
hitbox /= zoom
start = Point3(min(boxStart[0], boxEnd[0]) - offset, 0, min(boxStart[2], boxEnd[2]) - offset)
end = Point3(max(boxStart[0], boxEnd[0]) + offset, 0, max(boxStart[2], boxEnd[2]) + offset)
center = (end + start) / 2
if BoxTool.handleHitTestPoint(current[0], current[2], start[0], start[2], hitbox):
return ResizeHandle.BottomLeft
if BoxTool.handleHitTestPoint(current[0], current[2], end[0], start[2], hitbox):
return ResizeHandle.BottomRight
if BoxTool.handleHitTestPoint(current[0], current[2], start[0], end[2], hitbox):
return ResizeHandle.TopLeft
if BoxTool.handleHitTestPoint(current[0], current[2], end[0], end[2], hitbox):
return ResizeHandle.TopRight
if BoxTool.handleHitTestPoint(current[0], current[2], center[0], start[2], hitbox):
return ResizeHandle.Bottom
if BoxTool.handleHitTestPoint(current[0], current[2], center[0], end[2], hitbox):
return ResizeHandle.Top
if BoxTool.handleHitTestPoint(current[0], current[2], start[0], center[2], hitbox):
return ResizeHandle.Left
if BoxTool.handleHitTestPoint(current[0], current[2], end[0], center[2], hitbox):
return ResizeHandle.Right
# Remove the offset padding for testing if we are inside the box itself
start[0] += offset
start[2] += offset
end[0] -= offset
end[2] -= offset
if current[0] > start[0] and current[0] < end[0] \
and current[2] > start[2] and current[2] < end[2]:
return ResizeHandle.Center
return None
def __init__(self, mgr):
BaseTool.__init__(self, mgr)
self.handleWidth = 0.9
self.handleOffset = 1.6
self.handleType = HandleType.Square
self.state = BoxState()
self.suppressBox = False
self.vps = []
for vp in self.doc.viewportMgr.viewports:
if vp.is2D():
self.vps.append(BoxToolViewport(self, vp))
# Representation of the box we are drawing
self.box = Box()
if self.Draw3DBox:
# Render as solid lines in 3D viewport
self.box.addView(GeomView.Lines, VIEWPORT_3D_MASK)
# Render as dashed lines in 2D viewports
self.box.addView(GeomView.Lines, VIEWPORT_2D_MASK, state = RenderModes.DashedLineNoZ())
self.box.generateGeometry()
def cleanup(self):
self.handleWidth = None
self.handleOffset = None
self.handleType = None
self.state.cleanup()
self.state = None
self.suppressBox = None
for vp in self.vps:
vp.cleanup()
self.vps = None
self.box.cleanup()
self.box = None
BaseTool.cleanup(self)
def showHandles(self):
for vp in self.vps:
vp.showHandles()
def hideHandles(self):
for vp in self.vps:
vp.hideHandles()
def hideText(self):
for vp in self.vps:
vp.hideText()
def showText(self):
for vp in self.vps:
vp.showText()
def showBox(self):
self.box.np.reparentTo(self.doc.render)
def hideBox(self):
self.box.np.reparentTo(NodePath())
def updateHandles(self, vp):
start = vp.vp.flatten(self.state.boxStart)
end = vp.vp.flatten(self.state.boxEnd)
handles = self.getHandles(start, end, vp.vp.zoom)
vp.handles.setHandles(handles, vp.vp.zoom)
def onBoxChanged(self):
self.state.fixBoxBounds()
if self.state.action in [BoxAction.Drawing, BoxAction.Resizing, BoxAction.Drawn]:
# Fix up the text
for vp in self.vps:
vp.updateText()
self.updateHandles(vp)
self.box.setMinMax(self.state.boxStart, self.state.boxEnd)
self.doc.updateAllViews()
# TODO: mediator.selectionBoxChanged
def activate(self):
BaseTool.activate(self)
self.accept('mouse1', self.mouseDown)
self.accept('mouse1-up', self.mouseUp)
self.accept('mouseMoved', self.mouseMove)
self.accept('mouseEnter', self.mouseEnter)
self.accept('mouseExit', self.mouseExit)
self.accept(KeyBinds.getPandaShortcut(KeyBind.Confirm), self.enterDown)
self.accept(KeyBinds.getPandaShortcut(KeyBind.Cancel), self.escapeDown)
def disable(self):
BaseTool.disable(self)
self.maybeCancel()
def mouseDown(self):
if self.suppressBox:
return
vp = base.viewportMgr.activeViewport
if not vp:
return
if vp.is3D():
self.mouseDown3D()
return
self.state.clickStart = Point2(vp.getMouse())
if self.state.action in [BoxAction.ReadyToDraw, BoxAction.Drawn]:
self.leftMouseDownToDraw()
elif self.state.action == BoxAction.ReadyToResize:
self.leftMouseDownToResize()
def mouseDown3D(self):
pass
def leftMouseDownToDraw(self):
self.hideText()
self.hideBox()
self.hideHandles()
vp = base.viewportMgr.activeViewport
mouse = vp.getMouse()
self.state.activeViewport = vp
self.state.action = BoxAction.DownToDraw
toWorld = vp.viewportToWorld(mouse)
expanded = vp.expand(toWorld)
self.state.boxStart = base.snapToGrid(expanded)
self.state.boxEnd = Point3(self.state.boxStart)
self.state.handle = ResizeHandle.BottomLeft
self.onBoxChanged()
def leftMouseDownToResize(self):
self.hideHandles()
vp = base.viewportMgr.activeViewport
self.state.action = BoxAction.DownToResize
self.state.moveStart = vp.viewportToWorld(vp.getMouse())
self.state.preTransformBoxStart = self.state.boxStart
self.state.preTransformBoxEnd = self.state.boxEnd
def mouseUp(self):
vp = base.viewportMgr.activeViewport
if not vp:
return
if vp.is3D():
self.mouseUp3D()
return
if self.state.action == BoxAction.Drawing:
self.leftMouseUpDrawing()
elif self.state.action == BoxAction.Resizing:
self.leftMouseUpResizing()
elif self.state.action == BoxAction.DownToDraw:
self.leftMouseClick()
elif self.state.action == BoxAction.DownToResize:
self.leftMouseClickOnResizeHandle()
def mouseUp3D(self):
pass
def resizeBoxDone(self):
vp = base.viewportMgr.activeViewport
coords = self.getResizedBoxCoordinates(vp)
corrected = BoxTool.getProperBoxCoordinates(coords[0], coords[1])
self.state.activeViewport = None
self.state.action = BoxAction.Drawn
self.state.boxStart = corrected[0]
self.state.boxEnd = corrected[1]
self.showHandles()
self.onBoxChanged()
def leftMouseUpDrawing(self):
self.resizeBoxDone()
def leftMouseUpResizing(self):
self.resizeBoxDone()
def leftMouseClick(self):
self.state.activeViewport = None
self.state.action = BoxAction.ReadyToDraw
self.state.boxStart = None
self.state.boxEnd = None
self.onBoxChanged()
def leftMouseClickOnResizeHandle(self):
self.state.action = BoxAction.ReadyToResize
def mouseMove(self, vp):
if vp.is3D():
self.mouseMove3D()
return
if not self.state.isValidAndApplicable(vp):
return
if self.state.action in [BoxAction.Drawing, BoxAction.DownToDraw]:
self.mouseDraggingToDraw()
elif self.state.action in [BoxAction.Drawn, BoxAction.ReadyToResize]:
self.mouseHoverWhenDrawn()
elif self.state.action in [BoxAction.DownToResize, BoxAction.Resizing]:
self.mouseDraggingToResize()
def mouseMove3D(self):
pass
def resizeBoxDrag(self):
vp = base.viewportMgr.activeViewport
coords = self.getResizedBoxCoordinates(vp)
self.state.boxStart = coords[0]
self.state.boxEnd = coords[1]
self.onBoxChanged()
#render.ls()
def mouseDraggingToDraw(self):
self.showBox()
self.showText()
self.state.action = BoxAction.Drawing
self.resizeBoxDrag()
def mouseDraggingToResize(self):
self.state.action = BoxAction.Resizing
self.resizeBoxDrag()
def cursorForHandle(self, handle):
return self.CursorHandles.get(handle, QtCore.Qt.ArrowCursor)
def resetCursor(self):
vp = base.viewportMgr.activeViewport
def mouseHoverWhenDrawn(self):
vp = base.viewportMgr.activeViewport
now = vp.viewportToWorld(vp.getMouse())
start = vp.flatten(self.state.boxStart)
end = vp.flatten(self.state.boxEnd)
handle = BoxTool.getHandle(now, start, end, self.handleWidth, self.handleOffset, vp.zoom)
if handle is not None and (handle == ResizeHandle.Center or self.filterHandle(handle)):
vp.setCursor(self.cursorForHandle(handle))
self.state.handle = handle
self.state.action = BoxAction.ReadyToResize
self.state.activeViewport = vp
else:
vp.setCursor(QtCore.Qt.ArrowCursor)
self.state.action = BoxAction.Drawn
self.state.activeViewport = None
def getResizeOrigin(self, vp):
if self.state.action != BoxAction.Resizing or self.state.handle != ResizeHandle.Center:
return None
st = vp.flatten(self.state.preTransformBoxStart)
ed = vp.flatten(self.state.preTransformBoxEnd)
points = [st, ed, Point3(st.x, 0, ed.z), Point3(ed.x, 0, st.z)]
points.sort(key = lambda x: (self.state.moveStart - x).lengthSquared())
return points[0]
def getResizeDistance(self, vp):
origin = self.getResizeOrigin(vp)
if not origin:
return None
before = self.state.moveStart
after = vp.viewportToWorld(vp.getMouse())
return base.snapToGrid(origin + after - before) - origin
def getResizedBoxCoordinates(self, vp):
if self.state.action != BoxAction.Resizing and self.state.action != BoxAction.Drawing:
return [self.state.boxStart, self.state.boxEnd]
now = base.snapToGrid(vp.viewportToWorld(vp.getMouse()))
cstart = vp.flatten(self.state.boxStart)
cend = vp.flatten(self.state.boxEnd)
# Proportional scaling
ostart = vp.flatten(self.state.preTransformBoxStart if self.state.preTransformBoxStart else Vec3.zero())
oend = vp.flatten(self.state.preTransformBoxEnd if self.state.preTransformBoxEnd else Vec3.zero())
owidth = oend.x - ostart.x
oheight = oend.z - ostart.z
proportional = vp.mouseWatcher.isButtonDown(KeyboardButton.control()) and \
self.state.action == BoxAction.Resizing and owidth != 0 and oheight != 0
if self.state.handle == ResizeHandle.TopLeft:
cstart.x = now.x
cend.z = now.z
elif self.state.handle == ResizeHandle.Top:
cend.z = now.z
elif self.state.handle == ResizeHandle.TopRight:
cend.x = now.x
cend.z = now.z
elif self.state.handle == ResizeHandle.Left:
cstart.x = now.x
elif self.state.handle == ResizeHandle.Center:
cdiff = cend - cstart
distance = self.getResizeDistance(vp)
if not distance:
cstart = vp.flatten(self.state.preTransformBoxStart) + now \
- base.snapToGrid(self.state.moveStart)
else:
cstart = vp.flatten(self.state.preTransformBoxStart) + distance
cend = cstart + cdiff
elif self.state.handle == ResizeHandle.Right:
cend.x = now.x
elif self.state.handle == ResizeHandle.BottomLeft:
cstart.x = now.x
cstart.z = now.z
elif self.state.handle == ResizeHandle.Bottom:
cstart.z = now.z
elif self.state.handle == ResizeHandle.BottomRight:
cend.x = now.x
cstart.z = now.z
if proportional:
nwidth = cend.x - cstart.x
nheight = cend.z - cstart.z
mult = max(nwidth / owidth, nheight / oheight)
pwidth = owidth * mult
pheight = oheight * mult
wdiff = pwidth - nwidth
hdiff = pheight - nheight
if self.state.handle == ResizeHandle.TopLeft:
cstart.x -= wdiff
cend.z += hdiff
elif self.state.handle == ResizeHandle.TopRight:
cend.x += wdiff
cend.z += hdiff
elif self.state.handle == ResizeHandle.BottomLeft:
cstart.x -= wdiff
cstart.z -= hdiff
elif self.state.handle == ResizeHandle.BottomRight:
cend.x += wdiff
cstart.z -= hdiff
cstart = vp.expand(cstart) + vp.getUnusedCoordinate(self.state.boxStart)
cend = vp.expand(cend) + vp.getUnusedCoordinate(self.state.boxEnd)
return [cstart, cend]
def maybeCancel(self):
if self.state.action in [BoxAction.ReadyToDraw, BoxAction.DownToDraw]:
return False
if self.state.activeViewport:
self.state.activeViewport.setCursor(QtCore.Qt.ArrowCursor)
self.state.activeViewport = None
self.state.action = BoxAction.ReadyToDraw
self.hideText()
self.hideBox()
self.hideHandles()
return True
def enterDown(self):
if self.maybeCancel():
self.boxDrawnConfirm()
def escapeDown(self):
if self.maybeCancel():
self.boxDrawnCancel()
def boxDrawnConfirm(self):
pass
def boxDrawnCancel(self):
pass
def mouseEnter(self, vp):
if self.state.activeViewport:
self.state.activeViewport.setCursor(QtCore.Qt.ArrowCursor)
def mouseExit(self, vp):
if self.state.activeViewport:
self.state.activeViewport.setCursor(QtCore.Qt.ArrowCursor)
def getHandles(self, start, end, zoom, offset = None):
if offset is None:
offset = self.handleOffset
half = (end - start) / 2
dist = offset / zoom
return py_linq.Enumerable([
(ResizeHandle.TopLeft, start.x - dist, end.z + dist),
(ResizeHandle.TopRight, end.x + dist, end.z + dist),
(ResizeHandle.BottomLeft, start.x - dist, start.z - dist),
(ResizeHandle.BottomRight, end.x + dist, start.z - dist),
(ResizeHandle.Top, start.x + half.x, end.z + dist),
(ResizeHandle.Left, start.x - dist, start.z + half.z),
(ResizeHandle.Right, end.x + dist, start.z + half.z),
(ResizeHandle.Bottom, start.x + half.x, start.z - dist)
]).where(lambda x: self.filterHandle(x[0])) \
.select(lambda x: Point3(x[1], 0, x[2])) \
.to_list()
def filterHandle(self, handle):
return True
def moveBox(self, pos):
currPos = (self.state.boxStart + self.state.boxEnd) / 2.0
delta = pos - currPos
self.state.boxStart += delta
self.state.boxEnd += delta
self.state.action = BoxAction.Drawn
self.resizeBoxDone()
#def scaleBox(self, scale,):
# self.state.boxStart.componentwiseMult(scale)
# self.state.boxEnd.componentwiseMult(scale)
# self.state.action = BoxAction.Drawn
# self.resizeBoxDone()
def update(self):
for vp in self.vps:
if self.state.action != BoxAction.ReadyToDraw:
vp.updateTextPosScale()
if self.state.action in [BoxAction.ReadyToResize, BoxAction.Drawn]:
self.updateHandles(vp)
def getSelectionBox(self):
# Return a min/max point for a box that can be used
# for selection.
#
# If one of the dimensions has a depth value of 0, extend it out into
# infinite space.
# If two or more dimensions have depth 0, do nothing.
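        # Illustration (values assumed): a box drawn flat in a 2D view so that only
        # boxStart.z == boxEnd.z gets its z extent widened to +/- 99999999 and
        # invalid stays False; collapsing two or more axes sets invalid to True.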
sameX = self.state.boxStart.x == self.state.boxEnd.x
sameY = self.state.boxStart.y == self.state.boxEnd.y
sameZ = self.state.boxStart.z == self.state.boxEnd.z
start = Vec3(self.state.boxStart)
end = Vec3(self.state.boxEnd)
invalid = False
inf = 99999999.0
negInf = -99999999.0
if sameX:
if sameY or sameZ:
invalid = True
start.x = negInf
end.x = inf
if sameY:
if sameZ:
invalid = True
start.y = negInf
end.y = inf
if sameZ:
start.z = negInf
end.z = inf
return [invalid, start, end]
|
import threading
import time
number = []
def add_num():
print("thread 1 sendo executada")
for i in range(5):
number.append(i)
time.sleep(1)
print("fim da thread")
def show_num(num):
print("thread 2 sendo executada")
for i in num:
print(i)
time.sleep(1)
print("fim da thread")
t = threading.Thread(target=add_num)
t.start()
s = threading.Thread(target=show_num, args=(number,))
s.start()
|
#B
d,g,ai=map(int,input().split())
print(int(d*g*ai//100))
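# e.g. input "1000 10 5" -> 1000*10*5//100 = 500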
|
import os
import csv
from io import StringIO
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.contrib.auth.hashers import make_password
#from corecode.models import StudentClass
from .models import CustomUser, StaffBulkUpload, Staffs
@receiver(post_save, sender=StaffBulkUpload)
def create_bulk_staff(sender, created, instance, *args, **kwargs):
if created:
opened = StringIO(instance.csv_file.read().decode())
reading = csv.DictReader(opened, delimiter=',')
staffs = []
users= []
for row in reading:
if 'username' in row and row['username']:
username = row['username']
first_name = row['first_name'] if 'first_name' in row and row['first_name'] else ''
last_name = row['last_name'] if 'last_name' in row and row['last_name'] else ''
email = row['email'] if 'email' in row and row['email'] else ''
password = row['password'] if 'password' in row and row['password'] else ''
check = CustomUser.objects.filter(username=username).exists()
if not check:
users.append(
CustomUser(
first_name=first_name,
last_name=last_name,
username=username,
email=email,
user_type=2,
password=make_password(password)
)
)
        # the DictReader above was exhausted by the first loop; rewind and re-read
        opened.seek(0)
        reading = csv.DictReader(opened, delimiter=',')
        for row in reading:
if 'staff_number' in row and row['staff_number']:
staff_number = row['staff_number']
address = row['address'] if 'address' in row and row['address'] else ''
dob = row['dob'] if 'dob' in row and row['dob'] else ''
gender = (row['gender']).lower() if 'gender' in row and row['gender'] else ''
staffs.append(
Staffs(
staff_number=staff_number,
gender=gender,
address=address,
dob=dob,
)
)
Staffs.objects.bulk_create(staffs)
CustomUser.objects.bulk_create(users)
instance.csv_file.close()
instance.delete()
def _delete_file(path):
""" Deletes file from filesystem. """
if os.path.isfile(path):
os.remove(path)
@receiver(post_delete, sender=StaffBulkUpload)
def delete_csv_file(sender, instance, *args, **kwargs):
if instance.csv_file:
_delete_file(instance.csv_file.path)
@receiver(post_delete, sender=Staffs)
def delete_passport_on_delete(sender, instance, *args, **kwargs):
if instance.passport:
_delete_file(instance.passport.path)
|
"""
Setup file for mimic
"""
from __future__ import print_function
from setuptools import setup, find_packages
try:
from py2app.build_app import py2app
py2app_available = True
except ImportError:
py2app_available = False
_NAME = "mimic"
_VERSION = "2.0.0"
def setup_options(name, version):
"""
If `py2app` is present in path, then enable to option to build the app.
This also disables the options needed for normal `sdist` installs.
:returns: a dictionary of setup options.
"""
info = dict(
install_requires=[
"klein>=15.3.1",
"twisted>=15.5.0",
"treq>=15.1.0",
"six>=1.6.0",
"xmltodict>=0.9.1",
"attrs>=15.1.0",
"testtools>=1.7.1,<1.8.0",
"iso8601>=0.1.10",
],
package_dir={"mimic": "mimic"},
packages=find_packages(exclude=[]) + ["twisted.plugins"],
)
if not py2app_available:
return info
# py2app available, proceed.
script="bundle/start-app.py"
test_script="bundle/run-tests.py"
plist = dict(
CFBundleName = _NAME,
CFBundleShortVersionString = " ".join([_NAME, _VERSION]),
CFBundleGetInfoString = _NAME,
CFBundleExecutable = _NAME,
CFBundleIdentifier = "com.%s.%s" % (_NAME, _VERSION),
LSUIElement = "1",
LSMultipleInstancesProhibited = "1",
)
app_data = dict(
script=script,
plist=plist,
extra_scripts=[test_script]
)
class BuildWithCache(py2app, object):
"""
Before building the application rebuild the `dropin.cache` files.
"""
def collect_recipedict(self):
"""
Implement a special Twisted plugins recipe so that dropin.cache
files are generated and included in site-packages.zip.
"""
result = super(BuildWithCache, self).collect_recipedict()
def check(cmd, mg):
from twisted.plugin import getPlugins, IPlugin
from twisted import plugins as twisted_plugins
from mimic import plugins as mimic_plugins
for plugin_package in [twisted_plugins, mimic_plugins]:
import time
list(getPlugins(IPlugin, package=plugin_package))
import os
def plugpath(what):
path_in_zip = what + "/plugins"
path_on_fs = (
os.path.abspath(
os.path.join(
os.path.dirname(
__import__(what + ".plugins",
fromlist=["nonempty"])
.__file__),
"dropin.cache")
))
os.utime(path_on_fs, (time.time() + 86400,) * 2)
return (path_in_zip, [path_on_fs])
data_files = [plugpath("mimic"), plugpath("twisted")]
return dict(loader_files=data_files)
result["bonus"] = check
return result
return dict(
info,
app=[app_data],
cmdclass={
"py2app": BuildWithCache
},
options={
"py2app": {
"includes": [
"syslog",
"mimic.test.*",
"mimic.plugins.*",
"twisted.plugins.*",
"twisted.plugin",
],
}
}
)
setup(
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
],
name=_NAME,
version=_VERSION,
description="An API-compatible mock service",
license="Apache License, Version 2.0",
url="https://github.com/rackerlabs/mimic",
include_package_data=True,
**setup_options(_NAME, _VERSION)
)
|
import cv2
import math
import numpy as np
def calculateDistance(firstPoint, centroid):
return math.sqrt((centroid[0] - firstPoint[0, 0])**2 + (centroid[1] - firstPoint[0, 1])**2)
def findMaximumR(points, centroid):
maxDistance = 0
distanceList = []
for point in points:
distance = calculateDistance(point, centroid)
distanceList.append(distance)
maxDistance = max(distance, maxDistance)
return distanceList, maxDistance
def convertToPolarImageWithInterpolation(points, centroid, filled = True, imageHeight = 500, imageWidth = 360):
distanceList, maxDistance = findMaximumR(points, centroid)
angleList = []
rList = []
scaleFactor = imageHeight / maxDistance
outputImage = np.zeros((imageHeight, imageWidth), np.uint8)
prevR = 0
prevAngle = 0
connectBottomIndex = 0
maxR = 0
for i in xrange(len(points) + 1):
i = i % len(points)
angle = math.atan2(points[i][0, 1] - centroid[1], points[i][0, 0] - centroid[0])
r = distanceList[i] * scaleFactor
#reverse the images
r = imageHeight - r
angle = (angle + 3.1415) * 180 / 3.1415
rList.append(r)
angleList.append(angle)
if prevR > 0 and abs(angle - prevAngle) > 200:
connectBottomIndex = i
prevAngle = angle
prevR = r
prevIndex = (connectBottomIndex - 1 + len(points)) % len(points)
#added_y = [imageHeight - 1, imageHeight - 1, imageHeight - 1]
#added_x = [angleList[prevIndex], 180, angleList[connectBottomIndex]]
added_y = [rList[prevIndex], imageHeight - 1, imageHeight - 1, imageHeight - 1, rList[connectBottomIndex]]
added_x = [0, 0, 180, 359, 359]
rList = rList[0 : prevIndex + 1] + added_y + rList[prevIndex + 1 : ]
angleList = angleList[0 : prevIndex + 1] + added_x + angleList[prevIndex + 1 : ]
points = np.zeros((len(angleList), 1, 2), np.int)
for i in xrange(len(angleList)):
points[i, 0, 0] = angleList[i]
points[i, 0, 1] = rList[i]
contours = [points]
if filled:
cv2.drawContours(outputImage, contours, 0, (255, 255, 255), cv2.cv.CV_FILLED)
else:
cv2.drawContours(outputImage, contours, 0, (255, 255, 255))
return outputImage, maxDistance
def convertToPolarImage(points, centroid, imageHeight = 500, imageWidth = 360):
distanceList, maxDistance = findMaximumR(points, centroid)
angleList = []
scaleFactor = imageHeight / maxDistance
outputImage = np.zeros((imageHeight, imageWidth), np.uint8)
prevR = 0
prevAngle = 0
connectBottomIndex = 0
for i in xrange(len(points) + 1):
i = i % len(points)
angle = math.atan2(points[i][0, 1] - centroid[1], points[i][0, 0] - centroid[0])
r = distanceList[i] * scaleFactor
#reverse the images
r = imageHeight - r
angle = (angle + 3.1415) * 180 / 3.1415
if prevR > 0 and abs(angle - prevAngle) < 200:
cv2.line(outputImage, ((int)(prevAngle), (int)(prevR)), ((int)(angle), (int)(r)), (255, 255, 255))
else:
if prevR > 0 and abs(angle - prevAngle) > 200:
connectBottomIndex = i
prevAngle = angle
prevR = r
return outputImage, maxDistance
|
import paho.mqtt.client as mqtt
class MessageHandler:
def __init__(self, qos=0):
self.topics = dict() # <topic, callback>
self.qos = qos
self.client = mqtt.Client()
self.client.on_connect = self.__on_connect__
self.client.on_message = self.__on_message__
def __on_connect__(self, client, userdata, flags, rc):
print("Connected with result code "+str(rc))
for topic in self.topics:
self.client.subscribe(topic, self.qos)
def __on_message__(self, client, userdata, msg):
print(client + " -> " + msg)
def subscribe(self, topic, callback):
self.topics[topic] = callback
self.client.subscribe(topic, self.qos)
self.client.message_callback_add(topic, callback)
def unsubscribe(self, topic):
self.client.message_callback_remove(topic)
del self.topics[topic]
def start_dispatcher(self, ip, port=1883, keepalive=60):
self.client.connect(ip, port, keepalive)
def loop(self):
self.client.loop_forever()
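# Minimal usage sketch (the broker address, topic name and callback below are
# assumptions for illustration, not part of this module):
if __name__ == '__main__':
    def on_temperature(client, userdata, msg):
        # paho-mqtt delivers the payload as bytes
        print(msg.topic, msg.payload.decode())

    handler = MessageHandler(qos=1)
    handler.subscribe("sensors/temperature", on_temperature)
    handler.start_dispatcher("localhost")  # connect to a local broker
    handler.loop()                         # blocks, dispatching callbacks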
|
from pytube import YouTube
link = 'https://www.youtube.com/watch?v=Id8YjwJDQwg&t=839s'
#To save the video to your location
save_location = 'E:\\'
try:
YouTube(link).streams.first().download(save_location)
print('Video saved')
except:
print('Connection Error')
#To save the video where the python is installed
try:
YouTube(link).streams.first().download()
print('Video saved')
except:
print('Connection Error')
|
import logging
# setup logger
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger('piiipod')
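# Example use of the module-level logger defined above (message text is illustrative);
# note that basicConfig without a level leaves the root logger at WARNING.
# logger.warning('piiipod starting up')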
|
import requests
import os
from bs4 import BeautifulSoup as bs
baseURL = "https://www.ibiblio.org/ram/"
page= requests.get(baseURL+"bcst_all.htm")
soup= bs(page.text,'html.parser')
songs = soup.find_all('table')[4].select('li a')
folderName = 'Bhajans'
if(not os.path.exists(folderName)):
os.mkdir(folderName)
os.chdir('./'+folderName)
def dwd(song):
name ="unknown"
try:
name = " ".join(song.contents[0].split())
print("Name: "+song.contents[0].replace('\n',' ').replace(' ',''))
except:
print("SONG ITEM DOES NOT HAS ANY CONTENT!")
print("URL: "+ baseURL+song['href'])
print("Downloading Song...")
r = requests.get(baseURL+song['href'])
print("Song Downloaded ./"+folderName)
open(name+'.mp3','wb').write(r.content)
for song in songs:
dwd(song)
|
import time
import json
import csv
import os
import requests
from bs4 import BeautifulSoup
from jinja2 import Template
import headers
# these represent different job functions
FUNCTION_FACETS = [17, 18, 14, 2, 4, 20, 5, 13, 12, 26] #FA
SENIORITY_FACETS = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] #SE
LOCATION_FACETS = [ #G
'us:8-2-0-1-2',
'us:97',
'us:va',
'us:dc',
'us:tx',
'us:ca',
'us:md',
'us:70',
'us:31',
'us:ny',
'us:8-8-0-8-1',
'us:8-8-0-3-1',
'us:ga',
'us:52',
'us:7',
'us:8-8-0-95-11',
'us:nj',
'us:3-2-0-31-1',
]
FACETS = [
('FA', FUNCTION_FACETS),
('SE', SENIORITY_FACETS),
('G', LOCATION_FACETS)
]
def download_file(url, local_filename=None):
'''Downloads a file with requests
from: https://stackoverflow.com/a/16696317
'''
if local_filename is None:
local_filename = url.split('/')[-1]
print('saving to', local_filename)
r = requests.get(url, stream=True)
with open(local_filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
return local_filename
def get_page(company_id, facet=None, facet_id=None, start=0, count=50):
'''Gets a single page of results from linkedin for a particular job function at a company'''
params = {
'facet': ['CC'],
'facet.CC': company_id,
'count': count,
'start': start,
}
if facet is not None and facet_id is not None:
params['facet'] = ['CC', facet]
params['facet.' + facet] = facet_id
response = requests.get('https://www.linkedin.com/sales/search/results', headers=headers.headers, params=params)
return response.json()
def get_company(company_id, outname):
'''Gets all employees from a company using particular job functions'''
people = []
for facet, facet_ids in FACETS:
for facet_id in facet_ids:
print('getting facet', facet, facet_id, 'for company', company_id)
count = 50
start = 0
results = get_page(company_id, facet, facet_id)
total = results['pagination']['total']
people += results['searchResults']
start += count
while start < total:
print('getting', start, 'of', total)
time.sleep(1)
results = get_page(company_id, facet, facet_id, start)
people += results['searchResults']
start += count
with open(outname, 'w') as outfile:
json.dump(people, outfile, indent=2)
return outname
def get_images(datafile):
'''Downloads profile images'''
with open(datafile, 'r') as infile:
people = json.load(infile)
people = [p['member'] for p in people]
for p in people:
if 'vectorImage' not in p:
continue
pid = p['memberId']
outname = 'images/{}.jpg'.format(pid)
if os.path.exists(outname):
print('skipping')
continue
url = p['vectorImage']['rootUrl']
url += sorted(p['vectorImage']['artifacts'], key=lambda x: x['width'])[-1]['fileIdentifyingUrlPathSegment']
print(url)
download_file(url, outname)
time.sleep(1)
def get_profile(pid):
'''Downloads individual profiles'''
outname = 'profiles/{}.json'.format(pid)
if os.path.exists(outname):
return outname
out = {}
url = 'https://www.linkedin.com/sales/people/{},NAME_SEARCH'.format(pid)
print(url)
response = requests.get(url, headers=headers.headers)
soup = BeautifulSoup(response.text, 'html.parser')
codes = soup.select('code')
for c in codes:
try:
d = json.loads(c.text)
if 'contactInfo' in d:
out = d
break
except Exception as e:
continue
with open(outname, 'w') as outfile:
json.dump(out, outfile)
time.sleep(1)
return outname
def get_profiles(datafile):
'''Gets all profiles'''
with open(datafile, 'r') as infile:
data = json.load(infile)
for d in data:
pid = d['member']['profileId']
get_profile(pid)
def clean_and_parse(datafile, outname):
'''Outputs csv, json and html from employee listings'''
out = []
mids = []
with open(datafile, 'r') as infile:
data = json.load(infile)
for d in data:
mid = d['member']['memberId']
pid = d['member']['profileId']
imgpath = 'images/{}.jpg'.format(mid)
if not os.path.exists(imgpath):
imgpath = None
item = {
'name': d['member'].get('formattedName', ''),
'title': d['member'].get('title', ''),
'img': imgpath,
'company': d['company'].get('companyName', ''),
'location': d['member'].get('location', ''),
'id': d['member']['memberId'],
'linkedin': 'https://linkedin.com/in/' + pid,
}
if mid not in mids:
out.append(item)
mids.append(mid)
with open(outname + '.json', 'w') as jsonfile:
json.dump(out, jsonfile, indent=2)
with open(outname + '.csv', 'w') as csvfile:
fieldnames = list(out[0].keys())
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for row in out:
writer.writerow(row)
with open('template.html', 'r') as templatefile:
template = Template(templatefile.read())
html = template.render(people=out)
with open('index.html', 'w') as htmlout:
htmlout.write(html)
if __name__ == '__main__':
ICE = '533534'
datafile = 'ice_raw.json'
get_company(ICE, datafile)
get_profiles(datafile)
get_images(datafile)
clean_and_parse(datafile, 'ice')
|
# contains the entities declaration and definition of medicine information
from protorpc import messages
from google.appengine.ext import ndb
from google.appengine.ext.ndb import msgprop#alpha feature dangerous
#contains the real entities that we are going to model in for medicine api purpose
class Dossage(messages.Message):
class Units(messages.Enum):
MICRO_GRAM=1
MILI_GRAM=2
GRAM=3
MILI_LITRE=4
units=messages.EnumField('Dossage.Units',1,
default='MILI_GRAM',
required=True)
quant=messages.IntegerField(2,required=True)
class Cost(messages.Message):
value=messages.IntegerField(1,required=True)
currency=messages.StringField(2,required=True)
class Composition(messages.Message):
name=messages.StringField(1,required=True)
dossage=messages.MessageField(Dossage,2,required=True)
description=messages.StringField(3,required=True)
class Medicine(messages.Message):
class MedicineType(messages.Enum):
TABLET=1
INJECTION=2
CAPSULES=3
OINTMENT=4
DROPS=5
LOTION=6
SACHET=7
SYRUP=8
NULLL=9 #triple L done to avoid ambiguity with NULL
name=messages.StringField(1,required=True)
mrp=messages.MessageField(Cost,2,repeated=True)
composition=messages.MessageField(Composition,3,repeated=True)
dossage=messages.MessageField(Dossage,4,required=True)
medicine_type=messages.EnumField('Medicine.MedicineType',5,
default='NULLL',
required=True)
description=messages.StringField(6)
company_name=messages.StringField(7,required=True)
#handles the format the request will come
class MedicineMessage(messages.Message):
medicine=messages.MessageField(Medicine,1)
class MedicineFieldMessage(messages.Message):
limit=messages.IntegerField(1,default=10,required=False)
offset=messages.IntegerField(2,default=0,required=False)
name=messages.StringField(3)
company_name=messages.StringField(4)
medicine_type=messages.EnumField('Medicine.MedicineType',5)
class MedicineCompostionMessage(messages.Message):
limit=messages.IntegerField(1,default=10,required=False)
offset=messages.IntegerField(2,default=0,required=False)
compostion_name=messages.StringField(3,repeated=True)
class MedicineListMessage(messages.Message):
medicine_list=messages.MessageField(Medicine,1,repeated=True)
#handles the storage of data in datastore
class MedicineStore(ndb.Model):
medicine=msgprop.MessageProperty(Medicine,indexed_fields=['name','medicine_type',
'company_name',"composition.name"])
#warning this feature is in alpha
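# A minimal construction sketch (all values are illustrative, and persisting the
# MedicineStore entity assumes a configured App Engine datastore context):
def _example_medicine():
    dose = Dossage(units=Dossage.Units.MILI_GRAM, quant=500)
    med = Medicine(
        name='Paracetamol 500',
        mrp=[Cost(value=20, currency='INR')],
        composition=[Composition(name='Paracetamol', dossage=dose,
                                 description='analgesic/antipyretic')],
        dossage=dose,
        medicine_type=Medicine.MedicineType.TABLET,
        company_name='Example Pharma')
    return MedicineStore(medicine=med)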
|
# Dependencies
import requests
# Google developer API key
gkey = "AIzaSyA_Clyz3478YAUnsESNHE5dyktvvMoa-vw"
# Target city
target_city = "Boise, Idaho"
# Build the endpoint URL
target_url = "https://maps.googleapis.com/maps/api/geocode/json" \
"?address=%s&key=%s" % (target_city, gkey)
# Print the assembled URL
print(target_url)
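# A possible next step (sketch): fetch the geocode response and read the
# coordinates; the keys below follow the Google Geocoding API JSON format.
geo_data = requests.get(target_url).json()
if geo_data.get("results"):
    location = geo_data["results"][0]["geometry"]["location"]
    print(location["lat"], location["lng"])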
|
import os
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn')
STANDARD_COLORS = [
"Chartreuse",
"Aqua",
"Aquamarine",
"BlueViolet",
"BurlyWood",
"CadetBlue",
"Chocolate",
"Coral",
"CornflowerBlue",
"Cornsilk",
"Crimson",
"Cyan",
"DarkCyan",
"DarkGoldenRod",
"DarkGrey",
"DarkKhaki",
"DarkOrange",
"DarkOrchid",
"DarkSalmon",
"DarkSeaGreen",
"DarkTurquoise",
"DarkViolet",
"DeepPink",
"DeepSkyBlue",
"DodgerBlue",
"FireBrick",
"FloralWhite",
"ForestGreen",
"Fuchsia",
"Gainsboro",
"GhostWhite",
"Gold",
]
txtFile = open("checkpoint/Dec-16[18.19.35]-YOLOv4/log.txt")
txtList = txtFile.readlines()
total_loss = []
loss_ciou = []
loss_conf = []
loss_cls = []
number_map = []
T_map = []
left_map = []
right_map = []
add_map = []
minus_map = []
multi_map = []
total_map = []
# os.makedirs(xmlPath, exist_ok=True)
for idx, line in enumerate(txtList):
if line.find("total_loss") != -1:
k = line.find("total_loss")
k = k + 11
t = k
while (line[k] >='0' and line[k] <= '9') or line[k] == '.':
k = k + 1
total_loss.append(float(line[t:k]))
if line.find("loss_ciou") != -1:
k = line.find("loss_ciou")
k = k + 10
t = k
while (line[k] >='0' and line[k] <= '9') or line[k] == '.':
k = k + 1
loss_ciou.append(float(line[t:k]))
if line.find("loss_conf") != -1:
k = line.find("loss_conf")
k = k + 10
t = k
while (line[k] >='0' and line[k] <= '9') or line[k] == '.':
k = k + 1
loss_conf.append(float(line[t:k]))
if line.find("loss_cls") != -1:
k = line.find("loss_cls")
k = k + 9
t = k
while (line[k] >='0' and line[k] <= '9') or line[k] == '.':
k = k + 1
loss_cls.append(float(line[t:k]))
if line.find("number --> mAP : ") != -1:
k = line.find("number --> mAP : ")
k = k + len("number --> mAP : ")
t = k
while (line[k] >= '0' and line[k] <= '9') or line[k] == '.':
k = k + 1
number_map.append(float(line[t:k]))
if line.find("left_matrix --> mAP : ") != -1:
k = line.find("left_matrix --> mAP : ")
k = k + len("left_matrix --> mAP : ")
t = k
while (line[k] >= '0' and line[k] <= '9') or line[k] == '.':
k = k + 1
left_map.append(float(line[t:k]))
if line.find("right_matrix --> mAP : ") != -1:
k = line.find("right_matrix --> mAP : ")
k = k + len("right_matrix --> mAP : ")
t = k
while (line[k] >= '0' and line[k] <= '9') or line[k] == '.':
k = k + 1
right_map.append(float(line[t:k]))
if line.find("add --> mAP : ") != -1:
k = line.find("add --> mAP : ")
k = k + len("add --> mAP : ")
t = k
while (line[k] >= '0' and line[k] <= '9') or line[k] == '.':
k = k + 1
add_map.append(float(line[t:k]))
if line.find("minus --> mAP : ") != -1:
k = line.find("minus --> mAP : ")
k = k + len("minus --> mAP : ")
t = k
while (line[k] >= '0' and line[k] <= '9') or line[k] == '.':
k = k + 1
minus_map.append(float(line[t:k]))
if line.find("multi --> mAP : ") != -1:
k = line.find("multi --> mAP : ")
k = k + len("multi --> mAP : ")
t = k
while (line[k] >= '0' and line[k] <= '9') or line[k] == '.':
k = k + 1
multi_map.append(float(line[t:k]))
if line.find("T --> mAP : ") != -1:
k = line.find("T --> mAP : ")
k = k + len("T --> mAP : ")
t = k
while (line[k] >= '0' and line[k] <= '9') or line[k] == '.':
k = k + 1
T_map.append(float(line[t:k]))
if line.find(":mAP : ") != -1:
k = line.find(":mAP : ")
k = k + len(":mAP : ")
t = k
while (line[k] >= '0' and line[k] <= '9') or line[k] == '.':
k = k + 1
total_map.append(float(line[t:k]))
print(len(total_loss))
print(len(loss_ciou))
print(len(loss_conf))
print(len(loss_cls))
print(len(number_map))
print(len(T_map))
print(len(total_map))  # total mAP
total_loss = np.array(total_loss)
loss_ciou = np.array(loss_ciou)
loss_conf = np.array(loss_conf)
loss_cls = np.array(loss_cls)
number_map = np.array(number_map)
T_map = np.array(T_map)
total_map = np.array(total_map)
# p1, = plt.plot(number_map, c=STANDARD_COLORS[0])
# p2, = plt.plot(T_map, c=STANDARD_COLORS[1])
# p3, = plt.plot(total_map, c=STANDARD_COLORS[2])
# p4, = plt.plot(left_map, c=STANDARD_COLORS[3])
# p5, = plt.plot(right_map, c=STANDARD_COLORS[4])
# p6, = plt.plot(add_map, c=STANDARD_COLORS[5])
# p7, = plt.plot(minus_map, c=STANDARD_COLORS[6])
# p8, = plt.plot(multi_map, c=STANDARD_COLORS[7])
# plt.xlabel("epoch")
# plt.ylabel("mAP")
# plt.legend([p1, p2, p4, p5, p6, p7, p8, p3], ["number", "T", "left_matrix", "right_matrix", "add", "minus", "multi", "Total"])
# plt.grid(True)
# plt.show()
plt.figure(figsize=(18,6))
epochs = np.linspace(0, 50, 1660)
plt.subplot(141)
plt.plot(epochs, total_loss, label="Total loss")
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.grid(True)
plt.subplot(142)
plt.plot(epochs, loss_ciou, label="Bounding box regression loss")
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.grid(True)
plt.subplot(143)
plt.plot(epochs, loss_conf, label="Confidence loss")
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.grid(True)
plt.subplot(144)
plt.plot(epochs, loss_cls, label="Classification loss")
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
plt.grid(True)
plt.tight_layout()  # adjust overall spacing
plt.show()
|
from archana import flask_demo
from archana.flask_demo import jsonify
from sqlalchemy import create_engine
app = flask_demo.Flask(__name__)
app.config["DEBUG"] = True
vehicle_types = [
{'id': 1,
'type': 'aircraft'},
{'id': 2,
'type': 'spacecraft'},
{'id': 3,
'type': 'watercraft'}
]
def connect_db():
db_connection_string = "postgresql://postgres:admin123@localhost:5432/maatram"
return create_engine(db_connection_string)
@app.route('/', methods=['GET'])
def home():
return '''<h1>Welcome to GoRide</h1>
<p>A ride hiring and offering app customised for your needs.</p>'''
@app.route('/vehicle-types', methods=['GET'])
def get_all_vehicle_types():
vehicle_types_from_db = db.execute("SELECT * FROM go_ride.vehicle_types")
formatted_result = [dict(row) for row in vehicle_types_from_db]
return jsonify(formatted_result)
# return jsonify(vehicle_types)
db = connect_db()
app.run()
|
#!/usr/bin/python
import sys
import logging
logging.basicConfig(stream=sys.stderr)
sys.stdout = sys.stderr
sys.path.insert(0,"/var/www/allure/")
from allure import app as application
application.secret_key = 'Add your secret key'
|
#Nothing needs to go here, Python just needs this file here so it can understand directory structure
|
# stdlib
from typing import List, Dict, Any, Tuple
import bson
# 3rd party
from pprint import pprint
# local
from . import usercollection, activitycollection
from . import START_OF_WEEK
from .util import datetime_to_secs
from .algo import train_from_interactions
def flatfeed(userid: bson.objectid.ObjectId) -> List[Dict[str, Any]]:
"""
pull out a flat feed, kinda messy
"""
# TODO: Fix messyness here
user = usercollection.find_one({'_id': userid})
feed = []
posts = [list(activitycollection.find({'actor': friend, 'verb': 'post'}))
for friend in user['friends']]
for post in posts:
for item in post:
feed.append(item)
feed.sort(key=lambda obj: obj['time'])
return feed
def trainedfeed(userid: bson.objectid.ObjectId) -> Tuple[List[Dict[str, Any]], Dict[str, float]]:
"""
Pull out a trained feed
"""
feed = flatfeed(userid)
interactions = list(activitycollection.find({'actor': userid, 'verb': 'interact'}))
interactions = [{
'time': obj['time'],
'topic': activitycollection.find_one({'_id': obj['object']})['topic']
} for obj in interactions]
# normalizing weights to 1, need max time.
maxtime = datetime_to_secs(max(feed, key=lambda obj: obj['time'])['time'], reftime=START_OF_WEEK)
# Adding normalized weights initially based on timestamps
# neat way to add keys
weighted_feed = [{**obj, **{'weight': datetime_to_secs(obj['time'], reftime=START_OF_WEEK) / maxtime}}
for obj in feed]
# now we can fiddle with weights based on interactions
# Using algo.py for algorithm here
weights = train_from_interactions(interactions)
weighted_feed = order_by_theme_weights(weights, weighted_feed)
return weighted_feed, weights
def order_by_theme_weights(weights, feed):
"""
Order a feed by provided topic weights
"""
new_feed = []
for obj in feed:
obj['weight'] = obj['weight'] * weights[obj['topic']][1]
new_feed.append(obj)
feed.sort(key=lambda obj: obj['weight'])
return feed
|
# -*- coding: utf-8 -*-
'''
Connects to the WAMP router and runs the Digesto WAMP service
'''
if __name__ == '__main__':
import sys, logging, inject
sys.path.insert(0,'../python')
logging.basicConfig(level=logging.DEBUG)
from autobahn.asyncio.wamp import ApplicationRunner
from model.config import Config
from actions.systems.camaras.camaras import WampCamaras
def config_injector(binder):
binder.bind(Config,Config('server-config.cfg'))
inject.configure(config_injector)
config = inject.instance(Config)
url = config.configs['server_url']
realm = config.configs['server_realm']
debug = config.configs['server_debug']
runner = ApplicationRunner(url=url,realm=realm,debug=debug, debug_wamp=debug, debug_app=debug)
runner.run(WampCamaras)
|
import unittest
from jousting.round.kick import Kick
from jousting.round.controller import Controller
from jousting.player.knight import Knight
class KickTest(unittest.TestCase):
def setUp(self):
p1 = Knight("P1")
p2 = Knight("P2")
self.controller = Controller(p1, p2)
self.kick = Kick(self.controller)
def test_kick(self):
self.kick.do_kick()
self.assertNotEquals(self.kick._controller.get_p1(), self.kick._controller.get_p2())
def test_knight_move_during_kick(self):
for i in range(1000):
self.kick.do_kick()
self.assertNotEquals(0, self.kick._controller.get_p1().get_current_position())
self.assertNotEquals(0, self.kick._controller.get_p2().get_current_position())
def test_swap_logic(self):
p1 = self.controller.get_p1()
p2 = self.controller.get_p2()
self.controller.set_p1(p2)
self.controller.set_p2(p1)
self.assertEquals(self.controller.get_p1(), p2)
self.assertEquals(self.controller.get_p2(), p1)
self.assertNotEquals(self.controller.get_p1(), self.controller.get_p2())
|
#!/usr/bin/env python3
def make_ext(modname, pyxfilename):
from distutils.extension import Extension
return Extension(
name = modname,
sources=[pyxfilename],
extra_compile_args=["-O3","-march=native"]
)
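# Sketch of how a .pyxbld hook like this is typically used (module name assumed):
# save this file next to fastmod.pyx as fastmod.pyxbld, then
#   import pyximport; pyximport.install()
#   import fastmod   # compiled with -O3 -march=native via make_ext above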
|
a2=int(input())
if(a2>0):
print("Positive")
elif(a2<0):
print("Negative")
else:
print("Zero")
|
# -*- coding: utf-8 -*-
from model.validator import Validator
from control.control_analise import ControlAnalise
class ControlValidator(object):
def __init__(self):
self.validator = Validator()
self.control_analise = ControlAnalise()
self.resultados = []
def validate(self, X, y, regressor):
        # Returns the array with the Score results for the specific model
return self.validator.validate_models(X, y, regressor, self.resultados)
def compare(self, results, lst_models):
        # self.validation = self.control_regressor.get_validation(
        #     self.X, self.y, test_size, validation, base, plt_text, balance, kwargs)  # Invokes the StratifiedKFold validation function
        # Invokes the comparison of the results of the 30 tests for each model
result_wilcox, result_fried, models_par, ranks, names, cds, average_ranks = self.validator.compare_results(
results, lst_models)
self.control_analise.print_conclusao(
            result_wilcox, result_fried, ranks, models_par, names, cds, average_ranks, lst_models)  # Prints the conclusion of the validation comparisons
return ranks, names
|
import torch
import torch.nn
import torchvision.models
import torchvision.models.resnet
from chess_recognizer.common import BOARD_DIMENSIONS
class Resnet(torch.nn.Module):
def __init__(self, pretrained: bool = True):
super().__init__()
self.resnet = torchvision.models.resnet18(pretrained=pretrained)
if pretrained:
for param in self.resnet.parameters():
param.requires_grad = True
self.resnet.fc = torch.nn.Linear(
in_features=512 * torchvision.models.resnet.BasicBlock.expansion,
out_features=BOARD_DIMENSIONS[0] * BOARD_DIMENSIONS[1],
)
for param in self.resnet.fc.parameters():
param.requires_grad = True
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.resnet.forward(x)
|
from django.contrib import admin
from .models import Portfolio, Service, Carousel_figure, Client_words, Structure
# Register your models here.
admin.site.register(Portfolio)
admin.site.register(Service)
admin.site.register(Client_words)
admin.site.register(Structure)
admin.site.register(Carousel_figure)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 23 13:51:00 2015
@author: fbeyer
"""
#to activate the virtual environment
#cd ..
#source venvPLS/bin/activate
#!/usr/bin/env python2
import itertools
from multiprocessing import Pool, freeze_support, Array, Process, Queue
from functools import partial
import numpy as np
from sklearn.utils.extmath import randomized_svd
from scipy.stats.mstats_basic import zscore
import pyprind
from sklearn.utils import arpack
from sklearn.decomposition.truncated_svd import TruncatedSVD
from sklearn.preprocessing.data import scale
from scipy.stats.stats import percentileofscore
import scipy as sc
from time import time
import os
import timeit
def fit_pls(X, Y, n_components, scale=True, algorithm="randomized"):
#scaling
print "calculating SVD"
if scale:
X_scaled = zscore(X, axis=0, ddof=1)
Y_scaled = zscore(Y, axis=0, ddof=1)
covariance = np.dot(Y_scaled.T, X_scaled)
else:
covariance = np.dot(Y.T, X)
print np.shape(covariance)
sum_var=covariance
svd = TruncatedSVD(n_components, algorithm)
#computes only the first n_components largest singular values
#produces a low-rank approximation of covariance matrix
Y_saliences, singular_values, X_saliences = svd._fit(covariance)
X_saliences = X_saliences.T
inertia = singular_values.sum()
if scale:
return X_saliences, Y_saliences, singular_values, inertia, X_scaled, Y_scaled, sum_var
else:
return X_saliences, Y_saliences, singular_values, inertia
def _procrustes_rotation(fixed, moving, moving_singular_values=None):
N, _, P = np.linalg.svd(np.dot(fixed.T,moving))
rotation_matrix = np.dot(N, P)
rotated = np.dot(moving, rotation_matrix)
    if moving_singular_values is not None:
rotated_scaled = np.dot(np.dot(moving, np.diag(moving_singular_values)), rotation_matrix)
rotated_singular_values = np.sqrt(np.square(rotated_scaled).sum(axis=0))
return rotated, rotation_matrix, rotated_singular_values
else:
return rotated, rotation_matrix
def _permute_and_calc_singular_values_process(X, Y, a, b, n_components, algorithm, output, x): #perm_i
""" basic version for parallel implementation using processes and output queue
"""
#call random seed so not the same random number is used each time
#pid = current_process()._identity[0]
#randst = np.random.mtrand.RandomState(pid)
np.random.seed( int( time() ) + x +50)
#test how permutation works
c=np.random.permutation(a)
print a
print c
if len(X) < len(Y):
#apply permutation to shorter list
#print "randomization X<Y"
X_perm = np.random.permutation(X)
covariance_perm = np.dot(Y.T, X_perm)
else:
#print "other permutation"
Y_perm = np.random.permutation(Y)
covariance_perm = np.dot(Y_perm.T, X)
svd = TruncatedSVD(n_components, algorithm=algorithm)
#print covariance_perm
Y_saliences_perm, singular_values_perm, X_saliences_perm = svd._fit(covariance_perm)
output.put(singular_values_perm)
def _permute_and_calc_singular_values_pool(X, Y, X_saliences, Y_saliences, n_components,procrustes, algorithm, perm_i):
""" basic version for parallel implementation using pool
"""
#call random seed so not the same random number is used in each process
np.random.seed( int( time() ) + perm_i)
if len(X) < len(Y):
#apply permutation to shorter list
#print "randomization X<Y"
X_perm = np.random.permutation(X)
covariance_perm = np.dot(Y.T, X_perm)
else:
#print "other permutation"
Y_perm = np.random.permutation(Y)
covariance_perm = np.dot(Y_perm.T, X)
svd = TruncatedSVD(n_components, algorithm=algorithm)
Y_saliences_perm, singular_values_perm, X_saliences_perm = svd._fit(covariance_perm)
if procrustes:
#It does not matter which side we use to calculate the rotated singular values
#let's pick the smaller one for optimization
if len(X_saliences_perm) > len(Y_saliences_perm):
_, _, singular_values_perm = _procrustes_rotation(Y_saliences, Y_saliences_perm, singular_values_perm)
else:
X_saliences_perm = X_saliences_perm.T
_, _, singular_values_perm = _procrustes_rotation(X_saliences, X_saliences_perm, singular_values_perm)
return singular_values_perm
def permutation_test(X_scaled, Y_scaled, X_saliences, Y_saliences, singular_values,inertia, n_perm, verbose=True, algorithm="randomized"):
n_components = X_saliences.shape[1]
print "Starting permutations"
#if verbose:
# my_perc = pyprind.ProgBar(n_perm, stream=1, title='running permutations', monitor=True)
#import warnings
#warnings.filterwarnings("ignore")
################################################################################
#create pool to run permutations in parallel
#procrustes=False
iterable = np.arange(n_perm)
P = Pool(processes = 20)
func = partial(_permute_and_calc_singular_values_pool, X_scaled, Y_scaled,X_saliences, Y_saliences, n_components, True, algorithm)
results=P.map(func, iterable)
P.close()
P.join()
#cpu-count
#multiprocessing.cpu_count()
#if verbose:
# my_perc.update()
#if verbose:
# print my_perc
# print "calculating p values"
################################################################################
################################################################################
#use a list of processes and output queue
# output = Queue()
#
# # Setup a list of processes that we want to run
# processes = [Process(target=_permute_and_calc_singular_values, args=(X_scaled, Y_scaled, a, b, n_components, algorithm, output, x)) for x in range(4)]
#
# # Run processes
# for p in processes:
# p.start()
#
# # Exit the completed processes
# for p in processes:
# p.join()
#
# # Get process results from the output queue
# results = [output.get() for p in processes]
#
# print(results)
################################################################################
print "end permutations"
singular_values_samples=np.array(results).reshape(n_perm,n_components) #reshape results from list to np.array
singvals_p_vals = np.zeros((n_components))#initialize saliences_p_vals
for component_i in range(n_components):
#percentileofscore compares rank to list of ranks (here singular value of component to bootstrapped
#list of singular values
singvals_p_vals[component_i] = (100.0-percentileofscore(singular_values_samples[:,component_i], singular_values[component_i]))/100.0
#inertia describes explained variance
inertia_p_val = (100.0-percentileofscore(singular_values_samples.sum(axis=1), inertia))/100.0
return singvals_p_vals, inertia_p_val, singular_values_samples
###------------########----------------#############------------#############
#BOOTSTRAPPING USING PROCESSES (outdated because of not finishing)
###------------########----------------#############------------#############
def _boostrap_process(X, Y, X_saliences, Y_saliences, n_components, algorithm, output_X, output_Y, x):
""" basic version for parallel implementation using processes and output queues
"""
#call random seed so not the same random number is used in each process
np.random.seed( int( time() ) + x)
#choose indices to resample randomly with replacement for a sample of same size
sample_indices = np.random.choice(range(X.shape[0]), size=X.shape[0], replace=True)
X_boot = X[sample_indices,:]
Y_boot = Y[sample_indices,:]
X_boot_scaled = scale(X_boot)
Y_boot_scaled = scale(Y_boot)
print np.shape(X_boot)
covariance_boot = np.dot(Y_boot_scaled.T, X_boot_scaled)
svd = TruncatedSVD(n_components, algorithm=algorithm)
print "finished calculating SVD"
Y_saliences_boot, _, X_saliences_boot = svd._fit(covariance_boot)
X_saliences_boot = X_saliences_boot.T
#It does not matter which side we use to calculate the rotated singular values
#let's pick the smaller one for optimization
print "rotating values"
if len(X_saliences_boot) > len(Y_saliences_boot):
#use procrustes_rotation on smaller dataset
Y_bootstraps, rotation_matrix = _procrustes_rotation(Y_saliences, Y_saliences_boot)
X_bootstraps = np.dot(X_saliences_boot, rotation_matrix)
else:
X_bootstraps, rotation_matrix = _procrustes_rotation(X_saliences, X_saliences_boot)
Y_bootstraps = np.dot(Y_saliences_boot, rotation_matrix)
output_X.put(X_bootstraps)
output_Y.put(Y_bootstraps)
print "finished rotating"
################################################################################
def bootstrap_test(X, Y, X_saliences, Y_saliences, n_components,n_boot, verbose=True):
print "starting bootstrap"
################################################################################
#use a list of processes and output queue
output_X = Queue()
output_Y = Queue()
#Setup a list of processes that we want to run
    processes = [Process(target=_bootstrap_process, args=(X, Y, X_saliences, Y_saliences, n_components, 'randomized', output_X, output_Y, x)) for x in range(n_boot)]
#Run processes
for p in processes:
p.daemon = True
p.start()
print "adding process"
#p.close()
#pool.join()
#Exit the completed processes
#for p in processes:
# p.join()
# print "exiting processes"
#Get process results from the output queue
X_boo = [output_X.get() for p in processes]
Y_boo = [output_Y.get() for p in processes]
X_saliences_bootstraps=np.array(X_boo).reshape(n_boot,X_saliences.shape[0], X_saliences.shape[1])
Y_saliences_bootstraps=np.array(Y_boo).reshape(n_boot,Y_saliences.shape[0], Y_saliences.shape[1])
    #aggregate (mean/std) across the first axis because the bootstrap samples are stacked along it
X_saliences_bootstrap_ratios = X_saliences_bootstraps.mean(axis=0)/X_saliences_bootstraps.std(axis=0)
Y_saliences_bootstrap_ratios = Y_saliences_bootstraps.mean(axis=0)/Y_saliences_bootstraps.std(axis=0)
return X_saliences_bootstrap_ratios, Y_saliences_bootstrap_ratios
###------------########----------------#############------------#############
#BOOTSTRAPPING USING POOL
###------------########----------------#############------------#############
def _bootstrap_pool(X, Y, X_saliences, Y_saliences, n_components,procrustes, algorithm, boot_i):
""" basic version for parallel implementation of bootstrapping using pool
"""
#call random seed so not the same random number is used in each process
np.random.seed( int( time() ) + boot_i)
#choose indices to resample randomly with replacement for a sample of same size
sample_indices = np.random.choice(range(X.shape[0]), size=X.shape[0], replace=True)
X_boot = X[sample_indices,:]
Y_boot = Y[sample_indices,:]
X_boot_scaled = scale(X_boot)
Y_boot_scaled = scale(Y_boot)
covariance_boot = np.dot(Y_boot_scaled.T, X_boot_scaled)
svd = TruncatedSVD(n_components, algorithm=algorithm)
Y_saliences_boot, _, X_saliences_boot = svd._fit(covariance_boot)
X_saliences_boot = X_saliences_boot.T
#It does not matter which side we use to calculate the rotated singular values
#let's pick the smaller one for optimization
if len(X_saliences_boot) > len(Y_saliences_boot):
#use procrustes_rotation on smaller dataset
Y_bootstraps, rotation_matrix = _procrustes_rotation(Y_saliences, Y_saliences_boot)
X_bootstraps = np.dot(X_saliences_boot, rotation_matrix)
else:
X_bootstraps, rotation_matrix = _procrustes_rotation(X_saliences, X_saliences_boot)
Y_bootstraps = np.dot(Y_saliences_boot, rotation_matrix)
#print np.shape(X_bootstraps)
#print np.shape(Y_bootstraps)
return X_bootstraps, Y_bootstraps
def bootstrap_pool(X, Y, X_saliences, Y_saliences, n_components,n_boot, verbose, write_dir):
#bootstrap
X_saliences_bootstraps = np.zeros(X_saliences.shape + (n_boot,))
Y_saliences_bootstraps = np.zeros(Y_saliences.shape + (n_boot,))
print "shape of X bootstraps: "
print np.shape(X_saliences_bootstraps)
print "shape of Y bootstraps: "
print np.shape(Y_saliences_bootstraps)
print "starting bootstraping"
#create pool to run permutations in parallel
iterable = np.arange(n_boot)
P = Pool(processes = 6)
func = partial(_bootstrap_pool, X, Y, X_saliences, Y_saliences, n_components, True, 'randomized')
res=P.map(func, iterable)
P.close()
P.join()
X_saliences_bootstraps=[row[0] for row in res]
Y_saliences_bootstraps=[row[1] for row in res]
print "shape of X bootstraps: "
print np.shape(X_saliences_bootstraps)
print "shape of Y bootstraps: "
print np.shape(Y_saliences_bootstraps)
print "type of bootstraps:"
print type(X_saliences_bootstraps)
    #shape of bootstrapped X,Y is now (n_boot, n_voxels_X/Y, n_comp)
#saving subresults
#saving results
# os.chdir(write_dir)
# np.save('Xsal_bootstrap_save_temp.npy', X_saliences_bootstraps)
# np.save('Ysal_bootstrap_save_temp.npy', Y_saliences_bootstraps)
#
#
##
## #saving subresults
## #loading results
# X_boo=np.load('Xsal_bootstrap_save_temp.npy')
# Y_boo=np.load('Ysal_bootstrap_save_temp.npy')
# print np.shape(X_boo)
# print np.shape(Y_boo)
    #aggregate (mean/std) across the first dimension because the bootstrap samples are stacked along it
X_saliences_bootstrap_asa = np.asarray(X_saliences_bootstraps)
Y_saliences_bootstrap_asa = np.asarray(Y_saliences_bootstraps)
X_saliences_bootstrap_ratios=X_saliences_bootstrap_asa.mean(axis=0)/X_saliences_bootstrap_asa.std(axis=0)
Y_saliences_bootstrap_ratios=Y_saliences_bootstrap_asa.mean(axis=0)/Y_saliences_bootstrap_asa.std(axis=0)
#
print "done with summing across first dimensions"
print np.shape(X_saliences_bootstrap_ratios)
print np.shape(Y_saliences_bootstrap_ratios)
return X_saliences_bootstrap_ratios, Y_saliences_bootstrap_ratios
def run_pls(X, Y, n_components, n_perm, n_boot, write_dir):
X_saliences, Y_saliences, singular_values, inertia, X_scaled, Y_scaled, sum_var = fit_pls(X, Y,
n_components=n_components,
algorithm="randomized")
    #save intermediate inputs for later inspection
os.chdir(write_dir)
print "saving inputs"
np.save('Xsal.npy', X_saliences)
np.save('Ysal.npy', Y_saliences)
np.save('sing.npy', singular_values)
np.save('inert.npy', inertia)
np.save('Xscaled.npy', X_scaled)
np.save('Yscaled.npy', Y_scaled)
np.save('sumvar.npy', sum_var)
os.chdir("/home/raid1/fbeyer/Documents/Scripts/PLS/")
print "\nstart permutations"
singvals_p_vals, inertia_p_val, singular_values_samples = permutation_test(X_scaled,
Y_scaled,
X_saliences,
Y_saliences,
singular_values,
inertia,
n_perm,
verbose=True,
algorithm="randomized")
print "\npermutations done"
X_saliences_bootstrap_ratios, Y_saliences_bootstrap_ratios = bootstrap_pool(X, Y,X_saliences, Y_saliences, n_components,n_boot,True, write_dir)
print "\nbootstrapping done"
return X_saliences, Y_saliences, singular_values, inertia, singvals_p_vals, inertia_p_val, singular_values_samples, X_saliences_bootstrap_ratios, Y_saliences_bootstrap_ratios
if __name__ == "__main__":
perm_n=2000
boot_n=2000
n_comp=9
analysis_dir='/data/pt_life/data_fbeyer/PLS_on_TBM_obesity_markers/PLS/prep/N749/VBM_input/for_analysis/'
write_dir="/data/pt_life/data_fbeyer/PLS_on_TBM_obesity_markers/PLS/results/sampleN748/N748_TIV_regression/perm_boot_logtr_all_regressed_n9_wo_crp/"
brain_data=np.load(analysis_dir+'regression_N748/brain_data_mask0.3_N748.npy')
exclude_crp = np.array(
[.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, 1.00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, .00, 1.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, 1.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, 1.00, .00, .00, 1.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, 1.00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, 1.00, .00, .00, .00, .00, 1.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, 1.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, 1.00, .00, .00,
.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, 1.00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, 1.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, 1.00, 1.00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, 1.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, 1.00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, .00, .00, .00, 1.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, .00, .00, .00, .00, 1.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, .00, 1.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, 1.00, .00, .00,
.00, .00, 1.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, 1.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, 1.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, 1.00, .00, .00, .00, .00, .00, .00, .00,
.00, .00, .00, .00, 1.00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00, .00,
.00], dtype=bool)
brain_data = brain_data[np.invert(exclude_crp), :]
Ob = np.load(analysis_dir + 'BMI_lab_measures_N748_logtr_all_reg_wocrp.npy')
print np.shape(brain_data)
print np.shape(Ob)
os.chdir("/home/raid1/fbeyer/Documents/Scripts/PLS/")
start_time = timeit.default_timer()
brain_saliences, Ob_data_saliences, singular_values, inertia, singvals_p_vals, \
inertia_p_value, singular_values_distr, brain_saliences_bootstrap_ratios, \
Ob_data_saliences_bootstrap_ratios= \
run_pls(brain_data,Ob,n_comp,perm_n, boot_n, write_dir)
print "time elapsed for whole analysis of %i perm and %i bootstraps in minutes" %(perm_n, boot_n)
print((timeit.default_timer() - start_time)/60)
#load results from analysis_dir
os.chdir(write_dir)
np.save('brain_saliences.npy', brain_saliences)
np.save('Ob_salience.npy', Ob_data_saliences)
np.save('singvals_p.npy',singvals_p_vals)
np.save('inertia.npy', inertia)
np.save('inertia_p.npy', inertia_p_value)
np.save('singular_values.npy', singular_values)
np.save('singular_values_sampled.npy', singular_values_distr)
np.save('brain_salience_bootstrap.npy',brain_saliences_bootstrap_ratios)
np.save('Ob_salience_bootstrap.npy', Ob_data_saliences_bootstrap_ratios)
#
#
|
# _*_ coding: utf-8 _*_
__author__ = 'FWJ'
__date__ = '2017/9/12'
from flask import Flask, render_template
from flask_sqlalchemy import SQLAlchemy
import pymysql
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:Qaz123456789@localhost:3306/net_news?charset=utf8'
db = SQLAlchemy(app)
# Got caught out by the instructor: this differs from the imported database, so the fields here had to be adjusted
class News(db.Model):
__tablename__ = 'news'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(200), nullable=False)
content = db.Column(db.String(2000), nullable=False)
types = db.Column(db.String(10), nullable=False)
image = db.Column(db.String(300),)
author = db.Column(db.String(20),)
view_count = db.Column(db.Integer)
created_at = db.Column(db.DateTime)
is_valid = db.Column(db.Boolean)
def __repr__(self):
return '<News %r>' % self.title
@app.route('/')
def index():
""" 新闻首页 """
news_list = News.query.all()
return render_template('index.html')
@app.route('/cat/<name>/')
def cat(name):
""" 新闻的类别 """
# 查询类别为name的新闻数据
# query官方文档:http://docs.sqlalchemy.org/en/rel_1_1/orm/tutorial.html
news_list = News.query.filter(News.types == name)
return render_template('cat.html')
@app.route('/detail/<int:pk>/')
def detail(pk):
""" 新闻详情信息 """
news_obj = News.query.get(pk)
return render_template('detail.html', news_obj=news_obj)
if __name__ == '__main__':
    # Debug mode
# db.create_all()
app.run(debug=True)
|
class Person():
def __init__(self,name,surname):
self.name = name
self.surname = surname
@property
def fullname(self):
return f'{self.name} {self.surname}'
@fullname.setter
def fullname(self,tamad):
name, surname = tamad.split(' ')
self.name = name
self.surname = surname
p1 = Person('Togrul','Masimli')
print(p1.fullname)
p1.name = 'John'
p1.surname = 'Doe'
print(p1.fullname)
p1.fullname = 'Corey Schafer'
print(p1.name)
print(p1.surname)
print(p1.fullname)
|
from sdcli.config._config import Config
from sdcli.config.config_obs import ConfigOBS
|
# -*- coding: utf-8 -*-
import os
import requests
import tarfile
import shutil
from tqdm import tqdm
def download(src, url):
file_size = int(requests.head(url).headers['Content-Length'])
header = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/'
'70.0.3538.67 Safari/537.36'
}
pbar = tqdm(total=file_size)
resp = requests.get(url, headers=header, stream=True)
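    # Note: 'ab' appends to an existing file, so this assumes a fresh download;
    # delete any partially downloaded file before re-running.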
with open(src, 'ab') as f:
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
                pbar.update(len(chunk))
pbar.close()
return file_size
abs_path = os.path.abspath(__file__)
download_url = "https://baidu-nlp.bj.bcebos.com/dmtk_data_1.0.0.tar.gz"
download_path = os.path.join(os.path.dirname(abs_path), "dmtk_data_1.0.0.tar.gz")
target_dir = os.path.dirname(abs_path)
download(download_path, download_url)
tar = tarfile.open(download_path)
tar.extractall(target_dir)
os.remove(download_path)
shutil.rmtree(os.path.join(target_dir, 'data/dstc2/'))
shutil.rmtree(os.path.join(target_dir, 'data/mrda/'))
shutil.rmtree(os.path.join(target_dir, 'data/multi-woz/'))
shutil.rmtree(os.path.join(target_dir, 'data/swda/'))
shutil.rmtree(os.path.join(target_dir, 'data/udc/'))
|
import xlrd
import pymysql
# Read the contents of the Excel file into the database
wb = xlrd.open_workbook('./dates.xls')
sh = wb.sheet_by_index(0)
dfun=[]
nrows = sh.nrows # number of rows
ncols = sh.ncols # number of columns
fo=[]
fo.append(sh.row_values(0))
for i in range(1,nrows):
dfun.append(sh.row_values(i))
conn=pymysql.connect(host='localhost',user='root',passwd='12345678',db='wk')
cursor=conn.cursor()
# Create the table
cursor.execute("create table dates("+fo[0][0]+" varchar(100),primary key(date));")
# Add the remaining columns
for i in range(1,ncols):
if i ==1:
cursor.execute("alter table dates add "+fo[0][i]+" varchar(100);")
else:
cursor.execute("alter table dates add " + fo[0][i] + " enum('1','2','Q1','Q2','S1','S2');")
val=''
for i in range(0,ncols):
val = val+'%s,'
print (dfun)
cursor.executemany("insert into dates values("+val[:-1]+");" ,dfun)
conn.commit()
|
import json
from datetime import datetime
class Kestra:
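    # Thin helper for scripts executed by Kestra: values are written to stdout wrapped in
    # '::{...}::' markers, which the Kestra executor parses to collect outputs and metrics.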
def __init__(self):
pass
@staticmethod
def _send(map):
print("::" + json.dumps(map) + "::")
@staticmethod
def _metrics(name, type, value, tags=None):
Kestra._send({
"metrics": [
{
"name": name,
"type": type,
"value": value,
"tags": tags or {}
}
]
})
@staticmethod
def outputs(map):
Kestra._send({
"outputs": map
})
@staticmethod
def counter(name, value, tags=None):
Kestra._metrics(name, "counter", value, tags)
@staticmethod
def timer(name, duration, tags=None):
        if callable(duration):
            start = datetime.now()
            duration()
            # Report the full elapsed wall-clock time of the callable.
            Kestra._metrics(name, "timer", (datetime.now() - start).total_seconds(), tags)
        else:
            Kestra._metrics(name, "timer", duration, tags)
|
import pandas as pd
df = pd.read_csv('../data/corona_virus_data.csv')
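# Sort rows within each (Country, Date) group; since Date is also a grouping key, this amounts
# to ordering by Country and Date (df.sort_values(['Country', 'Date']) would likely be equivalent).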
df = df.groupby(['Country', 'Date']).apply(lambda x: x.sort_values('Date'))
df.to_csv('corona_virus_condensed.csv')
|
numbers = [int(line.strip()) for line in open('day1_input.txt')]
for i in numbers:
for j in numbers:
for k in numbers:
if i + j + k == 2020:
print(f"{i} * {j} * {k} = {i*j*k}")
|
from django.db import models
from safedelete.config import SOFT_DELETE
from safedelete.models import SafeDeleteModel, SOFT_DELETE_CASCADE
from clients.models import Client
from users.models import User
from .managers import RevenueExpenditureManager
import datetime
class RevenueExpenditure(SafeDeleteModel):
_safedelete_policy = SOFT_DELETE_CASCADE
YEAR_CHOICES = []
for r in range(1980, (datetime.datetime.now().year+1)):
YEAR_CHOICES.append((r,r))
MONTH_CHOICES = []
for i in range(1, 13):
MONTH_CHOICES.append((i, i))
year = models.IntegerField(choices=YEAR_CHOICES,
default=datetime.datetime.now().year)
month = models.SmallIntegerField(choices=MONTH_CHOICES, default=datetime.datetime.now().month)
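    # Note: these defaults are evaluated once, when the class is defined, so a long-running
    # process keeps whatever year/month were current at startup.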
revenue = models.FloatField(null=True)
expenditure = models.FloatField(null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
creator = models.ForeignKey(User, related_name='rev_exp_creators', on_delete=models.SET_NULL, unique=False, null=True)
organization = models.ForeignKey(Client, related_name='rev_exp_organizations', on_delete=models.SET_NULL, unique=False, null=True)
objects = RevenueExpenditureManager()
class Meta():
app_label = 'revenue_expenditures'
db_table = 'revenue_expenditure'
|
#!/usr/bin/python
import sys
import os
sys.path.append(os.getcwd())
import argparse
import termcolor as T
import re
from collections import defaultdict
import matplotlib as mp
import matplotlib.pyplot as plt
import glob
import numpy
import math
import plot_defaults
import itertools
from helper import *
from output_parser import EthstatsParser, MPStatParser, SnifferParser
from site_config import config
parser = argparse.ArgumentParser(description="Plot comparing overhead of none,htb,etc..")
parser.add_argument('--dir',
required=True,
help="expt output dir")
parser.add_argument('--maxy',
default=30,
type=int,
help="max y-axis")
parser.add_argument('--num-runs', '-r',
default=3,
type=int,
help="number of times the expt was run")
parser.add_argument('--tolerance', '-t',
default=0.1,
type=float,
help="tolerance of achieved rate in fraction")
parser.add_argument('--num-class', '-n',
default=None,
type=int,
help="plot for a fixed num_classes")
parser.add_argument('--rates',
nargs="+",
type=int,
default=[],
help="plot for the above sweep of rate limits")
parser.add_argument('--out', '-o',
help="save plot to file")
args = parser.parse_args()
rspaces = re.compile(r'\s+')
def ints(s):
    return map(int, s.split(' '))
SUBPLOT_HEIGHT = 4
SUBPLOT_WIDTH = 3.5
SUBPLOT_ROWS = len(args.rates)
SUBPLOT_COLS = 2 # CPU and stdev
plot_defaults.rcParams['figure.figsize'] = (SUBPLOT_HEIGHT * SUBPLOT_ROWS, SUBPLOT_WIDTH * SUBPLOT_COLS)
rls = config['EXPT_RL'].split(' ')
rls_seen = []
rl_name = dict(none="app", htb="htb",eyeq="eyeq", hwrl="hwrl")
rl_name['hwrl+'] = 'hwrl+'
colour_rl = dict(none="yellow", htb="green", tbf="blue", eyeq="grey", hwrl="cyan")
colour_rl['hwrl+'] = "cyan"
rates = ints(config["EXPT_RATES"])
num_classes = ints(config["EXPT_NCLASSES"])
runs = 1 + numpy.arange(args.num_runs)
# Stores references to the matplotlib artist that draws the bars so we
# can label it.
rl_bar = dict()
def DIR(rl, rate, num_class, run):
return "rl-%s-rate-%s-ncl-%s-run-%s" % (rl, rate, num_class, run)
def E(lst):
return list(enumerate(lst))
def get_rl_colour(rl):
return colour_rl[rl]
def get_minor_colour(minor):
return colour_rl[minor]
def err(s):
return T.colored(s, "red", attrs=["bold"])
def plot_by_qty(ax, fixed, major, minor, fn_qty, opts={}):
minor_bar = {}
minors_seen = []
for (i,XX), (j,YY) in itertools.product(E(minor['data']), E(major['data'])):
ys = []
for run in runs:
d = dict()
d.update(fixed)
d.update({major['name']: YY,
minor['name']: XX,
'run': run})
dir = DIR(**d)
fs_dir = os.path.join(args.dir, dir)
if not os.path.exists(fs_dir):
print dir, "does not exist; ignoring..."
continue
ethstats_fname = os.path.join(args.dir, dir, "net.txt")
mpstat_fname = os.path.join(args.dir, dir, "mpstat.txt")
sniff_fname = os.path.join(args.dir, dir, "pkt_snf.txt")
estats = EthstatsParser(ethstats_fname, iface='eth1')
mpstats = MPStatParser(mpstat_fname)
sniff = SnifferParser(sniff_fname, max_lines=1000000)
summ = estats.summary()
print '-'*80
print "Parameters", d
print "\tcpu ", mpstats.summary()
print "\tnet ", summ
print '-'*80
yvalue = fn_qty(estats, mpstats, sniff)
ys.append(yvalue)
if len(ys) == 0 or mean(ys) == 0:
continue
x = j * (len(minor['data']) + 1) + i
bar = ax.bar(x, mean(ys), width=1, color=get_minor_colour(XX),
yerr=stdev(ys), ecolor='red')
if XX == 'hwrl' and YY > 16:
bar[0].set_hatch('x')
bar[0].set_facecolor('white')
XX = XX + "+"
minor_bar[XX] = bar[0]
if XX not in minors_seen:
minors_seen.append(XX)
if opts.get('legend'):
lg = ax.legend([minor_bar[XX] for XX in minors_seen],
[rl_name[XX] for XX in minors_seen],
loc="upper right")
lg.draw_frame(False)
width = len(minor['data']) + 1
xtickloc = width * numpy.arange(len(major['data'])) + ((width - 1.0) / 2)
# This is a pain with matplotlib; the ax and plt apis are slightly
# different. plt.xticks(xtickloc, xticklabels) will work, but it
# has to be split as follows for axis.
ax.set_xticks(xtickloc)
ax.set_xticklabels(major['data'])
if opts.get('yticklabels'):
ax.set_yticklabels(opts.get('yticklabels'))
ax.set_ylim(opts.get('ylim'))
ax.set_ylabel(opts.get('ylabel'))
ax.set_xlabel(major['label'])
if opts.get('annotate'):
ax.text(0.12, 0.9,
opts.get('annotate'),
horizontalalignment='center',
verticalalignment='center',
transform=ax.transAxes)
if opts.get('show'):
plt.show()
if args.rates:
def plot_cpu(estats, mpstats, sniff, rate):
achieved = estats.summary()
if abs(achieved['mean'] - rate) > args.tolerance * rate:
if achieved['mean'] > rate:
print T.colored("higher rate", "green", attrs=["bold"])
print err('tolerance failed: achieved %.3f, rate: %.3f' % (achieved['mean'], rate))
return 0
return mpstats.kernel()
def plot_ipt(estats, mpstats, sniff, rate):
achieved = estats.summary()
m = sniff.mean_ipt()
ideal_mean = sniff.ideal_ipt_nsec(total_rate_gbps=rate/1000.0)
std_norm = sniff.stdev_ipt()
if ideal_mean > 0:
std_norm /= ideal_mean
if abs(achieved['mean'] - rate) > args.tolerance * rate:
print err('tolerance failed: achieved %.3f, rate: %.3f' % (achieved['mean'], rate))
return 0
return std_norm
# plot keeping rate fixed.
fig = plt.figure()
plt_num = 0
for rate in args.rates:
assert(rate in rates)
plt_num += 1
ax = fig.add_subplot(SUBPLOT_ROWS, SUBPLOT_COLS, plt_num)
plot_by_qty(ax,
{'rate': rate},
minor={'name': 'rl',
'data': rls},
major={'name': 'num_class',
'data': num_classes,
'label': "number of classes"},
fn_qty=lambda e,m,s: plot_cpu(e, m, s, rate),
opts={'ylim': (0, args.maxy), 'legend': False,
'annotate': "Rate: %d Gb/s" % (rate/1000),
'yticklabels': ['0', '', '10', '', '20', '', '30'],
'ylabel': "Kernel CPU Util. (%)"})
# This should be the stdev plot.
plt_num += 1
ax = fig.add_subplot(SUBPLOT_ROWS, SUBPLOT_COLS, plt_num)
# Set yticks explicitly otherwise matplotlib does not seem to assign
# tick for such a floating point ylim
ax.set_yticks([0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3])
plot_by_qty(ax,
{'rate': rate},
minor={'name': 'rl',
'data': rls},
major={'name': 'num_class',
'data': num_classes,
'label': "number of classes"},
fn_qty=lambda e,m,s: plot_ipt(e, m, s, rate),
opts={'ylim': (0, 0.3), 'legend': (plt_num == 2),
'annotate': "Rate: %d Gb/s" % (rate/1000),
'yticklabels': ['0', '', '0.1', '', '0.2', '', '0.3'],
'ylabel': "Normalized stddev"})
plt.tight_layout()
if args.out:
plt.savefig(args.out)
else:
plt.show()
|
# MSSERG | 4.11.2016
# Variable = Word
# English; Russian;
e_habits="habits"; r_habits="привычки";
e_bit="bit"; r_bit="немного";
e_back_to_front="back to front"; r_back_to_front="начиная с последней страницы";
e_iustance="iustance"; r_iustance="экземпляр";
e_annoying="annoying"; r_annoying="раздрожать";
e_biting="biting"; r_biting="кусаться";
e_nails="nails"; r_nails="ногти";
e_being="being"; r_being="будучи";
e_late="late"; r_late="поздно";
e_locky="locky"; r_locky="повезло";
e_healthy="healthy"; r_healthy="здоровый";
e_jogging="jogging"; r_jogging="пробежка";
e_brushir="brushir"; r_brushir="чистка";
e_teeth="teeth"; r_teeth="зубы";
e_meals="meals"; r_meals="еда";
e_unfortunately="unfortunately"; r_unfortunately="к сожалению";
e_mainly="mainly"; r_mainly="в основном";
e_cause="cause"; r_cause="причина";
e_health="health"; r_health="здоровье";
e_common="common"; r_common="общие";
e_using="using"; r_using="используя";
e_anger="anger"; r_anger="гнев";
e_boredom="boredom"; r_boredom="скука";
e_teenagers="teenagers"; r_teenagers="подростки";
e_expensive="expensive"; r_expensive="дорогие";
e_addictive="addictive"; r_addictive="привыкание";
e_hard="hard"; r_hard="жёсткий";
e_quit="quit"; r_quit="бросить однажды";
e_once="once"; r_once="один раз";
e_ruin="ruin"; r_ruin="разорение";
e_someones="someonce"; r_someones="кто-то";
e_damages="damages"; r_damages="ущерб";
e_lungs="lungs"; r_lungs="лёгкие";
e_causes="causes"; r_causes="причины";
e_concer="concer"; r_concer="рак";
e_by_the_way="by the way"; r_by_the_way="кстати";
e_disease="disease"; r_disease="болезнь";
e_chain="chain"; r_chain="цепи";
e_susceptible="susceptible"; r_susceptible="восприимчивый";
e_strokes="strokes"; r_strokes="ударов";
e_harmful="harmful"; r_harmful="вредные";
e_surrounding="surrounding"; r_surrounding="окружающие";
e_especially="especially"; r_especially="особенно";
e_prengant="pregnant"; r_pregnant="беременная";
e_liver="liver"; r_liver="печень";
e_kidney="kidney"; r_kidnay="почки";
e_besides="besides"; r_besides="кроме того";
e_drunk="drunk"; r_drunk="пряные";
e_drug="drug"; r_drug="препарат";
e_addicts="addicts"; r_addicts="наркоманы";
e_violent="violent"; r_violent="жестокие";
e_easily="easily"; r_easily="легко";
e_commit="commit"; r_commit="совершить";
e_crime="crime"; r_crime="преступление";
e_influence="influence"; r_influence="влияние";
e_driving="driving"; r_driving="вождение";
e_main="main"; r_main="главная";
e_reason="reason"; r_reason="причина";
e_car="car"; r_car="автомобиль";
e_accident="accident"; r_accident="авария";
e_deaths="deaths"; r_deaths="смерть";
e_impact="impact"; r_impact="влияние";
e_breaking="breaking"; r_breaking="ломать";
e_hardest="hardest"; r_hardest="трудная";
e_try="try"; r_try="пытаться";
e_scientists="scientists"; r_scientists="учёные";
e_state="state"; r_state="государство";
e_treat="treat"; r_treat="лечить";
e_body="body"; r_body="тело";
e_half="half"; r_half="половина";
e_wold="wold"; r_wold="будет";
e_place="place"; r_place="место";
e_affect="affect"; r_affect="влиять";
e_century="century"; r_century="век";
e_take="take"; r_take="взять";
e_granted="granted"; r_granted="предоставлять";
e_inventions="inventions"; r_inventions="изобретение";
e_ever="ever"; r_ever="когда-либо";
e_made="made"; r_made="сделать";
e_significant="significant"; r_significant="значительное";
e_howaday="howaday"; r_howaday="в наше время";
e_affordable="affordable"; r_affordable="доступный";
e_almost="almost"; r_almost="почти";
e_anyone="anyone"; r_anyone="каждый";
e_connects="connects"; r_connects="соединения";
e_arround="arround"; r_arround="вокруг";
e_in_touch="in touch"; r_in_touch="на связи";
e_relative="relative"; r_relative="родные";
e_collegies="collegies"; r_collegies="коллеги";
e_modern="modern"; r_modern="современный";
e_without="without"; r_without="без";
e_net="net"; r_net="сеть";
e_transaction="transaction"; r_transaction="сделка";
e_manage="manage"; r_manage="управлять";
e_accounts="accounts"; r_accounts="счета";
e_pay="pay"; r_pay="платить";
e_bills="bills"; r_bills="банкноты";
e_send="send"; r_send="отправить";
e_important="important"; r_important="важно";
e_largest="largest"; r_largest="крупнейший";
e_source="source"; r_source="источник";
e_storing="storing"; r_storing="хранение";
e_plenty="plenty"; r_plenty="много";
e_useful="useful"; r_useful="полезный";
e_science="science"; r_science="наука";
e_coocking="coocking"; r_coocking="готовка";
e_subject="subject"; r_subject="предмет";
e_practising="practising"; r_practising="практиковать";
e_foreigh="foreigh"; r_foreigh="иностранный";
e_learning="learning"; r_learning="обучаться";
e_possible="possible"; r_possible="возможно";
e_saves="saves"; r_saves="экономия";
e_choosing="choosing"; r_choosing="выбор";
e_decirable="decirable"; r_decirable="желательный";
e_price="price"; r_price="цена";
e_thing="thing"; r_thing="вещь";
e_personality="personality"; r_personality="характер человека";
e_jolly="jolly"; r_jolly="жизнерадостный";
e_ease_going="ease-going"; r_ease_going="спокойный";
e_friendly="friendly"; r_friendly="дружелюбный";
e_reserved="reserved"; r_reserved="скрытый";
e_distrustful="distrustful"; r_distrustful="недоверчивый";
e_sociable="sociable"; r_sociable="общительный";
e_prefer="prefer"; r_prefer="предпочитать";
e_loneliness="loneliness"; r_loneliness="одиночество";
e_silence="silence"; r_silence="тишина";
e_complicated="complicated"; r_complicated="сложный";
e_part="part"; r_part="часть";
e_events="events"; r_events="события";
e_basic="basic"; r_basic="основной";
e_sanguine="sanguine"; r_sanguine="сангвиник";
e_communicative="communicative"; r_communicative="коммуникабельный";
e_fixate="fixate"; r_fixate="зацикливаться";
e_calm="calm"; r_calm="спокойный";
e_patient="patient"; r_patient="пациент";
e_caring="caring"; r_caring="заботливый";
e_exited="exited"; r_exited="возбуждённый";
e_feithful="feithful"; r_feithful="верный";
e_careful="careful"; r_careful="осторожный";
e_quiet="quiet"; r_quiet="тихий";
e_disposed="disposed"; r_disposed="склонный";
e_sad="sad"; r_sad="грустный";
e_thoughtful="thoughtful"; r_thoughtful="задумчивый";
e_moods="moods"; r_moods="настроение";
e_restless="restless"; r_restless="непоседа";
e_strong="strong"; r_strong="сильный";
e_leader="leader"; r_leader="лидер";
e_inpossible="inpossible"; r_inpossible="невозможно";
e_divided="divided"; r_divided="разделённый";
e_into="into"; r_into="до";
e_types="types"; r_types="типы";
e_expressive="expressive"; r_expressive="выразительный";
e_outgoing="outgoing"; r_outgoing="исходящий";
e_impressions="impressions"; r_impressions="впечатление";
e_second="second"; r_second="второй";
e_comfortable="comfortable"; r_comfortable="удобный";
e_themselves="themselves"; r_themselves="сами";
e_inside="inside"; r_inside="внутри";
e_alone="alone"; r_alone="один";
e_variety="variety"; r_variety="разнообразие";
e_cheerful="cheerful"; r_cheerful="весёлый";
e_unhappy="unhappy"; r_unhappy="несчастный";
e_depending="depending"; r_depending="зависимость";
e_general="general"; r_general="общий";
e_outlook="outlook"; r_outlook="взгляд";
e_call="call"; r_call="вызов";
e_former="former"; r_former="бывший";
e_self_confident="self-confident"; r_self_confident="уверенный в себе";
e_while="while"; r_while="промежуток времени";
e_latter="latter"; r_latter="последний";
# Start | Call Of Conditions | Print
test="word";
resuilt=0;
print("\n # MSSERG | 4.11.2016")
print(" # English -> Russian \n")
print("",e_habits)
test=input()
if(test==r_habits):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_bit)
test=input()
if(test==r_bit):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_back_to_front)
test=input()
if(test==r_back_to_front):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_iustance)
test=input()
if(test==r_iustance):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_annoying)
test=input()
if(test==r_annoying):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_biting)
test=input()
if(test==r_biting):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_nails)
test=input()
if(test==r_nails):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_being)
test=input()
if(test==r_being):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_late)
test=input()
if(test==r_late):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_locky)
test=input()
if(test==r_locky):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_healthy)
test=input()
if(test==r_healthy):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_jogging)
test=input()
if(test==r_jogging):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_brushir)
test=input()
if(test==r_brushir):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_teeth)
test=input()
if(test==r_teeth):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_meals)
test=input()
if(test==r_meals):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_unfortunately)
test=input()
if(test==r_unfortunately):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_mainly)
test=input()
if(test==r_mainly):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_cause)
test=input()
if(test==r_cause):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_health)
test=input()
if(test==r_health):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_common)
test=input()
if(test==r_common):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_using)
test=input()
if(test==r_using):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_anger)
test=input()
if(test==r_anger):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_boredom)
test=input()
if(test==r_boredom):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_teenagers)
test=input()
if(test==r_teenagers):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_expensive)
test=input()
if(test==r_expensive):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_addictive)
test=input()
if(test==r_addictive):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_hard)
test=input()
if(test==r_hard):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_quit)
test=input()
if(test==r_quit):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_once)
test=input()
if(test==r_once):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_ruin)
test=input()
if(test==r_ruin):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_someones)
test=input()
if(test==r_someones):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_damages)
test=input()
if(test==r_damages):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_lungs)
test=input()
if(test==r_lungs):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_causes)
test=input()
if(test==r_causes):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_concer)
test=input()
if(test==r_concer):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_by_the_way)
test=input()
if(test==r_by_the_way):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_disease)
test=input()
if(test==r_disease):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_chain)
test=input()
if(test==r_chain):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_susceptible)
test=input()
if(test==r_susceptible):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_strokes)
test=input()
if(test==r_strokes):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_harmful)
test=input()
if(test==r_harmful):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_surrounding)
test=input()
if(test==r_surrounding):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_especially)
test=input()
if(test==r_especially):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_prengant)
test=input()
if(test==r_pregnant):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_liver)
test=input()
if(test==r_liver):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_kidney)
test=input()
if(test==r_kidnay):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_besides)
test=input()
if(test==r_besides):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_drunk)
test=input()
if(test==r_drunk):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_drug)
test=input()
if(test==r_drug):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_addicts)
test=input()
if(test==r_addicts):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_violent)
test=input()
if(test==r_violent):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_easily)
test=input()
if(test==r_easily):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_commit)
test=input()
if(test==r_commit):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_crime)
test=input()
if(test==r_crime):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_influence)
test=input()
if(test==r_influence):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_driving)
test=input()
if(test==r_driving):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_main)
test=input()
if(test==r_main):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_reason)
test=input()
if(test==r_reason):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_car)
test=input()
if(test==r_car):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_accident)
test=input()
if(test==r_accident):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_deaths)
test=input()
if(test==r_deaths):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_impact)
test=input()
if(test==r_impact):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_breaking)
test=input()
if(test==r_breaking):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_hardest)
test=input()
if(test==r_hardest):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_try)
test=input()
if(test==r_try):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_scientists)
test=input()
if(test==r_scientists):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_state)
test=input()
if(test==r_state):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_treat)
test=input()
if(test==r_treat):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_body)
test=input()
if(test==r_body):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_half)
test=input()
if(test==r_half):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_wold)
test=input()
if(test==r_wold):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_place)
test=input()
if(test==r_place):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_affect)
test=input()
if(test==r_affect):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_century)
test=input()
if(test==r_century):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_take)
test=input()
if(test==r_take):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_granted)
test=input()
if(test==r_granted):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_inventions)
test=input()
if(test==r_inventions):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_ever)
test=input()
if(test==r_ever):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_made)
test=input()
if(test==r_made):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_significant)
test=input()
if(test==r_significant):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_howaday)
test=input()
if(test==r_howaday):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_affordable)
test=input()
if(test==r_affordable):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_almost)
test=input()
if(test==r_almost):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_anyone)
test=input()
if(test==r_anyone):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_connects)
test=input()
if(test==r_connects):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_arround)
test=input()
if(test==r_arround):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_in_touch)
test=input()
if(test==r_in_touch):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_relative)
test=input()
if(test==r_relative):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_collegies)
test=input()
if(test==r_collegies):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_modern)
test=input()
if(test==r_modern):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_without)
test=input()
if(test==r_without):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_net)
test=input()
if(test==r_net):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_transaction)
test=input()
if(test==r_transaction):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_manage)
test=input()
if(test==r_manage):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_accounts)
test=input()
if(test==r_accounts):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_pay)
test=input()
if(test==r_pay):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_bills)
test=input()
if(test==r_bills):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_send)
test=input()
if(test==r_send):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_important)
test=input()
if(test==r_important):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_largest)
test=input()
if(test==r_largest):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_source)
test=input()
if(test==r_source):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_storing)
test=input()
if(test==r_storing):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_plenty)
test=input()
if(test==r_plenty):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_useful)
test=input()
if(test==r_useful):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_science)
test=input()
if(test==r_science):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_coocking)
test=input()
if(test==r_coocking):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_subject)
test=input()
if(test==r_subject):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_practising)
test=input()
if(test==r_practising):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_foreigh)
test=input()
if(test==r_foreigh):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_learning)
test=input()
if(test==r_learning):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_possible)
test=input()
if(test==r_possible):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_saves)
test=input()
if(test==r_saves):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_choosing)
test=input()
if(test==r_choosing):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_decirable)
test=input()
if(test==r_decirable):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_price)
test=input()
if(test==r_price):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_thing)
test=input()
if(test==r_thing):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_personality)
test=input()
if(test==r_personality):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_jolly)
test=input()
if(test==r_jolly):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_ease_going)
test=input()
if(test==r_ease_going):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_friendly)
test=input()
if(test==r_friendly):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_reserved)
test=input()
if(test==r_reserved):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_distrustful)
test=input()
if(test==r_distrustful):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_sociable)
test=input()
if(test==r_sociable):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_prefer)
test=input()
if(test==r_prefer):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_loneliness)
test=input()
if(test==r_loneliness):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_silence)
test=input()
if(test==r_silence):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_complicated)
test=input()
if(test==r_complicated):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_part)
test=input()
if(test==r_part):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_events)
test=input()
if(test==r_events):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_basic)
test=input()
if(test==r_basic):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_sanguine)
test=input()
if(test==r_sanguine):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_communicative)
test=input()
if(test==r_communicative):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_fixate)
test=input()
if(test==r_fixate):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_calm)
test=input()
if(test==r_calm):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_patient)
test=input()
if(test==r_patient):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_caring)
test=input()
if(test==r_caring):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_exited)
test=input()
if(test==r_exited):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_feithful)
test=input()
if(test==r_feithful):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_careful)
test=input()
if(test==r_careful):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_quiet)
test=input()
if(test==r_quiet):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_disposed)
test=input()
if(test==r_disposed):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_sad)
test=input()
if(test==r_sad):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_thoughtful)
test=input()
if(test==r_thoughtful):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_moods)
test=input()
if(test==r_moods):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_restless)
test=input()
if(test==r_restless):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_strong)
test=input()
if(test==r_strong):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_leader)
test=input()
if(test==r_leader):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_inpossible)
test=input()
if(test==r_inpossible):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_divided)
test=input()
if(test==r_divided):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_into)
test=input()
if(test==r_into):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_types)
test=input()
if(test==r_types):
print(" Верно! \n")
resuilt+=1
print("",e_expressive)
test=input()
if(test==r_expressive):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_outgoing)
test=input()
if(test==r_outgoing):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_impressions)
test=input()
if(test==r_impressions):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_second)
test=input()
if(test==r_second):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_comfortable)
test=input()
if(test==r_comfortable):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_inside)
test=input()
if(test==r_inside):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_themselves)
test=input()
if(test==r_themselves):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_alone)
test=input()
if(test==r_alone):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_variety)
test=input()
if(test==r_variety):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_cheerful)
test=input()
if(test==r_cheerful):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_unhappy)
test=input()
if(test==r_unhappy):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_depending)
test=input()
if(test==r_depending):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_general)
test=input()
if(test==r_general):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_outlook)
test=input()
if(test==r_outlook):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_call)
test=input()
if(test==r_call):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_former)
test=input()
if(test==r_former):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_self_confident)
test=input()
if(test==r_self_confident):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_while)
test=input()
if(test==r_while):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
print("",e_latter)
test=input()
if(test==r_latter):
print(" Верно! \n")
resuilt+=1
else:
print(" Неверно! \n")
# Finish | Resuilt | Print
resuilt_low="очень плохой результат!"
resuilt_medium="неплохой результат!"
resuilt_normal="хороший результат!"
resuilt_good="отличный результат!"
resuilt_GOD="божественный результат!"
print(" # Результат тестирования \n")
if(resuilt<75):
print(" Вы закончили тест со следующим показателем: ",resuilt," слов из 170 слов! - Это ты и не учил вовсе...")
if(100>resuilt>=75):
print(" Вы закончили тест со следующим показателем: ",resuilt," слов из 170 слов! - Это ",resuilt_low)
if(125>resuilt>=100):
print(" Вы закончили тест со следующим показателем: ",resuilt," слов из 170 слов! - Это ",resuilt_medium)
if(150>resuilt>=125):
print(" Вы закончили тест со следующим показателем: ",resuilt," слов из 170 слов! - Это ",resuilt_normal)
if(169>resuilt>=150):
print(" Вы закончили тест со следующим показателем: ",resuilt," слов из 170 слов! - Это ",resuilt_good)
if(resuilt>=169):
print(" Вы закончили тест со следующим показателем: ",resuilt," слов из 170 слов! - Это ",resuilt_GOD)
input()
input()
input()
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# Given an array nums containing n + 1 integers where each integer is between 1 and n (inclusive),
# prove that at least one duplicate number must exist. Assume that there is only one duplicate number,
# find the duplicate one.
# Note:
# You must not modify the array (assume the array is read only).
# You must use only constant, O(1) extra space.
# Your runtime complexity should be less than O(n^2).
# There is only one duplicate number in the array, but it could be repeated more than once.
# Credits:
# Special thanks to @jianchao.li.fighter for adding this problem and creating all test cases.
# 53 / 53 test cases passed.
# Status: Accepted
# Runtime: 72 ms
# Your runtime beats 17.27 % of python submissions.
class Solution(object):
def findDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
lo, hi = 0, len(nums) - 1
while lo + 1 < hi:
count = 0
mid = (lo + hi) // 2
for num in nums:
if mid < num <= hi:
count += 1
if count > hi - mid:
lo = mid
else:
hi = mid
return hi
# 53 / 53 test cases passed.
# Status: Accepted
# Runtime: 42 ms
# Your runtime beats 72.54 % of python submissions.
class Solution(object):
def findDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
        # Idea from: http://keithschwarz.com/interesting/code/?dir=find-duplicate
        # We have to look at the input array from a different angle.
        # The array is in fact a map (or route function) for a walk:
        # x_0 = 0                        => Step 0: start from position x_0
        # f(1) => nums[x_0] = x_1        => Step 1: from x_0 go to x_1
        # f(2) => nums[x_1] = x_2        => Step 2: from x_1 go to x_2
        # ...
        # f(i) => nums[x_i] = x_{i+1}    => Step i: from x_i go to x_{i+1}.
        # If nums were infinite, the walk would go on forever without revisiting any position.
        # If nums were finite with no repeated value, the walk would be a perfect circular loop.
        # Since nums is finite with a repeated value, the walk is rho-shaped.
        # The key is to find the start of the rho-shaped cycle, x_c: x_c is the duplicated number.
        # To find x_c, we borrow Floyd's "tortoise and hare" algorithm to find x_l,
        # where f(stepsToL) = x_l == x_2l = f(2 * stepsToL) and stepsToL is the smallest
        # multiple of the cycle length (rhoLen) that is larger than stepsToC.
hare, tortoise = nums[nums[0]], nums[0]
while hare != tortoise:
hare = nums[nums[hare]]
tortoise = nums[tortoise]
hare = 0
        # After finding stepsToL, we can locate x_c.
        # Since stepsToL is the smallest multiple of rhoLen larger than stepsToC,
        # f(stepsToL) sits (rhoLen - stepsToC) steps past the cycle entry f(stepsToC).
        # Therefore, walking stepsToC more steps from f(stepsToL) lands back on the cycle entry,
        # i.e. f(stepsToC) = f(stepsToL + stepsToC), which both pointers reach simultaneously.
while hare != tortoise:
hare = nums[hare]
tortoise = nums[tortoise]
return tortoise
if __name__ == '__main__':
print(Solution().findDuplicate([1,2,1,3,4]))
|
# -*- coding:utf-8 -*-
# author: will
from aip import AipNlp
from flask import request, jsonify
from app import mongo_store
from app.models import Article
from config.lib_config import LibConfig
from utils.log_service import Logging
from . import api_article
@api_article.route('/article_tag', methods=['POST'])
def article_tag():
try:
res = request.get_json()
article_id = res.get('article_id')
Logging.logger.info('request_args:{0}'.format(res))
if not article_id:
return jsonify(errno=-1, errmsg='参数错误,请传入要查询的文章的article_id')
article = Article.query.get(article_id)
if not article:
return jsonify(errno=-1, errmsg='参数错误,该文章不存在')
docs = mongo_store.articles.find({'title': article.title})
doc = docs[0]
title = doc.get('title')
content_ls = doc.get('content')
text = ''
for content in content_ls:
if content.get('text'):
text += content.get('text')
print(text)
# text = text.encode('gbk')
client = AipNlp(LibConfig.get_baidu_language_app_id(), LibConfig.get_baidu_language_api_key(),
LibConfig.get_baidu_language_secret_key())
result_tag = client.keyword(title, text)
print(result_tag)
result_topic = client.topic(title, text)
print(result_topic)
return jsonify(errno=0, errmsg="OK", result_topic=result_topic, result_tag=result_tag)
except Exception as e:
Logging.logger.error('errmsg:{0}'.format(e))
return jsonify(errno=-1, errmsg='网络异常')
|
from django.shortcuts import render
def index(request):
print ("Index" * 10)
return render(request,'r_portfolio/index.html')
def projects(request):
return render(request,'r_portfolio/projects.html')
def about(request):
return render(request,'r_portfolio/about.html')
def testimonials(request):
return render(request,'r_portfolio/testimonials.html')
# Create your views here.
|
from settings import *
from sprites import *
class Pong:
def __init__(self):
pg.init()
self.screen = pg.display.set_mode((WIDTH, HEIGHT))
self.clock = pg.time.Clock()
self.running = True
def new(self):
self.ball1 = ball(self)
self.player2 = paddle2()
self.player1 = paddle1()
self.paddlesprites = pg.sprite.Group()
self.paddlesprites.add(self.player1)
self.paddlesprites.add(self.player2)
self.ballsprites = pg.sprite.Group()
self.ballsprites.add(self.ball1)
self.run()
def run(self):
self.playing = True
while self.playing:
self.clock.tick(FPS)
self.update()
self.events()
self.draw()
def update(self):
# note: the frame rate is already limited by clock.tick(FPS) in run(), so we don't tick again here
self.paddlesprites.update()
self.ballsprites.update()
def events(self):
for event in pg.event.get():
if event.type == pg.QUIT:
if self.playing:
self.playing = False
self.running = False
def draw(self):
self.screen.fill(BACKGROUND)
pg.draw.line(self.screen, (255, 255, 255), (WIDTH / 2, 0), (WIDTH / 2, HEIGHT), 3)
self.paddlesprites.draw(self.screen)
self.ballsprites.draw(self.screen)
#top left of image is (0,0)
pg.display.flip()
P = Pong()
while P.running:
# P.running is checked the same way as self.playing in run() above
P.new()
#pg.QUIT
|
import contextlib
from django.test import TestCase
from django.utils import timezone
from elections.models import (
DEFAULT_STATUS,
Election,
ModerationHistory,
ModerationStatuses,
)
from elections.tests.factories import ElectionFactory
from elections.utils import ElectionBuilder
from freezegun import freeze_time
from .base_tests import BaseElectionCreatorMixIn
class TestElectionModel(BaseElectionCreatorMixIn, TestCase):
def setUp(self):
super().setUp()
Election.private_objects.all().delete()
self.election_group = ElectionBuilder(
"local", "2017-06-08"
).build_election_group()
self.org_group = (
ElectionBuilder("local", "2017-06-08")
.with_organisation(self.org1)
.build_organisation_group(self.election_group)
)
self.ballot = (
ElectionBuilder("local", "2017-06-08")
.with_organisation(self.org1)
.with_division(self.org_div_1)
.build_ballot(self.org_group)
)
self.testshire_org_group = (
ElectionBuilder("local", "2017-06-08")
.with_organisation(self.testshire_org)
.build_organisation_group(self.election_group)
)
self.testshire_ballot = (
ElectionBuilder("local", "2017-06-08")
.with_organisation(self.testshire_org)
.with_division(self.testshire_div)
.build_ballot(self.testshire_org_group)
)
def test_recursive_save_group(self):
# table should be empty before we start
self.assertEqual(0, Election.private_objects.count())
# saving the child record should implicitly save the parent record too
self.org_group.save()
self.assertEqual(2, Election.private_objects.count())
def test_recursive_save_ballot(self):
# table should be empty before we start
self.assertEqual(0, Election.private_objects.count())
# From a performance perspective, saving a ballot and 2 parent groups
# is the worst-case scenario for database I/O
# we should monitor this and be aware if this number increases
with self.assertNumQueries(19):
self.ballot.save()
# saving the child record should implicitly save the parent records too
self.assertEqual(3, Election.private_objects.count())
def test_transaction_rollback_parent(self):
# table should be empty before we start
self.assertEqual(0, Election.private_objects.count())
# doing this will cause save() to throw an exception
# if we try to save parent_record
self.election_group.organisation_id = "foo"
with contextlib.suppress(ValueError):
self.org_group.save()
# the exception should have prevented both the
# parent and child records from being saved
self.assertEqual(0, Election.private_objects.count())
def test_transaction_rollback_child(self):
# table should be empty before we start
self.assertEqual(0, Election.private_objects.count())
# doing this will cause save() to throw an exception
# if we try to save child_record
self.org_group.organisation_id = "foo"
with contextlib.suppress(ValueError):
self.org_group.save()
# the exception should have prevented both the
# parent and child records from being saved
self.assertEqual(0, Election.private_objects.count())
def test_related_object_save(self):
# table should be empty before we start
self.assertEqual(0, ModerationHistory.objects.count())
# the first time we save a record, we should create
# a corresponding moderation status event
self.election_group.save()
self.assertEqual(1, ModerationHistory.objects.count())
# saving the same record again shouldn't though
self.election_group.seats_contests = 7
self.election_group.source = "some bloke down the pub told me"
self.election_group.save()
self.assertEqual(1, ModerationHistory.objects.count())
def test_save_with_status(self):
self.election_group.save()
self.assertEqual(self.election_group.current_status, DEFAULT_STATUS)
self.election_group.save(status=ModerationStatuses.approved.value)
self.assertEqual(
self.election_group.current_status,
ModerationStatuses.approved.value,
)
def test_get_ballots(self):
for election in [
self.election_group,
self.testshire_org_group,
self.testshire_ballot,
self.org_group,
self.ballot,
]:
election.save(status=ModerationStatuses.approved.value)
self.assertEqual(
len(self.org_group.get_ballots()),
1,
)
self.assertEqual(len(self.election_group.get_ballots()), 2)
def test_group_seats_contested(self):
for election in [
self.election_group,
self.testshire_org_group,
self.org_group,
]:
election.save(status=ModerationStatuses.approved.value)
for ballot in [
self.testshire_ballot,
self.ballot,
]:
ballot.seats_contested = 3
ballot.save(status=ModerationStatuses.approved.value)
self.assertEqual(self.ballot.group_seats_contested, 3)
self.assertEqual(self.org_group.group_seats_contested, 3)
self.assertEqual(self.election_group.group_seats_contested, 6)
def test_get_admin_url(self):
election = Election(pk=2021)
self.assertEqual(
election.get_admin_url(),
f"/admin/elections/election/{election.pk}/change/",
)
class TestModified(TestCase):
def test_update_changes_modified(self):
election = ElectionFactory()
future = timezone.datetime(2022, 5, 5, 12, 0, 0, tzinfo=timezone.utc)
with freeze_time("2022-5-5 12:00:00"):
self.assertNotEqual(election.modified, future)
Election.private_objects.update()
election.refresh_from_db()
self.assertEqual(election.modified, future)
|
import setuptools
from glob import glob
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="nbsimplegrader",
version="1.3.4",
author="Adam Blake",
author_email="adamblake@g.ucla.edu",
description="Author, distribute, and grade Jupyter notebook assignments, simply.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/UCLATALL/nbsimplegrader",
packages=setuptools.find_packages(),
install_requires=[
'notebook',
'lxml'
],
python_requires='>=3.6',
entry_points={
'nbconvert.exporters': [
'nbsimplegrader_html = nbsimplegrader:NBSGHTMLExporter'
]
},
include_package_data=True,
zip_safe=False,
data_files=[
("share/jupyter/nbextensions/nbsimplegrader",
glob('nbsimplegrader/static/*.*')),
("share/jupyter/nbextensions/nbsimplegrader/authoring_tools",
glob('nbsimplegrader/static/authoring_tools/*.*')),
("etc/jupyter/nbconfig/notebook.d",
["nbsimplegrader/etc/nbconfig/notebook.d/nbsimplegrader.json"]),
("etc/jupyter/nbconfig/tree.d",
["nbsimplegrader/etc/nbconfig/tree.d/nbsimplegrader.json"])
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
'Topic :: Software Development :: Libraries :: Python Modules',
'Framework :: Jupyter',
]
)
|
from django.db import models
from General.models import Location
from General.models import User
from General.models import Deliveryman
from General.models import Delivery
# Create your models here.
STATUS_RENT_CHOICES = [
('на', 'На рассмотрении'),
('за', 'Оформлено'),
('оп', 'Оплачено'),
('во', 'Возврат средств'),
('дз', 'На доставке заказчику'),
('ак', 'Активно'),
('дл', 'На доставке в локацию'),
('вы', 'Выполнено'),
('от', 'Отменено'),
('шт', 'Ожидание оплаты штрафа')
]
STATUS_BIKE_CHOICES = [
('ре', 'В ремонте'),
('св', 'Свободен'),
('ар', 'Арендован')
]
class Bike_model(models.Model):
name = models.CharField(max_length=100)
type_bike = models.CharField(max_length=15)
wheel_size = models.CharField(max_length=11)
speeds = models.PositiveSmallIntegerField()
frame = models.CharField(max_length=200)
brakes = models.CharField(max_length=100)
rudder = models.CharField(max_length=100)
seat = models.CharField(max_length=100)
footrest = models.CharField(max_length=50, blank=True)
weight = models.FloatField()
def __str__(self):
return self.name
class Bike(models.Model):
brand = models.CharField(max_length=100)
bike_model_id = models.ForeignKey(Bike_model, on_delete=models.CASCADE)
color = models.CharField(max_length=20)
price = models.PositiveSmallIntegerField()
year = models.PositiveSmallIntegerField()
location_id = models.ForeignKey(Location, on_delete=models.CASCADE)
photo = models.CharField(max_length=100)
status = models.CharField(max_length=20, choices=STATUS_BIKE_CHOICES)
def __str__(self):
return str(self.bike_model_id)
class Bike_rent(models.Model):
user_id = models.ForeignKey(User, on_delete=models.CASCADE)
bike_id = models.ForeignKey(Bike, on_delete=models.CASCADE)
status = models.CharField(max_length=20, choices=STATUS_RENT_CHOICES)
start = models.CharField(max_length=16)
end = models.CharField(max_length=16)
region = models.CharField(max_length=40)
delivery_to_id = models.ForeignKey(Delivery, on_delete=models.CASCADE, blank=True, null=True)
delivery_from_id = models.ForeignKey(Delivery, on_delete=models.CASCADE, blank=True, null=True, related_name='deliveryfrombike')
limit = models.PositiveIntegerField()
price = models.PositiveIntegerField()
comment = models.CharField(max_length=200, blank=True, null=True)
def __str__(self):
return str(self.bike_id)
|
from __future__ import print_function
import torch
import numpy as np
import inspect
import re
import os
import collections
import random
from PIL import Image
from pdb import set_trace as st
from torch.autograd import Variable
def normalize(n, minN=-1, maxN=1):
"""
Normalize between -1 and 1
"""
if type(n) == np.ndarray:
min_ = np.min(n)
max_ = np.max(n)
elif type(n) == torch.Tensor:
min_ = n.min()
max_ = n.max()
if min_ > max_:
return None
return ((maxN - minN) * ((n - min_) / (max_ - min_))) + minN
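# Worked example (illustrative, not from the original module):
#   normalize(np.array([0., 5., 10.])) -> array([-1., 0., 1.])
# Note: a constant input (min_ == max_) would divide by zero, so callers are assumed
# to pass data containing at least two distinct values.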
# Converts a Tensor into a Numpy array
# |imtype|: the desired type of the converted numpy array
def tensor2im(image_tensor, imtype=np.uint8):
image_numpy = image_tensor[0].cpu().float().numpy()
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
return image_numpy.astype(imtype)
def diagnose_network(net, name='network'):
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
def save_image(image_numpy, image_path):
image_pil = Image.fromarray(image_numpy)
image_pil.save(image_path)
def info(object, spacing=10, collapse=1):
"""Print methods and doc strings.
Takes module, class, list, dictionary, or string."""
methodList = [e for e in dir(object) if isinstance(
getattr(object, e), collections.Callable)]
processFunc = collapse and (lambda s: " ".join(s.split())) or (lambda s: s)
print("\n".join(["%s %s" %
(method.ljust(spacing),
processFunc(str(getattr(object, method).__doc__)))
for method in methodList]))
def varname(p):
for line in inspect.getframeinfo(inspect.currentframe().f_back)[3]:
m = re.search(r'\bvarname\s*\(\s*([A-Za-z_][A-Za-z0-9_]*)\s*\)', line)
if m:
return m.group(1)
def print_numpy(x, val=True, shp=False):
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
def mkdirs(paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
class ImagePool():
def __init__(self, pool_size):
self.pool_size = pool_size
if self.pool_size > 0:
self.num_imgs = 0
self.images = []
def query(self, images):
if self.pool_size == 0:
return images
return_images = []
for image in images.data:
image = torch.unsqueeze(image, 0)
if self.num_imgs < self.pool_size:
self.num_imgs = self.num_imgs + 1
self.images.append(image)
return_images.append(image)
else:
p = random.uniform(0, 1)
if p > 0.5:
random_id = random.randint(0, self.pool_size - 1)
tmp = self.images[random_id].clone()
self.images[random_id] = image
return_images.append(tmp)
else:
return_images.append(image)
return_images = Variable(torch.cat(return_images, 0))
return return_images
|
import base64
url = 'cHM6'
str_url = base64.b64decode(url).decode("utf-8")
print(str_url)
print(type(str_url))
url = "https://www.jianshu.com/p/9b4ab709cffb"
bytes_url = url.encode("utf-8")
print(bytes_url)
str_url = base64.b64encode(bytes_url)  # the argument being encoded must be binary data (bytes)
print(str_url)
|
from sys import stdin
N = int(stdin.readline().strip())
# Odd = Alice; Even = Bob
while N > 2:
N -= 2
if N == 2:
print("Bob")
else:
print("Alice")
|
from rest_framework import serializers
from .models import Company, Contact, Project, Task
class CompanySerializer(serializers.ModelSerializer):
class Meta:
model = Company
fields = (
'id', 'name', 'email', 'phone', 'url', 'description', 'contacts', 'projects'
)
class ContactSerializer(serializers.ModelSerializer):
class Meta:
model = Contact
fields = (
'id', 'first_name', 'last_name', 'email', 'company'
)
class ProjectSerializer(serializers.ModelSerializer):
company = serializers.PrimaryKeyRelatedField(queryset=Company.objects.all())
class Meta:
model = Project
fields = (
'id', 'name', 'description', 'tasks', 'company'
)
class TaskSerializer(serializers.ModelSerializer):
child_tasks = serializers.PrimaryKeyRelatedField(queryset=Task.objects.all())
project = serializers.PrimaryKeyRelatedField(queryset=Project.objects.all())
company = serializers.PrimaryKeyRelatedField(queryset=Company.objects.all())
class Meta:
model = Task
fields = (
'id', 'name', 'child_tasks', 'project', 'company'
)
|
#!/usr/bin/python3
# Filename: list_this.py
import pyperclip
def list_no_quotes(mylist):
# this could be one-lined but is broken up for improved readability
mylist = str(mylist.split('\n')).strip('[').strip(']').replace(r'\r', r",").replace(r"'", "").rstrip(', ')
pyperclip.copy(mylist)
def list_quotes(mylist):
# this could be one-lined but is broken up a little to improve readability
split_on_lines = str(mylist.split('\n')).strip('[').strip(']').replace(r'\r', r"', '").rstrip("''").rstrip(', ')
pyperclip.copy(split_on_lines)
|
from django.views.generic import *
from django.urls import reverse_lazy
from django.contrib.auth.mixins import AccessMixin, LoginRequiredMixin
from django.contrib.auth.models import User, Group
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.db.models import Q, Subquery, OuterRef
from django import forms
from django.contrib import messages
from .models import *
# Mixin class that restricts the operation to administrators (superusers) only
class SuperuserRequiredMixin(AccessMixin):
def dispatch(self, request, *args, **kwargs):
if not request.user.is_superuser:
return self.handle_no_permission()
return super().dispatch(request, *args, **kwargs)
# User registration
class UserRegister(CreateView):
extra_context = {'title': '使用者註冊'}
model = User
fields = ['username', 'first_name', 'last_name', 'email', 'password']
template_name = 'form.html'
success_url = reverse_lazy('home')  # return to the home page after registration
#
def get_form(self):
form = super().get_form()
form.fields['first_name'].label = '真實姓名'
form.fields['first_name'].required = True
form.fields['last_name'].label = '學校名稱'
form.fields['last_name'].required = True
form.fields['password2'] = forms.CharField(label='密碼驗證', max_length=255)
return form
# Form validation
def form_valid(self, form):
user = form.save(commit=False)
pw1 = form.cleaned_data['password']
pw2 = form.cleaned_data['password2']
if pw1 != pw2:
form.add_error('password2', '密碼與驗證密碼不相符')
return super().form_invalid(form)
user.set_password(pw1)
return super().form_valid(form)
# User list
class UserList(SuperuserRequiredMixin, ListView):
extra_context = {'title': '使用者列表'}
model = User
ordering = ['username']
paginate_by = 20
template_name = 'user/user_list.html'
def get_queryset(self):
return super().get_queryset().prefetch_related('groups')
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
user_list = []
for user in self.object_list:
is_teacher = user.groups.filter(name='teacher').exists()
user_list.append((user, is_teacher))
ctx['ulist'] = user_list
return ctx
class UserView(SuperuserRequiredMixin, DetailView):
model = User
template_name = 'user/user_detail.html'
context_object_name = 'tuser'
class UserEdit(SuperuserRequiredMixin, UpdateView):
model = User
fields = ['username', 'first_name', 'last_name', 'email']
success_url = reverse_lazy('user_list')
template_name = 'form.html'
def get_form(self):
form = super().get_form()
form.fields['first_name'].label = '真實姓名'
form.fields['first_name'].required = True
form.fields['last_name'].label = '學校名稱'
form.fields['last_name'].required = True
return form
class UserPasswordUpdate(SuperuserRequiredMixin, UpdateView):
model = User
fields = ['password']
success_url = reverse_lazy('user_list')
template_name = 'form.html'
def get_form(self):
form = super().get_form()
form.fields['password2'] = forms.CharField(label='密碼驗證', max_length=255)
return form
def get_initial(self):  # set an initial value to clear the password field's original value
return {'password': ''}
def form_valid(self, form):
user = form.save(commit=False)
pw1 = form.cleaned_data['password']
pw2 = form.cleaned_data['password2']
if pw1 != pw2:
form.add_error('password2', '密碼與驗證密碼不相符')
return super().form_invalid(form)
user.set_password(pw1)
return super().form_valid(form)
class UserTeacherToggle(SuperuserRequiredMixin, RedirectView):
def get_redirect_url(self, *args, **kwargs):
user = User.objects.get(id=self.kwargs['uid'])
try:
group = Group.objects.get(name="teacher")
except ObjectDoesNotExist:
group = Group(name="teacher")
group.save()
if user.groups.filter(name='teacher').exists():
group.user_set.remove(user)
else:
group.user_set.add(user)
return self.request.META.get('HTTP_REFERER', '/')
class UserDashboard(LoginRequiredMixin, TemplateView):
extra_context = {'title': '我的儀表板'}
template_name = 'user/user_detail.html'
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['tuser'] = self.request.user
return ctx
class MsgList(LoginRequiredMixin, ListView):
extra_context = {'title': '收件匣'}
paginate_by = 20
def get_queryset(self):
user = self.request.user
return Message.objects.annotate(
read=Subquery(user.read_list.filter(
message=OuterRef('pk')).values('read'))
).filter(
Q(recipient=user) | Q(course__in=user.enroll_set.values('course'))
).select_related('course', 'sender').order_by('-created')
class MsgOutbox(LoginRequiredMixin, ListView):
extra_context = {'title': '寄件匣'}
def get_queryset(self):
user = self.request.user
return user.outbox.annotate(
read=Subquery(user.read_list.filter(
message=OuterRef('pk')).values('id'))
).select_related('course', 'recipient').order_by('-created')
class MsgRead(LoginRequiredMixin, DetailView):
model = Message
pk_url_kwarg = 'mid'
def get_queryset(self):
return super().get_queryset().select_related('course', 'sender')
def get_context_data(self, **kwargs):
user = self.request.user
msg = self.object
if not msg.status.filter(user=user).exists():
MessageStatus(message=msg, user=user).save()
return super().get_context_data()
class MsgSend(LoginRequiredMixin, CreateView):
extra_context = {'title': '傳送訊息'}
fields = ['title', 'body']
model = Message
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['recipient'] = User.objects.get(id=self.kwargs['rid'])
ctx['success_url'] = self.request.META.get('HTTP_REFERER', '/')
return ctx
def form_valid(self, form):
form.instance.sender = self.request.user
form.instance.recipient = User.objects.get(id=self.kwargs['rid'])
return super().form_valid(form)
def get_success_url(self):
messages.add_message(self.request, messages.SUCCESS, '訊息已送出!')
return self.request.POST.get('success_url')
class MsgReply(LoginRequiredMixin, CreateView):
extra_context = {'title': '回覆訊息'}
model = Message
fields = ['title', 'body']
def get_initial(self):
self.msg = Message.objects.get(id=self.kwargs['mid'])
return {
'title': 'Re: '+self.msg.title,
'body': "{}({}) 於 {} 寫道:\n> {}".format(
self.msg.sender.username,
self.msg.sender.first_name,
self.msg.created,
"\n> ".join(self.msg.body.split('\n'))
),
}
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
ctx['recipient'] = self.msg.sender
ctx['success_url'] = self.request.META.get('HTTP_REFERER', '/')
return ctx
def form_valid(self, form):
form.instance.sender = self.request.user
form.instance.recipient = self.msg.sender
return super().form_valid(form)
def get_success_url(self):
messages.add_message(self.request, messages.SUCCESS, '訊息已送出!')
return self.request.POST.get('success_url')
class QCreate(LoginRequiredMixin, CreateView):
model = Questionnaire
fields = '__all__'
template_name = "form.html"
def get_initial(self):
return {
'q1': 4,
}
# def get_form(self):
# form = super().get_form()
# form.fields['q1'].widget = forms.RadioSelect
# return form
class QEdit(LoginRequiredMixin, UpdateView):
model = Questionnaire
fields = '__all__'
template_name = "form.html"
|
"""
This code is adapted from the Deep Learning tutorial introducing the multilayer perceptron
using Theano.
Added Dropout from https://github.com/mdenil/dropout/blob/master/mlp.py
"""
__docformat__ = 'restructedtext en'
import os
import sys
import timeit
import numpy
import theano
import theano.tensor as T
from logistic_sgd_beta_zeros import LogisticRegression
from mlp import HiddenLayer
def _dropout_from_layer(rng, layer, p):
"""p is the probability of droping a unit
"""
srng = T.shared_randomstreams.RandomStreams(rng.randint(999999))
mask = srng.binomial(n=1, p=1.-p, size=layer.shape)
output = layer * T.cast(mask, theano.config.floatX)
return output
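# A small illustrative sketch (plain numpy, not used by the model code below) of what
# the mask above computes: each unit is zeroed independently with probability p, so on
# average a fraction (1 - p) of the activations survive.
def _numpy_dropout_example(layer, p, rng):
mask = rng.binomial(n=1, p=1. - p, size=layer.shape)
return layer * mask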
class DropoutHiddenLayer(HiddenLayer):
def __init__(self, rng, input, n_in, n_out, activation, dropout_rate, W=None, b=None, n_slack=0, dilate_factor=1.):
super(DropoutHiddenLayer, self).__init__(
rng=rng, input=input, n_in=n_in, n_out=n_out, W=W, b=b, activation=activation, n_slack=n_slack, dilate_factor=dilate_factor)
print >> sys.stderr, 'Slack size =', str(n_slack)
self.output = _dropout_from_layer(rng, self.output, p=dropout_rate) if dropout_rate > 0 else self.output
# start-snippet-2
class DropoutMLP(object):
"""Multi-Layer Perceptron Class
A multilayer perceptron is a feedforward artificial neural network model
that has one layer or more of hidden units and nonlinear activations.
Intermediate layers usually have as activation function tanh or the
sigmoid function (defined here by a ``HiddenLayer`` class) while the
top layer is a softmax layer (defined here by a ``LogisticRegression``
class).
"""
def __init__(self, rng, input, n_in, n_hiddens, dropout_rates, activation=None, n_slack=0, init_ignore_out=False, init_account_dropout=False):
"""Initialize the parameters for the multilayer perceptron
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_hiddens: list of int
:param n_hiddens: number of hidden units in each hidden layer
:type n_slack: int
:param n_slack: number of slack input units, which are always 0 on average
:type init_ignore_out: boolean
:param init_ignore_out: ignore output size when initializing, default False
:type init_account_dropout: boolean
:param init_account_dropout: account for the dropout coefficient when initializing, i.e., dilate dropout weights by the inverse dropout coefficient
"""
self.params = []
self.W = []
self.b = []
self.W_actual = []
self.b_actual = []
# keep track of model input
self.input = input
# Multiple hidden layers
print >> sys.stderr, dropout_rates
last_layer_out = self.input
last_layer_dropout = _dropout_from_layer(rng, self.input, p=dropout_rates[0])
last_layer_size = n_in
slacks = numpy.append(numpy.asarray([n_slack], dtype='int32'), numpy.zeros((len(n_hiddens)-1,), dtype='int32')) if len(n_hiddens) > 0 else None
for i in range(0, len(n_hiddens)):
# dropped-out path: for training
dropoutLayer = DropoutHiddenLayer(rng=rng,
input=last_layer_dropout, activation=activation,
n_in=last_layer_size, n_out=n_hiddens[i],
dropout_rate=dropout_rates[i+1],
n_slack=slacks[i] + (n_hiddens[i] if init_ignore_out else 0),
dilate_factor = (1./(1. - dropout_rates[i]) if init_account_dropout else 1.)
)
last_layer_dropout = dropoutLayer.output
self.params += dropoutLayer.params
self.W += [dropoutLayer.W]
self.b += [dropoutLayer.b]
# original (untouched) path: for testing
hiddenLayer = HiddenLayer(rng=rng,
input=last_layer_out, activation=activation,
n_in=last_layer_size, n_out=n_hiddens[i],
W=dropoutLayer.W * (1. - dropout_rates[i]),
b=dropoutLayer.b,
n_slack=slacks[i]
)
last_layer_out = hiddenLayer.output
last_layer_size = n_hiddens[i]
self.W_actual += [hiddenLayer.W]
self.b_actual += [hiddenLayer.b]
# The logistic regression layer gets as input the hidden units
# of the hidden layer
# Dropped-out path: for training
self.dropoutLogRegressionLayer = LogisticRegression(
rng=rng, input=last_layer_dropout,
n_in=(n_hiddens[-1] if len(n_hiddens) > 0 else n_in)
)
self.params += self.dropoutLogRegressionLayer.params
# original (untouched) path: for testing
self.logRegressionLayer = LogisticRegression(
rng=rng, input=last_layer_out,
n_in=(n_hiddens[-1] if len(n_hiddens) > 0 else n_in),
W=self.dropoutLogRegressionLayer.W * (1. - dropout_rates[-1]),
b=self.dropoutLogRegressionLayer.b
)
# prediction of the MLP is given by the prediction of the output of the
# model, computed in the logistic regression layer
self.dropout_errors = self.dropoutLogRegressionLayer.errors
self.dropout_cross_entropy = self.dropoutLogRegressionLayer.cross_entropy
self.y_pred = self.logRegressionLayer.y_pred
self.errors = self.logRegressionLayer.errors
self.cross_entropy = self.logRegressionLayer.cross_entropy
self.true_positives = self.logRegressionLayer.true_positives
self.true_negatives = self.logRegressionLayer.true_negatives
self.false_positives = self.logRegressionLayer.false_positives
self.false_negatives = self.logRegressionLayer.false_negatives
|
from selenium import webdriver
from time import sleep
# import xlrd
import random
import os
import time
import sys
sys.path.append("..")
# import email_imap as imap
# import json
import re
# from urllib import request, parse
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.action_chains import ActionChains  # import the ActionChains module
# import base64
import Chrome_driver
import email_imap as imap
import name_get
import db
import selenium_funcs
import Submit_handle
def web_submit(submit,chrome_driver,debug=0):
# test
if debug == 1:
site = 'https://personalloans.com/'
submit['Site'] = site
chrome_driver.get(submit['Site'])
chrome_driver.maximize_window()
chrome_driver.refresh()
sleep(3)
try:
chrome_driver.find_element_by_css_selector('.bsac-container.bsac-type-custom_code').click()  # compound class names need a CSS selector
except:
pass
# page1
# brightnessLine=chrome_driver.find_element_by_id('//*[@id="form"]/fieldset/div[2]/div')
# locate the progress bar
# brightnessLine.get_attribute("title")  # read the current value via the title attribute
brightnessSlider=chrome_driver.find_element_by_xpath('//*[@id="form"]/fieldset/div[2]/div/div/div')
# locate the slider handle
move_num = random.randint(10,150)
print('Move',move_num)
ActionChains(chrome_driver).click_and_hold(brightnessSlider).move_by_offset(move_num,7).release().perform()  # drag the slider with move_by_offset(); -6 would mean 6 pixels to the left horizontally, 7 means 7 pixels vertically
# email address
chrome_driver.find_element_by_xpath('//*[@id="email"]').send_keys(submit['Uspd']['email'])
# click
chrome_driver.find_element_by_xpath('//*[@id="form-submit"]').click()
sleep(10)
# page2
# credit type
for i in range(10):
try:
chrome_driver.find_element_by_xpath('//*[@id="creditType"]')
break
except:
sleep(10)
num_ = random.randint(0,1)
s1 = Select(chrome_driver.find_element_by_xpath('//*[@id="creditType"]'))
if num_ == 0:
s1.select_by_value('good')
else:
s1.select_by_value('fair')
sleep(1)
# loan reason
num_reason = random.randint(1,12)
s1 = Select(chrome_driver.find_element_by_xpath('//*[@id="loanReason"]'))
s1.select_by_index(num_reason)
sleep(1)
# firstname
chrome_driver.find_element_by_xpath('//*[@id="fName"]').send_keys(submit['Uspd']['first_name'])
# lastname
chrome_driver.find_element_by_xpath('//*[@id="lName"]').send_keys(submit['Uspd']['last_name'])
# birthday
date_of_birth = Submit_handle.get_auto_birthday(submit['Uspd']['date_of_birth'])
# mm
chrome_driver.find_element_by_xpath('//*[@id="birthdateMonth"]').send_keys(date_of_birth[0])
# dd
chrome_driver.find_element_by_xpath('//*[@id="birthdateDay"]').send_keys(date_of_birth[1])
# year
chrome_driver.find_element_by_xpath('//*[@id="birthdateYear"]').send_keys(date_of_birth[2])
sleep(1)
# military
elements = chrome_driver.find_element_by_xpath('//*[@id="label-armedForces-no"]').click()
# continue
element = '//*[@id="nextButton"]'
target = selenium_funcs.scroll_and_find_up(chrome_driver,element)
sleep(2)
target.click()
sleep(5)
# page3
# phone
# phone = submit['Uspd']['home_phone'].split('.')[0]
chrome_driver.find_element_by_xpath('//*[@id="phone"]').send_keys(submit['Uspd']['home_phone'].split('.')[0])
# address
chrome_driver.find_element_by_xpath('//*[@id="address"]').send_keys(submit['Uspd']['address'])
# zipcode
chrome_driver.find_element_by_xpath('//*[@id="zip"]').send_keys(submit['Uspd']['zip'])
# city
chrome_driver.find_element_by_xpath('//*[@id="city"]').click()
sleep(1)
# length at address
num_ = random.randint(3,10)
print('value is :',num_)
s1 = Select(chrome_driver.find_element_by_xpath('//*[@id="lengthAtAddress"]'))
s1.select_by_value(str(num_))
sleep(1)
# own home
chrome_driver.find_element_by_xpath('//*[@id="label-rentOwn-rent"]').click()
# employment
# income source
s1 = Select(chrome_driver.find_element_by_xpath('//*[@id="incomeSource"]'))
s1.select_by_value('EMPLOYMENT')
sleep(1)
# time employed
num_time = random.randint(2,4)
s1 = Select(chrome_driver.find_element_by_xpath('//*[@id="timeEmployed"]'))
s1.select_by_index(num_time)
sleep(1)
# get paid
num_paid = random.randint(1,4)
s1 = Select(chrome_driver.find_element_by_xpath('//*[@id="paidEvery"]'))
s1.select_by_index(num_paid)
sleep(1)
# employer name
chrome_driver.find_element_by_xpath('//*[@id="employerName"]').send_keys(submit['Uspd']['employer'])
# employer's phone
chrome_driver.find_element_by_xpath('//*[@id="employerPhone"]').send_keys(submit['Uspd']['work_phone'].split('.')[0])
# monthly gross income
num_income = random.randint(1,12)
s1 = Select(chrome_driver.find_element_by_xpath('//*[@id="monthlyNetIncome"]'))
s1.select_by_index(num_income)
sleep(1)
# Identity and Bank Information
# Driver's License or state ID
chrome_driver.find_element_by_xpath('//*[@id="license"]').send_keys(submit['Uspd']['drivers_license'].split('.')[0])
# ssn
chrome_driver.find_element_by_xpath('//*[@id="ssn"]').send_keys(submit['Uspd']['ssn'].split('.')[0])
# bank account type
s1 = Select(chrome_driver.find_element_by_xpath('//*[@id="accountType"]'))
s1.select_by_value('checking')
sleep(1)
# checkbox
chrome_driver.find_element_by_xpath('//*[@id="privacyPolicy"]').click()
sleep(3)
# mobile phone
phone = submit['Uspd']['home_phone'].split('.')[0]
chrome_driver.find_element_by_xpath('//*[@id="smsCellphone"]').send_keys(phone)
sleep(3)
# submit
chrome_driver.find_element_by_xpath('//*[@id="submitButton"]').click()
db.update_plan_status(2,submit['ID'])
sleep(100)
def test():
# db.email_test()
# date_of_birth = Submit_handle.get_auto_birthday('')
Mission_list = ['10038']
excel = 'Uspd'
Excel_name = [excel,'']
Email_list = ['hotmail.com','outlook.com','yahoo.com','aol.com','gmail.com']
submit = db.read_one_excel(Mission_list,Excel_name,Email_list)
[print(item,':',submit[excel][item]) for item in submit[excel] if submit[excel][item]!=None]
submit['Mission_Id'] = '10038'
chrome_driver = Chrome_driver.get_chrome(submit,pic=1)
web_submit(submit,chrome_driver,1)
if __name__=='__main__':
test()
|
while True:
try:
r = input()
q = int(input())
p = [int(x) - 1 for x in input().split()]
for c in range(q):
print(r[p[c]], end='')
print()
except EOFError:
break
|
import unittest
from hypothesis import given
from hypothesis import strategies as st
from validators import ValidationError, Required
from fields import Field, FormField, ListField, DictField, ChoiceField
class TestField(unittest.TestCase):
def test_simple(self):
field = Field()
for val in (None, 0, 1, -1, 0.1, 'text', [[]], {'key': 'val'}, object()):
self.assertFalse(field.validate(val))
def test_required(self):
field = Field(validators=[Required()])
for empty_val in ('', {}, [], None):
self.assertListEqual(field.validate(empty_val), ["Field is required"])
class FieldListTest(unittest.TestCase):
def test_min(self):
pass
def test_max(self):
pass
if __name__ == '__main__':
unittest.main()
|
class IntegerDemo:
def set_value(self, v):
self.value = v
def add(self, p):
self.value += p
def subtract(self, p):
self.value -= p
def multiply(self, p):
self.value *= p
i = IntegerDemo()
i.set_value(int(input("v: ")))
i.add(int(input("+: ")))
i.subtract(int(input("-: ")))
i.multiply(int(input("*: ")))
print(i.value)
# File: exercise0904.py
# Author: Kaiching Chang
# Date: July, 2014
|
#
# Copyright © 2021 Uncharted Software Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import numpy as np
from tqdm import tqdm
import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from torchvision.datasets.folder import pil_loader
from torchvision.models import resnet18, resnet34, resnet50, resnet101, densenet161
from basenet import BaseNet
from .base import DistilBaseModel
from .forest import ForestCV
from .helpers import tiebreaking_vote
from .metrics import metrics, classification_metrics, regression_metrics
# --
# IO helper
class PathDataset(Dataset):
def __init__(self, paths, transform=None, preload=True):
self.paths = paths
self.preload = preload
if self.preload:
print("PathDataset: preloading", file=sys.stderr)
self._samples = []
for p in tqdm(self.paths):
self._samples.append(pil_loader(p))
self.transform = transform
def __getitem__(self, idx):
if not self.preload:
sample = pil_loader(self.paths[idx])
else:
sample = self._samples[idx]
if self.transform is not None:
sample = self.transform(sample)
return sample, -1
def __len__(self):
return self.paths.shape[0]
# --
# Model
class FixedCNNFeatureExtractor(BaseNet):
def __init__(self, base_model, drop_last=1):
super().__init__()
self._model = nn.Sequential(*list(base_model.children())[:-drop_last])
def forward(self, x):
x = self._model(x)
while len(x.shape) > 2:
x = x.mean(dim=-1).squeeze()
return x
class FixedCNNForest(DistilBaseModel):
def __init__(self, target_metric):
self.target_metric = target_metric
self.is_classification = target_metric in classification_metrics
self._feature_extractors = [
resnet18,
resnet34,
resnet50,
resnet101,
densenet161,
]
self._y_train = None
self._models = None
self.transform = transforms.Compose(
[
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
]
)
def extract_features(self, fe, dataloaders, mode):
model = FixedCNNFeatureExtractor(fe(pretrained=True)).to("cuda")
model.verbose = True
_ = model.eval()
feats, _ = model.predict(dataloaders, mode=mode)
del model
return feats
def fit(self, X_train, y_train, U_train=None):
self._y_train = y_train
dataloaders = {
"train": DataLoader(
PathDataset(paths=X_train, transform=self.transform),
batch_size=32,
shuffle=False,
),
}
self._models = []
for fe in self._feature_extractors:
train_feats = self.extract_features(fe, dataloaders, mode="train")
model = ForestCV(target_metric=self.target_metric)
model = model.fit(train_feats, y_train)
self._models.append(model)
return self
def predict(self, X):
dataloaders = {
"test": DataLoader(
PathDataset(paths=X, transform=self.transform),
batch_size=32,
shuffle=False,
),
}
all_preds = []
for fe, model in zip(self._feature_extractors, self._models):
test_feats = self.extract_features(fe, dataloaders, mode="test")
all_preds.append(model.predict(test_feats))
if self.is_classification:
ens_pred = tiebreaking_vote(np.vstack(all_preds), self._y_train)
else:
ens_pred = np.stack(all_preds).mean(axis=0)
return ens_pred
|
import pandas as pd
data = [
{"Line Number": 11, "Report Hour": 6, "Kits Completed": 34},
{"Line Number": 11, "Report Hour": 7, "Kits Completed": 55},
{"Line Number": 12, "Report Hour": 6, "Kits Completed": 67},
{"Line Number": 12, "Report Hour": 7, "Kits Completed": 56},
{"Line Number": 14, "Report Hour": 6, "Kits Completed": 0},
{"Line Number": 14, "Report Hour": 7, "Kits Completed": 0},
{"Line Number": 15, "Report Hour": 6, "Kits Completed": 123},
{"Line Number": 15, "Report Hour": 7, "Kits Completed": 97},
]
df = pd.DataFrame.from_records(data)
grouped_by_hour = (
df.groupby("Report Hour", as_index=False).sum().drop(columns="Line Number")
)
chart = grouped_by_hour.plot.bar(x="Report Hour", y="Kits Completed", rot=0)
chart.figure.savefig('chart.png')
chart.figure.show()
print(grouped_by_hour)
input("Press a key to close the chart")
|
import os
bashCommand = "sudo arp-scan --interface=enp0s25 --localnet | grep 192.168.2.* > output.txt"
os.system(bashCommand)
f = open('output.txt', 'r')
print(f.read())
print(f)
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import json
from thread import allocate_lock as _allocate_lock
import requests
from mo_collections.queue import Queue
from mo_logs import Log
from mo_testing.fuzzytestcase import FuzzyTestCase
from mo_times.timer import Timer
from mo_threads import Lock, THREAD_STOP, Signal, Thread, ThreadedQueue, Till
from mo_threads.busy_lock import BusyLock
ACTIVEDATA_URL = "https://activedata.allizom.org/query"
class TestLocks(FuzzyTestCase):
@classmethod
def setUpClass(cls):
Log.start({"trace": True, "cprofile": False})
@classmethod
def tearDownClass(cls):
Log.stop()
def test_lock_speed(self):
SCALE = 1000*100
with Timer("create"):
locks = [_allocate_lock() for _ in range(SCALE)]
with Timer("acquire"):
for i in range(SCALE):
locks[i].acquire()
with Timer("release"):
for i in range(SCALE):
locks[i].release()
def test_queue_speed(self):
SCALE = 1000*10
done = Signal("done")
slow = Queue()
q = ThreadedQueue("test queue", queue=slow)
def empty(please_stop):
while not please_stop:
item = q.pop()
if item is THREAD_STOP:
break
done.go()
Thread.run("empty", empty)
timer = Timer("add {{num}} to queue", param={"num": SCALE})
with timer:
for i in range(SCALE):
q.add(i)
q.add(THREAD_STOP)
Log.note("Done insert")
done.wait()
self.assertLess(timer.duration.seconds, 1.5, "Expecting queue to be fast")
def test_lock_and_till(self):
locker = Lock("prime lock")
got_lock = Signal()
a_is_ready = Signal("a lock")
b_is_ready = Signal("b lock")
def loop(is_ready, please_stop):
with locker:
while not got_lock:
# Log.note("{{thread}} is waiting", thread=Thread.current().name)
locker.wait(till=Till(seconds=0))
is_ready.go()
locker.wait()
Log.note("thread is expected to get here")
thread_a = Thread.run("a", loop, a_is_ready)
thread_b = Thread.run("b", loop, b_is_ready)
a_is_ready.wait()
b_is_ready.wait()
with locker:
got_lock.go()
Till(seconds=0.1).wait() # MUST WAIT FOR a AND b TO PERFORM locker.wait()
Log.note("leaving")
pass
with locker:
Log.note("leaving again")
pass
Till(seconds=1).wait()
self.assertTrue(bool(thread_a.stopped), "Thread should be done by now")
self.assertTrue(bool(thread_b.stopped), "Thread should be done by now")
def test_till_in_loop(self):
def loop(please_stop):
counter = 0
while not please_stop:
(Till(seconds=0.001) | please_stop).wait()
counter += 1
Log.note("{{count}}", count=counter)
please_stop=Signal("please_stop")
Thread.run("loop", loop, please_stop=please_stop)
Till(seconds=1).wait()
with please_stop.lock:
self.assertLessEqual(len(please_stop.job_queue), 1, "Expecting only one pending job on go")
please_stop.go()
def test_consistency(self):
counter = [0]
lock = BusyLock()
def adder(please_stop):
for i in range(100):
with lock:
counter[0] += 1
threads = [Thread.run(unicode(i), adder) for i in range(50)]
for t in threads:
t.join()
self.assertEqual(counter[0], 100*50, "Expecting lock to work")
def query_activedata(suite, platforms=None):
query = json.dumps({
"from": "unittest",
"limit": 200000,
"groupby": ["result.test"],
"select": {"value": "result.duration", "aggregate": "average"},
"where": {"and": [
{"eq": {"suite": suite,
"build.platform": platforms
}},
{"gt": {"run.timestamp": {"date": "today-week"}}}
]},
"format": "list"
})
response = requests.post(
ACTIVEDATA_URL,
data=query,
stream=True
)
response.raise_for_status()
data = response.json()["data"]
return data
|
import click
from .mail import sendmail
from .utils import create_new_sheet, show_sheet,\
list_sheets, delete, check_in, check_out,\
generate_attachment_txt, generate_attachment_excel
@click.group()
def cli():
pass
@cli.command()
@click.argument('id', type=int, required=False)
def show(id):
"""Display a timesheet.Take timesheet id as parameter.If no parameter
is provided,latest timesheet will be shown by default"""
show_sheet(id)
@cli.command()
def new():
"""Add a new timesheet."""
create_new_sheet()
@cli.command()
def ls():
"""List all the timesheets."""
list_sheets()
@cli.command()
def checkin():
"""Checking in"""
check_in()
@cli.command()
@click.option('--message', '-m', type=str)
def checkout(message):
"""Checking out"""
check_out(message)
@cli.command()
@click.option('--sheet_id', '-s', type=int,
help='removes a sheet; takes a sheet id as parameter')
@click.option('--entry_id', '-e', type=int,
help='removes an entry; takes an entry id as parameter')
def rm(sheet_id, entry_id):
"""remove sheet or entry of your choice"""
delete(sheet_id, entry_id)
@cli.command()
@click.option('--to', prompt='To')
@click.option('--subject', prompt='Subject')
@click.option('--message', prompt='Message')
@click.option('--id', prompt='Attachment ID')
def email(to, subject, message, id):
attachment = generate_attachment_excel(int(id))
sendmail(to, subject, message, attachment)
|
from mod_base import*
class Evl(Command):
"""Evaluate a python expression."""
def run(self,win,user,data,caller=None):
try:
result = str(eval(data))
win.Send(result)
return True
except Exception,e:
win.Send("fail:"+str(e))
return False
module = {
"class": Evl,
"type": MOD_COMMAND,
"level": 5,
"zone":IRC_ZONE_BOTH
}
|
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
"""
This module contains definitions of memory units that work well for USB applications.
"""
import unittest
from amaranth import Elaboratable, Module, Signal, Memory
from amaranth.hdl.xfrm import DomainRenamer
from .test import LunaGatewareTestCase, sync_test_case
class TransactionalizedFIFO(Elaboratable):
""" Transactionalized, buffer first-in-first-out queue.
This FIFO is "transactionalized", which means that it allows sets of reads and writes to be "undone".
Effectively, this FIFO allows "rewinding" its read and write pointers to a previous point in time,
which makes it ideal for USB transmission or receipt; where the protocol can require blocks of data
to be retransmitted or ignored.
Attributes
----------
read_data: Signal(width), output
Contains the next byte in the FIFO. Valid only when :attr:``empty`` is false.
read_en: Signal(), input
When asserted, the current :attr:``read_data`` will move to the next value. The data is not
internally consumed/dequeued until :attr:``read_commit`` is asserted. This read can be "undone"
by asserting :attr:``read_discard``. Should only be asserted when :attr:``empty`` is false.
read_commit: Signal(), input
Strobe; when asserted, any reads performed since the last commit will be "finalized".
This effectively frees the memory associated with past reads. If this value is tied to '1',
the read port on this FIFO gracefully degrades to non-transactionalized port.
read_discard: Signal(), input
Strobe; when asserted; any reads since the last commit will be "undone", placing the read pointer
back at the queue position it had after the last :attr:``read_commit``.
empty: Signal(), output
Asserted when no data is available in the FIFO. This signal refers to whether data is available to
read. :attr:``read_commit`` will not change this value; but :attr:``read_discard`` will.
write_data: Signal(width), input
Holds the byte to be added to the FIFO when :attr:``write_en`` is asserted.
write_en: Signal(), input
When asserted, the current :attr:``write_data`` will be added to the FIFO; but will not be ready for read
until :attr:``write_commit`` is asserted. This write can be "undone" by asserting :attr:``write_discard``.
Should only be asserted when :attr:``full`` is false.
write_commit: Signal(), input
Strobe; when asserted, any writes performed since the last commit will be "finalized".
This makes the relevant data available for read.
write_discard: Signal(), input
Strobe; when asserted; any writes since the last commit will be "undone", placing the write pointer
back at the queue position it had after the last :attr:``write_commit``. This frees the relevant memory
for new writes.
full: Signal(), output
Asserted when no space is available for writes in the FIFO. :attr:``write_commit`` will not change
this value; but :attr:``write_discard`` will.
space_available: Signal(range(0, depth + 1)), output
Indicates the amount of space available in the FIFO. Useful for knowing whether we can add e.g. an
entire packet to the FIFO.
Attributes
----------
width: int
The width of each entry in the FIFO.
depth: int
The number of allowed entries in the FIFO.
name: str
The name of the relevant FIFO; to produce nicer debug output.
If not provided, Amaranth will attempt auto-detection.
domain: str
The name of the domain this module should exist in.
"""
def __init__(self, *, width, depth, name=None, domain="sync"):
self.width = width
self.depth = depth
self.name = name
self.domain = domain
#
# I/O port
#
self.read_data = Signal(width)
self.read_en = Signal()
self.read_commit = Signal()
self.read_discard = Signal()
self.empty = Signal()
self.write_data = Signal(width)
self.write_en = Signal()
self.write_commit = Signal()
self.write_discard = Signal()
self.full = Signal()
self.space_available = Signal(range(0, depth + 1))
def elaborate(self, platform):
m = Module()
# Range shortcuts for internal signals.
address_range = range(0, self.depth + 1)
#
# Core internal "backing store".
#
memory = Memory(width=self.width, depth=self.depth + 1, name=self.name)
m.submodules.read_port = read_port = memory.read_port()
m.submodules.write_port = write_port = memory.write_port()
# Always connect up our memory's data/en ports to ours.
m.d.comb += [
self.read_data .eq(read_port.data),
write_port.data .eq(self.write_data),
write_port.en .eq(self.write_en & ~self.full)
]
#
# Write port.
#
# We'll track two pieces of data: our _committed_ write position, and our current un-committed write one.
# This will allow us to rapidly backtrack to our pre-commit position.
committed_write_pointer = Signal(address_range)
current_write_pointer = Signal(address_range)
m.d.comb += write_port.addr.eq(current_write_pointer)
# Compute the location for the next write, accounting for wraparound. We'll not assume a binary-sized
# buffer; so we'll compute the wraparound manually.
next_write_pointer = Signal.like(current_write_pointer)
with m.If(current_write_pointer == self.depth):
m.d.comb += next_write_pointer.eq(0)
with m.Else():
m.d.comb += next_write_pointer.eq(current_write_pointer + 1)
# If we're writing to the fifo, update our current write position.
with m.If(self.write_en & ~self.full):
m.d.sync += current_write_pointer.eq(next_write_pointer)
# If we're committing a FIFO write, update our committed position.
with m.If(self.write_commit):
m.d.sync += committed_write_pointer.eq(current_write_pointer)
# If we're discarding our current write, reset our current position,
with m.If(self.write_discard):
m.d.sync += current_write_pointer.eq(committed_write_pointer)
#
# Read port.
#
# We'll track two pieces of data: our _committed_ read position, and our current un-committed read one.
# This will allow us to rapidly backtrack to our pre-commit position.
committed_read_pointer = Signal(address_range)
current_read_pointer = Signal(address_range)
# Compute the location for the next read, accounting for wraparound. We'll not assume a binary-sized
# buffer; so we'll compute the wraparound manually.
next_read_pointer = Signal.like(current_read_pointer)
with m.If(current_read_pointer == self.depth):
m.d.comb += next_read_pointer.eq(0)
with m.Else():
m.d.comb += next_read_pointer.eq(current_read_pointer + 1)
# Our memory always takes a single cycle to provide its read output; so we'll update its address
# "one cycle in advance". Accordingly, if we're about to advance the FIFO, we'll use the next read
# address as our input. If we're not, we'll use the current one.
with m.If(self.read_en & ~self.empty):
m.d.comb += read_port.addr.eq(next_read_pointer)
with m.Else():
m.d.comb += read_port.addr.eq(current_read_pointer)
# If we're reading from our the fifo, update our current read position.
with m.If(self.read_en & ~self.empty):
m.d.sync += current_read_pointer.eq(next_read_pointer)
# If we're committing a FIFO write, update our committed position.
with m.If(self.read_commit):
m.d.sync += committed_read_pointer.eq(current_read_pointer)
# If we're discarding our current write, reset our current position,
with m.If(self.read_discard):
m.d.sync += current_read_pointer.eq(committed_read_pointer)
#
# FIFO status.
#
# Our FIFO is empty if our read and write pointers point to the same location. We'll use the current
# read position (which leads ahead) and the committed write position (which lags behind).
m.d.comb += self.empty.eq(current_read_pointer == committed_write_pointer)
# For our space available, we'll use the current write position (which leads ahead) and our committed
# read position (which lags behind). This yields two cases: one where the buffer isn't wrapped around,
# and one where it is.
with m.If(self.full):
m.d.comb += self.space_available.eq(0)
with m.Elif(committed_read_pointer <= current_write_pointer):
m.d.comb += self.space_available.eq(self.depth - (current_write_pointer - committed_read_pointer))
with m.Else():
m.d.comb += self.space_available.eq(committed_read_pointer - current_write_pointer - 1)
# Our FIFO is full when advancing the write pointer would collide with the committed read pointer.
m.d.comb += self.full.eq(next_write_pointer == committed_read_pointer)
# If we're not supposed to be in the sync domain, rename our sync domain to the target.
if self.domain != "sync":
m = DomainRenamer({"sync": self.domain})(m)
return m
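# A minimal usage sketch (an illustration under assumed names, not part of this module):
# inside another Elaboratable's elaborate(), one might instantiate the FIFO and wire up
# its transactional strobes roughly like this:
#
#   m.submodules.tx_fifo = fifo = TransactionalizedFIFO(width=8, depth=64, domain="usb")
#   m.d.comb += [
#       fifo.write_data.eq(data_byte),    # 'data_byte' / 'byte_valid' are hypothetical signals
#       fifo.write_en.eq(byte_valid),
#   ]
#   # commit the packet only once it is known to be complete; discard it otherwise
#   m.d.comb += fifo.write_commit.eq(packet_complete)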
class TransactionalizedFIFOTest(LunaGatewareTestCase):
FRAGMENT_UNDER_TEST = TransactionalizedFIFO
FRAGMENT_ARGUMENTS = {'width': 8, 'depth': 16}
def initialize_signals(self):
yield self.dut.write_en.eq(0)
@sync_test_case
def test_simple_fill(self):
dut = self.dut
# Our FIFO should start off empty; and with a full depth of free space.
self.assertEqual((yield dut.empty), 1)
self.assertEqual((yield dut.full), 0)
self.assertEqual((yield dut.space_available), 16)
# If we add a byte to the queue...
yield dut.write_data.eq(0xAA)
yield from self.pulse(dut.write_en)
# ... we should have less space available ...
self.assertEqual((yield dut.space_available), 15)
# ... but we still should be "empty", as we won't have data to read until we commit.
self.assertEqual((yield dut.empty), 1)
# Once we _commit_ our write, we should suddenly have data to read.
yield from self.pulse(dut.write_commit)
self.assertEqual((yield dut.empty), 0)
# If we read a byte, we should see the FIFO become empty...
yield from self.pulse(dut.read_en)
self.assertEqual((yield dut.empty), 1)
# ... but we shouldn't see more space become available until we commit the read.
self.assertEqual((yield dut.space_available), 15)
yield from self.pulse(dut.read_commit)
self.assertEqual((yield dut.space_available), 16)
# If we write 16 more bytes of data...
yield dut.write_en.eq(1)
for i in range(16):
yield dut.write_data.eq(i)
yield
yield dut.write_en.eq(0)
# ... our buffer should be full, but also empty.
# This paradox exists as we've filled our buffer with uncommitted data.
yield
self.assertEqual((yield dut.full), 1)
self.assertEqual((yield dut.empty), 1)
# Once we _commit_ our data, it should suddenly stop being empty.
yield from self.pulse(dut.write_commit)
self.assertEqual((yield dut.empty), 0)
# Reading a byte _without committing_ shouldn't change anything about empty/full/space-available...
yield from self.pulse(dut.read_en)
self.assertEqual((yield dut.empty), 0)
self.assertEqual((yield dut.full), 1)
self.assertEqual((yield dut.space_available), 0)
# ... but committing should increase our space available by one, and make our buffer no longer full.
yield from self.pulse(dut.read_commit)
self.assertEqual((yield dut.empty), 0)
self.assertEqual((yield dut.full), 0)
self.assertEqual((yield dut.space_available), 1)
# Reading/committing another byte should increment our space available.
yield from self.pulse(dut.read_en)
yield from self.pulse(dut.read_commit)
self.assertEqual((yield dut.space_available), 2)
# Writing data into the buffer should then fill it back up again...
yield dut.write_en.eq(1)
for i in range(2):
yield dut.write_data.eq(i)
yield
yield dut.write_en.eq(0)
# ... meaning it will again be full, and have no space remaining.
yield
self.assertEqual((yield dut.full), 1)
self.assertEqual((yield dut.space_available), 0)
# If we _discard_ this data, we should go back to having two bytes available.
yield from self.pulse(dut.write_discard)
self.assertEqual((yield dut.full), 0)
self.assertEqual((yield dut.space_available), 2)
# If we read the data that's remaining in the fifo...
yield dut.read_en.eq(1)
for i in range(2, 16):
yield
self.assertEqual((yield dut.read_data), i)
yield dut.read_en.eq(0)
# ... our buffer should again be empty.
yield
self.assertEqual((yield dut.empty), 1)
self.assertEqual((yield dut.space_available), 2)
# If we _discard_ our current read, we should then see our buffer no longer empty...
yield from self.pulse(dut.read_discard)
self.assertEqual((yield dut.empty), 0)
# and we should be able to read the same data again.
yield dut.read_en.eq(1)
for i in range(2, 16):
yield
self.assertEqual((yield dut.read_data), i)
yield dut.read_en.eq(0)
# On committing this, we should see a buffer that is no longer full, and is really empty.
yield from self.pulse(dut.read_commit)
self.assertEqual((yield dut.empty), 1)
self.assertEqual((yield dut.full), 0)
self.assertEqual((yield dut.space_available), 16)
if __name__ == "__main__":
unittest.main()
|
# -*- coding: utf-8 -*-
class Solution:
def maximum69Number(self, num: int) -> int:
digits = list(str(num))
for i, digit in enumerate(digits):
if digit == "6":
digits[i] = "9"
break
return int("".join(digits))
if __name__ == "__main__":
solution = Solution()
assert 9969 == solution.maximum69Number(9669)
assert 9999 == solution.maximum69Number(9996)
assert 9999 == solution.maximum69Number(9999)
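    # Sanity check for an equivalent approach (an illustrative addition, not part of the
    # original solution): str.replace with count=1 flips only the first '6' and gives the same answer.
    assert 9969 == int(str(9669).replace("6", "9", 1))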
|
#!/usr/bin/python
import BaseHTTPServer
import re
import urllib
import sys
import traceback
import os
def ParseArgs(uri):
if not uri:
return {}
kvpairs = uri.split('&')
result = {}
for kvpair in kvpairs:
k, v = kvpair.split('=')
result[k] = urllib.unquote(v)
return result
class RequestDispatcher(BaseHTTPServer.BaseHTTPRequestHandler):
    # Workaround for stupidity in BaseHTTPServer: it creates a new handler
    # instance for each request.
#
# Warning: it is NOT safe to call methods of this object in parallel.
def __init__(self, self_addr):
self.handlers = {}
self.proxy_handler = None
self.self_addr = self_addr
def RegisterHandler(self, uri, handler):
assert uri not in self.handlers
self.handlers[uri] = handler
def RegisterProxyHandler(self, handler):
self.proxy_handler = handler
def HandleRequest(self, path):
"""Returns tuple (content, mime-type).
Returns None for unknown requests.
"""
if path.startswith(self.self_addr):
path = path[len(self.self_addr):]
if not path.startswith('/'):
if not self.proxy_handler:
raise Exception('No host-handler defined')
return self.proxy_handler(path)
m = re.match('(/[^?]*)\??(.*)', path)
uri = m.group(1)
handler = self.handlers.get(uri)
if not handler:
raise Exception('No handler for uri %s' % uri)
args = ParseArgs(m.group(2))
return handler(args)
def DoHandle(self, request, client_address, socket_server):
self.request = request
self.client_address = client_address
self.server = socket_server
self.setup()
self.handle()
self.finish()
def do_GET(self):
try:
content, mimetype = self.HandleRequest(self.path)
self.send_response(200)
self.send_header('Content-type', mimetype)
self.send_header('Content-length', len(content))
self.end_headers()
self.wfile.write(content)
except Exception, exc:
            print 'Exception occurred:', exc
traceback.print_exc()
error = str(exc)
self.send_response(404)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(error)
# FIXME duplication:
def do_HEAD(self):
try:
content, mimetype = self.HandleRequest(self.path)
self.send_response(200)
self.send_header('Content-type', mimetype)
self.send_header('Content-length', len(content))
self.end_headers()
except Exception, exc:
            print 'Exception occurred:', exc
traceback.print_exc()
error = str(exc)
self.send_response(404)
self.send_header('Content-type', 'text/html')
self.end_headers()
class MoreBaseHttpServer(object):
def __init__(self, port):
self.dispatcher = RequestDispatcher('http://localhost:' + str(port))
self.server = BaseHTTPServer.HTTPServer(('', port),
self.dispatcher.DoHandle)
def Serve(self):
self.server.serve_forever()
def RegisterHandler(self, uri, handler):
self.dispatcher.RegisterHandler(uri, handler)
def RegisterProxyHandler(self, handler):
self.dispatcher.RegisterProxyHandler(handler)
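# Minimal usage sketch (not part of the original module; the handler, port and URI below are
# illustrative assumptions): a handler receives the parsed query args and returns a
# (content, mime-type) tuple, which do_GET then writes back to the client.
def _example_usage():
    def hello_handler(args):
        name = args.get('name', 'world')
        return ('<h1>Hello, %s!</h1>' % name, 'text/html')
    server = MoreBaseHttpServer(8000)
    server.RegisterHandler('/hello', hello_handler)
    # Blocks forever; try http://localhost:8000/hello?name=you
    server.Serve()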
|
from django.conf.urls import url
from django.urls import path, include
from django.views.generic import TemplateView
from post_app.views import *
from post_app import views
urlpatterns = [
path('', PostListViewAPI.as_view(), name='list'),
path('create/', PostCreateAPIView.as_view(), name='create'),
url(r'^(?P<pk>\d+)/$', PostDetailAPIView.as_view(), name='detail'),
url(r'^(?P<pk>\d+)/delete$', PostDeleteAPIView.as_view(), name='delete'),
url(r'^(?P<pk>\d+)/edit$', PostUpdateAPIView.as_view(), name='update'),
url(r'^(?P<post>\w+)/comment$', CommentCreateAPIView.as_view(), name='create_comment'),
url(r'^(?P<post>\w+)/like/', LikeCreateAPIView.as_view(), name='create_like'),
]
|
#!/usr/bin/env python3
# Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""Shared front-end analyzer specific presubmit script.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import imp
import os.path
import subprocess
USE_PYTHON3 = True
def runSmokeTest(input_api, output_api):
hasChangedFiles = False
for git_file in input_api.AffectedTextFiles():
filename = git_file.AbsoluteLocalPath()
if filename.endswith(".dart"):
hasChangedFiles = True
break
if hasChangedFiles:
local_root = input_api.change.RepositoryRoot()
utils = imp.load_source('utils',
os.path.join(local_root, 'tools', 'utils.py'))
dart = os.path.join(utils.CheckedInSdkPath(), 'bin', 'dart')
smoke_test = os.path.join(local_root, 'pkg', '_fe_analyzer_shared',
'tool', 'smoke_test_quick.dart')
windows = utils.GuessOS() == 'win32'
if windows:
dart += '.exe'
if not os.path.isfile(dart):
print('WARNING: dart not found: %s' % dart)
return []
if not os.path.isfile(smoke_test):
print('WARNING: _fe_analyzer_shared smoke test not found: %s' %
smoke_test)
return []
args = [dart, smoke_test]
process = subprocess.Popen(
args, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
outs, _ = process.communicate()
if process.returncode != 0:
return [
output_api.PresubmitError(
'_fe_analyzer_shared smoke test failure(s):',
long_text=outs)
]
return []
def CheckChangeOnCommit(input_api, output_api):
return runSmokeTest(input_api, output_api)
def CheckChangeOnUpload(input_api, output_api):
return runSmokeTest(input_api, output_api)
|
import random
from termcolor import colored
import argparse
import re
import mana
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument("-l", "--library", help='The library path', required=True)
parser.add_argument("-d", "--debug", action='store_true', help='Enable Debug mode')
parser.add_argument("-t", "--turns", type=int, default=1000, help='Number of turns')
args = parser.parse_args()
if args is None:
parser.print_help()
else:
print(args)
# Vars
library = []
battlefield = []
tapped = []
stormCount = 0
landCount = 0
# Parses Library
def parseList(path):
    with open(path, "r") as ins:
array = []
pattern = re.compile("^([A-Za-z ]+)x([0-9]+)$")
for line in ins:
if pattern.match(line.strip()):
match = re.search(pattern, line)
for i in range(0, int(match.group(2))):
array.append(match.group(1).strip())
else:
array.append(line.strip())
return array
def shuffle(library):
random.shuffle(library)
# Draws from library
def draw(library, hand):
if len(library) == 0:
return
card = library[0]
library.remove(card)
hand.append(card)
return card
# Returns all of one card to your hand
def bounceAll(item, hand):
global battlefield
global tapped
battleFieldTemp = []
for card in battlefield:
if card == item:
hand.append(card)
else:
battleFieldTemp.append(card)
battlefield = battleFieldTemp
tappedTemp = []
for card in tapped:
if card == item:
hand.append(card)
else:
tappedTemp.append(card)
tapped = tappedTemp
return hand
# Draws the starting hand
def drawHand():
global library
global battlefield
global tapped
global hand
battlefield = []
tapped = []
library=parseList(args.library)
if len(library) < 60:
print "Library too small", len(library), library
        return None
shuffle(library)
hand = []
handSize = 7
isDrawing = True
while isDrawing:
hand = []
for i in range(0,handSize):
draw(library, hand)
if handSize == 5:
isDrawing = False
if "engine" in hand and mana.inHand(hand) > 0 and "cheerio" in hand:
isDrawing = False
handSize -= 1
return hand
# The mechanics of playing a card
def play(card, hand):
    global stormCount
    if args.debug: print "Playing", card, " | Mana Available", mana.available(battlefield, hand), " | Storm Count", stormCount
hand.remove(card)
battlefield.append(card)
if card is "fetchland" or card is "land":
global landCount
landCount += 1
if card == "fetchland" and "land" in library:
battlefield.remove("fetchland")
library.remove("land")
battlefield.append("land")
else:
stormCount += 1
if card is "wraith":
battlefield.remove("wraith")
drawCard = draw(library, hand)
if args.debug: print "..drew ", drawCard
if card == "engine":
mana.tap(2, battlefield, tapped, hand)
if card == "serum":
drawCard = draw(library, hand)
battlefield.remove(card)
mana.tap(1, battlefield, tapped, hand)
scry(2)
if card == "retract":
hand = bounceAll("cheerio", hand)
hand = bounceAll("mox opal", hand)
battlefield.remove(card)
mana.tap(1, battlefield, tapped, hand)
if card == "recall":
hand = bounceAll("cheerio", hand)
hand = bounceAll("mox opal", hand)
battlefield.remove(card)
mana.tap(2, battlefield, tapped, hand)
if card == "cheerio":
draws = []
for i in range(0, battlefield.count("engine")):
draws.append(draw(library, hand))
if args.debug and len(draws) > 0: print ".. drew", draws
return hand
# Looks at the top cards on a library
def scry(scryNum):
    for i in range(0, scryNum):
scryCard = library[i]
if args.debug: print "..scrying", scryCard
if scryCard in ["land", "fetchland", "mox opal"] and mana.available(battlefield, hand) + mana.inHand(hand) >= 1:
library.remove(scryCard)
library.append(scryCard)
if scryCard not in ["land", "fetchland", "mox opal"] and mana.available(battlefield, hand) + mana.inHand(hand) < 2:
library.remove(scryCard)
library.append(scryCard)
if scryCard == "erayo":
library.remove(scryCard)
library.append(scryCard)
if battlefield.count("engine") + hand.count("engine") > 2 and scryCard == "engine":
library.remove(scryCard)
library.append(scryCard)
# Main game loop
def game():
if args.debug: print " "
hand = drawHand()
if len(hand) < 7:
scry(1)
turnNum = 0
global stormCount
global landCount
while "grapeshot" not in battlefield and turnNum < 20:
turnNum += 1
stormCount = 0
landCount = 0
turn(turnNum, hand)
return turnNum
# Handles your turn logic
def turn(turnNum, hand):
if args.debug:
print colored('Turn ' + str(turnNum), 'green')
print colored(hand, 'green')
# Upkeep: Untap
mana.untap(tapped, battlefield)
# Upkeep: draw
if turnNum > 1:
drawCard = draw(library, hand)
if args.debug: print "Drawing", drawCard
# Mainphase
turnMainPhase(turnNum, hand)
def isCheerio(card):
return card is "cheerio" or card is "paradise mantle"
# Main phase of your turn
def turnMainPhase(turnNum, hand):
if stormCount > 20 and "grapeshot" in hand:
play("grapeshot", hand)
    elif landCount == 0 and "fetchland" in hand:
play("fetchland", hand)
turnMainPhase(turnNum, hand)
    elif landCount == 0 and "land" in hand:
play("land", hand)
turnMainPhase(turnNum, hand)
elif "mox opal" in hand and "mox opal" not in battlefield:
play("mox opal", hand)
turnMainPhase(turnNum, hand)
elif "engine" in hand and battlefield.count("engine") < 4 and mana.available(battlefield, hand) >= 2:
play("engine", hand)
turnMainPhase(turnNum, hand)
elif "serum" in hand and mana.available(battlefield, hand) >= 1 and len(library) > 2:
play("serum", hand)
turnMainPhase(turnNum, hand)
elif "cheerio" in hand and battlefield.count("engine") >= 2:
play("cheerio", hand)
turnMainPhase(turnNum, hand)
elif "cheerio" in hand and "engine" in battlefield and "engine" not in hand:
play("cheerio", hand)
turnMainPhase(turnNum, hand)
elif "mox opal" in battlefield and battlefield.count("cheerio") < 2 and hand.count("cheerio") >= 2 and "engine" in hand and mana.available(battlefield, hand) < 2:
if args.debug: print ".. attempting to turn on mox"
play("cheerio", hand)
play("cheerio", hand)
turnMainPhase(turnNum, hand)
elif "retract" in hand and battlefield.count("cheerio") > 0 and mana.available(battlefield, hand) >= 1:
play("retract", hand)
turnMainPhase(turnNum, hand)
elif "recall" in hand and battlefield.count("cheerio") > 0 and mana.available(battlefield, hand) >= 2:
play("recall", hand)
turnMainPhase(turnNum, hand)
elif "wraith" in hand:
play("wraith", hand)
turnMainPhase(turnNum, hand)
# Average hand sizes that contain engine
handSizes = []
for i in range(0, 1000):
hand = drawHand()
handSizes.append(len(hand))
print "Hand size average:", sum(handSizes)/float(len(handSizes))
# Average turns until win
turnCounts = []
for i in range(0, args.turns):
turnCounts.append( game() )
print "Simulating", args.turns, "Average turns to win:", sum(turnCounts)/float(len(turnCounts))
for i in range(0, 20):
if turnCounts.count(i) > 0:
print "Turn", i, 100 * turnCounts.count(i) / float(args.turns), "%"
|
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Audience
from advertising.serializers import AudienceSerializer
AUDIENCE_URL = reverse("advertising:audience-list")
class PublicAudienceApiTests(TestCase):
"""Test that publicity is available"""
def setUp(self) -> None:
self.client = APIClient()
def test_login_required(self):
"""Test that login is required for retrieving audiences"""
res = self.client.get(AUDIENCE_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateAudienceApiTests(TestCase):
"""Test the authorized user tags api"""
def setUp(self) -> None:
self.client = APIClient()
self.user = get_user_model().objects.create(
email='test.random@mail.com',
password='11111'
)
self.client.force_authenticate(user=self.user)
def test_retrieve_audiences(self):
"""Test retrieving audiences"""
Audience.objects.create(name='woman')
Audience.objects.create(name='man')
res = self.client.get(AUDIENCE_URL)
audience = Audience.objects.all().order_by('-name')
serializer = AudienceSerializer(audience, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
    def test_create_audience_successful(self):
        """Test creating a new audience is successful"""
payload = {
'name': 'woman'
}
res = self.client.post(AUDIENCE_URL, payload)
exists = Audience.objects.filter(
name=payload['name']
).exists()
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
self.assertTrue(exists)
def test_create_audience_invalid(self):
"""Test creating a new audience with invalid payload"""
payload = {
'name': ''
}
res = self.client.post(AUDIENCE_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
|
from django.conf.urls import patterns, include, url
from bees import urls as bees_urls
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'bee_farm.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^bees/', include(bees_urls, namespace='bees')),
)
|
from django.db.models.query import QuerySet
from django.shortcuts import render
from question.models import Question
from rest_framework import generics
from .serializers import QuestionSerializer
# Create your views here.
class QuestionsList(generics.ListAPIView):
    queryset = Question.objects.all()
    serializer_class = QuestionSerializer
class QuestionDetailView(generics.RetrieveAPIView):
    queryset = Question.objects.all()
    serializer_class = QuestionSerializer
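# Hedged wiring sketch (an assumption, not this project's actual routing): in the app's
# urls.py these generic views would typically be exposed roughly as below; the paths and
# route names are illustrative only.
def _example_urlpatterns():
    from django.urls import path
    return [
        path('questions/', QuestionsList.as_view(), name='question-list'),
        path('questions/<int:pk>/', QuestionDetailView.as_view(), name='question-detail'),
    ]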
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#############################################################################
# #
# create_ede_plots.py: create ede plots #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Mar 12, 2021 #
# #
#############################################################################
import os
import sys
import re
import random
import numpy
import time
import math
import datetime
import Chandra.Time
import matplotlib as mpl
if __name__ == '__main__':
mpl.use('Agg')
from pylab import *
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import matplotlib.lines as lines
#
#--- reading directory list
#
path = '/data/mta/Script/Grating/Grating_EdE/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
data = [line.strip() for line in f.readlines()]
for ent in data:
atemp = re.split(':', ent)
var = atemp[1].strip()
line = atemp[0].strip()
exec("%s = %s" %(var, line))
#
#--- append paths to private folders to the python path
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
import mta_common_functions as mcf
import robust_linear as robust
#--------------------------------------------------------------------------
#-- plot_ede_data: create letg, metg, and hetg ede plots --
#--------------------------------------------------------------------------
def plot_ede_data():
"""
create letg, metg, and hetg ede plots
    input: none, but read from <type><side>_all.txt
output: <web_dir>/Plots/<type>_<side>_<start>_<stop>.png
<web_dir>/Plots/<type>_ede_plot.html
"""
pfile = 'hegp1_data'
mfile = 'hegm1_data'
itype = 'hetg'
create_plot(pfile, mfile, itype)
pfile = 'megp1_data'
mfile = 'megm1_data'
itype = 'metg'
create_plot(pfile, mfile, itype)
pfile = 'legpall_data'
mfile = 'legmall_data'
itype = 'letg'
create_plot(pfile, mfile, itype)
#--------------------------------------------------------------------------
#-- create_plot: creating plots for given categories                      --
#--------------------------------------------------------------------------
def create_plot(pfile, mfile, itype):
"""
    creating plots for given categories
input: pfile --- plus side data file name
mfile --- minus side data file name
itype --- type of the data letg, metg, hetg
output: <web_dir>/Plots/<type>_<side>_<start>_<stop>.png
<web_dir>/Plots/<type>_ede_plot.html
"""
pdata = read_ede_data(pfile)
p_p_list = plot_each_year(pdata, itype, 'p')
mdata = read_ede_data(mfile)
m_p_list = plot_each_year(mdata, itype, 'm')
create_html_page(itype, p_p_list, m_p_list)
#--------------------------------------------------------------------------
#-- plot_each_year: create plots for each year for the given categories ---
#--------------------------------------------------------------------------
def plot_each_year(tdata, itype, side):
"""
create plots for each year for the given categories
    input: tdata --- a list of lists of data (see read_ede_data below)
itype --- a type of grating; letg, metg, hetg
           side  --- plus or minus
"""
#
#--- find the current year and group data for 5 year interval
#
tyear = int(float(datetime.datetime.today().strftime("%Y")))
nstep = int((tyear - 1999)/5) + 1
tarray = numpy.array(tdata[0])
energy = numpy.array(tdata[2])
denergy = numpy.array(tdata[4])
p_list = []
for k in range(0, nstep):
start = 1999 + 5 * k
stop = start + 5
#
#--- selecting out data
#
selec = (tarray > start) & (tarray < stop)
eng = energy[selec]
ede = denergy[selec]
outfile = str(itype) + '_' + str(side) + '_' + str(start) + '_' + str(stop) + '.png'
p_list.append(outfile)
outfile = web_dir+ 'Plots/' + outfile
#
#--- actually plotting data
#
plot_data(eng, ede, start, stop, itype, outfile)
return p_list
#--------------------------------------------------------------------------
#-- select_data: select out data which fit to the selection criteria --
#--------------------------------------------------------------------------
def select_data(idata, itype):
"""
select out data which fit to the selection criteria
    input: idata
idata[0]: year
idata[1]: obsid
idata[2]: links
idata[3]: energy
idata[4]: fwhm
idata[5]: denergy
idata[6]: error
idata[7]: order
idata[8]: cnt
idata[9]: roi_cnt
idata[10]: acf
idata[11]: acf_err
itype --- type of the data; letg, metg, hetg
    output: out --- selected portion of the data
"""
out = []
for k in range(0, 12):
out.append([])
for m in range(0, len(idata[0])):
if (idata[6][m] / idata[4][m] < 0.15):
#
#-- letg case
#
if itype == 'letg':
for k in range(0, 12):
out[k].append(idata[k][m])
#
#--- metg case
#
elif idata[4][m] * 1.0e3 / idata[3][m] < 5.0:
if itype == 'metg':
for k in range(0, 12):
out[k].append(idata[k][m])
#
#--- hetg case
#
else:
if abs(idata[3][m] - 1.01) > 0.01:
for k in range(0, 12):
out[k].append(idata[k][m])
return out
#--------------------------------------------------------------------------
#-- read_ede_data: read data file --
#--------------------------------------------------------------------------
def read_ede_data(infile):
"""
read ede data file
input: infile --- input file name
output: a list of:
idata[0]: year
idata[1]: obsid
idata[2]: energy
idata[3]: fwhm
idata[4]: denergy
idata[5]: error
idata[6]: order
idata[7]: cnt
idata[8]: roi_cnt
idata[9]: acf
idata[10]: acf_err
idata[11]: links
"""
dfile = data_dir + infile
data = mcf.read_data_file(dfile)
idata = mcf.separate_data_to_arrays(data, com_out='#')
year = numpy.array(idata[0]).astype(int)
obsid = []
for ent in idata[1]:
try:
val = str(int(float(ent)))
except:
val = ent
obsid.append(val)
energy = numpy.array(idata[2])
fwhm = idata[3]
denergy = numpy.array(idata[4])
error = idata[5]
order = list(numpy.array(idata[6]).astype(int))
cnt = list(numpy.array(idata[7]).astype(int))
roi_cnt = idata[8]
acf = idata[9]
acf_err = idata[10]
links = idata[11]
return [year, obsid, energy, fwhm, denergy, error, order, cnt, roi_cnt, acf, acf_err, links]
#--------------------------------------------------------------------------
#-- plot_data: plot a data in log-log form --
#--------------------------------------------------------------------------
def plot_data(x, y, start, stop, itype, outfile):
"""
plot a data in log-log form
input: x --- x data
y --- y data
start --- starting year
stop --- stopping year
itype --- type of the data, letg, metg, hetg
outfile --- output png file name
output; outfile
"""
#
#--- set plotting range
#
if itype == 'letg':
xmin = 0.05
xmax = 20.0
ymin = 0.01
ymax = 100000
xpos = 2
ypos = 15000
ypos2 = 9000
else:
xmin = 0.2
xmax = 10.0
ymin = 1
ymax = 100000
xpos = 2
ypos = 30000
ypos2 =18000
plt.close('all')
ax = plt.subplot(111)
ax.set_autoscale_on(False)
ax.set_xbound(xmin,xmax)
ax.set_xlim(left=xmin, right=xmax, auto=False)
ax.set_ylim(bottom=ymin, top=ymax, auto=False)
ax.set_xscale('log')
ax.set_yscale('log')
props = font_manager.FontProperties(size=24)
mpl.rcParams['font.size'] = 24
mpl.rcParams['font.weight'] = 'bold'
#
#--- plot data
#
plt.plot(x, y, color='blue', marker='o', markersize=6, lw=0)
plt.xlabel('Energy (KeV)')
plt.ylabel('E / dE')
text = 'Years: ' + str(start) + ' - ' + str(stop)
plt.text(xpos, ypos, text)
#
#--- compute fitting line and plot on the scattered plot
#
[xe, ye, a, b] = fit_line(x, y, xmin, xmax)
plt.plot(xe, ye, color='red', marker='', markersize=0, lw=2)
line = 'Slope(log-log): %2.3f' % (b)
plt.text(xpos, ypos2, line)
    fig = plt.gcf()
fig.set_size_inches(15.0, 10.0)
plt.tight_layout()
plt.savefig(outfile, format='png')
plt.close('all')
#--------------------------------------------------------------------------
#-- fit_line: fit robust fit line on the data on log-log plane --
#--------------------------------------------------------------------------
def fit_line(x, y, xmin, xmax):
"""
fit robust fit line on the data on log-log plane
input: x --- x data
y --- y data
xmin --- min of x
xmax --- max of x
"""
#
#--- convert the data into log
#
xl = numpy.log10(x)
yl = numpy.log10(y)
#
#--- fit a line on log-log plane with robust fit
#
if len(xl) > 4:
[a, b, e] = robust.robust_fit(xl, yl)
else:
a = 0.0
b = 0.0
e = 0.0
#
#--- compute plotting data points on non-log plane; it is used by the plotting routine
#
xsave = []
ysave = []
step = (xmax - xmin) /100.0
for k in range(0, 100):
xe = xmin + step * k
ye = 10**(a + b * math.log10(xe))
xsave.append(xe)
ysave.append(ye)
return [xsave, ysave, a , b]
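#
#--- quick sanity check (an illustrative sketch only; nothing in this script calls it):
#--- a pure power law y = x**2 should come back with a log-log slope close to 2
#
def _fit_line_sanity_check():
    xs = numpy.linspace(1.0, 10.0, 50)
    [xe, ye, a, b] = fit_line(xs, xs**2, 1.0, 10.0)
    return b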
#--------------------------------------------------------------------------
#-- create_html_page: create html page for the given type --
#--------------------------------------------------------------------------
def create_html_page(itype, p_p_list, m_p_list):
"""
create html page for the given type
input: itype --- type of data; letg, metg, hetg
p_p_list --- a list of plus side png plot file names
m_p_list --- a list of minus side png plot file names
output: <web_dir>/<itype>_ede_plot.html
"""
if itype == 'letg':
rest = ['metg', 'hetg']
elif itype == 'metg':
rest = ['letg', 'hetg']
else:
rest = ['letg', 'metg']
hfile = house_keeping + 'plot_page_header'
with open(hfile, 'r') as f:
line = f.read()
line = line + '<h2>' + itype.upper() + '</h2>\n'
line = line + '<p style="text-align:right;">\n'
for ent in rest:
line = line + '<a href="' + ent + '_ede_plot.html">Open: ' + ent.upper() + '</a></br>\n'
line = line + '<a href="../index.html">Back to Main Page</a>'
line = line + '</p>\n'
line = line + '<table border = 0 >\n'
line = line + '<tr><th style="width:45%;">Minus Side</th>'
line = line + '<th style="width:45%;">Plus Side</th></tr>\n'
for k in range(0, len(p_p_list)):
line = line + '<tr>'
line = line + '<th style="width:45%;">'
line = line + '<a href="javascript:WindowOpener(\'Plots/' + m_p_list[k] +'\')">'
line = line + '<img src="./Plots/' + m_p_list[k] + '" style="width:95%;"></a></th>\n'
line = line + '<th style="width:45%;">'
line = line + '<a href="javascript:WindowOpener(\'Plots/' + p_p_list[k] +'\')">'
line = line + '<img src="./Plots/' + p_p_list[k] + '" style="width:95%;"></a></th>\n'
line = line + '</tr>\n'
line = line + '</table>\n'
line = line + '</body>\n</html>\n'
outname = web_dir + itype + '_ede_plot.html'
with open(outname, 'w') as fo:
fo.write(line)
#--------------------------------------------------------------------------
if __name__ == '__main__':
plot_ede_data()
|
from django.contrib import admin
from .models import Video, Device, RemoteHistory, Lock, Record, Door, AddDevice
# Register your models here.
admin.site.register(Door)
admin.site.register(Video)
admin.site.register(Device)
admin.site.register(RemoteHistory)
admin.site.register(Lock)
admin.site.register(Record)
admin.site.register(AddDevice)
|
from flask import Blueprint
from flask import jsonify
from shutil import copyfile, move
from google.cloud import storage
from google.cloud import bigquery
import dataflow_pipeline.unificadas.unificadas_segmentos_beam as unificadas_segmentos_beam
import dataflow_pipeline.unificadas.unificadas_campanas_beam as unificadas_campanas_beam
import dataflow_pipeline.unificadas.unificadas_codigos_gestion_beam as unificadas_codigos_gestion_beam
import dataflow_pipeline.unificadas.unificadas_codigos_causal_beam as unificadas_codigos_causal_beam
import dataflow_pipeline.unificadas.unificadas_tipificaciones_beam as unificadas_tipificaciones_beam
import dataflow_pipeline.unificadas.unificadas_gestiones_beam as unificadas_gestiones_beam
import dataflow_pipeline.unificadas.unificadas_usuarios_beam as unificadas_usuarios_beam
import dataflow_pipeline.unificadas.unificadas_clientes_beam as unificadas_clientes_beam
import dataflow_pipeline.unificadas.unificadas_seg_campanas_beam as unificadas_seg_campanas_beam
import dataflow_pipeline.unificadas.unificadas_data_beam as unificadas_data_beam
import dataflow_pipeline.unificadas.base_asignacion_avalcreditos as base_asignacion_avalcreditos
import dataflow_pipeline.unificadas.base_asignacion_coopantex as base_asignacion_coopantex
import dataflow_pipeline.unificadas.base_asignacion_diaria_avalcreditos as base_asignacion_diaria_avalcreditos
import dataflow_pipeline.unificadas.base_asignacion_diaria_coopantex as base_asignacion_diaria_coopantex
import dataflow_pipeline.unificadas.base_gestiones_coopantex as base_gestiones_coopantex
import dataflow_pipeline.unificadas.base_gestiones_avalcredito as base_gestiones_avalcredito
import dataflow_pipeline.unificadas.base_acuerdos_avalcredito as base_acuerdos_avalcredito
import dataflow_pipeline.unificadas.base_acuerdos_coopantex as base_acuerdos_coopantex
import dataflow_pipeline.unificadas.base_estrategia_beam as base_estrategia_beam
import dataflow_pipeline.unificadas.base_asignacion_diaria_epm_beam as base_asignacion_diaria_epm_beam
import dataflow_pipeline.unificadas.base_asignacion_epm_beam as base_asignacion_epm_beam
import dataflow_pipeline.unificadas.base_gestiones_epm_beam as base_gestiones_epm_beam
import dataflow_pipeline.unificadas.base_acuerdos_epm_beam as base_acuerdos_epm_beam
import dataflow_pipeline.unificadas.base_gestiones_banco_agricola_beam as base_gestiones_banco_agricola_beam
import dataflow_pipeline.unificadas.base_gestiones_virtuales_banco_agricola_beam as base_gestiones_virtuales_banco_agricola_beam
import dataflow_pipeline.unificadas.pagos_beam as pagos_beam
import cloud_storage_controller.cloud_storage_controller as gcscontroller
import os
import time
import socket
import _mssql
import datetime
import sys
#coding: utf-8
unificadas_api = Blueprint('unificadas_api', __name__)
fileserver_baseroute = ("//192.168.20.87", "/media")[socket.gethostname()=="contentobi"]
@unificadas_api.route("/segmento")
def segmento():
reload(sys)
sys.setdefaultencoding('utf8')
SERVER="192.168.20.63\MV"
USER="DP_USER"
PASSWORD="DPUSER12062020*"
DATABASE="Mirror_UN1002XZCVBN"
TABLE_DB = "dbo.Tb_segmentos"
HOY = datetime.datetime.today().strftime('%Y-%m-%d')
    # Connect to the DB and fetch the records
conn = _mssql.connect(server=SERVER, user=USER, password=PASSWORD, database=DATABASE)
conn.execute_query('SELECT Id_Segmento,Nombre_Segmento,Fecha_Creacion,Usuario_Creacion,Estado FROM ' + TABLE_DB )
# conn.execute_query('SELECT Id_Gestion,Id_Causal,Fecha_Seguimiento,Id_Usuario,Valor_Obligacion,Id_Docdeu, Nota FROM ' + TABLE_DB + ' where CAST(Fecha_Seguimiento AS date) >= CAST(' + "'2019-02-01' as DATE) ")
cloud_storage_rows = ""
    # Records in this table may contain embedded line breaks and semicolons
for row in conn:
text_row = ""
text_row += str(row['Id_Segmento']).encode('utf-8') + "|"
text_row += str(row['Nombre_Segmento']).encode('utf-8') + "|"
text_row += str(row['Fecha_Creacion']).encode('utf-8') + "|"
text_row += str(row['Usuario_Creacion']).encode('utf-8') + "|"
text_row += str(row['Estado']).encode('utf-8') + "|"
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "Segmento/Unificadas_Segmento" + ".csv"
    # Once the data is built locally, write the file to the Cloud Storage bucket
gcscontroller.create_file(filename, cloud_storage_rows, "ct-unificadas")
try:
deleteQuery = "DELETE FROM `contento-bi.unificadas.segmentos` WHERE 1=1"
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
except:
print("no se pudo eliminar")
    # First delete all records containing that date
# time.sleep(60)
flowAnswer = unificadas_segmentos_beam.run()
# time.sleep(60)
    # Keep the Cloud Storage path in a variable so the file can be deleted afterwards
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-unificadas')
blob = bucket.blob("Segmento/Unificadas_Segmento" + ".csv")
    # Delete the file at that path
blob.delete()
# return jsonify(flowAnswer), 200
return "Segmento cargado" + "flowAnswer"
############################################################
@unificadas_api.route("/campana")
def campana():
reload(sys)
sys.setdefaultencoding('utf8')
SERVER="192.168.20.63\MV"
USER="DP_USER"
PASSWORD="DPUSER12062020*"
DATABASE="Mirror_UN1002XZCVBN"
TABLE_DB = "dbo.Tb_Campanas"
HOY = datetime.datetime.today().strftime('%Y-%m-%d')
    # Connect to the DB and fetch the records
conn = _mssql.connect(server=SERVER, user=USER, password=PASSWORD, database=DATABASE)
conn.execute_query('SELECT Id_Campana,Nombre_Campana,Codigo_Campana,Id_UEN,Fecha_Creacion,Estado FROM ' + TABLE_DB )
# conn.execute_query('SELECT Id_Gestion,Id_Causal,Fecha_Seguimiento,Id_Usuario,Valor_Obligacion,Id_Docdeu, Nota FROM ' + TABLE_DB + ' where CAST(Fecha_Seguimiento AS date) >= CAST(' + "'2019-02-01' as DATE) ")
cloud_storage_rows = ""
    # Records in this table may contain embedded line breaks and semicolons
for row in conn:
text_row = ""
text_row += str(row['Id_Campana']).encode('utf-8') + "|"
text_row += str(row['Nombre_Campana']).encode('utf-8') + "|"
text_row += str(row['Codigo_Campana']).encode('utf-8') + "|"
text_row += str(row['Id_UEN']).encode('utf-8') + "|"
text_row += str(row['Fecha_Creacion']).encode('utf-8') + "|"
text_row += str(row['Estado']).encode('utf-8') + "|"
# text_row += str(row['Logo']).encode('utf-8') + "|"
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "campanas/Unificadas_campanas" + ".csv"
    # Once the data is built locally, write the file to the Cloud Storage bucket
gcscontroller.create_file(filename, cloud_storage_rows, "ct-unificadas")
try:
deleteQuery = "DELETE FROM `contento-bi.unificadas.Campanas` WHERE 1=1"
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
except:
print("no se pudo eliminar")
    # First delete all records containing that date
# time.sleep(60)
flowAnswer = unificadas_campanas_beam.run()
# time.sleep(180)
    # Keep the Cloud Storage path in a variable so the file can be deleted afterwards
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-unificadas')
blob = bucket.blob("campanas/Unificadas_campanas" + ".csv")
    # Delete the file at that path
blob.delete()
# return jsonify(flowAnswer), 200
return "Campana cargada" + "flowAnswer"
####################################################
@unificadas_api.route("/codigo_ges")
def codigo_ges():
reload(sys)
sys.setdefaultencoding('utf8')
SERVER="192.168.20.63\MV"
USER="DP_USER"
PASSWORD="DPUSER12062020*"
DATABASE="Mirror_UN1002XZCVBN"
TABLE_DB = "dbo.Tb_Cod_Gestion"
HOY = datetime.datetime.today().strftime('%Y-%m-%d')
    # Connect to the DB and fetch the records
conn = _mssql.connect(server=SERVER, user=USER, password=PASSWORD, database=DATABASE)
conn.execute_query('SELECT Id_Cod_Gestion,Nombre_Codigo,Descripcion,Fecha_Creacion,Usuario_gestion,Estado FROM ' + TABLE_DB )
# conn.execute_query('SELECT Id_Gestion,Id_Causal,Fecha_Seguimiento,Id_Usuario,Valor_Obligacion,Id_Docdeu, Nota FROM ' + TABLE_DB + ' where CAST(Fecha_Seguimiento AS date) >= CAST(' + "'2019-02-01' as DATE) ")
cloud_storage_rows = ""
    # Records in this table may contain embedded line breaks and semicolons
for row in conn:
text_row = ""
text_row += str(row['Id_Cod_Gestion']).encode('utf-8') + "|"
text_row += str(row['Nombre_Codigo']).encode('utf-8') + "|"
text_row += str(row['Descripcion']).encode('utf-8') + "|"
text_row += str(row['Fecha_Creacion']).encode('utf-8') + "|"
text_row += str(row['Usuario_gestion']).encode('utf-8') + "|"
text_row += str(row['Estado']).encode('utf-8') + "|"
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "codigos_gestion/Unificadas_cod_ges" + ".csv"
    # Once the data is built locally, write the file to the Cloud Storage bucket
gcscontroller.create_file(filename, cloud_storage_rows, "ct-unificadas")
try:
deleteQuery = "DELETE FROM `contento-bi.unificadas.Codigos_Gestion` WHERE 5=5"
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
except:
print("no se pudo eliminar")
    # First delete all records containing that date
# time.sleep(60)
flowAnswer = unificadas_codigos_gestion_beam.run()
# time.sleep(180)
    # Keep the Cloud Storage path in a variable so the file can be deleted afterwards
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-unificadas')
blob = bucket.blob("codigos_gestion/Unificadas_cod_ges" + ".csv")
    # Delete the file at that path
blob.delete()
# return jsonify(flowAnswer), 200
return "codigo_ges cargada" + "flowAnswer"
#####################################################################
@unificadas_api.route("/codigos")
def codigos():
reload(sys)
sys.setdefaultencoding('utf8')
SERVER="192.168.20.63\MV"
USER="DP_USER"
PASSWORD="DPUSER12062020*"
DATABASE="Mirror_UN1002XZCVBN"
TABLE_DB = "dbo.Tb_Cod_Causal"
HOY = datetime.datetime.today().strftime('%Y-%m-%d')
    # Connect to the DB and fetch the records
conn = _mssql.connect(server=SERVER, user=USER, password=PASSWORD, database=DATABASE)
conn.execute_query('SELECT Id_Cod_Causal,Nombre_Causal,Descripcion,Fecha_Creacion,Usuario_Creacion,Estado FROM ' + TABLE_DB )
# conn.execute_query('SELECT Id_Gestion,Id_Causal,Fecha_Seguimiento,Id_Usuario,Valor_Obligacion,Id_Docdeu, Nota FROM ' + TABLE_DB + ' where CAST(Fecha_Seguimiento AS date) >= CAST(' + "'2019-02-01' as DATE) ")
cloud_storage_rows = ""
    # Records in this table may contain embedded line breaks and semicolons
for row in conn:
text_row = ""
text_row += str(row['Id_Cod_Causal']).encode('utf-8') + "|"
text_row += str(row['Nombre_Causal']).encode('utf-8') + "|"
text_row += str(row['Descripcion']).encode('utf-8') + "|"
text_row += str(row['Fecha_Creacion']).encode('utf-8') + "|"
text_row += str(row['Usuario_Creacion']).encode('utf-8') + "|"
text_row += str(row['Estado']).encode('utf-8') + "|"
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "codigos_causal/Unificadas_cod_causal" + ".csv"
    # Once the data is built locally, write the file to the Cloud Storage bucket
gcscontroller.create_file(filename, cloud_storage_rows, "ct-unificadas")
try:
deleteQuery = "DELETE FROM `contento-bi.unificadas.Codigos_Causal` WHERE 1=1"
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
except:
print("no se pudo eliminar")
    # First delete all records containing that date
# time.sleep(60)
flowAnswer = unificadas_codigos_causal_beam.run()
# time.sleep(180)
    # Keep the Cloud Storage path in a variable so the file can be deleted afterwards
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-unificadas')
blob = bucket.blob("codigos_causal/Unificadas_cod_causal" + ".csv")
    # Delete the file at that path
blob.delete()
# return jsonify(flowAnswer), 200
return "codigos cargados" + "flowAnswer"
####################################################
@unificadas_api.route("/tipificaciones")
def tipificaciones():
reload(sys)
sys.setdefaultencoding('utf8')
SERVER="192.168.20.63\MV"
USER="DP_USER"
PASSWORD="DPUSER12062020*"
DATABASE="Mirror_UN1002XZCVBN"
TABLE_DB = "dbo.Rel_Tipificaciones"
HOY = datetime.datetime.today().strftime('%Y-%m-%d')
    # Connect to the DB and fetch the records
conn = _mssql.connect(server=SERVER, user=USER, password=PASSWORD, database=DATABASE)
conn.execute_query('SELECT Id_Tipif,Id_Campana,Id_Cod_Gestion,Id_Cod_Causal,Id_Cod_SubCausal,Cod_Homologado,Cod_Homologado_Causal,AdicionalOne,AdicionalTwo,AdicionalTree,Fecha_Creacion,Plantilla,Plantilla1,Estado,Usuario_Gestor,HIT FROM ' + TABLE_DB )
# conn.execute_query('SELECT Id_Gestion,Id_Causal,Fecha_Seguimiento,Id_Usuario,Valor_Obligacion,Id_Docdeu, Nota FROM ' + TABLE_DB + ' where CAST(Fecha_Seguimiento AS date) >= CAST(' + "'2019-02-01' as DATE) ")
cloud_storage_rows = ""
    # Records in this table may contain embedded line breaks and semicolons
for row in conn:
text_row = ""
text_row += str(row['Id_Tipif']).encode('utf-8') + "|"
text_row += str(row['Id_Campana']).encode('utf-8') + "|"
text_row += str(row['Id_Cod_Gestion']).encode('utf-8') + "|"
text_row += str(row['Id_Cod_Causal']).encode('utf-8') + "|"
text_row += str(row['Id_Cod_SubCausal']).encode('utf-8') + "|"
text_row += str(row['Cod_Homologado']).encode('utf-8') + "|"
text_row += str(row['Cod_Homologado_Causal']).encode('utf-8') + "|"
text_row += str(row['AdicionalOne']).encode('utf-8') + "|"
text_row += str(row['AdicionalTwo']).encode('utf-8') + "|"
text_row += str(row['AdicionalTree']).encode('utf-8') + "|"
text_row += str(row['Fecha_Creacion']).encode('utf-8') + "|"
text_row += str(row['Plantilla']).encode('utf-8') + "|"
text_row += str(row['Plantilla1']).encode('utf-8') + "|"
text_row += str(row['Estado']).encode('utf-8') + "|"
text_row += str(row['Usuario_Gestor']).encode('utf-8') + "|"
text_row += str(row['HIT']).encode('utf-8') + "|"
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "tipificaciones/Unificadas_tipificaciones" + ".csv"
    # Once the data is built locally, write the file to the Cloud Storage bucket
gcscontroller.create_file(filename, cloud_storage_rows, "ct-unificadas")
try:
deleteQuery = "DELETE FROM `contento-bi.unificadas.Tipificaciones` WHERE 2=2"
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
except:
print("no se pudo eliminar")
    # First delete all records containing that date
# time.sleep(60)
flowAnswer = unificadas_tipificaciones_beam.run()
# time.sleep(180)
    # Keep the Cloud Storage path in a variable so the file can be deleted afterwards
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-unificadas')
blob = bucket.blob("tipificaciones/Unificadas_tipificaciones" + ".csv")
    # Delete the file at that path
blob.delete()
# return jsonify(flowAnswer), 200
return "tipificaciones cargadas" + "flowAnswer"
####################################################
@unificadas_api.route("/gestiones")
def gestiones():
reload(sys)
sys.setdefaultencoding('utf8')
SERVER="192.168.20.63\MV"
USER="DP_USER"
PASSWORD="DPUSER12062020*"
DATABASE="Mirror_UN1002XZCVBN"
TABLE_DB = "dbo.Tb_Gestion"
HOY = datetime.datetime.today().strftime('%Y-%m-%d')
    # Connect to the DB and fetch the records
conn = _mssql.connect(server=SERVER, user=USER, password=PASSWORD, database=DATABASE)
# conn.execute_query('SELECT Id_Gestion,Documento,Num_Obligacion,Id_Data,Id_Cod_Gestion,Id_Cod_Causal,Id_Cod_SubCausal,Id_Bot,Vlr_Promesa,Fecha_Promesa,Usuario_Gestor,Fecha_Gestion FROM ' + TABLE_DB + ' where CAST(Fecha_Gestion AS date) = CAST(GETDATE() as DATE) ')
conn.execute_query("SELECT Id_Gestion,Documento,Num_Obligacion,Id_Campana,Id_Segmento,Id_Cod_Gestion,Id_Cod_Causal,Id_Cod_SubCausal,Vlr_Promesa,Fecha_Promesa,Num_Cuotas,Telefono,Fecha_Gestion,Usuario_Gestor,Opt_1,Opt_2,Opt_3,Opt_4,Opt_5,Cuadrante,Modalidad_Pago FROM " + TABLE_DB + " WHERE CONVERT(DATE, Fecha_Gestion) = CONVERT(DATE,GETDATE())")
# conn.execute_query("SELECT Id_Gestion,Documento,Num_Obligacion,Id_Campana,Id_Segmento,Id_Cod_Gestion,Id_Cod_Causal,Id_Cod_SubCausal,Vlr_Promesa,Fecha_Promesa,Num_Cuotas,Telefono,Fecha_Gestion,Usuario_Gestor,Opt_1,Opt_2,Opt_3,Opt_4,Opt_5,Cuadrante,Modalidad_Pago FROM " + TABLE_DB + " WHERE CONVERT(DATE, Fecha_Gestion) = CAST('2020-07-04' AS DATE)")
# conn.execute_query('SELECT Id_Gestion,Id_Causal,Fecha_Seguimiento,Id_Usuario,Valor_Obligacion,Id_Docdeu, Nota FROM ' + TABLE_DB + ' where CAST(Fecha_Seguimiento AS date) >= CAST(' + "'2019-02-01' as DATE) ")
cloud_storage_rows = ""
    # Records in this table may contain embedded line breaks and semicolons
for row in conn:
text_row = ""
text_row += str(row['Id_Gestion']).encode('utf-8') + "|"
text_row += str(row['Documento']).encode('utf-8') + "|"
text_row += str(row['Num_Obligacion']).encode('utf-8') + "|"
text_row += str(row['Id_Campana']).encode('utf-8') + "|"
text_row += str(row['Id_Segmento']).encode('utf-8') + "|"
text_row += str(row['Id_Cod_Gestion']).encode('utf-8') + "|"
text_row += str(row['Id_Cod_Causal']).encode('utf-8') + "|"
text_row += str(row['Id_Cod_SubCausal']).encode('utf-8') + "|"
# text_row += str(row['Observacion']).encode('utf-8') + "|"
text_row += str(row['Vlr_Promesa']).encode('utf-8') + "|"
text_row += str(row['Fecha_Promesa']).encode('utf-8') + "|"
text_row += str(row['Num_Cuotas']).encode('utf-8') + "|"
text_row += str(row['Telefono']).encode('utf-8') + "|"
text_row += str(row['Fecha_Gestion']).encode('utf-8') + "|"
text_row += str(row['Usuario_Gestor']).encode('utf-8') + "|"
text_row += str(row['Opt_1']).encode('utf-8') + "|"
text_row += str(row['Opt_2']).encode('utf-8') + "|"
text_row += str(row['Opt_3']).encode('utf-8') + "|"
text_row += str(row['Opt_4']).encode('utf-8') + "|"
text_row += str(row['Opt_5']).encode('utf-8') + "|"
text_row += str(row['Cuadrante']).encode('utf-8') + "|"
text_row += str(row['Modalidad_Pago']).encode('utf-8') + "|"
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "gestiones/Unificadas_gestiones" + ".csv"
    # Once the data is built locally, write the file to the Cloud Storage bucket
gcscontroller.create_file(filename, cloud_storage_rows, "ct-unificadas")
try:
deleteQuery = "DELETE FROM `contento-bi.unificadas.Gestiones` WHERE CAST(SUBSTR(Fecha_Gestion,0,10) AS DATE) = CURRENT_DATE()"
# deleteQuery = "DELETE FROM `contento-bi.unificadas.Gestiones` WHERE CAST(SUBSTR(Fecha_Gestion,0,10) AS DATE) = '2020-07-04'"
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
except:
print("no se pudo eliminar")
    # First delete all records containing that date
# time.sleep(60)
flowAnswer = unificadas_gestiones_beam.run()
# time.sleep(60)
    # Keep the Cloud Storage path in a variable so the file can be deleted afterwards
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-unificadas')
blob = bucket.blob("gestiones/Unificadas_gestiones" + ".csv")
    # Delete the file at that path
blob.delete()
# return jsonify(flowAnswer), 200
return "gestiones cargadas" + "flowAnswer"
##################################################
@unificadas_api.route("/usuarios")
def usuarios():
reload(sys)
sys.setdefaultencoding('utf8')
SERVER="192.168.20.63\MV"
USER="DP_USER"
PASSWORD="DPUSER12062020*"
DATABASE="Mirror_UN1002XZCVBN"
TABLE_DB = "dbo.Tb_usuarios"
HOY = datetime.datetime.today().strftime('%Y-%m-%d')
    # Connect to the DB and fetch the records
conn = _mssql.connect(server=SERVER, user=USER, password=PASSWORD, database=DATABASE)
conn.execute_query('SELECT Id_Usuario,Documento_Usuario,Nombre_usuario,Estado,Fecha_Cargue,Usuario_Creacion,Extension,Id_Perfil FROM ' + TABLE_DB )
# conn.execute_query('SELECT Id_Gestion,Id_Causal,Fecha_Seguimiento,Id_Usuario,Valor_Obligacion,Id_Docdeu, Nota FROM ' + TABLE_DB + ' where CAST(Fecha_Seguimiento AS date) >= CAST(' + "'2019-02-01' as DATE) ")
cloud_storage_rows = ""
    # Records in this table may contain embedded line breaks and semicolons
for row in conn:
text_row = ""
text_row += str(row['Id_Usuario']).encode('utf-8') + "|"
text_row += str(row['Documento_Usuario']).encode('utf-8') + "|"
text_row += str(row['Nombre_usuario']).encode('utf-8') + "|"
text_row += str(row['Estado']).encode('utf-8') + "|"
text_row += str(row['Fecha_Cargue']).encode('utf-8') + "|"
text_row += str(row['Usuario_Creacion']).encode('utf-8') + "|"
text_row += str(row['Extension']).encode('utf-8') + "|"
text_row += str(row['Id_Perfil']).encode('utf-8') + "|"
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "usuarios/Unificadas_usuarios" + ".csv"
    # Once the data is built locally, write the file to the Cloud Storage bucket
gcscontroller.create_file(filename, cloud_storage_rows, "ct-unificadas")
try:
deleteQuery = "DELETE FROM `contento-bi.unificadas.Usuarios` WHERE 3=3"
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
except:
print("no se pudo eliminar")
    # First delete all records containing that date
# time.sleep(60)
flowAnswer = unificadas_usuarios_beam.run()
# time.sleep(60)
    # Keep the Cloud Storage path in a variable so the file can be deleted afterwards
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-unificadas')
blob = bucket.blob("usuarios/Unificadas_usuarios" + ".csv")
    # Delete the file at that path
blob.delete()
# return jsonify(flowAnswer), 200
return "usuarios cargados" + "flowAnswer"
#########################################
@unificadas_api.route("/clientes")
def clientes():
reload(sys)
sys.setdefaultencoding('utf8')
SERVER="192.168.20.63\MV"
USER="DP_USER"
PASSWORD="DPUSER12062020*"
DATABASE="Mirror_UN1002XZCVBN"
TABLE_DB = "dbo.Tb_Clientes"
HOY = datetime.datetime.today().strftime('%Y-%m-%d')
    # Connect to the DB and fetch the records
conn = _mssql.connect(server=SERVER, user=USER, password=PASSWORD, database=DATABASE)
conn.execute_query('SELECT Id_Cliente,Documento,Nombre_Cliente,Apellidos_Cliente,Fecha_Creacion FROM ' + TABLE_DB )
# conn.execute_query('SELECT Id_Gestion,Id_Causal,Fecha_Seguimiento,Id_Usuario,Valor_Obligacion,Id_Docdeu, Nota FROM ' + TABLE_DB + ' where CAST(Fecha_Seguimiento AS date) >= CAST(' + "'2019-02-01' as DATE) ")
cloud_storage_rows = ""
    # Records in this table may contain embedded line breaks and semicolons
for row in conn:
text_row = ""
text_row += str(row['Id_Cliente']).encode('utf-8') + "|"
text_row += str(row['Documento']).encode('utf-8') + "|"
text_row += str(row['Nombre_Cliente']).encode('utf-8') + "|"
text_row += str(row['Apellidos_Cliente']).encode('utf-8') + "|"
text_row += str(row['Fecha_Creacion']).encode('utf-8') + "|"
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "clientes/Unificadas_clientes" + ".csv"
    # Once the data is built locally, write the file to the Cloud Storage bucket
gcscontroller.create_file(filename, cloud_storage_rows, "ct-unificadas")
try:
deleteQuery = "DELETE FROM `contento-bi.unificadas.Clientes` WHERE CAST(SUBSTR(Fecha_Creacion,0,10) AS DATE) = CURRENT_DATE()"
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
except:
print("no se pudo eliminar")
    # First delete all records containing that date
# time.sleep(60)
flowAnswer = unificadas_clientes_beam.run()
# time.sleep(60)
    # Keep the Cloud Storage path in a variable so the file can be deleted afterwards
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-unificadas')
blob = bucket.blob("clientes/Unificadas_clientes" + ".csv")
    # Delete the file at that path
blob.delete()
# return jsonify(flowAnswer), 200
return "clientes cargados " + "flowAnswer"
################################################
@unificadas_api.route("/segmento_camp")
def segmento_camp():
reload(sys)
sys.setdefaultencoding('utf8')
SERVER="192.168.20.63\MV"
USER="DP_USER"
PASSWORD="DPUSER12062020*"
DATABASE="Mirror_UN1002XZCVBN"
TABLE_DB = "dbo.Rel_Seg_Campanas"
HOY = datetime.datetime.today().strftime('%Y-%m-%d')
    # Connect to the DB and fetch the records
conn = _mssql.connect(server=SERVER, user=USER, password=PASSWORD, database=DATABASE)
conn.execute_query('SELECT Id_Rel_Seg_Campana,Id_Campana,Id_Segmento,Id_Bot,Fecha_Creacion,Usuario_Creacion,Estado FROM ' + TABLE_DB )
# conn.execute_query('SELECT Id_Gestion,Id_Causal,Fecha_Seguimiento,Id_Usuario,Valor_Obligacion,Id_Docdeu, Nota FROM ' + TABLE_DB + ' where CAST(Fecha_Seguimiento AS date) >= CAST(' + "'2019-02-01' as DATE) ")
cloud_storage_rows = ""
    # Records in this table may contain embedded line breaks and semicolons
for row in conn:
text_row = ""
text_row += str(row['Id_Rel_Seg_Campana']).encode('utf-8') + "|"
text_row += str(row['Id_Campana']).encode('utf-8') + "|"
text_row += str(row['Id_Segmento']).encode('utf-8') + "|"
text_row += str(row['Id_Bot']).encode('utf-8') + "|"
text_row += str(row['Fecha_Creacion']).encode('utf-8') + "|"
text_row += str(row['Usuario_Creacion']).encode('utf-8') + "|"
text_row += str(row['Estado']).encode('utf-8') + "|"
# text_row += str(row['Logo']).encode('utf-8') + "|"
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "seg_campanas/Unificadas_seg_campanas" + ".csv"
    # Once the data is built locally, write the file to the Cloud Storage bucket
gcscontroller.create_file(filename, cloud_storage_rows, "ct-unificadas")
try:
deleteQuery = "DELETE FROM `contento-bi.unificadas.Seg_campanas` WHERE 4=4"
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
except:
print("no se pudo eliminar")
    # First delete all records containing that date
# time.sleep(60)
flowAnswer = unificadas_seg_campanas_beam.run()
# time.sleep(180)
    # Keep the Cloud Storage path in a variable so the file can be deleted afterwards
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-unificadas')
blob = bucket.blob("seg_campanas/Unificadas_seg_campanas" + ".csv")
    # Delete the file at that path
blob.delete()
# return jsonify(flowAnswer), 200
return "Seg_Campana cargada" + "flowAnswer"
#####################################################################
@unificadas_api.route("/data_uni")
def data_uni():
reload(sys)
sys.setdefaultencoding('utf8')
SERVER="192.168.20.63\MV"
USER="DP_USER"
PASSWORD="DPUSER12062020*"
DATABASE="Mirror_UN1002XZCVBN"
TABLE_DB = "dbo.Tb_Data"
HOY = datetime.datetime.today().strftime('%Y-%m-%d')
    # Connect to the DB and fetch the records
conn = _mssql.connect(server=SERVER, user=USER, password=PASSWORD, database=DATABASE)
conn.execute_query('SELECT Id_Data,Id_Campana,Zona,Documento,Cod_Interno,Tipo_Comprador,Customer_Class,Cupo,Num_Obligacion,Vlr_Factura,Fecha_Factura,Fecha_Vencimiento,Vlr_Saldo_Cartera,Dias_vencimiento,Campana_Orig,Ult_Campana,Codigo,Abogado,Division,Pais,Fecha_Prox_Conferencia,Cod_Gestion,Fecha_Gestion,Fecha_Promesa_Pago,Actividad_Economica,Saldo_Capital,Num_Cuotas,Num_Cuotas_Pagadas,Num_Cuotas_Faltantes,Num_Cuotas_Mora,Cant_Veces_Mora,Fecha_Ult_Pago,Saldo_Total_Vencido,Cod_Consecionario,Concesionario,Cod_Gestion_Ant,Grabador,Estado,Fecha_Cargue,Usuario_Cargue,Interes_Mora,Vlr_Cuotas_Vencidas FROM ' + TABLE_DB )
# conn.execute_query('SELECT Id_Gestion,Id_Causal,Fecha_Seguimiento,Id_Usuario,Valor_Obligacion,Id_Docdeu, Nota FROM ' + TABLE_DB + ' where CAST(Fecha_Seguimiento AS date) >= CAST(' + "'2019-02-01' as DATE) ")
cloud_storage_rows = ""
    # Records in this table may contain embedded line breaks and semicolons
for row in conn:
text_row = ""
text_row += str(row['Id_Data']).encode('utf-8') + "|"
text_row += str(row['Id_Campana']).encode('utf-8') + "|"
text_row += str(row['Documento']).encode('utf-8') + "|"
text_row += str(row['Cod_Interno']).encode('utf-8') + "|"
text_row += str(row['Tipo_Comprador']).encode('utf-8') + "|"
text_row += str(row['Customer_Class']).encode('utf-8') + "|"
text_row += str(row['Cupo']).encode('utf-8') + "|"
text_row += str(row['Num_Obligacion']).encode('utf-8') + "|"
text_row += str(row['Vlr_Factura']).encode('utf-8') + "|"
text_row += str(row['Fecha_Factura']).encode('utf-8') + "|"
text_row += str(row['Fecha_Vencimiento']).encode('utf-8') + "|"
text_row += str(row['Vlr_Saldo_Cartera']).encode('utf-8') + "|"
text_row += str(row['Dias_vencimiento']).encode('utf-8') + "|"
text_row += str(row['Campana_Orig']).encode('utf-8') + "|"
text_row += str(row['Ult_Campana']).encode('utf-8') + "|"
text_row += str(row['Codigo']).encode('utf-8') + "|"
text_row += str(row['Abogado']).encode('utf-8') + "|"
text_row += str(row['Division']).encode('utf-8') + "|"
text_row += str(row['Pais']).encode('utf-8') + "|"
text_row += str(row['Fecha_Prox_Conferencia']).encode('utf-8') + "|"
text_row += str(row['Cod_Gestion']).encode('utf-8') + "|"
text_row += str(row['Fecha_Gestion']).encode('utf-8') + "|"
text_row += str(row['Fecha_Promesa_Pago']).encode('utf-8') + "|"
text_row += str(row['Actividad_Economica']).encode('utf-8') + "|"
text_row += str(row['Saldo_Capital']).encode('utf-8') + "|"
text_row += str(row['Num_Cuotas']).encode('utf-8') + "|"
text_row += str(row['Num_Cuotas_Pagadas']).encode('utf-8') + "|"
text_row += str(row['Num_Cuotas_Faltantes']).encode('utf-8') + "|"
text_row += str(row['Num_Cuotas_Mora']).encode('utf-8') + "|"
text_row += str(row['Cant_Veces_Mora']).encode('utf-8') + "|"
text_row += str(row['Fecha_Ult_Pago']).encode('utf-8') + "|"
text_row += str(row['Saldo_Total_Vencido']).encode('utf-8') + "|"
text_row += str(row['Cod_Consecionario']).encode('utf-8') + "|"
text_row += str(row['Concesionario']).encode('utf-8') + "|"
text_row += str(row['Cod_Gestion_Ant']).encode('utf-8') + "|"
text_row += str(row['Grabador']).encode('utf-8') + "|"
text_row += str(row['Estado']).encode('utf-8') + "|"
text_row += str(row['Fecha_Cargue']).encode('utf-8') + "|"
text_row += str(row['Usuario_Cargue']).encode('utf-8') + "|"
text_row += str(row['Interes_Mora']).encode('utf-8') + "|"
text_row += str(row['Vlr_Cuotas_Vencidas']).encode('utf-8') + "|"
text_row += "\n"
cloud_storage_rows += text_row
conn.close()
filename = "data_unificadas/Unificadas_data" + ".csv"
    # Once the data is built locally, write the file to the Cloud Storage bucket
gcscontroller.create_file(filename, cloud_storage_rows, "ct-unificadas")
try:
deleteQuery = "DELETE FROM `contento-bi.unificadas.Data` WHERE CAST(SUBSTR(Fecha_Creacion,0,10) AS DATE) = CURRENT_DATE()"
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result()
except:
print("no se pudo eliminar")
    # First delete all records containing that date
# time.sleep(60)
flowAnswer = unificadas_data_beam.run()
# time.sleep(180)
    # Keep the Cloud Storage path in a variable so the file can be deleted afterwards
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-unificadas')
blob = bucket.blob("data_unificadas/Unificadas_data" + ".csv")
    # Delete the file at that path
blob.delete()
# return jsonify(flowAnswer), 200
return "data cargada " + "flowAnswer"
############################## BASE ASIGNACION AVA CREDITOS #######################
@unificadas_api.route("/asignacion_aval")
def asignacion_aval():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Bases asignacion/avalcreditos/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-unificadas')
# Upload the file to Cloud Storage before sending it to Dataflow for processing
blob = bucket.blob('bases_asignacion/avalcreditos/' + archivo)
blob.upload_from_filename(local_route + archivo)
# Once the file is in Cloud Storage, delete the matching records from BigQuery
deleteQuery = "DELETE FROM `contento-bi.unificadas.base_asignacion_avalcredito` WHERE fecha = '" + mifecha + "'"
# First remove every record that carries this date
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result() # Run the BigQuery delete job
# With BigQuery cleaned up and the file uploaded, run the Dataflow job
mensaje = base_asignacion_avalcreditos.run('gs://ct-unificadas/bases_asignacion/avalcreditos/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Bases asignacion/avalcreditos/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo el cargue correctamente"
response["status"] = True
return jsonify(response), response["code"]
################################## BASE ASIGNACION COOPANTEX ###################################
@unificadas_api.route("/asignacion_coopantex")
def asignacion_coopantex():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Bases asignacion/coopantex/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-unificadas')
# Upload the file to Cloud Storage before sending it to Dataflow for processing
blob = bucket.blob('bases_asignacion/coopantex/' + archivo)
blob.upload_from_filename(local_route + archivo)
# Once the file is in Cloud Storage, delete the matching records from BigQuery
deleteQuery = "DELETE FROM `contento-bi.unificadas.base_asignacion_coopantex` WHERE fecha = '" + mifecha + "'"
# First remove every record that carries this date
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result() # Run the BigQuery delete job
# With BigQuery cleaned up and the file uploaded, run the Dataflow job
mensaje = base_asignacion_coopantex.run('gs://ct-unificadas/bases_asignacion/coopantex/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Bases asignacion/coopantex/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo el cargue correctamente"
response["status"] = True
return jsonify(response), response["code"]
############################## BASE ASIGNACION DIARIA AVAL CREDITOS #######################
@unificadas_api.route("/asignacion_aval_diaria")
def asignacion_aval_diaria():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Bases asignacion diaria/avalcreditos/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-unificadas')
# Upload the file to Cloud Storage before sending it to Dataflow for processing
blob = bucket.blob('bases_asignacion_diaria/avalcreditos/' + archivo)
blob.upload_from_filename(local_route + archivo)
# Once the file is in Cloud Storage, delete the matching records from BigQuery
deleteQuery = "DELETE FROM `contento-bi.unificadas.base_asignacion_diaria_avalcredito` WHERE fecha = '" + mifecha + "'"
# First remove every record that carries this date
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result() # Run the BigQuery delete job
# With BigQuery cleaned up and the file uploaded, run the Dataflow job
mensaje = base_asignacion_diaria_avalcreditos.run('gs://ct-unificadas/bases_asignacion_diaria/avalcreditos/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Bases asignacion diaria/avalcreditos/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo el cargue correctamente"
response["status"] = True
return jsonify(response), response["code"]
########################################## BASE ASIGNACION DIARIA COOPANTEX #######################################
@unificadas_api.route("/asignacion_diaria_coopantex")
def asignacion_diaria_coopantex():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Bases asignacion diaria/coopantex/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-unificadas')
# Upload the file to Cloud Storage before sending it to Dataflow for processing
blob = bucket.blob('bases_asignacion_diaria/coopantex/' + archivo)
blob.upload_from_filename(local_route + archivo)
# Once the file is in Cloud Storage, delete the matching records from BigQuery
deleteQuery = "DELETE FROM `contento-bi.unificadas.base_asignacion_diaria_coopantex` WHERE fecha = '" + mifecha + "'"
# First remove every record that carries this date
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result() # Run the BigQuery delete job
# With BigQuery cleaned up and the file uploaded, run the Dataflow job
mensaje = base_asignacion_diaria_coopantex.run('gs://ct-unificadas/bases_asignacion_diaria/coopantex/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Bases asignacion diaria/coopantex/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo el cargue correctamente"
response["status"] = True
return jsonify(response), response["code"]
################################# BASE GESTIONES COOPANTEX ##############################
@unificadas_api.route("/gestiones_coopantex")
def gestiones_coopantex():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Gestiones/coopantex/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-unificadas')
# Upload the file to Cloud Storage before sending it to Dataflow for processing
blob = bucket.blob('gestiones/coopantex/' + archivo)
blob.upload_from_filename(local_route + archivo)
# Once the file is in Cloud Storage, delete the matching records from BigQuery
deleteQuery = "DELETE FROM `contento-bi.unificadas.base_gestiones_coopantex` WHERE fecha = '" + mifecha + "'"
# First remove every record that carries this date
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result() # Run the BigQuery delete job
# With BigQuery cleaned up and the file uploaded, run the Dataflow job
mensaje = base_gestiones_coopantex.run('gs://ct-unificadas/gestiones/coopantex/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Gestiones/coopantex/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo el cargue correctamente"
response["status"] = True
return jsonify(response), response["code"]
############################ BASE GESTIONES AVALCREDITOS ###################################
@unificadas_api.route("/gestiones_avalcredito")
def gestiones_avalcredito():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Gestiones/avalcredito/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-unificadas')
# Upload the file to Cloud Storage before sending it to Dataflow for processing
blob = bucket.blob('gestiones/avalcredito/' + archivo)
blob.upload_from_filename(local_route + archivo)
# Once the file is in Cloud Storage, delete the matching records from BigQuery
deleteQuery = "DELETE FROM `contento-bi.unificadas.base_gestiones_avalcredito` WHERE fecha = '" + mifecha + "'"
# First remove every record that carries this date
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result() # Run the BigQuery delete job
# With BigQuery cleaned up and the file uploaded, run the Dataflow job
mensaje = base_gestiones_avalcredito.run('gs://ct-unificadas/gestiones/avalcredito/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Gestiones/avalcredito/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo el cargue correctamente"
response["status"] = True
return jsonify(response), response["code"]
############################ ACUERDOS DE PAGO AVALCREDITOS ################################
@unificadas_api.route("/acuerdos_avalcredito")
def acuerdos_avalcredito():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Acuerdos/avalcredito/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-unificadas')
# Upload the file to Cloud Storage before sending it to Dataflow for processing
blob = bucket.blob('acuerdos/avalcredito/' + archivo)
blob.upload_from_filename(local_route + archivo)
# Once the file is in Cloud Storage, delete the matching records from BigQuery
deleteQuery = "DELETE FROM `contento-bi.unificadas.base_acuerdos_avalcredito` WHERE fecha = '" + mifecha + "'"
# First remove every record that carries this date
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result() # Run the BigQuery delete job
# With BigQuery cleaned up and the file uploaded, run the Dataflow job
mensaje = base_acuerdos_avalcredito.run('gs://ct-unificadas/acuerdos/avalcredito/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Acuerdos/avalcredito/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo el cargue correctamente"
response["status"] = True
return jsonify(response), response["code"]
############################ ACUERDOS DE PAGO COOPANTEX ##############################
@unificadas_api.route("/acuerdos_coopantex")
def acuerdos_coopantex():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Acuerdos/coopantex/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-unificadas')
# Upload the file to Cloud Storage before sending it to Dataflow for processing
blob = bucket.blob('acuerdos/coopantex/' + archivo)
blob.upload_from_filename(local_route + archivo)
# Once the file is in Cloud Storage, delete the matching records from BigQuery
deleteQuery = "DELETE FROM `contento-bi.unificadas.base_acuerdos_coopantex` WHERE fecha = '" + mifecha + "'"
# First remove every record that carries this date
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result() # Run the BigQuery delete job
# With BigQuery cleaned up and the file uploaded, run the Dataflow job
mensaje = base_acuerdos_coopantex.run('gs://ct-unificadas/acuerdos/coopantex/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Acuerdos/coopantex/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo el cargue correctamente"
response["status"] = True
return jsonify(response), response["code"]
########################## CARGUE BASE DE ESTRATEGIAS ##############################
@unificadas_api.route("/estrategia")
def estrategia():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Estrategia/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-unificadas')
# Upload the file to Cloud Storage before sending it to Dataflow for processing
blob = bucket.blob('estrategia/' + archivo)
blob.upload_from_filename(local_route + archivo)
# Once the file is in Cloud Storage, delete the matching records from BigQuery
deleteQuery = "DELETE FROM `contento-bi.unificadas.base_estrategia` WHERE archivo = '" + mifecha + "'"
# First remove every record loaded from this file
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result() # Run the BigQuery delete job
# With BigQuery cleaned up and the file uploaded, run the Dataflow job
mensaje = base_estrategia_beam.run('gs://ct-unificadas/estrategia/' + archivo, mifecha)
if mensaje == "cargo de forma exitosa":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Estrategia/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo el cargue correctamente"
response["status"] = True
return jsonify(response), response["code"]
######################### ASIGNACION DIARIA EPM ###############################
@unificadas_api.route("/asignacion_diaria_epm")
def asignacion_diaria_epm():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Bases asignacion diaria/epm/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-unificadas')
# Upload the file to Cloud Storage before sending it to Dataflow for processing
blob = bucket.blob('bases_asignacion_diaria/epm/' + archivo)
blob.upload_from_filename(local_route + archivo)
# Once the file is in Cloud Storage, delete the matching records from BigQuery
deleteQuery = "DELETE FROM `contento-bi.unificadas.base_asignacion_diaria_epm` WHERE fecha = '" + mifecha + "'"
# First remove every record that carries this date
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result() # Run the BigQuery delete job
# With BigQuery cleaned up and the file uploaded, run the Dataflow job
mensaje = base_asignacion_diaria_epm_beam.run('gs://ct-unificadas/bases_asignacion_diaria/epm/' + archivo, mifecha)
if mensaje == "Corrio sin problemas":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Bases asignacion diaria/epm/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo el cargue correctamente"
response["status"] = True
return jsonify(response), response["code"]
########################## BASE ASIGNACION EPM #######################################
@unificadas_api.route("/asignacion_epm")
def asignacion_epm():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Bases asignacion/epm/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-unificadas')
# Upload the file to Cloud Storage before sending it to Dataflow for processing
blob = bucket.blob('bases_asignacion/epm/' + archivo)
blob.upload_from_filename(local_route + archivo)
# Once the file is in Cloud Storage, delete the matching records from BigQuery
deleteQuery = "DELETE FROM `contento-bi.unificadas.base_asignacion_epm` WHERE fecha = '" + mifecha + "'"
# First remove every record that carries this date
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result() # Run the BigQuery delete job
# With BigQuery cleaned up and the file uploaded, run the Dataflow job
mensaje = base_asignacion_epm_beam.run('gs://ct-unificadas/bases_asignacion/epm/' + archivo, mifecha)
if mensaje == "Corrio sin problemas":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Bases asignacion/epm/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo el cargue correctamente"
response["status"] = True
return jsonify(response), response["code"]
############################ BASE GESTIONES EPM ###############################
@unificadas_api.route("/gestiones_epm")
def gestiones_epm():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Gestiones/epm/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-unificadas')
# Upload the file to Cloud Storage before sending it to Dataflow for processing
blob = bucket.blob('gestiones/epm/' + archivo)
blob.upload_from_filename(local_route + archivo)
# Once the file is in Cloud Storage, delete the matching records from BigQuery
deleteQuery = "DELETE FROM `contento-bi.unificadas.base_gestiones_epm` WHERE fecha = '" + mifecha + "'"
# First remove every record that carries this date
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result() # Run the BigQuery delete job
# With BigQuery cleaned up and the file uploaded, run the Dataflow job
mensaje = base_gestiones_epm_beam.run('gs://ct-unificadas/gestiones/epm/' + archivo, mifecha)
if mensaje == "Corrio sin problemas":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Gestiones/epm/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo el cargue correctamente"
response["status"] = True
return jsonify(response), response["code"]
############################### BASE COMPROMISOS EPM ################################
@unificadas_api.route("/compromisos_epm")
def compromisos_epm():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Acuerdos/epm/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-unificadas')
# Upload the file to Cloud Storage before sending it to Dataflow for processing
blob = bucket.blob('acuerdos/epm/' + archivo)
blob.upload_from_filename(local_route + archivo)
# Once the file is in Cloud Storage, delete the matching records from BigQuery
deleteQuery = "DELETE FROM `contento-bi.unificadas.base_acuerdos_epm` WHERE fecha = '" + mifecha + "'"
# First remove every record that carries this date
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result() # Run the BigQuery delete job
# With BigQuery cleaned up and the file uploaded, run the Dataflow job
mensaje = base_acuerdos_epm_beam.run('gs://ct-unificadas/acuerdos/epm/' + archivo, mifecha)
if mensaje == "Corrio sin problemas":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Acuerdos/epm/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo el cargue correctamente"
response["status"] = True
return jsonify(response), response["code"]
##################################### BASE GESTIONES BANCO AGRICOLA ##################################
@unificadas_api.route("/gestiones_agricola")
def gestiones_agricola():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Gestiones/banco_agricola/gestiones_tek/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-unificadas')
# Upload the file to Cloud Storage before sending it to Dataflow for processing
blob = bucket.blob('gestiones/banco_agricola/gestiones_tek/' + archivo)
blob.upload_from_filename(local_route + archivo)
# Once the file is in Cloud Storage, delete the matching records from BigQuery
deleteQuery = "DELETE FROM `contento-bi.unificadas.base_gestiones_banco_agricola` WHERE fecha = '" + mifecha + "'"
# First remove every record that carries this date
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result() # Run the BigQuery delete job
# With BigQuery cleaned up and the file uploaded, run the Dataflow job
mensaje = base_gestiones_banco_agricola_beam.run('gs://ct-unificadas/gestiones/banco_agricola/gestiones_tek/' + archivo, mifecha)
if mensaje == "Corrio sin problemas":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Gestiones/banco_agricola/gestiones_tek/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo el cargue correctamente"
response["status"] = True
return jsonify(response), response["code"]
################################# BASE GESTIONES VIRTUALES BANCO AGRICOLA ######################################
@unificadas_api.route("/gestiones_virtuales_agricola")
def gestiones_virtuales_agricola():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Gestiones/banco_agricola/gestiones_virtuales/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-unificadas')
# Upload the file to Cloud Storage before sending it to Dataflow for processing
blob = bucket.blob('gestiones/banco_agricola/gestiones_virtuales/' + archivo)
blob.upload_from_filename(local_route + archivo)
# Once the file is in Cloud Storage, delete the matching records from BigQuery
deleteQuery = "DELETE FROM `contento-bi.unificadas.base_gestiones_virtuales_banco_agricola` WHERE fecha = '" + mifecha + "'"
# First remove every record that carries this date
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result() # Run the BigQuery delete job
# With BigQuery cleaned up and the file uploaded, run the Dataflow job
mensaje = base_gestiones_virtuales_banco_agricola_beam.run('gs://ct-unificadas/gestiones/banco_agricola/gestiones_virtuales/' + archivo, mifecha)
if mensaje == "Corrio sin problemas":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Gestiones/banco_agricola/gestiones_virtuales/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo el cargue correctamente"
response["status"] = True
return jsonify(response), response["code"]
############################## ETL CARGUE PAGOS ############################
@unificadas_api.route("/pagos")
def pagos():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Pagos/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[0:]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-unificadas')
# Upload the file to Cloud Storage before sending it to Dataflow for processing
blob = bucket.blob('pagos/' + archivo)
blob.upload_from_filename(local_route + archivo)
# Once the file is in Cloud Storage, delete the matching records from BigQuery
deleteQuery = "DELETE FROM `contento-bi.unificadas.pagos_opx_unificadas` WHERE archivo = '" + mifecha + "'"
# First remove every record loaded from this file
client = bigquery.Client()
query_job = client.query(deleteQuery)
query_job.result() # Run the BigQuery delete job
# With BigQuery cleaned up and the file uploaded, run the Dataflow job
mensaje = pagos_beam.run('gs://ct-unificadas/pagos/' + archivo, mifecha)
if mensaje == "Corrio sin problemas":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Unificadas/Pagos/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo el cargue correctamente"
response["status"] = True
return jsonify(response), response["code"]
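# Sketch only (not part of the original module): every route above repeats the same
# upload -> BigQuery delete -> Dataflow pattern, so a single helper could host it.
# The helper name and the `beam_module` / `success_msg` arguments are assumptions for illustration.
def _load_csv_files(local_route, gcs_prefix, delete_query_template, beam_module, success_msg):
    response = {"code": 400, "description": "No se encontraron ficheros", "status": False}
    for archivo in os.listdir(local_route):
        if not archivo.endswith(".csv"):
            continue
        mifecha = archivo
        # Upload the file to Cloud Storage before handing it to Dataflow
        bucket = storage.Client().get_bucket('ct-unificadas')
        blob = bucket.blob(gcs_prefix + archivo)
        blob.upload_from_filename(local_route + archivo)
        # Remove any rows already loaded for this file, then run the Dataflow job
        bigquery.Client().query(delete_query_template.format(mifecha)).result()
        mensaje = beam_module.run('gs://ct-unificadas/' + gcs_prefix + archivo, mifecha)
        if mensaje == success_msg:
            move(local_route + archivo, local_route + "Procesados/" + archivo)
            response.update(code=200, description="Se realizo el cargue correctamente", status=True)
    return jsonify(response), response["code"]
# Example (illustrative only), the /asignacion_aval route body could then reduce to:
# return _load_csv_files(local_route, 'bases_asignacion/avalcreditos/',
#     "DELETE FROM `contento-bi.unificadas.base_asignacion_avalcredito` WHERE fecha = '{}'",
#     base_asignacion_avalcreditos, "Corrio Full HD")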
|
# coding: utf-8
# In[4]:
# Naive Bayes using NLP
# Use the following lines if the script fails at startup with a UTF-8 encoding error (Python 2 only)
# import sys
# reload(sys)
# sys.setdefaultencoding('utf-8')
import os
os.chdir("C:\\Users\\manishk.bajpai\\Desktop\\")
import csv
smsdata = open('SMSSpamCollection.txt','r')
csv_reader = csv.reader(smsdata,delimiter='\t')
smsdata_data = []
smsdata_labels = []
for line in csv_reader:
smsdata_labels.append(line[0])
smsdata_data.append(line[1])
smsdata.close()
# Printing top 5 lines
for i in range(5):
print (smsdata_data[i],smsdata_labels[i])
# In[5]:
# Printing Spam & Ham count
from collections import Counter
c = Counter( smsdata_labels )
print(c)
# In[9]:
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import string
import pandas as pd
from nltk import pos_tag
from nltk.stem import PorterStemmer
nltk.download('popular')
def preprocessing(text):
text2 = " ".join("".join([" " if ch in string.punctuation else ch for ch in text]).split())
tokens = [word for sent in nltk.sent_tokenize(text2) for word in
nltk.word_tokenize(sent)]
tokens = [word.lower() for word in tokens]
stopwds = stopwords.words('english')
tokens = [token for token in tokens if token not in stopwds]
tokens = [word for word in tokens if len(word)>=3]
stemmer = PorterStemmer()
tokens = [stemmer.stem(word) for word in tokens]
tagged_corpus = pos_tag(tokens)
Noun_tags = ['NN','NNP','NNPS','NNS']
Verb_tags = ['VB','VBD','VBG','VBN','VBP','VBZ']
lemmatizer = WordNetLemmatizer()
def prat_lemmatize(token,tag):
if tag in Noun_tags:
return lemmatizer.lemmatize(token,'n')
elif tag in Verb_tags:
return lemmatizer.lemmatize(token,'v')
else:
return lemmatizer.lemmatize(token,'n')
pre_proc_text = " ".join([prat_lemmatize(token,tag) for token,tag in tagged_corpus])
return pre_proc_text
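# Quick illustration (the example text is assumed, not taken from the dataset): preprocessing
# lower-cases, strips punctuation and stopwords, drops short tokens, stems, then lemmatizes.
print(preprocessing("Winner!! You have been selected to receive a cash prize"))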
smsdata_data_2 = []
for i in smsdata_data:
smsdata_data_2.append(preprocessing(i))
import numpy as np
trainset_size = int(round(len(smsdata_data_2)*0.70))
print ('The training set size for this classifier is ' + str(trainset_size) + '\n')
# In[10]:
x_train = np.array([''.join(rec) for rec in smsdata_data_2[0:trainset_size]])
y_train = np.array([rec for rec in smsdata_labels[0:trainset_size]])
x_test = np.array([''.join(rec) for rec in smsdata_data_2[trainset_size:len(smsdata_data_2)]])
y_test = np.array([rec for rec in smsdata_labels[trainset_size:len(smsdata_labels)]])
# building TFIDF vectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(min_df=2, ngram_range=(1, 2), stop_words='english',
max_features= 4000,strip_accents='unicode', norm='l2')
x_train_2 = vectorizer.fit_transform(x_train).todense()
x_test_2 = vectorizer.transform(x_test).todense()
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB().fit(x_train_2, y_train)
ytrain_nb_predicted = clf.predict(x_train_2)
ytest_nb_predicted = clf.predict(x_test_2)
from sklearn.metrics import classification_report,accuracy_score
print ("\nNaive Bayes - Train Confusion Matrix\n\n",pd.crosstab(y_train,ytrain_nb_predicted,rownames = ["Actuall"],colnames = ["Predicted"]))
print ("\nNaive Bayes- Train accuracy",round(accuracy_score(y_train,ytrain_nb_predicted),3))
print ("\nNaive Bayes - Train Classification Report\n",classification_report(y_train,ytrain_nb_predicted))
print ("\nNaive Bayes - Test Confusion Matrix\n\n",pd.crosstab(y_test,ytest_nb_predicted,rownames = ["Actuall"],colnames = ["Predicted"]))
print ("\nNaive Bayes- Test accuracy",round(accuracy_score(y_test,ytest_nb_predicted),3))
print ("\nNaive Bayes - Test Classification Report\n",classification_report(y_test,ytest_nb_predicted))
# In[11]:
# printing top features
feature_names = vectorizer.get_feature_names()
coefs = clf.coef_
intercept = clf.intercept_
coefs_with_fns = sorted(zip(clf.coef_[0], feature_names))
print ("\n\nTop 10 features - both first & last\n")
n=10
top_n_coefs = zip(coefs_with_fns[:n], coefs_with_fns[:-(n + 1):-1])
for (coef_1, fn_1), (coef_2, fn_2) in top_n_coefs:
print('\t%.4f\t%-15s\t\t%.4f\t%-15s' % (coef_1, fn_1, coef_2, fn_2))
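# Sketch (not part of the original script): the same TF-IDF + MultinomialNB flow expressed
# as a scikit-learn Pipeline, which keeps the vectorizer and classifier fitted together
# and makes it easy to score new raw messages.
from sklearn.pipeline import Pipeline
spam_clf = Pipeline([
    ('tfidf', TfidfVectorizer(min_df=2, ngram_range=(1, 2), stop_words='english',
                              max_features=4000, strip_accents='unicode', norm='l2')),
    ('nb', MultinomialNB()),
])
spam_clf.fit(x_train, y_train)
print("Pipeline test accuracy", round(accuracy_score(y_test, spam_clf.predict(x_test)), 3))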
|
# coding: utf-8
# Store data: write a list of numbers to a JSON file
import json
numbers = [2,3,5,6,7,8,4,9]
filename = 'numbers.json'
with open(filename,'w') as f:
json.dump(numbers,f)
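# Sketch (not in the original snippet): read the numbers back to confirm the round trip.
with open(filename) as f:
    loaded = json.load(f)
print(loaded)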
|
def closestsubarraysum(arr, x):
    # Largest sum of two distinct elements that stays strictly below x
    closest = None
    for i in range(len(arr)):
        for j in range(i + 1, len(arr)):
            pair = arr[i] + arr[j]
            if pair < x and (closest is None or pair > closest):
                closest = pair
    return closest
print(closestsubarraysum([10, 22, 28, 29, 30, 40], 54))
|
# coding=UTF-8
import time
import signal
from functools import wraps
from os.path import join, getsize
import os
log_dir = './logs/'
def initiDir(dir):
# Detect whether the directory is existing, if not then create
if os.path.exists(dir) == False:
os.makedirs(dir)
def getCurrentPath():
import os
print(os.getcwd())
def format_time(t):
# %I (not %l) is the portable strftime directive for a 12-hour clock
format = '%Y-%m-%d %I:%M %p'
value = time.localtime(int(t))
return time.strftime(format, value)
def log(*args, **kwargs):
args = " ".join(args)
format = '%Y-%m-%d %H:%M:%S'
# time.time() return the unix time
value = time.localtime(int(time.time()))
dt = time.strftime(format, value)
if kwargs == {}:
kwargs = ''
with open(log_dir + 'log.txt', 'a+') as f:
print(dt, args, kwargs, file=f)
# Estimate the function/method running time
# How to use: @fn_timer before the function
def fn_timer(fn):
@wraps(fn)
def function_timer(*args, **kwargs):
t0 = time.time()
result = fn(*args, **kwargs)
t1 = time.time()
print("Total time running {}: {} seconds".format(fn.__name__,str(t1 - t0)))
return result
return function_timer
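# Usage sketch (illustrative only; `_slow_sum` is not part of the original utilities):
# decorating a function with @fn_timer prints how long each call takes.
@fn_timer
def _slow_sum(n):
    return sum(range(n))
# _slow_sum(10 ** 6)  # would print: Total time running _slow_sum: ... seconds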
def commandLineTool(command):
import os
re = os.system(command)
return re
# read the file
def readFile(file):
data = None
path = os.path.split(file)[0]
filename = os.path.split(file)[1]
try:
with open(file, 'rb') as f:
data = f.read()
except IOError:
print("无法打开文件 或 无法找到当前文件 {}".format(filename))
return None
return data
# Get the file directory inside the directory
def getDirFileLists(dir):
file_paths = []
for parent, dirnames, filenames in os.walk(dir):
for filename in filenames:
file_paths.append(os.path.join(parent, filename))
# print(file_paths)
return file_paths
# Delete the all files inside the directory
def deleteAllFiles(dir):
import os
imgList = os.listdir(dir)
for fileName in imgList:
file = os.path.join(dir, fileName)
os.remove(file)
# Calculate the total Size inside the directory
def getdirsize(dir):
size = 0
for root, dirs, files in os.walk(dir):
size += sum([getsize(join(root, name)) for name in files])
size = size/1024/1024
import math
num = size * 1000
num = math.floor(num) / 1000
return str(num)+'MB'
# Todo
def time_limit(interval):
def wraps(func):
def handler(signum, frame):
#TODO have a test, let it directly return
print('tim_limit RuntimeError')
# raise RuntimeError()
def deco(*args, **kwargs):
signal.signal(signal.SIGALRM, handler)
signal.alarm(interval)
res = func(*args, **kwargs)
signal.alarm(0)
return res
return deco
return wraps
def _initialize_dirs_for_utils():
initiDir(log_dir)
if __name__ == '__main__':
# _initialize_dirs_for_utils()
log('test', 'dd', 'dddd', a = 'dd')
# a = ['ab']
# print("%")
|
from . import db
class UserProfile(db.Model):
first_name = db.Column(db.String(80))
last_name = db.Column(db.String(80))
gender = db.Column(db.String(1))
email = db.Column(db.String(255), unique=True)
location = db.Column(db.String(80))
biography = db.Column(db.Text)
image = db.Column(db.String(255))
created_on = db.Column(db.String(80))
uid = db.Column(db.Integer, primary_key=True, autoincrement=False)
__tablename__ = "profiles"
def __init__(self,first_name, last_name, gender,email,location,biography,image,created_on,uid):
self.first_name = first_name
self.last_name = last_name
self.gender = gender
self.email = email
self.location = location
self.biography = biography
self.image = image
self.created_on = created_on
self.uid = uid
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
try:
return unicode(self.uid) # python 2 support
except NameError:
return str(self.uid) # python 3 support
def __repr__(self):
return "User: {0} {1}".format(self.first_name, self.last_name)
|
# Find the number of n-digit natural numbers A such that no three consecutive
# digits of A have a sum greater than 9.
# Let f(n, B) = the number of n-digit natural numbers A (leading zeros allowed)
# with B is the number formed by the first two digits of A such that
# no three consecutive digits of A have a sum greater than a specified
# constant C (Here C = 9).
# Recurrence relation:
# f(n, B) = sum{f(n - 1, D)} where D = 10 * (B % 10) + H for all H such that
# 0 <= H <= 9 and B % 10 + H + floor(B / 10) <= C.
# Base cases: f(n, B) = 1 if n < 3 and B % 10 + floor(B / 10) <= C
#             f(n, B) = 0 if n < 3 and B % 10 + floor(B / 10) > C
# Let g(n) = the number of n-digit natural numbers A (no leading zeros) such that
# no three consecutive digits of A have a sum greater than a specified
# constant C (Here C = 9).
#
# g(n) = sum{f(n, P)} where 10 <= P <= 99
# Methodology: Using memoization to prevent recomputation.
MEMO_TABLE = []
def countSpecialNum(numDigit, upperLimit):
initializeMemoTable(numDigit)
count = 0
for firstTwoDigit in range(10, 100):
count += f(numDigit, firstTwoDigit, upperLimit)
return count
def initializeMemoTable(n):
global MEMO_TABLE
MEMO_TABLE = [[-1] * 100 for row in range(0, n + 1)]
def f(numDigit, firstTwoDigit, upperLimit):
if numDigit < 3:
if firstTwoDigit // 10 + firstTwoDigit % 10 <= upperLimit:
return 1
else:
return 0
if MEMO_TABLE[numDigit][firstTwoDigit] >= 0:
return MEMO_TABLE[numDigit][firstTwoDigit]
mySum = 0
firstDigit = firstTwoDigit // 10
secondDigit = firstTwoDigit % 10
thirdDigit = 0
while thirdDigit < 10 and thirdDigit + firstDigit + secondDigit <= upperLimit:
mySum += f(numDigit - 1, secondDigit * 10 + thirdDigit, upperLimit)
thirdDigit += 1
MEMO_TABLE[numDigit][firstTwoDigit] = mySum
return mySum
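# Quick sanity check (added as an illustration, value verified by hand): for 2-digit
# numbers with C = 9 the only constraint is that the two digits sum to at most 9,
# which leaves 45 numbers (tens digit t in 1..9, units digit in 0..9-t).
if __name__ == '__main__':
    print(countSpecialNum(2, 9))  # expected: 45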
|
import matplotlib.pyplot as plt
#import matplotlib.patches as mpatches
import numpy as np
import datetime
import os
from collections import OrderedDict
import LearnerProfile
def Graph(data, number, name='graph', title=''):
MAX_RANGE = 10
MIN_RANGE = -5
# displaces chunks for the histogram based on width of individual
# bars
CHUNK_DISPLACE = 3
BAR_RANGE = 100
tableau20 = [(31,119,180), (174,199,232), (255,127,14), (255,187,120),
(44,160,44), (152,223,138), (214,39,40), (255,152,150),
(148,103,189), (197,176,213), (140,86,75), (196,156,148),
(227,119,194), (247, 182, 210), (127, 127, 127), (199,199,199),
(188,189,34), (219,219,141), (23,190,207), (158,218,229)]
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b/255.)
plt.figure(figsize=(12,9))
start = data[0][1]
end = data[-1][1]
timeLength = (end - start).total_seconds()
plt.ylim(MIN_RANGE,MAX_RANGE)
plt.xlim(0, timeLength)
plt.text(timeLength/2, MAX_RANGE+1, title, fontsize=24, color='k')
profileData = []
for metric in data[0][0].average.errorTracking:
x = []
y = []
for profile in data:
# print(profile[0].errorTracking)
x.append((profile[1] - start).total_seconds())
y.append(profile[0].average.errorTracking[metric])
profileData.append([x,y,metric])
# print('x = ', str((profile[1] - start).total_seconds()), '; y = ', str(profile[0].errorTracking[metric]) + '\n')
for i, metric in enumerate(profileData):
plt.plot(profileData[i][0],profileData[i][1], lw=1.0, color=tableau20[i])
plt.text(timeLength + timeLength*.05, 10 - i, metric[2], fontsize=14, color=tableau20[i])
plt.savefig(''.join(['graphs/',name,'_',str(number)]), bbox_inches = 'tight')
plt.clf()
# plot the problem statistics
problemCount = len(data[-1][0].problems) + 0.0
metricCount = len(data[-1][0].problems[0].errorTracking) + 0.0
barWidth = BAR_RANGE / ((metricCount + CHUNK_DISPLACE) * problemCount * 1.0)
groupWidth = (barWidth*metricCount + barWidth*CHUNK_DISPLACE)
# print(problemCount,metricCount)
plt.figure(figsize=(12,9))
plt.ylim(MIN_RANGE, MAX_RANGE)
plt.xlim(0, BAR_RANGE)
plt.text(timeLength/2, MAX_RANGE+1, title, fontsize=24, color='k')
problemStats = []
for i, metric in enumerate(data[-1][0].problems[0].errorTracking):
x = []
y = []
for j, problem in enumerate(data[-1][0].problems):
x.append((j*groupWidth + i*barWidth))
y.append(problem.errorTracking[metric])
problemStats.append([x,y,problem.problemId])
for i in range(0, int(problemCount)):
# print('generating text', problemCount)
plt.text((groupWidth * i) + (groupWidth / 2), MIN_RANGE - 1, data[-1][0].problems[i].problemId)
ax = plt.subplot(111)
ax.set_xticklabels([])
col = 0
# print('creating graph')
for stat in problemStats:
ax.bar(stat[0], stat[1], width=barWidth, color=tableau20[col], align='center')
col += 1
if col > metricCount:
col = 0
# create legend
for i, metric in enumerate(data[-1][0].problems[0].errorTracking):
ax.text(BAR_RANGE + 2, MAX_RANGE - (i * .5), metric, color = tableau20[i], fontsize = 14)
plt.savefig(''.join(['graphs/',name,'_bar']))
plt.clf()
plt.close()
return
# graph data from all profiles: we'll need to be smart
def allGraph(profiles, problemAnalysis, conditionAnalysis, totalConditionAnalysis, studentCount, rawProfiles, timeData):
print('Graphing all')
MAX_RANGE_OFF = 10
MIN_RANGE_OFF = -5
MAX_RANGE_OTHER = 10
MIN_RANGE_OTHER = -5
WIGGLE = .05
PRB_MIN_RANGE = 0
PRB_MAX_RANGE = 16
barWidth = 2
# displaces chunks for the histogram based on width of individual
# bars
CHUNK_DISPLACE = 3
BAR_RANGE = 100
tableau20 = [(31,119,180), (174,199,232), (255,127,14), (255,187,120),
(44,160,44), (152,223,138), (214,39,40), (255,152,150),
(148,103,189), (197,176,213), (140,86,75), (196,156,148),
(227,119,194), (247, 182, 210), (127, 127, 127), (199,199,199),
(188,189,34), (219,219,141), (23,190,207), (158,218,229)]
PLOT_ORDER = ['offByNx', 'offByNy', 'offByNxMag', 'offByNyMag', 'offByNxChir', 'offByNyChir', 'offByCount']
OTHER_ORDER = ['attempts', 'correctProb','sumError', 'ignoreX', 'ignoreY', 'flippingError', 'noPlot', 'offByCount', 'invertX', 'invertY']
LATERAL_ANALYSIS = ['correctProb', 'sumError', 'ignoreX', 'ignoreY', 'flippingError', 'noPlot', 'offByCount', 'invertX', 'invertY','deleteMoves']
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b/255.)
# create lateral problem analysis graphs
if not os.path.exists('graphs'):
os.makedirs('graphs')
if not os.path.exists('graphs/problemAnalysis'):
os.makedirs('graphs/problemAnalysis')
if not os.path.exists('graphs/conditionAnalysis'):
os.makedirs('graphs/conditionAnalysis')
os.chdir('graphs/problemAnalysis')
for problemId, problem in problemAnalysis.items():
plt.figure(figsize=(12,9))
plt.xlim(PRB_MIN_RANGE, len(problem.errorTracking)+2)
problemSum = 0
for condition in studentCount:
if problemId in studentCount[condition]:
problemSum += studentCount[condition][problemId]
# print(problemSum)
ax = plt.subplot(111)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_yaxis().tick_left()
plt.xticks(visible=False)
plt.yticks(fontsize=14)
legend = OrderedDict()
attempts = problem.errorTracking['attempts']
correct = problem.errorTracking['correctProb']
incorrect = attempts - correct
if attempts == correct:
attempts += 1
for index, error in enumerate(problem.errorTracking.items()):
if error[0] == 'correctProb':
legend[('incorrect', incorrect)] = ax.bar(index + 1, incorrect/problemSum, 1, color = tableau20[index])
elif error[0] in LATERAL_ANALYSIS:
legend[error] = ax.bar(index + 1, error[1]/problemSum, 1, color=tableau20[index])
plt.title( problemId +' lateral Analysis', fontsize=24, color='k')
# create legend
# for i, metric in enumerate(problem.errorTracking):
# ax.text(len(problem.errorTracking)+2, float((ymax)-(ymax-ymin)*.03*i), metric, color=tableau20[i], fontsize = 14)
plt.legend(list(legend.values()), list(legend.keys()))
plt.savefig(''.join(['error_analysis/',problemId,'analysis.png']))
plt.close()
# create behavior graph
plt.figure(figsize=(12,9))
ax = plt.subplot(111)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_yaxis().tick_left()
plt.yticks(fontsize=14)
plt.xticks(visible=False)
ydata = problem.behaviors.values()
bars = ax.bar(range(0,len(ydata)), ydata, color=tableau20, width=barWidth)
plt.title( problemId + ' behavior Analysis', fontsize=24, color='k')
x_range = plt.axis()[1] - plt.axis()[0]
x_max = plt.axis()[1]
plt.xlim(xmax = x_max + x_range*.3)
plt.legend(bars, list(problem.behaviors.keys()))
ax.set_ylabel('Count/sessions')
plt.savefig(''.join(['behaviors/' ,problemId, 'behaviors.png']))
plt.clf()
plt.close()
print('Problem ', problemId, ' analysis completed')
os.chdir('..')
# create lateral condition/Problem analysis Graphs
conditionStats = {}
conditionStats['total'] = {}
legendColor = {}
for condition, problems in conditionAnalysis.items():
conditionStats[condition] = {}
keys = sorted(problems.keys())
for index, error in enumerate(problems[keys[0]].errorTracking.items()):
conditionStats[condition][error[0]] = []
legendColor[error[0]] = index
if not error[0] in conditionStats[condition]:
conditionStats[condition][error[0]] = [0.0]*len(keys)
for index, key in enumerate(keys):
conditionStats[condition][error[0]].append((problems[key].errorTracking[error[0]] * 1.0)/studentCount[condition][key])
for problemId, problem in problems.items():
if condition in studentCount and problemId in studentCount[condition]:
userCount = studentCount[condition][problemId]
else:
continue
plt.figure(figsize=(12,9))
ax = plt.subplot(111)
legend = OrderedDict()
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_yaxis().tick_left()
plt.xticks(visible=False)
plt.yticks(fontsize=14)
attempts = problem.errorTracking['attempts']
correct = problem.errorTracking['correctProb']
incorrect = attempts - correct
# avoid divide by 0
if attempts == correct:
attempts += 1
for index, error in enumerate(problem.errorTracking.items()):
if error[0] == 'correctProb':
legend[('incorrect', incorrect)] = ax.bar(index + 1, incorrect/userCount, 1, color = tableau20[index])
elif error[0] in LATERAL_ANALYSIS:
legend[error] = ax.bar(index + 1, error[1]/userCount, 1, color=tableau20[index])
plt.title(condition + problemId +' condition Analysis', fontsize=24, color='k')
plt.legend(list(legend.values()), list(legend.keys()))
plt.ylabel('Count/User Session')
plt.xlabel('Problem')
if condition == '':
condition = 'None'
plt.savefig(''.join(['conditionAnalysis/all/', problemId, '_', condition, '.png']))
plt.savefig(''.join(['conditionAnalysis/', condition, '/', problemId, '_', condition, '.png']))
plt.savefig(''.join(['conditionAnalysis/Problems/', problemId, '/', problemId, '_', condition, '.png']))
print(problemId, '/', condition, 'analysis complete')
plt.close()
# check conditions over all sessions
for condition, stats in conditionStats.items():
plt.figure(figsize=(12,9))
ax = plt.subplot(111)
legend = OrderedDict()
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_yaxis().tick_left()
plt.yticks(fontsize=14)
for error, data in stats.items():
if error in LATERAL_ANALYSIS:
if error == 'correctProb':
incorrect = [attempts - correct for attempts, correct in zip(stats['attempts'], data)]
legend['incorrect'] = plt.plot(range(1, 1+len(incorrect)), incorrect, marker='.', color=tableau20[int(legendColor[error])], label='incorrect')
else:
plt.plot(range(1, 1+len(data)), data, marker='.', color=tableau20[int(legendColor[error])], label=str(error))
plt.title(condition + ' Session Analysis', fontsize=24, color='k')
plt.ylabel('Count/User Session')
plt.legend()
plt.show()
# plt.legend(list(legend.values()), list(legend.keys()))
if condition == '':
condition = 'None'
plt.savefig(''.join(['conditionAnalysis/Session/', condition, '_scatter', '.png']))
plt.close()
print(condition, 'session analysis complete')
# Create overall condition graphs - i.e. errors in all problems for all sessinos in a condition
for condition, data in totalConditionAnalysis.items():
sessionCount = studentCount[condition]['541']
plt.figure(figsize=(12,9))
ax = plt.subplot(111)
legend = OrderedDict()
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_yaxis().tick_left()
print(data)
plt.xticks(visible=False)
plt.yticks(fontsize=14)
attempts = data.errorTracking['attempts']
correct = data.errorTracking['correctProb']
incorrect = attempts - correct
print(incorrect)
# avoid divide by 0
if attempts == correct:
attempts += 1
for index, error in enumerate(data.errorTracking.items()):
if error[0] == 'correctProb':
legend[('incorrect', incorrect)] = ax.bar(index + 1, incorrect/sessionCount, 1, color = tableau20[index])
elif error[0] in LATERAL_ANALYSIS:
legend[error] = ax.bar(index + 1, error[1]/sessionCount, 1, color=tableau20[index])
plt.title(condition + ' Total condition Analysis', fontsize=24, color='k')
plt.legend(list(legend.values()), list(legend.keys()))
plt.ylabel('Count')
if condition == '':
condition = 'None'
plt.savefig(''.join(['conditionAnalysis/Session/', condition, '_bar', '.png']))
plt.close()
print(condition, 'error graphs created')
# now create profile graphs
# print(profiles[0][0].problems[-1])
print('Problem Analysis Graphs Complete.')
# Profile analysis graphs
for profile in profiles:
if(len(profile.problems) == 0):
continue
profileOffByData = {}
profileOtherData = {}
# dictionary storage for easy access
profileCorrectnessData = {}
profileAttemptData = {}
# get all the data into useful groups
for i, metric in enumerate(profile.problems[-1].errorTracking):
for problem in profile.problems:
if metric in PLOT_ORDER:
if metric in profileOffByData:
profileOffByData[metric].append((problem.problemId, problem.errorTracking[metric]))
else:
profileOffByData[metric] = [(problem.problemId, problem.errorTracking[metric])]
if metric in OTHER_ORDER:
if metric in profileOtherData:
profileOtherData[metric].append((problem.problemId, problem.errorTracking[metric]))
else:
profileOtherData[metric] = [(problem.problemId, problem.errorTracking[metric])]
if metric == 'correct':
profileCorrectnessData[problem.problemId] = problem.errorTracking[metric]
elif metric == 'attempts':
profileCorrectnessData[problem.problemId] = problem.errorTracking[metric]
sortedOffBy = OrderedDict()
sortedOther = OrderedDict()
for metric in OTHER_ORDER:
sortedOther[metric] = OrderedDict(sorted(profileOtherData[metric], key=lambda id: int(id[0])))
for metric in PLOT_ORDER:
sortedOffBy[metric] = OrderedDict(sorted(profileOffByData[metric], key=lambda id: int(id[0])))
problemCount = len(profile.problems)
# groupSize = len(PLOT_ORDER)*barWidth + CHUNK_DISPLACE
# tickPlacement = np.arange(CHUNK_DISPLACE + groupSize *.5, CHUNK_DISPLACE + groupSize*.5 + groupSize*problemCount, groupSize)
tickLabels = list(map(lambda x: str(int(x.problemId)-540), profile.problems))
plt.figure(figsize=(12,9))
plt.autoscale(enable=True, axis='y', tight=False)
ax = plt.subplot(211)
stat_legend = OrderedDict()
index = np.arange(0, len(OTHER_ORDER)*barWidth*len(profile.problems) + CHUNK_DISPLACE*len(profile.problems), len(OTHER_ORDER)*barWidth + CHUNK_DISPLACE)
labelLoc = np.arange(len(OTHER_ORDER)*barWidth/2.0, len(OTHER_ORDER)*barWidth*len(profile.problems) + CHUNK_DISPLACE*len(profile.problems), len(OTHER_ORDER)*barWidth + CHUNK_DISPLACE )
for i, statTup in enumerate(sortedOther.items()):
stat_legend[statTup[0]] = ax.bar(index + (barWidth * i), statTup[1].values(), barWidth, color=tableau20[i])
x_range = plt.axis()[1] - plt.axis()[0]
x_max = plt.axis()[1]
ax.set_xticks(labelLoc)
ax.set_xticklabels(tickLabels)
plt.xlim(xmax = x_max + x_range*.25)
# create legend
plt.legend(list(stat_legend.values()), list(stat_legend.keys()))
plt.xlabel('Problem', labelpad=25)
plt.ylabel('Count')
plt.title(''.join(['subject ', profile.subjectID, ' ', profile.condition , ' Errors']))
plt.savefig(''.join(['profileAnalysis/other/', str(profile.subjectID), '_', profile.condition, '_other']), bbox_inches='tight')
plt.clf()
plt.close()
# off By plot
plt.figure(figsize=(12,9))
plt.autoscale(enable=True, axis='y', tight=False)
ax = plt.subplot(211)
stat_legend = OrderedDict()
index = np.arange(0, len(PLOT_ORDER)*barWidth*len(profile.problems) + CHUNK_DISPLACE*len(profile.problems), len(PLOT_ORDER)*barWidth + CHUNK_DISPLACE)
labelLoc = np.arange(len(PLOT_ORDER)*barWidth/2.0, len(PLOT_ORDER)*barWidth*len(profile.problems) + CHUNK_DISPLACE*len(profile.problems), len(OTHER_ORDER)*barWidth + CHUNK_DISPLACE)
for i, statTup in enumerate(sortedOffBy.items()):
stat_legend[statTup[0]] = ax.bar(index + (barWidth * i), statTup[1].values(), barWidth, color=tableau20[i])
x_range = plt.axis()[1] - plt.axis()[0]
x_max = plt.axis()[1]
ax.set_xticks(labelLoc)
ax.set_xticklabels(tickLabels)
plt.xlim(xmax = x_max + x_range*.25)
# plt.xticks(index + barWidth, tickLabels)
# create legend
plt.legend(list(stat_legend.values()), list(stat_legend.keys()))
plt.xlabel('Problem', labelpad=25)
plt.ylabel('Count')
plt.title(''.join(['subject', profile.subjectID, ' ', profile.condition , ' Off-By errors']))
plt.savefig(''.join(['profileAnalysis/offBy/', str(profile.subjectID), '_', profile.condition, '_offBy']), bbox_inches='tight')
plt.clf()
plt.close()
# time analysis
timeData[profile.subjectID]
print('Profile ', profile.subjectID, ' completed')
plt.clf()
plt.close()
print ('All graph complete')
return
|
# coding: utf-8
import datetime
import random
import string
class VotePool(object):
_pool = {}
@classmethod
def _gen_unique_id(cls):
id = ''.join([random.choice(string.digits) for i in range(4)])
if id in cls._pool:
return cls._gen_unique_id()
return id
@classmethod
def create_vote(cls, threshold, callback, success_msg, timeout=300):
id = cls._gen_unique_id()
cls._pool[id] = Vote(id, threshold, callback, success_msg, timeout)
return cls._pool[id]
@classmethod
def get_valid_vote(cls, id):
if id not in cls._pool:
return None
vote = cls._pool[id]
if not vote.is_valid():
return None
return vote
@classmethod
def has_valid_vote(cls, id):
return bool(cls.get_valid_vote(id))
@classmethod
def incr_vote(cls, id, member):
vote = cls.get_valid_vote(id)
if not vote:
return False
return vote.incr(member)
class Vote(object):
def __init__(self, id, threshold, callback, success_msg, timeout):
self.id = id
self.cnt = 0
self.callback = callback
self.members = set()
self.timeout = timeout
self.threshold = threshold
self.success_msg = success_msg
self.created_at = datetime.datetime.now()
def incr(self, member):
if not self.is_valid():
return False
if member in self.members:
return False
self.members.add(member)
self.cnt += 1
if self.cnt >= self.threshold:
self.callback()
return True
def is_valid(self):
# A vote stays valid until its timeout (in seconds) has elapsed since creation
elapsed = (datetime.datetime.now() - self.created_at).total_seconds()
return elapsed < self.timeout
def is_success(self):
return self.cnt >= self.threshold
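# Usage sketch (illustrative only; the callback and member names are assumptions):
if __name__ == '__main__':
    vote = VotePool.create_vote(threshold=2, callback=lambda: print('vote passed'),
                                success_msg='passed', timeout=60)
    VotePool.incr_vote(vote.id, 'alice')
    VotePool.incr_vote(vote.id, 'bob')   # reaching the threshold fires the callback
    print(vote.is_success())             # True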
|
#!python
#
# tofpgen: Do the easy things to convert the output of F* (or guiguts)
# into fpgen input. Try not to do anything that will be wrong.
# Output is simply stuck into a file called out
# -- character set to UTF-8
# -- em-dashes
# -- curly quotes
# -- /##/ -> <quote>...</quote>
# -- [#] -> <fn id='#'>
# -- [Footnote:] -> <footnote>...</footnote>
# -- [Sidenote:] -> <sidenote>...</sidenote>
# -- [Illustration:] -> <illustration>...</illustration>
# -- \n\n\n\nXXX -> <chap-head>XXX</chap-head>
#
import sys
import re
import os
import fnmatch
import chardet
import datetime
def fatal(line):
sys.stderr.write("ERROR " + line)
exit(1)
if len(sys.argv) > 2:
fatal("Too many args; Usage: tofpgen [xxx-src.txt]\n")
if len(sys.argv) < 2:
src = None
for file in os.listdir('.'):
if fnmatch.fnmatch(file, '*-src.txt'):
if src != None:
fatal("Multiple *-src.txt files in current directory\n")
src = file
if src == None:
fatal("No *-src.txt file found in current directory\n")
else:
src = sys.argv[1]
basename = re.sub('-src.txt$', '', src)
regexFootnote = re.compile(r"^\[Footnote ([ABC0-9][0-9]*): (.*)$")
regexIllStart = re.compile(r"^\[Illustration: *")
regexIllOne = re.compile(r"^\[Illustration: (.*)\]$")
regexIllNoCap = re.compile(r"^\[Illustration]$")
regexFNRef = re.compile(r"\[([ABCD0-9][0-9]*)\]")
sidenoteRE = re.compile(r"\[Sidenote: (.*)\]$")
postamble = """
<l rend='center mt:3em'>THE END</l>
<heading level='1'>TRANSCRIBER NOTES</heading>
Mis-spelled words and printer errors have been fixed.
Inconsistency in hyphenation has been retained.
Inconsistency in accents has been fixed.
Inconsistency in accents has been retained.
Because of copyright considerations, the illustrations by X (y-z) have been omitted from this etext.
Illustrations have been relocated due to using a non-page layout.
Some photographs have been enhanced to be more legible.
When nested quoting was encountered, nested double quotes were
changed to single quotes.
Space between paragraphs varied greatly. The thought-breaks which
have been inserted attempt to agree with the larger paragraph
spacing, but it is quite possible that this was simply the methodology
used by the typesetter, and that there should be no thought-breaks.
<nobreak>[End of TITLE, by AUTHOR]
/* end of """ + basename + """-src */
"""
date = datetime.datetime.now().strftime('%d-%b-%Y')
preamble = """/* This is """ + basename + """-src as of """ + date + """ */
<property name="cover image" content="images/cover.jpg">
<option name="pstyle" content="indent">
//<option name="summary-style" content="center">
//<option name="poetry-style" content="center">
<meta name="DC.Creator" content="AUTHOR">
<meta name="DC.Title" content="TITLE">
<meta name="DC.Language" content="en">
<meta name="DC.Created" content="DATE">
<meta name="DC.date.issued" content="DATE">
<meta name="DC.Subject" content="SUBJECT">
<meta name="Tags" content="SUBJECT">
<meta name="Series" content="SERIES [15]">
<meta name="generator" content="fpgen 4.63b">
<lit section="head">
<style type="text/css">
.poetry-container { margin-top:.5em; margin-bottom:.5em }
.literal-container { margin-top:.5em; margin-bottom:.5em }
div.lgc { margin-top:.5em; margin-bottom:.5em }
p { margin-top:0em; margin-bottom:0em; }
.index1 .line0, .index2 .line0 {
text-align: left;
text-indent:-2em;
margin:0 auto 0 2em;
}
</style>
</lit>
<if type='h'>
<illustration rend="w:80%" src="images/cover.jpg"/>
<pb>
</if>
"""
def quote(line):
if line == "<pn='+1'>":
return line
# Leading or trailing double or single quotes on the line
if line[0] == '"':
line = '“' + line[1:]
if line[-1] == '"':
line = line[0:-1] + '”'
if line[0] == "'":
line = '‘' + line[1:]
if line[-1] == "'":
line = line[0:-1] + '’'
# space quote starts sentence, opening quote
line = re.sub('" ', '” ', line)
# space quote, starts sentence
# em-dash quote, starts sentence?
# open paren, quote starts sent
line = re.sub('([ —(])"', r'\1“', line)
# Punctuation or lower-case letter, followed by quote, ends a sentence
line = re.sub(r'([\.,!?a-z])"', r'\1”', line)
# quote, open-square is probably a footnote ref at the end of a quote
line = re.sub(r'"\[', '”[', line)
# quote, close-square is the end of a quote
line = re.sub(r'"]', '”]', line)
# single between two letters is a contraction
line = re.sub(r"(\w)'(\w)", r"\1’\2", line)
# Match the direction if single/double
line = re.sub(r"“'", r"“‘", line)
line = re.sub(r"'”", r"’”", line)
# End of sentence for single
line = re.sub(r"' ", r"’ ", line)
# Start single after dash
line = re.sub("([ —])'", r"\1‘", line)
# End single after letters
line = re.sub(r"([\.,!?a-z])'", r"\1’", line)
# Common, non-ambiguous contractions
for word in [ "em", "Twas", "twas", "Tis", "tis", "Twould", "twould", "Twill", "twill", "phone", "phoned", "phoning", "cello" ]:
line = re.sub(r'([ “]|^)‘' + word + r'([ !,\.?—:]|$)', r'\1’' + word + r'\2', line)
# Insert narrow non-breaking space between adjacent quotes
line = re.sub(r'([\'"‘’“”])([\'"‘’“”])', r'\1<nnbsp>\2', line)
if "'" in line or '"' in line:
sys.stderr.write("CHECK QUOTE: " + line + '\n')
return line
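# Illustrative example (hypothetical input; the output follows the substitutions above):
#   quote('"Hello," he said.')  ->  '“Hello,” he said.'
# Any straight quotes the pass cannot resolve are reported on stderr as CHECK QUOTE lines.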
def sidenote(line):
m = sidenoteRE.match(line)
if m:
return "<sidenote>" + m.group(1) + "</sidenote>"
return line
def footnote(line):
global inFN
# Replace [12] footnote references
line = regexFNRef.sub(r"<fn id='#'>/*\1*/", line)
m = regexFootnote.match(line)
if not m:
if inFN:
if line.endswith("]"):
line = line[:-1] + "</footnote>"
inFN = False
return line
if inFN and m:
fatal("ERROR already in FN at " + line)
fn = m.group(1)
rest = m.group(2)
inFN = True
if rest.endswith("]"):
rest = rest[:-1] + "</footnote>"
inFN = False
return "<footnote id='#'>/*" + fn + "*/" + rest
def illustration(line):
global inIll, illStartLine
if inIll and line.endswith("]"):
inIll = False
illStartLine = None
return line[:-1] + "\n</caption>\n</illustration>"
mOne = regexIllOne.match(line)
mStart = regexIllStart.match(line)
mNoCap = regexIllNoCap.match(line)
if (mStart or mNoCap) and inIll:
fatal("already in ill at " + line + ", which started at " + illStartLine)
commonIll = "<illustration rend='w:WW%' alt='AAA' src='images/XXX.jpg'"
# One line illustration with caption
if mOne:
caption = mOne.group(1)
line = commonIll + ">\n<caption>\n" + \
caption + "\n</caption>\n</illustration>\n"
return line
# Illustration with caption starts, but does not end on this line
if mStart:
captionStart = line[15:].strip()
inIll = True
illStartLine = line
return commonIll + ">\n<caption>\n" + captionStart
# Illustration without caption
if mNoCap:
return commonIll + "/>"
return line
def emitTOC(block, output):
# If there is at least one line ending in whitespace number, assume
# the TOC has page numbers
hasPageNumbers = False
for l in block:
if re.search(r" *[0-9][0-9]*$", l):
hasPageNumbers = True
break
if hasPageNumbers:
output.write("<table pattern='r h r'>\n")
r = re.compile(r"^([^ ][^ ]*) *(.*)[ \.][ \.]*([0-9][0-9]*)$")
else:
output.write("<table pattern='r h'>\n")
r = re.compile("^ *([^ ][^ ]*) *(.*)$")
for l in block:
if l == "":
continue
m = re.match(r"^([A-Z]*) *([A-Z]*)$", l)
if m:
# CHAPTER PAGE
a = m.group(1)
b = m.group(2)
output.write("<fs:xs>"+a+"</fs>||<fs:xs>"+b+"</fs>\n")
else:
m = r.match(l)
if m:
chno = m.group(1)
name = m.group(2).strip()
if hasPageNumbers:
pn = m.group(3)
output.write(chno + "|" + name + "|#" + pn + "#\n")
else:
output.write(chno + "|" + name + "\n")
else:
output.write("???? " + l + "\n")
output.write("</table>\n")
inFN = False
inIll = False
illStartLine = None
chapHead = False
subHead = False
inTOC = False
startTOC = False
rawdata = open(src, "rb").read()
encoding = chardet.detect(rawdata)['encoding']
if encoding[:3] == "ISO":
encoding = "ISO-8859-1"
sys.stderr.write("Source file encoding: " + encoding + "\n")
sys.stderr.write("converting " + src + " into file out\n")
with open(src, "r", encoding=encoding) as input:
with open("out", "w", encoding="UTF-8") as output:
output.write(preamble)
blanks = 0
for line in input:
line = line.rstrip()
if line == "":
blanks += 1
if not inTOC:
output.write("\n")
continue
line = line.replace("--", "—")
line = quote(line)
# Matched the /* after a line with CONTENTS
# Accumulate the whole block, process it in one
if inTOC:
if line == "*/":
# End of TOC, process now
startTOC = False
inTOC = False
emitTOC(tocBlock, output)
blanks = 0
elif line != "":
# Accumulate line
tocBlock.append(line)
blanks = 0
continue
if blanks == 2 and chapHead and line == "/*" and startTOC:
inTOC = True
tocBlock = []
continue
if line == "/*" or regexIllOne.match(line) or regexIllNoCap.match(line) \
or regexIllStart.match(line):
blanks = 0
chapHead = False
if blanks >= 4:
if re.match(r'contents', line, re.IGNORECASE) != None:
startTOC = True
if not ("<chap-head " in line):
line = "<chap-head pn='XXX'>" + line + "</chap-head>"
chapHead = True
subHead = False
elif blanks == 1 and chapHead and line != "/*":
if subHead:
line = "<heading level='3'>" + line + "</heading>"
else:
if not ("<sub-head>" in line):
line = "<sub-head>" + line + "</sub-head>"
subHead = True
elif blanks >= 2:
chapHead = False
subHead = False
startTOC = False # Or error!
if line == "/#":
line = "<quote>"
elif line == "#/":
line = "</quote>"
else:
line = footnote(line)
line = illustration(line)
line = sidenote(line)
blanks = 0
output.write(line + "\n")
output.write(postamble)
if inFN:
sys.stderr.write("END OF FILE in a footnote\n")
exit(1)
if inIll:
sys.stderr.write("END OF FILE in an illustration\n")
exit(1)
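# Typical invocation (for illustration):
#   python tofpgen.py mybook-src.txt
# With no argument, the single *-src.txt file in the current directory is used;
# the converted text is written to a file named "out".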
|
# Python Substrate Interface Library
#
# Copyright 2018-2023 Stichting Polkascan (Polkascan Foundation).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from substrateinterface import SubstrateInterface
def subscription_handler(storage_key, updated_obj, update_nr, subscription_id):
print(f"Update for {storage_key}: {updated_obj.value}")
substrate = SubstrateInterface(url="ws://127.0.0.1:9944")
# Accounts to track
storage_keys = [
substrate.create_storage_key(
"System", "Account", ["5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY"]
),
substrate.create_storage_key(
"System", "Account", ["5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty"]
),
substrate.create_storage_key(
"System", "Events"
),
]
result = substrate.subscribe_storage(
storage_keys=storage_keys, subscription_handler=subscription_handler
)
|
"""
Data visualization functions to simplify plots in Seaborn.
"""
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def _get_steps(df, ax, width):
"""Properly spaces values in x-axis."""
steps = ( ( df[ax].max() - df[ax].min() ) / width / 10 )
steps = max(int(round(steps)), 1)
return steps
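# Worked example (assumed numbers, for illustration): with a column spanning
# df[ax].max() - df[ax].min() == 200 and width == 5, steps = 200 / 5 / 10 = 4,
# so roughly every 4th tick label is kept when set_xticklabels(step=steps) is applied.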
def get_scatterplot(df, x, y, height=5, width=5, hue=None):
"""Create a scatterplot using Seaborn.
"""
p = sns.catplot(
x=x,
y=y,
hue=hue,
kind="swarm",
data=df,
height=height,
aspect=width/height,
#palette="Set1",
palette=sns.color_palette(['blue', 'grey'])
)
step = _get_steps(df, x, width)
p.set_xticklabels(rotation=90, step=step)
p.fig.suptitle("Scatterplot: {} vs {}".format(x, y))
return p
def get_historgram(df, x, height=5, width=5, bins=20):
"""Create a histogram plot using Seaborn
Docs: https://seaborn.pydata.org/tutorial/distributions.html
"""
fig, ax = plt.subplots()
fig.set_size_inches(width, height)
p = sns.distplot(
df[x],
bins=bins,
kde=False,
#hue=None,
color='blue',
)
p.set(title = "Histogram: {}".format(x))
return p
def get_boxplot(df, x, y, height=5, width=5):
"""Create a boxplot using Seaborn
"""
fig, ax = plt.subplots()
fig.set_size_inches(width, height)
p = sns.boxplot(x=x, y=y, data=df)
p.set(title = "Box Chart: {} vs {}".format(x, y))
return p
def get_bar_chart(df, x, y, height=5, width=5):
"""Create a bar chart using Seaborn
"""
p = sns.catplot(
x=x, y=y, kind='bar',
data=df,
height=height,
aspect=width/height,
)
p.set_xticklabels(rotation=90, step=1)
p.fig.suptitle("Bar Chart: {} vs {}".format(x, y))
return p
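# Usage sketch (hypothetical dataframe, for illustration):
#   df = pd.DataFrame({"group": list("aabb"), "score": [1, 2, 3, 4]})
#   get_bar_chart(df, x="group", y="score", height=4, width=6)
#   plt.show()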
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
* http://nghiaho.com/?page_id=846
NB Opposite Sign Convention to GLM
--------------------------------------
* http://planning.cs.uiuc.edu/node102.html
* http://planning.cs.uiuc.edu/node103.html
| r11 r12 r13 |
| r21 r22 r23 |
| r31 r32 r33 |
Rz (yaw) counterclockwise "alpha"
cosZ -sinZ 0
sinZ cosZ 0
0 0 1
Ry (pitch) counterclockwise "beta"
cosY 0 sinY
0 1 0
-sinY 0 cosY
Rx (roll) counterclockwise "gamma"
1 0 0
0 cosX -sinX
0 sinX cosX
yawPitchRoll
Rz Ry Rx
Rzyx = Rz(alpha).Ry(beta).Rx(gamma)
^^^^^^ roll first
First roll Rx, then pitch Ry then finally yaw Rz
11: cosZ cosY 12: cosZ sinY sinX - sinZ cosX 13: cosZ sinY cosX + sinZ sinX
21: sinZ cosY 22: sinZ sinY sinX + cosZ cosX 23: sinZ sinY cosX - cosZ sinX
31: -sinY 32: cosY sinX 33: cosY cosX
r32/r33 = cosY sinX / cosY cosX = tanX
r32^2 + r33^2 = cosY^2 sinX^2 + cosY^2 cosX^2 = cosY^2
-r31/sqrt(r32^2 + r33^2) = sinY / cosY = tanY
r21/r11 = tanZ
r11^2 + r21^2 = cosZ^2 cosY^2 + sinZ^2 cosY^2 = cosY^2 "cosb"^2
-r31/sqrt(r11^2 + r21^2) = sinY / cosY = tanY
cosY->0 => sinY=>1
... DONT FOLLOW THE LEAP TO sinZ = 0, cosZ = 1
-r23/r22 = -(sinZ sinY cosX - cosZ sinX) / (sinZ sinY sinX + cosZ cosX )
how is this meant to yield tanY ??? ... perhaps a mal-assumption made here that sinY->0 ???
cosZ sinX / cosZ cosX -> tanX (if sinY->0, BUT IT DOESNT ???)
* https://d3cw3dd2w32x2b.cloudfront.net/wp-content/uploads/2012/07/euler-angles1.pdf
* ~/opticks_refs/Extracting_Euler_Angles.pdf
"""
import numpy as np
def GetAngles(m):
"""
51 G4ThreeVector G4GDMLWriteDefine::GetAngles(const G4RotationMatrix& mtx)
52 {
53 G4double x,y,z;
54 G4RotationMatrix mat = mtx;
55 mat.rectify(); // Rectify matrix from possible roundoff errors
56
57 // Direction of rotation given by left-hand rule; clockwise rotation
58
59 static const G4double kMatrixPrecision = 10E-10;
60 const G4double cosb = std::sqrt(mtx.xx()*mtx.xx()+mtx.yx()*mtx.yx());
.. r11^2 + r21^2
61
62 if (cosb > kMatrixPrecision)
63 {
64 x = std::atan2(mtx.zy(),mtx.zz());
.. r32 r33
65 y = std::atan2(-mtx.zx(),cosb);
.. -r31
66 z = std::atan2(mtx.yx(),mtx.xx());
.. r21 r11
67 }
68 else
69 {
70 x = std::atan2(-mtx.yz(),mtx.yy());
.. -r23 r22
71 y = std::atan2(-mtx.zx(),cosb);
.. huh division by smth very small... unhealthy
.. -r31 sqrt(r11^2 + r21^2)
72 z = 0.0;
73 }
74
75 return G4ThreeVector(x,y,z);
76 }
"""
pass
def extractEulerAnglesXYZ(M, unit=np.pi/180., dtype=np.float32):
"""
https://github.com/jzrake/glm/commit/d3313421c664db5bd1b672d39ba3faec0d430117
https://github.com/g-truc/glm/blob/master/glm/gtx/euler_angles.inl
https://gamedev.stackexchange.com/questions/50963/how-to-extract-euler-angles-from-transformation-matrix
~/opticks_refs/Extracting_Euler_Angles.pdf
::
template<typename T>
GLM_FUNC_DECL void extractEulerAngleXYZ(mat<4, 4, T, defaultp> const & M,
T & t1,
T & t2,
T & t3)
{
float T1 = glm::atan2<T, defaultp>(M[2][1], M[2][2]);
float C2 = glm::sqrt(M[0][0]*M[0][0] + M[1][0]*M[1][0]);
float T2 = glm::atan2<T, defaultp>(-M[2][0], C2);
float S1 = glm::sin(T1);
float C1 = glm::cos(T1);
float T3 = glm::atan2<T, defaultp>(S1*M[0][2] - C1*M[0][1], C1*M[1][1] - S1*M[1][2 ]);
t1 = -T1;
t2 = -T2;
t3 = -T3;
}
"""
T1 = np.arctan2(M[2][1], M[2][2]);
C2 = np.sqrt(M[0][0]*M[0][0] + M[1][0]*M[1][0]);
T2 = np.arctan2(-M[2][0], C2);
S1 = np.sin(T1);
C1 = np.cos(T1);
T3 = np.arctan2(S1*M[0][2] - C1*M[0][1], C1*M[1][1] - S1*M[1][2 ]);
t1 = -T1;
t2 = -T2;
t3 = -T3;
return np.array([t1/unit,t2/unit,t3/unit], dtype=dtype)
def yawPitchRoll(yaw, pitch, roll, dtype=np.float32):
"""
yaw: Z
https://github.com/g-truc/glm/blob/master/glm/gtx/euler_angles.inl
::
template<typename T>
GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> yawPitchRoll
(
T const & yaw,
T const & pitch,
T const & roll
)
{
T tmp_ch = glm::cos(yaw);
T tmp_sh = glm::sin(yaw);
T tmp_cp = glm::cos(pitch);
T tmp_sp = glm::sin(pitch);
T tmp_cb = glm::cos(roll);
T tmp_sb = glm::sin(roll);
mat<4, 4, T, defaultp> Result;
Result[0][0] = tmp_ch * tmp_cb + tmp_sh * tmp_sp * tmp_sb;
Result[0][1] = tmp_sb * tmp_cp;
Result[0][2] = -tmp_sh * tmp_cb + tmp_ch * tmp_sp * tmp_sb;
Result[0][3] = static_cast<T>(0);
Result[1][0] = -tmp_ch * tmp_sb + tmp_sh * tmp_sp * tmp_cb;
Result[1][1] = tmp_cb * tmp_cp;
Result[1][2] = tmp_sb * tmp_sh + tmp_ch * tmp_sp * tmp_cb;
Result[1][3] = static_cast<T>(0);
Result[2][0] = tmp_sh * tmp_cp;
Result[2][1] = -tmp_sp;
Result[2][2] = tmp_ch * tmp_cp;
Result[2][3] = static_cast<T>(0);
Result[3][0] = static_cast<T>(0);
Result[3][1] = static_cast<T>(0);
Result[3][2] = static_cast<T>(0);
Result[3][3] = static_cast<T>(1);
return Result;
}
"""
tmp_ch = np.cos(yaw);
tmp_sh = np.sin(yaw);
tmp_cp = np.cos(pitch);
tmp_sp = np.sin(pitch);
tmp_cb = np.cos(roll);
tmp_sb = np.sin(roll);
Result = np.eye(4, dtype=dtype)
Result[0][0] = tmp_ch * tmp_cb + tmp_sh * tmp_sp * tmp_sb;
Result[0][1] = tmp_sb * tmp_cp;
Result[0][2] = -tmp_sh * tmp_cb + tmp_ch * tmp_sp * tmp_sb;
Result[0][3] = 0;
Result[1][0] = -tmp_ch * tmp_sb + tmp_sh * tmp_sp * tmp_cb;
Result[1][1] = tmp_cb * tmp_cp;
Result[1][2] = tmp_sb * tmp_sh + tmp_ch * tmp_sp * tmp_cb;
Result[1][3] = 0;
Result[2][0] = tmp_sh * tmp_cp;
Result[2][1] = -tmp_sp;
Result[2][2] = tmp_ch * tmp_cp;
Result[2][3] = 0;
Result[3][0] = 0;
Result[3][1] = 0;
Result[3][2] = 0;
Result[3][3] = 1;
return Result;
def eulerAngleX(angleX, dtype=np.float32):
"""
* opposite sign to *roll* of http://planning.cs.uiuc.edu/node102.html
/usr/local/opticks/externals/glm/glm-0.9.6.3/glm/gtx/euler_angles.inl::
35 template <typename T>
36 GLM_FUNC_QUALIFIER tmat4x4<T, defaultp> eulerAngleX
37 (
38 T const & angleX
39 )
40 {
41 T cosX = glm::cos(angleX);
42 T sinX = glm::sin(angleX);
43
44 return tmat4x4<T, defaultp>(
45 T(1), T(0), T(0), T(0),
46 T(0), cosX, sinX, T(0),
47 T(0),-sinX, cosX, T(0),
48 T(0), T(0), T(0), T(1));
49 }
50
"""
m = np.eye(4, dtype=dtype)
cosX = np.cos(angleX);
sinX = np.sin(angleX);
m[0] = [1., 0., 0., 0.]
m[1] = [0., cosX, sinX, 0.]
m[2] = [0., -sinX, cosX, 0.]
m[3] = [0., 0., 0., 1.]
return m
def eulerAngleY(angleY, dtype=np.float32):
"""
* opposite sign to *pitch* of http://planning.cs.uiuc.edu/node102.html
/usr/local/opticks/externals/glm/glm-0.9.6.3/glm/gtx/euler_angles.inl
::
51 template <typename T>
52 GLM_FUNC_QUALIFIER tmat4x4<T, defaultp> eulerAngleY
53 (
54 T const & angleY
55 )
56 {
57 T cosY = glm::cos(angleY);
58 T sinY = glm::sin(angleY);
59
60 return tmat4x4<T, defaultp>(
61 cosY, T(0), -sinY, T(0),
62 T(0), T(1), T(0), T(0),
63 sinY, T(0), cosY, T(0),
64 T(0), T(0), T(0), T(1));
65 }
"""
m = np.eye(4, dtype=dtype)
cosY = np.cos(angleY);
sinY = np.sin(angleY);
m[0] = [cosY, 0., -sinY, 0.]
m[1] = [0., 1., 0., 0.]
m[2] = [sinY, 0., cosY, 0.]
m[3] = [0., 0., 0., 1.]
return m
def eulerAngleZ(angleZ, dtype=np.float32):
"""
* opposite sign to *yaw* of http://planning.cs.uiuc.edu/node102.html
/usr/local/opticks/externals/glm/glm-0.9.6.3/glm/gtx/euler_angles.inl
::
67 template <typename T>
68 GLM_FUNC_QUALIFIER tmat4x4<T, defaultp> eulerAngleZ
69 (
70 T const & angleZ
71 )
72 {
73 T cosZ = glm::cos(angleZ);
74 T sinZ = glm::sin(angleZ);
75
76 return tmat4x4<T, defaultp>(
77 cosZ, sinZ, T(0), T(0),
78 -sinZ, cosZ, T(0), T(0),
79 T(0), T(0), T(1), T(0),
80 T(0), T(0), T(0), T(1));
81 }
"""
m = np.eye(4, dtype=dtype)
cosZ = np.cos(angleZ);
sinZ = np.sin(angleZ);
m[0] = [ cosZ, sinZ, 0., 0.]
m[1] = [-sinZ, cosZ, 0., 0.]
m[2] = [ 0., 0., 1., 0.]
m[3] = [ 0., 0., 0., 1.]
return m
def eulerAngleXYZ(t123, unit=np.pi/180., dtype=np.float32):
"""
::
In [14]: eulerAngleXYZ([45,0,0])
Out[14]:
array([[ 1. , 0. , 0. , 0. ],
[-0. , 0.7071, 0.7071, 0. ],
[ 0. , -0.7071, 0.7071, 0. ],
[ 0. , 0. , 0. , 1. ]], dtype=float32)
In [15]: eulerAngleXYZ([0,45,0])
Out[15]:
array([[ 0.7071, 0. , -0.7071, 0. ],
[-0. , 1. , 0. , 0. ],
[ 0.7071, -0. , 0.7071, 0. ],
[ 0. , 0. , 0. , 1. ]], dtype=float32)
In [16]: eulerAngleXYZ([0,0,45])
Out[16]:
array([[ 0.7071, 0.7071, 0. , 0. ],
[-0.7071, 0.7071, 0. , 0. ],
[ 0. , -0. , 1. , 0. ],
[ 0. , 0. , 0. , 1. ]], dtype=float32)
In [11]: extractEulerAnglesXYZ(eulerAngleXYZ([45,0,0]))
Out[11]: array([ 45., 0., 0.], dtype=float32)
In [12]: extractEulerAnglesXYZ(eulerAngleXYZ([0,45,0]))
Out[12]: array([ 0., 45., -0.], dtype=float32)
In [13]: extractEulerAnglesXYZ(eulerAngleXYZ([0,0,45]))
Out[13]: array([ 0., 0., 45.], dtype=float32)
https://github.com/g-truc/glm/blob/master/glm/gtx/euler_angles.inl
::
template<typename T>
GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXYZ
(
T const & t1,
T const & t2,
T const & t3
)
{
T c1 = glm::cos(-t1);
T c2 = glm::cos(-t2);
T c3 = glm::cos(-t3);
T s1 = glm::sin(-t1);
T s2 = glm::sin(-t2);
T s3 = glm::sin(-t3);
mat<4, 4, T, defaultp> Result;
Result[0][0] = c2 * c3;
Result[0][1] =-c1 * s3 + s1 * s2 * c3;
Result[0][2] = s1 * s3 + c1 * s2 * c3;
Result[0][3] = static_cast<T>(0);
Result[1][0] = c2 * s3;
Result[1][1] = c1 * c3 + s1 * s2 * s3;
Result[1][2] =-s1 * c3 + c1 * s2 * s3;
Result[1][3] = static_cast<T>(0);
Result[2][0] =-s2;
Result[2][1] = s1 * c2;
Result[2][2] = c1 * c2;
Result[2][3] = static_cast<T>(0);
Result[3][0] = static_cast<T>(0);
Result[3][1] = static_cast<T>(0);
Result[3][2] = static_cast<T>(0);
Result[3][3] = static_cast<T>(1);
return Result;
}
"""
a = np.asarray(t123, dtype=dtype)
a *= unit
t1 = a[0]
t2 = a[1]
t3 = a[2]
c1 = np.cos(-t1);
c2 = np.cos(-t2);
c3 = np.cos(-t3);
s1 = np.sin(-t1);
s2 = np.sin(-t2);
s3 = np.sin(-t3);
Result = np.eye(4, dtype=dtype);
Result[0][0] = c2 * c3;
Result[0][1] =-c1 * s3 + s1 * s2 * c3;
Result[0][2] = s1 * s3 + c1 * s2 * c3;
Result[0][3] = 0;
Result[1][0] = c2 * s3;
Result[1][1] = c1 * c3 + s1 * s2 * s3;
Result[1][2] =-s1 * c3 + c1 * s2 * s3;
Result[1][3] = 0;
Result[2][0] =-s2;
Result[2][1] = s1 * c2;
Result[2][2] = c1 * c2;
Result[2][3] = 0;
Result[3][0] = 0;
Result[3][1] = 0;
Result[3][2] = 0;
Result[3][3] = 1;
return Result;
if __name__ == '__main__':
pass
# YXZ
#m = yawPitchRoll( )
t1 = 10.
t2 = 20.
t3 = 30.
a0 = np.array([t1,t2,t3])
m = eulerAngleXYZ(a0, unit=np.pi/180. )
a1 = extractEulerAnglesXYZ( m, unit=np.pi/180. )
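# Round-trip sanity check (sketch): extracting angles from the composed matrix
# should reproduce the input angles to within float32 precision.
assert np.allclose(a0, a1, atol=1e-3), (a0, a1)
print("eulerAngleXYZ / extractEulerAnglesXYZ round trip:", a0, "->", a1)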
|
from openpyxl import load_workbook
def copyData(sourceFilePath,destinationFilePath):
wb = load_workbook(sourceFilePath)
sheet = wb.sheetnames
# NOTE: joining the sheet-name list assumes the workbook contains a single sheet
dataSheet = ''.join(sheet)
sheet = wb[dataSheet]
# get max row count
max_row = sheet.max_row
#get max column count
max_column = sheet.max_column
a = ''
for i in range(1, max_row + 1):
for j in range(1, max_column + 1):
cell_obj = sheet.cell(row=i, column=j)
result = str(cell_obj.value)
a = a + result + '|'
a = a + '\n'
with open(destinationFilePath,"a") as myfile:
myfile.write(a)
a = ''
def main():
copyData("..//SpreadSheet//SampleData.xlsx","..//SpreadSheet//ResultData.txt")
if __name__ == '__main__':
main()
|
# Submission by Smit Rao
# Email: raosmit2@gmail.com
VAR = 'x'
def differentiate(expr: str) -> str:
'''Differentiate simple expression expr and return the result as a string.
An expression is simple if its derivation only requires use of the
power/exponent rule.
Keep negative powers - do not convert to fractions.
Preconditions:
- all numbers are integers
- all powers EXCEPT powers of one are prefixed with a '^'
AND they can be negative
AND they can be of any length
ex: x to the power of negative one hundred twenty-three is x^-123
- coefficients can be of any length
- all input expressions are single variable: 'x'
AND have only one term
:param expr: the expression to differentiate
:return: the derivative of the input expression
>>> differentiate('44')
'0'
>>> differentiate('x^123')
'123x^122'
>>> differentiate('-12x^3')
'-36x^2'
>>> differentiate('x')
'1'
>>> differentiate('')
'0'
'''
global VAR # use this for the variable
lst = expr.split('^')
if len(lst) == 1:
if VAR in lst[0]:
sub = lst[0][: -1]
if sub == '':
return '1'
return sub
else:
return '0'
lst[0] = lst[0][: -1]
if lst[0] == '':
lst[0] = 1
else:
lst[0] = int(lst[0])
lst[0] *= int(lst[1])
lst[0] = str(lst[0])
lst[0] += VAR
lst[1] = str(int(lst[1]) - 1)
return '^'.join(lst)
def calculate_heat(d: dict) -> int:
'''Returns the total heat generated by all of the magic orbs
Gazoulee is a fortune teller that owns many magic orbs, each of
which generate a constant amount of heat. He is building a
house that he will move into shortly, which needs to be kept at a steady
temperature for the rest of his contraptions to function correctly.
Gazoulee must tell his HVAC contractor how much heat will be
generated by his magic orbs so that the contractor can properly
design the HVAC system.
Gazoulee has hired you to do the total heat calculations for him,
given necessary quantities and measurements.
NOTES:
- all distances are in centimeters.
- every 2 square centimeters of a magic orb dissipates 4 J (4 joules)
of heat.
- answers should be rounded down to the nearest whole joule.
- each dictionary entry contains the information for that magic orb
type
- the value of each dictionary entry is a tuple of:
(<radius>, <number of magic orbs with this dimension>)
:param d: dictionary containing the details of all of the magic orbs
:return: the total heat generated by all of Gazoulee's magic
orbs in joules
>>> calculate_heat({"Standard Wizarding Orb": \
(7.5, 12), "Alfredo the Orb": (17.5, 1)})
24662
'''
pi = 3.141592653589793
joules = 0
for tup in d.values():
joules += tup[0] * tup[0] * pi * tup[1] * 4 * 2
return round(joules)
def is_balanced(lst: list) -> bool:
'''Returns whether list lst is composed entirely of only 'C', 'S' pairs.
NOTES:
- only lists composed of 'C's and 'S's are balanced lists
- the 'C' and 'S' pairs can be interleaved
HINT:
- every 'C' must eventually have a corresponding 'S'
further in the list
:param lst:
:return: whether the list lst is balanced
>>> is_balanced(['C', 'S'])
True
>>> is_balanced(['S', 'C'])
False
>>> is_balanced(['C', 'S', 'S'])
False
>>> is_balanced(['C', 'S', 'C', 'C', 'S', 'C', 'S', 'S'])
True
'''
C_count = 0
S_count = 0
balanced = True
if len(lst) % 2 == 1:
return not balanced
for index in range(len(lst)):
if lst[index] != 'C' and lst[index] != 'S':
return not balanced
if C_count < S_count:
return not balanced
if lst[index] == 'C':
C_count += 1
if lst[index] == 'S':
S_count += 1
balanced = (C_count == S_count)
return balanced
if __name__ == '__main__':
import doctest
doctest.testmod()
|
# Copyright (c) 2017-2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# fmt: off
# isort: skip_file
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from . import party_management_service_pb2 as com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2
class PartyManagementServiceStub(object):
"""This service allows inspecting the party management state of the ledger known to the participant
and managing the participant-local party metadata.
The authorization rules for its RPCs are specified on the ``<RpcName>Request``
messages as boolean expressions over these facts:
(1) ``HasRight(r)`` denoting whether the authenticated user has right ``r`` and
(2) ``IsAuthenticatedIdentityProviderAdmin(idp)`` denoting whether ``idp`` is equal to the ``identity_provider_id``
of the authenticated user and the user has an IdentityProviderAdmin right.
The fields of request messages (and sub-messages) are marked either as ``Optional`` or ``Required``:
(1) ``Optional`` denoting the client may leave the field unset when sending a request.
(2) ``Required`` denoting the client must set the field to a non-default value when sending a request.
A party details resource is described by the ``PartyDetails`` message,
A party details resource, once it has been created, can be modified using the ``UpdatePartyDetails`` RPC.
The only fields that can be modified are those marked as ``Modifiable``.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetParticipantId = channel.unary_unary(
'/com.daml.ledger.api.v1.admin.PartyManagementService/GetParticipantId',
request_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.GetParticipantIdRequest.SerializeToString,
response_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.GetParticipantIdResponse.FromString,
)
self.GetParties = channel.unary_unary(
'/com.daml.ledger.api.v1.admin.PartyManagementService/GetParties',
request_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.GetPartiesRequest.SerializeToString,
response_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.GetPartiesResponse.FromString,
)
self.ListKnownParties = channel.unary_unary(
'/com.daml.ledger.api.v1.admin.PartyManagementService/ListKnownParties',
request_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.ListKnownPartiesRequest.SerializeToString,
response_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.ListKnownPartiesResponse.FromString,
)
self.AllocateParty = channel.unary_unary(
'/com.daml.ledger.api.v1.admin.PartyManagementService/AllocateParty',
request_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.AllocatePartyRequest.SerializeToString,
response_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.AllocatePartyResponse.FromString,
)
self.UpdatePartyDetails = channel.unary_unary(
'/com.daml.ledger.api.v1.admin.PartyManagementService/UpdatePartyDetails',
request_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.UpdatePartyDetailsRequest.SerializeToString,
response_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.UpdatePartyDetailsResponse.FromString,
)
class PartyManagementServiceServicer(object):
"""This service allows inspecting the party management state of the ledger known to the participant
and managing the participant-local party metadata.
The authorization rules for its RPCs are specified on the ``<RpcName>Request``
messages as boolean expressions over these facts:
(1) ``HasRight(r)`` denoting whether the authenticated user has right ``r`` and
(2) ``IsAuthenticatedIdentityProviderAdmin(idp)`` denoting whether ``idp`` is equal to the ``identity_provider_id``
of the authenticated user and the user has an IdentityProviderAdmin right.
The fields of request messages (and sub-messages) are marked either as ``Optional`` or ``Required``:
(1) ``Optional`` denoting the client may leave the field unset when sending a request.
(2) ``Required`` denoting the client must set the field to a non-default value when sending a request.
A party details resource is described by the ``PartyDetails`` message,
A party details resource, once it has been created, can be modified using the ``UpdatePartyDetails`` RPC.
The only fields that can be modified are those marked as ``Modifiable``.
"""
def GetParticipantId(self, request, context):
"""Return the identifier of the participant.
All horizontally scaled replicas should return the same id.
daml-on-kv-ledger: returns an identifier supplied on command line at launch time
canton: returns globally unique identifier of the participant
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetParties(self, request, context):
"""Get the party details of the given parties. Only known parties will be
returned in the list.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListKnownParties(self, request, context):
"""List the parties known by the participant.
The list returned contains parties whose ledger access is facilitated by
the participant and the ones maintained elsewhere.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AllocateParty(self, request, context):
"""Allocates a new party on a ledger and adds it to the set managed by the participant.
Caller specifies a party identifier suggestion, the actual identifier
allocated might be different and is implementation specific.
Caller can specify party metadata that is stored locally on the participant.
This call may:
- Succeed, in which case the actual allocated identifier is visible in
the response.
- Respond with a gRPC error
daml-on-kv-ledger: suggestion's uniqueness is checked by the validators in
the consensus layer and call rejected if the identifier is already present.
canton: completely different globally unique identifier is allocated.
Behind the scenes calls to an internal protocol are made. As that protocol
is richer than the surface protocol, the arguments take implicit values
The party identifier suggestion must be a valid party name. Party names are required to be non-empty US-ASCII strings built from letters, digits, space,
colon, minus and underscore limited to 255 chars
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdatePartyDetails(self, request, context):
"""Update selected modifiable participant-local attributes of a party details resource.
Can update the participant's local information for local parties.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_PartyManagementServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetParticipantId': grpc.unary_unary_rpc_method_handler(
servicer.GetParticipantId,
request_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.GetParticipantIdRequest.FromString,
response_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.GetParticipantIdResponse.SerializeToString,
),
'GetParties': grpc.unary_unary_rpc_method_handler(
servicer.GetParties,
request_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.GetPartiesRequest.FromString,
response_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.GetPartiesResponse.SerializeToString,
),
'ListKnownParties': grpc.unary_unary_rpc_method_handler(
servicer.ListKnownParties,
request_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.ListKnownPartiesRequest.FromString,
response_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.ListKnownPartiesResponse.SerializeToString,
),
'AllocateParty': grpc.unary_unary_rpc_method_handler(
servicer.AllocateParty,
request_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.AllocatePartyRequest.FromString,
response_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.AllocatePartyResponse.SerializeToString,
),
'UpdatePartyDetails': grpc.unary_unary_rpc_method_handler(
servicer.UpdatePartyDetails,
request_deserializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.UpdatePartyDetailsRequest.FromString,
response_serializer=com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.UpdatePartyDetailsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'com.daml.ledger.api.v1.admin.PartyManagementService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class PartyManagementService(object):
"""This service allows inspecting the party management state of the ledger known to the participant
and managing the participant-local party metadata.
The authorization rules for its RPCs are specified on the ``<RpcName>Request``
messages as boolean expressions over these facts:
(1) ``HasRight(r)`` denoting whether the authenticated user has right ``r`` and
(2) ``IsAuthenticatedIdentityProviderAdmin(idp)`` denoting whether ``idp`` is equal to the ``identity_provider_id``
of the authenticated user and the user has an IdentityProviderAdmin right.
The fields of request messages (and sub-messages) are marked either as ``Optional`` or ``Required``:
(1) ``Optional`` denoting the client may leave the field unset when sending a request.
(2) ``Required`` denoting the client must set the field to a non-default value when sending a request.
A party details resource is described by the ``PartyDetails`` message,
A party details resource, once it has been created, can be modified using the ``UpdatePartyDetails`` RPC.
The only fields that can be modified are those marked as ``Modifiable``.
"""
@staticmethod
def GetParticipantId(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/com.daml.ledger.api.v1.admin.PartyManagementService/GetParticipantId',
com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.GetParticipantIdRequest.SerializeToString,
com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.GetParticipantIdResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetParties(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/com.daml.ledger.api.v1.admin.PartyManagementService/GetParties',
com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.GetPartiesRequest.SerializeToString,
com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.GetPartiesResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListKnownParties(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/com.daml.ledger.api.v1.admin.PartyManagementService/ListKnownParties',
com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.ListKnownPartiesRequest.SerializeToString,
com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.ListKnownPartiesResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def AllocateParty(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/com.daml.ledger.api.v1.admin.PartyManagementService/AllocateParty',
com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.AllocatePartyRequest.SerializeToString,
com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.AllocatePartyResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdatePartyDetails(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/com.daml.ledger.api.v1.admin.PartyManagementService/UpdatePartyDetails',
com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.UpdatePartyDetailsRequest.SerializeToString,
com_dot_daml_dot_ledger_dot_api_dot_v1_dot_admin_dot_party__management__service__pb2.UpdatePartyDetailsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
|
# libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
from scipy.special import factorial
df = pd.read_csv('Absenteeism_at_work.csv', delimiter=';')
plt.plot(df['Age'], df['Absenteeism time in hours'], 'bo')
plt.xlabel('Age')
plt.ylabel('Absenteeism time in hours')
plt.title('Absenteeism at Work')
plt.savefig('age.png', dpi=128)
plt.close()
plt.plot(df['Distance from Residence to Work'], df['Absenteeism time in hours'], 'r+')
plt.xlabel('Distance from Residence to Work')
plt.ylabel('Absenteeism time in hours')
plt.title('Absenteeism at Work')
plt.savefig('distance.png', dpi=128)
plt.close()
plt.hist(df['Age'], bins=20, density=True)
plt.xlabel('Age of Employees')
plt.ylabel('Probability wrt Total Employees')
plt.title('Histogram of Age Factor of Employees')
plt.savefig('histogram_age.png', dpi=128)
plt.close()
plt.hist(df['Distance from Residence to Work'], bins=20, density=True)
plt.xlabel('Distance from Residence to Work')
plt.ylabel('Probability wrt Total Employees')
plt.title('Histogram of Distance Factor of Employees')
plt.savefig('histogram_distance.png', dpi=128)
plt.close()
plt.hist(df['Service time'], bins=5, density=True)
plt.xlabel('Service Time')
plt.ylabel('Probability wrt Total Employees')
plt.title('Histogram of Service Time Factor of Employees')
plt.savefig('histogram_service_time.png', dpi=128)
plt.close()
plt.hist(df['Body mass index'], bins=5, density=True)
plt.xlabel('Body mass index')
plt.ylabel('Probability wrt Total Employees')
plt.title('Histogram of BMI Factor of Employees')
plt.savefig('histogram_bmi.png', dpi=128)
plt.close()
def likelihood(theta, n, x):
return (factorial(n) / (factorial(x) * factorial(n - x))) * (theta ** x) * ((1 - theta) ** (n - x))
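# Worked example (for illustration): likelihood(0.7, 10, 7) = C(10,7) * 0.7**7 * 0.3**3 ≈ 0.267,
# which is the maximum over theta for n=10 trials with x=7 successes.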
n = 10.
x = 7.
prior = x / n
possible_theta_values = list(map(lambda x: x / 100, range(100)))
likelihoods = list(map(lambda theta: likelihood(theta, n, x), possible_theta_values))
mle = possible_theta_values[np.argmax(likelihoods)]
f, ax = plt.subplots(1)
ax.plot(possible_theta_values, likelihoods)  # plot the likelihood curve to match the Theta/Likelihood axis labels below
ax.axvline(mle, linestyle="--")
ax.set_xlabel("Theta")
ax.set_ylabel("Likelihood")
ax.grid()
ax.set_title('Likelihood of Age')
plt.close()  # NOTE: the likelihood figure is closed without being saved or shown
|
from typing import Optional
# TreeNode is assumed to be predefined (the standard LeetCode binary tree node with val/left/right).
class Solution:
def isSymmetric(self, root: Optional[TreeNode]) -> bool:
def inverse(root):
if not root:
return root
l = inverse(root.left)
r = inverse(root.right)
root.left, root.right = r, l
return root
def same(t1, t2):
if not t1 and not t2:
return True
if not t1 or not t2 or t1.val != t2.val:
return False
return same(t1.left, t2.left) and same(t1.right, t2.right)
return same(inverse(root.left), root.right)
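# Usage sketch (assumes the standard TreeNode(val, left, right) constructor):
#   root = TreeNode(1, TreeNode(2, TreeNode(3), TreeNode(4)),
#                   TreeNode(2, TreeNode(4), TreeNode(3)))
#   Solution().isSymmetric(root)  # -> True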
|
import time
import random
class Trade:
#Simple class to represent a trade
def __init__(self, timestamp, quantity, indicator, price):
self.timestamp = timestamp
self.quantity = quantity
self.indicator = indicator
self.price = price
class Stock:
#Simple class to represent a Stock superclass
#From test data given, assume that all stocks will have these fields
def __init__(self, stock_symbol, last_dividend, par_value):
self.symbol = stock_symbol
self.last_dividend = last_dividend
self.par_value = par_value
self.trades = []
def get_dividend_yield(self, market_price):
raise NotImplementedError("get_dividend_yield not implemented yet")
def get_PE_ratio (self, market_price):
try:
return market_price / self.last_dividend
except ZeroDivisionError as e:
print (self.symbol, "P/E ratio Err:", e)
return None
def record_trade(self, trade):
if isinstance(trade, Trade):
self.trades.append(trade)
else:
raise TypeError("Must pass a Trade object to Stock.record_trade()", trade)
def get_volume_weighted_stock_price(self, time_margin_sec):
#Ref basic arithmetic: dividend/divisor = quotient
dividend = 0
divisor = 0
time_cut_off = time.time() - time_margin_sec
#assuming trade data comes in real time, this list will be chronologically sorted
#traversing most recent data then becomes easy
for trade in reversed(self.trades):
if trade.timestamp < time_cut_off:
break
dividend += trade.price * trade.quantity
divisor += trade.quantity
try:
return dividend / divisor
except ZeroDivisionError as e:
print (self.symbol, "Volume weighted stock price Err:", e)
return None
class CommonStock(Stock):
#Stock class extension to represent a Common Stock
def __init__(self, stock_symbol, last_dividend, par_value):
super().__init__(stock_symbol, last_dividend, par_value)
def get_dividend_yield(self, market_price):
try:
return self.last_dividend / market_price
except ZeroDivisionError as e:
print (self.symbol, "Dividend yield Err:", e)
return None
class PreferredStock(Stock):
#Stock class extension to represent a Preferred Stock
def __init__(self, stock_symbol, last_dividend, fixed_dividend, par_value):
super().__init__(stock_symbol, last_dividend, par_value)
self.fixed_dividend = fixed_dividend
def get_dividend_yield(self, market_price):
try:
return (self.fixed_dividend * self.par_value) / market_price
except ZeroDivisionError as e:
print (self.symbol, "Dividend yield Err:", e)
return None
FIFTEEN_MINUTES = 15 * 60
PRINT_FORMAT = ".3f"
def generate_trade():
return Trade(time.time(), random.randint(1,20), random.choice('BS'), random.randint(60, 500))
#Even though not utilized below, a dictionary to quickly look up a specific stock seems rational
stocks = {
"TEA" : CommonStock("TEA", 0, 100),
"POP" : CommonStock("POP", 8, 100),
"ALE" : CommonStock("ALE", 23, 60),
"GIN" : PreferredStock("GIN", 8, 0.02, 100),
"JOE" : CommonStock("JOE", 13, 250)
}
#"Calculate the GBCE All Share Index using the geometric mean of prices for all stocks"
#TODO: Ask - Is this geometric mean supposed to be based on Volume Weighted Stock prices, or Market prices?
MP_geometric_mean = 1
VWSP_geometric_mean = 1
print("Stock, Market price, Dividend yield, P/E Ratio, Volume Weighted Stock Price")
for stock in stocks.values():
#Generate and record some random trades
for i in range(1, random.randint(2,20)):
stock.record_trade(generate_trade())
#Generate random market price and calculate metrics
rmp = random.randint(60, 500)
div_yield = stock.get_dividend_yield(rmp)
pe_ratio = stock.get_PE_ratio(rmp)
vw_stock_price = stock.get_volume_weighted_stock_price(FIFTEEN_MINUTES)
MP_geometric_mean *= rmp
VWSP_geometric_mean *= vw_stock_price
#printing table entries with somewhat nicer format
print(
stock.symbol,
rmp,
"None" if div_yield is None else format(div_yield, PRINT_FORMAT),
"None" if pe_ratio is None else format(pe_ratio, PRINT_FORMAT),
"None" if vw_stock_price is None else format(vw_stock_price, PRINT_FORMAT))
#NOTE: pow(x, 1/n) returns nth root of x.
#It is inaccurate due to floating point arithmetic, but seems like a commonly used solution
#EX: pow(5**6, 1/6) returned 4.999999999999999...
#Could use a more sophisticated library if required
print("All share Index (based on Market price):", pow(MP_geometric_mean, 1/len(stocks)))
print("All share Index (based on Volume Weighted stock price):", pow(VWSP_geometric_mean, 1/len(stocks)))
|
from rest_framework import permissions
class UpdateProfile(permissions.BasePermission):
"""Restrict users to updating only their own profile"""
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
else:
return obj.id == request.user.id
class UpdateStatus(permissions.BasePermission):
"""Restrict user to update self created status messages"""
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.user.id == request.user.id
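# Usage sketch (hypothetical viewset, not part of this module): attach the permission
# so unsafe methods are limited to the owning user while reads stay open.
#
#   class ProfileViewSet(viewsets.ModelViewSet):
#       queryset = UserProfile.objects.all()
#       serializer_class = UserProfileSerializer
#       permission_classes = (UpdateProfile, permissions.IsAuthenticated)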
|
# Copyright 2018 Cable Television Laboratories, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from base_model import BaseModel
class ParamsConfigModel(BaseModel):
def __init__(self, **params_object):
super(ParamsConfigModel, self).__init__(**params_object)
self.name = params_object.get('name')
self.value = params_object.get('value')
self.schema = params_object.get('schema')
self.type = params_object.get('type')
self.description = params_object.get('description')
self.extension = params_object.get('extension', {})
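# Illustrative construction (hypothetical values): 'extension' defaults to an empty
# dict when the key is absent from the parsed params object.
#
#   model = ParamsConfigModel(name='flavor', value='m1.small', type='string',
#                             description='VM flavor to launch')
#   model.extension  # -> {}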
|
# Generated by Django 2.1.3 on 2018-11-06 08:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='locations',
name='location',
field=models.CharField(choices=[('Not defined', ''), ('floor 1', 'Floor 1'), ('floor 2', 'Floor 2'), ('floor 3', 'Floor 3')], max_length=8),
),
]
|
# -*- coding:utf-8 -*-
# Created by LuoJie at 11/22/19
from gensim.models.word2vec import LineSentence, Word2Vec
import numpy as np
import codecs
# logging configuration
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
def load_word2vec_file(save_wv_model_path):
# load the saved word2vec model
wv_model = Word2Vec.load(save_wv_model_path)
embedding_matrix = wv_model.wv.vectors
return embedding_matrix
def get_vocab(save_wv_model_path):
# load the saved word2vec model
wv_model = Word2Vec.load(save_wv_model_path)
reverse_vocab = {index: word for index, word in enumerate(wv_model.wv.index2word)}
vocab = {word: index for index, word in enumerate(wv_model.wv.index2word)}
return vocab, reverse_vocab
def get_embedding_matrix(w2v_model):
vocab_size = len(w2v_model.wv.vocab)
embedding_dim = len(w2v_model.wv['<START>'])
print('vocab_size, embedding_dim:', vocab_size, embedding_dim)
embedding_matrix = np.zeros((vocab_size, embedding_dim))
for i in range(vocab_size):
embedding_matrix[i, :] = w2v_model.wv[w2v_model.wv.index2word[i]]
embedding_matrix = embedding_matrix.astype('float32')
assert embedding_matrix.shape == (vocab_size, embedding_dim)
np.savetxt('embedding_matrix.txt', embedding_matrix, fmt='%0.8f')
print('embedding matrix extracted')
return embedding_matrix
def build_vocab(vocab):
"""
:param vocab: vocabulary
:return: the processed vocabulary tables
"""
start_token = u"<s>"
end_token = u"<e>"
unk_token = u"<unk>"
# sort by index
vocab = sorted([(vocab[i].index, i) for i in vocab])
# words in sorted order
sorted_words = [word for index, word in vocab]
# prepend the special marker tokens
sorted_words = [start_token, end_token, unk_token] + sorted_words
# build the index tables
vocab = {index: word for index, word in enumerate(sorted_words)}
reverse_vocab = {word: index for index, word in enumerate(sorted_words)}
return vocab, reverse_vocab
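# Usage sketch (hypothetical model path, for illustration):
#   vocab, reverse_vocab = get_vocab('word2vec.model')
#   embedding_matrix = load_word2vec_file('word2vec.model')
#   embedding_matrix.shape  # -> (len(vocab), embedding_dim)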
|
from datetime import datetime, timedelta
from pytz import timezone
import pytz
def est_time():
eastern = timezone('US/Eastern')
# fmt = '%Y-%m-%d %H:%M:%S %Z%z'
fmt = '%Y-%m-%d'
loc_dt = eastern.localize(datetime.now())
return str(loc_dt.strftime(fmt))
|
import pickle
import torch
import numpy as np
import torch.nn as nn
import pytorch_lightning as pl
import torch.nn.functional as F
from .rnn_nn import *
from .base_classifier import *
class RNN_Classifier(Base_Classifier):
def __init__(self,classes=10, input_size=28 , hidden_size=128, activation="relu" ):
super(RNN_Classifier, self).__init__()
############################################################################
# TODO: Build a RNN classifier #
############################################################################
self.hidden_size = hidden_size
self.RNN = nn.RNN(input_size, hidden_size)
self.fc1 = nn.Linear(hidden_size, 64)
self.fc2 = nn.Linear(64, classes)
############################################################################
# END OF YOUR CODE #
############################################################################
def forward(self, x):
############################################################################
# TODO: Perform the forward pass #
############################################################################
batch_size = x.size()[1]
rec, x = self.RNN(x)
x = F.dropout(F.relu(self.fc1(x.reshape(batch_size, self.hidden_size))))
x = F.relu(self.fc2(x))
############################################################################
# END OF YOUR CODE #
############################################################################
return x
class LSTM_Classifier(Base_Classifier):
def __init__(self, classes=10, input_size=28, hidden_size=128):
super(LSTM_Classifier, self).__init__()
#######################################################################
# TODO: Build a LSTM classifier #
#######################################################################
self.hidden_size = hidden_size
self.LSTM = nn.LSTM(input_size, hidden_size)
self.fc1 = nn.Linear(hidden_size, 64)
self.fc2 = nn.Linear(64, classes)
#######################################################################
# END OF YOUR CODE #
#######################################################################
def forward(self, x):
#######################################################################
# TODO: Perform the forward pass #
#######################################################################
batch_size = x.size()[1]
rec, (x, _) = self.LSTM(x)
x = F.dropout(F.relu(self.fc1(x.reshape(batch_size, self.hidden_size))))
x = F.relu(self.fc2(x))
#######################################################################
# END OF YOUR CODE #
#######################################################################
return x
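# Shape sketch (assumed convention, matching batch_size = x.size()[1] above): inputs are
# (seq_len, batch, input_size), e.g. (28, B, 28) for row-wise MNIST digits; nn.LSTM returns
# the final hidden state with shape (1, B, hidden_size), which is reshaped to (B, hidden_size)
# before the two fully connected layers.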
|
from django.test import TestCase
from django.contrib.auth import get_user_model
import datetime
from treatment_sheets.models import TxSheet, TxItem
from common.models import Prescription
User = get_user_model()
class TxSheetTest(TestCase):
def setUp(self):
self.owner = User.objects.create(username='Marfalo')
def test_get_absolute_url(self):
sheet = TxSheet.objects.create(owner=self.owner)
self.assertEqual(sheet.get_absolute_url(), '/tx_sheet/{}/'.format(sheet.id))
def test_tx_sheet_saves_owner(self):
# Should not raise
TxSheet(owner=User())
def test_tx_sheet_saves_date_on_creation(self):
date = datetime.date.today()
sheet = TxSheet.objects.create(owner=self.owner, name='Poochy', comment='Euthanasia')
self.assertEqual(date, sheet.date)
class TxItemTest(TestCase):
def setUp(self):
self.sheet = TxSheet.objects.create(owner=User.objects.create())
self.drug = Prescription.objects.create(name='Drug')
def test_item_related_to_tx_sheet(self):
item = TxItem()
item.med = self.drug
item.sheet = self.sheet
item.save()
self.assertEqual(self.sheet.id, item.sheet_id)
def test_get_absolute_url(self):
item = TxItem.objects.create(sheet=self.sheet, med=self.drug, dose=11, unit='mL', freq='BID')
self.assertEqual(item.get_absolute_url(), '/tx_sheet/{}/'.format(self.sheet.id))
def test_output_instructions(self):
item = TxItem.objects.create(sheet=self.sheet, med=self.drug, dose=11, unit='mL', freq='BID')
instruction = 'Take 11 mLs of Drug twice a day.'
self.assertEqual(instruction, item.instruction)
|
# -*- coding: utf-8 -*-
from collections import deque
class Solution:
def numIslands(self, grid):
if not grid:
return 0
result = 0
for i in range(len(grid)):
for j in range(len(grid[0])):
if grid[i][j] == "1":
result += 1
self.deleteIsland(grid, i, j)
return result
def deleteIsland(self, grid, i, j):
stack = deque([(i, j)])
while stack:
x, y = stack.pop()
grid[x][y] = "0"
if 0 < x and grid[x - 1][y] == "1":
stack.append((x - 1, y))
if 0 < y and grid[x][y - 1] == "1":
stack.append((x, y - 1))
if x < len(grid) - 1 and grid[x + 1][y] == "1":
stack.append((x + 1, y))
if y < len(grid[0]) - 1 and grid[x][y + 1] == "1":
stack.append((x, y + 1))
if __name__ == "__main__":
solution = Solution()
assert 1 == solution.numIslands(
[
["1", "1", "1", "1", "0"],
["1", "1", "0", "1", "0"],
["1", "1", "0", "0", "0"],
["0", "0", "0", "0", "0"],
]
)
assert 3 == solution.numIslands(
[
["1", "1", "0", "0", "0"],
["1", "1", "0", "0", "0"],
["0", "0", "1", "0", "0"],
["0", "0", "0", "1", "1"],
]
)
|
class_names_10 = ['Right Hand', 'Left Hand', 'Rest', 'Feet',
'Face', 'Navigation', 'Music', 'Rotation', 'Subtraction', 'Words']
class_names_5 = ['Right Hand', 'Rest', 'Feet', 'Rotation', 'Words']
marker_10_class = {'01 - Right Hand': [1], '02 - Left Hand': [2], '03 - Rest': [3],
'04 - Feet': [4], '05 - Face': [5], '06 - Navigation': [6], '07 - Music': [7],
'08 - Rotation': [8], '09 - Subtraction': [9], '10 - Words': [10]}
marker_5_class = {'1 - Right Hand': [1], '2 - Rest': [3],
'3 - Feet': [4], '4 - Rotation': [8], '5 - Words': [10]}
|