blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
281
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
57
| license_type
stringclasses 2
values | repo_name
stringlengths 6
116
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 313
values | visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 18.2k
668M
⌀ | star_events_count
int64 0
102k
| fork_events_count
int64 0
38.2k
| gha_license_id
stringclasses 17
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 107
values | src_encoding
stringclasses 20
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.02M
| extension
stringclasses 78
values | content
stringlengths 2
6.02M
| authors
listlengths 1
1
| author
stringlengths 0
175
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e03dd7cf9f30096a3fcd724160094c5729decd0e
|
7949f96ee7feeaa163608dbd256b0b76d1b89258
|
/toontown/coghq/DistributedBanquetTable.py
|
a40dee6c0601918967b38ce75552be557387627e
|
[] |
no_license
|
xxdecryptionxx/ToontownOnline
|
414619744b4c40588f9a86c8e01cb951ffe53e2d
|
e6c20e6ce56f2320217f2ddde8f632a63848bd6b
|
refs/heads/master
| 2021-01-11T03:08:59.934044
| 2018-07-27T01:26:21
| 2018-07-27T01:26:21
| 71,086,644
| 8
| 10
| null | 2018-06-01T00:13:34
| 2016-10-17T00:39:41
|
Python
|
UTF-8
|
Python
| false
| false
| 50,791
|
py
|
# File: t (Python 2.4)
import math
import random
from pandac.PandaModules import NodePath, Point3, VBase4, TextNode, Vec3, deg2Rad, CollisionSegment, CollisionHandlerQueue, CollisionNode, BitMask32, SmoothMover
from direct.fsm import FSM
from direct.distributed import DistributedObject
from direct.distributed.ClockDelta import globalClockDelta
from direct.directnotify import DirectNotifyGlobal
from direct.interval.IntervalGlobal import Sequence, ProjectileInterval, Parallel, LerpHprInterval, ActorInterval, Func, Wait, SoundInterval, LerpPosHprInterval, LerpScaleInterval
from direct.gui.DirectGui import DGG, DirectButton, DirectLabel, DirectWaitBar
from direct.task import Task
from toontown.suit import Suit
from toontown.suit import SuitDNA
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from toontown.coghq import BanquetTableBase
from toontown.coghq import DinerStatusIndicator
from toontown.battle import MovieUtil
class DistributedBanquetTable(DistributedObject.DistributedObject, FSM.FSM, BanquetTableBase.BanquetTableBase):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedBanquetTable')
rotationsPerSeatIndex = [
90,
90,
0,
0,
-90,
-90,
180,
180]
pitcherMinH = -360
pitcherMaxH = 360
rotateSpeed = 30
waterPowerSpeed = base.config.GetDouble('water-power-speed', 15)
waterPowerExponent = base.config.GetDouble('water-power-exponent', 0.75)
useNewAnimations = True
TugOfWarControls = False
OnlyUpArrow = True
if OnlyUpArrow:
BASELINE_KEY_RATE = 3
else:
BASELINE_KEY_RATE = 6
UPDATE_KEY_PRESS_RATE_TASK = 'BanquetTableUpdateKeyPressRateTask'
YELLOW_POWER_THRESHOLD = 0.75
RED_POWER_THRESHOLD = 0.96999999999999997
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
FSM.FSM.__init__(self, 'DistributedBanquetTable')
self.boss = None
self.index = -1
self.diners = { }
self.dinerStatus = { }
self.serviceLocs = { }
self.chairLocators = { }
self.sitLocators = { }
self.activeIntervals = { }
self.dinerStatusIndicators = { }
self.preparedForPhaseFour = False
self.avId = 0
self.toon = None
self.pitcherSmoother = SmoothMover()
self.pitcherSmoother.setSmoothMode(SmoothMover.SMOn)
self.smoothStarted = 0
self._DistributedBanquetTable__broadcastPeriod = 0.20000000000000001
self.changeSeq = 0
self.lastChangeSeq = 0
self.pitcherAdviceLabel = None
self.fireLength = 250
self.fireTrack = None
self.hitObject = None
self.setupPowerBar()
self.aimStart = None
self.toonPitcherPosition = Point3(0, -2, 0)
self.allowLocalRequestControl = True
self.fadeTrack = None
self.grabTrack = None
self.gotHitByBoss = False
self.keyTTL = []
self.keyRate = 0
self.buttons = [
0,
1]
self.lastPowerFired = 0
self.moveSound = None
self.releaseTrack = None
def disable(self):
DistributedObject.DistributedObject.disable(self)
taskMgr.remove(self.triggerName)
taskMgr.remove(self.smoothName)
taskMgr.remove(self.watchControlsName)
taskMgr.remove(self.pitcherAdviceName)
taskMgr.remove(self.posHprBroadcastName)
taskMgr.remove(self.waterPowerTaskName)
if self.releaseTrack:
self.releaseTrack.finish()
self.releaseTrack = None
if self.fireTrack:
self.fireTrack.finish()
self.fireTrack = None
self.cleanupIntervals()
def delete(self):
DistributedObject.DistributedObject.delete(self)
self.boss = None
self.ignoreAll()
for indicator in self.dinerStatusIndicators.values():
indicator.delete()
self.dinerStatusIndicators = { }
for diner in self.diners.values():
diner.delete()
self.diners = { }
self.powerBar.destroy()
self.powerBar = None
self.pitcherMoveSfx.stop()
def announceGenerate(self):
DistributedObject.DistributedObject.announceGenerate(self)
self.loadAssets()
self.smoothName = self.uniqueName('pitcherSmooth')
self.pitcherAdviceName = self.uniqueName('pitcherAdvice')
self.posHprBroadcastName = self.uniqueName('pitcherBroadcast')
self.waterPowerTaskName = self.uniqueName('updateWaterPower')
self.triggerName = self.uniqueName('trigger')
self.watchControlsName = self.uniqueName('watchControls')
def setBossCogId(self, bossCogId):
self.bossCogId = bossCogId
self.boss = base.cr.doId2do[bossCogId]
self.boss.setTable(self, self.index)
def setIndex(self, index):
self.index = index
def setState(self, state, avId, extraInfo):
self.gotHitByBoss = extraInfo
if state == 'F':
self.demand('Off')
elif state == 'N':
self.demand('On')
elif state == 'I':
self.demand('Inactive')
elif state == 'R':
self.demand('Free')
elif state == 'C':
self.demand('Controlled', avId)
elif state == 'L':
self.demand('Flat', avId)
else:
self.notify.error('Invalid state from AI: %s' % state)
def setNumDiners(self, numDiners):
self.numDiners = numDiners
def setDinerInfo(self, hungryDurations, eatingDurations, dinerLevels):
self.dinerInfo = { }
for i in xrange(len(hungryDurations)):
hungryDur = hungryDurations[i]
eatingDur = eatingDurations[i]
dinerLevel = dinerLevels[i]
self.dinerInfo[i] = (hungryDur, eatingDur, dinerLevel)
def loadAssets(self):
self.tableGroup = loader.loadModel('phase_12/models/bossbotHQ/BanquetTableChairs')
tableLocator = self.boss.geom.find('**/TableLocator_%d' % (self.index + 1))
if tableLocator.isEmpty():
self.tableGroup.reparentTo(render)
self.tableGroup.setPos(0, 75, 0)
else:
self.tableGroup.reparentTo(tableLocator)
self.tableGeom = self.tableGroup.find('**/Geometry')
self.setupDiners()
self.setupChairCols()
self.squirtSfx = loader.loadSfx('phase_4/audio/sfx/AA_squirt_seltzer_miss.mp3')
self.hitBossSfx = loader.loadSfx('phase_5/audio/sfx/SA_watercooler_spray_only.mp3')
self.hitBossSoundInterval = SoundInterval(self.hitBossSfx, node = self.boss, volume = 1.0)
self.serveFoodSfx = loader.loadSfx('phase_4/audio/sfx/MG_sfx_travel_game_bell_for_trolley.mp3')
self.pitcherMoveSfx = base.loadSfx('phase_4/audio/sfx/MG_cannon_adjust.mp3')
def setupDiners(self):
for i in xrange(self.numDiners):
newDiner = self.createDiner(i)
self.diners[i] = newDiner
self.dinerStatus[i] = self.HUNGRY
def createDiner(self, i):
diner = Suit.Suit()
diner.dna = SuitDNA.SuitDNA()
level = self.dinerInfo[i][2]
level -= 4
diner.dna.newSuitRandom(level = level, dept = 'c')
diner.setDNA(diner.dna)
if self.useNewAnimations:
diner.loop('sit', fromFrame = i)
else:
diner.pose('landing', 0)
locator = self.tableGroup.find('**/chair_%d' % (i + 1))
locatorScale = locator.getNetTransform().getScale()[0]
correctHeadingNp = locator.attachNewNode('correctHeading')
self.chairLocators[i] = correctHeadingNp
heading = self.rotationsPerSeatIndex[i]
correctHeadingNp.setH(heading)
sitLocator = correctHeadingNp.attachNewNode('sitLocator')
base.sitLocator = sitLocator
pos = correctHeadingNp.getPos(render)
if SuitDNA.getSuitBodyType(diner.dna.name) == 'c':
sitLocator.setPos(0.5, 3.6499999999999999, -3.75)
else:
sitLocator.setZ(-2.3999999999999999)
sitLocator.setY(2.5)
sitLocator.setX(0.5)
self.sitLocators[i] = sitLocator
diner.setScale(1.0 / locatorScale)
diner.reparentTo(sitLocator)
newLoc = NodePath('serviceLoc-%d-%d' % (self.index, i))
newLoc.reparentTo(correctHeadingNp)
newLoc.setPos(0, 3.0, 1)
self.serviceLocs[i] = newLoc
base.serviceLoc = newLoc
head = diner.find('**/joint_head')
newIndicator = DinerStatusIndicator.DinerStatusIndicator(parent = head, pos = Point3(0, 0, 3.5), scale = 5.0)
newIndicator.wrtReparentTo(diner)
self.dinerStatusIndicators[i] = newIndicator
return diner
def setupChairCols(self):
for i in xrange(self.numDiners):
chairCol = self.tableGroup.find('**/collision_chair_%d' % (i + 1))
colName = 'ChairCol-%d-%d' % (self.index, i)
chairCol.setTag('chairIndex', str(i))
chairCol.setName(colName)
chairCol.setCollideMask(ToontownGlobals.WallBitmask)
self.accept('enter' + colName, self.touchedChair)
def touchedChair(self, colEntry):
chairIndex = int(colEntry.getIntoNodePath().getTag('chairIndex'))
if chairIndex in self.dinerStatus:
status = self.dinerStatus[chairIndex]
if status in (self.HUNGRY, self.ANGRY):
self.boss.localToonTouchedChair(self.index, chairIndex)
def serveFood(self, food, chairIndex):
self.removeFoodModel(chairIndex)
serviceLoc = self.serviceLocs.get(chairIndex)
if not food or food.isEmpty():
foodModel = loader.loadModel('phase_12/models/bossbotHQ/canoffood')
foodModel.setScale(ToontownGlobals.BossbotFoodModelScale)
foodModel.reparentTo(serviceLoc)
else:
food.wrtReparentTo(serviceLoc)
tray = food.find('**/tray')
if not tray.isEmpty():
tray.hide()
ivalDuration = 1.5
foodMoveIval = Parallel(SoundInterval(self.serveFoodSfx, node = food), ProjectileInterval(food, duration = ivalDuration, startPos = food.getPos(serviceLoc), endPos = serviceLoc.getPos(serviceLoc)), LerpHprInterval(food, ivalDuration, Point3(0, -360, 0)))
intervalName = 'serveFood-%d-%d' % (self.index, chairIndex)
foodMoveIval.start()
self.activeIntervals[intervalName] = foodMoveIval
def setDinerStatus(self, chairIndex, status):
if chairIndex in self.dinerStatus:
oldStatus = self.dinerStatus[chairIndex]
self.dinerStatus[chairIndex] = status
if oldStatus != status:
if status == self.EATING:
self.changeDinerToEating(chairIndex)
elif status == self.HUNGRY:
self.changeDinerToHungry(chairIndex)
elif status == self.ANGRY:
self.changeDinerToAngry(chairIndex)
elif status == self.DEAD:
self.changeDinerToDead(chairIndex)
elif status == self.HIDDEN:
self.changeDinerToHidden(chairIndex)
def removeFoodModel(self, chairIndex):
serviceLoc = self.serviceLocs.get(chairIndex)
if serviceLoc:
for i in xrange(serviceLoc.getNumChildren()):
serviceLoc.getChild(0).removeNode()
def changeDinerToEating(self, chairIndex):
indicator = self.dinerStatusIndicators.get(chairIndex)
eatingDuration = self.dinerInfo[chairIndex][1]
if indicator:
indicator.request('Eating', eatingDuration)
diner = self.diners[chairIndex]
intervalName = 'eating-%d-%d' % (self.index, chairIndex)
eatInTime = 32.0 / 24.0
eatOutTime = 21.0 / 24.0
eatLoopTime = 19 / 24.0
rightHand = diner.getRightHand()
waitTime = 5
loopDuration = eatingDuration - eatInTime - eatOutTime - waitTime
serviceLoc = self.serviceLocs[chairIndex]
def foodAttach(self = self, diner = diner):
foodModel = self.serviceLocs[chairIndex].getChild(0)
(foodModel.reparentTo(diner.getRightHand()),)
(foodModel.setHpr(Point3(0, -94, 0)),)
(foodModel.setPos(Point3(-0.14999999999999999, -0.69999999999999996, -0.40000000000000002)),)
scaleAdj = 1
if SuitDNA.getSuitBodyType(diner.dna.name) == 'c':
scaleAdj = 0.59999999999999998
(foodModel.setPos(Point3(0.10000000000000001, -0.25, -0.31)),)
else:
scaleAdj = 0.80000000000000004
(foodModel.setPos(Point3(-0.25, -0.84999999999999998, -0.34000000000000002)),)
oldScale = foodModel.getScale()
newScale = oldScale * scaleAdj
foodModel.setScale(newScale)
def foodDetach(self = self, diner = diner):
foodModel = diner.getRightHand().getChild(0)
(foodModel.reparentTo(serviceLoc),)
(foodModel.setPosHpr(0, 0, 0, 0, 0, 0),)
scaleAdj = 1
if SuitDNA.getSuitBodyType(diner.dna.name) == 'c':
scaleAdj = 0.59999999999999998
else:
scakeAdj = 0.80000000000000004
oldScale = foodModel.getScale()
newScale = oldScale / scaleAdj
foodModel.setScale(newScale)
eatIval = Sequence(ActorInterval(diner, 'sit', duration = waitTime), ActorInterval(diner, 'sit-eat-in', startFrame = 0, endFrame = 6), Func(foodAttach), ActorInterval(diner, 'sit-eat-in', startFrame = 6, endFrame = 32), ActorInterval(diner, 'sit-eat-loop', duration = loopDuration, loop = 1), ActorInterval(diner, 'sit-eat-out', startFrame = 0, endFrame = 12), Func(foodDetach), ActorInterval(diner, 'sit-eat-out', startFrame = 12, endFrame = 21))
eatIval.start()
self.activeIntervals[intervalName] = eatIval
def changeDinerToHungry(self, chairIndex):
intervalName = 'eating-%d-%d' % (self.index, chairIndex)
if intervalName in self.activeIntervals:
self.activeIntervals[intervalName].finish()
self.removeFoodModel(chairIndex)
indicator = self.dinerStatusIndicators.get(chairIndex)
if indicator:
indicator.request('Hungry', self.dinerInfo[chairIndex][0])
diner = self.diners[chairIndex]
if random.choice([
0,
1]):
diner.loop('sit-hungry-left')
else:
diner.loop('sit-hungry-right')
def changeDinerToAngry(self, chairIndex):
self.removeFoodModel(chairIndex)
indicator = self.dinerStatusIndicators.get(chairIndex)
if indicator:
indicator.request('Angry')
diner = self.diners[chairIndex]
diner.loop('sit-angry')
def changeDinerToDead(self, chairIndex):
def removeDeathSuit(suit, deathSuit):
if not deathSuit.isEmpty():
deathSuit.detachNode()
suit.cleanupLoseActor()
self.removeFoodModel(chairIndex)
indicator = self.dinerStatusIndicators.get(chairIndex)
if indicator:
indicator.request('Dead')
diner = self.diners[chairIndex]
deathSuit = diner
locator = self.tableGroup.find('**/chair_%d' % (chairIndex + 1))
deathSuit = diner.getLoseActor()
ival = Sequence(Func(self.notify.debug, 'before actorinterval sit-lose'), ActorInterval(diner, 'sit-lose'), Func(self.notify.debug, 'before deathSuit.setHpr'), Func(deathSuit.setHpr, diner.getHpr()), Func(self.notify.debug, 'before diner.hide'), Func(diner.hide), Func(self.notify.debug, 'before deathSuit.reparentTo'), Func(deathSuit.reparentTo, self.chairLocators[chairIndex]), Func(self.notify.debug, 'befor ActorInterval lose'), ActorInterval(deathSuit, 'lose', duration = MovieUtil.SUIT_LOSE_DURATION), Func(self.notify.debug, 'before remove deathsuit'), Func(removeDeathSuit, diner, deathSuit, name = 'remove-death-suit-%d-%d' % (chairIndex, self.index)), Func(self.notify.debug, 'diner.stash'), Func(diner.stash))
spinningSound = base.loadSfx('phase_3.5/audio/sfx/Cog_Death.mp3')
deathSound = base.loadSfx('phase_3.5/audio/sfx/ENC_cogfall_apart.mp3')
deathSoundTrack = Sequence(Wait(0.80000000000000004), SoundInterval(spinningSound, duration = 1.2, startTime = 1.5, volume = 0.20000000000000001, node = deathSuit), SoundInterval(spinningSound, duration = 3.0, startTime = 0.59999999999999998, volume = 0.80000000000000004, node = deathSuit), SoundInterval(deathSound, volume = 0.32000000000000001, node = deathSuit))
intervalName = 'dinerDie-%d-%d' % (self.index, chairIndex)
deathIval = Parallel(ival, deathSoundTrack)
deathIval.start()
self.activeIntervals[intervalName] = deathIval
def changeDinerToHidden(self, chairIndex):
self.removeFoodModel(chairIndex)
indicator = self.dinerStatusIndicators.get(chairIndex)
if indicator:
indicator.request('Inactive')
diner = self.diners[chairIndex]
diner.hide()
def setAllDinersToSitNeutral(self):
startFrame = 0
for diner in self.diners.values():
if not diner.isHidden():
diner.loop('sit', fromFrame = startFrame)
startFrame += 1
continue
def cleanupIntervals(self):
for interval in self.activeIntervals.values():
interval.finish()
self.activeIntervals = { }
def clearInterval(self, name, finish = 1):
if self.activeIntervals.has_key(name):
ival = self.activeIntervals[name]
if finish:
ival.finish()
else:
ival.pause()
if self.activeIntervals.has_key(name):
del self.activeIntervals[name]
else:
self.notify.debug('interval: %s already cleared' % name)
def finishInterval(self, name):
if self.activeIntervals.has_key(name):
interval = self.activeIntervals[name]
interval.finish()
def getNotDeadInfo(self):
notDeadList = []
for i in xrange(self.numDiners):
if self.dinerStatus[i] != self.DEAD:
notDeadList.append((self.index, i, 12))
continue
return notDeadList
def enterOn(self):
pass
def exitOn(self):
pass
def enterInactive(self):
for chairIndex in xrange(self.numDiners):
indicator = self.dinerStatusIndicators.get(chairIndex)
if indicator:
indicator.request('Inactive')
self.removeFoodModel(chairIndex)
def exitInactive(self):
pass
def enterFree(self):
self.resetPowerBar()
if self.fadeTrack:
self.fadeTrack.finish()
self.fadeTrack = None
self.prepareForPhaseFour()
if self.avId == localAvatar.doId:
self.tableGroup.setAlphaScale(0.29999999999999999)
self.tableGroup.setTransparency(1)
taskMgr.doMethodLater(5, self._DistributedBanquetTable__allowDetect, self.triggerName)
self.fadeTrack = Sequence(Func(self.tableGroup.setTransparency, 1), self.tableGroup.colorScaleInterval(0.20000000000000001, VBase4(1, 1, 1, 0.29999999999999999)))
self.fadeTrack.start()
self.allowLocalRequestControl = False
else:
self.allowLocalRequestControl = True
self.avId = 0
def exitFree(self):
pass
def touchedTable(self, colEntry):
tableIndex = int(colEntry.getIntoNodePath().getTag('tableIndex'))
if self.state == 'Free' and self.avId == 0 and self.allowLocalRequestControl:
self.d_requestControl()
def prepareForPhaseFour(self):
if not self.preparedForPhaseFour:
for i in xrange(8):
chair = self.tableGroup.find('**/chair_%d' % (i + 1))
if not chair.isEmpty():
chair.hide()
colChairs = self.tableGroup.findAllMatches('**/ChairCol*')
for i in xrange(colChairs.getNumPaths()):
col = colChairs.getPath(i)
col.stash()
colChairs = self.tableGroup.findAllMatches('**/collision_chair*')
for i in xrange(colChairs.getNumPaths()):
col = colChairs.getPath(i)
col.stash()
tableCol = self.tableGroup.find('**/collision_table')
colName = 'TableCol-%d' % self.index
tableCol.setTag('tableIndex', str(self.index))
tableCol.setName(colName)
tableCol.setCollideMask(ToontownGlobals.WallBitmask | ToontownGlobals.BanquetTableBitmask)
self.accept('enter' + colName, self.touchedTable)
self.preparedForPhaseFour = True
self.waterPitcherModel = loader.loadModel('phase_12/models/bossbotHQ/tt_m_ara_bhq_seltzerBottle')
lampNode = self.tableGroup.find('**/lamp_med_5')
pos = lampNode.getPos(self.tableGroup)
lampNode.hide()
bottleLocator = self.tableGroup.find('**/bottle_locator')
pos = bottleLocator.getPos(self.tableGroup)
self.waterPitcherNode = self.tableGroup.attachNewNode('pitcherNode')
self.waterPitcherNode.setPos(pos)
self.waterPitcherModel.reparentTo(self.waterPitcherNode)
self.waterPitcherModel.ls()
self.nozzle = self.waterPitcherModel.find('**/nozzle_tip')
self.handLocator = self.waterPitcherModel.find('**/hand_locator')
self.handPos = self.handLocator.getPos()
def d_requestControl(self):
self.sendUpdate('requestControl')
def d_requestFree(self, gotHitByBoss):
self.sendUpdate('requestFree', [
gotHitByBoss])
def enterControlled(self, avId):
self.prepareForPhaseFour()
self.avId = avId
toon = base.cr.doId2do.get(avId)
if not toon:
return None
self.toon = toon
self.grabTrack = self.makeToonGrabInterval(toon)
self.notify.debug('grabTrack=%s' % self.grabTrack)
self.pitcherCamPos = Point3(0, -50, 40)
self.pitcherCamHpr = Point3(0, -21, 0)
if avId == localAvatar.doId:
self.boss.toMovieMode()
self._DistributedBanquetTable__enableControlInterface()
self.startPosHprBroadcast()
self.grabTrack = Sequence(self.grabTrack, Func(camera.wrtReparentTo, localAvatar), LerpPosHprInterval(camera, 1, self.pitcherCamPos, self.pitcherCamHpr), Func(self.boss.toCraneMode))
if self.TugOfWarControls:
self._DistributedBanquetTable__spawnUpdateKeyPressRateTask()
self.accept('exitCrane', self.gotBossZapped)
else:
self.startSmooth()
toon.stopSmooth()
self.grabTrack.start()
def exitControlled(self):
self.ignore('exitCrane')
if self.grabTrack:
self.grabTrack.finish()
self.grabTrack = None
nextState = self.getCurrentOrNextState()
self.notify.debug('nextState=%s' % nextState)
if nextState == 'Flat':
place = base.cr.playGame.getPlace()
self.notify.debug('%s' % place.fsm)
if self.avId == localAvatar.doId:
self._DistributedBanquetTable__disableControlInterface()
elif self.toon and not self.toon.isDisabled():
self.toon.loop('neutral')
self.toon.startSmooth()
self.releaseTrack = self.makeToonReleaseInterval(self.toon)
self.stopPosHprBroadcast()
self.stopSmooth()
if self.avId == localAvatar.doId:
localAvatar.wrtReparentTo(render)
self._DistributedBanquetTable__disableControlInterface()
camera.reparentTo(base.localAvatar)
camera.setPos(base.localAvatar.cameraPositions[0][0])
camera.setHpr(0, 0, 0)
self.goToFinalBattle()
self.safeBossToFinalBattleMode()
else:
toon = base.cr.doId2do.get(self.avId)
if toon:
toon.wrtReparentTo(render)
self.releaseTrack.start()
def safeBossToFinalBattleMode(self):
if self.boss:
self.boss.toFinalBattleMode()
def goToFinalBattle(self):
if self.cr:
place = self.cr.playGame.getPlace()
if place and hasattr(place, 'fsm'):
if place.fsm.getCurrentState().getName() == 'crane':
place.setState('finalBattle')
def makeToonGrabInterval(self, toon):
toon.pose('leverNeutral', 0)
toon.update()
rightHandPos = toon.rightHand.getPos(toon)
self.toonPitcherPosition = Point3(self.handPos[0] - rightHandPos[0], self.handPos[1] - rightHandPos[1], 0)
destZScale = rightHandPos[2] / self.handPos[2]
grabIval = Sequence(Func(toon.wrtReparentTo, self.waterPitcherNode), Func(toon.loop, 'neutral'), Parallel(ActorInterval(toon, 'jump'), Sequence(Wait(0.42999999999999999), Parallel(ProjectileInterval(toon, duration = 0.90000000000000002, startPos = toon.getPos(self.waterPitcherNode), endPos = self.toonPitcherPosition), LerpHprInterval(toon, 0.90000000000000002, Point3(0, 0, 0)), LerpScaleInterval(self.waterPitcherModel, 0.90000000000000002, Point3(1, 1, destZScale))))), Func(toon.setPos, self.toonPitcherPosition), Func(toon.loop, 'leverNeutral'))
return grabIval
def makeToonReleaseInterval(self, toon):
temp1 = self.waterPitcherNode.attachNewNode('temp1')
temp1.setPos(self.toonPitcherPosition)
temp2 = self.waterPitcherNode.attachNewNode('temp2')
temp2.setPos(0, -10, -self.waterPitcherNode.getZ())
startPos = temp1.getPos(render)
endPos = temp2.getPos(render)
temp1.removeNode()
temp2.removeNode()
def getSlideToPos(toon = toon):
return render.getRelativePoint(toon, Point3(0, -10, 0))
if self.gotHitByBoss:
self.notify.debug('creating zap interval instead')
grabIval = Sequence(Func(toon.loop, 'neutral'), Func(toon.wrtReparentTo, render), Parallel(ActorInterval(toon, 'slip-backward'), toon.posInterval(0.5, getSlideToPos, fluid = 1)))
else:
grabIval = Sequence(Func(toon.loop, 'neutral'), Func(toon.wrtReparentTo, render), Parallel(ActorInterval(toon, 'jump'), Sequence(Wait(0.42999999999999999), ProjectileInterval(toon, duration = 0.90000000000000002, startPos = startPos, endPos = endPos))))
return grabIval
def b_clearSmoothing(self):
self.d_clearSmoothing()
self.clearSmoothing()
def d_clearSmoothing(self):
self.sendUpdate('clearSmoothing', [
0])
def clearSmoothing(self, bogus = None):
self.pitcherSmoother.clearPositions(1)
def doSmoothTask(self, task):
self.pitcherSmoother.computeAndApplySmoothHpr(self.waterPitcherNode)
return Task.cont
def startSmooth(self):
if not self.smoothStarted:
taskName = self.smoothName
taskMgr.remove(taskName)
self.reloadPosition()
taskMgr.add(self.doSmoothTask, taskName)
self.smoothStarted = 1
def stopSmooth(self):
if self.smoothStarted:
taskName = self.smoothName
taskMgr.remove(taskName)
self.forceToTruePosition()
self.smoothStarted = 0
def _DistributedBanquetTable__enableControlInterface(self):
gui = loader.loadModel('phase_3.5/models/gui/avatar_panel_gui')
self.closeButton = DirectButton(image = (gui.find('**/CloseBtn_UP'), gui.find('**/CloseBtn_DN'), gui.find('**/CloseBtn_Rllvr'), gui.find('**/CloseBtn_UP')), relief = None, scale = 2, text = TTLocalizer.BossbotPitcherLeave, text_scale = 0.040000000000000001, text_pos = (0, -0.070000000000000007), text_fg = VBase4(1, 1, 1, 1), pos = (1.05, 0, -0.81999999999999995), command = self._DistributedBanquetTable__exitPitcher)
self.accept('escape', self._DistributedBanquetTable__exitPitcher)
self.accept('control', self._DistributedBanquetTable__controlPressed)
self.accept('control-up', self._DistributedBanquetTable__controlReleased)
self.accept('InputState-forward', self._DistributedBanquetTable__upArrow)
self.accept('InputState-reverse', self._DistributedBanquetTable__downArrow)
self.accept('InputState-turnLeft', self._DistributedBanquetTable__leftArrow)
self.accept('InputState-turnRight', self._DistributedBanquetTable__rightArrow)
self.accept('arrow_up', self._DistributedBanquetTable__upArrowKeyPressed)
self.accept('arrow_down', self._DistributedBanquetTable__downArrowKeyPressed)
taskMgr.add(self._DistributedBanquetTable__watchControls, self.watchControlsName)
taskMgr.doMethodLater(5, self._DistributedBanquetTable__displayPitcherAdvice, self.pitcherAdviceName)
self.arrowVert = 0
self.arrowHorz = 0
self.powerBar.show()
def _DistributedBanquetTable__disableControlInterface(self):
if self.closeButton:
self.closeButton.destroy()
self.closeButton = None
self._DistributedBanquetTable__cleanupPitcherAdvice()
self.ignore('escape')
self.ignore('control')
self.ignore('control-up')
self.ignore('InputState-forward')
self.ignore('InputState-reverse')
self.ignore('InputState-turnLeft')
self.ignore('InputState-turnRight')
self.ignore('arrow_up')
self.ignore('arrow_down')
self.arrowVert = 0
self.arrowHorz = 0
taskMgr.remove(self.watchControlsName)
taskMgr.remove(self.waterPowerTaskName)
self.resetPowerBar()
self.aimStart = None
self.powerBar.hide()
if self.TugOfWarControls:
self._DistributedBanquetTable__killUpdateKeyPressRateTask()
self.keyTTL = []
self._DistributedBanquetTable__setMoveSound(None)
def _DistributedBanquetTable__displayPitcherAdvice(self, task):
if self.pitcherAdviceLabel == None:
self.pitcherAdviceLabel = DirectLabel(text = TTLocalizer.BossbotPitcherAdvice, text_fg = VBase4(1, 1, 1, 1), text_align = TextNode.ACenter, relief = None, pos = (0, 0, 0.68999999999999995), scale = 0.10000000000000001)
def _DistributedBanquetTable__cleanupPitcherAdvice(self):
if self.pitcherAdviceLabel:
self.pitcherAdviceLabel.destroy()
self.pitcherAdviceLabel = None
taskMgr.remove(self.pitcherAdviceName)
def showExiting(self):
if self.closeButton:
self.closeButton.destroy()
self.closeButton = DirectLabel(relief = None, text = TTLocalizer.BossbotPitcherLeaving, pos = (1.05, 0, -0.88), text_pos = (0, 0), text_scale = 0.059999999999999998, text_fg = VBase4(1, 1, 1, 1))
self._DistributedBanquetTable__cleanupPitcherAdvice()
def _DistributedBanquetTable__exitPitcher(self):
self.showExiting()
self.d_requestFree(False)
def _DistributedBanquetTable__controlPressed(self):
self._DistributedBanquetTable__cleanupPitcherAdvice()
if self.TugOfWarControls:
if self.power:
self.aimStart = 1
self._DistributedBanquetTable__endFireWater()
elif self.state == 'Controlled':
self._DistributedBanquetTable__beginFireWater()
def _DistributedBanquetTable__controlReleased(self):
if self.TugOfWarControls:
pass
1
if self.state == 'Controlled':
self._DistributedBanquetTable__endFireWater()
def _DistributedBanquetTable__upArrow(self, pressed):
self._DistributedBanquetTable__incrementChangeSeq()
self._DistributedBanquetTable__cleanupPitcherAdvice()
if pressed:
self.arrowVert = 1
elif self.arrowVert > 0:
self.arrowVert = 0
def _DistributedBanquetTable__downArrow(self, pressed):
self._DistributedBanquetTable__incrementChangeSeq()
self._DistributedBanquetTable__cleanupPitcherAdvice()
if pressed:
self.arrowVert = -1
elif self.arrowVert < 0:
self.arrowVert = 0
def _DistributedBanquetTable__rightArrow(self, pressed):
self._DistributedBanquetTable__incrementChangeSeq()
self._DistributedBanquetTable__cleanupPitcherAdvice()
if pressed:
self.arrowHorz = 1
elif self.arrowHorz > 0:
self.arrowHorz = 0
def _DistributedBanquetTable__leftArrow(self, pressed):
self._DistributedBanquetTable__incrementChangeSeq()
self._DistributedBanquetTable__cleanupPitcherAdvice()
if pressed:
self.arrowHorz = -1
elif self.arrowHorz < 0:
self.arrowHorz = 0
def _DistributedBanquetTable__incrementChangeSeq(self):
self.changeSeq = self.changeSeq + 1 & 255
def stopPosHprBroadcast(self):
taskName = self.posHprBroadcastName
taskMgr.remove(taskName)
def startPosHprBroadcast(self):
taskName = self.posHprBroadcastName
self.b_clearSmoothing()
self.d_sendPitcherPos()
taskMgr.remove(taskName)
taskMgr.doMethodLater(self._DistributedBanquetTable__broadcastPeriod, self._DistributedBanquetTable__posHprBroadcast, taskName)
def _DistributedBanquetTable__posHprBroadcast(self, task):
self.d_sendPitcherPos()
taskName = self.posHprBroadcastName
taskMgr.doMethodLater(self._DistributedBanquetTable__broadcastPeriod, self._DistributedBanquetTable__posHprBroadcast, taskName)
return Task.done
def d_sendPitcherPos(self):
timestamp = globalClockDelta.getFrameNetworkTime()
self.sendUpdate('setPitcherPos', [
self.changeSeq,
self.waterPitcherNode.getH(),
timestamp])
def setPitcherPos(self, changeSeq, h, timestamp):
self.changeSeq = changeSeq
if self.smoothStarted:
now = globalClock.getFrameTime()
local = globalClockDelta.networkToLocalTime(timestamp, now)
self.pitcherSmoother.setH(h)
self.pitcherSmoother.setTimestamp(local)
self.pitcherSmoother.markPosition()
else:
self.waterPitcherNode.setH(h)
def _DistributedBanquetTable__watchControls(self, task):
if self.arrowHorz:
self._DistributedBanquetTable__movePitcher(self.arrowHorz)
else:
self._DistributedBanquetTable__setMoveSound(None)
return Task.cont
def _DistributedBanquetTable__movePitcher(self, xd):
dt = globalClock.getDt()
h = self.waterPitcherNode.getH() - xd * self.rotateSpeed * dt
h %= 360
self.notify.debug('rotSpeed=%.2f curH=%.2f xd =%.2f, dt = %.2f, h=%.2f' % (self.rotateSpeed, self.waterPitcherNode.getH(), xd, dt, h))
limitH = h
self.waterPitcherNode.setH(limitH)
if xd:
self._DistributedBanquetTable__setMoveSound(self.pitcherMoveSfx)
def reloadPosition(self):
self.pitcherSmoother.clearPositions(0)
self.pitcherSmoother.setHpr(self.waterPitcherNode.getHpr())
self.pitcherSmoother.setPhonyTimestamp()
def forceToTruePosition(self):
if self.pitcherSmoother.getLatestPosition():
self.pitcherSmoother.applySmoothHpr(self.waterPitcherNode)
self.pitcherSmoother.clearPositions(1)
def getSprayTrack(self, color, origin, target, dScaleUp, dHold, dScaleDown, horizScale = 1.0, vertScale = 1.0, parent = render):
track = Sequence()
SPRAY_LEN = 1.5
sprayProp = MovieUtil.globalPropPool.getProp('spray')
sprayScale = hidden.attachNewNode('spray-parent')
sprayRot = hidden.attachNewNode('spray-rotate')
spray = sprayRot
spray.setColor(color)
if color[3] < 1.0:
spray.setTransparency(1)
def showSpray(sprayScale, sprayRot, sprayProp, origin, target, parent):
if callable(origin):
origin = origin()
if callable(target):
target = target()
sprayRot.reparentTo(parent)
sprayRot.clearMat()
sprayScale.reparentTo(sprayRot)
sprayScale.clearMat()
sprayProp.reparentTo(sprayScale)
sprayProp.clearMat()
sprayRot.setPos(origin)
sprayRot.lookAt(Point3(target))
track.append(Func(showSpray, sprayScale, sprayRot, sprayProp, origin, target, parent))
def calcTargetScale(target = target, origin = origin, horizScale = horizScale, vertScale = vertScale):
if callable(target):
target = target()
if callable(origin):
origin = origin()
distance = Vec3(target - origin).length()
yScale = distance / SPRAY_LEN
targetScale = Point3(yScale * horizScale, yScale, yScale * vertScale)
return targetScale
track.append(LerpScaleInterval(sprayScale, dScaleUp, calcTargetScale, startScale = Point3(0.01, 0.01, 0.01)))
track.append(Func(self.checkHitObject))
track.append(Wait(dHold))
def prepareToShrinkSpray(spray, sprayProp, origin, target):
if callable(target):
target = target()
if callable(origin):
origin = origin()
sprayProp.setPos(Point3(0.0, -SPRAY_LEN, 0.0))
spray.setPos(target)
track.append(Func(prepareToShrinkSpray, spray, sprayProp, origin, target))
track.append(LerpScaleInterval(sprayScale, dScaleDown, Point3(0.01, 0.01, 0.01)))
def hideSpray(spray, sprayScale, sprayRot, sprayProp, propPool):
sprayProp.detachNode()
MovieUtil.removeProp(sprayProp)
sprayRot.removeNode()
sprayScale.removeNode()
track.append(Func(hideSpray, spray, sprayScale, sprayRot, sprayProp, MovieUtil.globalPropPool))
return track
def checkHitObject(self):
if not self.hitObject:
return None
if self.avId != base.localAvatar.doId:
return None
tag = self.hitObject.getNetTag('pieCode')
pieCode = int(tag)
if pieCode == ToontownGlobals.PieCodeBossCog:
self.hitBossSoundInterval.start()
self.sendUpdate('waterHitBoss', [
self.index])
if self.TugOfWarControls:
damage = 1
if self.lastPowerFired < self.YELLOW_POWER_THRESHOLD:
damage = 1
elif self.lastPowerFired < self.RED_POWER_THRESHOLD:
damage = 2
else:
damage = 3
self.boss.d_hitBoss(damage)
else:
damage = 1
if self.lastPowerFired < self.YELLOW_POWER_THRESHOLD:
damage = 1
elif self.lastPowerFired < self.RED_POWER_THRESHOLD:
damage = 2
else:
damage = 3
self.boss.d_hitBoss(damage)
def waterHitBoss(self, tableIndex):
if self.index == tableIndex:
self.hitBossSoundInterval.start()
def setupPowerBar(self):
self.powerBar = DirectWaitBar(pos = (0.0, 0, -0.93999999999999995), relief = DGG.SUNKEN, frameSize = (-2.0, 2.0, -0.20000000000000001, 0.20000000000000001), borderWidth = (0.02, 0.02), scale = 0.25, range = 1, sortOrder = 50, frameColor = (0.5, 0.5, 0.5, 0.5), barColor = (0.75, 0.75, 1.0, 0.80000000000000004), text = '', text_scale = 0.26000000000000001, text_fg = (1, 1, 1, 1), text_align = TextNode.ACenter, text_pos = (0, -0.050000000000000003))
self.power = 0
self.powerBar['value'] = self.power
self.powerBar.hide()
def resetPowerBar(self):
self.power = 0
self.powerBar['value'] = self.power
self.powerBar['text'] = ''
self.keyTTL = []
def _DistributedBanquetTable__beginFireWater(self):
if self.fireTrack and self.fireTrack.isPlaying():
return None
if self.aimStart != None:
return None
if not self.state == 'Controlled':
return None
if not self.avId == localAvatar.doId:
return None
time = globalClock.getFrameTime()
self.aimStart = time
messenger.send('wakeup')
taskMgr.add(self._DistributedBanquetTable__updateWaterPower, self.waterPowerTaskName)
def _DistributedBanquetTable__endFireWater(self):
if self.aimStart == None:
return None
if not self.state == 'Controlled':
return None
if not self.avId == localAvatar.doId:
return None
taskMgr.remove(self.waterPowerTaskName)
messenger.send('wakeup')
self.aimStart = None
origin = self.nozzle.getPos(render)
target = self.boss.getPos(render)
angle = deg2Rad(self.waterPitcherNode.getH() + 90)
x = math.cos(angle)
y = math.sin(angle)
fireVector = Point3(x, y, 0)
if self.power < 0.001:
self.power = 0.001
self.lastPowerFired = self.power
fireVector *= self.fireLength * self.power
target = origin + fireVector
segment = CollisionSegment(origin[0], origin[1], origin[2], target[0], target[1], target[2])
fromObject = render.attachNewNode(CollisionNode('pitcherColNode'))
fromObject.node().addSolid(segment)
fromObject.node().setFromCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.CameraBitmask | ToontownGlobals.FloorBitmask)
fromObject.node().setIntoCollideMask(BitMask32.allOff())
queue = CollisionHandlerQueue()
base.cTrav.addCollider(fromObject, queue)
base.cTrav.traverse(render)
queue.sortEntries()
self.hitObject = None
if queue.getNumEntries():
entry = queue.getEntry(0)
target = entry.getSurfacePoint(render)
self.hitObject = entry.getIntoNodePath()
base.cTrav.removeCollider(fromObject)
fromObject.removeNode()
self.d_firingWater(origin, target)
self.fireWater(origin, target)
self.resetPowerBar()
def _DistributedBanquetTable__updateWaterPower(self, task):
if not self.powerBar:
print '### no power bar!!!'
return task.done
newPower = self._DistributedBanquetTable__getWaterPower(globalClock.getFrameTime())
self.power = newPower
self.powerBar['value'] = newPower
if self.power < self.YELLOW_POWER_THRESHOLD:
self.powerBar['barColor'] = VBase4(0.75, 0.75, 1.0, 0.80000000000000004)
elif self.power < self.RED_POWER_THRESHOLD:
self.powerBar['barColor'] = VBase4(1.0, 1.0, 0.0, 0.80000000000000004)
else:
self.powerBar['barColor'] = VBase4(1.0, 0.0, 0.0, 0.80000000000000004)
return task.cont
def _DistributedBanquetTable__getWaterPower(self, time):
elapsed = max(time - self.aimStart, 0.0)
t = elapsed / self.waterPowerSpeed
exponent = self.waterPowerExponent
if t > 1:
t = t % 1
power = 1 - math.pow(1 - t, exponent)
if power > 1.0:
power = 1.0
return power
def d_firingWater(self, origin, target):
self.sendUpdate('firingWater', [
origin[0],
origin[1],
origin[2],
target[0],
target[1],
target[2]])
def firingWater(self, startX, startY, startZ, endX, endY, endZ):
origin = Point3(startX, startY, startZ)
target = Point3(endX, endY, endZ)
self.fireWater(origin, target)
def fireWater(self, origin, target):
color = VBase4(0.75, 0.75, 1, 0.80000000000000004)
dScaleUp = 0.10000000000000001
dHold = 0.29999999999999999
dScaleDown = 0.10000000000000001
horizScale = 0.10000000000000001
vertScale = 0.10000000000000001
sprayTrack = self.getSprayTrack(color, origin, target, dScaleUp, dHold, dScaleDown, horizScale, vertScale)
duration = self.squirtSfx.length()
if sprayTrack.getDuration() < duration:
duration = sprayTrack.getDuration()
soundTrack = SoundInterval(self.squirtSfx, node = self.waterPitcherModel, duration = duration)
self.fireTrack = Parallel(sprayTrack, soundTrack)
self.fireTrack.start()
def getPos(self, wrt = render):
return self.tableGroup.getPos(wrt)
def getLocator(self):
return self.tableGroup
def enterFlat(self, avId):
self.prepareForPhaseFour()
self.resetPowerBar()
self.notify.debug('enterFlat %d' % self.index)
if self.avId:
toon = base.cr.doId2do.get(self.avId)
if toon:
toon.wrtReparentTo(render)
toon.setZ(0)
self.tableGroup.setScale(1, 1, 0.01)
if self.avId and self.avId == localAvatar.doId:
localAvatar.b_squish(ToontownGlobals.BossCogDamageLevels[ToontownGlobals.BossCogMoveAttack])
def exitFlat(self):
self.tableGroup.setScale(1.0)
if self.avId:
toon = base.cr.doId2do.get(self.avId)
if toon:
if toon == localAvatar:
self.boss.toCraneMode()
toon.b_setAnimState('neutral')
toon.setAnimState('neutral')
toon.loop('leverNeutral')
def _DistributedBanquetTable__allowDetect(self, task):
if self.fadeTrack:
self.fadeTrack.finish()
self.fadeTrack = Sequence(self.tableGroup.colorScaleInterval(0.20000000000000001, VBase4(1, 1, 1, 1)), Func(self.tableGroup.clearColorScale), Func(self.tableGroup.clearTransparency))
self.fadeTrack.start()
self.allowLocalRequestControl = True
def gotBossZapped(self):
self.showExiting()
self.d_requestFree(True)
def _DistributedBanquetTable__upArrowKeyPressed(self):
if self.TugOfWarControls:
self._DistributedBanquetTable__pressHandler(0)
def _DistributedBanquetTable__downArrowKeyPressed(self):
if self.TugOfWarControls:
self._DistributedBanquetTable__pressHandler(1)
def _DistributedBanquetTable__pressHandler(self, index):
if index == self.buttons[0]:
self.keyTTL.insert(0, 1.0)
if not self.OnlyUpArrow:
self.buttons.reverse()
def _DistributedBanquetTable__spawnUpdateKeyPressRateTask(self):
taskMgr.remove(self.taskName(self.UPDATE_KEY_PRESS_RATE_TASK))
taskMgr.doMethodLater(0.10000000000000001, self._DistributedBanquetTable__updateKeyPressRateTask, self.taskName(self.UPDATE_KEY_PRESS_RATE_TASK))
def _DistributedBanquetTable__killUpdateKeyPressRateTask(self):
taskMgr.remove(self.taskName(self.UPDATE_KEY_PRESS_RATE_TASK))
def _DistributedBanquetTable__updateKeyPressRateTask(self, task):
if self.state not in 'Controlled':
return Task.done
for i in range(len(self.keyTTL)):
self.keyTTL[i] -= 0.10000000000000001
for i in range(len(self.keyTTL)):
if self.keyTTL[i] <= 0:
a = self.keyTTL[0:i]
del self.keyTTL
self.keyTTL = a
break
continue
self.keyRate = len(self.keyTTL)
keyRateDiff = self.keyRate - self.BASELINE_KEY_RATE
diffPower = keyRateDiff / 300.0
if self.power < 1 and diffPower > 0:
diffPower = diffPower * math.pow(1 - self.power, 1.25)
newPower = self.power + diffPower
if newPower > 1:
newPower = 1
elif newPower < 0:
newPower = 0
self.notify.debug('diffPower=%.2f keyRate = %d, newPower=%.2f' % (diffPower, self.keyRate, newPower))
self.power = newPower
self.powerBar['value'] = newPower
if self.power < self.YELLOW_POWER_THRESHOLD:
self.powerBar['barColor'] = VBase4(0.75, 0.75, 1.0, 0.80000000000000004)
elif self.power < self.RED_POWER_THRESHOLD:
self.powerBar['barColor'] = VBase4(1.0, 1.0, 0.0, 0.80000000000000004)
else:
self.powerBar['barColor'] = VBase4(1.0, 0.0, 0.0, 0.80000000000000004)
self._DistributedBanquetTable__spawnUpdateKeyPressRateTask()
return Task.done
def _DistributedBanquetTable__setMoveSound(self, sfx):
if sfx != self.moveSound:
if self.moveSound:
self.moveSound.stop()
self.moveSound = sfx
if self.moveSound:
base.playSfx(self.moveSound, looping = 1, volume = 0.5)
|
[
"fr1tzanatore@aol.com"
] |
fr1tzanatore@aol.com
|
48056c7a32622758cfb3818b9273a4f1de5b1921
|
0475b7d5791114c913e0ccc432ea7893fcd5182d
|
/webServer/webServer.py
|
f7b6750739694524227ed9c31fa897f53dcbaa67
|
[] |
no_license
|
jonzhaocn/python_projects
|
5c7717f5e81248e99f3252bba94c81d0cf3b6d5f
|
22d171aebd46590661c0ea3fc20a3fa5bef8bafd
|
refs/heads/master
| 2021-09-13T00:25:45.990594
| 2018-04-23T07:20:06
| 2018-04-23T07:20:06
| 87,550,515
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,828
|
py
|
import socket
import os
import logging
import subprocess
# 自己写的一个简单的we服务器代码,实现了get、post
class WebServer(object):
def __init__(self):
self.HOST = ''
self.PORT = 80
self.root_dir = 'd:/root_dir' # 文件的根目录
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # 设置协议、套接字
self.server.bind((self.HOST, self.PORT)) # 绑定端口
self.server.listen()
self.allowed_readable_text_file_types = ['.html', '.htm', '.txt', '.js', '.css'] # 设置允许访问的文件类型
self.allowed_readable_img_file_types = ['.jpg', '.gif', '.png', '.jpeg']
self.allowed_readable_file = self.allowed_readable_text_file_types + self.allowed_readable_img_file_types
self.allow_show_dir = True # 如果文件夹下没有index.html文件是否允许显示文件夹的结构
self.HTTP_version = 'HTTP/1.x'
logging.basicConfig(level=logging.DEBUG, # 设置log日志
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename='mylog.log',
filemode='w')
server_Logging = logging.StreamHandler()
server_Logging.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s: %(message)s')
server_Logging.setFormatter(formatter)
logging.getLogger('').addHandler(server_Logging)
def serve_forever(self): # 开启服务的主函数
while True:
client, address = self.server.accept() # 接受请求
request = client.recv(1024).decode()
if request is None or len(request) == 0:
continue
else:
request_list = request.split(' ')
method = request_list[0] # 请求是get还是post
file_src = request_list[1] # 请求的文件路径
content = None
path = self.root_dir + file_src
logging.info(str(address) + ':' + method + ' ' + file_src)
if method == 'GET':
if os.path.exists(path): # 该路径存在
if os.path.isdir(path): # 该路径是一个文件夹
if self.allow_show_dir: # 如果允许显示文件夹中的内容
content = self.read_index_file(path)
if content is None:
content = self.display_dir_structure(path)
else:
content = self.get_head(200, '.html') + content
else: # 如果不允许显示文件夹中的内容
content = self.read_index_file(path) # 查找该文件夹中是否存在index.html
if content is None:
content = self.get_head(403, '.html') + self.create_info_html("Forbidden")
else:
content = self.get_head(200, '.html') + content
elif os.path.isfile(path): # 该路径是一个文件
file_type = self.get_filnameExt(path)
if file_type in self.allowed_readable_file: # 如果该文件内容允许读取
content = self.get_head(200, '.html') + self.read_file(path)
else:
content = self.get_head(403, '.html') + self.create_info_html("Forbidden")
else: # 如果该路径不存在
content = self.get_head(404, '.html')+self.create_info_html("file not found")
client.sendall(content)
client.close()
if method == 'POST': # Post请求
# new_process = subprocess.Popen('')
content = None
if os.path.exists(path): # 处理表单的文件存在
form = request.split('\r\n')[-1] # 表单的内容在request的最后一行
form_list = form.split('&') # 如果表单中有多个内容,内容已&分隔
submit_args = ''
for item in form_list:
submit_args = submit_args + item + ";" # 提取表单中的内容
# python post.py firstname=1;lastname=2
args = ['python', path, submit_args]
try:
result = subprocess.check_output(args, shell=True) # 运行请求
except subprocess.CalledProcessError as e:
result = self.create_info_html('error')
content = self.get_head(200, '.html') + result
else: # 处理表单的文件不存在
content = self.get_head(404, '.html') + self.create_info_html('file not found')
client.sendall(content)
client.close()
def display_dir_structure(self, path): # 用于展示指定路径下的目录结构
dir_structure = '''
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=mbcs">
<title>Directory listing for {path}</title>
</head>
<body>
<h1>Directory listing for {path}</h1>
<hr>
<ul>
'''
for file in os.listdir(path):
dir_structure += '<li><a href=\"'+file+'\">'+file+'</a></li>'
dir_structure += '''
</ul>
<hr>
</body>
</html>'''
index = len(self.root_dir)
path = path[index:]
dir_structure = dir_structure.format(path=path).encode()
dir_structure = self.get_head(200, '.html')+dir_structure
return dir_structure
def get_head(self, status_code, file_type): # 返回头信息
status_code_dict = {
100: 'Continue', 101: 'Switching Protocols', 102: 'Processing', 200: 'OK',
400: 'Bad Request', 401: 'Unauthorized', 402: 'Payment Required', 403: 'Forbidden',
404: 'Not Found'
}
content = self.HTTP_version + ' ' + str(status_code) + ' ' + status_code_dict[status_code]+'\n'
if file_type in self.allowed_readable_text_file_types:
content += 'Content-Type: text/'+file_type.split('.')[-1]+'\n'+'\n'
elif file_type in self.allowed_readable_img_file_types:
content += 'Content-Type: image/'+file_type.split('.')[-1]+'\n'+'\n'
return content.encode()
def read_file(self, path): # 读取指定文件并返回
file = open(path, 'rb')
content = file.read()
file.close()
return content
def read_index_file(self, path): # 查找制定目录下的index文件,并返回其内容
for file in os.listdir(path):
list = file.split('.')
if len(list) == 2 and list[0].upper() == 'INDEX' and list[1] == 'html':
return self.read_file(path+'/'+file)
return None
def create_info_html(self, info): # 生成指定内容的网页
content = '''
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=mbcs">
<title>{info}</title>
</head>
<body>
<h1>{info}</h1>
</body>
</html>
'''.format(info=info).encode()
return content
def get_filnameExt(self, filename): # 获取文件的扩展名
import os
(filepath, tempfilename) = os.path.split(filename);
(shotname, extension) = os.path.splitext(tempfilename);
return extension
if __name__ == '__main__':
server = WebServer()
server.serve_forever()
|
[
"1044264932@qq.com"
] |
1044264932@qq.com
|
e5be18291d8f73243a4c9f25f3a48e3ce073b814
|
a7e40fc92f2681beef1c41afb0e348a910d0dc06
|
/misc_tool/fixfix.py
|
07be6bea5d3f88e9f20eff4f8610623fda11e076
|
[] |
no_license
|
yacheng1127/YW-Python
|
ae0e7480859e21c6aaab20a166561c818387ec44
|
e88cfd202c7449a23dd8bb8d659bbdc0df1a1ff9
|
refs/heads/package
| 2020-12-24T17:26:23.905392
| 2013-09-26T13:41:49
| 2013-09-26T13:41:49
| 7,425,035
| 0
| 1
| null | 2013-05-14T13:04:40
| 2013-01-03T15:36:42
|
Python
|
UTF-8
|
Python
| false
| false
| 22,076
|
py
|
################################################################################################
## fixfix: post-processes drifter tracks
## xiuling.wu
## August 9, 2011
##
## Jim Manning modifications
## October 2011
## March 2012
## Nov 2012 trouble with ginput causing segfault
## STEPS ARE AS FOLLOWS:
# - load all the ascii data file
# - for each drifter it conducts 4 basic steps
# - eliminate repeat times
# - calculate forward and backward differences (velocity) and eliminate bad points
# - writes meta data to log file
# - check for drogue loss
# - clean plots of track (pth) and velocity uv_id_final.png)
# - generates oracle ready ascii file
##################################################################################################
import sys
sys.path.append("/home3/ocn/jmanning/py/jmanning/whython6/")
import csv
from conversions import ll2uv #To convert from longitude/latitude to unit vectors
import numpy as np
import matplotlib as mpl
import matplotlib.mlab as ml
#part1
#import scipy
#from scipy import *
#import pylab
from pylab import *
import matplotlib.pyplot as plt
#import basemap
from matplotlib.dates import num2date,date2num, DateFormatter
import math
### HARD CODE ################################################################################
critfactor=8 #multipe of the mean velcity to discard
minla=20;maxla=48;minlo=-150;maxlo=-20 # delimiting data to make it easier
bathy=True # set to "True" if isobaths are wanted
fid=open("/home3/ocn/jmanning/drift/drift.log","a")#permanent log file that is appended to../home3/ocn/jmanning/drift/drift.log
# load /data5/jmanning/drift/massdmf/withtemp_2009.dat ##ids with temperature
#list(set(line.strip() for line in open('X:/drift/bowdoin/2009/withtemp_2009.dat'))) # ids with temperature
#fileformat='bowdoin'; # format of temperature data (sometimes "emolt") if using minilog/dat files
#wt='withtemp_2009'
direcin="/net/nwebserver/drifter/" # directory where the final plots are stored
#direcout='/net/data5/jmanning/drift/umassb/2012' #'/data5/jmanning/drift/css/2011'
#fn='drift_umb_2012_1.dat'
direcout='/net/data5/jmanning/drift/kocik/2012' #'/data5/jmanning/drift/css/2011'
fn='drift_cfcc_2012_1.dat'
fid.write('\n'+'#'*40+' below is '+str(fn)+' log '+'#'*40+'\n')
depcont=[-10] # depth contour in final plot (apparently not used in 5/2012)
year=int(2012)
strattle=0 # this will eventually flag to "1" if yeardays change by more than 365
### END HARDCODES ################################################
#raw data loaded
idss,yeardays_all,lat_all,lon_all,day_all,hours_all,minutes_all,depths,temps=[],[],[],[],[],[],[],[],[]
csv_reader=csv.reader(open(direcin+fn,"r"))
for line in (x for x in csv_reader if x[0][0] !='%'): # if the first line is comment line, skip
# print float(line[0].split()[8])
if float(line[0].split()[8])<maxla and float(line[0].split()[8])>minla and float(line[0].split()[7])>minlo and float(line[0].split()[7])<maxlo:
idss.append(line[0].split()[0])
yeardays_all.append(line[0].split()[6])
lat_all.append(line[0].split()[8])
lon_all.append(line[0].split()[7])
day_all.append(line[0].split()[3])
hours_all.append(line[0].split()[4])
minutes_all.append(line[0].split()[5])
depths.append(line[0].split()[9])
temps.append(line[0].split()[10])
# get ids
id=list(set(idss))
# convert string to float
yeardays_all=[float(i)+1 for i in yeardays_all]# in python num2date(), less one day than matlab, so add 1 here
lat_all=[float(i) for i in lat_all]
lon_all=[float(i) for i in lon_all]
#days_all=[float(i) for i in days_all]
#ids=[float(i) for i in ids]
days_all,ids=[],[]
for i in range(len(day_all)):
days_all.append(int(float(day_all[i])))
for i in range(len(id)):
ids.append(int(float(id[i])))
fido=open(direcout+'/prep_for_oracle_'+fn[6:],'w')
ids=np.sort(ids)
#ids=[110410712]#,11041073]
for k in range(len(ids)): #where "ids" is a list of distinct ids and int
# latitude, longitude, time
strattle=0
lat,lon,time,yeardays,depth,temp=[],[],[],[],[],[]
for i in range(len(idss)):
if int(float(idss[i]))==ids[k]:
lat.append(lat_all[i])
lon.append(lon_all[i])
if (i>1)&(strattle==0):# here's where we account for a new year
if yeardays_all[i]-yeardays_all[i-1]<-200:
year=year+1
print 'incremented year to '+str(year)
strattle=1
yeardays.append(yeardays_all[i])
#time.append(date2num(num2date(yeardays_all[i]).replace(year=year).replace(day=days_all[i])))
time.append(date2num(num2date(yeardays_all[i]).replace(year=year)))
depth.append(depths[i])
temp.append(temps[i])
#print time
print "there are ", len(lat), " fixes for id =",ids[k]
print "Note: Check to see if any of these already exist in database before loading"
# STEP 1a: check to make sure time is monotonic
#if len(find(diff(time)<=0))>0:
# plot(time)
# show()
#raw_input('Trouble with time not increasing press return to continue')
# close()
# STEP 1b: check for repeat times
###### while time[i]==time[i-1], get the del_same_time_index ########
del_same_time_index=[]
for i in range(1,len(time)):
if int(time[i-1])==int(time[i]) and num2date(time[i-1]).hour== num2date(time[i]).hour and num2date(time[i-1]).minute== num2date(time[i]).minute:
del_same_time_index.append(i)
del_same_time_index.reverse()
if del_same_time_index==[]:
print "there is no same time."
else:
print str(len(del_same_time_index))+' points deleted with the same times'
index=range(len(time))
for i in del_same_time_index:
del lat[i],lon[i],time[i],yeardays[i],depth[i],temp[i]
# STEP 2a:
############ calculate forward and backward velocities of the raw data ##########################
forward_u,forward_v,forward_spd,jdn=ll2uv(time,lat,lon)# was yeardays but now uses "time" as of 3/29/2012
backward_u,backward_v,backward_spd,backward_jdn=ll2uv(time[::-1],lat[::-1],lon[::-1])
## calculate resultants
id_fs=list(np.where(np.array(forward_spd)<500)[0])
id_bs=list(np.where(np.array(backward_spd)<500)[0])
idc=[val for val in id_fs if val in id_bs]
jdraw,spdraw=[],[]
for i in idc:
jdraw.append(jdn[i])
spdraw.append(forward_spd[i])
########### plot the velocities ###################################################
## def plot_speed(time,speed):
## #fig=plt.figure()
## ax = fig.add_subplot(111) #to divide the fig into some area and (line row chose)
## plt.title('Difter#'+str(ids[k]))
## jd=[]
## for i in time:
## jd.append(i)
## plt.plot(jd,speed,'b-')
#locator = mpl.dates.AutoDateLocator()
#ax.xaxis.set_major_locator(locator)
## if len(jd)<100:
## else:
## monthsFmt = DateFormatter('%b/%d')
## ax.set_ylabel('cm/s')
#ax.xaxis.set_major_formatter(monthsFmt)
## ax.set_xlabel(str(year),fontsize=17)
## plt.grid()
## fig=plt.figure()
## plot_speed(jdraw,spdraw)## plot speed
## plt.show()
## plt.close()
#######################################################################################
# calculate a reasonable criteria for this drifter
crit=np.mean([abs(i) for i in forward_spd])*critfactor
print "Velocity criteria set to ", str(critfactor),' times the mean or ',str(crit),' cm/s'
# check for drifter going aground (ie very low velocity)
idlow=list(np.where(np.array(spdraw)<float(np.mean([abs(i) for i in forward_spd]))/100)[0])
# if idlow is not empty, add the comments in fid file
if idlow<>[]:
for i in range(len(idlow)):
print 'WARNING: Drifter ',str(ids[k]),' may be hung up on gear or aground on ',str(idlow[i]),' where velocity is < 1# mean'
#fid.write(str(ids[k]).rjust(10)+' apparently hung-up on '+str(idlow[i])+'\n')
idlow_print0=str(sorted(idlow))
idlow_print1=idlow_print0.replace(', ',' ')
tempochunk0=str(ids[k]).rjust(10)+' apparently hung-up on '+str(idlow_print1)+'\n'#'from'+str(idlow[0])+'to'+str(idlow[-1])+'\n'
else:
tempochunk0='There is no point hung up\n'
#### find bad velocities where criteria was just calculated
idbadf=list(np.where(abs(np.array(forward_spd))>crit)[0])
idbadb=list(np.where(abs(np.array(backward_spd))>crit)[0])
#if it is the 2nd time/point in the bad forward velocity (fu) that caused the problem
# then the 2nd time/point associated with the bad backward velocity should match
timeb=time[::-1] # backwards time vector
badtime=list(set([time[i+1] for i in idbadf]).intersection(set([timeb[i+1] for i in idbadb])))
print "%10.3f percent bad velocities deleted according to velocity criteria" % float(len(badtime)/float(len(lat))*100.)
index_badtime=[]# find near the badtime points
for i in badtime:
index_badtime.append(int(np.interp(i,time,range(len(time)))))
if index_badtime<>[]:
index_badtime.reverse()
for i in index_badtime:
index_near_badtimes=[]
if i-5<0:
ra=range(0,i+5)
elif i+5>len(lat):
ra=range(i-5,len(lat)-1)
else:
ra=range(i-5,i+5)
for m in ra:
index_near_badtimes.append(m)
plot_badtime=list(set(index_near_badtimes))
#plot the bad time data and near the bad time data's points
#plt.plot([lon[l] for l in plot_badtime],[lat[l] for l in plot_badtime],marker='.',)
#plt.plot([lon[l] for l in index_badtime],[lat[l] for l in index_badtime],marker='o',markerfacecolor='r',linestyle='None')
fig=plt.figure()
plt.plot([lon[l] for l in plot_badtime],[lat[l] for l in plot_badtime],marker='.',)
plt.plot(lon[i],lat[i],marker='o',markerfacecolor='r',linestyle='None')
plt.show()
#plt.close()
del_or_not=raw_input('Delete? (y/n or 1 for the end point): ')
if del_or_not=='y':
del time[i],lat[i],lon[i],yeardays[i],depth[i],temp[i]
elif del_or_not=='1':
plt.plot(lon[i-1],lat[i-1],marker='o',markerfacecolor='r',linestyle='None')
plt.show()
raw_input('How is that? press return')
del time[i-1],lat[i-1],lon[i-1],yeardays[i-1],depth[i-1],temp[i-1]
plt.close()
plt.close()
# STEP 3:
# delete values bad due to objective criteria
#index_badtime.reverse()
#for i in index_badtime:
# if lat[i]!=999:
# del time[i],lat[i],lon[i],yeardays[i],depth[i],temp[i]
#print str(float(len(badtime))/len(time)*100),'% edited due to bad velocities > criteria'
idgood=len(lat)
###############################################################################################
# Step 4a:
# calculate forward velocities of the automatically editted data
fu,fv,spd1,jd1=ll2uv(time,lat,lon)
fig=plt.figure()
#plot_speed(jd1,spd1)
plt.plot(jd1,spd1)
plt.plot(jd1,spd1,marker="o",markerfacecolor="r",linestyle='None')
plt.show()
print 'click on any obviously bad points and then press the enter key.'
badpoints=ginput(n=0)
print badpoints#,timeout=10)
#badpoints=ginput(0,timeout=10,mouse_stop=3)
#badpoints=[]
#close()
# Step 4b:
# eliminate those points clicked as bads
# find badpoints index in yeardays
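# note: np.interp(x,jd1,range(len(jd1))) maps a clicked time back to a (fractional)
# sample index, and int() snaps it down to a whole index that can be deleted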
index_badpoints=[]
badpoints_num=len(badpoints)
for i in range(len(badpoints)):
index_badpoints.append(int(np.interp(badpoints[i][0],jd1,range(len(jd1)))))
print index_badpoints
index_badpoints=list(set(index_badpoints))
print "%10.2f percent bad velocities deleted according to manual clicks on velocity" % float(float(badpoints_num)/len(lat)*100.)
for i in sorted(index_badpoints)[::-1]:
del time[i], lat[i], lon[i], yeardays[i],depth[i],temp[i]
#plt.close()
plot_again=raw_input("Do you want to replot the figure after deleting the bad points? (y/n)")
#plot_again='y'
if plot_again=="y" or plot_again=="Y" or plot_again=="yes":
#plt.close('all')
fig=plt.figure()
fu2,fv2,spd2,jd2=ll2uv(time,lat,lon)
#plot_speed(jd2,spd2)
plt.plot(jd2,spd2,'mo-',markersize=5)#marker='o',markerfacecolor="r",linestyle='None')
plt.show()
#plt.close()
#print 'pausing 10 seconds'
#sleep(10)
# if there is a run of bad points, click the first and last point, then delete everything between them
del_between=raw_input('Do you want to delete all the points between two points? input "N" or "Y"' )
#del_between='N'
if del_between=="N" or del_between=="n":
print "You have chosen NOT to delete all the points between two points."
if del_between=="Y" or del_between=="y" :
print "Please click the first bad point and the last bad point to choose the range of the bad points"
between_badpoints=ginput(n=0)
print between_badpoints#,timeout=0)#,mouse_stop=2)
index_between_badpoints=[]
for i in range(len(between_badpoints)):
index_between_badpoints.append(int(np.interp(between_badpoints[i][0],jd2,range(len(jd2)))))
print index_between_badpoints
index_betweens=[]
for i in range(sorted(index_between_badpoints)[0],sorted(index_between_badpoints)[1]+1):
index_betweens.append(i)
for i in index_betweens[::-1]:
del lat[i],lon[i],time[i],yeardays[i],depth[i],temp[i]
del_between_badpoints=sorted(index_between_badpoints)[1]-sorted(index_between_badpoints)[0]+1
badpoints_num=len(badpoints)+del_between_badpoints
print "%10.2f percent edited due to bad velocities from manual clicks between two points" % float(float(badpoints_num)/len(time)*100.)
if ids[k]==1174306915 or ids[k]==1174306911:
del time[-1], lat[-1], lon[-1], yeardays[-1],depth[-1],temp[-1]
fig=plt.figure()
fu3,fv3,spd3,jd3=ll2uv(time,lat,lon)
#plot_speed(jd3,spd3)
plt.plot(jd3,spd3,'bo-')
plt.show()
#step 5a:
#manually delete points based on track
##############################################################################################################
fig=plt.figure()
#plt.figure(2)
#plt.plot(lon,lat,'k-')
plt.plot(lon,lat,'ro-')
plt.show()
print 'click on any obviously bad points on the track and then press the enter key.'
bad=ginput(n=0)
print bad
badplotpts=[] #index of points that are found to be near the same index of x & y
if len(bad)>0:
for kbad in range(len(bad)):
idxbad=ml.find(abs(lon-bad[kbad][0])==min(abs(lon-bad[kbad][0])))
idybad=ml.find(abs(lat-bad[kbad][1])==min(abs(lat-bad[kbad][1])))
print idxbad,idybad
if idxbad==idybad:
print lat[int(idxbad)],lon[int(idxbad)],' is bad'
badplotpts.append(int(idxbad))
for kk in range(len(badplotpts)):
plt.plot(lon[badplotpts[kk]],lat[badplotpts[kk]],'bo')
# #thismanager = get_current_fig_manager()
# #thismanager.window.SetPosition((1000,500))
plt.show()
for i in sorted(badplotpts)[::-1]:
del time[i], lat[i], lon[i], yeardays[i],depth[i],temp[i]
#fig=plt.figure()
plt.plot(lon,lat,'yo-')
plt.show()
raw_input(str(len(badplotpts))+' deleted from manual click on track. Press return to continue')
#plt.close()
# write to log file if some data was editted
if badpoints_num>0:
tempochunk1=(str(ids[k]).rjust(10)+' '+ str(crit).rjust(10)+' '+ str(badpoints_num).rjust(10)+' '+
str(idgood).rjust(10)+" "+str(math.floor(time[-1]-time[0])).rjust(10)+" manually edited uv plot\n")
else:
tempochunk1='There were no bad points deleted manually.\n'
if len(badtime)>0:
tempochunk2=(str(ids[k]).rjust(10)+' '+ str(crit).rjust(10)+' '+ str(len(badtime)).rjust(10)+' '+
str(idgood).rjust(10)+" "+str(math.floor(time[-1]-time[0])).rjust(10)+" objectively editted\n")
else:
tempochunk2='There were no bad velocities deleted according to the velocity criteria.\n'
if len(badplotpts)>0:
tempochunk3=(str(ids[k]).rjust(10)+' '+ str(crit).rjust(10)+' '+ str(len(badplotpts)).rjust(10)+' '+
str(idgood).rjust(10)+" "+str(math.floor(time[-1]-time[0])).rjust(10)+" manually editted track points\n")
else:
tempochunk3='There were no bad points deleted manually on the track.\n'
# clean velocities w/out bad points
[u2,v2,spd2,jd2]=ll2uv(time,lat,lon) # was "yeardays" until 3/2012, changed to deal with straddling the new year
#plot time, lat,lon
fig=plt.figure(figsize=(10,8))
ax = fig.add_subplot(111)
print 'calling basemap_usgs '
#print min(lat),min(lon),max(lat),max(lon)
#if max(lon)-min(lon)>4.0:
# basemap.basemap(lat,lon)#,(float(min(max(lat)-min(lat),max(lon)-min(lon)))+1.0)/5*4)
#else:
# basemap.basemap_detail(lat,lon,bathy, False,float(min(max(lat)-min(lat),max(lon)-min(lon)))/5*4)
#basemap.basemap_usgs(lat,lon,False)#,depcont)
print 'past basemap'
ax.plot(lon,lat,marker=".",markerfacecolor='r',markersize=10)
points_num=10
ax.set_ylabel('latitude')
ax.set_xlabel("longitude")
#ax.set_xlim(min(lon)-(max(lon)-min(lon))/10.,max(lon)+(max(lon)-min(lon))/10.)
#ax.set_ylim(min(lat)-(max(lat)-min(lat))/10.,max(lat)+(max(lat)-min(lat))/10.)
ax.set_xlim(min(lon),max(lon))
ax.set_ylim(min(lat),max(lat))
plt.title('Drifter #'+str(ids[k]),fontsize=16)
# fig.autofmt_xdate() #display the time "lean"
ax.xaxis.set_label_coords(0.5, -0.1)#set the position of the xtick labels
ax.yaxis.set_label_coords(-0.08, 0.4)
# the last points, annotate "end"
self_annotate1=ax.annotate("End", xy=(lon[-1], lat[-1]),xycoords='data', xytext=(8, 11),
textcoords='offset points',arrowprops=dict(arrowstyle="->"))
self_annotate2=ax.annotate("Start", xy=(lon[0], lat[0]),xycoords='data', xytext=(8, 11),
textcoords='offset points',arrowprops=dict(arrowstyle="->"))
if time[-1]-time[0]<=2:
if len(time)<5: skip=1
else: skip=int(float(len(time))/5)
for i in range(0,len(time),skip):
self_annotate3=ax.annotate(num2date(time[i]).replace(tzinfo=None).strftime('%d-%b %H:%M'), xy=(lon[i], lat[i]),
xycoords='data', xytext=(8, 11), textcoords='offset points',arrowprops=dict(arrowstyle="->"))
# self_annotate3.draggable()
elif (time[-1]-time[0]>2.0)&(time[-1]-time[0]<20.0):
for i in range(1,len(time)):
if num2date(time[i-1]).day!=num2date(time[i]).day:
self_annotate4=ax.annotate(num2date(time[i]).replace(tzinfo=None).strftime('%d-%b'), xy=(lon[i], lat[i]),
xycoords='data', xytext=(8, 11), textcoords='offset points',arrowprops=dict(arrowstyle="->"))
else: # place approximately 10 labels
for i in range(1,len(time),int(len(time)/10.)):
#if num2date(time[i-1]).day<>num2date(time[i]).day:
self_annotate4=ax.annotate(num2date(time[i]).replace(tzinfo=None).strftime('%d-%b'), xy=(lon[i], lat[i]),
xycoords='data', xytext=(8, 11), textcoords='offset points',arrowprops=dict(arrowstyle="->")) # self_annotate4.draggable() #drag the text if you want
thismanager = plt.get_current_fig_manager()
thismanager.window.SetPosition((1000, 0))
plt.show()
plt.savefig(direcin+'pth_'+str(ids[k])+'_final'+".ps")
plt.savefig(direcin+'pth_'+str(ids[k])+'_final'+".png")
raw_input('press return to close final track window')
plt.close()
# plt.show()
#plot u & v
fig=plt.figure()
ax1 = fig.add_subplot(111)
plt.plot(jdn,forward_u,"r",label='raw eastward')
plt.plot(jdn, forward_v,"b",label='raw northward')
plt.plot(jd2,u2,"m",linewidth=2,label='final eastward')
plt.plot(jd2,v2,"g",linewidth=2,label='final northward')
leg=plt.legend()
# leg.draggable()
locator = mpl.dates.AutoDateLocator()
ax1.xaxis.set_major_locator(locator)
if len(jdn)<100:
monthsFmt = DateFormatter('%b/%d %H:')
else:
monthsFmt = DateFormatter('%b/%d')
ax1.xaxis.set_major_formatter(monthsFmt)
ax1.set_xlabel(str(year))
ax1.set_ylabel('cm/s (where 50 ~ 1 knot)')
fig.autofmt_xdate() #display the time "lean"
plt.title('Drifter '+str(ids[k])+' cleaned',fontsize=16)
plt.savefig(direcin+'uv_'+str(ids[k])+'_final'+'.ps')
plt.savefig(direcin+'uv_'+str(ids[k])+'_final'+'.png')
plt.show()
raw_input('press return to close uv window')
# close()
# write out id,date,lat,lon,yrday0_gmt,temp, and depth_i
depth=[float(i) for i in depth]
for i in range(len(time)):
fido.write(str(ids[k]).rjust(10)+ " " +num2date(time[i]).replace(tzinfo=None).strftime('%d-%b-%Y:%H:%M')+" ")
fido.write(("%10.6f") %(lat[i]))
fido.write(" ")
fido.write(("%10.6f") %(lon[i]))
fido.write(" ")
fido.write(("%10.6f") %(yeardays[i]-1))
fido.write(" ")
fido.write(temp[i]+ " ")
fido.write(("%5.1f") %(depth[i]))
fido.write('\n')
if k!=len(ids)-1:
raw_input("Press Enter to process next drifter")
whetherlog=raw_input('Do you want to keep this log?')
if whetherlog=="Y" or whetherlog=="y" :
fid.write(tempochunk0)
fid.write(tempochunk1)
fid.write(tempochunk2)
fid.write(tempochunk3)
print 'log has been saved.'
fido.close()
fid.close()
# ===== end of file -- author: james.manning@noaa.gov | next file: jasontangxf/geometer : /tests/test_base.py [MIT] =====
import numpy as np
from geometer.base import TensorDiagram, Tensor, TensorCollection, LeviCivitaTensor, KroneckerDelta
class TestTensor:
def test_arithmetic(self):
a = Tensor(2, 3)
b = Tensor(5, 4)
# vector operations
assert a + b == Tensor(7, 7)
assert a - b == Tensor(-3, -1)
assert -a == Tensor(-2, -3)
# scalar operations
assert a + 6 == Tensor(8, 9)
assert a - 6 == Tensor(-4, -3)
assert a * 6 == Tensor(12, 18)
assert a / 6 == Tensor(1/3, 0.5)
def test_transpose(self):
a = Tensor([[1, 2],
[3, 4]], covariant=[0])
assert a.transpose() == Tensor([[1, 3], [2, 4]])
assert a.T._covariant_indices == {1}
assert a.T.T == a
def test_getitem(self):
a = Tensor([[1, 2],
[3, 4]], covariant=[0])
assert a[0, 1] == 2
assert a[None, 1] == [[3, 4]]
assert a[None, 1].tensor_shape == (0, 1)
assert a[::-1, 0] == [3, 1]
assert a[::-1, 0].tensor_shape == (1, 0)
def test_dtype(self):
a = Tensor(2, 3, dtype=np.float32)
assert a.dtype == np.float32
a = Tensor(2, 3, dtype=np.complex64)
assert a.dtype == np.complex64
class TestTensorCollection:
def test_init(self):
# empty list
a = TensorCollection([])
assert len(a) == 0
# numpy array
a = TensorCollection(np.ones((1, 2, 3)))
assert len(a) == 1
assert a.size == 2
# nested list of numbers
a = TensorCollection([[1, 2], [3, 4]])
assert len(a) == 2
assert a.size == 2
# nested tuple of numbers
a = TensorCollection(((1, 2), (3, 4)))
assert len(a) == 2
assert a.size == 2
# nested list of Tensor objects
a = TensorCollection([[Tensor(1, 2, 3), Tensor(3, 4, 5)]])
assert a.shape == (1, 2, 3)
assert len(a) == 1
assert a.size == 2
# object with __array__ function
class A:
def __array__(self):
return np.array([Tensor(1, 2), Tensor(3, 4)])
a = TensorCollection(A())
assert len(a) == 2
assert a.size == 2
def test_flat(self):
a = [Tensor([[1, 2], [3, 4]]), Tensor([[5, 6], [7, 8]])]
b = TensorCollection([a], tensor_rank=2)
assert list(b.flat) == a
def test_getitem(self):
a = Tensor([[1, 2],
[3, 4]])
b = Tensor([[5, 6],
[7, 8]])
c = TensorCollection([a, b])
assert c[0] == a
assert c[1] == b
assert list(c) == [a, b]
assert c[:, 1] == TensorCollection([Tensor([3, 4]), Tensor([7, 8])])
assert c[:, 0, 0] == [1, 5]
class TestTensorDiagram:
def test_add_edge(self):
a = Tensor([1, 0, 0, 0])
b = Tensor([[42, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], covariant=False)
diagram = TensorDiagram((a, b))
assert diagram.calculate() == Tensor([42, 0, 0, 0])
diagram.add_edge(a.copy(), b)
assert diagram.calculate() == 42
def test_tensor_product(self):
e1 = Tensor(1, 0)
e2 = Tensor(0, 1)
a = Tensor([0, 1],
[1, 0], covariant=[0])
b = Tensor([1, 0],
[0, 1], covariant=[0])
m = a.tensor_product(b)
e = e1.tensor_product(e2)
assert TensorDiagram((e, m), (e, m)).calculate() == (a * e1).tensor_product(b * e2)
d = TensorDiagram()
d.add_node(a)
d.add_node(b)
assert d.calculate() == a.tensor_product(b)
def test_epsilon_delta_rule(self):
e1 = LeviCivitaTensor(3, True)
e2 = LeviCivitaTensor(3, False)
d = KroneckerDelta(3)
d2 = d.tensor_product(d)
d1 = d2.transpose((0, 1))
diagram = TensorDiagram((e1, e2.transpose()))
assert diagram.calculate() == d1 - d2
def test_kronecker_delta(self):
d = KroneckerDelta(4, 3)
assert d.array.shape == (4,)*6
assert d.array[0, 1, 2, 0, 1, 2] == 1
assert d.array[0, 2, 1, 0, 1, 2] == -1
# ===== end of file -- author: jan.rv@t-online.de | next file: marthinwurer/TournamentRecorder : /TestFiles/test_cp.py [no license] =====
"""
This file tests the create player (cp) method of the API
Author: TangentTally
"""
import unittest
import sys
sys.path.insert(0, "../src")
import tr_api
class TestCp(unittest.TestCase):
def setUp(self):
tr_api.createPlayer(1, 'Evan')
self.topdict = tr_api.listPlayers()
self.topdict2 = tr_api.createPlayer(1, "Fail2")
def tearDown(self):
tr_api.createPlayer(2, 'Ben')
tr_api.createPlayer(3, 'Will')
tr_api.createPlayer(4, 'Jon')
def test_add_player(self):
self.assertEqual(len(self.topdict.get('rows')), 1)
def test_new_player_id(self):
self.assertEqual(self.topdict.get('rows')[0].get('id'), 1)
def test_new_player_name(self):
self.assertEqual(self.topdict.get('rows')[0].get('name'), 'Evan')
def test_fail_cp_same_id(self):
self.assertFalse(self.topdict2.get('outcome'))
def test_fail_cp_error_message(self):
self.assertEqual(self.topdict2.get('reason'), 'DCI Exists')
# ===== end of file -- author: evanditto9@gmail.com | next file: teros0/SimpleHTTPServer : /main.py [no license] =====
import os
import sys
import socket
import mimetypes
from functools import lru_cache
class SimpleHTTPServer:
def __init__(self, host, port):
self.host = host
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.bind((self.host, self.port))
self.sock.listen(5)
print("Server initialized on host {} port {}"
.format(self.host, self.port))
def process_request(self):
conn, addr = self.sock.accept()
data = conn.recv(1024)
request = data.decode('utf-8')
file_path = request.split()[1]
full_path = sys.path[0] + file_path
try:
response = self.create_response(full_path)
conn.sendall(response[0])
conn.sendall(response[1])
finally:
conn.close()
@lru_cache(maxsize=10)
def create_head(self, path):
headers = "HTTP/1.1 200 OK\n"
if os.path.isfile(path):
type = mimetypes.guess_type(path)[0]
headers += 'Content-Type: {}\n\n'.format(type)
else:
headers += 'Content-Type: text/html\n\n'
return headers.encode('utf-8')
def list_dirs(self, path):
with open(path + '/.temp_index.html', 'w+') as f:
f.write("""<!DOCTYPE html>
<html>
<title>Directory listing for {0}</title>
<body>
<h2>Directory listing for {0}</h2>
<hr>
<ul>\n""".format(path))
for entry in os.listdir(path):
if entry == '.temp_index.html':
continue
f.write("<li><a href='/{0}/{1}'>{1}</a>\n"
.format(os.path.relpath(path), entry))
return path + '/.temp_index.html'
def create_body(self, path):
with open(path, 'rb') as f:
body = f.read()
return body
def create_response(self, path):
if os.path.isdir(path):
folder_content = os.listdir(path)
if 'index.html' in folder_content:
path += '/index.html'
else:
path = self.list_dirs(path)
headers = self.create_head(path)
body = self.create_body(path)
response = (headers, body)
return response
def serve_forever(self):
while True:
self.process_request()
host = ''
if sys.argv[1:]:
port = int(sys.argv[1])
else:
port = 8000
if __name__ == "__main__":
server = SimpleHTTPServer(host, port)
try:
server.serve_forever()
except KeyboardInterrupt:
        print("\nKeyboard interrupt, exiting.")
sys.exit(0)
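# Quick usage sketch (assuming this file is saved as main.py, per the repo path):
# run `python3 main.py 8000` and browse to http://localhost:8000/ ; a directory
# without an index.html gets the generated .temp_index.html listing instead.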
# ===== end of file -- author: best.guess.oni@gmail.com | next file: tanakan2544/myscript : /set.py [no license] =====
zoo = ( "Kangaroo" , "Leopard" , "Moose" )
print( "Tuple:" , zoo , "\tLength:" , len( zoo ) )
print( type( zoo ) )
bag = { "Red" , "Green" , "Blue" }
bag.add( "Yellow" )
print( "\nSet:" , bag , "\tLength" , len( bag ) )
print( type( bag ) )
print( "\nIs Green In bag Set?:" , "Green" in bag )
print( "Is Orange In bag Set?:" , "Orange" in bag )
box = { "Red" , "Purple" , "Yellow" }
print( "\nSet:" , box , "\t\tLength" , len( box ) )
print( "Common To Both Sets:" , bag.intersection( box ) )
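# For completeness, the other set operators follow the same pattern (a minimal sketch):
print( "In Either Set:" , bag.union( box ) )
print( "Only In bag Set:" , bag.difference( box ) )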
# ===== end of file -- author: noreply@github.com | next file: ThiagoAugustoSM/hackerRank : /Contests/Week of Code 28/Boat Trips/boatTrips.py [no license] =====
#!/bin/python3
import sys
n,c,m = input().strip().split(' ')
n,c,m = [int(n),int(c),int(m)]
p = list(map(int, input().strip().split(' ')))
if m * c >= max(p):
print("Yes")
else:
print("No")
# ===== end of file -- author: martinsthiagoaugusto@gmail.com | next file: huazhicai/Douban_scrapy : /zhaoping/zhaoping/middlewares.py [no license] =====
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
import random
import time
class ZhaopingSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class ProxyMiddleware(object):
def process_request(self, request, spider):
"""request对象加上proxy"""
proxy = self.get_random_proxy()
print("this is request ip:" + proxy)
request.meta['proxy'] = proxy
def process_response(self, request, response, spider):
"""对返回的response处理"""
# 如果返回的response状态不是200,重新生成当前request对象
if response.status != 200:
proxy = self.get_random_proxy()
print("this is response ip:" + proxy)
# 对当前reque加上代理
request.meta['proxy'] = proxy
return request
return response
def get_random_proxy(self):
"""随机从文件中读取proxy"""
while True:
with open('proxies.txt', 'r') as f:
proxies = f.readlines()
if proxies: # readlines() returns [] (never None) for an empty file
break
else:
time.sleep(1)
proxy = random.choice(proxies).strip()
return proxy
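# A minimal sketch of how this middleware would be enabled (this belongs in the
# project's settings.py; the priority value 543 is an arbitrary choice):
#
# DOWNLOADER_MIDDLEWARES = {
#     'zhaoping.middlewares.ProxyMiddleware': 543,
# }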
# ===== end of file -- author: zhihua cai | next file: kurtzeborn/azure-sdk-for-python : /sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2022_02_01_preview/aio/operations/_operations.py [permissive: MIT et al.] =====
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operations import build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
"""Operations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerregistry.v2022_02_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.OperationListResult"]:
"""Lists all of the available Azure Container Registry REST API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerregistry.v2022_02_01_preview.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2022-02-01-preview") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/providers/Microsoft.ContainerRegistry/operations"} # type: ignore
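# A minimal usage sketch (an assumption, not part of the generated file): the pager
# returned by list() is consumed with `async for` on the aio management client, e.g.
#
# client = ContainerRegistryManagementClient(credential, subscription_id)
# async for op in client.operations.list():
#     print(op.name)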
# ===== end of file -- author: noreply@github.com | next file: HelpMeFinishPhD/Qcamp2019 : /programs/4_HackTools/Quantum_Listener/key_logger/key_logger.py [no license] =====
#!/usr/bin/env python2
'''
Python wrapper program to log the measurement result for Eve
This version will log from two devices: Arduino (serial1) and powermeter (serial2)
Author: Qcumber 2018
'''
import serial
import sys
import time
import datetime
# Serial 1
serial_addr1 = '/dev/ttyACM0' # Arduino
# Serial 2
serial_addr2 = '/dev/ttyACM1' # Powermeter
print "The JW Eavesdropping... will record any voltages into a file"
print "To exit the program, use Ctrl+C \n"
# Other parameters declarations
# Serial 1
baudrate1 = 9600 # Default in Arduino
timeout1 = 0.1 # Serial timeout (in s).
refresh_rate= 0.0 # Minimum offset around 115 ms
# Serial 2
baudrate2 = 115200 # Default in Arduino
timeout2 = 0 # Serial timeout (in s).
# Some note (may be a bug in the future):
# The pyserial somehow does not properly respond to powermeter timeout
# I will just assume it to have 0 timeout, and let the blocking be done
# by the other device's response time.
# Opens the receiver side serial port
receiver1 = serial.Serial(serial_addr1, baudrate1, timeout=timeout1)
receiver2 = serial.Serial(serial_addr2, timeout=timeout2)
# Waiting until the serial device is open (for some computer models)
time.sleep(2)
print "Ready!\n"
# Setting the range for powermeter
receiver2.write("RANGE3\n")
# The filename is the current time
filename = str(datetime.datetime.now().time())[:8] + '.dat'
print "Logging the voltages into:", filename
while True:
try:
receiver1.write("VOLT? ")
receiver2.write("VOLT?\n")
# Block until receive the reply
while True:
if receiver1.in_waiting:
volt_now1 = receiver1.readlines()[0][:-2] # Remove the trailing \r\n
volt_now2 = receiver2.readlines()[0][1:-2] # Remove the leading char and trailing \r\n
break
print volt_now1, volt_now2
# Write to a file
with open(filename, "a") as myfile:
myfile.write(volt_now1 + " " + str(volt_now2) + "\n")
# Wait until the next time
time.sleep(refresh_rate)
except KeyboardInterrupt:
print ("\nThank you for using the program!")
sys.exit() # Exits the program
# ===== end of file -- author: anuutama@gmail.com | next file: Hayden-Neilson/Gui-with-python : /tkproj.py [no license] =====
from tkinter import *
window = Tk()
def km_to_miles():
miles = float(el_value.get())*1.6
txt1.insert(END, miles)
but1 = Button(window, text="Execute", command=km_to_miles)
but1.grid(row=0, column=0)
el_value = StringVar()
entry1 = Entry(window, textvariable=el_value)
entry1.grid(row=0, column=1)
txt1 = Text(window, height=1, width=20)
txt1.grid(row=0, column=2)
window.mainloop()
# ===== end of file -- author: haydenneilson5@gmail.com | next file: kchang2/pi0-analysis : /In Progress/fast_individualtimeEElimited.py [no license] =====
##
## Tutorial for drawing a histogram plot (2D) from tree variables
## in the endcap region. We want a general time resolution to see
## which sections are affected the most.
##
## Updated as of 07/27/2015
## Running as of 07/27/2015
##
import ROOT as rt
import sys, random, math
import time
import os
import datetime # used below to timestamp the output folder
import shutil # used below to copy unpack.py
import stackNfit as snf
import numpy as np
from FastProgressBar import progressbar
if __name__ == "__main__":
# input comes in from the fastAnalysis as [script, path of ROOT file, path of Result directory, starting position in the list of files in the 'path of ROOT file', ending position in the list of files in 'path of ROOT file']
fileLocation = sys.argv[1]
resultLocation = sys.argv[2]
bf = int(sys.argv[3])
ef = int(sys.argv[4])
# Check and change current working directory.
stardir = os.getcwd()
print "Current working directory %s" % stardir
os.chdir(fileLocation)
retdir = os.getcwd()
print "Directory changed successfully %s" % retdir
## Root file path and file name you analyze ##
rootList = os.listdir(fileLocation)
rootfilename = p.runNumber + "EcalNtp_" # 'p' (run parameters), 'a' (analysis helpers) and 'rTree' are project objects not defined in this excerpt
## Info about the Run ##
runinfo = np.array("ROOT info") #ROOT file
## creates histogram for time response ##
htimep = rt.TH2F("Time Response in Endcap plus for all photons", "X vs Y",100,0,100,100,0,100)
htimem = rt.TH2F("Time Response in Endcap minus for all photons", "X vs Y",100,0,100,100,0,100)
#creates histogram for event count
#hevent = rt.TH2F("Events in Barrel", "X vs Y",100,0,100,100,0,100)
#creation of numpy array to store values for faster analysis(courtesy of Ben Bartlett)
dataListp = np.array([-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0]) #(photon, x, y, mean, mean error, sigma, sigma error)
dataListm = np.array([-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0]) #(photon, x, y, time response, time response error, time resolution, time resolution error)
#creates a list of histograms
histListp = [[0 for x in range(101)] for y in range(101)]
histListm = [[0 for x in range(101)] for y in range(101)]
for x in range (0,101):
for y in range (0,101):
histnamep = "time on plus sc (%i,%i)" %(x,y)
histtitlep = "time response (ns) for plus crystal (%i,%i)" %(x,y)
histnamem = "time on minus sc (%i,%i)" %(x,y)
histtitlem = "time response (ns) for minus crystal (%i,%i)" %(x,y)
histListp[x][y] = rt.TH1F(histnamep,histtitlep,1000,-30,30)
histListm[x][y] = rt.TH1F(histnamem,histtitlem,1000,-30,30)
#fills the histogram with data
runinfo = a.openEE(rTree,histListp, histListm, 0, 0)
#fits the histograms
htimep = snf.fitTime(histListp,htimep)
htimem = snf.fitTime(histListm,htimem)
# Same procedure, going back to directory where results are printed
retdir = os.getcwd()
print "Current working directory %s" % retdir
os.chdir(resultLocation + '/' + p.folderName + '/')
folder = 'ctEE' + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
os.system('mkdir ' + folder)
os.chdir(os.getcwd() + '/' + folder +'/')
retdir = os.getcwd()
print "Directory changed successfully %s" % retdir
shutil.copyfile(stardir + '/' + 'unpack.py', retdir + '/unpack.py')
#saving run info to a numpy file for reference later
np.save(p.runNumber+"RunInfoEEAll.npy", runinfo)
#saving all 1D histograms in tree
f = rt.TFile("timeEB_"+str(int(time.time()))+".root","new")
for x in range(0,len(histListp)):
for y in range(0, len(histListp[0])):
histListp[x][y].Write()
histListm[x][y].Write()
#Saving value of data in tuple list
dataListp = np.vstack((dataListp, ["p", x, y, htimep.GetBinContent(x+1, y)]))
dataListm = np.vstack((dataListm, ["m", x, y, htimem.GetBinContent(x+1, y)]))
pbar.update(x+1) # 'pbar' must be constructed from FastProgressBar before this loop (not shown in this excerpt)
htimep.Write()
htimem.Write()
np.save("TimeResponseEEp_0T.npy", dataListp)
np.save("TimeResponseEEm_0T.npy", dataListm)
pbar.finish()
#Tacks on histogram to canvas frame and ouputs on canvas
c = rt.TCanvas()
htimep.SetFillColor(0)
htimep.Draw("colz")
c.Print("2DTimeResponseEBp_0T.png")
#Tacks on histogram to canvas frame and ouputs on canvas
c = rt.TCanvas()
htimem.SetFillColor(0)
htimem.Draw("colz")
c.Print("2DTimeResponseEBm_0T.png")
# ===== end of file -- author: kchang2@caltech.edu | next file: NikosSiak/uniwa-discord-bot : /cogs/help.py [no license] =====
import time
import asyncio
import discord
from discord.ext import commands
class Help(commands.Cog):
def __init__(self, bot):
self.bot = bot
def n_embed(self, ctx, page: int, pages, cogs, cogsD):
embed = discord.Embed(
description=f"Prefix: `{ctx.prefix}`\n`[argument]` = required, `(argument)` = optional",
color=ctx.author.color,
timestamp=ctx.message.created_at
)
cog_name = cogs[page].replace("_", " ")
embed.set_author(
name=f"Help - {cog_name} - {len(cogsD[cogs[page]])} command(s)",
icon_url=self.bot.user.avatar_url
)
embed.set_footer(text=f"Page {page + 1}/{pages}")
for command in cogsD[cogs[page]]:
aliases = "None" if not command.aliases else [f"`{al}`" for al in command.aliases]
embed.add_field(
name=command.name,
value=f"`{ctx.prefix}{command.help}`\n{command.brief}\nAliases: {'None' if aliases == 'None' else ', '.join(aliases)}",
inline=False
)
return embed
@commands.command(name="help", aliases=["h"], brief="Display this message", hidden=True)
async def help_(self, ctx, *, command=None):
"""help (command)"""
if not command:
commands_ = []
for command in self.bot.commands:
if bool(command.cog_name) and not command.hidden:
try:
can_run = await command.can_run(ctx)
except Exception:
can_run = False
if can_run:
commands_.append(command)
cogs = list({command.cog_name for command in commands_})
cogs_dict = {}
for cog in cogs:
cogs_dict[cog] = []
for command in commands_:
if command.cog_name == cog:
cogs_dict[cog].append(command)
pages = len(cogs_dict)
page = 0
if len(cogs) == 0:
await ctx.send("It looks like you don't have permissions to run any command")
return
embed = self.n_embed(ctx, page, pages, cogs, cogs_dict)
msg = await ctx.send(embed=embed)
await msg.add_reaction('⬅')
await msg.add_reaction('➡')
await msg.add_reaction('❌')
def check(r, user):
return user != self.bot.user and r.message.id == msg.id
t_end = time.time() + 300
while time.time() < t_end:
try:
res, user = await self.bot.wait_for('reaction_add', check=check, timeout=60.0)
except asyncio.TimeoutError:
continue
if str(res.emoji) == "➡":
try:
await msg.remove_reaction('➡', user)
except discord.Forbidden:
pass
page += 1
if page == pages:
page -= 1
new_embed = self.n_embed(ctx, page, pages, cogs, cogs_dict)
await msg.edit(embed=new_embed)
elif str(res.emoji) == "⬅":
try:
await msg.remove_reaction('⬅', user)
except discord.Forbidden:
pass
page -= 1
if page < 0:
page = 0
new_embed = self.n_embed(ctx, page, pages, cogs, cogs_dict)
await msg.edit(embed=new_embed)
elif str(res.emoji) == "❌":
try:
await msg.remove_reaction("❌", user)
except discord.Forbidden:
pass
break
await msg.remove_reaction('⬅', self.bot.user)
await msg.remove_reaction('➡', self.bot.user)
await msg.remove_reaction("❌", self.bot.user)
await msg.edit(content=f"Type `{ctx.prefix}help` or `{ctx.prefix}h`", embed=None)
else:
command = self.bot.get_command(command)
if not command:
await ctx.send(f"Use the right command, try `{ctx.prefix}help`")
return
sub_commands = []
if isinstance(command, commands.Group):
sub_commands = list(command.commands)
embed = discord.Embed(
description=f"Prefix: `{ctx.prefix}`\n`[argument]` = required, `(argument)` = optional",
color=ctx.author.color,
timestamp=ctx.message.created_at
)
aliases = "None" if not command.aliases else [f"`{al}`" for al in command.aliases]
if command.name not in ["+rep", "-rep"]:
embed.add_field(
name=command.name,
value=f"`{ctx.prefix}{command.help}`\n{command.brief}\nAliases: {'None' if aliases == 'None' else ', '.join(aliases)}",
)
else:
embed.add_field(
name=command.name,
value=f"`{command.help}`\n{command.brief}\nAliases: {'None' if aliases == 'None' else ', '.join(aliases)}",
)
for command in sub_commands:
aliases = "None" if not command.aliases else [f"`{al}`" for al in command.aliases] # recompute per sub-command
embed.add_field(
name=f"**{command.name}**",
value=f"`{ctx.prefix}{command.help}`\n{command.brief}\nAliases: {'None' if aliases == 'None' else ', '.join(aliases)}",
inline=False
)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(Help(bot))
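# A minimal loading sketch (assuming a standard discord.py 1.x bot with this file at cogs/help.py):
#
# bot = commands.Bot(command_prefix="!", help_command=None) # disable the built-in help, since this cog defines its own
# bot.load_extension("cogs.help")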
# ===== end of file -- author: spyrosr1@gmail.com | next file: kukionfr/miscal-codes : /Vampire extension/get_bdprops_v3b.py [no license] =====
#! C:\Python27
import numpy as np
import os
import pandas as pd
import time
from get_curvature2 import *
from get_shape import *
from scipy.io import loadmat
def get_bdprops_v3b(Bd = None):
np.set_printoptions(precision=5,suppress=True)
pnum = len(Bd)
out_rph = np.zeros((pnum,38))
out_cva = np.zeros((pnum,38))
Bd = Bd[0] #for pickle input, not for cbdt
for k in range(pnum):
if (k % 20 == 0 or k == pnum-1): print str(k+1) + ' / ' + str(pnum)
B=Bd[k]
pn = len(B.T[0])
if pn > 50:
dist = np.power((B.T[0]-np.mean(B.T[0])),2) + np.power((B.T[1]-np.mean(B.T[1])),2)
dist = np.sqrt(dist)
cva = get_curvature2(B)
rph = get_shape(dist)
cvap= get_shape(cva.T[0])
out_rph[k]=rph
out_cva[k]=cvap
else:
out_rph[k]=np.zeros(38)
out_cva[k]=np.zeros(38)
return out_rph, out_cva
def get_bdprops(direc,bd,operators):
start = time.time()
# mat=loadmat('cbdt')
# mdata=mat['cbdt']
# cbdt = pd.DataFrame(mdata)[0]
out_rph, out_cva = get_bdprops_v3b(bd)
df1 = pd.DataFrame(out_rph,columns=operators)
df2 = pd.DataFrame(out_cva,columns=operators)
# if not os.path.exists(direc + 'rph.pickle'):
# df1.to_pickle(direc + 'rph.pickle')
if not os.path.exists(direc + 'rph.csv'):
df1.to_csv(direc + 'rph.csv', index = False)
# if not os.path.exists(direc + 'cva.pickle'):
# df2.to_pickle(direc + 'cva.pickle')
if not os.path.exists(direc + 'cva.csv'):
df2.to_csv(direc + 'cva.csv', index = False)
end = time.time()
print 'elapsed time is ' + str(end-start) + 'seconds for get_bdprops'
return df1,df2
# ===== end of file -- author: kyuhan14@gmail.com | next file: lalchand-rajak/Python-Learn : /LevelTwo/py_math.py [no license] =====
import math
print(abs(-45))
print(abs(45))
print(math.fabs(-45))
print(math.ceil(-45.88)) #ceil function chooses nearest highest value
print(math.ceil(200.12))
print(math.floor(-45.88)) #floor function chooses nearest lowest value
print(math.floor(200.12))
print(max(10,20,40,99,100,205))
print(min(10,20,40,99,100,205))
print(math.sqrt(9))
print(math.sqrt(4))
# ===== end of file -- author: lalchandrajak05@gmail.com | next file: jsainero/SparkvsDask : /Spark/11_word_frequency_sort.py [MIT] =====
# -*- coding: utf-8 -*-
from pyspark import SparkContext
import sys
import string
import time
def replace(word):
for a in string.punctuation+'¿¡«»':
word = word.replace(a, '')
return word
def main(sc, filename):
data = sc.textFile(filename)
t0 = time.time()
datanp = data.map(replace)
words_rdd = datanp.flatMap(lambda x: x.split())
# print(words_rdd.take(10))
appearances_rdd = words_rdd.map(lambda x: (x.lower(), 1))
# print(appearances_rdd.take(10))
result_rdd = appearances_rdd.reduceByKey(lambda x, y: x+y)
# print('RESULTS------------------')
#print('words frequency', result_rdd.take(10))
sorted_rdd = result_rdd.sortBy(lambda x: x[1], ascending=False)
# print('RESULTS------------------')
#print('words frequency sorted', sorted_rdd.take(10))
sorted_rdd.collect()
print('Tiempo transcurrido:', time.time()-t0)
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Uso: python3 {0} <file>".format(sys.argv[0]))
else:
with SparkContext() as sc:
sc.setLogLevel("ERROR")
main(sc, sys.argv[1])
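# Example invocation (a sketch; 'corpus.txt' is a placeholder input file):
# spark-submit 11_word_frequency_sort.py corpus.txt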
# ===== end of file -- author: noreply@github.com | next file: MariannaBeg/homework : /problem with if3.py [no license] =====
print("Please insert a word or sentence")
user_input=input()
if " " in user_input:
print("it is a sentence")
else:
print("it is a word")
# ===== end of file -- author: mbeglaryan9@gmail.com | next file: player39/TestMLPipLine : /Apps/Predict/myPredict.py [no license] =====
from Apps.Model.HEDModelV1 import generateClass
# ===== end of file -- author: wangyucong95@gmail.com | next file: zahrafarrokhi/learning_django_1 : /OnlineShop/main/eshop_accounts/urls.py [no license] =====
from django.urls import path
from . import views
app_name = 'eshop_accounts'
urlpatterns = [
path('login/', views.user_login, name='login'),
]
# ===== end of file -- author: zahrafarrokhi2017@gmail.com | next file: xwj-scarf/zhihuSpider_v1.0 : /zhihu/pipelines.py [no license] =====
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
class MongoPipeline(object):
def __init__(self,mongo_url,mongo_db):
self.mongo_url=mongo_url
self.mongo_db=mongo_db
@classmethod
def from_crawler(cls,crawler):
return cls(
mongo_url=crawler.settings.get('MONGO_URL'),
mongo_db=crawler.settings.get('MONGO_DATABASE','items')
)
def open_spider(self,spider):
self.client=pymongo.MongoClient(self.mongo_url)
self.db = self.client[self.mongo_db]
def close_spider(self,spider):
self.client.close()
def process_item(self, item, spider):
self.db['user'].update({'url_token':item['url_token']},{'$set':item},True)
return item
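# A minimal sketch of the settings this pipeline reads via from_crawler (this
# belongs in the project's settings.py; the host and database name are placeholders):
#
# ITEM_PIPELINES = {'zhihu.pipelines.MongoPipeline': 300}
# MONGO_URL = 'mongodb://localhost:27017'
# MONGO_DATABASE = 'zhihu'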
# ===== end of file -- author: xwj_scarf@163.com | next file: dsipakou/django_gallery : /gallery/settings.py [no license] =====
"""
Django settings for gallery project.
Generated by 'django-admin startproject' using Django 1.10.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@aw3()cl_+aogg7$mq*(96!*pz0nmhdu@5tk&2k)imtg689s%%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
'rest_framework',
'corsheaders'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware', # must come before CommonMiddleware so CORS headers are added to responses
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'gallery.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gallery.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'flyermc',
'USER': 'root',
'PASSWORD': 'password',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
CORS_ORIGIN_WHITELIST = (
'localhost:3000',
)
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# ===== end of file -- author: denis.sipakov@pandadoc.com | next file: BagirThohir/Tugas-2 : /blog/views.py [no license] =====
from django.shortcuts import render
from .models import Artikel
def index(request):
context = {
'articles' : Artikel.objects.all().order_by('-created_date')
}
return render(request, 'blog/index.html', context)
def details(request, article_id):
context = {
'article' : Artikel.objects.get(id = article_id)
}
return render(request, 'blog/details.html', context)
def about(request):
return render(request, 'blog/about.html')
# ===== end of file -- author: muhamadbagir99@gmail.com | next file: iEuler/leetcode_learn : /q0797.py [no license] =====
"""
797. All Paths From Source to Target
Medium
https://leetcode.com/problems/all-paths-from-source-to-target/
Given a directed acyclic graph (DAG) of n nodes labeled from 0 to n - 1, find all possible paths from node 0 to node n - 1, and return them in any order.
The graph is given as follows: graph[i] is a list of all nodes you can visit from node i (i.e., there is a directed edge from node i to node graph[i][j]).
Example 1:
Input: graph = [[1,2],[3],[3],[]]
Output: [[0,1,3],[0,2,3]]
Explanation: There are two paths: 0 -> 1 -> 3 and 0 -> 2 -> 3.
Example 2:
Input: graph = [[4,3,1],[3,2,4],[3],[4],[]]
Output: [[0,4],[0,3,4],[0,1,3,4],[0,1,2,3,4],[0,1,4]]
Example 3:
Input: graph = [[1],[]]
Output: [[0,1]]
Example 4:
Input: graph = [[1,2,3],[2],[3],[]]
Output: [[0,1,2,3],[0,2,3],[0,3]]
Example 5:
Input: graph = [[1,3],[2],[3],[]]
Output: [[0,1,2,3],[0,3]]
Constraints:
n == graph.length
2 <= n <= 15
0 <= graph[i][j] < n
graph[i][j] != i (i.e., there will be no self-loops).
The input graph is guaranteed to be a DAG.
"""
from typing import List
class Solution:
def allPathsSourceTarget(self, graph: List[List[int]]) -> List[List[int]]:
n = len(graph)
ans = []
path = [0]
def dfs(i):
if i == n - 1:
ans.append(path[:])
return
for j in graph[i]:
path.append(j)
dfs(j)
path.pop()
dfs(0)
return ans
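# Quick check against Example 1 from the problem statement above:
if __name__ == "__main__":
    assert Solution().allPathsSourceTarget([[1, 2], [3], [3], []]) == [[0, 1, 3], [0, 2, 3]]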
# ===== end of file -- author: 48609238+iEuler@users.noreply.github.com | next file: Kazzuki/study_python : /lesson6/67_commandline.py [no license] =====
# Command-line arguments are available!
"""
import sys
print(sys.argv)
for w in sys.argv:
print(w)
"""
# About modules
"""
- Import via the module, not the bare function! (otherwise you run into name conflicts and can't tell where a function came from)
- Some companies even have a rule that you must import with the full path
- Using 'as' can also make it unclear which module you are dealing with!
"""
# Option 1: write the full path (you can tell apart even third-party code or code developed by other teams!)
"""
# the package.module relationship (this is how Sakai-sensei reads it)
import lesson_package.tool.utils
r = lesson_package.tool.utils.say_twice('kazuki')
print(r)
"""
# Option 2: import the module, then call module.method
"""
from lesson_package.tool import utils
r = utils.say_twice('kazuki')
print(r)
"""
# Option 3: rename with 'as' (useful when the module name is unwieldy)
"""
from lesson_package.tool import utils as hoge
r = hoge.say_twice('kazuki')
print(r)
"""
from lesson_package.talk import human
from lesson_package.talk import animal
# You can also import with *, but it is better to avoid it!
# (the __all__ list in __init__.py is read first)
# from lesson_package.talk import *
print(human.cry())
print(human.sing())
print(animal.cry())
print(animal.sing())
# ===== end of file -- author: madakazuki@icloud.com | next file: Covee/site_practice1_Django : /vDjBook/Scripts/pildriver.py [no license] =====
#!c:\python~2\blog_p~1\vdjbook\scripts\python.exe
"""PILdriver, an image-processing calculator using PIL.
An instance of class PILDriver is essentially a software stack machine
(Polish-notation interpreter) for sequencing PIL image
transformations. The state of the instance is the interpreter stack.
The only method one will normally invoke after initialization is the
`execute' method. This takes an argument list of tokens, pushes them
onto the instance's stack, and then tries to clear the stack by
successive evaluation of PILdriver operators. Any part of the stack
not cleaned off persists and is part of the evaluation context for
the next call of the execute method.
PILDriver doesn't catch any exceptions, on the theory that these
are actually diagnostic information that should be interpreted by
the calling code.
When called as a script, the command-line arguments are passed to
a PILDriver instance. If there are no command-line arguments, the
module runs an interactive interpreter, each line of which is split into
space-separated tokens and passed to the execute method.
In the method descriptions below, a first line beginning with the string
`usage:' means this method can be invoked with the token that follows
it. Following <>-enclosed arguments describe how the method interprets
the entries on the stack. Each argument specification begins with a
type specification: either `int', `float', `string', or `image'.
All operations consume their arguments off the stack (use `dup' to
keep copies around). Use `verbose 1' to see the stack state displayed
before each operation.
Usage examples:
`show crop 0 0 200 300 open test.png' loads test.png, crops out a portion
of its upper-left-hand corner and displays the cropped portion.
`save rotated.png rotate 30 open test.tiff' loads test.tiff, rotates it
30 degrees, and saves the result as rotated.png (in PNG format).
"""
# by Eric S. Raymond <esr@thyrsus.com>
# $Id$
# TO DO:
# 1. Add PILFont capabilities, once that's documented.
# 2. Add PILDraw operations.
# 3. Add support for composing and decomposing multiple-image files.
#
from __future__ import print_function
from PIL import Image
class PILDriver(object):
verbose = 0
def do_verbose(self):
"""usage: verbose <int:num>
Set verbosity flag from top of stack.
"""
self.verbose = int(self.do_pop())
# The evaluation stack (internal only)
stack = [] # Stack of pending operations
def push(self, item):
"Push an argument onto the evaluation stack."
self.stack.insert(0, item)
def top(self):
"Return the top-of-stack element."
return self.stack[0]
# Stack manipulation (callable)
def do_clear(self):
"""usage: clear
Clear the stack.
"""
self.stack = []
def do_pop(self):
"""usage: pop
Discard the top element on the stack.
"""
return self.stack.pop(0)
def do_dup(self):
"""usage: dup
Duplicate the top-of-stack item.
"""
        if hasattr(self.stack[0], 'format'):  # If it's an image, do a real copy
dup = self.stack[0].copy()
else:
dup = self.stack[0]
self.push(dup)
def do_swap(self):
"""usage: swap
Swap the top-of-stack item with the next one down.
"""
self.stack = [self.stack[1], self.stack[0]] + self.stack[2:]
# Image module functions (callable)
def do_new(self):
"""usage: new <int:xsize> <int:ysize> <int:color>:
Create and push a greyscale image of given size and color.
"""
xsize = int(self.do_pop())
ysize = int(self.do_pop())
color = int(self.do_pop())
self.push(Image.new("L", (xsize, ysize), color))
def do_open(self):
"""usage: open <string:filename>
Open the indicated image, read it, push the image on the stack.
"""
self.push(Image.open(self.do_pop()))
def do_blend(self):
"""usage: blend <image:pic1> <image:pic2> <float:alpha>
Replace two images and an alpha with the blended image.
"""
image1 = self.do_pop()
image2 = self.do_pop()
alpha = float(self.do_pop())
self.push(Image.blend(image1, image2, alpha))
def do_composite(self):
"""usage: composite <image:pic1> <image:pic2> <image:mask>
Replace two images and a mask with their composite.
"""
image1 = self.do_pop()
image2 = self.do_pop()
mask = self.do_pop()
self.push(Image.composite(image1, image2, mask))
def do_merge(self):
"""usage: merge <string:mode> <image:pic1>
[<image:pic2> [<image:pic3> [<image:pic4>]]]
Merge top-of stack images in a way described by the mode.
"""
mode = self.do_pop()
bandlist = []
for band in mode:
bandlist.append(self.do_pop())
self.push(Image.merge(mode, bandlist))
# Image class methods
def do_convert(self):
"""usage: convert <string:mode> <image:pic1>
Convert the top image to the given mode.
"""
mode = self.do_pop()
image = self.do_pop()
self.push(image.convert(mode))
def do_copy(self):
"""usage: copy <image:pic1>
Make and push a true copy of the top image.
"""
        self.do_dup()
def do_crop(self):
"""usage: crop <int:left> <int:upper> <int:right> <int:lower>
<image:pic1>
Crop and push a rectangular region from the current image.
"""
left = int(self.do_pop())
upper = int(self.do_pop())
right = int(self.do_pop())
lower = int(self.do_pop())
image = self.do_pop()
self.push(image.crop((left, upper, right, lower)))
def do_draft(self):
"""usage: draft <string:mode> <int:xsize> <int:ysize>
Configure the loader for a given mode and size.
"""
mode = self.do_pop()
xsize = int(self.do_pop())
ysize = int(self.do_pop())
        image = self.do_pop()
        image.draft(mode, (xsize, ysize))
        self.push(image)
def do_filter(self):
"""usage: filter <string:filtername> <image:pic1>
Process the top image with the given filter.
"""
from PIL import ImageFilter
imageFilter = getattr(ImageFilter, self.do_pop().upper())
image = self.do_pop()
self.push(image.filter(imageFilter))
def do_getbbox(self):
"""usage: getbbox
Push left, upper, right, and lower pixel coordinates of the top image.
"""
bounding_box = self.do_pop().getbbox()
self.push(bounding_box[3])
self.push(bounding_box[2])
self.push(bounding_box[1])
self.push(bounding_box[0])
def do_getextrema(self):
"""usage: extrema
Push minimum and maximum pixel values of the top image.
"""
        extrema = self.do_pop().getextrema()
self.push(extrema[1])
self.push(extrema[0])
    def do_offset(self):
        """usage: offset <int:xoffset> <int:yoffset> <image:pic1>
        Offset the pixels in the top image.
        """
        from PIL import ImageChops
        xoff = int(self.do_pop())
        yoff = int(self.do_pop())
        image = self.do_pop()
        self.push(ImageChops.offset(image, xoff, yoff))
def do_paste(self):
"""usage: paste <image:figure> <int:xoffset> <int:yoffset>
<image:ground>
Paste figure image into ground with upper left at given offsets.
"""
figure = self.do_pop()
xoff = int(self.do_pop())
yoff = int(self.do_pop())
ground = self.do_pop()
if figure.mode == "RGBA":
ground.paste(figure, (xoff, yoff), figure)
else:
ground.paste(figure, (xoff, yoff))
self.push(ground)
def do_resize(self):
"""usage: resize <int:xsize> <int:ysize> <image:pic1>
Resize the top image.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
image = self.do_pop()
self.push(image.resize((xsize, ysize)))
def do_rotate(self):
"""usage: rotate <int:angle> <image:pic1>
Rotate image through a given angle
"""
angle = int(self.do_pop())
image = self.do_pop()
self.push(image.rotate(angle))
def do_save(self):
"""usage: save <string:filename> <image:pic1>
Save image with default options.
"""
filename = self.do_pop()
image = self.do_pop()
image.save(filename)
def do_save2(self):
"""usage: save2 <string:filename> <string:options> <image:pic1>
Save image with specified options.
"""
filename = self.do_pop()
options = self.do_pop()
image = self.do_pop()
image.save(filename, None, options)
def do_show(self):
"""usage: show <image:pic1>
Display and pop the top image.
"""
self.do_pop().show()
def do_thumbnail(self):
"""usage: thumbnail <int:xsize> <int:ysize> <image:pic1>
Modify the top image in the stack to contain a thumbnail of itself.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
self.top().thumbnail((xsize, ysize))
def do_transpose(self):
"""usage: transpose <string:operator> <image:pic1>
Transpose the top image.
"""
transpose = self.do_pop().upper()
image = self.do_pop()
self.push(image.transpose(transpose))
# Image attributes
def do_format(self):
"""usage: format <image:pic1>
Push the format of the top image onto the stack.
"""
self.push(self.do_pop().format)
def do_mode(self):
"""usage: mode <image:pic1>
Push the mode of the top image onto the stack.
"""
self.push(self.do_pop().mode)
def do_size(self):
"""usage: size <image:pic1>
Push the image size on the stack as (y, x).
"""
size = self.do_pop().size
self.push(size[0])
self.push(size[1])
# ImageChops operations
def do_invert(self):
"""usage: invert <image:pic1>
Invert the top image.
"""
from PIL import ImageChops
self.push(ImageChops.invert(self.do_pop()))
def do_lighter(self):
"""usage: lighter <image:pic1> <image:pic2>
Pop the two top images, push an image of the lighter pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.lighter(image1, image2))
def do_darker(self):
"""usage: darker <image:pic1> <image:pic2>
Pop the two top images, push an image of the darker pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.darker(image1, image2))
def do_difference(self):
"""usage: difference <image:pic1> <image:pic2>
Pop the two top images, push the difference image
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.difference(image1, image2))
def do_multiply(self):
"""usage: multiply <image:pic1> <image:pic2>
Pop the two top images, push the multiplication image.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.multiply(image1, image2))
def do_screen(self):
"""usage: screen <image:pic1> <image:pic2>
Pop the two top images, superimpose their inverted versions.
"""
from PIL import ImageChops
image2 = self.do_pop()
image1 = self.do_pop()
self.push(ImageChops.screen(image1, image2))
def do_add(self):
"""usage: add <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled sum with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.add(image1, image2, scale, offset))
def do_subtract(self):
"""usage: subtract <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled difference with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.subtract(image1, image2, scale, offset))
# ImageEnhance classes
def do_color(self):
"""usage: color <image:pic1>
Enhance color in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Color(image)
self.push(enhancer.enhance(factor))
def do_contrast(self):
"""usage: contrast <image:pic1>
Enhance contrast in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Contrast(image)
self.push(enhancer.enhance(factor))
def do_brightness(self):
"""usage: brightness <image:pic1>
Enhance brightness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Brightness(image)
self.push(enhancer.enhance(factor))
def do_sharpness(self):
"""usage: sharpness <image:pic1>
Enhance sharpness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Sharpness(image)
self.push(enhancer.enhance(factor))
# The interpreter loop
    def execute(self, tokens):
        "Interpret a list of PILDriver commands."
        tokens.reverse()
        while len(tokens) > 0:
            self.push(tokens[0])
            tokens = tokens[1:]
if self.verbose:
print("Stack: " + repr(self.stack))
top = self.top()
if not isinstance(top, str):
continue
funcname = "do_" + top
if not hasattr(self, funcname):
continue
else:
self.do_pop()
func = getattr(self, funcname)
func()
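# Worked example of the interpreter loop above (illustrative): for
# driver.execute(["show", "open", "test.png"]), the reversed tokens are pushed
# one at a time; "test.png" is not an operator so it stays on the stack, "open"
# pops it and pushes the loaded image, and "show" then pops and displays it.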
if __name__ == '__main__':
import sys
# If we see command-line arguments, interpret them as a stack state
# and execute. Otherwise go interactive.
driver = PILDriver()
if len(sys.argv[1:]) > 0:
driver.execute(sys.argv[1:])
else:
print("PILDriver says hello.")
while True:
try:
if sys.version_info[0] >= 3:
line = input('pildriver> ')
else:
line = raw_input('pildriver> ')
except EOFError:
print("\nPILDriver says goodbye.")
break
driver.execute(line.split())
print(driver.stack)
# The following sets edit modes for GNU EMACS
# Local Variables:
# mode:python
# End:
|
[
"unittop91@gamil.com"
] |
unittop91@gamil.com
|
bc643726dc086d01106b5695c1317266b5900390
|
f269b417034e397139adf2802514165b0eb26f7c
|
/Python/food_choice_assay/food_choice.py
|
cea19764b5e3f648af452c4bde1d79a5a3d5567d
|
[] |
no_license
|
saulmoore1/PhD_Project
|
2d333f7fdbd8b2b1932007e7cc6e05b3108ed325
|
a235bf8700e4b5a311fc1dfd79c474c5467e9c7a
|
refs/heads/master
| 2023-08-03T17:51:17.262188
| 2023-07-26T12:35:37
| 2023-07-26T12:35:37
| 158,314,469
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27,255
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
SCRIPT: FOOD CHOICE
A script written to analyse the food choice assay videos and Tierpsy-generated
feature summary data. It calculates, plots and saves results for worm food preference
(for each video separately).
@author: sm5911
@date: 21/03/2019
"""
#%% Imports
import os, time
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib import patches as mpatches
# Custom imports
from time_series.time_series_helper import plot_timeseries_phenix
from visualisation.plotting_helper import hexcolours, plot_pie
from food_choice_assay.food_choice_helper import foodchoice, summarystats
from _deprecated.find import change_path_phenix
#%% Globals
PROJECT_ROOT_DIR = '/Volumes/behavgenom$/Saul/FoodChoiceAssay/' # Project working directory
DATA_DIR = PROJECT_ROOT_DIR.replace('Saul', 'Priota/Data') # Location of features files
# Plot parameters
fps = 25 # frames per second
smooth_window = fps*60*2 # 2-minute moving average window for time-series plot smoothing
OpticalDensity600 = 1.8 # E. coli average OD600
NEW = True # Conduct analysis on new videos only?
#%% Preamble
# Read metadata
fullMetaData = pd.read_csv(os.path.join(PROJECT_ROOT_DIR, "fullmetadata.csv"), header=0, index_col=0)
if NEW:
fullMetaData = fullMetaData[fullMetaData['worm number']==10]
n_files = len(fullMetaData['filename'])
if NEW:
print("%d NEW video file entries found in metadata." % n_files)
else:
print("%d video file entries found in metadata." % n_files)
# Extract assay information
pretreatments = list(np.unique(fullMetaData['Prefed_on']))
assaychoices = list(np.unique(fullMetaData['Food_Combination']))
treatments = list(np.unique([assay.split('/') for assay in assaychoices]))
treatments.insert(len(treatments),"None") # treatments = [OP50, HB101, None]
concentrations = list(np.unique(fullMetaData['Food_Conc']))
# Plot parameters
colours = hexcolours(len(treatments)) # Create a dictionary of colours for each treatment (for plotting)
colour_dict = {key: value for (key, value) in zip(treatments, colours)}
#%% CALCULATE MEAN NUMBER OF WORMS ON/OFF FOOD IN EACH FRAME (FOR EACH VIDEO SEPARATELY)
# - PROPORTION of total worms in each frame
errorlog = 'ErrorLog_FoodChoice.txt'
FAIL = []
tic = time.time()
for i, maskedfilepath in enumerate(fullMetaData['filename']):
toc = time.time()
# Extract file information
file_info = fullMetaData.iloc[i,:]
date = file_info['date(YEARMODA)']
conc = file_info['Food_Conc']
assaychoice = file_info['Food_Combination']
prefed = file_info['Prefed_on']
print("\nProcessing file: %d/%d\n%s\nAssay: %s\nConc: %.3f\nPrefed: %s" % (i + 1,\
len(fullMetaData['filename']), maskedfilepath, assaychoice, conc, prefed))
try:
# Specify file paths
onfoodpath = change_path_phenix(maskedfilepath, returnpath='onfood')
foodchoicepath = change_path_phenix(maskedfilepath, returnpath='foodchoice')
# Read on/off food results
onfood_df = pd.read_csv(onfoodpath, header=0, index_col=0)
# Calculate mean + count number of worms on/off food in each frame
# NB: Store proportions, along with total nworms, ie. mean (worms per frame) and later calculate mean (per frame across videos)
choice_df = foodchoice(onfood_df, mean=True, tellme=True)
# Save food choice results
directory = os.path.dirname(foodchoicepath)
if not os.path.exists(directory):
os.makedirs(directory)
choice_df.to_csv(foodchoicepath)
print("Food choice results saved. \n(Time taken: %d seconds)\n" % (time.time() - toc))
    except Exception:
FAIL.append(maskedfilepath)
print("ERROR! Failed to calculate food preference in file:\n %s\n" % maskedfilepath)
print("Complete!\n(Total time taken: %d seconds.)\n" % (time.time() - tic))
# If errors, save error log to file
if FAIL:
fid = open(os.path.join(PROJECT_ROOT_DIR, errorlog), 'w')
print(FAIL, file=fid)
fid.close()
#%% FOOD CHOICE SUMMARY STATS + PIE/BOX PLOTS (FOR EACH VIDEO SEPARATELY)
# - Calculate summary statistics for mean proportion worms feeding in each video
# - Plot and save box plots + pie charts of mean proportion of worms on food
# =============================================================================
# # NB: Cannot pre-allocate full results dataframe to store food choice mean
# # proportion feeding per frame across all videos due to file size = 23GB
# colnames = ['filename','worm_number','Food_Conc','Food_Combination','Prefed_on',\
# 'Acclim_time_s','frame_number','Food','Mean']
# results_df = pd.DataFrame(columns=colnames)
# =============================================================================
tic = time.time()
for i, maskedfilepath in enumerate(fullMetaData['filename']):
# Extract file information
file_info = fullMetaData.iloc[i,:]
date = file_info['date(YEARMODA)']
conc = file_info['Food_Conc']
assaychoice = file_info['Food_Combination']
prefed = file_info['Prefed_on']
print("\nProcessing file: %d/%d\n%s\nAssay: %s\nConc: %.3f\nPrefed: %s" % (i + 1,\
len(fullMetaData['filename']), maskedfilepath, assaychoice, conc, prefed))
# Specify file paths
foodchoicepath = change_path_phenix(maskedfilepath, returnpath='foodchoice')
statspath = change_path_phenix(maskedfilepath, returnpath='summary')
pieplotpath = change_path_phenix(maskedfilepath, returnpath='plots', figname='PiePlot.eps')
boxplotpath = change_path_phenix(maskedfilepath, returnpath='plots', figname='BoxPlot.eps')
try:
# READ FOOD CHOICE RESULTS (csv)
choice_df = pd.read_csv(foodchoicepath, header=0, index_col=0)
# SUMMARY STATISTICS
feeding_stats = summarystats(choice_df)
# Save summary stats
feeding_stats.to_csv(statspath) # Save to CSV
# Define plot labels + colours
colnames = list(choice_df.columns)
labels = [lab.split('_')[0] for lab in colnames]
colours = [colour_dict[treatment] for treatment in labels]
# Specify seaborn colour palette
RGBAcolours = sns.color_palette(colours)
palette = {key: val for key, val in zip(colnames, RGBAcolours)}
# sns.palplot(sns.color_palette(values))
# PIE CHARTS - mean proportion on food
df_pie = feeding_stats.loc['mean']
df_pie.index = df_pie.index.get_level_values(0)
df_pie = df_pie.loc[df_pie!=0] # Remove any empty rows
plt.close("all")
fig = plot_pie(df_pie, rm_empty=False, show=True, labels=df_pie.index,\
colors=colours, textprops={'fontsize': 15}, startangle=90,\
wedgeprops={'edgecolor': 'k', 'linewidth': 1,\
'linestyle': 'solid', 'antialiased': True})
# Save pie charts
directory = os.path.dirname(pieplotpath)
if not os.path.exists(directory):
os.makedirs(directory)
plt.tight_layout()
plt.savefig(pieplotpath, dpi=300)
# Convert to long format
choice_df['frame_number'] = choice_df.index
choice_df_long = choice_df.melt(id_vars='frame_number', value_vars=choice_df.columns[:-1],\
var_name='Food', value_name='Mean')
# BOX PLOTS (Seaborn) - Mean proportion of worms on each food
plt.close("all")
fig, ax = plt.subplots(figsize=(9,7))
ax = sns.boxplot(x='Food', y='Mean', hue='Food', data=choice_df_long, palette=palette, dodge=False)
# NB: Could also produce violinplots, but why not swarmplots? Too many points?
# ax = sns.violinplot(x='Food', y='Mean', hue='Food', data=choice_df_long, palette=palette, dodge=False)
ax.set_ylim(-0.1,1.1)
ax.set_xlim(-1,len(treatments)+0.25)
ax.set_xlabel("Food",fontsize=20)
ax.set_ylabel("Mean Proportion Feeding",fontsize=20)
ax.xaxis.labelpad = 15; ax.yaxis.labelpad = 15
ax.tick_params(labelsize=13, pad=5)
fig.tight_layout(rect=[0.02, 0.07, 0.95, 0.95])
plt.text(0.03, 0.93, "{0} worms".format(file_info['worm number']), transform=ax.transAxes, fontsize=20)
plt.text(len(treatments)+0.25, -0.35, "Prefed on: {0}".format(prefed),\
horizontalalignment='right', fontsize=25)
plt.legend(loc="upper right", borderaxespad=0.4, frameon=False, fontsize=15)
plt.show(); plt.pause(0.0001)
# Save box plots
plt.tight_layout()
plt.savefig(boxplotpath, format='eps', dpi=300)
print("Plots saved.\n")
# =============================================================================
# # Append file info
# choice_df_long['filename'] = maskedfilepath
# choice_df_long['worm_number'] = file_info['worm number']
# choice_df_long['Food_Conc'] = conc
# choice_df_long['Food_Combination'] = assaychoice
# choice_df_long['Prefed_on'] = prefed
# choice_df_long['Acclim_time_s'] = file_info['Acclim_time_s']
#
# # Append to full results dataframe
# results_df = results_df.append(choice_df_long[colnames])
# =============================================================================
    except Exception:
print("Error processing file:\n%s" % maskedfilepath)
continue
print("Done.\n(Time taken: %d seconds.)" % (time.time() - tic))
# =============================================================================
# size = sys.getsizeof(results_df)
# # File size is too big! Not a good idea to save as full results file
# =============================================================================
#%% Time-series plots of proportion feeding through time (FOR EACH VIDEO SEPARATELY)
tic = time.time()
for i, maskedfilepath in enumerate(fullMetaData['filename']):
toc = time.time()
# Extract file information
file_info = fullMetaData.iloc[i,:]
conc = file_info['Food_Conc']
assaychoice = file_info['Food_Combination']
prefed = file_info['Prefed_on']
print("\nProcessing file: %d/%d\n%s\nAssay: %s\nConc: %.3f\nPrefed: %s" % (i + 1,\
len(fullMetaData['filename']), maskedfilepath, assaychoice, conc, prefed))
# Specify file paths
onfoodpath = change_path_phenix(maskedfilepath, returnpath='onfood')
foodchoicepath = change_path_phenix(maskedfilepath, returnpath='foodchoice')
plotpath = change_path_phenix(maskedfilepath, returnpath='plots', figname='FoodChoiceTS.png') # Path to save time series plots
onfood_df = pd.read_csv(onfoodpath, header=0, index_col=0)
# READ FOOD CHOICE RESULTS
# df = pd.read_csv(foodchoicepath, header=0, index_col=0)
df = foodchoice(onfood_df, mean=True, std=True, tellme=True)
# Shift plot to include acclimation time prior to assay recording (ie. t(0) = pick time)
acclim = int(file_info['Acclim_time_s'] * fps)
df.index = df.index + acclim
    # Calculate mean + standard deviation per frame across videos
colnames = list(df.columns.levels[0])
# Remove erroneous frames where on/off food does not sum to 1
frames_to_rm = np.where(np.sum([df[x]['mean'] for x in colnames], axis=0).round(decimals=5)!=1)[0]
assert frames_to_rm.size == 0,\
"{:d} frames found in which feeding proportions do not sum to 1.".format(len(frames_to_rm))
# PLOT TIME-SERIES ON/OFF FOOD (count)
plt.close("all")
fig = plot_timeseries_phenix(df=df, colour_dict=colour_dict, window=smooth_window,\
acclimtime=acclim, annotate=True, legend=True, ls='-')
# SAVE TIME SERIES PLOTS
directory = os.path.dirname(plotpath)
if not os.path.exists(directory):
os.makedirs(directory)
plt.tight_layout()
plt.savefig(plotpath, format='png', dpi=300)
print("Time series plots saved.\n(Time taken: %d seconds.)\n" % (time.time() - toc))
print("Complete!\n(Total time taken: %d seconds.)\n" % (time.time() - tic))
#%% FIGURE 1 - Box plots of food choice (Grouped by treatment combination: prefed on (HB101/OP50), food combination (control/choice), and concentration (0.125,0.25,0.5,1))
# - Subset results by grouping files by assay type (control/choice experiment) and by food concentration
tic = time.time()
# Group files in metadata by prefed, assaychoice and concentration treatment combinations
groupedMetaData = fullMetaData.groupby(['Prefed_on','Food_Combination','Food_Conc'])
# For each prefood-assaychoice-concentration treatment combination
for p, prefood in enumerate(pretreatments):
# Initialise plot for prefed group (12 subplots - 3 food combinations, 4 concentrations)
plt.close("all")
fig, axs = plt.subplots(nrows=len(assaychoices), ncols=len(concentrations),\
figsize=(14,10), sharey=True)
for a, assay in enumerate(assaychoices):
for c, conc in enumerate(concentrations):
try:
# Get prefood-assaychoice-concentration group
df_conc = groupedMetaData.get_group((prefood,assay,conc))
# Get group info
info = df_conc.iloc[0,:]
colnames = info['Food_Combination'].split('/')
if colnames[0] == colnames[1]:
colnames = ["{}_{}".format(food, f + 1) for f, food in enumerate(colnames)]
colnames.insert(len(colnames), "None")
# Pre-allocate dataframe for boxplots
df = pd.DataFrame(index=range(df_conc.shape[0]), columns=colnames)
# If single file, read full food choice data (mean proportion feeding)
if df_conc.shape[0] == 1:
foodchoicepath = change_path_phenix(info['filename'], returnpath='foodchoice')
df = pd.read_csv(foodchoicepath, header=0, index_col=0)
# Read summary stats for mean proportion feeding in each video
elif df_conc.shape[0] > 1:
for i in range(df_conc.shape[0]):
info = df_conc.iloc[i]
# Read in food choice summary stats (mean proportion feeding)
statspath = change_path_phenix(info['filename'], returnpath='summary')
df.iloc[i] = pd.read_csv(statspath, header=0, index_col=0).loc['mean']
# =============================================================================
# # Read food choice data for each file and compile into df for plotting
# df = pd.DataFrame()
# for row in range(df_conc.shape[0]):
# info = df_conc.iloc[row,:]
# foodchoicepath = changepath(info['filename'], returnpath='foodchoice')
# tmp_df = pd.read_csv(foodchoicepath, header=0, index_col=0)
# if df.empty:
# df = tmp_df
# else:
# df = df.append(tmp_df, sort=True)
# =============================================================================
# Plot labels/colours
labels = [lab.split('_')[0] for lab in colnames]
colours = [colour_dict[treatment] for treatment in labels]
# Seaborn colour palette
RGBAcolours = sns.color_palette(colours)
palette = {key: val for key, val in zip(colnames, RGBAcolours)}
# sns.palplot(sns.color_palette(values))
# Convert to long format
df['videoID'] = df.index
df_long = df.melt(id_vars='videoID', value_vars=df.columns[:-1],\
var_name='Food', value_name='Mean')
df_long['Mean'] = df_long['Mean'].astype(float)
# =============================================================================
# # Convert to long format
# df['frame_number'] = df.index
# df_long = df.melt(id_vars='frame_number', value_vars=df.columns[:-1],\
# var_name='Food', value_name='Mean')
# =============================================================================
# Plot Seaborn boxplots
sns.boxplot(data=df_long, x='Food', y='Mean', hue='Food', ax=axs[a,c], palette=palette, dodge=False)
axs[a,c].get_legend().set_visible(False)
axs[a,c].set_ylabel('')
axs[a,c].set_xlabel('')
xlabs = axs[a,c].get_xticklabels()
xlabs = [lab.get_text().split('_')[0] for lab in xlabs[:]]
axs[a,c].set_xticklabels(labels=xlabs, fontsize=12)
axs[a,c].set_ylim(-0.05, 1.05)
axs[a,c].set_xlim(-0.75,len(np.unique(df_long['Food']))-0.25)
axs[a,c].text(0.81, 0.9, ("n={0}".format(df_conc.shape[0])),\
transform=axs[a,c].transAxes, fontsize=12)
if a == 0:
axs[a,c].text(0.5, 1.1, "$OD_{{{}}}={}$".format(600, conc*OpticalDensity600),\
horizontalalignment='center', fontsize=18,\
transform=axs[a,c].transAxes)
if c == 0:
axs[a,c].set_ylabel("{0}".format(assay), labelpad=15, fontsize=18)
if a == 1:
axs[a,c].text(-2.75, 0.5, "Mean Proportion Feeding",\
fontsize=25, rotation=90, horizontalalignment='center',\
verticalalignment='center')
except Exception as e:
print("No videos found for concentration: %s\n(Assay: %s, Prefed on: %s)\n" % (e, assay, prefood))
axs[a,c].axis('off')
axs[a,c].text(0.81, 0.9, "n=0", fontsize=12, transform=axs[a,c].transAxes)
if a == 0:
axs[a,c].text(0.5, 1.1, "$OD_{{{}}}={}$".format(600, conc*OpticalDensity600),\
horizontalalignment='center', fontsize=18,\
transform=axs[a,c].transAxes)
if c == 0:
axs[a,c].set_ylabel("{0}".format(assay), labelpad=15, fontsize=18)
if a == 1:
axs[a,c].text(-3.2, 0.5, "Mean Proportion Feeding",\
fontsize=25, rotation=90, horizontalalignment='center',\
verticalalignment='center')
# plt.text(3, -0.7, "Prefed on: {0}".format(prefood), horizontalalignment='center', fontsize=25)
patches = []
for i, (key, value) in enumerate(colour_dict.items()):
patch = mpatches.Patch(color=value, label=key)
patches.append(patch)
fig.legend(handles=patches, labels=list(colour_dict.keys()), loc="upper right", borderaxespad=0.4,\
frameon=False, fontsize=15)
fig.tight_layout(rect=[0.07, 0.02, 0.88, 0.95])
fig.subplots_adjust(hspace=0.2, wspace=0.1)
plt.show(); plt.pause(2)
# Save figure 1
fig_name = "FoodChoiceBox_prefed" + prefood + ".eps"
figure_out = os.path.join(PROJECT_ROOT_DIR, "Results", "Plots", fig_name)
plt.savefig(figure_out, format='eps', dpi=300)
print("Complete!\n(Time taken: %d seconds)" % (time.time() - tic))
#%% FIGURE 2
# - OPTIONAL: Plot as fraction of a constant total?
#%% FIGURE 3 - Time series plots of food choice by concentration and by assay type (GROUPED BY ASSAY/CONC)
# Plot time series plots - proportion on-food through time
tic = time.time()
# Group files in metadata by prefed, assaychoice and concentration treatment combinations
groupedMetaData = fullMetaData.groupby(['Prefed_on','Food_Combination','Food_Conc'])
# For each prefood-assaychoice-concentration treatment combination
for p, prefood in enumerate(pretreatments):
# Initialise plot for prefed group
plt.close("all")
xmax = 180000
fig, axs = plt.subplots(nrows=len(assaychoices), ncols=len(concentrations),\
figsize=(16,7), sharex=True) # 12 subplots (3 food combinations, 4 food concentrations)
for a, assay in enumerate(assaychoices):
for c, conc in enumerate(concentrations):
try:
# Get prefood-assaychoice-concentration group
df_conc = groupedMetaData.get_group((prefood,assay,conc))
# Get acclim time
info = df_conc.iloc[0,:]
acclim = int(info['Acclim_time_s'] * fps)
# If single file, read food choice data (mean proportion feeding)
if df_conc.shape[0] == 1:
foodchoicepath = change_path_phenix(info['filename'], returnpath='foodchoice')
df = pd.read_csv(foodchoicepath, header=0, index_col=0)
# Shift df indices to account for acclimation (t0 = pick time)
acclim = int(info['Acclim_time_s'] * fps)
df.index = df.index + acclim
# If multiple files, read food choice data for each file and compile into df for plotting
elif df_conc.shape[0] > 1:
df = pd.DataFrame()
for row in range(df_conc.shape[0]):
info = df_conc.iloc[row,:]
foodchoicepath = change_path_phenix(info['filename'], returnpath='foodchoice')
tmp_df = pd.read_csv(foodchoicepath, header=0, index_col=0)
# Shift df indices to account for acclimation (t0 = pick time)
acclim = int(info['Acclim_time_s'] * fps)
tmp_df.index = tmp_df.index + acclim
if df.empty:
df = tmp_df
else:
df = df.append(tmp_df, sort=True)
                # Calculate mean + standard deviation per frame across videos
colnames = list(df.columns)
df['frame'] = df.index
fundict = {x:['mean','std'] for x in colnames}
df_plot = df.groupby('frame').agg(fundict)
# Remove erroneous frames where on/off food does not sum to 1
frames_to_rm = np.where(np.sum([df_plot[x]['mean'] for x in colnames], axis=0).round(decimals=5)!=1)[0]
assert frames_to_rm.size == 0,\
"{:d} frames found in which feeding proportions do not sum to 1.".format(len(frames_to_rm))
# Time series plots
plot_timeseries_phenix(df_plot, colour_dict, window=smooth_window,
legend=False, annotate=False, acclimtime=acclim, ax=axs[a,c])
# Add number of replicates (videos) for each treatment combination
axs[a,c].text(0.79, 0.9, ("n={0}".format(df_conc.shape[0])),\
transform=axs[a,c].transAxes, fontsize=13)
# Set axis limits
if max(df_plot.index) > xmax:
xmax = max(df_plot.index)
axs[a,c].set_xlim(0, np.round(xmax,-5))
axs[a,c].set_ylim(-0.05, 1.05)
# Set column labels on first row
if a == 0:
axs[a,c].text(0.5, 1.15, "$OD_{{{}}}={}$".format(600, conc*OpticalDensity600),\
horizontalalignment='center', fontsize=18,\
transform=axs[a,c].transAxes)
# Set main y axis label + ticks along first column of plots
if c == 0:
yticks = list(np.round(np.linspace(0,1,num=6,endpoint=True),decimals=1))
axs[a,c].set_yticks(yticks)
axs[a,c].set_yticklabels(yticks)
axs[a,c].set_ylabel("{0}".format(assay), labelpad=15, fontsize=15)
if a == 1:
axs[a,c].text(-np.round(xmax,-5)/2, 0.5, "Mean Proportion Feeding",\
fontsize=22, rotation=90, horizontalalignment='center',\
verticalalignment='center')
else:
axs[a,c].set_yticklabels([])
# Set main x axis label + ticks along final row of plots
if a == len(assaychoices) - 1:
xticklabels = ["0", "30", "60", "90", "120"]
xticks = [int(int(lab)*fps*60) for lab in xticklabels]
axs[a,c].set_xticks(xticks)
axs[a,c].set_xticklabels(xticklabels)
if c == 1:
axs[a,c].set_xlabel("Time (minutes)", labelpad=25, fontsize=20, horizontalalignment='left')
else:
axs[a,c].set_xticklabels([])
except Exception as e:
# Empty plots
print("No videos found for concentration: %s\n(Assay: %s, Prefed on: %s)\n" % (e, assay, prefood))
# Add number of replicates (videos) for each treatment combination
axs[a,c].text(0.79, 0.9, "n=0", fontsize=13, transform=axs[a,c].transAxes)
# Set column labels on first row
if a == 0:
axs[a,c].text(0.5, 1.15, ("conc={0}".format(conc)),\
horizontalalignment='center', fontsize=18,\
transform=axs[a,c].transAxes)
axs[a,c].axis('off')
# Add 'prefed on' to multiplot
# plt.text(max(df_plot.index), -0.7, "Prefed on: {0}".format(prefood), horizontalalignment='right', fontsize=30)
# Add legend
patches = []
for key, value in colour_dict.items():
patch = mpatches.Patch(color=value, label=key)
patches.append(patch)
fig.legend(handles=patches, labels=treatments, loc="upper right", borderaxespad=0.4,\
frameon=False, fontsize=15)
# Tight-layout + adjustments
fig.tight_layout(rect=[0.07, 0.02, 0.9, 0.93])
fig.subplots_adjust(hspace=0.1, wspace=0.1)
plt.show(); plt.pause(1)
# Save figure 3
fig_name = "FoodChoiceTS_prefed" + prefood + ".png"
figure_out = os.path.join(PROJECT_ROOT_DIR, "Results", "Plots", fig_name)
    plt.savefig(figure_out, format='png', dpi=300)
print("Complete!\n(Time taken: %d seconds)" % (time.time() - tic))
|
[
"saulmoore1@bitbucket.org"
] |
saulmoore1@bitbucket.org
|
3400d334de7ddd40dbc556d74bff6700ddc27be6
|
f43b592a338700e872a862a3fcf9e45bc3cd8c89
|
/paramtools/tree.py
|
d4a8b6f1477def6aed6d6e0766ca36ca262b221b
|
[
"MIT"
] |
permissive
|
MaxGhenis/ParamTools
|
14f316cb6c6aef4b3edb4df07489acba9c90d7af
|
73ccc4eed917ded10b183014e8e08189f13d32a8
|
refs/heads/master
| 2022-11-08T10:23:43.321951
| 2020-06-29T14:05:07
| 2020-06-29T14:05:07
| 275,851,495
| 1
| 0
| null | 2020-06-29T15:14:37
| 2020-06-29T15:14:36
| null |
UTF-8
|
Python
| false
| false
| 10,574
|
py
|
from collections import defaultdict
from typing import List
from paramtools.exceptions import ParamToolsError
from paramtools.typing import ValueObject, CmpFunc
class Tree:
"""
Builds a tree structure for more efficient searching. The structure
is:
label
--> label value
--> set of indices corresponding to value object that has
a label with this value.
"""
def __init__(self, vos: List[ValueObject], label_grid: dict):
self.vos = vos
self.label_grid = dict(label_grid or {}, _auto=[False, True])
self.tree = None
self.new_values = None
self.needs_build = True
def init(self):
"""
Initializes tree data structure. Trees are lazy and this method
should be called before the tree's methods are used.
Cases:
1. If needs_build is false, no action is taken and the existing
tree is returned.
        2. If the tree has not yet been built and there are no new values,
        the tree is built from self.vos.
3. If tree has already been initialized and there are new values,
the tree is updated with the new values.
"""
if not self.needs_build:
return self.tree
if self.new_values and self.tree:
ixs = self.new_values
search_tree = self.tree
else:
search_tree = {}
ixs = range(len(self.vos))
for ix in ixs:
vo = self.vos[ix]
for label, label_value in vo.items():
if label == "value":
continue
if label not in search_tree:
search_tree[label] = defaultdict(set)
search_tree[label][label_value].add(ix)
self.tree = search_tree
self.needs_build = False
self.new_values = None
return self.tree
def update(self, tree: "Tree") -> List[ValueObject]:
"""
Update this tree's value objects with value objects from the
other tree.
        1. If this tree's index is empty but the other tree has value
        objects, then this tree does not use any labels. Thus, we replace
        self.vos with the other tree's value objects.
2. Find all value objects with labels that match the labels
in tree.vos. "Search Hits" are the intersection of all
indices in current value objects that have the same value
for a given label of a value object in the other tree.
Value objects in the other tree that do not match value
objects in this tree are added to not_matched and appended
to self.vos at the end.
2.a. Loop over all labels used by this project.
2.a.i. Both trees use this label.
2.a.i.1. Find all values that are in both trees
for the given label and update the search hits
set for matches.
2.a.i.2. Find all values that are in the other tree
but are not in this tree and add them to not_matched.
(VO's in not matched will be added at the end.)
2.a.ii. The label is in this tree but is not in the other tree.
We treat all of the values under this label as search hits
and add their values to the search hits set.
2.a.iii. The label is not in this tree but is in the new tree.
New labels can not be added to a list of value objects and
an error is thrown.
2.a.iv. Neither tree has this label; so, ignore it.
2.b. Loop over all indices in search_hits.
        2.b.i. Replace the value of matches with the new value.
        (if value is None, save to delete later.)
        2.b.ii. If there are no matches for a given index, append them to
not_matched.
3. Drop all indices from to_delete, and append all items in not_matched.
4. If there were no deletions, save the new values to update the tree
when it is used again. If there are deletions, do not save the new
values because the tree needs to be re-built from the new value
objects.
Returns:
List of updated value objects.
Raises:
            ParamToolsError if a label is specified in the new value objects
that is not present in the default value objects.
"""
new_values = set([])
not_matched = set([])
to_delete = set([])
# Trees are lazy and need to be initialized before use.
self.init()
tree.init()
# self.tree doesn't have labels -> there are no labels to query.
if not self.tree and tree.vos:
del self.vos[:]
not_matched = range(len(tree.vos))
else:
# search_hits saves the intersection of all label matches.
# The indices in the sets at the end are the search hits.
search_hits = {ix: set([]) for ix in range(len(tree.vos))}
for label in self.label_grid:
if label in ("_auto",):
continue
if label in tree.tree and label in self.tree:
# All label values that exist in both trees.
for label_value in (
tree.tree[label].keys() & self.tree[label].keys()
):
for new_ix in tree.tree[label][label_value]:
if new_ix in search_hits:
if search_hits[new_ix]:
search_hits[new_ix] &= self.tree[label][
label_value
]
else:
search_hits[new_ix] |= self.tree[label][
label_value
]
# All label values in the new tree that are not in this tree.
# Value objects that have a label value that is not included
# in the current tree means that they will not be matched.
for label_value in (
tree.tree[label].keys() - self.tree[label].keys()
):
for new_ix in tree.tree[label][label_value]:
search_hits.pop(new_ix)
not_matched.add(new_ix)
elif label in self.tree:
# All value objects with labels not specified in the other
# tree are treated as search hits (for this label).
unused_label = set.union(*self.tree[label].values())
for new_ix in search_hits:
if search_hits[new_ix]:
search_hits[new_ix] &= unused_label
else:
search_hits[new_ix] |= unused_label
elif label in tree.tree:
raise ParamToolsError(
f"Label {label} was not defined in the defaults."
)
for ix, search_hit_ixs in search_hits.items():
if search_hit_ixs:
if tree.vos[ix]["value"] is not None:
for search_hit_ix in search_hit_ixs:
self.vos[search_hit_ix]["value"] = tree.vos[ix][
"value"
]
else:
to_delete |= search_hit_ixs
else:
not_matched.add(ix)
if to_delete:
# Iterate in reverse so that indices point to the correct
# value. If iterating ascending then the values will be shifted
# towards the front of the list as items are removed.
for ix in sorted(to_delete, reverse=True):
del self.vos[ix]
if not_matched:
for ix in not_matched:
if tree.vos[ix]["value"] is not None:
self.vos.append(tree.vos[ix])
new_values.add(len(self.vos) - 1)
# It's faster to just re-build from scratch if values are deleted.
if to_delete:
self.new_values = None
self.needs_build = True
else:
self.new_values = new_values
self.needs_build = True
return self.vos
def select(
self, labels: dict, cmp_func: CmpFunc, strict: bool = False
) -> List[ValueObject]:
"""
Select all value objects from self.vos according to the label query,
labels, and the comparison function, cmp_func. strict dictates
        whether vos missing a label in the query are eligible for inclusion
in the select results.
1. Loop over labels from query.
2. Find all value objects that have a value that returns true
from the cmp_func (e.g. it is equal to the query value).
3. Take the intersection of all of the successful matches across
        the different labels to get the final result.
Returns:
List of value objects satisfying the query.
"""
if not labels:
return self.vos
search_hits = set([])
self.init()
if not self.tree:
return self.vos
all_ixs = set(range(len(self.vos)))
for label, _label_value in labels.items():
if not isinstance(_label_value, list):
label_value = (_label_value,)
else:
label_value = _label_value
label_search_hits = set([])
if label in self.tree:
for tree_label_value, ixs in self.tree[label].items():
match = cmp_func(tree_label_value, label_value)
if match:
label_search_hits |= ixs
if search_hits:
search_hits &= label_search_hits
elif not strict or label_search_hits:
search_hits |= label_search_hits
if not strict:
search_hits |= all_ixs - set.union(
*self.tree[label].values()
)
return [self.vos[ix] for ix in search_hits]
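# Minimal usage sketch (hypothetical data, not taken from the library's tests):
#   vos = [{"year": 2020, "value": 1.0}, {"year": 2021, "value": 2.0}]
#   tree = Tree(vos, label_grid={"year": [2020, 2021]})
#   tree.select({"year": 2020}, cmp_func=lambda value, query: value in query)
#   # -> [{"year": 2020, "value": 1.0}]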
|
[
"henrymdoupe@gmail.com"
] |
henrymdoupe@gmail.com
|
a5d16596b2c10dda91ff06d79b767fd2d9c2ae72
|
69f03048347f0746f089ae6755dcf8c4a2c9d108
|
/manage.py
|
51c8cf30e0e27192a8bb6bc357f88da59ae5df8d
|
[] |
no_license
|
allanraafael/django-react
|
7625c05c9bd6c2a63c88e941c53fc90cedd38cc7
|
21c2237da01c1e0ff513b29ec7b24ec610f097ed
|
refs/heads/master
| 2023-03-24T13:56:25.214065
| 2021-03-16T01:30:34
| 2021-03-16T01:30:34
| 348,175,700
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 570
|
py
|
#!/usr/bin/env python
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.core.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"allanrafaelfo@gmail.com"
] |
allanrafaelfo@gmail.com
|
7a09d275bc7d4ff865c287dc116481e4ede3ca2d
|
563d5bdcdb0f3b40893d0ada11bf68e7338c283c
|
/datastructures/arrays/python/algorithms/two_sum_2.py
|
94961b3d8296997633588ee7eedb357551e028a6
|
[] |
no_license
|
llenroc/interviews-1
|
8a1342102f2deede2a569d63eb8fbbf9055fcd26
|
e5f55e48c2b9a4b06a81693b57d3cec65c2d91e9
|
refs/heads/master
| 2023-09-06T10:18:40.490289
| 2021-11-22T18:19:58
| 2021-11-22T18:19:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 439
|
py
|
def twoSum(numbers, target):
    # Two-pointer scan over a sorted array: move the ends inward until the
    # pair sums to the target; returns 1-based indices, or [-1, -1] if none.
    i = 0
    j = len(numbers) - 1
    while i < j:
        pair_sum = numbers[i] + numbers[j]
        if pair_sum > target:
            j -= 1
        elif pair_sum < target:
            i += 1
        else:
            return [i + 1, j + 1]
    return [-1, -1]
def main():
numbers = [2, 7, 11, 15]
target = 9
print(twoSum(numbers, target))
if __name__ == "__main__":
main()
|
[
"andrei.visan@kpn.com"
] |
andrei.visan@kpn.com
|
8c347b056cb36c0e390a877ccfc8d7cb252c58d5
|
6b10fb82c07ba126669990c55b5d8556e0aa7e30
|
/ushauri/settings.py
|
0b2aadbf5643c28aafaec2b587b2f96a36daf20a
|
[] |
no_license
|
chepkoy/ushauri
|
1c224e7d8a9025f199ac4b5676f3a5df50507d2b
|
f2ad5ffaa55b9d7ad619bee8568f62cb1312353d
|
refs/heads/master
| 2021-05-15T03:40:38.741790
| 2017-11-08T19:57:56
| 2017-11-08T19:57:56
| 110,023,812
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,143
|
py
|
"""
Django settings for ushauri project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^9o!t90v@a_(!gnt&cii%t@scs1%9r3^z%s5#xitg&71$1$i_8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ushauri.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ushauri.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
[
"allankiplangat22@gmail.com"
] |
allankiplangat22@gmail.com
|
690ebfa7539e477c986cc44439b64ed513a1e44b
|
6356b828b1209c409be87783107ad2e96f7fc0e4
|
/data/main.py
|
aff69002d470a47b481ce58543aede4cf670b59e
|
[
"MIT"
] |
permissive
|
vkazei/deeplogs
|
01e4d1eedbb220b921a2ccd7a2b015b684006086
|
4f6f853ce608a59e9d4b1a3160eb6b0035f333c0
|
refs/heads/master
| 2021-07-09T20:01:48.121545
| 2020-09-15T16:14:15
| 2020-09-15T16:14:15
| 194,487,355
| 33
| 18
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 37,572
|
py
|
#%% [markdown]
# # Smart velocity analysis : mapping raw data to velocity logs
#%%
#(c) Vladimir Kazei, Oleg Ovcharenko; KAUST 2020
# cell with imports
import importlib
import multiprocessing
import os
import sys
import time
import pickle
import threading
import random
# learning
import keras
# madagascar API
import m8r as sf
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import seaborn
import tensorflow as tf
# images
from IPython import get_ipython
from keras import backend as K
from keras.utils import multi_gpu_model
from keras.callbacks import (EarlyStopping, ModelCheckpoint, ReduceLROnPlateau,
TensorBoard)
from keras.layers import (AveragePooling2D, BatchNormalization, Conv2D, Dense, Lambda,
Dropout, Flatten, MaxPool2D, Reshape, GaussianNoise, GaussianDropout)
from keras.models import load_model
from numpy.random import randint, seed
from scipy import ndimage
from skimage.transform import resize
from skimage.util import view_as_windows
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
#import styler
from myutils import (cd, cmd, const,
elastic_transform, plt_nb_T, toc, aug_flip, upsample,
merge_dict, np_to_rsf, rsf_to_np, nrms,
tf_random_flip_channels)
from myutils import const as c
from generate_data import (generate_model, show_model_generation,
alpha_deform, sigma_deform,
generate_all_data, generate_rsf_data)
seed()
# set up matplotlib
matplotlib.rc('image', cmap='RdBu_r')
seaborn.set_context('paper', font_scale=5)
CUDA_VISIBLE_DEVICES = "0"
os.environ["CUDA_VISIBLE_DEVICES"]=CUDA_VISIBLE_DEVICES
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
# Madagascar binaries will be stored in DATAPATH (RAM on Linux recommended)
cmd("mkdir /dev/shm/RSFTMP")
cmd("chmod 777 /dev/shm/RSFTMP")
os.environ["DATAPATH"]="/dev/shm/RSFTMP/"
# execution flags
generate_rsf_data_flag = True
retrain_flag = False #(sys.argv[1] == "--retrain")
print(f"retrain_flag = {retrain_flag}")
print(type(retrain_flag))
tic_total = time.time()
#%% [markdown]
# ## Introduction
#
# ### Why?
# FWI provides high-resolution models, yet it is computationally expensive and can fail when low frequencies are missing.
#
# Velocity analysis is on the other hand very cheap computationally, but limited by the assumptions on the background medium.
#
# ### Goal
# Combine advantages of both methods with deep learning
#
# ### Solution
# We will train a deep convolutional neural network to perform velocity analysis in inhomogeneous media
##%% [markdown]
# We estimate velocity $v(x_{CMP}, z)$ from pressure field
# $p_{obs}(x_{CMP}-\varepsilon:x_{CMP}+\varepsilon, 0:h_{max}, f)$, where
# $x_{CMP}$ is the central midpoint,
# $p_{obs}$ is the observed pressure.
#
# $\varepsilon = 0$ in this first part of the application => single CMP as input
##%% [markdown]
# ## Method
#
# 0) generate a model set
# 1) generate seismic data set
# 2) build neural network
# 3) train neural network
# 4) test it on a model that it has not seen
#%% [markdown]
# ## Model generation
#
# we utilize common deep learning image augmentation technique -- elastic transform
#%%
show_model_generation()
#%% [markdown]
# ## Gaussian fields to generate a coordinate shift for laterally smooth models
#
#
#
# ### Large correlation radius in horizontal direction -- to keep it almost horizontally layered
#
# ### Small correlation radius in vertical direction -- to make it represent different layering scenarios
#
# ### Same parameters but different fields for horizontal and vertical components
#
# ### Large vertical shifts and small horizontal -- to keep it laterally slowly varying
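#%%
# Illustrative sketch (not part of the original pipeline): anisotropic Gaussian
# smoothing of white noise emulates such a shift field, with a short vertical
# (sigma=3 samples) and long horizontal (sigma=40 samples) correlation radius.
demo_shift = ndimage.gaussian_filter(np.random.randn(200, 400), sigma=(3, 40))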
#%% [markdown]
# ## Modeling data with constant offset on GPU with Madagascar
#%%
# Setting up parameters
_vel = generate_model()
N = np.shape(_vel)
dt = c.dt
dx = c.dx
T_max = c.T_max
nt = c.nt
print(f"number of time steps = {nt}")
# check stability
print(f"you chose dt = {dt}, dt < {dx/np.max(_vel):.4f} should be chosen for stability \n")
# force stability
assert dt < dx/np.max(_vel)
# the Ricker wavelet spectrum is roughly bounded by 3*f_dominant
# therefore the coarsest principally acceptable sampling rate would be
central_freq = c.central_freq
print(f"dt from Nyquist criterion is {1/(2*3*central_freq)}")
print(f"dt chosen for CNN is {c.jdt*dt}, which is {(1/(3*central_freq))/(c.jdt*dt)} samples per cycle")
#%% [markdown]
# ## Read data into numpy and check that the number of logs is the same as number of shots
#%%
nCMP=21
def read_rsf_XT(shots_rsf='shots_cmp_full.rsf', logs_rsf='logs_full.rsf', j_log_z=c.jlogz):
X = rsf_to_np(shots_rsf)
# single model exception
if X.ndim == 3:
X = np.expand_dims(X, axis=0)
X_f = np.flip(X, axis=2)
X = np.maximum(np.abs(X), np.abs(X_f)) * np.sign(X+X_f)
X = X[:,:,:(np.shape(X)[2] + 1) // 2,:]
T = rsf_to_np(logs_rsf)
# single model exception
if T.ndim == 2:
T = np.expand_dims(T, axis=0)
# decimate logs in vertical direction --2 times by default
T = resize(T, (*T.shape[0:2], np.shape(T)[2] // j_log_z))
T_size = np.shape(T)
print(T_size)
# ensure that the number of logs is equal to the number of CMPs
assert (X.shape[0:2] == T.shape[0:2])
return X, T
#%%
while not os.path.exists('new_data_ready'):
time.sleep(1)
print("waiting for new data, run python generate_data.py if you didn't", end="\r")
cmd("rm new_data_ready")
#%%
X, T = read_rsf_XT(shots_rsf='shots_cmp_full.rsf', logs_rsf='logs_full.rsf')
T_multi = view_as_windows(T, (1, nCMP, T.shape[2])).squeeze().reshape((-1, nCMP, T.shape[2]))[:,nCMP//2,:].squeeze()
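# view_as_windows slides a (1, nCMP, nz) window along the CMP axis; taking the
# centre log (index nCMP//2) of each window aligns each target log with the
# middle of its nCMP-wide input gather.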
# create scaler for the outputs
T_scaler = StandardScaler().fit(T_multi)
scale = np.copy(T_scaler.scale_)
mean = np.copy(T_scaler.mean_)
np.save("scale", scale)
np.save("mean", mean)
#%%
T_scaler.scale_[:] = 1
T_scaler.mean_[:] = 0
# X has the format (model, CMP, offset, time)
plt_nb_T(X[1,:10, -1,:200], title="Common offset (600 m) gather", dx=c.dx*c.jgx*2, dz=1e3*dt*c.jdt,
origin_in_middle=True, ylabel="Time(s)", fname="../latex/Fig/X_short_offset", vmin=-1e-4, vmax=1e-4)
plt_nb_T(T[1,:10,:100], title="Model", dx=c.dx*c.jgx*2, dz=c.dx*c.jlogz,
origin_in_middle=True, ylabel="Time(s)", fname="../latex/Fig/X_short_offset")
#X=X[:,:-3,:,:]
#%%
def prepare_XT(X,T, T_scaler=T_scaler, gen_plots=False):
nCMP = 21
X_multi = view_as_windows(X, (1,nCMP,X.shape[2],X.shape[3])).squeeze().reshape((-1, nCMP, X.shape[2], X.shape[3]))
X_multi = np.swapaxes(X_multi,1,3)
X_multi = np.swapaxes(X_multi,1,2)
T_multi = view_as_windows(T, (1, nCMP, T.shape[2])).squeeze().reshape((-1, nCMP, T.shape[2]))[:,nCMP//2,:].squeeze()
X_scaled_multi = X_multi
T_scaled_multi = T_scaler.transform(T_multi)
# extract central CMPs for singleCMP network
X_scaled = X_scaled_multi[:,:,:,nCMP//2:nCMP//2+1]
T_scaled = T_scaled_multi
#%%
if gen_plots:
plt_nb_T(T_multi, dx=c.jgx*c.dx, dz=c.jlogz*c.dx, fname="../latex/Fig/T_multi")
plt_nb_T(1e3*T_scaled, dx=c.jgx*dx, dz=c.jlogz*c.dx, fname="../latex/Fig/T_scaled")
#%%
# show single training sample
sample_reveal = nCMP
plt_nb_T(1e3*np.concatenate((np.squeeze(X_scaled_multi[sample_reveal,:,:,-1]), np.flipud(np.squeeze(X_scaled_multi[sample_reveal,:,:,0]))), axis=0),
title="CMP first | CMP last", dx=200, dz=1e3*dt*c.jdt,
origin_in_middle=True, ylabel="Time(s)", fname="../latex/Fig/X_scaled", cbar_label = "")
print(np.shape(1e3*T_scaled[sample_reveal-(nCMP+1)//2:sample_reveal+(nCMP-1)//2:nCMP]))
plt_nb_T(1e3*T_multi[sample_reveal-(nCMP-1)//2:sample_reveal+(nCMP-1)//2,:],
dx=100, dz=c.dx*c.jlogz,
title="scaled velocity logs")
return X_scaled_multi, T_scaled_multi
X_scaled_multi, T_scaled_multi = prepare_XT(X,T, T_scaler=T_scaler, gen_plots=False)
#%% plot single input into the network
plt_nb_T(1e3*np.reshape(X[0, :21, :, :], (21*X.shape[2], X.shape[3])), vmin=-0.1, vmax=0.1, figsize=(48,12), no_labels=True, cbar=True, fname="../latex/Fig/input_multi")
#%% [markdown]
# # CNN construction single CMP -> log under the CMP
# 1D total variation for the output
def tv_loss(y_true, y_pred):
    # MSE plus a 1D total-variation penalty on the prediction; the TV weight
    # is currently set to 0.0, so in effect this reduces to plain MSE.
    a = K.abs(y_pred[:, :-1] - y_pred[:, 1:])
    tv = 0.0 * K.mean(a, axis=-1)
    total = tv + K.mean(K.square(y_pred - y_true), axis=-1)
    return total
def random_channel_flip(x):
print(x.shape)
return K.in_train_phase(tf_random_flip_channels(x), x)
def R2(y_true, y_pred):
SS_res = K.sum(K.square(y_true-y_pred))
SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
return (1 - SS_res/(SS_tot + K.epsilon()))
def create_model(inp_shape, out_shape, jlogz=c.jlogz):
    model = keras.models.Sequential()
    activation = 'elu'
    padding = 'same'
    kernel_size = (3, 11)
    # random CMP(channel) flip as train-time data augmentation
    model.add(Lambda(random_channel_flip, input_shape=inp_shape, output_shape=inp_shape))
    # hourglass-like stack: every convolution is preceded by batch
    # normalization and Gaussian noise; three blocks downsample with stride 2
    filters_strides = [(32, 1), (32, 2), (64, 1), (64, 2), (128, 1), (128, 2),
                       (64, 1), (64, 1), (32, 1), (32, 1), (16, 1), (16, 1),
                       (8, 1), (8, 1), (4, 1), (4, 1), (2, 1), (2, 1)]
    for filters, stride in filters_strides:
        model.add(BatchNormalization())
        model.add(GaussianNoise(0.1))
        model.add(Conv2D(filters=filters, kernel_size=kernel_size,
                         strides=(stride, stride),
                         activation=activation, padding=padding))
    # final linear projection to a single channel, flattened to the log
    model.add(BatchNormalization())
    model.add(GaussianNoise(0.1))
    model.add(Conv2D(filters=1, kernel_size=(3, 15), activation='linear', padding="valid"))
    model.add(Flatten())
    # undo the target scaling so the net outputs velocities directly;
    # scale and mean are assumed to be the fitted scaler's std/mean arrays
    model.add(Lambda(lambda x: K.tf.add(K.tf.multiply(x, K.variable(scale.squeeze())),
                                        K.variable(mean.squeeze()))))
    return model
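# Receptive-field/shape note (hedged): the three stride-2 convolutions
# downsample both input axes by 8, and the final valid (3, 15) convolution
# trims another (2, 14), so the flattened output length is
# (n_t/8 - 2) * (n_offsets/8 - 14), which must match the log length out_shape.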
#%%
class threadsafe_iter:
"""Takes an iterator/generator and makes it thread-safe by
serializing call to the `next` method of given iterator/generator.
"""
def __init__(self, it):
self.it = it
self.lock = threading.Lock()
def __iter__(self):
return self
def __next__(self):
with self.lock:
return self.it.__next__()
def threadsafe_generator(f):
"""A decorator that takes a generator function and makes it thread-safe.
"""
def g(*a, **kw):
return threadsafe_iter(f(*a, **kw))
return g
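# Why thread safety matters here: fit_generator below runs with workers=10
# and use_multiprocessing=False, i.e. several threads pull batches from one
# generator, so next() must be serialized to avoid duplicated or torn batches.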
@threadsafe_generator
def batch_generator(X, T, T_scaler=T_scaler, batch_size = None):
batch=[]
print("generator restarted !!!!!!!!!!!!!!!!!!!!!!!!!!! waiting for new data")
while not os.path.exists("new_data_ready"):
time.sleep(1)
while True:
# it might be a good idea to shuffle your data before each epoch
# for iData in range(40):
# print(f"loading NEW DATA {iData}")
# X_rsf, T_rsf = read_rsf_XT(shots_rsf=f'/data/ibex_data/fullCMP_{iData}/shots_cmp_full.hh',
# logs_rsf=f'/data/ibex_data/fullCMP_{iData}/logs_cmp_full.hh')
# X, T = prepare_XT(X_rsf, T_rsf, T_scaler)
# indices = np.arange(len(X))
# np.random.shuffle(indices)
# for i in indices:
# # if os.path.exists("new_data_ready"):
# # break
# batch.append(i)
# if len(batch)==batch_size:
# yield X[batch], T[batch]
# batch=[]
if os.path.exists("new_data_ready"):
cmd("rm new_data_ready")
X_rsf, T_rsf = read_rsf_XT(shots_rsf='shots_cmp_full.rsf', logs_rsf='logs_full.rsf')
#cmd("ssh glogin.ibex.kaust.edu.sa 'rm ~/log_estimation/data/new_data_ready'")
X, T = prepare_XT(X_rsf, T_rsf, T_scaler)
print("new data loaded")
else:
print("reusing the old data")
indices = np.arange(len(X))
np.random.shuffle(indices)
print("indices reshuffled")
for i in indices:
if os.path.exists("new_data_ready"):
break
batch.append(i)
if len(batch)==batch_size:
yield X[batch], T[batch]
batch=[]
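# Minimal usage sketch (hedged; left commented out because the generator
# blocks until a file named "new_data_ready" appears in the working dir):
# gen = batch_generator(X_scaled_multi, T_scaled_multi, batch_size=4)
# xb, tb = next(gen)  # xb: (4, n_t, n_offsets, nCMP), tb: (4, n_z) scaled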
#%%
# Init callbacks
def train_model(prefix="multi", X_scaled=X_scaled_multi, T_scaled=T_scaled_multi, weights=None):
cmd("rm new_data_ready")
#cmd("ssh 10.109.66.7 'rm ~/log_estimation/data/new_data_ready'")
lr_start = 0.001
    if weights is not None:
lr_start = 1e-5
net = create_model(np.shape(X_scaled)[1:], np.shape(T_scaled)[1:])
net.compile(loss=tv_loss,
optimizer=keras.optimizers.Nadam(lr_start),
metrics=[R2])
#net.summary()
    if weights is not None:
net.load_weights(weights)
early_stopping = EarlyStopping(monitor='val_loss', patience=21)
model_checkpoint = ModelCheckpoint("trained_net",
monitor='val_loss',
save_best_only=True,
verbose=1,
period=5)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
patience=7, min_lr=1e-5, verbose=1)
    # NOTE: validation reuses the chunk currently in memory (the generator
    # keeps streaming freshly simulated training data), so the val metrics
    # are only a weak proxy for truly held-out performance
    X_valid = X_scaled
    T_valid = T_scaled
print(f"X validation data size = {np.shape(X_valid)}")
# TRAINING
batch_size = 64
    # channels are flipped randomly every batch, so covering the data in both orientations would need twice as many batches
steps_per_epoch = len(X_scaled)//batch_size
print(f"Batch size = {batch_size}, batches per epoch = {steps_per_epoch}")
history = net.fit_generator(batch_generator(X_scaled, T_scaled, batch_size=batch_size),
validation_data=(X_valid, T_valid),
epochs=200,
verbose=2,
shuffle=True,
max_queue_size=200,
workers=10,
use_multiprocessing=False,
steps_per_epoch = steps_per_epoch,
callbacks=[model_checkpoint,
reduce_lr,
early_stopping])
print("Optimization Finished!")
return net, history
def load_history(fname_history):
with open(fname_history,'rb') as f:
return pickle.load(f)
def save_history(history, fname_history):
with open(fname_history,'wb') as f:
pickle.dump(history, f)
def train_ensemble(prefix, X_scaled, T_scaled):
valid_best=1e100
net_dict = {}
history_dict = {}
for iNet in range(5):
if retrain_flag:
weights = f"{prefix}_weights.h5"
history_prev = load_history(f"history_{prefix}")
else:
weights = None
net, history = train_model(prefix=prefix, X_scaled=X_scaled, T_scaled=T_scaled, weights=weights)
cur_val_loss = np.min(history.history['val_loss'])
print(cur_val_loss)
if cur_val_loss < valid_best:
valid_best = cur_val_loss
net_best = net
history_best = history.history
net_dict[f"{iNet}"] = net
history_dict[f"{iNet}"] = history.history
if retrain_flag:
history_best = merge_dict(history_prev, history_best)
net_best.save_weights(f"{prefix}_weights.h5")
save_history(history_best, f"history_{prefix}")
return net_dict, history_dict, net_best, history_best
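# Note: net_dict keeps all trained runs for test-time ensembling in
# test_on_model below, while net_best/history_best track only the run with
# the lowest validation loss (its weights and learning curves are saved).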
#singleCMP_net_dict, singleCMP_net_best, history_best = train_ensemble("singleCMP", X_scaled, T_scaled)
#cmd("rm new_data_ready")
multiCMP_net_dict, history_dict, multiCMP_net_best, history_best = train_ensemble("multiCMP", X_scaled_multi, T_scaled_multi)
# stop generator
cmd("touch training_finished")
#%% quick ad-hoc hacks for testing
history_best = load_history("history_multiCMP")
prefix = "multiCMP"
plt.figure(figsize=(16,9))
r2_arr = np.zeros((len(history_dict), 2))
for iNet in history_dict.keys():
r2_arr[int(iNet),0] = history_dict[iNet]['R2'][-1]
r2_arr[int(iNet),1] = history_dict[iNet]['val_R2'][-1]
print(f"netN={iNet}, R2={history_dict[iNet]['R2'][-1]},{history_dict[iNet]['val_R2'][-1]}")
plt.plot(history_dict[iNet]['R2'][:],'b--')
plt.plot(history_dict[iNet]['val_R2'][:],'r')
print(f"Average R2={np.mean(r2_arr, 0)}")
plt.plot(history_best['R2'][:],'b--', label='Training R2', linewidth=3)
plt.plot(history_best['val_R2'][:],'r', label='Validation R2', linewidth=3)
plt.xlabel("epoch")
plt.legend()
plt.savefig(f"../latex/Fig/{prefix}_R2", bbox_inches='tight')
plt.grid(True,which="both",ls="-")
plt.show(block=False)
plt.pause(1)
plt.close()
plt.figure(figsize=(16,9))
for iNet in history_dict.keys():
print(iNet)
plt.plot(history_dict[iNet]['loss'][:],'b--')
plt.plot(history_dict[iNet]['val_loss'][:],'r')
plt.semilogy(history_best['loss'][:],'b--', label='Training loss', linewidth=3)
plt.semilogy(history_best['val_loss'][:],'r', label='Validation loss', linewidth=3)
plt.xlabel("epoch")
plt.legend()
plt.savefig(f"../latex/Fig/{prefix}_loss", bbox_inches='tight')
plt.grid(True,which="both",ls="-")
plt.show(block=False)
plt.pause(1)
plt.close()
# #%%
# multiCMP_net_dict={}
# net_best = create_model(np.shape(X_scaled_multi)[1:], np.shape(T_scaled_multi)[1:])
# net_best.summary()
# net_best.compile(loss=tv_loss,
# optimizer=keras.optimizers.Nadam(1e-6),
# metrics=[R2])
# net_best.load_weights("multiCMP_weights.h5")
# multiCMP_net_dict["0"] = net_best
#%% [markdown]
# # We trained the neural net, it fits the training and validation data...
#
# ## How well does it fit?
#
# ## Does it fit stretched marmousi itself?
#
# ## Could we learn more from models like this?
#
# ## Does it work on something different?
#
# ## When does it break?!
#%% [markdown]
# # Testing
#%% uncomment for loading initial weights
# singleCMP_net_dict={}
# net = create_model(np.shape(X_scaled)[1:], np.shape(T_scaled)[1:])
# net.summary()
# net.load_weights("singleCMP_weights.h5")
# singleCMP_net_dict["0"] = net
# multiCMP_net_dict={}
# netM = create_model(np.shape(X_scaled_multi)[1:], np.shape(T_scaled_multi)[1:])
# netM.summary()
# netM.load_weights("multiCMP_weights.h5")
# multiCMP_net_dict["0"] = netM
def test_on_model(folder="marmvel1D",
net_dict=None,
prefix="singleCMP",
model_filename=None,
distort_flag=False,
stretch_X=None,
nCMP_max=nCMP,
generate_rsf_data_flag=True,
jlogz=c.jlogz,
jgx=c.jgx, sxbeg=c.sxbeg, gxbeg=c.gxbeg):
    if model_filename is None:
model_filename=f"{folder}.hh"
fig_path = f"../latex/Fig/test_{prefix}_{folder}"
# expand model
model_output="vel_test.rsf"
print(model_output)
vel_test = generate_model(model_input=model_filename,
model_output=model_output,
stretch_X=stretch_X,
random_state_number=c.random_state_number,
distort_flag=distort_flag,
crop_flag=False,
test_flag=True)
# model data
if generate_rsf_data_flag:
cmd(f"mkdir {folder}")
cmd(f"cp {model_output} {folder}/{model_output}")
# check stability
print(f"you chose dt = {dt}, dt < {dx/np.max(vel_test):.4f} should be chosen for stability \n")
# force stability
assert dt < dx/np.max(vel_test)
generate_rsf_data(model_name=f"{folder}/vel_test.rsf",
shots_out=f"{folder}/shots_cmp_test.rsf",
logs_out=f"{folder}/logs_test.rsf")
# read data
X_test, T_test = read_rsf_XT(shots_rsf=f"{folder}/shots_cmp_test.rsf",
logs_rsf=f"{folder}/logs_test.rsf")
nCMP = int(net_dict["0"].input.shape[3])
# X_scaled, T_test = make_multi_CMP_inputs(X_scaled, T_test, nCMP_max)
X_scaled, T_scaled = prepare_XT(X_test, T_test)
T_test = T_scaler.inverse_transform(T_scaled)
sample_reveal = nCMP_max+1
plt_nb_T(1e3*np.concatenate((np.squeeze(X_scaled[sample_reveal,:,:,-1]), np.flipud(np.squeeze(X_scaled[sample_reveal,:,:,0]))), axis=0),
title="CMP first | CMP last", dx=200, dz=1e3*dt*c.jdt,
vmin=-0.1, vmax=0.1,
origin_in_middle=True, ylabel="Time(s)", fname=f"{fig_path}_X_scaled", cbar_label = "")
if nCMP == 1:
X_scaled = X_scaled[:,:,:,nCMP_max//2:nCMP_max//2+1]
# predict with all networks and save average
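    # Test-time augmentation: each network predicts twice, on the original
    # gather and on the CMP(channel)-flipped gather (mirroring the training
    # random_channel_flip); the 2*len(net_dict) outputs are averaged, and
    # their variance serves as the uncertainty estimate plotted below.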
T_pred_total = np.zeros_like(net_dict["0"].predict(X_scaled))
T_pred_dict = np.zeros((2*len(net_dict), T_pred_total.shape[0], T_pred_total.shape[1]))
iNet=0
for net in net_dict.values():
T_pred_tmp = net.predict(X_scaled)
T_pred_tmp = T_scaler.inverse_transform(T_pred_tmp)
T_pred_dict[iNet,:,:] = T_pred_tmp
T_pred_tmp = net.predict(np.flip(X_scaled, axis=3))
T_pred_tmp = T_scaler.inverse_transform(T_pred_tmp)
T_pred_dict[iNet+1,:,:] = T_pred_tmp
iNet += 2
T_pred = np.mean(T_pred_dict, axis=0)
# interpolation for display
ups_plot = c.ups_plot
T_pred = upsample(T_pred, ups_plot)
T_test = upsample(T_test, ups_plot)
np_to_rsf(T_pred, f"{folder}/logs_pred.rsf", d1=25, d2=25)
np_to_rsf(T_test, f"{folder}/logs_test_m.rsf", d1=25, d2=25)
variance = np.var(T_pred_dict, axis=0)
plt_nb_T(upsample(np.sqrt(variance), ups_plot), title="Standard deviation",
dx=jgx*dx/ups_plot, dz=jlogz*dx/ups_plot,
fname=f"{fig_path}_inverted_std_dev",
vmin=0, vmax=1, figsize=(16,6))
plt_nb_T(T_pred-T_test, title="Pred-True",
dx=jgx*dx, dz=jlogz*dx,
fname=f"{fig_path}_inverted_error",
vmin=-1, vmax=1)
plt_nb_T(T_pred, title=f"DL, R2 = {r2_score(T_test.flatten(), T_pred.flatten()):.2f}, NRMS={nrms(T_pred, T_test):.1f}%",
dx=jgx*dx/ups_plot, dz=jgx*dx/ups_plot,
vmin=np.min(1e-3*T_test),
vmax=np.max(1e-3*T_test),
fname=f"{fig_path}_inverted", figsize=(16,6))
plt_nb_T(T_test,
dx=jgx*dx/ups_plot, dz=jgx*dx/ups_plot,
vmin=np.min(1e-3*T_test),
vmax=np.max(1e-3*T_test),
fname=f"{fig_path}_true",
title=f"True model",
figsize=(16,6))
#%%
def run_all_tests(net_dict=None, prefix="single", generate_rsf_data_flag=False):
# Marmousi-based tests
test_on_model("marmvel1D", net_dict=net_dict, prefix=prefix, stretch_X=10, generate_rsf_data_flag=generate_rsf_data_flag)
test_on_model("marmvel", net_dict=net_dict, prefix=prefix, stretch_X=1, generate_rsf_data_flag=generate_rsf_data_flag)
test_on_model("marm2", net_dict=net_dict, prefix=prefix, stretch_X=1, generate_rsf_data_flag=generate_rsf_data_flag)
# Overthrust-based tests
test_on_model("overthrust1D", net_dict=net_dict, prefix=prefix, stretch_X=2, generate_rsf_data_flag=generate_rsf_data_flag)
# cmd("sfadd < overthrust3D_orig.hh add=-1 | sfclip2 lower=1.5 --out=stdout > overthrust3D.hh")
# cmd("sfwindow < overthrust3D_orig.hh n3=120 f1=400 n1=1 | sftransp | sfadd scale=1000 | sfput d1=25 d2=25 --out=stdout > overthrust_test_2D_1.hh")
test_on_model("overthrust_test_2D_1", net_dict=net_dict, prefix=prefix, stretch_X=1, generate_rsf_data_flag=generate_rsf_data_flag)
# cmd("sfwindow < overthrust3D.hh n3=120 f2=400 n2=1 | sftransp | sfadd scale=1000 | sfput d1=25 d2=25 --out=stdout > overthrust_test_2D_2.hh")
test_on_model("overthrust_test_2D_2", net_dict=net_dict, prefix=prefix, stretch_X=1, generate_rsf_data_flag=generate_rsf_data_flag)
# SEAM I based tests
test_on_model("seam100", net_dict=net_dict, prefix=prefix, stretch_X=2, generate_rsf_data_flag=generate_rsf_data_flag)
# cmd("sfwindow < SEAM_I_3D_20m.hh f3=100 n3=151 f1=1400 | sftransp memsize=100000 plane=13 | sfwindow f3=20 n3=1 f2=500 n2=1000 | sfput o1=0 o2=0 --out=stdout > seam_i_sediments.hh")
test_on_model("seam_i_sediments", net_dict=net_dict, prefix=prefix, stretch_X=1, generate_rsf_data_flag=generate_rsf_data_flag)
test_on_model("seam_karst", net_dict=net_dict, prefix=prefix, stretch_X=1, generate_rsf_data_flag=generate_rsf_data_flag)
#cmd("sfwindow < SEAM_I_3D_20m.hh f3=10 n3=151 | sftransp memsize=100000 plane=23 | sftransp memsize=100000 plane=12 | sfwindow f3=1455 n3=1 --out=stdout > seam_i_salt.hh")
test_on_model("seam_i_salt", net_dict=net_dict, prefix=prefix, stretch_X=2, generate_rsf_data_flag=generate_rsf_data_flag)
test_on_model("seam_arid", net_dict=net_dict, prefix=prefix, stretch_X=1, generate_rsf_data_flag=generate_rsf_data_flag)
#run_all_tests(net_dict=singleCMP_net_dict, prefix="singleCMP", generate_rsf_data_flag=True)
run_all_tests(net_dict=multiCMP_net_dict, prefix="multiCMP", generate_rsf_data_flag=True)
print(f"Total execution time is {toc(tic_total)}")
#%% PLOT FWI RESULTS
# for folder in ["marm2",
# "seam_i_sediments",
# "seam100",
# "overthrust"]:
for folder in ["overthrust"]:
with cd(f"fwi_{folder}"):
cmd("scons -j 4")
fwi1 = rsf_to_np("fwi2.rsf")
fwi2 = rsf_to_np("fwi_shi.rsf")
velo = rsf_to_np("vel.rsf")
velsm = rsf_to_np("smvel.rsf")
R2o = r2_score(velo.flatten(), fwi2.flatten())
fwi2 = resize(fwi2, (fwi2.shape[0], 120))
fwi1 = resize(fwi1, (fwi2.shape[0], 120))
plt_nb_T(fwi2, title=f"DL+MSFWI, R2={R2o:.2f}, NRMS={nrms(velo,fwi2):.1f}%", fname=f"../../latex/Fig/msfwi_{folder}", dx=25, dz=25, figsize=(32,6), vmin=1.5, vmax=4.5)
plt_nb_T(velsm,
title=f"DL, R2={r2_score(velo.flatten(),velsm.flatten()):.2f}, NRMS={nrms(velo,velsm):.1f}%",
fname=f"../../latex/Fig/dl_{folder}",
dx=25, dz=25, figsize=(16,6), vmin=1.5, vmax=4.5)
plt_nb_T(velo,
title=f"True model",
fname=f"../../latex/Fig/true_{folder}",
dx=25, dz=25, figsize=(16,6), vmin=1.5, vmax=4.5)
def plot_logs(log_x):
plt.figure(figsize=(11,18))
depth = 0.025*np.array(range(120))
plt.plot( 1e-3*velsm[log_x,:], depth, 'r', label="DL", linewidth=6)
plt.plot(1e-3*fwi1[log_x,:], depth, 'b--', label="DL+FWI", linewidth=6)
plt.plot(1e-3*fwi2[log_x,:], depth, 'bo', label="+MSFWI", markersize=15)
plt.plot( 1e-3*velo[log_x,:], depth, 'black', label="True", linewidth=8, alpha=0.6)
plt.ylabel("Depth (km)")
plt.xlabel("Velocity (km/s)")
plt.xlim((1.5, 4.5))
plt.yticks([0,1,2,3])
plt.title(f"Log at {int(0.025*log_x)} km")
plt.gca().invert_yaxis()
plt.legend()
plt.axis("tight")
plt.savefig(f"../latex/Fig/log_{int(0.025*log_x)}")
plot_logs(240)
plot_logs(400)
plot_logs(480)
# %%
|
[
"vkazei@gmail.com"
] |
vkazei@gmail.com
|
44ba51191c44e24175419dfd2f44a582f3d362b0
|
aed2854bbb656b5618a19eb065a41180caa47c4f
|
/src_lib/models_hub/crop_classifiers.py
|
bb316fd5b1c53aab22f683dc680de908c5b85961
|
[] |
no_license
|
rishabhraaj17/MastersThesis
|
ef20e5c92698b830963d43d873ed007773379daf
|
6b4e7bac9c1e780723e202fecd27a5d0e12e6729
|
refs/heads/develop
| 2023-08-23T00:51:01.413365
| 2021-09-23T16:39:24
| 2021-09-23T16:39:24
| 409,666,585
| 0
| 0
| null | 2021-09-23T16:39:25
| 2021-09-23T16:30:27
|
Python
|
UTF-8
|
Python
| false
| false
| 5,578
|
py
|
from typing import Tuple, List, Optional, Callable, Union
import torch
from mmcls.models import ResNet_CIFAR, GlobalAveragePooling, LinearClsHead
from omegaconf import DictConfig
from torch import nn
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import Dataset, DataLoader
from src_lib.models_hub import Base
class CropClassifier(Base):
def __init__(self, config: DictConfig, train_dataset: Dataset, val_dataset: Dataset,
desired_output_shape: Tuple[int, int] = None, loss_function: nn.Module = None,
additional_loss_functions: List[nn.Module] = None, collate_fn: Optional[Callable] = None):
super(CropClassifier, self).__init__(
config=config, train_dataset=train_dataset, val_dataset=val_dataset,
desired_output_shape=desired_output_shape, loss_function=loss_function,
additional_loss_functions=additional_loss_functions, collate_fn=collate_fn
)
self.backbone = ResNet_CIFAR(
depth=18,
num_stages=4,
out_indices=(3,),
style='pytorch'
)
self.neck = GlobalAveragePooling()
self.head = nn.Sequential(nn.Linear(512, 64), nn.ReLU6(), nn.Linear(64, 1))
@classmethod
def from_config(cls, config: DictConfig, train_dataset: Dataset = None, val_dataset: Dataset = None,
desired_output_shape: Tuple[int, int] = None, loss_function: nn.Module = None,
additional_loss_functions: List[nn.Module] = None, collate_fn: Optional[Callable] = None):
return CropClassifier(config=config, train_dataset=train_dataset, val_dataset=val_dataset,
desired_output_shape=desired_output_shape, loss_function=loss_function,
additional_loss_functions=additional_loss_functions, collate_fn=collate_fn)
def forward(self, x):
out = self.backbone(x)
out = self.neck(out)
out = self.head(out)
return out
def _one_step(self, batch):
crops, labels = batch
# for offline mode
# crops, labels = crops.view(-1, *crops.shape[2:]), labels.view(-1, 1)
labels = labels.view(-1, 1)
out = self(crops)
loss = self.calculate_loss(out, labels)
return loss
def calculate_loss(self, pred, target):
return self.loss_function(pred, target)
def configure_optimizers(self):
opt = torch.optim.Adam(self.parameters(),
lr=self.config.crop_classifier.lr,
weight_decay=self.config.crop_classifier.weight_decay,
amsgrad=self.config.crop_classifier.amsgrad)
schedulers = [
{
'scheduler': ReduceLROnPlateau(opt,
patience=self.config.crop_classifier.patience,
verbose=self.config.crop_classifier.verbose,
factor=self.config.crop_classifier.factor,
min_lr=self.config.crop_classifier.min_lr),
'monitor': self.config.crop_classifier.monitor,
'interval': self.config.crop_classifier.interval,
'frequency': self.config.crop_classifier.frequency
}]
return [opt], schedulers
def train_dataloader(self) -> DataLoader:
return DataLoader(
dataset=self.train_dataset, batch_size=self.config.crop_classifier.batch_size,
shuffle=self.config.crop_classifier.shuffle, num_workers=self.config.crop_classifier.num_workers,
collate_fn=self.collate_fn, pin_memory=self.config.crop_classifier.pin_memory,
drop_last=self.config.crop_classifier.drop_last)
def val_dataloader(self) -> Union[DataLoader, List[DataLoader]]:
return DataLoader(
dataset=self.val_dataset,
batch_size=self.config.crop_classifier.batch_size * self.config.crop_classifier.val_batch_size_factor,
shuffle=False, num_workers=self.config.crop_classifier.num_workers,
collate_fn=self.collate_fn, pin_memory=self.config.crop_classifier.pin_memory,
drop_last=self.config.crop_classifier.drop_last)
class CropClassifierDDP(CropClassifier):
def __init__(self, config: DictConfig, train_dataset: Dataset, val_dataset: Dataset,
desired_output_shape: Tuple[int, int] = None, loss_function: nn.Module = None,
additional_loss_functions: List[nn.Module] = None, collate_fn: Optional[Callable] = None):
super(CropClassifierDDP, self).__init__(
config=config, train_dataset=train_dataset, val_dataset=val_dataset,
desired_output_shape=desired_output_shape, loss_function=loss_function,
additional_loss_functions=additional_loss_functions, collate_fn=collate_fn
)
def training_step(self, batch, batch_idx):
loss = self._one_step(batch)
self.log('train_loss', loss, on_step=True, on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
return loss
def validation_step(self, batch, batch_idx):
loss = self._one_step(batch)
self.log('val_loss', loss, on_step=True, on_epoch=True, prog_bar=True, logger=True, sync_dist=True)
return loss
if __name__ == '__main__':
m = CropClassifier.from_config({})
inp = torch.randn((2, 3, 64, 64))
o = m(inp)
    print(o.shape)
|
[
"noreply@github.com"
] |
noreply@github.com
|
75d2c7f495c3d1e8921a7eca9f3573f46656c52a
|
122496927f93d9715d55812180fff8c599c77e09
|
/whatsapp.py
|
e04ef58fd25c0634fd14e37b87857e4ca850aa32
|
[] |
no_license
|
blauner5/whatsapp-monitor
|
a1d182ea55d4342350764652aff3278753382313
|
cc18938c40ec4734bc3fd8b2daa75f8263097f38
|
refs/heads/master
| 2020-08-01T02:14:24.610558
| 2019-09-25T22:19:38
| 2019-09-25T22:19:38
| 210,824,567
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,691
|
py
|
from urllib.parse import quote
from time import sleep
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import NoSuchElementException
import datetime
import mysql.connector
mydb = mysql.connector.connect(host="HOST", user="USER", passwd="PASS", database="whatsapp") #<-- Change with your values
TARGET = 'TARGET NAME' #<-- Insert TARGET NAME
SCANNED = False
chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--window-size=1920x1080')
chrome_options.add_argument(
'user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36'
)
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-gpu-sandbox')
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--disable-impl-side-painting')
chrome_options.add_argument('--disable-accelerated-2d-canvas')
chrome_options.add_argument('--disable-accelerated-jpeg-decoding')
chrome_options.add_argument('--test-type=ui')
browser = webdriver.Chrome("PATH") #<-- Insert the chromedriver path, e.g. "C:\chromedriver.exe"
browser.get('https://web.whatsapp.com')
while SCANNED is False:
    print('Waiting for QR scan...')
sleep(5)
try:
notice = browser.find_element_by_class_name('_1cDWi')
        if notice.text == 'Mantieni il telefono connesso': #<-- change to match your language, e.g. "Keep the phone connected"
SCANNED = True
print('Success!')
print (browser.service.process.pid)
except NoSuchElementException:
pass
search = browser.find_element_by_class_name('_2HS9r')
search.send_keys(TARGET)
chats = browser.find_elements_by_class_name('_19RFN')
for chat in chats:
name = chat.get_attribute('title')
if TARGET in name:
chat.click()
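# From here the script polls the opened chat roughly every 2-3 seconds:
# whenever the chat-header status element ends with 'online', a
# (name, timestamp) row is inserted into the `accessi` table.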
while True:
try:
sql = "INSERT INTO accessi (nome, data) VALUES (%s, %s)"
mycursor = mydb.cursor()
nome = TARGET
a = str(datetime.datetime.now())
b = a[:-7]
data = b
val = (nome, data)
online = browser.find_element_by_class_name('_3fs0K').text
online2 = online[-6:]
if online2 == 'online':
a = str(datetime.datetime.now())
print(TARGET + ' is online!')
print(a[:-7])
mycursor.execute(sql, val)
mydb.commit()
sleep(1)
except NoSuchElementException:
pass
sleep(2)
|
[
"riccardociprini@gmail.com"
] |
riccardociprini@gmail.com
|
b47c9a85013089dec45758e6489eb731972070ee
|
4ece3041f2ed0cd312dc70fd3c7c240924dbb6ae
|
/pyathena/__init__.py
|
8335fb21281d596d87e5bc8a90d091895483fde9
|
[
"MIT"
] |
permissive
|
ivssh/PyAthena
|
175c5dfff0289a7ceccfe9a47ac490985535f669
|
156c51f19b46ea2f89612b3383937d78942bc990
|
refs/heads/master
| 2020-03-27T13:07:58.417397
| 2018-07-21T13:08:41
| 2018-07-21T13:08:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,351
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
from pyathena.error import * # noqa
__version__ = '1.3.0'
# Globals https://www.python.org/dev/peps/pep-0249/#globals
apilevel = '2.0'
threadsafety = 3
paramstyle = 'pyformat'
class DBAPITypeObject:
"""Type Objects and Constructors
https://www.python.org/dev/peps/pep-0249/#type-objects-and-constructors
"""
def __init__(self, *values):
self.values = values
def __cmp__(self, other):
if other in self.values:
return 0
if other < self.values:
return 1
else:
return -1
def __eq__(self, other):
return other in self.values
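# Example of the comparison semantics (a sketch): NUMBER == 'integer' and
# STRING == 'varchar' are both True, which lets cursor.description type
# codes be matched against the singleton objects defined below.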
# https://docs.aws.amazon.com/athena/latest/ug/data-types.html
STRING = DBAPITypeObject('char', 'varchar', 'map', 'array', 'row')
BINARY = DBAPITypeObject('varbinary')
BOOLEAN = DBAPITypeObject('boolean')
NUMBER = DBAPITypeObject('tinyint', 'smallint', 'bigint', 'integer',
'real', 'double', 'float', 'decimal')
DATE = DBAPITypeObject('date')
DATETIME = DBAPITypeObject('timestamp')
Date = datetime.date
Time = datetime.time
Timestamp = datetime.datetime
def connect(*args, **kwargs):
from pyathena.connection import Connection
return Connection(*args, **kwargs)
|
[
"laughingman7743@gmail.com"
] |
laughingman7743@gmail.com
|
56b442f8b7bfc47ef533d1a9d1c90373518ecca3
|
df7736726d5b041e46b490e409a1d4481ef8c7f1
|
/tools/rosmaster/src/rosmaster/threadpool.py
|
1261e2f5e4aa3947450c12ff477e0830735e537e
|
[] |
no_license
|
strawlab/ros_comm
|
62f5d2bc68d6cbe85c071eabb7487164d6c328be
|
6f7ea2feeb3c890699518cb6eb3d33faa15c5306
|
refs/heads/master
| 2020-05-18T02:26:43.463444
| 2012-08-05T07:10:58
| 2012-08-05T07:10:58
| 5,301,610
| 13
| 31
| null | 2019-09-24T22:49:12
| 2012-08-05T07:10:44
|
Python
|
UTF-8
|
Python
| false
| false
| 8,088
|
py
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id: threadpool.py 8327 2010-02-17 01:23:15Z kwc $
"""
Internal threadpool library for zenmaster.
Adapted from U{http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/203871}
Added a 'marker' to tasks so that multiple tasks with the same
marker are not executed. As we are using the thread pool for i/o
tasks, the marker is set to the i/o name. This prevents a slow i/o
from gobbling up all of our threads.
"""
import threading, logging, traceback
from time import sleep
class MarkedThreadPool:
"""Flexible thread pool class. Creates a pool of threads, then
accepts tasks that will be dispatched to the next available
thread."""
def __init__(self, numThreads):
"""Initialize the thread pool with numThreads workers."""
self.__threads = []
self.__resizeLock = threading.Condition(threading.Lock())
self.__taskLock = threading.Condition(threading.Lock())
self.__tasks = []
self.__markers = set()
self.__isJoining = False
self.set_thread_count(numThreads)
def set_thread_count(self, newNumThreads):
""" External method to set the current pool size. Acquires
the resizing lock, then calls the internal version to do real
work."""
# Can't change the thread count if we're shutting down the pool!
if self.__isJoining:
return False
self.__resizeLock.acquire()
try:
self.__set_thread_count_nolock(newNumThreads)
finally:
self.__resizeLock.release()
return True
def __set_thread_count_nolock(self, newNumThreads):
"""Set the current pool size, spawning or terminating threads
if necessary. Internal use only; assumes the resizing lock is
held."""
# If we need to grow the pool, do so
while newNumThreads > len(self.__threads):
newThread = ThreadPoolThread(self)
self.__threads.append(newThread)
newThread.start()
# If we need to shrink the pool, do so
while newNumThreads < len(self.__threads):
self.__threads[0].go_away()
del self.__threads[0]
def get_thread_count(self):
"""@return: number of threads in the pool."""
self.__resizeLock.acquire()
try:
return len(self.__threads)
finally:
self.__resizeLock.release()
def queue_task(self, marker, task, args=None, taskCallback=None):
"""Insert a task into the queue. task must be callable;
args and taskCallback can be None."""
if self.__isJoining == True:
return False
if not callable(task):
return False
self.__taskLock.acquire()
try:
self.__tasks.append((marker, task, args, taskCallback))
return True
finally:
self.__taskLock.release()
def remove_marker(self, marker):
"""Remove the marker from the currently executing tasks. Only one
task with the given marker can be executed at a given time"""
if marker is None:
return
self.__taskLock.acquire()
try:
self.__markers.remove(marker)
finally:
self.__taskLock.release()
def get_next_task(self):
""" Retrieve the next task from the task queue. For use
only by ThreadPoolThread objects contained in the pool."""
self.__taskLock.acquire()
try:
retval = None
for marker, task, args, callback in self.__tasks:
# unmarked or not currently executing
if marker is None or marker not in self.__markers:
retval = (marker, task, args, callback)
break
if retval:
# add the marker so we don't do any similar tasks
self.__tasks.remove(retval)
if marker is not None:
self.__markers.add(marker)
return retval
else:
return (None, None, None, None)
finally:
self.__taskLock.release()
def join_all(self, wait_for_tasks = True, wait_for_threads = True):
""" Clear the task queue and terminate all pooled threads,
optionally allowing the tasks and threads to finish."""
# Mark the pool as joining to prevent any more task queueing
self.__isJoining = True
# Wait for tasks to finish
if wait_for_tasks:
while self.__tasks != []:
sleep(.1)
# Tell all the threads to quit
self.__resizeLock.acquire()
try:
self.__set_thread_count_nolock(0)
self.__isJoining = True
# Wait until all threads have exited
if wait_for_threads:
for t in self.__threads:
t.join()
del t
# Reset the pool for potential reuse
self.__isJoining = False
finally:
self.__resizeLock.release()
class ThreadPoolThread(threading.Thread):
"""
Pooled thread class.
"""
threadSleepTime = 0.1
def __init__(self, pool):
"""Initialize the thread and remember the pool."""
threading.Thread.__init__(self)
self.setDaemon(True) #don't block program exit
self.__pool = pool
self.__isDying = False
def run(self):
"""
Until told to quit, retrieve the next task and execute
it, calling the callback if any.
"""
while self.__isDying == False:
marker, cmd, args, callback = self.__pool.get_next_task()
# If there's nothing to do, just sleep a bit
if cmd is None:
sleep(ThreadPoolThread.threadSleepTime)
else:
try:
try:
result = cmd(*args)
finally:
self.__pool.remove_marker(marker)
if callback is not None:
callback(result)
except Exception, e:
logging.getLogger('rosmaster.threadpool').error(traceback.format_exc())
def go_away(self):
""" Exit the run loop next time through."""
self.__isDying = True
|
[
"strawman@astraw.com"
] |
strawman@astraw.com
|
6430ad985b5c08e8f0e7f98428386d3713eb65b2
|
c45c9e74ffafcceebf395cc1c5f5d31659988c19
|
/answer_search.py
|
ff4bf3d7d7e148d57a000bb5cd58779991814eb8
|
[] |
no_license
|
tedrepo/QABasedOnMedicalKnowledgeGraph
|
f68ca297254218c72ef18a26c98f1910610f7154
|
f690b80e2a7fb85455b45d3829b6998be9ebc739
|
refs/heads/master
| 2020-03-30T23:14:39.416415
| 2018-10-05T04:12:19
| 2018-10-05T04:12:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 643
|
py
|
#!/usr/bin/env python3
# coding: utf-8
# File: answer_search.py
# Author: lhy<lhy_in_blcu@126.com,https://huangyong.github.io>
# Date: 18-10-5
from py2neo import Graph,Node
class AnswerSearcher:
def __init__(self):
self.g = Graph(
host="127.0.0.1",
http_port=7474,
user="lhy",
password="lhy123")
    '''run the cypher queries and print the corresponding results'''
def search_main(self, sqls):
for sql in sqls:
ress = self.g.run(sql).data()
for res in ress:
print(res)
return
if __name__ == '__main__':
    searcher = AnswerSearcher()
|
[
"lhy_in_blcu@126.com"
] |
lhy_in_blcu@126.com
|
c86adca0a5e98f145fd2d56300d83f383f4b36d6
|
c8dba5315ff0f20395c08a34f1cc27797e052c61
|
/150050085_lab1/cal-year.py
|
b7ac53900a1f4ee829204eec3ac47dc9e3d3aea1
|
[] |
no_license
|
AthletiCoder/PointerLanguageCompiler
|
27e51b80ad789fc46fe69d3aa1081ead4d1b9776
|
b530aadae58cc3e55277c5c51ddfc1c424034b94
|
refs/heads/master
| 2021-05-03T06:30:02.699452
| 2018-04-07T05:22:18
| 2018-04-07T05:22:18
| 120,595,662
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
import sys
yearly = sys.stdin.readlines()
row = [0]*3
cal = [0]*35
for i in range(35):
cal[i] = list(map(''.join, zip(*[iter(yearly[i+1])]*22)))
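# (zip(*[iter(line)]*22) chunks each calendar row into consecutive
#  22-character pieces, i.e. the three month columns printed below)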
print(yearly[0])
for j in range(3):
for i in range(8):
print(cal[i][j]+" "+cal[i+9][j]+" "+cal[i+18][j]+" "+cal[i+27][j])
print("")
|
[
"noreply@github.com"
] |
noreply@github.com
|
c0077791487a7cdd2f840ea70b68c6dc292acbd2
|
d9520da6139352f8e8b7ba8b1722938ff7967ebb
|
/miniflow/nn_sigmoid.py
|
a4632dfe814df259aa4c3dc34ef9f7d982a2a4e0
|
[] |
no_license
|
k26dr/sdc_coursework
|
aa5720da1e559b79a40d2dda7995320cc5725482
|
71d1e2536b21c9e226bc343cc3aaaff32e151d18
|
refs/heads/master
| 2021-01-13T15:03:13.541224
| 2016-12-30T02:17:25
| 2016-12-30T02:17:25
| 76,284,145
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
import numpy as np
from miniflow import *
inputs, weights, bias = Input(), Input(), Input()
f = Linear(inputs, weights, bias)
g = Sigmoid(f)
x = np.array([[-1., -2.], [-1, -2]])
w = np.array([[2., -3], [2., -3]])
b = np.array([-3., -5])
feed_dict = {inputs: x, weights: w, bias: b}
graph = topological_sort(feed_dict)
output = forward_pass(g, graph)
print(output)
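# Expected output (hedged; assumes Linear computes X.W + b row-wise):
# [[1.23394576e-04 9.82013790e-01]
#  [1.23394576e-04 9.82013790e-01]]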
|
[
"kedarmail@gmail.com"
] |
kedarmail@gmail.com
|
74b341d63a32b49d9cba4ce501dbbd8f5a11fc8e
|
97399f2976443cc41329c4b289e89db8c895ddb4
|
/venv/bin/symilar
|
8647cc903f86267d342557e67697147f20c65454
|
[] |
no_license
|
kinitkumar1801806/TouristHelperApi
|
c55393adf2df4c517961f416db0098a73e93c361
|
d873a9bffd7b8bd681ff600e62243bd9ec656707
|
refs/heads/master
| 2023-02-21T08:41:13.656401
| 2021-01-20T10:39:19
| 2021-01-20T10:39:19
| 323,374,394
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 263
|
#!/home/kinit/PycharmProjects/TouristHelperApi/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_symilar
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run_symilar())
|
[
"www.kinitkumar@gmail.com"
] |
www.kinitkumar@gmail.com
|
|
fdc137c38ddcc4d02f6616631ac20050ede866f4
|
40c9408be0b2b9d6eb11b94ef8edbdc3ef7ee7fa
|
/LOB_simulation/read_json.py
|
deb98ce3312244f53f7f52eff41be8f8990a036b
|
[
"MIT"
] |
permissive
|
Izidorf/exchange
|
ffff6e8414b4943c94f724b01b8dcae30e302577
|
9c5bae3091f24b832fc7acba979075013492c97a
|
refs/heads/master
| 2021-01-15T12:03:44.041923
| 2016-09-19T18:32:53
| 2016-09-19T18:32:53
| 68,633,067
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,642
|
py
|
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import pandas as pd
def JSONStream(fd, buffer_size=4096, decode=None):
'''
Decodes distinct JSON objects from a stream (a file-like object)
Returns a generator that yields sequential JSON objects as they are retreived from the stream
:param fd: A file-like object representing the input stream
:param buffer_size: Optional read buffer size
:param decode: An optional custom JSON decode function
'''
if not decode:
import json
decode = json.JSONDecoder().raw_decode
buf = ''
data = fd.read(buffer_size)
while data:
try:
if not buf: data = data.lstrip()
buf, data = buf+data, None
while buf:
obj, i= decode(buf)
buf = buf[i:].lstrip()
yield(obj)
except GeneratorExit: break
except ValueError: pass
if not fd.closed:
data = fd.read(buffer_size)
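# Usage sketch (hedged): parse back-to-back JSON objects from a log file.
#
#   with open('log2.txt') as fd:
#       for obj in JSONStream(fd):
#           handle(obj)  # yields each object as soon as raw_decode parses it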
def calc_time_difference(time):
if(len(time) < 2):
return 0
t = time[0]
for i in range(1, len(time)):
time[i-1] = abs((time[i]-t)/1000.0)
t = time[i]
return time[:-1]
def load_json(filename):
with open(filename, 'r') as fd:
ob_state = {}
# buy limit orders
buy_time = []
buy_size = []
buy_price = []
# sell limit orders
sell_time = []
sell_size = []
sell_price = []
# buy limit cancellations
buy_c_time = []
buy_c_size = []
buy_c_price = []
# sell limit cancelations
sell_c_time = []
sell_c_size = []
sell_c_price = []
# trades
trade_b_time = []
trade_b_volume = []
trade_b_price = []
trade_s_time = []
trade_s_volume = []
trade_s_price = []
count = 0
for o in JSONStream(fd):
count = count + 1
print count
print o
# for key, value in o.items():
if 'asks' in o and 'bids' in o:
ob_state=o
if 'state' in o and 'localTimestamp' in o:
value = o['state']
best_ask = ob_state['asks'][0]['limitPrice']
best_bid = ob_state['bids'][0]['limitPrice']
time = o['localTimestamp']
volume = o['orderInfo']['volume']
price = o['orderInfo']['limitPrice']
if value == 'CREATED':
if o['direction'] == 'BUY' and best_bid-price < 5 and best_bid-price > -5:
buy_time.append(time)
buy_size.append(volume)
buy_price.append(best_bid-price)
elif o['direction'] == 'SELL' and best_ask-price < 5 and best_ask-price > -5:
sell_time.append(time)
sell_size.append(volume)
sell_price.append(best_ask-price)
                elif value == 'DELETED':
                    if o['direction'] == 'BUY' and best_bid-price < 5 and best_bid-price > -5:
buy_c_time.append(time)
buy_c_size.append(volume)
buy_c_price.append(best_bid-price)
elif o['direction'] == 'SELL' and best_ask-price < 5 and best_ask-price > -5:
sell_c_time.append(time)
sell_c_size.append(volume)
sell_c_price.append(best_ask-price)
# elif value == 'UPDATED':
# if o['direction'] == 'BUY' :
# trade_b_time.append(time)
# trade_b_volume.append(volume)
# trade_b_price.append(price)
# elif o['direction'] == 'SELL':
# trade_s_time.append(time)
# trade_s_volume.append(volume)
# trade_s_price.append(price)
if 'exchangeTradeId' in o:
time = o['localTimestamp']
volume = o['volume']
price = o['price']
                best_bid = ob_state['bids'][0]['limitPrice']
                best_ask = ob_state['asks'][0]['limitPrice']  # used by the sell branch below
if price >= best_bid and best_bid-price <= 0 and best_bid-price > -5:
trade_b_time.append(time)
trade_b_volume.append(volume)
trade_b_price.append(best_bid-price)
elif best_ask-price <= 0 and best_ask-price > -5:
trade_s_time.append(time)
trade_s_volume.append(volume)
trade_s_price.append(best_ask-price)
sell_time = calc_time_difference(sell_time)
sell_c_time = calc_time_difference(sell_c_time)
buy_time = calc_time_difference(buy_time)
buy_c_time = calc_time_difference(buy_c_time)
trade_b_time = calc_time_difference(trade_b_time)
trade_s_time = calc_time_difference(trade_s_time)
# buy_size = map(lambda x: float(x)/1000000000.0, buy_size)
# trade_size = map(lambda x: float(x)/1000000000.0, trade_size)
# trade_b_volume = map(lambda x: float(x)/1000000000.0, trade_b_volume )
# trade_s_volume = map(lambda x: float(x)/1000000000.0, trade_s_volume)
#
# print trade_b_time
        # print trade_s_time
return buy_time, buy_size, buy_price, sell_time, sell_size, sell_price, buy_c_time, buy_c_size, buy_c_price, sell_c_time, sell_c_size, sell_c_price, trade_b_time,trade_s_time,trade_s_volume, trade_b_volume, trade_b_price, trade_s_price
# if __name__ == "__main__":
# buy_time = load_json('log2.txt')
# print buy_time
# df.to_csv(path_or_buf ='test.csv')
|
[
"izidorflajsman@tests-MacBook-Pro-2.local"
] |
izidorflajsman@tests-MacBook-Pro-2.local
|
807c1bda878f4fd044b408e7ea19357e343ca3ec
|
73eb133e8f7a167d13c02f3db96274ae5e7152e7
|
/a.py
|
14dff844ad05865bc4737459fb420fd4b68ca83b
|
[] |
no_license
|
yunhom/TestGit
|
1479ccb7976a97cc5d3ce18ff9f9afde8643ad5e
|
d6f915f04417a349312b71c1da30e4d01af87f6c
|
refs/heads/master
| 2021-01-17T17:46:25.283114
| 2017-12-28T19:40:14
| 2017-12-28T19:40:14
| 61,826,147
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,833
|
py
|
import urllib.request
from urllib import error
from bs4 import BeautifulSoup
import os.path
import re
import operator
# Scrape all the city codes from the China Meteorological Administration site
# base URL of the China weather site
weather_base_url = "http://www.weather.com.cn"
# URL of the North China text-forecast page
weather_hb_url = "http://www.weather.com.cn/textFC/hb.shtml#"
# get the per-region city-list links
def get_city_list_url():
city_list_url = []
weather_hb_resp = urllib.request.urlopen(weather_hb_url)
weather_hb_html = weather_hb_resp.read().decode('utf-8')
weather_hb_soup = BeautifulSoup(weather_hb_html, 'html.parser')
weather_box = weather_hb_soup.find(attrs={'class': 'lqcontentBoxheader'})
weather_a_list = weather_box.findAll('a')
for i in weather_a_list:
city_list_url.append(weather_base_url + i['href'])
return city_list_url
# fetch the city codes from the given city-list URL
def get_city_code(city_list_url):
    city_code_dict = {}  # empty dict mapping city code -> name
    city_pattern = re.compile(r'^<a.*?weather/(.*?).s.*</a>$')  # regex extracting the city code
weather_hb_resp = urllib.request.urlopen(city_list_url)
weather_hb_html = weather_hb_resp.read().decode('utf-8')
weather_hb_soup = BeautifulSoup(weather_hb_html, 'html.parser')
    # filter out the invalid (hidden) tab blocks
div_conMidtab = weather_hb_soup.find_all(attrs={'class': 'conMidtab', 'style': ''})
for mid in div_conMidtab:
tab3 = mid.find_all(attrs={'class': 'conMidtab3'})
for tab in tab3:
trs = tab.findAll('tr')
for tr in trs:
a_list = tr.findAll('a')
for a in a_list:
if a.get_text() != "详情":
# 正则拿到城市编码
city_code = city_pattern.match(str(a)).group(1)
city_name = a.string
city_code_dict[city_code] = city_name
return city_code_dict
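# Example result (hedged; 101010100 is the station code for Beijing):
# get_city_code("http://www.weather.com.cn/textFC/hb.shtml#")
#   -> {'101010100': '北京', ...}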
# write the results to a file
def write_to_file(city_code_list):
try:
with open('/root/city_code.txt', "w+") as f:
for city in city_code_list:
f.write(city[0] + ":" + city[1] + "\n")
except OSError as reason:
print(str(reason))
else:
print("文件写入完毕!")
if __name__ == '__main__':
    city_result = {}  # empty dict collecting the codes from every region
city_list = get_city_list_url()
# get_city_code("http://www.weather.com.cn/textFC/guangdong.shtml")
for i in city_list:
print("开始查询:" + i)
city_result.update(get_city_code(i))
    # sort ascending by city code
sort_list = sorted(city_result.items(), key=operator.itemgetter(0))
    # save to file
write_to_file(sort_list)
|
[
"noreply@github.com"
] |
noreply@github.com
|
2a012620dfe09c0f6c1c04320e49696991285bed
|
8e6203db7383475f1c24a590f0456330b969bb4b
|
/optbinning/binning/distributed/plots.py
|
dba20f0cab79a00b42588937c020ed96d925680e
|
[
"Apache-2.0"
] |
permissive
|
guillermo-navas-palencia/optbinning
|
6fdfc764a214052b4d7d8e0b59114f0a63e6d5a8
|
73aee82008ebe88b732430e7c5764da57fb4d3ae
|
refs/heads/master
| 2023-08-28T13:33:43.536143
| 2023-08-22T19:20:18
| 2023-08-22T19:20:18
| 231,076,826
| 377
| 91
|
Apache-2.0
| 2023-09-05T20:14:14
| 2019-12-31T11:17:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,370
|
py
|
"""
Binning sketch plots.
"""
# Guillermo Navas-Palencia <g.navas.palencia@gmail.com>
# Copyright (C) 2020
import matplotlib.pyplot as plt
import numpy as np
def plot_progress_divergence(df, divergence):
n = len(df)
n_add = df.n_add
n_records = df.n_records
div = df.divergence
mv_div_mean = div.rolling(n, min_periods=1).mean()
mv_div_std = div.rolling(n, min_periods=1).std()
mv_div_std /= np.sqrt(np.arange(1, n+1))
div_low = np.maximum(0, div - mv_div_std * 1.959963984540054)
div_high = div + mv_div_std * 1.959963984540054
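    # 1.959963984540054 is the two-sided 95% standard-normal quantile, so
    # [div_low, div_high] is an approximate 95% confidence band around the
    # running mean of the divergence.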
div_label = "divergence ({:.5f})".format(div.values[-1])
mv_div_label = "moving mean ({:.5f})".format(mv_div_mean.values[-1])
mv_std_label = "standard error ({:.5f})".format(mv_div_std.values[-1])
plt.plot(n_records, div, label=div_label)
plt.plot(n_records, mv_div_mean, linestyle="-.", color="green",
label=mv_div_label)
plt.fill_between(n_records, div_low, div_high, alpha=0.2, color="green",
label=mv_std_label)
plt.title("Progress after {:} add and {} processed records".
format(int(n_add.values[-1]), int(n_records.values[-1])),
fontsize=14)
plt.xlabel("Processed records", fontsize=12)
plt.ylabel("Divergence: {}".format(divergence), fontsize=12)
plt.legend(fontsize=12)
plt.show()
|
[
"g.navas.palencia@gmail.com"
] |
g.navas.palencia@gmail.com
|
9a732ee14213ecf26239bb68f65c2699680d42d1
|
ff1b5b1ea9b9b67cd611458c962b19ba40294368
|
/codingdiagnostic/codingdiagnostic/buyLotsOfFruit.py
|
2eadf001f0f649c900b9237ac79a33f148220f31
|
[] |
no_license
|
mahmoudbadawy243/AI-sheet-Dr-hossiny
|
05569bbf50dd70522c90f88fb4a396385a0a617f
|
12a17d162e080f9a1c7a9977a2efe4193c78816c
|
refs/heads/main
| 2023-08-27T18:14:16.341875
| 2021-10-28T05:13:38
| 2021-10-28T05:13:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,638
|
py
|
# buyLotsOfFruit.py
# -----------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
To run this script, type
python buyLotsOfFruit.py
Once you have correctly implemented the buyLotsOfFruit function,
the script should produce the output:
Cost of [('apples', 2.0), ('pears', 3.0), ('limes', 4.0)] is 12.25
"""
from __future__ import print_function
fruitPrices = {'apples': 2.00, 'oranges': 1.50, 'pears': 1.75,
'limes': 0.75, 'strawberries': 1.00}
def buyLotsOfFruit(orderList):
"""
orderList: List of (fruit, numPounds) tuples
Returns cost of order
"""
totalCost = 0.0
for n,c in orderList:
if n in fruitPrices:
totalCost=totalCost+c*fruitPrices[n]
        else:
            print(n, "not found in fruitPrices")
            return None
return totalCost
# Main Method
if __name__ == '__main__':
"This code runs when you invoke the script from the command line"
orderList = [('apples', 2.0), ('pears', 3.0), ('limes', 4.0)]
print('Cost of', orderList, 'is', buyLotsOfFruit(orderList))
|
[
"noreply@github.com"
] |
noreply@github.com
|
6c28dcf942f472f991296fca7296a7efb8fd6587
|
967fd4117c72c56000d779970f2c85cc8b661cfe
|
/BasicSyntax
|
c0013026d02294c840c2012b10d46212db3662ca
|
[] |
no_license
|
akrai37/Python-programs
|
afe0761eb56c81e36bba926b69cc571ad9ac6d62
|
866fdd585f5c69de28a900e068818d635201fc44
|
refs/heads/master
| 2020-05-16T11:24:59.229346
| 2019-06-04T18:08:41
| 2019-06-04T18:08:41
| 183,015,646
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
skill_completed = "Python Syntax"
exercises_completed = 13
#The amount of points for each exercise may change, because points don't exist yet
points_per_exercise = 5
point_total = 100
point_total += exercises_completed*points_per_exercise
print("I got "+str(point_total)+" points!")
|
[
"noreply@github.com"
] |
noreply@github.com
|
|
bcaf40cdeb60c459304ff421cdf6b8ff1a525c1c
|
4ec1590f65fd7dc1326455876bff3c0b4cb8d7a6
|
/Lib/site-packages/sqlalchemy/testing/profiling.py
|
b034dafbf78cbc612521cffcc0e7f4377700c706
|
[] |
no_license
|
Raja3131/JobsFastapi
|
b67950cc2e4560f6c76a4008f3052187afafb4b8
|
b56cf4cbaa3fcb09ad873fd91467923ff67d705b
|
refs/heads/main
| 2023-07-24T14:18:16.137728
| 2021-08-31T05:10:18
| 2021-08-31T05:10:18
| 401,579,190
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,018
|
py
|
# testing/profiling.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Profiling support for unit and performance tests.
These are special purpose profiling methods which operate
in a more fine-grained way than nose's profiling plugin.
"""
import collections
import contextlib
import os
import platform
import pstats
import sys
from . import config
from .util import gc_collect
from ..util import update_wrapper
try:
import cProfile
except ImportError:
cProfile = None
_current_test = None
# ProfileStatsFile instance, set up in plugin_base
_profile_stats = None
class ProfileStatsFile(object):
"""Store per-platform/fn profiling results in a file.
We're still targeting Py2.5, 2.4 on 0.7 with no dependencies,
so no json lib :( need to roll something silly
"""
def __init__(self, filename):
self.force_write = (
config.options is not None and config.options.force_write_profiles
)
self.write = self.force_write or (
config.options is not None and config.options.write_profiles
)
self.fname = os.path.abspath(filename)
self.short_fname = os.path.split(self.fname)[-1]
self.data = collections.defaultdict(
lambda: collections.defaultdict(dict)
)
self._read()
if self.write:
# rewrite for the case where features changed,
# etc.
self._write()
@property
def platform_key(self):
dbapi_key = config.db.name + "_" + config.db.driver
if config.db.name == "sqlite" and config.db.dialect._is_url_file_db(
config.db.url
):
dbapi_key += "_file"
# keep it at 2.7, 3.1, 3.2, etc. for now.
py_version = ".".join([str(v) for v in sys.version_info[0:2]])
platform_tokens = [
platform.machine(),
platform.system().lower(),
platform.python_implementation().lower(),
py_version,
dbapi_key,
]
platform_tokens.append(
"nativeunicode"
if config.db.dialect.convert_unicode
else "dbapiunicode"
)
_has_cext = config.requirements._has_cextensions()
platform_tokens.append(_has_cext and "cextensions" or "nocextensions")
return "_".join(platform_tokens)
def has_stats(self):
test_key = _current_test
return (
test_key in self.data and self.platform_key in self.data[test_key]
)
def result(self, callcount):
test_key = _current_test
per_fn = self.data[test_key]
per_platform = per_fn[self.platform_key]
if "counts" not in per_platform:
per_platform["counts"] = counts = []
else:
counts = per_platform["counts"]
if "current_count" not in per_platform:
per_platform["current_count"] = current_count = 0
else:
current_count = per_platform["current_count"]
has_count = len(counts) > current_count
if not has_count:
counts.append(callcount)
if self.write:
self._write()
result = None
else:
result = per_platform["lineno"], counts[current_count]
per_platform["current_count"] += 1
return result
def replace(self, callcount):
test_key = _current_test
per_fn = self.data[test_key]
per_platform = per_fn[self.platform_key]
counts = per_platform["counts"]
current_count = per_platform["current_count"]
if current_count < len(counts):
counts[current_count - 1] = callcount
else:
counts[-1] = callcount
if self.write:
self._write()
def _header(self):
return (
"# %s\n"
"# This file is written out on a per-environment basis.\n"
"# For each test in aaa_profiling, the corresponding "
"function and \n"
"# environment is located within this file. "
"If it doesn't exist,\n"
"# the test is skipped.\n"
"# If a callcount does exist, it is compared "
"to what we received. \n"
"# assertions are raised if the counts do not match.\n"
"# \n"
"# To add a new callcount test, apply the function_call_count \n"
"# decorator and re-run the tests using the --write-profiles \n"
"# option - this file will be rewritten including the new count.\n"
"# \n"
) % (self.fname)
def _read(self):
try:
profile_f = open(self.fname)
except IOError:
return
for lineno, line in enumerate(profile_f):
line = line.strip()
if not line or line.startswith("#"):
continue
test_key, platform_key, counts = line.split()
per_fn = self.data[test_key]
per_platform = per_fn[platform_key]
c = [int(count) for count in counts.split(",")]
per_platform["counts"] = c
per_platform["lineno"] = lineno + 1
per_platform["current_count"] = 0
profile_f.close()
def _write(self):
print(("Writing profile file %s" % self.fname))
profile_f = open(self.fname, "w")
profile_f.write(self._header())
for test_key in sorted(self.data):
per_fn = self.data[test_key]
profile_f.write("\n# TEST: %s\n\n" % test_key)
for platform_key in sorted(per_fn):
per_platform = per_fn[platform_key]
c = ",".join(str(count) for count in per_platform["counts"])
profile_f.write("%s %s %s\n" % (test_key, platform_key, c))
profile_f.close()
def function_call_count(variance=0.05, times=1, warmup=0):
"""Assert a target for a test case's function call count.
The main purpose of this assertion is to detect changes in
callcounts for various functions - the actual number is not as important.
Callcounts are stored in a file keyed to Python version and OS platform
information. This file is generated automatically for new tests,
and versioned so that unexpected changes in callcounts will be detected.
"""
def decorate(fn):
def wrap(*args, **kw):
for warm in range(warmup):
fn(*args, **kw)
timerange = range(times)
with count_functions(variance=variance):
for time in timerange:
rv = fn(*args, **kw)
return rv
return update_wrapper(wrap, fn)
return decorate
@contextlib.contextmanager
def count_functions(variance=0.05):
if cProfile is None:
raise config._skip_test_exception("cProfile is not installed")
if not _profile_stats.has_stats() and not _profile_stats.write:
config.skip_test(
"No profiling stats available on this "
"platform for this function. Run tests with "
"--write-profiles to add statistics to %s for "
"this platform." % _profile_stats.short_fname
)
gc_collect()
pr = cProfile.Profile()
pr.enable()
# began = time.time()
yield
# ended = time.time()
pr.disable()
# s = compat.StringIO()
stats = pstats.Stats(pr, stream=sys.stdout)
# timespent = ended - began
callcount = stats.total_calls
expected = _profile_stats.result(callcount)
if expected is None:
expected_count = None
else:
line_no, expected_count = expected
print(("Pstats calls: %d Expected %s" % (callcount, expected_count)))
stats.sort_stats("cumulative")
stats.print_stats()
if expected_count:
deviance = int(callcount * variance)
failed = abs(callcount - expected_count) > deviance
if failed or _profile_stats.force_write:
if _profile_stats.write:
_profile_stats.replace(callcount)
else:
raise AssertionError(
"Adjusted function call count %s not within %s%% "
"of expected %s, platform %s. Rerun with "
"--write-profiles to "
"regenerate this callcount."
% (
callcount,
(variance * 100),
expected_count,
_profile_stats.platform_key,
)
)
|
[
"kingraja673@gmail.com"
] |
kingraja673@gmail.com
|
142b4edaf5e0cb5022cd5869f8cbdf4542e77689
|
a4df0ee67d0d56fc8595877470318aed20dd4511
|
/vplexapi-6.2.0.3/vplexapi/models/health_state.py
|
819d13f492a6fb68862c506a14264a4633267ac3
|
[
"Apache-2.0"
] |
permissive
|
QD888/python-vplex
|
b5a7de6766840a205583165c88480d446778e529
|
e2c49faee3bfed343881c22e6595096c7f8d923d
|
refs/heads/main
| 2022-12-26T17:11:43.625308
| 2020-10-07T09:40:04
| 2020-10-07T09:40:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,553
|
py
|
# coding: utf-8
"""
VPlex REST API
A defnition for the next-gen VPlex API # noqa: E501
OpenAPI spec version: 0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class HealthState(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
allowed enum values
"""
UNKNOWN = "unknown"
OK = "ok"
DEGRADED = "degraded"
MINOR_FAILURE = "minor-failure"
MAJOR_FAILURE = "major-failure"
CRITICAL_FAILURE = "critical_failure"
NON_RECOVERABLE_ERROR = "non-recoverable-error"
ISOLATED = "isolated"
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""HealthState - a model defined in Swagger""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, HealthState):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"anil.degwekar@emc.com"
] |
anil.degwekar@emc.com
|
3ec19460ce437a87592c19e706dffcfc90cf10ba
|
70b1c91530b28d86e0fe8f46187b33322a6b9228
|
/backend/manage.py
|
00289a0b69ac55cbdc207727fa8d516788743952
|
[] |
no_license
|
crowdbotics-apps/satsuscreen-dev-1528
|
34fd3d6a3c4a717dcc657eb8ef055e736f33b70d
|
9b910435fc4ef034efe521985446055c688d52d7
|
refs/heads/master
| 2022-04-04T20:58:24.447428
| 2020-01-21T17:16:51
| 2020-01-21T17:16:51
| 235,398,078
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 640
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'satsuscreen_dev_1528.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
05f98c995114c13d415121f855678ae770c9123b
|
d93fe0484fc3b32c8fd9b33cc66cfd636a148ec4
|
/AtCoder/ABC-D/107probD.py
|
261c5013ca5189665dd06803268802f1623a399f
|
[] |
no_license
|
wattaihei/ProgrammingContest
|
0d34f42f60fa6693e04c933c978527ffaddceda7
|
c26de8d42790651aaee56df0956e0b206d1cceb4
|
refs/heads/master
| 2023-04-22T19:43:43.394907
| 2021-05-02T13:05:21
| 2021-05-02T13:05:21
| 264,400,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,046
|
py
|
# imos-style BIT: an add applies uniformly over 0..i, and indexing returns the value at position i
class imosBIT():
def __init__(self, N):
self.N = N
self.bit = [0 for _ in range(self.N+1)]
def __str__(self):
ret = []
for i in range(1, self.N+1):
ret.append(self.__getitem__(i))
return "[" + ", ".join([str(a) for a in ret]) + "]"
def __getitem__(self, i):
s = 0
while i > 0:
s += self.bit[i]
i -= i & -i
return s
def add(self, i, x):
while i <= self.N:
self.bit[i] += x
i += i & -i
import sys
input = sys.stdin.readline
N = int(input())
A = list(map(int, input().split()))
l = 0
r = max(A) + 1
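# Added explanatory sketch: binary search on the answer x. Map each element
# to +1 (a >= x) or -1 (a < x); a subarray's median is >= x exactly when its
# prefix-sum difference P[r] - P[l] is non-negative, and the BIT below counts
# those pairs so we keep the largest x reached by at least half of all subarrays.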
while r-l > 1:
x = (l+r)//2
P = [0]
for a in A:
p = +1 if a >= x else -1
P.append(P[-1]+p)
score = 0
bit = imosBIT(2*N+5)
for p in P:
p += N+1
score += bit[p]
bit.add(p, 1)
if score >= (N*(N+1)//2+1)//2:
l = x
else:
r = x
print(l)
|
[
"wattaihei.rapyuta@gmail.com"
] |
wattaihei.rapyuta@gmail.com
|
3cda1ae2b4cd6cb0ed54b4906fae385788620fcd
|
1e652f1ef9b9a3688d1f8eafc0c7858a698b70e8
|
/01_Replication/03_Motifs/Motifs.py
|
472e4eb1497abd0018f802686bea6c846bb0878d
|
[] |
no_license
|
mtleis/Bioinformatics-Specialisation-UC
|
2f269f60d2a8dbfbcef7663eb807a5f2a22c954d
|
aa7485d841f26b290276f209936f11a1af8e6e1c
|
refs/heads/master
| 2020-04-25T12:40:35.240960
| 2019-05-28T07:31:51
| 2019-05-28T07:31:51
| 172,785,299
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,129
|
py
|
def Count(Motifs):
count = {}
k = len(Motifs[0])
for symbol in "ACGT":
count[symbol] = []
for j in range(k):
count[symbol].append(0)
t = len(Motifs)
for i in range(t):
for j in range(k):
symbol = Motifs[i][j]
count[symbol][j] += 1
return count
# Input: A list of kmers Motifs
# Output: the profile matrix of Motifs, as a dictionary of lists.
def Profile(Motifs):
t = len(Motifs)
k = len(Motifs[0])
profile = {}
# insert your code here
count = Count(Motifs)
for symbol in 'ACGT':
profile[symbol] = []
    for i in range(k):
        total = 0  # renamed to avoid shadowing the built-in sum()
        for symbol in 'ACGT':
            total = total + count[symbol][i]
        for symbol in 'ACGT':
            profile[symbol].append(count[symbol][i] / total)
return profile
# Input: A set of kmers Motifs
# Output: A consensus string of Motifs.
def Consensus(Motifs):
consensus = ""
k = len(Motifs[0])
count = Count(Motifs)
for j in range(k):
m = 0
frequentSymbol = ""
for symbol in "ACGT":
if count[symbol][j] > m:
m = count[symbol][j]
frequentSymbol = symbol
consensus += frequentSymbol
return consensus
# Input: A set of k-mers Motifs
# Output: The score of these k-mers.
def Score(Motifs):
consensus = Consensus(Motifs)
score = 0
for i in range(len(Motifs)):
for k in range(len(consensus)):
if consensus[k] != Motifs[i][k]:
score += 1
return score
Motifs = ["ATGCA", "TGGCA", "ATGCT"]
print(Count(Motifs))
print(Profile(Motifs))
print(Consensus(Motifs))
print(Score(Motifs))
|
[
"m.tleis@liacs.leidenuniv.nl"
] |
m.tleis@liacs.leidenuniv.nl
|
1dadfb379c81c86bb5dc817f66cd29c2fda380ad
|
e00d41c9f4045b6c6f36c0494f92cad2bec771e2
|
/programming/language/python/python-importlib_resources/actions.py
|
039be24d23edfb7a60cf2f14d2e783d0ae5ac760
|
[] |
no_license
|
pisilinux/main
|
c40093a5ec9275c771eb5fb47a323e308440efef
|
bfe45a2e84ea43608e77fb9ffad1bf9850048f02
|
refs/heads/master
| 2023-08-19T00:17:14.685830
| 2023-08-18T20:06:02
| 2023-08-18T20:06:02
| 37,426,721
| 94
| 295
| null | 2023-09-14T08:22:22
| 2015-06-14T19:38:36
|
Python
|
UTF-8
|
Python
| false
| false
| 383
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import shelltools
from pisi.actionsapi import pythonmodules
shelltools.export("SETUPTOOLS_SCM_PRETEND_VERSION","1.5.0")
def build():
pythonmodules.compile()
def install():
pythonmodules.install()
|
[
"bluedevil@sctzine.com"
] |
bluedevil@sctzine.com
|
0e3b552414f8ead8c3344be3c90ef387fb41e9b2
|
8f359c04efb43034f2aa2fa3aa96beaa407d45bb
|
/ps1/house_hunting_partC.py
|
57443545924dbcd413a2ffa20249216d5c4cdb75
|
[] |
no_license
|
Clark229Kent/mitocw6.0001
|
d6f89692d0c90113e1e8ed06176c4de584d6dd8f
|
b29a3b6a72433e18374475a50790da91049a020f
|
refs/heads/main
| 2023-01-20T22:09:30.366720
| 2020-11-29T01:27:33
| 2020-11-29T01:27:33
| 313,934,081
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,747
|
py
|
total_cost = 1000000
semi_annual_raise = 0.07
annual_return = 0.04
starting_salary = float(input("Enter your starting salary: "))
portion_down_payment = total_cost * 0.25
current_savings = 0
epsilon = 100
number_of_months = 0
def three_year_savings(current_savings, guess, semi_annual_raise,
annual_return, starting_salary):
    for month in range(36):
        # the semi-annual raise kicks in every sixth month; the monthly
        # savings update is the same either way
        if (month + 1) % 6 == 0:
            starting_salary += semi_annual_raise * starting_salary
        current_savings += guess * starting_salary / 12 + current_savings * annual_return / 12
return current_savings
highest_three_year = three_year_savings(0, 1, semi_annual_raise, annual_return, starting_salary)  # guess = 1: save the entire salary
if abs(highest_three_year - portion_down_payment) >= epsilon and highest_three_year < portion_down_payment:
print("It is not possible to pay the down payment in three years. Your maximum savings:", highest_three_year)
else:
low = 0
high = 10000
guess = int((low + high) / 2)
guessed_savings_rate = guess / 10000
step = 0
    savings = three_year_savings(0, guessed_savings_rate, semi_annual_raise, annual_return, starting_salary)
    # bisection on the savings rate: overshooting the down payment lowers the
    # rate, undershooting raises it
    while abs(savings - portion_down_payment) >= epsilon:
        if savings > portion_down_payment:
            high = guess
        else:
            low = guess
        step += 1
        guess = int((high + low) / 2)
        print(guess)
        guessed_savings_rate = guess / 10000
        savings = three_year_savings(0, guessed_savings_rate, semi_annual_raise, annual_return, starting_salary)
    print("Best savings rate:", guessed_savings_rate)
print("Steps in bisection search:", step)
|
[
"noreply@github.com"
] |
noreply@github.com
|
5f033902c9de105ad251faa24d395812abd56674
|
c1ea35519cf44101b873cd752e9eebef9e491813
|
/zi.py
|
02d17cfb7465f9a2cfed9d157f9ddcfedd4c8241
|
[
"MIT"
] |
permissive
|
appendixisu/chooseZi
|
9aa4190b9e9bfa8ceb211427edeb723469a7af71
|
579dce0948fb652919eb19931b5af9ed5ebed065
|
refs/heads/master
| 2023-04-05T22:14:02.444096
| 2021-03-30T07:23:48
| 2021-03-30T07:23:48
| 287,963,730
| 1
| 1
|
MIT
| 2020-08-17T11:49:11
| 2020-08-16T14:49:40
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,906
|
py
|
# !/usr/bin/python
# coding:utf-8
# import requests
# from bs4 import BeautifulSoup
# import io
# import time
# def sleeptime(hour,min,sec):
# return hour*3600 + min*60 + sec
# url = "https://shufa.supfree.net/raky.asp?zi=%BF%C9"
# resp = requests.get(url)
# soup = BeautifulSoup(resp.text, 'lxml')
# print(soup.prettify())
from selenium import webdriver
import time
import urllib
import urllib.request
from urllib.parse import quote
import os
from opencc import OpenCC
from PIL import Image
from parse4808 import read4808
def gif2jpg(path):
im = Image.open(path)
im = im.convert('RGB')
file = path.split('.')
im.save(file[0]+'.jpg', 'JPEG', quality=85)
if os.path.isfile(path):
os.remove(path)
word = read4808()
# Directory where the images are saved
local_path = '/Users/wei-chilan/Documents/python/chooseZi/imgs'
# XPath of the target image elements
xpath = '//div[@class="cdiv"]/ul/li/a/img'
# Launch the Chrome browser
chromeDriver = r'/Users/wei-chilan/webDriverTool/chromedriver' # location of the chromedriver binary
driver = webdriver.Chrome(chromeDriver)
# Maximize the window, since each scrape only sees the images inside the viewport
driver.maximize_window()
# Track downloaded image URLs to avoid downloading duplicates
img_url_dic = {}
# Convert Simplified Chinese to Traditional Chinese
cc = OpenCC('s2t')
# skip
del(word[0:1646])
m = 0 # image counter
for w in word:
# URL of the page to scrape
print('left:', len(word) - word.index(w), 'now:', w)
url = 'https://shufa.supfree.net/raky.asp?zi=' + quote(w.encode('gbk'))
# Open the page in the browser
driver.get(url)
for element in driver.find_elements_by_xpath(xpath):
# time.sleep(0.5)
try:
img_url = element.get_attribute('src')
img_alt = element.get_attribute('alt').replace(' ', '').replace('“','').replace('”','')
img_alt = cc.convert(img_alt)
# print('img_alt:', cc.convert(img_alt))
# Save the image to the target path
if img_url != None and not img_url in img_url_dic:
img_url_dic[img_url] = ''
m += 1
# print('img_url:', img_url)
ext = img_url.split('/')[-1]
# print('ext:', ext)
filename = img_alt + '_' + ext
# print('filename:', filename)
# Save the image
filePath = os.path.join(local_path , filename)
print('filePath:', filePath)
if not os.path.exists(local_path):
print('create file')
os.mkdir(local_path)
urllib.request.urlretrieve(img_url, filePath)
gif2jpg(filePath)
except OSError as e:
print('An OSError occurred!')
print('Error:', e.errno, e.filename, e.strerror)
continue
driver.close()
|
[
"andy10189911@hotmail.com"
] |
andy10189911@hotmail.com
|
9c85a3150d50dce18e37c4fd3faae85c74370fc8
|
32b628faa8b8ca8d11d8837cc495c0013f58b71a
|
/scripts/matrix2matrix.py
|
db91bd6468fb706939d9b97cc2c5810de2e084d0
|
[
"BSD-2-Clause"
] |
permissive
|
jaquol/cgat
|
40b81617625ae9f0ba352caf38c2afd6a13c58f6
|
d26fab0dff2192d4accc128d2895e668254d7b65
|
refs/heads/master
| 2021-01-12T22:33:46.186451
| 2016-01-15T16:56:43
| 2016-01-15T16:56:43
| 49,868,597
| 1
| 0
| null | 2016-01-18T10:10:24
| 2016-01-18T10:10:24
| null |
UTF-8
|
Python
| false
| false
| 17,852
|
py
|
'''
matrix2matrix.py - operate on matrices
======================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
Read a matrix from stdin, apply one or more methods to it, and write the
result to stdout. Supported matrix formats:
* full: full matrix with row and column headers (unless --no-headers is given.)
* sparse: sparse matrix
* phylip: phylip formatted matrix, but using tabs as separators and long names.
Methods:
sort-rows
sort rows by order in --rows-tsv-file
sort-columns
sort columns by order in --columns-tsv-file
mask-rows
set rows matching ids in --rows-tsv-file to --value
mask-columns
set columns matching ids in --columns-tsv-file to --value
mask-rows-and-columns
set rows and columns matching ids in --columns-tsv-file to --value (and)
Usage
-----
Example::
python matrix2matrix.py --help
Type::
python matrix2matrix.py --help
for command line help.
Command line options
--------------------
'''
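# Example invocation (added sketch; flag names come from the options defined
# below, file names are hypothetical):
#   python matrix2matrix.py --method=log --format="%6.4f" < matrix.tsv > out.tsv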
import sys
import math
import StringIO
import numpy
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
import CGAT.CorrespondenceAnalysis as CorrespondenceAnalysis
import CGAT.MatlabTools as MatlabTools
import scipy
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
parser = E.OptionParser(
version="%prog version: $Id: matrix2matrix.py 2782 2009-09-10 11:40:29Z andreas $")
parser.add_option("-m", "--method", dest="methods", type="choice", action="append",
choices=("normalize-by-min-diagonal", "normalize-by-column",
"log", "ln", "negzero2value",
"set-diagonal",
"subtract-matrix", "mix-matrix", "normalize-by-matrix",
"normalize-by-column-max", "normalize-by-row-max",
"normalize-by-column-min", "normalize-by-row-min",
"normalize-by-column-median", "normalize-by-row-median",
"normalize-by-column-mean", "normalize-by-row-mean",
"normalize-by-column-total", "normalize-by-row-total",
"correspondence-analysis",
"normalize-by-value",
"add-value",
"sort-rows", "sort-columns",
"transpose",
"upper-bound", "lower-bound",
"subtract-first-col", "multiply-by-value", "divide-by-value",
"mask-rows", "mask-columns", "mask-rows-and-columns",
"symmetrize-mean", "symmetrize-max", "symmetrize-min",
),
help="""method to use [default=%default]""" )
parser.add_option("-s", "--scale", dest="scale", type="float",
help="factor to scale matrix by [default=%default].")
parser.add_option("-f", "--format", dest="format", type="string",
help="output number format [default=%default].")
parser.add_option("--rows-tsv-file", dest="filename_rows", type="string",
help="filename with rows to mask [default=%default].")
parser.add_option("--columns-tsv-file", dest="filename_columns", type="string",
help="filename with columns to mask [default=%default].")
parser.add_option("-p", "--parameters", dest="parameters", type="string",
help="Parameters for various functions.")
parser.add_option("-t", "--header-names", dest="headers", action="store_true",
help="matrix has row/column headers.")
parser.add_option("--no-headers", dest="headers", action="store_false",
help="matrix has no row/column headers.")
parser.add_option("-a", "--value", dest="value", type="float",
help="value to use for various algorithms.")
parser.add_option("-i", "--input-format", dest="input_format", type="choice",
choices=("full", "sparse", "phylip"),
help="""input format for matrix.""" )
parser.add_option("-o", "--output-format", dest="output_format", type="choice",
choices=("full", "sparse", "phylip"),
help="""output format for matrix.""" )
parser.add_option("--missing-value", dest="missing", type="float",
help="value to use for missing values. If not set, missing values will cause the script to fail [default=%default].")
parser.set_defaults(
methods=[],
scale=1.0,
headers=True,
format="%6.4f",
output_format="full",
input_format="full",
value=0.0,
parameters="",
write_separators=True,
filename_rows=None,
filename_columns=None,
missing=None,
)
(options, args) = E.Start(parser)
options.parameters = options.parameters.split(",")
lines = filter(lambda x: x[0] != "#", sys.stdin.readlines())
if len(lines) == 0:
raise IOError("no input")
chunks = filter(lambda x: lines[x][0] == ">", range(len(lines)))
if not chunks:
options.write_separators = False
chunks = [-1]
chunks.append(len(lines))
if options.filename_rows:
row_names, n = IOTools.ReadList(open(options.filename_rows, "r"))
if options.filename_columns:
column_names, n = IOTools.ReadList(open(options.filename_columns, "r"))
for chunk in range(len(chunks) - 1):
try:
raw_matrix, row_headers, col_headers = MatlabTools.readMatrix(StringIO.StringIO("".join(lines[chunks[chunk] + 1:chunks[chunk + 1]])),
format=options.input_format,
headers=options.headers,
missing=options.missing)
except ValueError, msg:
E.warn("matrix could not be read: %s" % msg)
continue
nrows, ncols = raw_matrix.shape
E.debug("read matrix: %i x %i, %i row titles, %i colum titles" %
(nrows, ncols, len(row_headers), len(col_headers)))
parameter = 0
for method in options.methods:
matrix = numpy.reshape(numpy.array(raw_matrix), raw_matrix.shape)
if method in ("normalize-by-matrix", "subtract-matrix", "mix-matrix", "add-matrix"):
other_matrix, other_row_headers, other_col_headers = MatlabTools.ReadMatrix(open(options.parameters[parameter], "r"),
headers=options.headers)
other_nrows, other_ncols = other_matrix.shape
if options.loglevel >= 2:
options.stdlog.write("# read second matrix from %s: %i x %i, %i row titles, %i colum titles.\n" %
(options.parameters[parameter],
other_nrows, other_ncols, len(other_row_headers), len(other_col_headers)))
parameter += 1
elif method == "normalize-by-min-diagonal":
for x in range(nrows):
for y in range(ncols):
m = min(raw_matrix[x, x], raw_matrix[y, y])
if m > 0:
matrix[x, y] = raw_matrix[x, y] / m
elif method == "normalize-by-column":
if nrows != ncols:
raise "only supported for symmeric matrices."
for x in range(nrows):
for y in range(ncols):
if raw_matrix[y, y] > 0:
matrix[x, y] = raw_matrix[x, y] / raw_matrix[y, y]
elif method == "normalize-by-value":
matrix = raw_matrix / float(options.parameters[parameter])
parameter += 1
elif method == "normalize-by-row":
if nrows != ncols:
raise "only supported for symmeric matrices."
for x in range(nrows):
for y in range(ncols):
if raw_matrix[y, y] > 0:
matrix[x, y] = raw_matrix[x, y] / raw_matrix[x, x]
elif method == "subtract-first-col":
for x in range(nrows):
for y in range(ncols):
matrix[x, y] -= raw_matrix[x, 0]
elif method.startswith("normalize-by-column"):
if method.endswith("max"):
f = max
elif method.endswith("min"):
f = min
elif method.endswith("median"):
f = scipy.median
elif method.endswith("mean"):
f = scipy.mean
elif method.endswith("total"):
f = sum
for y in range(ncols):
m = f(matrix[:, y])
if m != 0:
for x in range(nrows):
matrix[x, y] = matrix[x, y] / m
elif method.startswith("normalize-by-row"):
if method.endswith("max"):
f = max
elif method.endswith("min"):
f = min
elif method.endswith("median"):
f = scipy.median
elif method.endswith("mean"):
f = scipy.mean
elif method.endswith("total"):
f = sum
for x in range(nrows):
m = f(matrix[x, :])
if m != 0:
for y in range(ncols):
matrix[x, y] = raw_matrix[x, y] / m
elif method == "negzero2value":
# set zero/negative values to a value
for x in range(nrows):
for y in range(ncols):
if matrix[x, y] <= 0:
matrix[x, y] = options.value
elif method == "minmax":
# set zero/negative values to a value
for x in range(nrows):
for y in range(ncols):
matrix[x, y], matrix[y, x] = \
min(matrix[x, y], matrix[y, x]), \
max(matrix[x, y], matrix[y, x])
elif method == "log":
# apply log to all values.
for x in range(nrows):
for y in range(ncols):
if matrix[x, y] > 0:
matrix[x, y] = math.log10(matrix[x, y])
elif method == "ln":
for x in range(nrows):
for y in range(ncols):
if matrix[x, y] > 0:
matrix[x, y] = math.log(matrix[x, y])
elif method == "transpose":
matrix = numpy.transpose(matrix)
row_headers, col_headers = col_headers, row_headers
nrows, ncols = ncols, nrows
elif method == "mul":
matrix = numpy.dot(matrix, numpy.transpose(matrix))
col_headers = row_headers
elif method == "multiply-by-value":
matrix *= options.value
elif method == "divide-by-value":
matrix /= options.value
elif method == "add-value":
matrix += options.value
elif method == "angle":
# write angles between col vectors
v1 = numpy.sqrt(numpy.sum(numpy.power(matrix, 2), 0))
matrix = numpy.dot(numpy.transpose(matrix), matrix)
row_headers = col_headers
nrows = ncols
for x in range(nrows):
for y in range(ncols):
matrix[x, y] /= v1[x] * v1[y]
elif method == "euclid":
# convert to euclidean distance matrix
matrix = numpy.zeros((ncols, ncols), numpy.float)
for c1 in range(0, ncols - 1):
for c2 in range(c1 + 1, ncols):
for r in range(0, nrows):
d = raw_matrix[r][c1] - raw_matrix[r][c2]
matrix[c1, c2] += (d * d)
matrix[c2, c1] = matrix[c1, c2]
matrix = numpy.sqrt(matrix)
row_headers = col_headers
nrows = ncols
elif method.startswith("symmetrize"):
f = method.split("-")[1]
if f == "max":
f = max
elif f == "min":
f = min
elif f == "mean":
f = lambda x, y: float(x + y) / 2
if nrows != ncols:
raise ValueError(
"symmetrize only available for symmetric matrices")
if row_headers != col_headers:
raise ValueError(
"symmetrize not available for permuted matrices")
for x in range(nrows):
for y in range(ncols):
matrix[x, y] = matrix[y, x] = f(
matrix[x, y], matrix[y, x])
elif method == "sub":
matrix = options.value - matrix
elif method in ("lower-bound", "upper-bound"):
boundary = float(options.parameters[parameter])
new_value = float(options.parameters[parameter + 1])
parameter += 2
if method == "upper-bound":
for x in range(nrows):
for y in range(ncols):
if matrix[x, y] > boundary:
matrix[x, y] = new_value
else:
for x in range(nrows):
for y in range(ncols):
if matrix[x, y] < boundary:
matrix[x, y] = new_value
elif method == "subtract-matrix":
matrix = matrix - other_matrix
elif method == "add-matrix":
matrix = matrix + other_matrix
elif method == "normalize-by-matrix":
# set 0s to 1 in the other matrix
for x in range(nrows):
for y in range(ncols):
if other_matrix[x, y] == 0:
other_matrix[x, y] = 1.0
matrix = matrix / other_matrix
elif method == "mix-matrix":
for x in range(len(other_row_headers) - 1):
for y in range(x + 1, len(other_col_headers)):
matrix[x, y] = other_matrix[x, y]
elif method == "set-diagonal":
value = float(options.parameters[parameter])
for x in range(min(nrows, ncols)):
matrix[x, x] = value
parameter += 1
elif method == "transpose":
matrix = numpy.transpose(raw_matrix)
row_headers, col_headers = col_headers, row_headers
elif method == "correspondence-analysis":
row_indices, col_indices = CorrespondenceAnalysis.GetIndices(
raw_matrix)
map_row_new2old = numpy.argsort(row_indices)
map_col_new2old = numpy.argsort(col_indices)
matrix, row_headers, col_headers = CorrespondenceAnalysis.GetPermutatedMatrix(raw_matrix,
map_row_new2old,
map_col_new2old,
row_headers=row_headers,
col_headers=col_headers)
elif method == "mask-rows":
r = set(row_names)
for x in range(len(row_headers)):
if row_headers[x] in r:
matrix[x, :] = options.value
elif method == "mask-columns":
r = set(column_names)
for x in range(len(col_headers)):
if col_headers[x] in r:
matrix[:, x] = options.value
elif method == "mask-rows-and-columns":
r = set(row_names)
c = set(column_names)
for x in range(len(row_headers)):
for y in range(len(col_headers)):
if row_headers[x] in r and col_headers[y] in c:
matrix[x, y] = options.value
raw_matrix = numpy.reshape(numpy.array(matrix), matrix.shape)
else:
# for simple re-formatting jobs
matrix = raw_matrix
if options.write_separators:
options.stdout.write(lines[chunks[chunk]])
MatlabTools.writeMatrix(sys.stdout, matrix,
value_format=options.format,
format=options.output_format,
row_headers=row_headers,
col_headers=col_headers)
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
[
"andreas.heger@gmail.com"
] |
andreas.heger@gmail.com
|
ecacb54265cd97d2192d5166ef0b87a3f1f27cb8
|
48cfcd07d07919da95d666c26d30f1b979666b1e
|
/env/bin/pilfont.py
|
8ea7d0d895cf1f35459b989947995924c3d97857
|
[] |
no_license
|
asfcarter/vvm
|
4d390047ae77ec3030d7b64773f7f0bf519a3c7d
|
dd072dadc8794fc4354700285759571441758351
|
refs/heads/master
| 2021-01-20T09:01:37.222878
| 2015-05-05T10:18:53
| 2015-05-05T10:18:53
| 34,862,426
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,039
|
py
|
#!/home/asfcarter/vvm/env/bin/python
#
# The Python Imaging Library
# $Id$
#
# PIL raster font compiler
#
# history:
# 1997-08-25 fl created
# 2002-03-10 fl use "from PIL import"
#
from __future__ import print_function
VERSION = "0.4"
import glob, sys
# drivers
from PIL import BdfFontFile
from PIL import PcfFontFile
if len(sys.argv) <= 1:
print("PILFONT", VERSION, "-- PIL font compiler.")
print()
print("Usage: pilfont fontfiles...")
print()
print("Convert given font files to the PIL raster font format.")
print("This version of pilfont supports X BDF and PCF fonts.")
sys.exit(1)
files = []
for f in sys.argv[1:]:
files = files + glob.glob(f)
for f in files:
print(f + "...", end=' ')
try:
fp = open(f, "rb")
try:
p = PcfFontFile.PcfFontFile(fp)
except SyntaxError:
fp.seek(0)
p = BdfFontFile.BdfFontFile(fp)
p.save(f)
except (SyntaxError, IOError):
print("failed")
else:
print("OK")
|
[
"asfcarter@yahoo.co.uk"
] |
asfcarter@yahoo.co.uk
|
722c725f1db986f213a767b15664d2a33ca6efd9
|
0cf1f386f8e396f1ef0b1d558feb42a1c4c9c281
|
/myapp/store/__init__.py
|
dafe5cb4e9d3af981a278a59fdf4144ccdfa739d
|
[] |
no_license
|
leomorpho/flask-website
|
5583add9cef929c012c830c9b9006432496b35fb
|
4ca91a37f1424c5f8bcdddc7d1e4ef5b97f86605
|
refs/heads/master
| 2023-05-13T10:32:27.734249
| 2019-06-17T20:36:50
| 2019-06-17T20:36:50
| 191,808,022
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 95
|
py
|
from flask import Blueprint
bp = Blueprint('store', __name__)
from myapp.store import routes
|
[
"leonard.audibert@pm.me"
] |
leonard.audibert@pm.me
|
75a15a27dd983c7914437cc5b9cb7b8699823b7a
|
69117d2879189663a31c5f642e33cf9d1dd6d6a3
|
/apps/staff/urls.py
|
59d0c436ac2bba32ab6fb3544f235afca1a42493
|
[] |
no_license
|
code-axassin/risha-professional-studies
|
70f35052cd7237d50982e6a7a7efa3130588b9c7
|
550ff690fb62b2eda241094394e580336a9ad416
|
refs/heads/main
| 2023-06-11T13:55:55.893574
| 2021-07-05T11:45:56
| 2021-07-05T11:45:56
| 382,754,489
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 860
|
py
|
from django.urls import path
from apps.staff.api import api_delete_staff
from apps.staff.views import AllStaffListView, add_staff, view_staff, edit_staff, StaffUpdate, StaffCreate, staff_search
urlpatterns = [
path('', AllStaffListView.as_view(), name="all_staff"),
path('add/', add_staff, name="add_staff"),
#path('add/save/', add_staff_save, name="add_staff_save"),
path('add/save/', StaffCreate.as_view(), name="add_staff_save"),
path('view/<int:staff_id>/', view_staff, name="view_staff"),
path('edit/<int:staff_id>/', edit_staff, name="edit_staff"),
path('edit/save/<int:id>/', StaffUpdate.as_view(), name="edit_staff_save"),
    # path('', view_staff, name="view_staff"),  # unreachable: shadowed by the root route above and missing the required staff_id argument
path('api/delete_staff/<int:staff_id>/', api_delete_staff, name="api_delete_staff"),
path('staff-search/', staff_search, name="staff_search"),
]
|
[
"76621558+code-axassin@users.noreply.github.com"
] |
76621558+code-axassin@users.noreply.github.com
|
adcaf241cc21e8b53902c8fd088e45563c540721
|
68f3146c44a4ef775cddd7418c31a14f5d4d3232
|
/cc_gen.py
|
74ae5ff090f2bb2b97b544da50127ca7ffc30d53
|
[
"MIT"
] |
permissive
|
geixD/jardingUWU
|
1e4c2aa73226d8ab08ea72c33d433dbce78042fb
|
f45f852f1a3f7073e10ab55b4d2b70e47960c16d
|
refs/heads/main
| 2023-04-02T00:18:01.718837
| 2021-04-18T03:06:34
| 2021-04-18T03:06:34
| 358,755,309
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,854
|
py
|
import os, time, sys, datetime
from random import randint
from huepy import *
__version__ = "1.3.6"
def cc_gen(bin):
cc = ""
if len(bin) != 16:
while len(bin) != 16:
bin += 'x'
else:
pass
if len(bin) == 16:
for x in range(15):
if bin[x] in ("0", "1", "2", "3", "4", "5", "6", "7", "8", "9"):
cc += bin[x]
continue
elif bin[x] in ('x'):
cc += str(randint(0,9))
else:
print(bad(f"Invalid Format Bin: {bin}"))
sys.exit()
for i in range(10):
check_cc = cc
check_cc += str(i)
if ValidCC(check_cc):
cc = check_cc
break
else:
check_cc = cc
else:
print(bad(f"Invalid Format BIN: {bin}"))
return(cc)
def ValidCC(card_number): # Luhn Algorithm
sum = 0
num_digits = len(card_number)
oddeven = num_digits & 1
for count in range(0, num_digits):
digit = int(card_number[count])
if not (( count & 1 ) ^ oddeven ):
digit = digit * 2
if digit > 9:
digit = digit - 9
sum = sum + digit
return ( (sum % 10) == 0 )
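# Added Luhn sanity check (well-known test numbers, not real cards):
#   ValidCC("4111111111111111")  # -> True
#   ValidCC("4111111111111112")  # -> False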
def dategen(month, year):
if month == 'rnd' and year == 'rnd':
now = datetime.datetime.now()
month = int(randint(1, 12))
current_year = str(now.year)
year = str(randint(int(current_year[-2:]) + 1, int(current_year[-2:]) + 6))
if month < 10:
month = '0'+str(month)
else:
month = str(month)
date = month + "|" + year
return date
elif month == 'rnd' and year != 'rnd':
now = datetime.datetime.now()
month = int(randint(1,12))
if month < 10:
month = '0'+str(month)
else:
month = str(month)
date = month + "|" + year
return date
elif month != 'rnd' and year == 'rnd':
now = datetime.datetime.now()
current_year = str(now.year)
year = str(randint(int(current_year[-2:]) + 1, int(current_year[-2:]) + 6))
date = month + "|" + year
return date
elif month != 'rnd' and year != 'rnd':
date = month + "|" + year
return date
def ccvgen(cvv):
if cvv == 'rnd':
ccv = ""
num = randint(10,999)
if num < 100:
ccv = "0" + str(num)
else:
ccv = str(num)
return(ccv)
else:
return(cvv)
def clean():
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
def banner():
print(purple("""
:::.
.XXr;;;rrX7
iWa, :@S
rM .i;;i;;. Mi
@ aXi:,,,iZ .M
i2 X. i: @i
a ir :r. S ,M
Xi X.MM2. ,2MM8i 2Z
;S a;rXMMM @MMSiB. Xr
,8 2i.;:; : : ii;72 i, V V I T T T T T T T T T T O O O
B .2 , , X. , V V I T T O O
0:.SX ;ai, 7 :: iS. 8i i; V V I T T O O
2X 7 rr.iXi,:rXX;,:r;i;iiX 2r V I T T O O O
iW X :;,.7.. ..,.;i;;: iX M.
M X .iiX::i::S7S, X. Br
M , ;,XX7SXXX,, S M
S0 i : : .. S Z2
@M: .:i ,: : ;;;::iiM0
:72XXBW88SXXi :i..;X2SXXrSXrrS72Xi
:7ri: r:i;r;, ,07, ..:; ,ii.
B ,;X2 ri
.a X :2
    CC Generator \n"""))
def main():
clean()
banner()
try:
seses = int(input(info("Quantity > ")))
counter = 0
BIN = input(info("BIN > "))
MONTH = input(info("Month [MM / RND] > ")).lower()
YEAR = input(info("Year [YY / RND] > ")).lower()
CVV = input(info("CVV [CVV / RND] > ")).lower()
month = MONTH
year = YEAR
clean()
banner()
while counter != seses:
print(good(cc_gen(BIN) + "|" + dategen(month, year) + "|" + ccvgen(CVV)))
counter +=1
    except Exception:
        print(bad(yellow("An Error Occurred...")))
        sys.exit()
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
b6c63959eba8dbb8c0b937893309c42ae2c5647d
|
84bf3311d6b4977c1a86b9224e06092a9df5ab40
|
/solutions/circus.py
|
1bab03b6e6018fe7e0f56ad643ddd850c27bdc72
|
[] |
no_license
|
walshification/advent-of-code-2017
|
ce2416d0121b085e43a6915938455be81db46bb0
|
afb5ea837af8a2e1a9f681f675172707452ef70c
|
refs/heads/master
| 2021-09-09T22:00:28.712406
| 2018-01-09T02:41:02
| 2018-01-09T02:41:02
| 112,861,940
| 0
| 0
| null | 2018-03-20T00:21:40
| 2017-12-02T17:25:31
|
Python
|
UTF-8
|
Python
| false
| false
| 4,107
|
py
|
import re
from collections import defaultdict
import yaml
class Program:
def __init__(self, program):
children = []
if '->' in program:
program, supports = program.split(' -> ')
children.extend(supports.split(', '))
        name = re.search(r'\w+|$', program).group()
        weight = re.search(r'\d+|$', program).group()
self.name = name
self.weight = int(weight)
self.children = children
self.parent = None
self.total_weight = int(weight)
class Pyramid:
def __init__(self, programs):
self.programs = {program.name: program for program in programs}
        for name, program in self.programs.items():
            # a program's total weight covers its entire subtree, so walk all
            # descendants rather than only the direct children
            program.total_weight = program.weight + self._supported_weight([program])
self._root = None
self._unbalanced_program = None
@classmethod
def build(cls, schemas):
programs = [Program(schema) for schema in schemas]
for program in programs:
for other in programs:
if program.name in other.children:
program.parent = other.name
return cls(programs)
@property
def root(self):
if self._root is None:
self._root = self._dig_up_root(self.programs[next(iter(self.programs))])
return self._root
@property
def unbalanced_program(self):
if self._unbalanced_program is None:
self._unbalanced_program = self._find_unbalanced_program(self.root)
return self._unbalanced_program
@property
def weight_to_balance(self):
sibling = [
self.programs[child]
for child in self.programs[self.unbalanced_program.parent].children
if child != self.unbalanced_program.name
][0]
difference = sibling.total_weight - self.unbalanced_program.total_weight
return self.unbalanced_program.weight + difference
def is_balanced(self, program_name):
program = self.programs[program_name]
if not program.children:
return True
children_weights = set(
self.programs[child].total_weight for child in program.children
)
return len(children_weights) < 2
def _dig_up_root(self, arbitrary_program):
if not arbitrary_program.parent:
return arbitrary_program
return self._dig_up_root(self.programs[arbitrary_program.parent])
def _supported_weight(self, programs):
descendants = self._get_descendants(programs)
return sum(descendant.weight for descendant in descendants)
def _get_descendants(self, programs):
if not programs:
return []
children = [
self.programs[child]
for program in programs
for child in program.children
]
return children + self._get_descendants(children)
def _find_unbalanced_program(self, root):
unbalanced = [
program.name for program in [root] if not self.is_balanced(program.name)
]
if unbalanced:
child_to_weights_mapping = {
child: self.programs[child].total_weight
for child in self.programs[unbalanced[0]].children
}
weight_count = defaultdict(int)
for weight in child_to_weights_mapping.values():
weight_count[weight] += 1
oddball = [
child
for child, weight in child_to_weights_mapping.items()
if weight_count[weight] == 1 and self.programs[child].children
]
if oddball:
return self._find_unbalanced_program(self.programs[oddball[0]])
else:
return root
return root
if __name__ == '__main__':
with open('problem_inputs/circus.yaml', 'r') as programs:
test_input = yaml.safe_load(programs)
pyramid = Pyramid.build(test_input)
print('Part One:', pyramid.root.name)
print('Part Two:', pyramid.weight_to_balance, '1072')
|
[
"walshification@gmail.com"
] |
walshification@gmail.com
|
9afa4e20081e1cfa380b6474b33c811305e13c9a
|
29623d43b2ab99e55a5d102e8d718015053073a3
|
/aliyun-python-sdk-mts/aliyunsdkmts/request/v20140618/QueryCensorJobListRequest.py
|
8bb5ce2996660555cc680f7e5f65657a90cfa511
|
[
"Apache-2.0"
] |
permissive
|
zxsted/aliyun-openapi-python-sdk
|
ed41e1d93c63557ecfbcffb6c84f87d4ed2a7f59
|
a539d6e268fc07f314c5114c21ced4c8ead51dee
|
refs/heads/master
| 2021-05-13T19:58:10.000697
| 2018-01-09T11:16:55
| 2018-01-09T11:16:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,894
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class QueryCensorJobListRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Mts', '2014-06-18', 'QueryCensorJobList')
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_JobIds(self):
return self.get_query_params().get('JobIds')
def set_JobIds(self,JobIds):
self.add_query_param('JobIds',JobIds)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
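# Example usage (added sketch; the client construction and job IDs are
# hypothetical):
#   request = QueryCensorJobListRequest()
#   request.set_JobIds("job-id-1,job-id-2")
#   # response = client.do_action_with_exception(request)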
|
[
"haowei.yao@alibaba-inc.com"
] |
haowei.yao@alibaba-inc.com
|
2bba30e05df174ce2a696b492bde945a1261ad21
|
cf302b8399ebfaf8af0baf905b2e928af604095d
|
/pages/migrations/0003_auto_20200523_0946.py
|
2be20337d701ddb85d4f3d33fbda1370b16183b3
|
[] |
no_license
|
FabiolaLa/Webempresa
|
2dbec95417b218b89a85c819173e03f3c9ebe355
|
5f8786732be82371262632e511151989b1dd81f8
|
refs/heads/master
| 2022-08-26T00:21:24.654470
| 2020-05-23T19:24:21
| 2020-05-23T19:24:21
| 266,407,188
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 422
|
py
|
# Generated by Django 3.0.3 on 2020-05-23 14:46
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('pages', '0002_auto_20200522_1859'),
]
operations = [
migrations.AlterField(
model_name='page',
name='content',
field=ckeditor.fields.RichTextField(verbose_name='Contenido'),
),
]
|
[
"“fabi20laureano@gmail.com”"
] |
“fabi20laureano@gmail.com”
|
007d6bb2431b517f4505925708b3e6528096adbd
|
cd5e93bc606960495287976c0d2c8e5df3c6375d
|
/main.py
|
f44f31b7b751ef252ac839a6fb5030b02dd53661
|
[] |
no_license
|
luluiz01/ProjetoMineradorVagas
|
f5defb2eecfa2f7f291816ff5d2adcf7e7e99079
|
b89bf8c1b182017165f5a5a9d90b3756aa23060f
|
refs/heads/master
| 2020-09-05T03:07:10.075306
| 2020-01-25T18:15:20
| 2020-01-25T18:15:20
| 219,964,498
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 897
|
py
|
import funcoes as f
import bot
import telepot
import pprint
import scraping as sc
#print(soup)
# url = 'https://www.apinfo.com/apinfo/inc/list4.cfm'
# soup = f.pega_pagina(url)
# print(soup)
minerar = True
if minerar:
nome_arquivo = 'apinfo_pagina_1.html'
soup = f.carrega_html(nome_arquivo)
titulo = f.filtrarPeloTitulo(soup)
#print(titulo)
datas = f.filtrarPelaData(soup)
#print(datas)
cidade = f.filtrarPelaCidade(soup)
#print(cidade)
codigo = f.filtrarPeloCodigo(soup)
#print(codigo)
link = f.filtrarPeloLink(soup)
#print(link)
descricao = f.filtrarPelaDescricao(soup)
#print(descricao)
#token = bot.get_bot_info()
#print(token)
#update = bot.get_bot_update()
#print(update)
#mensagem = bot.send_messege()
vagas = sc.pega_vagas(nome_arquivo)
print("-----------------------------------")
#print(vagas)
dicio = sc.vagas_to_dict(vagas)
print(dicio)
bot.send_messege()
|
[
"luiz.pventura@gmail.com"
] |
luiz.pventura@gmail.com
|
6383999b044cd69bf47c5f63f9a4dd3dc82f14b0
|
0cac657be64b0e01b20a6c8283ed89ad03a1deeb
|
/Capitulo 10/final.py
|
d65a4c1cf80720c3324fa26595a84926ea742d4a
|
[] |
no_license
|
RenanMarcell/Intro-programacao-python
|
5b493cdfbed5a97e74e1be7bc60ef192e700b308
|
f1827d8ad52a83d0241f1fd408415d9c5f950f8a
|
refs/heads/master
| 2021-09-03T09:09:57.461812
| 2018-01-07T23:16:44
| 2018-01-07T23:16:44
| 114,700,062
| 0
| 0
| null | 2018-01-07T23:16:45
| 2017-12-19T00:08:33
|
Python
|
UTF-8
|
Python
| false
| false
| 8,539
|
py
|
import sys
import pickle
from functools import total_ordering
from listagem_10_13 import ListaUnica
from nome import Nome
from tipotelefone import TipoTelefone
from telefone import Telefone
from agenda import TiposTelefone, Agenda, DadoAgenda, Telefones
def nulo_ou_vazio(texto):
return texto is None or not texto.strip()
def valida_faixa_inteiro(pergunta, inicio, fim, padrao=None):
while True:
try:
entrada = input(pergunta)
if nulo_ou_vazio(entrada) and padrao is not None:
entrada = padrao
valor = int(entrada)
if inicio <= valor <= fim:
return valor
except ValueError:
print('Valor invalido, favor digitar entre {} e {}'.format(inicio, fim))
def valida_faixa_inteiro_ou_branco(pergunta, inicio, fim):
while True:
try:
entrada = input(pergunta)
if nulo_ou_vazio(entrada):
return None
valor = int(entrada)
if inicio <= valor <= fim:
return valor
except ValueError:
print('Valor invalido, favor digitar entre {} e {}'.format(inicio, fim))
class Menu:
def __init__(self):
self.opcoes = [["Sair", None]]
def adiciona_opcao(self, nome, funcao):
self.opcoes.append([nome, funcao])
def exibe(self):
print("====")
print("Menu")
print("====\n")
for i, opcao in enumerate(self.opcoes):
print("[{0}] - {1}".format(i, opcao[0]))
print()
def execute(self):
while True:
self.exibe()
escolha = valida_faixa_inteiro(
"Escolha uma opção: ",
0,
                len(self.opcoes) - 1
)
if escolha == 0:
break
self.opcoes[escolha][1]()
class AppAgenda:
@staticmethod
def pede_nome():
return input("Nome: ")
@staticmethod
def pede_telefone():
return input("Telefone: ")
@staticmethod
def mostra_dados(dados):
print("Nome: {}".format(dados.nome))
        for telefone in dados.telefones:
print("Telefone: {}".format(telefone))
print()
@staticmethod
def mostra_dados_telefone(dados):
print("Nome: {}".format(dados.nome))
        for i, telefone in enumerate(dados.telefones):
print("{0} - Telefone: {1}".format(i, telefone))
print()
@staticmethod
def pede_nome_arquivo():
return input("Nome do arquivo: ")
def __init__(self):
self.agenda = Agenda()
self.agenda.adiciona_tipo("Celular")
self.agenda.adiciona_tipo("Residencia")
self.agenda.adiciona_tipo("Trabalho")
self.agenda.adiciona_tipo("Fax")
self.menu = Menu()
self.menu.adiciona_opcao("Novo", self.novo)
self.menu.adiciona_opcao("Altera", self.altera)
self.menu.adiciona_opcao("Apaga", self.apaga)
self.menu.adiciona_opcao("Lista", self.lista)
self.menu.adiciona_opcao("Grava", self.grava)
self.menu.adiciona_opcao("Le", self.le)
self.menu.adiciona_opcao("Ordena", self.ordena)
self.ultimo_nome = None
def pede_tipo_telefone(self, padrao=None):
for i, tipo in enumerate(self.agenda.tipos_telefone):
print(" {0} - {1} ".format(i, tipo), end=None)
t = valida_faixa_inteiro("Tipo: ", 0, len(self.agenda.tipos_telefone) - 1, padrao)
return self.agenda.tipos_telefone[t]
def pesquisa(self, nome):
dado = self.agenda.pesquisa_nome(nome)
return dado
def novo(self):
novo = AppAgenda.pede_nome()
if nulo_ou_vazio(novo):
return
nome = Nome(novo)
if self.pesquisa(nome) is not None:
print("Nome já existe!")
return
registro = DadoAgenda(nome)
self.menu_telefones(registro)
self.agenda.adiciona(registro)
def apaga(self):
        if len(self.agenda) == 0:
            print("Agenda vazia, nada a apagar")
            return
nome = AppAgenda.pede_nome()
if nulo_ou_vazio(nome):
return
p = self.pesquisa(nome)
if p is not None:
self.agenda.remove(p)
print("Apagado. A agenda agora possui apenas: {} registros".format(len(self.agenda)))
else:
print('Nome não encontrado')
def altera(self):
        if len(self.agenda) == 0:
            print("Agenda vazia, nada a alterar")
            return
nome = AppAgenda.pede_nome()
if nulo_ou_vazio(nome):
return
p = self.pesquisa(nome)
if p is not None:
print("\nEncontrado: \n")
AppAgenda.mostra_dados(p)
print("Digite enter caso não queira alterar o nome")
novo = AppAgenda.pede_nome()
if not nulo_ou_vazio(novo):
p.nome = Nome(novo)
self.menu_telefones(p)
else:
print("Nome não encontrado")
def menu_telefones(self, dados):
while True:
print("\nEditando telefones\n")
AppAgenda.mostra_dados_telefone(dados)
if len(dados.telefones) > 0:
print("\n[A] - alterar\n[D] - Apagar\n", end="")
print("[N] - novo\n[S] - Sair\n")
operacao = input("Escolha uma operação")
operacao = operacao.lower()
if operacao not in ['a', 'd', 'n', 's']:
print("Operação inválida, digite A, D, N ou S")
continue
if operacao == 'a' and len(dados.telefones) > 0:
self.altera_telefones(dados)
elif operacao == 'd' and len(dados.telefones) > 0:
self.apaga_telefone(dados)
elif operacao == 'n':
self.novo_telefone(dados)
elif operacao == 's':
break
def novo_telefone(self, dados):
telefone = AppAgenda.pede_telefone()
if nulo_ou_vazio(telefone):
return
        if dados.pesquisa_telefone(telefone) is not None:
            print("Telefone já existe")
            return
tipo = self.pede_tipo_telefone()
dados.telefones.adiciona(Telefone(telefone, tipo))
def apaga_telefone(self, dados):
t = valida_faixa_inteiro_ou_branco(
"Digite a posição do numero a apagar, enter para sair:",
0,
            len(dados.telefones) - 1
)
if t is None:
return
        dados.telefones.remove(dados.telefones[t])
def altera_telefones(self, dados):
t = valida_faixa_inteiro_ou_branco(
"Digite a posição do numero a alterar, enter para sair:",
0,
            len(dados.telefones) - 1
)
if t is None:
return
telefone = dados.telefones[t]
print("Telefone: %s" % telefone)
print("Digite enter caso não queira alterar o número")
novotelefone = AppAgenda.pede_telefone()
if not nulo_ou_vazio(novotelefone):
telefone.numero = novotelefone
print("Digite enter caso não queira alterar o tipo")
telefone.tipo = self.pede_tipo_telefone(
self.agenda.tipos_telefone.pesquisa(telefone.tipo)
)
def lista(self):
print("\nAgenda")
print("-"*60)
for e in self.agenda:
AppAgenda.mostra_dados(e)
print("-"*60)
def le(self, nome_arquivo=None):
if nome_arquivo is None:
nome_arquivo = AppAgenda.pede_nome_arquivo()
if nulo_ou_vazio(nome_arquivo):
return
with open(nome_arquivo, 'rb') as arquivo:
self.agenda = pickle.load(arquivo)
self.ultimo_nome = nome_arquivo
def ordena(self):
self.agenda.ordena()
print("\nAgenda ordenada")
def grava(self):
if self.ultimo_nome is not None:
print("Ultimo nome ultilizado foi '%s'" % self.ultimo_nome)
print("Digite enter caso queira utilizar o mesmo nome")
nome_arquivo = AppAgenda.pede_nome_arquivo()
if nulo_ou_vazio(nome_arquivo):
if self.ultimo_nome is not None:
nome_arquivo = self.ultimo_nome
else:
return
with open(nome_arquivo, 'wb') as arquivo:
pickle.dump(self.agenda, arquivo)
def execute(self):
self.menu.execute()
if __name__ == "__main__":
app = AppAgenda()
if len(sys.argv) > 1:
app.le(sys.argv[1])
app.execute()
|
[
"renanm@outlook.com"
] |
renanm@outlook.com
|
16526d6d991321e879d46e8d8cd308ef7e4677b9
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/artificial/transf_Difference/trend_MovingMedian/cycle_5/ar_/test_artificial_1024_Difference_MovingMedian_5__20.py
|
f424dd4077963cad7c75f615bce42289c823621a
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970
| 2018-12-17T22:08:11
| 2018-12-17T22:08:11
| 137,104,552
| 0
| 0
|
BSD-3-Clause
| 2018-12-17T22:08:12
| 2018-06-12T17:15:43
|
Python
|
UTF-8
|
Python
| false
| false
| 274
|
py
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 5, transform = "Difference", sigma = 0.0, exog_count = 20, ar_order = 0);
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
5f8ceb5c2e60994ad6cc254a9719b424f114dd9f
|
76452f3645acad331878a9dfa6ca3021515cd155
|
/utils.py
|
f1916adb742815d9dd6f82fa39919ad1d9311909
|
[
"MIT"
] |
permissive
|
nishatdhillon/CarND-Behavioral-Cloning
|
6b436136612512e36cd30e977c4b183225a821c2
|
0c5cdd253436799e5590a14d7f843f7108b50d90
|
refs/heads/master
| 2021-01-25T11:14:50.478986
| 2017-04-10T00:38:55
| 2017-04-10T00:38:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,211
|
py
|
import numpy as np
from scipy.misc import imread, imresize
def read_imgs(img_paths):
imgs = np.empty([len(img_paths), 160, 320, 3])
for i, path in enumerate(img_paths):
imgs[i] = imread(path)
return imgs
def resize(imgs, shape=(32, 16, 3)):
"""
Resize images to shape.
"""
height, width, channels = shape
imgs_resized = np.empty([len(imgs), height, width, channels])
for i, img in enumerate(imgs):
imgs_resized[i] = imresize(img, shape)
return imgs_resized
def rgb2gray(imgs):
"""
Convert images to grayscale.
"""
return np.mean(imgs, axis=3, keepdims=True)
def normalize(imgs):
"""
Normalize images between [-1, 1].
"""
return imgs / (255.0 / 2) - 1
def preprocess(imgs):
imgs_processed = resize(imgs)
imgs_processed = rgb2gray(imgs_processed)
imgs_processed = normalize(imgs_processed)
return imgs_processed
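# Example (added sketch; file names are hypothetical):
#   raw = read_imgs(["center_1.jpg", "center_2.jpg"])  # shape (2, 160, 320, 3)
#   x = preprocess(raw)                                # shape (2, 32, 16, 1), values in [-1, 1]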
def random_flip(imgs, angles):
"""
Augment the data by randomly flipping some angles / images horizontally.
"""
new_imgs = np.empty_like(imgs)
new_angles = np.empty_like(angles)
for i, (img, angle) in enumerate(zip(imgs, angles)):
if np.random.choice(2):
new_imgs[i] = np.fliplr(img)
new_angles[i] = angle * -1
else:
new_imgs[i] = img
new_angles[i] = angle
return new_imgs, new_angles
def augment(imgs, angles):
imgs_augmented, angles_augmented = random_flip(imgs, angles)
return imgs_augmented, angles_augmented
def gen_batches(imgs, angles, batch_size):
"""
Generates random batches of the input data.
    :param imgs: Paths of the input images; each batch is read from disk lazily.
:param angles: The steering angles associated with each image.
:param batch_size: The size of each minibatch.
:yield: A tuple (images, angles), where both images and angles have batch_size elements.
"""
num_elts = len(imgs)
while True:
        indices = np.random.choice(num_elts, batch_size)
        batch_imgs_raw, angles_raw = read_imgs(imgs[indices]), angles[indices].astype(float)
batch_imgs, batch_angles = augment(preprocess(batch_imgs_raw), angles_raw)
yield batch_imgs, batch_angles
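# Example (added sketch; `paths` and `steering` are hypothetical numpy arrays
# of image paths and float steering angles):
#   batches = gen_batches(paths, steering, batch_size=64)
#   batch_imgs, batch_angles = next(batches)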
|
[
"matthew_cooper@brown.edu"
] |
matthew_cooper@brown.edu
|
9a822e8ca6f1905ffc8bfe053be72103d6bdd50d
|
16574605a55cf3539e171f66f03938807065d8d9
|
/ptocr/model/architectures/det_model.py
|
0f915139bba09c8bf06175ff908b4ba344daae57
|
[] |
no_license
|
simplew2011/OCR_ZOO
|
7a0e83cf0c7630f469aaac8994144a5bf13bbab6
|
8d1da013a9aa9ca53272dc030c8bfcbf25db6738
|
refs/heads/master
| 2023-01-12T10:06:53.125201
| 2020-11-02T10:00:06
| 2020-11-02T10:00:06
| 308,555,691
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,764
|
py
|
# -*- coding:utf-8 _*-
"""
@author:fxw
@file: det_model.py
@time: 2020/08/07
"""
import torch
import torch.nn as nn
from .. import create_module
class DetModel(nn.Module):
def __init__(self, config):
super(DetModel, self).__init__()
self.algorithm = config['base']['algorithm']
self.backbone = create_module(config['backbone']['function'])(config['base']['pretrained'])
if(self.algorithm == 'SAST'):
self.head = create_module(config['head']['function'])(config['base']['with_attention'])
else:
self.head = create_module(config['head']['function']) \
(config['base']['in_channels'],
config['base']['inner_channels'])
if (config['base']['algorithm']) == 'DB':
self.seg_out = create_module(config['segout']['function'])(config['base']['inner_channels'],
config['base']['k'],
config['base']['adaptive'])
elif (config['base']['algorithm']) == 'PAN':
self.seg_out = create_module(config['segout']['function'])(config['base']['inner_channels'],
config['base']['classes'])
elif (config['base']['algorithm']) == 'PSE':
self.seg_out = create_module(config['segout']['function'])(config['base']['inner_channels'],
config['base']['classes'])
elif (config['base']['algorithm']) == 'SAST':
self.seg_out = create_module(config['segout']['function'])()
else:
            raise ValueError('unsupported algorithm: %s' % self.algorithm)
def forward(self, data):
if self.training:
if self.algorithm == "DB":
img, gt, gt_mask, thresh_map, thresh_mask = data
if torch.cuda.is_available():
img, gt, gt_mask, thresh_map, thresh_mask = \
img.cuda(), gt.cuda(), gt_mask.cuda(), thresh_map.cuda(), thresh_mask.cuda()
gt_batch = dict(gt=gt)
gt_batch['mask'] = gt_mask
gt_batch['thresh_map'] = thresh_map
gt_batch['thresh_mask'] = thresh_mask
elif self.algorithm == "PSE":
img, gt_text, gt_kernels, train_mask = data
if torch.cuda.is_available():
img, gt_text, gt_kernels, train_mask = \
img.cuda(), gt_text.cuda(), gt_kernels.cuda(), train_mask.cuda()
gt_batch = dict(gt_text=gt_text)
gt_batch['gt_kernel'] = gt_kernels
gt_batch['train_mask'] = train_mask
elif self.algorithm == "PAN":
img, gt_text, gt_text_key, gt_kernel, gt_kernel_key, train_mask = data
if torch.cuda.is_available():
img, gt_text, gt_text_key, gt_kernel, gt_kernel_key, train_mask = \
img.cuda(), gt_text.cuda(), gt_text_key.cuda(), gt_kernel.cuda(), gt_kernel_key.cuda(), train_mask.cuda()
gt_batch = dict(gt_text=gt_text)
gt_batch['gt_text_key'] = gt_text_key
gt_batch['gt_kernel'] = gt_kernel
gt_batch['gt_kernel_key'] = gt_kernel_key
gt_batch['train_mask'] = train_mask
elif self.algorithm == "SAST":
img, score_map, border_map, training_mask, tvo_map, tco_map = data
if torch.cuda.is_available():
img, score_map, border_map, training_mask, tvo_map, tco_map = \
img.cuda(), score_map.cuda(), border_map.cuda(), training_mask.cuda(), tvo_map.cuda(), tco_map.cuda()
gt_batch = dict(input_score=score_map)
gt_batch['input_border'] = border_map
gt_batch['input_mask'] = training_mask
gt_batch['input_tvo'] = tvo_map
gt_batch['input_tco'] = tco_map
else:
img = data
x = self.backbone(img)
x = self.head(x)
x = self.seg_out(x, img)
if self.training:
return x, gt_batch
return x
class DetLoss(nn.Module):
def __init__(self, config):
super(DetLoss, self).__init__()
self.algorithm = config['base']['algorithm']
if (config['base']['algorithm']) == 'DB':
self.loss = create_module(config['loss']['function'])(config['loss']['l1_scale'],
config['loss']['bce_scale'])
elif (config['base']['algorithm']) == 'PAN':
self.loss = create_module(config['loss']['function'])(config['loss']['kernel_rate'],
config['loss']['agg_dis_rate'])
elif (config['base']['algorithm']) == 'PSE':
self.loss = create_module(config['loss']['function'])(config['loss']['text_tatio'])
elif (config['base']['algorithm']) == 'SAST':
self.loss = create_module(config['loss']['function'])(config['loss']['tvo_lw'],
config['loss']['tco_lw'],
config['loss']['score_lw'],
config['loss']['border_lw']
)
else:
            raise ValueError('unsupported algorithm: %s' % self.algorithm)
def forward(self, pre_batch, gt_batch):
return self.loss(pre_batch, gt_batch)
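# Sketch of the config layout these classes expect (keys inferred from the
# lookups above; the concrete values shown are hypothetical):
#   config = {
#       'base': {'algorithm': 'DB', 'pretrained': True, 'in_channels': 2048,
#                'inner_channels': 256, 'k': 50, 'adaptive': True},
#       'backbone': {'function': '...'},
#       'head': {'function': '...'},
#       'segout': {'function': '...'},
#       'loss': {'function': '...', 'l1_scale': 10, 'bce_scale': 5},
#   }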
|
[
"simplew2011@163.com"
] |
simplew2011@163.com
|
6c93602e8ac313fc510dc327883b106814e788e8
|
aa2d3f731f1ebe09578e828e30f6f2b4a9ad75fd
|
/bhaskara.py
|
ee4e467c7ebf44467676f08a9dc74fd83cb9737e
|
[
"Apache-2.0"
] |
permissive
|
cleuton/pythondrops
|
2d980f4975dbd36c4e4fe18929b62a87925e1982
|
297f38b2f358ea7621befe9c82a4415243c2978f
|
refs/heads/master
| 2022-05-01T01:14:33.758024
| 2022-04-16T11:55:40
| 2022-04-16T11:55:40
| 126,306,109
| 11
| 6
|
Apache-2.0
| 2020-10-13T12:23:08
| 2018-03-22T08:48:06
|
Python
|
UTF-8
|
Python
| false
| false
| 443
|
py
|
import math
def calc_delta(a,b,c):
delta = b**2 - 4 * a * c
return delta
a = float(input("a: "))
b = float(input("b: "))
c = float(input("c: "))
delta = calc_delta(a, b, c)
if delta > 0:
x1 = (-b + math.sqrt(delta)) / (2*a)
x2 = (-b - math.sqrt(delta)) / (2*a)
print(f"x1: {x1} e x2: {x2}")
elif delta == 0:
x = (-b + math.sqrt(delta)) / (2*a)
print(f"x: {x}")
else:
print("Não possui raízes reais")
|
[
"cleuton@tecgraf.puc-rio.br"
] |
cleuton@tecgraf.puc-rio.br
|
b261ab4e92228c3878cdd7b320205880aa010143
|
6659b5530db19461f32456903da617cb2f819526
|
/House.py
|
1f27accd8fb3070e9f00f3131a9d10533eb1282a
|
[] |
no_license
|
Blikhmania/Draw-using-pyton
|
5365358533bdb44aa18696350bf6625411214f33
|
ebeb10642094ec76f1daf4fee9a696d274410f87
|
refs/heads/master
| 2022-12-15T23:44:33.865870
| 2020-09-12T14:06:53
| 2020-09-12T14:06:53
| 294,934,630
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,318
|
py
|
import pygame, sys
from pygame.locals import *
pygame.init()
# set up the window
DISPLAYSURF = pygame.display.set_mode((500, 400), 0, 32)
pygame.display.set_caption('Drawing')
# set up the colors
#COLOR R G B
AQUA = (000, 255, 255)
BLACK = (000, 000, 000)
BLUE = ( 0, 0, 200)
GRAY = (128, 128, 128)
GREEN = (000, 255, 000)
LIME = ( 40, 128, 40)
RED = (255, 000, 000)
WHITE = (255, 255, 255)
ORANGE= (200, 200, 0)
# draw on the surface object
DISPLAYSURF.fill(WHITE)
pygame.draw.rect(DISPLAYSURF, GREEN, (0,220,500,200))
pygame.draw.rect(DISPLAYSURF, BLUE , (0,0,500,220))
pygame.draw.polygon(DISPLAYSURF, GRAY, ((236, 156), (236, 327), (56, 327), (56, 156)))
pygame.draw.polygon(DISPLAYSURF, LIME, ((146, 50), (300, 30), (385, 110), (236, 156)))
pygame.draw.polygon(DISPLAYSURF, GRAY, ((236, 156), (385, 110), (385, 275), (236, 327)))
pygame.draw.polygon(DISPLAYSURF, BLACK, ((320, 160), (295,170), (295, 190), (320,180)))
pygame.draw.polygon(DISPLAYSURF, LIME, ((56,156),(236,156),(146,50)))
pygame.draw.aaline(DISPLAYSURF, BLACK, (56, 156), (236, 156), 2)
pygame.draw.aaline(DISPLAYSURF, BLACK, (146, 50), (56, 156), 2)
pygame.draw.aaline(DISPLAYSURF, BLACK, (147, 50), (237, 156), 2)
pygame.draw.aaline(DISPLAYSURF, BLACK, (236, 156), (236, 327), 2)
pygame.draw.aaline(DISPLAYSURF, BLACK, (236, 327), (56, 327), 2)
pygame.draw.aaline(DISPLAYSURF, BLACK, (56, 327), (56, 156), 2)
pygame.draw.aaline(DISPLAYSURF, BLACK, (146, 50), (300, 30), 2)
pygame.draw.aaline(DISPLAYSURF, BLACK, (300, 30), (385, 110), 2)
pygame.draw.aaline(DISPLAYSURF, BLACK, (385, 110), (236, 156), 2)
pygame.draw.aaline(DISPLAYSURF, BLACK, (385, 110), (385, 275), 2)
pygame.draw.aaline(DISPLAYSURF, BLACK, (385, 275), (236, 327), 2)
pygame.draw.aaline(DISPLAYSURF, BLACK, (0,220),(56,220),5)
pygame.draw.aaline(DISPLAYSURF, BLACK, (385,220),(500,220),5)
pygame.draw.circle(DISPLAYSURF, BLACK, (146, 110), 20, 0)
pygame.draw.rect(DISPLAYSURF, BLACK, (95, 200, 110, 127))
pygame.draw.circle(DISPLAYSURF, ORANGE, (450, 90), 20, 0)
pixObj = pygame.PixelArray(DISPLAYSURF)
del pixObj
# run the game loop
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
|
[
"noreply@github.com"
] |
noreply@github.com
|
6b067efcd0dbc5af3028f691554b9724604944e8
|
e4652af89da12753710eb225e3905497032486e8
|
/Conditional Statements Advanced/fruit_and_veggie.py
|
8239a683f2d336092c6788f9439037a22d85f02f
|
[] |
no_license
|
paunovaeleonora/SoftUni-Python-Basics-2020
|
6ef6654aafcc30f8d0d81811f617b808d2f86d85
|
2f82efa99ba9e0be9c4811c26de5b1543c3fe0bf
|
refs/heads/master
| 2023-08-28T17:42:13.261923
| 2021-10-22T17:14:46
| 2021-10-22T17:14:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 335
|
py
|
item = input()
is_fruit = item in ('banana', 'apple', 'kiwi', 'cherry', 'lemon', 'grapes')
is_vegetable = item in ('tomato', 'cucumber', 'pepper', 'carrot')
if is_fruit:
print('fruit')
elif is_vegetable:
print('vegetable')
else:
print('unknown')
|
[
"nora.paunova@gmail.com"
] |
nora.paunova@gmail.com
|
74975e6fc6ed6ea9be93456c6a6a2415df374dcd
|
a9069fb302c4dc5bb01db2c519dec4762e5d4ce5
|
/fastAPI_SQL/sql_app/models.py
|
d45b8ef99f5e00019acffd5245179fb2aad17ea6
|
[] |
no_license
|
hhsu15/advanced_python
|
123c73110c2c811895257320ffb639688a8f6a46
|
cf4a5647814a3a63b67c970e992ae50cafd30265
|
refs/heads/master
| 2021-06-10T14:07:20.343336
| 2021-03-13T02:46:50
| 2021-03-13T02:46:50
| 139,370,162
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 794
|
py
|
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String
from sqlalchemy.orm import relationship
from .database import Base
class User(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True, index=True)
email = Column(String, unique=True, index=True)
hashed_password = Column(String)
is_active = Column(Boolean, default=True)
# a field for items that has relationship with items table
items = relationship("Item", back_populates="owner")
class Item(Base):
__tablename__ = "items"
id = Column(Integer, primary_key=True, index=True)
title = Column(String, index=True)
description = Column(String, index=True)
owner_id = Column(Integer, ForeignKey("users.id"))
owner = relationship("User", back_populates="items")
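These models import Base from a sibling database.py that is not shown; a likely companion module, following the FastAPI SQL-database tutorial this file mirrors (the URL and connect flags below are the tutorial's defaults, assumed here):

from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

SQLALCHEMY_DATABASE_URL = "sqlite:///./sql_app.db"

engine = create_engine(
    SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()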
|
[
"hsin@Hsinhengs-MBP.fios-router.home"
] |
hsin@Hsinhengs-MBP.fios-router.home
|
45e4c604a1abb9e87a512ebd9f76d9b0f9c3b196
|
ac5eb7df8515fb92071f9e0f64d1cf6467f4042d
|
/Python/rob2.py
|
f3a88519ae1128202362e9a689cbe3ddc705f124
|
[] |
no_license
|
Litao439420999/LeetCodeAlgorithm
|
6ea8060d56953bff6c03c95cf6b94901fbfbe395
|
9aee4fa0ea211d28ff1e5d9b70597421f9562959
|
refs/heads/master
| 2023-07-08T06:17:20.310470
| 2021-08-08T08:12:23
| 2021-08-08T08:12:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 983
|
py
|
#!/usr/bin/env python3
# encoding: utf-8
"""
@Filename: rob2.py
@Function: House Robber II (dynamic programming)
@Link: https://leetcode-cn.com/problems/house-robber-ii/
@Python Version: 3.8
@Author: Wei Li
@Date:2021-07-16
"""
class Solution:
def rob(self, nums) -> int:
def robRange(start: int, end: int) -> int:
first = nums[start]
second = max(nums[start], nums[start + 1])
for i in range(start + 2, end + 1):
first, second = second, max(first + nums[i], second)
return second
length = len(nums)
if length == 1:
return nums[0]
elif length == 2:
return max(nums[0], nums[1])
else:
return max(robRange(0, length - 2), robRange(1, length - 1))
# -------------------------
if __name__ == "__main__":
nums = [2, 3, 2]
solution = Solution()
max_cash = solution.rob(nums)
print(f"The solution of this problem is {max_cash}")
|
[
"weili_yzzcq@163.com"
] |
weili_yzzcq@163.com
|
c3f07911afb0807ce05def959386ca7075cc8770
|
78ffdfdeb9a56ba77136f2e9923f6b532332ff30
|
/Template14/hinderedRotation.py
|
a99c6bd5f6f9387708ca7997a1a197891ecb5dad
|
[] |
no_license
|
whm9167/Templates
|
ae96b1645c4414d17fb0b04118d6a52fe4765043
|
5195e7833fd1ca53c4e5d20ac8c31c312d6fba15
|
refs/heads/master
| 2021-05-01T20:45:25.206459
| 2015-04-19T07:59:04
| 2015-04-19T07:59:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,796
|
py
|
# READING--------------------------------------------------------------------------------------
import numpy as np
from xlrd import *
from xlwt import *
import pyExcelerator
from xlutils.copy import copy
import re
import os
import shutil
import matplotlib.pyplot as plt
import phys
import fourier
import chem
import lamm
#input
name = 'rotation'
# symbol indicating the position
pattern_name = re.compile('^.*.*|.*_scan.*$')
pattern_atoms = re.compile('^.*D *([0-9]+) *([0-9]+) *([0-9]+) *([0-9]+).*S *([0-9]+) *(-?[0-9]+\.[0-9]+).*$')
pattern_energy = re.compile('^.*SCF Done: E\(U?B3LYP\) = *(-?[0-9]+\.[0-9]+).*$')
pattern_optimized = re.compile('^.*Optimized Parameters.*$')
pattern_standard = re.compile('^.*Standard orientation:.*$')
pattern_input = re.compile('^.*Input orientation:.*$')
pattern_endline = re.compile('^.*---------------------------------------------------------------------.*$')
pattern_normal = re.compile('^.*Normal termination of Gaussian 09.*$')
# constants
# the number of rows and columns of the displayed fitting figures
FIG_ROW = 6
FIG_COL = 5
# modules
phys1 = phys.phys()
lamm1 = lamm.lamm()
#variables
atoms = []
# energy in hatree
steps = 0
step_length = 0.0
energy = []
# dihedral in degree
dihedral = []
energy_cmm1 = []
dihedral_rad = []
coeff_V = []
deviation_V =[]
geoms = []
atomsNum = 0
inertia = []
rotConst = []
coeff_I = []
deviation_I = []
coeff_B = []
deviation_B = []
#flags
atoms_done = 0
energy_done = 0
optimized_done = 0
dihedral_done = 0
standard_done = 0
coordinate_done = 0
geom_start = 0
geom_end = 0
# temporary variables
tmp_m = []
tmp_energy = 0.0
tmp_dihedral = 0.0
tmp_num = 0
tmp_page = 1
pwd = os.getcwd()
wb_new = Workbook()
pattern_blue = Pattern() # Create the Pattern
pattern_blue.pattern = Pattern.SOLID_PATTERN # May be: NO_PATTERN, SOLID_PATTERN, or 0x00 through 0x12
pattern_blue.pattern_fore_colour = 49
style_blue = XFStyle() # Create the Pattern
style_blue.pattern = pattern_blue
if os.path.exists(pwd + '/' + 'lammInput'):
shutil.rmtree('lammInput')
os.mkdir('lammInput')
if os.path.exists(pwd + '/' + 'lammOutput'):
shutil.rmtree('lammOutput')
os.mkdir('lammOutput')
tmp_fileLists = os.listdir(pwd)
# traverse folders
for tmp_file in tmp_fileLists:
if not os.path.isdir(pwd + '/' + tmp_file):
continue
tmp_pwd = pwd + '/' + tmp_file
# print tmp_pwd
# if target directory found
if re.search(name,tmp_file):
sh = wb_new.add_sheet(tmp_file)
sh_fit = wb_new.add_sheet(tmp_file + '_fit')
tmp_page = 1
tmp_fig = plt.figure(figsize=(22,12))
tmp_fig2 = plt.figure(figsize=(22,12))
tmp_fig3 = plt.figure(figsize=(22,12))
print '\n------------------------------------ ' + tmp_file + ' ----------------------------------------'
os.mkdir('lammInput/' + tmp_file)
os.mkdir('lammOutput/' + tmp_file)
# traverse files
tmp_fileLists2 = os.listdir(tmp_pwd)
tmp_num = 0
for tmp_file2 in tmp_fileLists2:
tmp_m = pattern_name.match(tmp_file2[0:-4])
# if target file found
if tmp_m:
print tmp_file2
tmp_num = tmp_num + 1
# extract data from scan log file
atoms_done = 0
energy_done = 0
optimized_done = 0
dihedral_done = 0
atoms = []
steps = 0
step_length = 0.0
energy = []
dihedral = []
energy_cmm1 = []
dihedral_rad = []
coeff_V = []
deviation_V =[]
geoms = []
atomsNum = 0
inertia = []
rotConst = []
coeff_I = []
deviation_I = []
coeff_B = []
deviation_B = []
fr = file(tmp_pwd + '/' + tmp_file2,'r')
tmp_lines = fr.readlines()
for i in range(0,len(tmp_lines)):
tmp_line = tmp_lines[i]
if atoms_done != 1:
tmp_m = pattern_atoms.match(tmp_line)
if tmp_m:
atoms = map(int,tmp_m.groups()[0:4])
steps = int(tmp_m.group(5))
step_length = float(tmp_m.group(6))
pattern_dihedral = re.compile('^.*D\(' + str(atoms[0]) + ',' + str(atoms[1]) + ',' + str(atoms[2]) + ',' + str(atoms[3]) + '\) *(-?[0-9]+\.[0-9]+).*-DE/DX.*$')
atoms_done = 1
elif (standard_done != 1 or coordinate_done != 1 or energy_done != 1 or optimized_done != 1):
if geom_start > 0 and coordinate_done != 1:
atomsNum += 1
# tmp_m = pattern_standard.match(tmp_line)
tmp_m = pattern_input.match(tmp_line)
if tmp_m:
geom_start = i + 5
atomsNum = -5
coordinate_done = 0
if coordinate_done != 1:
tmp_m = pattern_endline.match(tmp_line)
if tmp_m:
if i > geom_start:
geom_end = i
coordinate_done = 1
tmp_m = pattern_energy.match(tmp_line)
if tmp_m:
tmp_energy = float(tmp_m.group(1))
tmp_m = pattern_optimized.match(tmp_line)
if tmp_m:
energy.append(tmp_energy)
if (geom_end - geom_start) != atomsNum:
print 'Error! The number of atoms is not correct!'
tmp_mole = chem.molecule()
tmp_mole.getLogGeom(tmp_lines[geom_start: geom_start + atomsNum])
tmp_mole.changeLabel(tmp_file2[0:-4])
geoms.append(tmp_mole)
dihedral_done = 0
standard_done = 1
coordinate_done = 1
energy_done = 1
optimized_done = 1
elif dihedral_done != 1:
tmp_m = pattern_dihedral.match(tmp_line)
if tmp_m:
tmp_dihedral=float(tmp_m.group(1))
if len(dihedral) > 0:
if abs(step_length - 10.0) < 1e-2:
while tmp_dihedral < dihedral[-1]:
tmp_dihedral = tmp_dihedral + 360
elif abs(step_length + 10.0) < 1e-2:
while tmp_dihedral > dihedral[-1]:
tmp_dihedral = tmp_dihedral - 360
else:
                                    print 'Warning! The step length is neither 10 nor -10 degrees!'
dihedral.append(tmp_dihedral)
standard_done = 0
coordinate_done = 0
energy_done = 0
optimized_done = 0
dihedral_done = 1
tmp_m = pattern_normal.match(tmp_lines[-1])
if not tmp_m:
print 'Notice that job ' + tmp_file2 + ' did not end successfully!'
# else:
# print tmp_file2 + 'successfully!'
# fit potential energy data
dihedral = np.array(dihedral)
energy = np.array(energy)
if step_length < 0:
dihedral = 360 - dihedral
dihedral_rad = phys1.degreeTorad(dihedral)
dihedral_rad = dihedral_rad - dihedral_rad[0]
energy_cmm1 = phys1.hatreeTocmm1(energy)
energy_cmm1 = energy_cmm1- energy_cmm1[0]
            # note: np.std uses the 1/n (population) normalization, i.e. sqrt(sum((x-mean)^2)/n), not 1/(n-1)
coeff_V, deviation_V = fourier.fit_fourier_noGuess(dihedral_rad, energy_cmm1, threshold=np.std(energy_cmm1)/1e1)
# coeff_V, deviation_V = fourier.fit_fourier(dihedral_rad, energy_cmm1)
# generate lamm input files
lamm1.genInput(geoms, dihedral, energy_cmm1)
#run lamm
lamm1.run()
# extract lamm data
lamm_dihedral, inertia, rotConst = lamm1.extractOutput()
lamm_dihedral = np.array(lamm_dihedral)
inertia = np.array(inertia)
rotConst = np.array(rotConst)
lamm1.rename(reFileName = tmp_file2[0: -4])
shutil.move(tmp_file2[0: -4] + '.dat' ,'lammInput/' + tmp_file)
shutil.move(tmp_file2[0: -4] + '.out', 'lammOutput/' + tmp_file)
# fit lamm data I0s, B0s
# coeff_I, deviation_I = fourier.fit_cosFourier_noGuess(dihedral_rad, inertia, threshold=np.std(inertia))
# coeff_B, deviation_B = fourier.fit_cosFourier_noGuess(dihedral_rad, rotConst, threshold=np.std(rotConst)/2)
coeff_I, deviation_I = fourier.fit_cosFourier_noGuess(lamm_dihedral, inertia, n=6)
coeff_B, deviation_B = fourier.fit_cosFourier_noGuess(lamm_dihedral, rotConst, n=6)
# coeff_I, deviation_I = fourier.fit_cosFourier_noGuess(lamm_dihedral, inertia, threshold=np.std(inertia))
# coeff_B, deviation_B = fourier.fit_cosFourier_noGuess(lamm_dihedral, rotConst, threshold=np.std(rotConst)/2)
# write to excel
# original data
if tmp_num > (15*tmp_page) :
tmp_page += 1
sh = wb_new.add_sheet(tmp_file + ' (' + str(tmp_page) + ')')
tmp_row = 0
tmp_col = 0 + (tmp_num - 15 * (tmp_page-1) - 1) * 15
sh.col(tmp_col).width = 0x1500
sh.write(0, tmp_col+0, tmp_file2,style_blue)
sh.write(1, tmp_col+0, 'atoms')
sh.write(1, tmp_col+1, atoms[0])
sh.write(1, tmp_col+2, atoms[1])
sh.write(1, tmp_col+3, atoms[2])
sh.write(1, tmp_col+4, atoms[3])
tmp_row = 3
sh.col(tmp_col+1).width = 0x0d00
sh.col(tmp_col+3).width = 0x1000
sh.col(tmp_col+4).width = 0x1400
sh.col(tmp_col+5).width = 0x1400
sh.col(tmp_col+7).width = 0x1000
sh.col(tmp_col+8).width = 0x1400
sh.col(tmp_col+9).width = 0x1400
sh.col(tmp_col+11).width = 0x1000
sh.col(tmp_col+12).width = 0x1400
sh.col(tmp_col+13).width = 0x1400
sh.write(tmp_row, tmp_col+0, 'dihedral (degree)')
sh.write(tmp_row, tmp_col+1, 'energy (hatree)')
sh.write(tmp_row, tmp_col+3, 'relative dihedral (rad)')
sh.write(tmp_row, tmp_col+4, 'relative energy (cm^-1)')
sh.write(tmp_row, tmp_col+5, 'fit deviation_V (cm^-1)')
sh.write(tmp_row, tmp_col+7, 'relative dihedral (rad)')
sh.write(tmp_row, tmp_col+8, 'inertia (amu.A^2)')
sh.write(tmp_row, tmp_col+9, 'fit deviation_I (amu.A^2)')
sh.write(tmp_row, tmp_col+11, 'relative dihedral (rad)')
sh.write(tmp_row, tmp_col+12, 'rot const (cm^-1)')
sh.write(tmp_row, tmp_col+13, 'fit deviation_B (cm^-1)')
tmp_row = 4
for i in range(0,len(dihedral)):
sh.write(tmp_row+i, tmp_col+0, dihedral[i])
sh.write(tmp_row+i, tmp_col+1, energy[i])
sh.write(tmp_row+i, tmp_col+3, dihedral_rad[i])
sh.write(tmp_row+i, tmp_col+4, energy_cmm1[i])
sh.write(tmp_row+i, tmp_col+5, deviation_V[i])
for i in range(0,len(lamm_dihedral)):
sh.write(tmp_row+i, tmp_col+7, lamm_dihedral[i])
sh.write(tmp_row+i, tmp_col+8, inertia[i])
sh.write(tmp_row+i, tmp_col+9, deviation_I[i])
sh.write(tmp_row+i, tmp_col+11, lamm_dihedral[i])
sh.write(tmp_row+i, tmp_col+12, rotConst[i])
sh.write(tmp_row+i, tmp_col+13, deviation_B[i])
# fitted data
tmp_row2 = 0 + (tmp_num - 1) * 30
tmp_col2 = 0
sh_fit.col(0).width = 0x2000
sh_fit.write(tmp_row2, 0, tmp_file2, style_blue)
tmp_row2 = tmp_row2 + 1
sh_fit.write(tmp_row2, 0, 'atoms')
sh_fit.write(tmp_row2, 1, atoms[0])
sh_fit.write(tmp_row2, 2, atoms[1])
sh_fit.write(tmp_row2, 3, atoms[2])
sh_fit.write(tmp_row2, 4, atoms[3])
tmp_row2 = tmp_row2 + 1
sh_fit.write(tmp_row2+0, tmp_col2, 'relative dihedral (rad)')
sh_fit.write(tmp_row2+1, tmp_col2, 'relative energy (cm^-1)')
sh_fit.write(tmp_row2+2, tmp_col2, 'fit deviation_V (cm^-1)')
sh_fit.write(tmp_row2+3, tmp_col2, 'relative dihedral (rad)')
sh_fit.write(tmp_row2+4, tmp_col2, 'inertia (amu.A^2)')
sh_fit.write(tmp_row2+5, tmp_col2, 'fit deviation_I (amu.A^2)')
sh_fit.write(tmp_row2+6, tmp_col2, 'relative dihedral (rad)')
sh_fit.write(tmp_row2+7, tmp_col2, 'rot const (cm^-1)')
sh_fit.write(tmp_row2+8, tmp_col2, 'fit deviation_B (cm^-1)')
sh_fit.write(tmp_row2+9, tmp_col2, 'data summary')
tmp_col2 = tmp_col2 + 1
for i in range(0, len(dihedral)):
sh_fit.write(tmp_row2+0, tmp_col2+i, '%.6f' % dihedral_rad[i])
sh_fit.write(tmp_row2+1, tmp_col2+i, '%.6f' % energy_cmm1[i])
sh_fit.write(tmp_row2+2, tmp_col2+i, '%.6f' % deviation_V[i])
for i in range(0, len(lamm_dihedral)):
sh_fit.write(tmp_row2+3, tmp_col2+i, '%.6f' % lamm_dihedral[i])
sh_fit.write(tmp_row2+4, tmp_col2+i, '%.6f' % inertia[i])
sh_fit.write(tmp_row2+5, tmp_col2+i, '%.6f' % deviation_I[i])
sh_fit.write(tmp_row2+6, tmp_col2+i, '%.6f' % lamm_dihedral[i])
sh_fit.write(tmp_row2+7, tmp_col2+i, '%.6f' % rotConst[i])
sh_fit.write(tmp_row2+8, tmp_col2+i, '%.6f' % deviation_B[i])
sh_fit.write(tmp_row2+9, tmp_col2, str(list(dihedral_rad)))
sh_fit.write(tmp_row2+10, tmp_col2, str(list(energy_cmm1)))
sh_fit.write(tmp_row2+11, tmp_col2, str(list(deviation_V)))
sh_fit.write(tmp_row2+12, tmp_col2, str(list(lamm_dihedral)))
sh_fit.write(tmp_row2+13, tmp_col2, str(list(inertia)))
sh_fit.write(tmp_row2+14, tmp_col2, str(list(deviation_I)))
sh_fit.write(tmp_row2+15, tmp_col2, str(list(lamm_dihedral)))
sh_fit.write(tmp_row2+16, tmp_col2, str(list(rotConst)))
sh_fit.write(tmp_row2+17, tmp_col2, str(list(deviation_B)))
tmp_row2 = tmp_row2 + 18
tmp_col2 = 0
sh_fit.write(tmp_row2, tmp_col2, 'fitted V_a coefficients (c=0, a0, a1...)')
sh_fit.write(tmp_row2+1, tmp_col2, 'fitted V_b coefficients (b1, b2...)')
sh_fit.write(tmp_row2+2, tmp_col2, 'fitted I coefficients (c, a0, a1, a2...)')
sh_fit.write(tmp_row2+3, tmp_col2, 'fitted B coefficients (c, a0, a1, a2...)')
tmp_col2 = tmp_col2 + 1
for i in range(0, len(coeff_V)/2):
sh_fit.write(tmp_row2, tmp_col2+i, coeff_V[i])
if i > 0:
sh_fit.write(tmp_row2+1, tmp_col2+i, coeff_V[i+len(coeff_V)/2])
for (i, x) in enumerate(coeff_I):
sh_fit.write(tmp_row2+2, tmp_col2+i, x)
for (i, x) in enumerate(coeff_B):
sh_fit.write(tmp_row2+3, tmp_col2+i, x)
tmp_row2 = tmp_row2 + 4
tmp_col2 = 0
sh_fit.write(tmp_row2, tmp_col2, 'fitted parameters summary')
tmp_row2 = tmp_row2 + 1
sh_fit.write(tmp_row2, tmp_col2, str(atoms[1]) + ' ' + str(atoms[2]))
tmp_row2 = tmp_row2 + 1
sh_fit.row(tmp_row2+0).set_style(style_blue)
sh_fit.row(tmp_row2+1).set_style(style_blue)
sh_fit.row(tmp_row2+2).set_style(style_blue)
sh_fit.write(tmp_row2+0, tmp_col2+0, 0, style_blue)
sh_fit.write(tmp_row2+0, tmp_col2+1, 'hrd', style_blue)
sh_fit.write(tmp_row2+1, tmp_col2+1, 'Vhrd3', style_blue)
sh_fit.write(tmp_row2+2, tmp_col2+1, 'Ihrd1', style_blue)
tmp_col2 = tmp_col2 + 2
sh_fit.write(tmp_row2, tmp_col2+0, len(coeff_V)-1, style_blue)
sh_fit.write(tmp_row2, tmp_col2+1, len(coeff_I)-1, style_blue)
sh_fit.write(tmp_row2, tmp_col2+2, 1, style_blue)
sh_fit.write(tmp_row2+1, tmp_col2+0, 1, style_blue)
sh_fit.write(tmp_row2+1, tmp_col2+1, '0.0', style_blue)
sh_fit.write(tmp_row2+2, tmp_col2+0, 1, style_blue)
for i in range(0, len(coeff_V)):
if i < len(coeff_V)/2:
sh_fit.write(tmp_row2+1, tmp_col2+2+i, '%.6f' % coeff_V[i], style_blue)
elif i > len(coeff_V)/2:
sh_fit.write(tmp_row2+1, tmp_col2+1+i, '%.6f' % coeff_V[i], style_blue)
for (i,x) in enumerate(coeff_I):
sh_fit.write(tmp_row2+2, tmp_col2+1+i, x, style_blue)
# draw figures
tmp_ax = tmp_fig.add_subplot(FIG_ROW,FIG_COL,tmp_num)
tmp_fig.subplots_adjust(left=0.04,bottom=0.04,right=0.98,top=0.96,wspace=0.2,hspace=0.4)
tmp_ax.plot(dihedral_rad, energy_cmm1, 'b*', dihedral_rad, fourier.func_fourier(dihedral_rad,*coeff_V),'r-')
tmp_ax.set_title(tmp_file2)
tmp_ax2 = tmp_fig2.add_subplot(FIG_ROW,FIG_COL,tmp_num)
tmp_fig2.subplots_adjust(left=0.04,bottom=0.04,right=0.98,top=0.96,wspace=0.2,hspace=0.4)
tmp_ax2.plot(lamm_dihedral, inertia, 'b*', dihedral_rad, fourier.func_cosFourier(dihedral_rad,*coeff_I),'r-')
tmp_ax2.set_title(tmp_file2)
tmp_ax3 = tmp_fig3.add_subplot(FIG_ROW,FIG_COL,tmp_num)
tmp_fig3.subplots_adjust(left=0.04,bottom=0.04,right=0.98,top=0.96,wspace=0.2,hspace=0.4)
tmp_ax3.plot(lamm_dihedral, rotConst, 'b*', dihedral_rad, fourier.func_cosFourier(dihedral_rad,*coeff_B),'r-')
tmp_ax3.set_title(tmp_file2)
tmp_fig.show()
tmp_fig2.show()
tmp_fig3.show()
tmp_fig.savefig('V' + tmp_file + '.png',dpi=300)
tmp_fig2.savefig('I' + tmp_file + '.png',dpi=300)
tmp_fig3.savefig('B' + tmp_file + '.png',dpi=300)
plt.close(tmp_fig)
plt.close(tmp_fig2)
plt.close(tmp_fig3)
if os.path.exists('HR_fit.xls'):
os.remove('HR_fit.xls')
wb_new.save('HR_fit.xls')
print 'hindered rotation data extracted successfully!'
# THE END
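The custom fourier module used above is not shown; below is a minimal sketch of the same kind of truncated-Fourier fit with scipy on synthetic data. The functional form and coefficient naming here are assumptions, and differ slightly from the (c=0, a0, a1, ..., b1, ...) layout written to the spreadsheet.

import numpy as np
from scipy.optimize import curve_fit

def fourier3(x, c, a1, a2, a3, b1, b2, b3):
    # truncated Fourier series of the kind fitted to V(phi), I(phi), B(phi)
    return (c + a1*np.cos(x) + a2*np.cos(2*x) + a3*np.cos(3*x)
              + b1*np.sin(x) + b2*np.sin(2*x) + b3*np.sin(3*x))

phi = np.linspace(0, 2*np.pi, 36, endpoint=False)    # dihedral, rad
V = 100.0 * (1.0 - np.cos(3.0*phi)) / 2.0            # a V3-type hindered-rotor potential, cm^-1
coeff, _ = curve_fit(fourier3, phi, V)
print(np.round(coeff, 3))                            # c = 50, a3 = -50, everything else ~ 0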
|
[
"hetanjin@163.com"
] |
hetanjin@163.com
|
25e655d40a6d42517ef5ad9c8bd1badf7733d0dc
|
3c5754bd81702ab1fa560111a75dae7052ef0df6
|
/mst(minimumSpanningTree)/prim_adjacancyMatrix.py
|
4550b8bf1c1f8b8f4d8f19e012bacc3fda763459
|
[
"MIT"
] |
permissive
|
vsjadhav/DSA_python
|
e62a8ea970a6f26a8a77cbb07a54c1fef7fcce6f
|
1dd71b418bb604ebfd561c3dc9b8e123486ad8ef
|
refs/heads/main
| 2023-08-17T14:44:32.030375
| 2021-09-15T12:34:34
| 2021-09-15T12:34:34
| 406,753,969
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,454
|
py
|
class Graph:
def __init__(self,numberOfVertices, vertices):
self.v = numberOfVertices
self.vertices = vertices
self.graph= self.createGraph()
def createGraph(self):
g= []
for i in range(self.v):
row = [0]*self.v
g.append(row)
return g
def addEdge(self,s,d,w):
self.graph[s][d]=w
    def prim(self):
        ans = []
        visited = [False]*self.v
        e = 0    # edges added to the MST so far
        visited[0] = True
        while e < self.v - 1:
            min_weight = float('inf')    # renamed from `min` to avoid shadowing the built-in
            for i in range(self.v):
                if visited[i]:
                    for j in range(self.v):
                        if (not visited[j] and self.graph[i][j]):
                            if self.graph[i][j] < min_weight:
                                min_weight = self.graph[i][j]
                                s = i
                                d = j
            ans.append([self.vertices[s], self.vertices[d], self.graph[s][d]])
            visited[d] = True
            e += 1
        print(ans)
# graph = "ab8ad1bc1ca4db2dc9"
graph = "ab10,ba10,ac20,ca20,bc30,cb30,bd5,db5,cd15,dc15,de8,ed8,ce6,ec6"
vertices = "abcde"
g =Graph(5,[v for v in vertices])
i=0
for v in vertices:
graph= graph.replace(v, str(i))
i+=1
l = graph.split(",")
for i in l:
s = int(i[0])
d = int(i[1])
w = int(i[2:])
g.addEdge(s,d,w)
# print(g.graph)
g.prim()
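Worked by hand for the weighted graph above, Prim's tree grows a -> b -> d -> e -> c, so g.prim() is expected to print:
# [['a', 'b', 10], ['b', 'd', 5], ['d', 'e', 8], ['e', 'c', 6]]   (total weight 29)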
|
[
"noreply@github.com"
] |
noreply@github.com
|
2d5a531c90bdd9e612a4c53e4dd5fb5bb4137892
|
970f44beabdbf6df534f7c5ca9d30dd0056e098b
|
/deep_ch/projections.py
|
73d8e536e04c7a8bfb64e16e5048f65d4e740289
|
[] |
no_license
|
AkivaSinai/DeepCognitiveHierarchy
|
d8c3e05f5ea6dc7feaa61b6aa634c7d6126c1b1f
|
7c66673275e8019a0414e8066cdebd613351dd6c
|
refs/heads/master
| 2020-09-17T19:41:54.315829
| 2017-04-06T21:32:35
| 2017-04-06T21:32:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 769
|
py
|
import numpy
def get_operator(name):
if name == 'simplex':
return project_simplex
elif name == 'bound':
return bound
else:
        raise NameError, 'Unknown operator %s' % name
def bound(x, a=0., b=1.):
return numpy.clip(x, a, b)
def project_simplex(x):
"""
Project an arbitary vector onto the simplex.
See [Wang & Carreira-Perpin 2013] for a description and references.
TODO: Implement in theano for faster projections
"""
n = x.shape[0]
mu = -numpy.sort(-x) # sort decending
sm = 0
for j in xrange(1, n+1):
sm += mu[j - 1]
t = mu[j - 1] - (1./(j)) * (sm - 1)
if t > 0:
row = j
sm_row = sm
theta = (1. / row) * (sm_row - 1)
return numpy.abs(numpy.maximum(x - theta, 0))
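A minimal usage sketch of the function above with a hand-picked vector (Python 2 print, matching the file):

import numpy
x = numpy.array([0.5, 1.2, -0.3])
p = project_simplex(x)
print p, p.sum()    # -> [ 0.15  0.85  0.  ] 1.0 : non-negative and sums to one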
|
[
"jasonhar@cs.ubc.ca"
] |
jasonhar@cs.ubc.ca
|
beff490e394e8a5b7b6882f0b03e90a9aafaad28
|
87f91ccf1ee28679aad24041ac29423c8d393d38
|
/Python_code/src/chapter13/爬虫/爬虫2.py
|
2221e0f771180c661f3e55525b1d30df167e3d7a
|
[] |
no_license
|
FirMoveBrick/MoveBrick
|
382673ed63cfbf00b43e011eb8f76e0457771f62
|
ee0effb7bc74ec6fd2fe1184744cf4fce2066805
|
refs/heads/master
| 2020-05-07T08:08:28.475439
| 2019-11-22T13:16:12
| 2019-11-22T13:16:12
| 180,308,890
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,203
|
py
|
# -*- coding:UTF-8 -*-
import requests, json, time, sys
import urllib3
from contextlib import closing
class get_photos(object):
def __init__(self):
self.photos_id = []
self.download_server = 'https://unsplash.com/photos/xxx/download?force=trues'
self.target = 'http://unsplash.com/napi/feeds/home'
self.headers = {'authorization': 'Client-ID c94869b36aa272dd62dfaeefed769d4115fb3189a9d1ec88ed457207747be626'}
"""
函数说明:获取图片ID
Parameters:
无
Returns:
无
Modify:
2017-09-13
"""
def get_ids(self):
req = requests.get(url=self.target, headers=self.headers, verify=False)
html = json.loads(req.text)
next_page = html['next_page']
for each in html['photos']:
self.photos_id.append(each['id'])
time.sleep(1)
for i in range(5):
req = requests.get(url=next_page, headers=self.headers, verify=False)
html = json.loads(req.text)
next_page = html['next_page']
for each in html['photos']:
self.photos_id.append(each['id'])
time.sleep(1)
"""
函数说明:图片下载
Parameters:
无
Returns:
无
Modify:
2017-09-13
"""
def download(self, photo_id, filename):
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36'}
target = self.download_server.replace('xxx', photo_id)
with closing(requests.get(url=target, stream=True, verify=False, headers=self.headers)) as r:
with open('%d.jpg' % filename, 'ab+') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
# download
if __name__ == '__main__':
gp = get_photos()
urllib3.disable_warnings()
    print('Fetching photo links:')
    gp.get_ids()
    print('Downloading photos:')
    for i in range(len(gp.photos_id)):
        print('  Downloading photo %d' % (i + 1))
gp.download(gp.photos_id[i], (i + 1))
|
[
"945184211@qq.com"
] |
945184211@qq.com
|
e93c5f49793bfebeb2025b28d96715deb5ef9d47
|
accb3a97a376a4d0f52f74a134ddf1f19680d655
|
/week10_homework_ls/app.py
|
8434e390e248efc151c9d49fa5a3bf0d8e14baab
|
[] |
no_license
|
luis-sauceda/bootcamp
|
5eddf3037cfb77a13975705e9bbacb04841d093a
|
498320af5c73ca19584cbf4b9fa8ee1ca5c5dcbe
|
refs/heads/master
| 2020-06-16T05:42:12.908872
| 2019-09-28T16:06:34
| 2019-09-28T16:06:34
| 195,493,080
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,622
|
py
|
from flask import Flask, jsonify
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
import numpy as np
import pandas as pd
import datetime as dt
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
#################################################
# Flask Routes
#################################################
@app.route("/")
def welcome():
return(
f"Welcome to the Hawaii climate analysis API!<br/>"
f"<br/>"
f"Available Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/start<br/>"
f"/api/v1.0/start/end<br/>"
f"<br/>"
f"<br/>"
f"<br/>"
f"*For /api/v1.0/start and /api/v1.0/start/end please replace word START and END with<br/>"
f" dates format YYYY-MM-DD<br/>"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
#################################################
# Create SQL object
#################################################
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save references to each table
Measurement = Base.classes.measurement
# Create our session (link) from Python to the DB
session = Session(engine)
    # get the date one year before the latest date in the dataset
query_date = dt.date(2017, 8, 23) - dt.timedelta(days=365)
    # select precipitation and date
sel = [func.avg(Measurement.prcp), Measurement.date]
#query filtering and ordering by date
precipitation = session.query(*sel).\
order_by(func.strftime(Measurement.date).asc()).\
filter(Measurement.date >= query_date).\
group_by(Measurement.date).all()
    # convert query results into a DataFrame
query_df = pd.DataFrame(precipitation, columns=["prcp","date"])
#Changing index to date
#query_df["date"] = pd.to_datetime(query_df["date"])
query_df.set_index(query_df["date"], inplace=True)
    # keep only the prcp column
query_df = query_df.loc[:,["prcp"]]
return jsonify(list(query_df.to_dict().values()))
@app.route("/api/v1.0/stations")
def stations():
#################################################
# Create SQL object
#################################################
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save references to each table
Station = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
#query the base to get the distinct stations
station = session.query(Station.station).distinct(Station.station).all()
    # convert query results into a DataFrame
query_df = pd.DataFrame(station, columns=["station"])
#Return JSON
return jsonify(list(query_df.to_dict().values()))
@app.route("/api/v1.0/tobs")
def tobs():
#################################################
# Create SQL object
#################################################
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save references to each table
Measurement = Base.classes.measurement
# Create our session (link) from Python to the DB
session = Session(engine)
query_date = dt.date(2017, 8, 23) - dt.timedelta(days=365)
    # query tobs from one year before the last date onwards
tobs = session.query(Measurement.date, Measurement.tobs).\
filter(Measurement.date >= query_date).\
order_by(func.strftime(Measurement.date).asc()).all()
#Convert query results into a Data Frame
query_df = pd.DataFrame(tobs, columns=["date","tobs"])
#Change index for date
query_df.set_index(query_df["date"], inplace=True)
    # keep only the tobs column
query_df = query_df.loc[:,["tobs"]]
return jsonify(list(query_df.to_dict().values()))
@app.route("/api/v1.0/<start>")
def average_start(start):
#################################################
# Create SQL object
#################################################
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save references to each table
Measurement = Base.classes.measurement
# Create our session (link) from Python to the DB
session = Session(engine)
temps = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start).all()
#Convert query results into a Data Frame
query_df = pd.DataFrame(temps, columns=["min","avg","max"]).transpose()
return jsonify(list(query_df.to_dict().values()))
@app.route("/api/v1.0/<start>/<end>")
def average_period(start, end):
#################################################
# Create SQL object
#################################################
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save references to each table
Measurement = Base.classes.measurement
# Create our session (link) from Python to the DB
session = Session(engine)
temps = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start).filter(Measurement.date <= end).all()
#Convert query results into a Data Frame
query_df = pd.DataFrame(temps, columns=["min","avg","max"]).transpose()
return jsonify(list(query_df.to_dict().values()))
if __name__ == "__main__":
app.run(debug=True)
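A minimal smoke test of the routes above, assuming the Flask development server's defaults (127.0.0.1:5000):

# $ python app.py
# $ curl http://127.0.0.1:5000/api/v1.0/precipitation
# $ curl http://127.0.0.1:5000/api/v1.0/stations
# $ curl http://127.0.0.1:5000/api/v1.0/2016-08-23/2017-08-23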
|
[
"luis.alfonso.sauceda@gmail.com"
] |
luis.alfonso.sauceda@gmail.com
|
419fd0e93a51cee00b17feeb01639772907b12a8
|
ad02dbf1bd48e408c7584e3c5b4f2a6649fb988e
|
/production/Leonid_Smirnov_todoapp/userapp/views.py
|
f2a9dd73fcc4cd9dc577e4231c95d539c36ed033
|
[] |
no_license
|
Leo-droid-maker/REST_API_Project
|
b795857de3269e2c9bb402804488ebba3cb27799
|
c49cd303908100a098072441e9e1ca21f82f93e1
|
refs/heads/master
| 2023-08-30T02:19:17.896759
| 2021-10-15T09:26:18
| 2021-10-15T09:26:18
| 398,198,247
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 603
|
py
|
from rest_framework.viewsets import GenericViewSet
from rest_framework import mixins
from userapp.models import User
from userapp.serializers import UserModelSerializer, UserModelSerializerWithStaffInformation
class UserCustomViewSet(mixins.ListModelMixin, mixins.RetrieveModelMixin, mixins.UpdateModelMixin, mixins.CreateModelMixin, GenericViewSet):
queryset = User.objects.all()
serializer_class = UserModelSerializer
def get_serializer_class(self):
if self.request.version == '0.2':
return UserModelSerializerWithStaffInformation
return UserModelSerializer
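A likely routing sketch for this viewset (hypothetical urls.py; request.version additionally assumes a DRF versioning scheme such as URLPathVersioning is configured in settings):

from rest_framework.routers import DefaultRouter
from userapp.views import UserCustomViewSet

router = DefaultRouter()
router.register('users', UserCustomViewSet)
urlpatterns = router.urls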
|
[
"jwabeljr@gmail.com"
] |
jwabeljr@gmail.com
|
823bdba8b566f0e5d31459c3e6942b77fac8fbb2
|
705bdeb17c8732ec5bf6653e34a37f149e1656fd
|
/orders/migrations/0005_auto_20170610_2147.py
|
b1e7946a8851b2965ea13e91f3e7ea32fae6a4ed
|
[] |
no_license
|
igrv4/coilzone
|
79092ee2792b18c9c5f90f1a0ed138ffee2b3e63
|
1d7ca77ec3156169284b0dd97caaff44d4cbac84
|
refs/heads/master
| 2021-01-21T21:18:46.582108
| 2017-06-20T19:28:38
| 2017-06-20T19:28:38
| 94,809,440
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 800
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-06-10 18:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orders', '0004_productincart'),
]
operations = [
migrations.AddField(
model_name='productinorder',
name='coil',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='productinorder',
name='winding',
field=models.CharField(blank=True, default=None, max_length=10, null=True),
),
migrations.AlterField(
model_name='productinorder',
name='nmb',
field=models.IntegerField(default=0),
),
]
|
[
"artur.bykov94@yandex.ru"
] |
artur.bykov94@yandex.ru
|
357f34d7579b60591d6308fc473f39d7d0ddb46c
|
5f0ffab428b304eb644e54f047ef092cc13c1885
|
/ex024.py
|
1512b953a7ddc240df106c9356aa55eb2233f619
|
[] |
no_license
|
alamyrjunior/pythonExercises
|
a93ca2301ed8d6a02f452a67318baac6d7c8ac02
|
bad1860519bf1e62333d2350b706e3dddd350a4d
|
refs/heads/master
| 2022-11-17T12:20:28.603351
| 2020-07-11T22:04:37
| 2020-07-11T22:04:37
| 275,353,387
| 0
| 0
| null | 2020-07-05T14:37:41
| 2020-06-27T10:58:12
|
Python
|
UTF-8
|
Python
| false
| false
| 141
|
py
|
cidade = str(input('Enter the name of your city: ')).strip()
split = cidade[:5].upper().split()
santo = 'SANTO' in split[0]
print(santo)
|
[
"noreply@github.com"
] |
noreply@github.com
|
de08487599db59ec42c8411a915504fbb20fb953
|
16e25b85246531216c57d5eb7fe1d4198aa41ce7
|
/django_todo/urls.py
|
2efbb05ba8c0cd2dc0e117a6e855929c968c9f29
|
[] |
no_license
|
StuChapman/Code-Institute-fsf-hello-django
|
0b22318a32c6d9477658cd70869a1ada376868b0
|
341a916dad8f58244df266774e16180d206f47d6
|
refs/heads/master
| 2023-08-17T10:43:59.185573
| 2021-01-09T12:40:03
| 2021-01-09T12:40:03
| 326,421,544
| 0
| 0
| null | 2021-09-22T19:42:04
| 2021-01-03T14:07:24
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,062
|
py
|
"""django_todo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from todo import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.get_todo_list, name='get_todo_list'),
path('add', views.add_item, name='add'),
    path('edit/<item_id>', views.edit_item, name='edit'),
path('toggle/<item_id>', views.toggle_item, name='toggle'),
path('delete/<item_id>', views.delete_item, name='delete')
]
|
[
"chapman.stuart@sky.com"
] |
chapman.stuart@sky.com
|
b8093a8f605a4b25b1ab786c82386a1b447f02f8
|
bbe282e2f2e3b6ba560eb4a5bac677e832c6aca9
|
/apps/LOGIN_APP/views.py
|
142b20af0b9a759b78a0af551638d6ec63cd45b3
|
[] |
no_license
|
jhoney92021/djagno_favoriteBooks
|
20edaa66b6dce1f158fcd8584615105f43e17aaf
|
013919d4be60976d69c36d78bdfc865628cfecc8
|
refs/heads/master
| 2021-06-25T12:30:59.415140
| 2019-07-17T21:33:43
| 2019-07-17T21:33:43
| 197,466,361
| 0
| 0
| null | 2021-06-10T21:43:55
| 2019-07-17T21:34:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,229
|
py
|
from django.shortcuts import render, HttpResponse, redirect
from django.contrib import messages
from apps.LOGIN_APP.models import Users
import random, datetime, bcrypt
def index(request): #MAIN INDEX IE LOGIN INDEX
return render(request,'LOGIN_APP/index.html')
def processRegistration(request): #REGISTRATION PROCESS ROUTE
errors = Users.objects.validator(request.POST)
if len(errors) > 0:
for key, val in errors.items():
messages.error(request, val)
return redirect('/')
else:
newUserPass = request.POST['password']
newUserPassEncrypt = bcrypt.hashpw(newUserPass.encode(), bcrypt.gensalt())
newUser = Users.objects.create(
fname= request.POST['fname'],
lname= request.POST['lname'],
username= request.POST['username'],
birthday= request.POST['birthday'],
email= request.POST['email'],
password= newUserPassEncrypt
)
request.session['user_live'] = newUser.id
return redirect('/success')
def processLogin(request): #LOGIN PROCESS ROUTE
errors = Users.objects.loginVal(request.POST)
if len(errors) > 1:
for key, val in errors.items():
messages.error(request, val)
return redirect('/')
else:
passGiven = request.POST['password']
userQuery = Users.objects.get(email= request.POST['email'])
if bcrypt.checkpw(passGiven.encode(), userQuery.password.encode()):
request.session['user_live'] = userQuery.id
return redirect('/success' )
else:
messages.error(request, 'logFail')
return redirect('/')
def success(request): #RENDER SUCCESS, TO INFORM THE USER THAT THEY ARE IN SESSION
liveUser = request.session['user_live']
context ={
'user': Users.objects.get(id = liveUser),
}
return render(request, 'LOGIN_APP/success.html', context)
def logout(request): #CLEAR USER IN SESSION
request.session.clear()
return redirect('/')
def userPage(request, userID): #FOR RENDERING A USERS PAGE
context = {
'thisUser': Users.objects.get(id= userID)
}
return render(request, "LOGIN_APP/userPage.html", context)
|
[
"51093828+jhoney92021@users.noreply.github.com"
] |
51093828+jhoney92021@users.noreply.github.com
|
2e6f64de93dc287b2fb60da10ae8273cc7d6b585
|
25e33d1b4e3546239310514b2c34de10f5390b50
|
/image_patch_files/system_python_path/site-packages/cloudinit/__init__.py
|
a2d016e9e8daeb44205b8bac8c9374c23fd5d8e5
|
[
"Apache-2.0"
] |
permissive
|
jgruberf5/bigiq-cloudinit
|
8961e261c5fcf41647f8fa881484a3871fbbd12f
|
d5ef898c3970bdc5822fa6c74938e9959a5e098f
|
refs/heads/master
| 2021-10-24T15:35:08.923810
| 2021-10-14T18:23:45
| 2021-10-14T18:23:45
| 212,141,905
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
config_modules = [
'cc_configdrive_defaults', 'cc_ibm_vpc_gen2_defaults', 'cc_set_passwords',
'cc_bigiq_configdrive_openstack', 'cc_bigiq_static_mgmt',
'cc_bigiq_dhcpv4_tmm', 'cc_bigiq_playbooks'
]
|
[
"jgruber@f5.com"
] |
jgruber@f5.com
|
f393a262dfa4a67a83872bd7ce0ff658f6611d3d
|
cfad0044a5d50be42ff78651b1f1460f3dadb014
|
/python/prac1x2.py
|
99af3e89abf2be043bd42d376502cc4651d227f0
|
[
"Unlicense"
] |
permissive
|
sourabh48/python
|
4c41db76bd6eddf73be8e76e10a782e247373f39
|
5fbda23133dad1ca0b73d4600b4e00cb6b5e3331
|
refs/heads/master
| 2020-05-07T16:19:46.369631
| 2019-04-10T23:20:22
| 2019-04-10T23:20:22
| 180,677,552
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 162
|
py
|
a = int(input("Enter number 1: "))
b = int(input("Enter number 2: "))
c = int(input("Enter number 3: "))
d = (a+b+c)/3
print("The avarage of 3 numbers are: " ,d )
|
[
"30088198+sourabh48@users.noreply.github.com"
] |
30088198+sourabh48@users.noreply.github.com
|
a4b433b288892409b40316cf2edd73f31e98e343
|
903784f4ebc68012041d6f2cfeb903c6b37837a6
|
/pydenji/userproperties/overrider.py
|
e581afc404a864e3e30d7e3c8e71a8387c9e2361
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
alanfranz/pydenji
|
25a45aea5f03f496fe03bf9e4f0f135b9cae4a37
|
a922f1456e0d698b3e9e0674d980ede746fc2835
|
refs/heads/master
| 2020-04-05T12:34:36.629505
| 2017-07-10T16:20:46
| 2017-07-10T16:20:46
| 95,158,029
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 952
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (C) 2010 Alan Franzoni.
from configobj import ConfigObj
from pydenji._aop.intercept import intercept
# TODO: change this name, I don't like it.
class override_with(object):
def __init__(self, configobj_source, *other_co_srcs):
self._co = ConfigObj(configobj_source, unrepr=True)
for other_src in other_co_srcs:
self._co.merge(ConfigObj(other_src, unrepr=True))
def __call__(self, config_cls):
        for section_name in self._co.sections:
            # bind section_name at definition time; a plain closure would see
            # only the loop's final value when the interceptor runs (late binding)
            def section_interceptor(context, section_name=section_name):
                o = context.proceed()
                for k, v in self._co[section_name].items():
                    setattr(o, k, v)
                return o
# this creates a new subclass every time! we should change the way
# intercept works??
config_cls = intercept(config_cls, section_name, section_interceptor)
return config_cls
|
[
"username@franzoni.eu"
] |
username@franzoni.eu
|
61a95e5589b6dfb0bbd3e318f3d3bad8468e78cd
|
d32c1163bbfb27049df0f0f0485cdf5bb5224b92
|
/src/handelsraad_bot/util.py
|
8e56e0875e136f89426a48b4d6def23ff5abba6c
|
[] |
no_license
|
joostsijm/handelsraad_bot
|
4804453e015260d9d191e93d999ce9fe01b3f471
|
658fd447c00e411c8ee40212178fae2a64aae2ca
|
refs/heads/master
| 2023-08-11T16:43:28.256240
| 2021-09-26T12:38:31
| 2021-09-26T12:38:31
| 355,955,150
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,153
|
py
|
"""Common utilities"""
from rival_regions_calc import Value
from handelsraad_bot import LOGGER, TESTING, database
def check_permission(update, roles, action):
"""Check permissions"""
executor = database.get_user_by_telegram_id(
update.message.from_user.id
)
if not executor:
executor = database.get_user_by_telegram_username(
update.message.from_user.username
)
if executor:
executor.telegram_id = update.message.from_user.id
executor = database.save_user(executor)
else:
executor = database.add_user(
update.message.from_user.first_name,
update.message.from_user.id,
update.message.from_user.username
)
if TESTING:
return True
for role in executor.get_roles():
if role in roles:
return True
LOGGER.warning(
'%s: %s, not allowed',
update.message.from_user.username,
action
)
update.message.reply_text(
'Rollen die recht hebben op dit command: {}'.format(
', '.join(roles)
)
)
return False
def total_investment(user):
"""Count user investment"""
total = 0
for investment in user.investments:
total += investment.amount
return total
def get_total():
"""Get total including average"""
total = {
0: {
'amount': 0,
'average': 0
}
}
for user in database.get_investors():
total[0]['amount'] += total_investment(user)
item_details = {}
for detail in database.get_transaction_details():
if detail.item_id not in total:
total[detail.item_id] = {
'amount': 0,
'average': 0
}
if detail.item_id not in item_details:
item_details[detail.item_id] = []
total[detail.item_id]['amount'] += detail.amount
total[0]['amount'] += detail.money
item_details[detail.item_id].append(detail)
for item_id, details in item_details.items():
money_total = 0
item_total = total[item_id]['amount']
for detail in reversed(details):
if detail.money >= 0:
continue
if item_total < detail.amount:
money_total += round(
item_total * (detail.money / detail.amount), 2
)
break
money_total += detail.money
item_total -= detail.amount
        if total[item_id]['amount']:
            total[item_id]['average'] = abs(round(
                money_total / total[item_id]['amount'], 2
            ))
else:
del total[item_id]
return total
def round_number(number, length):
"""Round number"""
i = 1
number = Value(number)
while len(str(number)) > length:
amount = pow(1000, i)
number = Value(round(number / amount) * amount)
i += 1
return number
|
[
"joostsijm@gmail.com"
] |
joostsijm@gmail.com
|
82a203f3a27ae3767dc8c58441b3f4644e5a1399
|
a2e607593dcbe5feaeedd9e9bd4caeaf06e46733
|
/tests/ui/menus/test_opmenu.py
|
464f5422d23c0778525972d3ce32d53d5aa537af
|
[] |
no_license
|
all-in-one-of/Houdini-Toolbox
|
dd05b2c869e663b185c1997d326bfe7548fbf55f
|
c10663c46c0f1249a9b3c6b32d4384a4399849ed
|
refs/heads/master
| 2020-06-13T01:10:11.832715
| 2019-08-30T07:24:47
| 2019-08-30T07:24:47
| 194,484,242
| 0
| 0
| null | 2019-06-30T06:42:17
| 2019-06-30T06:42:17
| null |
UTF-8
|
Python
| false
| false
| 1,738
|
py
|
"""Tests for ht.ui.menus.opmenu module."""
# =============================================================================
# IMPORTS
# =============================================================================
# Python Imports
from mock import MagicMock, patch
import unittest
# Houdini Toolbox Imports
import ht.ui.menus.opmenu
# Houdini Imports
import hou
reload(ht.ui.menus.opmenu)
# =============================================================================
# CLASSES
# =============================================================================
class Test_create_absolute_reference_copy(unittest.TestCase):
"""Test ht.ui.menus.opmenu.create_absolute_reference_copy."""
def test(self):
"""Test creating an absolute reference copy."""
mock_node = MagicMock(spec=hou.Node)
scriptargs = {
"node": mock_node
}
mock_ui = MagicMock()
hou.ui = mock_ui
ht.ui.menus.opmenu.create_absolute_reference_copy(scriptargs)
mock_node.parent.return_value.copyItems.assert_called_with([mock_node], channel_reference_originals=True, relative_references=False)
del hou.ui
class Test_save_item_to_file(unittest.TestCase):
"""Test ht.ui.menus.opmenu.save_item_to_file."""
@patch("ht.ui.menus.opmenu.copy_item")
def test(self, mock_copy):
"""Test saving an item to a file."""
mock_node = MagicMock(spec=hou.Node)
scriptargs = {
"node": mock_node
}
ht.ui.menus.opmenu.save_item_to_file(scriptargs)
mock_copy.assert_called_with(mock_node)
# =============================================================================
if __name__ == '__main__':
unittest.main()
|
[
"captainhammy@gmail.com"
] |
captainhammy@gmail.com
|
2d9579c4f46e697e49c1ff2919d1f2e549706639
|
51305c54f8a316b6878a4462e1ba58a55c8e320f
|
/manager/thumbgen.py
|
49950650252902242789d77d34a36ed34f1f3ad4
|
[] |
no_license
|
coinmenace/gck
|
5b15b460335c0b52925f1875ccb4fecd416008e7
|
fade84780cda218291cb2066808310c4871a06c8
|
refs/heads/master
| 2020-03-27T06:58:54.878353
| 2018-10-22T12:32:20
| 2018-10-22T12:32:20
| 146,153,068
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,078
|
py
|
from PIL import Image, ImageFile
import glob, os
from threading import *
ImageFile.LOAD_TRUNCATED_IMAGES = True
class Thumbgen:
def __init__(self,file,fullname,identifier):
sizes = [(32, 32),(64, 64),(128, 128),(256, 256),(512, 512),(1024, 1024),(2048, 2048)]
self.generateThumb(identifier,file,fullname,sizes)
def generateThumb(self,identifier,file,fullname,sizes):
for size in sizes:
t=Thread(target=generateImages,args=(identifier,file,fullname,size,))
t.start()
t.join()
def generateImages(identifier,file,fullname,size):
#print "Open "+fullname
im = Image.open(fullname)
im.thumbnail(size)
if not os.path.exists("website/static/thumbs/"+identifier+"/"):
os.mkdir("website/static/thumbs/"+identifier+"/")
file="website/static/thumbs/"+identifier+"/"+file.split(".")[0]+"_"+str(size[0])+"_"+str(size[1])
im.save(file + ".png",format="PNG", quality=95, optimize=True, progressive=True)
if __name__=="__main__":
filename="sample.png"
t=Thumbgen(filename)
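A minimal sketch of the Pillow call at the heart of generateImages, showing that thumbnail resizes in place and preserves aspect ratio:

from PIL import Image

im = Image.new("RGB", (1200, 800))
im.thumbnail((256, 256))    # modifies im in place
print(im.size)              # longer side capped at 256, height scaled to keep the aspect ratio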
|
[
"webframes@gmail.com"
] |
webframes@gmail.com
|
ff04d53e4dd4235eb7334cc856b400d44b802eb2
|
0d1377c871764252fe2f44e2e6b784c49f18df62
|
/0x0F-python-object_relational_mapping/model_city.py
|
2b10f600b8123223e444e6e037e51bc805ba9a23
|
[] |
no_license
|
ElianaGomez2020/holbertonschool-higher_level_programming
|
a0ecba588091fe5bae0f529f3fb321cd3c4c7329
|
53754d496e561fec26ae56ba6f38d07cc3968914
|
refs/heads/master
| 2023-03-28T15:19:24.082338
| 2021-04-02T16:11:06
| 2021-04-02T16:11:06
| 291,766,013
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
#!/usr/bin/python3
"""contains the class definition of a City"""
from sqlalchemy import Column, Integer, String, ForeignKey
from model_state import Base
class City(Base):
"""City class"""
__tablename__ = "cities"
id = Column(Integer, primary_key=True, autoincrement=True, nullable=False)
name = Column(String(128), nullable=False)
state_id = Column(Integer, ForeignKey("states.id"), nullable=False)
|
[
"elianagomez.s@outlook.com"
] |
elianagomez.s@outlook.com
|
263b24f9d47303128b0b13be8f3eb6f5571bb848
|
db8ac9c3a3a3176574bfdd53b91683684f0ca24c
|
/phase_space_test.py
|
bcc4d769431883f2b95efe07cdece55c493dc1f0
|
[] |
no_license
|
skostogl/GPU-Tracking-code
|
05247364d17987ee17cea696db67589a1a4979d2
|
41577514021edf3119ddee2673c57a8ce1a097ca
|
refs/heads/master
| 2020-12-24T10:58:46.607065
| 2017-03-16T11:17:55
| 2017-03-16T11:17:55
| 73,206,822
| 2
| 2
| null | 2016-11-10T17:42:27
| 2016-11-08T16:47:41
|
C++
|
UTF-8
|
Python
| false
| false
| 1,851
|
py
|
import pickle
from modules.tracker import *
from modules.naff import *
from modules.grid import *
from modules.tune_resonances import *
from modules.FMA import *
from mpl_toolkits.axes_grid.anchored_artists import AnchoredText
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.pyplot import cm
lattice = Lattice()
if ( True ):
#if ( False ):
lattice.read_twiss_table("LHC/lhc_no_bb.twi")
lattice.optimise()
lattice.compile()
lattice.write_ptx("LHC/lhc_no_bb")
else:
lattice.read_ptx("LHC/lhc_no_bb")
lattice.n_turns = 5000
lattice.norm_emit_x = 2e-6
lattice.norm_emit_y = 2e-6
lattice.collect_tbt_data = 1 # every 1 turn
#b,grid = cmp_grid (lattice.sigma_x(), lattice.sigma_x()*8, lattice.sigma_y(), lattice.sigma_y()*8,1,lattice)
#n_particles=b.size()
n_particles=20
b=HostBunch(n_particles)
for i in range (n_particles):
b.y[i]=0
b.yp[i]=0
b.x[i]=(i+1)*lattice.sigma_x()
b.xp[i]=0
#b.d[i]=100e-5
#lattice.track(b)
#for i in range (n_particles):
# filename = '/home/skostogl/cuTrack/dat_files/particles/particle_%d.dat'%i
# tbt = [ (b.x[i], b.xp[i], b.y[i], b.yp[i]) for b in lattice.turns ]
# with open(filename,'w') as outfile:
# for t in tbt:
# outfile.write("{} {} {} {}\n".format(t[0], t[1], t[2], t[3]))
#
#
#fig,ax=plt.subplots()
#color=iter(cm.rainbow (np.linspace(0,1,n_particles)))
#for i in range (n_particles):
# c=next(color)
# for j in (lattice.turns):
# plt.plot(j.x[i]*1e3, j.xp[i]*1e3, c=c,marker='o',ms=3,markeredgewidth=0.0)
#
fig, ax = plt.subplots()    # needed below: the plotting loop that created fig/ax is commented out
plt.xlabel(r'$x [mm]$', fontsize=20)
plt.ylabel(r'$x_p [mm]$', fontsize=20)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
annotation_string=r'$\delta=0$'
at = AnchoredText(annotation_string,prop=dict(size=18), frameon=True,loc=1)
at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
ax.add_artist(at)
plt.tight_layout()
plt.show()
|
[
"sofia.kostoglou@cern.ch"
] |
sofia.kostoglou@cern.ch
|
45733874fe84b5e216c9c12f97d47074eb2cfdaf
|
83c7fc1aeff81e95412ef0ec284e2c86a3aea448
|
/pragmatic/urls.py
|
8f8a97a6a04abf2284d18531925cec72c2f1f043
|
[] |
no_license
|
diegomazorra1/pragmatic
|
044b90c94ffc129073c5b1e4212368a287f60db9
|
94639a161eb1c822802729c415f562832232435d
|
refs/heads/master
| 2020-12-04T09:29:13.042970
| 2020-01-10T20:54:01
| 2020-01-10T20:54:01
| 231,711,356
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,418
|
py
|
"""pragmatic URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from apps.cotizacion.views import PagPrincipal
from django.contrib.auth.views import LoginView
from django.contrib.auth import views as auth_views
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('usuario/',include('apps.usuario.urls', namespace='usuario')),
path('cotizacion/', include('apps.cotizacion.urls', namespace='cotizacion')),
path('inventario/', include('apps.inventario.urls', namespace='inventario')),
path('', PagPrincipal.as_view(),name='inicio'),
path('accounts/login/', auth_views.LoginView.as_view(template_name='login/login.html'), name='login'),
path('accounts/logout/', auth_views.LogoutView.as_view(), name='logout'),
]
|
[
"ingenierodiegomazorra@gmail.com"
] |
ingenierodiegomazorra@gmail.com
|
167fad9e892a205acf7896cdaf131a41ea63ec1f
|
2427bf2ec03b9fc9a01c39962acefdcabd822f9d
|
/movies/domain/allCombined.py
|
cd11c57fbc854de2459aaa56647e63d264c2d05b
|
[] |
no_license
|
adso514/Assignment-2-235-adso514
|
6cf56125531b5790960d7c8cf3ba458985419cb9
|
a37d2e48399edf9ee6220d1c267306888733ce0b
|
refs/heads/master
| 2023-01-04T15:12:15.191374
| 2020-10-27T00:39:30
| 2020-10-27T00:39:30
| 307,074,504
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,627
|
py
|
from datetime import date, datetime
class Actor:
def __init__(self, actor_full_name: str):
if actor_full_name == "" or type(actor_full_name) is not str:
self.__actor_full_name = None
else:
self.__actor_full_name = actor_full_name.strip()
self.__colleagues_list = []
@property
def actor_full_name(self) -> str:
return self.__actor_full_name
@property
def colleagues(self) -> list:
return self.__colleagues_list
def __repr__(self):
return f"<Actor {self.__actor_full_name}>"
def __eq__(self, other):
return self.actor_full_name == other.actor_full_name
def __lt__(self, other):
return self.actor_full_name < other.actor_full_name
def __hash__(self):
return hash(self.actor_full_name)
def add_actor_colleague(self, colleague):
self.__colleagues_list.append(colleague)
def check_if_this_actor_worked_with(self, colleague):
return colleague in self.__colleagues_list
class Director:
def __init__(self, director_full_name: str):
if director_full_name == "" or type(director_full_name) is not str:
self.__director_full_name = None
else:
self.__director_full_name = director_full_name.strip()
@property
def director_full_name(self) -> str:
return self.__director_full_name
def __repr__(self):
return f"<Director {self.__director_full_name}>"
def __eq__(self, other):
return self.director_full_name == other.director_full_name
def __lt__(self, other):
return self.director_full_name < other.director_full_name
def __hash__(self):
return hash(self.director_full_name)
class Genre:
def __init__(self, genre_name: str):
if genre_name == "" or type(genre_name) is not str:
self.__genre_name = None
else:
self.__genre_name = genre_name.strip()
@property
def genre_name(self) -> str:
return self.__genre_name
def __repr__(self):
return f"<Genre {self.__genre_name}>"
def __eq__(self, other):
return self.genre_name == other.genre_name
def __lt__(self, other):
return self.genre_name < other.genre_name
def __hash__(self):
return hash(self.genre_name)
class Movie:
def __init__(self, movie_title: str, release_year: int):
if movie_title == "" or type(movie_title) is not str:
self.__movie_title = None
else:
self.__movie_title = movie_title.strip()
if type(release_year) is not int:
self.__release_year = None
else:
if release_year < 1900:
self.__release_year = None
else:
self.__release_year = release_year
self.__director = None
self.__actors = list()
self.__genres = list()
self.__description = None
self.__runtime_minutes = None
self.__id = None
self.__reviews = list()
self.__imagelink = None
# A movie is considered to be uniquely defined by the combination of its title and release year
@property
def imagelink(self):
return self.__imagelink
@property
def title(self) -> str:
return self.__movie_title
@property
def year(self) -> int:
return self.__release_year
@property
def director(self) -> Director:
return self.__director
@property
def id(self) -> int:
return self.__id
@director.setter
def director(self, director):
self.__director = director
@property
def actors(self) -> list:
return self.__actors
@property
def genres(self) -> list:
return self.__genres
@property
def reviews(self) -> list:
return self.__reviews
@property
def description(self) -> str:
return self.__description
@description.setter
def description(self, description):
self.__description = description.strip()
@property
def runtime_minutes(self) -> int:
return self.__runtime_minutes
@runtime_minutes.setter
def runtime_minutes(self, runtime_minutes):
if type(runtime_minutes) is not int:
raise ValueError
else:
if runtime_minutes >= 0:
self.__runtime_minutes = runtime_minutes
else:
raise ValueError
def add_id(self, rank: int):
self.__id = rank
def add_actor(self, actor: Actor):
self.__actors.append(actor)
def add_director(self, director: Director):
self.__director = director
def add_imagelink(self, link):
self.__imagelink = link
def remove_actor(self, actor: Actor):
actors_list = self.__actors
if actor in actors_list:
for i in range(len(actors_list) - 1, -1, -1):
if actor == actors_list[i]:
actors_list.pop(i)
self.__actors = actors_list
def add_genre(self, genre: Genre):
self.__genres.append(genre)
def add_review(self, review):
self.__reviews.append(review)
def remove_genre(self, genre: Genre):
genres_list = self.__genres
if genre in genres_list:
for i in range(len(genres_list)-1, -1, -1):
if genre == genres_list[i]:
genres_list.pop(i)
self.__genres = genres_list
def __repr__(self):
return f"<Movie {self.__movie_title}, {self.__release_year}>"
def __eq__(self, other):
return self.title == other.title and self.__release_year == other.__release_year
def __lt__(self, other):
return f"{self.__movie_title}{self.__release_year}" < f"{other.__movie_title}{other.__release_year}"
def __hash__(self):
return hash(f"{self.__movie_title}{self.__release_year}")
class User:
def __init__(self, username: str, password):
self.__user_name = username.strip()
self.__watched_movies = []
self.__reviews = []
self.__time_spent_watching_movies_minutes = 0
self.__password = password
self.__watchlist = []
@property
def username(self) -> str:
return self.__user_name
@property
def watchlist(self) -> list:
return self.__watchlist
@property
def password(self):
return self.__password
@property
def watched_movies(self) -> list:
return self.__watched_movies
@property
def reviews(self) -> list:
return self.__reviews
@property
def time_spent_watching_movies_minutes(self) -> int:
return self.__time_spent_watching_movies_minutes
def __repr__(self):
return f"<User {self.__user_name} {self.__password}>"
def __eq__(self, other):
return self.username == other.username
def __lt__(self, other):
return self.username < other.username
def __hash__(self):
return hash(self.username)
def watch_movie(self, movie: Movie):
self.__watched_movies.append(movie)
self.__time_spent_watching_movies_minutes += movie.runtime_minutes
def add_review(self, review):
self.__reviews.append(review)
def add_to_watchlist(self, movie):
self.__watchlist.append(movie)
def remove_from_watchlist(self, movie):
watchlist = self.__watchlist
if movie in watchlist:
for i in range(len(watchlist) - 1, -1, -1):
if movie == watchlist[i]:
watchlist.pop(i)
self.__watchlist = watchlist
from datetime import datetime  # needed by Review.__init__; harmless if already imported at the top of the file
class Review:
def __init__(self, user: User, movie: Movie, review_text: str):
self.__movie = movie
self.__review_text = review_text
self.__timestamp = datetime.now()
self.__user = user
@property
def movie(self) -> Movie:
return self.__movie
@property
def user(self) -> User:
return self.__user
@property
def username(self) -> str:
return self.__user.username
@property
def review_text(self) -> str:
return self.__review_text
@property
def timestamp(self) -> datetime:
return self.__timestamp
    def __eq__(self, other):
        # Review has no rating attribute, so equality is based on movie, text and timestamp only.
        return self.movie == other.movie and self.review_text == other.review_text and self.timestamp == other.timestamp
def make_review(review_text: str, user: User, movie: Movie):
review = Review(user, movie, review_text)
user.add_review(review)
movie.add_review(review)
return review
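# A minimal usage sketch (hypothetical values, not part of the original model):
# wiring the classes above together the way a consumer of this module would.
if __name__ == "__main__":
    movie = Movie("Moana", 2016)
    movie.runtime_minutes = 107
    movie.add_genre(Genre("Animation"))
    movie.add_director(Director("Ron Clements"))
    movie.add_actor(Actor("Auli'i Cravalho"))
    user = User("nton939", "pw12345")
    user.watch_movie(movie)                         # also accumulates runtime
    review = make_review("Great soundtrack!", user, movie)
    print(user.time_spent_watching_movies_minutes)  # 107
    print(movie.reviews[0] is review)               # True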
|
[
"adso514@aucklanduni.ac.nz"
] |
adso514@aucklanduni.ac.nz
|
bf851c29b826417e4b89cc581539ab78bec35eb2
|
55ce104faddb4a723567ced94dd93e88e40781cf
|
/week5/w5d5/w5d5exercice1/rock-paper-scissors.py
|
21e027e3a5ff852576e14728ada20523cba74785
|
[] |
no_license
|
leasoussan/DIpython
|
134cd7549e424837189f84cfd063db4db60a044e
|
1eb83fe9ea53a4881f73cd60ae9b5161b4fb1e20
|
refs/heads/master
| 2023-02-08T02:57:21.091078
| 2020-12-15T21:35:18
| 2020-12-15T21:35:18
| 305,334,529
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,526
|
py
|
from game import *
# Rock-Paper-Scissors.Py : Create 3 Functions
# get_user_menu_choice() - this should display a simple menu,
# get the user’s choice (with data validation), and return the choice. No looping should occur here.
# print_results(results) – this should print the results of the games played.
# It should have a single parameter named results;
# which will be a dictionary of the results of the games played.
# It should display these results in a user-friendly way,
# and thank the user for playing.
# Note: results should be in this form: {win: 2,loss: 4,draw: 3}.
# Bear in mind that this dictionary will need to be created and populated in some other part of our code,
# and passed in to the print_results function at the right time.
# main() - the main function. It should take care of 3 things:
# displaying the menu repeatedly, until the user types in the value to exit the program: ‘x’ or ‘q’, whatever you decide. (Make use of the get_user_menu_choice function)
# When the user chooses to play a game:
# Create a new Game object (see below), and call its play()* function, receiving the result of the game that is returned.
# Remember the results of every game that is played. More about this below.
# When the user chooses to exit the program, call the print_results function in order to display a summary of all the games played.
def get_user_menu_choice():
    print("Menu")
    print("(g) Play a game")
    print("(x) Show scores and exit")
    choice_input = input().strip().lower()
    if choice_input not in ["g", "x", "q"]:
        print("Please choose g or x")
    elif choice_input == "g":
        print("Let's start!")
    return choice_input
#
# this should print the results of the games played.
# It should have a single parameter named results; which will be a dictionary
# of the results of the games played. It should display these results in a
# user-friendly way, and thank the user for playing.
# Note: results should be in this form: {win: 2,loss: 4,draw: 3}.
# Bear in mind that this dictionary will need to be created and populated in
# some other part of our code, and passed in to the print_results function at
# the right time.
def print_results(results):
    print(f"Game results:\n"
          f"You won: {results['win']} times\n"
          f"You lost: {results['loss']} times\n"
          f"You drew: {results['draw']} times\n"
          f"\n"
          f"Thank you for playing!")
def main():
results = {
"win":0,
"loss": 0,
"draw" :0
}
while True:
user_menu_choice = get_user_menu_choice()
if user_menu_choice == "g":
round_result = Game().play()
results[round_result] += 1
elif user_menu_choice in ["x", "q"]:
print_results(results)
break
        else:
            # invalid input was already reported by get_user_menu_choice()
            continue
# displaying the menu repeatedly,
# until the user types in the value to exit the program: ‘x’ or ‘q’,
# whatever you decide. (Make use of the get_user_menu_choice function)
# When the user chooses to play a game:
# Create a new Game object (see below), and call its play()* function,
# receiving the result of the game that is returned.
# Remember the results of every game that is played. More about this below.
# When the user chooses to exit the program,
# call the print_results function in order to display a summary of all
# the games played.
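# game.py is not included in this record, so Game is undefined here. Below is
# a hypothetical stand-in (an assumption, not the author's game.py), based
# only on what main() implies: Game().play() returns "win", "loss" or "draw".
# It is defined only if the star-import above did not already provide Game.
if "Game" not in globals():
    import random

    class Game:
        def play(self):
            user = input("rock, paper or scissors? ").strip().lower()
            computer = random.choice(["rock", "paper", "scissors"])
            print(f"Computer chose {computer}.")
            if user == computer:
                return "draw"
            beats = {"rock": "scissors", "paper": "rock", "scissors": "paper"}
            return "win" if beats.get(user) == computer else "loss"

# Entry point: the original file defines main() but never calls it.
if __name__ == "__main__":
    main()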
|
[
"leasoussan@hmail.com"
] |
leasoussan@hmail.com
|
6679ecf0979fc944496c2a7be40579a31f7d9871
|
a32133131ec1153ec9cb1537c757881124cd46b9
|
/trainers/create_batch_benchmark.py
|
01624d83c71a94e88b7e1a0be542df83e9b3a867
|
[] |
no_license
|
yli5/bdtaunu_hadron_learning
|
d9130bb4ff3f2f10c7f4b5b9c4953a7e8a2e66d6
|
6d09f3388494afe7ca15663bca21652ac55361ab
|
refs/heads/master
| 2021-04-15T17:33:26.378184
| 2018-05-07T18:13:46
| 2018-05-07T18:13:46
| 126,240,103
| 0
| 0
| null | 2018-05-04T20:44:21
| 2018-03-21T21:00:57
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,141
|
py
|
import sys
import time
import numpy as np
import bisect
import matplotlib.pyplot as plt
from os import path
lib_path = path.dirname(path.dirname(path.abspath(__file__)))
sys.path.append(lib_path)
from preprocess.PreProcess import PreProcess, load_data
from util.resampling import binary_downsampling, binary_upsampling
def get_data(data, fit=True):
# Preprocessor
process_path = {'imputer': '../preprocess/imputer.pkl', 'scaler': '../preprocess/scaler.pkl', 'encoder': '../preprocess/encoder.pkl'}
processor = PreProcess()
X, y, w = load_data(data, process_path, fit=fit)
# Resample to balance labels
X, Y, W = binary_upsampling(X, y, w)
# Return labels as 1D array
assert X.shape[0] == Y.shape[0]
return X, Y, W
def alias_setup(probs):
K = len(probs)
q = np.zeros(K)
J = np.zeros(K, dtype=np.int)
# Sort the data into the outcomes with probabilities
# that are larger and smaller than 1/K.
smaller = []
larger = []
for kk, prob in enumerate(probs):
q[kk] = K*prob
if q[kk] < 1.0:
smaller.append(kk)
else:
larger.append(kk)
# Loop though and create little binary mixtures that
# appropriately allocate the larger outcomes over the
# overall uniform mixture.
while len(smaller) > 0 and len(larger) > 0:
small = smaller.pop()
large = larger.pop()
J[small] = large
q[large] = q[large] - (1.0 - q[small])
if q[large] < 1.0:
smaller.append(large)
else:
larger.append(large)
return J, q
def alias_draw(J, q):
K = len(J)
# Draw from the overall uniform mixture.
kk = int(np.floor(np.random.rand()*K))
# Draw from the binary mixture, either keeping the
# small one, or choosing the associated larger one.
if np.random.rand() < q[kk]:
return kk
else:
return J[kk]
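# A quick sanity check for the alias sampler above (a sketch added for
# illustration, not part of the original benchmark): draw many samples from a
# known distribution and confirm the empirical frequencies roughly match the
# input probabilities. Call _alias_demo() manually to run it.
def _alias_demo(n_draws=100000):
    probs = [0.5, 0.3, 0.2]
    J, q = alias_setup(probs)
    counts = np.zeros(len(probs))
    for _ in xrange(n_draws):
        counts[alias_draw(J, q)] += 1
    print 'empirical frequencies:', counts / n_draws  # expect ~[0.5 0.3 0.2]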
if __name__ == '__main__':
# Load training data
print 'Loading training data and preprocessing ......'
start_time = time.time()
training_data = '../data/train.csv'
x, y, w = get_data(training_data)
end = time.time()
print 'Done. Took {} seconds.'.format(end - start_time)
print
# Parameters
n = x.shape[0]
BATCH_SIZE = 256
NUM_EPOCHS = 1
# Create batches by partitioning
# print 'Batch sampling by partitioning ......'
# start_time = time.time()
# for step in xrange(int(NUM_EPOCHS * n) // BATCH_SIZE):
# offset = (step * BATCH_SIZE) % (n - BATCH_SIZE)
# x_batch = x[offset:(offset + BATCH_SIZE), ...]
# y_batch = y[offset:(offset + BATCH_SIZE)]
# end = time.time()
# print 'Done. Took {} seconds.'.format(end - start_time)
# print
# Create batches by sampling by weights
J, q = alias_setup(w)
cs = np.cumsum(w)
# s = np.cumsum(p)
print 'Batch sampling by weights ......'
print
results = []
results_part = []
batch_sizes = [1024, 512, 256, 128, 64]
def get_partition_idxs(cs, n_per_batch):
'''
cs: cumulative sum of probability vector
n_per_batch: size of each batch
'''
idxs = []
n = len(cs)
approx_batch_weight = float(n_per_batch)/n
for i in range(0, n/n_per_batch):
idxs.append(bisect.bisect(cs, (i+1)*approx_batch_weight))
return idxs
batch_idxs = get_partition_idxs(cs, BATCH_SIZE)
for BATCH_SIZE in batch_sizes:
print 'Batch size = {}'.format(BATCH_SIZE)
start_time = time.time()
# Sampling according to weights
prev_batch_end_idx = -1
for step in xrange(int(NUM_EPOCHS * n) // BATCH_SIZE):
# # Naive; very slow
# rand_idxs = np.random.choice(n, size=BATCH_SIZE,
# replace=False, p=p)
#
# # Still too slow
# r = np.random.rand(n)
# rand_idxs = (s < r).sum()
#
# Faster
rand_idxs = []
for i in range(BATCH_SIZE):
rand_idxs.append(bisect.bisect(cs, np.random.random() * cs[-1]))
#
# # Walker's alias method
# rand_idxs = np.zeros(BATCH_SIZE)
# for i in xrange(BATCH_SIZE):
# rand_idxs[i] = alias_draw(J, q)
# rand_idxs = rand_idxs.astype(np.int)
#
# x_batch = x[rand_idxs]
# y_batch = y[rand_idxs]
#
# # Batch selection
# batch_idx = batch_idxs[step % NUM_EPOCHS]
# x_batch = x[prev_batch_end_idx+1:batch_idx, ...]
# y_batch = y[prev_batch_end_idx+1:batch_idx]
# if batch_idx == batch_idxs[-1]:
# prev_batch_end_idx = -1
# else:
# prev_batch_end_idx = batch_idx
#
# # Sequential selection
# #start_idx = np.random.randint(n/BATCH_SIZE*BATCH_SIZE)
# start_idx = 0
# sum_w = 0.
# batch_w = float(BATCH_SIZE) / n
# i = start_idx
# idxs = []
# while sum_w < batch_w:
# if np.random.random() < w[i]:
# sum_w += w[i]
# idxs.append(i)
# elif i == start_idx:
# start_idx = i + 1
# i += 1
# x_batch = x[idxs, ...]
# y_batch = y[idxs]
end = time.time()
results.append(end-start_time)
print 'Done. Took {} seconds.'.format(end - start_time)
print
# Batch sample by partitioning
start_time = time.time()
for step in xrange(int(NUM_EPOCHS * n) // BATCH_SIZE):
offset = (step * BATCH_SIZE) % (n - BATCH_SIZE)
x_batch = x[offset:(offset + BATCH_SIZE), ...]
y_batch = y[offset:(offset + BATCH_SIZE)]
end = time.time()
results_part.append(end-start_time)
print 'Done. Took {} seconds.'.format(end - start_time)
print
ax = plt.figure().gca()
ax.plot(batch_sizes, results, color='r')
ax2 = ax.twinx()
ax2.plot(batch_sizes, results_part)
plt.show()
|
[
"jaehongkim86@gmail.com"
] |
jaehongkim86@gmail.com
|
b524fe5caa3d77e5a88deb2e1aca3844f930eedf
|
40491d4649bc8f981cfd531657b0970e2577edd1
|
/Policy_Gradient/Tank_1/params.py
|
591b4c09a4383ccea277dcc219593c967ce568b8
|
[] |
no_license
|
emedd33/Reinforcement-Learning-in-Process-Control
|
d82ddab87dc6727a70ee38d53889aa8af87ade25
|
24bc1d9b72c0762bd92c215837347d6548099902
|
refs/heads/master
| 2022-07-12T02:53:52.208320
| 2022-04-05T15:23:48
| 2022-04-05T15:23:48
| 161,691,463
| 29
| 11
| null | 2022-06-21T21:39:15
| 2018-12-13T20:29:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,408
|
py
|
MAIN_PARAMS = {
"EPISODES": 20000,
"MEAN_EPISODE": 50,
"MAX_TIME": 200,
"RENDER": True,
"MAX_MEAN_REWARD": 200, # minimum reward before saving model
}
AGENT_PARAMS = {
"N_TANKS": 1,
"SS_POSITION": 0.5,
"VALVE_START_POSITION": 0.2,
"ACTION_DELAY": [5],
"INIT_ACTION": 0.3,
"VALVEPOS_UNCERTAINTY": 0,
"EPSILON_DECAY": [1],
"LEARNING_RATE": [0.0005],
"HIDDEN_LAYER_SIZE": [[5, 5]],
"BATCH_SIZE": 5,
"MEMORY_LENGTH": 10000,
"OBSERVATIONS": 4, # level, gradient, is_above 0.5, prevous valve position
"GAMMA": 0.9,
"EPSILON": [0],
"EPSILON_MIN": [0],
"BASE_LINE_LENGTH": 1,
"Z_VARIANCE": [0.05],
"SAVE_MODEL": [True],
"LOAD_MODEL": [False],
"TRAIN_MODEL": [True],
"LOAD_MODEL_NAME": [""],
"LOAD_MODEL_PATH": "Policy_Gradient/Tank_1/",
"SAVE_MODEL_PATH": "Policy_Gradient/Tank_1/",
}
# Model parameters Tank 1
TANK1_PARAMS = {
"height": 10,
"init_level": 0.5,
"width": 10,
"pipe_radius": 0.5,
"max_level": 0.75,
"min_level": 0.25,
}
TANK1_DIST = {
"add": True,
"pre_def_dist": False,
"nom_flow": 1, # 2.7503
"var_flow": 0.1,
"max_flow": 2,
"min_flow": 0.7,
"add_step": False,
"step_time": int(MAIN_PARAMS["MAX_TIME"] / 2),
"step_flow": 2,
"max_time": MAIN_PARAMS["MAX_TIME"],
}
TANK_PARAMS = [TANK1_PARAMS]
TANK_DIST = [TANK1_DIST]
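# A minimal consumer sketch (hypothetical; the real trainer lives elsewhere in
# this repo). The dictionaries above are plain module-level config, so a
# trainer simply indexes the per-tank lists by tank number:
if __name__ == "__main__":
    for tank_cfg, dist_cfg in zip(TANK_PARAMS, TANK_DIST):
        print("tank height:", tank_cfg["height"],
              "| nominal disturbance flow:", dist_cfg["nom_flow"])
    print("episodes:", MAIN_PARAMS["EPISODES"],
          "| learning rate:", AGENT_PARAMS["LEARNING_RATE"][0])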
|
[
"eskild.emedd33@gmail.com"
] |
eskild.emedd33@gmail.com
|
f226ecd710abd15e52e47a41bb94ec641d607bef
|
b2a1813da144ba4a86f6db5378c55eca8fc91e7a
|
/jump7.py
|
b1dcc6172f9ad6463ff9c9e86e1e791a26186a70
|
[] |
no_license
|
yongzhiji/shiyanlou-code
|
bc201d24fbfd39cb006849394121cfc58ff24d2c
|
e3061f50014f43f75ad6b26eb3e7d2b42d1410d0
|
refs/heads/master
| 2022-03-31T19:46:58.191232
| 2020-02-15T07:50:21
| 2020-02-15T07:50:21
| 240,661,613
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
for x in range(1,101):
if x%7==0 :
continue
elif x%10==7 or x//10==7:
continue
else:
print(x)
|
[
"985302188@qq.com"
] |
985302188@qq.com
|
012ee645b66bdf19e05e944be0e68d6bbf38c7d0
|
59934c214dc37e916042a9ef95004cdec59fe461
|
/image_simplifier.py
|
11ed7a060c544182169ee7c80fac34dbccd9e868
|
[] |
no_license
|
thomcchester/boxifier
|
a27101aaa690ae1f04bc3e6cabe0bd3cf3495f46
|
48fa1e634374468f73f138ea79c09dbd480ee8ca
|
refs/heads/master
| 2020-05-18T15:44:29.672575
| 2019-05-03T05:15:48
| 2019-05-03T05:15:48
| 184,506,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 979
|
py
|
import numpy as np
## This file is just for simplifying the image into constituent parts
# This is a function to flatten a value to 0 or 1 for the array. I also flipped the values, since I wanted to use 1 as black
# and 0 as white, mainly as a preference but also because ones look like lines. The idea is simple, and a bit of an over-
# simplification, but I am going to say that if the hue amplitude is over halfway it is white, and if it is under halfway
# it is black. I might come back to this to allow for differences in color and what not.
def flattenator(value_list):
if value_list[1] < 128:
return 1
else:
return 0
# asciiator is function used to turn the entirety of the array into ascii-art 0 and 1 of the image for later processing
def asciiator(image):
image_list = []
for i in image:
row_list = []
for m in i:
row_list.append(flattenator(m))
image_list.append(row_list)
return np.array(image_list)
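# A minimal usage sketch (hypothetical input; the real caller presumably loads
# an image with an imaging library first). asciiator expects a 2D grid of
# pixels where each pixel is indexable and its second channel is compared
# against 128:
if __name__ == "__main__":
    fake_image = [
        [(0, 200, 0), (0, 50, 0)],
        [(0, 50, 0), (0, 200, 0)],
    ]
    print(asciiator(fake_image))  # -> [[0 1]
                                  #     [1 0]]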
|
[
"thomcchester@gmail.com"
] |
thomcchester@gmail.com
|
c4db54872f01cdbf61c781d4bffff7eea689d74b
|
a08a95129f1e976ea6e8e4dd9a2c17260a7f0ef1
|
/models/CosineClassifier.py
|
d134b20d6804c930bbcfc2769178e43c17336f44
|
[] |
no_license
|
UCSD-SVCL/SSLT
|
9c7b0f6f46ad4f9f000d548d7f275ebbd10f2f8f
|
2de03dbb3d4c0c38eeeb0d00dffac870df19c51a
|
refs/heads/main
| 2023-05-26T18:09:38.950195
| 2021-06-10T00:28:00
| 2021-06-10T00:28:00
| 374,817,518
| 0
| 0
| null | 2021-06-07T22:38:14
| 2021-06-07T22:38:14
| null |
UTF-8
|
Python
| false
| false
| 499
|
py
|
import torch
import torch.nn as nn
from torch.nn import Parameter
import torch.nn.functional as F
class CosineClassifier(nn.Module):
def __init__(self, num_classes=1000, feat_dim=2048):
super(CosineClassifier, self).__init__()
self.weight = Parameter(torch.Tensor(feat_dim, num_classes))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
def forward(self, x):
out = F.normalize(x, dim=1).mm(F.normalize(self.weight, dim=0))
return out
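# A minimal forward-pass sketch (a sanity check added for illustration, not
# part of the original file): because both the features and the weight columns
# are L2-normalized, every logit is a cosine similarity and lands in [-1, 1].
if __name__ == "__main__":
    clf = CosineClassifier(num_classes=10, feat_dim=32)
    feats = torch.randn(4, 32)   # batch of 4 feature vectors
    logits = clf(feats)          # shape (4, 10), each value in [-1, 1]
    print(logits.shape, logits.min().item(), logits.max().item())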
|
[
"boliu@eng.ucsd.edu"
] |
boliu@eng.ucsd.edu
|
cfbc0b358cbc8a73771ab602b38fe9a5b825e242
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/488/usersdata/341/112971/submittedfiles/AvF_Parte3.py
|
25d6392521b197f54357fe6d625293d8a2655e93
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
# -*- coding: utf-8 -*-
n = int(input('Digite a quantidade de números: '))
a = []
for i in range(n):
    a.append(int(input('Digite os respectivos números: ')))
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
201bfe5c7dc0bad1c20ba462e06fb93848253015
|
758bdc11c72f5348afec8c3bb47549bf85c4d4ca
|
/levelup/levelup/wsgi.py
|
09e3b66a6e7905052aed184d814ccb7eea595d85
|
[
"Apache-2.0"
] |
permissive
|
claudiaw111/cs411project
|
39020ada1bd2991810f6e4ffd6403ef9e411839d
|
e1bad3836d932a92deb0297dfc83b1c014ea4f00
|
refs/heads/master
| 2021-01-10T13:41:52.207969
| 2015-12-08T20:31:44
| 2015-12-08T20:31:44
| 45,619,629
| 0
| 0
| null | 2015-12-08T20:31:45
| 2015-11-05T15:19:19
|
Python
|
UTF-8
|
Python
| false
| false
| 391
|
py
|
"""
WSGI config for levelup project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "levelup.settings")
application = get_wsgi_application()
|
[
"yfwang10@bu.edu"
] |
yfwang10@bu.edu
|
96fd8f586922bfca585450a5943f4a0948e56b09
|
b42ccc28946274e91d1b91dc778f1ec76724ea5f
|
/authProj/settings.py
|
d6e28108755f7977598b4612955293c7379019c8
|
[] |
no_license
|
vlad508/authProjBack
|
30cc90f332cbce85842efaecc52484b01a9e3d30
|
536641b3809bc410b7cbe329558963c83e5b793e
|
refs/heads/master
| 2022-11-08T14:36:48.597109
| 2020-06-12T07:48:33
| 2020-06-16T08:32:51
| 271,738,662
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,848
|
py
|
"""
Django settings for authProj project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'j__99bn%&si@u9kzuhfa-ztzxa3ilu52tsjh)t5$1^sn_7dxm3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['localhost', '127.0.0.1', '161.35.199.210']
CORS_ORIGIN_ALLOW_ALL = True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'rest_framework',
'rest_framework.authtoken',
'user',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    # CorsMiddleware must come before any middleware that can generate
    # responses, in particular CommonMiddleware (django-cors-headers docs).
    'corsheaders.middleware.CorsMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'authProj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'authProj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'myproject',
'USER': 'myprojectuser',
'PASSWORD': 'password',
'HOST': 'localhost',
'PORT': ''
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
# STATIC_ROOT must be a single filesystem path, not a list.
STATIC_ROOT = os.path.join(BASE_DIR, 'django_blog/static')
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.AllowAny',
],
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
],
}
|
[
"vlad508@gmail.com"
] |
vlad508@gmail.com
|