content stringlengths 5 1.05M |
|---|
from dsbox.template.template import DSBoxTemplate
from d3m.metadata.problem import TaskKeyword
from dsbox.template.template_steps import TemplateSteps
from dsbox.schema import SpecializedProblem
import typing
import numpy as np # type: ignore
class RegressionWithSelection(DSBoxTemplate):
    """Regression template that runs DSBox feature selection before modeling.

    Candidate learners are SGD regression and gradient-boosted trees, each
    with a small hyperparameter grid for the template search to explore.
    """

    def __init__(self):
        DSBoxTemplate.__init__(self)
        sgd_model = {
            "primitive": "d3m.primitives.regression.sgd.SKlearn",
            "hyperparameters": {
                "loss": ['squared_loss', 'huber'],
                "alpha": [float(x) for x in np.logspace(-5, -1.004, 7)],  # cannot reach 0.1
                "l1_ratio": [0.01, 0.15, 0.3, 0.5, 0.6, 0.7, 0.9],  # cannot reach 1
                "learning_rate": ['optimal', 'invscaling'],
                'add_index_columns': [True],
                'use_semantic_types': [True],
            },
        }
        gbr_model = {
            "primitive": "d3m.primitives.regression.gradient_boosting.SKlearn",
            "hyperparameters": {
                'max_depth': [2, 3, 5],
                'n_estimators': [100, 150, 200],
                'learning_rate': [0.1, 0.3, 0.5],
                'min_samples_split': [2, 3],
                'min_samples_leaf': [1, 2],
                'add_index_columns': [True],
                'use_semantic_types': [True],
            },
        }
        model_step = {
            "name": "model_step",
            "primitives": [sgd_model, gbr_model],
            "inputs": ["feature_selector_step", "extract_target_step"],
        }
        self.template = {
            "name": "regression_with_feature_selection",
            "taskSubtype": {TaskKeyword.UNIVARIATE.name, TaskKeyword.MULTIVARIATE.name},
            "taskType": TaskKeyword.REGRESSION.name,
            "inputType": "table",  # See SEMANTIC_TYPES.keys() for range of values
            "output": "model_step",            # step generating the prediction
            "target": "extract_target_step",   # step generating the ground truth
            "steps": (TemplateSteps.human_steps()
                      + TemplateSteps.dsbox_feature_selector("regression")
                      + [model_step]),
        }
|
import re
# NOTE(review): `re` appears unused in this script — confirm before removing.
# Parse the puzzle input into a list of (opcode, argument) tuples,
# e.g. the line "acc +3" becomes ("acc", 3).
with open('input.txt') as file:
    CMDS = []
    for line in file:
        cmd, v = line.strip().split(' ')
        CMDS.append((cmd, int(v)))
def go(cmds):
    """Run the hand-held console program until an instruction repeats.

    Parameters
    ----------
    cmds : list of (str, int)
        Program as (opcode, argument) pairs; opcodes are 'acc', 'jmp', 'nop'.

    Returns
    -------
    int
        Accumulator value just before any instruction executes a second time.

    Raises
    ------
    IndexError
        If execution runs off the end of the program (i.e. it terminates
        normally); the accumulator is printed first. brute_force() relies on
        this behavior to surface the answer for a successfully fixed program.
    """
    visited = set()  # set: O(1) membership test (was a list, O(n) per step)
    acc = 0
    index = 0
    while True:
        if index in visited:
            return acc
        visited.add(index)
        try:
            cmd, v = cmds[index]
        except IndexError:
            print(f"Index out of range. Index: {index}, list length: {len(cmds)}, acc: {acc}")
            raise
        if cmd == 'acc':
            acc += v
        elif cmd == 'jmp':
            index += v - 1  # -1 cancels the unconditional +1 below
        # 'nop' falls through: only the +1 advance applies.
        index += 1
# Part 1: accumulator value when the program first revisits an instruction.
v = go(CMDS)
print(v)
def brute_force():
    """Part 2: flip one nop<->jmp at a time until the program terminates.

    A terminating candidate makes go() raise IndexError after printing the
    accumulator, which aborts this loop with the answer already shown.
    """
    flips = {'nop': 'jmp', 'jmp': 'nop'}
    i = -1
    while True:
        candidate = CMDS.copy()
        # Advance to the next instruction that can be flipped.
        i += 1
        while candidate[i][0] not in flips:
            i += 1
        cmd, v = candidate[i]
        candidate[i] = (flips[cmd], v)
        go(candidate)
brute_force() |
# Modules
import matplotlib.pyplot as plt
import numpy as np
from scipy.ndimage import rotate
from scipy.stats import skewnorm
def gaussian(angle=0.0, size=(100, 100), skew=(0.0, 0.0), std=(1.0, 1.0)):
    """Generate an asymmetric (skew-normal) Gaussian kernel.

    Keyword Arguments:
        angle {float} -- Rotation in degrees (default: {0.0})
        size {tuple} -- Kernel size (default: {(100, 100)})
        skew {tuple} -- Skews of the distribution, one per axis (default: {(0.0, 0.0)})
        std {tuple} -- Standard deviations of the distribution, one per axis (default: {(1.0, 1.0)})

    Returns:
        array -- Asymmetric Gaussian kernel of shape ``size`` whose entries sum to 1
    """
    # Pad to account for kernel rotations
    padding = max(size)
    # Create 1D Gaussians, one skew-normal profile per axis
    pdfs = []
    for i in range(2):
        # Define padded size and centered coordinate grid for this axis
        padded = 2 * padding + size[i]
        pdf = np.arange(padded) - ((padded - 1) / 2)
        # Set the standard deviation
        pdf /= std[i]
        # Correct for the skew: rescale/shift so the requested std and
        # median still hold after skewing the distribution
        pdf *= skewnorm.std(skew[i])
        pdf += skewnorm.median(skew[i])
        # Generate probability density function
        pdf = skewnorm.pdf(pdf, skew[i])
        pdfs.append(pdf)
    # Create 2D Gaussian as the outer product of the two 1D profiles
    kernel = np.outer(*pdfs)
    # Rotate kernel and trim padding
    kernel = rotate(kernel, angle, reshape=False)
    kernel = kernel[padding:-padding, padding:-padding]
    # Normalize so the kernel sums to 1 (vectorized; was sum(sum(kernel)),
    # a slow Python-level double reduction)
    kernel /= kernel.sum()
    return kernel
def wave(angle=0.0, frequency=1.0, size=(100, 100)):
    """Generate a cosine-wave kernel.

    Keyword Arguments:
        angle {float} -- Rotation in degrees (default: {0.0})
        frequency {float} -- Number of waves per kernel length (default: {1.0})
        size {tuple} -- Kernel size (default: {(100, 100)})

    Returns:
        array -- Cosine wave kernel.

    NOTE(review): for non-square sizes the output shape is (size[1], size[0]),
    transposed relative to gaussian() -- confirm this is intended.
    """
    # Pad so the rotation below cannot clip the pattern.
    pad = max(size)
    # One linear phase ramp along the first axis, repeated down the second.
    phase = np.linspace(-np.pi, np.pi, size[0] + 2 * pad)
    grid = np.tile(phase, (size[1] + 2 * pad, 1))
    pattern = np.cos(frequency * grid)
    # Rotate, then cut the padding back off.
    rotated = rotate(pattern, angle, reshape=False)
    return rotated[pad:-pad, pad:-pad]
|
from math import log
from typing import Sequence, Union, Optional
from jina.executors.evaluators.rank import BaseRankingEvaluator
def _compute_dcg(gains, power_relevance):
"""Compute discounted cumulative gain."""
ret = 0.0
if not power_relevance:
for score, position in zip(gains[1:], range(2, len(gains) + 1)):
ret += score / log(position, 2)
return gains[0] + ret
for score, position in zip(gains, range(1, len(gains) + 1)):
ret += (pow(2, score) - 1) / log(position + 1, 2)
return ret
def _compute_idcg(gains, power_relevance):
"""Compute ideal discounted cumulative gain."""
sorted_gains = sorted(gains, reverse=True)
return _compute_dcg(sorted_gains, power_relevance)
class NDCGEvaluator(BaseRankingEvaluator):
    """
    :class:`NDCGEvaluator` evaluates normalized discounted cumulative gain for information retrieval.
    """

    def __init__(self, eval_at: Optional[int] = None, power_relevance: bool = True):
        """
        :param eval_at: if set, only the top ``eval_at`` scores are considered (nDCG@k);
            otherwise the full ranking is evaluated.
        :param power_relevance: The power relevance places stronger emphasis on retrieving relevant documents.
                For detailed information, please check https://en.wikipedia.org/wiki/Discounted_cumulative_gain
        """
        super().__init__()
        self.eval_at = eval_at
        self._power_relevance = power_relevance

    @property
    def metric(self):
        # Include the cutoff in the metric name only when one is set.
        # (was f'nDCG' -- an f-string with no placeholder)
        return f'nDCG@{self.eval_at}' if self.eval_at else 'nDCG'

    def evaluate(
            self,
            actual: Sequence[Union[int, float]],
            desired: Sequence[Union[int, float]],
            *args, **kwargs
    ) -> float:
        """
        :param actual: the scores predicted by the search system.
        :param desired: the expected score given by user as groundtruth, please ensure the value is in desc order.
        :return: the evaluation metric value for the request document.
        :raises ValueError: if either truncated sequence is empty or contains a negative gain.
        """
        # Information gain must be greater or equal to 0.
        actual_at_k = actual[:self.eval_at] if self.eval_at else actual
        desired_at_k = desired[:self.eval_at] if self.eval_at else desired
        if not actual_at_k:
            raise ValueError(f'Expecting gains at k with minimal length of 1, {len(actual_at_k)} received.')
        if not desired_at_k:
            raise ValueError(f'Expecting desired at k with minimal length of 1, {len(desired_at_k)} received.')
        if any(item < 0 for item in actual_at_k) or any(item < 0 for item in desired_at_k):
            raise ValueError('One or multiple score is less than 0.')
        dcg = _compute_dcg(gains=actual_at_k, power_relevance=self._power_relevance)
        idcg = _compute_idcg(gains=desired_at_k, power_relevance=self._power_relevance)
        # Guard against an all-zero ideal ranking to avoid division by zero.
        return 0.0 if idcg == 0.0 else dcg / idcg
|
#
# PySNMP MIB module HH3C-OID-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HH3C-OID-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:25:26 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Pull the ASN.1 / SMI / TC symbol definitions out of the (loader-provided)
# mibBuilder; this module is pysmi-generated and runs inside a PySNMP loader.
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Unsigned32, enterprises, iso, Counter64, Bits, NotificationType, ModuleIdentity, Gauge32, ObjectIdentity, TimeTicks, Integer32, IpAddress, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Unsigned32", "enterprises", "iso", "Counter64", "Bits", "NotificationType", "ModuleIdentity", "Gauge32", "ObjectIdentity", "TimeTicks", "Integer32", "IpAddress", "Counter32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# Top-level nodes of the HH3C enterprise subtree (1.3.6.1.4.1.25506).
hh3c = MibIdentifier((1, 3, 6, 1, 4, 1, 25506))
hh3cProductId = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 1))
hh3cCommon = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2))
hh3cEntityVendorTypeOID = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 3))
hh3cNM = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 4))
hh3cSystem = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 6))
hh3cSNMPAgCpb = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 7))
hh3cRhw = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8))
hh3cSurveillanceMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 9))
hh3cStorageRef = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 10))
hpNetworking = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 11))
# Feature sub-identifiers under hh3cCommon (25506.2.x).
hh3cFtm = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 1))
hh3cUIMgt = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 2))
hh3cSystemMan = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 3))
hh3cConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 4))
hh3cFlash = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 5))
hh3cEntityExtend = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 6))
hh3cIPSecMonitor = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 7))
hh3cAcl = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 8))
hh3cVoiceVlan = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 9))
hh3cL4Redirect = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 10))
hh3cIpPBX = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 11))
hh3cUser = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 12))
hh3cRadius = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 13))
hh3cPowerEthernetExt = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 14))
hh3cEntityRelation = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 15))
hh3cProtocolVlan = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 16))
hh3cQosProfile = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 17))
hh3cNat = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 18))
hh3cPos = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 19))
hh3cNS = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 20))
hh3cAAL5 = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 21))
hh3cSSH = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 22))
hh3cRSA = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 23))
hh3cVrrpExt = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 24))
hh3cIpa = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 25))
hh3cPortSecurity = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 26))
hh3cVpls = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 27))
hh3cE1 = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 28))
hh3cT1 = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 29))
hh3cIKEMonitor = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 30))
hh3cWebSwitch = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 31))
hh3cAutoDetect = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 32))
hh3cIpBroadcast = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 33))
hh3cIpx = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 34))
hh3cIPS = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 35))
hh3cDhcpSnoop = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 36))
hh3cProtocolPriority = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 37))
hh3cTrap = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 38))
hh3cVoice = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 39))
hh3cIfExt = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 40))
hh3cCfCard = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 41))
hh3cEpon = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 42))
hh3cDldp = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 43))
hh3cUnicast = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 44))
hh3cRrpp = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 45))
hh3cDomain = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 46))
hh3cIds = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 47))
hh3cRcr = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 48))
hh3cAtmDxi = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 49))
hh3cMulticast = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 50))
hh3cMpm = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 51))
hh3cOadp = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 52))
hh3cTunnel = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 53))
hh3cGre = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 54))
hh3cObjectInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 55))
hh3cStorage = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 56))
hh3cDvpn = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 57))
hh3cDhcpRelay = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 58))
hh3cIsis = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 59))
hh3cRpr = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 60))
hh3cSubnetVlan = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 61))
hh3cDlswExt = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 62))
hh3cSyslog = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 63))
hh3cFlowTemplate = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 64))
hh3cQos2 = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 65))
hh3cStormConstrain = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 66))
hh3cIpAddrMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 67))
hh3cMirrGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 68))
hh3cQINQ = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 69))
hh3cTransceiver = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 70))
hh3cIpv6AddrMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 71))
hh3cBfdMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 72))
hh3cRCP = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 73))
hh3cAcfp = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 74))
hh3cDot11 = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 75))
hh3cE1T1VI = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 76))
hh3cwapiMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 77))
hh3cL2VpnPwe3 = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 78))
hh3cMplsOam = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 79))
hh3cMplsOamPs = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 80))
hh3cSiemMib = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 81))
hh3cUps = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 82))
hh3cEOCCommon = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 83))
hh3cHPEOC = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 84))
hh3cAFC = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 85))
hh3cMultCDR = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 86))
hh3cMACInformation = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 87))
hh3cFireWall = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 88))
hh3cDSP = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 89))
hh3cNetMan = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 90))
hh3cStack = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 91))
hh3cPosa = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 92))
hh3cWebAuthentication = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 93))
hh3cCATVTransceiver = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 94))
hh3cLpbkdt = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 95))
hh3cMultiMedia = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 96))
hh3cDns = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 97))
hh3c3GModem = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 98))
hh3cPortal = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 99))
hh3clldp = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 100))
hh3cDHCPServer = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 101))
hh3cPPPoEServer = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 102))
hh3cL2Isolate = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 103))
hh3cSnmpExt = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 104))
hh3cVsi = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 105))
hh3cEvc = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 106))
hh3cMinm = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 107))
hh3cBlg = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 108))
hh3cRS485 = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 109))
hh3cARPRatelimit = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 110))
hh3cLI = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 111))
hh3cDar = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 112))
hh3cPBR = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 113))
hh3cAAANasId = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 114))
# Deeper sub-identifiers under hh3cQos2 (2.65.x), hh3cSNMPAgCpb (7.x),
# hh3cRhw (8.x), hh3cSurveillanceMIB (9.x) and hh3cStorageRef (10.x).
hh3cIfQos2 = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 65, 1))
hh3cCBQos2 = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 65, 2))
hh3cQosCapability = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 7, 1))
hh3cDHCPRelayMib = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 1))
hh3cDHCPServerMib = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 2))
hh3cNqa = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 3))
hh3crmonExtend = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 4))
hh3cpaeExtMib = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 6))
hh3cHgmp = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 7))
hh3cDevice = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 8))
hh3cMpls = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 12))
hh3cTRNG = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 13))
hh3cUserLogMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 18))
hh3cNTP = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 22))
hh3cLAG = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 25))
hh3cSmonExtend = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 26))
hh3cQoS = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 32))
hh3cMultilinkPPP = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 33))
hh3clswCommon = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 35))
hh3cmlsr = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 36))
hh3cdlsw = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 37))
hh3cVMMan = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 9, 1))
hh3cPUMan = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 9, 2))
hh3cMSMan = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 9, 3))
hh3cStorageMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 10, 1))
hh3cStorageSnap = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 10, 2))
hh3cDisk = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 10, 3))
hh3cRaid = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 10, 4))
hh3cLogicVolume = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 10, 5))
hh3cMplsLsr = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 12, 1))
hh3cMplsLdp = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 12, 2))
hh3cMplsVpn = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 12, 3))
hh3cLswExtInterface = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 35, 1))
hh3cLswVlan = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 35, 2))
hh3cLswMacPort = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 35, 3))
hh3cLswArpMib = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 35, 4))
hh3cLswL2InfMib = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 35, 5))
hh3cLswRstpMib = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 35, 6))
hh3cLswIgmpsnoopingMib = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 35, 7))
hh3cLswDhcpMib = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 35, 8))
hh3cLswdevMMib = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 35, 9))
hh3cLswTrapMib = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 35, 12))
hh3cdot1sMstp = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 35, 14))
hh3cLswQosAclMib = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 35, 16))
hh3cLswMix = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 35, 17))
hh3cLswDeviceAdmin = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 35, 18))
hh3cNDEC = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 36, 2))
hh3credundancyPower = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 36, 4))
hh3credundancyFan = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 36, 5))
hh3cpos = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 36, 8))
hh3cIsdnMib = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 8, 36, 9))
# Export every assigned OID so other MIB modules can import them by name.
mibBuilder.exportSymbols("HH3C-OID-MIB", hh3cNqa=hh3cNqa, hh3cLI=hh3cLI, hh3cLAG=hh3cLAG, hh3cWebAuthentication=hh3cWebAuthentication, hh3cEpon=hh3cEpon, hh3cDar=hh3cDar, hh3cRSA=hh3cRSA, hh3cUIMgt=hh3cUIMgt, hh3cFlowTemplate=hh3cFlowTemplate, hh3cSurveillanceMIB=hh3cSurveillanceMIB, hh3cVoiceVlan=hh3cVoiceVlan, hh3cIKEMonitor=hh3cIKEMonitor, hh3cLswRstpMib=hh3cLswRstpMib, hh3cSnmpExt=hh3cSnmpExt, hh3cIpx=hh3cIpx, hh3cOadp=hh3cOadp, hh3cProtocolPriority=hh3cProtocolPriority, hh3cMultCDR=hh3cMultCDR, hh3cPowerEthernetExt=hh3cPowerEthernetExt, hh3cL2Isolate=hh3cL2Isolate, hh3cPortSecurity=hh3cPortSecurity, hh3cLswdevMMib=hh3cLswdevMMib, hh3cIpBroadcast=hh3cIpBroadcast, hh3cCATVTransceiver=hh3cCATVTransceiver, hh3cIsis=hh3cIsis, hh3cL2VpnPwe3=hh3cL2VpnPwe3, hh3cL4Redirect=hh3cL4Redirect, hh3cpaeExtMib=hh3cpaeExtMib, hh3cMplsLdp=hh3cMplsLdp, hh3cStorageSnap=hh3cStorageSnap, hh3cmlsr=hh3cmlsr, hh3cDvpn=hh3cDvpn, hh3cVpls=hh3cVpls, hh3cSubnetVlan=hh3cSubnetVlan, hh3cDot11=hh3cDot11, hh3cDlswExt=hh3cDlswExt, hh3cQoS=hh3cQoS, hh3cCfCard=hh3cCfCard, hh3cTRNG=hh3cTRNG, hh3cQosCapability=hh3cQosCapability, hh3cMplsLsr=hh3cMplsLsr, hh3cNetMan=hh3cNetMan, hh3cDHCPServer=hh3cDHCPServer, hpNetworking=hpNetworking, hh3cPortal=hh3cPortal, hh3cVoice=hh3cVoice, hh3c3GModem=hh3c3GModem, hh3cConfig=hh3cConfig, hh3cRS485=hh3cRS485, hh3cLswIgmpsnoopingMib=hh3cLswIgmpsnoopingMib, hh3cPBR=hh3cPBR, hh3cMinm=hh3cMinm, hh3cSystem=hh3cSystem, hh3cIpa=hh3cIpa, hh3cIsdnMib=hh3cIsdnMib, hh3cIPS=hh3cIPS, hh3cT1=hh3cT1, hh3cDhcpRelay=hh3cDhcpRelay, hh3crmonExtend=hh3crmonExtend, hh3cIfQos2=hh3cIfQos2, hh3cFlash=hh3cFlash, hh3cLpbkdt=hh3cLpbkdt, hh3cVsi=hh3cVsi, hh3cIpPBX=hh3cIpPBX, hh3cMACInformation=hh3cMACInformation, hh3cLswMix=hh3cLswMix, hh3cIpAddrMIB=hh3cIpAddrMIB, hh3cVrrpExt=hh3cVrrpExt, hh3cUserLogMIB=hh3cUserLogMIB, hh3cNTP=hh3cNTP, hh3cGre=hh3cGre, hh3cStorageRef=hh3cStorageRef, hh3cNS=hh3cNS, hh3cIpv6AddrMIB=hh3cIpv6AddrMIB, hh3cLswArpMib=hh3cLswArpMib, hh3clswCommon=hh3clswCommon, 
hh3cMirrGroup=hh3cMirrGroup, hh3cIPSecMonitor=hh3cIPSecMonitor, hh3cLswTrapMib=hh3cLswTrapMib, hh3cSmonExtend=hh3cSmonExtend, hh3cIfExt=hh3cIfExt, hh3cTunnel=hh3cTunnel, hh3cHgmp=hh3cHgmp, hh3cUps=hh3cUps, hh3cdlsw=hh3cdlsw, hh3cDHCPRelayMib=hh3cDHCPRelayMib, hh3cStorageMIB=hh3cStorageMIB, hh3cAcl=hh3cAcl, hh3cSystemMan=hh3cSystemMan, hh3cDhcpSnoop=hh3cDhcpSnoop, hh3cLswQosAclMib=hh3cLswQosAclMib, hh3cLswExtInterface=hh3cLswExtInterface, hh3cProtocolVlan=hh3cProtocolVlan, hh3cQos2=hh3cQos2, hh3cEntityExtend=hh3cEntityExtend, hh3cAFC=hh3cAFC, hh3cDldp=hh3cDldp, hh3cStormConstrain=hh3cStormConstrain, hh3cMpm=hh3cMpm, hh3clldp=hh3clldp, hh3cAtmDxi=hh3cAtmDxi, hh3cFtm=hh3cFtm, hh3cPPPoEServer=hh3cPPPoEServer, hh3cHPEOC=hh3cHPEOC, hh3cDns=hh3cDns, hh3cLswL2InfMib=hh3cLswL2InfMib, hh3cProductId=hh3cProductId, hh3cDisk=hh3cDisk, hh3cRaid=hh3cRaid, hh3cDHCPServerMib=hh3cDHCPServerMib, hh3cBfdMIB=hh3cBfdMIB, hh3cStack=hh3cStack, hh3cMpls=hh3cMpls, hh3cEvc=hh3cEvc, hh3credundancyFan=hh3credundancyFan, hh3cMulticast=hh3cMulticast, hh3cCommon=hh3cCommon, hh3cStorage=hh3cStorage, hh3cLswVlan=hh3cLswVlan, hh3cwapiMIB=hh3cwapiMIB, hh3cdot1sMstp=hh3cdot1sMstp, hh3cRCP=hh3cRCP, hh3cEOCCommon=hh3cEOCCommon, hh3cBlg=hh3cBlg, hh3cLswDeviceAdmin=hh3cLswDeviceAdmin, hh3cIds=hh3cIds, hh3cNM=hh3cNM, hh3cSyslog=hh3cSyslog, hh3cMSMan=hh3cMSMan, hh3cWebSwitch=hh3cWebSwitch, hh3cARPRatelimit=hh3cARPRatelimit, hh3cSSH=hh3cSSH, hh3cCBQos2=hh3cCBQos2, hh3cpos=hh3cpos, hh3c=hh3c, hh3cObjectInfo=hh3cObjectInfo, hh3cUser=hh3cUser, hh3cAcfp=hh3cAcfp, hh3cPosa=hh3cPosa, hh3cUnicast=hh3cUnicast, hh3cRrpp=hh3cRrpp, hh3cVMMan=hh3cVMMan, hh3cRcr=hh3cRcr, hh3cRhw=hh3cRhw, hh3cQINQ=hh3cQINQ, hh3cNat=hh3cNat, hh3cNDEC=hh3cNDEC, hh3cTrap=hh3cTrap, hh3cE1=hh3cE1, hh3cRpr=hh3cRpr, hh3credundancyPower=hh3credundancyPower, hh3cSiemMib=hh3cSiemMib, hh3cDSP=hh3cDSP, hh3cTransceiver=hh3cTransceiver, hh3cEntityRelation=hh3cEntityRelation, hh3cPUMan=hh3cPUMan, hh3cSNMPAgCpb=hh3cSNMPAgCpb, 
hh3cQosProfile=hh3cQosProfile, hh3cMultilinkPPP=hh3cMultilinkPPP, hh3cPos=hh3cPos, hh3cRadius=hh3cRadius, hh3cE1T1VI=hh3cE1T1VI, hh3cMultiMedia=hh3cMultiMedia, hh3cAAL5=hh3cAAL5, hh3cDomain=hh3cDomain, hh3cMplsVpn=hh3cMplsVpn, hh3cMplsOam=hh3cMplsOam, hh3cAutoDetect=hh3cAutoDetect, hh3cFireWall=hh3cFireWall, hh3cLswMacPort=hh3cLswMacPort, hh3cLogicVolume=hh3cLogicVolume, hh3cLswDhcpMib=hh3cLswDhcpMib, hh3cMplsOamPs=hh3cMplsOamPs, hh3cDevice=hh3cDevice, hh3cAAANasId=hh3cAAANasId, hh3cEntityVendorTypeOID=hh3cEntityVendorTypeOID)
|
# -*- coding: utf-8 -*-
#
from typing import Optional
from ..category import Category
class Market(Category):
    """Thin wrapper around the VK ``market.*`` API methods.

    Every method gathers its own arguments with ``locals()`` and forwards
    them (plus any extra ``**kwargs``) to ``Category._request`` together
    with the remote method name, so parameter names here must match the
    remote API exactly. Do not introduce local variables before the
    ``locals()`` call — they would be sent as API parameters.
    """

    def add(
        self,
        owner_id: Optional[int] = None,
        name: Optional[str] = None,
        description: Optional[str] = None,
        category_id: Optional[int] = None,
        price: Optional[float] = None,
        old_price: Optional[float] = None,
        deleted: Optional[bool] = None,
        main_photo_id: Optional[int] = None,
        photo_ids: Optional[list] = None,
        url: Optional[str] = None,
        dimension_width: Optional[int] = None,
        dimension_height: Optional[int] = None,
        dimension_length: Optional[int] = None,
        weight: Optional[int] = None,
        **kwargs
    ) -> dict:
        return self._request("add", locals())

    def add_album(
        self,
        owner_id: Optional[int] = None,
        title: Optional[str] = None,
        photo_id: Optional[int] = None,
        main_album: Optional[bool] = None,
        **kwargs
    ) -> dict:
        return self._request("addAlbum", locals())

    def add_to_album(
        self,
        owner_id: Optional[int] = None,
        item_id: Optional[int] = None,
        album_ids: Optional[list] = None,
        **kwargs
    ) -> dict:
        return self._request("addToAlbum", locals())

    def create_comment(
        self,
        owner_id: Optional[int] = None,
        item_id: Optional[int] = None,
        message: Optional[str] = None,
        attachments: Optional[list] = None,
        from_group: Optional[bool] = None,
        reply_to_comment: Optional[int] = None,
        sticker_id: Optional[int] = None,
        guid: Optional[str] = None,
        **kwargs
    ) -> dict:
        return self._request("createComment", locals())

    def delete(
        self,
        owner_id: Optional[int] = None,
        item_id: Optional[int] = None,
        **kwargs
    ) -> dict:
        return self._request("delete", locals())

    def delete_album(
        self,
        owner_id: Optional[int] = None,
        album_id: Optional[int] = None,
        **kwargs
    ) -> dict:
        return self._request("deleteAlbum", locals())

    def delete_comment(
        self,
        owner_id: Optional[int] = None,
        comment_id: Optional[int] = None,
        **kwargs
    ) -> dict:
        return self._request("deleteComment", locals())

    def edit(
        self,
        owner_id: Optional[int] = None,
        item_id: Optional[int] = None,
        name: Optional[str] = None,
        description: Optional[str] = None,
        category_id: Optional[int] = None,
        price: Optional[float] = None,
        deleted: Optional[bool] = None,
        main_photo_id: Optional[int] = None,
        photo_ids: Optional[list] = None,
        url: Optional[str] = None,
        **kwargs
    ) -> dict:
        return self._request("edit", locals())

    def edit_album(
        self,
        owner_id: Optional[int] = None,
        album_id: Optional[int] = None,
        title: Optional[str] = None,
        photo_id: Optional[int] = None,
        main_album: Optional[bool] = None,
        **kwargs
    ) -> dict:
        return self._request("editAlbum", locals())

    def edit_comment(
        self,
        owner_id: Optional[int] = None,
        comment_id: Optional[int] = None,
        message: Optional[str] = None,
        attachments: Optional[list] = None,
        **kwargs
    ) -> dict:
        return self._request("editComment", locals())

    def get(
        self,
        owner_id: Optional[int] = None,
        album_id: Optional[int] = None,
        count: Optional[int] = None,
        offset: Optional[int] = None,
        extended: Optional[bool] = None,
        **kwargs
    ) -> dict:
        return self._request("get", locals())

    def get_album_by_id(
        self,
        owner_id: Optional[int] = None,
        album_ids: Optional[list] = None,
        **kwargs
    ) -> dict:
        return self._request("getAlbumById", locals())

    def get_albums(
        self,
        owner_id: Optional[int] = None,
        offset: Optional[int] = None,
        count: Optional[int] = None,
        **kwargs
    ) -> dict:
        return self._request("getAlbums", locals())

    def get_by_id(
        self,
        item_ids: Optional[list] = None,
        extended: Optional[bool] = None,
        **kwargs
    ) -> dict:
        return self._request("getById", locals())

    def get_categories(
        self,
        count: Optional[int] = None,
        offset: Optional[int] = None,
        **kwargs
    ) -> dict:
        return self._request("getCategories", locals())

    def get_comments(
        self,
        owner_id: Optional[int] = None,
        item_id: Optional[int] = None,
        need_likes: Optional[bool] = None,
        start_comment_id: Optional[int] = None,
        offset: Optional[int] = None,
        count: Optional[int] = None,
        sort: Optional[str] = None,
        extended: Optional[bool] = None,
        fields: Optional[list] = None,
        **kwargs
    ) -> dict:
        return self._request("getComments", locals())

    def remove_from_album(
        self,
        owner_id: Optional[int] = None,
        item_id: Optional[int] = None,
        album_ids: Optional[list] = None,
        **kwargs
    ) -> dict:
        return self._request("removeFromAlbum", locals())

    def reorder_albums(
        self,
        owner_id: Optional[int] = None,
        album_id: Optional[int] = None,
        before: Optional[int] = None,
        after: Optional[int] = None,
        **kwargs
    ) -> dict:
        return self._request("reorderAlbums", locals())

    def reorder_items(
        self,
        owner_id: Optional[int] = None,
        album_id: Optional[int] = None,
        item_id: Optional[int] = None,
        before: Optional[int] = None,
        after: Optional[int] = None,
        **kwargs
    ) -> dict:
        return self._request("reorderItems", locals())

    def report(
        self,
        owner_id: Optional[int] = None,
        item_id: Optional[int] = None,
        reason: Optional[int] = None,
        **kwargs
    ) -> dict:
        return self._request("report", locals())

    def report_comment(
        self,
        owner_id: Optional[int] = None,
        comment_id: Optional[int] = None,
        reason: Optional[int] = None,
        **kwargs
    ) -> dict:
        return self._request("reportComment", locals())

    def restore(
        self,
        owner_id: Optional[int] = None,
        item_id: Optional[int] = None,
        **kwargs
    ) -> dict:
        return self._request("restore", locals())

    def restore_comment(
        self,
        owner_id: Optional[int] = None,
        comment_id: Optional[int] = None,
        **kwargs
    ) -> dict:
        return self._request("restoreComment", locals())

    def search(
        self,
        owner_id: Optional[int] = None,
        album_id: Optional[int] = None,
        q: Optional[str] = None,
        price_from: Optional[int] = None,
        price_to: Optional[int] = None,
        sort: Optional[int] = None,
        rev: Optional[int] = None,
        offset: Optional[int] = None,
        count: Optional[int] = None,
        extended: Optional[bool] = None,
        status: Optional[int] = None,
        **kwargs
    ) -> dict:
        return self._request("search", locals())
|
""" Uses the *.wrd files from train.scp (mfc) to build the timit "words"
corpus, and then applied a phonemic dictionary on it to print phonemic output.
"""
TIMIT_SCP = "/Users/gabrielsynnaeve/postdoc/datasets/TIMIT/train/train.scp"
total_corpus = []
# Each .scp line names an utterance; read the matching .wrd transcript and
# keep the last whitespace-separated field of each line (the word itself).
with open(TIMIT_SCP) as f:
    for line in f:
        fname = line.rstrip('\n').split('.')[0] + '.wrd'
        with open(fname) as rf:
            sentence = []
            for l in rf:
                sentence.append(l.rstrip('\n').split(' ')[-1])
        total_corpus.append(sentence)
import re
words_dict = {}
# Load the pronouncing dictionary: word -> phonemes with the stress digits
# stripped (e.g. "AH0" -> "ah"). NOTE: Python 2 — map() returns a list here.
#with open("/Users/gabrielsynnaeve/postdoc/contextual_segmentation/phonology_dict/words.txt") as f:
with open("cmudict.txt") as f:
    for line in f:
        #word, _, phonemes = line.split('\t')
        word, phonemes = line.split('\t')
        words_dict[word.lower()] = map(lambda phn: re.sub('\d+', '', phn.lower()), phonemes.split())
def wrd_to_phn(w):
    """Return the space-joined phoneme string for word *w*, or '' if unknown."""
    key = w.lower()
    if key not in words_dict:
        return ''
    return ' '.join(words_dict[key])
#print words_dict
# Emit each sentence as phonemes bracketed by silence markers.
# NOTE: Python 2 syntax (print statement; list + map concatenation).
for sentence in total_corpus:
    s = ["sil"] + map(wrd_to_phn, sentence) + ["sil"]
    print ' '.join(s)
|
"""
Runs tests on main classes
AgentTest, MarketTest, SimulationTest
"""
import matchingmarkets as mm
import numpy.random as rng
import unittest
class TestMarkets(unittest.TestCase):
    # Smoke tests: they drive the matchingmarkets public API with randomized
    # inputs and assert only basic invariants (or simply that nothing raises).

    def test_agent(self):
        """
        Agent Tests
        """
        AgentList = list()
        numTypes = rng.randint(1, 25)
        # Build a handful of agents with random attributes.
        for i in range(5):
            new_discount = rng.random_sample()
            new_time_to_crit = int(10*rng.random_sample())
            new_type = mm.randomType(numTypes)
            newAgent = mm.Agent(name=len(AgentList),
                                discount_rate=new_discount,
                                time_to_critical=new_time_to_crit,
                                myType=new_type)
            AgentList.append(newAgent)
        # Sanity: every agent attribute must be non-negative.
        for agent in AgentList:
            self.assertGreaterEqual(agent.name, 0)
            self.assertGreaterEqual(agent.discount_rate, 0)
            self.assertGreaterEqual(agent.type, 0)
            self.assertGreaterEqual(agent.time_to_critical, 0)

    def test_markets(self):
        """
        Market Test
        """
        n = 5   # number of markets
        m = 50  # update periods per market
        MarketList = list()
        for i in range(n):
            newMarket = mm.Market(arrival_rate=rng.randint(2**(i+1), 3**(i+1)),
                                  success_prob=lambda: rng.random(),
                                  max_agents=rng.randint(150)
                                  )
            MarketList.append(newMarket)
        # Run each market forward m periods; passes if no exception is raised.
        for market in MarketList:
            for i in range(m):
                market.update(discount=rng.random_sample,
                              typeGenerator=rng.randint,
                              numTypes=5, verbose=False
                              )

    def test_simulation(self):
        """
        Simulation Test
        """
        sim = mm.simulation(time_per_run=5000, runs=3, logAllData=True,
                            arrival_rate=rng.randint(50),
                            success_prob=lambda: rng.random(),
                            discount=lambda: rng.random_sample(),
                            typeGenerator=lambda x: rng.randint(x),
                            numTypes=5)
        sim.run()
        sim.stats()
        # Shorter runs for the single_run objective checks below.
        sim.time_per_run = 75
        arrival_r = rng.randint(1, 5)
        lossTest = sim.single_run(0, objective=lambda x: x.loss)
        welfareTest = sim.single_run(0, objective=lambda x: x.welfare)
        matchTest = sim.single_run(0, objective=lambda x: len(x.matched))
        arrival_r = rng.randint(1, 5)
        welfareTest1 = sim.single_run(
            [2], metaParamNames=["period"], objective=lambda x: x.welfare)
        welfareTest2 = sim.single_run(
            [2], metaParamNames=["period"], objective=lambda x: x.welfare)
        welfareTest3 = sim.single_run(
            [2], metaParamNames=["period"], objective=lambda x: x.welfare)
        # Very short runs so the brute-force parameter search stays fast.
        sim.time_per_run = 5
        res = sim.brute_search(
            [(1, 3)], metaParamNames=["period"],
            objective=lambda x: x.welfare,
            stochastic_objective=False,
            stochastic_samples=5,
            stochastic_precision=1.,
        )

    def test_plotting(self):
        """
        Market Plotting Test
        """
        # Modify number of runs here
        n = 2  # Markets
        m = 3  # Periods per market
        MarketList = list()
        for i in range(n):
            newMarket = mm.Market(
                arrival_rate=rng.randint(2**(i+1), 3**(i+1)),
                success_prob=lambda: rng.random(),
                max_agents=rng.randint(1, 15000), plots=True,
                plot_time=0.000001
            )
            MarketList.append(newMarket)
        for market in MarketList:
            for i in range(m):
                market.update(
                    discount=rng.random_sample,
                    typeGenerator=rng.randint,
                    compatFct=mm.neighborSameType,
                    crit_input=5,
                    numTypes=5
                )
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main() 
##
## metapredict
## A protein disorder predictor based on a BRNN (IDP-Parrot) trained on the consensus disorder values from
## 8 disorder predictors from 12 proteomes.
##
# import user-facing functions
from .meta import *
from metapredict.backend.meta_predict_disorder import get_metapredict_network_version
import os
import sys
# To crash on LIBOMP error set this to False.  When True (default), the
# duplicate-libomp abort seen on macOS is suppressed below.
IGNORE_LIBOMP_ERROR = True
# ------------------------------------------------------------
#
# Handle versioneer: pull the package version from the generated
# _version module, expose it, and drop the helper names from the namespace.
from ._version import get_versions
versions = get_versions()
__version__ = versions['version']
__git_revision__ = versions['full-revisionid']
del get_versions, versions
# Handle omplib error: on macOS, tell the OpenMP runtime to tolerate a
# duplicate libomp being loaded instead of aborting the process.
if IGNORE_LIBOMP_ERROR:
    if sys.platform == 'darwin':
        os.environ['KMP_DUPLICATE_LIB_OK']='True'
# Standardized function to check performance
def print_performance(seq_len=500, num_seqs=100, verbose=True):
    """
    Function that lets you test metapredict's performance on your local hardware.

    Parameters
    --------------
    seq_len : int
        Length of each random sequence to be tested. Default = 500.

    num_seqs : int
        Number of sequences to compute over. Default = 100.

    verbose : bool
        Flag which, if true, means the function prints a summary when finished. If
        false simply returns the rate.

    Returns
    ---------------
    float
        Returns the number of residues-per-second metapredict is currently
        predicting (seq_len * num_seqs divided by the elapsed time).
        Note this is residues per second, not sequences per second.
    """
    # random/time are imported lazily so module import stays fast; they are
    # only needed if this function is actually called.
    import random
    import time

    VALID_AMINO_ACIDS = ['A','C','D','E','F','G','H','I','K','L','M','N','P','Q','R','S','T','V','W','Y']

    def genseq(n):
        """
        Generate a random amino-acid sequence of length n.
        """
        return "".join([random.choice(VALID_AMINO_ACIDS) for i in range(n)])

    seqs = []
    for i in range(num_seqs):
        seqs.append(genseq(seq_len))

    # Time only the prediction loop, not sequence generation.
    start = time.time()
    for i in seqs:
        predict_disorder(i)
    end = time.time()
    r_per_second = (seq_len*num_seqs)/(end - start)

    if verbose:
        print('Predicting %i residues per second!'%(r_per_second))

    return r_per_second
def print_metapredict_network_version():
    """
    Report which trained network version is used for disorder prediction.

    Updated networks always accompany a version bump, so this makes it
    possible to tell which network a given release of the code shipped with.

    Returns
    ---------
    str
        Version string in the format v<version information>.
    """
    network_version = get_metapredict_network_version()
    return network_version
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sylvain Afchain, eNovance SAS
# @author: Emilien Macchi, eNovance SAS
import contextlib
import uuid
import mock
from oslo.config import cfg
from sqlalchemy.orm import query
from neutron.api.v2 import attributes as attr
from neutron.common import constants
from neutron.common import topics
from neutron import context as q_context
from neutron.db import agents_db
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_db
from neutron.db import l3_dvrscheduler_db
from neutron.extensions import l3 as ext_l3
from neutron import manager
from neutron.openstack.common import timeutils
from neutron.scheduler import l3_agent_scheduler
from neutron.tests import base
from neutron.tests.unit import test_db_plugin
from neutron.tests.unit import test_l3_plugin
# Hostname of the first (legacy-mode) L3 agent.
HOST = 'my_l3_host'
# Agent-state report payload used to register a legacy L3 agent on HOST.
FIRST_L3_AGENT = {
    'binary': 'neutron-l3-agent',
    'host': HOST,
    'topic': topics.L3_AGENT,
    'configurations': {},
    'agent_type': constants.AGENT_TYPE_L3,
    'start_flag': True
}
# Second legacy L3 agent, used to exercise multi-agent scheduling.
HOST_2 = 'my_l3_host_2'
SECOND_L3_AGENT = {
    'binary': 'neutron-l3-agent',
    'host': HOST_2,
    'topic': topics.L3_AGENT,
    'configurations': {},
    'agent_type': constants.AGENT_TYPE_L3,
    'start_flag': True
}
# L3 agent running in distributed ('dvr') mode.
HOST_DVR = 'my_l3_host_dvr'
DVR_L3_AGENT = {
    'binary': 'neutron-l3-agent',
    'host': HOST_DVR,
    'topic': topics.L3_AGENT,
    'configurations': {'agent_mode': 'dvr'},
    'agent_type': constants.AGENT_TYPE_L3,
    'start_flag': True
}
# L3 agent running in 'dvr_snat' mode (can host centralized SNAT as well
# as distributed routers).
HOST_DVR_SNAT = 'my_l3_host_dvr_snat'
DVR_SNAT_L3_AGENT = {
    'binary': 'neutron-l3-agent',
    'host': HOST_DVR_SNAT,
    'topic': topics.L3_AGENT,
    'configurations': {'agent_mode': 'dvr_snat'},
    'agent_type': constants.AGENT_TYPE_L3,
    'start_flag': True
}
# Core plugin loaded by the scheduler test cases below.
DB_PLUGIN_KLASS = ('neutron.plugins.openvswitch.ovs_neutron_plugin.'
                   'OVSNeutronPluginV2')
class L3SchedulerTestExtensionManager(object):
    """Minimal extension manager exposing only the L3 router resources."""

    def get_resources(self):
        # Register the L3 attribute map before building the resources.
        attr.RESOURCE_ATTRIBUTE_MAP.update(ext_l3.RESOURCE_ATTRIBUTE_MAP)
        return ext_l3.L3.get_resources()

    def get_actions(self):
        # This test extension exposes no extra actions.
        return []

    def get_request_extensions(self):
        # This test extension exposes no request extensions.
        return []
class L3SchedulerTestCase(l3_agentschedulers_db.L3AgentSchedulerDbMixin,
                          test_db_plugin.NeutronDbPluginV2TestCase,
                          test_l3_plugin.L3NatTestCaseMixin):
    """Base test case for the L3 agent schedulers.

    Sets up an in-memory core plugin with the L3 extension and registers
    two legacy L3 agents (HOST and HOST_2); DVR-mode agents can be added
    on demand via _register_l3_dvr_agents().
    """

    def setUp(self):
        ext_mgr = L3SchedulerTestExtensionManager()
        super(L3SchedulerTestCase, self).setUp(plugin=DB_PLUGIN_KLASS,
                                               ext_mgr=ext_mgr)
        self.adminContext = q_context.get_admin_context()
        self.plugin = manager.NeutronManager.get_plugin()
        self._register_l3_agents()

    def _register_l3_agents(self):
        """Register the two legacy L3 agents and remember their DB rows/ids."""
        callback = agents_db.AgentExtRpcCallback()
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': FIRST_L3_AGENT},
                              time=timeutils.strtime())
        agent_db = self.plugin.get_agents_db(self.adminContext,
                                             filters={'host': [HOST]})
        self.agent_id1 = agent_db[0].id
        self.agent1 = agent_db[0]

        callback.report_state(self.adminContext,
                              agent_state={'agent_state': SECOND_L3_AGENT},
                              time=timeutils.strtime())
        # Bug fix: look up the second agent by HOST_2. The previous filter
        # used HOST, so agent_id2 silently aliased agent_id1.
        agent_db = self.plugin.get_agents_db(self.adminContext,
                                             filters={'host': [HOST_2]})
        self.agent_id2 = agent_db[0].id

    def _register_l3_dvr_agents(self):
        """Register one 'dvr' and one 'dvr_snat' mode agent."""
        callback = agents_db.AgentExtRpcCallback()
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': DVR_L3_AGENT},
                              time=timeutils.strtime())
        agent_db = self.plugin.get_agents_db(self.adminContext,
                                             filters={'host': [HOST_DVR]})
        self.l3_dvr_agent = agent_db[0]

        callback.report_state(self.adminContext,
                              agent_state={'agent_state': DVR_SNAT_L3_AGENT},
                              time=timeutils.strtime())
        agent_db = self.plugin.get_agents_db(self.adminContext,
                                             filters={'host': [HOST_DVR_SNAT]})
        self.l3_dvr_snat_id = agent_db[0].id
        self.l3_dvr_snat_agent = agent_db[0]

    def _set_l3_agent_admin_state(self, context, agent_id, state=True):
        """Flip the admin_state_up flag of an agent in the DB."""
        update = {'agent': {'admin_state_up': state}}
        self.plugin.update_agent(context, agent_id, update)

    @contextlib.contextmanager
    def router_with_ext_gw(self, name='router1', admin_state_up=True,
                           fmt=None, tenant_id=None,
                           external_gateway_info=None,
                           subnet=None, set_context=False,
                           **kwargs):
        """Yield a router attached to subnet's external network; clean up after.

        Bug fix: tenant_id now defaults to None and a fresh uuid is drawn
        per call. The old default str(uuid.uuid4()) was evaluated once at
        definition time, so every router shared the same tenant id.
        """
        if tenant_id is None:
            tenant_id = str(uuid.uuid4())
        router = self._make_router(fmt or self.fmt, tenant_id, name,
                                   admin_state_up, external_gateway_info,
                                   set_context, **kwargs)
        self._add_external_gateway_to_router(
            router['router']['id'],
            subnet['subnet']['network_id'])
        try:
            yield router
        finally:
            # Run the teardown even if the with-body raised, so routers do
            # not leak between tests.
            self._remove_external_gateway_from_router(
                router['router']['id'], subnet['subnet']['network_id'])
            self._delete('routers', router['router']['id'])

    def test_schedule_router_distributed(self):
        """A distributed router must go through the SNAT scheduling path."""
        scheduler = l3_agent_scheduler.ChanceScheduler()
        agent = agents_db.Agent()
        agent.admin_state_up = True
        agent.heartbeat_timestamp = timeutils.utcnow()
        sync_router = {
            'id': 'foo_router_id',
            'distributed': True
        }
        plugin = mock.Mock()
        plugin.get_router.return_value = sync_router
        plugin.get_l3_agents_hosting_routers.return_value = []
        plugin.get_l3_agents.return_value = [agent]
        plugin.get_l3_agent_candidates.return_value = [agent]
        with mock.patch.object(scheduler, 'bind_router'):
            scheduler._schedule_router(
                plugin, self.adminContext,
                'foo_router_id', None, {'gw_exists': True})
            expected_calls = [
                mock.call.get_router(mock.ANY, 'foo_router_id'),
                mock.call.schedule_snat_router(
                    mock.ANY, 'foo_router_id', sync_router, True),
                mock.call.get_l3_agents_hosting_routers(
                    mock.ANY, ['foo_router_id'], admin_state_up=True),
                mock.call.get_l3_agents(mock.ANY, active=True),
                mock.call.get_l3_agent_candidates(
                    mock.ANY, sync_router, [agent], None),
            ]
            plugin.assert_has_calls(expected_calls)

    def _test_schedule_bind_router(self, agent, router):
        """Bind router to agent and assert the binding row exists."""
        ctx = self.adminContext
        session = ctx.session
        db = l3_agentschedulers_db.RouterL3AgentBinding
        scheduler = l3_agent_scheduler.ChanceScheduler()

        rid = router['router']['id']
        scheduler.bind_router(ctx, rid, agent)
        results = (session.query(db).filter_by(router_id=rid).all())
        self.assertTrue(len(results) > 0)
        self.assertIn(agent.id, [bind.l3_agent_id for bind in results])

    def test_bind_new_router(self):
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r1')
        with mock.patch.object(l3_agent_scheduler.LOG, 'debug') as flog:
            self._test_schedule_bind_router(self.agent1, router)
            self.assertEqual(1, flog.call_count)
            args, kwargs = flog.call_args
            self.assertIn('is scheduled', args[0])

    def test_bind_existing_router(self):
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r2')
        self._test_schedule_bind_router(self.agent1, router)
        # Binding the same router a second time must only log, not re-bind.
        with mock.patch.object(l3_agent_scheduler.LOG, 'debug') as flog:
            self._test_schedule_bind_router(self.agent1, router)
            self.assertEqual(1, flog.call_count)
            args, kwargs = flog.call_args
            self.assertIn('has already been scheduled', args[0])

    def _check_get_l3_agent_candidates(self, router, agent_list, exp_host):
        """Assert exactly one candidate is returned and it lives on exp_host."""
        candidates = self.get_l3_agent_candidates(self.adminContext,
                                                  router, agent_list,
                                                  subnet_id=None)
        self.assertEqual(len(candidates), 1)
        self.assertEqual(candidates[0]['host'], exp_host)

    def test_get_l3_agent_candidates(self):
        self._register_l3_dvr_agents()
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = str(uuid.uuid4())
        agent_list = [self.agent1, self.l3_dvr_agent]

        # test legacy agent_mode case: only legacy agent should be candidate
        router['distributed'] = False
        exp_host = FIRST_L3_AGENT.get('host')
        self._check_get_l3_agent_candidates(router, agent_list, exp_host)

        # test dvr agent_mode case only dvr agent should be candidate
        router['distributed'] = True
        exp_host = DVR_L3_AGENT.get('host')
        self._check_get_l3_agent_candidates(router, agent_list, exp_host)

        # test dvr_snat agent_mode cases: dvr_snat agent can host
        # centralized and distributed routers
        agent_list = [self.l3_dvr_snat_agent]
        exp_host = DVR_SNAT_L3_AGENT.get('host')
        self._check_get_l3_agent_candidates(router, agent_list, exp_host)
        router['distributed'] = False
        self._check_get_l3_agent_candidates(router, agent_list, exp_host)
class L3AgentChanceSchedulerTestCase(L3SchedulerTestCase):
    """Exercises the ChanceScheduler (random agent selection)."""

    def test_random_scheduling(self):
        # Make random.choice deterministic (always pick the first agent) so
        # the call count can be asserted.
        random_patch = mock.patch('random.choice')
        random_mock = random_patch.start()

        def side_effect(seq):
            return seq[0]
        random_mock.side_effect = side_effect

        with self.subnet() as subnet:
            self._set_net_external(subnet['subnet']['network_id'])
            with self.router_with_ext_gw(name='r1', subnet=subnet) as r1:
                agents = self.get_l3_agents_hosting_routers(
                    self.adminContext, [r1['router']['id']],
                    admin_state_up=True)
                # Each router must be scheduled to exactly one agent, via
                # exactly one random.choice call per router.
                self.assertEqual(len(agents), 1)
                self.assertEqual(random_mock.call_count, 1)

                with self.router_with_ext_gw(name='r2', subnet=subnet) as r2:
                    agents = self.get_l3_agents_hosting_routers(
                        self.adminContext, [r2['router']['id']],
                        admin_state_up=True)
                    self.assertEqual(len(agents), 1)
                    self.assertEqual(random_mock.call_count, 2)
        random_patch.stop()
class L3AgentLeastRoutersSchedulerTestCase(L3SchedulerTestCase):
    """Exercises the LeastRoutersScheduler (pick the least-loaded agent)."""

    def setUp(self):
        # Select the LeastRouters driver before the base class builds the
        # plugin.
        cfg.CONF.set_override('router_scheduler_driver',
                              'neutron.scheduler.l3_agent_scheduler.'
                              'LeastRoutersScheduler')
        super(L3AgentLeastRoutersSchedulerTestCase, self).setUp()

    def test_scheduler(self):
        # disable one agent to force the scheduling to the only one.
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id2, False)

        with self.subnet() as subnet:
            self._set_net_external(subnet['subnet']['network_id'])
            with self.router_with_ext_gw(name='r1', subnet=subnet) as r1:
                agents = self.get_l3_agents_hosting_routers(
                    self.adminContext, [r1['router']['id']],
                    admin_state_up=True)
                self.assertEqual(len(agents), 1)

                agent_id1 = agents[0]['id']

                with self.router_with_ext_gw(name='r2', subnet=subnet) as r2:
                    agents = self.get_l3_agents_hosting_routers(
                        self.adminContext, [r2['router']['id']],
                        admin_state_up=True)
                    self.assertEqual(len(agents), 1)

                    agent_id2 = agents[0]['id']

                    # Only one agent is enabled, so both routers must land
                    # on it.
                    self.assertEqual(agent_id1, agent_id2)

                    # re-enable the second agent to see whether the next router
                    # spawned will be on this one.
                    self._set_l3_agent_admin_state(self.adminContext,
                                                   self.agent_id2, True)

                    with self.router_with_ext_gw(name='r3',
                                                 subnet=subnet) as r3:
                        agents = self.get_l3_agents_hosting_routers(
                            self.adminContext, [r3['router']['id']],
                            admin_state_up=True)
                        self.assertEqual(len(agents), 1)

                        agent_id3 = agents[0]['id']

                        # The freshly re-enabled agent has fewer routers,
                        # so LeastRouters must pick it.
                        self.assertNotEqual(agent_id1, agent_id3)
class L3DvrScheduler(l3_db.L3_NAT_db_mixin,
                     l3_dvrscheduler_db.L3_DVRsch_db_mixin):
    """Device under test: combines the L3 NAT DB mixin with the DVR
    scheduler DB mixin; adds no behavior of its own."""
    pass
class L3DvrSchedulerTestCase(base.BaseTestCase):
    """Unit tests for the DVR scheduler DB mixin, with all plugin and
    notifier dependencies mocked out."""

    def setUp(self):
        plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin'
        self.setup_coreplugin(plugin)
        super(L3DvrSchedulerTestCase, self).setUp()
        self.adminContext = q_context.get_admin_context()
        self.dut = L3DvrScheduler()

    def test_dvr_update_router_addvm(self):
        """A new VM port on a DVR-served subnet triggers router update
        notification without raising."""
        # A compute (VM) port on the subnet served by the DVR router.
        port = {
                'device_id': 'abcd',
                'device_owner': 'compute:nova',
                'fixed_ips': [
                    {
                        'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
                        'ip_address': '10.10.10.3'
                    }
                ]
        }
        # The distributed router's interface port on the same subnet.
        dvr_port = {
                'id': 'dvr_port1',
                'device_id': 'r1',
                'device_owner': 'network:router_interface_distributed',
                'fixed_ips': [
                    {
                        'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
                        'ip_address': '10.10.10.1'
                    }
                ]
        }
        r1 = {
              'id': 'r1',
              'distributed': True,
        }

        with contextlib.nested(
            mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
                       '.get_ports', return_value=[dvr_port]),
            mock.patch('neutron.manager.NeutronManager.get_service_plugins',
                       return_value=mock.Mock()),
            mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
                       return_value=r1),
            mock.patch('neutron.api.rpc.agentnotifiers.l3_rpc_agent_api'
                       '.L3AgentNotifyAPI')):
            self.dut.dvr_update_router_addvm(self.adminContext, port)

    def test_get_dvr_routers_by_vmportid(self):
        """The router owning the DVR interface on the VM port's subnet is
        returned."""
        dvr_port = {
                'id': 'dvr_port1',
                'device_id': 'r1',
                'device_owner': 'network:router_interface_distributed',
                'fixed_ips': [
                    {
                        'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
                        'ip_address': '10.10.10.1'
                    }
                ]
        }
        r1 = {
              'id': 'r1',
              'distributed': True,
        }

        with contextlib.nested(
            mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
                       '.get_port', return_value=dvr_port),
            mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
                       '.get_ports', return_value=[dvr_port])):
            router_id = self.dut.get_dvr_routers_by_vmportid(self.adminContext,
                                                             dvr_port['id'])
            self.assertEqual(router_id.pop(), r1['id'])

    def test_get_subnet_ids_on_router(self):
        """All subnet ids attached to the router's DVR interfaces are
        collected."""
        dvr_port = {
                'id': 'dvr_port1',
                'device_id': 'r1',
                'device_owner': 'network:router_interface_distributed',
                'fixed_ips': [
                    {
                        'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
                        'ip_address': '10.10.10.1'
                    }
                ]
        }
        r1 = {
              'id': 'r1',
              'distributed': True,
        }

        with contextlib.nested(
            mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
                       '.get_ports', return_value=[dvr_port])):
            sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext,
                                                        r1['id'])
            self.assertEqual(sub_ids.pop(),
                            dvr_port.get('fixed_ips').pop(0).get('subnet_id'))

    def test_check_vm_exists_on_subnet(self):
        """A port owned by the probed device itself does not count as a VM
        on the subnet, so the check returns False."""
        dvr_port = {
                'id': 'dvr_port1',
                'device_id': 'r1',
                'status': 'ACTIVE',
                'binding:host_id': 'thisHost',
                'device_owner': 'compute:nova',
                'fixed_ips': [
                    {
                        'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
                        'ip_address': '10.10.10.1'
                    }
                ]
        }
        r1 = {
              'id': 'r1',
              'distributed': True,
        }
        with contextlib.nested(
            mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
                       '.get_ports', return_value=[dvr_port]),
            mock.patch('neutron.manager.NeutronManager.get_service_plugins',
                       return_value=mock.Mock()),
            mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
                       return_value=r1),
            mock.patch('neutron.api.rpc.agentnotifiers.l3_rpc_agent_api'
                       '.L3AgentNotifyAPI')):
            sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext,
                                                        r1['id'])
            result = self.dut.check_vm_exists_on_subnet(
                    self.adminContext,
                    'thisHost', 'dvr_port1',
                    sub_ids)
            self.assertFalse(result)

    def test_schedule_snat_router_with_snat_candidates(self):
        """With no existing binding and one candidate, the SNAT service is
        bound to that candidate exactly once."""
        agent = agents_db.Agent()
        agent.admin_state_up = True
        agent.heartbeat_timestamp = timeutils.utcnow()
        with contextlib.nested(
                mock.patch.object(query.Query, 'first'),
                mock.patch.object(self.dut, 'get_l3_agents'),
                mock.patch.object(self.dut, 'get_snat_candidates'),
                mock.patch.object(self.dut, 'bind_snat_servicenode')) as (
                mock_query, mock_agents, mock_candidates, mock_bind):
            mock_query.return_value = []
            mock_agents.return_value = [agent]
            mock_candidates.return_value = [agent]
            self.dut.schedule_snat_router(
                self.adminContext, 'foo_router_id', mock.ANY, True)
        mock_bind.assert_called_once_with(
            self.adminContext, 'foo_router_id', [agent])
|
from train_helper import get_input_args
from train_helper import load_data
from make_model import make_model
from train_helper import train_model
from train_helper import compute_accuracy
from train_helper import save_checkpoint
from workspace_utils import active_session
import torch
from torch import optim
# Parse command line arguments controlling data location, architecture and
# training hyperparameters.
in_arg = get_input_args()
data_dir = in_arg.data_dir
save_dir = in_arg.save_dir
arch = in_arg.arch
lr = in_arg.learning_rate
hidden_units = in_arg.hidden_units
drop = in_arg.drop
epochs = in_arg.epochs
gpu = in_arg.gpu

# Resolve the training device: CUDA only if requested AND available;
# exit early when the user asked for GPU but none exists.
device = 'cpu'
if gpu:
    if torch.cuda.is_available():
        device = 'cuda'
        print("GPU mode enabled\n")
    else:
        print("Device doesn't support CUDA\n")
        exit(0)
else:
    device = 'cpu'
    print("Further training will be done on cpu, switch to GPU\n")
print("Selected Device: ", device, "\n")
# load the datasets
data, loader = load_data(data_dir)
# make model
model = make_model(arch, hidden_units, drop)
model.to(device)
# set optimizer state according to arch: vgg16/densenet121 expose the head
# as .classifier, resnet50 as .fc.
# NOTE(review): any other arch leaves `optimizer` undefined and the
# train_model call below raises NameError — confirm get_input_args
# restricts arch to these three values.
if model.name == 'vgg16' or model.name == 'densenet121':
    optimizer = optim.Adam(model.classifier.parameters(), lr=lr)
elif model.name == 'resnet50':
    optimizer = optim.Adam(model.fc.parameters(), lr=lr)
# train model, get new state of optimizer in order to save it in checkpoint
trained_model, optimizer = train_model(model, optimizer, epochs, device, data, loader)
# check accuracy
compute_accuracy(trained_model, loader, device)
# save model
# NOTE(review): `model` (not `trained_model`) is saved — presumably
# train_model mutates the model in place so they are the same object;
# confirm in train_helper.
save_checkpoint(model , arch, optimizer, lr, epochs, hidden_units, drop, save_dir)
|
#!/usr/bin/python
# Reads an N-node graph with I edges from stdin and prints the number of
# unordered node pairs that are NOT in the same connected component:
# total pairs minus the pairs inside each component.
N, I = map(int, input().strip().split())
assert 1 <= N <= 10**5
assert 1 <= I <= 10**6
components = []  # disjoint sets of nodes seen so far
for _ in range(I):
    a, b = map(int, input().strip().split())
    assert 0 <= a < N and 0 <= b < N
    # Merge every existing component touching a or b into one new set;
    # building a fresh list avoids deleting while iterating.
    merged = {a, b}
    remaining = []
    for comp in components:
        if a in comp or b in comp:
            merged |= comp
        else:
            remaining.append(comp)
    remaining.append(merged)
    components = remaining
# Bug fix: use integer division. The previous '/' produced a float, so a
# count such as 10 was printed as 10.0.
answer = N * (N - 1) // 2
for comp in components:
    answer -= len(comp) * (len(comp) - 1) // 2
print(answer)
|
from itertools import cycle
import logging
logger = logging.getLogger(__name__)
class UAVSerialNumberValidator():
    ''' A class to validate the Serial number of a UAV per the ANSI/CTA-2063-A standard '''

    def code_contains_O_or_I(self, manufacturer_code):
        """Return True if the manufacturer code contains the disallowed
        characters 'O' or 'I' (excluded to avoid confusion with '0'/'1')."""
        return 'O' in manufacturer_code or 'I' in manufacturer_code

    def __init__(self, serial_number):
        # Expected layout: 4-char manufacturer code, 1-char length code,
        # then the manufacturer-assigned serial of the declared length.
        self.serial_number = serial_number
        # Length code -> required length of the trailing serial portion.
        self.serial_number_length_code_points = {'1':1,'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9,'A':10,'B':11,'C':12,'D':13,'E':14,'F':15}
        # Characters permitted in the serial portion (no 'O' or 'I').
        # TODO(review): is_valid does not yet check the serial characters
        # against this alphabet.
        self.serial_number_code_points = ['0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F','G','H','J','K','L','M','N','P','Q','R','S','T','U','V','W','X','Y','Z']

    def is_valid(self):
        """Return True iff self.serial_number conforms to the expected layout."""
        manufacturer_code = self.serial_number[:4]
        # The manufacturer code must be exactly four characters. (The old
        # check `if not len(...)` only rejected an empty string, relying on
        # later checks to catch short inputs.)
        if len(manufacturer_code) != 4:
            return False
        if self.code_contains_O_or_I(manufacturer_code = manufacturer_code):
            return False
        character_length_code = self.serial_number[4:5]
        # Length code can only be 1-9, A-F
        if character_length_code not in self.serial_number_length_code_points:
            return False
        # The rest of the string must match the declared length exactly.
        manufacturers_code = self.serial_number[5:]
        if (len(manufacturers_code) != self.serial_number_length_code_points[character_length_code]):
            return False
        return True
class OperatorRegistrationNumberValidator():
    ''' A class to validate a Operator Registration provided number per the EN4709-02 standard'''

    def __init__(self, operator_registration_number):
        # Expected layout:
        #   <3-letter country code><12-char base id><1 checksum char>-<3 secure chars>
        self.operator_registration_number = operator_registration_number
        # Base-36 alphabet (0-9, a-z) used to encode id characters.
        self.registration_number_code_points = ['0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']

    def gen_checksum(self, raw_id):
        """Compute the checksum character for raw_id (the 12-char base id
        concatenated with the 3 secure characters, 15 chars total)."""
        assert raw_id.isalnum()
        assert len(raw_id) == 15
        d = {v: k for k, v in enumerate(self.registration_number_code_points)}
        numeric_base_id = list(map(d.__getitem__, list(raw_id)))
        # Multiplication factors for each digit depending on its position
        mult_factors = cycle([2, 1])
        def partial_sum(number, mult_factor):
            """Calculate partial sum for a single digit."""
            quotient, remainder = divmod(number * mult_factor, 36)
            return quotient + remainder
        # (the map above already yields ints, so no int() conversion needed)
        final_sum = sum(partial_sum(character, mult_factor) for character, mult_factor in zip(numeric_base_id, mult_factors))
        # Calculate control number based on partial sums
        control_number = -final_sum % 36
        return self.registration_number_code_points[control_number]

    def is_valid(self):
        """Return True iff the registration number is well-formed and its
        checksum matches; malformed input returns False instead of raising."""
        # Bug fix: exactly one hyphen must separate the registration part
        # from the secure characters. Previously a missing or extra hyphen
        # made the tuple-unpacking of split('-') raise ValueError.
        parts = self.operator_registration_number.split('-')
        if len(parts) != 2:
            return False
        oprn, secure_characters = parts
        if len(oprn) != 16:
            return False
        if len(secure_characters) != 3:
            return False
        base_id = oprn[3:-1]
        if not base_id.isalnum():
            return False
        checksum = self.operator_registration_number[-5]  # checksum
        random_three_alnum_string = self.operator_registration_number[-3:]
        raw_id = base_id + random_three_alnum_string
        # Bug fix: reject characters outside the 0-9/a-z alphabet (e.g.
        # uppercase letters) instead of letting the checksum lookup raise
        # KeyError. NOTE(review): the 3-letter country code is still not
        # validated — confirm whether that is required.
        if any(ch not in self.registration_number_code_points for ch in raw_id):
            return False
        computed_checksum = self.gen_checksum(raw_id)
        if computed_checksum != checksum:
            return False
        return True
#!/usr/bin/env python
# Very quick hack to monitor 3ware RAID cards via SNMP with check_mk
# Tested using Windows 3ware SNMP plugin (3wSnmp.msi from http://kb.lsi.com)
# Hereward Cooper <coops@fawk.eu> - Sep 2012
# MIB: TW-RAID-MIB
# .1.3.6.1.4.1.1458.100.22.1.10.X = twRaidDriveStatus.X
# 255 = OK
def inventory_3ware_disks_snmp(checkname, info):
    """check_mk inventory function: register one item-less service when the
    SNMP walk returned any drive-status rows, otherwise nothing."""
    if info != []:
        return [(None, None)]
    return []
def check_3ware_disks_snmp(item, params, info):
    """check_mk check function: one status fragment per drive row.

    A twRaidDriveStatus value of '255' means the drive is OK; anything
    else marks the whole service CRITICAL (state 2).
    """
    fragments = []
    state = 0
    for index, row in enumerate(info):
        status = row[0]
        if status == '255':
            fragments.append("Disk %s OK" % index)
        else:
            fragments.append("Disk %s Error (code: %s)" % (index, status))
            state = 2
    prefix = "OK - " if state == 0 else "CRITICAL - "
    return (state, prefix + ", ".join(fragments))
# Register with check_mk: (check function, service description,
# has_perfdata flag, inventory function).
check_info["3ware_disks_snmp"] = (check_3ware_disks_snmp, "3ware RAID Disks", 0, inventory_3ware_disks_snmp)
# Walk twRaidDriveStatus: .1.3.6.1.4.1.1458.100.22.1.10.X (255 == OK).
snmp_info["3ware_disks_snmp"] = ( ".1.3.6.1.4.1.1458.100.22.1", ["10"] )
|
"""
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
@author Arthur Allshire
@email arthur.allshire@mail.utoronto.ca
@author Mayank Mittal
@email mittalma@ethz.ch
@brief Defines vec-environment wrapper for environments in isaacgym simulator.
"""
# leibnizgym
from leibnizgym.envs.env_base import IsaacEnvBase
# python
from gym import spaces
from typing import Tuple
import torch
import numpy as np
class VecTask:
    """
    A wrapper around a base environment for RL training using IsaacGym simulator..
    TODO (@mayank): All wrappers should still inherit from env_base class to ensure consistency!
                    Since we don't support C++ environments, the VecTask and VecTaskPython can be merged.
    """

    def __init__(self, task: IsaacEnvBase, rl_device: str, clip_obs: float = 5.0, clip_actions: float = 1.0):
        """Initialize the wrapper for RL training.

        Args:
            task: An instance of the IsaacEnvBase class to wrap around.
            rl_device: The device on which RL agent is present.
            clip_obs: Clipping for the observations.
            clip_actions: Clipping for the actions.
        """
        # check instance of input
        assert isinstance(task, IsaacEnvBase)
        # copy inputs to class members
        self._task = task
        self._clip_obs = float(clip_obs)
        self._clip_actions = float(clip_actions)
        self._rl_device = rl_device
        # set gym spaces for the environment: symmetric boxes whose bounds
        # are the clipping limits used by the subclasses.
        self._obs_space = spaces.Box(np.full(self.num_obs, -self._clip_obs),
                                     np.full(self.num_obs, self._clip_obs))
        self._state_space = spaces.Box(np.full(self.num_states, -self._clip_obs),
                                       np.full(self.num_states, self._clip_obs))
        self._act_space = spaces.Box(np.full(self.num_actions, -self._clip_actions),
                                     np.full(self.num_actions, self._clip_actions))

    def __str__(self) -> str:
        """Return a human-readable summary of the wrapped environment."""
        msg = f"Vectorized Environment around task: {type(self._task).__name__} \n" \
              f"\t Number of instances : {self.num_envs} \n" \
              f"\t Number of observations: {self.num_obs} \n" \
              f"\t Number of states : {self.num_states} \n" \
              f"\t Number of actions : {self.num_actions} \n" \
              f"\t Observation clipping : {self._clip_obs} \n" \
              f"\t Actions clipping : {self._clip_actions} \n"
        return msg

    """
    Properties
    """

    def get_number_of_agents(self) -> int:
        """Returns number of agents in the environment (used for multi-agent environments)"""
        # Fall back to a single agent when the task does not define it.
        if hasattr(self._task, 'get_number_of_agents'):
            return self._task.get_number_of_agents()
        else:
            return 1

    @property
    def num_envs(self) -> int:
        # Number of parallel environment instances in the task.
        return self._task.get_num_instances()

    @property
    def num_states(self) -> int:
        # Dimension of the (privileged) state vector.
        return self._task.get_state_dim()

    @property
    def num_obs(self) -> int:
        # Dimension of the observation vector.
        return self._task.get_obs_dim()

    @property
    def num_actions(self) -> int:
        # Dimension of the action vector.
        return self._task.get_action_dim()

    @property
    def observation_space(self) -> spaces.Box:
        return self._obs_space

    @property
    def state_space(self) -> spaces.Box:
        return self._state_space

    @property
    def action_space(self) -> spaces.Box:
        return self._act_space

    """
    Operations - Implementation specfic.
    """

    def dump_config(self, filename: str):
        """Dumps the environment configuration into a YAML file.

        Args:
            filename: The path to the file to save configuration into.
        """
        self._task.dump_config(filename)

    def reset(self) -> torch.Tensor:
        # Implemented by subclasses (e.g. VecTaskPython).
        raise NotImplementedError

    def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, dict]:
        # Implemented by subclasses (e.g. VecTaskPython).
        raise NotImplementedError
class VecTaskPython(VecTask):
    """
    Wrapper for Python CPU/GPU environment on IsaacGym simulator.
    """

    def __init__(self, task: IsaacEnvBase, rl_device: str, clip_obs: float = 5.0, clip_actions: float = 1.0):
        """Initialize the wrapper for RL training.

        Args:
            task: An instance of the IsaacEnvBase class to wrap around.
            rl_device: The device on which RL agent is present.
            clip_obs: Clipping for the observations.
            clip_actions: Clipping for the actions.
        """
        super().__init__(task, rl_device, clip_obs, clip_actions)

    """
    Properties
    """

    def get_state(self) -> torch.Tensor:
        """Return the task's state buffer, clipped and moved to the RL device."""
        clipped_states = torch.clamp(self._task.states_buf, -self._clip_obs, self._clip_obs)
        return clipped_states.to(self._rl_device)

    """
    Operations - Implementation specific.
    """

    def reset(self) -> torch.Tensor:
        """Reset the wrapped task and return clipped observations on the RL device."""
        raw_obs = self._task.reset()
        return torch.clamp(raw_obs, -self._clip_obs, self._clip_obs).to(self._rl_device)

    def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, dict]:
        """Step the wrapped task with clipped actions.

        Returns:
            Tuple of (observations, rewards, done flags, info dict), with
            tensors moved to the RL device and observations clipped.
        """
        # Render the GUI first so it reflects the state being acted upon.
        if self._task.visualize:
            self._task.render()
        # Clip input actions before handing them to the simulator.
        clipped_actions = torch.clamp(actions, -self._clip_actions, self._clip_actions)
        obs, rew, is_done, info = self._task.step(clipped_actions)
        # Copy result tensors over to the RL agent's device.
        obs = torch.clamp(obs, -self._clip_obs, self._clip_obs).to(self._rl_device)
        return obs, rew.to(self._rl_device), is_done.to(self._rl_device), info
# EOF
|
import plotly.figure_factory as ff
from datetime import datetime
import numpy as np
def int2dt(x):
    """Map an integer day offset to a date string.

    Day 0 corresponds to one year (31536000 s) past the epoch and each unit
    of x adds one day (86400 s). NOTE(review): the format '%Y-%d-%m' puts
    the day before the month — presumably intentional for these synthetic
    tick labels; confirm if real dates matter.
    """
    seconds = 31536000 + x * 24 * 3600
    return datetime.fromtimestamp(seconds).strftime("%Y-%d-%m")
# Two tasks, each with two intervals, colored by the Resource column.
df = [dict(Task="T111111111111111", Start=int2dt(0), Finish=int2dt(1), Resource='Func1'),
      dict(Task="T111111111111111", Start=int2dt(3), Finish=int2dt(4), Resource='Func2'),
      dict(Task="T222222222222222", Start=int2dt(5), Finish=int2dt(6), Resource='Func1'),
      dict(Task="T222222222222222", Start=int2dt(7), Finish=int2dt(8), Resource='Func2'),
      ]
# Fixed colors per resource so the legend is stable.
colors = {'Func1': 'rgb(220, 0, 0)',
          'Func2': 'rgb(0, 255, 100)'}
# group_tasks=True draws both intervals of a task on the same row.
fig = ff.create_gantt(df, colors=colors, index_col='Resource', show_colorbar=True,
                      group_tasks=True)
# Relabel the x axis: place ticks at the synthetic dates 0..10 but show the
# plain integers instead of the date strings.
num_tick_labels = np.linspace(start=0, stop=10, num=11, dtype=int)
date_ticks = [int2dt(x) for x in num_tick_labels]
fig.layout.xaxis.update({'tickvals': date_ticks, 'ticktext': num_tick_labels})
fig.show()
|
"""You can use scipy.optimize.root to achieve this, as described in the SciPy documentation.
You can use any of the methods listed there; I am choosing broyden1. With this method you can set fatol to 0.2, since it is the absolute tolerance for the residual."""
from scipy.optimize import root
# Global counter of how many times myFunc has been evaluated.
amountOfCalls = 0


def myFunc(x):
    """Evaluate f(x) = x^3 + x - 5, counting each call in amountOfCalls."""
    global amountOfCalls
    amountOfCalls += 1
    return x ** 3 + x - 5
# Solve f(x) = 0 with broyden1 starting from x0 = 3; the solver stops once
# the absolute residual drops below fatol (0.2).
x0 = 3
res = root(myFunc, x0, method='broyden1', options={'disp':True, 'fatol':0.2})
print(res)
"""The above code will terminate once the absolute residual abs(myFunc(x)) falls below 0.2.
If you want the condition abs(f(x)) < 0.2 checked explicitly, you can use the callback function, since it is called at each iteration. You can then test the condition on every iteration and raise an error to stop the root-finding method, as follows:"""
from scipy.optimize import root
class Error(Exception):
    """Base class for exceptions raised by this example."""
    pass
class AlmostReachedZeroError(Error):
    """Raised from the solver callback once abs(f(x)) is small enough."""
    pass
class rootFinder():
    """Root finder that aborts via the solver callback as soon as
    abs(f(x)) < 0.2."""

    def __init__(self):
        # Number of callback invocations (one per solver iteration).
        self.amountOfCalls = 0

    def fun(self, x0):
        """The function whose root is sought: f(x) = x^3 + x - 5."""
        y = x0 ** 3 + x0 - 5
        return y

    def callback(self, x, f):
        # callback to terminate if condition is met
        self.amountOfCalls+=1
        if abs(self.fun(x[0])) < 0.2:
            print("Current x: ", x[0], "Current fval: ", f[0],
                  "Iterations: ", self.amountOfCalls)
            raise AlmostReachedZeroError("Terminating optimization: limit reached")

    def find_root(self):
        """Run broyden1 from x0 = 3; the callback aborts once |f(x)| < 0.2."""
        x0 = 3
        # Bug fix: the absolute residual tolerance option is 'fatol' (as the
        # surrounding text describes); 'ftol' is the *relative* tolerance,
        # so the previous code did not implement the stated stopping rule.
        opt = root(self.fun, x0, method='broyden1', callback=self.callback,
                   options={'disp':True, 'fatol':0.2})
        return opt
rf = rootFinder()
res = rf.find_root()
print(res)
"""The above code will terminate the root-finding method if abs(f(x)) < 0.2, and will produce an Error. Also, the current x value, the current fval, and the iterations that the method took will be printed."""
|
import unittest
from problems.arrays_strings.check_permutation import *
class CheckPermutationTest(unittest.TestCase):
    """Tests for both permutation-check implementations."""

    def test_are_permutations_sort(self):
        """Sort-based checker: accepts a true permutation, rejects other strings."""
        self.assertTrue(
            are_permutations_sort("qwerty", "yetrwq"))
        self.assertFalse(
            are_permutations_sort("yopqweyeut", "qwoiuriwtpoiwq39dkfj"))

    def test_are_permutations(self):
        """Counting-based checker: must agree on the same positive/negative pairs."""
        self.assertTrue(
            are_permutations("qwerty", "yetrwq"))
        self.assertFalse(
            are_permutations("yopqweyeut", "qwoiuriwtpoiwq39dkfj"))
|
# Interactive brute-force wordlist generator.
# NOTE(review): Python 2 only — relies on raw_input, xrange, file() and
# time.clock(), all of which were removed in Python 3.
import time
#colors (ANSI terminal escape sequences)
red='\033[91m'
# NOTE(review): '\033[21m' is not the bold code ('\033[1m' is); presumably
# intended as a text-style toggle — confirm intent.
b='\033[21m'
green='\033[92m'
yellow='\033[93m'
cyan='\033[96m'
blue='\033[94m'
print (red+"""
 !!\ /!! /_\ /|| !!==!! ===\ !! !! ~~!!~~
 !! \/ !! // \ / || !! )) )) !! !! !!
 !! !! !! /) //----\ || !!===|| // !!===!! !!
 !! !! !!/ //------\ || !! )) // !! !! !!
 !! !! !! o // \ || !!==!! /==== !! !! ~~!!~~
 Mr .W O R D L I S T E R """+red)
print (red+" *<==****************$ WELCOME TO Mr.WORDLISTER $****************==>*"+red)
print (green+" *<====================[[ coded by A1B2HI ]]=======================>*"+green)
print (yellow+" *<------------( search on youtube Abhishek Gandre)---------------->*"+yellow)
# Word length and output filename are read interactively (Python 2 raw_input).
length=int(raw_input(cyan+b+"Enter The Number Of Characters: "+b+cyan))
print (" ")
name=raw_input(cyan+b+"Name Your Wordlist Wit (.txt) Extension: "+b+cyan)
# time.clock() is deprecated (removed in 3.8); used here for a crude timer.
tic = time.clock()
print (" ")
print (blue+b+"<><><><><><><><><><><><><><><><><><><><><>"+b+blue)
print (" ")
print (yellow+b+"Wordlist Generating Please Wait!"+b+yellow)
print (" ")
print (blue+b+"<><><><><><><><><><><><><><><><><><><><><>"+b+blue)
print (" ")
# 'lista' is an odometer of per-position indices into the alphabet 'string'.
lista=[0 for x in xrange(length)]
x=length-1
# Candidate alphabet (letters, digits, symbols); shadows the stdlib 'string' module name.
string="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*-_+=|)(/?>.<,~`"
list_of_results=[]
# Python 2 file() builtin; output is written one candidate word per line.
file1=file(name,"w")
# NOTE(review): the odometer-advance logic below looks suspect (e.g. `x+=1`
# inside the x>0 branch); the output is likely NOT the full Cartesian
# product of the alphabet — verify completeness before relying on it.
while(x>-1):
    result=""
    if lista[x]==len(string)-1:
        # Current position exhausted: emit the word, reset it, move left.
        for z in xrange (length):
            result+=string[lista[z]]
        lista[x]=0
        x-=1
    elif x==length-1:
        # Normal case: emit the word and advance the last position.
        for z in xrange(length):
            result+=string[lista[z]]
        lista[x]+=1
    else:
        for z in xrange(length):
            result+=string[lista[z]]
        lista[x]+=1
        if x>0:
            x+=1
        else:
            x=length-1
    file1.write(result+"\n")
toc = time.clock()
ttn = toc - tic
print (red+b+"<<<========================================>>>"+b+red)
print (" ")
print (green+b+"Completed in "+str(ttn)+" seconds."+b+green)
print (" ")
print (green+b+"Please check "+str(name)+" in your Mr.WORDLISTER Directoy"+b+green)
print (" ")
print (red+"THANK YOU SO MUCH FOR USING PLEASE SUBSCRIBE TO MY CHANNEL"+red)
print (red+b+"<<<========================================>>>"+b+red)
|
import datetime
from connexion import request
from anchore_engine.apis import exceptions as api_exceptions
from anchore_engine.apis.authorization import (
ActionBoundPermission,
RequestingAccountValue,
get_authorizer,
)
from anchore_engine.apis.context import ApiRequestContextProxy
from anchore_engine.clients.services import internal_client_for
from anchore_engine.clients.services.catalog import CatalogClient
from anchore_engine.common.helpers import make_response_error
from anchore_engine.subsys import logger
# Shared authorization handler used by every route decorator in this module.
authorizer = get_authorizer()
# Object-store bucket name where uploaded import content is kept.
IMPORT_BUCKET = "image_content_imports"
# NOTE(review): MAX_UPLOAD_SIZE and OPERATION_EXPIRATION_DELTA are not
# referenced anywhere in this module; presumably enforced by the catalog
# service — confirm before removing.
MAX_UPLOAD_SIZE = 100 * 1024 * 1024  # 100 MB
OPERATION_EXPIRATION_DELTA = datetime.timedelta(hours=24)
def _catalog_call(handler):
    """Run *handler* with a catalog client and map errors to HTTP responses.

    Every endpoint in this module repeated the same boilerplate: build an
    internal catalog client for the requesting account, invoke it, map
    AnchoreApiError to its own HTTP code and any other exception to a 500.
    This helper centralizes that pattern.

    :param handler: callable taking the CatalogClient, returning the body
    :return: (response_body, http_status)
    """
    try:
        client = internal_client_for(
            CatalogClient, userId=ApiRequestContextProxy.namespace()
        )
        return handler(client), 200
    except api_exceptions.AnchoreApiError as ex:
        return (
            make_response_error(ex, in_httpcode=ex.__response_code__),
            ex.__response_code__,
        )
    except Exception as ex:
        logger.exception("Unexpected error in api processing")
        return make_response_error(ex, in_httpcode=500), 500


@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def create_operation():
    """
    POST /imports/images

    :return: (operation record, 200) or an error tuple
    """
    return _catalog_call(lambda client: client.create_image_import())


@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def list_operations():
    """
    GET /imports/images

    :return: (list of operations, 200) or an error tuple
    """
    return _catalog_call(lambda client: client.list_image_import_operations())


@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def get_operation(operation_id):
    """
    GET /imports/images/{operation_id}

    :param operation_id:
    :return: (operation record, 200) or an error tuple
    """
    return _catalog_call(
        lambda client: client.get_image_import_operation(operation_id)
    )


@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def invalidate_operation(operation_id):
    """
    DELETE /imports/images/{operation_id}

    :param operation_id:
    :return: (cancel result, 200) or an error tuple
    """
    return _catalog_call(lambda client: client.cancel_image_import(operation_id))


@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def list_import_packages(operation_id):
    """
    GET /imports/images/{operation_id}/packages

    :param operation_id:
    :return: (digest list, 200) or an error tuple
    """
    return _catalog_call(
        lambda client: client.list_import_content(operation_id, "packages")
    )


@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def list_import_dockerfiles(operation_id):
    """
    GET /imports/images/{operation_id}/dockerfile

    :param operation_id:
    :return: (digest list, 200) or an error tuple
    """
    return _catalog_call(
        lambda client: client.list_import_content(operation_id, "dockerfile")
    )


@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def list_import_image_manifests(operation_id):
    """
    GET /imports/images/{operation_id}/manifest

    :param operation_id:
    :return: (digest list, 200) or an error tuple
    """
    return _catalog_call(
        lambda client: client.list_import_content(operation_id, "manifest")
    )


@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def list_import_parent_manifests(operation_id):
    """
    GET /imports/images/{operation_id}/parent_manifest

    (Docstring fixed: previously said /manifest, copied from the sibling handler.)

    :param operation_id:
    :return: (digest list, 200) or an error tuple
    """
    return _catalog_call(
        lambda client: client.list_import_content(operation_id, "parent_manifest")
    )


@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def list_import_image_configs(operation_id):
    """
    GET /imports/images/{operation_id}/image_config

    :param operation_id:
    :return: (digest list, 200) or an error tuple
    """
    return _catalog_call(
        lambda client: client.list_import_content(operation_id, "image_config")
    )


@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def import_image_packages(operation_id):
    """
    POST /imports/images/{operation_id}/packages

    :param operation_id:
    :return: upload result tuple from content_upload
    """
    return content_upload(operation_id, "packages", request)


@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def import_image_dockerfile(operation_id):
    """
    POST /imports/images/{operation_id}/dockerfile

    :param operation_id:
    :return: upload result tuple from content_upload
    """
    return content_upload(operation_id, "dockerfile", request)


@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def import_image_manifest(operation_id):
    """
    POST /imports/images/{operation_id}/manifest

    :param operation_id:
    :return: upload result tuple from content_upload
    """
    return content_upload(operation_id, "manifest", request)


@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def import_image_parent_manifest(operation_id):
    """
    POST /imports/images/{operation_id}/parent_manifest

    :param operation_id:
    :return: upload result tuple from content_upload
    """
    return content_upload(operation_id, "parent_manifest", request)


@authorizer.requires([ActionBoundPermission(domain=RequestingAccountValue())])
def import_image_config(operation_id):
    """
    POST /imports/images/{operation_id}/image_config

    :param operation_id:
    :return: upload result tuple from content_upload
    """
    return content_upload(operation_id, "image_config", request)


def content_upload(operation_id, content_type, request):
    """
    Generic handler for multiple types of content uploads. Still operates at
    the API layer.

    :param operation_id:
    :param content_type: catalog content type key, e.g. "packages"
    :param request: connexion/flask request object; raw body is forwarded
    :return: (upload result, 200) or an error tuple
    """
    return _catalog_call(
        lambda client: client.upload_image_import_content(
            operation_id, content_type, request.data
        )
    )
|
# -*- coding: utf-8 -*-
"""
collection
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
import collection.models.party
import collection.models.error_reason
class PreApprovalResult(object):
    """Implementation of the 'PreApprovalResult' model.

    Attributes:
        payer (Party): Party identifies an account holder in the wallet
            platform; validated per its ``type`` (MSISDN / EMAIL / PARTY_CODE).
        payer_currency (string): ISO4217 Currency
        payer_message (string): The message that is shown to the approver.
        validity_time (int): The request validity time of the pre-approval
        status (StatusEnum): status of the pre-approval request
        reason (ErrorReason): failure reason, when present
    """

    # Mapping from Model property names to API (wire) property names.
    _names = {
        "payer":'payer',
        "payer_currency":'payerCurrency',
        "payer_message":'payerMessage',
        "validity_time":'validityTime',
        "status":'status',
        "reason":'reason'
    }

    def __init__(self,
                 payer=None,
                 payer_currency=None,
                 payer_message=None,
                 validity_time=None,
                 status=None,
                 reason=None):
        """Constructor for the PreApprovalResult class"""
        self.payer = payer
        self.payer_currency = payer_currency
        self.payer_message = payer_message
        self.validity_time = validity_time
        self.status = status
        self.reason = reason

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary.

        Args:
            dictionary (dictionary): A dictionary representation of the object
                as obtained from the deserialization of the server's response.
                The keys MUST match property names in the API description.

        Returns:
            object: An instance of this structure class, or None.
        """
        if dictionary is None:
            return None

        # Nested models are only deserialized when present and truthy.
        raw_payer = dictionary.get('payer')
        payer = (collection.models.party.Party.from_dictionary(raw_payer)
                 if raw_payer else None)
        raw_reason = dictionary.get('reason')
        reason = (collection.models.error_reason.ErrorReason.from_dictionary(raw_reason)
                  if raw_reason else None)

        return cls(payer,
                   dictionary.get('payerCurrency'),
                   dictionary.get('payerMessage'),
                   dictionary.get('validityTime'),
                   dictionary.get('status'),
                   reason)
|
import os
import networkx as nx
if __name__ == '__main__':
    here = os.path.dirname(os.path.realpath(__file__))
    with open(f'{here}/input.txt', 'r') as reader:
        edges = []
        for record in reader.read().splitlines():
            # "AAA)BBB" means BBB orbits AAA: store the edge parent -> child.
            if record[3] == ')':
                edges.append((record[0:3], record[4:]))
            else:
                edges.append((record[4:], record[0:3]))
    orbit_map = nx.DiGraph(edges)
    # Part 1: total direct + indirect orbits = sum of ancestor counts.
    part1 = sum(len(nx.ancestors(orbit_map, n)) for n in orbit_map.nodes)
    starchart = nx.Graph(edges)
    origin = "YOU"
    destination = "SAN"
    # Part 2: transfers between the bodies YOU and SAN orbit; the path node
    # count includes both endpoints, hence the -3.
    part2 = len(nx.shortest_path(starchart, origin, destination)) - 3
    print(part1)
    print(part2)
"""
Test the deployed ocean stack
- Kubernetes, Local docker-compose, etc.
"""
def test_dummy():
    """Smoke test: always passes; proves the deployed stack's test runner works."""
    assert True
|
#!/usr/local/bin/python
# coding: utf-8
from controller import Controller
if __name__ == '__main__':
    # Entry point: all application logic is delegated to the Controller class.
    Controller.execute()
|
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
# Version kept as a tuple of ints; the dotted string is derived from it so the
# two representations can never drift apart (previously both had to be edited
# in lockstep by hand).
__version_info__ = (0, 24, 0)
__version__ = '.'.join(str(part) for part in __version_info__)
|
import os
import re
import sys
import numpy as np
# Please modify to fit your environment
import tensorflow.contrib.keras.api.keras as keras
from tensorflow.contrib.keras.api.keras.models import Sequential, Model
from tensorflow.contrib.keras.api.keras.layers import add, Input, Dense, Dropout, Add
from tensorflow.contrib.keras.api.keras import regularizers
import macro as mc
def latent(data_shape):
    """Build the shared two-layer ReLU encoder.

    :param data_shape: dimensionality of the flat input vector
    :return: Sequential model mapping (data_shape,) -> (mc._OUT_DIM,)
    """
    model = Sequential()
    model.add(Dense(mc._OUT_DIM, activation='relu', input_shape=(data_shape,),
                    kernel_regularizer=regularizers.l2(mc._L2_REGULARIZE_RATE)))
    # input_shape removed on this layer: Keras only honours input_shape on the
    # first layer of a Sequential model, so repeating it was dead, misleading
    # code (the layer's real input dim is mc._OUT_DIM, not data_shape).
    model.add(Dense(mc._OUT_DIM, activation='relu',
                    kernel_regularizer=regularizers.l2(mc._L2_REGULARIZE_RATE)))
    return model
def source_last_layer():
    """Build the source-domain output head.

    A single linear (no activation) Dense layer mapping the shared latent
    space (mc._OUT_DIM) to the source output dimension (mc._SOURCE_DIM_NUM).
    """
    model = Sequential()
    model.add(Dense(mc._SOURCE_DIM_NUM, name='source_nn_output', input_shape=(mc._OUT_DIM,)))
    return model
def target_last_layer():
    """Build the target-domain output head.

    Mirrors source_last_layer: one linear Dense layer from the shared latent
    space (mc._OUT_DIM) to the target output dimension (mc._TARGET_DIM_NUM).
    """
    model = Sequential()
    model.add(Dense(mc._TARGET_DIM_NUM, name='target_nn_output', input_shape=(mc._OUT_DIM,)))
    return model
|
import sys
import os
from __main__ import MathPy
from setuptools import setup, find_packages
os.environ['ANACONDA3_PATH'] = "C:\\ProgramData\\Anaconda3\\envs\\Calculator\\Library\\bin\\"

# GUI applications require a different base on Windows (the default is for a
# console application).
# NOTE(review): 'base' (and 'icon' below) are cx_Freeze concepts, not
# setuptools keywords — setuptools only warns about them. Also, sys.platform
# is "win32" on both 32- and 64-bit Windows, so the "win64" branch is
# unreachable; kept for fidelity, flagged for removal.
base = None
if sys.platform == "win32":
    base = "Win32GUI"
elif sys.platform == "win64":
    base = "Win64GUI"

setup(
    packages=find_packages(),
    # Fix: 'scripts' must be a LIST of paths; a bare string is iterated
    # character by character by setuptools and breaks installation.
    scripts=["__main__.py"],
    icon="Alecive-Flatwoken-Apps-Libreoffice-Math-B.ico",
    name=MathPy.__name__,
    version=MathPy.__version__,
    base=base,
    # Project uses reStructuredText, so ensure that the docutils get
    # installed or upgraded on the target machine
    include_package_data=True,
    # Fix: removed 'tkinter', 'itertools' and 'random' — they ship with the
    # standard library and are not installable from PyPI, so pip would fail
    # resolving them.
    install_requires=['matplotlib', 'sympy', 'numpy'],
    # metadata to display on PyPI
    author=MathPy.__author__,
    author_email="najmi.achraf@gmail.com",
    description="GUI Calculator",
    license="MIT License",
    keywords=['gui', 'executable'],
    url="",  # project home page, if any
    project_urls={
        "Bug Tracker": "https://github.com/AchrafNajmi/MathPy/issues",
        "Documentation": "https://github.com/AchrafNajmi/MathPy/blob/master/Release%20Notes.txt",
        "Source Code": "https://github.com/AchrafNajmi/MathPy",
    },
    python_requires='>=3.7',
    classifiers=[
        "License :: MIT :: AchrafNajmi/MathPy",
        "License :: OSI Approved :: Python Software Foundation License",
        'Programming Language :: Python :: 3.7',
        "Operating System :: Microsoft :: Windows"
    ]
    # could also include long_description, download_url, etc.
)
|
# -*- coding: utf-8 -*-
"""
BuzzlogixTextAnalysisAPILib.Configuration
This file was automatically generated for buzzlogix by APIMATIC BETA v2.0 on 12/06/2015
"""
class Configuration:
    """Static client configuration for the Buzzlogix Text Analysis API."""

    # The base URI that every API call is resolved against.
    BASE_URI = "https://buzzlogix-text-analysis.p.mashape.com"
|
# Authors:
# Loic Gouarin <loic.gouarin@polytechnique.edu>
# Benjamin Graille <benjamin.graille@math.u-psud.fr>
#
# License: BSD 3 clause
"""
Solver D1Q2 for the advection equation on the 1D-torus
d_t(u) + c d_x(u) = 0, t > 0, 0 < x < 1, (c=1/4)
u(t=0,x) = u0(x),
u(t,x=0) = u(t,x=1)
the solution is
u(t,x) = u0(x-ct).
test: True
"""
import sympy as sp
import pylbm
from exact_solvers import AdvectionSolver as exact_solver
from exact_solvers import riemann_pb
# pylint: disable=redefined-outer-name
# Symbolic variables: U is the conserved moment (the advected scalar),
# X the space variable.
U, X = sp.symbols('u, X')
# Symbolic scheme constants: advection velocity c, lattice velocity lambda,
# and the Henon parameter sigma used to express the relaxation rate.
C, LA, SIGMA = sp.symbols('c, lambda, sigma', constants=True)
def run(space_step,
        final_time,
        generator="cython",
        sorder=None,
        with_plot=True):
    """
    Simulate the D1Q2 scheme for the 1D advection equation on [xmin, xmax].

    Parameters
    ----------
    space_step: double
        spatial step
    final_time: double
        final time
    generator: string
        pylbm generator
    sorder: list
        storage order
    with_plot: boolean
        if True plot the solution otherwise just compute the solution
    Returns
    -------
    sol
        <class 'pylbm.simulation.Simulation'>
    """
    # parameters
    xmin, xmax = 0., 1. # bounds of the domain
    la = 1. # lattice velocity (la = dx/dt)
    velocity = 0.25 # velocity of the advection
    s = 1.9 # relaxation parameter
    symb_s = 1/(0.5+SIGMA) # symbolic relaxation parameter
    # initial values
    u_left, u_right = 1., 0.
    # discontinuity position
    # Placed upstream so the advected jump stays inside the domain as long
    # as possible for the given sign of the velocity.
    if velocity > 0:
        xmid = 0.75*xmin + .25*xmax
    elif velocity < 0:
        xmid = .25*xmin + .75*xmax
    else:
        xmid = .5*xmin + .5*xmax
    # fixed bounds of the graphics
    ymin = min([u_left, u_right])-.2*abs(u_left-u_right)
    ymax = max([u_left, u_right])+.2*abs(u_left-u_right)
    # Reference solution of the Riemann problem for error/plot comparison.
    exact_solution = exact_solver({
        'jump abscissa': xmid,
        'left state': [u_left],
        'right state': [u_right],
        'velocity': velocity,
    })
    # dictionary of the simulation
    simu_cfg = {
        'box': {'x': [xmin, xmax], 'label': 0},
        'space_step': space_step,
        'scheme_velocity': LA,
        'schemes': [
            {
                'velocities': [1, 2],
                'conserved_moments': U,
                'polynomials': [1, X],
                # The conserved moment is not relaxed (rate 0); only the
                # flux moment relaxes with the symbolic rate symb_s.
                'relaxation_parameters': [0., symb_s],
                'equilibrium': [U, C*U],
            },
        ],
        'init': {U: (riemann_pb, (xmid, u_left, u_right))},
        'boundary_conditions': {
            0: {'method': {
                0: pylbm.bc.Neumann,
            }, },
        },
        'generator': generator,
        'parameters': {
            LA: la,
            C: velocity,
            SIGMA: 1/s-.5
        },
        'show_code': False,
    }
    # build the simulation
    sol = pylbm.Simulation(simu_cfg, sorder=sorder)
    # build the equivalent PDE
    eq_pde = pylbm.EquivalentEquation(sol.scheme)
    print(eq_pde)
    if with_plot:
        # create the viewer to plot the solution
        viewer = pylbm.viewer.matplotlib_viewer
        fig = viewer.Fig()
        axe = fig[0]
        axe.axis(xmin, xmax, ymin, ymax)
        axe.set_label(r'$x$', r'$u$')
        x = sol.domain.x
        l1a = axe.CurveScatter(
            x, sol.m[U],
            color='navy', label=r'$D_1Q_2$',
        )
        l1e = axe.CurveLine(
            x, exact_solution.evaluate(x, sol.t)[0],
            width=1,
            color='black',
            label='exact',
        )
        axe.legend(loc='upper right',
                   shadow=False,
                   frameon=False,
                   )
        def update(iframe): # pylint: disable=unused-argument
            if sol.t < final_time: # time loop
                sol.one_time_step() # increment the solution of one time step
                l1a.update(sol.m[U])
                l1e.update(exact_solution.evaluate(x, sol.t)[0])
                axe.title = r'advection at $t = {0:f}$'.format(sol.t)
        fig.animate(update)
        fig.show()
    else:
        # Headless mode: just advance the scheme to the final time.
        while sol.t < final_time:
            sol.one_time_step()
    return sol
if __name__ == '__main__':
    # pylint: disable=invalid-name
    # Demo run with a 1/256 grid; the numpy generator avoids a cython build.
    space_step = 1./256
    final_time = 1.
    solution = run(space_step, final_time, generator="numpy")
|
import copy
from v3iokubespawner.utils import get_k8s_model, update_k8s_model, _get_k8s_model_attribute
from kubernetes.client.models import (
V1PodSpec, V1SecurityContext, V1Container, V1Capabilities, V1Lifecycle
)
class MockLogger(object):
    """Trivial class to store logs for inspection after a test run."""

    def __init__(self):
        self.warning_count = 0
        # Initialized up front so tests can read it even before any warning
        # was recorded (previously that access raised AttributeError).
        self.most_recent_warning = None

    def warning(self, message):
        """Remembers the most recent warning."""
        self.most_recent_warning = message
        self.warning_count += 1
def test__get_k8s_model_attribute():
    """Verifies fundamental behavior"""
    # Both the snake_case and the camelCase spelling must resolve to the
    # model's python attribute name.
    assert _get_k8s_model_attribute(V1PodSpec, "service_account") == "service_account"
    assert _get_k8s_model_attribute(V1PodSpec, "serviceAccount") == "service_account"
def test_update_k8s_model():
    """Ensure update_k8s_model does what it should. The test first updates
    attributes using the function, and then manually verifies that the
    correct changes have been made."""
    manually_updated_target = V1Container(
        name="mock_name",
        image="mock_image",
        command=['iptables'],
        security_context=V1SecurityContext(
            privileged=True,
            run_as_user=0,
            capabilities=V1Capabilities(add=['NET_ADMIN'])
        )
    )
    # Work on a deep copy so the reference object stays untouched by the call.
    target = copy.deepcopy(manually_updated_target)
    source = {"name": "new_mock_name"}
    update_k8s_model(target, source)
    # Apply the same change by hand; both objects must now compare equal.
    manually_updated_target.name = "new_mock_name"
    assert target == manually_updated_target
def test_update_k8s_models_logger_warning():
    """Ensure that update_k8s_model warns through the supplied logger when it
    overwrites a value that was already set on the target."""
    target = V1Container(name="mock_name")
    source = {"name": "new_mock_name", "image_pull_policy": "Always"}
    mock_logger = MockLogger()
    update_k8s_model(
        target,
        source,
        logger=mock_logger,
        target_name="notebook_container",
        changes_name="extra_container_config",
    )
    expected_fragment = (
        "'notebook_container.name' current value: 'mock_name' is overridden "
        "with 'new_mock_name', which is the value of 'extra_container_config.name'"
    )
    # Only the overwritten field warns; the newly-set field does not.
    assert mock_logger.most_recent_warning.find(expected_fragment) != -1
    assert mock_logger.warning_count == 1
def test_get_k8s_model():
    """Test that get_k8s_model works when passed either a
    kubernetes.client.models object or a dictionary representing one."""
    # verify get_k8s_model for when passing dict objects
    v1_lifecycle_from_dict = get_k8s_model(
        V1Lifecycle,
        {
            'preStop': {
                'exec': {
                    'command': ['/bin/sh', 'test']
                }
            }
        },
    )
    assert isinstance(v1_lifecycle_from_dict, V1Lifecycle)
    # camelCase input keys must come back as the model's snake_case fields.
    assert v1_lifecycle_from_dict.to_dict() == {
        'post_start': None,
        'pre_stop': {
            'exec': {
                'command': ['/bin/sh', 'test']
            }
        },
    }
    # verify get_k8s_model for when passing model objects
    v1_lifecycle_from_model_object = get_k8s_model(V1Lifecycle, v1_lifecycle_from_dict)
    assert isinstance(v1_lifecycle_from_model_object, V1Lifecycle)
    # Round-tripping a model object must be lossless.
    assert v1_lifecycle_from_model_object.to_dict() == {
        'post_start': None,
        'pre_stop': {
            'exec': {
                'command': ['/bin/sh', 'test']
            }
        },
    }
|
import json
import uuid
from rest_framework import status
from rest_framework.test import APIClient, force_authenticate
from django.contrib.auth.models import User
from django.test import TestCase
from quickstart.models import Like, Inbox, Author
from quickstart.serializers import InboxSerializer
from quickstart.tests.helper_test import get_test_post_fields, get_follow_author_fields, get_test_like_fields, get_test_author_fields
# Module-level DRF test client shared by every test case below.
client = APIClient()
class GetInbox(TestCase):
    """Tests to GET an author's inbox at endpoint /api/author/<str:author>/inbox/."""
    def setUp(self):
        # Endpoints are login-protected, so authenticate the shared client.
        client.force_authenticate(User.objects.create(username='john', password='doe'))
        self.inbox = Inbox.objects.create(author=Author.objects.create(**get_test_author_fields()))
        # Seed the inbox with one item of each supported kind.
        self.inbox.items.append(get_test_post_fields())
        self.inbox.items.append(get_follow_author_fields())
        self.inbox.items.append(get_test_like_fields())
        self.inbox.save()
    def test_get_inbox(self):
        # Fetching an existing inbox returns the serialized model.
        response = client.get(f'/api/author/{self.inbox.author.id}/inbox/')
        inbox = Inbox.objects.get(author=self.inbox.author)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, InboxSerializer(inbox).data)
    def test_get_invalid_inbox(self):
        # A random UUID matches no author, so the endpoint must 404.
        response = client.get(f'/api/author/{uuid.uuid4()}/inbox/')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
class PostInbox(TestCase):
    """Tests for sending a post/follow/like to an inbox by POST'ing to /api/author/<str:author>/inbox/."""
    def setUp(self):
        client.force_authenticate(User.objects.create(username='john', password='doe'))
        # Fresh, empty inbox for every test method.
        self.inbox = Inbox.objects.create(author=Author.objects.create(**get_test_author_fields()))
    def test_send_post_to_inbox(self):
        fields = get_test_post_fields()
        # The inbox endpoint dispatches on the payload's 'type' key.
        fields['type'] = 'post'
        self.send_to_inbox(fields)
    def test_send_follow_to_inbox(self):
        fields = get_follow_author_fields()
        fields['type'] = 'follow'
        self.send_to_inbox(fields)
    def test_send_like_to_inbox(self):
        fields = get_test_like_fields()
        fields['type'] = 'like'
        self.send_to_inbox(fields)
        # Ensure a Like model is saved into our database.
        self.assertTrue(Like.objects.get(object=fields['object']))
    def send_to_inbox(self, payload):
        # Shared helper: POST the payload and assert it landed in the inbox.
        response = client.post(
            f'/api/author/{self.inbox.author.id}/inbox/',
            data=json.dumps(payload),
            content_type='application/json'
        )
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        # Re-fetch from the DB: self.inbox is stale after the request.
        changed_inbox = Inbox.objects.get(author=self.inbox.author)
        self.assertEqual(len(changed_inbox.items), 1)
        self.assertEqual(changed_inbox.items[0], payload)
class ClearInbox(TestCase):
    """Tests for clearing an author's inbox by DELETE'ing to /api/author/<str:author>/inbox/."""
    def setUp(self):
        client.force_authenticate(User.objects.create(username='john', password='doe'))
        self.inbox = Inbox.objects.create(author=Author.objects.create(**get_test_author_fields()))
        # One seeded item so the clear operation has something to remove.
        self.inbox.items.append(get_follow_author_fields())
        self.inbox.save()
    def test_clear_inbox(self):
        self.assertEqual(len(self.inbox.items), 1)
        response = client.delete(f'/api/author/{self.inbox.author.id}/inbox/')
        # Re-fetch from the DB: the in-memory self.inbox is stale after DELETE.
        changed_inbox = Inbox.objects.get(author=self.inbox.author)
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        self.assertEqual(len(changed_inbox.items), 0)
#
# Copyright 2015-2020 Andrey Galkin <andrey@futoin.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ..buildtool import BuildTool
from ..testtool import TestTool
from .piptoolmixin import PipToolMixIn
class setuptoolsTool(PipToolMixIn, BuildTool, TestTool):
    """Easily download, build, install, upgrade, and uninstall Python packages.

    Home: https://pypi.python.org/pypi/setuptools

    Not assumed to be used directly.

    Build targets:
        prepare -> {removes build & dist folders}
        build -> ['sdist', 'bdist_wheel']
        package -> {uses result of build in dist/}
    Override targets with .config.toolTune.
    """

    __slots__ = ()

    def autoDetectFiles(self):
        # Presence of setup.py marks a setuptools-based project.
        return 'setup.py'

    def uninstallTool(self, env):
        # Nothing to remove; setuptools lives inside the project virtualenv.
        pass

    def initEnv(self, env):
        ospath = self._ospath
        marker = ospath.join(env['virtualenvDir'], 'bin', 'easy_install')
        self._have_tool = ospath.exists(marker)

    def onPrepare(self, config):
        # Remove the configured (or default) stale output directories.
        raw_targets = self._getTune(config, 'prepare', ['build', 'dist'])
        for stale_dir in self._configutil.listify(raw_targets):
            self._pathutil.rmTree(stale_dir)

    def onBuild(self, config):
        env = config['env']
        self._requirePip(env, 'wheel')
        build_targets = self._configutil.listify(
            self._getTune(config, 'build', ['sdist', 'bdist_wheel']))
        self._executil.callMeaningful(
            [env['pythonBin'], 'setup.py'] + build_targets)

    def onPackage(self, config):
        # Everything produced under dist/ (by default) becomes package content.
        dist_dir = self._getTune(config, 'package', 'dist')
        self._pathutil.addPackageFiles(
            config, self._pathutil.safeJoin(dist_dir, '*'))

    def onCheck(self, config):
        env = config['env']
        self._requirePip(env, 'docutils')
        self._requirePip(env, 'readme')
        self._executil.callMeaningful(
            [env['pythonBin'], 'setup.py', 'check', '-mrs'])
|
#!/usr/bin/env python
from pathlib import Path
from tempfile import TemporaryDirectory
from unittest import TestCase, main
from unittest.mock import Mock, patch
from cli_command_parser.utils import camel_to_snake_case, get_args, ProgramMetadata, ProgInfo
class UtilsTest(TestCase):
    """Unit tests for cli_command_parser.utils helpers."""
    def test_camel_to_snake(self):
        self.assertEqual('foo_bar', camel_to_snake_case('FooBar'))
        # The second argument overrides the joining delimiter (default '_').
        self.assertEqual('foo bar', camel_to_snake_case('FooBar', ' '))
        self.assertEqual('foo', camel_to_snake_case('Foo'))
    def test_get_args(self):
        # This is for coverage in 3.9+ for the get_args compatibility wrapper, to mock the attr present in 3.8 & below
        self.assertEqual((), get_args(Mock(_special=True)))
    def test_meta_name(self):
        # An explicitly supplied doc_name must be stored verbatim.
        meta = ProgramMetadata(doc_name='foo')
        self.assertEqual('foo', meta.doc_name)
    def test_real_bad_path(self):
        # argv points at a file that does not exist, and frame inspection is
        # forced to fail, so path detection must fall back to 'UNKNOWN'.
        with TemporaryDirectory() as tmp_dir:
            tmp_path = Path(tmp_dir).joinpath('foo.py')
            with patch('cli_command_parser.utils.sys.argv', [tmp_path.as_posix()]):
                with patch.object(ProgInfo, '_find_top_frame_and_globals', side_effect=RuntimeError):
                    meta = ProgramMetadata()
        self.assertEqual(meta.path.name, 'UNKNOWN')
    def test_fake_bad_path(self):
        # Same fallback when even the filesystem check raises OSError.
        with patch('pathlib.Path.is_file', side_effect=OSError):
            with patch.object(ProgInfo, '_find_top_frame_and_globals', side_effect=RuntimeError):
                meta = ProgramMetadata()
        self.assertEqual(meta.path.name, 'UNKNOWN')
    def test_prog_info_repr(self):
        # repr must never raise, even for a default-constructed ProgInfo.
        self.assertIsNotNone(repr(ProgInfo()))
if __name__ == '__main__':
    try:
        # verbosity=2 lists each test; exit=False keeps the interpreter alive
        # so a Ctrl-C during the run can be caught and handled quietly.
        main(warnings='ignore', verbosity=2, exit=False)
    except KeyboardInterrupt:
        print()
|
# -*- coding:utf-8 -*-
# @Time : 19-7-4 下午7:48
# @Author : zhangshanling
from flask import abort
from threading import Thread
from functools import wraps
from flask_login import current_user
import os
def async_task(f):
    """Decorator: run the wrapped function in a daemonless background thread.

    Renamed from ``async``: since Python 3.7 ``async`` is a reserved keyword,
    so ``def async(f)`` is a SyntaxError and this module cannot be imported.

    Returns the started ``Thread`` so callers may ``join()`` it; callers that
    ignore the return value (as before) are unaffected.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        thr = Thread(target=f, args=args, kwargs=kwargs)
        thr.start()
        return thr
    return wrapper
def admin_required(func):
    """View decorator: abort with 403 unless the current user is an admin."""
    @wraps(func)
    def decorated_function(*args, **kwargs):
        # Reject non-admin users before the view body runs.
        if not current_user.is_admin():
            abort(403)
        return func(*args, **kwargs)
    return decorated_function
def author_required(func):
    """View decorator: abort with 403 unless the current user is an author."""
    @wraps(func)
    def decorated_function(*args, **kwargs):
        # Reject non-author users before the view body runs.
        if not current_user.is_author():
            abort(403)
        return func(*args, **kwargs)
    return decorated_function
def tag_split(tags):
    """Split a tag string into a list of individual tags.

    :param tags: the raw tag string to split
    :return: a list of tag strings
    """
    # NOTE(review): the two split delimiters below appear to be the same
    # comma, which makes the inner split a no-op; presumably one of them was
    # meant to be the full-width comma "," so that both separators are
    # accepted — confirm against the expected input format.
    ts = []
    for tag in tags.split(u','):
        t = tag.split(u',')
        ts.extend(t)
    return ts
|
"""Tests for permalinks.
This tests for the correct permalink behavior
except headings, which are tested in ``test_headerlinks.py``
"""
from pathlib import Path
import pytest
from sphinx.application import Sphinx
from .util import parse_html
@pytest.mark.sphinx(
    "html",
    testroot="table",
    freshenv=True,
    confoverrides={"html_theme": "sphinxawesome_theme"},
)
def test_no_permalinks_on_tables(app: Sphinx) -> None:
    """It tests parsing a table without headerlinks."""
    # Disable permalink generation entirely before building.
    app.config.html_permalinks = False  # type: ignore[attr-defined]
    app.build()
    tree = parse_html(Path(app.outdir) / "index.html")
    tables = tree("table")
    assert len(tables) == 2
    # With permalinks off, no headerlink anchors may be emitted at all.
    headerlinks = tree("a", class_="headerlink")
    assert len(headerlinks) == 0
@pytest.mark.sphinx(
    "html",
    testroot="figure",
    freshenv=True,
    confoverrides={"html_theme": "sphinxawesome_theme"},
)
def test_no_permalinks_on_figures(app: Sphinx) -> None:
    """It tests parsing a figure without headerlinks."""
    # Disable permalink generation entirely before building.
    app.config.html_permalinks = False  # type: ignore[attr-defined]
    app.build()
    tree = parse_html(Path(app.outdir) / "index.html")
    figures = tree("figure")
    assert len(figures) == 3
    # With permalinks off, no headerlink anchors may be emitted at all.
    headerlinks = tree("a", class_="headerlink")
    assert len(headerlinks) == 0
@pytest.mark.sphinx("html", testroot="table", freshenv=True)
def test_permalink_table_default_theme(app: Sphinx) -> None:
    """Test the permalink behavior in Tables.
    This uses the default theme ``alabaster`` for
    a baseline of the expected Sphinx behavior.
    """
    app.build()
    tree = parse_html(Path(app.outdir) / "index.html")
    tables = tree("table")
    assert len(tables) == 2
    assert tables[0]["id"] == "id1"
    assert tables[1]["id"] == "id2"
    for i, table in enumerate(tables, 1):
        assert table["id"] == f"id{i}"
        # filter newline characters
        # (bare newline strings have a str.strip method; Tags resolve
        # .strip to None, so this keeps only real element children)
        children = [c for c in table.children if c.strip is None]
        caption = children[0]
        # Exact-markup snapshot of the caption including the ¶ headerlink.
        assert str(caption) == (
            '<caption><span class="caption-text">Table</span>'
            f'<a class="headerlink" href="#id{i}" '
            'title="Permalink to this table">¶</a></caption>'
        )
    # the second table has an explicit label:
    # <span id="foo"><table ...>
    span = tables[1].previous_element
    assert span.name == "span"
    assert span["id"] == "foo"
@pytest.mark.sphinx(
    "html",
    testroot="table",
    freshenv=True,
    confoverrides={"html_theme": "sphinxawesome_theme"},
)
def test_permalink_table_awesome_theme(app: Sphinx) -> None:
    """Test the permalink behavior in Tables.
    This uses the awesome theme.
    """
    app.build()
    tree = parse_html(Path(app.outdir) / "index.html")
    tables = tree("table")
    assert tables[0]["id"] == "id1"
    assert tables[1]["id"] == "id2"
    for i, table in enumerate(tables, 1):
        assert table["id"] == f"id{i}"
        # filter newline characters
        # (Tags resolve .strip to None; newline strings do not)
        children = [c for c in table.children if c.strip is None]
        caption = children[0]
        # Exact-markup snapshot: the awesome theme replaces the ¶ with an
        # aria-labelled tooltip button containing an inline SVG link icon.
        assert str(caption) == (
            '<caption><span class="caption-text">Table</span>'
            '<a aria-label="Copy link to this table." '
            'class="headerlink tooltipped tooltipped-n" '
            f'href="#id{i}" role="button"><svg pointer-events="none" '
            'viewbox="0 0 24 24" xmlns="http://www.w3.org/2000/svg">'
            '<path d="M3.9 12c0-1.71 1.39-3.1 3.1-3.1h4V7H7c-2.76 0-5 '
            "2.24-5 5s2.24 5 5 5h4v-1.9H7c-1.71 0-3.1-1.39-3.1-3.1zM8 "
            "13h8v-2H8v2zm9-6h-4v1.9h4c1.71 0 3.1 1.39 3.1 3.1s-1.39 "
            '3.1-3.1 3.1h-4V17h4c2.76 0 5-2.24 5-5s-2.24-5-5-5z">'
            "</path></svg></a></caption>"
        )
    # the second table has an explicit label:
    # <span id="foo"><table ...>
    span = tables[1].previous_element
    assert span.name == "span"
    assert span["id"] == "foo"
@pytest.mark.sphinx("html", testroot="figure", freshenv=True)
def test_permalink_figure_default_theme(app: Sphinx) -> None:
    """It tests the permalink behavior in figures.
    This test uses the default ``alabaster`` theme
    to get a good baseline.
    """
    app.build()
    tree = parse_html(Path(app.outdir) / "index.html")

    # Keep only real element children; newline strings have a str.strip
    # method while Tags resolve .strip to None.
    def element_children(node):
        return [child for child in node.children if child.strip is None]

    figures = tree("figure")
    assert len(figures) == 3
    for index, figure in enumerate(figures[:2], 1):
        assert figure["id"] == f"id{index}"
        assert figure["class"] == ["align-default"]
    # figure 3 has no alt text, hence no id
    assert "id" not in figures[2].attrs

    # figure 1: <img> followed by <figcaption>
    kids = element_children(figures[0])
    assert [k.name for k in kids] == ["img", "figcaption"]

    # figure 2: explicit label <span id="foo"> inserted before the <img>
    kids = element_children(figures[1])
    assert [k.name for k in kids] == ["span", "img", "figcaption"]
    assert kids[0]["id"] == "foo"

    captions = tree("figcaption")
    assert len(captions) == 2
    for caption in captions:
        kids = element_children(caption)
        assert [k.name for k in kids] == ["p", "div"]
        assert kids[1]["class"] == ["legend"]
@pytest.mark.sphinx(
    "html",
    testroot="figure",
    freshenv=True,
    confoverrides={"html_theme": "sphinxawesome_theme"},
)
def test_permalink_figure_awesome_theme(app: Sphinx) -> None:
    """It tests the permalink behavior in figures.
    This test uses the awesome theme.
    """
    app.build()
    tree = parse_html(Path(app.outdir) / "index.html")

    # Keep only real element children (drop bare newline strings).
    def element_children(node):
        return [child for child in node.children if child.strip is None]

    figures = tree("figure")
    assert len(figures) == 3
    assert figures[0]["id"] == "id1"
    assert figures[1]["id"] == "id2"
    # figure 3 has no alt text, hence no id
    assert "id" not in figures[2].attrs

    # figure 1: <img> followed by <figcaption>
    kids = element_children(figures[0])
    assert [k.name for k in kids] == ["img", "figcaption"]

    # figure 2: explicit label <span id="foo"> inserted before the <img>
    kids = element_children(figures[1])
    assert [k.name for k in kids] == ["span", "img", "figcaption"]
    assert kids[0]["id"] == "foo"

    captions = tree("figcaption")
    assert len(captions) == 2
    for index, caption in enumerate(captions, 1):
        kids = element_children(caption)
        # caption text, permalink anchor, then the legend block
        assert [k.name for k in kids] == ["span", "a", "div"]
        assert kids[0]["class"] == ["caption-text"]
        assert kids[1]["href"] == f"#id{index}"
        assert kids[1]["aria-label"] == "Copy link to this image."
        assert kids[2]["class"] == ["legend"]
@pytest.mark.sphinx(
    "html",
    testroot="figure",
    freshenv=True,
    confoverrides={"html_theme": "sphinxawesome_theme"},
)
def test_figure_attributes(app: Sphinx) -> None:
    """It tests if width and align attributes are passed."""
    app.build()
    doc = parse_html(Path(app.outdir) / "index.html")
    figures = doc("figure")
    assert len(figures) == 3
    third = figures[2]
    # Explicit :align: and :figwidth: options must survive into the HTML.
    assert third.attrs["class"] == ["align-left"]
    assert third.attrs["style"] == "width: 99%"
    assert third.find("img").attrs["width"] == "50%"
@pytest.mark.sphinx(
    "html",
    testroot="toctree",
    freshenv=True,
    confoverrides={"html_theme": "sphinxawesome_theme"},
)
def test_caption_on_toctree(app: Sphinx) -> None:
    """It tests parsing a table without headerlinks."""
    app.build()
    doc = parse_html(Path(app.outdir) / "index.html")
    toc_captions = doc("p", class_="caption")
    # Both toctrees carry the caption "Foo".
    assert len(toc_captions) == 2
    assert all(cap.text == "Foo" for cap in toc_captions)
|
def triangulo(num):
if num < 0: print("Error. Número debe ser > 0")
elif num == 1:
print('*')
else:
triangulo(num-1)
print('*'*num)
# Main: read the triangle's side length and draw it.
lado = int(input("Introduzca el lado del triángulo: "))
triangulo(lado)
#! /usr/bin/env python3
# _*_coding:utf-8 -*_
import time
def singleton(cls):
    """Class decorator caching one shared instance per decorated class."""
    cache = {}
    def get_instance(*args, **kargs):
        # Construct lazily on first call; reuse the cached instance after.
        try:
            return cache[cls]
        except KeyError:
            cache[cls] = cls(*args, **kargs)
            return cache[cls]
    return get_instance
@singleton
class WVSState:
    """Registry of scanner (WVS) states, keyed by Name plus address host.

    Entries are dicts with at least "Name", "Address" ([host, port]) and
    "Timestamp" (epoch float, reformatted to a string on insert).
    """

    def __init__(self):
        # List of state dicts; order of first insertion is preserved.
        self.wvs_state_list = []

    def has(self, state):
        """Return (found, index) for an entry matching Name and host."""
        identifier = "{}_{}".format(state["Name"], state["Address"][0])
        for i, existing in enumerate(self.wvs_state_list):
            if "{}_{}".format(existing["Name"], existing["Address"][0]) == identifier:
                return True, i
        return False, -1

    def add_wvs_state(self, state):
        """Insert or update the entry; formats the epoch Timestamp in place.

        Fix: the Timestamp is only formatted when it is still numeric —
        re-adding an already formatted entry previously crashed inside
        time.localtime(). The duplicated branch bodies are also merged.
        """
        existed, index = self.has(state)
        if not isinstance(state["Timestamp"], str):
            state["Timestamp"] = time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(state["Timestamp"]))
        if existed:
            self.wvs_state_list[index] = state
        else:
            self.wvs_state_list.append(state)

    def get_wvs_state_list(self):
        return self.wvs_state_list

    def remove_agent(self, state):
        """Remove the matching entry by index.

        Fix: list.remove(state) compared whole dicts and raised ValueError
        whenever the stored entry's Timestamp had been reformatted; the
        index returned by has() is now used instead.
        """
        existed, index = self.has(state)
        if existed:
            del self.wvs_state_list[index]
@singleton
class AgentState(object):
    """Registry of agent states, keyed by Name plus address host."""

    def __init__(self):
        # List of agent state dicts; order of first insertion preserved.
        self.agent_list = []

    def has(self, state):
        """Return (found, index) for an entry matching Name and host."""
        identifier = "{}_{}".format(state["Name"], state["Address"][0])
        for i, existing in enumerate(self.agent_list):
            if "{}_{}".format(existing["Name"], existing["Address"][0]) == identifier:
                return True, i
        return False, -1

    def add_agent_state(self, state):
        """Insert or update the entry; formats the epoch Timestamp in place.

        Fix: only format a numeric Timestamp — re-adding an already
        formatted entry previously crashed inside time.localtime().
        """
        existed, index = self.has(state)
        if not isinstance(state["Timestamp"], str):
            state["Timestamp"] = time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(state["Timestamp"]))
        if existed:
            self.agent_list[index] = state
        else:
            self.agent_list.append(state)

    def get_agent_list(self):
        return self.agent_list

    def remove_agent(self, state):
        """Remove the matching entry by index.

        Fix: list.remove(state) compared whole dicts and raised ValueError
        when the stored entry's Timestamp had been reformatted.
        """
        existed, index = self.has(state)
        if existed:
            del self.agent_list[index]
if __name__ == '__main__':
    # Smoke test: repeated adds of the same agent must not duplicate it.
    state = {
        "Name": "AppscanAgent",
        "Address": ["127.0.0.1", 6000],
        "Timestamp": 1529455672.7734838,
        "State": "Online"
    }
    print(time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(state["Timestamp"])))
    registry = WVSState()
    registry.add_wvs_state(state)
    registry.add_wvs_state(state)
    registry.add_wvs_state(state)
    for entry in registry.get_wvs_state_list():
        print(entry)
|
import os
from tempfile import NamedTemporaryFile, TemporaryDirectory
import unittest
from google.cloud import storage
from gfluent import GCS
class TestGCSIntegration(unittest.TestCase):
    """Integration tests for the gfluent GCS fluent wrapper.

    NOTE(review): these talk to a real bucket ("johnny-trading-data") and
    require the PROJECT_ID environment variable plus GCP credentials —
    they are system-integration tests, not unit tests.
    """
    def setUp(self):
        # Shared bucket/prefix for every test; tearDown wipes the prefix.
        self.bucket = "johnny-trading-data"
        self.prefix = "sit-temp"
        self.project_id = os.environ.get("PROJECT_ID")
        # Small scratch file to upload.
        # NOTE(review): re-opening a NamedTemporaryFile by name fails on
        # Windows; fine on POSIX.
        self.tf = NamedTemporaryFile()
        with open(self.tf.name, 'w') as f:
            f.write("hello")
        # os.makedirs("/tmp/sit-temp", exist_ok=True)
    def tearDown(self):
        # Delete every object under the test prefix in the bucket.
        GCS(self.project_id).bucket(self.bucket).prefix(self.prefix).delete()
    def test_upload_single(self):
        # Upload one local file under the prefix.
        gcs = GCS(project=self.project_id)
        gcs.bucket(self.bucket).prefix(self.prefix).local(self.tf.name).upload()
    def test_upload_many(self):
        # Upload a whole local directory (the repo's docs folder).
        gcs = GCS(project=self.project_id)
        current_path = os.path.dirname(os.path.abspath(__file__))
        (
            gcs.bucket(self.bucket)
            .prefix("sit-temp")
            .local(os.path.join(current_path, "..", "docs"))
            .upload()
        )
    def test_download_many(self):
        current_path = os.path.dirname(os.path.abspath(__file__))
        # upload first
        (
            GCS(self.project_id).bucket(self.bucket)
            .prefix(self.prefix)
            .local(os.path.join(current_path, "..", "docs"))
            .upload()
        )
        # download
        # (trailing '/' makes the prefix act as a directory listing)
        (
            GCS(self.project_id).bucket(self.bucket)
            .prefix(self.prefix + '/')
            .local("/tmp")
            .download()
        )
    def test_download_single(self):
        # upload first
        with open(os.path.join('/', "tmp", "tempfile.txt"), 'w') as f:
            f.write("hello world")
        (
            GCS(self.project_id).bucket(self.bucket)
            .prefix(self.prefix)
            .local(os.path.join('/', "tmp", "tempfile.txt"))
            .upload()
        )
        # download
        (
            GCS(self.project_id).bucket(self.bucket)
            .prefix(self.prefix + '/' + "tempfile.txt")
            .local("/tmp")
            .download()
        )
import sys, numpy as np
sys.path.append('./utils')
import models, audio_utils
from functools import partial
# Insert a channel axis after the batch axis: (N, ...) -> (N, 1, ...).
def add_channel_dim(X):
    return np.expand_dims(X, axis=1)
# Registry of training configurations, keyed by experiment name.
# Each entry bundles the model class, featurization function, data paths
# and augmentation flags consumed by the training scripts.
CONFIG_MAP = {}
# Baseline: MLP over MFCC features, no augmentation.
CONFIG_MAP['mlp_mfcc'] = {
    'batch_size': 32,
    'model': models.MLPModel,
    'feature_fn': partial(audio_utils.featurize_mfcc, hop_length=186),
    'val_data_text_path': './data/switchboard/val/switchboard_val_data.txt',
    'log_frequency': 200,
    'swb_train_audio_pkl_path': './data/switchboard/train/swb_train_audios.pkl',
    'swb_val_audio_pkl_path': './data/switchboard/val/swb_val_audios.pkl',
    'swb_audio_root': './data/switchboard/switchboard-1/97S62/',
    'swb_transcription_root': './data/switchboard/switchboard-1/swb_ms98_transcriptions/',
    'audioset_noisy_train_audio_pkl_path': './data/audioset/train/audioset_train_audios.pkl',
    'linear_layer_size': 44*40,
    'filter_sizes': None,
    'augment_fn': None,
    'expand_channel_dim': False,
    'supervised_augment': False,
    'supervised_spec_augment': False
}
# ResNet over mel-spectrogram features, no augmentation.
CONFIG_MAP['resnet_base'] = {
    'batch_size': 32,
    'model': models.ResNetBigger,
    'feature_fn': partial(audio_utils.featurize_melspec, hop_length=186),
    'val_data_text_path': './data/switchboard/val/switchboard_val_data.txt',
    'log_frequency': 200,
    'swb_train_audio_pkl_path': './data/switchboard/train/swb_train_audios.pkl',
    'swb_val_audio_pkl_path': './data/switchboard/val/swb_val_audios.pkl',
    'swb_audio_root': './data/switchboard/switchboard-1/97S62/',
    'swb_transcription_root': './data/switchboard/switchboard-1/swb_ms98_transcriptions/',
    'audioset_noisy_train_audio_pkl_path': './data/audioset/train/audioset_train_audios.pkl',
    'augment_fn': None,
    'linear_layer_size': 64,
    'filter_sizes': [64,32,16,16],
    'expand_channel_dim': True,
    'supervised_augment': False,
    'supervised_spec_augment': False
}
# Larger ResNet with random waveform + SpecAugment augmentation.
CONFIG_MAP['resnet_with_augmentation'] = {
    'batch_size': 32,
    'model': models.ResNetBigger,
    'feature_fn': partial(audio_utils.featurize_melspec, hop_length=186),
    'val_data_text_path': './data/switchboard/val/switchboard_val_data.txt',
    'log_frequency': 200,
    'swb_train_audio_pkl_path': './data/switchboard/train/swb_train_audios.pkl',
    'swb_val_audio_pkl_path': './data/switchboard/val/swb_val_audios.pkl',
    'swb_audio_root': './data/switchboard/switchboard-1/97S62/',
    'swb_transcription_root': './data/switchboard/switchboard-1/swb_ms98_transcriptions/',
    'audioset_noisy_train_audio_pkl_path': './data/audioset/train/audioset_train_audios.pkl',
    'augment_fn': partial(audio_utils.random_augment, sr=8000),
    'linear_layer_size': 128,
    'filter_sizes': [128,64,32,32],
    'expand_channel_dim': True,
    'supervised_augment': True,
    'supervised_spec_augment': True
}
#! python3
import pyinputplus as pyip
# Price list: breads, proteins, cheeses, condiments.
prices = {'white': 1, 'wheat': 1.25, 'sourdough': 1.5,
          'chicken': 2, 'turkey': 1.75, 'ham': 2.25, 'tofu': 2.5,
          'cheddar': 1, 'swiss': 1.5, 'mozzarella': 2,
          'mayo': 0.25, 'mustard': 0.3, 'lettuce': 0.4, 'tomato': 0.35}

selection = []
print('Time to make your sandwich')
selection.append(pyip.inputMenu(['wheat', 'white', 'sourdough'], numbered=True))
selection.append(pyip.inputMenu(['chicken', 'turkey', 'ham', 'tofu'], numbered=True))
if pyip.inputYesNo(prompt='Do you want cheese?') == 'yes':
    selection.append(pyip.inputMenu(['cheddar', 'swiss', 'mozzarella'], numbered=True))

# Condiments share one loop; inputYesNo always returns 'yes' or 'no', so the
# original's mixed checks (== 'yes' vs [0].lower() == 'y') are unified.
for topping in ('mayo', 'mustard', 'lettuce', 'tomato'):
    if pyip.inputYesNo(prompt=f'Do you want {topping}?') == 'yes':
        selection.append(topping)

# Get price of sandwich based on selection
quantity = pyip.inputInt('How many of this sandwich do you want? ', min=1)
total = sum(prices[ingredient] for ingredient in selection) * quantity
print('Your total is: ' + str(total))
# Distribution metadata for the cpimgs package.
name = "cpimgs"
__version__ = '0.1.3'
"""FreeWheel Linear Programming Interface for Python"""
from flipy.lp_reader import LpReader
from flipy.lp_problem import LpProblem
from flipy.lp_constraint import LpConstraint
from flipy.lp_expression import LpExpression
from flipy.solvers.cbc_solver import CBCSolver
from flipy.lp_variable import LpVariable, VarType
from flipy.solvers.base_solver import SolutionStatus
# Optional Gurobi backend: fall back to a stub that fails loudly when
# gurobipy is missing, so `from flipy import GurobiSolver` always works.
try:
    from flipy.solvers.gurobi_solver import GurobiSolver
except ImportError:
    class GurobiSolver:
        """ Dumb GurobiSolver class that returns an error when called """
        def __init__(self, *args, **kwargs):
            raise ModuleNotFoundError('gurobipy not installed')
from flipy.lp_objective import LpObjective, Minimize, Maximize

# NOTE(review): plain `version` (not `__version__`); confirm it matches the
# package metadata in setup.py/pyproject.
version = '0.0.5'
|
# -*- encoding: utf-8 -*-
# Module iasebox
from numpy import *
def iasebox(r=1):
    """Create a box structuring element: a 3x3 square dilated r times."""
    from ia870 import iasesum, iabinary
    unit_box = iabinary([[1, 1, 1]] * 3)
    return iasesum(unit_box, r)
|
import cPickle as pickle
import os.path
# def load_synonyms():
# with open('data/synonyms.pck', 'rb') as f:
# syns = pickle.load(f)
# return syns
# SYNONYMS = load_synonyms()
# WORDS = SYNONYMS.keys()
SYNONYMS = dict()

# Merge the numbered pickle shards data/synonyms.00.pck, .01.pck, ...
# into one dict, stopping at the first missing shard.
i = 0
while os.path.exists('data/synonyms.%02d.pck' % i):
    with open('data/synonyms.%02d.pck' % i, 'rb') as f:
        SYNONYMS.update(pickle.load(f))
    i += 1
def cached_synonyms(x, length=None):
    """Look up cached synonyms of *x*, optionally capped at *length* chars."""
    key = x.lower()
    if key not in SYNONYMS:
        return []
    # `not length` keeps the original semantics: length=0/None disables the cap.
    return [s for s in SYNONYMS[key] if (not length) or (len(s) <= length)]
|
def FizzBuzz(num):
    """Return the FizzBuzz sequence 1..num as one space-terminated string."""
    parts = []
    for x in range(1, num + 1):
        word = ''
        if x % 3 == 0:
            word += 'Fizz'
        if x % 5 == 0:
            word += 'Buzz'
        # Fall back to the number itself when neither divisor matched.
        parts.append(word or str(x))
    # Every item (including the last) is followed by a single space.
    return ''.join(part + ' ' for part in parts)

print(FizzBuzz(50))
|
# Advent of Code 2015 day 23: simulate a two-register machine.
# Fixes: close the input file (was left open), catch only IndexError
# (a bare except hid real errors), and use //= so register "a" stays an
# int under Python 3 (/= produced floats).
with open("input.txt") as f:
    memory = f.read().splitlines()

registers = {"a": 1, "b": 0}
ip = 0  # instruction pointer; jumping past the program halts the machine
while True:
    try:
        next_line = memory[ip]
    except IndexError:
        break
    line_parts = next_line.split(" ")
    inst = line_parts[0]
    if inst == "hlf":        # halve
        registers[line_parts[1]] //= 2
        ip += 1
    elif inst == "tpl":      # triple
        registers[line_parts[1]] *= 3
        ip += 1
    elif inst == "inc":      # increment
        registers[line_parts[1]] += 1
        ip += 1
    elif inst == "jmp":      # unconditional relative jump
        ip += int(line_parts[1])
    elif inst == "jie":      # jump if even
        args = line_parts[1:]
        if registers[args[0][0]] % 2 == 0:
            ip += int(args[1])
        else:
            ip += 1
    elif inst == "jio":      # jump if one (not "odd"!)
        args = line_parts[1:]
        if registers[args[0][0]] == 1:
            ip += int(args[1])
        else:
            ip += 1
print(registers["b"])
|
from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField, Form
from wtforms.validators import DataRequired, InputRequired
class RecipeStepForm(Form):
    """One recipe step: its position in the recipe and its instruction text.

    ``step_number`` uses InputRequired instead of DataRequired:
    DataRequired treats the falsy-but-valid integer 0 as missing, so a
    step numbered 0 would have failed validation.
    """
    step_number = IntegerField('stepNumber', validators=[InputRequired()])
    content = StringField('content', validators=[DataRequired()])
|
from __future__ import print_function
from autostep import Autostep
import time
port = '/dev/ttyACM0'
stepper = Autostep(port)

# Report both servo channels' current positions.
servo_angle_0 = stepper.get_servo_angle()
servo_angle_1 = stepper.get_servo_angle_alt()
print(f'servo_angle_0: {servo_angle_0} and servo_angle_1: {servo_angle_1}')

# Build mirrored sweep profiles: channel 0 goes up then down while
# channel 1 goes down then up, so the two servos move in opposition.
sweep_up = list(range(0, 180, 1))
sweep_down = sweep_up[::-1]
angle_list_0 = sweep_up + sweep_down
angle_list_1 = sweep_down + sweep_up

while True:
    for i, (angle_0, angle_1) in enumerate(zip(angle_list_0, angle_list_1)):
        print(f'i: {i}, angle_0: {angle_0}, angle_1: {angle_1}')
        stepper.set_servo_angle(angle_0)
        stepper.set_servo_angle_alt(angle_1)
        time.sleep(0.01)
|
# SPDX-FileCopyrightText: Copyright 2021, Siavash Ameli <sameli@berkeley.edu>
# SPDX-License-Identifier: BSD-3-Clause
# SPDX-FileType: SOURCE
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the license found in the LICENSE.txt file in the root
# directory of this source tree.
# =======
# Imports
# =======
import numpy
from .._utilities.plot_utilities import * # noqa: F401, F403
from .._utilities.plot_utilities import load_plot_settings, plt, \
show_or_save_plot
__all__ = ['generate_data']
# =============
# generate data
# =============
def generate_data(
        points,
        noise_magnitude,
        seed=0,
        plot=False):
    """
    Generates 1D array of data points. The data are the additive ``sin``
    function along each axis plus uniform noise.

    :param points: 2D array of points, where each row represents a point
        coordinate (a 1D array is promoted to one column).
    :type points: numpy.ndarray
    :param noise_magnitude: The magnitude of additive noise to the data.
    :type noise_magnitude: float
    :param seed: RandomState seed for the noise; ``None`` leaves it unseeded.
    :param plot: When true, also plot the generated data.
    :return: 1D array of data
    :rtype: numpy.ndarray
    """
    # Promote a 1d coordinate array to a single-column 2d array.
    if points.ndim == 1:
        points = numpy.array([points], dtype=float).T

    # Additive sin over every coordinate axis.
    z = numpy.sin(points * numpy.pi).sum(axis=1)

    # Add noise (seeded for reproducibility unless seed is None).
    rng = numpy.random.RandomState() if seed is None \
        else numpy.random.RandomState(seed)
    z += noise_magnitude * rng.randn(points.shape[0])

    # Plot data
    if plot:
        _plot_data(points, z)

    return z
# =========
# plot data
# =========
def _plot_data(points, z):
    """
    Plots 1D or 2D data.

    Draws the noisy samples together with a freshly generated noise-free
    reference curve/surface, then shows or saves the figure.
    """
    load_plot_settings()
    dimension = points.shape[1]
    if dimension == 1:
        x = points
        xi = numpy.linspace(0, 1)
        # NOTE(review): the third positional argument here is `seed`, not
        # `plot` (signature is points, noise_magnitude, seed, plot) —
        # harmless since noise_magnitude is 0, but confirm the intent.
        zi = generate_data(xi, 0.0, False)
        fig, ax = plt.subplots()
        ax.plot(x, z, 'o', color='black', markersize=4, label='noisy data')
        ax.plot(xi, zi, color='black', label='noise-free data')
        ax.set_xlim([0, 1])
        ax.set_xlabel(r'$x$')
        ax.set_ylabel(r'$z(x)$')
        ax.set_title('Sample one -dimensional data')
        ax.legend(fontsize='small')
        plt.tight_layout()
        show_or_save_plot(plt, 'data', transparent_background=True)
    elif dimension == 2:
        # Noise free data
        # (evaluated on a regular grid spanning the unit square)
        xi = numpy.linspace(0, 1)
        yi = numpy.linspace(0, 1)
        Xi, Yi = numpy.meshgrid(xi, yi)
        XY = numpy.c_[Xi.ravel(), Yi.ravel()]
        zi = generate_data(XY, 0.0, False)
        Zi = numpy.reshape(zi, (xi.size, yi.size))
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1, projection='3d')
        ax.scatter(points[:, 0], points[:, 1], z, marker='.', s=7, c='black',
                   label='noisy data')
        surf = ax.plot_surface(Xi, Yi, Zi, linewidth=0, antialiased=False,
                               color='darkgray', label='noise-free data')
        # To avoid a bug in matplotlib
        surf._facecolors2d = surf._facecolor3d
        surf._edgecolors2d = surf._edgecolor3d
        x_min = numpy.min(points[:, 0])
        x_max = numpy.max(points[:, 0])
        y_min = numpy.min(points[:, 1])
        y_max = numpy.max(points[:, 1])
        ax.set_xlim([x_min, x_max])
        ax.set_ylim([y_min, y_max])
        ax.set_xlabel(r'$x_1$')
        ax.set_ylabel(r'$x_2$')
        ax.set_zlabel(r'$z(x_1, x_2)$')
        ax.set_title('Sample two-dimensional data')
        ax.legend(fontsize='small')
        ax.view_init(elev=40, azim=120)
        plt.tight_layout()
        show_or_save_plot(plt, 'data', transparent_background=True)
    else:
        raise ValueError('Dimension should be "1" or "2" to plot data.')
|
"""
Logger builder for getting a logger that can be used for debugging
(print on console) and for reales mode (disable console output)
@author: wimmer, simon-justus
"""
import logging
import os
class logger_builder:
def __init__(self, log_name = 'default Logger', log_dir='logging123'):
self.log_dir = log_dir
self.log_name = log_name
self.logger = logging.Logger(str(log_name))
self.set_logging_config()
def set_logging_config(self):
logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
logging.getLogger().addHandler(logging.StreamHandler())
self.logger.setLevel(logging.DEBUG)
self.log_dir = "{0}/{1}".format(os.getcwd(),self.log_dir)
if not os.path.isdir(self.log_dir):
os.mkdir(self.log_dir)
fileHandler = logging.FileHandler("{0}/{1}.log".format(self.log_dir,self.log_name))
fileHandler.setFormatter(logFormatter)
self.logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
self.logger.addHandler(consoleHandler)
def get_logger(self):
return self.logger |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.contrib import admin
from django.contrib.auth.models import Group
from django.forms import TextInput
from django.db import models
from import_export.admin import ImportExportModelAdmin
from .models import *
from .forms import Adaptingtextarea
from backend.management.commands import warmup
# Branding shown in the Django admin header and browser title.
admin.site.site_header = "QLever UI Administration"
admin.site.site_title = "QLever UI Administration"
class BackendAdmin(ImportExportModelAdmin):
    """Admin for Backend objects.

    Groups the many configuration fields into titled sections, shows the
    effective defaults as placeholders, and injects the warmup targets
    into the change view. Fix: user-facing typo "adapat" -> "adapt" in
    the Warmup Queries section description.
    """
    formfield_overrides = {
        models.CharField: {'widget': TextInput(attrs={'size': '140'})},
        models.TextField: {'widget': Adaptingtextarea()},
    }
    fieldsets = (
        ("General", {
            'fields': ('name', 'slug', 'sortKey', 'baseUrl', 'isDefault', 'apiToken')
        }),
        ('UI Suggestions', {
            'fields': ('maxDefault', 'fillPrefixes', 'filterEntities', 'filteredLanguage', 'supportedKeywords', 'supportedFunctions', 'suggestPrefixnamesForPredicates', 'supportedPredicateSuggestions', 'suggestedPrefixes'),
        }),
        ('Variable Names', {
            'fields': ('suggestionEntityVariable', 'suggestionNameVariable', 'suggestionAltNameVariable', 'suggestionReversedVariable'),
            'description': 'Define the variable names that are used in the warmup and autocomplete queries below.'
        }),
        ('Frequent Predicates', {
            'fields': ('frequentPredicates', 'frequentPatternsWithoutOrder'),
            'description': 'Frequent predicates that should be pinned to the cache (can be left empty). Separate by space. You can use all the prefixes from "Suggested Prefixes" (e.g. wdt:P31 if "Suggested Prefixes" defines the prefix for wdt), but you can also write full IRIs.'
        }),
        ('Warmup Query Patterns', {
            'fields': ('entityNameAndAliasPattern', 'entityScorePattern', 'predicateNameAndAliasPatternWithoutContext', 'predicateNameAndAliasPatternWithContext', 'entityNameAndAliasPatternDefault', 'predicateNameAndAliasPatternWithoutContextDefault', 'predicateNameAndAliasPatternWithContextDefault'),
            'description': 'The patterns used in the warmup queries below. The idea is that you only have to adapt a few and then the warmup queries and the AC queries just work out of the box.<br><br>The "Name and Alias" patterns are typically defined with KB-specific predicates such as rdfs:label or fb:type.object.name. However usually not all entities in a knowledge base have such names. As a fallback, therefore also names according to the patterns labelled as "... (default)" are used.'
        }),
        ('Warmup Queries', {
            'fields': ('warmupQuery1', 'warmupQuery2', 'warmupQuery3', 'warmupQuery4', 'warmupQuery5'),
            'description': 'The warmup queries. These warmup queries are written in such a way that for almost all knowledge bases, you have to adapt only the patterns, not these warmup query templates.'
        }),
        ('Autocomplete Settings', {
            'fields': ('dynamicSuggestions', 'defaultModeTimeout', 'mixedModeTimeout', 'replacePredicates'),
        }),
        ('Autocomplete Queries (context-sensitive)', {
            'fields': ('suggestSubjects', 'suggestPredicates', 'suggestObjects'),
        }),
        ('Autocomplete Queries (context-insensitive)', {
            'fields': ('suggestSubjectsContextInsensitive', 'suggestPredicatesContextInsensitive', 'suggestObjectsContextInsensitive',),
        }),
        ('Showing names', {
            'fields': ('subjectName', 'alternativeSubjectName', 'predicateName', 'alternativePredicateName', 'objectName', 'alternativeObjectName'),
        }),
    )

    def change_view(self, request, object_id, form_url='', extra_context=None):
        """Expose the available warmup targets to the change template."""
        extra_context = extra_context or {}
        extra_context["warmupTargets"] = warmup.Command.Targets.choices
        extra_context["object_id"] = object_id
        return super(BackendAdmin, self).change_view(
            request, object_id, form_url, extra_context=extra_context,
        )

    def get_form(self, request, obj=None, **kwargs):
        """Show each overridable field's effective default as a placeholder."""
        form = super().get_form(request, obj, **kwargs)
        obj = obj or BackendDefaults.objects.first()
        if obj:
            obj.useBackendDefaults = False
            for fieldName in form.base_fields:
                if fieldName in BackendDefaults.AVAILABLE_DEFAULTS:
                    # NOTE(review): relies on the model overriding
                    # __getattribute__ to accept forceUseDefault — confirm.
                    form.base_fields[fieldName].widget.attrs["placeholder"] = obj.__getattribute__(
                        fieldName, forceUseDefault=True)
        return form

    def get_queryset(self, request):
        """Hide the BackendDefaults singleton from the Backend change list."""
        qs = super(BackendAdmin, self).get_queryset(request)
        return qs.filter(backenddefaults__isnull=True)
class BackendDefaultsAdmin(ImportExportModelAdmin):
    """Admin for the backend-defaults object; reuses BackendAdmin's layout."""
    formfield_overrides = {
        models.CharField: {'widget': TextInput(attrs={'size': '140'})},
        models.TextField: {'widget': Adaptingtextarea()},
    }
    # Uses the following sections of the BackendAdmin fieldsets
    # - Variable Names
    # - Frequent Predicates
    # - Warmup Query Patterns
    # - Warmup Queries
    # - Autocomplete Settings
    # - Autocomplete Queries (context-sensitive)
    # - Autocomplete Queries (context-insensitive)
    # NOTE(review): the [2:9] slice is positional — keep it in sync when
    # BackendAdmin.fieldsets sections are added, removed or reordered.
    fieldsets = (("General", {
        'fields': ('apiToken', )
    }),) + BackendAdmin.fieldsets[2:9]
class ExampleAdmin(ImportExportModelAdmin):
    # List example queries with the backend they belong to.
    list_display = ['backend', 'name']
class LinkAdmin(admin.ModelAdmin):
    # Admin for shared-link records (identifier plus stored content).
    list_display = ['identifier', 'content']
# Hide Django's default Group admin and register our model admins.
admin.site.unregister(Group)
admin.site.register(Backend, BackendAdmin)
admin.site.register(Example, ExampleAdmin)
admin.site.register(BackendDefaults, BackendDefaultsAdmin)
#admin.site.register(Link, LinkAdmin)
|
import sys
# Rebind input to sys.stdin.readline for fast bulk reading; int() and
# split() below tolerate the retained trailing newline.
input = sys.stdin.readline
from collections import deque
def bfs(start):
    """Breadth-first search from *start* over the global adjacency list
    ``nears``; returns shortest-path distances (-1 = unreachable)."""
    queue = deque([start])
    dist = [-1] * n              # -1 marks "not visited yet"
    dist[start] = 0
    while queue:
        node = queue.popleft()   # popleft -> BFS (pop() would give DFS)
        for neighbour in nears[node]:
            if dist[neighbour] == -1:
                dist[neighbour] = dist[node] + 1
                queue.append(neighbour)
    return dist
n, q = [int(x) for x in input().split()]   # n vertices, q queries
nears = [[] for _ in range(n)]             # adjacency list of the tree
for _ in range(n - 1):
    a, b = [int(x) for x in input().split()]
    nears[a - 1].append(b - 1)
    nears[b - 1].append(a - 1)             # edges are undirected

# Depth parity decides the answer: equal parity -> meet in a Town,
# different parity -> meet on a Road.
d = [depth % 2 for depth in bfs(0)]
#print(d)
for _ in range(q):
    x, y = list(map(int, input().split()))
    if d[x - 1] == d[y - 1]:
        print("Town")
    else:
        print("Road")
|
# ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# Copyright (c) 2020 Pantheon.tech. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
#
import pytest
import requests
from cloudify.mocks import MockCloudifyContext
from cloudify.state import current_ctx
from cloudify import ctx
from cloudify.decorators import operation
from cloudify.exceptions import NonRecoverableError
from cloudify.exceptions import RecoverableError
# Minimal well-formed OpenStack v2 credential dict used as node
# properties by the tests below (all values are dummies).
_goodosv2 = {
    'auth_url': 'https://example.com/identity/v2.0',
    'password': 'pw',
    'region': 'r',
    'tenant_name': 'tn',
    'username': 'un'
}
def test_create_feed(monkeypatch, mockconsul, mockdmaapbc):
    """Drive the data-router feed lifecycle against the mocked bus controller."""
    import dmaapplugin
    from dmaapplugin import dr_lifecycle

    node_properties = {'fqdn': 'a.x.example.com', 'openstack': _goodosv2, 'feed_id': 'test_feed_id'}
    ctx_mock = MockCloudifyContext(
        node_id='test_node_id',
        node_name='test_node_name',
        properties=node_properties,
        runtime_properties={
            "admin": {"user": "admin_user"},
            "user": {"user": "user_user"},
            "viewer": {"user": "viewer_user"}
        })
    current_ctx.set(ctx_mock)

    op_kwargs = {"feed_name": "ONAP_test",
                 "feed_description": "onap dmaap plugin unit test feed"}

    # Every HTTP response parses to the same canned feed description.
    def fake_feed(self):
        return {"feedId": "test_feedId", "publishURL": "test_publishURL", "logURL": "test_logURL"}
    monkeypatch.setattr(requests.Response, "json", fake_feed)

    dr_lifecycle.create_feed(**op_kwargs)
    dr_lifecycle.get_existing_feed(**op_kwargs)
    dr_lifecycle.delete_feed(**op_kwargs)
|
# Copyright (c) 2019-2021 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# https://github.com/boschresearch/pylife
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import scipy.stats as stats
from .likelihood import Likelihood
from .pearl_chain import PearlChainProbability
import pylife.utils.functions as functions
from . import FatigueData, determine_fractures
class Elementary:
"""Base class to analyze SN-data.
The common base class for all SN-data analyzers calculates the first
estimation of a Wöhler curve in the finite zone of the SN-data. It
calculates the slope `k`, the fatigue limit `SD`, the transition cycle
number `ND` and the scatter in load direction `1/TN`.
The result is just meant to be a first guess. Derived classes are supposed
to use those first guesses as starting points for their specific
analysis. For that they should implement the method `_specific_analysis()`.
"""
    def __init__(self, fatigue_data):
        """The constructor.

        Normalizes the input into a FatigueData accessor and builds the
        likelihood model used for the BIC.

        Parameters
        ----------
        fatigue_data : pd.DataFrame or FatigueData
            The SN-data to be analyzed.
        """
        self._fd = self._get_fatigue_data(fatigue_data)
        self._lh = self._get_likelihood()
    def _get_likelihood(self):
        # Likelihood model over the fatigue data; consumed by __calc_bic().
        return Likelihood(self._fd)
def _get_fatigue_data(self, fatigue_data):
if isinstance(fatigue_data, pd.DataFrame):
if hasattr(fatigue_data, "fatigue_data"):
params = fatigue_data.fatigue_data
else:
params = determine_fractures(fatigue_data).fatigue_data
elif isinstance(fatigue_data, FatigueData):
params = fatigue_data
else:
raise ValueError("fatigue_data of type {} not understood: {}".format(type(fatigue_data), fatigue_data))
return params
def analyze(self, **kwargs):
"""Analyze the SN-data.
Parameters
**kwargs : kwargs arguments
Arguments to be passed to the derived class
"""
if len(self._fd.load.unique()) < 2:
raise ValueError("Need at least two load levels to do a Wöhler analysis.")
wc = self._common_analysis()
wc = self._specific_analysis(wc, **kwargs)
self.__calc_bic(wc)
wc['failure_probability'] = 0.5
return wc
def _common_analysis(self):
self._slope, self._lg_intercept = self._fit_slope()
TN, TS = self._pearl_chain_method()
return pd.Series({
'k_1': -self._slope,
'ND': self._transition_cycles(self._fd.fatigue_limit),
'SD': self._fd.fatigue_limit,
'TN': TN,
'TS': TS
})
def _specific_analysis(self, wc):
return wc
def bayesian_information_criterion(self):
"""The Bayesian Information Criterion
Bayesian Information Criterion is a criterion for model selection among
a finite set of models; the model with the lowest BIC is preferred.
https://www.statisticshowto.datasciencecentral.com/bayesian-information-criterion/
Basically the lower the better the fit.
"""
if not hasattr(self,"_bic"):
raise ValueError("BIC value undefined. Analysis has not been conducted.")
return self._bic
def pearl_chain_estimator(self):
return self._pearl_chain_estimator
def __calc_bic(self, wc):
''' '''
param_num = 5 # SD, TS, k_1, ND, TN
log_likelihood = self._lh.likelihood_total(wc['SD'], wc['TS'], wc['k_1'], wc['ND'], wc['TN'])
self._bic = (-2 * log_likelihood) + (param_num * np.log(self._fd.num_tests))
def _fit_slope(self):
slope, lg_intercept, _, _, _ = stats.linregress(np.log10(self._fd.fractures.load),
np.log10(self._fd.fractures.cycles))
return slope, lg_intercept
def _transition_cycles(self, fatigue_limit):
# FIXME Elementary means fatigue_limit == 0 -> np.inf
if fatigue_limit == 0:
fatigue_limit = 0.1
return 10**(self._lg_intercept + self._slope * (np.log10(fatigue_limit)))
def _pearl_chain_method(self):
'''
Pearl chain method: consists of shifting the fractured data to a median load level.
The shifted data points are assigned to a Rossow failure probability.The scatter in load-cycle
direction can be computed from the probability net.
'''
self._pearl_chain_estimator = PearlChainProbability(self._fd.fractures, self._slope)
TN = functions.std2scatteringRange(1./self._pearl_chain_estimator.slope)
TS = TN**(1./-self._slope)
return TN, TS
|
from tkinter import *
from tkinter import messagebox
from script import *
def show_message():
    """Encode the entered message with the Shannon-Fano algorithm and show
    the code table plus the derived information-theoretic metrics.

    Metrics shown in the second dialog:
      H_max  - maximum entropy, log2 of the alphabet size
      h      - actual entropy of the symbol distribution
      l_cp   - average code word length
      K_c.c. - statistical compression ratio, H_max / l_cp
      K_o.э. - relative efficiency, h / l_cp
    """
    lst = processing(message.get())
    h = 0   # accumulates sum(p * log2(p)); sign flipped below
    l = 0   # average code length: sum(p * len(code))
    temp = ''
    for c in lst:
        temp += str(c) + '\n'
        h += c.get_freq() * log2(c.get_freq())
        l += c.get_freq() * len(c.get_code())
    h = abs(h)
    messagebox.showinfo("GUI для алгоритма Шеннона-Фано", temp)
    try:
        # BUG FIX: relative efficiency K_o.э. is h / l, not h / 1 — the
        # literal `1` made the metric always equal h. The ZeroDivisionError
        # fallback below (triggered when l == 0) confirms both ratios are
        # meant to divide by l.
        messagebox.showinfo("GUI для алгоритма Шеннона-Фано",
                            "H_max = {}\nh = {}\nl_cp = {}\nK_c.c. = {}\nK_o.э. = {}".format(
                                log2(len(lst)), h, l, log2(len(lst)) / l, h / l))
    except ZeroDivisionError:
        messagebox.showinfo("GUI для алгоритма Шеннона-Фано",
                            "H_max = {}\nh = {}\nl_cp = {}\nK_c.c. = ∞\nK_o.э. = ∞".format(
                                log2(len(lst)), h, l))
# --- Module-level GUI wiring ---
# `message` and `root` are module-level on purpose: show_message() reads
# `message` when the button is pressed.
root = Tk()
root.title("GUI для алгоритма Шеннона-Фано")
root.geometry("300x250")
message = StringVar()
# Entry bound to `message`, placed with relative coordinates.
message_entry = Entry(textvariable=message)
message_entry.place(relx=.5, rely=.1, anchor="c")
# Button triggers the Shannon-Fano processing and the result dialogs.
message_button = Button(text="Обработать", command=show_message)
message_button.place(relx=.5, rely=.5, anchor="c")
# Blocks until the window is closed.
root.mainloop()
|
from bs4 import BeautifulSoup as bs
import requests
import json
def scrape(station, units=None):
    """Scrape current conditions for one personal weather station (PWS)
    from its wunderground.com dashboard page.

    Parameters
    ----------
    station : dict
        Must contain "id" (the PWS station id appended to the dashboard
        URL) and "parameters" (collection of metric names to extract:
        "temp", "windSpeed", "windGust", "windBearing", "precipRate",
        "precipTotal", "pressure", "humidity", "uvIndex", "radiation").
    units : dict, optional
        Target units per quantity. Defaults to
        {"temp": "c", "pressure": "hpa", "speed": "kph", "precip": "mm"}.
        Page values appear to be imperial; conversion happens only when a
        metric unit is requested.

    Returns
    -------
    dict or None
        Extracted values keyed by parameter name, or None when the page
        cannot be fetched, the station is not reported "Online", or the
        "last updated" text cannot be parsed.
    """
    # BUG FIX: `units` used to be a mutable default argument (one dict
    # shared across all calls); use the None-sentinel idiom instead.
    if units is None:
        units = {"temp": "c", "pressure": "hpa", "speed": "kph", "precip": "mm"}
    USER_AGENT = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36"
    LANGUAGE = "en-US,en;q=0.5"
    URL = "https://www.wunderground.com/dashboard/pws/"
    try:
        session = requests.Session()
        session.headers["User-Agent"] = USER_AGENT
        session.headers["Accept-Language"] = LANGUAGE
        session.headers["Content-Language"] = LANGUAGE
        html = session.get(URL + station["id"])
        soup = bs(html.text, "html.parser")
    except Exception:
        # Best-effort scraper: any fetch/parse failure yields "no data".
        # (Was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit.)
        return None
    data = {}
    # NOTE(review): all extraction below is positional (findAll(...)[n])
    # and tied to the current page markup — fragile if the layout changes.
    if (
        soup.findAll("span", attrs={"_ngcontent-app-root-c173": ""})[21].text
        == "Online"
    ):
        # Last updated value
        data["LAST_UPDATED"] = soup.findAll(
            "span", attrs={"class": "ng-star-inserted"}
        )[0].text
        # Expected shape: "(updated <value> <unit> ago)" -> seconds.
        strings = data["LAST_UPDATED"].split()
        if (strings[0] == "(updated") and (strings[3] == "ago)"):
            value = int(strings[1])
            if (value >= 0) and (value <= 60):
                if strings[2][0:6] == "second":
                    data["LAST_UPDATED"] = value
                elif strings[2][0:6] == "minute":
                    data["LAST_UPDATED"] = value * 60
                elif strings[2][0:4] == "hour":
                    if (value >= 0) and (value <= 24):
                        data["LAST_UPDATED"] = value * 3600
                    else:
                        return None
                else:
                    return None
            else:
                return None
        else:
            return None
        # Get Temperature (page value presumably °F — conversion below)
        if "temp" in station["parameters"]:
            data["temp"] = soup.find("span", attrs={"class": "wu-value"})
            data["temp"] = round(float(data["temp"].text))
            if units["temp"] == "c":
                data["temp"] = round((data["temp"] - 32) * (5 / 9), 1)
        # Get Wind Speed
        if "windSpeed" in station["parameters"]:
            data["windSpeed"] = soup.findAll("span", attrs={"class": "wu-value"})
            data["windSpeed"] = round(float(data["windSpeed"][2].text), 1)
            # BUG FIX: the default unit is spelled "kph" but only "kmph"
            # was checked, so the default silently kept mph; accept both.
            if units["speed"] in ("kph", "kmph"):
                data["windSpeed"] = round(data["windSpeed"] * 1.6, 1)
            elif units["speed"] == "mps":
                # mph -> m/s with 4/9 ≈ 0.444 (exact factor is 0.44704)
                # — TODO confirm the approximation is intended.
                data["windSpeed"] = round(data["windSpeed"] * (4 / 9), 1)
        # Get Wind Gust
        if "windGust" in station["parameters"]:
            data["windGust"] = soup.findAll("span", attrs={"class": "wu-value"})
            data["windGust"] = round(float(data["windGust"][3].text), 1)
            if units["speed"] in ("kph", "kmph"):
                data["windGust"] = round(data["windGust"] * 1.6, 1)
            elif units["speed"] == "mps":
                data["windGust"] = round(data["windGust"] * (4 / 9), 1)
        # Get Wind Bearing (parsed from the CSS rotation of the arrow icon)
        if "windBearing" in station["parameters"]:
            data["windBearing"] = soup.find("div", attrs={"class": "arrow-wrapper"})
            string_full = ((data["windBearing"]["style"]).split())[1]
            string_start = string_full[0:7]
            string_end = string_full[-5:-1]
            if (string_start == "rotate(") and (string_end == "deg)"):
                # -180 flips the arrow direction to a compass bearing.
                data["windBearing"] = int(string_full[7:-5]) - 180
            else:
                data["windBearing"] = None
        # Get Precipitation Rate (25.4 converts inches -> mm)
        if "precipRate" in station["parameters"]:
            data["precipRate"] = soup.findAll("span", attrs={"class": "wu-value"})
            data["precipRate"] = round(float(data["precipRate"][5].text), 2)
            if units["precip"] == "mm":
                data["precipRate"] = round(data["precipRate"] * 25.4, 2)
        # Get Precipitation Total
        if "precipTotal" in station["parameters"]:
            data["precipTotal"] = soup.findAll("span", attrs={"class": "wu-value"})
            data["precipTotal"] = round(float(data["precipTotal"][8].text), 2)
            if units["precip"] == "mm":
                data["precipTotal"] = round(data["precipTotal"] * 25.4, 2)
        # Get Pressure (33.86 converts inHg -> hPa)
        if "pressure" in station["parameters"]:
            data["pressure"] = soup.findAll("span", attrs={"class": "wu-value"})
            data["pressure"] = round(float(data["pressure"][6].text), 2)
            if units["pressure"] == "hpa":
                data["pressure"] = round(data["pressure"] * 33.86, 2)
        # Get Humidity
        if "humidity" in station["parameters"]:
            data["humidity"] = soup.findAll("span", attrs={"class": "wu-value"})
            data["humidity"] = round(float(data["humidity"][7].text))
        # Get UV Index
        if "uvIndex" in station["parameters"]:
            data["uvIndex"] = soup.findAll("span", attrs={"class": "wu-value"})
            data["uvIndex"] = round(float(data["uvIndex"][9].text))
        # Get Solar Radiation
        if "radiation" in station["parameters"]:
            data["radiation"] = soup.findAll("div", attrs={"class": "weather__text"})
            strings = data["radiation"][-1].text.split()
            if strings[1][-8:-3] == "watts":
                data["radiation"] = round(float(strings[0]), 1)
            else:
                data["radiation"] = None
    return data
def lambda_handler(event, context):
    """AWS Lambda entry point.

    Expects ``event["queryStringParameters"]`` to contain ``stationId``
    plus any number of extra entries whose *values* name the metrics to
    scrape (e.g. ``p0=temp&p1=windSpeed``). Returns an API-Gateway style
    response whose JSON body is the scraped data (``null`` when the
    scrape fails).

    Raises:
        Exception: if ``queryStringParameters``/``stationId`` is missing.
    """
    # 1. Try to extract the parameters passed
    try:
        station_id = event["queryStringParameters"]["stationId"]
        del event["queryStringParameters"]["stationId"]
    except (KeyError, TypeError):
        # Narrowed from a bare `except:` so unrelated programming errors
        # still propagate (TypeError covers queryStringParameters=None).
        raise Exception("Invalid queryStringParameters")
    # The remaining query-string values are the metric names to fetch.
    parameters = list(event["queryStringParameters"].values())
    station = {"id": station_id, "parameters": parameters}
    # 2. Scrape Wunderground website
    response_body = scrape(station)
    # 3. Construct http response object
    response_obj = {
        "statusCode": 200,
        "headers": {"Content-Type": "application/json"},
        "body": json.dumps(response_body),
    }
    # 4. Return the response object
    return response_obj
# def debug():
# event = {
# "queryStringParameters": {
# "stationId": "ICURITIB24",
# "p0": "temp",
# "p1": "windSpeed",
# "p2": "windGust",
# "p3": "windBearing",
# "p4": "pressure",
# "p5": "humidity",
# "p6": "precipRate",
# "p7": "precipTotal",
# "p8": "uvIndex",
# "p9": "radiation",
# }
# }
# print(json.dumps(lambda_handler(event, None), indent=4))
# debug()
|
# Package version string; keep in sync with the release tag.
__version__ = "0.3.9"
# Flatten the public API of the submodules into the package namespace.
from .eval import *
from .eval_algorithms import *
from .system_analysis import *
#! /usr/bin/env python
import os
import datetime
from future.utils import iteritems
import pandas as pd
class EnvironTransition(object):
    """A single transition between two environmental states."""

    def __init__(self, start_state, end_state, time, env_conds):
        """
        Args:
            start_state (str): State the transition starts from.
            end_state (str): State the transition ends in.
            time (str): Time taken to complete the transition.
            env_conds (dict): Key/value pairs of environmental conditions
                under which the transition takes place.
        """
        self.start_state = start_state
        self.end_state = end_state
        self.time = time
        self.env_conds = env_conds

    def _key_value_string_repr(self, key, val):
        """Render one condition as ``key:value`` with type-aware quoting.

        Strings are double-quoted, booleans lower-cased, and everything
        else printed plainly, e.g.::

            time:10 (integer)
            good:true (boolean)
            water:"xeric" (string)
        """
        if isinstance(val, str):
            return '{0}:"{1}"'.format(key, val)
        if isinstance(val, bool):
            return '{0}:{1}'.format(key, repr(val).lower())
        return '{0}:{1}'.format(key, val)

    def time_as_string(self):
        # Transition duration in the generated key:value notation.
        return 'delta_t:{0}'.format(self.time)

    def env_cond_as_string(self):
        # One ``key:value`` line per environmental condition.
        rendered = [self._key_value_string_repr(cond, state)
                    for (cond, state) in iteritems(self.env_conds)]
        return ',\n'.join(rendered)

    def __repr__(self):
        pieces = [
            self._key_value_string_repr('start', self.start_state),
            self._key_value_string_repr('end', self.end_state),
            self.env_cond_as_string(),
            self.time_as_string(),
        ]
        return ',\n'.join(pieces)
class EnvironTransitionSet(object):
    """Representation of all possible environmental transitions."""

    def __init__(self, df, start_state_col, end_state_col, time_col,
                 env_cond_cols=None):
        """Setup EnvironTransitionSet object from table.

        Args:
            df (pd.DataFrame): Table specifying environmental transitions.
            start_state_col (str): Name of table column specifying transition
                start state.
            end_state_col (str): Name of table column specifying transition
                end state.
            time_col (str): Name of table column specifying time taken to
                complete the transition specified by each row.
            env_cond_cols (list of str, optional): Names of columns which
                specify combinations of environmental conditions leading
                to the transition specified by each row. If not given
                these will be assumed to be all the columns in the DataFrame
                not already specified.
        """
        processed_env_cond_cols = self._infer_env_cond_cols(
            df,
            start_state_col,
            end_state_col,
            time_col,
            env_cond_cols
        )
        self._transitions = self._process_environ_transitions(
            df,
            start_state_col,
            end_state_col,
            time_col,
            processed_env_cond_cols
        )

    def _infer_env_cond_cols(self, df, start_state_col, end_state_col,
                             time_col, env_cond_cols):
        """Work out which columns specify environmental conditions.

        If `env_cond_cols` specified in constructor, use those. Otherwise
        assume every column not already specified is an environmental
        condition.
        """
        if env_cond_cols:
            return env_cond_cols
        else:
            return [col for col in df.columns
                    if col not in [start_state_col, end_state_col, time_col]]

    def _process_environ_transitions(self, df, start_state_col, end_state_col,
                                     time_col, env_cond_cols):
        """Process table, return list of EnvironTransition objects."""
        env_conds = []
        for index, row in df.iterrows():
            cond = row[env_cond_cols].to_dict()
            env_conds.append(EnvironTransition(row[start_state_col],
                                               row[end_state_col],
                                               row[time_col],
                                               cond))
        return env_conds

    @property
    def transitions(self):
        # List of EnvironTransition objects parsed from the input table.
        return self._transitions

    @transitions.setter
    def transitions(self, value):
        self._transitions = value

    def _make_dict_mapper(self, data_dict):
        # Build a lookup function over `data_dict` that falls back to the
        # key itself when no mapping entry exists (identity for unknowns).
        def get_value(key):
            if key in data_dict.keys():
                return data_dict[key]
            else:
                return key
        return get_value

    def apply_state_aliases(self, state_aliases):
        """Consume state alias dict and apply to each EnvironTransition.

        Be careful to ensure the type of the keys in the state_alias dict
        match the type of the state codes included in the input data.
        """
        state_alias_mapper = self._make_dict_mapper(state_aliases)
        for et in self.transitions:
            et.start_state = state_alias_mapper(et.start_state)
            et.end_state = state_alias_mapper(et.end_state)

    def apply_environ_condition_aliases(self, env_cond_aliases):
        """Consume env condition aliases dict, apply to EnvironTransition-s."""
        alias_mappers = {}
        for k in env_cond_aliases.keys():
            # make a dictionary of functions which return value alias if given
            alias_mappers[k] = self._make_dict_mapper(env_cond_aliases[k])
        for et in self.transitions:
            for k in et.env_conds.keys():
                try:
                    et.env_conds[k] = alias_mappers[k](et.env_conds[k])
                except KeyError as e:
                    # BUG FIX: the original concatenated the exception
                    # object onto a str (`... + e`), which raises TypeError
                    # instead of printing the warning. Format it instead.
                    print('WARNING: couldn\'t find alias for environmental' +
                          ' condition {0}: {1}'.format(k, e))

    def _get_header_str(self, project_path, start_code, end_code):
        """Construct the header portion of the Cypher file.

        This will specify the SuccessionTrajectory between ``start_code`` and
        ``end_code`` along with all the possible combinations of
        EnvironConditions which cause that transition.
        """
        header_str = """
        //==============================================================================
        // file: {0}/{1}_to_{2}_w.cql
        // modified: {3}
        // dependencies:
        // abstract/LandCoverType_w.cql
        // external parameters:
        // model_ID, used to identify model created nodes belong to
        // description:
        // Create the Successiontrajectory representing the possibility of
        // transition between the {1} and {2} LandCoverState-s. Also specify all
        // combinations of environmental conditions which can lead to this
        // transition.
        //==============================================================================
        {{
        "priority":1
        }}
        """.format(project_path, start_code, end_code,
                   str(datetime.date.today()))
        return header_str

    def _get_succession_traj_query(self, start_code, end_code):
        """Return a string containing a SuccessionTrajectory query.

        This will specify the possibility of transitioning from ``start_code``
        to ``end_code``
        """
        traj_query = """
        MATCH
        (srcLCT:LandCoverType {{code:\"{0}\", model_ID:$model_ID}}),
        (tgtLCT:LandCoverType {{code:\"{1}\", model_ID:$model_ID}})
        CREATE
        (traj:SuccessionTrajectory {{model_ID:$model_ID}})
        MERGE (srcLCT)<-[:SOURCE]-(traj)-[:TARGET]->(tgtLCT);
        """.format(start_code, end_code)
        return traj_query

    def _get_env_cond_query(self, env_trans):
        """Given an EnvironTransition construct environ conditions query."""
        # The replace() re-indents the joined condition lines so they line
        # up under the first condition inside the MERGE pattern.
        env_cond_query = """
        MERGE
        (ec:EnvironCondition {{model_ID:$model_ID,
                              {0},
                              {1}}})
        WITH ec
        MATCH
        (:LandCoverType {{code:\"{2}\", model_ID:$model_ID}})
        <-[:SOURCE]-(traj:SuccessionTrajectory {{model_ID:$model_ID}})-[:TARGET]->
        (:LandCoverType {{code:\"{3}\", model_ID:$model_ID}})
        MERGE
        (ec)-[:CAUSES]->(traj);
        """.format(env_trans.env_cond_as_string().replace(',\n', ',\n'+32*' '),
                   env_trans.time_as_string(),
                   env_trans.start_state, env_trans.end_state)
        return env_cond_query

    def _get_file_dict(self):
        """Return file name/ file contents key/value pairs."""
        d = {}
        for trans in self.transitions:
            start = trans.start_state
            end = trans.end_state
            fname = start + '_to_' + end + '_w.cql'
            if fname not in d:
                # First transition for this state pair: emit the file
                # header plus the SuccessionTrajectory creation query.
                d[fname] = self._get_header_str('succession', start, end) + \
                    self._get_succession_traj_query(start, end)
            # Every transition contributes one EnvironCondition query.
            d[fname] += self._get_env_cond_query(trans)
        return d

    def write_cypher_files(self, project_path):
        """Write one ``<start>_to_<end>_w.cql`` file per state pair."""
        target_dir = os.path.join(project_path, 'succession')
        if not os.path.isdir(target_dir):
            os.makedirs(target_dir)
        d = self._get_file_dict()
        for k in d.keys():
            fname = os.path.join(target_dir, k)
            with open(fname, 'w') as f:
                f.write(d[k])
if __name__ == """__main__""":
    # Ad-hoc driver: load a pickled transition table and emit Cypher files.
    df = pd.read_pickle('traj.pkl')
    env_trans_set = EnvironTransitionSet(df, 'start_code', 'end_code',
                                         'delta_T')
    # Optional alias post-processing hooks (currently disabled):
    # env_trans_set.apply_environ_condition_aliases({})
    # env_trans_set.apply_state_aliases({})
    env_trans_set.write_cypher_files('~/AgroSuccess/views')
|
import pytest
from PySide import QtGui, QtCore
import qmenuview
@pytest.fixture(scope='function', autouse=True)
def useqtbot(qtbot):
    # Autouse fixture: pulls in pytest-qt's qtbot so every test runs with
    # a live Qt application context.
    pass
@pytest.fixture(scope='function')
def model():
    # Flat model: 10 top-level rows named "testrow0".."testrow9".
    m = QtGui.QStandardItemModel()
    for i in range(10):
        m.appendRow(QtGui.QStandardItem("testrow%s" % i))
    return m
@pytest.fixture(scope='function')
def treemodel():
    # Three-level tree: 10 top rows ("testrow<i>:0" plus an icon item in
    # column 1), each with 10 children ("testrow<i>:<j>"), each of those
    # with 5 grandchildren ("testrow<i>:<j>:<k>").
    m = QtGui.QStandardItemModel()
    for i in range(10):
        textitem = QtGui.QStandardItem("testrow%s:0" % i)
        iconitem = QtGui.QStandardItem(QtGui.QIcon(), "iconitem")
        m.appendRow([textitem, iconitem])
        for j in range(10):
            item = QtGui.QStandardItem("testrow%s:%s" % (i, j))
            textitem.appendRow(item)
            for k in range(5):
                item.appendRow(QtGui.QStandardItem("testrow%s:%s:%s" % (i, j, k)))
    return m
@pytest.fixture(scope='function')
def loadedview(treemodel):
    # MenuView wired to the tree model, with icons taken from column 1.
    mv = qmenuview.MenuView()
    mv.icon_column = 1
    mv.model = treemodel
    return mv
# ---- Construction and model-population tests ----
def test_title():
    title = 'Test title'
    mv = qmenuview.MenuView(title)
    assert mv.title() == title
def test_parent():
    p = QtGui.QWidget()
    mv = qmenuview.MenuView(parent=p)
    assert mv.parent() is p
def test_set_empty_model(loadedview):
    # Replacing a populated model with an empty one clears the menu.
    m = QtGui.QStandardItemModel()
    loadedview.model = m
    assert loadedview.model is m
    assert loadedview.isEmpty()
def test_set_model_none(loadedview):
    # Unsetting the model also clears the menu.
    loadedview.model = None
    assert loadedview.model is None
    assert loadedview.isEmpty()
def test_actions_created(model):
    # Each top-level row becomes one action with the row's text.
    mv = qmenuview.MenuView()
    mv.model = model
    actions = mv.actions()
    assert len(actions) == 10,\
        'There should be 10 actions for 10 items'
    for i in range(10):
        assert actions[i].text() == 'testrow%s' % i
def test_flatten_hierarchy(treemodel):
    # Expected order: the 10 top-level indexes first, then each top-level
    # row's 10 children in row order (all in column 0).
    flatted = qmenuview.MenuView._flatten_hierarchy(treemodel)
    i = 0
    parent = QtCore.QModelIndex()
    for index in flatted[:10]:
        assert index.parent() == parent
        assert index.row() == i
        assert index.column() == 0
        i += 1
    for j in range(10):
        parent = treemodel.index(j, 0, QtCore.QModelIndex())
        for i, index in enumerate(flatted[10 * (j + 1):10 * (j + 2)]):
            assert index.parent() == parent
            assert index.row() == i
            assert index.column() == 0
def test_menus_created(loadedview):
    # Every top-level action carries a submenu mirroring its model children.
    for i, a in enumerate(loadedview.actions()):
        assert a.text() == 'testrow%s:0' % i
        for j, ca in enumerate(a.menu().actions()):
            assert ca.text() == 'testrow%s:%s' % (i, j)
# ---- Signal forwarding and row-insertion tests ----
def test_menu_action_triggered(qtbot, loadedview):
    # Emitting a top-level action's `triggered` re-emits the view's
    # `triggered` signal.
    with qtbot.waitSignal(loadedview.triggered, raising=True):
        action = loadedview.actions()[0]
        action.triggered.emit()
def test_menu_action_hovered(qtbot, loadedview):
    with qtbot.waitSignal(loadedview.action_hovered, raising=True):
        action = loadedview.actions()[0]
        action.hovered.emit()
def test_action_triggered(qtbot, loadedview):
    # Nested (second-level) actions are forwarded via `action_triggered`.
    with qtbot.waitSignal(loadedview.action_triggered, raising=True):
        action = loadedview.actions()[0].menu().actions()[0]
        action.triggered.emit()
def test_action_hovered(qtbot, loadedview):
    with qtbot.waitSignal(loadedview.action_hovered, raising=True):
        action = loadedview.actions()[0].menu().actions()[0]
        action.hovered.emit()
def test_insert_menus(treemodel):
    # Appending rows below a leaf must convert the leaf's action to a
    # submenu and create actions (and nested submenus) for the new rows.
    mv = qmenuview.MenuView()
    mv.model = treemodel
    item = QtGui.QStandardItem("newitem1")
    item.appendRow(QtGui.QStandardItem("newitem2"))
    parentindex = treemodel.index(0, 0, treemodel.index(2, 0, treemodel.index(2, 0)))
    parent = treemodel.itemFromIndex(parentindex)
    parent.appendRow(item)
    parentmenu = mv.actions()[2].menu().actions()[2].menu().actions()[0].menu()
    assert parentmenu,\
        "The parent action was not converted to a menu"
    newaction = parentmenu.actions()[0]
    assert newaction.text() == 'newitem1'
    assert newaction.menu().actions()[0].text() == 'newitem2',\
        "Did not create submenus of inserted rows."
# ---- Parent-index and action lookup tests ----
def test_get_parent_indizes_invalid(loadedview):
    parents = loadedview._get_parent_indizes(QtCore.QModelIndex())
    assert parents == [],\
        "Invalid index has no parents!"
def test_get_parent_indizes_other_model(loadedview, model):
    parents = loadedview._get_parent_indizes(model.index(0, 0))
    assert parents == [],\
        "There should be no parents because index is of another model!"
def test_get_parent_indizes_first_level(loadedview, treemodel):
    parents = loadedview._get_parent_indizes(treemodel.index(0, 0))
    assert parents == [],\
        "There are no parent indizes because index is on first level!"
def test_get_parent_indizes_second_level(loadedview, treemodel):
    p1 = treemodel.index(0, 0)
    parents = loadedview._get_parent_indizes(treemodel.index(0, 0, p1))
    assert parents == [p1],\
        "There should be one parent!"
def test_get_parent_indizes_third_level(loadedview, treemodel):
    # Parents are expected innermost-first (closest ancestor first).
    p1 = treemodel.index(0, 0)
    p2 = treemodel.index(0, 0, p1)
    parents = loadedview._get_parent_indizes(treemodel.index(0, 0, p2))
    assert parents == [p2, p1],\
        "There should be two parents!"
def test_get_action_invalid(loadedview):
    action = loadedview.get_action(QtCore.QModelIndex())
    assert action is loadedview.menuAction(),\
        "Invalid Index should give the action of the menu view!"
def test_get_action_first_level(loadedview, treemodel):
    action = loadedview.get_action(treemodel.index(3, 0))
    assert action is loadedview.actions()[3]
def test_get_action_second_level(loadedview, treemodel):
    action = loadedview.get_action(treemodel.index(9, 0, treemodel.index(9, 0)))
    assert action is loadedview.actions()[9].parentWidget().actions()[9]
def test_get_action_third_level(loadedview, treemodel):
    action = loadedview.get_action(
        treemodel.index(0, 0, treemodel.index(2, 0, treemodel.index(9, 0))))
    assert action is loadedview.actions()[9].parentWidget().actions()[2].parentWidget().actions()[0]
def test_get_parents_invalid(loadedview):
    # Actions outside the view's menu tree have no parent chain.
    w1 = QtGui.QMenu()
    w2 = w1.addMenu("Test")
    action = QtGui.QAction(w2)
    parents = loadedview._get_parents(action)
    assert parents == [],\
        "Actions not part of the tree have no parents!"
def test_get_parents_self(loadedview):
    action = loadedview.menuAction()
    parents = loadedview._get_parents(action)
    assert parents == [],\
        "If the action is the menuAction of the menu view, there are no parents!"
def test_get_parents_first_level(loadedview):
    action = loadedview.actions()[4]
    parents = loadedview._get_parents(action)
    assert parents == [],\
        "First level actions have no parent!"
def test_get_parents_second_level(loadedview):
    p1 = loadedview.actions()[4]
    action = p1.menu().actions()[9]
    parents = loadedview._get_parents(action)
    assert parents == [p1],\
        "Second level actions only have the first level action as parent!"
def test_get_parents_third_level(loadedview):
    p1 = loadedview.actions()[4]
    p1.setParent(loadedview)
    p2 = p1.menu().actions()[5]
    action = p2.menu().actions()[2]
    parents = loadedview._get_parents(action)
    assert parents == [p2, p1],\
        "Third level actions have two actions as parent!"
# ---- Reverse lookup, row removal and data-change tests ----
def test_get_index_self(loadedview):
    i = loadedview.get_index(loadedview.menuAction())
    assert not i.isValid()
def test_get_index_invalid(loadedview):
    a = QtGui.QAction(None)
    i = loadedview.get_index(a)
    assert not i.isValid()
def test_get_index_first_level(loadedview, treemodel):
    i = loadedview.get_index(loadedview.actions()[9])
    assert i == treemodel.index(9, 0)
def test_get_index_second_level(loadedview, treemodel):
    i = loadedview.get_index(loadedview.actions()[2].menu().actions()[9])
    expected = treemodel.index(9, 0, treemodel.index(2, 0))
    assert i == expected
def test_remove_menus(loadedview, treemodel):
    # Removing rows 3..7 from a submenu keeps only rows 0, 1, 2, 8, 9.
    first = 3
    count = 5
    treemodel.removeRows(first, count, treemodel.index(2, 0))
    remaining = loadedview.actions()[2].menu().actions()
    assert len(remaining) == 5,\
        "There should only be 5 rows remaining!"
    texts = ['2:0', '2:1', '2:2', '2:8', '2:9']
    expectedtexts = ['testrow%s' % t for t in texts]
    remainingtexts = [a.text() for a in remaining]
    assert expectedtexts == remainingtexts,\
        "There should only be actions with these texts left"
def test_remove_menus_convert_menu(loadedview, treemodel):
    # Removing all children converts the parent back to a plain action.
    first = 0
    count = 10
    treemodel.removeRows(first, count, treemodel.index(2, 0))
    assert loadedview.actions()[2].menu() is None,\
        "The should be no menu, if there are no actions left."
def test_update_menu_text(loadedview, treemodel):
    # setData on the text column updates the corresponding action text.
    teststring = "Thanks for the fish!"
    treemodel.setData(treemodel.index(2, 0), teststring)
    assert loadedview.actions()[2].text() == teststring
def test_change_column(loadedview, treemodel):
    # After switching text_column, edits in that column drive the text.
    loadedview.text_column = 1
    i = treemodel.index(0, 1)
    txt = 'TEST'
    treemodel.setData(i, txt)
    assert loadedview.actions()[0].text() == txt
def test_remove_all_then_insert(model):
    # this test can fail, if you remove all rows,
    # then remove the menu from the parent action.
    # If the menu is the menuview itself, then
    # it will never be reparented!
    # thus resulting in a wierd error
    mv = qmenuview.MenuView()
    mv.model = model
    for i in reversed(range(model.rowCount())):
        model.removeRow(i)
    item = QtGui.QStandardItem("testrow1")
    item.appendRow(QtGui.QStandardItem("testrow2"))
    model.appendRow(item)
|
class TestWorksIDGet:
    """Single-work lookups via every supported work ID scheme."""

    id_result = "https://openalex.org/W2894744280"
    name_result = "Fusing Location Data for Depression Prediction"

    def _assert_canonical(self, response):
        # Shared check: the response resolves to the canonical work record.
        payload = response.get_json()
        assert payload["id"] == self.id_result
        assert payload["display_name"] == self.name_result

    def test_works_openalex_get(self, client):
        self._assert_canonical(client.get("/works/W2894744280"))

    def test_works_openalex_get_case_insensitive(self, client):
        self._assert_canonical(
            client.get("/works/w2894744280", follow_redirects=True)
        )

    def test_works_openalex_get_url(self, client):
        self._assert_canonical(
            client.get("/works/https://openalex.org/W2894744280", follow_redirects=True)
        )

    def test_works_openalex_get_key(self, client):
        self._assert_canonical(
            client.get("/works/openalex:W2894744280", follow_redirects=True)
        )

    def test_works_doi_get(self, client):
        self._assert_canonical(
            client.get("/works/https://doi.org/10.1109/tbdata.2018.2872569", follow_redirects=True)
        )

    def test_works_doi_get_key(self, client):
        self._assert_canonical(
            client.get("/works/doi:10.1109/tbdata.2018.2872569", follow_redirects=True)
        )

    def test_works_doi_get_partial_url(self, client):
        self._assert_canonical(
            client.get("/works/doi.org/10.1109/tbdata.2018.2872569", follow_redirects=True)
        )

    def test_works_mag_get(self, client):
        self._assert_canonical(
            client.get("/works/mag:2894744280", follow_redirects=True)
        )

    def test_works_pmid_get_short(self, client):
        # PMID lookups resolve to a different work than the class-level one.
        payload = client.get("/works/pmid:30295140", follow_redirects=True).get_json()
        assert payload["id"] == "https://openalex.org/W2894716986"
        assert payload["ids"]["pmid"] == "https://pubmed.ncbi.nlm.nih.gov/30295140"

    def test_works_pmid_get_long(self, client):
        payload = client.get(
            "/works/pmid:https://pubmed.ncbi.nlm.nih.gov/30295140",
            follow_redirects=True,
        ).get_json()
        assert payload["id"] == "https://openalex.org/W2894716986"
        assert payload["ids"]["pmid"] == "https://pubmed.ncbi.nlm.nih.gov/30295140"

    def test_works_pmid_get_bad_data(self, client):
        # Unknown PMID -> 404.
        assert client.get("/works/pmid:7777777777", follow_redirects=True).status_code == 404

    def test_works_id_get_bad_data(self, client):
        # Bare numeric ID without a scheme prefix -> 404.
        assert client.get("/works/2894744280", follow_redirects=True).status_code == 404
class TestAuthorsIDGet:
    """Single-author lookups via every supported author ID scheme."""

    id_result = "https://openalex.org/A2609699"
    name_result = "Peter Vandenabeele"

    def _assert_canonical(self, response):
        # Shared check: the response resolves to the canonical author record.
        payload = response.get_json()
        assert payload["id"] == self.id_result
        assert payload["display_name"] == self.name_result

    def test_authors_openalex_get(self, client):
        self._assert_canonical(client.get("/authors/A2609699"))

    def test_authors_openalex_get_case_insensitive(self, client):
        self._assert_canonical(
            client.get("/authors/a2609699", follow_redirects=True)
        )

    def test_authors_openalex_get_url(self, client):
        self._assert_canonical(
            client.get("/authors/https://openalex.org/A2609699", follow_redirects=True)
        )

    def test_authors_openalex_get_key(self, client):
        self._assert_canonical(
            client.get("/authors/openalex:A2609699", follow_redirects=True)
        )

    def test_authors_orcid_get(self, client):
        self._assert_canonical(
            client.get("/authors/https://orcid.org/0000-0001-5285-9835", follow_redirects=True)
        )

    def test_authors_orcid_get_key(self, client):
        self._assert_canonical(
            client.get("/authors/orcid:0000-0001-5285-9835", follow_redirects=True)
        )

    def test_authors_mag_get(self, client):
        self._assert_canonical(
            client.get("/authors/mag:2609699", follow_redirects=True)
        )

    def test_authors_id_get_bad_data(self, client):
        # Bare numeric ID without a scheme prefix -> 404.
        assert client.get("/authors/289744280", follow_redirects=True).status_code == 404
class TestInstitutionsIDGet:
    """Single-institution lookups by OpenAlex ID and ROR."""

    id_result = "https://openalex.org/I19820366"
    name_result = "Chinese Academy of Sciences"

    def _assert_canonical(self, response):
        # Shared check: the response resolves to the canonical record.
        payload = response.get_json()
        assert payload["id"] == self.id_result
        assert payload["display_name"] == self.name_result

    def test_institutions_openalex_get(self, client):
        self._assert_canonical(client.get("/institutions/I19820366"))

    def test_institutions_ror_get(self, client):
        self._assert_canonical(
            client.get("/institutions/https://ror.org/034t30j35", follow_redirects=True)
        )

    def test_institutions_ror_get_key(self, client):
        self._assert_canonical(
            client.get("/institutions/ror:034t30j35", follow_redirects=True)
        )

    def test_institutions_id_get_bad_data(self, client):
        # Bare numeric ID without a scheme prefix -> 404.
        assert client.get("/institutions/289744280", follow_redirects=True).status_code == 404
class TestVenuesIDGet:
    """Single-venue lookups by OpenAlex ID and ISSN variants."""

    id_result = "https://openalex.org/V41354064"
    name_result = "ChemInform"

    def _assert_canonical(self, response):
        # Shared check: the response resolves to the canonical record.
        payload = response.get_json()
        assert payload["id"] == self.id_result
        assert payload["display_name"] == self.name_result

    def test_venues_openalex_get(self, client):
        self._assert_canonical(client.get("/venues/V41354064"))

    def test_venues_issn_get_key(self, client):
        self._assert_canonical(
            client.get("/venues/issn:1431-5890", follow_redirects=True)
        )

    def test_venues_issn_l_get_key(self, client):
        # This issn_l resolves to a different venue than the class-level one.
        payload = client.get("/venues/issn_l:1057-610X", follow_redirects=True).get_json()
        assert payload["id"] == "https://openalex.org/V41746314"
        assert payload["display_name"] == "Studies in Conflict & Terrorism"

    def test_venues_id_bad_issn(self, client):
        # Malformed ISSN -> 404.
        assert client.get("/venues/issn:778-333", follow_redirects=True).status_code == 404

    def test_venues_id_get_bad_data(self, client):
        assert client.get("/venues/289744280", follow_redirects=True).status_code == 404
class TestConceptsIDGet:
    """Single-concept lookups by OpenAlex ID and Wikidata ID."""

    id_result = "https://openalex.org/C86803240"
    name_result = "Biology"

    def _assert_canonical(self, response):
        # Shared check: the response resolves to the canonical record.
        payload = response.get_json()
        assert payload["id"] == self.id_result
        assert payload["display_name"] == self.name_result

    def test_concepts_openalex_get(self, client):
        self._assert_canonical(client.get("/concepts/C86803240"))

    def test_concepts_wikidata_get(self, client):
        self._assert_canonical(
            client.get("/concepts/wikidata:Q420", follow_redirects=True)
        )

    def test_concepts_wikidata_get_key(self, client):
        self._assert_canonical(
            client.get("/concepts/https://www.wikidata.org/wiki/Q420", follow_redirects=True)
        )

    def test_concepts_id_bad_issn(self, client):
        # Unknown wikidata ID -> 404.
        assert client.get("/concepts/wikidata:8899", follow_redirects=True).status_code == 404

    def test_concepts_bad_data(self, client):
        assert client.get("/concepts/289744280", follow_redirects=True).status_code == 404
class TestConceptsNameGet:
    """Concept lookup by display name."""

    def test_concepts_name_get(self, client):
        payload = client.get("/concepts/name/biology").get_json()
        assert payload["id"] == "https://openalex.org/C86803240"
        assert payload["display_name"] == "Biology"
class TestUniversalIDGet:
    """Bare entity ids at the API root redirect to the right entity type."""

    def test_works_openalex_get(self, client):
        payload = client.get("/W2894744280", follow_redirects=True).get_json()
        assert payload["id"] == "https://openalex.org/W2894744280"
        assert (
            payload["display_name"]
            == "Fusing Location Data for Depression Prediction"
        )

    def test_authors_openalex_get(self, client):
        payload = client.get("/A2609699", follow_redirects=True).get_json()
        assert payload["id"] == "https://openalex.org/A2609699"
        assert payload["display_name"] == "Peter Vandenabeele"

    def test_institutions_openalex_get(self, client):
        payload = client.get("/I19820366", follow_redirects=True).get_json()
        assert payload["id"] == "https://openalex.org/I19820366"
        assert payload["display_name"] == "Chinese Academy of Sciences"

    def test_venues_openalex_get(self, client):
        payload = client.get("/V41354064", follow_redirects=True).get_json()
        assert payload["id"] == "https://openalex.org/V41354064"
        assert payload["display_name"] == "ChemInform"

    def test_concepts_openalex_get(self, client):
        payload = client.get("/C86803240", follow_redirects=True).get_json()
        assert payload["id"] == "https://openalex.org/C86803240"
        assert payload["display_name"] == "Biology"
|
from discord.ext.commands import BadArgument
from discord.ext.commands.converter import MemberConverter
from discord.ext.commands.view import StringView
def binary_search(array, query, key=lambda a: a, start=0, end=-1):
    """Binary-search a sorted array and return the matching element.

    Python's 'in' keyword performs a linear scan; for the sorted arrays
    Nyx stores, a binary search is a better fit.

    Arguments:
    key   - filter applied to each element before comparing with query
    start - 0-indexed starting marker of the searched region
    end   - exclusive ending marker of the searched region (-1 = whole array)

    Returns the matching array element, or None if absent.
    """
    if not array:
        return None
    if end == -1:
        end = len(array)
    # Iterative halving instead of the recursive formulation.
    while start < end:
        mid = (start + end) // 2
        probe = key(array[mid])
        if query < probe:
            end = mid
        elif query > probe:
            start = mid + 1
        else:
            return array[mid]
    return None
# Shared converter instance; reused across calls to avoid re-constructing it.
member_converter = MemberConverter()
async def get_member(ctx, query):
    """Resolve *query* (mention, id, name...) to a Member via discord.py.

    Returns None instead of raising when nothing matches.
    """
    try:
        return await member_converter.convert(ctx, query)
    except BadArgument:
        return None
def get_mention(ctx, user):
    """Return the mention string for *user* in the context's guild.

    Falls back to the plain user mention in DMs, or when the user is not
    a member of the guild (the original crashed with AttributeError on
    ctx.guild.get_member(...) returning None).
    """
    if ctx.guild is None:
        return user.mention
    member = ctx.guild.get_member(user.id)
    return member.mention if member is not None else user.mention
def get_predicate(ctx):
    """Return the message text that follows the command prefix and name."""
    scanner = StringView(ctx.message.content)
    scanner.skip_string(ctx.prefix + ctx.invoked_with)
    return scanner.read_rest().strip()
def list_string(alist, key=lambda a: a):
    """Render a list in legible English.

    Given items a, b, ..., y, z this produces "a, b, ..., y, and z";
    two items become "a and b", one item its string form, and an empty
    list the placeholder "[empty]".
    """
    items = [str(key(item)) for item in alist]
    if not items:
        return "[empty]"
    if len(items) == 1:
        return items[0]
    if len(items) == 2:
        return "{} and {}".format(items[0], items[1])
    return ", and ".join([", ".join(items[:-1]), items[-1]])
def print_line():
    """Print a horizontal rule of 80 dashes."""
    print(80 * "-")
def remove_bots(alist, key=lambda a: a):
    """Prune Discord bot accounts from *alist* in place.

    *key* extracts the user object (with a .bot flag) from each element.
    """
    # Fix: delete by index. The original used alist.remove(alist[i]),
    # which removes the first element *equal* to the one at index i —
    # wrong when the list contains equal-comparing entries.
    i = 0
    while i < len(alist):
        if key(alist[i]).bot:
            del alist[i]
        else:
            i += 1
async def reply(ctx, content):
    """Send *content*; outside of DMs, prefix it with the author's mention."""
    if ctx.message.guild is not None:
        content = "{}, {}".format(ctx.author.mention, content)
    return await ctx.send(content)
async def respond(ctx, content):
    """Alias for reply()."""
    return await reply(ctx, content)
def trim(string):
    """Remove trailing carriage returns, newlines, and spaces.

    The inner strip() also drops surrounding whitespace once any trailing
    line-break character has been peeled off (same as the original).
    """
    # Fix: the original loop only triggered on '\r'/'\n', so a string
    # ending in plain spaces was returned unchanged despite the docstring.
    # A tuple (not a string) is used for the membership test so that the
    # empty slice "" of an exhausted string terminates the loop.
    while string[-1:] in ("\r", "\n", " "):
        string = string[:-1].strip()
    return string
import glfw
import OpenGL.GL as gl
import imgui
from imgui.integrations.glfw import GlfwRenderer
def app(render):
    """Run *render* inside a minimal imgui/GLFW main loop.

    *render* is invoked once per frame between imgui.new_frame() and
    imgui.render(); the loop exits when the window is closed, after which
    the renderer and GLFW are shut down.
    """
    imgui.create_context()
    window = impl_glfw_init()
    impl = GlfwRenderer(window)
    while not glfw.window_should_close(window):
        glfw.poll_events()
        impl.process_inputs()
        # Clear to a fixed greenish background before drawing the UI.
        gl.glClearColor(.2, .5, .2, 0.6)
        gl.glClear(gl.GL_COLOR_BUFFER_BIT)
        imgui.new_frame()
        render()
        imgui.render()
        impl.render(imgui.get_draw_data())
        glfw.swap_buffers(window)
    impl.shutdown()
    glfw.terminate()
def impl_glfw_init():
    """Initialize GLFW and return a window with an OpenGL 3.3 core context.

    Exits the process with status 1 if GLFW or the window cannot be created.
    """
    width, height = 1280, 720
    window_name = "minimal ImGui/GLFW3 example"
    if not glfw.init():
        print("Could not initialize OpenGL context")
        exit(1)
    # OS X supports only forward-compatible core profiles from 3.2
    glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)
    glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)
    glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
    glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, gl.GL_TRUE)
    # Create a windowed mode window and its OpenGL context
    window = glfw.create_window(
        int(width), int(height), window_name, None, None
    )
    # Fix: validate the window *before* making its context current. The
    # original called glfw.make_context_current(window) first, which
    # crashes on a NULL window instead of printing the error below.
    if not window:
        glfw.terminate()
        print("Could not initialize Window")
        exit(1)
    glfw.make_context_current(window)
    return window
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础计算平台 available.
Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
from django.http import HttpResponse
from django.utils.decorators import available_attrs
from app_control.utils import func_check
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
def function_check(func_code):
    """
    Feature-switch decorator: run the wrapped view only when the switch
    identified by *func_code* is enabled; otherwise return the 402 page.

    @param func_code: feature/function identifier checked via func_check()
    """
    def decorator(view_func):
        # NOTE(review): available_attrs was removed in Django 3.0; on
        # modern Django this should be functools.WRAPPER_ASSIGNMENTS —
        # confirm the project's Django version.
        @wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(request, *args, **kwargs):
            # func_check returns (result, message); result == 1 means enabled.
            _result, _message = func_check(func_code)
            if _result == 1:
                return view_func(request, *args, **kwargs)
            else:
                return _redirect_func_check_failed(request)
        return _wrapped_view
    return decorator
def _redirect_func_check_failed(request):
    """
    Return the HTTP 402 response shown when the feature-switch check fails.
    """
    return HttpResponse(status=402, content="Not match function switch ....")
|
"""
__init__.py for alias command
"""
from .alias_command import AliasCommand
|
from . import utils
class Meshes(object):
    """Container for batched triangle-mesh data.

    Validates the shapes of all tensors on construction (via the project's
    utils.assert_shape) and stores them as plain attributes.
    """
    def __init__(
            self, vertices=None, vertices_t=None, normals=None, faces=None, faces_t=None, faces_n=None,
            textures=None, texture_params=None):
        # Vertices (vertices) must be [batch_size, num_vertices, 3].
        # Texture vertices (vertices_t) must be [batch_size, num_vertices_t, 2].
        # Normal vectors (normals) must be [batch_size, num_normals, 3].
        # Normal vectors can be undefined.
        utils.assert_shape(vertices, (None, None, 3))
        utils.assert_shape(vertices_t, (None, None, 2))
        if normals is not None:
            utils.assert_shape(normals, (None, None, 3))
        # Indices assigned to faces (faces, faces_t, faces_n) must be [num_faces, 3].
        # faces_n can be undefined.
        utils.assert_shape(faces, (None, 3))
        utils.assert_shape(faces_t, (None, 3))
        if faces_n is not None:
            utils.assert_shape(faces_n, (None, 3))
        # Texture images (textures) must be [batch_size, height, width, 3].
        utils.assert_shape(textures, (None, None, None, 3))
        # TODO: assertion for texture_params.
        self.vertices = vertices
        self.vertices_t = vertices_t
        self.normals = normals
        self.faces = faces
        self.faces_t = faces_t
        self.faces_n = faces_n
        self.textures = textures
        self.texture_params = texture_params
def create_meshes(
        vertices=None, vertices_t=None, normals=None, faces=None, faces_t=None, faces_n=None, textures=None,
        texture_params=None):
    """Factory wrapper: build a Meshes instance from the given tensors."""
    return Meshes(
        vertices=vertices, vertices_t=vertices_t, normals=normals,
        faces=faces, faces_t=faces_t, faces_n=faces_n,
        textures=textures, texture_params=texture_params)
|
import csv
import os
import random
import numpy as np
import torchfile
#from torch.utils.serialization import load_lua
# Preprocessing script: converts the ArtImages caption CSV into the
# text_c10 layout (per-image caption .txt + fixed-length char-index .npy)
# expected by the cvpr2016 text-embedding pipeline, plus a train/val split.
data_folder = '/media/user/DATA/ArtImages'   # hard-coded source path
out_base_f = './art_data'
if not os.path.exists(out_base_f):
    os.makedirs(out_base_f)
max_folder_length = 34   # truncate long derived folder names
vocab = {}
# Character vocabulary; index in this list becomes the char id.
alphabet = list(" abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{}.")
alp_dict = {}
for k, a in enumerate(alphabet):
    alp_dict[a] = k
n_char_array = 200   # captions are encoded to at most 200 characters
with open(os.path.join(data_folder, 'new_info.csv'), 'r') as file:
    reader = csv.reader(file, delimiter='|')
    header = next(reader)
    train_ids = open(os.path.join(out_base_f, 'trainvalids.txt'), 'w')
    for k, row in enumerate(reader):
        print(row)
        #file_name = str(k+1).zfill(5)+'.'+f.replace('/', '_')[:-4]
        #out_file_name = os.path.join(out_folder, file_name[:max_file_length]+'.npy')
        # Derive a filesystem-safe folder name from the image path (row[0]).
        new_folder = row[0].replace('/', '_')[:-4][:max_folder_length]
        out_text_folder = os.path.join(out_base_f, 'text_c10', str(k+1).zfill(5)+'.'+new_folder)
        if not os.path.exists(out_text_folder):
            os.makedirs(out_text_folder)
        ### Text
        file_name = new_folder + '.txt'
        with open(os.path.join(out_text_folder, file_name), 'w') as txt_file:
            txt_file.write(row[-1])
        npy_text_name = str(k+1).zfill(5)+'.'+new_folder + '.npy'
        # Encode the caption (row[-1]) as char indices; unknown chars skipped,
        # remainder left zero-padded.
        char_array = np.zeros((1, n_char_array, 1))
        for n_c, c in enumerate(row[-1].lower()):
            if n_c == n_char_array:
                break
            if c not in alp_dict:
                continue
            char_array[0,n_c,0] = alp_dict[c]
        np.save(os.path.join(out_base_f, 'text_c10', npy_text_name), char_array)
        # Random 80/20 train/val split by record index.
        if random.random() < 0.8:
            train_ids.write(str(k+1).zfill(5)+'\n')
#torchfile.load('/media/adrian/SSD/finegrained-text-embeddings-pytorch/cvpr2016_cub/text_c10/170.Mourning_Warbler.t7')
#load_lua('/media/adrian/SSD/finegrained-text-embeddings-pytorch/cvpr2016_cub/text_c10/170.Mourning_Warbler.t7')
|
import os
from datetime import datetime
# Environment-variable overrides for graphite-web settings; each value is
# only assigned when the corresponding variable is present.
if os.getenv("CARBONLINK_HOSTS"):
    CARBONLINK_HOSTS = os.getenv("CARBONLINK_HOSTS").split(',')
if os.getenv("CLUSTER_SERVERS"):
    CLUSTER_SERVERS = os.getenv("CLUSTER_SERVERS").split(',')
if os.getenv("MEMCACHE_HOSTS"):
    MEMCACHE_HOSTS = os.getenv("MEMCACHE_HOSTS").split(',')
if os.getenv("WHISPER_DIR"):
    WHISPER_DIR = os.getenv("WHISPER_DIR")
if os.getenv("TIME_ZONE"):
    TIME_ZONE = os.getenv("TIME_ZONE")
# Fix: allow a stable secret to be supplied via the environment.
# HACK: the timestamp fallback (original behavior) is predictable and
# changes on every restart, invalidating signed cookies/sessions —
# deployments should set SECRET_KEY explicitly.
SECRET_KEY = os.getenv("SECRET_KEY", str(datetime.now()))
|
# Solution of;
# Project Euler Problem 645: Every Day is a Holiday
# https://projecteuler.net/problem=645
#
# On planet J, a year lasts for $D$ days. Holidays are defined by the two
# following rules. At the beginning of the reign of the current Emperor, his
# birthday is declared a holiday from that year onwards. If both the day
# before and after a day $d$ are holidays, then $d$ also becomes a holiday.
# Initially there are no holidays. Let $E(D)$ be the expected number of
# Emperors to reign before all the days of the year are holidays, assuming
# that their birthdays are independent and uniformly distributed throughout
# the $D$ days of the year. You are given $E(2)=1$, $E(5)=31/6$,
# $E(365)\approx 1174. 3501$. Find $E(10000)$. Give your answer rounded to 4
# digits after the decimal point.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
    """Placeholder solver: accepts the problem input and does nothing."""
    return None
if __name__ == '__main__':
    # Benchmark harness: hand the (placeholder) solver to timed.caller.
    n = 1000       # presumably the timing repetition count — confirm against timed.caller
    i = 10000      # problem input D from Project Euler 645
    prob_id = 645
    timed.caller(dummy, n, i, prob_id)
|
# Side-effect import: executing this line runs the calculator_1 module.
__import__("calculator_1")
|
import torch
import math
import torch.nn.functional as F
from model.Model import Model
from abc import abstractmethod
class Generator(Model):
    """Abstract base for embedding generators.

    Subclasses map a (head entity, relation) embedding pair to a predicted
    tail-entity embedding via forward().
    """
    def __init__(self):
        super(Generator,self).__init__()
    @abstractmethod
    def forward(self,e1_embedding, rel_embedding):
        """Return the generated tail embedding for the given head/relation."""
        pass
class Translation_G(Generator):
    """TransE-style generator: predicted tail = head + relation."""

    def __init__(self):
        super(Translation_G, self).__init__()

    def forward(self, e1_embedding, rel_embedding):
        """Translate the head embedding by the relation embedding."""
        translated = e1_embedding + rel_embedding
        return translated
class FC_G(Generator):
    """Fully-connected generator over the concatenated [head; relation]."""

    def __init__(self, embedding_dim, hidden_layer_list):
        super(FC_G, self).__init__()
        # Layer widths: input is the 2*dim concatenation, output is dim,
        # with the requested hidden widths in between.
        dims = [embedding_dim * 2] + list(hidden_layer_list) + [embedding_dim]
        self.fc_list = torch.nn.ModuleList(
            torch.nn.Linear(n_in, n_out) for n_in, n_out in zip(dims, dims[1:]))

    def forward(self, e1_embedding, rel_embedding):
        """Pass the concatenated embeddings through the linear stack.

        NOTE(review): as in the original, there is no activation between
        the linear layers.
        """
        out = torch.cat([e1_embedding, rel_embedding], dim=1)
        for layer in self.fc_list:
            out = layer(out)
        return out
class ConvE_G(Generator):
    """ConvE-style generator: 2D-convolve the stacked head/relation columns."""

    def __init__(self, embedding_dim, num_filter):
        super(ConvE_G, self).__init__()
        # 1x2 kernel mixes the head column with the relation column.
        self.conv = torch.nn.Conv2d(1, num_filter, (1, 2))
        self.fc = torch.nn.Linear(num_filter * embedding_dim, embedding_dim)
        self.embedding_dim = embedding_dim
        self.num_filter = num_filter

    def forward(self, e1_embedding, rel_embedding):
        """Return a dim-sized embedding via conv -> ReLU -> linear."""
        head = e1_embedding.view(-1, 1, self.embedding_dim, 1)
        rel = rel_embedding.view(-1, 1, self.embedding_dim, 1)
        stacked = torch.cat([head, rel], 3)
        hidden = F.relu(self.conv(stacked))
        flat = hidden.view(-1, self.num_filter * self.embedding_dim)
        return self.fc(flat)
class ConvTransE_G(Generator):
    """ConvTransE-style generator: 1D convolution over the two embedding rows."""

    def __init__(self, input_drop, feature_map_dropout, output_channel, kernel_size, embedding_dim):
        super(ConvTransE_G, self).__init__()
        self.input_dropout = torch.nn.Dropout(input_drop)
        self.feature_map_dropout = torch.nn.Dropout(feature_map_dropout)
        # floor(kernel_size/2) padding keeps the sequence length at embedding_dim.
        self.conv1 = torch.nn.Conv1d(2, output_channel, kernel_size, stride=1,
                                     padding=int(math.floor(kernel_size / 2)), bias=True)
        self.bn0 = torch.nn.BatchNorm1d(2)
        self.bn1 = torch.nn.BatchNorm1d(output_channel)
        self.fc = torch.nn.Linear(output_channel * embedding_dim, embedding_dim, bias=True)
        self.embedding_dim = embedding_dim

    def forward(self, head_embedding, rel_embedding):
        """BN -> dropout -> conv -> BN -> leaky ReLU -> dropout -> linear."""
        batch = head_embedding.size()[0]
        stacked = torch.cat(
            [head_embedding.unsqueeze(1), rel_embedding.unsqueeze(1)], 1)
        out = self.input_dropout(self.bn0(stacked))
        out = F.leaky_relu(self.bn1(self.conv1(out)), negative_slope=0.2)
        out = self.feature_map_dropout(out)
        return self.fc(out.view(batch, -1))
|
#!/usr/bin/env python
#
# year of release ! 2016
#
# With the exponential growth of post-translational modification (PTM)
# data and the lack of characterisation of all the PTM types, it is
# important to properly understand the functions and experimental
# relevance of PTMs by creating tools that facilitate PTM-based analyses.
# To understand the importance of PTMs in the yeast genome, it is
# important to make it easier to map experimental mutations to PTM
# positional data. It is also important to translate genetic aberrations
# in order to understand the phenotype.
# We designed a Python library (yMap) to help users understand which
# parts of mutated proteins are affected during yeast experimentation.
# This not only helps biologists interpret their data efficiently, but
# also saves time by mapping data to mutations automatically.
#
# The yMap program is a python based fast and robust automated method to map
# large yeast variants to proteins post-translational modifications, proteins domains,
# proteins-DNA binding domains, proteins structural regions, proteins active and
# binding sites, proteins networks visualisation.
# For Usage see README file
#
# Dependencies:
# Orange Bioinformatics
# see README file
#
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
try:
from builtins import next
from builtins import str
from builtins import range
from builtins import object
from builtins import bytes
except ImportError:
pass
import os
import sys
import math
import zipfile
from itertools import groupby
import shutil
import time
import urllib
from collections import OrderedDict
import webbrowser
try:
import Orange
except ImportError:
import Orange3
from orangecontrib.bio import go
from six.moves import range
ontology = go.Ontology()
annotations = go.Annotations("sgd", ontology=ontology)
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
import pkg_resources
from pkg_resources import resource_stream
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ Mutation type (Synon | Non-Synon | Stop codon) module (see exmple data) \\\\\\\\\\\\\\\\\\\\\
#//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
# Standard DNA codon table: maps each upper-case codon to its one-letter
# amino-acid code; '_' marks the three stop codons (TAA, TAG, TGA).
genetic_code = {
    'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
    'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
    'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
    'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
    'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
    'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
    'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
    'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
    'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
    'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
    'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
    'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
    'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
    'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
    'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'_',
    'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W'}
def translate_dna(dna):
    """Translate a DNA string into a protein string.

    Codons are read in frame from position 0; unknown codons translate
    to 'X', and a trailing partial codon is ignored.
    """
    # len(dna) - 2 is the start position of the final complete codon.
    residues = []
    for offset in range(0, len(dna) - 2, 3):
        codon = dna[offset:offset + 3]
        residues.append(genetic_code.get(codon.upper(), 'X'))
    return "".join(residues)
def revcomp(dna, reverse=True, complement=True):
    """Return *dna* reversed and/or complemented.

    The default (both flags True) yields the reverse complement, as used
    for genes on the negative strand.
    """
    pairs = dict(zip('ATGC', 'TACG'))
    seq = reversed(dna) if reverse else dna
    if complement:
        return ''.join(pairs[base] for base in seq)
    return ''.join(seq)
def mutation_file(mutation, d_id):
    """Classify each input mutation as Non-Synonymous or Stop.

    For every mutation row that matches a gene in the id map (*d_id*),
    the gene sequence is pulled from the FASTA section of 'gff.txt', the
    base substitution is applied, both wild-type and mutant sequences are
    translated, and the classification is appended to 'mutation.txt'.
    """
    # NOTE(review): the 'rU' open mode was removed in Python 3.11; this
    # function only runs on older interpreters — confirm the target version.
    with open('mutation.txt', 'wb') as t:
        with open(mutation, 'rU') as mut:
            for m in mut:
                m = m.rstrip().split()
                with open(d_id, 'rU') as id:
                    for i in id:
                        i = i.rstrip().split()
                        if not m[0].startswith('c'.upper()):
                            if len(m) != 5 or not m[0].startswith('c'.lower()):
                                # NOTE(review): StopIteration is an odd choice for a
                                # format error; a ValueError would be conventional.
                                raise StopIteration('Please correct the format of input mutation file')
                            else:
                                if m[4] == i[2]:
                                    take = m[4]+'\t'+m[0]+'\t'+i[3]+'\t'+m[1]+'\t'+i[4]+'\t'+m[2]+'\t'+m[3]+'\t'+i[5]
                                    take1 = take.rstrip().split()
                                    with open('gff.txt', 'rU') as orf:
                                        # FASTA records begin after line 23078 of the
                                        # downloaded gff file; skip the feature table.
                                        linee = orf.readlines()[23078:]
                                        # Pair FASTA headers ('>'-lines) with their sequences.
                                        up = (x[1] for x in groupby(linee, lambda line: line[0] == ">"))
                                        for head in up:
                                            head = next(head)[1:].strip()
                                            seq = "".join(s.strip() for s in next(up))
                                            # Negative strand: mutate, reverse-complement, translate.
                                            if head == take1[1] and take1[0] == i[2] and take1[7] == '-':
                                                cod = 1 + (int(take1[4])-int(take1[3]))
                                                cc = math.ceil(int(cod)/float(3))
                                                c = str(cc).split('.')
                                                cn = int(c[0])-1
                                                sli_n = seq[int(take1[2]):int(take1[4])]
                                                rev_sli_n = revcomp(sli_n, reverse=True, complement=True)
                                                sli_m_n = sli_n[:int(-cod)]+take1[6]+sli_n[int(-cod)+1:]
                                                rev_sli_m_n = revcomp(sli_m_n, reverse=True, complement=True)
                                                wild_type_rev_n = translate_dna(rev_sli_n)
                                                mut_type_n = translate_dna(rev_sli_m_n)
                                                try:
                                                    # Changed residue that became a stop codon.
                                                    if wild_type_rev_n[cn] != mut_type_n[cn] and mut_type_n[cn] == '_':
                                                        pic = take1[0]+'\t'+str(c[0])+'\t'+wild_type_rev_n[cn]+'\t'+mut_type_n[cn]+'\t'+'Stop' +'\t'+take1[1]+'\t'+take1[3]
                                                        if pic > str(0):
                                                            t = open('mutation.txt', 'a')
                                                            t.write(pic+'\n')
                                                except IndexError as e:
                                                    pic1 = take1[0]+ '\t'+ 'Error:'+'\t'+ str(e)
                                                    t = open('mutation.txt', 'a+')
                                                    t.write(pic1+'\n')
                                                    continue
                                                try:
                                                    # Changed residue, still coding: non-synonymous.
                                                    if wild_type_rev_n[cn] != mut_type_n[cn] and mut_type_n[cn] != '_':
                                                        pic = take1[0]+'\t'+str(c[0])+'\t'+wild_type_rev_n[cn]+'\t'+mut_type_n[cn]+'\t'+'Non-Synonymous' +'\t'+take1[1]+'\t'+take1[3]
                                                        if pic > str(0):
                                                            t = open('mutation.txt', 'a+')
                                                            t.write(pic+'\n')
                                                except IndexError as e:
                                                    pic1 = take1[0]+ '\t'+ 'Error:'+'\t'+ str(e)
                                                    t = open('mutation.txt', 'a+')
                                                    t.write(pic1+'\n')
                                                    continue
                                            # Positive strand: mutate and translate directly.
                                            if head == take1[1] and take1[0]==i[2] and take1[7] == '+':
                                                code = int(take1[3])-int(take1[2])
                                                code1 = 1 + (int(take1[3])-int(take1[2]))
                                                cce = math.ceil(int(code1)/float(3))
                                                ce = str(cce).split('.')
                                                cp = int(ce[0])-1
                                                pos = int(take1[2]) - 1
                                                sli_p = seq[int(pos):int(take1[4])]
                                                sli_m_p = sli_p[:int(code)]+take1[6]+sli_p[int(code)+1:]
                                                wild_type_p = translate_dna(sli_p)
                                                mut_type_p = translate_dna(sli_m_p)
                                                try:
                                                    if wild_type_p[cp] != mut_type_p[cp] and mut_type_p[cp] != '_':
                                                        pick = take1[0]+'\t'+str(ce[0])+'\t'+wild_type_p[cp]+'\t'+mut_type_p[cp]+'\t'+'Non-Synonymous'+'\t'+take1[1]+'\t'+take1[3]
                                                        if pick > str(0):
                                                            with open('mutation.txt', 'a+') as t:
                                                                t.write(pick+'\n')
                                                except IndexError as e:
                                                    pic1 = take1[0]+ '\t'+ 'Error:'+'\t'+ str(e)
                                                    t = open('mutation.txt', 'a+')
                                                    t.write(pic1+'\n')
                                                    continue
                                                try:
                                                    if wild_type_p[cp] != mut_type_p[cp] and mut_type_p[cp]=='_':
                                                        pick = take1[0]+'\t'+str(ce[0])+'\t'+wild_type_p[cp]+'\t'+mut_type_p[cp]+'\t'+'Stop' +'\t'+take1[1]+'\t'+take1[3]
                                                        if pick > str(0):
                                                            with open('mutation.txt', 'a+') as t:
                                                                t.write(pick+'\n')
                                                except IndexError as e:
                                                    pic1 = take1[0]+ '\t'+ 'Error:'+'\t'+ str(e)
                                                    t = open('mutation.txt', 'a+')
                                                    t.write(pic1+'\n')
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
# ////////////////// UniProt data /////////////////////////////////////////////
#//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
class YGtPM(object):
def gff(self):
    """Download the S. cerevisiae genomic coordinates (GFF) from SGD into
    'gff.txt' for the codon calculations; see the next method if the
    download is unavailable."""
    reply = urlopen("http://downloads.yeastgenome.org/curation/chromosomal_feature/saccharomyces_cerevisiae.gff")
    with open('gff.txt', 'wb') as out:
        out.write(reply.read())
def frmt(self, file_gff):
    """Reduce a GFF file to 'frmt.txt' rows of the form
    <gene_id>\t<start>\t<end>\t<strand> (one row per 'gene' feature)."""
    with open('frmt.txt', 'w') as out:
        with open(file_gff, 'r') as gff_in:
            for raw in gff_in:
                # Skip '##' directives and '#' comments alike.
                if raw.startswith('#'):
                    continue
                cols = raw.split()
                if len(cols) != 1 and cols[2] == 'gene':
                    # Column 9 looks like 'ID=<gene>;...' — keep the id value.
                    gene_id = cols[8].split(';')[0].split('=')[1]
                    out.write(gene_id + '\t' + cols[3] + '\t' + cols[4] + '\t' + cols[6] + '\n')
def id_map(self, file_id, frmt):
    """Join the three-column id file with the gene-coordinate table.

    Writes 'd_id_map.txt' rows:
    <uniprot>\t<sgd>\t<common>\t<start>\t<end>\t<strand>.
    """
    # Fix: the original reopened and rescanned the coordinate file for
    # every id line (O(n*m) file I/O). Index it once; insertion order of
    # the per-gene lists preserves the original output order.
    coords = {}
    with open(frmt, 'r') as fr:
        for raw in fr:
            f = raw.split()
            if f:
                coords.setdefault(f[0], []).append(f)
    with open('d_id_map.txt', 'w') as out:
        with open(file_id, 'r') as ids:
            for raw in ids:
                line = raw.split()
                if len(line) > 2:
                    for f in coords.get(line[1], []):
                        out.write(line[0]+'\t'+line[1]+'\t'+line[2]+'\t'+f[1]+'\t'+f[2]+'\t'+f[3]+'\n')
def pTMdata(self):
    """Download the raw UniProt PTM annotations for S. cerevisiae into
    'uniprot_mod_raw.txt'."""
    reply = urlopen('http://www.uniprot.org/uniprot/?query=yeast&fil=organism%3A%22Saccharomyces%20cerevisiae%20(strain%20ATCC%20204508%20%2F%20S288c)%20(Baker%27s%20yeast)%20%5B559292%5D%22&sort=score&format=gff&columns=id,feature(MODIFIED%20RESIDUE)')
    with open('uniprot_mod_raw.txt', 'wb') as out:
        out.write(reply.read())
def clean(self, UniProt_file):
    """Clean 'uniprot_mod_raw.txt' into tab-separated 'PTMs.txt' rows of
    <protein>\t<position>\t<modification>, covering Lipidation,
    Glycosylation, Modified-residue and Cross-link (ubiquitination) rows.
    """
    # NOTE(review): the `if p > str(0)` checks are always-true string
    # comparisons for these tab rows, and the repeated open(..., 'a')
    # handles are never closed explicitly — left as-is to keep behavior.
    with open('PTMs.txt', 'w') as out:
        with open(UniProt_file,'rU') as UniProt_file_name:
            for l in UniProt_file_name:
                if not l.startswith('##'):
                    line = l.split()
                    if line[2] == 'Lipidation':
                        # Modification name is the value after '=' in column 9.
                        lll = line[0]+'\t'+line[4]+'\t'+line[8]
                        ll = lll.split()
                        ll = ll[2].split('=')
                        p = line[0]+'\t'+line[4]+'\t'+ll[1]
                        if p > str(0):
                            out = open('PTMs.txt', 'a')
                            out.write(p+'\n')
                        continue
                    if line[2] == 'Glycosylation':
                        ggg = line[0]+'\t'+line[4]+'\t'+line[8]
                        gg = ggg.split()
                        gg = gg[2].split('=')
                        p1 = line[0]+'\t'+line[4]+'\t'+gg[1]
                        if p1 > str(0):
                            out = open('PTMs.txt', 'a+')
                            out.write(p1+'\n')
                        continue
                    if line[2] == 'Modified':
                        # 'Modified residue' rows carry the name in column 10.
                        mmm = line[0]+'\t'+line[4]+'\t'+line[9]
                        mm = mmm.split()
                        mm = mm[2].split('=')
                        mm = mm[1].split(';')
                        p2 = line[0]+'\t'+line[4]+'\t'+mm[0]
                        if p2 > str(0):
                            out = open('PTMs.txt', 'a+')
                            out.write(p2+'\n')
                        continue
                    if line[2] == 'Cross-link': #ubiquitination
                        ccc = line[0]+'\t'+line[4]+'\t'+line[8]
                        cc = ccc.split()
                        cc = cc[2].split('=')
                        p3 = line[0]+'\t'+line[4]+'\t'+cc[1]
                        if p3 > str(0):
                            with open('PTMs.txt', 'a+') as out:
                                out.write(p3+'\n')
def iD(self):
    """Download the UniProt/SGD/common-name id mapping table for yeast
    into 'yeastID.txt'."""
    reply = urlopen('http://www.uniprot.org/uniprot/?query=yeast&fil=organism%3A%22Saccharomyces%20cerevisiae%20(strain%20ATCC%20204508%20%2F%20S288c)%20(Baker%27s%20yeast)%20%5B559292%5D%22&sort=score&format=tab&columns=id,genes(OLN),%2Cgenes(PREFERRED)')
    with open('yeastID.txt', 'wb') as out:
        out.write(reply.read())
def pmap(self, file_id, file_PTMs):
    """Attach SGD/common ids to the PTM rows.

    Writes 'PTM_id_file.txt' rows:
    <uniprot>\t<sgd>\t<common>\t<position>\t<modification>.
    """
    # Fix: the original reopened and rescanned the PTM file for every id
    # line (O(n*m) file I/O) and leaked append handles. Index the PTM
    # rows once; per-protein list order preserves the original output.
    ptms_by_protein = {}
    with open(file_PTMs) as ptms:
        for raw in ptms:
            i = raw.split()
            if i:
                ptms_by_protein.setdefault(i[0], []).append(i)
    with open('PTM_id_file.txt', 'w') as out:
        with open(file_id, 'r') as ids:
            for raw in ids:
                line = raw.split()
                if len(line) > 2:
                    for i in ptms_by_protein.get(line[0], []):
                        out.write(line[0]+'\t'+line[1]+'\t'+line[2]+'\t'+i[1]+'\t'+i[2]+'\n')
def ptm_map(self, mutation_file, PTM_id_file):
    """Map mutated codons onto UniProt PTM sites.

    Writes each overlap to 'mutated_proteins.txt' and a per-match line to
    'summary.txt' (both files are truncated first).
    """
    # Fixes: 'rU' open mode was removed in Python 3.11; the original also
    # reopened (and leaked) append handles per match. Final file contents
    # are unchanged — PTM rows are scanned in file order per mutation.
    with open(PTM_id_file, 'r') as f:
        ptm_rows = [row.split() for row in f if row.split()]
    with open('mutated_proteins.txt', 'w') as out, \
            open('summary.txt', 'w') as summary:
        with open(mutation_file, 'r') as muts:
            for raw in muts:
                line = raw.split()
                for r in ptm_rows:
                    # Match on common name (col 3) and residue position (col 4).
                    if line[0] == r[2] and line[1] == r[3]:
                        out.write(r[0]+'\t'+r[1]+'\t'+line[0]+'\t'+line[1]+'\t'+r[4]+'\t'+'UniProt'+'\n')
                        summary.write(r[0]+'\t'+line[0]+'\t'+line[1]+'\t'+r[4]+'\t'+'PTMs'+'\t'+'UniProt'+'\n')
def dclean(self, uniprot_mod_raw):
    """Filter domain rows out of the raw UniProt file into 'domains.txt'
    (protein, start, end, domain name[, extra note]) before mapping.
    """
    # NOTE(review): the `if take > str(0)` checks are always-true string
    # comparisons for these tab rows — left as-is to preserve behavior.
    with open('domains.txt', 'w') as domain:
        with open(uniprot_mod_raw, 'rU') as raw:
            for a in raw:
                if not a.startswith('##'):
                    # Split on '=' to separate the 'Note=' / evidence parts.
                    a = a.split('=')
                    a1 = a[0].split()
                    if a1[2] == 'Domain':
                        if len(a) == 2:
                            a2 = a[1].rstrip()
                            take = a1[0]+'\t'+a1[3]+'\t'+a1[4]+'\t'+a2+'\n'
                            if take > str(0):
                                with open('domains.txt', 'a') as domain:
                                    domain.write(take)
                            continue
                        if len(a) == 4:
                            a3 = a[1].rstrip().split(';')
                            a4 = a[3].rstrip().split('|')
                            if len(a4) > 1:
                                take2 = a1[0]+'\t'+a1[3]+'\t'+a1[4]+'\t'+a3[0]+'\t'+a4[1]+'\n'
                                if take2 > str(0):
                                    with open('domains.txt', 'a+') as domain:
                                        domain.write(take2)
                            if len(a4) == 1:
                                take3 = a1[0]+'\t'+a1[3]+'\t'+a1[4]+'\t'+a4[0]+'\n'
                                if take3 > str(0):
                                    with open('domains.txt', 'a+') as domain:
                                        domain.write(take3)
def d_map(self, yeast_id, domain):
    """Attach SGD/common ids to domain rows, writing 'id_domain.txt'.

    Domain rows vary between 4 and 8 whitespace-separated fields; one
    branch per width pads the output accordingly.
    """
    # NOTE(review): reopens the domain file for every id line and uses
    # always-true `take > str(0)` string comparisons — left as-is.
    with open('id_domain.txt','w') as id_domain:
        with open(yeast_id, 'rU') as fl:
            for f in fl:
                f = f.split()
                with open(domain,'r') as dp:
                    for d in dp:
                        d = d.split()
                        if len(f) > 2 and f[0] == d[0]:
                            if len(d) == 4:
                                take = d[0]+'\t'+f[1]+'\t'+f[2]+'\t'+d[1]+'\t'+d[2]+'\t'+d[3]+'\n'
                                if take > str(0):
                                    with open('id_domain.txt','a') as id_domain:
                                        id_domain.write(take)
                            if len(d) == 5:
                                take1 = d[0]+'\t'+f[1]+'\t'+f[2]+'\t'+d[1]+'\t'+d[2]+'\t'+d[3]+'\t'+d[4]+'\n'
                                if take1 > str(0):
                                    with open('id_domain.txt','a+') as id_domain:
                                        id_domain.write(take1)
                            if len(d) == 6:
                                take2 = d[0]+'\t'+f[1]+'\t'+f[2]+'\t'+d[1]+'\t'+d[2]+'\t'+d[3]+'\t'+d[4]+'\t'+d[5]+'\n'
                                if take2 > str(0):
                                    with open('id_domain.txt','a+') as id_domain:
                                        id_domain.write(take2)
                            if len(d) == 7:
                                take3 = d[0]+'\t'+f[1]+'\t'+f[2]+'\t'+d[1]+'\t'+d[2]+'\t'+d[3]+'\t'+d[4]+'\t'+d[5]+'\t'+d[6]+'\n'
                                if take3 > str(0):
                                    with open('id_domain.txt','a+') as id_domain:
                                        id_domain.write(take3)
                            if len(d) == 8:
                                take4 = d[0]+'\t'+f[1]+'\t'+f[2]+'\t'+d[1]+'\t'+d[2]+'\t'+d[3]+'\t'+d[4]+'\t'+d[5]+'\t'+d[6]+'\t'+d[7]+'\n'
                                if take4 > str(0):
                                    with open('id_domain.txt','a+') as id_domain:
                                        id_domain.write(take4)
def dmap(self, file1, file2):
    """Map mutated positions (file1) onto the id/domain table (file2).

    Overlaps (domain start <= position <= domain end) are written to
    'domains_mapped.txt' and appended to 'summary.txt'; one branch per
    domain-row width (6-10 fields) pads the output columns.
    """
    # NOTE(review): reopens output/input files inside the loops and uses
    # always-true `take > str(0)` string checks — left as-is.
    with open('domains_mapped.txt', 'w') as mp:
        with open('summary.txt', 'a+') as summary:
            with open(file1,'rU') as f:
                for line in f:
                    line1=line.split()
                    with open(file2, 'r') as f2:
                        for line2 in f2:
                            line2 = line2.split()
                            if line1[0] == line2[2]:
                                try:
                                    # Rows flagged upstream with 'Error:' carry no position.
                                    if line1[1] == 'Error:':
                                        with open('domains_mapped.txt', 'a') as mp:
                                            mp.write("input file contains error position for" +line1[0]+ "this protein"+'\n')
                                        continue
                                    if int(line1[1]) >= int(line2[3]) and int(line1[1]) <= int(line2[4]):
                                        if len(line2) == 6:
                                            take = line2[0]+'\t'+line1[0]+'\t'+line2[3]+'\t'+line1[1]+'\t'+line2[4]+'\t'+line2[5]+'\t'+'UniProt'+'\n'
                                            if take > str(0):
                                                with open('domains_mapped.txt', 'a+') as mp:
                                                    mp.write(take)
                                                summary.write(line2[0]+'\t'+line1[0]+'\t'+line1[1]+'\t'+line2[5]+'\t'+'domain'+'\t'+'UniProt'+'\n')
                                        if len(line2) == 7:
                                            take1 = line2[0]+'\t'+line1[0]+'\t'+line2[3]+'\t'+line1[1]+'\t'+line2[4]+'\t'+line2[5]+'\t'+line2[6]+'UniProt'+'\n'
                                            if take1 > str(0):
                                                with open('domains_mapped.txt', 'a+') as mp:
                                                    mp.write(take1)
                                                summary.write(line2[0]+'\t'+line1[0]+'\t'+line1[1]+'\t'+line2[5]+'\t'+line2[6]+'domain'+'\t'+'UniProt'+'\n')
                                        if len(line2) == 8:
                                            take2 = line2[0]+'\t'+line1[0]+'\t'+line2[3]+'\t'+line1[1]+'\t'+line2[4]+'\t'+line2[5]+'\t'+line2[6]+'\t'+line2[7]+'UniProt'+'\n'
                                            if take2 > str(0):
                                                with open('domains_mapped.txt', 'a+') as mp:
                                                    mp.write(take2)
                                                summary.write(line2[0]+'\t'+line1[0]+'\t'+line1[1]+'\t'+line2[5]+'\t'+line2[6]+'\t'+line2[7]+'domain'+'\t'+'UniProt'+'\n')
                                        if len(line2) == 9:
                                            take3 = line2[0]+'\t'+line1[0]+'\t'+line2[3]+'\t'+line1[1]+'\t'+line2[4]+'\t'+line2[5]+'\t'+line2[6]+'\t'+line2[7]+'\t'+line2[8]+'UniProt'+'\n'
                                            if take3 > str(0):
                                                with open('domains_mapped.txt', 'a+') as mp:
                                                    mp.write(take3)
                                                summary.write(line2[0]+'\t'+line1[0]+'\t'+line1[1]+'\t'+line2[5]+'\t'+line2[6]+'\t'+line2[7]+'\t'+line2[8]+'domain'+'\t'+'UniProt'+'\n')
                                        if len(line2) == 10:
                                            take4 = line2[0]+'\t'+line1[0]+'\t'+line2[3]+'\t'+line1[1]+'\t'+line2[4]+'\t'+line2[5]+'\t'+line2[6]+'\t'+line2[7]+'\t'+line2[8]+'\t'+line2[9]+'UniProt'+'\n'
                                            if take4 > str(0):
                                                with open('domains_mapped.txt', 'a+') as mp:
                                                    mp.write(take4)
                                                summary.write(line2[0]+'\t'+line1[0]+'\t'+line1[1]+'\t'+line2[5]+'\t'+line2[6]+'\t'+line2[7]+'\t'+line2[8]+'\t'+line2[9]+'domain'+'\t'+'UniProt'+'\n')
                                except IndexError:
                                    pass
    def enrich(self, file1):
        """Perform GO-term enrichment analysis of mutated proteins.

        Reads the protein names from column 2 of *file1*, queries the
        module-level ``annotations`` object for enriched GO terms, and writes
        the significant hits (p < 0.05 with at least two genes) to
        ``pvalue.txt``; see main text for how the p-value is calculated.

        Relies on module-level ``annotations``, ``ontology`` and ``Decimal``
        being in scope (imported elsewhere in this file).
        """
        k = []
        # 'w' mode truncates any pvalue.txt left over from a previous run;
        # the later 'a'/'a+' opens then append the actual rows.
        with open('pvalue.txt','w') as out:
            with open(file1, 'rU') as f:
                # NOTE(review): 'rU' was removed in Python 3.11 — confirm the
                # supported interpreter version before running there.
                k = [(line.split())[1] for line in f]
        res = annotations.get_enriched_terms(k)
        if len(res) == 0:
            with open('pvalue.txt','a') as out:
                out.write('No enrichment found')
        else:
            for go_id, (genes, p_value, ref) in list(res.items()):
                # keep only significant terms supported by >= 2 genes
                if p_value < 0.05 and len(genes) >= 2:
                    with open('pvalue.txt','a+') as out:
                        out.write(ontology[go_id].name + '\t'+ '%.2E' %Decimal(p_value) + '\t'+",".join(genes) +'\t'+ str(ref) +'\n')
    def ab(self, file_raw):
        """Prepare raw UniProt data for yeast active- and binding-site analysis.

        Scans *file_raw* (skipping '##' header lines) for rows whose third
        column is 'Active' or 'Binding' and writes
        ``protein<TAB>type<TAB>position`` rows to ``bact.txt``.
        """
        with open('bact.txt','w') as file2:
            with open(file_raw, 'rU') as d:
                for f in d:
                    if not f.startswith('##'):
                        f = f.split()
                        # assumes every non-header row has >= 5 columns —
                        # shorter rows would raise IndexError (TODO confirm input format)
                        if f[2] == 'Active':
                            take = f[0]+'\t'+f[2]+'\t'+f[4]
                            # string comparison with "0" — effectively always
                            # true for non-empty rows; kept as in original
                            if take > str(0):
                                with open('bact.txt','a') as file2:
                                    file2.write(take+'\n')
                        if f[2] == 'Binding':
                            take2 = f[0]+'\t'+f[2]+'\t'+f[4]
                            if take2 > str(0):
                                with open('bact.txt','a+') as file2:
                                    file2.write(take2+'\n')
    def id(self, act, yeast_id):
        """Map protein ids onto active/binding-site-containing proteins.

        Joins *act* (output of :meth:`ab`) against ``yeastID.txt`` on the
        first id column and writes the merged rows to ``sites_id.txt``.

        NOTE(review): the *yeast_id* parameter is never used — the id file
        name 'yeastID.txt' is hard-coded below; confirm this is intended.
        The method name also shadows the ``id`` builtin inside this class.
        """
        with open('sites_id.txt', 'w') as file_id:
            with open(act, 'rU') as a:
                # 'for a in a' rebinds the file handle's name to each line —
                # works, but confusing; kept byte-identical here.
                for a in a:
                    a = a.split()
                    with open('yeastID.txt')as id:
                        for i in id:
                            i = i.split()
                            if len(i) > 2:
                                if a[0] == i[0]:
                                    take = i[2]+'\t'+i[1]+'\t'+i[0]+'\t'+a[1]+'\t'+a[2]
                                    if take > str(0):
                                        with open('sites_id.txt', 'a') as file_id:
                                            file_id.write(take+'\n')
def mmap(self, file_sites, mutation):
""" maps mutations to proteins ab (active and binding sites) """
with open('ab_mutation_file.txt', 'w') as out:
with open(file_sites, 'rU') as s:
for a in s:
a = a.split()
with open(mutation, 'r') as mu:
for m in mu:
m = m.split()
if a[0] == m[0] and a[4] == m[1]:
take = a[2]+'\t'+ a[3]+'\t'+m[1]+'\t'+'UniProt'
if take > str(0):
with open('ab_mutation_file.txt', 'a') as out:
summary = open('summary.txt', 'a+')
out.write(take+'\n')
summary.write(a[2]+'\t'+a[0]+'\t'+m[1]+'\t'+ a[3]+'\t'+'Active/Binding site'+'\t'+'UniProt'+'\n')
    def nucleotide(self):
        """Prepare UniProt data for nucleotide-motif-to-mutation mapping.

        Extracts 'Nucleotide' rows from ``uniprot_mod_raw.txt`` and writes
        ``protein<TAB>type<TAB>start<TAB>end<TAB>note`` rows to
        ``nucleotide.txt`` (note trimmed at the first ';').
        """
        with open('nucleotide.txt', 'w') as t:
            with open('uniprot_mod_raw.txt', 'rU') as file_raw:
                for fi in file_raw:
                    if not fi.startswith('##'):
                        f = fi.split()
                        # NOTE(review): guard checks len(f) > 8 but f[9] is
                        # read below — a row of exactly 9 columns would raise
                        # IndexError; confirm against the raw file format.
                        if f[2] == 'Nucleotide' and len(f) > 8:
                            take = f[0]+'\t'+f[2]+'\t'+f[4]+'\t'+f[5]+'\t'+f[9]
                            take1 = take.split()
                            # keep only the part of the note before ';'
                            take1 = take1[4].split(';')
                            if take > str(0):
                                with open('nucleotide.txt', 'a') as t:
                                    t.write(f[0]+'\t'+f[2]+'\t'+f[4]+'\t'+f[5]+'\t'+take1[0]+'\n')
    def n_map(self, yeast_id, domain):
        """Map the different protein ids onto the nucleotide-motif data.

        Joins *yeast_id* (id translation table) against *domain* (output of
        :meth:`nucleotide`) on the first column and writes the merged rows
        to ``id_nucleotide.txt``.
        """
        with open('id_nucleotide.txt', 'w') as id_domain:
            with open(yeast_id, 'rU') as fl:
                for fe in fl:
                    f = fe.split()
                    # inner file re-opened once per id row — O(n*m) passes
                    with open(domain,'r') as dp:
                        for d in dp:
                            d=d.split()
                            if len(f)>2:
                                if f[0]==d[0]:
                                    take=d[0]+'\t'+f[1]+'\t'+f[2]+'\t'+d[1]+'\t'+d[2]+'\t'+d[3]+'\t'+d[4]
                                    if take > str(0):
                                        with open('id_nucleotide.txt', 'a') as id_domain:
                                            id_domain.write(take+'\n')
    def nucleotide_map(self, file1, file2):
        """Map mutations to protein-nucleotide binding motifs.

        *file1* is the mutation file (protein id, position); *file2* is the
        id-annotated motif file (output of :meth:`n_map`). Mutations falling
        within a motif's [start, end] range are appended to
        ``nucleotide_map.txt`` and echoed to ``summary.txt``.
        """
        with open('nucleotide_map.txt', 'w') as mp:
            with open(file1,'rU') as f:
                for line in f:
                    line1 = line.split()
                    with open(file2, 'r') as f2:
                        for line2 in f2:
                            line2 = line2.split()
                            if line1[0] == line2[2]:
                                try:
                                    # rows flagged upstream as unparseable
                                    if line1[1] == 'Error:':
                                        with open('nucleotide_map.txt', 'a') as mp:
                                            mp.write("input file contains error position for" + line1[0]+"protein"+'\n')
                                        continue
                                    if int(line1[1]) >= int(line2[4]) and int(line1[1]) <= int(line2[5]):
                                        # NOTE(review): line2[4] appears twice in this
                                        # row — the 5th field was presumably meant to be
                                        # the motif end line2[5]; confirm before changing.
                                        take = line2[0]+'\t'+line1[0]+'\t'+line2[4]+'\t'+line1[1]+'\t'+line2[4]+'\t'+line2[6]+'\t'+'UniProt'
                                        if take > str(0):
                                            with open('nucleotide_map.txt', 'a') as mp:
                                                # NOTE(review): summary handle is never
                                                # closed here (leaked per match).
                                                summary = open('summary.txt', 'a+')
                                                mp.write(take+'\n')
                                                summary.write(line2[0]+'\t'+line1[0]+'\t'+line1[1]+'\t'+line2[4]+'\t'+'Nucleotide-Binding'+'\t'+'UniProt'+'\n')
                                except IndexError:
                                    # short rows are silently skipped
                                    pass
def bioGrid(self):
""" Downloads BioGrid ids of yeast proteins from UniProt for further processing including mapping and web browsing
WARNING: requires powerful machines to work with as its expensive to open in machines with low memory
"""
response = urlopen('http://www.uniprot.org/uniprot/?query=yeast&fil=organism%3A%22Saccharomyces%20cerevisiae%20(strain%20ATCC%20204508%20%2F%20S288c)%20(Baker%27s%20yeast)%20%5B559292%5D%22&sort=score&format=tab&columns=id,database(BioGrid)')
page = response.read()
file1 = open('uniprot_bioGrid.txt','wb')
file1.write(page)
file1.close()
    def preWeb(self, file1, mutation ):
        """Map mutated proteins to their BioGrid ids.

        Joins *file1* (the UniProt->BioGrid table from :meth:`bioGrid`)
        against *mutation* on protein id and writes
        ``protein<TAB>biogrid_id<TAB>UniProt`` rows to ``biog.txt``.
        """
        with open('biog.txt', 'w') as out:
            with open(file1, 'rU') as fl:
                for f in fl:
                    f = f.rstrip().split()
                    if len(f) > 1:
                        # BioGrid column may hold several ';'-separated ids;
                        # keep only the first
                        i = f[1].split(';')
                        take = f[0]+'\t'+i[0]
                        take = take.split()
                        with open(mutation, 'r') as pro:
                            for p in pro:
                                p = p.split()
                                if take[0] == p[0]:
                                    take2 = take[0]+'\t'+take[1]+'\t'+'UniProt'
                                    if take2 > str(0):
                                        with open('biog.txt', 'a') as out:
                                            out.write(take2+'\n')
def bweb(self, file1):
""" opens the BioGrid db in browser with as many tabs as mutated proteins"""
url = 'http://thebiogrid.org/'
fl = open(file1, 'rU')
for f in OrderedDict.fromkeys(fl):
f = f.split()
webbrowser.open(url + f[1])
def pdb_c(self, file_1):
""" Structure data filtration from UniProt"""
with open('pdb.txt', 'w') as stru:
with open(file_1, 'rU') as raw:
for r in raw:
if not r.startswith('##'):
line = r.split()
if line[2] == 'Beta' and len(line[9]) > 1:
take = line[9].split('|')
take3 = line[0]+'\t'+line[2]+'\t'+line[4]+'\t'+line[5]+'\t'+take[1]
if take3 > str(0):
with open('pdb.txt', 'a') as stru:
stru.write(take3+'\n')
continue
if len(line) > 7 and line[2] == 'Helix' or line[2]=='Turn':
if len(line[8])>1:
tak = line[8].split('|')
tak3 = line[0]+'\t'+line[2]+'\t'+line[3]+'\t'+line[4]+'\t'+take[1]
if tak3 > str(0):
with open('pdb.txt', 'a+') as stru:
stru.write(tak3+'\n')
    def mu_map(self):
        """Map mutated proteins onto the yeastID translation file.

        Joins ``mutation.txt`` against ``yeastID.txt`` on the third id
        column and writes ``id0<TAB>id1<TAB>id2<TAB>position`` rows to
        ``mutation_id.txt`` (consumed later by :meth:`pdb`).
        """
        with open('mutation_id.txt', 'w') as f:
            with open('mutation.txt') as mutation_file:
                for a in mutation_file:
                    a = a.split()
                    # yeastID.txt is re-read for every mutation row
                    with open('yeastID.txt') as id:
                        for i in id:
                            i = i.split()
                            if len(i) > 2:
                                if a[0] == i[2]:
                                    take = i[0]+'\t'+i[1]+'\t'+i[2]+'\t'+a[1]
                                    if take > str(0):
                                        with open('mutation_id.txt', 'a') as f:
                                            f.write(take+'\n')
    def pdb(self, file_pdb):
        """Map mutations onto protein structural regions.

        *file_pdb* is the filtered structure file from :meth:`pdb_c`;
        mutations whose position falls inside a region's [start, end] are
        appended to ``stru_mutation.txt`` and echoed to ``summary.txt``.
        """
        with open('stru_mutation.txt', 'w') as s:
            with open(file_pdb, 'rU') as raw:
                for i in raw:
                    i = i.split()
                    with open('mutation_id.txt') as mu:
                        for m in mu:
                            m = m.split()
                            if i[0] == m[0]:
                                try:
                                    # positions flagged upstream as unparseable
                                    if m[3] == 'Error:':
                                        with open('stru_mutation.txt', 'a') as s:
                                            s.write("input file contains error position for" +m[2]+ "protein"+'\n')
                                        continue
                                    if int(i[2]) <= int(m[3]) and int(i[3]) >= int(m[3]):
                                        take = m[0]+'\t'+m[1]+'\t'+m[2]+'\t'+i[1]+'\t'+i[2]+'\t'+m[3]+'\t'+i[3]+'\t'+i[4]+'\t'+'UniProt'
                                        if take > str(0):
                                            with open('stru_mutation.txt', 'a+') as s:
                                                # NOTE(review): summary handle is never
                                                # closed here (leaked per match).
                                                summary = open('summary.txt', 'a+')
                                                s.write(take+'\n')
                                                summary.write(m[0]+'\t'+m[2]+'\t'+m[3]+'\t'+i[4]+'\t'+i[1]+'\t'+'UniProt'+'\n')
                                except IndexError:
                                    # short rows are silently skipped
                                    pass
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
#/////////////////// Annotated PTMs data from other resources than UniProt (know to play role in PPI and cross-talk) /////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#To get the mutational effects on PPI, PTM based crosstalk, Protein domains, we need to run the following data files from one local dict.; the data
#retrieved from PTMcode 2.0 and PTMfunc, for this reason. To run your own list against this program, all you need to do is to change the file name in
#the test variable and then you are good to go; the output contains the p-value of the GO terms affected by the mutations and also step-wise protein output data
#to interpret your experiment."""
#This frame work contains the advance stage of mapping, where same one code can be used for the mapping to the different
#PTM types, present at interface and/or ppi.
def interface(file1, mutation):
    """Map mutations to PTMs at protein-protein interfaces.

    PTMs present at the interface of two proteins are known to play a role
    in the interaction (PTMfunc; Beltrao et al. Cell 2012). Matching rows
    go to ``interface_mutation.txt``; id-translated rows are echoed to
    ``summary.txt``.
    """
    with open('interface_mutation.txt', 'w') as out:
        with open(file1, 'rU') as f:
            for l in f:
                line = l.split()
                if len(line) > 5:
                    take = line[1]+'\t'+line[2]+'\t'+line[3]+'\t'+line[5]
                    take = take.split()
                    with open(mutation) as mu:
                        for m in mu:
                            m = m.split()
                            # match on protein id and PTM position
                            if m[0] == take[1] and m[1] == take[2]:
                                take2 = take[0]+'\t'+take[1]+'\t'+take[2]+'\t'+take[3]+'\t'+'PTMfunc'
                                fi = take2.split()
                                with open('yeastID.txt') as id:
                                    for di in id:
                                        di = di.split()
                                        if len(di) > 2 and di[2] == fi[1]:
                                            # NOTE(review): handle never closed
                                            summary = open('summary.txt', 'a+')
                                            summary.write(di[0]+'\t'+di[2]+'\t'+fi[2]+'\t'+fi[3]+'\t'+'Interface'+'\t'+'PTMfunc'+'\n')
                                if take2 > str(0):
                                    with open('interface_mutation.txt', 'a') as out:
                                        out.write(take2+'\n')
def ppi(file1,mutation):
    """Map mutations to PTMs involved in protein-protein interactions.

    (PTMfunc; Beltrao et al. Cell 2012.) Each interaction row names two
    partner proteins (columns 1 and 6); a mutation is matched against
    either partner at the PTM position (column 3). Hits go to
    ``ppi_mutation.txt`` and id-translated rows to ``summary.txt``.
    """
    with open('ppi_mutation.txt', 'w') as out:
        with open(file1, 'rU') as f:
            for ls in f:
                line = ls.split()
                with open (mutation) as mu:
                    for m in mu:
                        m = m.split()
                        if len(line) > 7:
                            # first interaction partner
                            if m[0] == line[1] and m[1] == line[3]:
                                take = line[1]+'\t'+line[2]+'\t'+line[3]+'\t'+'PTMfunc'
                                fi = take.split()
                                with open('yeastID.txt') as i:
                                    for di in i:
                                        di = di.split()
                                        if len(di) > 2 and di[2] == fi[0]:
                                            # NOTE(review): handle never closed
                                            summary = open('summary.txt', 'a+')
                                            summary.write(di[0]+'\t'+di[2]+'\t'+fi[2]+'\t'+'\t'+'PPI'+'\t'+'PTMfunc'+'\n')
                                if take > str(0):
                                    with open('ppi_mutation.txt', 'a') as out:
                                        out.write(take+'\n')
                                continue
                            # second interaction partner
                            if m[0] == line[6] and m[1] == line[3]:
                                take2 = line[6]+'\t'+line[2]+'\t'+line[3]+'\t'+'PTMfunc'
                                fi = take2.split()
                                with open('yeastID.txt') as i:
                                    for di in i:
                                        di=di.split()
                                        if len(di) > 2 and di[2] == fi[0]:
                                            summary = open('summary.txt', 'a+')
                                            summary.write(di[0]+'\t'+di[2]+'\t'+fi[2]+'\t'+'\t'+'PPI'+'\t'+'PTMfunc'+'\n')
                                if take2 > str(0):
                                    with open('ppi_mutation.txt', 'a+') as out:
                                        out.write(take2+'\n')
def withinPro(file2, mutation):
    """Map mutations to PTMs (predicted) in crosstalk within one protein.

    Data from PTMcode 2.0 for baker's yeast (Minguez et al. 2012). Each row
    carries two PTM positions (columns selected into ``take``); a mutation
    matching either position is appended to ``within_protein.txt`` and the
    id-translated row echoed to ``summary.txt``.
    """
    with open('within_protein.txt', 'w') as file1:
        with open(file2, 'rU') as f:
            for l in f:
                line = l.split()
                if len(line)>19:
                    take = line[15]+'\t'+line[16]+'\t'+line[3]+'\t'+line[17]+'\t'+line[7]+'\t'+line[19]
                    take = take.split()
                    with open(mutation, 'r') as mu:
                        for m in mu:
                            m = m.split()
                            # first PTM position of the pair
                            if m[0] == take[1] and m[1]==take[3]:
                                take2 = take[0]+'\t'+take[1]+'\t'+take[2]+'\t'+take[3]+'\t'+'PTMcode'
                                fi=take2.split()
                                with open('yeastID.txt') as id:
                                    for di in id:
                                        di=di.split()
                                        if len(di) > 2 and di[2] == fi[1]:
                                            # NOTE(review): handle never closed
                                            summary = open('summary.txt', 'a+')
                                            summary.write(di[0]+'\t'+di[2]+'\t'+fi[3]+'\t'+fi[2]+'\t'+'WithinProtein'+'\t'+'PTMcode'+'\n')
                                if take2 > str(0):
                                    with open('within_protein.txt', 'a') as file1:
                                        file1.write(take2+'\n')
                                continue
                            # second PTM position of the pair
                            if m[0] == take[1] and m[1] == take[5]:
                                take3 = take[0]+'\t'+take[1]+'\t'+take[4]+'\t'+take[5]+'\t'+'PTMcode'
                                fi = take3.split()
                                with open('yeastID.txt') as id:
                                    for di in id:
                                        di=di.split()
                                        if len(di) > 2 and di[2] == fi[1]:
                                            summary = open('summary.txt', 'a+')
                                            summary.write(di[0]+'\t'+di[2]+'\t'+fi[3]+'\t'+fi[2]+'\t'+'WithinProtein'+'\t'+'PTMcode'+'\n')
                                if take3 > str(0):
                                    with open('within_protein.txt', 'a+') as file1:
                                        file1.write(take3+'\n')
def betweenPro(fileb, mutation):
    """Map mutations to PTMs (predicted) in crosstalk between two proteins.

    Data from PTMcode 2.0 for baker's yeast (Minguez et al. 2012). Each row
    names two proteins with one PTM position each; a mutation matching
    either (protein, position) pair is appended to
    ``ptm_between_proteins.txt`` and echoed id-translated to ``summary.txt``.
    """
    with open('ptm_between_proteins.txt', 'w') as file1:
        with open(fileb, 'rU') as f:
            for l in f:
                line = l.split()
                # NOTE(review): guard checks len(line) > 20 but line[21] is
                # read below — rows of exactly 21 columns would raise
                # IndexError; confirm against the raw-file layout.
                if len(line)>20:
                    take = line[16]+'\t'+line[18]+'\t'+line[15]+'\t'+line[17]+'\t'+line[19]+'\t'+line[21]+'\t'+line[4]+'\t'+line[8]
                    take = take.split()
                    with open(mutation, 'r') as mu:
                        for m in mu:
                            m = m.split()
                            # first protein of the pair
                            if m[0] == take[0] and m[1]==take[4]:
                                take2 = take[0]+'\t'+take[2]+'\t'+take[4]+'\t'+take[6]+'\t'+'PTMcode'
                                fi = take2.split()
                                with open('yeastID.txt') as id:
                                    for di in id:
                                        di = di.split()
                                        if len(di) > 2 and di[2] == fi[0]:
                                            # NOTE(review): handle never closed
                                            summary = open('summary.txt', 'a+')
                                            summary.write(di[0]+'\t'+di[2]+'\t'+fi[2]+'\t'+fi[3]+'\t'+'BetweenProteins'+'\t'+'PTMcode'+'\n')
                                if take2 > str(0):
                                    with open('ptm_between_proteins.txt', 'a') as file1:
                                        file1.write(take2+'\n')
                                continue
                            # second protein of the pair
                            if m[0] == take[1] and m[1] == take[5]:
                                take3 = take[1]+'\t'+take[3]+'\t'+take[5]+'\t'+take[7]+'\t'+'PTMcode'
                                fi=take3.split()
                                with open('yeastID.txt') as id:
                                    for di in id:
                                        di = di.split()
                                        if len(di) > 2 and di[2] == fi[0]:
                                            summary = open('summary.txt', 'a+')
                                            summary.write(di[0]+'\t'+di[2]+'\t'+fi[2]+'\t'+fi[3]+'\t'+'BetweenProteins'+'\t'+'PTMcode'+'\n')
                                if take3 > str(0):
                                    with open('ptm_between_proteins.txt', 'a+') as file1:
                                        file1.write(take3+'\n')
def hotspot(fileh, mutation):
    """Map mutations to PTM 'hotspots'.

    PTM-containing motifs in close proximity are named hotspots
    (Beltrao et al. Cell 2012). Hits are appended to ``hotspot.txt`` and
    id-translated rows echoed to ``summary.txt``.
    """
    # the with-alias 'hotspot' shadows this function's own name inside the
    # body — harmless here, kept as in the original
    with open('hotspot.txt', 'w') as hotspot:
        with open(fileh, 'rU') as f:
            for l in f:
                line = l.split()
                with open(mutation, 'r') as mu:
                    for m in mu:
                        m = m.split()
                        if len(line) > 6:
                            # match on protein id and PTM position
                            if m[0] == line[2] and m[1] == line[3]:
                                take = line[1]+'\t'+line[2]+'\t'+line[3]+'\t'+line[5]+'\t'+line[6]+'\t'+'PTMfunc'
                                fi = take.split()
                                with open('yeastID.txt') as id:
                                    for di in id:
                                        di = di.split()
                                        if len(di) > 2 and di[2] == fi[1]:
                                            # NOTE(review): handle never closed
                                            summary = open('summary.txt', 'a+')
                                            summary.write(di[0]+'\t'+di[2]+'\t'+fi[2]+'\t'+fi[3]+'\t'+'HotSpot'+'\t'+'PTMFunc'+'\n')
                                if take > str(0):
                                    with open('hotspot.txt', 'a') as hotspot:
                                        hotspot.write(take+'\n')
def sum_file_map():
    """Collect all mapping results into one 'final-report' file.

    Copies ``summary.txt`` to ``final_report.txt`` with duplicate lines
    removed, preserving first-seen order.
    """
    with open('summary.txt') as source:
        unique_rows = OrderedDict.fromkeys(source)
    with open('final_report.txt', 'w') as report:
        for row in unique_rows:
            report.write(row)
def resc():
    """Extract the bundled PTMcode/PTMfunc data files into the working dir.

    Reads each data file from the installed ``ymap`` package via
    ``resource_stream`` and writes a local copy; the zipped
    ``sc_btw_proteins`` archive is additionally extracted. Each step is
    best-effort: IOError is swallowed so missing resources are skipped.
    """
    try:
        r = resource_stream("ymap", "/data/PTMcode+PTMfunc_data/3DID_aceksites_interfaceRes_sc.txt").read().decode()
        with open('3DID_aceksites_interfaceRes_sc.txt','w') as h:
            h.write(r+'\n')
    except IOError:
        pass
    try:
        ri = resource_stream("ymap", "/data/PTMcode+PTMfunc_data/3DID_phosphosites_interfaceRes_sc.txt").read().decode()
        with open('3DID_phosphosites_interfaceRes_sc.txt','w') as hi:
            hi.write(ri+'\n')
    except IOError:
        pass
    try:
        riu = resource_stream("ymap", "/data/PTMcode+PTMfunc_data/3DID_ubisites_interfaceRessc_sc.txt").read().decode()
        with open('3DID_ubisites_interfaceRessc_sc.txt','w') as hiu:
            hiu.write(riu+'\n')
    except IOError:
        pass
    try:
        rac = resource_stream("ymap", "/data/PTMcode+PTMfunc_data/SC_acet_interactions.txt").read().decode()
        with open('SC_acet_interactions.txt','w') as hia:
            hia.write(rac+'\n')
    except IOError:
        pass
    try:
        # zip archive: keep raw bytes, no decode
        t = resource_stream("ymap", "/data/PTMcode+PTMfunc_data/sc_btw_proteins.txt.zip").read()
        with open('sc_btw_proteins.txt.zip','wb') as ht:
            ht.write(t)
    except IOError:
        pass
    try:
        zipfile.ZipFile('sc_btw_proteins.txt.zip', 'r').extractall()
    except IOError:
        pass
    try:
        rps = resource_stream("ymap", "/data/PTMcode+PTMfunc_data/SC_psites_interactions_sc.txt").read().decode()
        with open('SC_psites_interactions_sc.txt','w') as hip:
            hip.write(rps+'\n')
    except IOError:
        pass
    try:
        rui = resource_stream("ymap", "/data/PTMcode+PTMfunc_data/SC_ubi_interactions_sc.txt").read().decode()
        with open('SC_ubi_interactions_sc.txt','w') as hui:
            hui.write(rui+'\n')
    except IOError:
        pass
    try:
        rin = resource_stream("ymap", "/data/PTMcode+PTMfunc_data/sc_within_proteins.txt").read().decode()
        with open('sc_within_proteins.txt','w') as hin:
            hin.write(rin+'\n')
    except IOError:
        pass
    try:
        rsc = resource_stream("ymap", "/data/PTMcode+PTMfunc_data/schotspot_updated.txt").read().decode()
        with open('schotspot_updated.txt','w') as his:
            his.write(rsc+'\n')
    except IOError:
        pass
    return
#////////////////////////////////////////////////////////////////////////////////////////////////////////////
# USEAGE (Optional)
#------------------------------------------------------------------------------------------------------------
#This usage strategy is optional, and a user can use above written codes in any convenient way as
#required by experiemental settings and data interpretation (see README for proper use)
#////////////////////////////////////////////////////////////////////////////////////////////////////////////
# Shared module-level state used by all the pipeline functions below.
c = YGtPM()  # single instance of the UniProt mapping class defined above
wd = os.getcwd()  # every output file/folder is created relative to this dir
def data():
    """Download and clean all data required to run the yMap methods.

    Runs every preparation step (UniProt download, id tables, domain/site/
    structure extraction, bundled PTMcode/PTMfunc extraction) best-effort:
    each step swallows IOError so a missing input skips that step only.
    Returns a timing message.
    """
    start_time = time.time()
    try:
        resc()
    except IOError:
        pass
    try:
        dat = c.pTMdata()
    except IOError:
        pass
    try:
        cl = c.clean('uniprot_mod_raw.txt')
    except IOError:
        pass
    try:
        i = c.iD()
    except IOError:
        pass
    try:
        m = c.pmap('yeastID.txt', 'PTMs.txt')
    except IOError:
        pass
    try:
        d = c.dclean('uniprot_mod_raw.txt')
    except IOError:
        pass
    try:
        dm = c.d_map('yeastID.txt', 'domains.txt')
    except IOError:
        pass
    try:
        ab = c.ab('uniprot_mod_raw.txt')
    except IOError:
        pass
    try:
        # NOTE(review): 'yeast_id.txt' differs from the 'yeastID.txt' name
        # used everywhere else — possible typo, but c.id() ignores this
        # argument anyway; confirm before changing.
        ii = c.id('bact.txt', 'yeast_id.txt')
    except IOError:
        pass
    try:
        bio=c.bioGrid()
    except IOError:
        pass
    try:
        c.pdb_c('uniprot_mod_raw.txt')
    except IOError:
        pass
    try:
        c.gff()
    except IOError:
        pass
    try:
        c.frmt('gff.txt')
    except IOError:
        pass
    try:
        c.id_map('yeastID.txt', 'frmt.txt')
    except IOError:
        pass
    try:
        c.nucleotide()
    except IOError:
        pass
    try:
        c.n_map('yeastID.txt', 'nucleotide.txt')
    except IOError:
        pass
    try:
        z = zipfile.ZipFile(wd+'/'+'data'+'/'+'PTMcode+PTMfunc_data'+'/'+'sc_btw_proteins.txt.zip', 'r')
        z.extractall()
    except IOError:
        pass
    try:
        # copy the bundled PTMcode/PTMfunc data files into the working dir
        shutil.copy2(wd+'/'+'data'+'/'+'PTMcode+PTMfunc_data'+'/'+'3DID_aceksites_interfaceRes_sc.txt', wd)
        shutil.copy2(wd+'/'+'data'+'/'+'PTMcode+PTMfunc_data'+'/'+'3DID_phosphosites_interfaceRes_sc.txt', wd)
        shutil.copy2(wd+'/'+'data'+'/'+'PTMcode+PTMfunc_data'+'/'+'3DID_ubisites_interfaceRessc_sc.txt', wd)
        shutil.copy2(wd+'/'+'data'+'/'+'PTMcode+PTMfunc_data'+'/'+'SC_acet_interactions.txt', wd)
        shutil.copy2(wd+'/'+'data'+'/'+'PTMcode+PTMfunc_data'+'/'+'SC_psites_interactions_sc.txt', wd)
        shutil.copy2(wd+'/'+'data'+'/'+'PTMcode+PTMfunc_data'+'/'+'SC_ubi_interactions_sc.txt', wd)
        shutil.copy2(wd+'/'+'data'+'/'+'PTMcode+PTMfunc_data'+'/'+'sc_within_proteins.txt', wd)
        shutil.copy2(wd+'/'+'data'+'/'+'PTMcode+PTMfunc_data'+'/'+'schotspot_updated.txt', wd)
        shutil.copy2(wd+'/'+'data'+'/'+'PTMcode+PTMfunc_data'+'/'+'sc_btw_proteins.txt', wd)
    except IOError:
        pass
    return "All required data downloaded in %s seconds" % (time.time() - start_time)
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
#//////////////////////////////// Following two codes are used for return the mutations at proteins level \\\\\\\\\\\\\\\\\\
#///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
def mutation_types_file():
    """Compute mutation types and amino-acid changes from genomic input.

    Delegates to the module-level ``mutation_file`` helper (defined
    elsewhere in this file) using the reference and mutant bases; produces
    the protein-level mutation files consumed by the mapping steps.
    """
    start_time = time.time()  # kept for parity with sibling functions (unused)
    try:
        mutation_file("mutated_proteins.txt", 'd_id_map.txt')
    except IOError:
        pass
    return "Mutations with mutations types are available to map on functional entities"
#//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#////////////////////////////////// Following series of codes will return three files - mapped mutations, pvalue and biog.txt - for each type of data types \\\\\\\\\\\\\\\\\\\\
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
def ptm():
    """Map PTMs to mutations and collect results under ``PTMs/``.

    Raises StopIteration when ``mutation.txt`` is missing. Each mapping,
    enrichment and move step is best-effort (IOError swallowed).
    """
    start_time = time.time()
    if not os.path.exists('mutation.txt'):
        raise StopIteration('because of missing mutation file')
    else:
        try:
            a = c.ptm_map('mutation.txt', 'PTM_id_file.txt')
        except IOError:
            pass
        try:
            p = c.enrich('mutated_proteins.txt')
        except IOError:
            pass
        try:
            c.preWeb('uniprot_bioGrid.txt', 'mutated_proteins.txt')
        except IOError:
            pass
        try:
            # NOTE(review): os.system("mkdir ...") is shell-dependent;
            # os.makedirs would be portable.
            os.system("mkdir "+wd+"/"+'PTMs')
            shutil.move(wd+"/"+'mutated_proteins.txt', wd+"/"+'PTMs')
            shutil.move(wd+"/"+'pvalue.txt', wd+"/"+'PTMs')
            shutil.move(wd+"/"+'biog.txt', wd+"/"+'PTMs')
        except IOError:
            pass
    return "PTMs mapped in %s seconds" % (time.time() - start_time)
def domain():
    """Map protein domains to mutations and collect results under ``Domains/``.

    Raises StopIteration when ``mutation.txt`` is missing; all steps are
    best-effort (IOError swallowed).
    """
    start_time = time.time()
    if not os.path.exists('mutation.txt'):
        raise StopIteration('because of missing mutation file')
    else:
        try:
            dom = c.dmap('mutation.txt', 'id_domain.txt')
        except IOError:
            pass
        try:
            p = c.enrich('domains_mapped.txt')
        except IOError:
            pass
        try:
            c.preWeb('uniprot_bioGrid.txt', 'domains_mapped.txt')
        except IOError:
            pass
        try:
            os.system("mkdir "+wd+"/"+'Domains')
        except IOError:
            pass
        try:
            shutil.move(wd+"/"+'domains_mapped.txt', wd+"/"+'Domains')
            shutil.move(wd+"/"+'pvalue.txt', wd+"/"+'Domains')
            shutil.move(wd+"/"+'biog.txt', wd+"/"+'Domains')
        except IOError:
            pass
    return "Domains mapped in %s seconds" % (time.time() - start_time)
def nucleo():
    """Map DNA-binding motifs to mutations; results under ``Nucleotide_binding/``.

    Raises StopIteration when ``mutation.txt`` is missing; all steps are
    best-effort (IOError swallowed).
    """
    start_time = time.time()
    if not os.path.exists('mutation.txt'):
        raise StopIteration('because of missing mutation file')
    else:
        try:
            c.nucleotide_map('mutation.txt', 'id_nucleotide.txt')
        except IOError:
            pass
        try:
            p = c.enrich('nucleotide_map.txt')
        except IOError:
            pass
        try:
            c.preWeb('uniprot_bioGrid.txt', 'nucleotide_map.txt')
        except IOError:
            pass
        try:
            os.system("mkdir "+wd+"/"+'Nucleotide_binding')
        except IOError:
            pass
        try:
            shutil.move(wd+"/"+'nucleotide_map.txt', wd+"/"+'Nucleotide_binding')
            shutil.move(wd+"/"+'pvalue.txt', wd+"/"+'Nucleotide_binding')
            shutil.move(wd+"/"+'biog.txt', wd+"/"+'Nucleotide_binding')
        except IOError:
            pass
    return "Nucleotide_binding domains mapped in %s seconds" % (time.time() - start_time)
def ab():
    """Map active/binding sites to mutations; results under ``A-B-sites/``.

    Raises StopIteration when ``mutation.txt`` is missing; all steps are
    best-effort (IOError swallowed). (Module-level wrapper; distinct from
    the class method of the same name.)
    """
    start_time = time.time()
    if not os.path.exists('mutation.txt'):
        raise StopIteration('because of missing mutation file')
    else:
        try:
            mm = c.mmap('sites_id.txt', 'mutation.txt')
        except IOError:
            pass
        try:
            p = c.enrich('ab_mutation_file.txt')
        except IOError:
            pass
        try:
            c.preWeb('uniprot_bioGrid.txt', 'ab_mutation_file.txt')
        except IOError:
            pass
        try:
            os.system("mkdir "+wd+"/"+'A-B-sites')
            shutil.move(wd+"/"+'ab_mutation_file.txt', wd+"/"+'A-B-sites')
            shutil.move(wd+"/"+'pvalue.txt', wd+"/"+'A-B-sites')
            shutil.move(wd+"/"+'biog.txt', wd+"/"+'A-B-sites')
        except IOError:
            pass
    return "Active-Binding proteins sites mapped in %s seconds" % (time.time() - start_time)
def struc_map():
    """Map structural regions to mutations; results under ``PDB/``.

    Raises StopIteration when ``mutation.txt`` is missing; all steps are
    best-effort (IOError swallowed).
    """
    start_time = time.time()
    if not os.path.exists('mutation.txt'):
        raise StopIteration('because of missing mutation file')
    else:
        try:
            c.mu_map()
        except IOError:
            pass
        try:
            pd = c.pdb('pdb.txt')
        except IOError:
            pass
        try:
            p = c.enrich('stru_mutation.txt')
        except IOError:
            pass
        try:
            c.preWeb('uniprot_bioGrid.txt', 'stru_mutation.txt')
        except IOError:
            pass
        try:
            os.system("mkdir "+wd+"/"+'PDB')
            shutil.move(wd+"/"+'stru_mutation.txt', wd+"/"+'PDB')
            shutil.move(wd+"/"+'pvalue.txt', wd+"/"+'PDB')
            shutil.move(wd+"/"+'biog.txt', wd+"/"+'PDB')
        except IOError:
            pass
    return "Mutations are mapped to structural features in %s seconds" % (time.time() - start_time)
def intf():
    """Map mutations to interface PTMs (acetylation/phosphorylation/ubiquitination).

    Runs :func:`interface` over the three PTMfunc/3DID interface data sets
    in turn, enriching each result and moving it to a per-PTM subfolder of
    ``Interface/``. This analysis helps interpret mutational data on yeast
    proteins from PTMfunc (also the 3DID db) and PTMcode 2.0. Raises
    StopIteration when ``mutation.txt`` is missing.
    """
    start_time = time.time()
    if not os.path.exists('mutation.txt'):
        raise StopIteration('because of missing mutation file')
    else:
        try:
            interface('3DID_aceksites_interfaceRes_sc.txt' ,'mutation.txt')
        except IOError:
            pass
        try:
            p = c.enrich('interface_mutation.txt')
        except IOError:
            pass
        try:
            os.system("mkdir "+wd+"/"+'Interface')
            os.system("mkdir "+wd+"/"+'Interface'+"/"+'acetylation')
            shutil.move(wd+"/"+'interface_mutation.txt', wd+"/"+'Interface'+"/"+'acetylation')
            shutil.move(wd+"/"+'pvalue.txt', wd+"/"+'Interface'+"/"+'acetylation')
        except IOError:
            pass
        try:
            interface('3DID_phosphosites_interfaceRes_sc.txt' ,'mutation.txt')
        except IOError:
            pass
        try:
            p = c.enrich('interface_mutation.txt')
        except IOError:
            pass
        try:
            os.system("mkdir "+wd+"/"+'Interface'+"/"+'Phosphorylation')
            shutil.move(wd+"/"+'interface_mutation.txt', wd+"/"+'Interface'+"/"+'Phosphorylation')
            shutil.move(wd+"/"+'pvalue.txt', wd+"/"+'Interface'+"/"+'Phosphorylation')
        except IOError:
            pass
        try:
            interface('3DID_ubisites_interfaceRessc_sc.txt' ,'mutation.txt')
        except IOError:
            pass
        try:
            p = c.enrich('interface_mutation.txt')
        except IOError:
            pass
        try:
            os.system("mkdir "+wd+"/"+'Interface'+"/"+'ubiquitination')
            shutil.move(wd+"/"+'interface_mutation.txt', wd+"/"+'Interface'+"/"+'ubiquitination')
            shutil.move(wd+"/"+'pvalue.txt', wd+"/"+'Interface'+"/"+'ubiquitination')
        except IOError:
            pass
    return "run time is %s seconds" % (time.time() - start_time)
def pi():
    """Map mutations to interaction (PPI) PTMs for the three PTM types.

    Runs :func:`ppi` over the acetylation/phosphorylation/ubiquitination
    interaction data sets, enriching each result and moving it to a
    per-PTM subfolder of ``PPI/``. Raises StopIteration when
    ``mutation.txt`` is missing.
    """
    start_time = time.time()
    if not os.path.exists('mutation.txt'):
        raise StopIteration('because of missing mutation file')
    else:
        try:
            ppi('SC_acet_interactions.txt', 'mutation.txt')
        except IOError:
            pass
        try:
            c.enrich('ppi_mutation.txt')
        except IOError:
            pass
        try:
            os.system("mkdir "+wd+"/"+'PPI')
            os.system("mkdir "+wd+"/"+'PPI'+"/"+'acetylation')
            shutil.move(wd+"/"+'ppi_mutation.txt', wd+"/"+'PPI'+"/"+'acetylation')
            shutil.move(wd+"/"+'pvalue.txt', wd+"/"+'PPI'+"/"+'acetylation')
        except IOError:
            pass
        try:
            ppi('SC_psites_interactions_sc.txt', 'mutation.txt')
        except IOError:
            pass
        try:
            p = c.enrich('ppi_mutation.txt')
        except IOError:
            pass
        try:
            os.system("mkdir "+wd+"/"+'PPI'+"/"+'Phosphorylation')
            shutil.move(wd+"/"+'ppi_mutation.txt', wd+"/"+'PPI'+"/"+'Phosphorylation')
            shutil.move(wd+"/"+'pvalue.txt', wd+"/"+'PPI'+"/"+'Phosphorylation')
        except IOError:
            pass
        try:
            ppi('SC_ubi_interactions_sc.txt', 'mutation.txt')
        except IOError:
            pass
        try:
            c.enrich('ppi_mutation.txt')
        except IOError:
            pass
        try:
            os.system("mkdir "+wd+"/"+'PPI'+"/"+'ubiquitination')
            shutil.move(wd+"/"+'ppi_mutation.txt', wd+"/"+'PPI'+"/"+'ubiquitination')
            shutil.move(wd+"/"+'pvalue.txt', wd+"/"+'PPI'+"/"+'ubiquitination')
        except IOError:
            pass
    return "run time is %s seconds" % (time.time() - start_time)
def withP():
    """Map mutations to within-protein PTM crosstalk; results under
    ``PTMs_within_Proteins/``. Raises StopIteration when ``mutation.txt``
    is missing; all steps are best-effort (IOError swallowed).
    """
    start_time = time.time()
    if not os.path.exists('mutation.txt'):
        raise StopIteration('because of missing mutation file')
    else:
        try:
            withinPro('sc_within_proteins.txt','mutation.txt')
        except IOError:
            pass
        try:
            p = c.enrich('within_protein.txt')
        except IOError:
            pass
        try:
            os.system("mkdir "+wd+"/"+'PTMs_within_Proteins')
            shutil.move(wd+"/"+'within_protein.txt', wd+"/"+'PTMs_within_Proteins')
            shutil.move(wd+"/"+'pvalue.txt', wd+"/"+'PTMs_within_Proteins')
        except IOError:
            pass
    return "run time is %s seconds" % (time.time() - start_time)
def betweenP():
    """Map mutations to between-protein PTM crosstalk; results under
    ``PTMs_between_Proteins/``. Raises StopIteration when ``mutation.txt``
    is missing; all steps are best-effort (IOError swallowed).
    """
    start_time = time.time()
    if not os.path.exists('mutation.txt'):
        raise StopIteration('because of missing mutation file')
    else:
        try:
            betweenPro('sc_btw_proteins.txt', 'mutation.txt')
        except IOError:
            pass
        try:
            p = c.enrich('ptm_between_proteins.txt')
        except IOError:
            pass
        try:
            os.system("mkdir "+wd+"/"+'PTMs_between_Proteins')
            shutil.move(wd+"/"+'ptm_between_proteins.txt', wd+"/"+'PTMs_between_Proteins')
            shutil.move(wd+"/"+'pvalue.txt', wd+"/"+'PTMs_between_Proteins')
        except IOError:
            pass
    return "run time is %s seconds" % (time.time() - start_time)
def hotS():
    """Map mutations to PTM hotspots; results under ``PTMs_hotSpots/``.

    Raises StopIteration when ``mutation.txt`` is missing; all steps are
    best-effort (IOError swallowed).
    """
    start_time = time.time()
    if not os.path.exists('mutation.txt'):
        raise StopIteration('because of missing mutation file')
    else:
        try:
            hotspot('schotspot_updated.txt', 'mutation.txt')
        except IOError:
            pass
        try:
            p = c.enrich('hotspot.txt')
        except IOError:
            pass
        try:
            os.system("mkdir "+wd+"/"+'PTMs_hotSpots')
            shutil.move(wd+"/"+'hotspot.txt', wd+"/"+'PTMs_hotSpots')
            shutil.move(wd+"/"+'pvalue.txt', wd+"/"+'PTMs_hotSpots')
        except IOError:
            pass
    return "run time is %s seconds" % (time.time() - start_time)
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ Following two codes with perform all the codes on all the data /////////////////////////////////////
#//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
def uniprot_data():
    """Run every UniProt-based mapping step (ptm, domain, ab, struc_map,
    nucleo) in sequence, each best-effort: an IOError in one step does not
    stop the following steps.
    """
    pipeline = (ptm, domain, ab, struc_map, nucleo)
    for run_step in pipeline:
        try:
            run_step()
        except IOError:
            pass
    return "The Uniprot data is resolved into functional for interpretation"
def functional_data():
    """Run every PTMcode/PTMfunc-based mapping step (intf, pi, withP,
    betweenP, hotS) in sequence, each best-effort. Raises StopIteration
    when ``mutation.txt`` is missing.
    """
    if not os.path.exists('mutation.txt'):
        raise StopIteration('because of missing mutation file')
    pipeline = (intf, pi, withP, betweenP, hotS)
    for run_step in pipeline:
        try:
            run_step()
        except IOError:
            pass
    return "The data from PTMcode and PTMfunc on PTMs functional biasedness is resolved into functional for interpretation"
#///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#////////////////////////////// Final module of ymap package for executing all the modules \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
def ymap_genes():
    """Run the whole yMap pipeline starting from genomic coordinates.

    When ``mutation.txt`` does not yet exist it is first derived from the
    genomic input via :func:`mutation_types_file`; then all UniProt and
    PTMcode/PTMfunc mappings run and the outputs are gathered into a
    timestamped ``yMap-results<seconds>`` folder.
    """
    start_time = time.time()
    # NOTE(review): only the derivation step is conditional on the missing
    # mutation file; the rest of the pipeline runs unconditionally —
    # confirm this scoping against the released package.
    if not os.path.exists('mutation.txt'):
        try:
            mutation_types_file()
        except IOError:
            pass
    try:
        uniprot_data()
    except IOError:
        pass
    try:
        functional_data()
    except IOError:
        pass
    try:
        sum_file_map()
    except IOError:
        pass
    try:
        # elapsed seconds double as a unique-ish results-folder suffix
        y = (time.time() - start_time)
        os.makedirs('yMap-results'+str(y))
    except IOError:
        pass
    try:
        p = c.enrich('final_report.txt')
    except IOError:
        pass
    try:
        c.preWeb('uniprot_bioGrid.txt', 'final_report.txt')
    except IOError:
        pass
    try:
        # NOTE(review): if the makedirs try above failed before assigning y,
        # these uses of y raise NameError (not caught by `except IOError`).
        shutil.move(wd+"/"+'PTMs', wd+"/"+'yMap-results'+str(y))
        shutil.move('Domains', wd+"/"+'yMap-results'+str(y))
        shutil.move('Nucleotide_binding',wd+"/"+'yMap-results'+str(y))
        shutil.move(wd+"/"+'A-B-sites', wd+"/"+'yMap-results'+str(y))
        shutil.move(wd+"/"+'PDB', wd+"/"+'yMap-results'+str(y))
        shutil.move(wd+"/"+'Interface', wd+"/"+'yMap-results'+str(y))
        shutil.move(wd+"/"+'PPI', wd+"/"+'yMap-results'+str(y))
        shutil.move(wd+"/"+'PTMs_within_Proteins', wd+"/"+'yMap-results'+str(y))
        shutil.move(wd+"/"+'PTMs_between_Proteins',wd+"/"+'yMap-results'+str(y))
        shutil.move(wd+"/"+'PTMs_hotSpots',wd+"/"+'yMap-results'+str(y))
        shutil.move(wd+"/"+'mutation.txt', wd+"/"+'yMap-results'+str(y))
        shutil.move(wd+"/"+'final_report.txt', wd+"/"+'yMap-results'+str(y))
        shutil.move(wd+"/"+'pvalue.txt', wd+"/"+'yMap-results'+str(y))
        shutil.move(wd+"/"+'biog.txt', wd+"/"+'yMap-results'+str(y))
        os.remove(wd+"/"+'mutation_id.txt')
        os.remove(wd+"/"+'summary.txt')
    except IOError:
        pass
    return "All functional data from genomic coordinates is ready in about %s seconds" % (time.time() - start_time)
def ymap_proteins():
    """Run the whole yMap pipeline from protein-level mutation positions.

    Requires ``mutation.txt`` to exist (raises StopIteration otherwise);
    runs all UniProt and PTMcode/PTMfunc mappings and gathers the outputs
    into a timestamped ``yMap-results<seconds>`` folder.
    """
    start_time = time.time()
    if not os.path.exists('mutation.txt'):
        raise StopIteration('because of missing mutation file')
    else:
        try:
            uniprot_data()
        except IOError:
            pass
        try:
            functional_data()
        except IOError:
            pass
        try:
            sum_file_map()
        except IOError:
            pass
        try:
            # elapsed seconds double as a unique-ish results-folder suffix
            y = (time.time() - start_time)
            os.makedirs('yMap-results'+str(y))
        except IOError:
            pass
        try:
            p = c.enrich('final_report.txt')
        except IOError:
            pass
        try:
            c.preWeb('uniprot_bioGrid.txt', 'final_report.txt')
        except IOError:
            pass
        try:
            # NOTE(review): if the makedirs try above failed before assigning
            # y, these uses of y raise NameError (not caught here).
            shutil.move(wd+"/"+'PTMs', wd+"/"+'yMap-results'+str(y))
            shutil.move('Domains', wd+"/"+'yMap-results'+str(y))
            shutil.move('Nucleotide_binding',wd+"/"+'yMap-results'+str(y))
            shutil.move(wd+"/"+'A-B-sites', wd+"/"+'yMap-results'+str(y))
            shutil.move(wd+"/"+'PDB', wd+"/"+'yMap-results'+str(y))
            shutil.move(wd+"/"+'Interface', wd+"/"+'yMap-results'+str(y))
            shutil.move(wd+"/"+'PPI', wd+"/"+'yMap-results'+str(y))
            shutil.move(wd+"/"+'PTMs_within_Proteins', wd+"/"+'yMap-results'+str(y))
            shutil.move(wd+"/"+'PTMs_between_Proteins',wd+"/"+'yMap-results'+str(y))
            shutil.move(wd+"/"+'PTMs_hotSpots',wd+"/"+'yMap-results'+str(y))
            shutil.move(wd+"/"+'mutation.txt', wd+"/"+'yMap-results'+str(y))
            shutil.move(wd+"/"+'final_report.txt', wd+"/"+'yMap-results'+str(y))
            shutil.move(wd+"/"+'pvalue.txt', wd+"/"+'yMap-results'+str(y))
            shutil.move(wd+"/"+'biog.txt', wd+"/"+'yMap-results'+str(y))
            os.remove(wd+"/"+'mutation_id.txt')
            os.remove(wd+"/"+'summary.txt')
        except IOError:
            pass
    return "All functional data from proteins mutation-positions is ready in about %s seconds" % (time.time() - start_time)
def web():
    """Launch the BioGrid web view for the mutated-protein networks.

    Prompts for the folder that contains biog.txt (typically one of the
    generated yMap-results directories), changes into it and renders the
    network pages from biog.txt.
    """
    results_dir = input('specify biog.txt path:')  # e.g. /yMap-results78.50792193412781
    os.chdir(results_dir)
    c.bweb('biog.txt')
    return "Web is ready for networks exploration of mutated proteins"
def path():
    """Change into the directory holding biog.txt for visualisation.

    Prompts the user for the directory; failures to change directory are
    swallowed and the reminder string is returned either way (matching the
    original best-effort behaviour).
    """
    try:
        # Bug fix: the original called raw_input(), which exists only in
        # Python 2; the rest of this script uses Python 3's input().
        os.chdir(input("paste here path to biog.txt file:"))
    except IOError:
        pass
    return "you need to provide path/to/biog.txt"
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    # NOTE(review): these options are consumed as on/off switches below but
    # are declared without action='store_true', so each one currently needs
    # a dummy value on the command line (e.g. "-g 1") -- confirm intended.
    parser.add_argument('-d', '--ydata', help='downloads required data to run yMap successfully')
    parser.add_argument('-g','--ygenes', help='performs the yMap on genes level mutation positions')
    parser.add_argument('-p', '--yproteins', help='performs the yMap on proteins level mutation positions')
    parser.add_argument('-w', '--yweb', help='generates BioGrid web pages for interactome visualisation; paste the path to biog.txt file')
    args = parser.parse_args()
    # One action per invocation; IOErrors are swallowed to keep the CLI
    # best-effort, mirroring the library functions above.
    if args.ydata:
        try:
            data()
        except IOError:
            pass
    elif args.ygenes:
        try:
            ymap_genes()
        except IOError:
            pass
    elif args.yproteins:
        try:
            ymap_proteins()
        except IOError:
            pass
    elif args.yweb:
        try:
            web()
        except IOError:
            pass
    else:
        print ("to run a function seek help")
|
import json
from datetime import datetime
from numpy import array
from graphviz import Digraph
from log_parser.block_manager import BlockManager
from log_parser.executor import Executor
from log_parser.job import Job
from log_parser.job import Task
def get_json(line):
    """Parse one Spark event-log line into a JSON object.

    The trailing newline is stripped first; any newlines embedded in the
    middle of the JSON payload are then escaped so the parser accepts them.
    """
    sanitized = line.strip("\n").replace("\n", "\\n")
    return json.loads(sanitized)
class LogParser:
    """Parse a Spark JSON event log into jobs, stages, tasks, executors and
    block managers, and produce text reports and DAG plots.

    Events are dispatched to ``do_<EventName>`` methods by naming
    convention; unrecognised event types are collected and optionally
    reported once at the end of :meth:`process`.
    """

    def __init__(self, filename, is_logging_enable=False):
        """filename: path to the Spark event log (one JSON object per line).
        is_logging_enable: if True, print a warning listing event types that
        had no handler."""
        self.filename = filename
        self.parsed_data = {}     # scalar facts about the run (versions, timestamps, stats)
        self.block_managers = []  # BlockManager instances in order of appearance
        self.executors = {}       # executor id -> Executor
        self.jobs = {}            # job id -> Job
        self.tasks = {}           # task id -> Task
        self.is_logging_enable = is_logging_enable

    def do_SparkListenerLogStart(self, data):
        """Record the Spark version that wrote this log."""
        self.parsed_data["spark_version"] = data["Spark Version"]

    def do_SparkListenerBlockManagerAdded(self, data):
        """Register a newly added block manager."""
        self.block_managers.append(BlockManager(data))

    def do_SparkListenerEnvironmentUpdate(self, data):
        """Record JVM and application environment details."""
        self.parsed_data["java_version"] = data["JVM Information"]["Java Version"]
        self.parsed_data["app_name"] = data["Spark Properties"]["spark.app.name"]
        self.parsed_data["app_id"] = data["Spark Properties"]["spark.app.id"]
        # spark.driver.memory / spark.executor.memory are not always present
        # in the log, so they are deliberately not parsed here.
        self.parsed_data["commandline"] = data["System Properties"]["sun.java.command"]

    def do_SparkListenerApplicationStart(self, data):
        self.parsed_data["app_start_timestamp"] = data["Timestamp"]

    def do_SparkListenerApplicationEnd(self, data):
        self.parsed_data["app_end_timestamp"] = data["Timestamp"]

    def do_SparkListenerJobStart(self, data):
        """Create a Job record; duplicate job IDs are reported and skipped."""
        job_id = data["Job ID"]
        if job_id in self.jobs:
            print("ERROR: Duplicate job ID!")
            return
        self.jobs[job_id] = Job(data)

    def do_SparkListenerStageSubmitted(self, data):
        # Stages are reconstructed from the job-start events; nothing to do.
        pass

    def do_SparkListenerExecutorAdded(self, data):
        self.executors[data["Executor ID"]] = Executor(data)

    def do_SparkListenerTaskStart(self, data):
        self.tasks[data["Task Info"]["Task ID"]] = Task(data)

    def do_SparkListenerTaskEnd(self, data):
        self.tasks[data["Task Info"]["Task ID"]].finish(data)

    def do_SparkListenerExecutorRemoved(self, data):
        self.executors[data["Executor ID"]].remove(data)

    def do_SparkListenerBlockManagerRemoved(self, data):
        pass

    def do_SparkListenerStageCompleted(self, data):
        """Mark the matching stage, in whichever job owns it, as complete."""
        stage_id = data["Stage Info"]["Stage ID"]
        for j in self.jobs.values():
            for s in j.stages:
                if s.stage_id == stage_id:
                    s.complete(data)

    def do_SparkListenerJobEnd(self, data):
        self.jobs[data["Job ID"]].complete(data)

    def process_name_only(self):
        """Cheap pass that extracts only the environment info (app name/id)."""
        with open(self.filename, "r") as log_file:
            for line in log_file:
                json_data = get_json(line)
                if json_data["Event"] == "SparkListenerEnvironmentUpdate":
                    self.do_SparkListenerEnvironmentUpdate(json_data)

    def process(self):
        """Parse the whole log, dispatch every event, then link
        tasks/stages/executors and compute summary statistics."""
        with open(self.filename, "r") as log_file:
            unsupported_event_types = set()
            for line in log_file:
                json_data = get_json(line)
                event_type = json_data["Event"]
                # Dispatch by naming convention: event "X" -> method "do_X".
                # This covers exactly the 14 handlers defined above and
                # replaces the original if/elif chain.
                handler = getattr(self, "do_" + event_type, None)
                if handler is not None:
                    handler(json_data)
                else:
                    unsupported_event_types.add(event_type)
        if len(unsupported_event_types) > 0 and self.is_logging_enable:
            print("WARNING: unknown event types:\n\t{}".format("\n\t".join(unsupported_event_types)))
        # Link block managers and executors (the driver has no executor entry).
        for bm in self.block_managers:
            if bm.executor_id != "driver":
                self.executors[bm.executor_id].block_managers.append(bm)
        # Attach each task to its executor and to its stage in every job.
        for t in self.tasks.values():
            self.executors[t.executor_id].tasks.append(t)
            for j in self.jobs.values():
                for s in j.stages:
                    if s.stage_id == t.stage_id:
                        s.tasks.append(t)
        self.parsed_data["num_failed_tasks"] = 0
        self.parsed_data["num_success_tasks"] = 0
        for t in self.tasks.values():
            if t.end_reason != "Success":
                self.parsed_data["num_failed_tasks"] += 1
            else:
                self.parsed_data["num_success_tasks"] += 1
        # Average/stddev/min/max runtime over successful tasks only.
        all_runtimes = array([x.finish_time - x.launch_time
                              for x in self.tasks.values() if x.end_reason == "Success"])
        if all_runtimes.size:
            self.parsed_data["tot_avg_task_runtime"] = all_runtimes.mean()
            self.parsed_data["tot_std_task_runtime"] = all_runtimes.std()
            self.parsed_data["min_task_runtime"] = all_runtimes.min()
            self.parsed_data["max_task_runtime"] = all_runtimes.max()
        else:
            # Robustness fix: numpy raises ValueError on min()/max() of an
            # empty array, so report zeros when no task succeeded.
            self.parsed_data["tot_avg_task_runtime"] = 0.0
            self.parsed_data["tot_std_task_runtime"] = 0.0
            self.parsed_data["min_task_runtime"] = 0.0
            self.parsed_data["max_task_runtime"] = 0.0

    def get_app_name(self):
        return self.parsed_data["app_name"]

    def get_app_id(self):
        return self.parsed_data["app_id"]

    def generate_report(self):
        """Return a human-readable text report covering jobs, tasks,
        executors and block managers. Requires process() to have run."""
        s = "Report for '{}' execution {}\n".format(self.parsed_data["app_name"], self.parsed_data["app_id"])
        s += "Spark version: {}\n".format(self.parsed_data["spark_version"])
        s += "Java version: {}\n".format(self.parsed_data["java_version"])
        s += "Application Start time: {}\n".format(
            datetime.fromtimestamp(self.parsed_data["app_start_timestamp"] / 1000))
        s += "Application End time: {}\n".format(datetime.fromtimestamp(self.parsed_data["app_end_timestamp"] / 1000))
        s += "Commandline: {}\n\n".format(self.parsed_data["commandline"])
        s += "---> Jobs <---\n"
        s += "In total, there are {} jobs in {}\n".format(len(self.jobs), self.parsed_data["app_name"])
        s += "\n"
        for j in self.jobs.values():
            s += j.report(0)
            s += "\n"
        s += "---> Tasks <---\n"
        s += "Total tasks: {}\n".format(len(self.tasks))
        s += "Successful tasks: {}\n".format(self.parsed_data["num_success_tasks"])
        s += "Failed tasks: {}\n".format(self.parsed_data["num_failed_tasks"])
        s += "Task average runtime: {} ({} stddev)\n".format(self.parsed_data["tot_avg_task_runtime"],
                                                             self.parsed_data["tot_std_task_runtime"])
        s += "Task min/max runtime: {} min, {} max\n".format(self.parsed_data["min_task_runtime"],
                                                             self.parsed_data["max_task_runtime"])
        for t in self.tasks.values():
            s += t.report(0)
            s += "\n"
        s += "---> Executors <---\n"
        s += "In total, there are {} executors in {}\n".format(len(self.executors), self.parsed_data["app_name"])
        s += "\n"
        for e in self.executors.values():
            s += e.report(0)
            s += "\n"
        s += "---> Block managers <---\n"
        s += "In total, there are {} block managers in {}\n".format(len(self.block_managers),
                                                                    self.parsed_data["app_name"])
        for bm in self.block_managers:
            s += bm.report(0)
            s += "\n"
        return s

    def save_plot_of_stages_dag(self, filename, view=False):
        """Render the stage-dependency DAG (one node per stage) via graphviz."""
        dag = Digraph()
        for j in self.jobs.values():
            for s in j.stages:
                assert type(s.stage_id) == int
                dag.node(str(s.stage_id), f"{s.stage_id} ({s.get_completion_time()}ms, j={j.job_id})")
        for j in self.jobs.values():
            for s in j.stages:
                for parent in s.parent_ids:
                    assert type(parent) == int
                    assert type(s.stage_id) == int
                    dag.edge(str(parent), str(s.stage_id))
        dag.render(filename, view=view)

    def save_plot_of_rdds_dag(self, filename, view=False):
        """Render the RDD-lineage DAG (one node per RDD) via graphviz."""
        dag = Digraph()
        for j in self.jobs.values():
            for s in j.stages:
                for r in s.RDDs:
                    assert type(r.rdd_id) == int
                    dag.node(str(r.rdd_id), f"{r.name} {r.rdd_id} (j={j.job_id}, s={s.stage_id})")
        for j in self.jobs.values():
            for s in j.stages:
                for r in s.RDDs:
                    for parent in r.parent_ids:
                        assert type(parent) == int
                        assert type(r.rdd_id) == int
                        dag.edge(str(parent), str(r.rdd_id))
        dag.render(filename, view=view)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from uvctypes import *
import time
import cv2
import numpy as np
try:
from queue import Queue
except ImportError:
from Queue import Queue
import platform
from datetime import datetime
# Bounded hand-off queue between the libuvc callback thread and the main
# loop; at most BUF_SIZE frames are buffered and extra frames are dropped.
BUF_SIZE = 2
q = Queue(BUF_SIZE)
def py_frame_callback(frame, userptr):
    """libuvc frame callback: expose the Y16 frame as a uint16 numpy array
    and hand it to the consumer queue.

    Runs on the libuvc streaming thread; must stay fast and never raise.
    """
    # Bug fix: validate the payload size BEFORE building an array view over
    # the raw buffer -- the original constructed the (width*height) uint16
    # view first, reading past the buffer for short/truncated frames.
    if frame.contents.data_bytes != (2 * frame.contents.width * frame.contents.height):
        return
    array_pointer = cast(frame.contents.data, POINTER(c_uint16 * (frame.contents.width * frame.contents.height)))
    # Zero-copy view over the driver buffer, reshaped to (height, width).
    data = np.frombuffer(
        array_pointer.contents, dtype=np.dtype(np.uint16)
    ).reshape(
        frame.contents.height, frame.contents.width
    )  # no copy
    # Drop frames when the consumer is behind instead of blocking libuvc.
    if not q.full():
        q.put(data)
# ctypes function-pointer wrapper handed to libuvc as the frame callback;
# must be kept alive at module scope so it is not garbage collected.
PTR_PY_FRAME_CALLBACK = CFUNCTYPE(None, POINTER(uvc_frame), c_void_p)(py_frame_callback)
def ktof(val):
    """Convert a raw sensor value (centi-Kelvin) to degrees Fahrenheit."""
    degrees_c = ktoc(val)
    return degrees_c * 1.8 + 32.0
def ktoc(val):
    """Convert a raw sensor value (centi-Kelvin) to degrees Celsius."""
    ZERO_C_IN_CENTIKELVIN = 27315
    return (val - ZERO_C_IN_CENTIKELVIN) / 100.0
def raw_to_8bit(data):
    """Stretch 16-bit radiometric data (in place) and return an 8-bit RGB image."""
    # Normalise to the full 16-bit range in place, then keep the high byte.
    cv2.normalize(data, data, 0, 65535, cv2.NORM_MINMAX)
    np.right_shift(data, 8, data)
    eight_bit = np.uint8(data)
    return cv2.cvtColor(eight_bit, cv2.COLOR_GRAY2RGB)
def display_temperature(img, val_k, loc, color):
    """Draw the Fahrenheit temperature label and a small crosshair at loc."""
    fahrenheit = ktof(val_k)
    label = "{0:.1f} degF".format(fahrenheit)
    cv2.putText(img, label, loc, cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 2)
    x, y = loc
    # Crosshair marking the sampled pixel.
    cv2.line(img, (x - 2, y), (x + 2, y), color, 1)
    cv2.line(img, (x, y - 2), (x, y + 2), color, 1)
def main():
    """Open the first UVC thermal camera, stream Y16 frames and dump them.

    Initialises libuvc, negotiates a Y16 (raw radiometric) stream, then
    loops forever: take a frame from the queue, convert it to an 8-bit
    image, write it to test.jpg and print the raw max value. Nested
    try/finally blocks mirror the init/open/stream acquisition order so
    each resource is released in reverse order.
    """
    ctx = POINTER(uvc_context)()
    dev = POINTER(uvc_device)()
    devh = POINTER(uvc_device_handle)()
    ctrl = uvc_stream_ctrl()
    res = libuvc.uvc_init(byref(ctx), 0)
    if res < 0:
        print("uvc_init error")
        exit(1)
    try:
        res = libuvc.uvc_find_device(ctx, byref(dev), 0, 0, 0)
        if res < 0:
            print("uvc_find_device error")
            exit(1)
        try:
            res = libuvc.uvc_open(dev, byref(devh))
            if res < 0:
                print("uvc_open error")
                exit(1)
            print("device opened!")
            print_device_info(devh)
            print_device_formats(devh)
            # Y16 is the uncompressed 16-bit radiometric format.
            frame_formats = uvc_get_frame_formats_by_guid(devh, VS_FMT_GUID_Y16)
            if len(frame_formats) == 0:
                print("device does not support Y16")
                exit(1)
            libuvc.uvc_get_stream_ctrl_format_size(devh, byref(ctrl), UVC_FRAME_FORMAT_Y16,
                frame_formats[0].wWidth, frame_formats[0].wHeight, int(1e7 / frame_formats[0].dwDefaultFrameInterval)
            )
            res = libuvc.uvc_start_streaming(devh, byref(ctrl), PTR_PY_FRAME_CALLBACK, None, 0)
            if res < 0:
                print("uvc_start_streaming failed: {0}".format(res))
                exit(1)
            try:
                start_time = datetime.now()
                while True:
                    #try:
                    # Block for up to 30 s waiting for the next frame.
                    data = q.get(True, 30)
                    if data is None:
                        # NOTE(review): restarts the pipeline by recursing
                        # into main(), deepening the stack each time --
                        # confirm this restart strategy is intended.
                        print("data is none")
                        main()
                        break
                    #except:
                    # print("error ... will reset...")
                    # cv2.destroyAllWindows()
                    # libuvc.uvc_stop_streaming(devh)
                    # libuvc.uvc_unref_device(dev)
                    # libuvc.uvc_exit(ctx)
                    # main()
                    #data = cv2.resize(data[:,:], (640, 480))
                    minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(data)
                    img = raw_to_8bit(data)
                    #display_temperature(img, minVal, minLoc, (255, 0, 0))
                    #display_temperature(img, maxVal, maxLoc, (0, 0, 255))
                    stop_time = datetime.now()
                    elapsed_time = stop_time-start_time
                    # maxVal/1000: raw counts scaled for display; q.qsize()
                    # shows whether the consumer keeps up with the camera.
                    print(str(maxVal/1000) + " " + str(q.qsize()) + str(elapsed_time) )
                    #cv2.imshow('Lepton Radiometry', img)
                    cv2.imwrite("test.jpg",img)
                    #cv2.imwrite("test" + str(maxVal) + ".jpg",img)
                    cv2.waitKey(1)
                    cv2.destroyAllWindows()
            finally:
                libuvc.uvc_stop_streaming(devh)
                print("done")
        finally:
            libuvc.uvc_unref_device(dev)
            print("uvc done")
    finally:
        libuvc.uvc_exit(ctx)
        print("uvc exit")
# Script entry point.
if __name__ == '__main__':
    main()
|
from pyb import UART
from pyb import delay
from pyb import micros, elapsed_micros
# This hashmap collects all generic AT commands
CMDS_GENERIC = {
    'TEST_AT': b'AT',
    'RESET': b'AT+RST',
    'VERSION_INFO': b'AT+GMR',
    'DEEP_SLEEP': b'AT+GSLP',
    'ECHO': b'ATE',
    'FACTORY_RESET': b'AT+RESTORE',
    'UART_CONFIG': b'AT+UART'
}
# All WIFI related AT commands
CMDS_WIFI = {
    'MODE' : b'AT+CWMODE',
    'CONNECT': b'AT+CWJAP',
    'LIST_APS': b'AT+CWLAP',
    'DISCONNECT': b'AT+CWQAP',
    'AP_SET_PARAMS': b'AT+CWSAP',
    'AP_LIST_STATIONS': b'AT+CWLIF',
    'DHCP_CONFIG': b'AT+CWDHCP',
    'SET_AUTOCONNECT': b'AT+CWAUTOCONN',
    'SET_STATION_MAC': b'AT+CIPSTAMAC',
    'SET_AP_MAC': b'AT+CIPAPMAC',
    'SET_STATION_IP': b'AT+CIPSTA',
    'SET_AP_IP': b'AT+CIPAP'
}
# IP networking related AT commands
CMDS_IP = {
    'STATUS': b'AT+CIPSTATUS',
    'START': b'AT+CIPSTART',
    'SEND': b'AT+CIPSEND',
    'CLOSE': b'AT+CIPCLOSE',
    'GET_LOCAL_IP': b'AT+CIFSR',
    'SET_MUX_MODE': b'AT+CIPMUX',
    'CONFIG_SERVER': b'AT+CIPSERVER',
    'SET_TX_MODE': b'AT+CIPMODE',
    'SET_TCP_SERVER_TIMEOUT': b'AT+CIPSTO',
    'UPGRADE': b'AT+CIUPDATE',
    'PING': b'AT+PING'
}
# WIFI network modes the ESP8266 knows to handle
WIFI_MODES = {
    1: 'Station',
    2: 'Access Point',
    3: 'Access Point + Station',
}
# Reverse feed lookup table.
# Bug fix: iterate over a snapshot of the keys -- inserting into the dict
# while iterating .keys() raises RuntimeError on Python 3.
for key in list(WIFI_MODES.keys()):
    WIFI_MODES[WIFI_MODES[key]] = key
# WIFI network security protocols known to the ESP8266 module
WIFI_ENCRYPTION_PROTOCOLS = {
    0: 'OPEN',
    1: 'WEP',
    2: 'WPA_PSK',
    3: 'WPA2_PSK',
    4: 'WPA_WPA2_PSK'
}
# Reverse feed lookup table (same snapshot fix as above).
for key in list(WIFI_ENCRYPTION_PROTOCOLS.keys()):
    WIFI_ENCRYPTION_PROTOCOLS[WIFI_ENCRYPTION_PROTOCOLS[key]] = key
class CommandError(Exception):
    """Raised when an AT command is empty/unknown or the module answers 'ERROR'."""
    pass
class CommandFailure(Exception):
    """Raised when an AT command executes but the module reports 'FAIL'."""
    pass
class UnknownWIFIModeError(Exception):
    """Raised when a WIFI mode value is not one of the modes in WIFI_MODES."""
    pass
class ESP8266(object):
    """Driver for an ESP8266 WIFI module attached over UART, speaking the
    Espressif AT command set (written against IoT AT firmware 0.9.5)."""

    def __init__(self, uart=1, baud_rate=115200):
        """Initialize this module. uart may be an integer or an instance
        of pyb.UART. baud_rate can be used to set the Baud rate for the
        serial communication."""
        if uart:
            if type(uart) is int:
                self.uart = UART(uart, baud_rate)
            elif type(uart) is UART:
                self.uart = uart
            else:
                raise Exception("Argument 'uart' must be an integer or pyb.UART object!")
        else:
            raise Exception("Argument uart must not be 'None'!")

    def _send_command(self, cmd, timeout=0, debug=False):
        """Send a command to the ESP8266 module over UART and return the
        output.

        After sending the command there is a 1 second timeout while
        waiting for an answer on UART. For long running commands (like AP
        scans) there is an additional grace period (3 s by default, or the
        given timeout) to return results over UART.

        Raises a CommandError if an error occurs and a CommandFailure if
        a command fails to execute."""
        if debug:
            start = micros()
        cmd_output = []
        okay = False
        if cmd == '' or cmd == b'':
            # Bug fix: build the message via str() -- concatenating str and
            # bytes raised TypeError when cmd was b''.
            raise CommandError("Unknown command '" + str(cmd) + "'!")
        # AT commands must be finalized with an '\r\n'
        # NOTE(review): cmd is normally bytes (from the CMDS_* tables) while
        # '\r\n' is str; MicroPython tolerates this mix -- confirm on the
        # target interpreter.
        cmd += '\r\n'
        if debug:
            print("%8i - TX: %s" % (elapsed_micros(start), str(cmd)))
        self.uart.write(cmd)
        # Wait at maximum one second (100 x 10 ms) for a command reaction.
        cmd_timeout = 100
        while cmd_timeout > 0:
            if self.uart.any():
                cmd_output.append(self.uart.readline())
                if debug:
                    print("%8i - RX: %s" % (elapsed_micros(start), str(cmd_output[-1])))
                if cmd_output[-1].rstrip() == b'OK':
                    if debug:
                        print("%8i - 'OK' received!" % (elapsed_micros(start)))
                    okay = True
            delay(10)
            cmd_timeout -= 1
        if cmd_timeout == 0 and len(cmd_output) == 0:
            if debug == True:
                print("%8i - RX timeout of answer after sending AT command!" % (elapsed_micros(start)))
            else:
                print("RX timeout of answer after sending AT command!")
        # Read any output still pending on the UART.
        while self.uart.any():
            cmd_output.append(self.uart.readline())
            if debug:
                print("%8i - RX: %s" % (elapsed_micros(start), str(cmd_output[-1])))
            if cmd_output[-1].rstrip() == b'OK':
                if debug:
                    print("%8i - 'OK' received!" % (elapsed_micros(start)))
                okay = True
        # Interpret the collected output of the AT command.
        if len(cmd_output) > 0:
            if cmd_output[-1].rstrip() == b'ERROR':
                raise CommandError('Command error!')
            elif cmd_output[-1].rstrip() == b'OK':
                okay = True
            elif not okay:
                # Some long running commands do not return OK in case of
                # success and/or take some time to yield all output.
                if timeout == 0:
                    cmd_timeout = 300
                else:
                    if debug:
                        print("%8i - Using RX timeout of %i ms" % (elapsed_micros(start), timeout))
                    cmd_timeout = timeout / 10
                while cmd_timeout > 0:
                    delay(10)
                    if self.uart.any():
                        cmd_output.append(self.uart.readline())
                        if debug:
                            print("%8i - RX: %s" % (elapsed_micros(start), str(cmd_output[-1])))
                        if cmd_output[-1].rstrip() == b'OK':
                            okay = True
                            break
                        elif cmd_output[-1].rstrip() == b'FAIL':
                            raise CommandFailure()
                    cmd_timeout -= 1
        if not okay and cmd_timeout == 0 and debug:
            print("%8i - RX-Timeout occured and no 'OK' received!" % (elapsed_micros(start)))
        return cmd_output

    @classmethod
    def _join_args(cls, *args, debug=True):
        """Joins all given arguments as the ESP8266 needs them for the
        argument string in a 'set' type command.

        Strings must be quoted using '"' and no spaces outside of quoted
        strings are allowed."""
        # Unwrap nested single-element tuples (callers that forward their
        # own *args as one tuple). Bug fix: guard on len(args) == 1 -- the
        # original looped forever when args[0] was a tuple but several
        # arguments were given, since it never re-assigned args.
        while len(args) == 1 and type(args[0]) is tuple:
            args = args[0]
        if debug:
            print(args)
        str_args = []
        for arg in args:
            if type(arg) is str:
                str_args.append('"' + arg + '"')
            elif type(arg) is bytes:
                str_args.append(arg.decode())
            elif type(arg) is bool:
                # Booleans are encoded as 0/1.
                str_args.append(str(int(arg)))
            else:
                str_args.append(str(arg))
        if debug:
            print(str_args)
        return ','.join(str_args).encode()

    @classmethod
    def _parse_accesspoint_str(cls, ap_str):
        """Parse an accesspoint string description into a hashmap
        containing its parameters. Returns None if the string could not be
        split into 3 or 5 fields."""
        if type(ap_str) is str:
            ap_str = ap_str.encode()
        ap_params = ap_str.split(b',')
        if len(ap_params) == 5:
            (enc_mode, ssid, rssi, mac, channel) = ap_params
            ap = {
                'encryption_protocol': int(enc_mode),
                'ssid': ssid,
                'rssi': int(rssi),
                'mac': mac,
                'channel': int(channel)
            }
        elif len(ap_params) == 3:
            (enc_mode, ssid, rssi) = ap_params
            ap = {
                'encryption_protocol': int(enc_mode),
                'ssid': ssid,
                'rssi': int(rssi),
            }
        else:
            ap = None
        return ap

    def _query_command(self, cmd, timeout=0, debug=False):
        """Sends a 'query' type command and return the relevant output
        line, containing the queried parameter."""
        return self._send_command(cmd + b'?', timeout=timeout, debug=debug)[1].rstrip()

    def _set_command(self, cmd, *args, timeout=0, debug=False):
        """Send a 'set' type command and return all lines of the output
        which are not command echo and status codes.

        This type of AT command usually does not return output except the
        echo and 'OK' or 'ERROR'; those are stripped, so the result of this
        method is usually an empty list."""
        return self._send_command(cmd + b'=' + ESP8266._join_args(args, debug=debug), timeout=timeout, debug=debug)[1:-2]

    def _execute_command(self, cmd, timeout=0, debug=False):
        """Send an 'execute' type command and return all lines of the
        output which are not command echo and status codes."""
        return self._send_command(cmd, timeout=timeout, debug=debug)[1:-2]

    def test(self, debug=False):
        """Test the AT command interface."""
        return self._execute_command(CMDS_GENERIC['TEST_AT'], debug=debug) == []

    def reset(self, debug=False):
        """Reset the module and read the boot message.

        ToDo: Interpret the boot message and do something reasonable with
        it, if possible."""
        boot_log = []
        if debug:
            start = micros()
        self._execute_command(CMDS_GENERIC['RESET'], debug=debug)
        # Wait for the module to boot and messages appearing on self.uart.
        timeout = 300
        while not self.uart.any() and timeout > 0:
            delay(10)
            timeout -= 1
        if debug and timeout == 0:
            print("%8i - RX timeout occured!" % (elapsed_micros(start)))
        # Wait for messages to finish.
        timeout = 300
        while timeout > 0:
            if self.uart.any():
                boot_log.append(self.uart.readline())
                if debug:
                    print("%8i - RX: %s" % (elapsed_micros(start), str(boot_log[-1])))
            delay(20)
            timeout -= 1
        if debug and timeout == 0:
            print("%8i - RTimeout occured while waiting for module to boot!" % (elapsed_micros(start)))
        # Robustness fix: the original indexed boot_log[-1] even when no
        # boot output was ever received (IndexError).
        return len(boot_log) > 0 and boot_log[-1].rstrip() == b'ready'

    def get_mode(self, debug=False):
        """Returns the mode the ESP WIFI is in:
            1: station mode
            2: accesspoint mode
            3: accesspoint and station mode
        Check the hashmap esp8266.WIFI_MODES for a name lookup.
        Raises an UnknownWIFIModeError if the mode was not valid or
        unknown."""
        # Bug fix: the original passed debug=debug to int() instead of to
        # _query_command(), raising TypeError on every call.
        mode = int(self._query_command(CMDS_WIFI['MODE'], debug=debug).split(b':')[1])
        if mode in WIFI_MODES.keys():
            return mode
        else:
            raise UnknownWIFIModeError("Mode '%s' not known!" % mode)

    def set_mode(self, mode, debug=False):
        """Set the given WIFI mode.

        Raises UnknownWIFIModeError in case of unknown mode."""
        if mode not in WIFI_MODES.keys():
            raise UnknownWIFIModeError("Mode '%s' not known!" % mode)
        return self._set_command(CMDS_WIFI['MODE'], mode, debug=debug)

    def get_accesspoint(self, debug=False):
        """Read the SSID of the currently joined access point.

        The SSID 'No AP' tells us that we are not connected to an access
        point!"""
        answer = self._query_command(CMDS_WIFI["CONNECT"], debug=debug)
        if answer == b'No AP':
            result = None
        else:
            # Strip the '+CWJAP:' prefix and the surrounding quotes.
            result = answer.split(b'+' + CMDS_WIFI['CONNECT'][3:] + b':')[1][1:-1]
        return result

    def connect(self, ssid, psk, debug=False):
        """Tries to connect to a WIFI network using the given SSID and
        pre shared key (PSK). Uses a 20 second timeout for the connect
        command.

        Bugs: AT firmware v0.21 has a bug to only join a WIFI which SSID
        is 10 characters long."""
        self._set_command(CMDS_WIFI['CONNECT'], ssid, psk, debug=debug, timeout=20000)

    def disconnect(self, debug=False):
        """Disconnect from the currently joined WIFI network."""
        return self._execute_command(CMDS_WIFI['DISCONNECT'], debug=debug) == []

    @classmethod
    def _parse_list_ap_results(cls, ap_scan_results):
        """Parse the raw output lines of an AP scan into a list of hashmaps."""
        aps = []
        for ap in ap_scan_results:
            try:
                ap_str = ap.rstrip().split(CMDS_WIFI['LIST_APS'][-4:] + b':')[1].decode()[1:-1]
            except IndexError:
                # Catching this exception means the line in the scan result
                # was probably rubbish.
                continue
            # Parsing ap_str may not work because of rubbish strings
            # returned from the AT command. None is returned in this case.
            ap = ESP8266._parse_accesspoint_str(ap_str)
            if ap:
                aps.append(ap)
        return aps

    def list_all_accesspoints(self, debug=False):
        """List all available access points.

        TODO: The IoT AT firmware 0.9.5 seems to sporadically yield
        rubbish or mangled AP-strings. Check needed!"""
        return ESP8266._parse_list_ap_results(self._execute_command(CMDS_WIFI['LIST_APS'], debug=debug))

    def list_accesspoints(self, *args):
        """List accesspoints matching the parameters given by the
        argument list.

        The arguments may be of the types string or integer. Strings can
        describe MAC addresses or SSIDs while the integers refer to
        channel names."""
        return ESP8266._parse_list_ap_results(self._set_command(CMDS_WIFI['LIST_APS'], args))

    def set_accesspoint_config(self, ssid, password, channel, encrypt_proto, debug=False):
        """Configure the parameters for the accesspoint mode. The module
        must be in access point mode for this to work.

        After setting the parameters the module is reset to activate them.
        The password must be 8..64 characters long. WEP is not allowed as
        an encryption protocol.

        Raises CommandFailure in case the WIFI mode is not set to mode 2
        (access point) or 3 (access point and station) or the WIFI
        parameters are not valid."""
        if self.get_mode() not in (2, 3):
            raise CommandFailure('WIFI not set to an access point mode!')
        if type(ssid) is not str:
            raise CommandFailure('SSID must be of type str!')
        if type(password) is not str:
            raise CommandFailure('Password must be of type str!')
        if len(password) > 64 or len(password) < 8:
            raise CommandFailure('Wrong password length (8..64)!')
        # Bug fix: the original combined these checks with 'and', which let
        # out-of-range integer channels (e.g. 20) pass validation.
        if type(channel) is not int or channel not in range(1, 15):
            raise CommandFailure('Invalid WIFI channel!')
        if encrypt_proto not in (0, 2, 3, 4) or type(encrypt_proto) is not int:
            raise CommandFailure('Invalid encryption protocol!')
        self._set_command(CMDS_WIFI['AP_SET_PARAMS'], ssid, password, channel, encrypt_proto, debug=debug)
        self.reset()

    def get_accesspoint_config(self):
        """Reads the current access point configuration. The module must
        be in an access point mode to work.

        Returns a hashmap containing the access point parameters.
        Raises CommandFailure in case of wrong WIFI mode set."""
        if self.get_mode() not in (2, 3):
            raise CommandFailure('WIFI not set to an access point mode!')
        (ssid, password, channel, encryption_protocol) = self._query_command(CMDS_WIFI['AP_SET_PARAMS'], debug=False).split(b':')[1].split(b',')
        return {
            'ssid': ssid,
            'password': password,
            'channel': int(channel),
            'encryption_protocol': int(encryption_protocol)
        }

    def list_stations(self):
        """List IPs of stations which are connected to the access point.

        ToDo: Parse result and return python list of IPs (as str)."""
        return self._execute_command(CMDS_WIFI['AP_LIST_STATIONS'], debug=False)

    def set_dhcp_config(self, mode, status, debug=False):
        """Set the DHCP configuration for a specific mode.

        Oddities (per the Espressif documentation): the mode here is NOT
        the WIFI mode from set_mode()/get_mode():
            0: access point (softAP)
            1: station
            2: access point and station
        and the status argument is inverted on the wire:
            0: enable
            1: disable
        """
        # Invert status to make the call to this method reasonable.
        if type(status) is int:
            status = bool(status)
        if type(status) is bool:
            status = not status
        return self._set_command(CMDS_WIFI['DHCP_CONFIG'], mode, status, debug=debug)

    def set_autoconnect(self, autoconnect, debug=False):
        """Set if the module should connect to an access point on startup."""
        return self._set_command(CMDS_WIFI['SET_AUTOCONNECT'], autoconnect, debug=debug)

    def get_station_ip(self, debug=False):
        """Get the IP address of the module in station mode."""
        return self._query_command(CMDS_WIFI['SET_STATION_IP'], debug=debug)

    def set_station_ip(self, ip_str, debug=False):
        """Set the IP address of the module in station mode.

        The IP address must be given as a string. No check on the
        correctness of the IP address is made."""
        return self._set_command(CMDS_WIFI['SET_STATION_IP'], ip_str, debug=debug)

    def get_accesspoint_ip(self, debug=False):
        """Get the IP address of the module in access point mode."""
        return self._query_command(CMDS_WIFI['SET_AP_IP'], debug=debug)

    def set_accesspoint_ip(self, ip_str, debug=False):
        """Set the IP address of the module in access point mode.

        The IP address must be given as a string. No check on the
        correctness of the IP address is made."""
        return self._set_command(CMDS_WIFI['SET_AP_IP'], ip_str, debug=debug)

    def get_connection_status(self):
        """Get connection information.

        ToDo: Parse returned data and return python data structure."""
        return self._execute_command(CMDS_IP['STATUS'])

    def start_connection(self, protocol, dest_ip, dest_port, debug=False):
        """Start a TCP or UDP connection.

        ToDo: Implement MUX mode. Currently only single connection mode is
        supported!"""
        self._set_command(CMDS_IP['START'], protocol, dest_ip, dest_port, debug=debug)

    def send(self, data, debug=False):
        """Send data over the current connection."""
        self._set_command(CMDS_IP['SEND'], len(data), debug=debug)
        print(b'>' + data)
        self.uart.write(data)

    def ping(self, destination, debug=False):
        """Ping the destination address or hostname."""
        return self._set_command(CMDS_IP['PING'], destination, debug=debug)
|
from .config import add_fcos_config
# The FCOS class must be imported so the model gets registered in detectron2's META_ARCH registry.
from .fcos import FCOS
|
from interpreter import Builtin, to_str, Import, lenlist
def call_ref(interpreter, v):
    """Invoke the reference-like object v with the interpreter.

    Raises TypeError if v has no ``call`` attribute (i.e. is not a
    reference).
    """
    # Bug fix: the original wrapped the whole invocation in
    # `except AttributeError`, so an AttributeError raised INSIDE v.call()
    # was misreported as "not a reference". Probe the attribute instead.
    call = getattr(v, 'call', None)
    if call is None:
        raise TypeError(f'"{v}" is not a reference')
    call(interpreter)
# Keep a reference to the builtin list type before any local name can shadow it.
_list = list
def _find_getch():
try:
import termios
except ImportError:
# Non-POSIX. Return msvcrt's (Windows') getch.
import msvcrt
return msvcrt.getch
# POSIX system. Create and return a getch that manipulates the tty.
import sys, tty
def _getch():
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(fd)
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
return _getch
# Resolve the platform-specific getch once at import time.
getch = _find_getch()
def fint(v: float) -> int:
    """Coerce a number or bool to int for the 'int' builtin.

    Raises AssertionError (same type the original raised) when v is not a
    float, int or bool.
    """
    # Bug fix: use an explicit raise instead of `assert` so the validation
    # survives running under `python -O`.
    if not isinstance(v, (float, int, bool)):
        raise AssertionError('"int" expects number or bool')
    return int(v)
def export():
    """Return the table of builtin words exposed to the interpreter.

    Each entry maps a word to a Builtin(name, arity, fn); fn receives the
    interpreter plus `arity` values popped from the stack.
    """
    values = {
        'clear': Builtin('clear', 0, lambda interpreter: interpreter.stack.clear()),
        'dump': Builtin('dump', 0, lambda interpreter: interpreter.stack.append(f'[{", ".join(to_str(v, repr_=True) for v in interpreter.stack)}]')),
        'trace': Builtin('trace', 0, lambda interpreter: interpreter.stack.append(interpreter.trace())),
        'stacklen': Builtin('stacklen', 0, lambda interpreter: interpreter.stack.append(len(interpreter.stack))),
        # CHARACTER IN DOES NOT WORK IN PYCHARM IDE!! ONLY WORKS IN NORMAL CMD!!!
        'inch': Builtin('inch', 0, lambda interpreter: interpreter.stack.append(getch())),
        '~': Builtin('~', 1, lambda interpreter, v: interpreter.stack.append(~v)),
        '!': Builtin('!', 1, lambda interpreter, v: interpreter.stack.append(True if v == 0 else False)),
        '@': Builtin('@', 1, lambda interpreter, v: call_ref(interpreter, v)),
        'out': Builtin('out', 1, lambda interpreter, v: print(to_str(v), end='')),
        'outln': Builtin('outln', 1, lambda interpreter, v: print(v)),
        'in': Builtin('in', 1, lambda interpreter, v: interpreter.stack.append(input(v))),
        'exit': Builtin('exit', 1, lambda interpreter, v: exit(v)),
        # Bug fix: 'sqrt' was a copy-paste of 'in' (named 'in' and reading
        # stdin); it now computes the square root of its argument.
        'sqrt': Builtin('sqrt', 1, lambda interpreter, v: interpreter.stack.append(v ** 0.5)),
        'int': Builtin('int', 1, lambda interpreter, v: interpreter.stack.append(fint(v))),
        'dup': Builtin('dup', 1, lambda interpreter, v: interpreter.stack.extend((v, v))),
        'rem': Builtin('rem', 1, lambda interpreter, v: [interpreter.stack.pop() for _ in range(v)]),
        'pull': Builtin('pull', 1, lambda interpreter, v: interpreter.stack.append(interpreter.stack.pop(-v))),
        'import': Import('import'),
        'expand': Builtin('expand', 1, lambda interpreter, v: interpreter.stack.extend(lenlist(v))),
        'collect': Builtin('collect', 1, lambda interpreter, v: interpreter.stack.append(interpreter.pop_stack(v))),
        'len': Builtin('len', 1, lambda interpreter, v: interpreter.stack.append(len(v))),
        'index': Builtin('index', 2, lambda interpreter, a, b: interpreter.stack.append(a[b])),  # array/str(a) index(b)
        'drop': Builtin('drop', 1, lambda interpreter, v: ()),
        '+': Builtin('+', 2, lambda interpreter, a, b: interpreter.stack.append(a + b)),
        '-': Builtin('-', 2, lambda interpreter, a, b: interpreter.stack.append(a - b)),
        '*': Builtin('*', 2, lambda interpreter, a, b: interpreter.stack.append(a * b)),
        '/': Builtin('/', 2, lambda interpreter, a, b: interpreter.stack.append(a / b)),
        '**': Builtin('**', 2, lambda interpreter, a, b: interpreter.stack.append(a ** b)),
        '%': Builtin('%', 2, lambda interpreter, a, b: interpreter.stack.append(a % b)),
        '|': Builtin('|', 2, lambda interpreter, a, b: interpreter.stack.append(a | b)),
        '^': Builtin('^', 2, lambda interpreter, a, b: interpreter.stack.append(a ^ b)),
        '&': Builtin('&', 2, lambda interpreter, a, b: interpreter.stack.append(a & b)),
        '>>': Builtin('>>', 2, lambda interpreter, a, b: interpreter.stack.append(a >> b)),
        '<<': Builtin('<<', 2, lambda interpreter, a, b: interpreter.stack.append(a << b)),
        '=': Builtin('=', 2, lambda interpreter, a, b: interpreter.stack.append(a == b)),
        '>': Builtin('>', 2, lambda interpreter, a, b: interpreter.stack.append(a > b)),
        '<': Builtin('<', 2, lambda interpreter, a, b: interpreter.stack.append(a < b)),
        '>=': Builtin('>=', 2, lambda interpreter, a, b: interpreter.stack.append(a >= b)),
        '<=': Builtin('<=', 2, lambda interpreter, a, b: interpreter.stack.append(a <= b)),
        'swap': Builtin('swap', 2, lambda interpreter, a, b: interpreter.stack.extend((b, a))),
        'push': Builtin('push', 2, lambda interpreter, a, b: interpreter.stack.insert(-b, a)),  # obj(a) dest (b)
        'sth': Builtin('sth', 3, lambda interpreter, a, b, c: interpreter.stack.extend((c, a, b))),  # a b c -> c b a
    }
    return values
|
from coinbase_commerce.util import register_resource_cls
from .base import (
CreateAPIResource,
DeleteAPIResource,
ListAPIResource,
UpdateAPIResource,
)
__all__ = (
'Checkout',
)
@register_resource_cls
class Checkout(ListAPIResource,
               CreateAPIResource,
               UpdateAPIResource,
               DeleteAPIResource):
    """Coinbase Commerce checkout resource supporting list/create/update/delete."""
    # URL path segment used by the base classes to build endpoint URLs.
    RESOURCE_PATH = "checkouts"
    # NOTE(review): registry name is "checkout_aio" while the path is
    # "checkouts" — presumably the "_aio" suffix marks the async client
    # variant registered via @register_resource_cls; confirm intended key.
    RESOURCE_NAME = "checkout_aio"
|
# coding: utf-8
import sys
from plugins import Plugin
class FirstPlugin(Plugin):
    """Minimal example plugin: registers for private messages but does nothing."""
    # Hook names this plugin subscribes to; the bot dispatches matching events
    # to the methods of the same name.
    hooks = ['on_privmsg']
    def __init__(self):
        pass
    def on_privmsg(self, bot, source, target, message):
        # Called for every PRIVMSG event; intentionally a no-op in this skeleton.
        pass
|
from sympycore import *
def test_Matrix1():
    """Constructor forms: size, flat list, diagonal=, permutation=, nested lists."""
    cases = [
        # (positional args, keyword args, rows, cols, expected contents)
        ((2,), {}, 2, 2, [[0, 0], [0, 0]]),
        (([1, 2],), {}, 2, 1, [[1], [2]]),
        (([1, 2],), {'diagonal': True}, 2, 2, [[1, 0], [0, 2]]),
        (([0, 1],), {'permutation': True}, 2, 2, [[1, 0], [0, 1]]),
        (([1, 0],), {'permutation': True}, 2, 2, [[0, 1], [1, 0]]),
        (([1, [2]],), {}, 2, 1, [[1], [2]]),  # mixed scalar/row entries
        (([[1, 2], [3, 4]],), {}, 2, 2, [[1, 2], [3, 4]]),
        (([[1, 2, 3], [4, 5, 6]],), {}, 2, 3, [[1, 2, 3], [4, 5, 6]]),
    ]
    for args, kwargs, rows, cols, contents in cases:
        m = Matrix(*args, **kwargs)
        assert m.rows == rows
        assert m.cols == cols
        assert m.tolist() == contents
def test_Matrix_properties():
    """Structural predicates: square/lower/upper, orthogonality, echelon forms."""
    a = Matrix([[1,0,0,0],[1,1,0,0],[1,1,1,0],[1,1,1,1]])
    assert a.is_square
    assert a.is_lower
    assert not a.is_upper
    assert a.T.is_upper
    assert not a.is_row_echelon_form
    # Introducing an entry above the diagonal breaks triangularity both ways.
    a[0,2] = 1
    assert not a.is_lower
    assert not a.is_upper
    assert not a.T.is_upper
    assert Matrix([[1,0],[0,1]]).is_orthogonal
    assert Matrix([[1,0],[0,-1]]).is_orthogonal
    assert Matrix([[0,0,0,1],[0,0,1,0],[1,0,0,0],[0,1,0,0]]).is_orthogonal
    assert not Matrix([[1,0],[2,-1]]).is_orthogonal
    assert not Matrix([[1,0],[0,1],[0,1]]).is_orthogonal
    assert Matrix([[1,0],[0,1]]).is_row_echelon_form
    assert Matrix([[1,0],[0,1]]).is_row_canonical_form
    assert not Matrix([[1,0],[1,1]]).is_row_echelon_form
    assert not Matrix([[1,0],[1,1]]).is_row_canonical_form
    assert Matrix([[1,1],[0,1]]).is_row_echelon_form
    assert not Matrix([[1,1],[0,1]]).is_row_canonical_form
    assert Matrix([[1,1,0],[0,0,1],[0,0,0]]).is_row_echelon_form
    a = Matrix([[1,1,0],[0,0,1],[0,0,0]])
    assert a.is_row_canonical_form
    assert not Matrix([[1,1,0],[0,0,0],[0,0,1]]).is_row_echelon_form
    assert Matrix([[0,1,0],[0,0,1],[0,0,0]]).is_row_echelon_form
    assert Matrix([[0,1,0],[0,0,1],[0,0,0]]).is_row_canonical_form
    assert Matrix([[0,1,0,1],[0,0,1,1],[0,0,0,2]]).is_row_echelon_form
    assert not Matrix([[0,1,0,1],[0,0,1,1],[0,0,0,2]]).is_row_canonical_form
    assert not Matrix([[0,1,0,1],[0,0,1,1],[0,0,0,1]]).is_row_canonical_form
    assert Matrix([[0,1,0,0],[0,0,1,0],[0,0,0,1]]).is_row_canonical_form
    # NOTE(review): the last two rows below have 10 entries while the first
    # four have 9, and row 3 starts with a float 0.0 — presumably exercising
    # ragged/mixed-type input handling; confirm this is intentional.
    a = Matrix([[1,2,0,0,3,4,0,5,0],[0,0,1,0,2,3,0,4,0],[0,0,0,1,2,3,0,4,0],[0.0,0,0,0,0,0,1,2,0],[0,0,0,0,0,0,0,0,0,1],[0,0,0,0,0,0,0,0,0,0]])
    assert a.is_row_echelon_form
    assert a.is_row_canonical_form
def test_Matrix2():
    """Matrix(rows, cols) builds a zero matrix of the requested shape."""
    m = Matrix(2, 3)
    assert (m.rows, m.cols) == (2, 3)
    assert m.tolist() == [[0, 0, 0], [0, 0, 0]]
def test_Matrix3():
    """Matrix(rows, cols, data) accepts sparse-dict and flat-list initializers."""
    sparse = Matrix(2, 3, {(0, 0): 1, (1, 2): 2})
    assert (sparse.rows, sparse.cols) == (2, 3)
    assert sparse.tolist() == [[1, 0, 0], [0, 0, 2]]
    dense = Matrix(2, 3, [1, 2, 3, 4, 5, 6])
    assert (dense.rows, dense.cols) == (2, 3)
    assert dense.tolist() == [[1, 2, 3], [4, 5, 6]]
def test_T():
    """Transposition swaps rows and columns."""
    m = Matrix([[1, 2], [3, 4]])
    transposed = m.T
    assert transposed.tolist() == [[1, 3], [2, 4]]
def test_get_items():
    """(i, j) indexing returns elements; a single index returns a row view."""
    m = Matrix([[1, 2], [3, 4]])
    for (i, j), expected in {(0, 0): 1, (0, 1): 2, (1, 0): 3, (1, 1): 4}.items():
        assert m[i, j] == expected
    assert m[0].tolist() == [[1, 2]]
    assert m[1].tolist() == [[3, 4]]
    # Indexing the transpose view yields columns of the base matrix.
    assert m.T[0].tolist() == [[1, 3]]
    assert m.T[1].tolist() == [[2, 4]]
    assert m.T[0].T.tolist() == [[1], [3]]
    assert m.T[1].T.tolist() == [[2], [4]]
def test_get_row_slice():
    """Row slices produce sub-matrices, supporting open and negative bounds."""
    m = Matrix([[1, 2], [3, 4]])
    cases = [
        (slice(None), [[1, 2], [3, 4]]),
        (slice(None, 2), [[1, 2], [3, 4]]),
        (slice(None, 1), [[1, 2]]),
        (slice(1, 2), [[3, 4]]),
        (slice(1, None), [[3, 4]]),
        (slice(None, -1), [[1, 2]]),
    ]
    for rows, expected in cases:
        assert m[rows].tolist() == expected
def test_get_column_slice():
    """Column indexing combined with a full row slice returns column vectors."""
    m = Matrix([[1, 2], [3, 4]])
    assert m[:, 0].tolist() == [[1], [3]]
    assert m[:, 1].tolist() == [[2], [4]]
    assert m[:, :].tolist() == [[1, 2], [3, 4]]
def test_get_slice():
    """Two-dimensional slices extract rectangular sub-matrices."""
    m = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    assert m[:2, :2].tolist() == [[1, 2], [4, 5]]
    assert m[1:, 1:].tolist() == [[5, 6], [8, 9]]
    # Slicing the transpose view slices in transposed coordinates.
    assert m.T[:2, :2].tolist() == [[1, 4], [2, 5]]
    assert m[:2, 1:].tolist() == [[2, 3], [5, 6]]
def test_set_item():
    """Element, row/column-slice, strided and transposed-view assignment."""
    a = Matrix(3,3)
    a[0,0] = 1
    a[0,1] = 2
    a[0,2] = 3
    assert a.tolist()==[[1,2,3],[0,0,0],[0,0,0]]
    a = Matrix(3,3)
    a[0,:] = [[1,2,3]]
    a[2,:] = [[7,8,9]]
    assert a.tolist()==[[1,2,3],[0,0,0],[7,8,9]]
    # Assigning rows of the transpose view writes columns of the base matrix.
    a = Matrix(3,3)
    a.T[0,:] = [[1,2,3]]
    a.T[2,:] = [[7,8,9]]
    assert a.tolist()==[[1,0,7],[2,0,8],[3,0,9]]
    a = Matrix(3,3)
    a[:,0] = [[1],2,3]
    a[:,2] = [[7],8,9]
    assert a.tolist()==[[1,0,7],[2,0,8],[3,0,9]]
    # Strided row assignment: rows 0 and 2 come from the 2x3 source matrix.
    a = Matrix(3,3)
    b = Matrix([[1,2,3],[7,8,9]])
    a[::2] = b
    assert a.tolist()==[[1,2,3],[0,0,0],[7,8,9]]
    a[:2,:] = 0*b
    assert a.tolist()==[[0,0,0],[0,0,0],[7,8,9]]
    a.T[0,:] = [[1,0,0]]
    assert a.tolist()==[[1,0,0],[0,0,0],[0,8,9]]
    a = Matrix([[1,2],[3,4]])
    a.T[1,0] = 22
    a.T[0,1] = 0
    assert a.tolist()==[[1,22],[0,4]]
    # Repeating the same assignment is idempotent.
    a.T[0,1] = 0
    assert a.tolist()==[[1,22],[0,4]]
    # Scalar broadcast into rectangular and strided regions.
    a = Matrix([[1,2,3],[4,5,6],[7,8,9]])
    a[:2,:2] = 11
    assert a.tolist()==[[11,11,3],[11,11,6],[7,8,9]]
    a[1:,:] = 0
    assert a.tolist()==[[11,11,3],[0,0,0],[0,0,0]]
    a[:,:1] = 0
    assert a.tolist()==[[0,11,3],[0,0,0],[0,0,0]]
    a.T[::2,1] = 22
    assert a.tolist()==[[0,11,3],[22,0,22],[0,0,0]]
    a.T[:,0] = 0
    assert a.tolist()==[[0,0,0],[22,0,22],[0,0,0]]
    a.T[:1,:] = 0
    assert a.tolist()==[[0,0,0],[0,0,22],[0,0,0]]
def test_get_diagonal():
    """The D view exposes diagonals as column vectors, indexed by offset."""
    m = Matrix([[1, 2, 3],
                [4, 5, 6],
                [7, 8, 9]])
    assert m.D[0].tolist() == [[1], [5], [9]]   # main diagonal
    assert m.D[1].tolist() == [[2], [6]]        # first super-diagonal
    assert m.D[-1].tolist() == [[4], [8]]       # first sub-diagonal
def test_set_diagonal():
    """Assignment through the D (diagonal) view: scalars and per-entry lists."""
    a = Matrix([[1,2,3],[4,5,6],[7,8,9]])
    a.D[0] = 0
    assert a.tolist()==[[0,2,3],
                        [4,0,6],
                        [7,8,0]]
    a.D[1] = 1
    assert a.tolist()==[[0,1,3],
                        [4,0,1],
                        [7,8,0]]
    # Negative offsets address sub-diagonals.
    a.D[-1] = 2
    assert a.tolist()==[[0,1,3],
                        [2,0,1],
                        [7,2,0]]
    # Setting a super-diagonal of the transpose writes a sub-diagonal here.
    a.T.D[1] = 3
    assert a.tolist()==[[0,1,3],
                        [3,0,1],
                        [7,3,0]]
    # A list assigns entry-by-entry along the diagonal.
    a.D[0] = [1,2,3]
    assert a.tolist()==[[1,1,3],
                        [3,2,1],
                        [7,3,3]]
    a.D[1] = [7,8]
    assert a.tolist()==[[1,7,3],
                        [3,2,8],
                        [7,3,3]]
def test_iadd():
    """In-place addition mutates shared data unless the matrix was hashed first."""
    m = m_alias = Matrix([[1, 2], [3, 4]])
    m += 1
    assert m.tolist() == [[2, 3], [4, 5]]
    assert m.data is m_alias.data  # same underlying storage was mutated
    # hash() freezes the matrix: += must then copy instead of mutating.
    m = m_alias = Matrix([[1, 2], [3, 4]])
    hash(m)
    m += 1
    assert m.tolist() == [[2, 3], [4, 5]]
    assert m_alias.tolist() == [[1, 2], [3, 4]]
    assert m.data is not m_alias.data
    m = m_alias = Matrix([[1, 2], [3, 4]])
    other = Matrix([[1, -2], [-3, 4]])
    m += other
    assert m.tolist() == [[2, 0], [0, 8]]
    assert m.data is m_alias.data
    m = m_alias = Matrix([[1, 2], [3, 4]])
    other = Matrix([[1, -2], [-3, 4]])
    hash(m)
    m += other
    assert m.tolist() == [[2, 0], [0, 8]]
    assert m_alias.tolist() == [[1, 2], [3, 4]]
    assert m.data is not m_alias.data
    # Reflected cases: scalar += matrix, and matrix += its own transpose.
    m = 1
    m_alias = Matrix([[1, 2], [3, 4]])
    m += m_alias
    assert m.tolist() == [[2, 3], [4, 5]]
    m += m.T
    assert m.tolist() == [[4, 7], [7, 10]]
    scalar = 2
    scalar += m
    assert scalar == 2 + m
def test_add():
    """Addition works with scalars on either side and between matrices."""
    m = Matrix([[1, 2], [3, 4]])
    assert (m + 1).tolist() == [[2, 3], [4, 5]]
    assert (1 + m).tolist() == [[2, 3], [4, 5]]
    other = Matrix([[1, -2], [-3, 4]])
    assert (m + other).tolist() == [[2, 0], [0, 8]]
def test_isub():
    """In-place subtraction mutates the shared storage of an unhashed matrix."""
    m = m_alias = Matrix([[1, 2], [3, 4]])
    m -= 1
    assert m.tolist() == [[0, 1], [2, 3]]
    assert m.data is m_alias.data
    m -= Matrix([[1, -2], [-3, 4]])
    assert m.tolist() == [[-1, 3], [5, -1]]
    assert m.data is m_alias.data
    m -= m.T
    assert m.tolist() == [[0, -2], [2, 0]]
    assert m.data is m_alias.data
    # Reflected in-place subtraction on a plain scalar: 2 - m.
    scalar = 2
    scalar -= m
    assert scalar == 2 - m
def test_sub():
    """Subtraction works with scalars on either side and between matrices."""
    m = Matrix([[1, 2], [3, 4]])
    assert (1 - m).tolist() == [[0, -1], [-2, -3]]
    assert (m - 1).tolist() == [[0, 1], [2, 3]]
    other = Matrix([[3, 4], [1, 2]])
    assert (m - other).tolist() == [[-2, -2], [2, 2]]
def test_posneg():
    """Unary plus is a no-op; unary minus negates every element."""
    m = Matrix([[1, 2], [3, 4]])
    assert (+m).tolist() == [[1, 2], [3, 4]]
    assert (-m).tolist() == [[-1, -2], [-3, -4]]
def test_imul():
    """In-place multiply: scalar/matrix/array products, copy-on-write after hash()."""
    m = m_alias = Matrix([[1, 2], [3, 4]])
    m *= 2
    assert m.tolist() == [[2, 4], [6, 8]]
    assert m.data is m_alias.data
    # A hashed (frozen) matrix must allocate new storage for *=.
    m = m_alias = Matrix([[1, 2], [3, 4]])
    hash(m)
    m *= 2
    assert m.tolist() == [[2, 4], [6, 8]]
    assert m_alias.tolist() == [[1, 2], [3, 4]]
    assert m.data is not m_alias.data
    # Matrix*matrix product is not done in place: the alias keeps its value.
    m = m_alias = Matrix([[1, 2], [3, 4]])
    m *= m
    assert m.tolist() == [[7, 10], [15, 22]]
    assert m_alias.tolist() == [[1, 2], [3, 4]]
    # Elementwise product against the array view mutates in place.
    m = m_alias = Matrix([[1, 2], [3, 4]])
    m *= m.A
    assert m.tolist() == [[1, 4], [9, 16]]
    assert m.data is m_alias.data
    m = m_alias = Matrix([[1, 2], [3, 4]])
    hash(m)
    m *= m.A
    assert m.tolist() == [[1, 4], [9, 16]]
    assert m_alias.tolist() == [[1, 2], [3, 4]]
    assert m.data is not m_alias.data
    scalar = 2
    scalar *= m
    assert scalar == 2 * m
def test_mul():
    """Matrix product vs elementwise product (A view) and scalar scaling."""
    # NB: the backquote repr in the first assert is Python 2-only syntax.
    a = Matrix([[1,2], [3,4]])
    assert (a*a).tolist() == [[7,10],[15,22]],`(a*a).tolist()`
    # An A (array) view on either operand switches to elementwise multiply.
    assert (a.A*a).tolist() == [[1,4],[9,16]]
    assert (a*a.A).tolist() == [[1,4],[9,16]]
    assert (a*a.T).tolist() == [[5,11],[11,25]]
    assert (a.T*a).tolist() == [[10,14],[14,20]]
    assert (a.T*a.T).tolist() == [[7,15],[10,22]]
    assert (a.A*a.T).tolist() == [[1,6],[6,16]]
    assert (a*a.T.A).tolist() == [[1,6],[6,16]]
    assert (a*2).tolist() == [[2,4],[6,8]]
    assert (2*a).tolist() == [[2,4],[6,8]]
def test_div():
    """Scalar division, with exact rational (mpq) results when not divisible."""
    m = m_alias = Matrix([[1, 2], [3, 4]]) * 2
    assert (m / 2).tolist() == [[1, 2], [3, 4]]
    assert (m / 4).tolist() == [[mpq((1, 2)), 1], [mpq((3, 2)), 2]]
    m /= 2
    assert m.tolist() == [[1, 2], [3, 4]]
    assert m.data is m_alias.data  # in-place division keeps shared storage
def test_rdiv():
    """Dividing by a matrix yields its inverse; array views divide elementwise."""
    m = Matrix([[1, 2], [3, 4]])
    assert (1 / m) == m.inv()
    assert (m / m).is_identity
    recip = 1
    recip /= m
    assert recip == m.inv()
    assert (1 / m.A).tolist() == [[1, mpq((1, 2))], [mpq((1, 3)), mpq((1, 4))]]
def test_pow():
    """Integer powers: positive via repeated products, negative via the inverse."""
    m = Matrix([[1, 2], [3, 4]])
    assert m ** 0 == eye(2)
    product = m
    for k in range(1, 6):
        assert m ** k == product  # m**k equals the k-fold left-to-right product
        product = product * m
    inv = m.inv()
    assert m ** (-1) == inv
    assert m ** (-2) == inv * inv
    assert m ** (-3) == inv ** 3
    # Array views exponentiate elementwise instead.
    assert (m.A ** 1).tolist() == [[1, 2], [3, 4]]
    assert (m.A ** 2).tolist() == [[1, 4], [9, 16]]
    assert (m.A ** -1).tolist() == [[1, mpq((1, 2))], [mpq((1, 3)), mpq((1, 4))]]
def test_views():
    """The A property switches to the elementwise array head; M stays a matrix."""
    m = Matrix([[1, 2], [3, 4]])
    assert not m.head.is_array
    assert not m.M.head.is_array
    assert m.A.head.is_array
    arr = m.A
    assert arr.A is arr  # A is idempotent: an array view returns itself
def test_trace():
    """trace() sums the main diagonal, including via the D view and sparse data."""
    assert Matrix([[1, 3], [6, 9]]).trace() == 10
    assert Matrix([[1, 3], [6, 9]]).D.trace() == 10
    assert Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).trace() == 15
    # Large all-zero matrix: setting two diagonal entries updates the trace.
    big = Matrix(10000, 10000)
    assert big.trace() == 0
    big[100, 100] = 3
    big[1000, 1000] = 4
    assert big.trace() == 7
def test_solve():
    """solve() and the // operator produce solutions of m * x == b."""
    assert Matrix([[1, 2], [3, 4]]).solve([1, 2]).tolist() == [[0], [mpq((1, 2))]]
    assert Matrix([[1, 2], [3, 4]]).solve([2, 1]).tolist() == [[-3], [mpq((5, 2))]]
    for size in (3, 5):
        # Draw random square matrices until an invertible one appears.
        while True:
            m = Matrix(size, size, random=True)
            if m.det():
                break
        # m // b solves the system column-by-column for several widths of b.
        for width in (1, 2, 15):
            b = Matrix(size, width, random=True)
            assert m * (m // b) == b
def test_resize():
    """resize() changes the visible shape; crop() discards hidden entries."""
    m = Matrix([[1, 2], [3, 4]])
    assert m.resize(3, 3).tolist() == [[1, 2, 0], [3, 4, 0], [0, 0, 0]]
    m = Matrix([[1, 2], [3, 4]])
    assert m.resize(2, 3).tolist() == [[1, 2, 0], [3, 4, 0]]
    m = Matrix([[1, 2], [3, 4]])
    assert m.resize(2, 1).tolist() == [[1], [3]]
    # Shrinking without crop() keeps the hidden column recoverable.
    m = Matrix([[1, 2], [3, 4]])
    assert m.resize(2, 1).resize(2, 2).tolist() == [[1, 2], [3, 4]]
    # crop() drops data outside the current shape — and mutates m itself.
    m = Matrix([[1, 2], [3, 4]])
    assert m.resize(2, 1).crop().resize(2, 2).tolist() == [[1, 0], [3, 0]]
    assert m.tolist() == [[1, 0], [3, 0]]
def test_inv():
    """A matrix times its inverse (in either order) is the identity."""
    m = Matrix([[1, 2], [3, 4]])
    assert (m * m.inv()).is_identity
    assert (m.inv() * m).is_identity
def test_lu_issue63():
    """Regression test (issue 63): LU factorization of a sparse 8x11 sign matrix."""
    entries = {(7, 3): -1, (6, 6): -1, (5, 6): 1, (2, 8): -1, (0, 3): 1,
               (1, 0): -1, (1, 2): -1, (4, 9): 1, (2, 9): 1, (1, 5): 1,
               (3, 0): 1, (7, 10): 1, (0, 4): -1, (4, 10): -1, (2, 6): 1,
               (5, 0): -1, (2, 10): 1, (3, 9): -1, (0, 5): -1, (6, 4): 1,
               (6, 1): -1, (5, 7): 1, (2, 4): 1}
    m = Matrix(8, 11, entries)
    p, l, u = m.lu()
    assert p * l * u == m
    # The reconstruction error must be exactly the empty sparse map.
    assert (p * l * u - m).data == {}
def test_solve_null():
    """Null-space solver: xd maps symbols to kernel columns; dep/indep split."""
    # NB: the backquote assert messages are Python 2 ``repr`` syntax.
    x = ['x1', 'x2', 'x3', 'x4', 'x5', 'x6']
    a = Matrix ([[2,3,5],[-4,2,3]])
    xd,dep,indep = a.solve_null(x)
    assert xd['x1'] == Matrix([mpq((-1, 16))]),`xd`
    assert xd['x2'] == Matrix([mpq((-13, 8))]),`xd`
    assert xd['x3'] == Matrix([1]),`xd`
    assert set(dep) == set(['x1', 'x2']),`dep`
    assert indep == ['x3'],`dep`
    # The kernel assembled from xd must be annihilated by a.
    ker = Matrix([xd[s] for s in x[:3]])
    assert (a*ker).is_zero,`a*ker`
    a = Matrix(2,3,random=True)
    xd,dep,indep = a.solve_null(x)
    ker = Matrix([xd[s] for s in x[:3]])
    assert (a*ker).is_zero,`a*ker`
    a = Matrix ([[1,0,-3,0,2,-8],[0,1,5,0,-1,4],[0,0,0,1,7,-9],[0,0,0,0,0,0]])
    xd,dep,indep = a.solve_null(x)
    ker = Matrix([xd[s] for s in x[:6]])
    assert (a*ker).is_zero,`a*ker`
    a = Matrix(4,6,random=True)
    xd,dep,indep = a.solve_null(x)
    ker = Matrix([xd[s] for s in x[:6]])
    assert (a*ker).is_zero,`a*ker`
    # Without an explicit symbol list, solve_null generates its own names.
    a = Matrix([[1,0,0,0,-1,-1,-1,0,0,0,0],
                [0,1,0,0,1,-1,-1,0,0,0,0],
                [0,0,-1,0,0,1,0,1,0,0,0],
                [0,0,0,-1,0,0,1,-1,0,0,0],
                [0,0,0,0,0,0,0,0,1,-1,0],
                [0,0,0,0,0,0,0,0,-1,0,1]])
    xd,dep,indep = a.solve_null()
    ker = Matrix([xd[s] for s in sorted (dep+indep)])
    assert (a*ker).is_zero,`a*ker`
def test_gauss_jordan_elimination():
    """Reduction to row-canonical form; the zero row is dropped from the result."""
    a = Matrix([[1,0,0,0,-1,-1,-1,0,0,0,0],
                [0,1,0,0,1,-1,-1,0,0,0,0],
                [0,0,-1,0,0,1,0,1,0,0,0],
                [0,0,0,-1,0,0,1,-1,0,0,0],
                [0,0,0,0,0,0,0,0,0,0,0],
                [0,0,0,0,0,0,0,0,1,-1,0],
                [0,0,0,0,0,0,0,0,-1,0,1]])
    r = Matrix([[1,0,0,0,-1,-1,-1,0,0,0,0],
                [0,1,0,0,1,-1,-1,0,0,0,0],
                [0,0,1,0,0,-1,0,-1,0,0,0],
                [0,0,0,1,0,0,-1,1,0,0,0],
                [0,0,0,0,0,0,0,0,1,0,-1],
                [0,0,0,0,0,0,0,0,0,1,-1]
                ])
    b = a.gauss_jordan_elimination()
    assert (b==r)
    # Round-trip through a transposed copy and reduce again.
    a = a.T[:].T
    b = a.gauss_jordan_elimination()
    assert (b-r).is_zero,'\n%s==\n%s' % (b,r)
def test_gauss_jordan_elimination_swap_columns():
    """With swap_columns=True the reduction also returns a column permutation."""
    a = Matrix([[1,0,0,0,-1,-1,-1,0,0,0,0],
                [0,1,0,0,1,-1,-1,0,0,0,0],
                [0,0,-1,0,0,1,0,1,0,0,0],
                [0,0,0,0,0,0,0,0,0,0,0],
                [0,0,0,-1,0,0,1,-1,0,0,0],
                [0,0,0,0,0,0,0,0,1,-1,0],
                [0,0,0,0,0,0,0,0,-1,0,1],
                ])
    r = Matrix([[1,0,0,0,-1,-1,-1,0,0,0,0],
                [0,1,0,0,1,-1,-1,0,0,0,0],
                [0,0,1,0,0,-1,0,-1,0,0,0],
                [0,0,0,1,0,0,-1,1,0,0,0],
                [0,0,0,0,0,0,0,0,1,0,-1],
                [0,0,0,0,0,0,0,0,0,1,-1]
                ])
    b,p = a.gauss_jordan_elimination(swap_columns=True)
    # p[k] is the column of the reference matrix matching column k of b.
    for k,j in enumerate (p):
        assert (b[:,k]==r[:,j])
    a = a.T[:].T
    b,p = a.gauss_jordan_elimination(swap_columns=True)
    for k,j in enumerate (p):
        assert (b[:,k]==r[:,j])
def test_get_gauss_jordan_elimination_operations():
    """The returned row-operations log can be replayed with apply_row_operations
    to reproduce the reduced matrix exactly."""
    # NB: bare ``print`` statements and backquotes make this Python 2 only.
    a = Matrix([[1,0,0,0,-1,-1,-1,0,0,0,0],
                [0,1,0,0,1,-1,-1,0,0,0,0],
                [0,0,-1,0,0,1,0,1,0,0,0],
                [0,0,-1,0,0,1,0,1,0,0,0],
                [0,0,0,-1,0,0,1,-1,0,0,0],
                [0,0,0,0,0,0,0,0,1,-1,0],
                [0,0,0,0,0,0,0,0,-1,0,1],
                [0,0,0,0,0,0,0,0,0,0,0],
                ]).T[:].T
    ab = Matrix ([[1, 0, 0, 0, -1, -1, -1, 0, 0, 0, 0],
                  [0, 1, 0, 0, 1, -1, -1, 0, 0, 0, 0],
                  [0, 0, 1, 0, 0, -1, 0, -1, 0, 0, 0],
                  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                  [0, 0, 0, 1, 0, 0, -1, 1, 0, 0, 0],
                  [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, -1],
                  [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1],
                  [0,0,0,0,0,0,0,0,0,0,0],
                  ])
    # b: reduced matrix, ops: replayable operations, r: pivot rows,
    # c: pivot columns, z: zero (eliminated) rows.
    b, ops, r, c, z = a.get_gauss_jordan_elimination_operations()
    assert r==[0, 1, 2, 4, 5, 6],`r`
    assert z==[7, 3],`z`
    assert c==[0, 1, 2, 3, 8, 9]
    if not b[:]==ab:
        print
        print b
        print ab
    assert b[:]==ab
    a1 = a.apply_row_operations(ops)
    assert a1[:]==ab
def test_general_solver():
    """Scratch/demo for the general solver on a flux-network matrix (Python 2:
    bare ``print`` statements). Produces output but asserts nothing."""
    # This first matrix is built and immediately discarded by the rebinding of
    # ``a`` below — presumably leftover from an earlier version of the test.
    a = Matrix([[1,0,0,0,-1,-1,-1,0,0,0,0],
                [0,1,0,0,1,-1,-1,0,0,0,0],
                [0,0,-1,0,0,1,0,1,0,0,0],
                [0,0,-1,0,0,1,0,1,0,0,0],
                [0,0,0,-1,0,0,1,-1,0,0,0],
                [0,0,0,0,0,0,0,0,1,-1,0],
                [0,0,0,0,0,0,0,0,-1,0,1],
                ])
    # Column labels for the flux network below; currently unused.
    cols = ['A_D','A_B','AB_C','B_D','C_DE','Eout','Ain','Dout','Cin']
    a = Matrix([[-1,-1,-1,0 ,0 ,0, 1, 0,0], # A
                [0 ,1 ,-1,-1,0 ,0, 0, 0,0], # B
                [0 ,0 ,1 ,0 ,-1,0, 0, 0,1], # C
                [1 ,0 ,0 ,1 ,1 ,0, 0,-1,0], # D
                [0 ,0 ,0 ,0 ,1 ,-1,0, 0,0], # E
                [0 ,0 ,0 ,0 ,0 ,0,-1, 0,0], # Asrc
                [0 ,0 ,0 ,0 ,0 ,0, 0, 1,0], # Dt
                [0 ,0 ,0 ,0 ,0 ,0, 0, 0,-1], # Csrc
                ])
    print
    print 'a'
    print a
    # NOTE(review): the unconditional ``break`` exits on the first iteration,
    # making the row-zeroing below it dead code — presumably a leftover
    # debugging edit; confirm before relying on this test.
    for i,row in enumerate(a):
        break
        if len(row.data)<=1:
            a[i] = 0
    print "a'"
    print a
    b, ops, r, c, z = a.get_gauss_jordan_elimination_operations(leading_cols=range(5),
                                                                leading_column_selection='sparsest first'
                                                                )
    print b
    print r
    print c
    print z
|
from typing import Optional, Union
import numpy as np
import torch
from torch.optim import Optimizer
from pyraug.customexception import LoadError
from pyraug.data.loaders import BaseDataGetter, ImageGetterFromFolder
from pyraug.data.preprocessors import DataProcessor
from pyraug.models import RHVAE, BaseVAE
from pyraug.models.rhvae import RHVAEConfig
from pyraug.trainers import Trainer
from pyraug.trainers.training_config import TrainingConfig
from .base_pipeline import Pipeline
class TrainingPipeline(Pipeline):
    """
    This Pipeline provides an end to end way to train your VAE model.

    The trained model will be saved in ``output_dir`` stated in the
    :class:`~pyraug.trainers.training_config.TrainingConfig`. A folder
    ``training_YYYY-MM-DD_hh-mm-ss`` is
    created where checkpoints and final model will be saved. Checkpoints are saved in
    ``checkpoint_epoch_{epoch}`` folder (optimizer and training config
    saved as well to resume training if needed)
    and the final model is saved in a ``final_model`` folder. If ``output_dir`` is
    None, data is saved in ``dummy_output_dir/training_YYYY-MM-DD_hh-mm-ss`` is created.

    Parameters:
        data_loader (Optional[BaseDataGetter]): The data loader you want to use to load your
            data. This is usefull to get the data from a particular format and in a specific folder
            for instance. If None, the :class:`~pyraug.data.loaders.ImageGetterFromFolder` is used.
            Default: None.

        data_processor (Optional[DataProcessor]): The data preprocessor you want to use to
            preprocess your data (*e.g.* normalization, reshaping, type conversion). If None,
            a basic :class:`~pyraug.data.preprocessors.DataProcessor` is used (by default data
            is normalized such that the max value of each data is 1 and the min 0). Default: None.

        model (Optional[BaseVAE]): An instance of :class:`~pyraug.models.BaseVAE` you want to train.
            If None, a default :class:`~pyraug.models.RHVAE` model is used. Default: None.

        optimizer (Optional[~torch.optim.Optimizer]): An instance of :class:`~torch.optim.Optimizer`
            used to train the model. If None we provide an instance of
            :class:`~torch.optim.Adam` optimizer. Default: None.

        training_config (Optional[TrainingConfig]=None): An instance of
            :class:`~pyraug.trainers.training_config.TrainingConfig` stating the training
            parameters. If None, a default configuration is used.

    .. note::
        If you did not provide any data_processor, a default one will be used. By default it
        normalizes the data so that the max value of each data equals 1 and min value 0.
    """
    def __init__(
        self,
        data_loader: Optional[BaseDataGetter] = None,
        data_processor: Optional[DataProcessor] = None,
        model: Optional[BaseVAE] = None,
        optimizer: Optional[Optimizer] = None,
        training_config: Optional[TrainingConfig] = None,
    ):
        self.data_loader = data_loader
        if data_processor is None:
            # Default: scale each sample individually to the [0, 1] range.
            data_processor = DataProcessor(
                data_normalization_type="individual_min_max_scaling"
            )
        self.data_processor = data_processor
        self.model = model
        self.optimizer = optimizer
        self.training_config = training_config

    def _set_default_model(self, data):
        """Build a default RHVAE whose input dimension is the flattened data shape."""
        model_config = RHVAEConfig(input_dim=int(np.prod(data.shape[1:])))
        self.model = RHVAE(model_config)

    def _load_data(self, data, label):
        """Load *data* through ``self.data_loader``, wrapping failures in LoadError.

        Args:
            data: Whatever the configured loader accepts (*e.g.* a folder path).
            label (str): ``'training'`` or ``'eval'``; used only in error messages.
        """
        try:
            return self.data_loader.load(data)
        except Exception as e:
            raise LoadError(
                f"Unable to load {label} data. Exception catch: {type(e)} with message: "
                + str(e)
            )

    def __call__(
        self,
        train_data: Union[str, np.ndarray, torch.Tensor],
        eval_data: Union[str, np.ndarray, torch.Tensor] = None,
        log_output_dir: str = None,
    ):
        """
        Launch the model training on the provided data.

        Args:
            train_data (Union[str, ~numpy.ndarray, ~torch.Tensor]): The training data coming from
                a folder in which each file is a data or a :class:`numpy.ndarray` or
                :class:`torch.Tensor` of shape (mini_batch x n_channels x data_shape)

            eval_data (Optional[Union[str, ~numpy.ndarray, ~torch.Tensor]]): The evaluation data coming from
                a folder in which each file is a data or a np.ndarray or torch.Tensor. If None, no
                evaluation data is used.

            log_output_dir (str): Directory where training logs are written.
        """
        # Lazily default to a folder-based image loader when given a path;
        # arrays/tensors pass through untouched when no loader is configured.
        if self.data_loader is None and isinstance(train_data, str):
            self.data_loader = ImageGetterFromFolder()
            train_data = self._load_data(train_data, "training")
        elif self.data_loader is not None:
            train_data = self._load_data(train_data, "training")
        train_data = self.data_processor.process_data(train_data)
        train_dataset = self.data_processor.to_dataset(train_data)
        self.train_data = train_data
        if self.model is None:
            self._set_default_model(train_data)
        if eval_data is not None:
            if self.data_loader is None and isinstance(eval_data, str):
                self.data_loader = ImageGetterFromFolder()
                # Bug fix: the loaded result was previously assigned to
                # train_data, silently discarding the eval set.
                eval_data = self._load_data(eval_data, "eval")
            elif self.data_loader is not None:
                eval_data = self._load_data(eval_data, "eval")
            eval_data = self.data_processor.process_data(eval_data)
            eval_dataset = self.data_processor.to_dataset(eval_data)
        else:
            eval_dataset = None
        trainer = Trainer(
            model=self.model,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            training_config=self.training_config,
            optimizer=self.optimizer,
        )
        self.trainer = trainer
        trainer.train(log_output_dir=log_output_dir)
|
# encoding: utf-8
# basics.py
# TODO documentation!
import numpy as np
from math import sqrt
from numba import jit
def pad_data(x, d1, d):
    """Zero-pad the columns of a 2-D array from width ``d`` to width ``d1``.

    Args:
        x: array of shape (n_samples, d).
        d1: target number of columns; must satisfy d1 >= d.
        d: current number of columns of ``x``.

    Returns:
        Array of shape (n_samples, d1) with ``d1 - d`` zero columns appended;
        rows are untouched.
    """
    # np.pad is the modern public spelling of the legacy np.lib.pad alias.
    return np.pad(x, ((0, 0), (0, d1 - d)), 'constant', constant_values=0)
@jit(nopython=True)
def radius(d, n):
    # Sample D/2 random radii as sqrt(2 * Gamma(rm/2, scale=1)) draws, where
    # rm = d + 2; presumably used to scale the simplex direction vectors when
    # forming quadrature points — confirm at the call site.
    rm = d + 2
    # Half of the total point count (reflected points reuse the same radii).
    D = int(get_D(d, n) / 2)
    r = np.sqrt(2 * np.random.gamma(rm/2., 1., D))
    return r
@jit(nopython=True)
def get_D(d, n):
    '''
    when using simplex method to generate points, we add also reflected ones,
    so the overall number of points created is 2 * (n+1) + 1, where 1 comes
    from the weight of function at zero. Since we add f(0) directly to the
    integral, we don't need to account for it in D here.
    '''
    # Total feature count: n simplexes of (d+1) vertices each, plus their
    # reflections, giving D = 2 * n * (d+1).
    D = int(n * (2 * (d+1)))
    return D
def generate_random_weights(d, n):
    '''
    Generates random Fourier features
    Args
        d: the input dimension of data
        n: total number of features is D = 2*n(d+1)
    Returns:
        M: the points to estimate integral with
        w: the weights of the points in M
    '''
    num_points = get_D(d, n)
    points = np.random.randn(num_points, d)
    # None signals that all points carry equal weight.
    return points, None
@jit(nopython=True)
def rnsimp(m):
    # Build the m x (m+1) matrix whose columns are the vertices of a regular
    # m-simplex (cf. simplex_matvec, which applies its transpose implicitly):
    # row i has a single positive diagonal entry and a constant negative tail.
    S = np.zeros((m, m + 1))
    mp = m + 1
    for i in range(m):
        rv = np.sqrt(mp / ((m - i) * m * (m - i + 1.)))
        S[i, i] = (m - i) * rv
        S[i, i+1:m+1] = -rv
    return S
@jit(nopython=True)
def simplex_matvec(x):
    '''
    V.T @ x (V is n-simplex)
    '''
    # Computes the product without materializing V (see rnsimp): row i of V
    # contributes rv[i]*(d-i) at position i and -rv[i] to every later entry;
    # the running sum s accumulates those negative tails.
    d = x.shape[0]
    mp = d + 1
    r = np.empty(d + 1)
    rv = np.empty(d)
    s = 0.
    for i in range(d):
        rv[i] = sqrt(mp / ((d-i) * d * (d-i+1.))) * x[i]
        r[i] = s + rv[i] * (d-i)
        s += -rv[i]
    # Last output entry collects only the accumulated tail contributions.
    r[d] = s
    return r
@jit(nopython=True)
def batch_simplex_matvec(x):
    '''
    V.T @ x for a batch: x has shape (d, nobj), result has shape (d+1, nobj).
    '''
    # Batched analogue of simplex_matvec; the tail accumulator s is kept
    # per column (one entry per object).
    nobj = x.shape[1]
    d = x.shape[0]
    mp = d + 1
    r = np.empty((d+1, nobj))
    rv = np.empty(d)
    s = np.zeros(nobj)
    for i in range(d):
        # Row coefficient is shared across the batch; only x[i, o] varies.
        rv[i] = sqrt(mp / ((d-i) * d * (d-i+1.)))
        for o in range(nobj):
            rvo = rv[i] * x[i, o]
            r[i, o] = s[o] + rvo * (d-i)
            s[o] += -rvo
    for o in range(nobj):
        r[d, o] = s[o]
    return r
|
"""Command-line interface for notesdir."""
import argparse
import dataclasses
from datetime import datetime
import json
from operator import itemgetter, attrgetter
import os.path
import sys
from terminaltables import AsciiTable
from notesdir.api import Notesdir
from notesdir.models import FileInfoReq, FileInfo
def _print_file_info(info: FileInfo, fields: FileInfoReq) -> None:
    """Print the requested fields of *info* in a human-readable, line-based form."""
    if fields.path:
        print(f'path: {info.path}')
    if fields.title:
        print(f'title: {info.title}')
    if fields.created:
        print(f'created: {info.created}')
    if fields.tags:
        print(f'tags: {", ".join(sorted(info.tags))}')
    if fields.links:
        print('links:')
        for link in info.links:
            referent = link.referent()
            # Append the resolved target only when the href actually resolves.
            suffix = f' -> {referent}' if referent else ''
            print(f'\t{link.href}{suffix}')
    if fields.backlinks:
        print('backlinks:')
        for backlink in info.backlinks:
            print(f'\t{backlink.referrer}')
def _info(args, nd: Notesdir) -> int:
    """Implement the ``info`` subcommand: show metadata for a single file."""
    if args.fields:
        fields = FileInfoReq.parse(args.fields[0])
    else:
        fields = FileInfoReq.full()
    info = nd.repo.info(args.path[0], fields)
    if args.json:
        print(json.dumps(info.as_json()))
    else:
        _print_file_info(info, fields)
    return 0
def _new(args, nd: Notesdir) -> int:
    """Implement ``new``: create a file from a template and report its path."""
    created_path = nd.new(args.template[0], args.dest)
    if not args.preview:
        print(f'Created {created_path}')
    return 0
def _change(args, nd: Notesdir) -> int:
    """Implement ``change``: apply tag/title/created updates to the given paths."""
    def _parse_tags(raw):
        # raw is a one-element list from argparse, or None; split on commas,
        # lowercase, and drop empty fragments.
        text = (raw or [''])[0]
        return {t.strip() for t in text.lower().split(',') if t.strip()}
    nd.change(set(args.paths),
              add_tags=_parse_tags(args.add_tags),
              del_tags=_parse_tags(args.del_tags),
              title=args.title[0] if args.title else None,
              created=datetime.fromisoformat(args.created[0]) if args.created else None)
    return 0
def _mv(args, nd: Notesdir) -> int:
    """Implement ``mv``: move a file/folder and report the resulting moves."""
    src, dest = args.src[0], args.dest[0]
    moves = nd.move({src: dest})
    if args.json:
        print(json.dumps(moves))
    elif not moves == {src: dest} and not args.preview:
        # NOTE(review): moves are only printed when they differ from the
        # literal request (e.g. the destination was adjusted for uniqueness)
        # — confirm this asymmetry with _organize is intended.
        for moved_from, moved_to in moves.items():
            print(f'Moved {moved_from} to {moved_to}')
    return 0
def _organize(args, nd: Notesdir) -> int:
    """Implement ``organize``: apply conf.path_organizer and report the moves."""
    moves = nd.organize()
    if args.json:
        print(json.dumps({str(src): str(dest) for src, dest in moves.items()}))
    elif moves and not args.preview:
        for src, dest in moves.items():
            print(f'Moved {src} to {dest}')
    return 0
def _backfill(args, nd: Notesdir) -> int:
    """Implement ``backfill``: fill in missing title/created metadata.

    Prints one line per updated file; errors go to stderr but do not affect
    the exit status, since some files are expected to be unsupported.
    """
    changed, errors = nd.backfill()
    if not args.preview:
        for path in changed:
            # Bug fix: previously printed the whole `changed` list once per file.
            print(f'Updated {path}')
    for error in errors:
        print(repr(error), file=sys.stderr)
    return 0
def _tags(args, nd: Notesdir) -> int:
    """Implement ``tags``: print tag usage counts, as JSON or an ASCII table."""
    query = args.query or ''
    counts = nd.repo.tag_counts(query)
    if args.json:
        print(json.dumps(counts))
    else:
        tags = sorted(counts.keys())
        data = [('Tag', 'Count')] + [(t, counts[t]) for t in tags]
        table = AsciiTable(data)
        # Bug fix: the table has two columns (0 = Tag, 1 = Count); the old code
        # right-justified nonexistent column 2, leaving Count left-aligned.
        table.justify_columns[1] = 'right'
        print(table.table)
    return 0
def _relink(args, nd: Notesdir) -> int:
    """Implement ``relink``: rewrite links targeting one path to another."""
    old_path, new_path = args.old[0], args.new[0]
    nd.replace_path_hrefs(old_path, new_path)
    return 0
def _query(args, nd: Notesdir) -> int:
    """Implement ``query``: list matching files as plain text, a table, or JSON."""
    query = args.query or ''
    # Directories can match queries too; keep only regular files.
    infos = [i for i in nd.repo.query(query) if os.path.isfile(i.path)]
    if args.fields:
        fields = FileInfoReq.parse(args.fields[0])
    else:
        fields = FileInfoReq(path=True, tags=True, title=True, created=True)
    if args.json:
        infos.sort(key=attrgetter('path'))
        print(json.dumps([i.as_json() for i in infos]))
    elif args.table:
        # TODO make sorting / path resolution consistent with json output
        # Rows are built positionally: each enabled field appends one cell,
        # in the same order the headings are appended below.
        data = []
        for info in infos:
            row = ()
            if fields.path:
                row += (os.path.basename(info.path),)
            if fields.title:
                row += (info.title or '',)
            if fields.created:
                row += (info.created.strftime('%Y-%m-%d') if info.created else '',)
            if fields.tags:
                row += ('\n'.join(sorted(info.tags)),)
            if fields.links:
                row += ('\n'.join(sorted({os.path.relpath(link.referent()) for link in info.links if link.referent()})),)
            if fields.backlinks:
                row += ('\n'.join(sorted({os.path.relpath(link.referrer) for link in info.backlinks})),)
            data.append(row)
        # NOTE(review): sorts on the first cell, which is only the filename
        # when fields.path is enabled — and raises IndexError if no fields
        # were requested at all; confirm whether that case can occur.
        data.sort(key=itemgetter(0))
        heading = ()
        if fields.path:
            heading += ('Filename',)
        if fields.title:
            heading += ('Title',)
        if fields.created:
            heading += ('Created',)
        if fields.tags:
            heading += ('Tags',)
        if fields.links:
            heading += ('Link paths',)
        if fields.backlinks:
            heading += ('Backlink paths',)
        data.insert(0, heading)
        table = AsciiTable(data)
        print(table.table)
    else:
        for info in infos:
            print('--------------------')
            _print_file_info(info, fields)
    return 0
def argparser() -> argparse.ArgumentParser:
    """Build the top-level argument parser with one subparser per command.

    Each subparser sets ``func`` to its handler; ``main`` dispatches on it.
    The ``preview`` default is set on the root parser so commands without a
    ``-p`` flag still have the attribute.
    """
    fields_help = f'Possible fields are: {", ".join(f.name for f in dataclasses.fields(FileInfoReq))}.'
    parser = argparse.ArgumentParser()
    parser.set_defaults(func=None, preview=False)
    subs = parser.add_subparsers(title='Commands')

    # --- info ---
    p_i = subs.add_parser('info', help='Show info about a file, such as metadata and links/backlinks.')
    p_i.add_argument('-f', '--fields', nargs=1,
                     help=f'Comma-separated list of fields to show. {fields_help} By default, all fields are shown.')
    p_i.add_argument('-j', '--json', action='store_true', help='Output as JSON.')
    p_i.add_argument('path', nargs=1)
    p_i.set_defaults(func=_info)

    # --- query ---
    p_q = subs.add_parser(
        'query',
        help='Query for files. For full query syntax, see the documentation of '
             'notesdir.models.FileQuery.parse - an example query is "tag:foo sort:title,-created".')
    p_q.add_argument('query', nargs='?', help='Query string. If omitted, the query matches all files.')
    p_q.add_argument('-f', '--fields', nargs=1,
                     help=f'Comma-separated list of fields to show. {fields_help} Not all fields are shown by default.')
    p_q_formats = p_q.add_mutually_exclusive_group()
    p_q_formats.add_argument('-j', '--json', help='Output as JSON.', action='store_true')
    p_q_formats.add_argument('-t', '--table', help='Format output as a table.', action='store_true')
    p_q.set_defaults(func=_query)

    # --- new ---
    p_c = subs.add_parser('new',
                          help='Create new file from a Mako template. You can either specify the path to the template, '
                               'or just give its name without file extensions if it is listed in "templates" in '
                               'your ~/notesdir.conf.py file. '
                               'This command will print the path of the newly created file.')
    p_c.add_argument('template', nargs=1, help='Name or path of template.')
    p_c.add_argument('dest', nargs='?',
                     help='Suggested destination filename. This may be overridden by the template, or adjusted '
                          'if it conflicts with an existing file. A filename will be selected for you if omitted.')
    p_c.add_argument('-p', '--preview', action='store_true', help='Print plan but do not create file')
    p_c.set_defaults(func=_new)

    # --- change ---
    p_change = subs.add_parser('change', help='Update metadata of the specified files.')
    p_change.add_argument('paths', nargs='+', help='Files to update.')
    p_change.add_argument('-a', '--add-tags', nargs=1,
                          help='Comma-separated list of tags to add (if not already present).')
    p_change.add_argument('-d', '--del-tags', nargs=1,
                          help='Comma-separated list of tags to remove (if present).')
    p_change.add_argument('-t', '--title', nargs=1, help='New title for files')
    p_change.add_argument('-c', '--created', nargs=1, help='New created datetime for files, in ISO8601 format')
    p_change.add_argument('-p', '--preview', action='store_true',
                          help='Print changes to be made but do not change files')
    p_change.set_defaults(func=_change)

    # --- mv ---
    p_mv = subs.add_parser(
        'mv',
        help='Move a file. Any links to the file from other files in your configured notes directories will be '
             'updated to point to the new location, provided the referrers are of supported file types. '
             'Relative links from this file to other files will also be updated, if this file is of a supported file '
             'type.')
    p_mv.add_argument('src', help='File or folder to move.', nargs=1)
    p_mv.add_argument('dest', nargs=1,
                      help='New file path or new parent folder. If the argument is a folder, notesdir will try to '
                           'keep the original filename. In either case, this command will not overwrite an existing '
                           'file; it will adjust the new filename if needed to be unique within the target directory.')
    p_mv.add_argument('-j', '--json', action='store_true',
                      help='Output as JSON. The output is an object whose keys are the paths of files that were '
                           'moved, and whose values are the new paths of those files.')
    p_mv.add_argument('-p', '--preview',
                      action='store_true', help='Print changes to be made but do not move or change files')
    p_mv.set_defaults(func=_mv)

    # --- organize ---
    p_org = subs.add_parser(
        'organize',
        help='Organize files. All files within the directories configured in conf.repo_conf.root_paths will be '
             'passed to the function defined in conf.path_organizer, and will be moved if it returns a new path. '
             'New folders will be created when necessary and empty folders will be deleted. As with the mv command, '
             'relative links between files will be updated, if the file type of the referrer is supported.')
    p_org.add_argument('-j', '--json', action='store_true',
                       help='Output as JSON. The output is an object whose keys are the paths of files that were '
                            'moved, and whose values are the new paths of those files.')
    p_org.add_argument('-p', '--preview', action='store_true',
                       help='Print changes to be made but do not move or change files')
    p_org.set_defaults(func=_organize)

    # --- backfill ---
    p_backfill = subs.add_parser(
        'backfill',
        help='Backfill missing metadata. All files within the directories configured in conf.repo_conf.root_paths '
             'will be checked for title and created date metadata. If the title is missing, a title is set based '
             'on the filename; if created is missing, it is set based on the file\'s birthtime or ctime. '
             'Errors will be printed but will not result in a nonzero return status, since it is expected that '
             'some files in your notes directories will not be supported by notesdir.')
    p_backfill.add_argument('-p', '--preview', action='store_true',
                            help='Print changes to be made but do not change files')
    p_backfill.set_defaults(func=_backfill)

    # --- tags ---
    p_tags_count = subs.add_parser(
        'tags',
        help='Show a list of tags and the number of files that have each tag.')
    p_tags_count.add_argument('query', nargs='?',
                              help='Query to filter files by. If omitted, data for all files is shown. The query '
                                   'format is the same as for the `query` command.')
    p_tags_count.add_argument('-j', '--json', action='store_true',
                              help='Output as JSON. The output is an object whose keys are tags and whose values '
                                   'are the number of notes that matched the query and also possess that tag.')
    p_tags_count.set_defaults(func=_tags)

    # --- relink ---
    p_relink = subs.add_parser(
        'relink',
        help='Replace all links to one file with links to another. Note that this does not '
             'currently replace links to children of the original path - e.g., if the '
             'old path is "/foo/bar", a link to "/foo/bar/baz" will not be updated. '
             'No files are moved, and this command does not care whether or not the old '
             'or new paths refer to actual files.')
    p_relink.add_argument('old', nargs=1)
    p_relink.add_argument('new', nargs=1)
    p_relink.add_argument('-p', '--preview', action='store_true',
                          help='Print changes to be made but do not change files')
    p_relink.set_defaults(func=_relink)

    return parser
def main(args=None) -> int:
    """Runs the tool and returns its exit code.

    args may be an array of string command-line arguments; if absent,
    the process's arguments are used.
    """
    parser = argparser()
    opts = parser.parse_args(args)
    if not opts.func:
        # No subcommand selected: show usage and signal failure.
        parser.print_help()
        return 1
    with Notesdir.for_user() as nd:
        if opts.preview:
            # Preview mode: report planned changes without applying them.
            nd.repo.conf.preview_mode = True
        return opts.func(opts, nd)
|
"""
Testing the http sink
"""
import json
from mock import patch
from sinks.http import StatsiteHttp
@patch('requests.post')
def test_http(req):
    """StatsiteHttp buffers appended metrics, posts them as JSON on exit, then clears."""
    raw_lines = [
        "foo|123|1500000000",
        "bar|456|1500000001",
    ]
    expected = [
        {"key": "foo", "value": "123", "timestamp": "1500000000"},
        {"key": "bar", "value": "456", "timestamp": "1500000001"},
    ]
    endpoint = 'https://url.com'
    sink = StatsiteHttp(endpoint)
    with sink:
        for line in raw_lines:
            sink.append(line)
        # While the context is open, metrics are held in the buffer.
        assert expected == sink.metrics
    # On exit the sink posts the whole batch and empties its buffer.
    req.assert_called_with(endpoint, data=json.dumps({"metrics": expected}))
    assert sink.metrics == []
|
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_threatfox
# Purpose: Check if an IP address is malicious according to ThreatFox.
#
# Author: <bcoles@gmail.com>
#
# Created: 2021-09-20
# Copyright: (c) bcoles 2021
# Licence: MIT
# -------------------------------------------------------------------------------
import json
import time
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_threatfox(SpiderFootPlugin):
    """SpiderFoot module: check IP addresses against the ThreatFox (abuse.ch) IOC database."""

    # Module metadata consumed by the SpiderFoot UI / registry.
    meta = {
        'name': "ThreatFox",
        'summary': "Check if an IP address is malicious according to ThreatFox.",
        'flags': [],
        'useCases': ["Investigate", "Passive"],
        'categories': ["Reputation Systems"],
        'dataSource': {
            'model': "FREE_NOAUTH_UNLIMITED",
            'references': [
                "https://threatfox.abuse.ch/api/",
            ],
            'website': "https://threatfox.abuse.ch",
            'favIcon': 'https://threatfox.abuse.ch/favicon.ico',
            'logo': "https://threatfox.abuse.ch/images/threatfox_logo.png",
            'description': "ThreatFox is a free platform from abuse.ch with the goal of sharing"
            "indicators of compromise (IOCs) associated with malware with the infosec community,"
            "AV vendors and threat intelligence providers.",
        }
    }

    # Default options; user overrides are merged in by setup().
    opts = {
        'checkaffiliates': True
    }

    # Human-readable description shown for each option.
    optdescs = {
        'checkaffiliates': "Apply checks to affiliates?"
    }

    results = None  # per-scan de-duplication cache, initialised in setup()
    errorState = False  # set True on fatal API errors so later events are skipped

    def setup(self, sfc, userOpts=dict()):
        """Store the SpiderFoot handle and merge user options over the defaults."""
        self.sf = sfc
        self.results = self.tempStorage()
        for opt in list(userOpts.keys()):
            self.opts[opt] = userOpts[opt]

    def watchedEvents(self):
        """Event types this module consumes."""
        return [
            "IP_ADDRESS",
            "AFFILIATE_IPADDR"
        ]

    def producedEvents(self):
        """Event types this module can emit."""
        return [
            "BLACKLISTED_IPADDR",
            "BLACKLISTED_AFFILIATE_IPADDR",
            "MALICIOUS_IPADDR",
            "MALICIOUS_AFFILIATE_IPADDR",
        ]

    def query(self, qry):
        """Query ThreatFox for IOCs matching an IP address.

        Args:
            qry (str): IP address

        Returns:
            The parsed 'data' field of the API response (IOC records), or
            None on error, rate-limiting, or when there are no results.
        """
        params = {
            'query': 'search_ioc',
            'search_term': qry
        }
        headers = {
            "Accept": "application/json",
        }
        res = self.sf.fetchUrl(
            "https://threatfox-api.abuse.ch/api/v1/",
            useragent=self.opts['_useragent'],
            timeout=self.opts['_fetchtimeout'],
            headers=headers,
            postData=json.dumps(params)
        )
        # Fixed pause between queries to stay friendly to the free API.
        time.sleep(1)
        if res['content'] is None:
            return None
        if res['code'] == "429":
            self.error("You are being rate-limited by ThreatFox.")
            self.errorState = True
            return None
        if res['code'] != '200':
            self.error(f"Unexpected reply from ThreatFox: {res['code']}")
            self.errorState = True
            return None
        try:
            json_result = json.loads(res['content'])
        except Exception as e:
            self.debug(f"Error processing JSON response from ThreatFox: {e}")
            return None
        query_status = json_result.get('query_status')
        if query_status == 'no_result':
            self.debug(f"No results from ThreatFox for: {qry}")
            return None
        if query_status != 'ok':
            self.debug(f"ThreatFox query failed: {query_status}")
            return None
        data = json_result.get('data')
        if not data:
            self.debug(f"No results from ThreatFox for: {qry}")
            return None
        return data

    def handleEvent(self, event):
        """Look up a received IP event in ThreatFox and, on a hit, emit both a
        MALICIOUS_* and a BLACKLISTED_* event linking back to the source event."""
        eventName = event.eventType
        eventData = event.data
        if self.errorState:
            return
        self.debug(f"Received event, {eventName}, from {event.module}")
        if eventData in self.results:
            self.debug(f"Skipping {eventData}, already checked.")
            return
        if eventName not in self.watchedEvents():
            return
        self.results[eventData] = True
        if eventName == 'IP_ADDRESS':
            malicious_type = 'MALICIOUS_IPADDR'
            blacklist_type = 'BLACKLISTED_IPADDR'
        elif eventName == 'AFFILIATE_IPADDR':
            # Affiliate lookups are optional, controlled by 'checkaffiliates'.
            if not self.opts.get('checkaffiliates', False):
                return
            malicious_type = 'MALICIOUS_AFFILIATE_IPADDR'
            blacklist_type = 'BLACKLISTED_AFFILIATE_IPADDR'
        else:
            self.debug(f"Unexpected event type {eventName}, skipping")
            return
        data = self.query(eventData)
        if not data:
            return
        url = f"https://threatfox.abuse.ch/browse.php?search=ioc:{eventData}"
        text = f"ThreatFox [{eventData}]\n<SFURL>{url}</SFURL>"
        evt = SpiderFootEvent(malicious_type, text, self.__name__, event)
        self.notifyListeners(evt)
        evt = SpiderFootEvent(blacklist_type, text, self.__name__, event)
        self.notifyListeners(evt)

# End of sfp_threatfox class
|
from part import *
from parts import *
|
class Solution(object):
    def tree2str(self, t):
        """Serialize a binary tree to the "val(left)(right)" string format,
        omitting empty parentheses that are not needed to keep the mapping
        unambiguous (a right-only child forces an explicit "()" for the left).

        :type t: TreeNode
        :rtype: str
        """
        def render(node):
            # Empty subtree renders as the empty string.
            if node is None:
                return ""
            pieces = [str(node.val)]
            # Emit the left slot whenever any child exists: a missing left
            # with a present right must appear as "()" to stay unambiguous.
            if node.left is not None or node.right is not None:
                pieces.append("(" + render(node.left) + ")")
            if node.right is not None:
                pieces.append("(" + render(node.right) + ")")
            return "".join(pieces)

        return render(t)
import argparse
import collections
import logging
import asyncio
import datetime as dt
import sys
import zmq
import zmq.asyncio
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore, QtWidgets
from pyqtgraph.WidgetGroup import WidgetGroup
from ami import Defaults
from ami.data import Deserializer
from ami.comm import Ports
from ami.asyncqt import QEventLoop
from ami.flowchart.library.DisplayWidgets import symbols_colors
logger = logging.getLogger(__name__)
class HeartbeatData(object):
    """Accumulates timing contributions for a single heartbeat from workers,
    local collectors and the global collector, rolled up by each node's parent."""

    def __init__(self, heartbeat, metadata):
        self.heartbeat = heartbeat
        self.metadata = metadata
        self.num_events = {}  # {worker : events}
        self.worker_time_per_heartbeat = {}  # {worker : {node : time}}
        self.worker_average = collections.defaultdict(list)  # {node : times}
        self.local_collector_time_per_heartbeat = {}  # {localCollector : {node : time}}
        self.local_collector_average = collections.defaultdict(list)  # {node : times}
        self.total_time_per_heartbeat = collections.defaultdict(lambda: 0)
        self.heartbeat_times = collections.defaultdict(list)

    def add_worker_data(self, worker, data):
        """Record one worker's events for this heartbeat.

        data is a sequence of (start, stop, {node: time}) records; the
        heartbeat wall time is taken as last stop minus first start, which
        assumes the records are time-ordered — TODO confirm at the producer.
        """
        self.num_events[worker] = len(data)
        node_time_per_heartbeat = collections.defaultdict(lambda: 0)
        heartbeat_time = data[-1][1] - data[0][0]
        self.heartbeat_times['worker'].append(heartbeat_time)
        for event in data:
            start, stop, event = event
            for node, time in event.items():
                # Roll node times up to the node's parent from the graph metadata.
                parent = self.metadata[node]['parent']
                node_time_per_heartbeat[parent] += time
        for node, time in node_time_per_heartbeat.items():
            self.worker_average[node].append(time)
        self.worker_time_per_heartbeat[worker] = node_time_per_heartbeat

    def add_local_collector_data(self, localCollector, data):
        """Record one local collector's contributions; same record shape as workers."""
        node_time_per_heartbeat = collections.defaultdict(lambda: 0)
        heartbeat_time = data[-1][1] - data[0][0]
        self.heartbeat_times['localCollector'].append(heartbeat_time)
        for contrib in data:
            start, stop, contrib = contrib
            for node, time in contrib.items():
                parent = self.metadata[node]['parent']
                node_time_per_heartbeat[parent] += time
        for node, time in node_time_per_heartbeat.items():
            self.local_collector_average[node].append(time)
        self.local_collector_time_per_heartbeat[localCollector] = node_time_per_heartbeat

    def add_global_collector_data(self, data):
        """Record the global collector's contributions and finalize totals.

        Finalization folds the average worker and local-collector times into
        total_time_per_heartbeat and computes total_heartbeat_time as the sum
        of the average heartbeat wall times per tier.
        """
        heartbeat_time = data[-1][1] - data[0][0]
        self.heartbeat_times['globalCollector'].append(heartbeat_time)
        for contrib in data:
            start, stop, contrib = contrib
            for node, time in contrib.items():
                parent = self.metadata[node]['parent']
                self.total_time_per_heartbeat[parent] += time
        for node, times in self.worker_average.items():
            self.total_time_per_heartbeat[node] += np.average(times)
        for node, times in self.local_collector_average.items():
            self.total_time_per_heartbeat[node] += np.average(times)
        self.total_heartbeat_time = 0
        for typ, times in self.heartbeat_times.items():
            self.total_heartbeat_time += np.average(times)
class ProfilerWindow(QtGui.QMainWindow):
    """Main window that, on close, cancels the owning Profiler's polling task
    instead of quitting the application."""

    def __init__(self, proc):
        super().__init__()
        self.proc = proc  # the owning Profiler instance

    def closeEvent(self, event):
        # Cancel the profile-consuming task and tear the window down, but
        # ignore the close event so the Qt/asyncio loop keeps running (a later
        # broker message can re-show the window).
        self.proc.cancel()
        self.destroy()
        event.ignore()
class Profiler(QtCore.QObject):
    """Qt GUI that subscribes to AMI profiling messages over zmq and plots
    per-node timing: time per heartbeat, heartbeats per second, and the
    fraction of each heartbeat spent in each node."""

    def __init__(self, broker_addr="", profile_addr="", graph_name="graph", loop=None):
        """Build the UI, set up sockets, and run the asyncio event loop.

        broker_addr: optional zmq address publishing 'profiler' notifications;
            when empty, the profile socket is connected immediately.
        profile_addr: zmq address publishing profile/metadata messages.
        graph_name: subscription topic (graph) to profile.
        loop: existing event loop; when None, a QApplication + QEventLoop
            is created and installed.

        NOTE: this constructor blocks — it runs the event loop until the
        monitored task finishes or is cancelled.
        """
        super().__init__()
        if loop is None:
            self.app = QtGui.QApplication([])
            loop = QEventLoop(self.app)
        asyncio.set_event_loop(loop)
        self.ctx = zmq.asyncio.Context()
        if broker_addr:
            self.broker = self.ctx.socket(zmq.SUB)
            self.broker.setsockopt_string(zmq.SUBSCRIBE, 'profiler')
            self.broker.connect(broker_addr)
        else:
            self.broker = None
        self.graph_name = graph_name
        self.profile_addr = profile_addr
        self.profile = self.ctx.socket(zmq.SUB)
        self.profile.setsockopt_string(zmq.SUBSCRIBE, self.graph_name)
        self.task = None  # the process_profile_data() task, created by connect()
        self.deserializer = Deserializer()
        self.current_version = 0  # newest graph version seen so far
        self.metadata = {}  # {version : metadata}
        self.parents = set()  # parent nodes of the current graph version
        self.heartbeat_data = {}  # {heartbeat : HeartbeatData}, in-flight accumulation
        # --- layout: checkbox column on the left, plots on the right ---
        self.widget = QtWidgets.QWidget()
        self.layout = QtGui.QGridLayout(self.widget)
        self.widget.setLayout(self.layout)
        self.enabled_nodes = {}  # {node name : QCheckBox}
        self.trace_layout = QtGui.QFormLayout(self.widget)
        hbox = QtWidgets.QHBoxLayout(self.widget)
        selectAll = QtWidgets.QPushButton("Select All", self.widget)
        selectAll.clicked.connect(self.selectAll)
        unselectAll = QtWidgets.QPushButton("Unselect All", self.widget)
        unselectAll.clicked.connect(self.unselectAll)
        hbox.addWidget(selectAll)
        hbox.addWidget(unselectAll)
        self.trace_layout.addRow(hbox)
        self.trace_group = WidgetGroup()
        self.trace_group.sigChanged.connect(self.state_changed)
        self.layout.addLayout(self.trace_layout, 0, 0, -1, 1)
        self.graphicsLayoutWidget = pg.GraphicsLayoutWidget()
        self.layout.addWidget(self.graphicsLayoutWidget, 0, 1, -1, -1)
        # Top-left plot: per-node time per heartbeat (rolling 100-sample window).
        self.time_per_heartbeat = self.graphicsLayoutWidget.addPlot(row=0, col=0)
        self.time_per_heartbeat.showGrid(True, True)
        self.time_per_heartbeat.setLabel('bottom', "Heartbeat")
        self.time_per_heartbeat.setLabel('left', "Time (Sec)")
        self.time_per_heartbeat_data = collections.defaultdict(lambda: np.array([np.nan]*100))
        self.time_per_heartbeat_traces = {}
        self.time_per_heartbeat_legend = self.time_per_heartbeat.addLegend()
        # Top-right plot: heartbeats per second (rolling window).
        self.heartbeats_per_second = self.graphicsLayoutWidget.addPlot(row=0, col=1)
        self.heartbeats_per_second.showGrid(True, True)
        self.heartbeats_per_second.setLabel('bottom', "Heartbeat")
        self.heartbeats_per_second.setLabel('left', "Heartbeats/Second")
        self.heartbeats_per_second_data = np.array([np.nan]*100)
        self.heartbeats_per_second_trace = None
        # Bottom plot: bar graph of each node's share of the heartbeat.
        self.percent_per_heartbeat = self.graphicsLayoutWidget.addPlot(row=1, col=0, rowspan=1, colspan=2)
        self.percent_per_heartbeat.showGrid(True, True)
        self.percent_per_heartbeat_trace = None
        # Overlay labels showing last-update time and summary figures.
        self.last_updated = pg.LabelItem(parent=self.time_per_heartbeat.getViewBox())
        self.total_heartbeat_time = pg.LabelItem(parent=self.percent_per_heartbeat.getViewBox())
        self.heartbeat_per_second = pg.LabelItem(parent=self.heartbeats_per_second.getViewBox())
        self.win = ProfilerWindow(self)
        self.win.setWindowTitle('Profiler')
        self.win.setCentralWidget(self.widget)
        self.win.show()
        with loop:
            loop.run_until_complete(asyncio.gather(self.process_broker_message(), self.monitor()))

    def selectAll(self, clicked):
        """Check every node checkbox (shows all traces)."""
        for name, btn in self.enabled_nodes.items():
            btn.setCheckState(QtCore.Qt.Checked)

    def unselectAll(self, clicked):
        """Uncheck every node checkbox (hides all traces)."""
        for name, btn in self.enabled_nodes.items():
            btn.setCheckState(QtCore.Qt.Unchecked)

    def state_changed(self, *args, **kwargs):
        """Show/hide a node's trace (and legend entry) when its checkbox toggles."""
        node, checked = args
        if node not in self.time_per_heartbeat_traces:
            return
        trace = self.time_per_heartbeat_traces[node]
        if checked:
            trace.show()
            self.time_per_heartbeat_legend.addItem(trace, node)
        else:
            trace.hide()
            self.time_per_heartbeat_legend.removeItem(trace)

    async def monitor(self):
        """Wait for the polling task to be created, then await it, swallowing
        cancellation so the gather() in __init__ can finish cleanly."""
        while self.task is None:
            await asyncio.sleep(0.1)
        try:
            await self.task
        except asyncio.CancelledError:
            pass

    async def process_broker_message(self):
        """Without a broker, connect immediately; with one, (re)connect and
        re-show the window whenever the broker announces a graph."""
        if self.broker is None:
            self.connect()
            return
        while True:
            await self.broker.recv_string()
            msg = await self.broker.recv_pyobj()
            self.graph_name = msg.name
            self.connect()
            self.win.show()

    def connect(self):
        """Start the profile-consuming task if it is not already running."""
        if self.task is None:
            self.task = asyncio.ensure_future(self.process_profile_data())

    def cancel(self):
        """Stop the profile-consuming task and disconnect the profile socket."""
        self.task.cancel()
        self.task = None
        self.profile.disconnect(self.profile_addr)

    async def process_profile_data(self):
        """Consume profile/metadata messages and update all plots.

        Plot updates are driven by the global collector's message for each
        heartbeat, after which that heartbeat's accumulator is discarded.
        """
        self.profile.connect(self.profile_addr)

        while True:
            await self.profile.recv_string()
            name = await self.profile.recv_string()
            data_type = await self.profile.recv_string()
            data = await self.profile.recv_serialized(self.deserializer, copy=False)

            if data_type == "profile":
                heartbeat = data['heartbeat']
                version = data['version']

                if heartbeat not in self.heartbeat_data:
                    # Can't accumulate without the graph metadata for this version.
                    if version not in self.metadata:
                        continue
                    metadata = self.metadata[version]
                    self.heartbeat_data[heartbeat] = HeartbeatData(data['heartbeat'],
                                                                   metadata)
                heartbeat_data = self.heartbeat_data[heartbeat]
                if name.startswith('worker'):
                    heartbeat_data.add_worker_data(name, data['times'])
                elif name.startswith('localCollector'):
                    heartbeat_data.add_local_collector_data(name, data['times'])
                elif name.startswith('globalCollector'):
                    heartbeat_data.add_global_collector_data(data['times'])

                    # A newer graph version: rebuild checkboxes and drop traces
                    # for nodes that no longer exist.
                    if version > self.current_version:
                        self.current_version = version
                        self.percent_per_heartbeat_data = collections.defaultdict(lambda: 0)
                        if self.percent_per_heartbeat_trace:
                            self.percent_per_heartbeat.removeItem(self.percent_per_heartbeat_trace)
                            self.percent_per_heartbeat_trace = None
                        parents = set()
                        for k, v in self.metadata[version].items():
                            parent = v['parent']
                            parents.add(parent)
                            if parent not in self.enabled_nodes:
                                widget = QtWidgets.QCheckBox(self.widget)
                                widget.node = parent
                                widget.setCheckState(QtCore.Qt.Checked)
                                self.enabled_nodes[parent] = widget
                                self.trace_layout.addRow(parent, widget)
                        deleted_nodes = self.parents.difference(parents)
                        for node in deleted_nodes:
                            self.trace_layout.removeRow(self.enabled_nodes[node])
                            del self.enabled_nodes[node]
                            trace = self.time_per_heartbeat_traces[node]
                            self.time_per_heartbeat.removeItem(trace)
                            self.time_per_heartbeat_legend.removeItem(trace)
                            del self.time_per_heartbeat_traces[node]
                            del self.time_per_heartbeat_data[node]
                        self.parents = parents
                        # Rebuild the widget group so it tracks the current checkboxes.
                        self.trace_group.sigChanged.disconnect(self.state_changed)
                        self.trace_group = WidgetGroup()
                        self.trace_group.sigChanged.connect(self.state_changed)
                        for node, ctrl in self.enabled_nodes.items():
                            self.trace_group.addWidget(ctrl, node)

                    # Rolling windows: write at the end, then rotate left.
                    self.time_per_heartbeat_data["heartbeat"][-1] = heartbeat
                    self.time_per_heartbeat_data["heartbeat"] = np.roll(self.time_per_heartbeat_data["heartbeat"], -1)
                    total = 1
                    for node, time in heartbeat_data.total_time_per_heartbeat.items():
                        self.time_per_heartbeat_data[node][-1] = time
                        self.time_per_heartbeat_data[node] = np.roll(self.time_per_heartbeat_data[node], -1)
                        self.percent_per_heartbeat_data[node] = time/heartbeat_data.total_heartbeat_time
                        total -= time/heartbeat_data.total_heartbeat_time
                    # Whatever share is unaccounted for is attributed to transfer.
                    self.percent_per_heartbeat_data['Transfer'] = total

                    i = 0
                    for node, times in self.time_per_heartbeat_data.items():
                        if node == "heartbeat":
                            continue
                        if node not in self.time_per_heartbeat_traces:
                            symbol, color = symbols_colors[i]
                            self.time_per_heartbeat_traces[node] = self.time_per_heartbeat.plot(
                                x=self.time_per_heartbeat_data["heartbeat"], y=times, name=node,
                                symbol=symbol, symbolBrush=color)
                        else:
                            self.time_per_heartbeat_traces[node].setData(
                                x=self.time_per_heartbeat_data["heartbeat"],
                                y=times)
                        i += 1

                    nodes, times = zip(*self.percent_per_heartbeat_data.items())
                    if self.percent_per_heartbeat_trace is None:
                        x = np.arange(len(nodes))
                        self.percent_per_heartbeat_trace = pg.BarGraphItem(x=x,
                                                                           height=times,
                                                                           width=1, brush='b')
                        self.percent_per_heartbeat.addItem(self.percent_per_heartbeat_trace)
                        xticks = dict(zip(x, nodes))
                        ax = self.percent_per_heartbeat.getAxis('bottom')
                        ax.setTicks([xticks.items()])
                    else:
                        self.percent_per_heartbeat_trace.setOpts(height=times)

                    self.heartbeats_per_second_data[-1] = 1/heartbeat_data.total_heartbeat_time
                    self.heartbeats_per_second_data = np.roll(self.heartbeats_per_second_data, -1)
                    if self.heartbeats_per_second_trace is None:
                        symbol, color = symbols_colors[0]
                        self.heartbeats_per_second_trace = self.heartbeats_per_second.plot(
                            x=self.time_per_heartbeat_data["heartbeat"],
                            y=self.heartbeats_per_second_data,
                            symbol=symbol, symbolBrush=color)
                    else:
                        self.heartbeats_per_second_trace.setData(
                            x=self.time_per_heartbeat_data["heartbeat"],
                            y=self.heartbeats_per_second_data)

                    now = dt.datetime.now()
                    now = now.strftime("%H:%M:%S")
                    last_updated = f"Last Updated: {now}"
                    self.last_updated.setText(last_updated)
                    text = f"Seconds/Heartbeat: {heartbeat_data.total_heartbeat_time:.6f}<br/>Heartbeat: {heartbeat}"
                    self.total_heartbeat_time.setText(text)
                    text = f"Heartbeats/Second: {1/heartbeat_data.total_heartbeat_time:.0f}<br/>Heartbeat: {heartbeat}"
                    self.heartbeat_per_second.setText(text)
                    # Global collector message is the last for this heartbeat.
                    del self.heartbeat_data[heartbeat]

            elif data_type == "metadata":
                graph_name = data['graph']
                version = data['version']
                logger.info("Received metadata for %s v%d", graph_name, version)
                self.metadata[version] = data['metadata']
def main():
    """Parse the command line, resolve the profile endpoint, and start the Profiler GUI."""
    parser = argparse.ArgumentParser(description="AMII Profiler")

    parser.add_argument(
        '-H',
        '--host',
        default=Defaults.Host,
        help='hostname of the AMII Manager (default: %s)' % Defaults.Host
    )
    parser.add_argument(
        '-g',
        '--graph-name',
        default=Defaults.GraphName,
        help='the name of the graph used (default: %s)' % Defaults.GraphName
    )

    # The endpoint is either tcp host:port or an ipc path — never both.
    addr_group = parser.add_mutually_exclusive_group()
    addr_group.add_argument(
        '-p',
        '--port',
        default=Ports.Profile,
        help='port for profile info (default: %d)' % Ports.Profile
    )
    addr_group.add_argument(
        '-i',
        '--ipc-dir',
        help='directory containing the ipc file descriptor for manager/client (GUI) communication'
    )

    args = parser.parse_args()

    if args.ipc_dir is None:
        profile_addr = f"tcp://{args.host}:{args.port}"
    else:
        profile_addr = f"ipc://{args.ipc_dir}/profile"

    # Blocks inside the constructor until the GUI event loop exits.
    Profiler("", profile_addr, args.graph_name)
# Script entry point: exit with main()'s return status.
if __name__ == '__main__':
    sys.exit(main())
|
import os
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader, random_split
from torchvision import datasets, transforms
from torchvision.datasets import MNIST
import pytorch_lightning as pl
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning import Trainer
class MNISTNet(LightningModule):
    """Three-layer fully-connected MNIST digit classifier (LightningModule).

    forward() returns per-class log-probabilities; training minimizes the
    cross-entropy of those outputs against the integer labels.
    """

    def __init__(self):
        super().__init__()
        # 28x28 grayscale inputs flattened to 784 features -> 10 digit classes.
        self.l1 = torch.nn.Linear(28 * 28, 128)
        self.l2 = torch.nn.Linear(128, 256)
        self.l3 = torch.nn.Linear(256, 10)

    def forward(self, x):
        """Return log-probabilities of shape (batch, 10) for an image batch."""
        batch_size, channels, width, height = x.size()
        x = x.view(batch_size, -1)  # flatten to vector
        x = self.l1(x)
        x = torch.relu(x)
        x = self.l2(x)
        x = torch.relu(x)
        x = self.l3(x)
        x = torch.log_softmax(x, dim=1)
        return x

    def training_step(self, batch, batch_idx):
        """Compute the loss for one training batch.

        Fix: was `F.F.cross_entropy(...)`, which raises AttributeError — there
        is no `F.F`; the intended call is `F.cross_entropy`.
        """
        x, y = batch
        logits = self(x)
        loss = F.cross_entropy(logits, y)
        return loss

    def train_dataloader(self):
        """Build the MNIST training DataLoader (downloads the dataset if needed).

        Fix: the Compose transform was constructed but never passed to MNIST,
        so batches were raw PIL images instead of normalized tensors.
        """
        transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307, ), (0.3081, ))])
        mnist_train = MNIST(os.getcwd(), train=True, download=True, transform=transform)
        mnist_train = DataLoader(mnist_train, batch_size=64, num_workers=4)
        return mnist_train

    def configure_optimizers(self):
        """Adam over all model parameters."""
        return torch.optim.Adam(self.parameters(), lr=1e-3)
# Script-level run: build the model and launch a default Trainer fit.
net = MNISTNet()
trainer = Trainer()
trainer.fit(net)
"""
DCI proof of concept
Context is a separate object to the Collaboration (again for exploration of alternatives).
Made a class for it, but a Dictionary is also possible.
Author: David Byers, Serge Beaumont
7 October 2008
N.B.
Adapted to run on python 3.
"""
import types
class Role:
    """A role is a special class that never gets instantiated directly.

    Instead, when the user wants to create a new role instance, we create a
    new class that has the role and the wrapped object's class as its
    superclasses, create an instance of that class, and share the new
    instance's __dict__ with the original object's __dict__ — so all state
    is held in (and mutations flow through to) the original object.
    """

    def __new__(cls, ob):
        # Build a one-off class combining this role with the object's class so
        # isinstance() checks succeed for both, then alias the dicts so role
        # methods read and write the original object's attributes directly.
        c = types.new_class(cls.__name__, bases=(cls, ob.__class__))
        inst = super().__new__(c)
        inst.__dict__ = ob.__dict__
        return inst

    def __init__(self, ob):
        """Do not call the superclass __init__. If we did then we would call
        the __init__ function in the real class hierarchy too (i.e. Account,
        in this example).
        """
        pass

    # NOTE: earlier revisions carried `__getattr`/`__setattr`/`__delattr`
    # proxies to a `self.__ob__` attribute. They lacked the trailing double
    # underscores (so Python never invoked them as special methods), were
    # name-mangled to _Role__getattr etc., and `__ob__` was never set — dead
    # code that would raise AttributeError if ever called. The shared
    # __dict__ above already provides full state delegation, so they were
    # removed.
class MoneySource(Role):
    """Role: the account money is withdrawn from during a transfer."""

    def transfer_to(self, ctx, amount):
        # Only move funds when the balance covers the amount; otherwise the
        # transfer is silently skipped.
        if amount <= self.balance:
            self.decreaseBalance(amount)
            ctx.sink.receive(ctx, amount)
class MoneySink(Role):
    """Role: the account that receives money during a transfer."""

    def receive(self, ctx, amount):
        # Deposit the transferred amount into the underlying account.
        self.increaseBalance(amount)
class Account:
    """The class for the domain object: a bank account with a mutable balance.

    Fix: the printed messages were missing separating spaces (e.g.
    "balance of1000", "Withdraw 100from ..."); spacing corrected.
    """

    def __init__(self, amount):
        print('Creating a new account with balance of ' + str(amount))
        self.balance = amount
        super(Account, self).__init__()

    def decreaseBalance(self, amount):
        """Withdraw `amount`; overdraft checks are the caller's job (MoneySource)."""
        print('Withdraw ' + str(amount) + ' from ' + str(self))
        self.balance -= amount

    def increaseBalance(self, amount):
        """Deposit `amount` into this account."""
        print('Deposit ' + str(amount) + ' in ' + str(self))
        self.balance += amount
class Context:
    """Holds context state.

    Attributes (source, sink) are attached dynamically by TransferMoney.
    """
    pass
class TransferMoney:
    """This is the environment, like the controller, that binds the context
    and offers an interface to trigger the context to run.
    """

    def __init__(self, source, sink):
        # Wrap the two accounts in their roles and stash them on the context.
        ctx = Context()
        ctx.source = MoneySource(source)
        ctx.sink = MoneySink(sink)
        self.context = ctx

    def __call__(self, amount):
        # Kick off the use case: the source role drives the transfer.
        ctx = self.context
        ctx.source.transfer_to(ctx, amount)
# Demo: transfer 100 from a funded account to an empty one, then show balances.
if __name__ == '__main__':
    src = Account(1000)
    dst = Account(0)
    t = TransferMoney(src, dst)
    t(100)
    print(src.balance)
    print(dst.balance)
|
# coding=utf-8
# Copyright 2020 HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.file_utils import cached_property
from transformers.testing_utils import is_pt_tf_cross_test, require_sentencepiece, require_tf, require_tokenizers, slow
from .test_configuration_common import ConfigTester
from .test_modeling_tf_bart import TFBartModelTester
from .test_modeling_tf_common import TFModelTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration
class ModelTester(TFBartModelTester):
    """Bart-based model tester specialised for Pegasus configurations."""

    # Config overrides applied on top of the Bart tester defaults.
    config_updates = dict(
        normalize_before=True,
        static_position_embeddings=True,
    )
    hidden_act = "relu"
    config_cls = PegasusConfig
@require_tf
class TestTFPegasusCommon(TFModelTesterMixin, unittest.TestCase):
    """Common TF model test-suite run against TFPegasusForConditionalGeneration."""

    all_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    model_tester_cls = ModelTester
    is_encoder_decoder = True
    test_pruning = False

    def setUp(self):
        self.model_tester = self.model_tester_cls(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # inputs_embeds not supported
        pass

    def test_saved_model_with_hidden_states_output(self):
        # Should be uncommented during patrick TF refactor
        pass

    def test_saved_model_with_attentions_output(self):
        # Should be uncommented during patrick TF refactor
        pass

    def test_compile_tf_model(self):
        """Smoke-test that the model round-trips save/load and composes/compiles
        with plain Keras layers."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0)
        loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
        metric = tf.keras.metrics.SparseCategoricalAccuracy("accuracy")
        model_class = self.all_generative_model_classes[0]
        input_ids = {
            "decoder_input_ids": tf.keras.Input(batch_shape=(2, 2000), name="decoder_input_ids", dtype="int32"),
            "input_ids": tf.keras.Input(batch_shape=(2, 2000), name="input_ids", dtype="int32"),
        }
        # Prepare our model
        model = model_class(config)
        model(self._prepare_for_class(inputs_dict, model_class))  # Model must be called before saving.
        # Let's load it from the disk to be sure we can use pretrained weights
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = model_class.from_pretrained(tmpdirname)
        outputs_dict = model(input_ids)
        hidden_states = outputs_dict[0]
        # Add a dense layer on top to test integration with other keras modules
        outputs = tf.keras.layers.Dense(2, activation="softmax", name="outputs")(hidden_states)
        # Compile extended model
        extended_model = tf.keras.Model(inputs=[input_ids], outputs=[outputs])
        extended_model.compile(optimizer=optimizer, loss=loss, metrics=[metric])

    def test_model_common_attributes(self):
        """Embedding accessors exist; output-bias accessors return None for Pegasus."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            x = model.get_output_layer_with_bias()
            assert x is None
            name = model.get_prefix_bias_name()
            assert name is None
@is_pt_tf_cross_test
@require_sentencepiece
@require_tokenizers
class TFPegasusIntegrationTests(unittest.TestCase):
    """End-to-end generation check: summarize two articles with
    google/pegasus-xsum and compare against known-good summaries."""

    src_text = [
        """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
        """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning 'Oh I think you're nominated'", said Dappy."And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around."At the end of the day we're grateful to be where we are in our careers."If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" """,
    ]
    expected_text = [
        "California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to reduce the risk of wildfires.",
        'N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.',
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        # from_pt=True: load the PyTorch checkpoint into the TF model.
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name, from_pt=True)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        """Tokenize src_text, generate with 2-beam search, decode to strings."""
        model_inputs = self.tokenizer.prepare_seq2seq_batch(
            src_texts=self.src_text, **tokenizer_kwargs, return_tensors="tf"
        )
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
|
from project.models import Project, User
from project import bcrypt, db, app
from functools import wraps
from flask import abort, request, jsonify
import jwt
import json
def authorize(f):
    """Decorator requiring a valid `Authorization: Bearer <token>` header.

    On success, calls the wrapped view with a dict describing the
    authenticated user prepended to its positional arguments. Any failure
    (missing header, malformed header, invalid/expired token) aborts the
    request with HTTP 401.

    Fixes: removed leftover debug prints; an empty/malformed token previously
    fell through and returned None (a 500) instead of aborting with 401.
    """
    @wraps(f)
    def decorated_function(*args, **kws):
        if 'Authorization' not in request.headers:
            abort(401)
        auth_header = request.headers.get('Authorization')
        try:
            # Expected header shape: "Bearer <token>".
            auth_token = auth_header.split(" ")[1]
        except IndexError:
            abort(401)
        resp = User.decode_auth_token(auth_token)
        if isinstance(resp, str):
            # decode_auth_token appears to return an error string on failure
            # and the user id otherwise — confirm against its implementation.
            abort(401)
        user = User.query.filter_by(id=resp).first()
        responseObject = {
            'user_id': user.id,
            'email': user.email,
            'username': user.username,
            'admin': user.admin,
            'registered_on': user.registered_on
        }
        return f(responseObject, *args, **kws)
    return decorated_function
@app.route('/create_project', methods=['POST'])
@authorize
def create_project(user):
    """Create a new top-level project owned by the authenticated user."""
    body = request.get_json()
    new_project = Project(
        owner=user['username'],
        name=body.get('name'),
        is_public=bool(body.get('is_public')),
        description=body.get('description'),
        parent=None
    )
    db.session.add(new_project)
    db.session.commit()
    return jsonify({"status": "success", "pid": new_project.id})
@app.route('/fork_project', methods=['POST'])
@authorize
def fork_project(user):
    """Create a copy of an existing project, owned by the requesting user.

    The fork's name is prefixed with "Fork of " and its parent is set to the
    source project's id. Responds with the new project id on success, or
    {"status": "fail"} when the source project cannot be found.

    Fix: removed an unused `info` dict (dead copy-paste leftover from
    get_project).
    """
    post_data = request.get_json()
    query_result = db.session.query(Project).filter(Project.id == post_data.get('id')).all()
    if len(query_result) != 1:
        return jsonify({"status": "fail"})
    source = query_result[0]
    project = Project(
        owner=user['username'],
        name='Fork of ' + source.name,
        is_public=bool(source.is_public),
        description=source.description,
        xml=source.xml,
        parent=post_data.get('id')
    )
    db.session.add(project)
    db.session.commit()
    return jsonify({"status": "success", "pid": project.id})
@app.route('/save_project', methods=['POST'])
@authorize
def save_project(user):
    """Persist new XML content for a project owned by the caller."""
    body = request.get_json()
    saved = save_project_helper(body.get('p_id'), body.get('content'), user)
    return jsonify({"status": "success" if saved else "fail"})
@app.route('/saveas_project', methods=['POST'])
@authorize
def saveas_project(user):
    """Save an existing project's content under a new name/description for the caller."""
    body = request.get_json()
    rows = db.session.query(Project).filter(Project.id == body.get('id')).all()
    if len(rows) != 1:
        return jsonify({"status": "fail"})
    src = rows[0]
    copy = Project(
        owner=user['username'],
        name=body.get('name'),
        is_public=bool(src.is_public),
        description=body.get('desc'),
        xml=src.xml,
        parent=src.parent
    )
    db.session.add(copy)
    db.session.commit()
    return jsonify({"status": "success", "pid": copy.id})
@app.route("/projects/<project_id>")
def project_data(project_id):
content = get_project(int(project_id))
return content
@app.route("/user/<owner>")
def projects_data(owner):
return get_projects(owner)
def get_project(project_id: int) -> str:
    """Serialize a single project to a JSON string.

    :param project_id: primary key of the project to fetch.
    :return: JSON string with status/name/description/xml on success,
        or the literal string "foo" when no unique match exists.
    """
    # Fixed return annotation: the original said `-> json`, but `json` is a
    # module, not a type; the function returns a str either way.
    query_result = db.session.query(Project).filter(Project.id == project_id).all()
    if len(query_result) == 1:
        info = {
            "status": "success",
            "name" : query_result[0].name,
            "description" : query_result[0].description,
            "xml": query_result[0].xml
        }
        return json.dumps(info)
    # NOTE(review): "foo" looks like a leftover placeholder for a failure
    # payload -- kept for backward compatibility; confirm what callers expect.
    return "foo"
def get_projects(owner):
    """Return a JSON string listing every project of *owner*, newest first."""
    rows = db.session.query(Project).filter(Project.owner == owner).all()
    data = [
        {
            "id": row.id,
            "name" : row.name,
            "description" : row.description,
            "xml": row.xml,
            "owner": row.owner,
            "datetime": str(row.last_modified),
            "num_stars": row.num_stars
        }
        for row in rows
    ]
    # Query order is oldest-first; present most recent projects first.
    data.reverse()
    return json.dumps({"status": "success", "data": data})
def save_project_helper(p_id, content, submitter):
    """Overwrite the XML of project *p_id* if *submitter* owns it.

    :param p_id: project primary key.
    :param content: new XML payload to store.
    :param submitter: dict carrying the authenticated user's 'username'.
    :return: True when saved, False when the project is missing or not
        owned by the submitter.
    """
    project = db.session.query(Project).filter(Project.id == p_id).first()
    # Guard against an unknown id: .first() returns None, and the original
    # code crashed here with AttributeError instead of reporting failure.
    if project is not None and project.owner == submitter['username']:
        project.xml = content
        db.session.commit()
        return True
    return False
|
"""
TopologicalGraph class will contain standard topological sort functionalities
"""
class TopologicalGraph:
    """Layered topological sort over nodes numbered 1..total_nodes.

    An edge (a, b) recorded via add_edge means *a depends on b*.
    build_clusters() fills self.clusters with layers such that every node
    appears one layer after all of its dependencies (Kahn's algorithm).
    """

    def __init__(self, total_nodes):
        self.total_nodes = total_nodes
        nodes = range(1, total_nodes + 1)
        self.graph = {node: [] for node in nodes}
        self.reverse_graph = {node: [] for node in nodes}
        self.dependeny_count = {node: 0 for node in nodes}
        self.clusters = []

    def add_edge(self, node_1, node_2):
        """Record that node_1 depends on node_2."""
        self.graph[node_1].append(node_2)
        self.reverse_graph[node_2].append(node_1)
        self.dependeny_count[node_1] += 1

    def build_clusters(self):
        """Group nodes into dependency layers, stored in self.clusters."""
        frontier = [node for node in range(1, self.total_nodes + 1)
                    if self.dependeny_count[node] == 0]
        while frontier:
            self.clusters.append(frontier)
            successors = []
            for resolved in frontier:
                for waiting in self.reverse_graph[resolved]:
                    self.dependeny_count[waiting] -= 1
                    if self.dependeny_count[waiting] == 0:
                        successors.append(waiting)
            frontier = successors
|
import new.setup.environment as env |
from __future__ import division
from __future__ import print_function
from sarclf import train
from sarclf import test
import argparse
def main():
    """Provide the command line interface for the MLPH SAR classifier.

    Parses CLI flags, builds or loads the training data, trains or loads
    the SVM, then optionally runs the pixel-classification test and
    classifies a user-supplied SAR image.

    :raises ValueError: If passed argument h is an even number.
    """
    # Defines CLI arguments and parser
    parser = argparse.ArgumentParser(
        description='IP-mlph-sar_clf-project cmd line options')
    parser.add_argument('-run_mlph',
                        help='0/1: whether to make training data from scratch',
                        type=int, default=0)
    parser.add_argument('-train_svm',
                        help='0/1: whether to make train SVM from scratch',
                        type=int, default=0)
    parser.add_argument('-modified',
                        help='0/1: whether to run modified MLPH algorithm',
                        type=int, default=0)
    parser.add_argument('-h_param', help='value for window size h', type=int,
                        default=5)
    parser.add_argument('-test', help='whether to run test clf on image pixels',
                        type=int, default=1)
    parser.add_argument('-clfimg', help='path of sar image to classify',
                        type=str, default='')
    # Parses passed arguments
    args = vars(parser.parse_args())
    run_mlph = bool(args['run_mlph'])
    train_svm = bool(args['train_svm'])
    modified_mlph = bool(args['modified'])
    h = args['h_param']
    test_pixel_clf = bool(args['test'])
    image_filename = args['clfimg']
    if h % 2 != 1:
        # Fixed typo in the user-facing message ("must me" -> "must be").
        raise ValueError('h must be an odd integer.')
    # Create test train split datasets
    if run_mlph:
        X_train, X_test, y_train, y_test = train.run_mlph(
            modified=modified_mlph, h=h)
    else:
        X_train, X_test, y_train, y_test = train.load_training_data(
            modified=modified_mlph)
    if train_svm:
        clf = train.train_svm(X_train=X_train, y_train=y_train,
                              modified=modified_mlph)
    else:
        clf = train.load_svm(modified=modified_mlph)
    if test_pixel_clf:
        test.test_pixel_classification(X_test=X_test, y_test=y_test, clf=clf)
    if image_filename != '':
        test.classify_image(image_path=image_filename, modified=modified_mlph,
                            h=h, clf=clf)
if __name__ == '__main__':
    main()
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from Utilis import init_weights, init_weights_orthogonal_normal, l2_regularisation
from torch.distributions import Normal, Independent, kl
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class UNet_CMs(nn.Module):
    """ Proposed method containing a segmentation network and a confusion matrix network.
    The segmentation network is U-net. The confusion matrix network is defined in cm_layers.

    forward(x) returns a tuple (y, y_noisy):
        y       -- segmentation logits, shape (b, class_no, h, w)
        y_noisy -- list with one confusion-matrix tensor per annotator,
                   each produced by cm_layers (or low_rank_cm_layers).
    """
    def __init__(self, in_ch, width, depth, class_no, norm='in', low_rank=False):
        #
        # ===============================================================================
        # in_ch: dimension of input
        # class_no: number of output class
        # width: number of channels in the first encoder
        # depth: down-sampling stages - 1
        # norm: normalisation keyword forwarded to double_conv ('in'/'bn'/'ln'/'gn')
        # low_rank: if True, use the rank-1 low-rank confusion-matrix head
        # ===============================================================================
        super(UNet_CMs, self).__init__()
        #
        self.depth = depth
        # Number of annotators is hard-coded to 4 -- TODO confirm against dataset.
        self.noisy_labels_no = 4
        self.lowrank = low_rank
        #
        self.final_in = class_no
        #
        self.decoders = nn.ModuleList()
        self.encoders = nn.ModuleList()
        self.decoders_noisy_layers = nn.ModuleList()
        # Channel widths double at each encoder stage except the bottleneck,
        # which keeps its width constant.
        for i in range(self.depth):
            if i == 0:
                #
                self.encoders.append(double_conv(in_channels=in_ch, out_channels=width, step=1, norm=norm))
                self.decoders.append(double_conv(in_channels=width*2, out_channels=width, step=1, norm=norm))
                #
            elif i < (self.depth - 1):
                #
                self.encoders.append(double_conv(in_channels=width*(2**(i - 1)), out_channels=width*(2**i), step=2, norm=norm))
                self.decoders.append(double_conv(in_channels=width*(2**(i + 1)), out_channels=width*(2**(i - 1)), step=1, norm=norm))
                #
            else:
                # Bottleneck: down-samples but keeps the channel count.
                self.encoders.append(double_conv(in_channels=width*(2**(i-1)), out_channels=width*(2**(i-1)), step=2, norm=norm))
                self.decoders.append(double_conv(in_channels=width*(2**i), out_channels=width*(2**(i - 1)), step=1, norm=norm))
                #
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.conv_last = nn.Conv2d(width, self.final_in, 1, bias=True)
        # One confusion-matrix head per annotator, sharing the decoder features.
        for i in range(self.noisy_labels_no):
            #
            if self.lowrank is False:
                self.decoders_noisy_layers.append(cm_layers(in_channels=width, norm=norm, class_no=self.final_in))
            else:
                self.decoders_noisy_layers.append(low_rank_cm_layers(in_channels=width, norm=norm, class_no=self.final_in, rank=1))
    def forward(self, x):
        #
        y = x
        #
        encoder_features = []
        y_noisy = []
        # Contracting path: keep every stage's output for the skip connections.
        for i in range(len(self.encoders)):
            #
            y = self.encoders[i](y)
            encoder_features.append(y)
        # Expanding path: upsample, pad to the skip connection, concat, decode.
        for i in range(len(encoder_features)):
            #
            y = self.upsample(y)
            y_e = encoder_features[-(i+1)]
            #
            if y_e.shape[2] != y.shape[2]:
                # Odd input sizes leave a 1-pixel mismatch after upsampling.
                diffY = torch.tensor([y_e.size()[2] - y.size()[2]])
                diffX = torch.tensor([y_e.size()[3] - y.size()[3]])
                #
                y = F.pad(y, [diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2])
            #
            y = torch.cat([y_e, y], dim=1)
            #
            y = self.decoders[-(i+1)](y)
        # Each annotator head consumes the final decoder feature map.
        for i in range(self.noisy_labels_no):
            #
            y_noisy_label = self.decoders_noisy_layers[i](y)
            y_noisy.append(y_noisy_label)
        #
        y = self.conv_last(y)
        #
        return y, y_noisy
class UNet_GlobalCMs(nn.Module):
    """ Baseline with trainable global confusion matrices.
    Each annotator is modelled through a class_no x class_no matrix, fixed for all images.

    forward(x) returns (y, y_noisy): segmentation logits plus one broadcast
    global confusion-matrix tensor per annotator (see gcm_layers).
    """
    def __init__(self, in_ch, width, depth, class_no, input_height, input_width, norm='in'):
        # ===============================================================================
        # in_ch: dimension of input
        # class_no: number of output class
        # width: number of channels in the first encoder
        # depth: down-sampling stages - 1
        # norm: normalisation keyword forwarded to double_conv
        # input_height: Height of the input image
        # input_width: Width of the input image
        # ===============================================================================
        super(UNet_GlobalCMs, self).__init__()
        #
        self.depth = depth
        # Number of annotators is hard-coded to 4 -- TODO confirm against dataset.
        self.noisy_labels_no = 4
        self.final_in = class_no
        #
        self.decoders = nn.ModuleList()
        self.encoders = nn.ModuleList()
        # Same encoder/decoder ladder as UNet_CMs: widths double per stage,
        # the bottleneck keeps its channel count.
        for i in range(self.depth):
            if i == 0:
                #
                self.encoders.append(double_conv(in_channels=in_ch, out_channels=width, step=1, norm=norm))
                self.decoders.append(double_conv(in_channels=width*2, out_channels=width, step=1, norm=norm))
                #
            elif i < (self.depth - 1):
                #
                self.encoders.append(double_conv(in_channels=width*(2**(i - 1)), out_channels=width*(2**i), step=2, norm=norm))
                self.decoders.append(double_conv(in_channels=width*(2**(i + 1)), out_channels=width*(2**(i - 1)), step=1, norm=norm))
                #
            else:
                #
                self.encoders.append(double_conv(in_channels=width*(2**(i-1)), out_channels=width*(2**(i-1)), step=2, norm=norm))
                self.decoders.append(double_conv(in_channels=width*(2**i), out_channels=width*(2**(i - 1)), step=1, norm=norm))
                #
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.conv_last = nn.Conv2d(width, self.final_in, 1, bias=True)
        # Define a list of global confusion matrices (one per annotator);
        # registered in a ModuleList so their parameters are trained.
        self.decoders_noisy_layers = nn.ModuleList()
        for i in range(self.noisy_labels_no):
            self.decoders_noisy_layers.append(gcm_layers(class_no, input_height, input_width))
    def forward(self, x):
        #
        y = x
        #
        encoder_features = []
        y_noisy = []
        # Contracting path with saved skip connections.
        for i in range(len(self.encoders)):
            #
            y = self.encoders[i](y)
            encoder_features.append(y)
        # Expanding path: upsample, pad if needed, concat, decode.
        for i in range(len(encoder_features)):
            #
            y = self.upsample(y)
            y_e = encoder_features[-(i+1)]
            #
            if y_e.shape[2] != y.shape[2]:
                diffY = torch.tensor([y_e.size()[2] - y.size()[2]])
                diffX = torch.tensor([y_e.size()[3] - y.size()[3]])
                #
                y = F.pad(y, [diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2])
            #
            y = torch.cat([y_e, y], dim=1)
            #
            y = self.decoders[-(i+1)](y)
        # Return the confusion matrices: gcm_layers only reads the batch size
        # from x, so the raw input is passed rather than decoder features.
        for i in range(self.noisy_labels_no):
            y_noisy.append(self.decoders_noisy_layers[i](x))
        #
        y = self.conv_last(y)
        #
        return y, y_noisy
class cm_layers(nn.Module):
    """Annotator network modelling a pixel-wise confusion matrix.

    Shares the semantic feature space with the segmentation network and
    maps it to a (b, class_no**2, h, w) tensor whose entries are made
    positive by a softplus.
    """
    def __init__(self, in_channels, norm, class_no):
        super(cm_layers, self).__init__()
        self.conv_1 = double_conv(in_channels=in_channels, out_channels=in_channels, norm=norm, step=1)
        self.conv_2 = double_conv(in_channels=in_channels, out_channels=in_channels, norm=norm, step=1)
        self.conv_last = nn.Conv2d(in_channels, class_no**2, 1, bias=True)
        self.relu = nn.Softplus()

    def forward(self, x):
        features = self.conv_1(x)
        features = self.conv_2(features)
        logits = self.conv_last(features)
        return self.relu(logits)
class gcm_layers(nn.Module):
    """Global (image-independent) confusion matrix layer.

    Holds one learnable (class_no x class_no) matrix, initialised to the
    identity, and broadcasts it to (b, c, c, h, w) so it stays
    shape-compatible with the pixel-wise cm_layers output. Entries are
    made positive with a softplus.
    """
    def __init__(self, class_no, input_height, input_width):
        super(gcm_layers, self).__init__()
        self.class_no = class_no
        self.input_height = input_height
        self.input_width = input_width
        self.global_weights = nn.Parameter(torch.eye(class_no))
        self.relu = nn.Softplus()

    def forward(self, x):
        # Only the batch size is read from x; the matrix itself is global.
        batch_size = x.size(0)
        weights = self.global_weights.unsqueeze(0).unsqueeze(3).unsqueeze(4)
        weights = weights.repeat(batch_size, 1, 1, self.input_height, self.input_width)
        return self.relu(weights)
class low_rank_cm_layers(nn.Module):
    """ This class defines the low-rank version of the annotator network, which models the confusion matrix at low-rank approximation.
    Essentially, it shares the semantic features with the segmentation network, but the output of the annotator network
    has rank * class_no * 2 + 1 channels (two rank-by-class factors plus one
    extra scaling channel), made positive by a softplus.
    """
    def __init__(self, in_channels, norm, class_no, rank):
        super(low_rank_cm_layers, self).__init__()
        self.conv_1 = double_conv(in_channels=in_channels, out_channels=in_channels, norm=norm, step=1)
        self.conv_2 = double_conv(in_channels=in_channels, out_channels=in_channels, norm=norm, step=1)
        # Simplified: the original branched on rank == 1, but both branches
        # built the exact same layer (rank*class_no*2 + 1 output channels).
        self.conv_last = nn.Conv2d(in_channels, rank * class_no * 2 + 1, 1, bias=True)
        self.relu = nn.Softplus()
    def forward(self, x):
        y = self.relu(self.conv_last(self.conv_2(self.conv_1(x))))
        return y
# =========================
# U-net:
# =========================
class conv_block(nn.Module):
    """Two 3x3 convolutions, each followed by normalisation and PReLU.

    The first convolution uses stride ``step``, so the block also performs
    the down-sampling when step > 1.

    :param in_channels: input feature channels.
    :param out_channels: output feature channels (both convs emit this many).
    :param step: stride of the first convolution.
    :param norm: one of 'bn', 'in', 'ln', 'gn'.
    :raises ValueError: for an unknown norm keyword.
    """
    def __init__(self, in_channels, out_channels, step, norm):
        super(conv_block, self).__init__()
        #
        self.conv_1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=step, padding=1, bias=False)
        self.conv_2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.activation_1 = nn.PReLU()
        self.activation_2 = nn.PReLU()
        #
        if norm == 'bn':
            self.smooth_1 = nn.BatchNorm2d(out_channels, affine=True)
            self.smooth_2 = nn.BatchNorm2d(out_channels, affine=True)
        elif norm == 'in':
            self.smooth_1 = nn.InstanceNorm2d(out_channels, affine=True)
            self.smooth_2 = nn.InstanceNorm2d(out_channels, affine=True)
        elif norm == 'ln':
            # GroupNorm with one group per channel == layer-norm style per-channel norm.
            self.smooth_1 = nn.GroupNorm(out_channels, out_channels, affine=True)
            self.smooth_2 = nn.GroupNorm(out_channels, out_channels, affine=True)
        elif norm == 'gn':
            self.smooth_1 = nn.GroupNorm(out_channels // 8, out_channels, affine=True)
            self.smooth_2 = nn.GroupNorm(out_channels // 8, out_channels, affine=True)
        else:
            # Previously an unknown norm left smooth_1/smooth_2 undefined and
            # only failed later, inside forward(), with an AttributeError.
            raise ValueError("norm must be one of 'bn', 'in', 'ln', 'gn', got %r" % (norm,))
    def forward(self, inputs):
        output = self.activation_1(self.smooth_1(self.conv_1(inputs)))
        output = self.activation_2(self.smooth_2(self.conv_2(output)))
        return output
def double_conv(in_channels, out_channels, step, norm):
    """Build a (conv -> norm -> PReLU) x 2 block as an nn.Sequential.

    :param in_channels: dimension of input.
    :param out_channels: dimension of output (both convs emit this many).
    :param step: stride of the first conv (downsampling factor).
    :param norm: one of 'in', 'bn', 'ln', 'gn'.
    :raises ValueError: for an unknown norm keyword (the original silently
        returned None, which only failed later at call time).
    """
    # Factory per norm keyword replaces four near-identical Sequential bodies.
    norm_factories = {
        'in': lambda ch: nn.InstanceNorm2d(ch, affine=True),
        'bn': lambda ch: nn.BatchNorm2d(ch, affine=True),
        'ln': lambda ch: nn.GroupNorm(ch, ch, affine=True),
        'gn': lambda ch: nn.GroupNorm(ch // 8, ch, affine=True),
    }
    if norm not in norm_factories:
        raise ValueError("norm must be one of 'in', 'bn', 'ln', 'gn', got %r" % (norm,))
    make_norm = norm_factories[norm]
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, 3, stride=step, padding=1, groups=1, bias=False),
        make_norm(out_channels),
        nn.PReLU(),
        nn.Conv2d(out_channels, out_channels, 3, stride=1, padding=1, groups=1, bias=False),
        make_norm(out_channels),
        nn.PReLU()
    )
class UNet(nn.Module):
    #
    def __init__(self, in_ch, width, depth, class_no, norm, dropout=False, apply_last_layer=True):
        """
        Args:
            in_ch: dimension (channels) of the input
            width: number of channels in the first encoder
            depth: down-sampling stages - 1
            class_no: number of output classes; binary tasks emit 1 channel
            norm: normalisation keyword forwarded to double_conv ('in'/'bn'/'ln'/'gn')
            dropout: if True, apply Dropout2d(0.4) after every decoder stage
            apply_last_layer: if False, skip the final 1x1 classification conv
                and return raw decoder features
        """
        # ============================================================================================================
        # This UNet is our own implementation, it is an enhanced version of the original UNet proposed at MICCAI 2015.
        # ============================================================================================================
        super(UNet, self).__init__()
        #
        self.apply_last_layer = apply_last_layer
        self.depth = depth
        self.dropout = dropout
        # Binary segmentation is represented with a single output channel.
        if class_no > 2:
            #
            self.final_in = class_no
        else:
            #
            self.final_in = 1
        #
        self.decoders = nn.ModuleList()
        self.encoders = nn.ModuleList()
        #
        if self.dropout is True:
            self.dropout_layers = nn.ModuleList()
        # Channel widths double per stage; the bottleneck keeps its width.
        for i in range(self.depth):
            if self.dropout is True:
                self.dropout_layers.append(nn.Dropout2d(0.4))
            if i == 0:
                #
                self.encoders.append(double_conv(in_channels=in_ch, out_channels=width, step=1, norm=norm))
                self.decoders.append(double_conv(in_channels=width*2, out_channels=width, step=1, norm=norm))
                #
            elif i < (self.depth - 1):
                #
                self.encoders.append(double_conv(in_channels=width*(2**(i - 1)), out_channels=width*(2**i), step=2, norm=norm))
                self.decoders.append(double_conv(in_channels=width*(2**(i + 1)), out_channels=width*(2**(i - 1)), step=1, norm=norm))
                #
            else:
                #
                self.encoders.append(double_conv(in_channels=width*(2**(i-1)), out_channels=width*(2**(i-1)), step=2, norm=norm))
                self.decoders.append(double_conv(in_channels=width*(2**i), out_channels=width*(2**(i - 1)), step=1, norm=norm))
                #
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.conv_last = nn.Conv2d(width, self.final_in, 1, bias=True)
        #
    def forward(self, x):
        #
        y = x
        # Contracting path: keep each stage's output for the skip connections.
        encoder_features = []
        #
        for i in range(len(self.encoders)):
            #
            y = self.encoders[i](y)
            encoder_features.append(y)
        # Expanding path: upsample, pad to the skip connection, concat, decode.
        for i in range(len(encoder_features)):
            #
            y = self.upsample(y)
            y_e = encoder_features[-(i+1)]
            # Odd input sizes leave a 1-pixel mismatch after upsampling.
            diffY = torch.tensor([y_e.size()[2] - y.size()[2]])
            diffX = torch.tensor([y_e.size()[3] - y.size()[3]])
            #
            y = F.pad(y, [diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2])
            #
            y = torch.cat([y_e, y], dim=1)
            y = self.decoders[-(i+1)](y)
            # One dropout layer per depth stage, indexed by the decoder loop.
            if self.dropout is True:
                #
                y = self.dropout_layers[i](y)
        #
        if self.apply_last_layer is True:
            y = self.conv_last(y)
        return y
# ===============================
# Probablistic U-net
# ===============================
class Encoder(nn.Module):
    """
    A convolutional neural network, consisting of len(num_filters) times a block of no_convs_per_block convolutional layers,
    after each block a pooling operation is performed. And after each convolutional layer a non-linear (ReLU) activation function is applied.

    NOTE(review): ``initializers`` is accepted but unused here; weights are
    initialised with ``init_weights`` from Utilis.
    """
    def __init__(self, input_channels, num_filters, no_convs_per_block, initializers, padding=True, posterior=False):
        super(Encoder, self).__init__()
        self.contracting_path = nn.ModuleList()
        self.input_channels = input_channels
        self.num_filters = num_filters
        if posterior:
            # To accomodate for the mask that is concatenated at the channel axis, we increase the input_channels.
            self.input_channels += 1
        layers = []
        for i in range(len(self.num_filters)):
            """
            Determine input_dim and output_dim of conv layers in this block. The first layer is input x output,
            All the subsequent layers are output x output.
            """
            input_dim = self.input_channels if i == 0 else output_dim
            output_dim = num_filters[i]
            if i != 0:
                # Down-sample between blocks; ceil_mode preserves odd sizes.
                layers.append(nn.AvgPool2d(kernel_size=2, stride=2, padding=0, ceil_mode=True))
            layers.append(nn.Conv2d(input_dim, output_dim, kernel_size=3, padding=int(padding), bias=False))
            layers.append(nn.InstanceNorm2d(output_dim, affine=True))
            layers.append(nn.ReLU(inplace=True))
            # Remaining convs of the block keep the channel count constant.
            for _ in range(no_convs_per_block - 1):
                layers.append(nn.Conv2d(output_dim, output_dim, kernel_size=3, padding=int(padding)))
                layers.append(nn.ReLU(inplace=True))
        self.layers = nn.Sequential(*layers)
        self.layers.apply(init_weights)
    def forward(self, input):
        output = self.layers(input)
        return output
class AxisAlignedConvGaussian(nn.Module):
    """
    A convolutional net that parametrizes a Gaussian distribution with axis aligned covariance matrix.

    forward() returns a torch.distributions Independent(Normal) over the
    latent space; with ``posterior=True`` the segmentation mask is
    concatenated to the input channels.
    """
    def __init__(self, input_channels, num_filters, no_convs_per_block, latent_dim, initializers, posterior=False):
        super(AxisAlignedConvGaussian, self).__init__()
        self.input_channels = input_channels
        self.channel_axis = 1
        self.num_filters = num_filters
        self.no_convs_per_block = no_convs_per_block
        self.latent_dim = latent_dim
        self.posterior = posterior
        if self.posterior:
            self.name = 'Posterior'
        else:
            self.name = 'Prior'
        self.encoder = Encoder(self.input_channels, self.num_filters, self.no_convs_per_block, initializers, posterior=self.posterior)
        # 2 * latent_dim output channels: first half mu, second half log_sigma.
        self.conv_layer = nn.Conv2d(num_filters[-1], 2 * self.latent_dim, (1, 1), stride=1)
        # Debug/introspection holders, overwritten on each forward pass.
        self.show_img = 0
        self.show_seg = 0
        self.show_concat = 0
        self.show_enc = 0
        self.sum_input = 0
        nn.init.kaiming_normal_(self.conv_layer.weight, mode='fan_in', nonlinearity='relu')
        nn.init.normal_(self.conv_layer.bias)
    def forward(self, input, segm=None):
        # If segmentation is not none, concatenate the mask to the channel axis of the input
        if segm is not None:
            self.show_img = input
            self.show_seg = segm
            input = torch.cat((input, segm), dim=1)
            self.show_concat = input
            self.sum_input = torch.sum(input)
        encoding = self.encoder(input)
        self.show_enc = encoding
        # We only want the mean of the resulting hxw image
        encoding = torch.mean(encoding, dim=2, keepdim=True)
        encoding = torch.mean(encoding, dim=3, keepdim=True)
        # Convert encoding to 2 x latent dim and split up for mu and log_sigma
        mu_log_sigma = self.conv_layer(encoding)
        # We squeeze the second dimension twice, since otherwise it won't work when batch size is equal to 1
        mu_log_sigma = torch.squeeze(mu_log_sigma, dim=2)
        mu_log_sigma = torch.squeeze(mu_log_sigma, dim=2)
        mu = mu_log_sigma[:, :self.latent_dim]
        log_sigma = mu_log_sigma[:, self.latent_dim:]
        # This is a multivariate normal with diagonal covariance matrix sigma
        # https://github.com/pytorch/pytorch/pull/11178
        dist = Independent(Normal(loc=mu, scale=torch.exp(log_sigma)), 1)
        return dist
class Fcomb(nn.Module):
    """
    A function composed of no_convs_fcomb times a 1x1 convolution that combines the sample taken from the latent space,
    and output of the UNet (the feature map) by concatenating them along their channel axis.
    """
    def __init__(self, num_filters, latent_dim, num_output_channels, num_classes, no_convs_fcomb, initializers, use_tile=True):
        super(Fcomb, self).__init__()
        self.num_channels = num_output_channels
        self.num_classes = num_classes
        self.channel_axis = 1
        self.spatial_axes = [2, 3]
        self.num_filters = num_filters
        self.latent_dim = latent_dim
        self.use_tile = use_tile
        self.no_convs_fcomb = no_convs_fcomb
        self.name = 'Fcomb'
        # Binary segmentation is emitted as a single logit channel.
        if self.num_classes == 2:
            self.num_classes = 1
        if self.use_tile:
            layers = []
            # Decoder of N x a 1x1 convolution followed by a ReLU activation function except for the last layer
            layers.append(nn.Conv2d(self.num_filters[0] + self.latent_dim, self.num_filters[0], kernel_size=1))
            layers.append(nn.ReLU(inplace=True))
            for _ in range(no_convs_fcomb - 2):
                layers.append(nn.Conv2d(self.num_filters[0], self.num_filters[0], kernel_size=1))
                layers.append(nn.ReLU(inplace=True))
            self.layers = nn.Sequential(*layers)
            self.last_layer = nn.Conv2d(self.num_filters[0], self.num_classes, kernel_size=1)
            if initializers['w'] == 'orthogonal':
                self.layers.apply(init_weights_orthogonal_normal)
                self.last_layer.apply(init_weights_orthogonal_normal)
            else:
                self.layers.apply(init_weights)
                self.last_layer.apply(init_weights)
    def tile(self, a, dim, n_tile):
        """
        This function is taken form PyTorch forum and mimics the behavior of tf.tile.
        Source: https://discuss.pytorch.org/t/how-to-tile-a-tensor/13853/3
        """
        init_dim = a.size(dim)
        repeat_idx = [1] * a.dim()
        repeat_idx[dim] = n_tile
        a = a.repeat(*(repeat_idx))
        # BUG FIX: the original built order_index with np.arange/np.concatenate,
        # but numpy is never imported in this module, so tile() raised a
        # NameError at runtime.  The same interleaved index [0, d, 2d, ..., 1,
        # d+1, ...] is now built with torch primitives.
        order_index = torch.cat(
            [init_dim * torch.arange(n_tile, dtype=torch.long) + i for i in range(init_dim)]
        ).to(device)
        return torch.index_select(a, dim, order_index)
    def forward(self, feature_map, z):
        """
        Z is batch_sizexlatent_dim and feature_map is batch_sizexno_channelsxHxW.
        So broadcast Z to batch_sizexlatent_dimxHxW. Behavior is exactly the same as tf.tile (verified)
        """
        if self.use_tile:
            z = torch.unsqueeze(z, 2)
            z = self.tile(z, 2, feature_map.shape[self.spatial_axes[0]])
            z = torch.unsqueeze(z, 3)
            z = self.tile(z, 3, feature_map.shape[self.spatial_axes[1]])
            # Concatenate the feature map (output of the UNet) and the sample taken from the latent space
            feature_map = torch.cat((feature_map, z), dim=self.channel_axis)
            output = self.layers(feature_map)
            return self.last_layer(output)
class ProbabilisticUnet(nn.Module):
    """
    A probabilistic UNet (https://arxiv.org/abs/1806.05034) implementation.
    input_channels: the number of channels in the image (1 for greyscale and 3 for RGB)
    num_classes: the number of classes to predict
    num_filters: is a list consisting of the amount of filters per layer
    latent_dim: dimension of the latent space
    no_convs_fcomb: no convs per block in the (convolutional) encoder of prior and posterior
    beta: weight of the KL term in the ELBO
    """
    def __init__(self, input_channels, num_classes, num_filters=[32, 64, 128], latent_dim=6, no_convs_fcomb=4, beta=10.0):
        # NOTE(review): num_filters is a mutable default argument; safe only
        # while it is never mutated, which holds in the visible code.
        super(ProbabilisticUnet, self).__init__()
        self.input_channels = input_channels
        self.num_classes = num_classes
        self.num_filters = num_filters
        self.latent_dim = latent_dim
        self.no_convs_per_block = 2
        self.no_convs_fcomb = no_convs_fcomb
        self.initializers = {'w': 'he_normal', 'b': 'normal'}
        self.beta = beta
        # Placeholder value; replaced by the sampled latent tensor in sample().
        self.z_prior_sample = 4
        self.unet = Unet(self.input_channels, self.num_classes, self.num_filters, self.initializers, apply_last_layer=False, padding=True).to(device)
        self.prior = AxisAlignedConvGaussian(self.input_channels, self.num_filters, self.no_convs_per_block, self.latent_dim, self.initializers, posterior=False).to(device)
        self.posterior = AxisAlignedConvGaussian(self.input_channels, self.num_filters, self.no_convs_per_block, self.latent_dim, self.initializers, posterior=True).to(device)
        self.fcomb = Fcomb(self.num_filters, self.latent_dim, self.input_channels, self.num_classes, self.no_convs_fcomb, {'w': 'orthogonal', 'b': 'normal'}, use_tile=True).to(device)
    def forward(self, patch, segm, training=True):
        """
        Construct prior latent space for patch and run patch through UNet,
        in case training is True also construct posterior latent space.
        Results are stored on self (posterior_latent_space,
        prior_latent_space, unet_features); nothing is returned.
        """
        if training:
            self.posterior_latent_space = self.posterior.forward(patch, segm)
        self.prior_latent_space = self.prior.forward(patch)
        # Second argument is the `val` flag of Unet.forward (no activation saving).
        self.unet_features = self.unet.forward(patch, False)
    def sample(self, testing=False):
        """
        Sample a segmentation by reconstructing from a prior sample
        and combining this with UNet features
        """
        if testing == False:
            z_prior = self.prior_latent_space.rsample()
            self.z_prior_sample = z_prior
        else:
            # You can choose whether you mean a sample or the mean here. For the GED it is important to take a sample.
            # z_prior = self.prior_latent_space.base_dist.loc
            z_prior = self.prior_latent_space.sample()
            self.z_prior_sample = z_prior
        return self.fcomb.forward(self.unet_features, z_prior)
    def reconstruct(self, use_posterior_mean=False, calculate_posterior=False, z_posterior=None):
        """
        Reconstruct a segmentation from a posterior sample (decoding a posterior sample) and UNet feature map
        use_posterior_mean: use posterior_mean instead of sampling z_q
        calculate_posterior: use a provided sample or sample from posterior latent space
        """
        if use_posterior_mean:
            z_posterior = self.posterior_latent_space.loc
        else:
            if calculate_posterior:
                z_posterior = self.posterior_latent_space.rsample()
        return self.fcomb.forward(self.unet_features, z_posterior)
    def kl_divergence(self, analytic=True, calculate_posterior=False, z_posterior=None):
        """
        Calculate the KL divergence between the posterior and prior KL(Q||P)
        analytic: calculate KL analytically or via sampling from the posterior
        calculate_posterior: if we use sampling to approximate KL we can sample here or supply a sample
        """
        if analytic:
            # Neeed to add this to torch source code, see: https://github.com/pytorch/pytorch/issues/13545
            kl_div = kl.kl_divergence(self.posterior_latent_space, self.prior_latent_space)
        else:
            if calculate_posterior:
                z_posterior = self.posterior_latent_space.rsample()
            log_posterior_prob = self.posterior_latent_space.log_prob(z_posterior)
            log_prior_prob = self.prior_latent_space.log_prob(z_posterior)
            kl_div = log_posterior_prob - log_prior_prob
        return kl_div
    def elbo(self, segm, analytic_kl=True, reconstruct_posterior_mean=False):
        """
        Calculate the evidence lower bound of the log-likelihood of P(Y|X).
        Returns (-(recon_loss + beta*kl), recon_loss, beta*kl); also stores
        kl, reconstruction and the loss terms on self for inspection.
        """
        criterion = nn.BCEWithLogitsLoss(reduction='mean')
        z_posterior = self.posterior_latent_space.rsample()
        self.kl = torch.mean(self.kl_divergence(analytic=analytic_kl, calculate_posterior=False, z_posterior=z_posterior))
        # Here we use the posterior sample sampled above
        self.reconstruction = self.reconstruct(use_posterior_mean=reconstruct_posterior_mean, calculate_posterior=False, z_posterior=z_posterior)
        reconstruction_loss = criterion(input=self.reconstruction, target=segm)
        self.reconstruction_loss = torch.sum(reconstruction_loss)
        self.mean_reconstruction_loss = torch.mean(reconstruction_loss)
        return -(self.reconstruction_loss + self.beta * self.kl), self.reconstruction_loss, self.beta * self.kl
class Unet(nn.Module):
    """
    A UNet (https://arxiv.org/abs/1505.04597) implementation.
    input_channels: the number of channels in the image (1 for greyscale and 3 for RGB)
    num_classes: the number of classes to predict
    num_filters: list with the amount of filters per layer
    apply_last_layer: boolean to apply last layer or not (not used in Probabilistic UNet)
    padding: Boolean, if true we pad the images with 1 so that we keep the same dimensions
    """
    def __init__(self, input_channels, num_classes, num_filters, initializers, apply_last_layer=True, padding=True):
        super(Unet, self).__init__()
        self.input_channels = input_channels
        self.num_classes = num_classes
        self.num_filters = num_filters
        self.padding = padding
        # Decoder activations saved here when forward(..., val=True).
        self.activation_maps = []
        self.apply_last_layer = apply_last_layer
        self.contracting_path = nn.ModuleList()
        # Down path: first block keeps resolution, later blocks pool first.
        for i in range(len(self.num_filters)):
            input = self.input_channels if i == 0 else output
            output = self.num_filters[i]
            if i == 0:
                pool = False
            else:
                pool = True
            self.contracting_path.append(DownConvBlock(input, output, initializers, padding, pool=pool))
        self.upsampling_path = nn.ModuleList()
        # Up path mirrors the down path, consuming the skip connections.
        n = len(self.num_filters) - 2
        for i in range(n, -1, -1):
            input = output + self.num_filters[i]
            output = self.num_filters[i]
            self.upsampling_path.append(UpConvBlock(input, output, initializers, padding))
        if self.apply_last_layer:
            self.last_layer = nn.Conv2d(output, num_classes, kernel_size=1)
            # nn.init.kaiming_normal_(self.last_layer.weight, mode='fan_in',nonlinearity='relu')
            # nn.init.normal_(self.last_layer.bias)
    def forward(self, x, val):
        # val: when True, the final decoder activation is recorded in
        # self.activation_maps for later inspection/plotting.
        blocks = []
        for i, down in enumerate(self.contracting_path):
            x = down(x)
            if i != len(self.contracting_path) - 1:
                blocks.append(x)
        for i, up in enumerate(self.upsampling_path):
            x = up(x, blocks[-i - 1])
        del blocks
        # Used for saving the activations and plotting
        if val:
            self.activation_maps.append(x)
        if self.apply_last_layer:
            x = self.last_layer(x)
        return x
class DownConvBlock(nn.Module):
    """Two conv/instance-norm/ReLU layers, optionally preceded by pooling.

    When ``pool`` is True the block first halves the spatial resolution
    with a 2x2 average pool (ceil mode).  ``initializers`` is accepted for
    interface compatibility; weights are initialised with init_weights.
    """
    def __init__(self, input_dim, output_dim, initializers, padding, pool=True):
        super(DownConvBlock, self).__init__()
        modules = []
        if pool:
            modules.append(nn.AvgPool2d(kernel_size=2, stride=2, padding=0, ceil_mode=True))
        # First conv maps input_dim -> output_dim, second keeps output_dim.
        for dim_in in (input_dim, output_dim):
            modules.append(nn.Conv2d(dim_in, output_dim, kernel_size=3, stride=1, padding=int(padding), bias=False))
            modules.append(nn.InstanceNorm2d(output_dim, affine=True))
            modules.append(nn.ReLU(inplace=True))
        self.layers = nn.Sequential(*modules)
        self.layers.apply(init_weights)

    def forward(self, patch):
        return self.layers(patch)
class UpConvBlock(nn.Module):
    """
    A block consisting of an upsampling layer followed by a convolutional block
    (DownConvBlock with pooling disabled) that reduces the channel count.
    If bilinear is set to False, a transposed convolution is used instead of
    bilinear interpolation.
    """

    def __init__(self, input_dim, output_dim, initializers, padding, bilinear=True):
        super(UpConvBlock, self).__init__()
        self.bilinear = bilinear
        if not self.bilinear:
            self.upconv_layer = nn.ConvTranspose2d(input_dim, output_dim, kernel_size=2, stride=2)
            self.upconv_layer.apply(init_weights)
        self.conv_block = DownConvBlock(input_dim, output_dim, initializers, padding, pool=False)

    def forward(self, x, bridge):
        """Upsample ``x``, pad it to match ``bridge``, concatenate, convolve."""
        if self.bilinear:
            up = nn.functional.interpolate(x, mode='bilinear', scale_factor=2, align_corners=True)
        else:
            up = self.upconv_layer(x)
        # BUGFIX: the original only compared dim 3 (width), so a height-only
        # mismatch skipped padding and made torch.cat below fail. Check both
        # spatial dims. The diffs are plain ints; no tensor wrapping needed.
        if up.shape[2] != bridge.shape[2] or up.shape[3] != bridge.shape[3]:
            diff_y = bridge.size()[2] - up.size()[2]
            diff_x = bridge.size()[3] - up.size()[3]
            up = F.pad(up, [diff_x // 2, diff_x - diff_x // 2,
                            diff_y // 2, diff_y - diff_y // 2])
        out = torch.cat([up, bridge], 1)
        out = self.conv_block(out)
        return out
# def global_cm_layers(class_no, height, width):
# """ Define (unnormalised) global confusion matrix model.
#
# This function defines an image-level (not pixel wise) global confusion matrix for each annotator.
# Currently, it first defines a class_no x class_no confusion matrix, and then copy this over to all
# pixels, so this function can be more readily integrated into the existing pipeline.
#
# Args:
# width (int): width of the image
# height (int): height of the image
# class_no (int): number of classes
#
# Returns:
# confusion_matrix (parameter tensor): unnormalised confusion matrix of size (1, c, c, h, w).
# The elements are ensured to be positive via a softplus function, but not normalised.
#
# """
# # Define global confusion matrix: (1, c, c, 1, 1)
# weights = nn.Parameter(torch.randn(1, class_no, class_no, 1, 1))
#
# # Broadcast to shape (1, c, c, h, w) by adding a zero tensor.
# confusion_matrix = torch.zeros(1, class_no, class_no, height, width) + F.softplus(weights)
# return confusion_matrix |
#!/usr/bin/env python3
import sys
import os
import copy
from unittest import TestCase
sys.path.append(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "../utils"))
from config import config
config["datasource"] = "MySQL"
from cluster_resource import ClusterResource
from job_manager import discount_cluster_resource, \
get_cluster_schedulable as get_cluster_schedulable_from_reserved, \
mark_schedulable_non_preemptable_jobs
def get_cluster_schedulable_from_unschedulable(cluster_status):
    """Compute the schedulable cluster resource as capacity minus the
    resources sitting on unschedulable nodes, then apply the standard
    discount.

    Args:
        cluster_status: dict with "*_capacity" and "*_unschedulable"
            entries for cpu, memory and gpu.

    Returns:
        Discounted ClusterResource available for scheduling.
    """
    def _resource(suffix):
        # Build a ClusterResource from the cpu/memory/gpu entries that
        # share the given key suffix.
        return ClusterResource(params={
            "cpu": cluster_status["cpu_" + suffix],
            "memory": cluster_status["memory_" + suffix],
            "gpu": cluster_status["gpu_" + suffix],
        })

    schedulable = _resource("capacity") - _resource("unschedulable")
    return discount_cluster_resource(schedulable)
class TestJobManager(TestCase):
    """Tests for the schedulability marking logic in job_manager."""

    def test_mark_schedulable_non_preemptable_gpu_jobs(self):
        """Non-preemptable jobs must be marked against the reserved-based
        schedulable pool, not the unschedulable-based one.

        Fixture: 12 GPUs of capacity; job1 (3 GPUs) sits on a node that is
        unschedulable, job2 (4 GPUs) on a healthy node, job3 (4 GPUs) is
        newly submitted. Subtracting "unschedulable" double-counts job1's
        node and wrongly rejects job3; subtracting "reserved" admits it.
        """
        # job1 is running on an unschedulable node
        job1_info = {
            "job": {
                "vcName": "platform",
                "jobId": "job1",
            },
            "jobId": "job1",
            "job_resource": ClusterResource(
                params={
                    "cpu": {"Standard_ND24rs": 1},
                    "memory": {"Standard_ND24rs": 0},
                    "gpu": {"Standard_ND24rs": 3},
                    "gpu_memory": {"Standard_ND24rs": 0},
                }),
            "preemptionAllowed": False,
            # sort_key encodes priority and submission time; job1/job2 sort
            # before job3 (earlier timestamps).
            "sort_key": "0_0_999899_2020-03-31 08:07:46",
            # "allowed" is the output flag set by the function under test.
            "allowed": False,
        }
        # job2 is running on a good node
        job2_info = {
            "job": {
                "vcName": "platform",
                "jobId": "job2",
            },
            "jobId": "job2",
            "job_resource": ClusterResource(
                params={
                    "cpu": {"Standard_ND24rs": 1},
                    "memory": {"Standard_ND24rs": 0},
                    "gpu": {"Standard_ND24rs": 4},
                    "gpu_memory": {"Standard_ND24rs": 0},
                }),
            "preemptionAllowed": False,
            "sort_key": "0_0_999899_2020-03-31 08:08:49",
            "allowed": False,
        }
        # job3 is submitted just now
        job3_info = {
            "job": {
                "vcName": "platform",
                "jobId": "job3",
            },
            "jobId": "job3",
            "job_resource": ClusterResource(
                params={
                    "cpu": {"Standard_ND24rs": 1},
                    "memory": {"Standard_ND24rs": 0},
                    "gpu": {"Standard_ND24rs": 4},
                    "gpu_memory": {"Standard_ND24rs": 0},
                }),
            "preemptionAllowed": False,
            "sort_key": "0_2_999899_2020-03-31 09:00:10",
            "allowed": False,
        }
        jobs_info = [job1_info, job2_info, job3_info]
        # Cluster-wide totals. NOTE(review): "unschedulable" includes the
        # node job1 runs on, hence it overlaps with job1's usage.
        cluster_status = {
            "gpu_capacity": {"Standard_ND24rs": 12},
            "gpu_reserved": {"Standard_ND24rs": 0},
            "gpu_unschedulable": {"Standard_ND24rs": 4},
            "cpu_capacity": {"Standard_ND24rs": 72},
            "cpu_reserved": {"Standard_ND24rs": 23},
            "cpu_unschedulable": {"Standard_ND24rs": 24},
            "memory_capacity": {"Standard_ND24rs": "1344Gi"},
            "memory_reserved": {"Standard_ND24rs": "448Gi"},
            "memory_unschedulable": {"Standard_ND24rs": "448Gi"},
        }
        cluster_capacity = ClusterResource(
            params={
                "cpu": cluster_status["cpu_capacity"],
                "memory": cluster_status["memory_capacity"],
                "gpu": cluster_status["gpu_capacity"],
            })
        cluster_reserved = ClusterResource(
            params={
                "cpu": cluster_status["cpu_reserved"],
                "memory": cluster_status["memory_reserved"],
                "gpu": cluster_status["gpu_reserved"],
            })
        cluster_unschedulable = ClusterResource(
            params={
                "cpu": cluster_status["cpu_unschedulable"],
                "memory": cluster_status["memory_unschedulable"],
                "gpu": cluster_status["gpu_unschedulable"],
            })
        # VC quota equals full cluster capacity in this fixture.
        vc_capacity = ClusterResource(
            params={
                "cpu": cluster_status["cpu_capacity"],
                "memory": cluster_status["memory_capacity"],
                "gpu": cluster_status["gpu_capacity"],
            })
        # NOTE(review): vc_unschedulable is built from the *_reserved values
        # — presumably intentional for this scenario; confirm with the
        # job_manager semantics.
        vc_unschedulable = ClusterResource(
            params={
                "cpu": cluster_status["cpu_reserved"],
                "memory": cluster_status["memory_reserved"],
                "gpu": cluster_status["gpu_reserved"],
            })
        vc_schedulable = discount_cluster_resource(
            vc_capacity - vc_unschedulable)
        vc_schedulables = {"platform": vc_schedulable}
        # job3 will not but should be scheduled if using
        # cluster_schedulable = cluster_capacity - cluster_unschedulable
        c_schedulable = discount_cluster_resource(
            cluster_capacity - cluster_unschedulable)
        # deepcopy: mark_schedulable_non_preemptable_jobs mutates its inputs.
        jobs_info_list = copy.deepcopy(jobs_info)
        mark_schedulable_non_preemptable_jobs(jobs_info_list,
                                              c_schedulable,
                                              copy.deepcopy(vc_schedulables))
        self.assertTrue(jobs_info_list[0]["allowed"])
        self.assertTrue(jobs_info_list[1]["allowed"])
        self.assertFalse(jobs_info_list[2]["allowed"])
        # job3 will and should be scheduled if using
        # cluster_schedulable = cluster_capacity - cluster_reserved
        c_schedulable = discount_cluster_resource(
            cluster_capacity - cluster_reserved)
        jobs_info_list = copy.deepcopy(jobs_info)
        mark_schedulable_non_preemptable_jobs(jobs_info_list,
                                              c_schedulable,
                                              copy.deepcopy(vc_schedulables))
        self.assertTrue(jobs_info_list[0]["allowed"])
        self.assertTrue(jobs_info_list[1]["allowed"])
        self.assertTrue(jobs_info_list[2]["allowed"])
|
#!/usr/bin/env python3
# Slightly modified version of mooglazer/blockfeed's FalconPunch
# https://github.com/blockfeed/FalconPunch
#
# The only difference is an extra log and a different size used
# in the initial struct ('I' instead of 'q')
#
# Usage: falconpunch.py <ds-ip> <file>
import os, socket, sys, struct

statinfo = os.stat(sys.argv[2])
# 4-byte big-endian file size header expected by the receiver.
fbiinfo = struct.pack('!I', statinfo.st_size)
p = sys.argv[2]
dsip = sys.argv[1]

# with-blocks guarantee the file and socket are closed even on error
# (the original leaked both and shadowed the builtin `file`).
with open(p, "rb") as src, socket.socket() as sock:
    sock.connect((dsip, 5000))
    # sendall instead of send: send() may transmit only part of the buffer.
    sock.sendall(fbiinfo)
    # bytes.hex() replaces the Python-2-only str.encode('hex'),
    # which raises on Python 3 bytes objects.
    print('File size: %d (%s)' % (statinfo.st_size, fbiinfo.hex()))
    while True:
        chunk = src.read(16384)
        if not chunk:
            break  # EOF
        sock.sendall(chunk)
sys.exit()
|
import agent
import random as rand
import string
import time
import sys

if __name__ == "__main__":
    # Tick the agent's life cycle twice per second, forever.
    bot = agent.Agent()
    while True:
        bot.live()
        time.sleep(0.5)
import tsk4
class Flora:
    """A plant species that can be seeded onto a planet."""

    def __init__(self, name, lifespan, habitat, plant_type):
        self.name = name
        self.lifespan = lifespan
        self.habitat = habitat
        self.plant_type = plant_type
        # Plants start at size zero and presumably grow elsewhere.
        self.plant_size = 0

    def ad_flora(self, planet: tsk4.Planet):
        """Register this plant on *planet* when the planet supports flora."""
        if not planet.flora:
            return
        planet.add_flora(self.name, self.plant_type)
class Fauna:
    """Base class for animal species; holds only the species name."""

    def __init__(self, name):
        self.name = name
class Predator(Fauna):
    """A predator species that settles only on wild, uninhabited planets."""

    def __init__(self, name: str, predator_type: str, what_eats: str, lifespan: int):
        super().__init__(name)
        self.predator_type = predator_type
        self.what_eats = what_eats
        self.lifespan = lifespan

    def check_planet(self, planet: tsk4.Planet):
        """Add this predator to *planet* if it has flora, fauna and no humans."""
        habitable = planet.flora and planet.fauna and not planet.humanity
        if habitable:
            planet.add_fauna(self.name, self.predator_type)
class Mammal(Fauna):
    """A herbivorous mammal that needs both grass and wheat on a planet."""

    def __init__(self, name, mammal_type, lifespan):
        super().__init__(name)
        self.mammal_type = mammal_type
        self.lifespan = lifespan

    def check_planet(self, planet: tsk4.Planet):
        """Add this mammal to *planet* when it is habitable and has food.

        BUGFIX: the original condition read ``'grass' and 'wheat' in names``,
        which only tests for 'wheat' because the non-empty string 'grass' is
        truthy. Both food plants are now required.
        """
        # presumably flora_list["name"] is the collection of plant names —
        # TODO confirm against tsk4.Planet.add_flora.
        names = planet.flora_list["name"]
        if (planet.flora and planet.fauna and not planet.humanity
                and 'grass' in names and 'wheat' in names):
            planet.add_fauna(self.name, self.mammal_type)
# Demo: populate the shared tsk4.friendly planet step by step.
# NOTE(review): statement order matters — the shark check runs before any
# flora is added, so its outcome depends on tsk4.friendly's initial state.
shark = Predator('baby shark','predator','all',20)
shark.check_planet(tsk4.friendly)
grass = Flora("grass",20,"mammal","grass")
wheat = Flora("wheat", 20, "mammal", 'wheat')
wheat.ad_flora(tsk4.friendly)
grass.ad_flora(tsk4.friendly)
print(tsk4.friendly.flora_list)
# Mammals require grass and wheat, which were just planted above.
giraffe = Mammal('malwan','earth',20)
giraffe.check_planet(tsk4.friendly)
marti = Mammal('marti','earth',20)
marti.check_planet(tsk4.friendly)
print(tsk4.friendly.__dict__)
|
import pandas as pd
import re
'''
luigi_miner takes a log generated by Luigi looking e.g. like:
2019-09-01 09:29:23,303 DEBUG worker.py:260 - Checking if RootTask(date=2019-09-01_09-29-01, prev_date=2019-08-30_13-45-01, chunk=03) is complete
2019-09-01 09:29:23,306 INFO worker.py:313 - Scheduled RootTask(date=2019-09-01_09-29-01, prev_date=2019-08-30_13-45-01, chunk=03) (PENDING)
2019-09-01 09:29:23,306 DEBUG worker.py:260 - Checking if ChunkTask(date=2019-09-01_09-29-01, prev_date=2019-08-30_13-45-01, chunk=03) is complete
2019-09-01 09:29:23,311 INFO worker.py:313 - Scheduled ChunkTask(date=2019-09-01_09-29-01, prev_date=2019-08-30_13-45-01, chunk=03) (PENDING)
2019-09-01 09:29:23,312 DEBUG worker.py:260 - Checking if UpdateTimelineTask(date=2019-09-01_09-29-01, prev_date=2019-08-30_13-45-01, chunk=03) is complete
2019-09-01 09:29:24,604 INFO worker.py:313 - Scheduled UpdateTimelineTask(date=2019-09-01_09-29-01, prev_date=2019-08-30_13-45-01, chunk=03) (PENDING)
2019-09-01 09:29:24,604 DEBUG worker.py:260 - Checking if ChunkReviewTimelineTask(date=2019-09-01_09-29-01, prev_date=2019-08-30_13-45-01, chunk=02) is complete
2019-09-01 09:29:25,205 INFO worker.py:313 - Scheduled ChunkReviewTimelineTask(date=2019-09-01_09-29-01, prev_date=2019-08-30_13-45-01, chunk=02) (PENDING)
2019-09-01 09:29:25,205 DEBUG worker.py:260 - Checking if ReviewDedupTask(date=2019-09-01_09-29-01, prev_date=2019-08-30_13-45-01, chunk=02) is complete
and delivers a csv with the following columns:
'timestamp','number','mode','line_in_code','task','pid','state','message'
E.g.:
0,2019-09-01 09:29:23,303,DEBUG,worker.py:260 ,,,,
"Checking if RootTask(date=2019-09-01_09-29-01, prev_date=2019-08-30_13-45-01, chunk=03) is complete
"
1,2019-09-01 09:42:15,089,INFO,worker.py:58 ,
"DumpTask(date=2019-09-01_09-29-01, prev_date=2019-08-30_13-45-01, chunk=prep, sql_filename=daily_urls,
kvs=None, target_filename=None, db_host=host.com, db_port=6666, db_user=user, db_name=my_db)
",37714.0,start,"[pid 37714] Worker Worker(salt=235269763, host=my_host, username=this-user, pid=45075)
running DumpTask(date=2019-09-01_09-29-01, prev_date=2019-08-30_13-45-01, chunk=prep, sql_filename=daily_urls,
kvs=None, target_filename=None, db_host=host.com, db_port=6666, db_user=user, db_name=my_db)
'''
# TODO: Catch failed and restarted tasks
def file_to_df(inputfile):
    """Filter a Luigi log into a one-column DataFrame of interesting lines.

    Keeps the first line, every line containing '[pid' (worker start/done
    events) and the last three lines. A line matching several conditions is
    kept once per matching condition, mirroring the original behaviour.

    Args:
        inputfile: an open, readable text file object.

    Returns:
        pd.DataFrame with a single 'line' column.
    """
    lines = inputfile.readlines()
    kept = []
    for index, row in enumerate(lines):
        if index == 0:
            kept.append(row)
        if '[pid' in row:
            kept.append(row)
        if index >= len(lines) - 3:
            kept.append(row)
    pd.options.display.max_colwidth = 100
    # Build the frame in one constructor call: DataFrame.append was removed
    # in pandas 2.0 and was O(n^2) before that.
    return pd.DataFrame(kept, columns=['line'])
def get_task(row):
    """Extract the task description following 'running' or 'done' in a
    log message, or None when neither marker is present."""
    message = row['message']
    for marker in ('running', 'done'):
        if ' ' + marker in message:
            # Everything after the marker (and trailing whitespace) is the
            # task repr, e.g. 'FooTask(date=..., chunk=...)'.
            return re.split(marker + r'\s*', message)[1]
    return None
def get_state(row):
    """Classify a log message as a task 'start', 'done', or None."""
    message = row['message']
    if ' running' in message:
        return 'start'
    return 'done' if ' done' in message else None
def get_pid(row):
    """Return the worker PID from a '[pid NNN]' tag, or None if absent."""
    message = row['message']
    if '[pid' not in message:
        return None
    # Take the digits between 'pid ' and '] '.
    return int(message.split('pid ')[1].split('] ')[0])
def get_message(row):
    """Strip the 'timestamp LEVEL file.py:NN' prefix from a raw log line."""
    line = row['line']
    message = re.split(r'py:\d+\s+', line)[1]
    # Worker-format lines keep a '- ' separator after the code location;
    # drop it by re-splitting the full line on the first '- '.
    return line.split('- ')[1] if message.startswith('- ') else message
def data_from_log(input_path):
    """Parse a Luigi log file into a structured DataFrame.

    Args:
        input_path: path to the .log file.

    Returns:
        pd.DataFrame with columns ['timestamp', 'number', 'mode',
        'line_in_code', 'task', 'pid', 'state', 'message'].
    """
    df = pd.DataFrame(columns=['timestamp', 'number', 'mode',
                               'line_in_code', 'task', 'pid', 'state',
                               'message'])
    # 'with' closes the log file even on error (the original leaked the
    # file handle).
    with open(input_path, 'r') as inputfile:
        data = file_to_df(inputfile)
    df['timestamp'] = data.apply(lambda row: row['line'].split(',')[0],
                                 axis=1)
    df['number'] = data.apply(lambda row:
                              row['line'].split(',')[1].split(' ')[0], axis=1)
    df['mode'] = data.apply(lambda row: re.split(r'\d{3}\s',
                            row['line'])[1].split(' ')[0], axis=1)
    df['line_in_code'] = data.apply(lambda row: re.split(r'[A-Z]+\s+',
                                    row['line'])[1].split(' - ')[0],
                                    axis=1)
    # Row functions can be passed directly; the lambda wrappers were no-ops.
    df['message'] = data.apply(get_message, axis=1)
    df['pid'] = df.apply(get_pid, axis=1)
    df['task'] = df.apply(get_task, axis=1)
    df['state'] = df.apply(get_state, axis=1)
    return df
# TODO: Preprocess csv: Get rid of unnecessary info
def run_luigi_log_miner(log_path):
    """Mine a Luigi log into (timestamp, task, state, source_file) rows.

    The first and last rows are rewritten as synthetic 'AllTasks'
    start/done markers; rows with missing values are dropped.

    Args:
        log_path: path to the .log file; its basename (minus '.log')
            becomes the 'source_file' column.

    Returns:
        pd.DataFrame with columns ['timestamp', 'task', 'state',
        'source_file'].
    """
    df = data_from_log(log_path)
    # .copy() so the assignments below modify this frame, not a view of
    # the original.
    df = df[['timestamp', 'task', 'state']].copy()
    filename = log_path.rsplit('/', 1)[1].split('.log')[0]
    df['source_file'] = filename
    # .loc replaces the original chained assignments (df['state'][0] = ...),
    # which are unreliable on a sliced frame (SettingWithCopy) and removed
    # in pandas 3.0's copy-on-write mode.
    df.loc[df.index[0], 'state'] = 'start'
    df.loc[df.index[0], 'task'] = 'AllTasks'
    df.loc[df.index[-1], 'state'] = 'done'
    df.loc[df.index[-1], 'task'] = 'AllTasks'
    df = df.dropna()
    return df
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.