import boto3
import time
import server.server_plugins.resource_base as resource_base
from server.common import constants
from server.common import fm_logger
# from server.dbmodule import db_handler
TIMEOUT_COUNT = 400
fmlogger = fm_logger.Logging()
class DynamoDBResourceHandler(resource_base.ResourceBase):
def __init__(self):
self.client = boto3.client('dynamodb')
def create(self, request_obj):
table_name = request_obj['name']
attribute_definitions = []
if 'attribute_definitions' in request_obj:
attribute_definitions = request_obj['attribute_definitions']
else:
attribute_definitions = [{'AttributeName': 'PrimaryKey', 'AttributeType': 'N'},
{'AttributeName': 'StringData', 'AttributeType': 'S'}]
key_schema = []
if 'key_schema' in request_obj:
key_schema = request_obj['key_schema']
else:
key_schema = [{'AttributeName': 'PrimaryKey', 'KeyType': 'HASH'},
{'AttributeName': 'StringData', 'KeyType': 'RANGE'}]
provisioned_throughput = {'ReadCapacityUnits': 1,
'WriteCapacityUnits': 1}
try:
response = self.client.create_table(AttributeDefinitions=attribute_definitions,
TableName=table_name,
KeySchema=key_schema,
ProvisionedThroughput=provisioned_throughput)
fmlogger.debug(response)
except Exception as e:
fmlogger.error(e)
        # wait a few seconds for the create_table action to take effect
time.sleep(5)
status = ''
count = 1
        while count < constants.TIMEOUT_COUNT and status.lower() != 'active':
status_dict = self.client.describe_table(TableName=table_name)
status = status_dict['Table']['TableStatus']
# db_handler.DBHandler().update_resource(request_obj['resource_id'], status)
count = count + 1
time.sleep(2)
return status
def delete(self, request_obj):
table_name = request_obj['name']
try:
response = self.client.delete_table(TableName=table_name)
fmlogger.debug(response)
except Exception as e:
fmlogger.error(e)
# db_handler.DBHandler().delete_resource(request_obj['resource_id'])
return
deleted = False
count = 1
while count < constants.TIMEOUT_COUNT and not deleted:
try:
status_dict = self.client.describe_table(TableName=table_name)
status = status_dict['Table']['TableStatus']
fmlogger.debug(status)
# db_handler.DBHandler().update_resource(request_obj['resource_id'], status)
count = count + 1
time.sleep(2)
except Exception as e:
fmlogger.error(e)
deleted = True
# db_handler.DBHandler().delete_resource(request_obj['resource_id'])
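# ---------------------------------------------------------------------------
# A possible alternative to the manual describe_table polling above is
# boto3's built-in waiters. Illustrative sketch only, not part of the
# original handler; it assumes the same `table_name` variable as above.
#
#     waiter = self.client.get_waiter('table_exists')
#     waiter.wait(TableName=table_name)      # returns once the table is ACTIVE
#
#     waiter = self.client.get_waiter('table_not_exists')
#     waiter.wait(TableName=table_name)      # returns once the table is deleted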
|
#!/usr/bin/env python
import unittest
from functools import reduce
import numpy
from pyscf import gto
from pyscf import scf
from pyscf import ao2mo
from pyscf import fci
mol = gto.Mole()
mol.verbose = 0
mol.output = None#"out_h2o"
mol.atom = [
['H', ( 1.,-1. , 0. )],
['H', ( 0.,-1. ,-1. )],
['H', ( 0.,-0.5 ,-0. )],
['H', ( 0.,-0. ,-1. )],
['H', ( 1.,-0.5 , 0. )],
['H', ( 0., 1. , 1. )],
]
mol.basis = {'H': 'sto-3g'}
mol.charge = 1
mol.spin = 1
mol.build()
m = scf.UHF(mol)
m.conv_tol_grad = 1e-8
ehf = m.scf()
mo_a, mo_b = m.mo_coeff
norb = mo_a.shape[1]
nelecr = ((mol.nelectron+1)//2, (mol.nelectron+1)//2)
h1er = reduce(numpy.dot, (mo_a.T, m.get_hcore(), mo_a))
g2er = ao2mo.incore.general(m._eri, (mo_a,)*4, compact=False)
h1es = (h1er, h1er)
g2es = (g2er, g2er, g2er)
na = fci.cistring.num_strings(norb, nelecr[0])
nb = fci.cistring.num_strings(norb, nelecr[1])
numpy.random.seed(15)
ci0 = numpy.random.random((na,nb))
ci1 = numpy.random.random((na,nb))
neleci = ((mol.nelectron+1)//2, (mol.nelectron-1)//2)
na = fci.cistring.num_strings(norb, neleci[0])
nb = fci.cistring.num_strings(norb, neleci[1])
h1ei = (reduce(numpy.dot, (mo_a.T, m.get_hcore(), mo_a)),
reduce(numpy.dot, (mo_b.T, m.get_hcore(), mo_b)))
g2ei = (ao2mo.incore.general(m._eri, (mo_a,)*4, compact=False),
ao2mo.incore.general(m._eri, (mo_a,mo_a,mo_b,mo_b), compact=False),
ao2mo.incore.general(m._eri, (mo_b,)*4, compact=False))
numpy.random.seed(15)
ci2 = numpy.random.random((na,nb))
ci3 = numpy.random.random((na,nb))
class KnowValues(unittest.TestCase):
def test_contract(self):
ci1ref = fci.direct_spin1.contract_1e(h1er, ci0, norb, nelecr)
ci1 = fci.direct_uhf.contract_1e(h1es, ci0, norb, nelecr)
self.assertTrue(numpy.allclose(ci1, ci1ref))
ci1ref = fci.direct_spin1.contract_2e(g2er, ci0, norb, nelecr)
ci1 = fci.direct_uhf.contract_2e(g2es, ci0, norb, nelecr)
self.assertTrue(numpy.allclose(ci1, ci1ref))
self.assertAlmostEqual(numpy.linalg.norm(ci1), 201.86408542259386, 8)
ci3 = fci.direct_uhf.contract_2e(g2ei, ci2, norb, neleci)
self.assertAlmostEqual(numpy.linalg.norm(ci3), 120.7768063693274, 8)
def test_kernel(self):
eref, cref = fci.direct_spin1.kernel(h1er, g2er, norb, nelecr)
e, c = fci.direct_uhf.kernel(h1es, g2es, norb, nelecr)
self.assertAlmostEqual(e, eref, 8)
self.assertAlmostEqual(e, -8.9347029192929, 8)
e = fci.direct_uhf.energy(h1es, g2es, c, norb, nelecr)
self.assertAlmostEqual(e, -8.9347029192929, 8)
e, c = fci.direct_uhf.kernel(h1es, g2es, norb, neleci)
self.assertAlmostEqual(e, -8.7498253981782, 8)
def test_hdiag(self):
hdiagref = fci.direct_spin1.make_hdiag(h1er, g2er, norb, nelecr)
hdiag = fci.direct_uhf.make_hdiag(h1es, g2es, norb, nelecr)
self.assertTrue(numpy.allclose(hdiag, hdiagref))
self.assertAlmostEqual(numpy.linalg.norm(hdiag), 133.98845707380985, 10)
hdiag = fci.direct_uhf.make_hdiag(h1es, g2es, norb, neleci)
self.assertAlmostEqual(numpy.linalg.norm(hdiag), 113.87136636920194, 10)
def test_rdm1(self):
dm1ref = fci.direct_spin1.make_rdm1(ci0, norb, nelecr)
dm1 = fci.direct_uhf.make_rdm1(ci0, norb, nelecr)
self.assertTrue(numpy.allclose(dm1ref, dm1))
self.assertAlmostEqual(numpy.linalg.norm(dm1), 393.03762428630, 10)
dm1 = fci.direct_uhf.make_rdm1(ci2, norb, neleci)
self.assertAlmostEqual(numpy.linalg.norm(dm1), 242.33237916212, 10)
def test_rdm12(self):
dm1ref, dm2ref = fci.direct_spin1.make_rdm12(ci0, norb, nelecr)
dm1, dm2 = fci.direct_uhf.make_rdm12s(ci0, norb, nelecr)
dm1 = dm1[0] + dm1[1]
dm2 = dm2[0] + dm2[1] + dm2[1].transpose(2,3,0,1) + dm2[2]
self.assertTrue(numpy.allclose(dm1ref, dm1))
self.assertTrue(numpy.allclose(dm2ref, dm2))
self.assertAlmostEqual(numpy.linalg.norm(dm1), 393.0376242863019, 10)
self.assertAlmostEqual(numpy.linalg.norm(dm2), 1155.413506052811, 10)
dm1, dm2 = fci.direct_uhf.make_rdm12s(ci2, norb, neleci)
self.assertAlmostEqual(numpy.linalg.norm(dm1[0]), 143.05770559808, 10)
self.assertAlmostEqual(numpy.linalg.norm(dm1[1]), 109.30195472840, 10)
self.assertAlmostEqual(numpy.linalg.norm(dm2[0]), 258.07143130273, 10)
self.assertAlmostEqual(numpy.linalg.norm(dm2[1]), 172.41469868799, 10)
self.assertAlmostEqual(numpy.linalg.norm(dm2[2]), 149.76371060734, 10)
def test_trans_rdm1(self):
dm1ref = fci.direct_spin1.trans_rdm1(ci0, ci1, norb, nelecr)
dm1 = fci.direct_uhf.trans_rdm1(ci0, ci1, norb, nelecr)
self.assertTrue(numpy.allclose(dm1ref, dm1))
self.assertAlmostEqual(numpy.linalg.norm(dm1), 294.40681527414, 10)
dm0 = fci.direct_uhf.make_rdm1(ci0, norb, nelecr)
dm1 = fci.direct_uhf.trans_rdm1(ci0, ci0, norb, nelecr)
self.assertTrue(numpy.allclose(dm1, dm0))
dm1 = fci.direct_uhf.trans_rdm1(ci3, ci2, norb, neleci)
self.assertAlmostEqual(numpy.linalg.norm(dm1), 193.703051323676, 10)
def test_trans_rdm12(self):
dm1ref, dm2ref = fci.direct_spin1.trans_rdm12(ci0, ci1, norb, nelecr)
dm1, dm2 = fci.direct_uhf.trans_rdm12s(ci0, ci1, norb, nelecr)
dm1 = dm1[0] + dm1[1]
dm2 = dm2[0] + dm2[1] + dm2[2] + dm2[3]
self.assertTrue(numpy.allclose(dm1ref, dm1))
self.assertTrue(numpy.allclose(dm2ref, dm2))
self.assertAlmostEqual(numpy.linalg.norm(dm1), 294.4068152741418, 10)
self.assertAlmostEqual(numpy.linalg.norm(dm2), 949.0343056904616, 10)
_,dm0 = fci.direct_uhf.make_rdm12s(ci0, norb, nelecr)
_,dm2 = fci.direct_uhf.trans_rdm12s(ci0, ci0, norb, nelecr)
self.assertTrue(numpy.allclose(dm2[0], dm0[0]))
self.assertTrue(numpy.allclose(dm2[1], dm0[1]))
self.assertTrue(numpy.allclose(dm2[3], dm0[2]))
dm1, dm2 = fci.direct_uhf.trans_rdm12s(ci3, ci2, norb, neleci)
self.assertAlmostEqual(numpy.linalg.norm(dm1[0]+dm1[1]), 193.703051323676, 10)
self.assertAlmostEqual(numpy.linalg.norm(dm1[0]), 112.85954124885, 10)
self.assertAlmostEqual(numpy.linalg.norm(dm1[1]), 92.827695172359, 10)
self.assertAlmostEqual(numpy.linalg.norm(sum(dm2)), 512.111790469461, 10)
self.assertAlmostEqual(numpy.linalg.norm(dm2[0]), 228.750384383495, 10)
self.assertAlmostEqual(numpy.linalg.norm(dm2[1]), 155.324543159155, 10)
self.assertAlmostEqual(numpy.linalg.norm(dm2[2]), 155.324543159155, 10)
self.assertAlmostEqual(numpy.linalg.norm(dm2[3]), 141.269867535222, 10)
def test_contract2e_hubbard(self):
norb = 6
nelec = (3,2)
u = numpy.zeros((norb,)*4)
na = fci.cistring.num_strings(norb, nelec[0])
nb = fci.cistring.num_strings(norb, nelec[1])
for i in range(norb):
u[i,i,i,i] = 1
ci0 = numpy.random.random((na,nb))
ci1ref = fci.direct_uhf.contract_2e ((u*1.1, u*2.2, u*1.8), ci0, norb, nelec)
ci1 = fci.direct_uhf.contract_2e_hubbard(( 1.1, 2.2, 1.8), ci0, norb, nelec)
self.assertTrue(numpy.allclose(ci1ref, ci1))
if __name__ == "__main__":
print("Full Tests for uhf-based fci")
unittest.main()
|
# pylint: disable=C0103
"""
This module includes continuous-time models for a permanent-magnet
synchronous motor drive. The space-vector model is implemented in rotor
coordinates.
"""
import numpy as np
from helpers import complex2abc
# %%
class Drive:
"""
This class interconnects the subsystems of a PMSM drive and provides an
interface to the solver.
"""
def __init__(self, motor, mech, converter, delay, pwm, datalog):
"""
Instantiate the classes.
"""
self.motor = motor
self.mech = mech
self.converter = converter
self.delay = delay
self.pwm = pwm
self.datalog = datalog
self.q = 0 # Switching-state space vector
self.t0 = 0 # Initial simulation time
def get_initial_values(self):
"""
Returns
-------
        x0 : complex list, length 3
Initial values of the state variables.
"""
x0 = [self.motor.psi0, self.mech.theta_M0, self.mech.w_M0]
return x0
def set_initial_values(self, t0, x0):
"""
Parameters
----------
x0 : complex ndarray
Initial values of the state variables.
"""
self.t0 = t0
self.motor.psi0 = x0[0]
self.mech.theta_M0 = x0[1].real # x0[1].imag is always zero
self.mech.w_M0 = x0[2].real # x0[2].imag is always zero
# Limit the angle [0, 2*pi]
self.mech.theta_M0 = np.mod(self.mech.theta_M0, 2*np.pi)
def f(self, t, x):
"""
Compute the complete state derivative list for the solver.
Parameters
----------
t : float
Time.
x : complex ndarray
State vector.
Returns
-------
complex list
State derivatives.
"""
# Unpack the states
psi, theta_M, w_M = x
theta_m = self.motor.p*theta_M
# Interconnections: outputs for computing the state derivatives
u_s = self.converter.ac_voltage(self.q, self.converter.u_dc0)
u = np.exp(-1j*theta_m)*u_s # Voltage in rotor coordinates
i = self.motor.current(psi)
T_M = self.motor.torque(psi, i)
# State derivatives
motor_f = self.motor.f(psi, i, u, w_M)
mech_f = self.mech.f(t, w_M, T_M)
# List of state derivatives
return motor_f + mech_f
# %%
class Motor:
"""
This class represents a permanent-magnet synchronous motor. The
peak-valued complex space vectors are used.
"""
def __init__(self, mech, R=3.6, L_d=.036, L_q=.051, psi_f=.545, p=3):
"""
The default values correspond to the 2.2-kW PMSM.
Parameters
----------
mech : object
Mechanics, needed for computing the measured phase currents.
R : float, optional
Stator resistance. The default is 3.6.
L_d : float, optional
d-axis inductance. The default is .036.
L_q : float, optional
q-axis inductance. The default is .051.
psi_f : float, optional
PM-flux linkage. The default is .545.
p : int, optional
Number of pole pairs. The default is 3.
"""
self.R, self.L_d, self.L_q, self.psi_f, self.p = R, L_d, L_q, psi_f, p
self.mech = mech
self.psi0 = psi_f + 0j
def current(self, psi):
"""
Computes the stator current.
Parameters
----------
psi : complex
Stator flux linkage in rotor coordinates.
Returns
-------
i : complex
Stator current in rotor coordinates.
"""
i = (psi.real - self.psi_f)/self.L_d + 1j*psi.imag/self.L_q
return i
def torque(self, psi, i):
"""
Computes the electromagnetic torque.
Parameters
----------
psi : complex
Stator flux linkage.
i : complex
Stator current.
Returns
-------
T_M : float
Electromagnetic torque.
"""
T_M = 1.5*self.p*np.imag(i*np.conj(psi))
return T_M
def f(self, psi, i, u, w_M):
"""
Computes the state derivative.
Parameters
----------
psi : complex
            Stator flux linkage in rotor coordinates.
        i : complex
            Stator current in rotor coordinates.
        u : complex
            Stator voltage in rotor coordinates.
w_M : float
Rotor speed (in mechanical rad/s).
Returns
-------
dpsi : complex
Time derivatives of the state vector.
"""
dpsi = u - self.R*i - 1j*self.p*w_M*psi
return [dpsi]
def meas_currents(self):
"""
Returns the phase currents at the end of the sampling period.
Returns
-------
i_abc : 3-tuple of floats
Phase currents.
"""
i0 = self.current(self.psi0)
theta_m0 = self.p*self.mech.theta_M0
i_abc = complex2abc(np.exp(1j*theta_m0)*i0)
return i_abc
def __str__(self):
if self.psi_f == 0:
desc = ('Synchronous reluctance motor:\n'
' p={} R={} L_d={} L_q={}')
return desc.format(self.p, self.R, self.L_d, self.L_q)
else:
desc = ('Permanent-magnet synchronous motor:\n'
' p={} R={} L_d={} L_q={} psi_f={}')
return desc.format(self.p, self.R, self.L_d, self.L_q, self.psi_f)
# %%
class Datalogger:
"""
This class contains a datalogger. Here, stator coordinates are marked
with s, e.g. i_s is the stator current in stator coordinates.
"""
def __init__(self):
"""
Initialize the attributes.
"""
# pylint: disable=too-many-instance-attributes
self.t, self.q = [], []
self.psi = []
self.theta_M, self.w_M = [], []
self.u_s, self.i = 0j, 0j
self.w_m, self.theta_m = 0, 0
self.T_M, self.T_L = 0, 0
def save(self, mdl, sol):
"""
Saves the solution.
Parameters
----------
mdl : instance of a class
Continuous-time model.
sol : bunch object
Solution from the solver.
"""
self.t.extend(sol.t)
self.q.extend(len(sol.t)*[mdl.q])
self.psi.extend(sol.y[0])
self.theta_M.extend(sol.y[1].real)
self.w_M.extend(sol.y[2].real)
def post_process(self, mdl):
"""
        Transforms the lists to the ndarray format and post-processes them.
"""
# From lists to the ndarray
self.t = np.asarray(self.t)
self.q = np.asarray(self.q)
self.psi = np.asarray(self.psi)
self.theta_M = np.asarray(self.theta_M)
self.w_M = np.asarray(self.w_M)
# Compute some useful variables
self.i = mdl.motor.current(self.psi)
self.w_m = mdl.motor.p*self.w_M
self.T_M = mdl.motor.torque(self.psi, self.i)
self.T_L = mdl.mech.T_L_ext(self.t) + mdl.mech.B*self.w_M
self.u_s = mdl.converter.ac_voltage(self.q, mdl.converter.u_dc0)
self.theta_m = mdl.motor.p*self.theta_M
self.theta_m = np.mod(self.theta_m, 2*np.pi)
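# %%
# Illustrative wiring of the model to an ODE solver (not part of the
# original module). The motor, mech, converter, delay and pwm objects and
# the sampling period T_s are assumed to exist elsewhere; recent SciPy
# versions integrate the complex-valued state returned by Drive.f directly.
#
#     from scipy.integrate import solve_ivp
#
#     mdl = Drive(motor, mech, converter, delay, pwm, Datalogger())
#     sol = solve_ivp(mdl.f, (mdl.t0, mdl.t0 + T_s),
#                     mdl.get_initial_values(), max_step=T_s)
#     mdl.datalog.save(mdl, sol)
#     mdl.set_initial_values(sol.t[-1], sol.y[:, -1])
#     mdl.datalog.post_process(mdl)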
|
import json
import os
import time
import argparse
import uuid
import subprocess
import sys
import datetime
import yaml
from jinja2 import Environment, FileSystemLoader, Template
import base64
import re
import thread
import threading
import random
import textwrap
import logging
import logging.config
from multiprocessing import Process, Manager
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),"../storage"))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),"../utils"))
from jobs_tensorboard import GenTensorboardMeta
import k8sUtils
from osUtils import mkdirsAsUser
from config import config, GetStoragePath
from DataHandler import DataHandler
from cluster_manager import setup_exporter_thread, manager_iteration_histogram, register_stack_trace_dump, update_file_modification_time, record
logger = logging.getLogger(__name__)
def create_log(logdir = '/var/log/dlworkspace'):
if not os.path.exists( logdir ):
os.system("mkdir -p " + logdir)
    with open('logging.yaml') as f:
        logging_config = yaml.safe_load(f)
logging_config["handlers"]["file"]["filename"] = logdir+"/joblogmanager.log"
logging.config.dictConfig(logging_config)
def save_log(jobLogDir,containerID,userId,log,order=1):
try:
containerLogPath = os.path.join(jobLogDir, "log-container-" + containerID + ".txt"+"."+str(order))
        with open(containerLogPath, 'w') as f:
            f.write(log)
os.system("chown -R %s %s" % (userId, containerLogPath))
except Exception as e:
logger.exception("write container log failed")
@record
def extract_job_log(jobId,logPath,userId,jobType=None):
try:
dataHandler = DataHandler()
# logs = k8sUtils.GetLog(jobId)
# logs = k8sUtils.getJobConsoleDetail(jobId)
jupyterLog = k8sUtils.getJupyterInfo(jobId)
# TODO: Replace joblog manager with elastic search
logs = k8sUtils.GetLog(jobId, tail=None,jobType=jobType)
# Do not overwrite existing logs with empty log
# DLTS bootstrap will generate logs for all containers.
# If one container has empty log, skip writing.
if not logs:
return
for log in logs:
if "containerLog" in log and log["containerLog"] == "":
return
jobLogDir = os.path.dirname(logPath)
if not os.path.exists(jobLogDir):
mkdirsAsUser(jobLogDir,userId)
logStr = ""
trimlogstr = ""
for log in logs:
if "podName" in log and "containerID" in log and "containerLog" in log:
logStr += "=========================================================\n"
logStr += "=========================================================\n"
logStr += "=========================================================\n"
logStr += " logs from pod: %s\n" % log["podName"]
logStr += "=========================================================\n"
logStr += "=========================================================\n"
logStr += "=========================================================\n"
logStr += log["containerLog"]
logStr += jupyterLog
logStr += "\n\n\n"
logStr += "=========================================================\n"
logStr += " end of logs from pod: %s\n" % log["podName"]
logStr += "=========================================================\n"
logStr += "\n\n\n"
logLines = logStr.split('\n')
length = len(logLines)
if len(logStr.strip()) > 0:
if (length <= 2000):
if os.path.exists(os.path.join(jobLogDir,"max_page")):
os.system("rm -rf %s" %(jobLogDir))
save_log(jobLogDir,str(jobId),userId,logStr)
else:
with open(os.path.join(jobLogDir,"max_page"), 'w') as f:
f.write(str(length//2000+1))
for i in range(1,length//2000+2):
trimlogstr = "\n".join(logLines[(i-1)*2000:i*2000])
save_log(jobLogDir, str(jobId), userId, trimlogstr,i)
except Exception as e:
logger.error(e)
def update_job_logs():
while True:
try:
dataHandler = DataHandler()
pendingJobs = dataHandler.GetPendingJobs()
for job in pendingJobs:
try:
if job["jobStatus"] == "running" :
logger.info("updating job logs for job %s" % job["jobId"])
jobParams = json.loads(base64.b64decode(job["jobParams"]))
jobPath,workPath,dataPath = GetStoragePath(jobParams["jobPath"],jobParams["workPath"],jobParams["dataPath"])
localJobPath = os.path.join(config["storage-mount-path"],jobPath)
logPath = os.path.join(localJobPath,"logs/joblog.txt")
extract_job_log(job["jobId"],logPath,jobParams["userId"],jobParams["jobType"])
except Exception as e:
logger.exception("handling logs from %s", job["jobId"])
except Exception as e:
logger.exception("get pending jobs failed")
time.sleep(1)
def Run():
register_stack_trace_dump()
create_log()
logger.info("start to update job logs ...")
while True:
update_file_modification_time("joblog_manager")
with manager_iteration_histogram.labels("joblog_manager").time():
try:
update_job_logs()
except Exception as e:
logger.exception("update job logs failed")
time.sleep(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--port", "-p", help="port of exporter", type=int, default=9203)
args = parser.parse_args()
setup_exporter_thread(args.port)
Run()
|
'''
HMMDeviceMap
'''
from Products.DataCollector.plugins.CollectorPlugin import (
SnmpPlugin, GetMap
)
class HMMDeviceMap(SnmpPlugin):
'''
HMMDeviceMap
'''
snmpGetMap = GetMap({
'.1.3.6.1.4.1.2011.2.82.1.82.2.2001.1.15.0': 'shelfSerialNumber',
})
def process(self, device, results, log):
'''
process oid
'''
log = log
device = device
getdata = results[0]
return self.objectMap({
'setHWSerialNumber': getdata.get('shelfSerialNumber'),
})
|
"""Question: https://leetcode.com/problems/palindrome-number/
"""
class Solution:
def isPalindrome(self, x: int) -> bool:
if x < 0:
return False
copy, reverse = x, 0
while copy:
reverse *= 10
reverse += copy % 10
copy = copy // 10
return x == reverse
def isPalindrome_using_str(self, x: int) -> bool:
return str(x) == str(x)[::-1]
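# Worked trace of the digit-reversal loop above (illustrative, x = 121):
#   copy=121, reverse=0   ->  reverse=1,   copy=12
#   copy=12,  reverse=1   ->  reverse=12,  copy=1
#   copy=1,   reverse=12  ->  reverse=121, copy=0
# The loop stops when copy == 0, and 121 == 121, so the method returns True.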
if __name__ == '__main__':
x = 121
output = Solution().isPalindrome(x)
print(f'x: {x}\toutput: {output}')
x = -121
output = Solution().isPalindrome(x)
print(f'x: {x}\toutput: {output}')
|
import os
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
import numpy as np
np.random.seed(1)
import random
random.seed(1)
import pandas as pd
import cv2
import timeit
from os import path, makedirs, listdir
import sys
sys.setrecursionlimit(10000)
from multiprocessing import Pool
from skimage.morphology import square, dilation, watershed, erosion
from skimage import io
from shapely.wkt import loads
from tqdm import tqdm
import ntpath
# from matplotlib import pyplot as plt
# import seaborn as sns
# test_folders = ['/data/SN5_roads/test_public/AOI_7_Moscow', '/data/SN5_roads/test_public/AOI_8_Mumbai', '/data/SN5_roads/test_public/AOI_9_San_Juan']
test_folders = []
for i in range(1, len(sys.argv) - 1):
test_folders.append(sys.argv[i])
print("test_folders:", test_folders)
test_png = '/wdata/test_png'
test_png2 = '/wdata/test_png_5_3_0'
test_png3 = '/wdata/test_png_pan_6_7'
test_png_960 = '/wdata/test_png_960'
test_png2_960 = '/wdata/test_png_5_3_0_960'
test_png3_960 = '/wdata/test_png_pan_6_7_960'
def process_image(fn):
    img_id = ntpath.basename(fn)[0:-4]
img_id = img_id.replace('_PS-MS', '')
img = io.imread(fn)
img_bgr = (np.clip(img[..., [1, 2, 4]], None, 2000) / (2000 / 255)).astype('uint8')
cv2.imwrite(path.join(test_png, img_id + '.png'), img_bgr, [cv2.IMWRITE_PNG_COMPRESSION, 9])
cv2.imwrite(path.join(test_png_960, img_id + '.png'), cv2.resize(img_bgr, (960, 960)), [cv2.IMWRITE_PNG_COMPRESSION, 9])
img_0_3_5 = (np.clip(img[..., [0, 3, 5]], None, 2000) / (2000 / 255)).astype('uint8')
cv2.imwrite(path.join(test_png2, img_id + '.png'), img_0_3_5, [cv2.IMWRITE_PNG_COMPRESSION, 9])
cv2.imwrite(path.join(test_png2_960, img_id + '.png'), cv2.resize(img_0_3_5, (960, 960)), [cv2.IMWRITE_PNG_COMPRESSION, 9])
pan = io.imread(fn.replace('_PS-MS_', '_PAN_').replace('PS-MS', 'PAN'))
pan = pan[..., np.newaxis]
img_pan_6_7 = np.concatenate([pan, img[..., 7:], img[..., 6:7]], axis=2)
img_pan_6_7 = (np.clip(img_pan_6_7, None, (10000, 2000, 2000)) / (np.array([10000, 2000, 2000]) / 255)).astype('uint8')
cv2.imwrite(path.join(test_png3, img_id + '.png'), img_pan_6_7, [cv2.IMWRITE_PNG_COMPRESSION, 9])
cv2.imwrite(path.join(test_png3_960, img_id + '.png'), cv2.resize(img_pan_6_7, (960, 960)), [cv2.IMWRITE_PNG_COMPRESSION, 9])
if __name__ == '__main__':
t0 = timeit.default_timer()
makedirs(test_png, exist_ok=True)
makedirs(test_png2, exist_ok=True)
makedirs(test_png3, exist_ok=True)
makedirs(test_png_960, exist_ok=True)
makedirs(test_png2_960, exist_ok=True)
makedirs(test_png3_960, exist_ok=True)
all_files = []
for d in test_folders:
for f in listdir(path.join(d, 'PS-MS')):
if '.tif' in f:
all_files.append(path.join(d, 'PS-MS', f))
with Pool() as pool:
_ = pool.map(process_image, all_files)
elapsed = timeit.default_timer() - t0
print('Time: {:.3f} min'.format(elapsed / 60))
|
from handlers.group import GroupHandler
from handlers.travel import TravelHandler, TrainingHandler
from handlers.travel_result import TravelResultHandler
|
#!/usr/bin/python
######################################################################
# Ascii TMS Viewer
#
#--------------------------------------------------------------------
# Brian Hone | Initial Release
#--------------------------------------------------------------------
#
#--------------------------------------------------------------------
# Copyright (c) 2009 Brian Hone
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
######################################################################
import time, sys, os, string, random, math
import curses
import pprint
from TileLoader import TileLoader
import KMLParser
false = 0
true = 1
debug = 1
class KMLTileLoader( TileLoader ):
def __init__(self, (sizeX, sizeY), kmlFile, cacheUrl, zoomlevel ):
TileLoader.__init__( self, (sizeX, sizeY), cacheUrl )
self.zoomLevel = zoomlevel # minzoomlevel
self.kmlFile = kmlFile
self.kmlPoints = {}
self.kmlShapes = {}
self.initKML()
#self.tileUtils = TileUtils()
# end __init__
def fetchTile( self, x, y, z ):
tileArr = self.getEmptyTile()
#print tileStr
for key, value in self.kmlPoints.items():
lat = float( value[ "LAT" ] )
lon = float( value[ "LON" ] )
# returns None if point does not intersect
res = self.tileUtils.latlon2pixel( value["NAME" ], lat, lon, self.sizeX, self.sizeY, x,y, z )
# TODO: This logic relies on error handling to determine whether
# a point is "onscreen" - do something better
if res != None:
pixX, pixY = res[0], res[1]
tileArr = self.addTextToTile( pixX, pixY, value[ "NAME" ], tileArr )
return tileArr
# end createTile
def initKML( self ):
""" Load cities and countries from a kml file - ./kml_files.txt lists kml files to load"""
reader = KMLParser.kmlReader( self.kmlFile )
coords = reader.getCoordinates()
for c in coords:
if c.has_point and c.point.lat and c.point.lon:
self.kmlPoints[ c.name ] = { "LON" : c.point.lon, "LAT" : c.point.lat, "NAME": c.name, "ZOOMLEVEL" : self.zoomLevel }
if c.has_linear_ring:
self.kmlShapes[ c.name ] = { "NAME" : c.name, "ZOOMLEVEL" : self.zoomLevel, "POINTS" : c.linear_ring.points }
# end loadKML
def drawLatLonLine( self, latA, lonA, latB, lonB, x, y, z ):
resA = self.tileUtils.latlon2pixel( "A", latA, lonA, self.sizeX, self.sizeY, x, y, z )
resB = self.tileUtils.latlon2pixel( "B", latB, lonB, self.sizeX, self.sizeY, x, y, z )
        self.drawLine( resA[1], resA[0], resB[1], resB[0], '.' )
# end drawLatLonLine
def drawLines( self, x, y, z ):
for shape in self.kmlShapes.items():
shape = shape[1]
points = shape["POINTS"]
last_point = points[0]
for point in shape["POINTS"][1:]:
self.drawLatLonLine( last_point.lat,last_point.lon,point.lat,point.lon, x, y, z );
last_point = point
#end showLine
def drawLine( self, fromY, fromX, toY, toX, ch ):
""" draw from YX to YX using the character ch """
deltaY = toY - fromY
deltaX = toX - fromX
pts = []
pts.append( [fromX, fromY] )
direction = 1
if abs(deltaX) > abs(deltaY):
if toX - fromX < 0:
direction = -1
for x in range( fromX+1, toX, direction ):
pts.append( [x, fromY + deltaY * (( x-fromX ) / float(deltaX)) ] )
else:
if toY - fromY < 0:
direction = -1
for y in range( fromY+1, toY, direction ):
pts.append( [ fromX + deltaX * (( y-fromY ) / float(deltaY)), y ] )
for pt in pts:
if self.pixelIsShown( pt[0], pt[1] ):
try:
self.mainWin.addch( int(pt[1]), int(pt[0]), ord(ch), curses.color_pair(8) )
except:
pass
self.mainWin.refresh()
# end drawLine
def pixelIsShown( self, px, py ):
self.mainWinMaxY, self.mainWinMaxX = self.mainWin.getmaxyx()
if px > 0 and px < self.mainWinMaxX and py > 0 and py < self.mainWinMaxY:
return true
return false;
# end pixelIsShown
# end class KMLTileMap
if __name__=="__main__":
#def __init__(self, (x,y,z), (sizeX, sizeY), kmlFile, cacheUrl ):
T = KMLTileLoader((55,55), "us_states.kml", "test_cache", 0 )
print T.getTile( 1,2,3 )
|
# import lib.forest as forest
import math
import random
import numpy as np
import pandas as pd
from lib.tree import Tree
from lib.forest import RNF
from lib.evalMetrics import *
from sklearn.utils import shuffle
import sys
def cross_val_tree(df, tries):
for i in range(tries):
shuffle = df.sample(frac=1)
shuffle = shuffle.reset_index(drop=True)
ls = [x for x in range(60)]
tree = Tree(shuffle, 3, None, range(shuffle.shape[0]-20), ls)
tree.fit()
score = 0
labels = [row[60] for index, row in shuffle[188:208].iterrows()]
probas = tree.predict(shuffle[188:208])
for i in range(len(labels)):
if probas[i][0] > probas[i][1]:
# print('R/{}'.format(actual))
if 'R' == labels[i]:
score+=1
else:
# print('M/{}'.format(actual))
if 'M' == labels[i]:
score+=1
print(score/(208-188))
def cross_val_rnf(df, tries):
for i in range(tries):
shuffle = df.sample(frac=1, random_state=1)
shuffle = shuffle.reset_index(drop=True)
forest = RNF(shuffle[0:188], 2, 3, random.randint(1, 100), 40, 80)
forest.fit()
score = 0
labels = [row[60] for index, row in shuffle[188:208].iterrows()]
predicted_classes = forest.predict(shuffle[188:208])[1]
# print(predicted_classes, labels)
score = sum( [ 1 for i in range(len(predicted_classes)) if predicted_classes[i] == labels[i]])
print(score/(208-188))
def cross_val_rnf_incremental(num_trees, df, tries, num_increments, random_seed, cat_features, overall_training_ratio,
initial_training_ratio, increment_size):
'''
Testing for incremental learning.
    Start with a small subset of the dataset and add rows in increments, checking whether accuracy improves.
args:
df (dataframe)
tries (int) number of times to repeat the test
num_increments (int) number of times to incrementally train a model
random_seed (some object)
        cat_features (list) required to initialize the forest
overall_training_ratio (float) fraction of df to use as the training set
initial_training_ratio (float) fraction of df to use as the training set before any increments
increment_size (int) number of new rows to use per incremental step
'''
if (overall_training_ratio >= 1):
print("The training set should be samller than the dataset")
return
if (initial_training_ratio >= 1):
print("The initial training set should be smaller than the dataset")
return
if (initial_training_ratio >= overall_training_ratio):
print("The initial training set should be smaller than the overall training set")
return
dataset_size = df.shape[0]
initial_train_size = math.floor(dataset_size * initial_training_ratio)
overall_train_size = math.floor(dataset_size * overall_training_ratio)
    test_size = dataset_size - overall_train_size
increment_limit = (overall_train_size - initial_train_size) / increment_size
increment_limit = int(math.floor(increment_limit))
if num_increments > increment_limit:
print('too many increments specified ({}), running with the max possible: ({})'.format(num_increments, increment_limit))
        num_increments = increment_limit
for i in range(tries):
cur_max = initial_train_size
shuffled_df = df.sample(frac=1, random_state=1)
#shuffled_df = shuffle(df, random_state=random_seed)
shuffled_df = shuffled_df.reset_index(drop=True)
# initial fit
initial_df = shuffled_df[0:cur_max]
n_features = math.floor(math.sqrt(df.shape[1]))
tree_depth = 20
forest = RNF(initial_df, num_trees, tree_depth, random_seed, n_features, cur_max, cat_features)
#forest.fit()
forest.fit_parallel()
score = 0
# This is the answer key
labels = [row["Label"] for index, row in shuffled_df[-test_size:].iterrows()]
print("LABELS:")
print(labels)
predicted_classes = forest.predict_parallel(shuffled_df[-test_size:])[1]
print("predicted_classes:")
print(predicted_classes)
score = sum( [ 1 for i in range(len(predicted_classes)) if predicted_classes[i] == labels[i]])
print('score before incremental training: ' + str(score / test_size))
last = initial_train_size
for j in range(1, num_increments + 1):
# print(last)
# put this into RNF later!!!
forest.n_max_input = last
predicted = forest.predict_parallel(shuffled_df[last:last + increment_size])
prediction_ratios = predicted[0]
low_confidence_threshold = .05
high_confidence_threshold = .8
# these store indices
less_confident = []
more_confident = []
for i in range(len(prediction_ratios)):
ratio = prediction_ratios[i]
if abs(ratio[0] - ratio[1]) <= low_confidence_threshold:
less_confident.append(last + i)
if abs(ratio[0] - ratio[1]) >= high_confidence_threshold:
more_confident.append(last + i)
print("len(less_confident): {}".format(len(less_confident)))
print("len(more_confident): {}".format(len(more_confident)))
less_confident.extend(more_confident)
forest.update(shuffled_df.loc[less_confident])
# print(type(forest.trees[0].head.rows[0]))
# evalMetrics.evalStats(predicted[1], shuffled_df[last:last + increment_size].reset_index(drop=True))
score = 0
labels = [row["Label"] for index, row in shuffled_df[-test_size:].iterrows()]
predicted_classes = forest.predict_parallel(shuffled_df[-test_size:])[1]
evalStats(predicted_classes, shuffled_df[-test_size:])
score = sum( [ 1 for i in range(len(predicted_classes)) if predicted_classes[i] == labels[i]])
print('score at increment ' + str(j) + ': ' + str(score / test_size))
last = last + increment_size
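# A hypothetical invocation of the incremental test above (the CSV path,
# the "Label" column and the parameter values are assumptions, not part of
# the original script):
#
#     df = pd.read_csv('sonar.csv')
#     cross_val_rnf_incremental(num_trees=10, df=df, tries=1,
#                               num_increments=3, random_seed=1,
#                               cat_features=[],
#                               overall_training_ratio=0.8,
#                               initial_training_ratio=0.4,
#                               increment_size=10)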
|
filename=input("Enter the File name:")
i=filename.index('.')
ext=filename[i+1:]
di={"py":"Python","m":"matlab","doc":"document","jpg":"image","sch":"Pspice schematics","mp4":"video","mp3":"audio"}
if ext in di:
    print("The extension of the file is:", di[ext])
else:
    print("The file is not valid")
|
from subprocess import Popen, DEVNULL
import time
if __name__ == '__main__':
start_id = 5000
step = 1000
g_len = 1
while True:
start = time.time()
p_list = list()
for i in range(g_len):
p = Popen(['python', 'pikabu_parser_basic.py', str(start_id + (i)*step), str(start_id + (i+1)*step)],
stdout=DEVNULL, stderr=DEVNULL)
p_list.append(p)
for p in p_list:
p.communicate()
end = time.time()
print(start_id, end-start)
start_id += step*g_len
# break
|
import pytest
from django.core.exceptions import ValidationError
from zinc.models import Policy
@pytest.mark.django_db
def test_policy_name_validation_not_unique_first_chars():
Policy(name="dev01").save()
with pytest.raises(ValidationError):
Policy(name="dev011").full_clean()
Policy(name="dev022").save()
with pytest.raises(ValidationError):
Policy(name="dev02").full_clean()
@pytest.mark.django_db
def test_policy_name_validation_regex():
with pytest.raises(ValidationError):
Policy(name="not-allowed-chars;").full_clean()
with pytest.raises(ValidationError):
Policy(name="UpperCaseName").full_clean()
|
# -*- coding: utf-8 -*-
#
# (c) 2016 Hareau SAS / Weenect, https://www.weenect.com
#
# This file is part of the weedi library
#
# MIT License : https://raw.githubusercontent.com/weenect/weedi/master/LICENSE.txt
import os
import unittest
import weedi.exc
import project.repository as repository
import project.services as services
class TestServicesRepository(unittest.TestCase):
def test_no_config_file(self):
service_repository = repository.ServicesRepository()
service_repository.load()
self.assertIsInstance(service_repository['mail'], services.Mail)
self.assertEqual(service_repository['mail'].host, "127.0.0.1")
self.assertEqual(service_repository['mail'].port, 25)
self.assertEqual(service_repository['mail'].timeout, 120.0)
self.assertIsInstance(service_repository['database'], services.Database)
self.assertEqual(service_repository['database'].host, "localhost")
self.assertEqual(service_repository['database'].port, 3306)
self.assertFalse(service_repository['database'].debug)
self.assertIsInstance(service_repository['manager'], services.Manager)
self.assertEqual(service_repository['manager'].db, service_repository['database'])
self.assertEqual(service_repository['manager'].mail, service_repository['mail'])
def test_no_config_file_overriden(self):
service_repository = repository.ServicesRepository()
service_repository.load(None, {'services': {'mail': {'host': 'smtp.local'}}})
self.assertIsInstance(service_repository['mail'], services.Mail)
self.assertEqual(service_repository['mail'].host, 'smtp.local')
self.assertEqual(service_repository['mail'].port, 25)
self.assertEqual(service_repository['mail'].timeout, 120.0)
self.assertIsInstance(service_repository['database'], services.Database)
self.assertEqual(service_repository['database'].host, "localhost")
self.assertEqual(service_repository['database'].port, 3306)
self.assertFalse(service_repository['database'].debug)
self.assertIsInstance(service_repository['manager'], services.Manager)
self.assertEqual(service_repository['manager'].db, service_repository['database'])
self.assertEqual(service_repository['manager'].mail, service_repository['mail'])
def test_with_config_file(self):
service_repository = repository.ServicesRepository()
service_repository.load(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.ini'))
self.assertIsInstance(service_repository['mail'], services.Mail)
self.assertEqual(service_repository['mail'].host, "8.8.8.8")
self.assertEqual(service_repository['mail'].port, 52)
self.assertEqual(service_repository['mail'].timeout, 300.0)
self.assertIsInstance(service_repository['database'], services.Database)
self.assertEqual(service_repository['database'].host, "database.local")
self.assertEqual(service_repository['database'].port, 5432)
self.assertTrue(service_repository['database'].debug)
self.assertIsInstance(service_repository['manager'], services.Manager)
self.assertEqual(service_repository['manager'].db, service_repository['database'])
self.assertEqual(service_repository['manager'].mail, service_repository['mail'])
def test_with_config_file_overriden(self):
service_repository = repository.ServicesRepository()
service_repository.load(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.ini'),
{'services': {'mail': {'host': 'smtp.local'}}})
self.assertIsInstance(service_repository['mail'], services.Mail)
self.assertEqual(service_repository['mail'].host, 'smtp.local')
self.assertEqual(service_repository['mail'].port, 52)
self.assertEqual(service_repository['mail'].timeout, 300.0)
self.assertIsInstance(service_repository['database'], services.Database)
self.assertEqual(service_repository['database'].host, "database.local")
self.assertEqual(service_repository['database'].port, 5432)
self.assertTrue(service_repository['database'].debug)
self.assertIsInstance(service_repository['manager'], services.Manager)
self.assertEqual(service_repository['manager'].db, service_repository['database'])
self.assertEqual(service_repository['manager'].mail, service_repository['mail'])
def test_instanciate_and_inject_outside_container(self):
service_repository = repository.ServicesRepository()
service_repository.load(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.ini'))
new_object = service_repository(services.InstanciatedOutsideContainer)
self.assertEqual(new_object.db, service_repository['database'])
self.assertEqual(new_object.mail, service_repository['mail'])
self.assertIsNone(new_object.manager)
service_repository(new_object.set_services)
self.assertEqual(new_object.db, service_repository['database'])
self.assertEqual(new_object.mail, service_repository['mail'])
self.assertEqual(new_object.manager, service_repository['manager'])
def test_instanciate_with_args_not_managed_by_container(self):
service_repository = repository.ServicesRepository()
service_repository.load(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.ini'))
new_object = service_repository(services.ServiceWithArguments, 'param1', 'param2', param4="param4")
self.assertEqual(new_object.db, service_repository['database'])
self.assertEqual(new_object.mail, service_repository['mail'])
self.assertEqual(new_object.param1, "param1")
self.assertEqual(new_object.param2, "param2")
self.assertIsNone(new_object.param3)
self.assertEqual(new_object.param4, "param4")
class TestMissingServicesRepository(unittest.TestCase):
def test_missing_service(self):
service_repository = repository.MissingServicesRepository()
with self.assertRaises(weedi.exc.ServiceMissing) as raised_ctx:
service_repository.load()
raised_exc = raised_ctx.exception
self.assertEqual(str(raised_exc), 'mail')
class TestMissingConfigServicesRepository(unittest.TestCase):
def test_missing_config_in_config_file(self):
service_repository = repository.ConfigurationServicesRepository()
with self.assertRaises(weedi.exc.WrongConfiguration) as raised_ctx:
service_repository.load(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.ini'))
raised_exc = raised_ctx.exception
self.assertTrue('section "[services_configuration / config]", parameter "param1": False' in str(raised_exc))
self.assertTrue('section "[services_configuration / config]", parameter "param2": False' in str(raised_exc))
def test_missing_config_without_config_file(self):
service_repository = repository.ConfigurationServicesRepository()
with self.assertRaises(weedi.exc.WrongConfiguration) as raised_ctx:
service_repository.load()
raised_exc = raised_ctx.exception
self.assertTrue('file "None", section "[services_configuration]", parameter "None": False' in str(raised_exc))
class TestUnpriorizedServicesRepository(unittest.TestCase):
def test_wrong_priority_for_dependency(self):
service_repository = repository.UnpriorizedServicesRepository()
with self.assertRaises(weedi.exc.ServiceWrongPriority) as raised_ctx:
service_repository.load(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.ini'))
raised_exc = raised_ctx.exception
self.assertEqual(str(raised_exc), 'database')
if __name__ == '__main__':
unittest.main()
|
import os
from flask import Flask
import db
def create_app(test_config=None):
    # Create and configure the app
app = Flask(__name__, instance_relative_config=True)
app.config.from_mapping(
SECRET_KEY='dev',
DATABASE=os.path.join(app.instance_path, 'my-easy-pic-bed.sqlite'),
)
# ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError:
pass
db.init_app(app)
return app
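# Illustrative usage of the application factory (running it via "flask run"
# with FLASK_APP pointing at this module is the usual alternative; the
# direct call below is just a sketch):
#
#     app = create_app()
#     app.run(debug=True)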
|
# Copyright 2018 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from common import clean_db, mongo, internal_api
import bravado
class TestInternalApi:
def test_create_tenant_ok(self, internal_api, clean_db):
_, r = internal_api.create_tenant('foobar')
assert r.status_code == 201
assert 'deviceauth-foobar' in clean_db.database_names()
assert 'migration_info' in clean_db['deviceauth-foobar'].collection_names()
def test_create_tenant_twice(self, internal_api, clean_db):
_, r = internal_api.create_tenant('foobar')
assert r.status_code == 201
# creating once more should not fail
_, r = internal_api.create_tenant('foobar')
assert r.status_code == 201
def test_create_tenant_empty(self, internal_api):
try:
_, r = internal_api.create_tenant('')
except bravado.exception.HTTPError as e:
assert e.response.status_code == 400
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`quaternion`
==================
.. module:: quaternion
:platform: Unix, Windows
:synopsis:
.. moduleauthor:: hbldh <henrik.blidh@nedomkull.com>
Created on 2015-06-03, 21:55
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
class Quaternion(object):
"""A simple quaternion class."""
def __init__(self, values):
"""Constructor for Quaternion"""
if len(values) != 4:
raise ValueError("Quaternion init vector must be of length 4.")
self._elements = np.array(values, 'float')
@classmethod
def i(cls):
return cls([0, 1, 0, 0])
@classmethod
def j(cls):
return cls([0, 0, 1, 0])
@classmethod
def k(cls):
return cls([0, 0, 0, 1])
def __repr__(self):
return "{0} + {1}i + {2}j + {3}k".format(*self._elements)
def __str__(self):
return repr(self)
def __iter__(self):
for value in self._elements:
yield value
def __getitem__(self, item):
return self._elements[item]
def __add__(self, other):
if isinstance(other, Quaternion):
return Quaternion(self.to_array() + other.to_array())
elif isinstance(other, (int, float)):
return self + Quaternion([other, 0, 0, 0])
else:
raise NotImplementedError("Cannot add Quaternion with type {0}".format(type(other)))
def __iadd__(self, other):
if isinstance(other, Quaternion):
self._elements += other.to_array()
elif isinstance(other, (int, float)):
self._elements += Quaternion([other, 0, 0, 0]).to_array()
else:
raise NotImplementedError("Cannot add Quaternion with type {0}".format(type(other)))
def __sub__(self, other):
if isinstance(other, Quaternion):
return Quaternion(self.to_array() - other.to_array())
elif isinstance(other, (int, float)):
return Quaternion(self._elements - other)
else:
raise NotImplementedError("Cannot subtract Quaternion with type {0}".format(type(other)))
def __isub__(self, other):
if isinstance(other, Quaternion):
self._elements -= other.to_array()
elif isinstance(other, (int, float)):
self._elements -= Quaternion([other, 0, 0, 0]).to_array()
else:
raise NotImplementedError("Cannot add Quaternion with type {0}".format(type(other)))
def __mul__(self, other):
if isinstance(other, Quaternion):
return Quaternion([self.w * other.w - (self.x * other.x) - (self.y * other.y) - (self.z * other.z),
self.w * other.x + self.x * other.w + self.y * other.z - (self.z * other.y),
self.w * other.y - (self.x * other.z) + self.y * other.w + self.z * other.x,
self.w * other.z + self.x * other.y - (self.y * other.x) + self.z * other.w])
elif isinstance(other, (int, float)):
return Quaternion(self._elements * other)
else:
raise NotImplementedError("Cannot multiply Quaternion with type {0}".format(type(other)))
def __imul__(self, other):
if isinstance(other, Quaternion):
self._elements[:] = [self.w * other.w - (self.x * other.x) - (self.y * other.y) - (self.z * other.z),
self.w * other.x + self.x * other.w + self.y * other.z - (self.z * other.y),
self.w * other.y - (self.x * other.z) + self.y * other.w + self.z * other.x,
self.w * other.z + self.x * other.y - (self.y * other.x) + self.z * other.w]
elif isinstance(other, (int, float)):
self._elements *= other
else:
raise NotImplementedError("Cannot multiply Quaternion with type {0}".format(type(other)))
def __rmul__(self, other):
# Catch only int and float rmultiplications.
if isinstance(other, (int, float)):
return Quaternion(self._elements * other)
def __truediv__(self, other):
if isinstance(other, Quaternion):
return self * other.inverse()
elif isinstance(other, (int, float)):
return Quaternion(self._elements / other)
else:
raise NotImplementedError("Cannot multiply Quaternion with type {0}".format(type(other)))
def __itruediv__(self, other):
if isinstance(other, Quaternion):
self._elements[:] = (self * other.inverse()).to_array()
elif isinstance(other, (int, float)):
self._elements /= other
else:
raise NotImplementedError("Cannot multiply Quaternion with type {0}".format(type(other)))
def __floordiv__(self, other):
raise NotImplementedError("Floor Division not implemented for Quaternions.")
def __div__(self, other):
return self.__truediv__(other)
def __neg__(self):
return self * -1
@property
def w(self):
return self._elements[0]
@property
def x(self):
return self._elements[1]
@property
def y(self):
return self._elements[2]
@property
def z(self):
return self._elements[3]
@property
def real(self):
return self._elements[0]
@property
def imag(self):
return self._elements[1:]
def to_array(self):
return self._elements.copy()
def conjugate(self):
return Quaternion([self.w, -self.x, -self.y, -self.z])
def inverse(self):
return self.conjugate() / (self.norm() ** 2)
def norm(self):
return np.linalg.norm(self._elements)
def normalize(self):
self._elements /= self.norm()
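# A short usage sketch (not part of the original module):
#
#     q = Quaternion([1.0, 2.0, 3.0, 4.0])
#     print(q + 1)               # 2.0 + 2.0i + 3.0j + 4.0k
#     print(q * Quaternion.i())  # Hamilton product with the basis element i
#     print(q.norm())            # sqrt(30)
#     print(q * q.inverse())     # approximately 1 + 0i + 0j + 0k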
|
import sqlite3
def upper_word(raw):
return raw.upper()
conn = sqlite3.connect(':memory:')
conn.create_function('upper', 1, upper_word)
cur = conn.cursor()
cur.execute('CREATE TABLE users (first_name char(20))')
cur.execute("INSERT INTO users(first_name) VALUES ('Ivan'), ('Peter'), ('Mike')")
cur.execute('SELECT upper(first_name) FROM users')
row = cur.fetchall()
print(row)
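# Equivalent insert using parameter binding (illustrative alternative to the
# literal VALUES list above):
#
#     cur.executemany('INSERT INTO users(first_name) VALUES (?)',
#                     [('Ivan',), ('Peter',), ('Mike',)])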
|
from __future__ import annotations
from typing import Optional
__author__ = "Anton Höß"
__copyright__ = "Copyright 2021"
class ConnTemplate:
def __init__(self, in_block_id: str, in_block_pin: int, out_block_id: str, out_block_pin: Optional[int]):
self.__in_block_id: str = in_block_id
self.__in_block_pin: int = in_block_pin
self.__out_block_id: str = out_block_id
self.__out_block_pin: Optional[int] = out_block_pin
# end def
def __str__(self):
return f"ConnTemplate: '{self.__in_block_id}:{self.__in_block_pin}' <=> '{self.__out_block_id}:{self.__out_block_pin}'"
# end def
def __repr__(self):
return str(self)
# end def
@property
def in_block_id(self) -> str:
return self.__in_block_id
# end def
@property
def out_block_id(self) -> str:
return self.__out_block_id
# end def
@property
def in_block_pin(self) -> int:
return self.__in_block_pin
# end def
@property
    def out_block_pin(self) -> Optional[int]:
return self.__out_block_pin
# end def
# end class
|
x = input("Comando:")
y = 20
for y in range(0, 20):
print("[+]Testing http://mercury.picoctf.net:55079/")
print("picoCTF{th4ts_4_l0t_0f_pl4c3s_2_lO0k_d375c750}")
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for matrix factorization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numbers
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.factorization.python.ops import gen_factorization_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import resource_loader
_factorization_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("_factorization_ops.so"))
class WALSModel(object):
r"""A model for Weighted Alternating Least Squares matrix factorization.
It minimizes the following loss function over U, V:
$$
\|\sqrt W \odot (A - U V^T)\|_F^2 + \lambda (\|U\|_F^2 + \|V\|_F^2)
$$
where,
A: input matrix,
W: weight matrix. Note that the (element-wise) square root of the weights
is used in the objective function.
U, V: row_factors and column_factors matrices,
  \\(\lambda\\): regularization.
Also we assume that W is of the following special form:
\\( W_{ij} = W_0 + R_i * C_j \\) if \\(A_{ij} \ne 0\\),
\\(W_{ij} = W_0\\) otherwise.
where,
\\(W_0\\): unobserved_weight,
\\(R_i\\): row_weights,
\\(C_j\\): col_weights.
Note that the current implementation supports two operation modes: The default
mode is for the condition where row_factors and col_factors can individually
fit into the memory of each worker and these will be cached. When this
condition can't be met, setting use_factors_weights_cache to False allows the
larger problem sizes with slight performance penalty as this will avoid
creating the worker caches and instead the relevant weight and factor values
are looked up from parameter servers at each step.
Loss computation: The loss can be computed efficiently by decomposing it into
a sparse term and a Gramian term, see wals.md.
The loss is returned by the update_{col, row}_factors(sp_input), and is
normalized as follows:
_, _, unregularized_loss, regularization, sum_weights =
update_row_factors(sp_input)
if sp_input contains the rows \\({A_i, i \in I}\\), and the input matrix A
has n total rows, then the minibatch loss = unregularized_loss +
regularization is
$$
(\|\sqrt W_I \odot (A_I - U_I V^T)\|_F^2 + \lambda \|U_I\|_F^2) * n / |I| +
\lambda \|V\|_F^2
$$
The sum_weights tensor contains the normalized sum of weights
\\(sum(W_I) * n / |I|\\).
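  For reference, the per-row weighted least-squares update implied by this
  objective (standard WALS algebra, not a detail specific to this
  implementation) is
  $$
  u_i = (V^T \mathrm{diag}(w_i) V + \lambda I)^{-1} V^T \mathrm{diag}(w_i) a_i
  $$
  where \\(\mathrm{diag}(w_i)\\) is the diagonal matrix of weights for row i.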
A typical usage example (pseudocode):
with tf.Graph().as_default():
# Set up the model object.
model = tf.contrib.factorization.WALSModel(....)
# To be run only once as part of session initialization. In distributed
# training setting, this should only be run by the chief trainer and all
# other trainers should block until this is done.
model_init_op = model.initialize_op
# To be run once per worker after session is available, prior to
# the prep_gramian_op for row(column) can be run.
worker_init_op = model.worker_init
    # To be run once per iteration sweep before the row(column) update
# initialize ops can be run. Note that in the distributed training
# situations, this should only be run by the chief trainer. All other
# trainers need to block until this is done.
row_update_prep_gramian_op = model.row_update_prep_gramian_op
col_update_prep_gramian_op = model.col_update_prep_gramian_op
# To be run once per worker per iteration sweep. Must be run before
# any actual update ops can be run.
init_row_update_op = model.initialize_row_update_op
init_col_update_op = model.initialize_col_update_op
# Ops to update row(column). This can either take the entire sparse
# tensor or slices of sparse tensor. For distributed trainer, each
# trainer handles just part of the matrix.
_, row_update_op, unreg_row_loss, row_reg, _ = model.update_row_factors(
sp_input=matrix_slices_from_queue_for_worker_shard)
row_loss = unreg_row_loss + row_reg
_, col_update_op, unreg_col_loss, col_reg, _ = model.update_col_factors(
sp_input=transposed_matrix_slices_from_queue_for_worker_shard,
transpose_input=True)
col_loss = unreg_col_loss + col_reg
...
# model_init_op is passed to Supervisor. Chief trainer runs it. Other
# trainers wait.
sv = tf.train.Supervisor(is_chief=is_chief,
...,
init_op=tf.group(..., model_init_op, ...), ...)
...
with sv.managed_session(...) as sess:
# All workers/trainers run it after session becomes available.
worker_init_op.run(session=sess)
...
while i in iterations:
# All trainers need to sync up here.
while not_all_ready:
wait
# Row update sweep.
if is_chief:
row_update_prep_gramian_op.run(session=sess)
else:
wait_for_chief
      # All workers run update initialization.
init_row_update_op.run(session=sess)
# Go through the matrix.
reset_matrix_slices_queue_for_worker_shard
while_matrix_slices:
row_update_op.run(session=sess)
# All trainers need to sync up here.
while not_all_ready:
wait
# Column update sweep.
if is_chief:
col_update_prep_gramian_op.run(session=sess)
else:
wait_for_chief
      # All workers run update initialization.
init_col_update_op.run(session=sess)
# Go through the matrix.
reset_transposed_matrix_slices_queue_for_worker_shard
while_transposed_matrix_slices:
col_update_op.run(session=sess)
"""
def __init__(self,
input_rows,
input_cols,
n_components,
unobserved_weight=0.1,
regularization=None,
row_init="random",
col_init="random",
num_row_shards=1,
num_col_shards=1,
row_weights=1,
col_weights=1,
use_factors_weights_cache=True,
use_gramian_cache=True):
"""Creates model for WALS matrix factorization.
Args:
input_rows: total number of rows for input matrix.
input_cols: total number of cols for input matrix.
n_components: number of dimensions to use for the factors.
unobserved_weight: weight given to unobserved entries of matrix.
regularization: weight of L2 regularization term. If None, no
regularization is done.
row_init: initializer for row factor. Can be a tensor or numpy constant.
If set to "random", the value is initialized randomly.
col_init: initializer for column factor. See row_init for details.
num_row_shards: number of shards to use for row factors.
num_col_shards: number of shards to use for column factors.
row_weights: Must be in one of the following three formats: None, a list
of lists of non-negative real numbers (or equivalent iterables) or a
single non-negative real number.
- When set to None, w_ij = unobserved_weight, which simplifies to ALS.
Note that col_weights must also be set to "None" in this case.
- If it is a list of lists of non-negative real numbers, it needs to be
in the form [[w_0, w_1, ...], [w_k, ...], [...]], with the number of
inner lists matching the number of row factor shards and the elements of
each inner list being the weights for the rows of the corresponding row
factor shard. In this case, \\(w_{ij}\\) = unobserved_weight +
row_weights[i] * col_weights[j].
- If this is a single non-negative real number, this value is used for
all row weights, and \\(w_{ij}\\) = unobserved_weight + row_weights *
col_weights[j].
Note that it is allowed to have row_weights as a list while col_weights is
a single number, or vice versa.
col_weights: See row_weights.
use_factors_weights_cache: When True, the factors and weights will be
cached on the workers before the updates start. Defaults to True. Note
that the weights cache is initialized through `worker_init`, and the
row/col factors cache is initialized through
`initialize_{col/row}_update_op`. In the case where the weights are
computed outside and set before the training iterations start, it is
important to ensure the `worker_init` op is run afterwards for the
weights cache to take effect.
use_gramian_cache: When True, the Gramians will be cached on the workers
before the updates start. Defaults to True.
"""
self._input_rows = input_rows
self._input_cols = input_cols
self._num_row_shards = num_row_shards
self._num_col_shards = num_col_shards
self._n_components = n_components
self._unobserved_weight = unobserved_weight
self._regularization = regularization
self._regularization_matrix = (
regularization * linalg_ops.eye(self._n_components)
if regularization is not None else None)
assert (row_weights is None) == (col_weights is None)
self._row_weights = WALSModel._create_weights(
row_weights, self._input_rows, self._num_row_shards, "row_weights")
self._col_weights = WALSModel._create_weights(
col_weights, self._input_cols, self._num_col_shards, "col_weights")
self._use_factors_weights_cache = use_factors_weights_cache
self._use_gramian_cache = use_gramian_cache
self._row_factors = self._create_factors(
self._input_rows, self._n_components, self._num_row_shards, row_init,
"row_factors")
self._col_factors = self._create_factors(
self._input_cols, self._n_components, self._num_col_shards, col_init,
"col_factors")
self._row_gramian = self._create_gramian(self._n_components, "row_gramian")
self._col_gramian = self._create_gramian(self._n_components, "col_gramian")
self._row_update_prep_gramian = self._prepare_gramian(
self._col_factors, self._col_gramian)
self._col_update_prep_gramian = self._prepare_gramian(
self._row_factors, self._row_gramian)
self._create_transient_vars()
@property
def row_factors(self):
"""Returns a list of tensors corresponding to row factor shards."""
return self._row_factors
@property
def col_factors(self):
"""Returns a list of tensors corresponding to column factor shards."""
return self._col_factors
@property
def row_weights(self):
"""Returns a list of tensors corresponding to row weight shards."""
return self._row_weights
@property
def col_weights(self):
"""Returns a list of tensors corresponding to col weight shards."""
return self._col_weights
@property
def initialize_op(self):
"""Returns an op for initializing tensorflow variables."""
all_vars = self._row_factors + self._col_factors
all_vars.extend([self._row_gramian, self._col_gramian])
if self._row_weights is not None:
assert self._col_weights is not None
all_vars.extend(self._row_weights + self._col_weights)
return variables.variables_initializer(all_vars)
@classmethod
def _shard_sizes(cls, dims, num_shards):
"""Helper function to split dims values into num_shards."""
shard_size, residual = divmod(dims, num_shards)
return [shard_size + 1] * residual + [shard_size] * (num_shards - residual)
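# Illustrative example (added comment, not in the original source): with
# dims=10 and num_shards=3, divmod gives shard_size=3 and residual=1, so the
# returned sizes are [4, 3, 3]; the first `residual` shards each hold one
# extra row.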
@classmethod
def _create_factors(cls, rows, cols, num_shards, init, name):
"""Helper function to create row and column factors."""
if callable(init):
init = init()
if isinstance(init, list):
assert len(init) == num_shards
elif isinstance(init, str) and init == "random":
pass
elif num_shards == 1:
init = [init]
sharded_matrix = []
sizes = cls._shard_sizes(rows, num_shards)
assert len(sizes) == num_shards
def make_initializer(i, size):
def initializer():
if init == "random":
return random_ops.random_normal([size, cols])
else:
return init[i]
return initializer
for i, size in enumerate(sizes):
var_name = "%s_shard_%d" % (name, i)
var_init = make_initializer(i, size)
sharded_matrix.append(
variable_scope.variable(
var_init, dtype=dtypes.float32, name=var_name))
return sharded_matrix
@classmethod
def _create_weights(cls, wt_init, num_wts, num_shards, name):
"""Helper function to create sharded weight vector.
Args:
wt_init: init value for the weights. If None, weights are not created. This
can be None, a list of non-negative real numbers (or an equivalent
iterable), or a single non-negative real number.
num_wts: total size of all the weight shards
num_shards: number of shards for the weights
name: name for the new Variables.
Returns:
A list of weight shard Tensors.
Raises:
ValueError: If wt_init is not the right format.
"""
if wt_init is None:
return None
init_mode = "list"
if isinstance(wt_init, collections.Iterable):
if num_shards == 1 and len(wt_init) == num_wts:
wt_init = [wt_init]
assert len(wt_init) == num_shards
elif isinstance(wt_init, numbers.Real) and wt_init >= 0:
init_mode = "scalar"
else:
raise ValueError(
"Invalid weight initialization argument. Must be one of these: "
"None, a non-negative real number, or a list of lists of "
"non-negative real numbers (or equivalent iterables) corresponding "
"to sharded factors.")
sizes = cls._shard_sizes(num_wts, num_shards)
assert len(sizes) == num_shards
def make_wt_initializer(i, size):
def initializer():
if init_mode == "scalar":
return wt_init * array_ops.ones([size])
else:
return wt_init[i]
return initializer
sharded_weight = []
for i, size in enumerate(sizes):
var_name = "%s_shard_%d" % (name, i)
var_init = make_wt_initializer(i, size)
sharded_weight.append(
variable_scope.variable(
var_init, dtype=dtypes.float32, name=var_name))
return sharded_weight
@staticmethod
def _create_gramian(n_components, name):
"""Helper function to create the gramian variable.
Args:
n_components: number of dimensions of the factors from which the gramian
will be calculated.
name: name for the new Variables.
Returns:
A gramian Tensor with shape of [n_components, n_components].
"""
return variable_scope.variable(
array_ops.zeros([n_components, n_components]),
dtype=dtypes.float32,
name=name)
@staticmethod
def _transient_var(name):
"""Helper function to create a Variable."""
return variable_scope.variable(
1.0,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
validate_shape=False,
name=name)
def _prepare_gramian(self, factors, gramian):
"""Helper function to create ops to prepare/calculate gramian.
Args:
factors: Variable or list of Variable representing (sharded) factors.
Used to compute the updated corresponding gramian value.
gramian: Variable storing the gramian calculated from the factors.
Returns:
A op that updates the gramian with the calculated value from the factors.
"""
partial_gramians = []
for f in factors:
with ops.colocate_with(f):
partial_gramians.append(math_ops.matmul(f, f, transpose_a=True))
with ops.colocate_with(gramian):
prep_gramian = state_ops.assign(gramian,
math_ops.add_n(partial_gramians)).op
return prep_gramian
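# Note (added comment, a hedged aside): for a row-wise sharded factor
# F = [F_1; ...; F_k], the Gramian F^T F equals the sum of the per-shard
# partial Gramians F_s^T F_s, each of shape [n_components, n_components],
# which is what the add_n above accumulates before assigning to `gramian`.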
def _cached_copy(self, var, name, pass_through=False):
"""Helper function to create a worker cached copy of a Variable.
This assigns the var (either a single Variable or a list of Variables) to
local transient cache Variable(s). Note that if var is a list of Variables,
the assignment is done sequentially to minimize the memory overheads.
Also note that if pass_through is set to True, this does not create new
Variables but simply returns the input.
Args:
var: A Variable or a list of Variables to cache.
name: name of cached Variable.
pass_through: when set to True, this simply passes the var back through
an identity operator and does not actually create a cache.
Returns:
Tuple consisting of following three entries:
cache: the new transient Variable or list of transient Variables
corresponding one-to-one with var.
cache_init: op to initialize the Variable or the list of Variables.
cache_reset: op to reset the Variable or the list of Variables to some
default value.
"""
if var is None:
return None, None, None
elif pass_through:
cache = var
cache_init = control_flow_ops.no_op()
cache_reset = control_flow_ops.no_op()
elif isinstance(var, variables.Variable):
cache = WALSModel._transient_var(name=name)
with ops.colocate_with(cache):
cache_init = state_ops.assign(cache, var, validate_shape=False)
cache_reset = state_ops.assign(cache, 1.0, validate_shape=False)
else:
assert isinstance(var, list)
assert var
cache = [
WALSModel._transient_var(name="%s_shard_%d" % (name, i))
for i in xrange(len(var))
]
reset_ops = []
for i, c in enumerate(cache):
with ops.colocate_with(c):
if i == 0:
cache_init = state_ops.assign(c, var[i], validate_shape=False)
else:
with ops.control_dependencies([cache_init]):
cache_init = state_ops.assign(c, var[i], validate_shape=False)
reset_ops.append(state_ops.assign(c, 1.0, validate_shape=False))
cache_reset = control_flow_ops.group(*reset_ops)
return cache, cache_init, cache_reset
def _create_transient_vars(self):
"""Creates local cache of factors, weights and gramian for rows and columns.
Note that currently the caching strategy is as follows:
When initiating a row (resp. column) update:
- The column (resp. row) gramian is computed.
- Optionally, if use_gramian_cache is True, the column (resp. row) Gramian
is cached, while the row (resp. column) gramian is reset.
- Optionally, if use_factors_weights_cache is True, the column (resp. row)
factors and weights are cached, while the row (resp. column) factors and
weights are reset.
"""
(self._row_factors_cache, row_factors_cache_init,
row_factors_cache_reset) = self._cached_copy(
self._row_factors,
"row_factors_cache",
pass_through=not self._use_factors_weights_cache)
(self._col_factors_cache, col_factors_cache_init,
col_factors_cache_reset) = self._cached_copy(
self._col_factors,
"col_factors_cache",
pass_through=not self._use_factors_weights_cache)
(self._row_wt_cache, row_wt_cache_init, _) = self._cached_copy(
self._row_weights,
"row_wt_cache",
pass_through=not self._use_factors_weights_cache)
(self._col_wt_cache, col_wt_cache_init, _) = self._cached_copy(
self._col_weights,
"col_wt_cache",
pass_through=not self._use_factors_weights_cache)
(self._row_gramian_cache, row_gramian_cache_init,
row_gramian_cache_reset) = self._cached_copy(
self._row_gramian,
"row_gramian_cache",
pass_through=not self._use_gramian_cache)
(self._col_gramian_cache, col_gramian_cache_init,
col_gramian_cache_reset) = self._cached_copy(
self._col_gramian,
"col_gramian_cache",
pass_through=not self._use_gramian_cache)
self._row_updates_init = control_flow_ops.group(
col_factors_cache_init, row_factors_cache_reset, col_gramian_cache_init,
row_gramian_cache_reset)
self._col_updates_init = control_flow_ops.group(
row_factors_cache_init, col_factors_cache_reset, row_gramian_cache_init,
col_gramian_cache_reset)
if self._row_wt_cache is not None:
assert self._col_wt_cache is not None
self._worker_init = control_flow_ops.group(
row_wt_cache_init, col_wt_cache_init, name="worker_init")
else:
self._worker_init = control_flow_ops.no_op(name="worker_init")
@property
def worker_init(self):
"""Op to initialize worker state once before starting any updates.
Note that specifically this initializes the cache of the row and column
weights on workers when `use_factors_weights_cache` is True. In this case,
if these weights are being calculated and reset after the object is created,
it is important to ensure this op is run afterwards so the cache reflects
the correct values.
"""
return self._worker_init
@property
def row_update_prep_gramian_op(self):
"""Op to form the gramian before starting row updates.
Must be run before initialize_row_update_op and should only be run by one
trainer (usually the chief) when doing distributed training.
Returns:
Op to form the gramian.
"""
return self._row_update_prep_gramian
@property
def col_update_prep_gramian_op(self):
"""Op to form the gramian before starting col updates.
Must be run before initialize_col_update_op and should only be run by one
trainer (usually the chief) when doing distributed training.
Returns:
Op to form the gramian.
"""
return self._col_update_prep_gramian
@property
def initialize_row_update_op(self):
"""Op to initialize worker state before starting row updates."""
return self._row_updates_init
@property
def initialize_col_update_op(self):
"""Op to initialize worker state before starting column updates."""
return self._col_updates_init
@staticmethod
def _get_sharding_func(size, num_shards):
"""Create sharding function for scatter update."""
def func(ids):
if num_shards == 1:
return None, ids
else:
ids_per_shard = size // num_shards
extras = size % num_shards
assignments = math_ops.maximum(ids // (ids_per_shard + 1),
(ids - extras) // ids_per_shard)
new_ids = array_ops.where(assignments < extras,
ids % (ids_per_shard + 1),
(ids - extras) % ids_per_shard)
return assignments, new_ids
return func
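# Worked example (added comment, values chosen for illustration): with size=10
# and num_shards=3 the shard sizes are [4, 3, 3] (see _shard_sizes), so global
# id 2 maps to (shard 0, local id 2) and global id 5 maps to (shard 1,
# local id 1): max(5 // 4, (5 - 1) // 3) = 1 and (5 - 1) % 3 = 1.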
@classmethod
def scatter_update(cls, factor, indices, values, sharding_func, name=None):
"""Helper function for doing sharded scatter update."""
assert isinstance(factor, list)
if len(factor) == 1:
with ops.colocate_with(factor[0]):
# TODO(agarwal): assign instead of scatter update for full batch update.
return state_ops.scatter_update(
factor[0], indices, values, name=name).op
else:
num_shards = len(factor)
assignments, new_ids = sharding_func(indices)
assert assignments is not None
assignments = math_ops.cast(assignments, dtypes.int32)
sharded_ids = data_flow_ops.dynamic_partition(new_ids, assignments,
num_shards)
sharded_values = data_flow_ops.dynamic_partition(values, assignments,
num_shards)
updates = []
for i in xrange(num_shards):
updates.append(
state_ops.scatter_update(factor[i], sharded_ids[i], sharded_values[
i]))
return control_flow_ops.group(*updates, name=name)
def update_row_factors(self, sp_input=None, transpose_input=False):
r"""Updates the row factors.
Args:
sp_input: A SparseTensor representing a subset of rows of the full input
in any order. Please note that this SparseTensor must retain the same
indexing as the original input.
transpose_input: If true, the input will be logically transposed and the
rows corresponding to the transposed input are updated.
Returns:
A tuple consisting of the following elements:
new_values: New values for the row factors.
update_op: An op that assigns the newly computed values to the row
factors.
unregularized_loss: A tensor (scalar) that contains the normalized
minibatch loss corresponding to sp_input, without the regularization
term. If sp_input contains the rows \\({A_{i, :}, i \in I}\\), and the
input matrix A has n total rows, then the unregularized loss is:
\\(\|\sqrt W_I \odot (A_I - U_I V^T)\|_F^2 * n / |I|\\)
The total loss is unregularized_loss + regularization.
regularization: A tensor (scalar) that contains the normalized
regularization term for the minibatch loss corresponding to sp_input.
If sp_input contains the rows \\({A_{i, :}, i \in I}\\), and the input
matrix A has n total rows, then the regularization term is:
\\((\lambda \|U_I\|_F^2) * n / |I| + \lambda \|V\|_F^2\\).
sum_weights: The sum of the weights W_I corresponding to sp_input,
normalized by a factor of \\(n / |I|\\). The root weighted squared
error is: sqrt(unregularized_loss / sum_weights).
"""
return self._process_input_helper(
True, sp_input=sp_input, transpose_input=transpose_input)
def update_col_factors(self, sp_input=None, transpose_input=False):
r"""Updates the column factors.
Args:
sp_input: A SparseTensor representing a subset of columns of the full
input. Please refer to comments for update_row_factors for
restrictions.
transpose_input: If true, the input will be logically transposed and the
columns corresponding to the transposed input are updated.
Returns:
A tuple consisting of the following elements:
new_values: New values for the column factors.
update_op: An op that assigns the newly computed values to the column
factors.
unregularized_loss: A tensor (scalar) that contains the normalized
minibatch loss corresponding to sp_input, without the regularization
term. If sp_input contains the columns \\({A_{:, j}, j \in J}\\), and
the input matrix A has m total columns, then the unregularized loss is:
\\(\|\sqrt W_J \odot (A_J - U V_J^T)\|_F^2 * m / |J|\\)
The total loss is unregularized_loss + regularization.
regularization: A tensor (scalar) that contains the normalized
regularization term for the minibatch loss corresponding to sp_input.
If sp_input contains the columns \\({A_{:, j}, j \in J}\\), and the
input matrix A has m total columns, then the regularization term is:
\\((\lambda \|V_J\|_F^2) * m / |J| + \lambda \|U\|_F^2\\).
sum_weights: The sum of the weights W_J corresponding to sp_input,
normalized by a factor of \\(m / |J|\\). The root weighted squared
error is: sqrt(unregularized_loss / sum_weights).
"""
return self._process_input_helper(
False, sp_input=sp_input, transpose_input=transpose_input)
def project_row_factors(self,
sp_input=None,
transpose_input=False,
projection_weights=None):
"""Projects the row factors.
This computes the row embedding \\(u_i\\) for an observed row \\(a_i\\) by
solving one iteration of the update equations.
Args:
sp_input: A SparseTensor representing a set of rows. Please note that the
column indices of this SparseTensor must match the model column feature
indexing while the row indices are ignored. The returned results will be
in the same ordering as the input rows.
transpose_input: If true, the input will be logically transposed and the
rows corresponding to the transposed input are projected.
projection_weights: The row weights to be used for the projection. If None
then 1.0 is used. This can be either a scalar or a rank-1 tensor with
the number of elements matching the number of rows to be projected.
Note that the column weights will be determined by the underlying WALS
model.
Returns:
Projected row factors.
"""
if projection_weights is None:
projection_weights = 1
return self._process_input_helper(
True,
sp_input=sp_input,
transpose_input=transpose_input,
row_weights=projection_weights)[0]
def project_col_factors(self,
sp_input=None,
transpose_input=False,
projection_weights=None):
"""Projects the column factors.
This computes the column embedding \\(v_j\\) for an observed column
\\(a_j\\) by solving one iteration of the update equations.
Args:
sp_input: A SparseTensor representing a set of columns. Please note that
the row indices of this SparseTensor must match the model row feature
indexing while the column indices are ignored. The returned results will
be in the same ordering as the input columns.
transpose_input: If true, the input will be logically transposed and the
columns corresponding to the transposed input are projected.
projection_weights: The column weights to be used for the projection. If
None then 1.0 is used. This can be either a scalar or a rank-1 tensor
with the number of elements matching the number of columns to be
projected. Note that the row weights will be determined by the
underlying WALS model.
Returns:
Projected column factors.
"""
if projection_weights is None:
projection_weights = 1
return self._process_input_helper(
False,
sp_input=sp_input,
transpose_input=transpose_input,
row_weights=projection_weights)[0]
def _process_input_helper(self,
update_row_factors,
sp_input=None,
transpose_input=False,
row_weights=None):
"""Creates the graph for processing a sparse slice of input.
Args:
update_row_factors: if True, update or project the row_factors, else
update or project the column factors.
sp_input: Please refer to comments for update_row_factors,
update_col_factors, project_row_factors, and project_col_factors for
restrictions.
transpose_input: If True, the input is logically transposed and then the
corresponding rows/columns of the transposed input are updated.
row_weights: If not None, this is the row/column weights to be used for
the update or projection. If None, use the corresponding weights from
the model. Note that the feature (column/row) weights will be
determined by the model. When not None, it can either be a scalar or
a rank-1 tensor with the same number of elements as the number of rows
or columns to be updated/projected.
Returns:
A tuple consisting of the following elements:
new_values: New values for the row/column factors.
update_op: An op that assigns the newly computed values to the row/column
factors.
unregularized_loss: A tensor (scalar) that contains the normalized
minibatch loss corresponding to sp_input, without the regularization
term. Add the regularization term below to yield the loss.
regularization: A tensor (scalar) that contains the normalized
regularization term for the minibatch loss corresponding to sp_input.
sum_weights: The sum of the weights corresponding to sp_input. This
can be used with unregularized loss to calculate the root weighted
squared error.
"""
assert isinstance(sp_input, sparse_tensor.SparseTensor)
if update_row_factors:
left = self._row_factors
right_factors = self._col_factors_cache
row_wt = self._row_wt_cache
col_wt = self._col_wt_cache
total_rows = self._input_rows
total_cols = self._input_cols
sharding_func = WALSModel._get_sharding_func(self._input_rows,
self._num_row_shards)
gramian = self._col_gramian_cache
else:
left = self._col_factors
right_factors = self._row_factors_cache
row_wt = self._col_wt_cache
col_wt = self._row_wt_cache
total_rows = self._input_cols
total_cols = self._input_rows
sharding_func = WALSModel._get_sharding_func(self._input_cols,
self._num_col_shards)
gramian = self._row_gramian_cache
transpose_input = not transpose_input
# Note that the row indices of sp_input are based on the original full input.
# Here we reindex the rows and give them contiguous ids starting at 0.
# We use tf.unique to achieve this reindexing. Note that this is done so
# that the downstream kernel can assume that the input is "dense" along the
# row dimension.
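# Illustrative example (added comment): if the minibatch row ids are
# [5, 5, 9, 2], array_ops.unique returns update_row_indices = [5, 9, 2] and
# all_row_ids = [0, 0, 1, 2], i.e. each original id is replaced by its
# position in the list of unique ids.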
row_ids, col_ids = array_ops.split(
value=sp_input.indices, num_or_size_splits=2, axis=1)
update_row_indices, all_row_ids = array_ops.unique(row_ids[:, 0])
update_col_indices, all_col_ids = array_ops.unique(col_ids[:, 0])
col_ids = array_ops.expand_dims(math_ops.cast(all_col_ids, dtypes.int64), 1)
row_ids = array_ops.expand_dims(math_ops.cast(all_row_ids, dtypes.int64), 1)
if transpose_input:
update_indices = update_col_indices
row_shape = [
math_ops.cast(array_ops.shape(update_row_indices)[0], dtypes.int64)
]
gather_indices = update_row_indices
else:
update_indices = update_row_indices
row_shape = [
math_ops.cast(array_ops.shape(update_col_indices)[0], dtypes.int64)
]
gather_indices = update_col_indices
num_rows = math_ops.cast(array_ops.shape(update_indices)[0], dtypes.int64)
col_shape = [num_rows]
right = embedding_ops.embedding_lookup(
right_factors, gather_indices, partition_strategy="div")
new_sp_indices = array_ops.concat([row_ids, col_ids], 1)
new_sp_shape = (array_ops.concat([row_shape, col_shape], 0)
if transpose_input else
array_ops.concat([col_shape, row_shape], 0))
new_sp_input = sparse_tensor.SparseTensor(
indices=new_sp_indices,
values=sp_input.values,
dense_shape=new_sp_shape)
# Compute lhs and rhs of the normal equations
total_lhs = (self._unobserved_weight * gramian)
if self._regularization_matrix is not None:
total_lhs += self._regularization_matrix
if self._row_weights is None:
# Special case of ALS. Use a much simpler update rule.
total_rhs = (
self._unobserved_weight * sparse_ops.sparse_tensor_dense_matmul(
new_sp_input, right, adjoint_a=transpose_input))
# TODO(rmlarsen): handle transposing in tf.matrix_solve instead of
# transposing explicitly.
# TODO(rmlarsen): multi-thread tf.matrix_solve.
new_left_values = array_ops.transpose(
linalg_ops.matrix_solve(total_lhs, array_ops.transpose(total_rhs)))
else:
if row_weights is None:
# TODO(yifanchen): Add special handling for single shard without using
# embedding_lookup and perform benchmarks for those cases. Same for
# col_weights lookup below.
row_weights_slice = embedding_ops.embedding_lookup(
row_wt, update_indices, partition_strategy="div")
else:
num_indices = array_ops.shape(update_indices)[0]
with ops.control_dependencies(
[check_ops.assert_less_equal(array_ops.rank(row_weights), 1)]):
row_weights_slice = control_flow_ops.cond(
math_ops.equal(array_ops.rank(row_weights), 0),
lambda: (array_ops.ones([num_indices]) * row_weights),
lambda: math_ops.cast(row_weights, dtypes.float32))
col_weights = embedding_ops.embedding_lookup(
col_wt, gather_indices, partition_strategy="div")
partial_lhs, total_rhs = (
gen_factorization_ops.wals_compute_partial_lhs_and_rhs(
right,
col_weights,
self._unobserved_weight,
row_weights_slice,
new_sp_input.indices,
new_sp_input.values,
num_rows,
transpose_input,
name="wals_compute_partial_lhs_rhs"))
total_lhs = array_ops.expand_dims(total_lhs, 0) + partial_lhs
total_rhs = array_ops.expand_dims(total_rhs, -1)
new_left_values = array_ops.squeeze(
linalg_ops.matrix_solve(total_lhs, total_rhs), [2])
update_op_name = "row_update" if update_row_factors else "col_update"
update_op = self.scatter_update(
left,
update_indices,
new_left_values,
sharding_func,
name=update_op_name)
# Create the loss subgraph
loss_sp_input = (sparse_ops.sparse_transpose(new_sp_input)
if transpose_input else new_sp_input)
# sp_approx is the low rank estimate of the input matrix, formed by
# computing the product <\\(u_i, v_j\\)> for (i, j) in loss_sp_input.indices.
sp_approx_vals = gen_factorization_ops.masked_matmul(
new_left_values,
right,
loss_sp_input.indices,
transpose_a=False,
transpose_b=True)
sp_approx = sparse_tensor.SparseTensor(
loss_sp_input.indices, sp_approx_vals, loss_sp_input.dense_shape)
sp_approx_sq = math_ops.square(sp_approx)
sp_residual = sparse_ops.sparse_add(loss_sp_input, sp_approx * (-1))
sp_residual_sq = math_ops.square(sp_residual)
row_wt_mat = (constant_op.constant(0.)
if self._row_weights is None else array_ops.expand_dims(
row_weights_slice, 1))
col_wt_mat = (constant_op.constant(0.)
if self._col_weights is None else array_ops.expand_dims(
col_weights, 0))
# We return the normalized loss
partial_row_gramian = math_ops.matmul(
new_left_values, new_left_values, transpose_a=True)
normalization_factor = total_rows / math_ops.cast(num_rows, dtypes.float32)
unregularized_loss = (
self._unobserved_weight * ( # pyformat line break
sparse_ops.sparse_reduce_sum(sp_residual_sq) - # pyformat break
sparse_ops.sparse_reduce_sum(sp_approx_sq) + # pyformat break
math_ops.trace(math_ops.matmul(partial_row_gramian, gramian))) +
sparse_ops.sparse_reduce_sum(row_wt_mat * (sp_residual_sq * col_wt_mat))
) * normalization_factor
if self._regularization is not None:
regularization = self._regularization * (
math_ops.trace(partial_row_gramian) * normalization_factor +
math_ops.trace(gramian))
else:
regularization = constant_op.constant(0.)
sum_weights = self._unobserved_weight * math_ops.cast(
total_rows * total_cols, dtypes.float32)
if self._row_weights is not None and self._col_weights is not None:
ones = sparse_tensor.SparseTensor(
indices=loss_sp_input.indices,
values=array_ops.ones(array_ops.shape(loss_sp_input.values)),
dense_shape=loss_sp_input.dense_shape)
sum_weights += sparse_ops.sparse_reduce_sum(row_wt_mat * (
ones * col_wt_mat)) * normalization_factor
return (new_left_values, update_op, unregularized_loss, regularization,
sum_weights)
|
#
# Copyright (c) 2019-2020 Google LLC. All Rights Reserved.
# Copyright (c) 2016-2018 Nest Labs Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Description:
# This file implements the Weave Data Language (WDL) Compiler
# (WDLC) Google Protocol Buffers (protobuf) compiler (protoc)
# plugin, used to validate and generate code against the WDL
# schema.
#
"""WDL protoc plugin script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import sys
from google.protobuf.compiler import plugin_pb2
from gwv import gwvc
from gwv import schema
from gwv import template_set
from nwv import nwv_parser
def codegen(request, response):
"""Generates wdl c code for devices from Jinja templates.
Args:
request: CodeGeneratorRequest, see google/protobuf/compiler/plugin.proto
response: CodeGeneratorResponse, see google/protobuf/compiler/plugin.proto.
Output files filled with generated files.
Raises:
Exception: Valid Jinja template directory required for wdl plugin.
"""
args = {}
for argument in request.parameter.split(','):
(key, value) = argument.split('=', 1)
values = value.split(':')
if len(values) == 1:
args[key] = values[0]
else:
args[key] = values
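# Illustrative note (added comment; the file names are hypothetical): a
# parameter string such as "templates=a.tmpl:b.tmpl,language=cpp" would yield
# args == {'templates': ['a.tmpl', 'b.tmpl'], 'language': 'cpp'}.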
legacy_mode_enabled = ('legacy_mode_enabled' in args and
args['legacy_mode_enabled'].lower() == 'true')
gen_dependencies = ('gen_dependencies' in args and
args['gen_dependencies'].lower() == 'true')
codegen_reference_mode = ('codegen_reference_mode' in args and
args['codegen_reference_mode'].lower() == 'true')
if not args.get('templates'):
raise Exception('wdl_plugin: \'templates\' argument is empty')
if isinstance(args['templates'], list):
template_files = args['templates']
else:
template_files = [
args['templates'],
]
template_languages = {
'c': template_set.TemplateLanguage.C,
'cpp': template_set.TemplateLanguage.CPLUSPLUS,
'java': template_set.TemplateLanguage.JAVA,
'js': template_set.TemplateLanguage.JAVASCRIPT,
'md': template_set.TemplateLanguage.MARKDOWN,
'objc': template_set.TemplateLanguage.OBJECTIVEC,
}
if 'language' not in args or args['language'] not in template_languages:
language = template_set.TemplateLanguage.BASE
else:
language = template_languages[args['language']]
schema_obj = schema.Schema()
schema_parser = nwv_parser.Parser(schema_obj)
file_descs_to_gen = [
proto_file for proto_file in request.proto_file
if (('semantic' not in proto_file.name)
and ('retention' not in proto_file.name))
]
dependency_set = []
# This file needs to get added to the dependency list if we're in
# codegen mode, since this file doesn't show up as a dependency by
# default, but is still necessary for some code-generated targets.
if codegen_reference_mode:
dependency_set.append('google/protobuf/field_mask.proto')
for proto_file in file_descs_to_gen:
dependency_set.append(proto_file.name)
schema_parser.add_file_set(file_descs_to_gen)
gwvc.check(schema_obj)
# Add two spaces to each log message to make it line up with our output.
template_set.log = lambda *a: print(' ', *a)
templates = template_set.TemplateSet(
template_files,
legacy_mode_enabled=legacy_mode_enabled, language=language)
if gen_dependencies:
templates.codegen(schema_obj, None, dependency_set)
else:
templates.codegen(schema_obj, None, request.file_to_generate)
for filename, content in templates.output_files:
out_file = response.file.add()
out_file.name = filename
# The newline was added in the legacy template_set file writer,
# so it's included here to preserve compatibility.
out_file.content = content.encode('utf-8') + '\n'.encode('utf-8')
if __name__ == '__main__':
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.WARN)
if sys.version_info.major == 2:
std_in = sys.stdin.read()
else:
std_in = open(0, "rb").read()
request_pb2 = plugin_pb2.CodeGeneratorRequest.FromString(std_in)
response_pb2 = plugin_pb2.CodeGeneratorResponse()
codegen(request_pb2, response_pb2)
if sys.version_info.major == 2:
sys.stdout.write(response_pb2.SerializeToString())
else:
open(1,"wb").write(response_pb2.SerializeToString())
|
#
# SPDX-License-Identifier: MIT
#
import os
import re
import time
import logging
import bb.tinfoil
from oeqa.selftest.case import OESelftestTestCase
from oeqa.utils.commands import runCmd
class TinfoilTests(OESelftestTestCase):
""" Basic tests for the tinfoil API """
def test_getvar(self):
with bb.tinfoil.Tinfoil() as tinfoil:
tinfoil.prepare(True)
machine = tinfoil.config_data.getVar('MACHINE')
if not machine:
self.fail('Unable to get MACHINE value - returned %s' % machine)
def test_expand(self):
with bb.tinfoil.Tinfoil() as tinfoil:
tinfoil.prepare(True)
expr = '${@os.getpid()}'
pid = tinfoil.config_data.expand(expr)
if not pid:
self.fail('Unable to expand "%s" - returned %s' % (expr, pid))
def test_getvar_bb_origenv(self):
with bb.tinfoil.Tinfoil() as tinfoil:
tinfoil.prepare(True)
origenv = tinfoil.config_data.getVar('BB_ORIGENV', False)
if not origenv:
self.fail('Unable to get BB_ORIGENV value - returned %s' % origenv)
self.assertEqual(origenv.getVar('HOME', False), os.environ['HOME'])
def test_parse_recipe(self):
with bb.tinfoil.Tinfoil() as tinfoil:
tinfoil.prepare(config_only=False, quiet=2)
testrecipe = 'mdadm'
best = tinfoil.find_best_provider(testrecipe)
if not best:
self.fail('Unable to find recipe providing %s' % testrecipe)
rd = tinfoil.parse_recipe_file(best[3])
self.assertEqual(testrecipe, rd.getVar('PN'))
def test_parse_recipe_copy_expand(self):
with bb.tinfoil.Tinfoil() as tinfoil:
tinfoil.prepare(config_only=False, quiet=2)
testrecipe = 'mdadm'
best = tinfoil.find_best_provider(testrecipe)
if not best:
self.fail('Unable to find recipe providing %s' % testrecipe)
rd = tinfoil.parse_recipe_file(best[3])
# Check we can get variable values
self.assertEqual(testrecipe, rd.getVar('PN'))
# Check that expanding a value that includes a variable reference works
self.assertEqual(testrecipe, rd.getVar('BPN'))
# Now check that changing the referenced variable's value in a copy gives that
# value when expanding
localdata = bb.data.createCopy(rd)
localdata.setVar('PN', 'hello')
self.assertEqual('hello', localdata.getVar('BPN'))
def test_parse_recipe_initial_datastore(self):
with bb.tinfoil.Tinfoil() as tinfoil:
tinfoil.prepare(config_only=False, quiet=2)
testrecipe = 'mdadm'
best = tinfoil.find_best_provider(testrecipe)
if not best:
self.fail('Unable to find recipe providing %s' % testrecipe)
dcopy = bb.data.createCopy(tinfoil.config_data)
dcopy.setVar('MYVARIABLE', 'somevalue')
rd = tinfoil.parse_recipe_file(best[3], config_data=dcopy)
# Check we can get variable values
self.assertEqual('somevalue', rd.getVar('MYVARIABLE'))
def test_list_recipes(self):
with bb.tinfoil.Tinfoil() as tinfoil:
tinfoil.prepare(config_only=False, quiet=2)
# Check pkg_pn
checkpns = ['tar', 'automake', 'coreutils', 'm4-native', 'nativesdk-gcc']
pkg_pn = tinfoil.cooker.recipecaches[''].pkg_pn
for pn in checkpns:
self.assertIn(pn, pkg_pn)
# Check pkg_fn
checkfns = {'nativesdk-gcc': '^virtual:nativesdk:.*', 'coreutils': '.*/coreutils_.*.bb'}
for fn, pn in tinfoil.cooker.recipecaches[''].pkg_fn.items():
if pn in checkpns:
if pn in checkfns:
self.assertTrue(re.match(checkfns[pn], fn), 'Entry for %s: %s did not match %s' % (pn, fn, checkfns[pn]))
checkpns.remove(pn)
if checkpns:
self.fail('Unable to find pkg_fn entries for: %s' % ', '.join(checkpns))
def test_wait_event(self):
with bb.tinfoil.Tinfoil() as tinfoil:
tinfoil.prepare(config_only=True)
tinfoil.set_event_mask(['bb.event.FilesMatchingFound', 'bb.command.CommandCompleted'])
# Need to drain events otherwise events that were masked may still be in the queue
while tinfoil.wait_event():
pass
pattern = 'conf'
res = tinfoil.run_command('findFilesMatchingInDir', pattern, 'conf/machine')
self.assertTrue(res)
eventreceived = False
commandcomplete = False
start = time.time()
# Wait for 5s in total so we'd detect spurious heartbeat events for example
while time.time() - start < 5:
event = tinfoil.wait_event(1)
if event:
if isinstance(event, bb.command.CommandCompleted):
commandcomplete = True
elif isinstance(event, bb.event.FilesMatchingFound):
self.assertEqual(pattern, event._pattern)
self.assertIn('qemuarm.conf', event._matches)
eventreceived = True
elif isinstance(event, logging.LogRecord):
continue
else:
self.fail('Unexpected event: %s' % event)
self.assertTrue(commandcomplete, 'Timed out waiting for CommandCompleted event from bitbake server')
self.assertTrue(eventreceived, 'Did not receive FilesMatchingFound event from bitbake server')
def test_setvariable_clean(self):
# First check that setVariable affects the datastore
with bb.tinfoil.Tinfoil() as tinfoil:
tinfoil.prepare(config_only=True)
tinfoil.run_command('setVariable', 'TESTVAR', 'specialvalue')
self.assertEqual(tinfoil.config_data.getVar('TESTVAR'), 'specialvalue', 'Value set using setVariable is not reflected in client-side getVar()')
# Now check that the setVariable's effects are no longer present
# (this may legitimately break in future if we stop reinitialising
# the datastore, in which case we'll have to reconsider use of
# setVariable entirely)
with bb.tinfoil.Tinfoil() as tinfoil:
tinfoil.prepare(config_only=True)
self.assertNotEqual(tinfoil.config_data.getVar('TESTVAR'), 'specialvalue', 'Value set using setVariable is still present!')
# Now check that setVar on the main datastore works (uses setVariable internally)
with bb.tinfoil.Tinfoil() as tinfoil:
tinfoil.prepare(config_only=True)
tinfoil.config_data.setVar('TESTVAR', 'specialvalue')
value = tinfoil.run_command('getVariable', 'TESTVAR')
self.assertEqual(value, 'specialvalue', 'Value set using config_data.setVar() is not reflected in config_data.getVar()')
def test_datastore_operations(self):
with bb.tinfoil.Tinfoil() as tinfoil:
tinfoil.prepare(config_only=True)
# Test setVarFlag() / getVarFlag()
tinfoil.config_data.setVarFlag('TESTVAR', 'flagname', 'flagval')
value = tinfoil.config_data.getVarFlag('TESTVAR', 'flagname')
self.assertEqual(value, 'flagval', 'Value set using config_data.setVarFlag() is not reflected in config_data.getVarFlag()')
# Test delVarFlag()
tinfoil.config_data.setVarFlag('TESTVAR', 'otherflag', 'othervalue')
tinfoil.config_data.delVarFlag('TESTVAR', 'flagname')
value = tinfoil.config_data.getVarFlag('TESTVAR', 'flagname')
self.assertEqual(value, None, 'Varflag deleted using config_data.delVarFlag() is not reflected in config_data.getVarFlag()')
value = tinfoil.config_data.getVarFlag('TESTVAR', 'otherflag')
self.assertEqual(value, 'othervalue', 'Varflag deleted using config_data.delVarFlag() caused unrelated flag to be removed')
# Test delVar()
tinfoil.config_data.setVar('TESTVAR', 'varvalue')
value = tinfoil.config_data.getVar('TESTVAR')
self.assertEqual(value, 'varvalue', 'Value set using config_data.setVar() is not reflected in config_data.getVar()')
tinfoil.config_data.delVar('TESTVAR')
value = tinfoil.config_data.getVar('TESTVAR')
self.assertEqual(value, None, 'Variable deleted using config_data.delVar() appears to still have a value')
# Test renameVar()
tinfoil.config_data.setVar('TESTVAROLD', 'origvalue')
tinfoil.config_data.renameVar('TESTVAROLD', 'TESTVARNEW')
value = tinfoil.config_data.getVar('TESTVAROLD')
self.assertEqual(value, None, 'Variable renamed using config_data.renameVar() still seems to exist')
value = tinfoil.config_data.getVar('TESTVARNEW')
self.assertEqual(value, 'origvalue', 'Variable renamed using config_data.renameVar() does not appear with new name')
# Test overrides
tinfoil.config_data.setVar('TESTVAR', 'original')
tinfoil.config_data.setVar('TESTVAR_overrideone', 'one')
tinfoil.config_data.setVar('TESTVAR_overridetwo', 'two')
tinfoil.config_data.appendVar('OVERRIDES', ':overrideone')
value = tinfoil.config_data.getVar('TESTVAR')
self.assertEqual(value, 'one', 'Variable overrides not functioning correctly')
def test_variable_history(self):
# Basic test to ensure that variable history works when tracking=True
with bb.tinfoil.Tinfoil(tracking=True) as tinfoil:
tinfoil.prepare(config_only=False, quiet=2)
# Note that _tracking for any datastore we get will be
# false here, that's currently expected - so we can't check
# for that
history = tinfoil.config_data.varhistory.variable('DL_DIR')
for entry in history:
if entry['file'].endswith('/bitbake.conf'):
if entry['op'] in ['set', 'set?']:
break
else:
self.fail('Did not find history entry setting DL_DIR in bitbake.conf. History: %s' % history)
# Check it works for recipes as well
testrecipe = 'zlib'
rd = tinfoil.parse_recipe(testrecipe)
history = rd.varhistory.variable('LICENSE')
bbfound = -1
recipefound = -1
for i, entry in enumerate(history):
if entry['file'].endswith('/bitbake.conf'):
if entry['detail'] == 'INVALID' and entry['op'] in ['set', 'set?']:
bbfound = i
elif entry['file'].endswith('.bb'):
if entry['op'] == 'set':
recipefound = i
if bbfound == -1:
self.fail('Did not find history entry setting LICENSE in bitbake.conf parsing %s recipe. History: %s' % (testrecipe, history))
if recipefound == -1:
self.fail('Did not find history entry setting LICENSE in %s recipe. History: %s' % (testrecipe, history))
if bbfound > recipefound:
self.fail('History entry setting LICENSE in %s recipe and in bitbake.conf in wrong order. History: %s' % (testrecipe, history))
|
import yaml, re
converter = {
"int" : "std::to_string",
"int32_t" : "std::to_string",
"uint32_t" : "std::to_string",
"int64_t" : "std::to_string",
"uint64_t" : "std::to_string",
"user_addr_t" : "std::to_string",
"user_size_t" : "std::to_string",
"dev_t" : "std::to_string",
"es_file_t *" : "EndpointSecurityImpl::getEsFile",
"es_string_token_t" : "EndpointSecurityImpl::getEsStringToken",
"const es_event_exec_t *" : "### FIXME",
"struct attrlist" : "### FIXME",
"es_statfs_t" : "### FIXME",
"struct statfs *" : "### FIXME",
}
with open('endpointsecurity.yaml') as f:
data = yaml.load(f, Loader=yaml.FullLoader)
mainmap = []
headerprotos = []
functionbodies = []
mainswitchop = []
for d in data:
name = d["name"]
methodname = "on_" + name
events = d["events"]
params = d["params"]
# this ensures that (a) the auth event is always first, and (b) if we have two events, one of them is the auth event
events.sort()
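# Illustrative note (added comment; the event names are assumed for the
# example): a pair such as ["ES_EVENT_TYPE_NOTIFY_EXEC",
# "ES_EVENT_TYPE_AUTH_EXEC"] sorts to ["ES_EVENT_TYPE_AUTH_EXEC",
# "ES_EVENT_TYPE_NOTIFY_EXEC"], putting the AUTH event at index 0.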
# Prepare main.cpp switch
if len( events ) > 1:
mainentry = '\t{{ "{0}", {{ {1}, {2} }} }}'.format( name, events[1], events[0] )
else:
mainentry = '\t{{ "{0}", {{ {1}, ES_EVENT_TYPE_LAST }} }}'.format( name, events[0] )
mainmap.append( mainentry )
# Prepare the prototype
if len(params) == 0:
protoargs = "const es_event_" + name + "_t * event"
else:
protoargs = ", ".join(params)
# Prepare the header prototype
headerprotos.append( " void\t" + methodname + " ( " + protoargs + " );" );
# Prepare the main switch operator case
switchop = " case " + events[0] + ":\n"
if len(events) > 1:
switchop += " case " + events[1] + ":\n"
#
# Create the function bodies
#
funcbody = "void EndpointSecurity::" + methodname + " ( " + protoargs + " )\n" \
+ "{\n" \
+ " pimpl->event.event = \"" + name + "\";\n"
# An empty params list means the code needs to be written manually
argslist = []
for p in params:
# Ensure a space after the pointer sign, if there is one
# Split the type and the name
m = re.match( "\s*(.*)\s+(\w+)", p.replace( '*', '* ') )
argtype = m[1].strip()
argname = m[2].strip()
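# Illustrative note (added comment; the parameter is hypothetical): for
# p = "es_file_t *target", the replace yields "es_file_t * target" and the
# regex splits it into argtype == "es_file_t *" and argname == "target",
# which matches a key in the converter table above.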
argslist.append( "message->event." + name + "." + argname )
# Get a converter function
if argtype == "es_process_t *":
funcbody += ' pimpl->getEsProcess( {0}, "{0}_" );\n'.format( argname )
else:
if argtype == "bool":
convfunc = argname + ' ? "true" : "false"'
else:
convfunc = converter[ argtype ] + "(" + argname + ")"
funcbody += ' pimpl->event.parameters["{0}"] = {1};\n'.format( argname, convfunc )
# Terminate and append the function body
if len(params) == 0:
funcbody += " ### FIXME ###\n"
funcbody += "}\n"
functionbodies.append( funcbody )
# Call and terminate the switch operator
if len(params) == 0:
switchop += " " + methodname + "( &message->event." + name + " );\n"
else:
switchop += " " + methodname + "( " + (", ").join(argslist) + ");\n"
switchop += " break;\n"
mainswitchop.append( switchop );
#print( "main.cpp switch:\n" )
#print( ",\n".join( mainmap ) )
print( "\n\nheader prototypes:\n" )
print( "\n".join( headerprotos ) )
print( "\n\nfunction bodies:\n" )
print( "\n\n".join( functionbodies ) )
print( "\n\nmain switch:\n" )
print( "\n".join( mainswitchop ) )
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 15 22:15:35 2017
@author: Stuart
"""
vessels = {}
import json
with open('facility_assumptions.json', 'r') as j:
d = json.load(j)
vessels = d["Vessels"]
print(vessels)
|
#
# Copyright (C) 2020-2021 Arm Limited or its affiliates and Contributors. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Definition of an SPDX File."""
from pathlib import Path
from spdx.checksum import Algorithm
from spdx.document import License
from spdx.file import File, FileType
from typing import Optional
from continuous_delivery_scripts.spdx_report.spdx_helpers import (
determine_spdx_value,
determine_file_licence,
determine_file_copyright_text,
)
from continuous_delivery_scripts.utils.definitions import UNKNOWN
from continuous_delivery_scripts.utils.hash_helpers import generate_uuid_based_on_str, determine_sha1_hash_of_file
from continuous_delivery_scripts.utils.third_party_licences import cleanse_licence_expression
class SpdxFile:
"""SPDX File.
See https://spdx.org/spdx-specification-21-web-version#h.nmf14n
"""
def __init__(self, path: Path, project_root: Path, package_licence: str) -> None:
"""Constructor."""
self._path = path
self._project_root = project_root
self._package_licence = package_licence
@property
def path(self) -> Path:
"""Gets the file path.
Returns:
the file path
"""
return self._path
@property
def unix_relative_path(self) -> str:
"""Gets the unix relative path.
Returns:
the file path
"""
if str(self.path) == UNKNOWN:
return UNKNOWN
unix_path = str(self.path.relative_to(self._project_root)).replace("\\", "/")
return f"./{unix_path}"
@property
def name(self) -> str:
"""Gets the file name.
Returns:
the file name
"""
return self._path.name
@property
def id(self) -> str:
"""Gets a unique identifier.
Returns:
a UUID
"""
# Generates a unique Id based on the name of the file
return generate_uuid_based_on_str(self.unix_relative_path)
@property
def sha1_check_sum(self) -> str:
"""Gets file SHA1 hash.
Returns:
corresponding hash
"""
return determine_sha1_hash_of_file(self._path)
@property
def licence(self) -> str:
"""Determines licence from file notice.
Returns:
file's licence
"""
file_licence = determine_file_licence(self.path)
return cleanse_licence_expression(file_licence) if file_licence else self._package_licence
@property
def copyright(self) -> Optional[str]:
"""Determines copyright text from file notice.
Returns:
file's copyright text
"""
return determine_file_copyright_text(self.path)
def generate_spdx_file(self) -> File:
"""Generates the SPDX file.
SPDX File example:
FileName: ./tests/test_mbed_targets.py
SPDXID: SPDXRef-cb9cce30c285e6083c2d19a463cbe592
FileChecksum: SHA1: d3db49873bd2b1cab45bf81e7d88617dea6caaff
LicenseConcluded: NOASSERTION
FileCopyrightText: NONE
Returns:
the corresponding file
"""
source_file = File(determine_spdx_value(self.unix_relative_path))
source_file.type = FileType.SOURCE
source_file.comment = determine_spdx_value(None)
source_file.chk_sum = Algorithm("SHA1", self.sha1_check_sum)
source_file.conc_lics = License.from_identifier(str(determine_spdx_value(self.licence)))
source_file.spdx_id = f"SPDXRef-{self.id}"
source_file.copyright = determine_spdx_value(self.copyright)
source_file.add_lics(License.from_identifier(str(determine_spdx_value(self.licence))))
return source_file
|
class DoiExcelData:
_criteria1=""
_criteria2=""
_criteria3=""
_criteria=""
_total_issued_shares_underly_com_ht=0
_disclosure=""
_is_disclosure_3_pct=""
_is_disclosure_5_pct=""
_company_date=""
_long_short=""
_business_unit=""
_name_of_report_entity=""
_name_of_sec_broker_or_dealer=""
_account_num=""
_investment_acct_type=""
_nature_of_sec=""
_name_of_sec=""
_exchange_ticker_syb_sec=""
_name_of_exchange_sec=""
_isin_code_sec=""
_underly_sec_name=""
_underly_sec_syb=""
_name_of_exchange_underly_sec=""
_isin_code_underly_sec=""
_num_underly_sec_held_remark=""
_total_issued_shares_underly_com_remark=""
_num_underly_sec_held_sum=""
_total_issued_shares_underly_com=""
_msse_issued_shares=""
_last_report_date=""
_instrument_display_code=""
_instrument_name=""
_market_code=""
_portfolio=""
_portfolio_name=""
_beneficial_eq=""
|
#!/usr/bin/env python
import os
import webapp2
from google.appengine.ext.webapp import template
from google.appengine.api import users
from google.appengine.ext import db
#Data store model
class GeoLocation(db.Model):
user = db.UserProperty()
date = db.DateTimeProperty(auto_now_add = True)
#GeoPt object which stores Latitude and Longitude
position = db.GeoPtProperty()
address = db.PostalAddressProperty()
header = db.TextProperty()
class MainHandler(webapp2.RequestHandler):
#Class Variables: To be passed as template values
admin_str = ''
geo_str = ''
loginout_str = ''
user_str = ''
#GET method
def get(self):
#Check if the user has logged in (Google Account)
user = users.get_current_user()
if not user:
#Create appropriate login/logout string for the template
login_url = users.create_login_url(self.request.url)
#self.loginout_str = '<a href="' + login_url + '">Login</a>'
self.loginout_str = ''
#Ask the user to login if he wants personalized results.
self.geo_str = '<center><p>Please <a href="' + login_url + '">login here</a> with your Google Account to enjoy personalized geo-location based services.</p></center>'
else:
#Create appropriate login/logout string for the template
logout_url = users.create_logout_url(self.request.url)
self.loginout_str = '<a href="' + logout_url + '">Logout</a>'
#If the user is admin generate Admin Area Link string
if users.is_current_user_admin():
self.admin_str = '<a href="/admin/">Admin Area</a> |'
#Welcome string for the user (his e-mail ID for now)
self.user_str = '<p>Hello, ' + user.email() + '</p>'
#Selective JavaScript html to be pasted if the user has logged in.
self.geo_str = """<!-- Geo-Coding JavaScript start -->
<center>
<p id="geoloc"><img height="50px" width="50px" src="static/loading.gif" alt="Loading ..." />
<br>Waiting for your permission/Processing ...</p>
</center>
<script src="static/jquery-min.js" type="text/javascript" charset="utf-8"></script>
<script type="text/javascript" src="https://maps.googleapis.com/maps/api/js?sensor=false"></script>
<script src="http://code.google.com/apis/gears/gears_init.js" type="text/javascript" charset="utf-8"></script>
<script src="static/geo-min.js" type="text/javascript" charset="utf-8"></script>
<script type="text/javascript" src="static/main.js"></script>
<!-- Geo-coding JavaScript End -->"""
#templating and rendering using the above variables
template_values = {
'loginout_str' : self.loginout_str,
'geo_str' : self.geo_str,
'user_str' : self.user_str,
'admin_str' : self.admin_str
}
file_path = os.path.join(os.path.dirname(__file__), 'templates/index.html')
html = template.render(file_path, template_values)
self.response.out.write(html)
#Class MainHandler End
#When a user posts the data to the server this handles the request
class StoreHandler(webapp2.RequestHandler):
def post(self):
gstore = GeoLocation(parent = None)
gstore.user = users.get_current_user()
gstore.position = db.GeoPt(float(self.request.get('lat')), float(self.request.get('long')))
gstore.header = db.Text(str(self.request.headers))
address = self.request.get('address')
gstore.address = db.PostalAddress(address)
gstore.put()
#Getting the values from POST request header and inserting them into the DataStore
#End of StoreHandler class
#Admin Area class: Shows the last 100 peoples' information
#as a table
class AdminHandler(webapp2.RequestHandler):
#Again some class variables to handle template values
loginout_str = ''
admin_str = ''
query_dict = None
#Get method
def get(self):
#See if the user has logged in
user = users.get_current_user()
if user:
#Double check if the user is an administrator
if users.is_current_user_admin():
#Create appropriate login/logout url
logout_url = users.create_logout_url(self.request.url)
self.loginout_str = '<a href="' + logout_url + '">Logout</a>'
#Admin Area Login Link (Not necessary)
if users.is_current_user_admin():
self.admin_str = '<a href="/admin/">Admin Area</a> |'
#Query the datastore for the last 100 entries from the dataModel
#named 'GeoLocation', there are no ancestors for this datastore (no namespaces)
self.query_dict = db.GqlQuery("SELECT * FROM GeoLocation ORDER BY date DESC LIMIT 100")
#the regular templating follows this code
template_values = {
'table' : self.query_dict,
'loginout_str' : self.loginout_str,
'admin_str' : self.admin_str
}
file_path = os.path.join(os.path.dirname(__file__), 'templates/admin.html')
html = template.render(file_path, template_values)
self.response.out.write(html)
else:
self.response.out.write('You\'re not an administrator!')
else:
self.response.out.write('Please <a href="' + users.create_login_url() + '">Login</a> here')
app = webapp2.WSGIApplication([('/store',StoreHandler),('/', MainHandler),('/admin/.*', AdminHandler)], debug=True)
|
import os
from ehive.runnable.IGFBaseProcess import IGFBaseProcess
from igf_data.igfdb.collectionadaptor import CollectionAdaptor
from igf_data.utils.tools.ppqt_utils import Ppqt_tools
from igf_data.utils.fileutils import get_datestamp_label
from igf_data.utils.analysis_collection_utils import Analysis_collection_utils
class RunPPQT(IGFBaseProcess):
'''
An ehive process class for running Phantompeakqualtools (PPQT) analysis
'''
def param_defaults(self):
params_dict=super(RunPPQT,self).param_defaults()
params_dict.update({
'analysis_files':[],
'output_prefix':None,
'load_metrics_to_cram':False,
'cram_collection_type':'ANALYSIS_CRAM',
'ppqt_collection_type':'PPQT_REPORT',
'collection_table':'experiment',
'analysis_name':None,
'force_overwrite':True,
'threads':0,
'use_ephemeral_space':0,
})
return params_dict
def run(self):
'''
A runnable method for running PPQT analysis
'''
try:
project_igf_id = self.param_required('project_igf_id')
sample_igf_id = self.param_required('sample_igf_id')
experiment_igf_id = self.param_required('experiment_igf_id')
igf_session_class = self.param_required('igf_session_class')
input_files = self.param_required('input_files')
rscript_path = self.param_required('rscript_path')
ppqt_exe = self.param_required('ppqt_exe')
base_work_dir = self.param_required('base_work_dir')
base_result_dir = self.param_required('base_result_dir')
library_strategy = self.param_required('library_strategy')
analysis_files = self.param_required('analysis_files')
output_prefix = self.param_required('output_prefix')
species_name = self.param_required('species_name')
analysis_name = self.param('analysis_name')
seed_date_stamp = self.param_required('date_stamp')
load_metrics_to_cram = self.param('load_metrics_to_cram')
ppqt_collection_type = self.param('ppqt_collection_type')
cram_collection_type = self.param('cram_collection_type')
collection_table = self.param('collection_table')
force_overwrite = self.param('force_overwrite')
use_ephemeral_space = self.param('use_ephemeral_space')
threads = self.param('threads')
seed_date_stamp = get_datestamp_label(seed_date_stamp)
if output_prefix is not None:
output_prefix='{0}_{1}'.format(output_prefix,
seed_date_stamp) # adding datestamp to the output file prefix
if not isinstance(input_files, list) or \
len(input_files) == 0:
raise ValueError('No input file found')
if len(input_files)>1:
raise ValueError('More than one input file found: {0}'.\
format(input_files))
if analysis_name is None:
analysis_name = library_strategy # use library_strategy as default analysis_name
input_file = input_files[0]
work_dir_prefix = \
os.path.join(\
base_work_dir,
project_igf_id,
sample_igf_id,
experiment_igf_id)
work_dir = self.get_job_work_dir(work_dir=work_dir_prefix) # get a run work dir
ppqt_obj = \
Ppqt_tools(\
rscript_path=rscript_path,
ppqt_exe=ppqt_exe,
use_ephemeral_space=use_ephemeral_space,
threads=threads)
ppqt_cmd,spp_output, pdf_output,spp_data = \
ppqt_obj.run_ppqt(\
input_bam=input_file,
output_dir=work_dir,
output_spp_name='{0}_{1}.spp.out'.format(output_prefix,'PPQT'),
output_pdf_name='{0}_{1}.spp.pdf'.format(output_prefix,'PPQT'))
analysis_files.append(spp_output)
au = \
Analysis_collection_utils(\
dbsession_class=igf_session_class,
analysis_name=analysis_name,
tag_name=species_name,
collection_name=experiment_igf_id,
collection_type=ppqt_collection_type,
collection_table=collection_table,
base_path=base_result_dir)
output_ppqt_list = \
au.load_file_to_disk_and_db(\
input_file_list=[pdf_output],
file_suffix='pdf',
withdraw_exisitng_collection=force_overwrite) # load file to db and disk
if load_metrics_to_cram and \
len(spp_data) > 0:
ca = CollectionAdaptor(**{'session_class':igf_session_class})
attribute_data = \
ca.prepare_data_for_collection_attribute(\
collection_name=experiment_igf_id,
collection_type=cram_collection_type,
data_list=spp_data)
ca.start_session()
try:
ca.create_or_update_collection_attributes(\
data=attribute_data,
autosave=False)
ca.commit_session()
ca.close_session()
except Exception as e:
ca.rollback_session()
ca.close_session()
raise ValueError('Failed to load data to db: {0}'.\
format(e))
self.param('dataflow_params',{'analysis_files':analysis_files,
'output_ppqt_list':output_ppqt_list}) # pass on the PPQT output list
message='finished PPQT for {0} {1}'.\
format(project_igf_id,
sample_igf_id)
self.post_message_to_slack(message,reaction='pass') # send log to slack
self.post_message_to_ms_team(
message=message,
reaction='pass')
message='finished PPQT for {0} {1}: {2}'.\
format(project_igf_id,
sample_igf_id,
ppqt_cmd)
self.comment_asana_task(task_name=project_igf_id, comment=message) # send comment to Asana
except Exception as e:
message='project: {2}, sample:{3}, Error in {0}: {1}'.\
format(self.__class__.__name__,
e,
project_igf_id,
sample_igf_id)
self.warning(message)
self.post_message_to_slack(message,reaction='fail') # post msg to slack for failed jobs
self.post_message_to_ms_team(
message=message,
reaction='fail')
raise
|
import os,sys
sys.path.append('/usr/local/lib/python2.7/site-packages')
import pandas as pd
import numpy as np
import gym
from keras.models import Sequential
from keras.layers import Activation,Dense
from keras.optimizers import Adam
import random
import time
from gym import spaces
import matplotlib.pyplot as plt
#With DQN we don't need to discretise the observations and put them into distinct state buckets.
#That's because neural networks are very good at handling continuous inputs; after all, they are general function approximators.
# The NN predicts the Q values for a given state
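# Added note (not in the original comments): replay() below trains the online network
# towards the standard one-step Bellman target computed from the target network,
#     y(s, a) = r                                       if the episode ended,
#     y(s, a) = r + gamma * max_a' Q_target(s', a')     otherwise,
# where gamma is `discount_factor` and Q_target is `targetModel`.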
class QAgent:
def __init__(self, state_num , action_num, capacity):
self.state_num = state_num
self.action_num = action_num
self.memory = [] # initialise replay memory; this should have a capacity limit so that old experiences are dropped as new ones arrive
self.capacity = capacity
self.times = []
self.learning_rate = 0.1
self.explore_rate = 1
self.updatetargetat = 10000
self.explore_decay = 0.995
self.explore_rate_min = 0.1
self.discount_factor = 0.9
self.model = self._buildnn()
self.targetModel = self._buildnn()
def _buildnn(self):
model = Sequential()
model.add(Dense(24, input_shape=(self.state_num,), activation='relu'))
model.add(Dense(24, activation='relu'))
model.add(Dense(self.action_num, activation='linear'))
model.compile(loss='mse',optimizer = Adam(lr=0.01))
return model
def getQValues(self, state):
predicted = self.model.predict(state)
return predicted[0]
def updateTargetNetwork(self):
self.backupNetwork(self.model, self.targetModel)
def backupNetwork(self, model, backup):
weights = model.get_weights()
backup.set_weights(weights)
def getTargetQValues(self, state):
# print state
predicted = self.targetModel.predict(state)
# print predicted[0]
return predicted[0]
def remember(self, state, action, reward, next_state, done):
self.memory.append((state, action, reward, next_state, done))
# print self.capacity
# if len(self.memory) > self.capacity:
# self.memory.pop(0)
# print "popped it!" #remember needs to be changed
def replay(self, batch_size):
x_batch, y_batch = [], []
minibatch = random.sample(self.memory, min(len(self.memory), batch_size))
#we will use these observations to train our network
# print "mini batch" , minibatch
for state, action, reward, next_state, done in minibatch:
qVals = self.getQValues(state)
qValsNewState = self.getTargetQValues(next_state)
y_target = self.getTargetQValues(state) # target-network Q values for the current state
y_target[action] = reward if done else reward + self.discount_factor * np.max(qValsNewState)
x_batch.append(state[0])
y_batch.append(y_target)
self.model.fit(np.array(x_batch), np.array(y_batch), batch_size=len(x_batch), verbose=0) #train the model
if self.explore_rate > self.explore_rate_min:
self.explore_rate *= self.explore_decay
def select_action(self, qValues):
# print self.explore_rate
if np.random.rand() <= self.explore_rate:
return random.randrange(self.action_num)
else:
return np.argmax(qValues) # returns action
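# Added note: with explore_rate starting at 1.0 and multiplied by explore_decay = 0.995
# per decay, it falls below explore_rate_min = 0.1 after roughly ln(0.1)/ln(0.995) ~ 460
# decays. The rate is decayed both inside replay() and once per step in main(), so in
# practice it reaches the floor sooner than 460 environment steps.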
def main():
ep_num = 100000
max_t = 500
start = 0
solved_t = 199
solved_num = 0
capacity = 100000
solved_max = 100
env = gym.make('CartPole-v1')
state_num = env.observation_space.shape[0]
action_num = env.action_space.n
agent = QAgent(state_num, action_num, capacity)
batch_size = 32
done = False
for ep in range(ep_num):
obv = env.reset()
obv = np.reshape(obv, [1, state_num])
for t in range(max_t):
env.render()
qVals = agent.getQValues(obv)
agent.explore_rate *= agent.explore_decay
agent.explore_rate = max(agent.explore_rate, agent.explore_rate_min)
action = agent.select_action(qVals)
newobv, reward, done, info = env.step(action)
newobv = np.reshape(newobv, [1, state_num])
agent.remember(obv, action, reward, newobv, done)
obv = newobv
if done:
# print("Episode %d finished after %f time steps" % (ep , t))
agent.times.append(t)
if (t >= solved_t):
solved_num += 1
print "success"
else:
solved_num = 0
break
if solved_num == solved_max:
print "solved!"
break
print "agent memory", len(agent.memory)
agent.replay(batch_size)
if ep % agent.updatetargetat == 0:
agent.updateTargetNetwork()
print "updating target network"
if ep % 100 == 0:
end = time.time() - start
print "it tooks %f to do 100 episodes" %(end)
print ep
print t
print agent.explore_rate
print "before", len(agent.times)
print "mean survival score" , np.mean(agent.times)
print agent.times
del agent.times[0:99]
print "number of times record after deletion" , len(agent.times)
start = time.time()
agent.replay(batch_size)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
import rospy
from SensorsListener import SensorsListener
#import motores
class Refletancia():
def __init__(self, sl):
self.sl = sl
def maisEsqBranco(self): return self.sl.getRefle(0) > 4
def esqBranco(self): return self.sl.getRefle(1) > 4
def dirBranco(self): return self.sl.getRefle(2) > 4
def maisDirBranco(self): return self.sl.getRefle(3) > 4
def b_b_b_b(self):
return self.maisEsqBranco() and self.esqBranco() and self.dirBranco() and self.maisDirBranco()
def p_p_p_p(self):
return not self.maisEsqBranco() and not self.esqBranco() and not self.dirBranco() and not self.maisDirBranco()
def p_b_b_b(self):
return not self.maisEsqBranco() and self.esqBranco() and self.dirBranco() and self.maisDirBranco()
def p_p_b_b(self):
return not self.maisEsqBranco() and not self.esqBranco() and self.dirBranco() and self.maisDirBranco()
def p_p_p_b(self):
return not self.maisEsqBranco() and not self.esqBranco() and not self.dirBranco() and self.maisDirBranco()
def b_p_p_p(self):
return self.maisEsqBranco() and not self.esqBranco() and not self.dirBranco() and not self.maisDirBranco()
def b_b_p_p(self):
return self.maisEsqBranco() and self.esqBranco() and not self.dirBranco() and not self.maisDirBranco()
def b_b_b_p(self):
return self.maisEsqBranco() and self.esqBranco() and self.dirBranco() and not self.maisDirBranco()
def p_b_p_b(self):
return not self.maisEsqBranco() and self.esqBranco() and not self.dirBranco() and self.maisDirBranco()
def p_b_b_p(self):
return not self.maisEsqBranco() and self.esqBranco() and self.dirBranco() and not self.maisDirBranco()
def b_p_b_p(self):
return self.maisEsqBranco() and not self.esqBranco() and self.dirBranco() and not self.maisDirBranco()
def b_p_p_b(self):
return self.maisEsqBranco() and not self.esqBranco() and not self.dirBranco() and self.maisDirBranco()
def p_b_p_p(self):
return not self.maisEsqBranco() and self.esqBranco() and not self.dirBranco() and not self.maisDirBranco()
def p_p_b_p(self):
return not self.maisEsqBranco() and not self.esqBranco() and self.dirBranco() and not self.maisDirBranco()
def b_p_b_b(self):
return self.maisEsqBranco() and not self.esqBranco() and self.dirBranco() and self.maisDirBranco()
def b_b_p_b(self):
return self.maisEsqBranco() and self.esqBranco() and not self.dirBranco() and self.maisDirBranco()
'''
Use together with the class defined in testeEstrategia
if __name__ == "__main__":
try:
sl = SensorsListener()
threading.Thread(target=showValue).start()
sl.register()
'''
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
def norm(self, ord=None, dim=None, keepdim=False, dtype=None):
return flow._C.norm(self, ord, dim, keepdim, dtype=dtype)
def vector_norm(self, ord=2, dim=None, keepdim=False, dtype=None):
return flow._C.vector_norm(self, ord, dim, keepdim, dtype=dtype)
def matrix_norm(self, ord="fro", dim=(-2, -1), keepdim=False, dtype=None):
return flow._C.matrix_norm(self, ord, dim, keepdim, dtype=dtype)
|
"""
Tests for salt.utils.boto3mod
"""
import random
import string
import salt.config
import salt.loader
import salt.utils.boto3mod as boto3mod
from salt.utils.versions import LooseVersion
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase, skipIf
try:
import boto3
from botocore.exceptions import ClientError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
REQUIRED_BOTO3_VERSION = "1.2.1"
@skipIf(HAS_BOTO3 is False, "The boto3 module must be installed.")
@skipIf(
LooseVersion(boto3.__version__) < LooseVersion(REQUIRED_BOTO3_VERSION),
"The boto3 module must be greater or equal to version {}".format(
REQUIRED_BOTO3_VERSION
),
)
class Boto3modTestCase(TestCase, LoaderModuleMockMixin):
"""
TestCase for salt.utils.boto3mod module
"""
region = "us-east-1"
service = "test-service"
resource_name = "test-resource"
resource_id = "test-resource-id"
access_key = "GKTADJGHEIQSXMKKRBJ08H"
secret_key = "askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs"
conn_parameters = {}
error_message = (
"An error occurred ({}) when calling the {} operation: Test-defined error"
)
error_content = {"Error": {"Code": 101, "Message": "Test-defined error"}}
session_ret = {}
conn = None
def setup_loader_modules(self):
self.opts = {
"__salt__": {"config.option": salt.config.DEFAULT_MINION_OPTS.copy()}
}
return {boto3mod: self.opts}
def setUp(self):
super().setUp()
del self.opts
# Set up MagicMock to replace the boto3 session
# connections keep getting cached from prior tests, can't find the
# correct context object to clear it. So randomize the cache key, to prevent any
# cache hits
self.conn_parameters = {
"region": self.region,
"keyid": self.secret_key,
"profile": {},
}
self.conn_parameters["key"] = "".join(
random.choice(string.ascii_lowercase + string.digits) for _ in range(50)
)
self.not_found_error = ClientError(
{
"Error": {
"Code": "ResourceNotFoundException",
"Message": "Test-defined error",
}
},
"msg",
)
self.conn = MagicMock()
self.addCleanup(delattr, self, "conn")
self.patcher = patch("boto3.session.Session")
self.addCleanup(self.patcher.stop)
self.addCleanup(delattr, self, "patcher")
mock_session = self.patcher.start()
session_instance = mock_session.return_value
session_instance.configure_mock(client=MagicMock(return_value=self.conn))
self.paginator = MagicMock()
self.addCleanup(delattr, self, "paginator")
self.conn.configure_mock(get_paginator=MagicMock(return_value=self.paginator))
def test_set_and_get_with_no_auth_params(self):
boto3mod.cache_id(
self.service, self.resource_name, resource_id=self.resource_id
)
self.assertEqual(
boto3mod.cache_id(self.service, self.resource_name), self.resource_id
)
def test_set_and_get_with_explicit_auth_params(self):
boto3mod.cache_id(
self.service,
self.resource_name,
resource_id=self.resource_id,
**self.conn_parameters
)
self.assertEqual(
boto3mod.cache_id(self.service, self.resource_name, **self.conn_parameters),
self.resource_id,
)
def test_set_and_get_with_different_region_returns_none(self):
boto3mod.cache_id(
self.service,
self.resource_name,
resource_id=self.resource_id,
region="us-east-1",
)
self.assertEqual(
boto3mod.cache_id(self.service, self.resource_name, region="us-west-2"),
None,
)
def test_set_and_get_after_invalidation_returns_none(self):
boto3mod.cache_id(
self.service, self.resource_name, resource_id=self.resource_id
)
boto3mod.cache_id(
self.service,
self.resource_name,
resource_id=self.resource_id,
invalidate=True,
)
self.assertEqual(boto3mod.cache_id(self.service, self.resource_name), None)
def test_partial(self):
cache_id = boto3mod.cache_id_func(self.service)
cache_id(self.resource_name, resource_id=self.resource_id)
self.assertEqual(cache_id(self.resource_name), self.resource_id)
|
s = 0
for i in range(0,501):
if(i%2 != 0):
if(i%3 == 0):
s += i
print('The sum of the odd multiples of 3 was: {}'.format(s))
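# Added cross-check: a compact equivalent of the loop above is
#     sum(i for i in range(501) if i % 2 != 0 and i % 3 == 0)
# which evaluates to 20667 for this range.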
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""errors.py
"""
__all__ = (
'error_500'
)
from flask import render_template
def error_500(error):
"""
Called when an internel server error (500) occured when
responding to a request.
"""
return render_template(
'errors/500.html',
error_code=500,
e=error
), 500
|
import demistomock as demisto # noqa
import ExpanseRefreshIssueAssets
EXAMPLE_INCIDENT = {
'CustomFields': {
'expanseasset': [
{'assettype': 'Certificate', 'assetkey': 'fakeMD5', 'id': 'id-old-certificate'},
{'assettype': 'IpRange', 'assetkey': 'fakeIPRange', 'id': 'id-old-iprange'},
{'assettype': 'Domain', 'assetkey': 'fakeDomain', 'id': 'id-old-domain'},
]
}
}
REFRESH_RESULT = {'expanseasset': [{'assettype': 'Certificate',
'assetkey': 'fakeMD5',
'tags': 'tag-certificate',
'id': 'id-certificate',
'attributionReasons': 'fake-certificate-reason1\nfake-certificate-reason2'},
{'assettype': 'IpRange',
'assetkey': 'fakeIPRange',
'tags': 'tag-iprange1\ntag-iprange2',
'id': 'id-iprange',
'attributionReasons': 'fake-iprange-reason'},
{'assettype': 'Domain',
'assetkey': 'fakeDomain',
'tags': 'tag-domain',
'id': 'id-domain',
'attributionReasons': 'fake-domain-reason'},
]}
ASSET_CERTIFICATE = {
'annotations': {
'tags': [{'name': 'tag-certificate'}]
},
'attributionReasons': [{'reason': 'fake-certificate-reason1'}, {'reason': 'fake-certificate-reason2'}],
'id': 'id-certificate'
}
ASSET_IPRANGE = {
'annotations': {
'tags': [{'name': 'tag-iprange1'}, {'name': 'tag-iprange2'}]
},
'attributionReasons': [{'reason': 'fake-iprange-reason'}],
'id': 'id-iprange'
}
ASSET_DOMAIN = {
'annotations': {
'tags': [{'name': 'tag-domain'}]
},
'attributionReasons': [{'reason': 'fake-domain-reason'}],
'id': 'id-domain'
}
def test_refresh_issue_assets_command(mocker):
"""
Given:
- current incident with iprange, domain and certificate assets
When
- Refreshing Expanse assets for an incident
Then
- commands are invoked to refresh asset data
- incident is updated with the refreshed asset data
"""
def executeCommand(name, args):
if name == "expanse-get-domain" and args['domain'] == 'fakeDomain':
return [{'Contents': ASSET_DOMAIN}]
elif name == "expanse-get-iprange" and args['id'] == 'fakeIPRange':
return [{'Contents': ASSET_IPRANGE}]
elif name == "expanse-get-certificate" and args['md5_hash'] == 'fakeMD5':
return [{'Contents': ASSET_CERTIFICATE}]
elif name == "setIncident":
return "OK"
raise ValueError(f"Error: Unknown command or command/argument pair: {name} {args!r}")
mocker.patch.object(demisto, 'incident', return_value=EXAMPLE_INCIDENT)
ec_mock = mocker.patch.object(demisto, 'executeCommand', side_effect=executeCommand)
result = ExpanseRefreshIssueAssets.refresh_issue_assets_command({})
assert result.readable_output == "OK"
assert len(ec_mock.call_args_list) == 4
assert ec_mock.call_args_list[3][0][0] == "setIncident"
assert ec_mock.call_args_list[3][0][1] == REFRESH_RESULT
|
import random
from . import tokens
from . import dealer
""" The Button module manages the movement of the Dealer Button.
"""
class Button(tokens.Token):
def __init__(self, table):
tokens.Token.__init__(self, name="Button", table=table)
self.seat = -1 # Start at -1 or None?
def __repr__(self):
return str(self.seat)
def __int__(self):
return self.seat
def move(self):
""" Move the button clockwise to the next valid player/seat. """
self.seat = dealer.next_player(self.table, from_seat=self.seat)
def randomize(self):
""" Places the button at a random player's seat.
Raises an exception if there are no players at the table.
"""
seats = list(dealer.get_playerdict(self.table).keys())
if len(seats) == 0:
raise Exception('Cannot place the button, no players at table!')
choice = random.choice(seats)
self.seat = choice
|
'''
===============================================================================
-- Author: Hamid Doostmohammadi, Azadeh Nazemi
-- Create date: 01/11/2020
-- Description: This code applies T-distributed Stochastic Neighbor Embedding (t-SNE),
which is a method for reducing the dimensionality of images for comparison.
It can be used to visualise the correlation or connection
between images belonging to a single class.
-- Status: In progress
===============================================================================
'''
import numpy as np
import cv2
import sys
import os
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
def image_to_feature_vector(image, size=(16, 16)):
return cv2.resize(image, size).flatten()
data = []
labels = []
for root, dirs, files in os.walk(sys.argv[1]):
for filename in files:
ext = filename[filename.rfind("."):].lower()
imagePath = os.path.join(root, filename)
label = imagePath.split(os.path.sep)[-2]
print(label)
image = cv2.imread(imagePath, 0)
H = image_to_feature_vector(image, size=(160, 160))
H = np.array(H, dtype="float") / 255.0
labels.append(label)
data.append(H)
data = np.array(data)
tsne = TSNE(n_components=2, random_state=0)
X = data
X_2d = tsne.fit_transform(X)
plt.figure(figsize=(6, 5))
colors = 'r', 'g', 'b', 'c', 'm', 'y', 'k', 'w', 'orange', 'purple'
y = []
la = list(set(labels))
print(la)
for i in range(0, len(X)):
for j in range(0, len(la)):
if labels[i] == la[j]:
y.append(j)
plt.scatter(X_2d[i, 0], X_2d[i, 1], c=colors[j], label=la[j])
break
plt.show()
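# Optional extension (added as a sketch, not part of the original pipeline): the PCA
# import above is currently unused. A common pattern is to reduce the feature vectors
# to ~50 components with PCA before t-SNE to cut runtime and noise; flip the flag to
# try it. The names below are illustrative only.
USE_PCA_BEFORE_TSNE = False
if USE_PCA_BEFORE_TSNE:
    reduced = PCA(n_components=min(50, data.shape[1])).fit_transform(data)
    X_2d_from_pca = TSNE(n_components=2, random_state=0).fit_transform(reduced)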
|
from run.cochran import run_benchmark
from . import benchmark_utils as bm_utils
import subprocess
#Performs the list of benchmarks and saves the results to the output csv file
def perform_benchmarks(benchmarks, experiment_iterations, output_file):
statistics, csv_output = bm_utils.setup(output_file)
benchmark_count = len(benchmarks)
for index, b in enumerate(benchmarks):
print('\r' + "Performing benchmark " + str(index + 1) + " of " + str(benchmark_count), end='', flush=True)
print("\n", b.path, flush=True)
subprocess.run(b.get_build_command(),
shell=True, check=True, stdout=subprocess.DEVNULL)
statistics.clear()
#The measuring equipment
current = 0
while(current < experiment_iterations):
run_benchmark(b, current, experiment_iterations, csv_output, statistics)
current += 1
bm_utils.save(statistics, csv_output, b.path)
print("", flush=True)
print('\n', flush=True)
def run_benchmark(benchmark, i, iterations, csv_output, statistics):
print("\r" + str(i + 1) + " of " + str(iterations), end="", flush=True)
bm_utils.run(benchmark)
results = bm_utils.collect_results(bm_utils.RESULT_FILE_PATH)
bm_utils.handle_results(results, benchmark.path, csv_output, statistics)
|
'''
Random number from a blacklist
Given a blacklist `blacklist` of distinct integers from [0, n), write a function that returns a random integer from [0, n) that is not in `blacklist`.
Optimize it so that the system method Math.random() is called as few times as possible.
Constraints:
1 <= n <= 1000000000
0 <= blacklist.length < min(100000, N)
[0, n) does not include n; see interval notation for details.
Example 1:
Input:
["Solution","pick","pick","pick"]
[[1,[]],[],[],[]]
Output: [null,0,0,0]
Example 2:
Input:
["Solution","pick","pick","pick"]
[[2,[]],[],[],[]]
Output: [null,1,1,1]
Example 3:
Input:
["Solution","pick","pick","pick"]
[[3,[1]],[],[],[]]
Output: [null,0,0,2]
Example 4:
Input:
["Solution","pick","pick","pick"]
[[4,[2]],[],[],[]]
Output: [null,1,3,1]
Input syntax:
The input consists of two lists: the member function names to call and the arguments of each call. The Solution constructor takes two parameters, n and the blacklist `blacklist`.
pick takes no parameters; its arguments are still given as a list, so even when there are none an empty list [] is supplied.
'''
from typing import List
import bisect
import random
'''
Approach: sorting, randomness and binary search
Constructor:
1. Sort the blacklist, then walk through it to build the list of whitelist intervals: whitelist
2. Walk through whitelist and build lenPrefixs, the prefix sums of the interval lengths
3. The total number of whitelisted integers is m = n - blacklist.length
Time complexity: O(k log k), where k = blacklist.length
pick:
1. Call the system random generator to produce a random integer randIdx in 0..m-1
2. Binary-search randIdx in lenPrefixs to locate the whitelist interval it falls in, then map it to an integer inside that interval
Time complexity: O(log(whitelist.length))
'''
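# Worked example (added for illustration): n = 4, blacklist = [2]
#   whitelist intervals -> [(0, 1), (3, 3)], length prefix sums -> [2, 3], m = 3
#   randIdx = 0 or 1 falls in the first interval and is returned as-is
#   randIdx = 2: bisect_right([2, 3], 2) = 1, offset 2 - 2 = 0, so 3 is returned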
class Solution:
def __init__(self, n: int, blacklist: List[int]):
self.m = n - len(blacklist)
blacklist.sort()
self.whitelist = []
self.lenPrefis = []
total = 0
if blacklist:
if blacklist[0]: # the first blacklisted number is not 0, so add a whitelist interval at the very start
self.whitelist.append((0, blacklist[0] - 1))
self.lenPrefis.append(blacklist[0])
total += blacklist[0]
pre = blacklist[0]
for i in range(1, len(blacklist)):
pre += 1
if pre != blacklist[i]: # the current number is not previous+1, so the gap between two blacklisted numbers becomes a whitelist interval
self.whitelist.append((pre, blacklist[i] - 1))
total += blacklist[i] - pre
self.lenPrefis.append(total)
pre = blacklist[i]
if blacklist[-1] != n - 1: # the last blacklisted number is not n-1, so append one final whitelist interval
self.whitelist.append((blacklist[-1] + 1, n - 1))
total += n - 1 - blacklist[-1]
self.lenPrefis.append(total)
else:
self.whitelist.append((0, n))
self.lenPrefis.append(n)
def pick(self) -> int:
randIdx = random.randint(0, self.m - 1)
rangeIdx = bisect.bisect_right(self.lenPrefis, randIdx) # binary search: which whitelist interval does the random index fall in
if 0 < rangeIdx < len(self.whitelist):
randIdx -= self.lenPrefis[rangeIdx - 1] # offset of the random index inside its whitelist interval
return self.whitelist[rangeIdx][0] + randIdx
elif rangeIdx == len(self.whitelist):
randIdx -= self.lenPrefis[-1] # offset of the random index inside the last whitelist interval
return self.whitelist[-1][0] + randIdx
return self.whitelist[0][0] + randIdx
s = Solution(3, [0])
print(s.pick())
print(s.pick())
print(s.pick())
print(s.pick())
print(s.pick())
s = Solution(4, [2])
print(s.pick())
print(s.pick())
print(s.pick())
print(s.pick())
print(s.pick())
|
from django.contrib import admin
from reversion.admin import VersionAdmin
from armstrong.core.arm_content.admin import fieldsets
from armstrong.core.arm_sections.admin import SectionTreeAdminMixin
from armstrong import hatband
from armstrong.apps.related_content.admin import RelatedContentInline
from .models import Article
class ArticleAdmin(SectionTreeAdminMixin, VersionAdmin, hatband.ModelAdmin):
fieldsets = (
(None, {
'fields': ('title', 'slug', 'summary', 'body', ),
}),
fieldsets.TAXONOMY,
fieldsets.PUBLICATION,
fieldsets.AUTHORS,
)
inlines = [
RelatedContentInline,
]
admin.site.register(Article, ArticleAdmin)
|
from train import ex
def main():
batch_size = 8
sequence_length = 327680
model_complexity = 48
piano_midis = list(range(8))
guitar_midis = list(range(24, 32))
bass_midis = list(range(32, 40))
brass_midis = list(range(57, 64))
reed_midis = list(range(64, 72))
ex.run(
config_updates={
"split": "redux",
"audio": "individual",
"instrument": "all-ind",
"midi_programs": piano_midis + guitar_midis + bass_midis + brass_midis + reed_midis,
"max_harmony": None,
"skip_pitch_bend_tracks": True,
"batch_size": batch_size,
"sequence_length": sequence_length,
"model_complexity": model_complexity,
"validation_length": 4 * sequence_length,
"validation_interval": 500,
"num_validation_files": 50,
"create_validation_images": True,
"predict_velocity": False,
"min_midi": 28, # E1
"max_midi": 96, # C7
}
)
if __name__ == "__main__":
    main()
|
from django import forms
# form used on the products HTML page
class Produtos(forms.Form):
nome = forms.CharField(label='Nome ')
quantidade = forms.IntegerField(label='Quantidade ')
preco = forms.IntegerField(label='Preço')
|
FALSE = 0
Enc_FALSE = b'\x00'
TRUE = 1
Enc_TRUE = b'\x01'
NULL = 2
Enc_NULL = b'\x02'
INF = 3
Enc_INF = b'\x03'
NEGINF = 4
Enc_NEGINF = b'\x04'
NAN = 5
Enc_NAN = b'\x05'
TERMINATED_LIST = 0x0c
Enc_TERMINATED_LIST = b'\x0c'
CUSTOM = 0x0e
Enc_CUSTOM = b'\x0e'
TERMINATOR = 0x0f
Enc_TERMINATOR = b'\x0f'
INT = 0x20
NEGINT = 0x40
FLOAT = 0x60
STRING = 0x80
BINARY = 0xA0
LIST = 0xC0
DICT = 0xE0
FltEnc_Plus = 0xa
FltEnc_Minus = 0xb
FltEnc_Decimal = 0xd
FltEnc_E = 0xe
float_encode = {
'0': 0,
'1': 1,
'2': 2,
'3': 3,
'4': 4,
'5': 5,
'6': 6,
'7': 7,
'8': 8,
'9': 9,
'+': FltEnc_Plus,
'-': FltEnc_Minus,
'.': FltEnc_Decimal,
'e': FltEnc_E,
'E': FltEnc_E
}
float_decode = dict((v, k) for k, v in float_encode.items())
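# Illustrative helper (added; not part of the original constant table): map the text
# form of a float to its 4-bit codes via float_encode, e.g. '1.5' -> [1, 0xd, 5] and
# '-2e3' -> [0xb, 2, 0xe, 3].
def _float_nibbles(text):
    """Return the float_encode nibble for each character of `text`."""
    return [float_encode[ch] for ch in text]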
|
import numpy
import numpy.fft
import numpy.ma
import math
import scipy.stats
import scipy.interpolate
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
def rect_make_FFT(rect_half_length, beta, outer_scale, sigma,
m_func=None, s_func=None, scale_ratio=None,
x_half_length=None):
if x_half_length is None:
x_half_length = rect_half_length
half_rect_shape = rect_half_length*2, x_half_length+1
rect_shape = rect_half_length*2, x_half_length*2
double_rect_shape = rect_half_length*4, x_half_length*4
outer_scale_L = 1./outer_scale
rect = scipy.stats.uniform.rvs(loc=0, scale=2*math.pi,
size=half_rect_shape)
rect = numpy.cos(rect) + 1j*numpy.sin(rect)
index = -beta  # gives -10/3 for a logN power spectrum
if scale_ratio:
k2_rect = numpy.fromfunction(lambda i, k:
pow((i-rect_half_length)
* scale_ratio, 2)+pow(k, 2),
half_rect_shape)
else:
k2_rect = numpy.fromfunction(lambda i, k:
pow(i-rect_half_length, 2)
+ pow(k, 2), half_rect_shape)
k_rect = numpy.power(1 + k2_rect*numpy.power(outer_scale_L, 2), -beta/2)
k_rect = numpy.fft.ifftshift(k_rect, axes=(0))
var1 = math.sqrt(numpy.sum(numpy.abs(k_rect)))
k_rect = numpy.sqrt(numpy.abs(k_rect))
m1 = numpy.real(rect[0, 0])
rect = rect * k_rect[:, :x_half_length+1]
# take fft
fft_rect = numpy.fft.irfftn(rect)
fft_rect = numpy.fft.fftshift(fft_rect)
# exponentiate
mean = 0
std = var1 / (4*rect_half_length*x_half_length)
if m_func:
m = numpy.fromfunction(m_func, rect_shape) # 0
else:
m = 0
if s_func:
s = numpy.fromfunction(s_func, rect_shape) # 1
s *= sigma/numpy.mean(s)
else:
s = sigma
rect = numpy.exp(m + ((fft_rect-mean)/(std))*s)
return rect
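# Minimal usage sketch (added; the parameter values are illustrative guesses, not taken
# from the original module). beta = 10./3 matches the "-10/3 for logN power spec" note
# above; outer_scale and sigma are arbitrary here.
if __name__ == "__main__":
    field = rect_make_FFT(rect_half_length=64, beta=10./3, outer_scale=16., sigma=1.0)
    print(field.shape, field.mean())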
|
"""
Define testing utils
"""
IMAGE = "895885662937.dkr.ecr.us-west-2.amazonaws.com/spark/emr-5.32.0-20210129:2.4.7-amzn-0-vanilla"
RELEASE_NAME = "emr-5.32"
IMAGE_TYPE = "spark"
INSPECT = dict()
INSPECT['Id'] = 'sha:asdf'
INSPECT['Created'] = '2020/04/22'
INSPECT['Config'] = dict()
INSPECT['Config']['User'] = 'user'
INSPECT['Config']['WorkingDir'] = 'workingdir'
INSPECT['Config']['Entrypoint'] = ['entrypoint']
INSPECT['Config']['Env'] = ['env1=path1', "env2=path2"]
|
#!/usr/bin/env python3 -u
import time
import sys
from vwradio import avrclient
from vwradio.constants import Keys
client = avrclient.make_client()
if __name__ == '__main__':
# build list of keycodes to try (one bit per key, max 32 bits)
keycodes = []
keycode = 1
while len(keycodes) < 32:
keycodes.append(keycode)
keycode = keycode << 1
client.set_auto_key_passthru(False)
for keycode in keycodes:
# put the display into a known state
client.hit_key(Keys.MODE_AM)
client.hit_key(Keys.MODE_FM)
client.hit_key(Keys.PRESET_1)
# split the 32-bit keycode to 4 key data bytes
key_data = []
for i in range(3, -1, -1):
key_data.append((keycode >> (8*i) & 0xFF))
# try the key
if key_data == [0,0,0,0x40]:
print('skipped initial')
elif key_data == [0,0,0x02,0]:
print('skipped scan')
elif key_data == [0,0,0x20,0]:
print('skipped code')
elif key_data == [0,0x10,0,0]:
print('skipped bal')
elif key_data == [0,0,0,0x80]:
print('skipped no code')
else:
sys.stdout.write("\n%r%s" % (key_data, ' ' * 25,))
sys.stdout.flush()
client.emulated_upd_load_key_data(key_data)
time.sleep(20)
reading_1 = client.read_lcd()
client.emulated_upd_load_key_data([0, 0, 0, 0])
time.sleep(0.25)
reading_2 = client.read_lcd()
if (reading_1 != 'FM11 891MHZ') or (reading_2 != 'FM11 891MHZ'):
sys.stdout.write("\r%r down:%r up:%r\n" %
(key_data, reading_1, reading_2))
sys.stdout.flush()
client.set_auto_key_passthru(True)
|
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test ping message
"""
import time
from test_framework.messages import (
msg_pong,
)
from test_framework.mininode import (
P2PInterface,
wait_until,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
PING_INTERVAL = 2 * 60
class msg_pong_corrupt(msg_pong):
def serialize(self):
return b""
class NodePongAdd1(P2PInterface):
def on_ping(self, message):
self.send_message(msg_pong(message.nonce + 1))
class NodeNoPong(P2PInterface):
def on_ping(self, message):
pass
class PingPongTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [['-peertimeout=3']]
def check_peer_info(self, *, pingtime, minping, pingwait):
stats = self.nodes[0].getpeerinfo()[0]
assert_equal(stats.pop('pingtime', None), pingtime)
assert_equal(stats.pop('minping', None), minping)
assert_equal(stats.pop('pingwait', None), pingwait)
def mock_forward(self, delta):
self.mock_time += delta
self.nodes[0].setmocktime(self.mock_time)
def run_test(self):
self.mock_time = int(time.time())
self.mock_forward(0)
self.log.info('Check that ping is sent after connection is established')
no_pong_node = self.nodes[0].add_p2p_connection(NodeNoPong())
self.mock_forward(3)
assert no_pong_node.last_message.pop('ping').nonce != 0
self.check_peer_info(pingtime=None, minping=None, pingwait=3)
self.log.info('Reply without nonce cancels ping')
with self.nodes[0].assert_debug_log(['pong peer=0: Short payload']):
no_pong_node.send_and_ping(msg_pong_corrupt())
self.check_peer_info(pingtime=None, minping=None, pingwait=None)
self.log.info('Reply without ping')
with self.nodes[0].assert_debug_log([
'pong peer=0: Unsolicited pong without ping, 0 expected, 0 received, 8 bytes',
]):
no_pong_node.send_and_ping(msg_pong())
self.check_peer_info(pingtime=None, minping=None, pingwait=None)
self.log.info('Reply with wrong nonce does not cancel ping')
assert 'ping' not in no_pong_node.last_message
with self.nodes[0].assert_debug_log(['pong peer=0: Nonce mismatch']):
# mock time PING_INTERVAL ahead to trigger node into sending a ping
self.mock_forward(PING_INTERVAL + 1)
wait_until(lambda: 'ping' in no_pong_node.last_message)
self.mock_forward(9)
# Send the wrong pong
no_pong_node.send_and_ping(msg_pong(no_pong_node.last_message.pop('ping').nonce - 1))
self.check_peer_info(pingtime=None, minping=None, pingwait=9)
self.log.info('Reply with zero nonce does cancel ping')
with self.nodes[0].assert_debug_log(['pong peer=0: Nonce zero']):
no_pong_node.send_and_ping(msg_pong(0))
self.check_peer_info(pingtime=None, minping=None, pingwait=None)
self.log.info('Check that ping is properly reported on RPC')
assert 'ping' not in no_pong_node.last_message
# mock time PING_INTERVAL ahead to trigger node into sending a ping
self.mock_forward(PING_INTERVAL + 1)
wait_until(lambda: 'ping' in no_pong_node.last_message)
ping_delay = 29
self.mock_forward(ping_delay)
wait_until(lambda: 'ping' in no_pong_node.last_message)
no_pong_node.send_and_ping(msg_pong(no_pong_node.last_message.pop('ping').nonce))
self.check_peer_info(pingtime=ping_delay, minping=ping_delay, pingwait=None)
self.log.info('Check that minping is decreased after a fast roundtrip')
# mock time PING_INTERVAL ahead to trigger node into sending a ping
self.mock_forward(PING_INTERVAL + 1)
wait_until(lambda: 'ping' in no_pong_node.last_message)
ping_delay = 9
self.mock_forward(ping_delay)
wait_until(lambda: 'ping' in no_pong_node.last_message)
no_pong_node.send_and_ping(msg_pong(no_pong_node.last_message.pop('ping').nonce))
self.check_peer_info(pingtime=ping_delay, minping=ping_delay, pingwait=None)
self.log.info('Check that peer is disconnected after ping timeout')
assert 'ping' not in no_pong_node.last_message
self.nodes[0].ping()
wait_until(lambda: 'ping' in no_pong_node.last_message)
with self.nodes[0].assert_debug_log(['ping timeout: 1201.000000s']):
self.mock_forward(20 * 60 + 1)
time.sleep(4) # peertimeout + 1
if __name__ == '__main__':
PingPongTest().main()
|
import yaml
from serilizer_lib.parsers.yaml.yaml_config import *
#own parser
# def dumps(s):
# pass
#
#
# def dump(s, fp):
# pass
#
#
# def load(fp):
# pass
#
#
# def loads(s):
#
# strings = s.split("\n")
# ind = 0
# depth = 0
# def parse_obj(dep):
# ans = {}
# nonlocal strings
# nonlocal ind
# while ind < len(strings):
# if strings[ind].strip().startswith(COMMENT_SYMBOL):
# ind += 1
# continue
# if depth > get_depth(strings[ind]):
# break
# elif strings[ind].count(NAME_VALUE_MATCHER) == 1:
# key, value = strings[ind].split(NAME_VALUE_MATCHER)
# ans[slice_string_with_depth(key, dep)] = value
# elif strings[ind].endswith("}"):
# #regex for object seq
# pass
# elif strings[ind].strip().endswith("]"):
# #regex for array seq
# pass
# elif strings[ind].strip().startswith(BLOCK_SEQ_SEPARATOR):
# #parsing object of sequence
# pass
# elif strings[ind].strip().endswith(NAME_OBJECT_MATCHER):
# name = strings[ind].split(NAME_OBJECT_MATCHER)[0]
# ind += 1
# ans[name] = parse_obj(dep + 1)
# continue
#
# ind += 1
#
# return ans
#
# def get_depth(s):
# ans = 0
# for c in s:
# if c != ' ':
# break
# ans += 1
#
# if ans % 2 != 0:
# raise ValueError
# return ans // 2
#
#
# def slice_string_with_depth(s, dep):
# return s[dep * 2:]
#
#
# print(parse_obj(0))
# return 228
def dump(obj, fp):
s = dumps(obj)
fp.write(s)
def dumps(obj):
def dump_complex(o):
ans = ""
tp = type(o)
ans = yaml.dump(o)
# if tp == dict or tp == tuple or tp == list:
# ans += yaml.dump(o)
# else:
# # ans += f"{COMPLEX_OBJECT_NAME}{NAME_OBJECT_MATCHER}{TAB_LITERAL}"
# # fields = dir(o)
# # ans += f"{TYPE_FIELD_NAME}{NAME_VALUE_MATCHER}{re.search(CLASS_TYPE_REGEX, str(tp)).group(1)}\n"
# # for field in fields:
# # if re.match(META_METHOD, field) is None:
# # ans += f"{TAB_LITERAL}{field}{NAME_VALUE_MATCHER}" + dump_obj(o.__getattribute__(field)) + "\n"
return ans
s = dump_complex(obj)
return s
def load(fp):
return yaml.load(fp, Loader=yaml.FullLoader)
def loads(s):
return yaml.load(s, Loader=yaml.FullLoader)
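# Quick self-check (added; it only exercises the public helpers defined above):
if __name__ == "__main__":
    sample = {"name": "demo", "values": [1, 2, 3], "nested": {"flag": True}}
    text = dumps(sample)
    assert loads(text) == sample
    print(text)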
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from bottledaemon import daemon_run
from bottle import route, request
import time
import os
@route("/hello")
def hello():
return "hello :: {0}\n".format(
request.environ['HTTP_HOST'])
@route("/api")
def api():
return "api called\n"
@route("/ping")
@route("/ping/<sleeptime:int>")
def ping(sleeptime=0):
time.sleep(sleeptime)
return "pong\n"
if __name__ == "__main__":
try:
port = int(os.environ['BOTTLE_PORT'])
except KeyError:
port = 4343
daemon_run(host="0.0.0.0", port=port)
|
from . import wrapper
from .. util import log
import random as rand
class Sequence(wrapper.Indexed):
def __init__(self, layout, random=False, length=None, **kwds):
"""
Arguments
random -- If True, a random animation is selected each step
length -- if length is a number, run all the animations in a loop
for `length` seconds each. If `length` is a list of numbers,
use the numbers successively as times.
"""
super().__init__(layout, **kwds)
self.random = random
if isinstance(length, (list, tuple)):
self.length = length
else:
self.length = length and [length]
def restart(self):
self.random and rand.shuffle(self.animations._animations)
self.index = 0
def pre_run(self):
self.offset = 0
super().pre_run()
self.restart()
def step(self, amt=1):
if (not self.animations) or super().step(amt):
return
self.index += 1
if self.index >= len(self.animations):
if self.runner.until_complete:
self.completed = True
else:
self.offset += len(self.animations)
self.restart()
def _on_index(self, old_index):
if self.current_animation:
super()._on_index(old_index)
if self.length:
step = self.offset + self.index
length = self.length[step % len(self.length)]
self.current_animation.runner.seconds = length
|
#coding: utf-8
import argparse
import os
from solver import lightsoutsolver
from solver import f2
from utils import validator
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='LightsOut Solver')
parser.add_argument('-s', type=int, choices=validator.range_check(low_limit=2), help='LightsOut Size')
argv_values = parser.parse_args()
size = argv_values.s
always_solvable = lightsoutsolver.is_all_solvable_size(size)
if always_solvable:
print "size %d lightsout is always solvable." % size
else:
print "size %d lightsout is *not* always solvable." % size
|
#!/usr/bin/env python
#
# Copyright (c) 2014 10X Genomics, Inc. All rights reserved.
#
# Measure coverage in given regions
#
import tenkit.pandas as pd
import numpy as np
import tenkit.bio_io as tk_io
import tenkit.bam as tk_bam
import tenkit.stats as tk_stats
import martian
import tenkit.hdf5 as tk_hdf5
def mean_coverage_region(bam, region, read_filter=lambda x: True):
''' Measure the coverage mean in a region '''
(chrom, start, end) = region
reads_iter = tk_bam.filter_bam(bam.fetch(chrom, start, end), remove_unmapped=True, read_filter=read_filter)
depth_df = get_depth_info(reads_iter, chrom, start, end)
return depth_df.coverage.mean()
def get_depth_info(read_iter, chrom, cstart, cend):
depths = np.zeros(cend-cstart, np.int32)
for read in read_iter:
pos = read.pos
rstart = max(pos, cstart)
# Increment to the end of the window or the end of the
# alignment, whichever comes first
rend = min(read.aend, cend)
depths[(rstart-cstart):(rend-cstart)] += 1
positions = np.arange(cstart, cend, dtype=np.int32)
depth_df = pd.DataFrame({"chrom": chrom, "pos": positions, "coverage": depths})
return depth_df
def get_depth_info_json(info):
fixed_info = {int(x): y for (x, y) in info.iteritems()}
total_depth_counts = sum(fixed_info.values())
median_depth = None
sorted_depths = sorted(fixed_info.keys())
seen_depth_count = 0
mean_depth = 0.0
for depth in sorted_depths:
seen_depth_count += fixed_info[depth]
mean_depth += float(depth*fixed_info[depth])/float(total_depth_counts)
if seen_depth_count > total_depth_counts/2 and median_depth is None:
median_depth = depth
zero_cov_fract = tk_stats.robust_divide(float(fixed_info.get(0, 0.0)), float(total_depth_counts))
return (mean_depth, median_depth, zero_cov_fract)
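# Worked example (added for illustration): info = {'0': 2, '5': 3}
#   total_depth_counts = 5, mean_depth = 0*2/5 + 5*3/5 = 3.0,
#   median_depth = 5 (the first depth at which the cumulative count exceeds half the total),
#   zero_cov_fract = 2/5 = 0.4, so the function returns (3.0, 5, 0.4).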
EXONS_SAMPLE_COVERAGE = 3000
WGS_WINDOWS_SAMPLE_COVERAGE = 3000
WGS_WINDOW_SIZE = 10000
WGS_WINDOWS_SMALL_GENOME = 5
def allow_all(x):
return True
def estimate_mean_coverage(targets_file, bam_in, read_filter=lambda x: True):
if targets_file is not None:
target_regions_dict = tk_io.get_target_regions_dict(open(targets_file))
# Pick a random sample of target regions to estimate overall depth on
targets = [(chrom, start, end) for (chrom, regions) in target_regions_dict.items() for (start, end) in regions if end-start > 0]
if len(targets) == 0:
martian.log_info("No non-empty target regions")
return 1.0
np.random.seed(0)
regions_to_sample = min(EXONS_SAMPLE_COVERAGE, len(targets))
region_indices = np.random.choice(len(targets), regions_to_sample, replace=False)
sample_targets = [targets[idx] for idx in region_indices]
else:
# Pick a series of random intervals on the genome to measure coverage
np.random.seed(0)
if sum(bam_in.lengths) < 1e6:
num_windows = WGS_WINDOWS_SMALL_GENOME
else:
num_windows = WGS_WINDOWS_SAMPLE_COVERAGE
chrom_probs = np.array(bam_in.lengths, dtype=np.float) / sum(bam_in.lengths)
rand_chroms = np.random.choice(len(bam_in.lengths), num_windows, replace=True, p=chrom_probs)
starts = [np.random.randint(max(bam_in.lengths[chrom]-WGS_WINDOW_SIZE, 1)) for chrom in rand_chroms]
sample_targets = [(bam_in.references[chrom], start, min(start+WGS_WINDOW_SIZE, bam_in.lengths[chrom])) for (chrom, start) in zip(rand_chroms, starts)]
mean_depth = float(np.mean([mean_coverage_region(bam_in, region, read_filter) for region in sample_targets]))
return mean_depth
def get_hap_coverage(in_bam, ps_h5, chrom, start, stop, cov_quals):
"""Return a dataframe with coverage per haplotype.
Args:
- in_bam: reader for a position sorted bam
- ps_h5: HDF5 with phase set coordinates
- chrom, start, stop: region to get coverage
- cov_quals: Array of MAPQ cutoffs.
Return value:
A dataframe with columns:
- chrom
- pos
- cov_q<M>_hap<H> for all M in cov_quals and for H in [0, 1, 2]: This is the
coverage on haplotype H using reads of MAPQ >= M. Haplotype 2 corresponds to
unphased.
- phase_set: null if ps_h5 is missing.
"""
coverages = [np.zeros((stop - start, 3)) for _ in cov_quals]
for _, read in enumerate(in_bam.fetch(str(chrom), int(start), int(stop))):
if not read.is_unmapped and not read.aend is None and not read.is_secondary and not read.is_duplicate:
hap = tk_io.get_read_haplotype(read)
hap_idx = 2 if hap is None else hap - 1
range_start = max(0, read.pos - start)
range_stop = min(stop, read.aend) - start
for qi, q in enumerate(cov_quals):
if read.mapq >= q:
coverages[qi][range_start:range_stop + 1, hap_idx] += 1
base_df = pd.DataFrame({'chrom':chrom, 'pos':np.arange(start, stop)})
dfs = map(lambda x: pd.DataFrame(x[0], columns=['cov_q' + str(x[1]) + '_hap' + str(i) for i in range(3)]),
zip(coverages, cov_quals))
df = pd.concat([base_df, pd.concat(dfs, axis=1)], axis=1)
phase_sets = -np.ones((stop - start, ), dtype=np.int)
# This can be None if for example the input is unbarcoded.
if not ps_h5 is None:
ps_df = tk_hdf5.read_data_frame(ps_h5)
ps_df = ps_df[np.logical_and(ps_df.chrom == chrom, np.logical_and(ps_df.end >= start, ps_df.start < stop))]
for _, row in ps_df.iterrows():
range_start = max(0, row.start - start)
range_stop = min(stop, row.end) - start
phase_sets[range_start:range_stop + 1] = row.phase_set
df['phase_set'] = phase_sets
return df
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import functools
import os
from appengine_wrappers import GetAppVersion
from compiled_file_system import CompiledFileSystem
from copy import deepcopy
from file_system import FileNotFoundError
from mock_file_system import MockFileSystem
from object_store_creator import ObjectStoreCreator
from test_file_system import TestFileSystem
from test_object_store import TestObjectStore
import unittest
_TEST_DATA = {
'404.html': '404.html contents',
'apps': {
'a11y.html': 'a11y.html contents',
'about_apps.html': 'about_apps.html contents',
'fakedir': {
'file.html': 'file.html contents'
},
'deepdir': {
'deepfile.html': 'deepfile.html contents',
'deeper': {
'deepest.html': 'deepest.html contents',
},
}
},
'extensions': {
'activeTab.html': 'activeTab.html contents',
'alarms.html': 'alarms.html contents'
}
}
identity = lambda _, x: x
def _GetTestCompiledFsCreator():
'''Returns a function which creates CompiledFileSystem views of
TestFileSystems backed by _TEST_DATA.
'''
return functools.partial(
CompiledFileSystem.Factory(
ObjectStoreCreator(start_empty=False,
store_type=TestObjectStore,
disable_wrappers=True),
).Create,
TestFileSystem(deepcopy(_TEST_DATA)))
class CompiledFileSystemTest(unittest.TestCase):
def testPopulateNamespace(self):
def CheckNamespace(expected_file, expected_list, fs):
self.assertEqual(expected_file, fs._file_object_store.namespace)
self.assertEqual(expected_list, fs._list_object_store.namespace)
compiled_fs_creator = _GetTestCompiledFsCreator()
f = lambda x: x
CheckNamespace(
'class=CompiledFileSystem&'
'category=CompiledFileSystemTest/TestFileSystem/file&'
'app_version=%s' % GetAppVersion(),
'class=CompiledFileSystem&'
'category=CompiledFileSystemTest/TestFileSystem/list&'
'app_version=%s' % GetAppVersion(),
compiled_fs_creator(f, CompiledFileSystemTest))
CheckNamespace(
'class=CompiledFileSystem&'
'category=CompiledFileSystemTest/TestFileSystem/foo/file&'
'app_version=%s' % GetAppVersion(),
'class=CompiledFileSystem&'
'category=CompiledFileSystemTest/TestFileSystem/foo/list&'
'app_version=%s' % GetAppVersion(),
compiled_fs_creator(f, CompiledFileSystemTest, category='foo'))
def testPopulateFromFile(self):
def Sleepy(key, val):
return '%s%s' % ('Z' * len(key), 'z' * len(val))
compiled_fs = _GetTestCompiledFsCreator()(Sleepy, CompiledFileSystemTest)
self.assertEqual('ZZZZZZZZzzzzzzzzzzzzzzzzz',
compiled_fs.GetFromFile('404.html').Get())
self.assertEqual('ZZZZZZZZZZZZZZzzzzzzzzzzzzzzzzzz',
compiled_fs.GetFromFile('apps/a11y.html').Get())
self.assertEqual('ZZZZZZZZZZZZZZZZZZZZZZzzzzzzzzzzzzzzzzzz',
compiled_fs.GetFromFile('apps/fakedir/file.html').Get())
def testPopulateFromFileListing(self):
def strip_ext(path, files):
return [os.path.splitext(f)[0] for f in files]
compiled_fs = _GetTestCompiledFsCreator()(strip_ext, CompiledFileSystemTest)
expected_top_listing = [
'404',
'apps/a11y',
'apps/about_apps',
'apps/deepdir/deeper/deepest',
'apps/deepdir/deepfile',
'apps/fakedir/file',
'extensions/activeTab',
'extensions/alarms'
]
self.assertEqual(expected_top_listing,
sorted(compiled_fs.GetFromFileListing('').Get()))
expected_apps_listing = [
'a11y',
'about_apps',
'deepdir/deeper/deepest',
'deepdir/deepfile',
'fakedir/file',
]
self.assertEqual(expected_apps_listing,
sorted(compiled_fs.GetFromFileListing('apps/').Get()))
self.assertEqual(['file',],
compiled_fs.GetFromFileListing('apps/fakedir/').Get())
self.assertEqual(['deeper/deepest', 'deepfile'],
sorted(compiled_fs.GetFromFileListing(
'apps/deepdir/').Get()))
self.assertEqual(['deepest'],
compiled_fs.GetFromFileListing(
'apps/deepdir/deeper/').Get())
def testCaching(self):
compiled_fs = _GetTestCompiledFsCreator()(identity, CompiledFileSystemTest)
self.assertEqual('404.html contents',
compiled_fs.GetFromFile('404.html').Get())
self.assertEqual(set(('file.html',)),
set(compiled_fs.GetFromFileListing('apps/fakedir/').Get()))
compiled_fs._file_system._path_values['404.html'] = 'boom'
compiled_fs._file_system._path_values['apps/fakedir/'] = [
'file.html', 'boom.html']
self.assertEqual('404.html contents',
compiled_fs.GetFromFile('404.html').Get())
self.assertEqual(set(('file.html',)),
set(compiled_fs.GetFromFileListing('apps/fakedir/').Get()))
compiled_fs._file_system.IncrementStat()
self.assertEqual('boom', compiled_fs.GetFromFile('404.html').Get())
self.assertEqual(set(('file.html', 'boom.html')),
set(compiled_fs.GetFromFileListing('apps/fakedir/').Get()))
def testFailures(self):
compiled_fs = _GetTestCompiledFsCreator()(identity, CompiledFileSystemTest)
self.assertRaises(FileNotFoundError,
compiled_fs.GetFromFile('405.html').Get)
# TODO(kalman): would be nice to test this fails since apps/ is a dir.
compiled_fs.GetFromFile('apps')
#self.assertRaises(SomeError, compiled_fs.GetFromFile, 'apps/')
self.assertRaises(FileNotFoundError,
compiled_fs.GetFromFileListing('nodir/').Get)
# TODO(kalman): likewise, not a FileNotFoundError.
self.assertRaises(FileNotFoundError,
compiled_fs.GetFromFileListing('404.html/').Get)
def testCorrectFutureBehaviour(self):
# Tests that the underlying FileSystem's Read Future has had Get() called
# on it before the Future is resolved, but the underlying Future isn't
# resolved until Get is.
mock_fs = MockFileSystem(TestFileSystem(_TEST_DATA))
compiled_fs = CompiledFileSystem.Factory(
ObjectStoreCreator.ForTest()).Create(
mock_fs, lambda path, contents: contents, type(self))
self.assertTrue(*mock_fs.CheckAndReset())
future = compiled_fs.GetFromFile('404.html')
self.assertTrue(*mock_fs.CheckAndReset(stat_count=1, read_count=1))
future.Get()
self.assertTrue(*mock_fs.CheckAndReset(read_resolve_count=1))
future = compiled_fs.GetFromFileListing('apps/')
# Current behaviour is to have read=2 and read_resolve=1 because the first
# level is read eagerly, then all of the second is read (in parallel). If
# it weren't eager (and it may be worth experimenting with that) then it'd
# be read=1 and read_resolve=0.
self.assertTrue(*mock_fs.CheckAndReset(stat_count=1,
read_count=2,
read_resolve_count=1))
future.Get()
# It's doing 1 more level 'deeper' (already read 'fakedir' and 'deepdir'
# though not resolved), so that's 1 more read/resolve + the resolve from
# the first read.
self.assertTrue(*mock_fs.CheckAndReset(read_count=1, read_resolve_count=2))
# Even though the directory is 1 layer deep the caller has no way of
# determining that ahead of time (though perhaps the API could give some
# kind of clue, if we really cared).
future = compiled_fs.GetFromFileListing('extensions/')
self.assertTrue(*mock_fs.CheckAndReset(stat_count=1,
read_count=1,
read_resolve_count=1))
future.Get()
self.assertTrue(*mock_fs.CheckAndReset())
# Similar configuration to the 'apps/' case but deeper.
future = compiled_fs.GetFromFileListing('')
self.assertTrue(*mock_fs.CheckAndReset(stat_count=1,
read_count=2,
read_resolve_count=1))
future.Get()
self.assertTrue(*mock_fs.CheckAndReset(read_count=2, read_resolve_count=3))
if __name__ == '__main__':
unittest.main()
|
from rabbitai.db_engine_specs.base import BaseEngineSpec, LimitMethod
class TeradataEngineSpec(BaseEngineSpec):
"""Dialect for Teradata DB."""
engine = "teradata"
engine_name = "Teradata"
limit_method = LimitMethod.WRAP_SQL
max_column_name_length = 30 # since 14.10 this is 128
_time_grain_expressions = {
None: "{col}",
"PT1M": "TRUNC(CAST({col} as DATE), 'MI')",
"PT1H": "TRUNC(CAST({col} as DATE), 'HH')",
"P1D": "TRUNC(CAST({col} as DATE), 'DDD')",
"P1W": "TRUNC(CAST({col} as DATE), 'WW')",
"P1M": "TRUNC(CAST({col} as DATE), 'MONTH')",
"P0.25Y": "TRUNC(CAST({col} as DATE), 'Q')",
"P1Y": "TRUNC(CAST({col} as DATE), 'YEAR')",
}
@classmethod
def epoch_to_dttm(cls) -> str:
return (
"CAST(((CAST(DATE '1970-01-01' + ({col} / 86400) AS TIMESTAMP(0) "
"AT 0)) AT 0) + (({col} MOD 86400) * INTERVAL '00:00:01' "
"HOUR TO SECOND) AS TIMESTAMP(0))"
)
|
# Interaction of .pend_throw() with .throw() and .close()
def gen():
i = 0
while 1:
yield i
i += 1
g = gen()
try:
g.pend_throw
except AttributeError:
print("SKIP")
raise SystemExit
g.pend_throw(1)
try:
g.throw(ValueError())
except ValueError:
print("expected ValueError #1")
g = gen()
print(next(g))
g.pend_throw(2)
try:
g.throw(ValueError())
except ValueError:
print("expected ValueError #2")
g = gen()
g.pend_throw(3)
g.close()
g = gen()
print(next(g))
g.pend_throw(4)
g.close()
print("OK")
|
from .stream_interface import WbvalveStreamInterface
__all__ = ['WbvalveStreamInterface']
|
# Microsoft Azure Linux Agent
#
# Copyright 2014 Microsoft Corporation
# Copyright (c) 2016, 2017 by Delphix. All rights reserved.
# Copyright 2019 Joyent, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+ and Openssl 1.0+
#
import os
import time
from azurelinuxagent.common.exception import OSUtilError
import azurelinuxagent.common.utils.fileutil as fileutil
import azurelinuxagent.common.utils.shellutil as shellutil
import azurelinuxagent.common.logger as logger
from azurelinuxagent.common.osutil.default import DefaultOSUtil
class illumosOSUtil(DefaultOSUtil):
def __init__(self):
super(illumosOSUtil, self).__init__()
#
# The methods that emit an "error" are not expected to be called
# when the agent is running on illumos. The code paths that could
# have called them have been disabled either by configuration file
# settings, or code changes to other parts of the codebase.
#
def useradd(self, username, expiration=None):
logger.error('"useradd" not supported.')
def chpasswd(self, username, password, crypt_id=6, salt_len=10):
logger.error('"chpasswd" not supported.')
def conf_sudoer(self, username, nopasswd=False, remove=False):
logger.error('"conf_sudoer" not supported.')
def conf_sshd(self, disable_password):
logger.error('"conf_sshd" not supported.')
def del_root_password(self):
logger.error('"del_root_password" not supported.')
def clear_ips_uuid(self):
if not os.path.isfile('/var/pkg/pkg5.image'): return
fileutil.update_conf_file("/var/pkg/pkg5.image", "last_uuid", "")
def stop_mgmt_service(self):
logger.error('"stop_mgmt_service" not supported.')
def start_agent_service(self):
return shellutil.run("svcadm enable -st svc:/system/virtualization/waagent", chk_err=False)
def stop_agent_service(self):
return shellutil.run("svcadm disable -st svc:/system/virtualization/waagent", chk_err=False)
def register_agent_service(self):
return shellutil.run("svcadm enable svc:/system/virtualization/waagent", chk_err=False)
def unregister_agent_service(self):
return shellutil.run("svcadm disable svc:/system/virtualization/waagent", chk_err=False)
def set_admin_access_to_ip(self, dest_ip):
logger.warn('"set_admin_access_to_ip" not supported.')
def set_hostname(self, hostname):
#
# In order for the identity-node service to properly detect the
# hostname from the contents of /etc/nodename, the file needs to
# contain a newline after the hostname. Otherwise, the service
# will simply assign "unknown" as the hostname for the system.
#
fileutil.write_file('/etc/nodename', '{0}\n'.format(hostname))
# Make it happen NOW.
ret = shellutil.run('uname -S {0}'.format(hostname))
if ret:
raise OSUtilError('Unable to set hostname to {0}.'.format(hostname))
#
# Unfortunately, there isn't a way to make the service refresh
# executed above a synchronous operation. Thus, without this
# loop, it would be possible for this function to return before
# the hostname has actually been updated.
#
# Setting the hostname on the other platforms is a synchronous
# operation, so we've opted to make this function synchronous
# as well.
#
actual = None
for i in range(0, 10):
ret = shellutil.run_get_output('hostname')
if ret[0] == 0:
actual = ret[1].strip()
else:
raise OSUtilError('Unable to retrieve hostname')
if hostname == actual:
break
else:
time.sleep(1)
if actual != hostname:
raise OSUtilError('Unable to modify hostname to the desired value')
def restart_if(self, ifname):
return shellutil.run("ipadm refresh-addr {0}".format(ifname))
def publish_hostname(self, hostname):
#
# We intentionally leave this method unimplemented as we don't
# rely on the DHCP for providing the system's hostname. Instead,
# we rely on the "set_hostname" function to configure the
# "/etc/nodename" file, as well as configure the "identity:node"
# service to always use that file's contents to configure the
# hostname of the system.
#
logger.warn('"publish_hostname" not supported.')
def set_dhcp_hostname(self, hostname):
#
# We intentionally leave this function unimplemented, for the
# same reason that we leave "publish_hostname" unimplemented;
# see the comment in that function for more details.
#
logger.warn('"set_dhcp_hostname" not supported.')
def restart_ssh_service(self):
ret = shellutil.run('svcadm disable -s svc:/network/ssh')
if ret == 0:
return shellutil.run('svcadm enable -s svc:/network/ssh')
else:
return ret
def enable_serial_console(self):
#
# For now, assume your illumos distro's VHD or image ALREADY HAS
# serial console enabled.
#
return True
def reboot_system(self):
logger.info('Rebooting system')
ret = shellutil.run('reboot')
if ret != 0:
logger.error('Failed to reboot the system')
def get_dhcp_lease_endpoint(self):
ret = shellutil.run_get_output('/sbin/dhcpinfo 245')
#
# The dhcpinfo command can fail if the Azure specific DHCP
# option of 245 isn't contained in the /etc/dhcp/inittab file.
# Additionally, if the command succeeds, it's possible that the
# option wasn't found, in which case dhcpinfo will produce no
# output.
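        #
        # When the lookup succeeds, the value returned is the Azure
        # wireserver endpoint address (typically "168.63.129.16").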
#
if ret[0] == 0 and ret[1] != '':
return ret[1].strip()
else:
return None
def is_sys_user(self, username):
logger.warn('"is_sys_user" not supported.')
def del_account(self, username):
logger.warn('"del_account" not supported.')
def deploy_ssh_pubkey(self, username, pubkey):
logger.warn('"deploy_ssh_pubkey" not supported.')
def is_selinux_system(self):
return False
def get_dvd_mount_options(self):
return "-o ro -F udfs"
def get_dvd_device(self, dev_dir='/dev'):
cmd = "rmformat -l | grep 'Logical Node' | awk '{print $NF}' | sed -e 's/rdsk/dsk/'"
ret = shellutil.run_get_output(cmd)
if ret[0] == 0:
device = ret[1].strip()
logger.info('Using dvd device: "{0}"'.format(device))
return device
else:
raise OSUtilError('Failed to determine DVD device.')
def eject_dvd(self, chk_err=True):
logger.warn('"eject_dvd" not supported.')
def get_if_mac(self, ifname):
data = self._get_net_info()
if data[0] == ifname:
return data[2].replace(':', '').upper()
return None
def get_first_if(self):
return self._get_net_info()[:2]
def route_add(self, net, mask, gateway):
#
# The "Router" DHCP option is provided by the Azure cloud's DHCP
# server, so instead of having the Agent modify the routes, we
# rely on the DHCP client on the DE to do this.
#
logger.warn('"route_add" not supported.')
def is_missing_default_route(self):
return False
    #
    # When probing for the wireserver endpoint using DHCP, the DHCP
    # service doesn't need to be disabled when running on illumos.
    # Additionally, this won't normally be called, since the DHCP cache
    # will normally be used to determine the wireserver endpoint; and
    # thus, we won't need to probe for the endpoint using DHCP requests.
    #
def is_dhcp_enabled(self):
return False
def allow_dhcp_broadcast(self):
pass
def get_dhcp_pid(self):
ret = shellutil.run_get_output("pgrep -c $(svcs -H -o ctid svc:/network/dhcp-client)", chk_err=False)
return ret[1] if ret[0] == 0 else None
def set_scsi_disks_timeout(self, timeout):
pattern = r'^set sd:sd_io_time = (.*)$'
#
# Since changes to this setting require a reboot to take effect,
# we're careful to only change the value and print the warning
# message if the current value is different than the desired
# value. Essentially, we only want to print the warning message
# that suggest a reboot is required, if we actually modify the
# value that's already set; otherwise, we could unnecessarily
# suggest rebooting the system when that's not actually necessary.
#
for sf in ['/etc/system', '/etc/system.d/.self-assembly']:
if not os.path.isfile(sf): continue
match = fileutil.findstr_in_file(sf, pattern)
if match:
logger.info('Found existing SCSI disk timeout setting: "{0}".'.format(match.group(0)))
try:
current = int(match.group(1))
except ValueError:
raise OSUtilError('Unable to parse existing SCSI disk timeout: "{0}".'.format(match.group(1)))
if current == int(timeout):
logger.info('Current SCSI disk timeout matches desired SCSI disk timeout, skipping.')
return
logger.warn('Updating SCSI disk timeout to desired value of "{0}", reboot required to take effect.'.format(timeout))
fileutil.write_file('/etc/system.d/system:virtualization:azure-agent',
'set sd:sd_io_time = {0}\n'.format(timeout))
def check_pid_alive(self, pid):
return shellutil.run("ps -p {0}".format(pid), chk_err=False) == 0
@staticmethod
def _get_net_info():
iface = ''
inet = ''
mac = ''
err, output = shellutil.run_get_output('dladm show-ether -p -o LINK', chk_err=False)
if err:
raise OSUtilError("Can't find ether interface:{0}".format(output))
ifaces = output.split()
if not ifaces:
raise OSUtilError("Can't find ether interface.")
iface = ifaces[0]
err, output = shellutil.run_get_output('dladm show-phys -m -p -o address ' + iface, chk_err=False)
if err:
raise OSUtilError("Can't get mac address for interface:{0}".format(iface))
macs = output.split()
if not macs:
raise OSUtilError("Can't find mac address.")
mac = macs[0]
#
# It's possible for the output from "dladm show-phys" to output
# a mac address, such that each octet is not two characters
# (e.g. "2:dc:0:0:23:ff"). Certain parts of the agent expect
# each octet of the mac address to be two hex characters long,
# so we're forcing the address returned by this function to
# always have two character long octets.
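        # For example, "2:dc:0:0:23:ff" becomes "02:dc:00:00:23:ff".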
#
mac = ":".join(map(lambda x: "{0:02x}".format(int(x, 16)), mac.split(":")))
err, output = shellutil.run_get_output('ipadm show-addr -p -o addr ' + iface + '/', chk_err=False)
if err:
raise OSUtilError("Can't get ip address for interface:{0}".format(iface))
ips = output.split()
if not ips:
raise OSUtilError("Can't find ip address.")
ip = ips[0].split('/')[0]
logger.verbose("Interface info: ({0},{1},{2})", iface, ip, mac)
return iface, ip, mac
def device_for_ide_port(self, port_id):
logger.warn('"device_for_ide_port" not supported.')
|
def perm(l):
    # Return the list of all permutations of l.
    if len(l) <= 1:
        return [l]
    r = []
    for i in range(len(l)):
        s = l[:i] + l[i+1:]
        p = perm(s)
        for x in p:
            r.append(l[i:i+1] + x)
    return r
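# Illustrative usage (not part of the original snippet):
#   perm([1, 2, 3]) -> [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]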
|
from discord.ext import commands
from discord.utils import get
import discord
import sys
import subprocess
import asyncio
import datetime
import re
import random
import requests
import json
import os
if not os.path.isfile("settings.json"):
sys.exit("'settings.json' not found!")
else:
with open("settings.json") as file:
settings = json.load(file)
class Forecast(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(brief='weatherreport', description='detail')
async def forecast(self,ctx):
if ctx.message.content == settings["prefix"]+"forecast":
embed = discord.Embed(title="Error", description=settings["prefix"]+"forecast requires a location \n!forecast <Location/City/Town>", color=0xFF0000)
await ctx.send(content=None, embed=embed)
return
city = str(ctx.message.content).replace(settings["prefix"]+"forecast ","").capitalize()
        try:
            response = requests.get("https://api.openweathermap.org/data/2.5/forecast?q="+city+"&appid="+settings["API-token"])
        except requests.exceptions.RequestException:
            embed = discord.Embed(title="Error: 400", description="An external error has occurred!", color=0xFF0000)
            await ctx.send(content=None, embed=embed)
            return
#conversion to json
jsondata = response.json()
temp_list=[]
temp_list_raw=[]
hum_list_raw=[]
status_list=[]
timestamp_list=[]
id_list=[]
RainCounter = 0
RainID=[-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1]
try:
for i in range(0,9,1):
temp_list.append(str(round(jsondata['list'][i]['main']['temp']-273.15,1))+" °C")
temp_list_raw.append(round(jsondata['list'][i]['main']['temp']-273.15,1))
hum_list_raw.append(round(jsondata['list'][i]['main']['humidity'])*100)
icon_string = str(jsondata['list'][i]['weather'][0]['icon'])
#Replaces id with emoji
x = icon_string
x = x.replace("01d",":sunny:")
x = x.replace("01n",":sunny:")
x = x.replace("02d",":white_sun_small_cloud:")
x = x.replace("02n",":white_sun_small_cloud:")
x = x.replace("03d",":white_sun_cloud:")
x = x.replace("03n",":white_sun_cloud:")
x = x.replace("04d",":cloud:")
x = x.replace("04n",":cloud:")
x = x.replace("09d",":white_sun_rain_cloud:")
x = x.replace("09n",":white_sun_rain_cloud:")
x = x.replace("10d",":cloud_rain:")
x = x.replace("10n",":cloud_rain:")
x = x.replace("11d",":thunder_cloud_rain:")
x = x.replace("11n",":thunder_cloud_rain:")
x = x.replace("13d",":snowflake:")
x = x.replace("13n",":snowflake:")
x = x.replace("50d",":rock:")
x = x.replace("50n",":rock:")
if 'rain' in str(jsondata['list'][i]['weather'][0]['description']):
status_list.append(x+" "+str(jsondata['list'][i]['weather'][0]['description']).capitalize()+" ("+str(round(float(jsondata['list'][i]['pop'])*100))+"%)")
RainID[RainCounter]= i
RainCounter = RainCounter + 1
else:
status_list.append(x+" "+str(jsondata['list'][i]['weather'][0]['description']).capitalize() )
tmpstring=str(jsondata['list'][i]['dt_txt'])
timestamp_list.append(tmpstring[-8:-3]+"h")
id_list.append(str(jsondata['list'][i]['weather'][0]['icon']))
        except (KeyError, IndexError):
            embed = discord.Embed(title="Error: 404", description="The location you specified is not valid.", color=0xFF0000)
            await ctx.send(content=None, embed=embed)
            return
intro_string_list = ['In the next hours it will be ','The weather for now will be ']
intro_string = random.choice(intro_string_list)
weather_stat_next_hours = jsondata['list'][1]['weather'][0]['description']
rain_string_list = ['No Rain is expected for the next time. ', 'There seems to be a slight risk of Rain in the next time. ', 'There is a high risk of Rain in the next few hours. ']
if(RainID[0] == 0 or RainID[0] == 1 or RainID[0] == 2 ):
rain_decider = 2
elif(RainID[0] == 3 or RainID[0] == 4 or RainID[0] == 5 ):
rain_decider = 1
else:
rain_decider = 0
rain_string = rain_string_list[rain_decider]
Forecast_string = intro_string + weather_stat_next_hours + ". " + rain_string
embed = discord.Embed(title="Weather Forecast for "+ city, description="The Weather forecast in "+city+" for the next 24 hours is as follows: \n" + Forecast_string, color=0x1C9FF6)
embed.add_field(name="Temperature", value="\n".join(temp_list), inline=True)
embed.add_field(name="Weather", value="\n".join(status_list), inline=True)
embed.add_field(name="Time", value="\n".join(timestamp_list), inline=True)
await ctx.send(content=None, embed=embed)
def setup(bot):
bot.add_cog(Forecast(bot))
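# Illustrative loading sketch (assumes this file is saved as "forecast.py"
# next to the bot's entry point; the extension name is an assumption):
#   bot.load_extension("forecast")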
|
# Copyright (c) 2021 - Jojo#7791
# Licensed under MIT
import logging
from typing import Dict, Iterable, Optional, Union
from discord import Guild, Member, Role, User
from redbot.core import Config
from redbot.core.bot import Red
from ...const import _config_structure # type:ignore
__all__ = [
"add_to_blacklist",
"add_to_whitelist",
"clear_blacklist",
"clear_whitelist",
"edit_reason",
"get_blacklist",
"get_whitelist",
"in_blacklist",
"in_whitelist",
"remove_from_blacklist",
"remove_from_whitelist",
"startup",
]
log = logging.getLogger("red.jojocogs.advancedblacklist.api")
_config = Config.get_conf(None, 544974305445019651, True, "AdvancedBlacklist")
for _scope, _structure in _config_structure.items():
    getattr(_config, f"register_{_scope}", lambda **kwargs: kwargs)(**_structure)
UserOrRole = Union[int, Role, Member, User]
async def startup(bot: Red):
await _schema_check()
for i in ("whitelist", "blacklist"):
async with getattr(_config, i)() as bl:
blacklist = await getattr(bot._whiteblacklist_cache, f"get_{i}")(None)
for uid in blacklist:
if str(uid) in bl.keys():
continue
bl[str(uid)] = "No reason provided."
keys = list(bl.keys())
for key in keys:
if int(key) not in blacklist:
bl.pop(key)
for guild in bot.guilds:
for i in ("whitelist", "blacklist"):
async with getattr(_config.guild(guild), i)() as bl:
blacklist = await getattr(bot._whiteblacklist_cache, f"get_{i}")(guild)
for uid in blacklist:
if str(uid) in bl.keys():
continue
bl[str(uid)] = "No reason provided."
keys = list(bl.keys())
for key in keys:
if int(key) not in blacklist:
bl.pop(key)
async def _schema_check():
data = await _config.all()
if data.get("schema_v1"):
return
    log.debug("Config schema is not v1, migrating old data")
guild_data = data.pop("localblacklist", None)
if guild_data:
for gid, gdata in guild_data.items():
await _config.guild_from_id(gid).set_raw("blacklist", value=gdata)
await _config.schema_v1.set(True)
async def add_to_blacklist(
bot: Red,
users_or_roles: Iterable[UserOrRole],
reason: str,
*,
guild: Optional[Guild] = None,
override: Optional[bool] = False,
) -> None:
coro = _config if not guild else _config.guild(guild)
async with coro.blacklist() as bl:
for item in users_or_roles:
item = str(getattr(item, "id", item))
bl[item] = reason
if override:
return
await bot._whiteblacklist_cache.add_to_blacklist( # type:ignore
guild, {getattr(u, "id", u) for u in users_or_roles}, dispatch=False
)
async def remove_from_blacklist(
bot: Red,
users_or_roles: Iterable[UserOrRole],
*,
guild: Optional[Guild] = None,
override: Optional[bool] = False,
) -> None:
coro = _config if not guild else _config.guild(guild)
async with coro.blacklist() as bl:
for item in users_or_roles:
item = str(getattr(item, "id", item))
bl.pop(item, None)
if override:
return
await bot._whiteblacklist_cache.remove_from_blacklist( # type:ignore
guild, {getattr(u, "id", u) for u in users_or_roles}, dispatch=False
)
async def in_blacklist(bot: Red, id: int, guild: Optional[Guild] = None) -> bool:
coro = _config if not guild else _config.guild(guild)
data = await coro.blacklist()
return str(id) in data.keys() or id in await bot._whiteblacklist_cache.get_blacklist(guild)
async def edit_reason(
bot: Red,
user: Union[User, Member, int],
reason: str,
whitelist: bool,
*,
guild: Optional[Guild] = None,
) -> None:
attr = "whitelist" if whitelist else "blacklist"
coro = getattr((_config if not guild else _config.guild(guild)), attr)
uid = getattr(user, "id", user)
async with coro() as edit:
edit[str(uid)] = reason
async def get_blacklist(bot: Red, guild: Optional[Guild] = None) -> Dict[str, str]:
coro = _config if not guild else _config.guild(guild)
ret = await coro.blacklist()
if not ret:
# So, we don't have a blacklist in the config
# Let's check if the bot has a blacklist in the cache
blacklist = await bot._whiteblacklist_cache.get_blacklist(guild)
if not blacklist:
return {}
ret = {str(i): "No reason provided." for i in blacklist}
await coro.blacklist.set(ret)
return ret
async def clear_blacklist(
bot: Red, guild: Optional[Guild] = None, override: Optional[bool] = False
) -> None:
coro = _config if not guild else _config.guild(guild)
await coro.blacklist.clear()
if override:
return
await bot._whiteblacklist_cache.clear_blacklist(guild, dispatch=False) # type:ignore
async def add_to_whitelist(
bot: Red,
users_or_roles: Iterable[UserOrRole],
reason: str,
*,
guild: Optional[Guild] = None,
override: Optional[bool] = False,
) -> None:
coro = _config if not guild else _config.guild(guild)
async with coro.whitelist() as wl:
for item in users_or_roles:
item = str(getattr(item, "id", item))
wl[item] = reason
if override:
return
await bot._whiteblacklist_cache.add_to_whitelist( # type:ignore
guild, {getattr(u, "id", u) for u in users_or_roles}, dispatch=False
)
async def remove_from_whitelist(
bot: Red,
users_or_roles: Iterable[UserOrRole],
*,
guild: Optional[Guild] = None,
override: Optional[bool] = False,
) -> None:
coro = _config if not guild else _config.guild(guild)
async with coro.whitelist() as wl:
for item in users_or_roles:
item = str(getattr(item, "id", item))
wl.pop(item, None)
if override:
return
await bot._whiteblacklist_cache.remove_from_whitelist( # type:ignore
guild, {getattr(u, "id", u) for u in users_or_roles}, dispatch=False
)
async def get_whitelist(bot: Red, guild: Optional[Guild] = None) -> Dict[str, str]:
coro = _config if not guild else _config.guild(guild)
ret = await coro.whitelist()
if not ret:
# Like with the `get_blacklist` method let's check the bot's whitelist
whitelist = await bot._whiteblacklist_cache.get_whitelist(guild)
if not whitelist:
return {}
ret = {str(i): "No reason provided." for i in whitelist}
await coro.whitelist.set(ret)
return ret
async def in_whitelist(bot: Red, id: int, guild: Optional[Guild] = None) -> bool:
coro = _config if not guild else _config.guild(guild)
data = await coro.whitelist()
return str(id) in data.keys() or id in await bot._whiteblacklist_cache.get_whitelist(guild)
async def clear_whitelist(
bot: Red, guild: Optional[Guild] = None, override: Optional[bool] = False
) -> None:
coro = _config if not guild else _config.guild(guild)
await coro.whitelist.clear()
if override:
return
await bot._whiteblacklist_cache.clear_whitelist(guild, dispatch=False) # type:ignore
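# Illustrative usage sketch (assumes a running Red bot instance `bot` and an
# async calling context; the id and reason below are made up):
#   await add_to_blacklist(bot, [123456789], "spamming")
#   assert await in_blacklist(bot, 123456789)
#   await remove_from_blacklist(bot, [123456789])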
|
from guard import Community, polity, default_parameters, generate_parameters
import pytest
@pytest.fixture
def polity_10():
state = polity.Polity(
[Community(default_parameters) for i in range(10)]
)
return state
@pytest.fixture
def arbitrary_polity():
def _arbitrary_polity(size):
state = polity.Polity(
[Community(default_parameters) for i in range(size)]
)
return state
return _arbitrary_polity
@pytest.fixture
def example_traits():
traits = [3, 4, 6, 3, 8, 3, 2, 2, 8, 6]
mean_traits = sum(traits)/len(traits)
return traits, mean_traits
# Test the polity class
class TestPolity(object):
# Add a new community to the polity
def test_add_community(self, polity_10):
state = polity_10
state.add_community(Community(default_parameters))
assert state.size() == 11
# Ensure new community points to the polity
def test_new_community(self, polity_10):
state = polity_10
state.add_community(Community(default_parameters))
assert state.communities[-1].polity is state
# Remove a community from the polity
def test_remove_community(self, polity_10):
state = polity_10
state.remove_community(state.communities[0])
assert state.size() == 9
# Determine the mean number of ultrasocietal traits of communities
# in the polity
def test_mean_ultrasocietal_traits(self, polity_10, example_traits):
state = polity_10
traits, mean_traits = example_traits
set_ultrasocietal_traits(default_parameters, state, traits)
assert mean_traits == state.mean_ultrasocietal_traits()
# Calculate the attack power of the polity
def test_attack_power(self, polity_10, example_traits):
state = polity_10
traits, mean_traits = example_traits
set_ultrasocietal_traits(default_parameters, state, traits)
attack_power = (default_parameters.ultrasocietal_attack_coefficient
* sum(traits) + 1.)
assert attack_power == state.attack_power(default_parameters)
# Test disintegration method
class TestDisintegration(object):
# Ensure disintegration works correctly
def test_disintegration(self, polity_10):
state = polity_10
new_states = state.disintegrate()
assert state.size() == 0
assert all([polity.size() == 1 for polity in new_states])
assert all(
[polity.communities[0].polity != state for polity in new_states]
)
assert all(
[polity.communities[0].polity == polity for polity in new_states]
)
# Calculate the disintegration probability
def disintegration_probability(self, params, size, mean_traits):
probability = (
params.disintegration_size_coefficient*size
- params.disintegration_ultrasocietal_trait_coefficient*mean_traits
)
if probability > 0:
return min(params.disintegration_base + probability, 1)
else:
return params.disintegration_base
# Ensure the minimum probability
def test_negative_disintegration_probability(self, polity_10):
size = 10
state = polity_10
traits = [default_parameters.n_ultrasocietal_traits]*size
mean_traits = sum(traits)/len(traits)
set_ultrasocietal_traits(default_parameters, state, traits)
probability = self.disintegration_probability(default_parameters, size,
mean_traits)
assert (probability
== state.disintegrate_probability(default_parameters)
== default_parameters.disintegration_base)
# Ensure the maximum probability is 1
def test_large_disintegration_probability(self, arbitrary_polity):
size = 100
state = arbitrary_polity(size)
traits = [0]*size
mean_traits = sum(traits)/len(traits)
set_ultrasocietal_traits(default_parameters, state, traits)
probability = self.disintegration_probability(default_parameters, size,
mean_traits)
assert (probability
== state.disintegrate_probability(default_parameters)
== 1)
# Calculate the probability for an intermediate case where the
# probability is neither 1 nor 0
def test_intermediate_disintegration_probability(self, arbitrary_polity):
size = 50
state = arbitrary_polity(size)
traits = [1]*size
mean_traits = sum(traits)/len(traits)
set_ultrasocietal_traits(default_parameters, state, traits)
probability = self.disintegration_probability(default_parameters, size,
mean_traits)
assert all(
[probability > 0,
probability < 1,
probability == state.disintegrate_probability(default_parameters)]
)
# Tests for communities which require them to be members of a polity
class TestCommunitiesInPolity(object):
# Ensure all communities point to the polity
def test_community_assignment(self, polity_10):
state = polity_10
assert ([community.polity for community in state.communities] ==
[state]*len(state.communities))
# Calculate attack power from a community object
def test_community_attack_power(self, polity_10, example_traits):
state = polity_10
traits, _ = example_traits
set_ultrasocietal_traits(default_parameters, state, traits)
assert (state.communities[0].attack_power(default_parameters)
== state.attack_power(default_parameters))
# Calculate the defensive power of a community
def test_community_defence_power(self, polity_10, example_traits):
state = polity_10
traits, _ = example_traits
set_ultrasocietal_traits(default_parameters, state, traits)
elevation = 50
index = 4
state.communities[index].elevation = elevation
defence_power = (state.attack_power(default_parameters)
+ default_parameters.elevation_defence_coefficient
* elevation)
assert (
defence_power
== state.communities[index].defence_power(default_parameters,
sea_attack=False)
)
# Calculate the defensive power of a community in a sea attack
def test_community_defence_power_sea(self, polity_10, example_traits):
state = polity_10
traits, _ = example_traits
set_ultrasocietal_traits(default_parameters, state, traits)
elevation = 50
index = 4
state.communities[index].elevation = elevation
assert (
state.attack_power(default_parameters)
== state.communities[index].defence_power(default_parameters,
sea_attack=True)
)
# Test attempting cultural shift on all communities in a polity
class TestCulturalShift(object):
def test_shift_to_true(self, polity_10):
params = generate_parameters(mutation_to_ultrasocietal=1,
mutation_from_ultrasocietal=1)
state = polity_10
state.cultural_shift(params)
assert (
state.mean_ultrasocietal_traits() == params.n_ultrasocietal_traits
)
def test_shift_to_false(self, polity_10):
params = generate_parameters(mutation_to_ultrasocietal=1,
mutation_from_ultrasocietal=1)
state = polity_10
for tile in state.communities:
tile.ultrasocietal_traits = [True]*params.n_ultrasocietal_traits
state.cultural_shift(params)
assert state.mean_ultrasocietal_traits() == 0
# Test transfering of a community from one polity to another
def test_transfer(arbitrary_polity):
size_a = 10
state_a = arbitrary_polity(size_a)
size_b = 5
state_b = arbitrary_polity(size_b)
# Give the ceded community some characteristic trait
ceded_community = state_b.communities[3]
ceded_community.elevation = 12
# Transfer the ceded community to state a from state b
state_a.transfer_community(ceded_community)
assert all([ceded_community in state_a.communities,
ceded_community not in state_b.communities,
state_a.size() == 11, state_b.size() == 4,
state_a.communities[-1].elevation == 12])
# Assign the example ultrasocietal traits to polity
def set_ultrasocietal_traits(params, polity, traits):
for i, number in enumerate(traits):
polity.communities[i].ultrasocietal_traits = (
[True]*number + [False]*(params.n_ultrasocietal_traits-number)
)
|
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import UnivariateSpline
n, c, gflops = np.loadtxt('triad.txt', unpack=True)
spline = UnivariateSpline(n, np.log10(gflops))
#spline.set_smoothing_factor(0.5)
plt.semilogx(n, gflops, 'ko')
#plt.semilogx(n, 10**spline(n), 'k', alpha=0.5)
plt.xlabel('N')
plt.ylabel('GFLOP/S')
plt.show()
|
import os
import re
import cv2
import torch
import imgaug
import numpy as np
import matplotlib.pyplot as plt
from termcolor import colored
from imgaug import augmenters
from torchvision import transforms
from torch.utils.data import Dataset
from torch.utils.data import DataLoader as DL
from sklearn.model_selection import train_test_split
os.system("color")
#####################################################################################################
class DS(Dataset):
def __init__(self, X=None, y=None, transform=None, mode="train"):
self.mode = mode
self.transform = transform
assert(re.match(r"train", self.mode, re.IGNORECASE) or re.match(r"valid", self.mode, re.IGNORECASE) or re.match(r"test", self.mode, re.IGNORECASE))
self.X = X
if re.match(r"train", self.mode, re.IGNORECASE) or re.match(r"valid", self.mode, re.IGNORECASE):
self.y = y
def __len__(self):
return self.X.shape[0]
def __getitem__(self, idx):
if re.match(r"train", self.mode, re.IGNORECASE) or re.match(r"valid", self.mode, re.IGNORECASE):
return self.transform(self.X[idx]), torch.LongTensor(self.y[idx])
else:
return self.transform(self.X[idx])
#####################################################################################################
def myprint(text: str, color: str) -> None:
print(colored(text=text, color=color))
def breaker(num=50, char="*") -> None:
myprint("\n" + num*char + "\n", "magenta")
def debug(text: str):
myprint(text, "red")
#####################################################################################################
def get_augment(seed: int):
imgaug.seed(seed)
augment = augmenters.SomeOf(None, [
augmenters.HorizontalFlip(p=0.5),
augmenters.VerticalFlip(p=0.5),
augmenters.Affine(scale=(0.75, 1.25), translate_percent=(-0.1, 0.1), rotate=(-45, 45), seed=seed),
], seed=seed)
return augment
def read_image(name: str) -> np.ndarray:
image = cv2.imread(os.path.join(TEST_DATA_PATH, name), cv2.IMREAD_COLOR)
assert(image is not None)
return cv2.cvtColor(src=image, code=cv2.COLOR_BGR2RGB)
def downscale(image: np.ndarray, size: int) -> np.ndarray:
return cv2.resize(src=image, dsize=(size, size), interpolation=cv2.INTER_AREA)
def show(image: np.ndarray, title=None) -> None:
plt.figure()
plt.imshow(image)
plt.axis("off")
if title:
plt.title(title)
plt.show()
def get_images(path: str, size: int) -> np.ndarray:
images = np.zeros((len(os.listdir(path)), size, size, 3)).astype("uint8")
i = 0
for name in os.listdir(path):
image = cv2.imread(os.path.join(path, name), cv2.IMREAD_COLOR)
image = cv2.cvtColor(src=image, code=cv2.COLOR_BGR2RGB)
image = cv2.resize(src=image, dsize=(size, size), interpolation=cv2.INTER_AREA)
images[i] = image
i += 1
return images
def get_data(base_path: str):
folders = os.listdir(base_path)
images_1 = get_images(os.path.join(base_path, folders[0]), size=SIZE)
images_2 = get_images(os.path.join(base_path, folders[1]), size=SIZE)
images_3 = get_images(os.path.join(base_path, folders[2]), size=SIZE)
images_4 = get_images(os.path.join(base_path, folders[3]), size=SIZE)
labels_1 = np.zeros((images_1.shape[0], 1))
labels_2 = np.ones((images_2.shape[0], 1))
labels_3 = np.ones((images_3.shape[0], 1)) * 2
labels_4 = np.ones((images_4.shape[0], 1)) * 3
images = np.concatenate((images_1, images_2, images_3, images_4), axis=0)
labels = np.concatenate((labels_1, labels_2, labels_3, labels_4), axis=0)
return images, labels
def build_dataloaders(path: str, batch_size: int, pretrained=False, do_augment=False):
breaker()
myprint("Fetching images and labels ...", "yellow")
images, labels = get_data(path)
breaker()
myprint("Splitting into train and validation sets ...", "yellow")
tr_images, va_images, tr_labels, va_labels = train_test_split(images, labels, test_size=0.2, shuffle=True, random_state=SEED, stratify=labels)
if do_augment:
breaker()
myprint("Augmenting training set ...", "yellow")
augment = get_augment(SEED)
tr_images = augment(images=tr_images)
breaker()
myprint("Building Dataloaders ...", "yellow")
if pretrained:
tr_data_setup = DS(X=tr_images, y=tr_labels, transform=TRANSFORM_1, mode="train")
va_data_setup = DS(X=va_images, y=va_labels, transform=TRANSFORM_1, mode="valid")
else:
tr_data_setup = DS(X=tr_images, y=tr_labels, transform=TRANSFORM_2, mode="train")
va_data_setup = DS(X=va_images, y=va_labels, transform=TRANSFORM_2, mode="valid")
dataloaders = {
"train" : DL(tr_data_setup, batch_size=batch_size, shuffle=True, generator=torch.manual_seed(SEED)),
"valid" : DL(va_data_setup, batch_size=batch_size, shuffle=False)
}
return dataloaders
def save_graphs(L: list, A: list) -> None:
TL, VL, TA, VA = [], [], [], []
for i in range(len(L)):
TL.append(L[i]["train"])
VL.append(L[i]["valid"])
TA.append(A[i]["train"])
VA.append(A[i]["valid"])
x_Axis = np.arange(1, len(TL) + 1)
plt.figure("Plots")
plt.subplot(1, 2, 1)
plt.plot(x_Axis, TL, "r", label="Train")
plt.plot(x_Axis, VL, "b", label="Valid")
plt.legend()
plt.grid()
plt.title("Loss Graph")
plt.subplot(1, 2, 2)
plt.plot(x_Axis, TA, "r", label="Train")
plt.plot(x_Axis, VA, "b", label="Valid")
plt.legend()
plt.grid()
plt.title("Accuracy Graph")
plt.savefig("./Graphs.jpg")
plt.close("Plots")
#####################################################################################################
SEED = 0
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
DATA_PATH_1 = "./Data"
DATA_PATH_2 = "./Data_Reduced"
DATA_PATH_3 = "../input/edible-and-poisonous-fungi"
data_path_4 = None
TEST_DATA_PATH = "./Test Images"
CHECKPOINT_PATH = "./Checkpoints"
if not os.path.exists(CHECKPOINT_PATH):
os.makedirs(CHECKPOINT_PATH)
# TRANSFORM_1 normalizes with ImageNet statistics (for pretrained backbones);
# TRANSFORM_2 skips normalization to avoid clipping issues when displaying tensors as images
TRANSFORM_1 = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])])
TRANSFORM_2 = transforms.Compose([transforms.ToTensor(), ])
SIZE = 224
LABELS = [
"Edible Mushroom Sporocarp",
"Edible Sporocarp",
"Poisonous Mushroom Sporocarp",
"Poisonous Sporocarp"
]
#####################################################################################################
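# Illustrative usage sketch (paths and batch size are assumptions):
#   dataloaders = build_dataloaders(DATA_PATH_1, batch_size=64, pretrained=True, do_augment=True)
#   for images, labels in dataloaders["train"]:
#       ...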
|
import unittest
import json
from webhook import create_app
from unittest.mock import call
from unittest.mock import patch
class TestRoadLocation(unittest.TestCase):
@patch('webhook.views.rabbit', spec=['channel'])
@patch('webhook.views.requests', spec=['post'])
def test_location(self, request_mock, rabbit_mock):
app = create_app(test_config={
'TESTING': True,
'GOOGLE_KEY': 'dummy',
'API_KEY': 'dummy',
'RABBITMQ_USER': 'dummy',
'RABBITMQ_PWD': 'dummy',
'RABBITMQ_EXCHANGE': 'webhook'})
requested_location = {'long': 4.8367074, 'lat': 51.321642499999996 }
expected_result = {'snappedPoints':
[{'location': {'latitude': 51.32162537389561, 'longitude': 4.836712880479233},
'originalIndex': 0, 'placeId': 'ChIJv2Ez8i2sxkcRgMoDzJ4ADy0'},
{'location': {'latitude': 51.32162537389561, 'longitude': 4.836712880479233},
'originalIndex': 0, 'placeId': 'ChIJv2Ez8i2sxkcRgcoDzJ4ADy0'}]}
with app.test_client() as client:
rabbit_mock.channel.basic_publish.return_value = True
request_mock.post.return_value.json.return_value = expected_result
request_mock.post.return_value.status_code = 200
rv = client.post('/demo/street?api_key={}'.format(app.config['API_KEY']),
data=json.dumps(requested_location),
content_type='application/json')
self.assertEqual([call.channel.basic_publish(
body=expected_result,
exchange='webhook', routing_key='demo')], rabbit_mock.mock_calls)
self.assertEqual({'success': True}, json.loads(rv.data))
|
from random import randint
from asciimatics.screen import Screen
import time
def demo(screen):
for i in range(1,12953):
with open("output/output"+str(i)+".txt",'r') as f:
lines = f.readlines()
count = 0;
for line in lines:
screen.print_at(line,0,count)
count = count + 1
screen.refresh()
Screen.wrapper(demo)
|
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../..', 'wikidump'))
import collections
from wikidump.extractors import user_warnings_template
INPUT_TEXT = """
<includeonly>This is a sample template</includeonly>
<includeonly><noinclude>This is a sample template</noinclude> part 2</includeonly>
<includeonly>Hi there</includeonly>
"""
def wikibreaks_extractor():
new_regex = user_warnings_template.userwarnings_regex_extractor(INPUT_TEXT)
print('{}'.format(new_regex))
if __name__ == "__main__":
wikibreaks_extractor()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function, unicode_literals)
from collections import namedtuple, defaultdict, OrderedDict
import logging
import cgi
import re
import datetime
log = logging.getLogger(__name__)
class FragmentElement(object):
pass
class Fragment(object):
_types = None
@classmethod
def from_json(cls, data):
"""Create a corresponding fragment object from json."""
if not cls._types:
cls._types = {
"Image": Fragment.Image,
"Color": Fragment.Color,
"Text": Fragment.Text,
"Select": Fragment.Text,
"Number": Fragment.Number,
"Range": Fragment.Range,
"Date": Fragment.Date,
"Timestamp": Fragment.Timestamp,
"StructuredText": StructuredText,
"Link.document": Fragment.DocumentLink,
"Link.file": Fragment.MediaLink,
"Link.web": Fragment.WebLink,
"Link.image": Fragment.ImageLink,
"Embed": Fragment.Embed,
"GeoPoint": Fragment.GeoPoint,
"Group": Fragment.Group,
"SliceZone": Fragment.SliceZone
}
fragment_type = data.get("type")
f_type = cls._types.get(fragment_type)
if f_type:
return f_type(data.get("value"))
log.warning("fragment_type not found: %s" % fragment_type)
class WithFragments(object):
def __init__(self, fragments):
self.fragments = fragments
def get(self, field):
return self.fragments.get(field, None)
def get_field(self, field):
return self.fragments.get(field, None)
def get_all(self, field):
indexed_key = "^%s(\[\d+\])?$" % field
return list(v for k, v in list(self.fragments.items()) if re.match(indexed_key, k))
def get_fragment_type(self, field, f_type):
fragment = self.fragments.get(field)
return fragment if isinstance(fragment, f_type) else None
def get_image(self, field, view="main"):
fragment = self.get_field(field)
if isinstance(fragment, Fragment.Image):
return fragment.get_view(view) if fragment else None
if view == "main" and isinstance(fragment, StructuredText):
image = fragment.get_image()
return image.view if image else None
return None
def get_number(self, field):
return self.get_fragment_type(field, Fragment.Number)
def get_range(self, field):
return self.get_fragment_type(field, Fragment.Range)
def get_color(self, field):
return self.get_fragment_type(field, Fragment.Color)
def get_text(self, field):
fragment = self.fragments.get(field)
if isinstance(fragment, StructuredText):
texts = [block.text for block in fragment.blocks if isinstance(
block, Text)]
return "\n".join(texts) if texts else None
elif fragment is None:
return None
else:
return fragment.value
def get_link(self, field):
return self.get_fragment_type(field, Fragment.Link)
def get_embed(self, field):
return self.get_fragment_type(field, Fragment.Embed)
def get_date(self, field):
return self.get_fragment_type(field, Fragment.Date)
def get_timestamp(self, field):
return self.get_fragment_type(field, Fragment.Timestamp)
def get_geopoint(self, field):
return self.get_fragment_type(field, Fragment.GeoPoint)
def get_group(self, field):
return self.get_fragment_type(field, Fragment.Group)
def get_structured_text(self, field):
return self.get_fragment_type(field, StructuredText)
def get_slice_zone(self, field):
return self.get_fragment_type(field, Fragment.SliceZone)
def get_html(self, field, link_resolver):
"""Get the html of a field.
:param field: String with a name of the field to get.
:param link_resolver: A resolver function for document links.
Will be called with :class:`prismic.fragments.Fragment.DocumentLink <prismic.fragments.Fragment.DocumentLink>`
object as argument. Resolver function should return a string, the local url to the document.
"""
fragment = self.fragments.get(field)
return self.fragment_to_html(fragment, link_resolver)
@property
def linked_documents(self):
"""
Return the documents linked from this document's fragments
:return: array<DocumentLink>
"""
result = []
for (name, fragment) in list(self.fragments.items()):
if isinstance(fragment, Fragment.DocumentLink):
result.append(fragment)
elif isinstance(fragment, Fragment.Group):
for groupdoc in fragment.value:
result = result + groupdoc.linked_documents
elif isinstance(fragment, StructuredText):
for block in fragment.blocks:
if isinstance(block, Text):
for span in block.spans:
if isinstance(span, Span.Hyperlink):
if isinstance(span.link, Fragment.DocumentLink):
result.append(span.link)
return result
@staticmethod
def fragment_to_html(fragment, link_resolver, html_serializer=None):
if isinstance(fragment, StructuredText):
return fragment.as_html(link_resolver, html_serializer)
if isinstance(fragment, Fragment.Group)\
or isinstance(fragment, Fragment.SliceZone)\
or isinstance(fragment, Fragment.DocumentLink)\
or isinstance(fragment, Fragment.Image)\
or isinstance(fragment, Fragment.Image.View):
return fragment.as_html(link_resolver)
elif fragment:
return fragment.as_html
return None
def as_html(self, link_resolver):
html = []
for key, fragment in list(self.fragments.items()):
html.append("""<section data-field="%s">""" % key)
html.append(self.fragment_to_html(fragment, link_resolver))
html.append("""</section>""")
return ''.join(html)
def __getitem__(self, name):
return self.fragments[name]
def __iter__(self):
return iter(self.fragments)
def keys(self):
return self.fragments.keys()
def items(self):
return self.fragments.items()
def values(self):
return self.fragments.values()
# Links
class Link(FragmentElement):
@staticmethod
def parse(data):
if data is None:
return None
hyperlink_type = data.get("type")
return {
"Link.web": Fragment.WebLink,
"Link.document": Fragment.DocumentLink,
"Link.image": Fragment.MediaLink,
"Link.file": Fragment.FileLink
}.get(hyperlink_type, lambda x: None)(data.get("value"))
class DocumentLink(WithFragments, Link):
def __init__(self, value):
Fragment.WithFragments.__init__(self, OrderedDict())
document = value.get("document")
self.id = document.get("id")
self.uid = document.get("uid")
self.type = document.get("type")
self.tags = document.get("tags")
self.slug = document.get("slug")
self.is_broken = value.get("isBroken")
fragments = document.get("data").get(self.type) if "data" in document else {}
for (fragment_name, fragment_value) in list(fragments.items()):
f_key = "%s.%s" % (self.type, fragment_name)
if isinstance(fragment_value, list):
for index, fragment_value_element in enumerate(fragment_value):
self.fragments["%s[%s]" % (f_key, index)] = Fragment.from_json(
fragment_value_element)
elif isinstance(fragment_value, dict):
self.fragments[f_key] = Fragment.from_json(fragment_value)
def as_html(self, documentlink_resolver, html_serializer=None):
"""Get the DocumentLink as html.
:param documentlink_resolver: A resolver function will be called with
:class:`prismic.fragments.Fragment.DocumentLink <prismic.fragments.Fragment.DocumentLink>` object as
argument. Resolver function should return a string, the local url to the document.
"""
return """<a href="%(link)s">%(slug)s</a>""" % {
"link": self.get_url(documentlink_resolver),
"slug": self.slug
}
def get_url(self, documentlink_resolver=None):
if not hasattr(documentlink_resolver, '__call__'):
raise Exception(
"documentlink_resolver should be a callable object, but it's: %s"
% type(documentlink_resolver)
)
return documentlink_resolver(self)
def get_document_id(self):
return self.id
def get_document_type(self):
return self.type
def get_document_tags(self):
return self.tags
def get_document_slug(self):
return self.slug
def __repr__(self):
return "DocumentLink %s, %s, %s, %s" % (self.id, self.type, self.tags, self.is_broken)
class WebLink(Link):
def __init__(self, value):
self.url = value.get("url")
@property
def as_html(self):
return """<a href="%(url)s">%(url)s</a>""" % self.__dict__
def get_url(self, link_resolver=None):
return self.url
class MediaLink(Link):
def __init__(self, value):
self.image = value.get("image")
self.name = self.image.get("name")
self.kind = self.image.get("kind")
self.url = self.image.get("url")
self.size = self.image.get("size")
self.height = self.image.get("height")
self.width = self.image.get("width")
def as_html(self):
return "<a href='%(url)s'>%(name)s</a>" % self.__dict__
def get_url(self, link_resolver=None):
return self.url
class FileLink(Link):
def __init__(self, value):
self.file = value.get("file")
self.url = self.file.get("url")
self.kind = self.file.get("kind")
self.size = self.file.get("size")
self.name = self.file.get("name")
def as_html(self):
return "<a href='%(url)s'>%(name)s</a>" % self.__dict__
def get_file(self):
return self.file
def get_filename(self):
return self.name
def get_url(self, link_resolver=None):
return self.url
class ImageLink(Link):
def __init__(self, value):
self.image = value.get("image")
self.url = self.image.get("url")
self.alt = self.image.get("alt", "")
@property
def as_html(self):
return """<a href="%(url)s"><img src="%(url)s" alt="%(alt)s"/></a>""" % self.__dict__
def get_image(self):
return self.image
def get_url(self):
return self.url
class Image(FragmentElement):
_View = namedtuple('View', ['url', 'width', 'height', 'linkTo'])
class View(FragmentElement):
"""View class"""
def __init__(self, data):
self.url = data["url"]
self.width = data["dimensions"]["width"]
self.height = data["dimensions"]["height"]
self.alt = data.get("alt")
self.copyright = data.get("copyright")
self.link_to = Fragment.Link.parse(data.get("linkTo"))
self.label = data.get("label")
def as_html(self, link_resolver):
img_tag = """<img src="%(url)s" alt="%(alt)s" width="%(width)s" height="%(height)s" />""" % {
'url': self.url,
'width': self.width,
'height': self.height,
'alt': self.alt if (self.alt is not None) else ""
}
if self.link_to is None:
return img_tag
else:
url = self.link_to.get_url(link_resolver)
return """<a href="%(url)s">%(content)s</a>""" % {
'url': url,
'content': img_tag
}
@property
def ratio(self):
return self.width / self.height
def __init__(self, value):
main, views, link = value.get("main"), value.get("views"), value.get("linkTo")
self.main = Fragment.Image.View(main)
self.views = {
view_key: Fragment.Image.View(view_value) for (view_key, view_value) in list(views.items())
}
self.link_to = Fragment.Link.parse(link)
def get_view(self, key):
if key == "main":
return self.main
else:
return self.views.get(key)
def as_html(self, link_resolver):
view_html = self.main.as_html(link_resolver)
if self.link_to is None:
return view_html
else:
return """<a href="%(url)s">%(content)s</a>""" % {
'url': self.link_to.get_url(link_resolver),
'content': view_html
}
class Embed(FragmentElement):
def __init__(self, value):
oembed = value.get("oembed")
self.type = oembed.get("type")
            self.provider = oembed.get("provider_name")
self.url = oembed.get("embed_url")
self.width = oembed.get("width")
self.height = oembed.get("height")
self.html = oembed.get("html")
@property
def as_html(self):
return ("""<div data-oembed="%(url)s" data-oembed-type="%(type)s" data-oembed-provider="%(provider)s">"""
"%(html)s"
"</div>") % self.__dict__
class GeoPoint(FragmentElement):
def __init__(self, value):
self.latitude = value.get("latitude")
self.longitude = value.get("longitude")
@property
def as_html(self):
return ("""<div class="geopoint"><span class="latitude">"""
"""%(latitude)f</span><span class="longitude">%(longitude)f</span>"""
"""</div>""") % self.__dict__
# Basic fragments
class BasicFragment(FragmentElement):
def __init__(self, value):
self.value = value
def __str__(self):
return self.value.__str__()
class Number(BasicFragment):
@property
def as_html(self):
return """<span class="number">%g</span>""" % self.value
class Range(BasicFragment):
@property
def as_html(self):
return """<span class="range">%s</span>""" % self.value
class Color(BasicFragment):
@property
def as_html(self):
return """<span class="color">%s</span>""" % self.value
class Text(BasicFragment):
@property
def as_html(self):
return """<span class="text">%s</span>""" % cgi.escape(self.value)
class Date(BasicFragment):
@property
def as_datetime(self):
return datetime.datetime(*map(int, re.split('[^\d]', self.value)))
@property
def as_html(self):
return """<time>%s</time>""" % self.value
class Timestamp(BasicFragment):
@property
def as_datetime(self):
return datetime.datetime(*map(int, re.split('[^\d]', self.value)))
@property
def as_html(self):
return """<time>%s</time>""" % self.value
class Group(BasicFragment):
def __init__(self, value):
self.value = []
for elt in value:
fragments = OrderedDict()
for name, frag in elt.items():
fragments[name] = Fragment.from_json(frag)
self.value.append(Fragment.WithFragments(fragments))
def as_html(self, link_resolver):
html = []
for group_doc in self.value:
html.append(group_doc.as_html(link_resolver))
return "\n".join(html)
def __iter__(self):
return iter(self.value)
class Slice(FragmentElement):
def __init__(self, slice_type, slice_label, value):
self.slice_type = slice_type
self.slice_label = slice_label
self.value = value
def as_html(self, link_resolver):
classes = ['slice']
if self.slice_label is not None:
classes.append(self.slice_label)
return '<div data-slicetype="%(slice_type)s" class="%(classes)s">%(body)s</div>' % {
"slice_type": self.slice_type,
"classes": ' '.join(classes),
"body": self.value.as_html(link_resolver)
}
class CompositeSlice(FragmentElement):
def __init__(self, slice_type, slice_label, elt):
self.slice_type = slice_type
self.slice_label = slice_label
self.repeat = []
self.non_repeat = {}
_repeat = elt.get('repeat')
_non_repeat = elt.get('non-repeat')
if any(_repeat):
self.repeat = self.parse_repeat(_repeat)
if _non_repeat:
self.non_repeat = self.parse_non_repeat(_non_repeat)
@staticmethod
def parse_repeat(repeat):
return Fragment.Group(repeat)
@staticmethod
def parse_non_repeat(non_repeat):
return Fragment.Group([non_repeat])
def as_html(self, link_resolver):
classes = ['slice']
if self.slice_label:
classes.append(self.slice_label)
body = ""
if self.non_repeat:
body += self.non_repeat.as_html(link_resolver)
if self.repeat:
body += self.repeat.as_html(link_resolver)
return '<div data-slicetype="%(slice_type)s" class="%(classes)s">%(body)s</div>' % {
"slice_type": self.slice_type,
"classes": ' '.join(classes),
"body": body
}
class SliceZone(FragmentElement):
def __init__(self, value):
self.slices = []
for elt in value:
slice_type = elt['slice_type']
slice_label = elt.get('slice_label')
# Old style slice
if 'value' in elt:
fragment = Fragment.from_json(elt['value'])
self.slices.append(Fragment.Slice(slice_type, slice_label, fragment))
else:
                    self.slices.append(Fragment.CompositeSlice(slice_type, slice_label, elt))
def as_html(self, link_resolver):
html = []
for slice in self.slices:
html.append(slice.as_html(link_resolver))
return "\n".join(html)
def __iter__(self):
return iter(self.slices)
class StructuredText(object):
def __init__(self, values):
types = {
"heading1": Block.Heading,
"heading2": Block.Heading,
"heading3": Block.Heading,
"heading4": Block.Heading,
"paragraph": Block.Paragraph,
"list-item": Block.ListItem,
"o-list-item": lambda val: Block.ListItem(val, True),
"image": lambda val: Block.Image(Fragment.Image.View(val)),
"embed": lambda val: Block.Embed(Fragment.Embed(val)),
}
blocks = []
for value in values:
text_type = value.get("type")
type_class = types.get(text_type)
if type_class:
blocks.append(type_class(value))
else:
log.warning("StructuredText, type not found: %s" % text_type)
self.blocks = blocks
def get_title(self):
return next(p for p in self.blocks if isinstance(p, Block.Heading))
def get_first_paragraph(self):
return next(p for p in self.blocks if isinstance(p, Block.Paragraph))
def get_image(self):
return next(p for p in self.blocks if isinstance(p, Block.Image))
class Group(object):
def __init__(self, tag, blocks):
self.tag = tag
self.blocks = blocks
def as_html(self, link_resolver, html_serializer=None):
groups = []
for block in self.blocks:
if len(groups) > 0:
last_one = groups[-1:][0]
if last_one.tag == "ul" and isinstance(block, Block.ListItem) and not block.is_ordered:
last_one.blocks.append(block)
elif last_one.tag == "ol" and isinstance(block, Block.ListItem) and block.is_ordered:
last_one.blocks.append(block)
elif isinstance(block, Block.ListItem) and not block.is_ordered:
groups.append(StructuredText.Group("ul", [block]))
elif isinstance(block, Block.ListItem) and block.is_ordered:
groups.append(StructuredText.Group("ol", [block]))
else:
groups.append(StructuredText.Group(None, [block]))
else:
groups.append(StructuredText.Group(None, [block]))
html = []
for group in groups:
if group.tag is not None:
html.append("<%(tag)s>" % group.__dict__)
for block in group.blocks:
content = ""
if isinstance(block, Text):
content = StructuredText.span_as_html(block.text, block.spans, link_resolver, html_serializer)
html.append(StructuredText.block_as_html(block, content, link_resolver, html_serializer))
if group.tag is not None:
html.append("</%(tag)s>" % group.__dict__)
html_str = ''.join(html)
log.debug("as_html result: %s" % html_str)
return html_str
@staticmethod
def block_as_html(block, content, link_resolver, html_serializer):
if html_serializer is not None:
custom_html = html_serializer(block, content)
if custom_html is not None:
return custom_html
cls = ""
if isinstance(block, Text) and block.label is not None:
cls = " class=\"%s\"" % block.label
if isinstance(block, Block.Heading):
return "<h%(level)s%(cls)s>%(html)s</h%(level)s>" % {
"level": block.level,
"cls": cls,
"html": content
}
elif isinstance(block, Block.Paragraph):
return "<p%s>%s</p>" % (cls, content)
elif isinstance(block, Block.ListItem):
return "<li%s>%s</li>" % (cls, content)
elif isinstance(block, Block.Image):
all_classes = ["block-img"]
if block.view.label is not None:
all_classes.append(block.view.label)
return "<p class=\"%s\">%s</p>" % (" ".join(all_classes), block.get_view().as_html(link_resolver))
elif isinstance(block, Block.Embed):
return block.get_embed().as_html
@staticmethod
def span_write_tag(span, content, link_resolver, html_serializer):
if html_serializer is not None:
custom_html = html_serializer(span, content)
if custom_html is not None:
return custom_html
if isinstance(span, Span.Em):
return "<em>" + content + "</em>"
elif isinstance(span, Span.Strong):
return "<strong>" + content + "</strong>"
elif isinstance(span, Span.Hyperlink):
return """<a href="%s">""" % span.get_url(link_resolver) + content + "</a>"
else:
cls = ""
if span.label is not None:
cls = " class=\"%s\"" % span.label
return """<span%s>%s</span>""" % (cls, content)
@staticmethod
def span_as_html(text, spans, link_resolver, html_serializer):
html = []
tags_start = defaultdict(list)
tags_end = defaultdict(list)
for span in spans:
tags_start[span.start].append(span)
for span in reversed(spans):
tags_end[span.end].append(span)
index = 0
stack = []
for index, letter in enumerate(text):
if index in tags_end:
for end_tag in tags_end.get(index):
# Close a tag
tag = stack.pop()
inner_html = StructuredText.span_write_tag(tag["span"], tag["content"], link_resolver, html_serializer)
if len(stack) == 0:
# The tag was top-level
html.append(inner_html)
else:
# Add the content to the parent tag
stack[-1]["content"] += inner_html
if index in tags_start:
for span in reversed(sorted(tags_start.get(index), key=lambda s: s.length())):
# Open a tag
stack.append({
"span": span,
"content": ""
})
if len(stack) == 0:
# Top-level text
html.append(cgi.escape(letter))
else:
# Inner text of a span
stack[-1]["content"] += cgi.escape(letter)
# Check for the tags after the end of the string
while len(stack) > 0:
# Close a tag
tag = stack.pop()
inner_html = StructuredText.span_write_tag(tag["span"], tag["content"], link_resolver, html_serializer)
if len(stack) == 0:
# The tag was top-level
html.append(inner_html)
else:
# Add the content to the parent tag
stack[-1]["content"] += inner_html
return ''.join(html)
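# Note on StructuredText.span_as_html (illustrative): a Text block with text
# "Hello" and a single Strong span covering [0, 5) renders as
# "<strong>Hello</strong>"; overlapping spans are closed innermost-first via
# the stack used above.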
class Span(object):
@classmethod
def from_json(cls, data):
return {
"strong": Span.Strong,
"em": Span.Em,
"hyperlink": Span.Hyperlink
}.get(data.get("type"), Span.SpanElement)(data)
class SpanElement(object):
def __init__(self, value):
self.start = value.get("start")
self.end = value.get("end")
if value.get("data") is not None:
self.label = value.get("data").get("label")
else:
self.label = None
def length(self):
return self.end - self.start
class Em(SpanElement):
pass
class Strong(SpanElement):
pass
class Hyperlink(SpanElement):
def __init__(self, value):
super(Span.Hyperlink, self).__init__(value)
data = value.get('data')
self.link = Fragment.Link.parse(data)
if self.link is None:
log.warning("StructuredText::Span::Hyperlink type not found: %s" % data.get('type'))
def get_url(self, link_resolver):
return self.link.get_url(link_resolver)
class Text(object):
"""Base class for blocks"""
def __init__(self, value):
self.text = value.get("text")
self.spans = [Span.from_json(span) for span in value.get("spans")]
self.label = value.get("label")
class Block(object):
"""A block in a structured text"""
pass
class Heading(Text):
def __init__(self, value):
super(Block.Heading, self).__init__(value)
self.level = value.get("type")[-1]
class Paragraph(Text):
def __init__(self, value):
super(Block.Paragraph, self).__init__(value)
class ListItem(Text):
def __init__(self, value, is_ordered=False):
super(Block.ListItem, self).__init__(value)
self.is_ordered = is_ordered
class Embed(object):
def __init__(self, embed):
self.obj = embed
def get_embed(self):
return self.obj
class Image(object):
"""Block image
:param view: The Fragment.Image.View object
"""
def __init__(self, view):
self.view = view
def get_view(self):
return self.view
|
lines = open('day-19.input')
rules= {}
messages = []
parsing_rules = True
lines = enumerate(lines)
while True:
line = next(lines, None)
    if line is None:
break
line = line[1].strip()
if line == '':
parsing_rules = False
continue
if parsing_rules:
tokens = line.split(':')
rules[int(tokens[0])] = tokens[1].strip()
else:
messages.append(line)
def cart(old, new):
res = []
for i in range(len(old)):
for j in range(len(new)):
res.append(old[i] + new[j])
return res
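# Illustrative example: cart(['a', 'b'], ['c', 'd']) -> ['ac', 'ad', 'bc', 'bd']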
#print(rules)
#print(messages)
class rule:
def __init__(self, id):
self.id = id
def flatten_simple_rule(self, rule_str):
parts = [p for p in rule_str.split(' ') if p != ' ' and p != '']
res = None
for part in parts:
if res == None:
res = rule(int(part)).flatten()
else:
res = cart(res, rule(int(part)).flatten())
return res
def flatten(self):
r = rules[self.id]
if '"' in r:
return [r[1]]
elif '|' in r:
options = r.split('|')
res = []
for option in options:
res += self.flatten_simple_rule(option)
return res
else:
return self.flatten_simple_rule(r)
h = set(rule(0).flatten())
res = 0
for message in messages:
if message in h:
res += 1
print(res)
|
from .login import loginPage
from ..models import AccessAttemptAddons
from axes.models import AccessAttempt
from django.contrib import messages
from django.shortcuts import redirect
import datetime
"""
Authors: Conor Behard Roberts
Description: Converts timedelta object into a readable string
"""
def strfdelta_round(tdelta, round_period='second'):
"""timedelta to string, use for measure running time
attend period from days downto smaller period, round to minimum period
omit zero value period
"""
period_names = ('day', 'hour', 'minute', 'second', 'millisecond')
if round_period not in period_names:
raise Exception(f'round_period "{round_period}" invalid, should be one of {",".join(period_names)}')
period_seconds = (86400, 3600, 60, 1, 1 / pow(10, 3))
period_desc = ('days', 'hours', 'minutes', 'seconds', 'msecs')
round_i = period_names.index(round_period)
string = ''
remainder = tdelta.total_seconds()
for i in range(len(period_names)):
q, remainder = divmod(remainder, period_seconds[i])
if int(q) > 0:
if not len(string) == 0:
string += ' '
string += f'{q:.0f} {period_desc[i]}'
if i == round_i:
break
if i == round_i + 1:
string += f'{remainder} {period_desc[round_i]}'
break
return string
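# Illustrative examples:
#   strfdelta_round(datetime.timedelta(minutes=4, seconds=30)) -> '4 minutes 30 seconds'
#   strfdelta_round(datetime.timedelta(days=2, hours=3), 'hour') -> '2 days 3 hours'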
"""
Authors: Conor Behard Roberts
Description: When user is locked out add message and redirect to home page
"""
def lockout(request, credentials, *args, **kwargs):
try:
username = request.POST.get("username").lower()
ip_address = request.axes_ip_address
account = AccessAttempt.objects.filter(username=username).filter(ip_address=ip_address)
current_time = datetime.datetime.now()
timeout = 5 # In minutes
result = AccessAttempt.objects.raw(
'''
SELECT axes_accessattempt.id, base_accessattemptaddons.expiration_date
FROM axes_accessattempt
INNER JOIN base_accessattemptaddons
ON axes_accessattempt.id = base_accessattemptaddons.accessattempt_id
WHERE axes_accessattempt.username = %s and axes_accessattempt.ip_address = %s
''', [username, ip_address]
)[0]
# Check if the user still has to wait to login again
if (current_time < result.expiration_date):
time = result.expiration_date - current_time
time_s = strfdelta_round(time)
messages.warning(request, (f"Locked out for {time_s} due to too many login failures"))
else:
# Delete the user from the timeout model and re-request the login
account.delete()
return loginPage(request)
except IndexError:
expiration_date = current_time + datetime.timedelta(minutes=timeout)
id = AccessAttempt.objects.filter(username=username, ip_address=ip_address)[0].id
addons = AccessAttemptAddons(expiration_date=expiration_date, accessattempt_id=id)
messages.warning(request, (f"Locked out for {timeout} minutes due to too many login failures"))
addons.save()
return redirect('login')
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# ============================================================================
# Nomen - Multi-purpose rename tool
# Common core module
# Copyright (C) 2018 by Ralf Kilian
# Distributed under the MIT License (https://opensource.org/licenses/MIT)
#
# GitHub: https://github.com/urbanware-org/nomen
# GitLab: https://gitlab.com/urbanware-org/nomen
# ============================================================================
__version__ = "2.3.6"
import os
import paval as pv
import random
import re
import sys
import tempfile
from datetime import datetime as dt
def compile_regex(string, ignore_case=True, regex_syntax=False):
"""
Compile a regular expression from the given pattern string.
"""
pv.string(string, "regular expression", True, None)
if regex_syntax:
pattern = ".*" + string + ".*"
else:
spec_chars = [ "\\", ".", "^", "$", "+", "?", "{", "}", "[", "]",
"|", "(", ")" ]
for char in spec_chars:
string = string.replace(char, "\\" + char)
string = string.strip("*").strip(";")
while ("*" * 2) in string:
string = string.replace(("*" * 2), "*")
while (";" * 2) in string:
string = string.replace((";" * 2), ";")
list_string = string.split(";")
if len(list_string) > 0:
pattern = ""
for crit in list_string:
if not crit == "":
pattern += "(.*" + crit.replace("*", ".*") + ".*)|"
pattern = pattern.rstrip("|")
if pattern == "":
raise Exception("The given string does not make sense this " \
"way.")
if ignore_case:
regex = re.compile(pattern, re.IGNORECASE)
else:
regex = re.compile(pattern)
return regex
def confirm_notice():
"""
Display a notice which must be confirmed by the user to proceed.
"""
string = random_string(6, True, True, True, True)
proceed = False
notice_text = """ o o o 88
8 8 88
8 8 .oPYo. oPYo. odYo. o8 odYo. .oPYo. 88
8 db 8 .oooo8 8 '' 8' '8 8 8' '8 8 8 88
'b.PY.d' 8 8 8 8 8 8 8 8 8 8
'8 8' 'YooP8 8 8 8 8 8 8 'YooP8 88
. 8
'oooP'
Please use this tool with care to avoid data damage or loss!
There is no function to undo the changes done by this tool, so you
should be aware of what you are doing. Improper use (e.g. modifying
files inside system directories) will corrupt your system!
If you wish to proceed, type '%s' (case-sensitive, without any
quotes or spaces) and press the <Return> key. Otherwise, the process
will be canceled.""" % string
print_text_box("", notice_text)
choice = raw_input("> ")
if choice == string:
choice = "Proceeding."
proceed = True
else:
choice = "Canceled."
print "\n%s\n" % choice
return proceed
def dir_space_modifier(directory, remove_duplicate=False,
remove_leading=False, remove_trailing=False,
brackets=False, hyphens=False, punctuation=False,
ignore_symlinks=False, recursive=False, exclude=None):
"""
Modify a directory name by removing leading, trailing and duplicate
spaces or by inserting and removing spaces next to punctuation
characters.
"""
list_exclude = []
    if exclude is not None:
list_exclude = exclude.split(";")
for item in os.listdir(directory):
excluded = False
if os.path.isdir(os.path.join(directory, item)):
path = os.path.join(directory, item)
for excl in list_exclude:
if excl.lower() in path.lower():
excluded = True
break
else:
continue
if excluded:
nextdir = path
else:
if remove_duplicate:
while (" " * 2) in item:
item = item.replace((" " * 2), " ")
if hyphens:
item = item.replace("-", " - ")
while "- " in item:
item = item.replace("- ", "- ")
while " -" in item:
item = item.replace(" -", " -")
if brackets:
while "( " in item:
item = item.replace("( ", "(")
item = item.replace("(", " (")
while " )" in item:
item = item.replace(" )", ")")
item = item.replace(")", ") ").replace(") .", ").")
while "[ " in item:
item = item.replace("[ ", "[")
item = item.replace("[", " [")
while " ]" in item:
item = item.replace(" ]", "]")
item = item.replace("]", "] ").replace("] .", "].")
item = item.replace("( [", "([").replace("] )", "])")
item = item.replace("[ (", "[(").replace(") ]", ")]")
if punctuation:
item = item.replace(".", ". ")
while " ." in item:
item = item.replace(" .", ".")
item = item.replace(",", ", ")
while " ," in item:
item = item.replace(" ,", ",")
item = item.replace(":", ": ")
while " :" in item:
item = item.replace(" :", ":")
item = item.replace(";", "; ")
while " ;" in item:
item = item.replace(" ;", ";")
item = item.replace("!", "! ")
while " !" in item:
item = item.replace(" !", "!")
item = item.replace("?", "? ")
while " ?" in item:
item = item.replace(" ?", "?")
remove_leading = True
remove_trailing = True
if remove_leading:
item = item.lstrip()
if remove_trailing:
item = item.rstrip()
newpath = os.path.join(directory, item)
if remove_duplicate:
# Repeat this step after the replace actions above
while (" " * 2) in newpath:
newpath = newpath.replace((" " * 2), " ")
if not os.path.exists(newpath):
os.rename(path, newpath)
nextdir = newpath
else:
nextdir = path
if recursive:
dir_space_modifier(nextdir, remove_duplicate, remove_leading,
remove_trailing, brackets, hyphens,
punctuation, ignore_symlinks, True, exclude)
def file_exists(file_path, list_files, fs_case_sensitive):
"""
Check if a file already exists on the file system as well as in a
given list.
"""
file_path = os.path.abspath(file_path)
if os.path.exists(file_path):
file_exists = True
else:
file_exists = False
for item in list_files:
        if item[1] is None:
item[1] = ""
if fs_case_sensitive:
if file_path == item[1] or file_path == item[2]:
file_exists = True
break
else:
if file_path.lower() == item[1].lower() or \
file_path.lower() == item[2].lower():
file_exists = True
break
return file_exists
def format_timestamp(float_stamp=0):
"""
Convert a timestamp float into a readable format.
"""
return str(dt.fromtimestamp(float(str(float_stamp))))
def get_files(directory, recursive=False, ignore_case=True, regex=None,
regex_exclude=True, ignore_symlinks=False, order_by=None):
"""
Get the files and sub-directories from the given directory.
"""
pv.path(directory, "given", False, True)
directory = os.path.abspath(directory)
list_files = []
list_excluded = []
list_files, list_excluded = \
__get_files( \
directory, ignore_case, regex, regex_exclude, ignore_symlinks,
recursive, list_files, list_excluded, order_by)
    if order_by is None:
list_files.sort()
list_excluded.sort()
return list_files, list_excluded
def get_fs_case_sensitivity(directory):
"""
Determine if the file system of the given directory is case-sensitive.
"""
# This should be done with every directory that is processed, due to the
# fact, that e.g. a device containing a case-insensitive file system can
# be mounted into a directory of a case-sensitive file system.
pv.path(directory, "given", False, True)
directory = os.path.abspath(directory)
fd_temp, file_temp = tempfile.mkstemp(dir=directory)
file_name = os.path.basename(file_temp)
if os.path.exists(os.path.join(directory, file_name.upper())):
fs_case_sensitive = False
else:
fs_case_sensitive = True
os.close(fd_temp)
os.remove(file_temp)
return fs_case_sensitive
def get_invalid_chars():
"""
Return the invalid file name characters (which must or should not be
part of a file name).
"""
# This list of characters depends on the file system where the files are
# being renamed on. Due to the fact, that e.g. a device containing a
# different file system can be mounted into a directory of the local file
# system, the following characters will be handled as invalid on every
# file system.
invalid_chars = "/\\?%*:|\"<>\n\r\t"
return invalid_chars
def get_version():
"""
Return the version of this module.
"""
return __version__
def print_text_box(heading, text):
"""
Print a text message outlined with an ASCII character frame.
"""
heading = heading.strip()
if len(heading) > 72:
raise Exception("The text box heading must not be longer than 72 " \
"characters.")
if text == "":
raise Exception("The text box text must not be empty.")
text_box = "\n+" + ("-" * 76) + "+" + \
"\n|" + (" " * 76) + "|"
if not heading == "":
padding = int((72 - len(heading)) / 2)
heading = (" " * (padding + 2) + heading).ljust(76, " ")
text_box += ("\n|%s|\n|" + (" " * 76) + "|") % heading
list_text = text.split("\n")
for text in list_text:
list_words = text.split(" ")
count = 1
line = ""
for word in list_words:
if len(line + word + " ") > 73:
text_box += "\n| " + line.ljust(74, " ") + "|"
line = word + " "
else:
line = line + word + " "
count += 1
if count > len(list_words):
text_box += "\n| " + line.ljust(74, " ") + "|"
text_box += "\n|" + (" " * 76) + "|" \
"\n+" + ("-" * 76) + "+\n"
print text_box
def random_string(length, uppercase=True, lowercase=False, numbers=False,
unique=False):
"""
Generate a random string out of literals and numbers.
"""
literals = "ABCDEFGHIJLMNOPQRSTUVWXYZ"
numbers = "0123456789"
chars = ""
string = ""
if uppercase:
chars += literals
if lowercase:
chars += literals.lower()
if numbers:
chars += numbers
if len(chars) == 0:
return string
if len(chars) < length:
length = len(chars)
while len(string) < length:
rnd = random.randint(0, len(chars) - 1)
char = chars[rnd]
if unique:
if char in string:
continue
string += char
return string
def rename(list_files, reverse=False):
"""
Rename the files which have neither been excluded nor skipped.
"""
list_skipped = []
if len(list_files) > 0:
if reverse:
list_files = reversed(list_files)
for item in list_files:
if os.path.exists(item[0]):
if os.path.exists(item[2]):
list_skipped.append([item[0], item[1], item[2]])
continue
# In some cases the file will get a temporary name first and
# then its name will be changed to what it should be.
#
# This behavior is required when using file systems that are
# case-insensitive (such as FAT32 or NTFS) where e.g. the
# file "FOOBAR.txt" would overwrite the file "foobar.txt"
# inside the same directory.
                if item[1] is None or \
item[1] == "":
os.rename(item[0], item[2])
else:
os.rename(item[0], item[1])
os.rename(item[1], item[2])
if len(list_skipped) > 0:
if not list_skipped == list_files:
rename(list_skipped, reverse)
def report(report_file=None, list_header=[], list_renamed=[],
list_excluded=[], list_skipped=[], time_start=None):
"""
Write the details of the simulated rename process (simulation report)
into a file.
"""
files_total = str(len(list_renamed) + len(list_excluded) + \
len(list_skipped))
just = len(files_total)
files_renamed = str(len(list_renamed)).rjust(just, " ")
files_excluded = str(len(list_excluded)).rjust(just, " ")
files_skipped = str(len(list_skipped)).rjust(just, " ")
time_end = dt.now()
try:
time_elapsed = str(time_end - time_start)
time_start = str(time_start)
except:
raise Exception("An invalid start date was given.")
output = "\r\n" + "=" * 78 + \
"\r\nFile type: " + list_header[0] + \
"\r\n" + "-" * 78
for i in range(1, len(list_header)):
output += "\r\n" + list_header[i][0].ljust(20, " ") + \
str(list_header[i][1])
output += "\r\n" + "-" * 78 + \
"\r\nFiles renamed: " + files_renamed + \
"\r\nFiles excluded: " + files_excluded + \
"\r\nFiles skipped: " + files_skipped + \
"\r\nFiles total: " + files_total + \
"\r\n" + "-" * 78 + \
"\r\nTimestamp: " + time_start[:-7] + \
"\r\nElapsed time: " + time_elapsed + \
"\r\nNomen version: " + __version__ + \
"\r\n" + "=" * 78 + "\r\n\r\n"
if len(list_renamed) > 0:
output += "\r\n [Renamed]\r\n"
for item in list_renamed:
output += " - Old: %s\r\n" % item[0]
output += " - New: %s\r\n\r\n" % item[2]
output += "\r\n"
if len(list_excluded) > 0:
output += "\r\n [Excluded]\r\n"
for item in list_excluded:
output += " - %s\r\n" % item
output += "\r\n"
if len(list_skipped) > 0:
output += "\r\n [Skipped]\r\n"
for item in list_skipped:
output += " - %s\r\n" % item
output += "\r\n"
fh_report = open(report_file, "wb")
# Run the appropriate code for the Python framework used
if sys.version_info[0] == 2:
fh_report.write(output)
elif sys.version_info[0] > 2:
fh_report.write(output.encode(sys.getdefaultencoding()))
fh_report.close()
def __get_files(directory, ignore_case, regex, regex_exclude, ignore_symlinks,
recursive, list_content, list_excluded, order_by):
"""
Core method to get the files from the given directory and its
sub-directories.
"""
list_dirs = []
list_files = []
for item in os.listdir(directory):
path = os.path.join(directory, item)
if ignore_symlinks:
if os.path.islink(path):
continue
if os.path.isfile(path):
            if regex is None:
list_files.append(path)
else:
if regex_exclude:
if ignore_case:
if regex.match(item.lower()):
list_excluded.append(path)
else:
list_files.append(path)
else:
if regex.match(item):
list_excluded.append(path)
else:
list_files.append(path)
else:
if ignore_case:
if regex.match(item.lower()):
list_files.append(path)
else:
list_excluded.append(path)
else:
if regex.match(item):
list_files.append(path)
else:
list_excluded.append(path)
else:
list_dirs.append(path)
if len(list_files) > 0:
        if order_by is None:
list_files.sort()
else:
list_files = __set_order(list_files, order_by)
list_content.append([directory, list_files])
if recursive:
for directory in list_dirs:
list_content, list_excluded = \
__get_files(directory, ignore_case, regex, regex_exclude,
ignore_symlinks, True, list_content,
list_excluded, order_by)
return list_content, list_excluded
def __set_order(file_list, order_by):
"""
Set a certain order of the files before renaming them.
"""
list_files = []
list_temp = []
for file_name in file_list:
if "." in file_name:
file_ext = file_name.split(".")[-1]
else:
file_ext = ""
time_access = format_timestamp(os.stat(file_name).st_atime)
time_create = format_timestamp(os.stat(file_name).st_ctime)
time_modify = format_timestamp(os.stat(file_name).st_mtime)
if order_by == "accessed":
list_temp.append([time_access, file_name, file_ext])
elif order_by == "created":
list_temp.append([time_create, file_name, file_ext])
else:
list_temp.append([time_modify, file_name, file_ext])
list_temp.sort()
for item in list_temp:
list_files.append(item[1])
return list_files
# EOF
|
# -*- coding: utf-8 -*-
from locust import task,TaskSet
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
import config
from common.util import foreach,load_modules,Weight
from behavior.client import Client
# @task
def stop(self):
self.interrupt()
def add_stop(cls):
cls._stop = stop
mods = load_modules(except_file = config.EXCEPT_FILE)
# Add a stop method to every loaded task module
foreach(add_stop,mods)
class TaskWeight(Weight):
    '''
    Task weight class.
    '''
Signin = 100
Info = 100
Map = 100
Findred = 100
Getred = 100
@Client.action
class Mix(TaskSet):
tasks = TaskWeight()(mods)
# TODO: turn Client.task_set into a decorator
# Client.task_set = Mix
Client.min_wait = 3000
Client.max_wait = 5000
|
import re
from os import symlink
from os.path import join as pjoin
import os
import shutil
from .utils import (temp_working_dir, temp_dir, working_directory, eqsorted_,
cat, assert_raises)
from .test_ant_glob import makefiles
from .. import links
from ..links import (silent_makedirs, silent_unlink, silent_absolute_symlink,
silent_relative_symlink, silent_copy)
from pprint import pprint
def test_dry_run_simple():
rules = [dict(action='absolute_symlink', select=['/bin/cp', '/bin/ls'], prefix='/', target='$D'),
dict(action='absolute_symlink', select=['/usr/bin/gcc'], prefix='/usr', target='$D'),
dict(action='copy', source='/usr/bin/gcc', target='$D/foo/gcc'),
dict(action='exclude', source='/usr/bin/gcc'),
dict(action='absolute_symlink', source='/usr/bin/gcc', target='$D/gcc2'),
]
with temp_dir() as d:
# absolute `select` and `target`
env = dict(D=d)
actions = links.dry_run_links_dsl(rules, env)
assert actions == [(silent_makedirs, pjoin(d, 'bin')),
(silent_absolute_symlink, '/bin/cp', pjoin(d, 'bin/cp')),
(silent_absolute_symlink, '/bin/ls', pjoin(d, 'bin/ls')),
(silent_absolute_symlink, '/usr/bin/gcc', pjoin(d, 'bin/gcc')),
(silent_makedirs, pjoin(d, 'foo')),
(silent_copy, '/usr/bin/gcc', pjoin(d, 'foo', 'gcc'))]
# absolute `select`, relative `target`
env['D'] = 'subdir'
with working_directory('/'):
actions = links.dry_run_links_dsl(rules, env)
assert actions == [(silent_makedirs, 'subdir/bin'),
(silent_absolute_symlink, '/bin/cp', 'subdir/bin/cp'),
(silent_absolute_symlink, '/bin/ls', 'subdir/bin/ls'),
(silent_absolute_symlink, '/usr/bin/gcc', 'subdir/bin/gcc'),
(silent_makedirs, 'subdir/foo'),
(silent_copy, '/usr/bin/gcc', 'subdir/foo/gcc')]
# relative `select`, relative target
for rule in rules:
# remove / from all selects
if 'select' in rule:
rule['select'] = [x[1:] for x in rule['select']]
else:
rule['source'] = rule['source'][1:]
if 'prefix' in rule:
rule['prefix'] = rule['prefix'][1:]
with working_directory('/'):
actions = links.dry_run_links_dsl(rules, env)
assert actions == [(silent_makedirs, 'subdir/bin'),
(silent_absolute_symlink, 'bin/cp', 'subdir/bin/cp'),
(silent_absolute_symlink, 'bin/ls', 'subdir/bin/ls'),
(silent_absolute_symlink, 'usr/bin/gcc', 'subdir/bin/gcc'),
(silent_makedirs, 'subdir/foo'),
(silent_copy, 'usr/bin/gcc', 'subdir/foo/gcc')
]
# overwrite
for rule in rules:
rule['overwrite'] = True
with working_directory('/'):
actions = links.dry_run_links_dsl(rules, env)
assert actions == [(silent_makedirs, 'subdir/bin'),
(silent_unlink, 'subdir/bin/cp'),
(silent_absolute_symlink, 'bin/cp', 'subdir/bin/cp'),
(silent_unlink, 'subdir/bin/ls'),
(silent_absolute_symlink, 'bin/ls', 'subdir/bin/ls'),
(silent_unlink, 'subdir/bin/gcc'),
(silent_absolute_symlink, 'usr/bin/gcc', 'subdir/bin/gcc'),
(silent_makedirs, 'subdir/foo'),
(silent_unlink, 'subdir/foo/gcc'),
(silent_copy, 'usr/bin/gcc', 'subdir/foo/gcc')
]
def findfiles(path):
r = []
for dirname, subdirs, filenames in os.walk(path):
for filename in filenames:
r.append(pjoin(dirname, filename))
return r
def test_run_glob():
rules = [dict(action='absolute_symlink', select='**/*$SUFFIX', target='foo', prefix='')]
env = dict(SUFFIX='.txt')
with temp_working_dir() as d:
makefiles('a0/b0/c0/d0.txt a0/b0/c0/d1.txt a0/b1/c1/d0.txt a0/b.txt'.split())
links.execute_links_dsl(rules, env)
eqsorted_(['foo/a0/b.txt', 'foo/a0/b1/c1/d0.txt', 'foo/a0/b0/c0/d0.txt', 'foo/a0/b0/c0/d1.txt'],
findfiles('foo'))
shutil.rmtree('foo')
rules[0]['prefix'] = 'a0'
links.execute_links_dsl(rules, env)
eqsorted_(['foo/b.txt', 'foo/b0/c0/d0.txt', 'foo/b0/c0/d1.txt', 'foo/b1/c1/d0.txt'],
findfiles('foo'))
def test_overwrite_behaviour():
rules = [dict(action='absolute_symlink', select='**/*.txt', target='foo', prefix='')]
env = {}
with temp_working_dir() as d:
makefiles(['a0.txt', 'foo/a0.txt'])
links.execute_links_dsl(rules, env)
assert not os.path.islink('foo/a0.txt')
rules[0]['overwrite'] = True
links.execute_links_dsl(rules, env)
assert os.path.islink('foo/a0.txt')
# should also work if target *doesn't* exist...
shutil.rmtree('foo')
links.execute_links_dsl(rules, env)
assert os.path.islink('foo/a0.txt')
def test_launcher():
# we just use the /bin/cp program as a mock and check that the structure is correct
rules = [dict(action='launcher', select=['*'], target='foo', prefix='')]
with temp_working_dir() as d:
makefiles(['a', 'b', 'nonexec'])
with open('script', 'w') as f:
f.write('#!some-shebang')
os.chmod('a', 0o777)
os.chmod('b', 0o777)
os.chmod('script', 0o777)
os.symlink('a', 'c')
links.execute_links_dsl(rules, {}, launcher_program='/bin/cp')
os.chdir('foo')
assert os.path.exists('launcher')
        assert os.stat('launcher').st_mode & 0o111
assert os.readlink('a') == 'launcher'
assert os.readlink('b') == 'launcher'
assert os.readlink('c') == 'a'
assert cat('a.link') == '../a'
assert cat('b.link') == '../b'
assert cat('script') == '#!some-shebang'
assert os.path.exists('nonexec') and os.path.islink('nonexec')
def test_dirs():
rules = [dict(action='absolute_symlink', select=['*'], target='foo', prefix='')]
with temp_working_dir() as d:
makefiles(['a0/f'])
links.execute_links_dsl(rules, {})
assert not os.path.exists('foo')
rules[0]['dirs'] = True
links.execute_links_dsl(rules, {})
assert os.path.exists('foo')
assert os.path.islink('foo/a0')
|
import pprint
import re # noqa: F401
import six
class StartWorkflowRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'version': 'int',
'correlation_id': 'str',
'input': 'dict(str, object)',
'task_to_domain': 'dict(str, str)',
'workflow_def': 'WorkflowDef',
'external_input_payload_storage_path': 'str',
'priority': 'int'
}
attribute_map = {
'name': 'name',
'version': 'version',
'correlation_id': 'correlationId',
'input': 'input',
'task_to_domain': 'taskToDomain',
'workflow_def': 'workflowDef',
'external_input_payload_storage_path': 'externalInputPayloadStoragePath',
'priority': 'priority'
}
def __init__(self, name=None, version=None, correlation_id=None, input=None, task_to_domain=None, workflow_def=None, external_input_payload_storage_path=None, priority=None): # noqa: E501
"""StartWorkflowRequest - a model defined in Swagger""" # noqa: E501
self._name = None
self._version = None
self._correlation_id = None
self._input = None
self._task_to_domain = None
self._workflow_def = None
self._external_input_payload_storage_path = None
self._priority = None
self.discriminator = None
self.name = name
if version is not None:
self.version = version
if correlation_id is not None:
self.correlation_id = correlation_id
if input is not None:
self.input = input
if task_to_domain is not None:
self.task_to_domain = task_to_domain
if workflow_def is not None:
self.workflow_def = workflow_def
if external_input_payload_storage_path is not None:
self.external_input_payload_storage_path = external_input_payload_storage_path
if priority is not None:
self.priority = priority
@property
def name(self):
"""Gets the name of this StartWorkflowRequest. # noqa: E501
:return: The name of this StartWorkflowRequest. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this StartWorkflowRequest.
:param name: The name of this StartWorkflowRequest. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def version(self):
"""Gets the version of this StartWorkflowRequest. # noqa: E501
:return: The version of this StartWorkflowRequest. # noqa: E501
:rtype: int
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this StartWorkflowRequest.
:param version: The version of this StartWorkflowRequest. # noqa: E501
:type: int
"""
self._version = version
@property
def correlation_id(self):
"""Gets the correlation_id of this StartWorkflowRequest. # noqa: E501
:return: The correlation_id of this StartWorkflowRequest. # noqa: E501
:rtype: str
"""
return self._correlation_id
@correlation_id.setter
def correlation_id(self, correlation_id):
"""Sets the correlation_id of this StartWorkflowRequest.
:param correlation_id: The correlation_id of this StartWorkflowRequest. # noqa: E501
:type: str
"""
self._correlation_id = correlation_id
@property
def input(self):
"""Gets the input of this StartWorkflowRequest. # noqa: E501
:return: The input of this StartWorkflowRequest. # noqa: E501
:rtype: dict(str, object)
"""
return self._input
@input.setter
def input(self, input):
"""Sets the input of this StartWorkflowRequest.
:param input: The input of this StartWorkflowRequest. # noqa: E501
:type: dict(str, object)
"""
self._input = input
@property
def task_to_domain(self):
"""Gets the task_to_domain of this StartWorkflowRequest. # noqa: E501
:return: The task_to_domain of this StartWorkflowRequest. # noqa: E501
:rtype: dict(str, str)
"""
return self._task_to_domain
@task_to_domain.setter
def task_to_domain(self, task_to_domain):
"""Sets the task_to_domain of this StartWorkflowRequest.
:param task_to_domain: The task_to_domain of this StartWorkflowRequest. # noqa: E501
:type: dict(str, str)
"""
self._task_to_domain = task_to_domain
@property
def workflow_def(self):
"""Gets the workflow_def of this StartWorkflowRequest. # noqa: E501
:return: The workflow_def of this StartWorkflowRequest. # noqa: E501
:rtype: WorkflowDef
"""
return self._workflow_def
@workflow_def.setter
def workflow_def(self, workflow_def):
"""Sets the workflow_def of this StartWorkflowRequest.
:param workflow_def: The workflow_def of this StartWorkflowRequest. # noqa: E501
:type: WorkflowDef
"""
self._workflow_def = workflow_def
@property
def external_input_payload_storage_path(self):
"""Gets the external_input_payload_storage_path of this StartWorkflowRequest. # noqa: E501
:return: The external_input_payload_storage_path of this StartWorkflowRequest. # noqa: E501
:rtype: str
"""
return self._external_input_payload_storage_path
@external_input_payload_storage_path.setter
def external_input_payload_storage_path(self, external_input_payload_storage_path):
"""Sets the external_input_payload_storage_path of this StartWorkflowRequest.
:param external_input_payload_storage_path: The external_input_payload_storage_path of this StartWorkflowRequest. # noqa: E501
:type: str
"""
self._external_input_payload_storage_path = external_input_payload_storage_path
@property
def priority(self):
"""Gets the priority of this StartWorkflowRequest. # noqa: E501
:return: The priority of this StartWorkflowRequest. # noqa: E501
:rtype: int
"""
return self._priority
@priority.setter
def priority(self, priority):
"""Sets the priority of this StartWorkflowRequest.
:param priority: The priority of this StartWorkflowRequest. # noqa: E501
:type: int
"""
self._priority = priority
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(StartWorkflowRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, StartWorkflowRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
from flask import Flask
from flask import request
from hildebrand import Glow
app = Flask(__name__)
glow = Glow()
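# Assumption: glow.getElecCurrent exposes a dict-like payload whose 'data' entry
# holds [timestamp, consumption] rows, which is how it is indexed in Main() below.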
@app.route('/metrics')
def Main():
return(
"timestamp " + str(glow.getElecCurrent['data'][0][0]) + "\nconsumption " + str(glow.getElecCurrent['data'][0][1]))
if __name__ == "__main__":
app.run(debug = True, host = "0.0.0.0")
|
import numpy as np
from utils import *
def select_node_to_expand(tree, state_space):
state_space = np.asarray(state_space)
space_origin, space_range = state_space
n_dim = len(space_origin)
# sample a random point in the space
random_point = np.random.rand(2) * space_range[0:2]
# theta = np.random.rand() * 2 * np.pi
# random_point = np.hstack((random_point, (np.cos(theta), np.sin(theta))))
# calculate the distance from random point to all nodes, excluding time dimension
nodes = list(tree.nodes())
d = cartesian_distance(np.array(nodes)[:, 0:2], random_point)
# return the node with shortest distance
return nodes[np.argmin(d)], random_point
def sample_new_point_with_control(m_g, dt, control_function, state_generator):
# sample controls
controls = control_function()
# calculate new state
m_new = state_generator(m_g, controls, dt)
return tuple((tuple(m_new), controls))
|
#!/usr/bin/env python3
"""Exporter of snippets for snippet crawler"""
from BaseLogger import BaseLogger
from DatabaseAccessor import DatabaseAccessor
from contextlib import closing
from csv import DictWriter
from json import dump
from os import remove
from platform import node
from time import strftime
from zipfile import ZipFile, ZIP_DEFLATED
class Exporter(BaseLogger):
def __init__(self, log_level=None):
BaseLogger.__init__(self, self.__class__.__name__, log_level)
self._db_conn = DatabaseAccessor()
self._log_info("exporter start @%s", node())
self._source_set_joke = [
"http://neihanshequ.com/joke/",
"http://neihanshequ.com/bar/1/",
"http://neihanshequ.com/bar/11/",
"http://neihanshequ.com/bar/76/",
"http://neihanshequ.com/bar/80/",
"http://neihanshequ.com/bar/82/",
"http://neihanshequ.com/bar/59/",
"http://neihanshequ.com/bar/5/",
]
self._source_set_art = [
"http://neihanshequ.com/bar/25/",
"http://neihanshequ.com/bar/26/",
"http://neihanshequ.com/bar/3/",
"http://neihanshequ.com/bar/53/",
"http://neihanshequ.com/bar/46/",
"http://neihanshequ.com/bar/49/",
"http://neihanshequ.com/bar/69/",
"http://neihanshequ.com/bar/51/",
"http://neihanshequ.com/bar/60/",
]
def process(self):
filelist = []
data = self._db_conn.snippet_read()
self._log_info("load all snippet data from database")
filelist.append(self._save_as_json(data))
filelist.append(self._save_as_csv(data))
data_joke = self._select_data_column(data, self._source_set_joke)
filelist.append(self._save_as_csv(data_joke, "snippet_joke.csv"))
data_art = self._select_data_column(data, self._source_set_art)
filelist.append(self._save_as_csv(data_art, "snippet_art.csv"))
self._archive_into_zipfile(filelist)
def _select_data_column(self, data_raw, source_set):
data_new = []
for item_raw in data_raw:
if item_raw["source"] not in source_set:
continue
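            # Emit one row per comment; max(1, ...) ensures a snippet with no
            # comments still yields a single row (its comment fields stay None).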
for index in range(max(1, len(item_raw.get("comments", [])))):
item_new = {
"count_digg": item_raw["count"]["digg"],
"count_bury": item_raw["count"]["bury"],
"count_favorite": item_raw["count"]["favorite"],
"count_comment": item_raw["count"]["comment"],
"count_diggcomm": None,
"text": item_raw["content"],
"text_comment": None,
"source": item_raw["source_name"],
}
if "comments" in item_raw:
item_new["text_comment"] = item_raw["comments"][index]
item_new["count_diggcomm"] = item_raw["count"]["commdigg"][index]
data_new.append(item_new)
return data_new
def _save_as_json(self, data, filename="snippet.json"):
with open(filename, 'w') as jsonfile:
for item in data:
dump(item, jsonfile, sort_keys=True)
jsonfile.write("\n")
self._log_info("save %d items as json file: %s", len(data), filename)
return filename
def _save_as_csv(self, data, filename="snippet.csv"):
fields = set()
for item in data:
fields = fields.union(set(item.keys()))
with open(filename, 'w', encoding='utf8', newline='') as csvfile:
writer = DictWriter(csvfile, extrasaction='ignore', dialect='excel', fieldnames=sorted(fields, reverse=False))
writer.writeheader()
for item in data:
writer.writerow(item)
self._log_info("save %d items as csv file: %s", len(data), filename)
return filename
def _archive_into_zipfile(self, filelist):
zipname = "snippet_{}.zip".format(strftime("%Y-%m-%d_%H-%M-%S"))
with ZipFile(zipname, 'w', ZIP_DEFLATED) as zip:
for filename in filelist:
zip.write(filename)
remove(filename)
self._log_info("archive exported files into %s", zipname)
def close(self):
self._db_conn.close()
self._log_info("exporter exit")
self._close_logger()
def main():
with closing(Exporter()) as exporter:
exporter.process()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# Timeline of game releases
from common import get_parsed_two_column_wikitable
def is_match_table_func(table) -> bool:
return 'TIMELINE OF RELEASE YEARS' in table.caption.text.strip().upper()
url = 'https://en.wikipedia.org/wiki/The_Elder_Scrolls'
for year, name in get_parsed_two_column_wikitable(url, is_match_table_func):
print(f'{year}: {name}')
# 1994: The Elder Scrolls: Arena
# 1996: The Elder Scrolls II: Daggerfall
# 1997: An Elder Scrolls Legend: Battlespire
# 1998: The Elder Scrolls Adventures: Redguard
# 2002: The Elder Scrolls III: Morrowind
# 2002: The Elder Scrolls III: Tribunal
# 2003: The Elder Scrolls III: Bloodmoon
# 2003: The Elder Scrolls Travels: Stormhold
# 2004: The Elder Scrolls Travels: Dawnstar
# 2004: The Elder Scrolls Travels: Shadowkey
# 2006: The Elder Scrolls IV: Oblivion
# 2006: The Elder Scrolls IV: Knights of the Nine
# 2007: The Elder Scrolls IV: Shivering Isles
# 2011: The Elder Scrolls V: Skyrim
# 2012: The Elder Scrolls V: Skyrim – Dawnguard
# 2012: The Elder Scrolls V: Skyrim – Hearthfire
# 2012: The Elder Scrolls V: Skyrim – Dragonborn
# 2014: The Elder Scrolls Online
# 2016: The Elder Scrolls V: Skyrim – Special Edition
# 2017: The Elder Scrolls: Legends
# 2017: The Elder Scrolls: Skyrim - VR
# 2017: The Elder Scrolls Online - Morrowind
# 2018: The Elder Scrolls Online - Summerset
# 2019: The Elder Scrolls: Blades
# 2019: The Elder Scrolls Online - Elsweyr
# TBA: The Elder Scrolls VI
|
import unittest
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
from tests.constants import INPUT_DATA_DIR
from pm4py.log.importer import csv as csv_importer
from pm4py.log.importer.utils import df_filtering
from pm4py.log import transform
import time
class DataframePrefilteringTest(unittest.TestCase):
def test_prefiltering_dataframe(self):
inputLog = os.path.join(INPUT_DATA_DIR, "running-example.csv")
dataframe = csv_importer.import_dataframe_from_path_wo_timeconversion(inputLog, sep=',')
dataframe = df_filtering.filter_df_on_activities(dataframe, activity_key="concept:name")
dataframe = df_filtering.filter_df_on_ncases(dataframe, case_id_glue="case:concept:name")
dataframe = df_filtering.filter_df_on_case_length(dataframe, case_id_glue="case:concept:name")
dataframe = csv_importer.convert_timestamp_columns_in_df(dataframe)
dataframe = dataframe.sort_values('time:timestamp')
eventLog = csv_importer.convert_dataframe_to_event_log(dataframe)
traceLog = transform.transform_event_log_to_trace_log(eventLog)
if __name__ == "__main__":
unittest.main()
|
########################################
# CS/CNS/EE 155 2017
# Problem Set 5
#
# Author: Avishek Dutta
# Description: Set 5
########################################
class Utility:
'''
Utility for the problem files.
'''
    def __init__(self):
pass
@staticmethod
def load_sequence(n):
'''
Load the file 'sequence_data<n>.txt' for a given n.
Arguments:
n: Sequence index.
Returns:
A: The transition matrix.
O: The observation matrix.
seqs: Input sequences.
'''
A = []
O = []
seqs = []
# For each file:
with open("data/sequence_data{}.txt".format(n)) as f:
# Read the parameters.
L, D = [int(x) for x in f.readline().strip().split('\t')]
# Read the transition matrix.
for i in range(L):
A.append([float(x) for x in f.readline().strip().split('\t')])
# Read the observation matrix.
for i in range(L):
O.append([float(x) for x in f.readline().strip().split('\t')])
# The rest of the file consists of sequences.
while True:
seq = f.readline().strip()
if seq == '':
break
seqs.append([int(x) for x in seq])
return A, O, seqs
@staticmethod
def load_ron():
'''
Loads the file 'ron.txt'.
Returns:
            moods: Sequences of states, i.e. a list of lists.
Each sequence represents half a year of data.
mood_map: A hash map that maps each state to an integer.
genres: Sequences of observations, i.e. a list of lists.
Each sequence represents half a year of data.
genre_map: A hash map that maps each observation to an integer.
'''
moods = []
mood_map = {}
genres = []
genre_map = {}
mood_counter = 0
genre_counter = 0
with open("data/ron.txt") as f:
mood_seq = []
genre_seq = []
while True:
line = f.readline().strip()
if line == '' or line == '-':
# A half year has passed. Add the current sequence to
# the list of sequences.
moods.append(mood_seq)
genres.append(genre_seq)
# Start new sequences.
mood_seq = []
genre_seq = []
if line == '':
break
elif line == '-':
continue
mood, genre = line.split()
# Add new moods to the mood state hash map.
if mood not in mood_map:
mood_map[mood] = mood_counter
mood_counter += 1
mood_seq.append(mood_map[mood])
# Add new genres to the genre observation hash map.
if genre not in genre_map:
genre_map[genre] = genre_counter
genre_counter += 1
# Convert the genre into an integer.
genre_seq.append(genre_map[genre])
return moods, mood_map, genres, genre_map
@staticmethod
def load_ron_hidden():
'''
Loads the file 'ron.txt' and hides the states.
Returns:
genres: The observations.
genre_map: A hash map that maps each observation to an integer.
'''
moods, mood_map, genres, genre_map = Utility.load_ron()
return genres, genre_map
|
# coding: utf-8
"""*****************************************************************************
* Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries.
*
* Subject to your compliance with these terms, you may use Microchip software
* and any derivatives exclusively with Microchip products. It is your
* responsibility to comply with third party license terms applicable to your
* use of third party software (including open source software) that may
* accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*****************************************************************************"""
################################################################################
#### Global Variables ####
################################################################################
global mpuHeaderFile1
global mpuHeaderFile2
global mpuSourceFile
global mpuSystemInitFile
global mpuSystemDefFile
global mpuRegions
global mpuSettings
global mpuSetUpLogicList
global mpuinterruptVector
global mpuinterruptHandlerLock
################################################################################
#### Business Logic ####
################################################################################
def mpuSetUpLogic(mpuSym, event):
global mpuSettings
if event["value"] in mpuSettings:
SymID = mpuSym.getID()
mpuSym.clearValue()
if "_Type" in str(SymID):
mpuSym.setSelectedKey(str(mpuSettings[event["value"]][0]), 2)
if "_Access" in str(SymID):
mpuSym.setSelectedKey(str(mpuSettings[event["value"]][1]), 2)
if "_Execute" in str(SymID):
mpuSym.setValue(bool(mpuSettings[event["value"]][2]),2)
if "_Share" in str(SymID):
mpuSym.setValue(bool(mpuSettings[event["value"]][3]),2)
if "_Address" in str(SymID):
hex_str = str(mpuSettings[event["value"]][4])
hex_int = int(hex_str, 0)
mpuSym.setValue(hex_int,2)
if "_Size" in str(SymID):
mpuSym.setSelectedKey(str(mpuSettings[event["value"]][5]), 2)
def enableFileGen(coreMPUMenu, event):
if(event["value"]==True):
mpuHeaderFile1.setEnabled(True)
mpuHeaderFile2.setEnabled(True)
mpuSourceFile.setEnabled(True)
mpuSystemDefFile.setEnabled(True)
else:
mpuHeaderFile1.setEnabled(False)
mpuHeaderFile2.setEnabled(False)
mpuSourceFile.setEnabled(False)
mpuSystemDefFile.setEnabled(False)
def storeLength(symbol, event):
symObj=event["symbol"]
key=symObj.getSelectedKey()
symbol.setValue(key,2)
def enableMenu(menu, event):
menu.setVisible(event["value"])
def mpuinterruptControl(symbol, event):
Database.clearSymbolValue("core", mpuinterruptVector)
Database.clearSymbolValue("core", mpuinterruptHandlerLock)
if (event["value"] == True):
Database.setSymbolValue("core", mpuinterruptVector, True, 2)
Database.setSymbolValue("core", mpuinterruptHandlerLock, True, 2)
else:
Database.setSymbolValue("core", mpuinterruptVector, False, 2)
Database.setSymbolValue("core", mpuinterruptHandlerLock, False, 2)
def mpuSetUp(conf, event):
if event["value"]:
for i in range (0, len(mpuSetUpLogicList)):
Database.setSymbolValue("core", "MPU_Region_" + str(i) + "_Enable", True, 2)
Database.setSymbolValue("core", "MPU_Region_Name" + str(i), mpuSetUpLogicList[i], 2)
else:
for i in range (0, len(mpuSetUpLogicList)):
Database.setSymbolValue("core", "MPU_Region_" + str(i) + "_Enable", False, 2)
################################################################################
#### Component ####
################################################################################
mpuRegions, mpuSettings, mpuSetUpLogicList = setMPUDefaultSettings()
mpuMenu = coreComponent.createMenuSymbol("MPU_MENU", cortexMenu)
mpuMenu.setLabel("MPU")
coreUseMPU = coreComponent.createBooleanSymbol("CoreUseMPU", mpuMenu)
coreUseMPU.setLabel("Enable MPU?")
mpuConfMenu = coreComponent.createMenuSymbol("MPU_MENU_CONF", mpuMenu)
mpuConfMenu.setLabel("MPU Configuration")
mpuConfMenu.setDependencies(enableMenu, ["CoreUseMPU"])
mpuConfMenu.setVisible(False)
mpuFileGen = coreComponent.createBooleanSymbol("MPU_BOOL_0", coreUseMPU)
mpuFileGen.setLabel("MPU File Generation")
mpuFileGen.setDependencies(enableFileGen, ["CoreUseMPU"])
mpuFileGen.setVisible(False)
mpuNumRegions= coreComponent.createIntegerSymbol("MPU_NUMBER_REGIONS", mpuConfMenu)
mpuNumRegions.setVisible(False)
mpuNumRegions.setDefaultValue(mpuRegions)
coreMPUHFNMIENA = coreComponent.createBooleanSymbol("CoreMPU_HFNMIENA", mpuConfMenu)
coreMPUHFNMIENA.setLabel("HFNMIENA")
coreMPUHFNMIENA.setDescription("Enables MPU during HardFault, NMI, or when FAULTMASK is set")
coreMPUHFNMIENA.setDefaultValue(False)
coreUseMPUPRIVDEFENA = coreComponent.createBooleanSymbol("CoreMPU_PRIVDEFENA", mpuConfMenu)
coreUseMPUPRIVDEFENA.setLabel("PRIVDEFENA")
coreUseMPUPRIVDEFENA.setDefaultValue(True)
coreUseMPUPRIVDEFENA.setDescription("Enables privileged software access to the default memory map")
coreUseDefault = coreComponent.createBooleanSymbol("CoreMPU_DEFAULT", mpuConfMenu)
coreUseDefault.setLabel("Use Recommended settings")
coreUseDefault.setDefaultValue(False)
coreUseDefault.setDescription("Sets up recommended settings for the different peripheral")
for i in range(0,mpuRegions):
coreMPURegEnable = coreComponent.createBooleanSymbol(("MPU_Region_" + str(i) + "_Enable"), mpuConfMenu)
coreMPURegEnable.setLabel("Enable MPU Region" + str(i))
coreMPURegMenu = coreComponent.createMenuSymbol("MPU_MENU_" + str(i), coreMPURegEnable)
coreMPURegMenu.setLabel("MPU Region " + str(i) + " Settings")
coreMPURegMenu.setDescription("Configuration for MPU Region"+ str(i))
coreMPURegMenu.setDependencies(enableMenu, ["MPU_Region_" + str(i) + "_Enable"])
coreMPURegMenu.setVisible(False)
coreMPURegNameOptions = coreComponent.createComboSymbol(("MPU_Region_Name" + str(i) +"_Options"), coreMPURegMenu, mpuSettings.keys())
coreMPURegNameOptions.setLabel("Region Name Options")
coreMPURegNameOptions.setVisible(False)
coreMPURegName = coreComponent.createStringSymbol(("MPU_Region_Name" + str(i)), coreMPURegMenu)
coreMPURegName.setLabel("Region Name")
# Default value is set later to trigger business logic for the first time
coreMPURegAddress = coreComponent.createHexSymbol(("MPU_Region_" + str(i) + "_Address"), coreMPURegMenu)
coreMPURegAddress.setLabel("Base Address")
coreMPURegAddress.setDependencies(mpuSetUpLogic, ["MPU_Region_Name" + str(i)])
coreMPURegSize = coreComponent.createKeyValueSetSymbol(("MPU_Region_" + str(i) + "_Size"), coreMPURegMenu)
coreMPURegSize.setLabel("Size")
coreMPURegSize.setOutputMode("Value")
coreMPURegSize.setDisplayMode("Description")
cpuArch = Database.getSymbolValue("core","CoreArchitecture")
if (cpuArch != "CORTEX-M0PLUS"):
coreMPURegSize.addKey("32B", str(4) , "32 Bytes" )
coreMPURegSize.addKey("64B", str(5) , "64 bytes" )
coreMPURegSize.addKey("128B", str(6) , "128 bytes" )
coreMPURegSize.addKey("256B", str(7) , "256 bytes" )
coreMPURegSize.addKey("512B", str(8) , "512 bytes" )
coreMPURegSize.addKey("1KB", str(9) , "1 KB" )
coreMPURegSize.addKey("2KB", str(10) , "2 KB" )
coreMPURegSize.addKey("4KB", str(11) , "4 KB" )
coreMPURegSize.addKey("8KB", str(12) , "8 KB" )
coreMPURegSize.addKey("16KB", str(13) , "16 KB" )
coreMPURegSize.addKey("32KB", str(14) , "32 KB" )
coreMPURegSize.addKey("64KB", str(15) , "64 KB" )
coreMPURegSize.addKey("128KB", str(16) , "128 KB" )
coreMPURegSize.addKey("256KB", str(17) , "256 KB" )
coreMPURegSize.addKey("512KB", str(18) , "512 KB" )
coreMPURegSize.addKey("1MB", str(19) , "1 MB" )
coreMPURegSize.addKey("2MB", str(20) , "2 MB" )
coreMPURegSize.addKey("4MB", str(21) , "4 MB" )
coreMPURegSize.addKey("8MB", str(22) , "8 MB" )
coreMPURegSize.addKey("16MB", str(23) , "16 MB" )
coreMPURegSize.addKey("32MB", str(24) , "32 MB" )
coreMPURegSize.addKey("64MB", str(25) , "64 MB" )
coreMPURegSize.addKey("128MB", str(26) , "128 MB" )
coreMPURegSize.addKey("256MB", str(27) , "256 MB" )
coreMPURegSize.addKey("512MB", str(28) , "512 MB" )
coreMPURegSize.addKey("1GB", str(29) , "1 GB" )
coreMPURegSize.addKey("2GB", str(30) , "2 GB" )
coreMPURegSize.addKey("4GB", str(31) , "4 GB" )
coreMPURegSize.setDependencies(mpuSetUpLogic, ["MPU_Region_Name" + str(i)])
coreMPURegLength = coreComponent.createStringSymbol(("MPU_Region_" + str(i)) + "_Length", coreMPURegMenu)
coreMPURegLength.setLabel("Region Length")
coreMPURegLength.setVisible(False)
coreMPURegLength.setDependencies(storeLength, ["MPU_Region_" + str(i) + "_Size"])
coreMPURegType = coreComponent.createKeyValueSetSymbol(("MPU_Region_" + str(i) + "_Type"), coreMPURegMenu)
coreMPURegType.setLabel("Memory Type and Cache policy")
coreMPURegType.setOutputMode("Key")
coreMPURegType.setDisplayMode("Description")
if (cpuArch == "CORTEX-M0PLUS"):
coreMPURegType.addKey("MPU_ATTR_STRONGLY_ORDERED", str(0) , "Strongly-Ordered Memory" )
coreMPURegType.addKey("MPU_ATTR_DEVICE", str(1) , "Device Memory" )
coreMPURegType.addKey("MPU_ATTR_NORMAL_WT", str(2) , "Normal memory, Write-through cache" )
coreMPURegType.addKey("MPU_ATTR_NORMAL_WB", str(3) , "Normal memory, Write-back cache" )
else:
coreMPURegType.addKey("MPU_ATTR_STRONGLY_ORDERED", str(0) , "Strongly-Ordered Memory" )
coreMPURegType.addKey("MPU_ATTR_DEVICE", str(1) , "Device Memory" )
coreMPURegType.addKey("MPU_ATTR_NORMAL_WT", str(2) , "Normal memory, Write-through cache" )
coreMPURegType.addKey("MPU_ATTR_NORMAL_WB", str(3) , "Normal memory, Write-back cache" )
coreMPURegType.addKey("MPU_ATTR_NORMAL_WB_WA", str(4) , "Normal memory, Write-back and write-allocate cache" )
coreMPURegType.addKey("MPU_ATTR_NORMAL", str(5) , "Normal memory, Non-cacheable" )
coreMPURegType.setDependencies(mpuSetUpLogic, ["MPU_Region_Name" + str(i)])
coreMPURegAccess = coreComponent.createKeyValueSetSymbol(("MPU_Region_" + str(i) + "_Access"), coreMPURegMenu)
coreMPURegAccess.setLabel("Data Access Permission")
coreMPURegAccess.setOutputMode("Key")
coreMPURegAccess.setDisplayMode("Description")
coreMPURegAccess.addKey("MPU_RASR_AP_NOACCESS_Val", str(0) , "User: No Access, Privileged: No Access" )
coreMPURegAccess.addKey("MPU_RASR_AP_NOACCESS_PRIV_READWRITE_Val", str(1) , "User: No Access, Privileged: Read/Write" )
coreMPURegAccess.addKey("MPU_RASR_AP_READONLY_PRIV_READWRITE_Val", str(2) , "User: Read only, Privileged: Read/Write" )
coreMPURegAccess.addKey("MPU_RASR_AP_READWRITE_Val", str(3) , "User: Read/Write, Privileged: Read/Write" )
coreMPURegAccess.addKey("MPU_RASR_AP_NOACCESS_PRIV_READONLY_Val", str(5) , "User: No Access, Privileged: Read only" )
coreMPURegAccess.addKey("MPU_RASR_AP_READONLY_Val", str(7) , "User: Read only, Privileged: Read only" )
coreMPURegAccess.setDependencies(mpuSetUpLogic, ["MPU_Region_Name" + str(i)])
coreMPURegExecute = coreComponent.createBooleanSymbol(("MPU_Region_" + str(i) + "_Execute"), coreMPURegMenu)
coreMPURegExecute.setLabel("Instruction Access Permission")
coreMPURegExecute.setDefaultValue(False)
coreMPURegExecute.setDependencies(mpuSetUpLogic, ["MPU_Region_Name" + str(i)])
coreMPURegShare= coreComponent.createBooleanSymbol(("MPU_Region_" + str(i) + "_Share" ), coreMPURegMenu)
coreMPURegShare.setLabel("Shareable Attribute")
coreMPURegShare.setDefaultValue(False)
coreMPURegShare.setDependencies(mpuSetUpLogic, ["MPU_Region_Name" + str(i)])
# Setup Peripheral Interrupt in Interrupt manager
mpuPeripId = Interrupt.getInterruptIndex("MemoryManagement")
mpuinterruptVector = "NVIC_" + str(mpuPeripId) + "_ENABLE"
mpuinterruptHandlerLock = "NVIC_" + str(mpuPeripId) + "_HANDLER_LOCK"
# NVIC Dynamic settings
MPU_interruptControl = coreComponent.createBooleanSymbol("NVIC_MPU_ENABLE", coreUseMPU)
MPU_interruptControl.setDependencies(mpuinterruptControl, ["CoreUseMPU"])
MPU_interruptControl.setVisible(False)
coreUseDefaultTrigger = coreComponent.createBooleanSymbol("CoreMPU_DEFAULT_TRIGGER", mpuConfMenu)
coreUseDefaultTrigger.setDefaultValue(False)
coreUseDefaultTrigger.setVisible(False)
coreUseDefaultTrigger.setDependencies(mpuSetUp, ["CoreMPU_DEFAULT"])
############################################################################
#### Code Generation ####
############################################################################
configName = Variables.get("__CONFIGURATION_NAME")
mpuHeaderFile1 = coreComponent.createFileSymbol("PLIB_MPU_H", None)
mpuHeaderFile1.setMarkup(True)
mpuHeaderFile1.setSourcePath("../peripheral/mpu/templates/plib_mpu.h.ftl")
mpuHeaderFile1.setOutputName("plib_mpu.h")
mpuHeaderFile1.setDestPath("/peripheral/mpu/")
mpuHeaderFile1.setProjectPath("config/" + configName + "/peripheral/mpu/")
mpuHeaderFile1.setType("HEADER")
mpuHeaderFile1.setOverwrite(True)
mpuHeaderFile1.setEnabled(False)
mpuHeaderFile2 = coreComponent.createFileSymbol("PLIB_MPU_LOCAL_H", None)
mpuHeaderFile2.setMarkup(True)
mpuHeaderFile2.setSourcePath("../peripheral/mpu/templates/plib_mpu_local.h.ftl")
mpuHeaderFile2.setOutputName("plib_mpu_local.h")
mpuHeaderFile2.setDestPath("/peripheral/mpu/")
mpuHeaderFile2.setProjectPath("config/" + configName + "/peripheral/mpu/")
mpuHeaderFile2.setType("HEADER")
mpuHeaderFile2.setOverwrite(True)
mpuHeaderFile2.setEnabled(False)
mpuSourceFile = coreComponent.createFileSymbol("PLIB_MPU_C", None)
mpuSourceFile.setMarkup(True)
mpuSourceFile.setSourcePath("../peripheral/mpu/templates/plib_mpu.c.ftl")
mpuSourceFile.setOutputName("plib_mpu.c")
mpuSourceFile.setDestPath("/peripheral/mpu/")
mpuSourceFile.setProjectPath("config/" + configName + "/peripheral/mpu/")
mpuSourceFile.setType("SOURCE")
mpuSourceFile.setOverwrite(True)
mpuSourceFile.setEnabled(False)
mpuSystemDefFile = coreComponent.createFileSymbol("MPU_SYSTEM_DEFINITIONS_H", None)
mpuSystemDefFile.setType("STRING")
mpuSystemDefFile.setOutputName("core.LIST_SYSTEM_DEFINITIONS_H_INCLUDES")
mpuSystemDefFile.setSourcePath("../peripheral/mpu/templates/system/definitions.h.ftl")
mpuSystemDefFile.setMarkup(True)
mpuSystemDefFile.setEnabled(False)
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
from string import ascii_uppercase as up
def solve(passwd):
if len(passwd) < 2: return 'IMPOSSIBLE'
for i in passwd:
if passwd.count(i) > 1: return up
sol = passwd[::-1]
for i in up:
if i not in sol: sol+=i
return sol
for case in range(int(input())):
plen, passwd = int(input()), str(raw_input().strip())
print 'Case #%d: %s' % (case+1, solve(passwd))
|
a = [1,2,3,4]
print a
|
######################################################################################################################
# This example is a simple ORM implementation; the important parts are commented.
class Field(object):
def __init__(self, name, type):
print('Field name:',name)
self.name = name
self.type = type
def __str__(self):
return '<%s:%s>' % (self.__class__.__name__, self.name)
class IntegerField(Field):
def __init__(self, name, type='int(20)'):
super().__init__(name, type)
class StringField(Field):
def __init__(self, name, type='varchar(20)'):
super().__init__(name, type)
class SchoolMetaClass(type):
    '''
    The first parameter is the metaclass object, the second is the name of the class
    being created, the third is the tuple of its base classes, and the fourth
    (important) one is the dict of the class's attributes and methods.
    __new__ is a special method called before __init__: __new__ creates and returns
    the object, while __init__ only initializes it with the passed-in arguments.
    You rarely need __new__ unless you want to control object creation. Here the
    object being created is a class, and we want to customize it, so we override __new__.
    '''
def __new__(cls, name, bases, attrs):
        # Skip the BaseSchool class itself; do not modify it
if name == 'BaseSchool':
return type.__new__(cls, name, bases, attrs)
mapping = dict()
        print('SchoolMetaClass attrs:\n', attrs)
        # key is the attribute name (id, name, email, ...); value is the Field object the attribute was initialized with
for key, value in attrs.items():
if isinstance(value, Field):
mapping[key] = value
        # Remove the class attributes so they do not clash with instance attributes
for key in mapping.keys():
attrs.pop(key)
        attrs['__mapping__'] = mapping # mapping from attribute names to Field objects
        attrs['__table__'] = name # table name
print('mapping:',mapping)
return type.__new__(cls, name, bases, attrs)
class BaseSchool(dict,metaclass=SchoolMetaClass):
def __init__(self, **keywords):
        print('BaseSchool keyword arguments:', keywords)
        # BaseSchool inherits from dict, so just forward the keyword arguments to dict
super().__init__(**keywords)
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, value):
self[key] = value
def save(self):
        # column names to insert
        fields = []
        # values to insert
args = []
for key, value in self.__mapping__.items():
print('key:{}--value:{}'.format(key, type(value)))
            # Take the value from __mapping__ (an instance of IntegerField/StringField etc.) and use its name attribute, i.e. namexx, idxx, ...
fields.append(value.name)
            # Look up the value by attribute name; the name/value pairs come from the School(id='12345', name='Michael', email='test@orm.org', password='my-pwd') call below. Since BaseSchool is a dict, this is essentially a dict lookup.
args.append(getattr(self, key, 'None'))
sql = 'insert into %s (%s) value (%s)' % (self.__table__, ','.join(fields), ','.join(args))
        print('SQL statement: %s' % sql)
        print('fields: {} --- args: {}'.format(fields, args))
class School(BaseSchool):
    # The argument is the column name; the name on the left is the class attribute
name = StringField('namexx')
id = IntegerField('idxx')
email = StringField('emailxxx')
password = StringField('agexx')
# Test:
# The keyword names must match the School attribute names above, because save() looks the values up by those attribute names
school = School(id='12345', name='Michael', email='test@orm.org', password='my-pwd')
school.save()
print(school,type(School))
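# For reference, save() above prints an SQL string roughly like:
#   insert into School (namexx,idxx,emailxxx,agexx) value (Michael,12345,test@orm.org,my-pwd)
# (the values are joined unquoted by the simple ','.join; a real ORM would quote and escape them)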
###############################################################################################################
class UpperClass(type):
def __new__(cls, clsname, bases, clsdict):
upper_dict = {}
for key, val in clsdict.items():
if key.startswith('_'):
upper_dict[key] = val
else:
upper_dict[key.upper()] = val
return type.__new__(cls, clsname, bases, upper_dict)
class D(metaclass=UpperClass):
a = 'c'
d = 'e'
print("*"*60)
print('Test 2:', D.A)
###############################################################################################################
# This example shows how to use a metaclass to customize (create) a class
def add(self,value):
self.append(value)
class MHMetaClass(type):
    # The first parameter is the metaclass object, the second is the class name being created, the third is the tuple of base classes, and the fourth (important) one is the dict of class attributes and methods (note: class attributes and methods, not instance ones; see Liao Xuefeng's earlier tutorial for the distinction)
def __new__(cls,name,bases,attrs):
print("通过元类来创建一个类:%s,%s,%s,%s" % (cls,name,bases,attrs))
attrs['add'] = add
return type.__new__(cls,name,bases,attrs)
class MHList(list,metaclass=MHMetaClass):
pass
l = MHList()
l.append(1)
l.add(2)
print(l)
|
from typing import Dict, Any, Union, List
import sys
import math
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from torch import nn
from deperceiver.datamodules import get_coco_api_from_dataset
from deperceiver.metrics.coco_eval import CocoEvaluator
from deperceiver.util.misc import NestedTensor, nested_tensor_from_tensor_list, interpolate
from .postprocess import PostProcess
DETRInput = Union[List, torch.Tensor, NestedTensor]
class DETR(pl.LightningModule):
def __init__(
self,
backbone: nn.Module,
transformer: nn.Module,
criterion: nn.Module,
num_classes: int,
num_queries: int,
args: Any,
aux_loss: bool = False,
) -> None:
super().__init__()
self.num_queries = num_queries
self.transformer = transformer
hidden_dim = transformer.d_model
self.class_embed = nn.Linear(hidden_dim, num_classes + 1)
self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
self.query_embed = nn.Embedding(num_queries, hidden_dim)
self.input_proj = nn.Conv2d(backbone.num_channels, hidden_dim, kernel_size=1)
self.backbone = backbone
self.aux_loss = aux_loss
self.args = args
self.criterion = criterion
self.postprocess = PostProcess()
def forward(self, samples: DETRInput) -> Dict[str, Any]:
if isinstance(samples, (list, torch.Tensor)):
samples = nested_tensor_from_tensor_list(samples)
features, pos = self.backbone(samples)
src, mask = features[-1].decompose()
assert mask is not None
projected_src = self.input_proj(src)
hs = self.transformer(projected_src, mask, self.query_embed.weight, pos[-1])[0]
outputs_class = self.class_embed(hs)
outputs_coord = self.bbox_embed(hs).sigmoid()
out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]}
if self.aux_loss:
out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord)
return out
@torch.jit.unused
def _set_aux_loss(self, outputs_class, outputs_coord):
# Make torchscript happy
return [{'pred_logits': a, 'pred_boxes': b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
def _step(self, batch, batch_idx, phase='train'):
samples, targets = batch
outputs = self(samples)
loss_dict = self.criterion(outputs, targets)
weight_dict = self.criterion.weight_dict
losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
loss_dict_unscaled = {f'{k}_unscaled': v for k, v in loss_dict.items()}
loss_dict_scaled = {k: v * weight_dict[k] for k, v in loss_dict.items() if k in weight_dict}
losses_scaled = sum(loss_dict_scaled.values())
loss_value = losses_scaled.item()
# Append prefix to loss_dicts
loss_dict_unscaled = {f'{phase}/{k}': v for k, v in loss_dict_unscaled.items()}
loss_dict_scaled = {f'{phase}/{k}': v for k, v in loss_dict_scaled.items()}
self.log_dict(loss_dict_unscaled)
self.log_dict(loss_dict_scaled)
self.log(f'{phase}/loss', loss_value)
return losses, loss_value, outputs
def training_step(self, batch, batch_idx):
losses, loss_value, _ = self._step(batch, batch_idx)
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
sys.exit(1)
return losses
def on_validation_epoch_start(self) -> None:
base_ds = get_coco_api_from_dataset(self.trainer.datamodule.dataset_val)
self.evaluator = CocoEvaluator(base_ds, ('bbox',))
def validation_step(self, batch, batch_idx):
samples, targets = batch
losses, loss_value, outputs = self._step(batch, batch_idx, phase='val')
orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)
results = self.postprocess(outputs, orig_target_sizes)
res = {target['image_id'].item(): output for target, output in zip(targets, results)}
self.evaluator.update(res)
def on_validation_epoch_end(self) -> None:
self.evaluator.synchronize_between_processes()
self.evaluator.accumulate()
self.evaluator.summarize()
stats = self.evaluator.coco_eval['bbox'].stats
self.log('val/ap', stats[0])
self.log('val/ap50', stats[1])
self.log('val/ap75', stats[2])
self.log('val/ap_s', stats[3])
self.log('val/ap_m', stats[4])
self.log('val/ap_l', stats[5])
def configure_optimizers(self) -> torch.optim.Optimizer:
param_dicts = [
{"params": [p for n, p in self.named_parameters() if 'backbone' not in n and p.requires_grad]},
{
"params": [p for n, p in self.named_parameters() if 'backbone' in n and p.requires_grad],
"lr": self.args.lr_backbone,
}
]
self.optimizer = torch.optim.AdamW(param_dicts, lr=self.args.lr, weight_decay=self.args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=self.args.lr_drop, verbose=True)
return [self.optimizer], [{"scheduler": lr_scheduler, "interval": "epoch"}]
class MLP(pl.LightningModule):
def __init__(
self,
input_dim: int,
hidden_dim: int,
output_dim: int,
num_layers: int
) -> None:
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
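if __name__ == "__main__":
    # Minimal shape sketch (illustrative, not part of the original module): with the
    # arguments DETR uses above, MLP(hidden_dim, hidden_dim, 4, 3) maps the trailing
    # feature dimension to 4 box coordinates; nn.Linear acts on the last dimension, so
    # the leading (decoder-layer, batch, query) dimensions pass through unchanged.
    _mlp = MLP(256, 256, 4, 3)
    _out = _mlp(torch.randn(6, 2, 100, 256))
    assert _out.shape == (6, 2, 100, 4)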
|
#!/usr/bin/env python3
import os
import re
curdir = os.path.dirname(os.path.realpath(__file__))
mem = {}
def apply_mask(value, mask):
ones_mask = int("".join(["1" if b=="1" else "0" for b in mask]), base=2)
zeros_mask = int("".join(["0" if b=="0" else "1" for b in mask]), base=2)
return (value|ones_mask)&zeros_mask
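# Quick sanity check (a shortened version of the puzzle's worked example): the mask
# "1XXXX0X" forces bit 6 to 1 and bit 1 to 0, so 11 (0b0001011) becomes 73 (0b1001001).
assert apply_mask(11, "1XXXX0X") == 73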
def main():
with open(curdir + "/input.txt") as f:
for line in f.readlines():
if "mask" in line:
match = re.match("mask = ([X01]+)", line.strip())
mask = match.group(1)
continue
else:
                match = re.match(r"mem\[([0-9]+)\] = ([0-9]+)", line.strip())
mem_addr = int(match.group(1))
value = int(match.group(2))
mem[mem_addr] = apply_mask(value, mask)
solution = sum([v for v in mem.values()])
print(f"Solution: {solution}")
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide :pep:`484`-compliant **new type hint utilities** (i.e.,
callables generically applicable to :pep:`484`-compliant types).
This private submodule is *not* intended for importation by downstream callers.
'''
# ....................{ IMPORTS }....................
from beartype.roar import BeartypeDecorHintPep484Exception
from beartype._data.hint.pep.sign.datapepsigns import HintSignNewType
from beartype._util.py.utilpyversion import IS_PYTHON_AT_LEAST_3_10
from types import FunctionType
from typing import Any
# See the "beartype.cave" submodule for further commentary.
__all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL']
# ....................{ TESTERS }....................
# If the active Python interpreter targets Python >= 3.10 and thus defines
# "typing.NewType" type hints as instances of that class, define this
# Python < 3.10-specific tester to unconditionally raise an exception.
if IS_PYTHON_AT_LEAST_3_10:
def is_hint_pep484_newtype_pre_python310(hint: object) -> bool:
raise BeartypeDecorHintPep484Exception(
'is_hint_pep484_newtype_pre_python310() assumes Python < 3.10, '
'but current Python interpreter targets Python >= 3.10.'
)
# Else, the active Python interpreter targets Python < 3.10 and thus defines
# "typing.NewType" type hints as closures returned by that function. Since
# these closures are sufficiently dissimilar from all other type hints to
# require unique detection, implement this tester unique to this obsolete
# Python version to detect these closures.
else:
def is_hint_pep484_newtype_pre_python310(hint: object) -> bool:
# Return true only if...
return (
# This hint is a pure-Python function *AND*...
#
# Note that we intentionally do *NOT* call the callable() builtin
# here, as that builtin erroneously returns false positives for
# non-standard classes defining the __call__() dunder method to
# unconditionally raise exceptions. Critically, this includes most
# PEP 484-compliant type hints, which naturally fail to define both
# the "__module__" *AND* "__qualname__" dunder instance variables
# accessed below. Shoot me now, fam.
isinstance(hint, FunctionType) and
# This callable is a closure created and returned by the
# typing.NewType() function. Note that:
#
# * The "__module__" and "__qualname__" dunder instance variables
# are *NOT* generally defined for arbitrary objects but are
# specifically defined for callables.
# * "__qualname__" is safely available under Python >= 3.3.
# * This test derives from the observation that the concatenation
            #   of this callable's "__qualname__" and "__module__" dunder
# instance variables suffices to produce a string unambiguously
# identifying whether this hint is a "NewType"-generated closure:
# >>> from typing import NewType
            # >>> UserId = NewType('UserId', int)
            # >>> UserId.__qualname__
            # 'NewType.<locals>.new_type'
            # >>> UserId.__module__
            # 'typing'
f'{hint.__module__}.{hint.__qualname__}'.startswith(
'typing.NewType.')
)
is_hint_pep484_newtype_pre_python310.__doc__ = '''
``True`` only if the passed object is a Python < 3.10-specific
:pep:`484`-compliant **new type** (i.e., closure created and returned by
the :func:`typing.NewType` closure factory function).
This tester is intentionally *not* memoized (e.g., by the
:func:`callable_cached` decorator), as the implementation trivially reduces
to an efficient one-liner.
Caveats
----------
**New type aliases are a complete farce and thus best avoided.**
Specifically, these PEP-compliant type hints are *not* actually types but
rather **identity closures** that return their passed parameters as is.
Instead, where distinct types are:
* *Not* required, simply annotate parameters and return values with the
desired superclasses.
* Required, simply:
* Subclass the desired superclasses as usual.
* Annotate parameters and return values with those subclasses.
Parameters
----------
hint : object
Object to be inspected.
Returns
----------
bool
``True`` only if this object is a Python < 3.10-specific
:pep:`484`-compliant new type.
'''
# ....................{ GETTERS }....................
def get_hint_pep484_newtype_class(hint: Any) -> type:
'''
User-defined class aliased by the passed :pep:`484`-compliant **new type**
(i.e., object created and returned by the :func:`typing.NewType` type hint
factory).
This getter is intentionally *not* memoized (e.g., by the
:func:`callable_cached` decorator), as the implementation trivially reduces
to an efficient one-liner.
Parameters
----------
hint : object
Object to be inspected.
Returns
----------
type
User-defined class aliased by this :pep:`484`-compliant new type.
Raises
----------
BeartypeDecorHintPep484Exception
If this object is *not* a :pep:`484`-compliant new type.
See Also
----------
:func:`is_hint_pep484_newtype`
Further commentary.
'''
# Avoid circular import dependencies.
from beartype._util.hint.pep.utilpepget import get_hint_pep_sign
# If this object is *NOT* a PEP 484-compliant "NewType" hint, raise an
# exception.
if get_hint_pep_sign(hint) is not HintSignNewType:
raise BeartypeDecorHintPep484Exception(
f'Type hint {repr(hint)} not "typing.NewType".')
# Else, this object is a PEP 484-compliant "NewType" hint.
    # Return the user-defined class aliased by this new type. Note
# that this requires violating privacy encapsulation by accessing a dunder
# instance variable unique to closures created by the typing.NewType()
# closure factory function.
return hint.__supertype__
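# Illustrative doctest-style sketch (an addition, not part of this submodule's API docs):
# >>> from typing import NewType
# >>> UserId = NewType('UserId', int)
# >>> get_hint_pep484_newtype_class(UserId) is int
# True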
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2010 Doug Hellmann. All rights reserved.
#
"""Updating counts.
"""
#end_pymotw_header
import collections
c = collections.Counter('abcdaab')
for letter in 'abcde':
    print('%s : %d' % (letter, c[letter]))
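# update() adds to existing counts rather than replacing them -- a small illustrative
# extension of the listing above, not part of the original PyMOTW example.
c.update('aeee')
for letter in 'abcde':
    print('%s : %d' % (letter, c[letter]))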
|
#
# Class for electron-migration limited SEI growth
#
import pybamm
from .base_sei import BaseModel
class ElectronMigrationLimited(BaseModel):
"""
Class for electron-migration limited SEI growth.
Parameters
----------
param : parameter class
The parameters to use for this submodel
reaction_loc : str
Where the reaction happens: "x-average" (SPM, SPMe, etc),
"full electrode" (full DFN), or "interface" (half-cell DFN)
options : dict, optional
A dictionary of options to be passed to the model.
**Extends:** :class:`pybamm.sei.BaseModel`
"""
def __init__(self, param, reaction_loc, options=None):
super().__init__(param, options=options)
self.reaction_loc = reaction_loc
def get_fundamental_variables(self):
if self.reaction_loc == "x-average":
L_inner_av = pybamm.standard_variables.L_inner_av
L_outer_av = pybamm.standard_variables.L_outer_av
L_inner = pybamm.PrimaryBroadcast(L_inner_av, "negative electrode")
L_outer = pybamm.PrimaryBroadcast(L_outer_av, "negative electrode")
elif self.reaction_loc == "full electrode":
L_inner = pybamm.standard_variables.L_inner
L_outer = pybamm.standard_variables.L_outer
elif self.reaction_loc == "interface":
L_inner = pybamm.standard_variables.L_inner_interface
L_outer = pybamm.standard_variables.L_outer_interface
variables = self._get_standard_thickness_variables(L_inner, L_outer)
variables.update(self._get_standard_concentration_variables(variables))
return variables
def get_coupled_variables(self, variables):
L_sei_inner = variables["Inner SEI thickness"]
if self.reaction_loc == "interface":
phi_s_n = variables["Lithium metal interface electrode potential"]
else:
phi_s_n = variables["Negative electrode potential"]
U_inner = self.param.U_inner_electron
C_sei = self.param.C_sei_electron
j_sei = (phi_s_n - U_inner) / (C_sei * L_sei_inner)
alpha = 0.5
j_inner = alpha * j_sei
j_outer = (1 - alpha) * j_sei
variables.update(self._get_standard_reaction_variables(j_inner, j_outer))
# Update whole cell variables, which also updates the "sum of" variables
variables.update(super().get_coupled_variables(variables))
return variables
def set_rhs(self, variables):
if self.reaction_loc == "x-average":
L_inner = variables["X-averaged inner SEI thickness"]
L_outer = variables["X-averaged outer SEI thickness"]
j_inner = variables["X-averaged inner SEI interfacial current density"]
j_outer = variables["X-averaged outer SEI interfacial current density"]
else:
L_inner = variables["Inner SEI thickness"]
L_outer = variables["Outer SEI thickness"]
j_inner = variables["Inner SEI interfacial current density"]
j_outer = variables["Outer SEI interfacial current density"]
v_bar = self.param.v_bar
Gamma_SEI = self.param.Gamma_SEI
self.rhs = {
L_inner: -Gamma_SEI * j_inner,
L_outer: -v_bar * Gamma_SEI * j_outer,
}
def set_initial_conditions(self, variables):
if self.reaction_loc == "x-average":
L_inner = variables["X-averaged inner SEI thickness"]
L_outer = variables["X-averaged outer SEI thickness"]
else:
L_inner = variables["Inner SEI thickness"]
L_outer = variables["Outer SEI thickness"]
L_inner_0 = self.param.L_inner_0
L_outer_0 = self.param.L_outer_0
self.initial_conditions = {L_inner: L_inner_0, L_outer: L_outer_0}
|
import logging
from tellus.configuration import (
TELLUS_USER_MODIFIED,
TELLUS_GO,
TELLUS_ABOUT_TELL,
TELLUS_USER,
TELLUS_INTERNAL,
TELLUS_APP_USERNAME,
)
from tellus.sources import Source
from tellus.tell import Tell
from tellus.tells import TheresNoTellingException
from tellus.tellus_sources.socializer import CoffeeBot, Socializer
from tellus.tellus_utils import now_string
from tellus.wiring import ui_route_to_tell
class TellusInitialization(Source):
"""
A special source that does some basic initialization of information for Tellus.
This includes:
- Setting up some internal Tellus Tells such as a default About tell
- Running data migrations and cleanups between versions of Tellus. These will often not run
if it is not presently in a version of Tellus that requires a migration.
This source needs to be very careful to only run in appropriate circumstances - it usually only runs
once on startup, and should only run migrations with particular versions of Tellus, etc.
"""
TELLUS_ABOUT_DESCRIPTION = "About Tellus."
def __init__(self, teller, active_migrations=None):
"""
:param teller: The usual.
:param active_migrations: For testing, to override with specific migration functions.
"""
super().__init__(
teller,
source_id="data-migration",
description="A special source for managing and migrating Tellus data between versions.",
datum_display_name="Data Migration",
run_restriction=Source.RUN_ON_STARTUP,
)
# This call is to ensure my Tell always exists - important for tests to run correctly
self.source_tell.get_data_dict()
self._migrations_run = 0
if active_migrations is not None:
self._active_migrations = active_migrations
else:
# THIS IS WHERE MIGRATIONS ARE SPECIFIED...
self._active_migrations = [self.migration_update_coffee_bot_history_2021_05]
async def load_source(self):
# Note that for these, they will each individually persist, as some of the migrations may want
# to persist sooner
await self._create_default_tells()
await self._run_migrations()
def verify_or_create_about_tell(self):
try:
tellus_about = self.teller.get(TELLUS_ABOUT_TELL)
except TheresNoTellingException:
logging.info(
"There is currently no 'About Tellus' Tell (%s). Tellus is creating one.",
TELLUS_ABOUT_TELL,
)
tellus_about = self.teller.create_tell(
TELLUS_ABOUT_TELL,
TELLUS_GO,
self.source_id,
url=ui_route_to_tell(TELLUS_ABOUT_TELL),
description=self.TELLUS_ABOUT_DESCRIPTION,
)
if not tellus_about.has_tag(TELLUS_APP_USERNAME):
tellus_about.add_tag(TELLUS_APP_USERNAME)
return True
return False
async def _create_default_tells(self):
should_persist = False
should_persist = self.verify_or_create_about_tell() or should_persist
if should_persist:
self.teller.persist()
async def _run_migrations(self):
if len(self._active_migrations) == 0:
logging.info(
"No current migrations specified. Will not run the Data Migration Source."
)
return
for migration in self._active_migrations:
migration_name = migration.__name__
if self.source_tell.get_data(migration_name) is not None:
                logging.info(
                    "Migration %s has already been run. Migrations will only be run once.",
                    migration_name,
                )
else:
logging.info("Running %s", migration_name)
migration()
self.source_tell.update_datum_from_source(
migration_name, "Completed At", now_string()
)
logging.info("%s complete.", migration_name)
self._migrations_run += 1
self.teller.persist()
logging.info(
"Migrations complete - have run %s of %s migrations since startup.",
self._migrations_run,
len(self._active_migrations),
)
@staticmethod
def _clear_old_data(tell, data_key):
"""
Common use case.
"""
if tell.get_data(data_key):
old_data = tell.clear_data(data_key)
logging.info(
"Removed deprecated '%s' data from Tell '%s': %s",
data_key,
tell.alias,
old_data,
)
def migration_update_coffee_bot_history_2021_05(self):
"""
Update the User Tell coffee history for the new User page
"""
user_tells = self.teller.tells(TELLUS_USER)
coffee_tell = self.teller.get(CoffeeBot.TELL_COFFEE_BOT)
coffee_bot = CoffeeBot(coffee_tell)
logging.info(
"migration_update_coffee_bot_history_2021_05: Updating CoffeeBot histories for all Users for new User Page."
)
for user_tell in user_tells:
# Yes, all of this is totally cheating
if (
user_tell.get_datum(
Socializer.SOURCE_ID, CoffeeBot.DATUM_COFFEE_HISTORY
)
is None
):
coffee_bot.update_user_history_for(user_tell, user_tell.alias)
####
# Deprecated migrations - these can usually go away eventually, but keeping some around for a couple of iterations
# for examples and easy cribbing.
####
def deprecated_migration_users_remove_tellus_internal_2020_11(self):
"""
Had a situation where Users were all being added to TELLUS_INTERNAL when they logged in as a side effect of
the new Categories-are-just-data-sources thing. This cleans that up.
"""
user_tells = self.teller.tells(TELLUS_USER)
for user_tell in user_tells:
if user_tell.in_category(TELLUS_INTERNAL):
logging.info(
"migration_users_remove_tellus_internal_2020_11: Removing 'tellus-internal' from %s.",
user_tell.alias,
)
user_tell.remove_category(TELLUS_INTERNAL)
def deprecated_migration_data_dict_includes_user_modified_data_2010_10(self):
# pylint: disable=protected-access
# some weird special cases
ignore = [
"groot",
"prod-viewport",
"tellus-coffee-bot",
"tellus-config-tools",
"tellus-source-data-migration",
]
for tell in self.teller.tells():
if (
tell.in_any_categories([TELLUS_USER_MODIFIED, TELLUS_GO])
and tell.alias not in ignore
):
data = {}
if tell.description is not None and len(tell.description) > 0:
data[Tell.DESCRIPTION] = tell.description
else:
tell._description = None # Yes, need to do this to fix old blanks
if tell.go_url is not None and len(tell.go_url) > 0:
data[Tell.GO_URL] = tell.go_url
else:
tell._go_url = None # Yes, need to do this to fix old blanks
if len(tell.tags) > 0:
data[Tell.TAGS] = ", ".join(tell.tags)
tell.update_data_from_source(TELLUS_USER_MODIFIED, data, self.source_id)
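    # Hypothetical sketch (not part of Tellus) showing the shape a new migration takes:
    # a no-argument method listed in self._active_migrations in __init__; once it
    # completes, _run_migrations() records its name on the source Tell so it never reruns.
    def example_migration_sketch(self):
        """Illustrative only: clear a hypothetical deprecated datum from all User Tells."""
        for user_tell in self.teller.tells(TELLUS_USER):
            self._clear_old_data(user_tell, "example-deprecated-key")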
|
from django.apps import AppConfig
class ExportappConfig(AppConfig):
name = 'django_exportapp'
def ready(self):
# super().ready()
self.module.autodiscover_wrapper()
# debug
from .helper import exporter
# print(exporter)
# for i in exporter.list():
# print(i, exporter.arr[i])
|
class TorsionList(list):
def __init__(self, pose, torsion_setter, *args, **kwargs):
self.pose = pose
self.torsion_setter = torsion_setter
super().__init__(*args, **kwargs)
def __setitem__(self, index, value):
if isinstance(index, int):
if index >= 0:
self.torsion_setter(self.pose, index+1, value)
else:
resid = self.pose.size + index + 1
self.torsion_setter(self.pose, resid, value)
# Slices are obtained when using semi-colons to get items, e.g. a[2:8:3]
elif isinstance(index, slice):
# Find concerned residue indexes
residues = range(1, self.pose.size+1)[index]
for resid, new_psi in zip(residues, value):
self.torsion_setter(self.pose, resid, new_psi)
else: raise TypeError(("TorsionList indices must be integers or"
" slices, not {}").format(type(index).__name__))
def _torsion_list_property(getter, setter):
"""Make a torsion_list attribute from torsion getter and setter
Parameters
----------
getter : function
function taking index argument returning a torsion angle
setter : function
function taking index and new torsion angle arguments
Return
------
property
Examples
--------
>>> Pose.psis = _torsion_list_property(Pose.psi, pose.set_psi)
>>> pose = get_pose('LYSE')
>>> print(pose.psi(1))
>>> pose.set_psi(1, 45.)
>>> print(pose.psis[0])
>>> pose.psis[0] = 45.
"""
# TODO: Change this for DNA torsion lists
def get_torsions(pose):
torsion_list = (getter(pose, resid) for resid in range(1, pose.total_residue+1)
if any((pose.residue(resid).is_protein(),
pose.residue(resid).is_carbohydrate(),
pose.residue(resid).is_peptoid()
))
)
return TorsionList(pose, setter, torsion_list)
def set_torsions(pose, new_torsions):
for resid in range(pose.size):
setter(pose, resid+1, new_torsions[resid])
return property(get_torsions, set_torsions)
|
if __name__ == '__main__':
import os
import time
import RPi.GPIO as GPIO
import systemd.daemon
GPIO.setmode(GPIO.BOARD)
GPIO.setup(16, GPIO.IN, pull_up_down=GPIO.PUD_UP)
systemd.daemon.notify('READY=1')
while True:
if GPIO.input(16) == GPIO.HIGH:
os.system('/usr/bin/gpio -1 pwm 33 1023')
else:
os.system('/usr/bin/gpio -1 pwm 33 600')
time.sleep(0.1)
|
from os import environ
domain_root = environ.get('DOMAIN_ROOT')
http_protocol = environ.get('HTTP_PROTOCOL', 'https')
config = {
'SECRET_KEY': environ['SECRET_KEY'],
'HONEYCOMB_WRITEKEY': environ.get('HONEYCOMB_WRITEKEY', None),
'HONEYCOMB_DATASET': environ.get('HONEYCOMB_DATASET', 'rws'),
'HONEYCOMB_CLIENT_DATASET': environ.get('HONEYCOMB_CLIENT_DATASET', 'mobile-apps'),
}
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from mogan.common.i18n import _
from mogan.conf import auth
opts = [
cfg.StrOpt('url',
help=_("URL for connecting to neutron.")),
cfg.IntOpt('url_timeout',
default=30,
help=_('Timeout value for connecting to neutron in seconds.')),
cfg.IntOpt('retries',
default=3,
help=_('Client retries in the case of a failed request.')),
]
opt_group = cfg.OptGroup(name='neutron',
title='Options for the neutron service')
def register_opts(conf):
conf.register_group(opt_group)
conf.register_opts(opts, group=opt_group)
auth.register_auth_opts(conf, 'neutron')
def list_opts():
return auth.add_auth_opts(opts)
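# Illustrative usage sketch (assumes the standard oslo.config pattern; not part of this
# module): after register_opts() runs, the options are read from the "neutron" group.
#
#     from oslo_config import cfg
#     register_opts(cfg.CONF)
#     assert cfg.CONF.neutron.url_timeout == 30  # default defined above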
|
"""Geometry-related objects."""
import logging
from math import pi, sqrt
from more_itertools import pairwise
import arcpy
LOG = logging.getLogger(__name__)
"""logging.Logger: Module-level logger."""
RATIO = {
"meter": {
"foot": 0.3048,
"feet": 0.3048,
"ft": 0.3048,
"yard": 0.9144,
"yards": 0.9144,
"yd": 0.9144,
"mile": 1609.34,
"miles": 1609.34,
"mi": 1609.34,
"meter": 1.0,
"meters": 1.0,
"m": 1.0,
"kilometer": 1000.0,
"kilometers": 1000.0,
"km": 1000.0,
}
}
"""dict: Two-level mapping of ratio between two types of measure.
Usage: `RATIO[to_measure][from_measure]`
"""
arcpy.SetLogHistory(False)
def compactness_ratio(area, perimeter):
"""Return compactness ratio (4pi * area / perimeter ** 2) result.
Args:
area (float): Area of geometry to evaluate.
perimeter (float): Perimeter of geometry to evaluate.
Returns:
float: Ratio of compactness.
"""
if not area or not perimeter:
return None
return (4.0 * pi * float(area)) / (float(perimeter) ** 2.0)
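# Quick check of the formula (illustrative, not part of the module): a circle is the
# most compact shape, so for any radius the ratio is 1.0 up to floating-point rounding.
# >>> round(compactness_ratio(area=pi * 3.0 ** 2, perimeter=2.0 * pi * 3.0), 9)
# 1.0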
def compactness_ratio_by_geometry(geometry):
"""Return compactness ratio (4pi * area / perimeter ** 2) result using geometry.
    If the geometry is None, or its area or length is zero/None, returns None.
Args:
geometry (arcpy.Geometry): Geometry to evaluate.
Returns:
float: Ratio of compactness.
"""
if not geometry or not geometry.area or not geometry.length:
return None
return compactness_ratio(geometry.area, geometry.length)
def convex_hull(*geometries):
"""Return convex hull polygon covering given geometries.
Args:
*geometries (arcpy.Geometry): Feature geometries in displacement order.
Returns:
arcpy.Polygon.
"""
hull_geom = None
for geom in geometries:
if geom:
hull_geom = hull_geom.union(geom).convexHull() if hull_geom else geom
if hull_geom and isinstance(hull_geom, (arcpy.PointGeometry, arcpy.Polyline)):
hull_geom = hull_geom.buffer(1)
return hull_geom
def coordinate_distance(*coordinates):
"""Return total distance between coordinates.
Args:
*coordinates: Collection of coordinates to compare. Coordinates can be `x,y` or
`x,y,z`.
Returns:
float: Euclidian distance between coordinates.
"""
distance = 0.0
for coord1, coord2 in pairwise(coordinates):
coord = {
1: dict(zip(["x", "y", "z"], coord1)),
2: dict(zip(["x", "y", "z"], coord2)),
}
coord[1].setdefault("z", 0)
coord[2].setdefault("z", 0)
distance += sqrt(
sum(
[
(coord[2]["x"] - coord[1]["x"]) ** 2,
(coord[2]["y"] - coord[1]["y"]) ** 2,
(coord[2]["z"] - coord[1]["z"]) ** 2,
]
)
)
return distance
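# Example (illustrative): a 3-4-5 right triangle in 2D; a missing z component defaults
# to 0, so 2D and 3D coordinates can be mixed.
# >>> coordinate_distance((0, 0), (3, 4))
# 5.0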
def geometry_axis_bound(geometry, axis, bound):
"""Return value of axis-bound for given geometry.
Args:
geometry (arcpy.Geometry, None): Geometry to evaluate.
Returns:
float
"""
if not geometry:
return None
return getattr(geometry.extent, axis.upper() + bound.title())
def line_between_centroids(*geometries):
"""Return line geometry connecting given geometry centroids.
Args:
*geometries (list of arcpy.Geometry): Ordered collection of geometries.
Returns:
arcpy.Polyline
"""
points = [geom.centroid for geom in geometries]
line = arcpy.Polyline(arcpy.Array(points), geometries[0].spatialReference)
return line
def sexagesimal_angle_to_decimal(degrees, minutes=0, seconds=0, thirds=0, fourths=0):
"""Convert sexagesimal-parsed angles to a decimal.
Args:
degrees (int): Angle degrees count.
minutes (int): Angle minutes count.
seconds (int): Angle seconds count.
thirds (int): Angle thirds count.
fourths (int): Angle fourths count.
Returns:
float: Angle in decimal degrees.
"""
if degrees is None:
return None
# Degrees must be absolute or it will not sum right with subdivisions.
absolute_decimal = abs(float(degrees))
try:
sign_multiplier = abs(float(degrees)) / float(degrees)
except ZeroDivisionError:
sign_multiplier = 1
for count, divisor in [
(minutes, 60),
(seconds, 3600),
(thirds, 216000),
(fourths, 12960000),
]:
if count:
absolute_decimal += float(count) / divisor
return absolute_decimal * sign_multiplier
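# Examples (illustrative): 30 degrees 30 minutes is 30.5 decimal degrees, and the sign
# of the degrees argument carries through to the result.
# >>> sexagesimal_angle_to_decimal(30, 30)
# 30.5
# >>> sexagesimal_angle_to_decimal(-30, 30)
# -30.5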
|
import unittest
from yaml_interpreter import YAMLInterpretor
class LoadYamlTest(unittest.TestCase):
def setUp(self):
self.si = YAMLInterpretor(fill_with_none=False)
def test_load_yaml_to_2d_list(self):
yaml = '''
a:
x: 1
y: 2
'''
res = self.si.load(yaml)
self.assertEqual(res, [
['a', 'x', 1],
['a', 'y', 2]
], '')
def test_load_yaml_with_null(self):
yaml = '''
a:
x: 1
y:
- null: 注释
- null
'''
res = self.si.load(yaml)
self.assertEqual(res, [
['a', 'x', 1],
['a', 'y', None, '注释'],
['a', 'y', None, None]
])
def test_load_yaml_file(self):
filepath = 'mock_data/01.yaml'
res = self.si.load_file(filepath)
self.assertEqual(res, [
['a', 'x', 1],
['a', 'y', 2]
])
def test_load_wrong_file(self):
res = self.si.load_file('abc.asdf')
self.assertIsNone(res)
|
from django.contrib.admin import SimpleListFilter
from django.contrib.redirects.models import Redirect
from link_report import link_report_settings
from .models import Sentry404Event
class UrlListFilter(SimpleListFilter):
title = 'Internal or External Source?'
parameter_name = 'is_internal'
def lookups(self, request, model_admin):
return (
('0', 'Internal'),
('1', 'External'),
)
def queryset(self, request, queryset):
if self.value():
if self.value() == '0':
internal = Sentry404Event.objects.filter(referer__icontains=link_report_settings.BASE_URL)
return queryset.filter(events=internal)
elif self.value() == '1':
assert isinstance(link_report_settings.BASE_URL, object)
external = Sentry404Event.objects.exclude(referer__icontains=link_report_settings.BASE_URL)
return queryset.filter(events=external)
return queryset
class RedirectedListFilter(SimpleListFilter):
title = 'Redirected?'
parameter_name = 'redirected'
def lookups(self, request, model_admin):
return (
('0', 'No'),
('1', 'Yes'),
)
def queryset(self, request, queryset):
if self.value():
if self.value() == '0':
return queryset.exclude(url__in=Redirect.objects.all().values_list('old_path', flat=True))
elif self.value() == '1':
return queryset.filter(
url__in=Redirect.objects.all().values_list('old_path', flat=True))
return queryset
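# Usage sketch (illustrative; the admin and model names here are assumptions): these
# filters plug into a ModelAdmin's list_filter, e.g.
#
#     @admin.register(LinkReport)
#     class LinkReportAdmin(admin.ModelAdmin):
#         list_filter = (UrlListFilter, RedirectedListFilter)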
|
import numpy as np
from tensorflow.keras import regularizers, Sequential, Model, Input
from tensorflow.keras.layers import Dense, Flatten, Reshape
from detectors.single_image_based_detectors.abs_single_image_autoencoder import AbstractSingleImageAD
from detectors.single_image_based_detectors.autoencoder_batch_generator import AutoencoderBatchGenerator
from detectors.anomaly_detector import AnomalyDetector
from utils import IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS
INPUT_SHAPE = (IMAGE_HEIGHT * IMAGE_WIDTH * IMAGE_CHANNELS,)
class SimpleAutoencoder(AbstractSingleImageAD, AnomalyDetector):
def __init__(self, name: str, args):
super(SimpleAutoencoder, self).__init__(name=name, is_keras_model=True, args=args)
def get_input_shape(self):
return INPUT_SHAPE
def _create_keras_model(self, input_shape, args=None):
model = Sequential()
model.add(Input(input_shape),)
model.add(Flatten())
model.add(Dense(64, activation='relu', activity_regularizer=regularizers.l1(10e-9)))
model.add(Dense(np.prod(input_shape), activation='sigmoid'))
model.add(Reshape((*input_shape,)))
return model
def normalize_and_reshape(self, x):
x = x.astype('float32') / 255.
x = x.reshape(-1, IMAGE_HEIGHT * IMAGE_WIDTH * IMAGE_CHANNELS)
return x
|
from dynatrace.main import Dynatrace
from dynatrace.http_client import TOO_MANY_REQUESTS_WAIT
|