content stringlengths 5 1.05M |
|---|
from ascii_art import AsciiImage
def convert_image(image, output, neighborhood=1, print_image=True):
    """Convert *image* to ascii art and save it to *output*.

    :param image: path of the input image file
    :param output: destination path (its extension selects the output format)
    :param neighborhood: pixel-neighborhood size used for the conversion
    :param print_image: when True, also echo the ascii art to stdout
    """
    art = AsciiImage(filename=image, neighborhood=neighborhood)
    if print_image:
        print(art)
    art.save(output)
if __name__ == "__main__":
    import argparse

    ap = argparse.ArgumentParser(description="Convert image to ascii art.")
    ap.add_argument("-i", "--image", required=True, type=str,
                    help="Input image.")
    ap.add_argument("-o", "--output", default="output.txt", type=str,
                    # BUG FIX: help text was truncated ("...save it as an").
                    help="Output file. Use `.txt` to save it as text and "
                         "`.jpg` to save it as an image.")
    ap.add_argument("-n", "--neighborhood", default=1, type=int,
                    help="Size of neighborhood of pixels to convert to ascii.")
    ap.add_argument("-p", action="store_true", dest="print_image",
                    help="Also print the ascii art to stdout.")
    args = vars(ap.parse_args())
    convert_image(**args)
# Read the number of test cases, then one value per case. For each value,
# report how many halvings ("dias") bring it down to 1.0 or below.
num_cases = int(input())
values = [float(input()) for _ in range(num_cases)]
for value in values:
    days = 0
    while value > 1.00:
        value /= 2
        days += 1
    print(days, "dias")
|
from unittest import TestCase
from mulearn.kernel import *
class TestLinearKernel(TestCase):
    """Unit tests for LinearKernel.compute."""

    def test_compute(self):
        kernel = LinearKernel()
        exact_cases = (
            (([1, 0, 1], [2, 2, 2]), 4),
            (((1, 0, 2), (-1, 2, 5)), 9),
        )
        for (a, b), expected in exact_cases:
            self.assertEqual(kernel.compute(a, b), expected)
        approx_cases = (
            (([1.2, -0.4, -2], [4, 1.2, .5]), 3.32),
            (((1.2, -0.4, -2), [4, 1.2, .5]), 3.32),
        )
        for (a, b), expected in approx_cases:
            self.assertAlmostEqual(kernel.compute(a, b), expected)
        # Arguments of mismatched length are rejected.
        with self.assertRaises(ValueError):
            kernel.compute([1, 0, 1], [2, 2])
class TestPolynomialKernel(TestCase):
    """Unit tests for PolynomialKernel.compute."""

    def test_compute(self):
        # Non-integer or negative degrees are rejected at construction time.
        for bad_degree in (3.2, -2):
            with self.assertRaises(ValueError):
                PolynomialKernel(bad_degree)

        quadratic = PolynomialKernel(2)
        self.assertEqual(quadratic.compute((1, 0, 2), (-1, 2, 5)), 100)
        self.assertAlmostEqual(
            quadratic.compute([1.2, -0.4, -2], [4, 1.2, .5]), 18.6624)

        quintic = PolynomialKernel(5)
        self.assertEqual(quintic.compute((1, 0, 2), [-1, 2, 5]), 10 ** 5)
        self.assertAlmostEqual(
            quintic.compute((1.2, -0.4, -2), (4, 1.2, .5)),
            1504.59195, delta=10**-6)

        # Arguments of mismatched length are rejected.
        with self.assertRaises(ValueError):
            quintic.compute((1, 0, 2), (-1, 2))
class TestHomogeneousPolynomialKernel(TestCase):
    """Unit tests for HomogeneousPolynomialKernel.compute."""

    def test_compute(self):
        # Degree must be a positive integer.
        for bad_degree in (3.2, -2):
            with self.assertRaises(ValueError):
                HomogeneousPolynomialKernel(bad_degree)

        quadratic = HomogeneousPolynomialKernel(2)
        self.assertEqual(quadratic.compute((1, 0, 2), (-1, 2, 5)), 81.0)
        self.assertAlmostEqual(
            quadratic.compute([1.2, -0.4, -2], [4, 1.2, .5]), 11.0224)

        quintic = HomogeneousPolynomialKernel(5)
        self.assertEqual(quintic.compute((1, 0, 2), [-1, 2, 5]), 59049.0)
        self.assertAlmostEqual(
            quintic.compute((1.2, -0.4, -2), (4, 1.2, .5)),
            403.357761, delta=10**-6)

        # Arguments of mismatched length are rejected.
        with self.assertRaises(ValueError):
            quintic.compute((1, 0, 2), (-1, 2))
class TestGaussianKernel(TestCase):
    """Unit tests for GaussianKernel.compute."""

    def test_compute(self):
        # A negative width is rejected.
        with self.assertRaises(ValueError):
            GaussianKernel(-5)

        kernel = GaussianKernel(1)
        cases = (
            (((1, 0, 1), (0, 0, 1)), 0.60653065),
            (([-3, 1, 0.5], [1, 1.2, -8]), 6.73e-20),
            (([-1, -4, 3.5], (1, 3.2, 6)), 3.29e-14),
        )
        for (a, b), expected in cases:
            self.assertAlmostEqual(kernel.compute(a, b), expected)

        # Arguments of mismatched length are rejected.
        with self.assertRaises(ValueError):
            kernel.compute([-1, 3.5], (1, 3.2, 6))
class TestHyperbolicKernel(TestCase):
    """Unit tests for HyperbolicKernel.compute."""

    def test_compute(self):
        kernel = HyperbolicKernel(1, 5)
        self.assertAlmostEqual(kernel.compute((1, 0, 1), (0, 0, 1)), 0.9999877)
        self.assertAlmostEqual(
            kernel.compute([-3, 1, 0.5], [1, 1.2, -8]), -0.6640367,
            delta=10**-7)
        self.assertAlmostEqual(
            kernel.compute([-1, -4, 3.5], (1, 3.2, 6)), 0.9999999,
            delta=10**-7)
        # Arguments of mismatched length are rejected.
        with self.assertRaises(ValueError):
            kernel.compute([-1, 3.5], (1, 3.2, 6))
class TestPrecomputedKernel(TestCase):
    """Unit tests for PrecomputedKernel.compute."""

    def test_compute(self):
        # A ragged Gram matrix is rejected at construction time.
        with self.assertRaises(ValueError):
            PrecomputedKernel(((1, 2), (3, 4, 5)))

        kernel = PrecomputedKernel(((1, 2), (3, 4)))
        # Arguments are single-element index vectors into the Gram matrix.
        self.assertEqual(kernel.compute([1], [1]), 4.0)
        self.assertEqual(kernel.compute([1], [0]), 3.0)
        # Out-of-range indices and non-integer indices both fail.
        with self.assertRaises(IndexError):
            kernel.compute([1], [2])
        with self.assertRaises(TypeError):
            kernel.compute([0], [1.6])
|
# -*- coding: utf-8 -*-
###############################################################################
# Author: Gérald Fenoy, gerald.fenoy@cartoworks.com
# Copyright (c) 2010-2014, Cartoworks Inc.
###############################################################################
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
################################################################################
import zoo
import sys
def getForm(conf, inputs, outputs):
    """Render the datastore form template, listing only the directories on
    which the current user holds full "rwx" privileges.

    Mutates ``inputs`` in place (``inputs1`` below is an alias) and writes the
    rendered template into ``outputs`` via ``tmpl.display``.
    Returns 3 (ZOO service status code for a successful response).
    """
    import datastores.service as ds
    import mm_access
    # Populate outputs["Result"] with the datastore listing.
    ds.list(conf, inputs, outputs)
    # SECURITY: eval() of the service output is only safe if
    # datastores.service is trusted to emit a Python literal; prefer
    # ast.literal_eval if the payload format allows it.
    elements = eval(outputs["Result"]["value"])
    # Keep only the directories the user may fully access.
    # (Removed an unused counter variable that shadowed no behavior.)
    dirs = []
    for directory in elements["Directories"]:
        name = directory["name"]
        print(name + " rwx", file=sys.stderr)
        if mm_access.checkDataStorePriv(conf, name, "rwx"):
            dirs += [name]
    import template.service as tmpl
    # NOTE: inputs1 aliases inputs, so the template parameters set below are
    # also visible to the caller through inputs.
    inputs1 = inputs
    inputs1["tmpl"] = {"value": inputs["form"]["value"]}
    inputs1["dirs"] = dirs
    tmpl.display(conf, inputs1, outputs)
    return 3
def saveOnServer(conf, inputs, outputs):
    """Move an uploaded file into the per-session upload directory.

    The source path comes from inputs[<field>]["lref"] (the ZOO-kernel local
    reference of the uploaded body); the destination directory name is derived
    from the session id. Returns 3 (ZOO successful-response status code).
    """
    import shutil
    print("************ ok1 " + str(inputs), file=sys.stderr)
    # Pick the session directory: MapMint session id when a session ("senv")
    # exists, otherwise the unique id of this service run.
    if "senv" in conf:
        dir = conf["main"]["tmpPath"] + "/data_tmp_1111" + conf["senv"]["MMID"]
    else:
        dir = conf["main"]["tmpPath"] + "/data_tmp_1111" + conf["lenv"]["usid"]
    try:
        shutil.os.mkdir(dir)
    except Exception as e:
        # Best effort: the directory usually exists already from an earlier call.
        print(str(e), file=sys.stderr)
        pass
    # The input holding the upload defaults to "file" but can be redirected
    # through the optional "filename" input.
    field = "file"
    if "filename" in inputs:
        field = inputs["filename"]["value"]
    print("************ ok2 " + str(inputs), file=sys.stderr)
    tmp = inputs[field]["lref"].split("/")
    print("************ ok3 " + str(inputs), file=sys.stderr)
    # Destination keeps the original basename (last path component).
    outFileName = dir + "/" + tmp[len(tmp) - 1]
    print("************ ok4 " + str(inputs), file=sys.stderr)
    shutil.move(inputs[field]["lref"], outFileName);
    if "senv" in conf:
        # Remember the upload in the session for later processing steps.
        conf["senv"]["last_file"] = outFileName
        conf["senv"]["last_ufile"] = outFileName
        # import mmsession
        # mmsession.save(conf)
        # conf["lenv"]["cookie"]="MMID=MM"+conf["senv"]["MMID"]+"; path=/"
        print("************ XXX " + str(conf["senv"]), file=sys.stderr)
    print("************ ok5 " + str(outFileName), file=sys.stderr)
    outputs["Result"]["value"] = "Your " + tmp[len(tmp) - 1] + " file was uploaded on the server"
    print("************ ok6 " + str(inputs), file=sys.stderr)
    return 3
def saveOnServer0(conf, inputs, outputs):
    """Session-only variant of saveOnServer: move the uploaded file into the
    session upload directory, persist the session, and answer with a JSON
    payload describing the stored file.

    Returns 3 (ZOO successful-response status code).
    """
    import shutil, json
    print("ok1 INPUTS " + str(inputs), file=sys.stderr)
    print("ok1 " + str(conf), file=sys.stderr)
    dir = conf["main"]["tmpPath"] + "/data_tmp_1111" + conf["senv"]["MMID"]
    try:
        shutil.os.mkdir(dir)
    except Exception as e:
        # Best effort: the directory usually exists already.
        print(str(e), file=sys.stderr)
    # The input holding the upload defaults to "file" but can be redirected
    # through the optional "filename" input.
    field = "file"
    print(inputs, file=sys.stderr)
    if "filename" in inputs:
        field = inputs["filename"]["value"]
    tmp = inputs[field]["lref"].split("/")
    outFileName = dir + "/" + tmp[len(tmp) - 1]
    shutil.move(inputs[field]["lref"], outFileName)
    # Remember the upload in the session for later processing steps.
    conf["senv"]["last_file"] = outFileName
    conf["senv"]["last_ufile"] = outFileName
    import mmsession
    mmsession.save(conf)
    # BUG FIX: the placeholder was searched as "\[file\]", which never matches
    # the literal "[file]" in the translated message, so the placeholder was
    # left unreplaced in the response shown to the user.
    res = {"files": [{"message": zoo._("Your [file] file was uploaded on the server").replace("[file]", tmp[len(tmp) - 1]),
                      "fileName": outFileName}]}
    outputs["Result"]["value"] = json.dumps(res)
    return 3
def checkFile(conf, inputs, outputs):
    """Validate the files uploaded to the per-session directory, move the
    readable geodata files into the destination datastore directory and
    delete the files that neither OGR nor GDAL can open.

    Writes a JSON summary {"accepted": [...], "refused": [...]} into
    outputs["Result"]. Returns 3 (ZOO successful-response status code).
    """
    import shutil
    import osgeo.gdal
    import osgeo.ogr
    dir = conf["main"]["tmpPath"] + "/data_tmp_1111" + conf["senv"]["MMID"]
    accepted = []
    anames = []
    dnames = []
    deleted = []
    tmp = shutil.os.listdir(dir)
    for i in range(0, len(tmp)):
        tmp1 = tmp[i].split(".")
        t = None
        # A file sharing its basename with an already accepted file (e.g. a
        # shapefile sidecar) is accepted without re-opening it.
        for j in range(0, len(accepted)):
            print("Accepted / tmp1", file=sys.stderr)
            print(anames, file=sys.stderr)
            print(tmp1, file=sys.stderr)
            if tmp1[0] == anames[j].split(".")[0]:
                print("OK", file=sys.stderr)
                t = "OK"
                break
        if t is None:
            # Try to open as vector data first, then as raster data.
            t = osgeo.ogr.Open(dir + "/" + tmp[i])
            if t is None:
                t = osgeo.gdal.Open(dir + "/" + tmp[i])
                if t is not None:
                    # BUG FIX: GDAL band indices are 1-based; GetRasterBand(0)
                    # always yields no band, so every raster was rejected.
                    b = t.GetRasterBand(1)
                    t = b
        if t is None:
            deleted += [i]
            dnames += [tmp[i]]
        else:
            accepted += [i]
            anames += [tmp[i]]
    i = len(deleted) - 1
    print(str(deleted) + " " + str(dnames), file=sys.stderr)
    # Second pass (from the end so pop(i) is safe): rescue rejected files
    # whose basename matches an accepted file.
    while i >= 0:
        for j in range(0, len(accepted)):
            if len(dnames) > i and anames[j].split(".")[0] == dnames[i].split(".")[0]:
                accepted += [deleted[i]]
                anames += [dnames[i]]
                deleted.pop(i)
                dnames.pop(i)
        i -= 1
    # Remove the refused files from the temporary directory.
    deletedList = []
    for i in range(0, len(deleted)):
        try:
            shutil.os.unlink(dir + "/" + tmp[deleted[i]])
        except Exception:
            pass
        deletedList += [tmp[deleted[i]]]
    # Move the accepted files into the destination datastore directory.
    acceptedList = []
    for i in range(0, len(accepted)):
        print(i, file=sys.stderr)
        shutil.move(dir + "/" + tmp[accepted[i]],
                    conf["main"]["dataPath"] + "/dirs/" + inputs["dest"]["value"] + "/" + tmp[accepted[i]])
        acceptedList += [tmp[accepted[i]]]
    # Drop the cached mapfile so it is regenerated with the new content,
    # then clean up the (now empty) temporary directory. Both best-effort.
    try:
        shutil.os.unlink(conf["main"]["dataPath"] + "/dirs/" + inputs["dest"]["value"] + "/ds_ows.map")
    except Exception:
        pass
    try:
        shutil.os.rmdir(dir)
    except Exception:
        pass
    import json
    outputs["Result"]["value"] = json.dumps({"accepted": anames, "refused": dnames})
    return 3
|
import pickle

# Rewrite the machine-specific directory prefix of every image path stored in
# the pickled list of image pairs, in place (the same file is read, then
# rewritten).
new_dir = '/home/cail/Documents/causal-infogan/'
old_dir = '/home/thanard/Downloads/'
filename = "imgs_skipped_1.pkl"

# Use context managers so the handles are closed even if load/dump raises
# (the original leaked the read handle on error).
with open(filename, "rb") as f:
    hs = pickle.load(f)

nhs = []
for h in hs:
    # Each element is a pair of (path, index) pairs; only the paths change.
    k = ((h[0][0].replace(old_dir, new_dir), h[0][1]),
         (h[1][0].replace(old_dir, new_dir), h[1][1]))
    nhs.append(k)

with open(filename, "wb") as f:
    pickle.dump(nhs, f)
|
"""
Leakage Resilient Primitive (AN12304).
NOTE: This implementation is suitable only for use on PCD side (the device which reads/interacts with the NFC tag).
You shouldn't use this code on PICC (NFC tag/card) side and it shouldn't be ported to JavaCards or similar,
because in such case it may be not resistant to the side channel attacks.
"""
import binascii
import io
import os
import struct
from typing import Generator, List, Union, Tuple
from Crypto.Cipher import AES
from Crypto.Protocol.SecretSharing import _Element
from Crypto.Util.Padding import unpad
from Crypto.Util.strxor import strxor
from comm import require, BaseComm, CommMode
def remove_pad(pt: bytes):
    """Strip bit padding (a 0x80 marker followed by zero bytes) from *pt*.

    :param pt: padded plaintext
    :return: plaintext with the trailing ``80 00...00`` padding removed
    :raises RuntimeError: if a non-zero byte precedes the marker, or the
        0x80 marker is missing entirely
    """
    padl = 0
    for b in pt[::-1]:
        padl += 1
        if b == 0x80:
            # Found the marker; it and the zeros after it are padding.
            return pt[:-padl]
        if b != 0x00:
            raise RuntimeError('Invalid padding')
    # BUG FIX: ran off the start without seeing 0x80 — previously an all-zero
    # (or empty) input was silently accepted and returned b"".
    raise RuntimeError('Invalid padding')
def nibbles(x: Union[bytes, str]) -> Generator[int, None, None]:
    """Yield the 4-bit values of *x*, most significant nibble first (m = 4).

    Bytes input is converted to its hex representation first, so every byte
    contributes two nibbles; a string is treated as hex digits directly.
    """
    hex_digits = x.hex() if isinstance(x, bytes) else x
    for digit in hex_digits:
        yield int(digit, 16)
def incr_counter(r: bytes):
    """Return *r* incremented by one as a fixed-width big-endian counter,
    wrapping around to all zeros on overflow."""
    width = len(r)
    value = int.from_bytes(r, byteorder='big', signed=False) + 1
    if value >= 1 << (8 * width):
        # Overflow: wrap the counter back to zero.
        return b"\x00" * width
    return value.to_bytes(width, byteorder='big')
def e(k: bytes, v: bytes) -> bytes:
    """Encrypt *v* (one or more blocks) under key *k* with raw AES/ECB."""
    return AES.new(k, AES.MODE_ECB).encrypt(v)
def d(k: bytes, v: bytes) -> bytes:
    """Decrypt *v* (one or more blocks) under key *k* with raw AES/ECB."""
    return AES.new(k, AES.MODE_ECB).decrypt(v)
class LRP:
    """Leakage Resilient Primitive (LRP) per NXP AN12304, with m = 4
    (nibble-sized multiplexing) throughout.

    Provides LRICB encryption/decryption with an internal counter and
    CMAC-LRP message authentication.
    """

    def __init__(self, key: bytes, u: int, r: bytes = None, pad: bool = True):
        """
        Leakage Resilient Primitive
        :param key: secret key from which updated keys will be derived
        :param u: number of updated key to use (counting from 0)
        :param r: IV/counter value (default: all zeros)
        :param pad: whether to use bit padding or no (default: True)
        """
        if r is None:
            r = b"\x00" * 16
        self.key = key
        self.u = u
        self.r = r      # current counter, advanced once per processed block
        self.pad = pad
        self.p = LRP.generate_plaintexts(key)       # secret plaintexts (Alg. 1)
        self.ku = LRP.generate_updated_keys(key)    # updated keys (Alg. 2)
        self.kp = self.ku[self.u]                   # the selected updated key

    @staticmethod
    def generate_plaintexts(k: bytes, m: int = 4) -> List[bytes]:
        """
        Algorithm 1
        """
        # Derive 2**m secret plaintexts from k via an AES chain:
        # advance h with 0x55-blocks, emit encryptions of 0xAA-blocks.
        h = k
        h = e(h, b"\x55" * 16)
        p = []
        for i in range(0, 2**m):
            p.append(e(h, b"\xaa" * 16))
            h = e(h, b"\x55" * 16)
        return p

    @staticmethod
    def generate_updated_keys(k: bytes, q: int = 4) -> List[bytes]:
        """
        Algorithm 2
        """
        # Same chaining scheme as Algorithm 1, but seeded with an 0xAA block.
        h = k
        h = e(h, b"\xaa" * 16)
        uk = []
        for i in range(0, q):
            uk.append(e(h, b"\xaa" * 16))
            h = e(h, b"\x55" * 16)
        return uk

    @staticmethod
    def eval_lrp(p: List[bytes], kp: bytes, x: Union[bytes, str], final: bool) -> bytes:
        """
        Algorithm 3 assuming m = 4
        """
        # Walk the nibbles of x, each one selecting which secret plaintext
        # to encrypt under the evolving key y.
        y = kp
        for x_i in nibbles(x):
            p_j = p[x_i]
            y = e(y, p_j)
        if final:
            # Finalization step: one extra encryption of the zero block.
            y = e(y, b"\x00" * 16)
        return y

    def encrypt(self, data: bytes) -> bytes:
        """
        LRICB encrypt and update counter (LRICBEnc)
        :param data: plaintext
        :return: ciphertext
        """
        ptstream = io.BytesIO()
        ctstream = io.BytesIO()
        ptstream.write(data)
        if self.pad:
            # Bit padding: 0x80 marker then zeros up to the block boundary.
            ptstream.write(b"\x80")
            while ptstream.getbuffer().nbytes % AES.block_size != 0:
                ptstream.write(b"\x00")
        elif ptstream.getbuffer().nbytes % AES.block_size != 0:
            raise RuntimeError("Parameter pt must have length multiple of AES block size.")
        elif ptstream.getbuffer().nbytes == 0:
            raise RuntimeError("Zero length pt not supported.")
        ptstream.seek(0)
        while True:
            block = ptstream.read(AES.block_size)
            if not len(block):
                break
            # Per-block key: evaluate LRP over the current counter value,
            # then encrypt the block under it and advance the counter.
            y = LRP.eval_lrp(self.p, self.kp, self.r, final=True)
            ctstream.write(e(y, block))
            self.r = incr_counter(self.r)
        return ctstream.getvalue()

    def decrypt(self, data: bytes) -> bytes:
        """
        LRICB decrypt and update counter (LRICBDecs)
        :param data: ciphertext
        :return: plaintext
        """
        ctstream = io.BytesIO()
        ctstream.write(data)
        ctstream.seek(0)
        ptstream = io.BytesIO()
        while True:
            block = ctstream.read(AES.block_size)
            if not len(block):
                break
            # Mirror of encrypt(): same per-block key schedule and counter.
            y = LRP.eval_lrp(self.p, self.kp, self.r, final=True)
            ptstream.write(d(y, block))
            self.r = incr_counter(self.r)
        pt = ptstream.getvalue()
        if self.pad:
            pt = remove_pad(pt)
        return pt

    def cmac(self, data: bytes) -> bytes:
        """
        Calculate CMAC_LRP
        (Huge thanks to @Pharisaeus for help with polynomial math.)
        :param data: message to be authenticated
        :return: CMAC result
        """
        stream = io.BytesIO(data)
        # Subkeys: k0 from LRP over the zero block; k1/k2 are k0 multiplied
        # by x and x^2 in GF(2^128), as in standard AES-CMAC subkeying.
        k0 = LRP.eval_lrp(self.p, self.kp, b"\x00" * 16, True)
        k1 = (_Element(k0) * _Element(2)).encode()
        k2 = (_Element(k0) * _Element(4)).encode()
        y = b"\x00" * AES.block_size
        while True:
            x = stream.read(AES.block_size)
            # Stop at the final block (short read, or full block that ends
            # exactly at the end of the data).
            if len(x) < AES.block_size or stream.tell() == stream.getbuffer().nbytes:
                break
            y = strxor(x, y)
            y = LRP.eval_lrp(self.p, self.kp, y, True)
        pad_bytes = 0
        if len(x) < AES.block_size:
            # Incomplete final block: bit-pad it and use the k2 subkey.
            pad_bytes = AES.block_size - len(x)
            x = x + b"\x80" + (b"\x00" * (pad_bytes - 1))
        y = strxor(x, y)
        if not pad_bytes:
            y = strxor(y, k1)
        else:
            y = strxor(y, k2)
        return LRP.eval_lrp(self.p, self.kp, y, True)
class AuthenticateLRP:
    """PCD-side state machine for the LRP mutual-authentication handshake."""

    def __init__(self, auth_key):
        self.auth_key = auth_key  # long-term authentication key
        self.rnda = None          # PCD challenge, generated in part1()
        self.rndb = None          # PICC challenge, received in part1()

    def init(self, key_no: bytes) -> bytes:
        """Build the C-APDU that starts LRP authentication for *key_no*."""
        return b"\x90\x71\x00\x00\x03" + key_no + b"\x01\x02\x00"

    def generate_rnda(self):
        # Fresh 16-byte random PCD challenge.
        return os.urandom(16)

    def part1(self, part1_resp: bytes) -> bytes:
        """Process the first R-APDU (PICC challenge) and build the second
        C-APDU carrying our challenge plus a MAC over both challenges."""
        require("R-APDU length", len(part1_resp) == 19)
        require("status code 91AF", part1_resp[-2:] == b"\x91\xAF")
        require("Auth mode = 01", part1_resp[0:1] == b"\x01")
        self.rndb = part1_resp[1:17]
        self.rnda = self.generate_rnda()
        # Derive the session crypto from both challenges.
        sv = lrp_gen_sv(self.rnda, self.rndb)
        crypto_macing, crypto_encing = lrp_get_crypto(self.auth_key, sv)
        pcd_resp = crypto_macing.cmac(self.rnda + self.rndb)
        return b"\x90\xAF\x00\x00\x20" + self.rnda + pcd_resp + b"\x00"

    def part2(self, part2_resp: bytes) -> 'CryptoCommLRP':
        """Verify the final R-APDU and return the established session."""
        # F4FC209D9D60623588B299FA5D6B2D710125F8547D9FB8D572C90D2C2A14E2359100
        require("R-APDU length", len(part2_resp) == 34)
        require("status code 9100", part2_resp[-2:] == b"\x91\x00")
        picc_data, picc_response = part2_resp[0:16], part2_resp[16:32]
        sv = lrp_gen_sv(self.rnda, self.rndb)
        # NOTE(review): debug prints leak key material to stdout — remove
        # before production use.
        print('auth key', self.auth_key.hex())
        print('sv', sv.hex())
        crypto_macing, crypto_encing = lrp_get_crypto(self.auth_key, sv)
        dec_picc_data = crypto_encing.decrypt(picc_data)
        # Authenticate the PICC: its MAC must match our own computation.
        require("generated PICCResponse == received PICCResponse",
                crypto_macing.cmac(self.rndb + self.rnda + picc_data) == picc_response)
        comm = CryptoCommLRP(crypto_macing, crypto_encing, ti=dec_picc_data[0:4], cmd_counter=1)
        # NOTE(review): cmd_counter is passed as 1 and immediately reset to 0;
        # the constructor argument looks redundant — confirm intent.
        comm.cmd_counter = 0
        return comm
def lrp_gen_sv(rnda, rndb):
    """Build the session vector (SV) used to derive the LRP session key.

    Layout (byte positions counted from the right, per the spec):
    ``00 01 00 80 | RndA[15:14] | RndA[13:8] xor RndB[15:10] | RndB[9:0] |
    RndA[7:0] | 96 69``
    """
    return b"".join([
        b"\x00\x01\x00\x80",
        rnda[0:2],
        strxor(rnda[2:8], rndb[0:6]),
        rndb[-10:],
        rnda[-8:],
        b"\x96\x69",
    ])
def lrp_get_crypto(key, sv):
    """Derive the session MACing and encryption LRP instances from the
    long-term *key* and the session vector *sv*."""
    master = LRP(key, 0)
    session_key = master.cmac(sv)
    # Updated key 0 authenticates; updated key 1 encrypts (4-byte counter,
    # no padding).
    macing = LRP(session_key, 0)
    encing = LRP(session_key, 1, r=b"\x00\x00\x00\x00", pad=False)
    return macing, encing
class CryptoCommLRP(BaseComm):
    """
    This class represents an authenticated session after AuthentivateEV2 command.
    It offers the ability to prepare APDUs for CommMode.MAC or CommMode.FULL and validate R-APDUs in these modes.
    """

    def __init__(self, crypto_macing,
                 crypto_encing,
                 *,
                 ti: bytes = None,
                 cmd_counter: int = 0,
                 pdcap2: bytes = None,
                 pcdcap2: bytes = None):
        self.crypto_macing = crypto_macing    # session MACing primitive
        self.crypto_encing = crypto_encing    # session encryption primitive
        self.ti = ti                          # transaction identifier
        self.cmd_counter = cmd_counter        # command counter
        self.pdcap2 = pdcap2
        self.pcdcap2 = pcdcap2

    def calc_raw_data(self, data: bytes) -> bytes:
        """
        Calculate CMAC for raw data.
        :param data: raw data
        :return: CMAC
        """
        full_mac = self.crypto_macing.cmac(data)
        # Truncated MAC: keep only the odd-indexed bytes (1, 3, ..., 15).
        return full_mac[1::2]

    def perform_enc(self, plaintext):
        """Encrypt *plaintext* with the session encryption primitive."""
        return self.crypto_encing.encrypt(plaintext)

    def perform_dec(self, ciphertext):
        """Decrypt *ciphertext* with the session encryption primitive."""
        return self.crypto_encing.decrypt(ciphertext)
# Public API of this module.
__all__ = ['LRP', 'AuthenticateLRP', 'CryptoCommLRP']
|
# Code to handle DMD related operations
from speck_rem.holography import *
def main():
    # Intentional no-op: this module is used as a library; the entry point
    # exists only so the __main__ guard at the bottom has something to call.
    pass
class Mask(Image):
    """
    Class to handle the pattern masks to be projected onto the DMD
    """
    def __init__(self, width=1920, height=1080, pitch=7.6e-6):
        # DMD panel geometry: resolution in pixels and mirror pitch
        # (presumably meters — TODO confirm units against the Image base).
        self.width = width
        self.height = height
        self.pitch = pitch
        super(Mask, self).__init__()

    def plane_phase(self, period, theta):
        """
        Compute a plane wave phase with a specified angle with the horizontal. This gets represented in the tilt of the
        fringes produced.
        :param period: fringe period in pixels
        :param theta: tilt angle in radians
        :return: numpy array with the phase
        """
        # Pixel-center coordinate grids (hence the +1/2 offset).
        u, v = np.meshgrid(np.linspace(0, self.width, self.width, endpoint=False) + 1/2,
                           np.linspace(0, self.height, self.height, endpoint=False) + 1/2)
        # cos/sin are rounded to two decimals — presumably to snap near-axis
        # angles exactly onto the pixel grid; TODO confirm intent.
        phase = (u * np.round(np.cos(theta), decimals=2) + v * np.round(np.sin(theta), decimals=2))
        return (2.0 * math.pi / period) * phase

    def compute_plane_mask(self, period, theta):
        """
        Compute the mask for a tilted plane with an specific frequency and rotation
        :param period: in pixels
        :param theta: angle in radians
        :return: None
        """
        phase = self.plane_phase(period, theta)
        # Binarized fringe pattern: values are exactly 0.0 or 1.0.
        self.image_array = 1/2 + 1/2 * np.sign(np.sin(phase))

    def compute_fairness_constraint_mask(self, period, theta, pattern, grain):
        """
        Compute a random mask under the fairness constraint sampling with a specified block/grain size
        :param period: in pixels
        :param theta: angle in radians
        :param pattern: numpy array with the sampling under fcn generated from another function
        :param grain: size of individual elements in pixels
        :return: None
        """
        phase = self.plane_phase(period, theta)
        # Expand each pattern element to a grain x grain block of pixels.
        large_pattern = pattern.repeat(grain, axis=0).repeat(grain, axis=1)
        # WARNING: hard-coded to pad to DMD size (1920 x 1080)
        large_pattern = np.pad(large_pattern, ((28, 28), (448, 448)), 'constant', constant_values=(0))
        # Pattern elements equal to 1 shift the fringe phase by pi.
        self.image_array = 1 / 2 + 1 / 2 * np.sign(np.sin(phase - large_pattern*np.pi))

    def compute_random_mask(self, period, theta, grain):
        """
        Compute a random mask with a specified block/grain size
        :param period: in pixels
        :param theta: angle in radians
        :param grain: size of individual elements in pixels
        :return: None
        """
        phase = self.plane_phase(period, theta)
        # WARNING: resizing is done with magic numbers (1080-pixel square
        # working area centered on the 1920-wide panel).
        window = int(np.ceil(1080/grain))
        pattern = np.random.randint(2, size=( window, window))
        large_pattern = pattern.repeat(grain, axis=0).repeat(grain, axis=1)
        large_pattern = large_pattern[0:1080, 0:1080]  # Forced-fix size inconsistencies
        large_pattern = np.pad(large_pattern, ((0, 0), (420,420)), 'constant', constant_values=(0))  # Pad to DMD size
        self.image_array = 1 / 2 + 1 / 2 * np.sign(np.sin(phase - large_pattern * np.pi))
# Running this module directly is currently a no-op (main() does nothing).
if __name__ == "__main__":
    main()
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyFlitCore(Package):
    """Distribution-building parts of Flit."""

    homepage = "https://github.com/takluyver/flit"
    # The URL points at the published wheel, not an sdist (see install()).
    url = "https://pypi.io/packages/py3/f/flit-core/flit_core-3.3.0-py3-none-any.whl"

    maintainers = ['takluyver']

    # expand=False keeps the downloaded .whl intact instead of unpacking it.
    version('3.3.0', sha256='9b247b3095cb3c43933a59a7433f92ddfdd7fc843e08ef0f4550d53a9cfbbef6', expand=False)

    extends('python')
    depends_on('python@3.4:', type=('build', 'run'))
    depends_on('py-pip', type='build')
    depends_on('py-toml', type=('build', 'run'))

    def install(self, spec, prefix):
        # Install wheel instead of installing from source
        # to prevent circular dependency on flit
        pip = which('pip')
        pip('install', self.stage.archive_file, '--prefix={0}'.format(prefix))
|
#coding:utf-8
#
# id: bugs.core_5783
# title: execute statement ignores the text of the SQL-query after a comment of the form "-"
# decription:
# We concatenate query from several elements and use '
# ' delimiter only to split this query into lines.
# Also, we put single-line comment in SEPARATE line between 'select' and column/value that is obtained from DB.
# Final query will look like this (lines are separated only by SINGLE delimiter, ascii_char(13), NO '
# ' here!):
# ===
# select
# -- comment N1
# 'foo' as msg'
# from
# -- comment N2
# rdb$database
# ===
# This query should NOT raise any exception and must produce normal output (string 'foo').
# Thanks to hvlad for suggestions.
#
# Confirmed bug on:
# 3.0.4.32924
# 4.0.0.918
# -- got:
# Error while preparing SQL statement:
# - SQLCODE: -104
# - Dynamic SQL Error
# - SQL error code = -104
# - Unexpected end of command - line 1, column 1
# -104
# 335544569
# Checked on:
# 3.0.4.32941: OK, 1.187s.
# 4.0.0.947: OK, 1.328s.
#
# tracker_id: CORE-5783
# min_versions: ['3.0.4']
# versions: 3.0.4
# qmid: None
import pytest
from firebird.qa import db_factory, python_act, Action
# version: 3.0.4
# resources: None
# No output substitutions are needed when comparing expected vs. actual.
substitutions_1 = []

# The test database starts empty.
init_script_1 = """"""

db_1 = db_factory(sql_dialect=3, init=init_script_1)

# test_script_1
#---
# import sys
# import os
#
# cur = db_conn.cursor()
#
# # NB: one need to use TWO backslash characters ('\\r') as escape for CR only within fbtest.
# # Single '' should be used when running under "pure" Python control:
#
# sql_expr = ' '.join( ('select', '\\r', '-- comment N1', '\\r', "'foo' as msg", '\\r', 'from', '\\r', '-- comment N2', '\\r', 'rdb$database') )
#
# for i in sql_expr.split('\\r'):
# print('Query line: ' + i)
#
# #sql_expr = 'select 1 FROM test'
# cur.execute( sql_expr )
# for r in cur:
# print( 'Query result: ' + r[0] )
#
# cur.close()
#
#
#---

act_1 = python_act('db_1', substitutions=substitutions_1)

# Expected: each CR-separated query line echoed, then the query result —
# i.e. the CR-embedded comments must not break statement preparation.
expected_stdout_1 = """
Query line: select
Query line: -- comment N1
Query line: 'foo' as msg
Query line: from
Query line: -- comment N2
Query line: rdb$database
Query result: foo
"""
@pytest.mark.version('>=3.0.4')
def test_1(act_1: Action, capsys):
    """CORE-5783: a '--' comment followed by a bare CR (no LF) must not
    truncate the statement during preparation."""
    # Lines are separated by a single CR only — the condition that triggered
    # the original bug.
    sql_expr = "select \r -- comment N1 \r 'foo' as msg \r from \r -- comment N2 \r rdb$database"
    with act_1.db.connect() as con:
        cur = con.cursor()
        for chunk in sql_expr.split('\r'):
            print(f'Query line: {chunk}')
        cur.execute(sql_expr)
        for rec in cur:
            print(f'Query result: {rec[0]}')
    #
    act_1.expected_stdout = expected_stdout_1
    act_1.stdout = capsys.readouterr().out
    assert act_1.clean_stdout == act_1.clean_expected_stdout
|
import os
import numpy as np
import sys
from random import shuffle
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)

## Download data if necessary
DATA_DIR = os.path.join(BASE_DIR, 'data')
if not os.path.exists(DATA_DIR):
    os.mkdir(DATA_DIR)

# Download ShapeNet point clouds
if not os.path.exists(os.path.join(DATA_DIR, 'ShapeNet7')):
    # NOTE(review): shelling out with os.system assumes wget/unzip exist and
    # is injection-prone in general; tolerable here only because the URLs are
    # constants. zipfile[:-4] strips the ".zip" suffix.
    www = 'https://www.dropbox.com/s/nlcswrxul1ymypw/ShapeNet7.zip'
    zipfile = os.path.basename(www)
    os.system('wget %s; unzip %s' % (www, zipfile))
    os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
    os.system('rm %s' % (zipfile))

# Download ShapeNet renderings and the shape ids matching ShapeNet7
if not os.path.exists(os.path.join(DATA_DIR, 'ShapeNetRenderings')):
    www = 'https://www.dropbox.com/s/vx3ky2ttienxh2x/ShapeNetRenderings.zip'
    zipfile = os.path.basename(www)
    os.system('wget %s; unzip %s' % (www, zipfile))
    os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
    os.system('rm %s' % (zipfile))
def loadPC(file_path, num_pt):
    """Load a point-cloud array and keep the first *num_pt* points per cloud.

    :param file_path: path to a .npy file of shape (n, num_point, 3)
    :param num_pt: number of points to retain per cloud
    :return: array of shape (n, num_pt, 3)
    """
    clouds = np.load(file_path)
    return clouds[:, :num_pt, :]
def shuffleData(points, labels):
    """Shuffle point clouds and their labels with the same permutation.

    :param points: integer-list-indexable array of n clouds (e.g. ndarray)
    :param labels: array of length n, aligned with points
    :return: (shuffled_points, shuffled_labels)
    """
    # BUG FIX: range() is immutable in Python 3, so random.shuffle(range(...))
    # raised TypeError; materialize the indices as a list first.
    indices = list(range(len(points)))
    shuffle(indices)
    return points[indices], labels[indices]
def shuffleConditionData(points, conditions, labels):
    """Shuffle point clouds, their conditions and labels with the same
    permutation.

    :param points: integer-list-indexable array of n clouds (e.g. ndarray)
    :param conditions: array of length n, aligned with points
    :param labels: array of length n, aligned with points
    :return: (shuffled_points, shuffled_conditions, shuffled_labels)
    """
    # BUG FIX: range() is immutable in Python 3, so random.shuffle(range(...))
    # raised TypeError; materialize the indices as a list first.
    indices = list(range(len(points)))
    shuffle(indices)
    return points[indices], conditions[indices], labels[indices]
def voxelizeData(points, vol_dim=200):
    """Voxelize point clouds.

    Input:
      points: (n, num_pt, 3) coordinates in [0.0, 1.0], ordered as (z, y, x)
      vol_dim: the number of bins to discretize point coordinates (or the
               dimension of a volume).
    Return:
      Voxel indices in [0, vol_dim - 1] as int32; the input array itself
      is left unmodified.
    """
    bin_size = 1.0 / vol_dim
    # Scale into bin units, clamp to the valid index range, then truncate.
    scaled = np.clip(points / bin_size, 0, vol_dim - 1)
    return scaled.astype(np.int32)
##################################################
######### For farthest point sampling ############
##################################################
def calTriangleArea(p1, p2, p3):
    """Return the area of the triangle with vertices *p1*, *p2*, *p3*
    (each an indexable 3-vector), via half the cross-product magnitude."""
    edge_a = np.array(p2) - np.array(p1)
    edge_b = np.array(p3) - np.array(p1)
    return 0.5 * np.linalg.norm(np.cross(edge_a, edge_b))
def samplePoint(p1, p2, p3):
    """Sample a uniformly distributed random point inside triangle
    (p1, p2, p3) using sqrt-based barycentric coordinates.

    :param p1: triangle vertex, indexable of length 3
    :param p2: triangle vertex, indexable of length 3
    :param p3: triangle vertex, indexable of length 3
    :return: (x, y, z) coordinates of the sampled point
    """
    # BUG FIX: this module never imported math nor random's random(), so
    # calling this function raised NameError; import them locally.
    import math
    from random import random

    r1 = random()
    r2 = random()
    # The three coefficients always sum to 1 (barycentric weights).
    coef_p1 = 1.0 - math.sqrt(r1)
    coef_p2 = math.sqrt(r1) * (1.0 - r2)
    coef_p3 = math.sqrt(r1) * r2
    x = p1[0] * coef_p1 + p2[0] * coef_p2 + p3[0] * coef_p3
    y = p1[1] * coef_p1 + p2[1] * coef_p2 + p3[1] * coef_p3
    z = p1[2] * coef_p1 + p2[2] * coef_p2 + p3[2] * coef_p3
    return x, y, z
def calc_distances(p0, points):
    """Squared Euclidean distance from point *p0* to each row of *points*."""
    diff = p0 - points
    return (diff ** 2).sum(axis=1)
def farthestPointSampler(pts, K):
    """Greedy farthest-point sampling seeded with the first point.

    pts: (num_pt, 3) array of 3-D points.
    K: number of points to select.
    Returns (sampled_points, sample_distances) where sample_distances holds,
    for each selected point, its squared distances to every input point.
    """
    sample_distances = []
    # NOTE: the output is hard-coded to 3-D points, matching the module's data.
    chosen = np.zeros((K, 3))
    chosen[0] = pts[0]
    nearest = calc_distances(chosen[0], pts)
    sample_distances.append(nearest)
    for idx in range(1, K):
        # Pick the point farthest from the current sample set.
        chosen[idx] = pts[np.argmax(nearest)]
        dist_to_new = calc_distances(chosen[idx], pts)
        sample_distances.append(dist_to_new)
        # Track, per input point, the distance to its closest sampled point.
        nearest = np.minimum(nearest, dist_to_new)
    return chosen, sample_distances
##################################################
##################################################
|
"""Handler.py : Concatenate individual datasets into one dataset"""
import pandas as pd

# Load the three individual event datasets.
geo_conflicts = pd.read_csv('app/data/events/geoConflicts.csv')
us_events = pd.read_csv('app/data/events/usEvents.csv')
gnd = pd.read_csv('app/data/events/GND.csv')

# Stack them into a single frame and write the combined dataset out.
merged = pd.concat([geo_conflicts, us_events, gnd])
filename = "app/data/events/dataset.csv"
merged.to_csv(filename, mode='w', index=False)
|
import tools
import numpy as np
import pickle
import pdb
import os
import view
from sklearn.svm import SVC
from sklearn.metrics.pairwise import pairwise_kernels
# NOTE(review): sklearn.svm.libsvm is a private module removed from modern
# scikit-learn; this import effectively pins an old sklearn version.
from sklearn.svm.libsvm import decision_function

# CREATE RESULT FOLDER
if not os.path.exists("results"):
    os.makedirs("results")

# READ DATA
print("LOAD/READ DATA")
xtrain,ytrain,xtest,ytest = tools.read_data()
# Restrict the task to binary classification between these two digits.
numbers = [4,9]
x,y = tools.choose_numbers(xtrain,ytrain,numbers)
xt,yt = tools.choose_numbers(xtest,ytest,numbers)
print("LOAD/READ DATA --- DONE!")

# TRAIN SVM
clf = SVC()
clf.fit(x, y)

# GENERATE RANDOM SAMPLES
samplesize = 5000
samples = np.random.uniform(-1.,1.,(samplesize,len(x[0])))#np.random.uniform(0.,1.,(samplesize,len(x[0])))

# INSTANCE-BASED MFI
print("COMPUTE INSTANCE-BASED MFI")
# Indices of the training instances whose relevances are computed.
C = np.array([0,5,77,113])
N=len(C)
ibr = []
# Toggle: recompute the relevances, or reload them from test.pkl.
compute = True
if compute:
    for i in range(N):
        # Relevance is signed by the decision value of the explained instance.
        # NOTE(review): passing a 1-D sample to decision_function relies on
        # old sklearn accepting unshaped single samples — confirm version.
        ibr.append(np.sign(clf.decision_function(x[C[i]]))*tools.mfi_ib(clf, 'decision_function', samples, x[C[i]]))
    fobj = open('test.pkl','wb')
    pickle.dump(ibr,fobj)
    fobj.close()
else:
    fobj = open('test.pkl','rb')
    ibr = pickle.load(fobj)
    fobj.close()

# PIXEL FLIPPING
#flibos = []
#for i in range(len(ibr)):
# flibos.append(tools.flibo_relation_rep(clf,x[C[i]],ibr[i],N=100,rep=5))
#print flibos

view.PLOTnum3(ibr,"results/mfi_ibr.png",x[C],clf.decision_function(x[C]))

# GENERATE RANDOM SAMPLES
print("COMPUTE MODEL-BASED MFI")
# Fewer samples for the (more expensive) model-based variant.
samplesize = 1000
samples = np.random.uniform(-1.,1.,(samplesize,len(x[0])))#np.random.uniform(0.,1.,(samplesize,len(x[0])))
metric = 'rbf'

# MODEL-BASED MFI
mbr = tools.mfi_mb(clf.decision_function(samples), samples, metric, degree=2)
#view.PLOTnum(mbr,"results/mfi_mbr.pdf",np.reshape(np.mean(x,axis=0),(16,16)))
view.PLOTnum3([mbr],"results/mfi_mbr.png",[np.mean(x[y==numbers[0]],axis=0)])
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
class CixClient(Client):
"""Cix
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
    def __init__(self, base_url=None, creds=None):
        super(CixClient, self).__init__(base_url, creds)
        # Register every model class exported by the generated models module
        # with the request/response (de)serializers.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

    # Resource area GUID; set to None in this generated client.
    resource_area_identifier = None
def get_configurations(self, project, repository_type=None, repository_id=None, branch=None, service_connection_id=None):
"""GetConfigurations.
[Preview API] Gets a list of existing configuration files for the given repository.
:param str project: Project ID or project name
:param str repository_type: The type of the repository such as GitHub, TfsGit (i.e. Azure Repos), Bitbucket, etc.
:param str repository_id: The vendor-specific identifier or the name of the repository, e.g. Microsoft/vscode (GitHub) or e9d82045-ddba-4e01-a63d-2ab9f040af62 (Azure Repos)
:param str branch: The repository branch where to look for the configuration file.
:param str service_connection_id: If specified, the ID of the service endpoint to query. Can only be omitted for providers that do not use service endpoints, e.g. TfsGit (i.e. Azure Repos).
:rtype: [ConfigurationFile]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if repository_type is not None:
query_parameters['repositoryType'] = self._serialize.query('repository_type', repository_type, 'str')
if repository_id is not None:
query_parameters['repositoryId'] = self._serialize.query('repository_id', repository_id, 'str')
if branch is not None:
query_parameters['branch'] = self._serialize.query('branch', branch, 'str')
if service_connection_id is not None:
query_parameters['serviceConnectionId'] = self._serialize.query('service_connection_id', service_connection_id, 'str')
response = self._send(http_method='GET',
location_id='8fc87684-9ebc-4c37-ab92-f4ac4a58cb3a',
version='5.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[ConfigurationFile]', self._unwrap_collection(response))
def create_project_connection(self, create_connection_inputs, project):
"""CreateProjectConnection.
[Preview API] Creates a new Pipeline connection between the provider installation and the specified project. Returns the PipelineConnection object created.
:param :class:`<CreatePipelineConnectionInputs> <azure.devops.v5_1.cix.models.CreatePipelineConnectionInputs>` create_connection_inputs:
:param str project:
:rtype: :class:`<PipelineConnection> <azure.devops.v5_1.cix.models.PipelineConnection>`
"""
query_parameters = {}
if project is not None:
query_parameters['project'] = self._serialize.query('project', project, 'str')
content = self._serialize.body(create_connection_inputs, 'CreatePipelineConnectionInputs')
response = self._send(http_method='POST',
location_id='00df4879-9216-45d5-b38d-4a487b626b2c',
version='5.1-preview.1',
query_parameters=query_parameters,
content=content)
return self._deserialize('PipelineConnection', response)
def get_detected_build_frameworks(self, project, repository_type=None, repository_id=None, branch=None, detection_type=None, service_connection_id=None):
"""GetDetectedBuildFrameworks.
[Preview API] Returns a list of build frameworks that best match the given repository based on its contents.
:param str project: Project ID or project name
:param str repository_type: The type of the repository such as GitHub, TfsGit (i.e. Azure Repos), Bitbucket, etc.
:param str repository_id: The vendor-specific identifier or the name of the repository, e.g. Microsoft/vscode (GitHub) or e9d82045-ddba-4e01-a63d-2ab9f040af62 (Azure Repos)
:param str branch: The repository branch to detect build frameworks for.
:param str detection_type:
:param str service_connection_id: If specified, the ID of the service endpoint to query. Can only be omitted for providers that do not use service endpoints, e.g. TfsGit (i.e. Azure Repos).
:rtype: [DetectedBuildFramework]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if repository_type is not None:
query_parameters['repositoryType'] = self._serialize.query('repository_type', repository_type, 'str')
if repository_id is not None:
query_parameters['repositoryId'] = self._serialize.query('repository_id', repository_id, 'str')
if branch is not None:
query_parameters['branch'] = self._serialize.query('branch', branch, 'str')
if detection_type is not None:
query_parameters['detectionType'] = self._serialize.query('detection_type', detection_type, 'str')
if service_connection_id is not None:
query_parameters['serviceConnectionId'] = self._serialize.query('service_connection_id', service_connection_id, 'str')
response = self._send(http_method='GET',
location_id='29a30bab-9efb-4652-bf1b-9269baca0980',
version='5.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[DetectedBuildFramework]', self._unwrap_collection(response))
def get_template_recommendations(self, project, repository_type=None, repository_id=None, branch=None, service_connection_id=None):
"""GetTemplateRecommendations.
[Preview API] Returns a list of all YAML templates with weighting based on which would best fit the given repository.
:param str project: Project ID or project name
:param str repository_type: The type of the repository such as GitHub, TfsGit (i.e. Azure Repos), Bitbucket, etc.
:param str repository_id: The vendor-specific identifier or the name of the repository, e.g. Microsoft/vscode (GitHub) or e9d82045-ddba-4e01-a63d-2ab9f040af62 (Azure Repos)
:param str branch: The repository branch which to find matching templates for.
:param str service_connection_id: If specified, the ID of the service endpoint to query. Can only be omitted for providers that do not use service endpoints, e.g. TfsGit (i.e. Azure Repos).
:rtype: [Template]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if repository_type is not None:
query_parameters['repositoryType'] = self._serialize.query('repository_type', repository_type, 'str')
if repository_id is not None:
query_parameters['repositoryId'] = self._serialize.query('repository_id', repository_id, 'str')
if branch is not None:
query_parameters['branch'] = self._serialize.query('branch', branch, 'str')
if service_connection_id is not None:
query_parameters['serviceConnectionId'] = self._serialize.query('service_connection_id', service_connection_id, 'str')
response = self._send(http_method='GET',
location_id='63ea8f13-b563-4be7-bc31-3a96eda27220',
version='5.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[Template]', self._unwrap_collection(response))
def create_resources(self, creation_parameters, project):
"""CreateResources.
[Preview API]
:param {ResourceCreationParameter} creation_parameters:
:param str project: Project ID or project name
:rtype: :class:`<CreatedResources> <azure.devops.v5_1.cix.models.CreatedResources>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
content = self._serialize.body(creation_parameters, '{ResourceCreationParameter}')
response = self._send(http_method='POST',
location_id='43201899-7690-4870-9c79-ab69605f21ed',
version='5.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('CreatedResources', response)
def render_template(self, template_parameters, template_id):
"""RenderTemplate.
[Preview API]
:param :class:`<TemplateParameters> <azure.devops.v5_1.cix.models.TemplateParameters>` template_parameters:
:param str template_id:
:rtype: :class:`<Template> <azure.devops.v5_1.cix.models.Template>`
"""
route_values = {}
if template_id is not None:
route_values['templateId'] = self._serialize.url('template_id', template_id, 'str')
content = self._serialize.body(template_parameters, 'TemplateParameters')
response = self._send(http_method='POST',
location_id='eb5d6d1d-98a2-4bbd-9028-f9a6b2d66515',
version='5.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('Template', response)
|
"""libdarknet module.
Low level module for interacting with ```libdarknet.so``` static library.
Every module should have a docstring at the very top of the file. The
module's docstring may extend over multiple lines. If your docstring does
extend over multiple lines, the closing three quotation marks must be on
a line by itself, preferably preceded by a blank line.
"""
import ctypes
import os
from cached_property import cached_property
from ..config import config
from ..utils import chroot
from .structs import Detection
from .structs import Image
from .structs import Metadata
class Libdarknet:
    """Class for libdarknet.so shared library.
    If the class has public attributes, they may be documented here
    in an ``Attributes`` section and follow the same formatting as a
    function's ``Args`` section. Alternatively, attributes may be documented
    inline with the attribute's declaration (see __init__ method below).
    Properties created with the ``@property`` decorator should be documented
    in the property's getter method.
    Attributes
    ----------
    root : str, optional
        Root directory for darknet. Default: ~/.darknet
    weight_dir : str, optional
        Directory that holds downloaded weight files.
    Adapted From:
    https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_numpy.html
    # Documentation is Attribution-NonCommercial-ShareAlike 4.0
    # International (CC BY-NC-SA 4.0)
    # Copyright (c) 2018, Jed Frey.
    """
    def __init__(self, root=None, weight_dir=None, shared_lib="libdarknet.so"):
        r"""Initialize a libdarknet object.
        Parameters
        ----------
        root : str, optional
            Darknet root directory; falls back to config["darknet"]["root"].
        weight_dir : str, optional
            Directory containing weight files; falls back to
            config["darknet"]["weight_dir"].
        shared_lib : str, optional
            File name of the darknet shared library, resolved under ``root``.
        """
        if root is None:
            root = config["darknet"]["root"]
        if weight_dir is None:
            weight_dir = config["darknet"]["weight_dir"]
        self.root = os.path.abspath(root)
        self.weight_dir = os.path.abspath(weight_dir)
        self.shared_lib = shared_lib
    @property
    def _lib(self):
        """Return path to the darknet shared library."""
        return os.path.abspath(os.path.join(self.root, self.shared_lib))
    @property
    def exists(self):
        """Determine if shared library exists."""
        return os.path.exists(self._lib)
    @cached_property
    def lib(self):
        """Return CDLL shared library (loaded once, then cached)."""
        # RTLD_GLOBAL so darknet's internal symbols resolve across modules.
        lib_ = ctypes.CDLL(self._lib, ctypes.RTLD_GLOBAL)
        lib_.network_width.argtypes = [ctypes.c_void_p]
        lib_.network_width.restype = ctypes.c_int
        lib_.network_height.argtypes = [ctypes.c_void_p]
        lib_.network_height.restype = ctypes.c_int
        return lib_
    @chroot
    def get_metadata(self, path):
        """Get metadata (e.g. class names) from a darknet .data file path."""
        path = os.path.abspath(path)
        self.lib.get_metadata.argtypes = [ctypes.c_char_p]
        self.lib.get_metadata.restype = Metadata
        metadata_ = ctypes.c_char_p(path.encode("UTF-8"))
        metadata = self.lib.get_metadata(metadata_)
        return metadata
    @chroot
    def load_network(self, cfg_file, weight_file, clear=0):
        """Load a darknet network.
        Parameters
        ----------
        cfg_file : str
            Config file to load. Relative paths are relative to the
            darknet directory.
        weight_file : str
            Weight file to load. Relative paths are relative to the
            darknet directory.
        clear : int
            Clear network.
        Returns
        -------
        network_ptr: ctypes.c_void_p
            Pointer to the loaded network.
        """
        cfg_file = os.path.abspath(cfg_file)
        weight_file = os.path.abspath(weight_file)
        assert os.path.exists(cfg_file), "Configuration file not found."
        assert os.path.exists(weight_file), "Weight file not found."
        # Specify the shared library argument types.
        self.lib.load_network.argtypes = [
            ctypes.c_char_p,
            ctypes.c_char_p,
            ctypes.c_int,
        ]
        # Specify the shared library return type.
        self.lib.load_network.restype = ctypes.c_void_p
        # Cast the inputs to their ctypes.
        cfg_file_ = ctypes.c_char_p(cfg_file.encode("UTF-8"))
        weight_file_ = ctypes.c_char_p(weight_file.encode("UTF-8"))
        clear_ = ctypes.c_int(clear)
        # Return the output of load_network.
        return self.lib.load_network(cfg_file_, weight_file_, clear_)
    @chroot
    def load_image_color(self, path, width=0, height=0, colors=0):
        """Load an image from a path.
        Parameters
        ----------
        path : str
            Image file to load. Relative paths are relative to the
            darknet directory.
        width : int
            Image width
        height : int
            Image height.
        colors : int
            Number of image colors.
        Returns
        -------
        img : Image
            Loaded image structure.
        """
        path = os.path.abspath(path)
        assert os.path.exists(path)
        load_image_ = self.lib.load_image_color
        load_image_.argtypes = [
            ctypes.c_char_p,
            ctypes.c_int,
            ctypes.c_int,
            ctypes.c_int,
        ]
        load_image_.restype = Image
        path_ = ctypes.c_char_p(path.encode("UTF-8"))
        width_ = ctypes.c_int(width)
        height_ = ctypes.c_int(height)
        colors_ = ctypes.c_int(colors)
        img = load_image_(path_, width_, height_, colors_)
        return img
    @chroot
    def network_predict_image(self, network_ptr, image):
        """Run the network's forward pass on a darknet image.
        Parameters
        ----------
        network_ptr : ctypes.c_void_p
            Pointer to darknet network object.
        image: Image
            Darknet Image.
        Returns
        -------
        ctypes.POINTER(ctypes.c_float)
            Raw prediction buffer owned by the network.
        """
        self.lib.network_predict_image.argtypes = [ctypes.c_void_p, Image]
        self.lib.network_predict_image.restype = ctypes.POINTER(ctypes.c_float)
        return self.lib.network_predict_image(network_ptr, image)
    @chroot
    def get_network_boxes(self, network, image, threshold=0.5, heir_thresh=0.5):
        """Get network boxes for a given image classified by network.
        Parameters
        ----------
        network : ctypes.c_void_p
            Pointer to darknet network object.
        image : Image
            Darknet Image object (already run through the network).
        threshold : float
            Detection confidence threshold.
        heir_thresh : float
            Hierarchical threshold passed through to darknet.
        Returns
        -------
        (num, dets) : tuple
            Number of detections and a ctypes.POINTER(Detection) array;
            free with free_detections().
        C Definition of ```get_network_boxes```
        get_network_boxes(
            network *net,
            int w,
            int h,
            float
            thresh,
            float hier,
            int *map,
            int relative,
            int *num)
        """
        # Alias C-function.
        get_network_boxes_ = self.lib.get_network_boxes
        # Argument and return types:
        get_network_boxes_.argtypes = [
            ctypes.c_void_p,
            ctypes.c_int,
            ctypes.c_int,
            ctypes.c_float,
            ctypes.c_float,
            ctypes.POINTER(ctypes.c_int),
            ctypes.c_int,
            ctypes.POINTER(ctypes.c_int),
        ]
        get_network_boxes_.restype = ctypes.POINTER(Detection)
        # Create a number and pointer to that number for pass by ref.
        num = ctypes.c_int(0)
        pnum = ctypes.pointer(num)
        # Get the detections and
        dets = get_network_boxes_(
            network, image.w, image.h, threshold, heir_thresh, None, 0, pnum
        )
        return num.value, dets
    @chroot
    def get_labels(self, path):
        """Get labels from a path.
        Parameters
        ----------
        path : str
            Label file to load. Relative paths are relative to the
            darknet directory.
        Returns
        -------
        labels : ctypes.POINTER(ctypes.c_char_p)
            List of strings of labels.
        """
        path = os.path.abspath(path)
        assert os.path.exists(path), "Label file does not exist"
        get_labels_ = self.lib.get_labels
        # Set ctypes argument & return types.
        get_labels_.argtypes = [ctypes.c_char_p]
        get_labels_.restype = ctypes.POINTER(ctypes.c_char_p)
        path_ = ctypes.c_char_p(path.encode("UTF-8"))
        return get_labels_(path_)
    @chroot
    def do_nms_obj(self, dets, total, classes, thresh):
        """Run objectness-based non-max suppression in place on `dets`.
        C Definition of ```do_nms_obj```
        void do_nms_obj(
            detection *dets,
            int total,
            int classes,
            float thresh);
        """
        self.lib.do_nms_obj.argtypes = [
            ctypes.POINTER(Detection),
            ctypes.c_int,
            ctypes.c_int,
            ctypes.c_float,
        ]
        self.lib.do_nms_obj.restype = None
        self.lib.do_nms_obj(dets, total, classes, thresh)
        return None
    @chroot
    def do_nms_sort(self, dets, total, classes, thresh):
        """Run class-wise sorted non-max suppression in place on `dets`.
        C Definition of ```do_nms_sort```
        do_nms_sort(
            detection *dets,
            int total,
            int classes,
            float thresh);
        """
        self.lib.do_nms_sort.argtypes = [
            ctypes.POINTER(Detection),
            ctypes.c_int,
            ctypes.c_int,
            ctypes.c_float,
        ]
        self.lib.do_nms_sort.restype = None
        self.lib.do_nms_sort(dets, total, classes, thresh)
        return None
    @chroot
    def free_image(self, image):
        """Free image memory allocated by darknet."""
        self.lib.free_image.argtypes = [Image]
        self.lib.free_image(image)
    @chroot
    def free_detections(self, dets, num):
        """Free Detections allocated by get_network_boxes."""
        self.lib.free_detections.argtypes = [
            ctypes.POINTER(Detection),
            ctypes.c_int,
        ]
        self.lib.free_detections(dets, num)
|
from typing import Union
from fastapi import Cookie, FastAPI
app = FastAPI()
@app.get("/items/")
async def read_items(ads_id: Union[str, None] = Cookie(default=None)):
    """Echo the `ads_id` cookie back to the client (None when absent)."""
    payload = {"ads_id": ads_id}
    return payload
|
import numpy as np
import pandas as pd
from mpfin import mpPandasObj
def getDailyVol(close, span0=100):
    '''
    Estimate daily return volatility as the EWM standard deviation of
    one-day price returns.
    See Advances in Financial Analytics, snippet 3.1
    @param[in] close A series of prices where each value is the closing price of an asset.
                     The index of the series must be a valid datetime type.
    @param[in] span0 The sample size of the EWM.
    @return A pandas series of daily return volatility.
    '''
    # Position of the last bar at least one day before each timestamp.
    prev_pos = close.index.searchsorted(close.index - pd.Timedelta(days=1))
    prev_pos = prev_pos[prev_pos > 0]
    # Map each (later) timestamp to its one-day-earlier timestamp.
    prev_idx = pd.Series(close.index[prev_pos - 1],
                         index=close.index[close.shape[0] - prev_pos.shape[0]:])
    daily_ret = close.loc[prev_idx.index] / close.loc[prev_idx.values].values - 1
    return daily_ret.ewm(span=span0).std()
def getVerticalBarrier(tEvents, close, numDays=0):
    """
    Adding a Vertical Barrier.
    For each timestamp in tEvents, find the next price bar at or immediately
    after numDays days later; the result can be passed as the optional t1
    argument of getEvents.
    Advances in Financial Machine Learning, Snippet 3.4 page 49.
    @param tEvents A pd.DateTimeIndex of events.
    @param close A pd.Series of close prices.
    @param numDays The number of days to add for vertical barrier.
    @return A pd.Series of Timestamps of vertical barriers
    """
    barrier_pos = close.index.searchsorted(tEvents + pd.Timedelta(days=numDays))
    # Barriers that would fall past the last bar are dropped (NaNs at the end).
    barrier_pos = barrier_pos[barrier_pos < close.shape[0]]
    return pd.Series(close.index[barrier_pos],
                     index=tEvents[:barrier_pos.shape[0]])
def applyPtSlOnT1(close, events, ptSl, molecule):
'''
Apply stop loss/profit taking, if it takes place before t1 (vertical barrier)
(end of event).
Advances in Financial Machine Learning, snippet 3.2 page 45.
@param close
@param events
@param ptSl
@param molecule
@return
'''
events_ = events.loc[molecule]
out = events_[['t1']].copy(deep=True)
if ptSl[0] > 0:
pt = ptSl[0] * events_['trgt']
else:
pt = pd.Series(index=events.index) # NaNs
if ptSl[1] > 0:
sl = -ptSl[1] * events_['trgt']
else:
sl = pd.Series(index=events.index) # NaNs
for loc, t1 in events_['t1'].fillna(close.index[-1]).iteritems():
df0 = close[loc:t1] # path prices
df0 = (df0 / close[loc] - 1) * events_.at[loc, 'side'] # path returns
out.loc[loc, 'sl'] = df0[df0 < sl[loc]
].index.min() # earliest stop loss
out.loc[loc, 'pt'] = df0[df0 > pt[loc]
].index.min() # earliest profit taking
return out
def getEvents(close, tEvents, ptSl, trgt, minRet, numThreads, t1=False, side=None):
    '''
    Build the triple-barrier events table.
    Advances in Financial Machine Learning, snippet 3.6.
    @param close Close price series.
    @param tEvents Timestamps seeding each event.
    @param ptSl Profit-taking / stop-loss multipliers.
    @param trgt Target-return series (barrier widths).
    @param minRet Minimum target return required to run the barrier search.
    @param numThreads Threads used by the parallel barrier search.
    @param t1 Optional vertical-barrier series; False disables it.
    @param side Optional pre-computed position side (meta-labeling).
    @return DataFrame whose 't1' column holds the first barrier touch.
    '''
    # 1) keep only targets above the minimum return
    trgt = trgt.loc[tEvents]
    trgt = trgt[trgt > minRet]
    # 2) default vertical barrier: none (NaT)
    if t1 is False:
        t1 = pd.Series(pd.NaT, index=tEvents)
    # 3) without a side, search symmetrically with ptSl[0] on both barriers
    if side is None:
        side_ = pd.Series(1., index=trgt.index)
        ptSl_ = [ptSl[0], ptSl[0]]
    else:
        side_ = side.loc[trgt.index]
        ptSl_ = ptSl[:2]
    events = pd.concat({'t1': t1, 'trgt': trgt,
                        'side': side_}, axis=1).dropna(subset=['trgt'])
    df0 = mpPandasObj(func=applyPtSlOnT1, pdObj=('molecule', events.index),
                      numThreads=numThreads, close=close, events=events,
                      ptSl=ptSl_)
    events['t1'] = df0.dropna(how='all').min(axis=1)  # pd.min ignores nan
    if side is None:
        events = events.drop('side', axis=1)
    return events
def getBinsOld(events, close):
    '''
    Label events by the sign of their realized return (snippet 3.5), with
    events that start exactly on a vertical-barrier timestamp labeled 0.
    @param events DataFrame with a 't1' end-time column, indexed by start time.
    @param close Close price series.
    @return DataFrame with 'ret' and 'bin' columns, indexed like events.
    '''
    # 1) prices aligned with events
    events_ = events.dropna(subset=['t1'])
    px = events_.index.union(events_['t1'].values).drop_duplicates()
    px = close.reindex(px, method='bfill')
    # 2) create out object
    out = pd.DataFrame(index=events_.index)
    out['ret'] = px.loc[events_['t1'].values].values/px.loc[events_.index]-1
    out['bin'] = np.sign(out['ret'])
    # Where out index and t1 (vertical barrier) intersect label 0.
    # Bug fix: the original `out.query('index in @t1')` referenced an
    # undefined name `t1` and the NameError was silently swallowed by a
    # bare except, so the zero-labeling never ran.
    locs = out.index.intersection(pd.Index(events_['t1'].values))
    out.loc[locs, 'bin'] = 0
    return out
def getBins(events, close):
    '''
    Compute event's outcome (including side information, if provided).
    Snippet 3.7
    Case 1: ('side' not in events): bin in (-1,1) <-label by price action
    Case 2: ('side' in events): bin in (0,1) <-label by pnl (meta-labeling)
    @param events It's a dataframe whose
        - index is event's starttime
        - Column t1 is event's endtime
        - Column trgt is event's target
        - Column side (optional) implies the algo's position side.
    @param close It's a close price series.
    '''
    # 1) prices aligned with events
    events_ = events.dropna(subset=['t1'])
    price_idx = events_.index.union(events_['t1'].values).drop_duplicates()
    px = close.reindex(price_idx, method='bfill')
    # 2) create out object
    out = pd.DataFrame(index=events_.index)
    out['ret'] = px.loc[events_['t1'].values].values / px.loc[events_.index] - 1
    meta_labeling = 'side' in events_
    if meta_labeling:
        out['ret'] *= events_['side']
    out['bin'] = np.sign(out['ret'])
    if meta_labeling:
        # non-positive pnl => label 0 (meta-labeling)
        out.loc[out['ret'] <= 0, 'bin'] = 0
    return out
def dropLabels(events, minPct=.05):
    '''
    Takes a dataframe of events and removes those labels that fall
    below minPct (minimum percentil).
    Snippet 3.8
    @param events An events dataframe, such as the output of getBins()
    @param minPct The minimum percentil of rare labels to have.
    @return The input @p events dataframe but filtered.
    '''
    # apply weights, drop labels with insufficient examples
    while True:
        df0 = events['bin'].value_counts(normalize=True)
        if df0.min() > minPct or df0.shape[0] < 3:
            break
        # Bug fix: Series.argmin() returns a *positional* index in modern
        # pandas, so comparing it against the 'bin' labels filtered nothing
        # (or the wrong label). idxmin() returns the label itself.
        rare_label = df0.idxmin()
        print('dropped label: ', rare_label, df0.min())
        events = events[events['bin'] != rare_label]
    return events
|
import requests
import json
import numpy as np
import matplotlib.pyplot as plt
if __name__ == "__main__":
    # Build a two-tone test signal and send it to the local analysis service.
    x = np.linspace(0, 10, 5000)
    y = np.sin(2*np.pi*59*x) + np.sin(2*np.pi*33*x)
    url = 'http://localhost:5000/run_command'
    data = json.dumps((x.tolist(), y.tolist()))
    req = requests.put(url, data={'command': 'deeming', 'args': data})
    try:
        f, a = json.loads(req.json())
    except TypeError:
        # Service returned an error payload instead of (freqs, amps):
        # report it and skip plotting. (The original fell through to
        # plt.plot(f, a) with f/a undefined, raising NameError.)
        print(req.json())
    else:
        plt.plot(f, a)
        plt.show()
|
from board import *
FLAG = 11   # immovable flag rank
BOMB = 12   # immovable bomb rank
LAKE = -1   # impassable board-cell marker
class Piece:
    """A single Stratego piece: position, rank, owning team and move count."""
    def __init__(self, x, y, rank, team, count, knownStat=None):
        """Create a piece.
        x, y      -- board coordinates (0-9)
        rank      -- piece rank; FLAG (11) and BOMB (12) cannot move
        team      -- owning team identifier
        count     -- number of moves made so far
        knownStat -- optional "known to the opponent" state (list)
        """
        self.X = x
        self.Y = y
        self.rank = rank # switche id to rank for better naming
        self.count = count
        self.team = team
        # Bug fix: the original default `knownStat=[]` was a shared mutable
        # default, so every Piece created without the argument aliased the
        # SAME list. Create a fresh list per piece instead.
        self.isKnown = [] if knownStat is None else knownStat
    def print(self, show = True):
        """Print (show=True) or return (show=False) a textual description."""
        if show:
            output = "Piece: (" + str(self.X) +"," + str(self.Y) + "), rank: " + str(self.rank) + ", team:" + str(self.team) +", count:" + str(self.count)
            print(output)
        else:
            output = "Piece(" + str(self.X) +"," + str(self.Y) + ", " + str(self.rank) + ", " + str(self.team) + ", " + str(self.count) + ")"
            return output
    def inBoard(self, pos):
        """Return True when a single coordinate lies on the 10x10 board."""
        return 0 <= pos <= 9
    def isvalidPiece(self, piece, team):
        """Return True when `piece` is a movable piece belonging to `team`."""
        # Empty cell (0) / lake marker (-1), or an immovable rank, cannot be
        # selected; short-circuiting avoids reading .rank on the int markers.
        # (The original's extra `piece.rank == 11` branch was redundant with
        # the FLAG check, since FLAG == 11.)
        if piece == 0 or piece == -1 or piece.rank == BOMB or piece.rank == FLAG:
            return False
        if not (piece.inBoard(piece.X) and piece.inBoard(piece.Y)):
            return False
        return piece.team == team
    def isValidMove(self, piece, team):
        """Return True when `team` may move onto the cell occupied by `piece`."""
        if piece == 0:
            return True          # empty square
        if piece == LAKE:
            return False         # lakes are impassable
        return piece.team != team  # enemy piece: attack; own piece: blocked
    # gets all the valid moves for the piece
    def getValidMoves(self, board, formatted = False):
        """List legal moves as (fromX, fromY, toX, toY) tuples.
        With formatted=True the four orthogonal directions are returned as a
        fixed-length list with 0 placeholders (used by ManualPlayer).
        """
        cell = board.board[self.X][self.Y]
        # Empty cells and immovable ranks have no moves (FLAG==11, BOMB==12).
        if cell == 0 or cell == FLAG or cell == BOMB:
            return [0, 0, 0, 0] if formatted else []
        if self.rank != 9:
            moves = []
            form = []
            neighbours = [(self.X + 1, self.Y), (self.X - 1, self.Y),
                          (self.X, self.Y + 1), (self.X, self.Y - 1)]
            for nx, ny in neighbours:
                if self.inBoard(nx) and self.inBoard(ny):
                    if self.isValidMove(board.board[nx][ny], cell.team):
                        moves.append((self.X, self.Y, nx, ny))
                        form.append((self.X, self.Y, nx, ny))
                    else:
                        form.append(0) # placeholder value for direction used for ManualPlayer
                else:
                    form.append(0)
            return form if formatted else moves
        # Scout handling (rank 9 slides any distance)
        # order [Down, Up, Right, Left]
        directions = [(1, 0), (-1, 0), (0, 1), (0, -1)]
        moves = []
        for dx, dy in directions:
            val = self.scoutHandle(board, dx, dy, formatted)
            if formatted:
                moves.append(val)
            elif val is not None:
                moves.append(val)
        return moves
    # Returns the valid moves for a scout
    def scoutHandle(self, board, xMove, yMove, formatted=False):
        """Walk from the scout's square in one direction; return the farthest move.
        Slides over empty squares, may end on the first enemy piece, and stops
        before lakes, friendly pieces and the board edge. Returns a
        (fromX, fromY, toX, toY) tuple, 0 when formatted and no move exists,
        or None otherwise.
        """
        team = board.board[self.X][self.Y].team
        x = self.X
        y = self.Y
        while True:
            nx, ny = x + xMove, y + yMove
            if not (self.inBoard(nx) and self.inBoard(ny)):
                break  # out of bounds
            target = board.board[nx][ny]
            if target == 0:
                x, y = nx, ny      # empty square: keep sliding
                continue
            if target != LAKE and target.team != team:
                x, y = nx, ny      # enemy piece: capture square ends the run
            break
        if not (x == self.X and y == self.Y):
            return (self.X, self.Y, x, y)
        if formatted:
            # used for formatting for manual player
            return 0
        return None
|
import sys
import matplotlib
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.collections import PatchCollection
from niftynet.io.image_reader import ImageReader
from niftynet.engine.image_window_dataset import ImageWindowDataset
from niftynet.engine.sampler_uniform_v2 import UniformSampler
from niftynet.engine.sampler_grid_v2 import GridSampler
from niftynet.engine.windows_aggregator_base import ImageWindowsAggregator as IA
from niftynet.layer.pad import PadLayer
def vis_coordinates(image, coordinates=None, saving_name='image.png', dpi=50):
    """
    Render `image` in grayscale and overlay one rectangle per window row in
    `coordinates`; the figure is saved at `saving_name` when given.
    Each row is a spatial-location vector where columns 1,2 hold the window
    start and 4,5 the window end — presumably (y, x) ordered; TODO confirm
    against the sampler's MR_location layout.
    """
    fig, axis = plt.subplots(1)
    axis.imshow(image, cmap='gray')
    boxes = []
    if coordinates is not None:
        # reversed so earlier windows end up drawn on top
        for win in coordinates[::-1]:
            boxes.append(patches.Rectangle(
                xy=(win[2], win[1]),
                width=win[5] - win[2],
                height=win[4] - win[1],
                linewidth=1))
    if boxes:
        collection = PatchCollection(
            boxes, alpha=0.6, edgecolor='r', facecolor='#f5e44c')
        axis.add_collection(collection)
    if saving_name:
        fig.savefig(saving_name, bbox_inches='tight', pad_inches=0, dpi=dpi)
    return
###
# config parameters
###
spatial_window_size = (100, 100)
border = (12, 12)
volume_padding_size = (50, 50)
data_param = \
    {'MR': {
        'path_to_search': '~/Desktop/useful_scripts/visualise_windows',
        'filename_contains': 'example.png'}}
###
# create an image reader
###
reader = ImageReader().initialise(data_param)
reader.add_preprocessing_layers( # add volume padding layer
    [PadLayer(image_name=['MR'],
              border=volume_padding_size, mode='constant')])
###
# show 'volume' -- without window sampling
###
image_2d = ImageWindowDataset(reader)()['MR'][0, :, :, 0, 0, 0]
vis_coordinates(image_2d, saving_name='output/image.png')
###
# create & show uniform random samples
###
uniform_sampler = UniformSampler(
    reader, spatial_window_size, windows_per_image=100)
next_window = uniform_sampler.pop_batch_op()
coords = []
# TF1-style graph/session API: each sess.run draws one batch of windows.
with tf.Session() as sess:
    for _ in range(20):
        uniform_windows = sess.run(next_window)
        coords.append(uniform_windows['MR_location'])
coords = np.concatenate(coords, axis=0)
vis_coordinates(image_2d, coords, 'output/uniform.png')
###
# create & show all grid samples
###
grid_sampler = GridSampler(
    reader, spatial_window_size, window_border=border)
next_grid = grid_sampler.pop_batch_op()
coords = []
with tf.Session() as sess:
    while True:
        window = sess.run(next_grid)
        # The grid sampler signals exhaustion with a -1 location sentinel.
        if window['MR_location'][0, 0] == -1:
            break
        coords.append(window['MR_location'])
coords = np.concatenate(coords, axis=0)
vis_coordinates(image_2d, coords, 'output/grid.png')
###
# create & show cropped grid samples (in aggregator)
###
n_window = coords.shape[0]
# Dummy windows: only the coordinates matter for crop_batch here.
dummy_window = np.zeros((n_window, 800, 800, 1, 1))
_, coords = IA.crop_batch(dummy_window, coords, border=border)
vis_coordinates(image_2d, coords, 'output/grid_cropped.png')
|
import os
import configparser
from boto3.session import Session
from fastapi.responses import FileResponse
from fastapi import APIRouter,File, UploadFile,Request,Depends
from pydantic import BaseModel
import json
from sqlalchemy import and_
from pydantic.types import FilePath
from sqlalchemy.sql.elements import Null
from sqlalchemy.sql.expression import null
import rasterio
from botocore import UNSIGNED
from botocore.client import Config
from rasterio.session import AWSSession
from rasterstats import zonal_stats, point_query
from db.database import get_db
from models.index import Parameter
from datetime import datetime
import time
from shapely.geometry import shape
from schemas.index import Gettrend
# Service configuration is read once at import time from config/config.ini;
# a missing section/key raises KeyError here rather than at request time.
config = configparser.ConfigParser()
config.read('config/config.ini')
AWS_ACCESS_KEY = config['AWS']['ACCESSKEY']
AWS_SECRET_KEY = config['AWS']['SECRETKEY']
PARAMETER_PATH = config['azureblob']['Filepath']
# Router exposing the /gettrend endpoint; mounted by the main application.
trend=APIRouter()
def toUnix(datestring):
unixtime=time.mktime(datetime.strptime(datestring,'%d-%m-%Y').timetuple())
return int(unixtime*1000)
def getcentroid(polygon_feature):
    """
    Function for getting centroid of a polygon feature
    Parameters
    polygon_feature - Polygon feture object from the geojson
    """
    # shapely: build the geometry, then take its centroid point
    return shape(polygon_feature).centroid
class Layer(BaseModel):
    """Request model carrying a single layer date string."""
    # Date of the layer being queried — presumably 'DD-MM-YYYY' to match the
    # raster file naming used elsewhere in this module; TODO confirm callers.
    layerdate:str
# @trend.get('/gettrend', status_code=200)
# async def get_trend(geojson:str,
# parameter:str,
# startdate:str,
# enddate:str,
# db:Session=Depends(get_db)):
# format="%Y-%m-%d"
# try:
# geom=json.loads(geojson, strict=False)
# except ValueError:
# return {
# "message":"Please provide a valid geojson geometry"
# }
# try:
# datetime.strptime(startdate, format)
# datetime.strptime(enddate, format)
# except ValueError:
# return {
# "message":"start date and end date in should be YYYY-MM-DD"
# }
# availabledates=db.query(Parameter.available_date).filter(
# and_(Parameter.parameter_name==parameter,Parameter.available_date.between(
# str(startdate),str(enddate)
# ))).order_by(Parameter.available_date).all()
# if(len(availabledates)<1):
# return {
# 'code':404,
# 'message':"No data available for the given date range"
# }
# else:
# session = Session(aws_access_key_id=AWS_ACCESS_KEY,
# aws_secret_access_key=AWS_SECRET_KEY)
# zstats=[]
# with rasterio.Env(AWSSession(session)) as env:
# for i in availabledates:
# filedate = datetime.strptime(str(i.available_date), '%Y-%m-%d').strftime('%d-%m-%Y')
# file_path='s3://undp-dataforpolicy/parameters/'+parameter+'/RASTER/'+str(filedate)+'.tif'
# stats = zonal_stats(geom,
# file_path,
# stats=['mean','count'])
# if(stats[0]['count']==0):
# pointvalue=point_query(getcentroid(geom),file_path)
# try:
# zstats.append([toUnix(filedate),round(pointvalue[0],2)])
# except TypeError:
# return {
# "message":"Data not available plase draw inside telangana"
# }
# else:
# zstats.append([toUnix(filedate),round(stats[0]['mean'],2)])
# return {
# 'code':200,
# 'trend':zstats
# }
@trend.post('/gettrend', status_code=200)
async def get_trend(details:Gettrend,db:Session=Depends(get_db)):
    """Return a time series of a raster parameter over a GeoJSON geometry.
    For every available date in [startdate, enddate], computes zonal
    statistics of the parameter raster over the request geometry; when the
    geometry covers no raster cell, falls back to a point query at its
    centroid. Responds with {'code', 'trend'} or an error message dict.
    """
    date_format = "%Y-%m-%d"
    try:
        datetime.strptime(details.startdate, date_format)
        datetime.strptime(details.enddate, date_format)
    except ValueError:
        return {
            "message":"start date and end date in should be YYYY-MM-DD"
        }
    availabledates = db.query(Parameter.available_date).filter(
        and_(Parameter.parameter_name==details.parameter,
             Parameter.available_date.between(
                 str(details.startdate), str(details.enddate)
             ))).order_by(Parameter.available_date).all()
    db.close()
    if len(availabledates) < 1:
        return {
            'code':404,
            'message':"No data available for the given date range"
        }
    trend_points = []
    for row in availabledates:
        filedate = datetime.strptime(str(row.available_date), '%Y-%m-%d').strftime('%d-%m-%Y')
        file_path = PARAMETER_PATH + details.parameter + '/RASTER/' + str(filedate) + '.tif'
        stats = zonal_stats(details.geojson,
                            file_path,
                            stats=['mean','count','sum'])
        if stats[0]['count'] == 0:
            # Geometry smaller than one pixel: sample the raster at the centroid.
            pointvalue = point_query(getcentroid(details.geojson), file_path)
            if pointvalue[0] is None:
                trend_points.append([toUnix(filedate), None])
            else:
                trend_points.append([toUnix(filedate), round(pointvalue[0], 2)])
        elif details.parameter == 'POPULATION':
            # Population is additive: report the zonal sum rather than the mean.
            trend_points.append([toUnix(filedate), round(stats[0]['sum'], 2)])
        else:
            trend_points.append([toUnix(filedate), round(stats[0]['mean'], 2)])
    return {
        'code':200,
        'trend':trend_points
    }
|
import tkinter as tk
#--- functions ---
def on_click():
    # Print each OptionMenu's current selection next to its source options.
    for number, var in enumerate(all_variables):
        print('optionmenu:', number, '| selected:', var.get(), '| all:', data[number])
#--- main ---
# One comma-separated option string per OptionMenu to build.
data = ['a,b,c', 'x,y,z']
root = tk.Tk()
all_variables = []  # StringVars, one per OptionMenu, in creation order
for options in data:
    options = options.split(',')
    var = tk.StringVar(value=options[0])  # default to the first option
    all_variables.append(var)
    op = tk.OptionMenu(root, var, *options)
    op.pack()
b = tk.Button(root, text='OK', command=on_click)
b.pack()
root.mainloop()
|
# -*- coding: utf-8 -*-
from random import randint
from numpy.random import choice
from items.equipment_properties import *
from items.basic_item import *
from items.consumables.potions import endurance_potion, health_potion, mana_potion
drop_list = []  # accumulates every item produced by loot()
# Potion pools, indexed 0=endurance, 1=health, 2=mana (see loot()).
potion_list = [endurance_potion.endurance_potion_list,
               health_potion.health_potion_list,
               mana_potion.mana_potion_list]
# All rollable item categories. NOTE(review): the "tow_handed_*" spellings
# look like typos for "two_handed_*", but they are load-bearing — item_name()
# opens "<category>.txt", so any rename must also rename the name files.
item_categorys = ["potions", "helm", "chest_armor", "belt",
                  "boot", "gloves", "pant",
                  "shield", "shoulder","amulet",
                  "ring","sword", "axe", "mace",
                  "spear", "tow_handed_sword",
                  "tow_handed_axe", "tow_handed_mace"]
# Categories that additionally roll an armor value in loot().
# NOTE(review): "glove" here vs "gloves" in item_categorys — as written,
# gloves never take the armor branch; confirm which spelling is intended.
armor_categorys = ["helm", "chest_armor", "belt",
                   "boot", "glove", "pant",
                   "shield", "shoulder"]
def property_1(level):
    """Roll one random entry from main_properties, scaled by *level*.

    Returns ``[stat_name, value]`` where value = randint(min, max) * level.
    """
    chosen = main_properties[randint(0, len(main_properties) - 1)]
    rolled_value = randint(chosen[1], chosen[2]) * level
    return [chosen[0], rolled_value]
def property_2(level):
    """Roll one random entry from primary_properties, scaled by *level*.

    Returns ``[stat_name, value]`` where value = randint(min, max) * level.
    """
    # BUG FIX: the index was drawn with len(main_properties); whenever the two
    # lists differ in length that either raises IndexError or biases/clips the
    # selection. Draw the index from primary_properties itself.
    stat = primary_properties[randint(0, len(primary_properties)-1)]
    stat_name = stat[0]
    value = randint(stat[1], stat[2]) * level
    return [stat_name, value]
def armor(level):
    """Roll a base armor value (5..20 inclusive) scaled by *level*."""
    return randint(5, 20) * level
def drop_chance():
    """Draw a rarity tier according to the fixed drop probabilities."""
    tiers = ("common","uncommon","rare","super rare")
    weights = [0.5, 0.35, 0.1, 0.05]
    # numpy's choice returns a numpy scalar string; normalize to a plain str.
    return str(choice(tiers, p=weights))
def item_name(equipment_category):
    """Pick a random name for *equipment_category* from its name file.

    Reads items/Item_Name/<equipment_category>.txt (one name per line,
    relative to the current working directory) and returns one line at random.
    """
    import os
    # FIX: the old literal "items\Item_Name" only worked on Windows; build
    # the path portably. Also manage the file with a context manager so the
    # handle is closed even if reading fails.
    path = os.path.join("items", "Item_Name", "{}.txt".format(equipment_category))
    with open(path, "r") as name_file:
        name_list = [line.rstrip("\n") for line in name_file]
    return name_list[randint(0, len(name_list)-1)]
# roll drop chance and loot
def loot():
    """Roll 1-3 random items and append them to the global drop_list.

    Each item is either a potion (drawn from potion_list) or an equipment
    piece whose rarity tier — from drop_chance() — decides how many bonus
    properties it gets; armor categories additionally roll an armor value.
    """
    rand = 1 #randint(0,2)
    item_quantity = randint(1,3)
    if rand == 1:
        for number in range(item_quantity):
            player_level = 1
            drop_chanc_category = drop_chance()
            equipment_category = item_categorys[randint(0,len(item_categorys)-1)]
            number = 1
            if equipment_category == "potions":
                potion_category = randint(0,2)
                drop_list.append(potion_list[potion_category][randint(0, 2)])
            else:
                name = item_name(equipment_category)
                weight = 10
                worth = 10
                durability = 10
                if equipment_category in armor_categorys:
                    if drop_chanc_category == "common":
                        item = common_armor(player_level, drop_chanc_category, equipment_category, number, name, weight, worth, durability, armor(player_level), property_1(player_level))
                    elif drop_chanc_category == "uncommon":
                        item = uncommon_armor(player_level, drop_chanc_category, equipment_category, number, name, weight, worth, durability, armor(player_level), property_1(player_level), property_2(player_level))
                    elif drop_chanc_category == "rare":
                        item = rare_armor(player_level, drop_chanc_category, equipment_category, number, name, weight, worth, durability, armor(player_level), property_1(player_level), property_2(player_level), property_2(player_level))
                    else:
                        # BUG FIX: this branch used to test "super_rare", but
                        # drop_chance() returns "super rare" (with a space), so
                        # super-rare drops left `item` unbound and crashed on
                        # append. The only remaining tier is super rare.
                        item = super_rare_armor(player_level, drop_chanc_category, equipment_category, number, name, weight, worth, durability, armor(player_level), property_1(player_level), property_2(player_level), property_2(player_level), property_2(player_level))
                else:
                    if drop_chanc_category == "common":
                        item = common(player_level, drop_chanc_category, equipment_category, number, name, weight, worth, durability, property_1(player_level))
                    elif drop_chanc_category == "uncommon":
                        item = uncommon(player_level, drop_chanc_category, equipment_category, number, name, weight, worth, durability, property_1(player_level), property_2(player_level))
                    elif drop_chanc_category == "rare":
                        item = rare(player_level, drop_chanc_category, equipment_category, number, name, weight, worth, durability, property_1(player_level), property_2(player_level), property_2(player_level))
                    else:
                        # BUG FIX: same "super_rare" vs "super rare" mismatch.
                        item = super_rare(player_level, drop_chanc_category, equipment_category, number, name, weight, worth, durability, property_1(player_level), property_2(player_level), property_2(player_level), property_2(player_level))
                drop_list.append(item)
import py
from pypy.config.makerestdoc import register_config_role
docdir = py.magic.autopath().dirpath()  # directory containing this conftest
pytest_plugins = "pytest_restdoc"  # pull in the ReST-doc pytest plugin
class PyPyDocPlugin:
    """pytest plugin for the PyPy documentation tree.

    Adds doc-related command-line options, registers the config reST role,
    and filters interpreter-level doctest lines before doctests run.
    """
    def pytest_addoption(self, parser):
        # Register the doc-specific command-line flags.
        group = parser.addgroup("pypy-doc options")
        group.addoption('--pypy-doctests', action="store_true",
            dest="pypy_doctests", default=False,
            help="enable doctests in .txt files")
        group.addoption('--generate-redirections',
            action="store_true", dest="generateredirections",
            default=True, help="Generate redirecting HTML files")
    def pytest_configure(self, config):
        self.config = config
        register_config_role(docdir)
    def pytest_doctest_prepare_content(self, content):
        """Skip doctests unless enabled; blank out '>>>>' (interp-level) lines."""
        if not self.config.getvalue("pypy_doctests"):
            py.test.skip("specify --pypy-doctests to run doctests")
        l = []
        for line in content.split("\n"):
            # '>>>>' marks interpreter-level examples that plain doctest
            # cannot execute; replace them with empty lines.
            if line.find('>>>>') != -1:
                line = ""
            l.append(line)
        return "\n".join(l)
# Name expected by the old py.test conftest-plugin loading convention.
ConftestPlugin = PyPyDocPlugin
|
import os, glob
import cv2, dlib
import numpy as np
from threading import Thread
from __init__ import raw_dir, raw_fids_dir, ref_dir, log_dir
from pca import PCA
from face import get_landmark, LandmarkIndex, facefrontal
srcdir = '%s/mp4' % raw_dir   # source mp4 videos
dstdir = '%s/fids' % raw_dir  # per-fragment landmark output directories
# Fragment directories are named "<link>}}<nn>" (see vprocess/check_raw);
# stripping the last 4 chars leaves the link id, then collect unique links
# together with how many fragments each one has.
subdirs = os.listdir(dstdir)
subdirs = [x[:-4] for x in subdirs]
unqeles, unqinds, inverids, unqcnts = np.unique(subdirs, return_index=True, return_inverse=True, return_counts=True)
# links.txt: characters 32:43 of each line hold the 11-char link id.
with open(os.path.join(raw_dir, 'links.txt')) as linkf:
    files = linkf.readlines()
files = [x[32:43] for x in files]
def check_raw(width=10, samples=50):
    """Sanity-check raw video fragments and write per-fragment face stats.

    For each fragment of each video: verifies that face landmarks can be
    detected in the first and last `width` frames (failures are appended to
    check_raw.log), then samples `samples` random frames and writes the mean
    detection box (left, top, width) to the fragment's stat.txt.
    """
    vsubdirs = os.listdir(raw_fids_dir)
    vsubdirs = [x[:-4] for x in vsubdirs]  # strip the "}}nn" fragment suffix
    unqlinks, unqcnts = np.unique(vsubdirs, return_counts=True)
    log_path = '%s/check_raw.log' % log_dir
    # Start from a clean log each run.
    if os.path.exists(log_path):
        os.remove(log_path)
    for link, cnt in zip(unqlinks, unqcnts):
        vsubdir_list = [link + "}}" + str(100+i)[1:3] + '/' for i in range(cnt)]
        mp4f = glob.glob('%s/mp4/*_%s.mp4' % (raw_dir, link))[0]
        cap = cv2.VideoCapture(mp4f)
        for idx, vsubdir in enumerate(vsubdir_list):
            # in each video fragment: read its start frame and frame count
            with open('%s/%s/startframe.txt' % (raw_fids_dir, vsubdir)) as f:
                vstartfr = int(f.read())
            with open('%s/%s/nframe.txt' % (raw_fids_dir, vsubdir)) as f:
                vnfr = int(f.read())
            print('%03d: %s from %d to %d' % (idx, link, vstartfr, vstartfr+vnfr))
            # Check the leading `width` frames.
            # NOTE(review): `cnt` below shadows the outer fragment count; it is
            # not used again after vsubdir_list was built, so this is harmless.
            cap.set(cv2.CAP_PROP_POS_FRAMES, vstartfr)
            for cnt in range(vstartfr, vstartfr+min(width, vnfr)):
                ret, frame = cap.read()
                assert(ret)
                det, p2d = get_landmark(frame, LandmarkIndex.FULL, False)
                if det is None or p2d is None:
                    with open(log_path, 'a') as f:
                        f.write('[ERROR] %s}}%02d: frame %d is unacceptable.\n' % (mp4f, idx, cnt))
            # Check the trailing `width` frames.
            cap.set(cv2.CAP_PROP_POS_FRAMES, vstartfr+max(0, vnfr-width))
            for cnt in range(vstartfr+max(0, vnfr-width), vstartfr+vnfr):
                ret, frame = cap.read()
                assert(ret)
                det, p2d = get_landmark(frame, LandmarkIndex.FULL, False)
                if det is None or p2d is None:
                    with open(log_path, 'a') as f:
                        f.write('[ERROR] %s}}%02d: frame %d is unacceptable.\n' % (mp4f, idx, cnt))
            # generate reference statistics from randomly sampled frames
            indices = np.random.randint(vstartfr, vstartfr+vnfr, samples)
            data = []
            for i in indices:
                cap.set(cv2.CAP_PROP_POS_FRAMES, i)
                ret, frame = cap.read()
                assert(ret)
                det, p2d = get_landmark(frame, LandmarkIndex.FULL, False)
                data.append([det.left(), det.top(), det.width()])
            data = np.array(data)
            mean = np.mean(data, axis=0)
            with open('%s/%s/stat.txt' % (raw_fids_dir, vsubdir), 'w') as f:
                f.write('%f %f %f\n'%(mean[0], mean[1], mean[2]))
    print('ALL DONE')
def video_landmark(mp4_path, args, means, detector, predictor):
    '''Extract per-frame facial landmark features for every fragment of a video.

    ### parameters
    mp4_path: path of the mp4 file \\
    args: (n, 2) fragmentation arguments; column 0 is the start frame and
          column 1 is the frame count of each of the n fragments \\
    means: per-fragment mean detection boxes (as written by check_raw's
           stat.txt), passed through to facefrontal \\
    detector, predictor: face detector / landmark predictor handed to
           facefrontal and get_landmark
    ### retval
    feature_list: a list with one entry per fragment; each entry is an
          ndarray of shape (nfr, 40) of flattened landmark coordinates.
          Returns None (after logging to error.log) as soon as any frame
          fails detection.
    '''
    feature_list = []
    frag_id = 0
    cap = cv2.VideoCapture(mp4_path)
    for arg, mean in zip(args, means):
        # initialization for this fragment
        cnt = 0
        startfr, nfr = arg
        cap.set(cv2.CAP_PROP_POS_FRAMES, startfr)
        fragment_feature = None
        while cnt < nfr:
            if (startfr+cnt) % 10 == 0:
                print(mp4_path[-15:-4],'#:', startfr+cnt)
            # fetch the frame
            ret, img_ = cap.read()
            assert(ret)
            # frontalization
            img = facefrontal(img_, detector=detector, predictor=predictor, mean=mean) # img.shape = None or (320, 320, 3)
            if img is None:
                with open('%s/error.log' % log_dir, 'a') as f:
                    f.write('%s - %04d: p2d not found before frontalization.\n' % (mp4_path, startfr+cnt))
                return None
            # lip landmark extraction and normalization
            det, landmarks = get_landmark(img, LandmarkIndex.FULL, True, detector, predictor)
            if det is None or landmarks is None:
                with open('%s/error.log' % log_dir, 'a') as f:
                    f.write('%s - %04d: p2d not found after frontalization.\n' % (mp4_path, startfr+cnt))
                return None
            # Flatten (n_points, 2) coordinates into one feature row.
            frame_feature = landmarks.reshape((landmarks.shape[0]*landmarks.shape[1])) # (40, )
            # add frame_feature to fragment_feature
            if fragment_feature is None:
                fragment_feature = np.copy(frame_feature)
            else:
                fragment_feature = np.vstack((fragment_feature, frame_feature))
            cnt += 1
        # fragment_feature.shape = (fr?, 40)
        feature_list.append(fragment_feature)
        frag_id += 1
    return feature_list
def vprocess(links, cnts, tid):
    '''Worker: extract lip landmark features for the given mp4 links.

    ### parameters
    links: list of link strings, indicating mp4 file names \\
    cnts: list of int, indicating fragment numbers of each mp4 file \\
    tid: integer thread id, used only to name this worker's log file
    ### notice
    The length of links should equal that of cnts. Results are written to
    each fragment's features.npy; progress goes to <log_dir>/Thread-<nn>.log.
    '''
    total_cnt = 0
    tname = 'Thread-' + str(100+tid)[1:3]
    # Each thread builds its own detector/predictor: these objects are not
    # shared across threads.
    from __init__ import detdir, pdctdir
    detector = cv2.CascadeClassifier(detdir)
    predictor = dlib.shape_predictor(pdctdir)
    for link, cnt in zip(links, cnts):
        idx = files.index(link)
        srcfile = '%s/%s_%s.mp4' % (srcdir, str(1001+idx)[1:4], files[idx])
        args, means = [], []
        subdir_list = ['%s}}%s' % (link, str(100+i)[1:3]) for i in range(cnt)]
        # Gather each fragment's mean box and (startframe, nframe) arguments.
        for subdir in subdir_list:
            with open('%s/%s/stat.txt' % (dstdir, subdir)) as f:
                s = f.read()
            means.append([float(i) for i in s.split()])
            with open('%s/%s/startframe.txt' % (dstdir, subdir)) as f:
                startfr = int(f.read())
            with open('%s/%s/nframe.txt' % (dstdir, subdir)) as f:
                nfr = int(f.read())
            args.append([startfr, nfr])
        args = np.array(args)
        means = np.array(means)
        feature_list = video_landmark(srcfile, args, means, detector, predictor)
        with open('%s/%s.log' % (log_dir, tname), 'a') as logf:
            if feature_list is not None:
                for i, subdir in enumerate(subdir_list):
                    features = feature_list[i]
                    np.save('%s/%s/features.npy' % (dstdir, subdir), features)
                    logf.write('%s/features.npy saved\n' % subdir)
            else:
                # error when extracting features; video_landmark already logged why
                logf.write('%s}}*/features.npy failed\n' % link)
            logf.write(str(total_cnt+1)+'/'+str(len(cnts))+' done\n====================\n')
        total_cnt += 1
def video_process(start_batch=0, end_batch=301, links=None, cnts=None, nthreads=10):
    """Fan video feature extraction out over `nthreads` vprocess workers.

    When `links`/`cnts` are not given, processes the unique links in
    [start_batch, end_batch). Existing *.log files in log_dir are removed
    before starting.
    """
    logs = glob.glob('%s/*.log' % log_dir)
    for log in logs:
        os.remove(log)
    if links is None and cnts is None:
        links = unqeles[start_batch:end_batch]
        cnts = unqcnts[start_batch:end_batch]
    # Split the work into nthreads roughly-equal contiguous slices;
    # neles is a float, so round() at the slice edges keeps full coverage.
    neles = len(links) / nthreads
    frag_links = [links[round(i*neles):round((i+1)*neles)] for i in range(nthreads)]
    frag_cnts = [cnts[round(i*neles):round((i+1)*neles)] for i in range(nthreads)]
    threads = [Thread(target=vprocess, args=(frag_links[i], frag_cnts[i], i, )) for i in range(nthreads)]
    for tid, thread in enumerate(threads):
        thread.name = 't'+str(tid)
        print('%s start!' % thread.name)
        thread.start()
    # processing... wait for every worker to finish
    for thread in threads:
        thread.join()
    print('All done')
def reduce_dim(dim=20):
    """Project every fragment's 40-dim lip features down to *dim* via PCA.

    Stacks all fragments' features, fits PCA, stores the (40, dim)
    projection matrix as ref_dir/PCA_MAT.npy, then writes each fragment's
    projected coefficients to <fragment>/fidsCoeff.npy.
    """
    feature_list = []
    dstdir = '%s/fids' % raw_dir  # local; same value as the module-level dstdir
    subdirs = os.listdir(dstdir)
    for subdir in subdirs:
        fea = np.load('%s/%s/features.npy' % (dstdir, subdir))
        # Keep only the lip-landmark coordinates (x,y pairs from LIP onward).
        fea = fea[:, LandmarkIndex.LIP[0]*2:]
        with open('%s/%s/nframe.txt' % (dstdir, subdir)) as f:
            nfr = int(f.read())
        assert(fea.shape[0] == nfr and fea.shape[1] == 40)
        feature_list.append(fea)
    features = np.vstack(feature_list)
    eigvector, eigvalue = PCA(features)
    A = eigvector[:, :dim] # A.shape = (40, 20)
    np.save('%s/PCA_MAT.npy' % ref_dir, A)
    print('PCA_MAT saved')
    for fea, subdir in zip(feature_list, subdirs):
        # fea.shape = (nfr, 40)
        pca_fea = np.matmul(fea, A) #pca_fea.shape = (nfr, 20)
        np.save('%s/%s/fidsCoeff.npy' % (dstdir, subdir), pca_fea)
        print('%s/fidsCoeff.npy saved' % subdir)
if __name__ == '__main__':
    # Intentionally empty: invoke check_raw(), video_process() or
    # reduce_dim() manually as needed.
    pass
from keras.models import Model
from keras.layers import *
from algorithms.keraswtacnn import KerasWTACNN
class KerasRandomInitCNN(KerasWTACNN):
    """WTA-CNN variant that performs no training: the sparsity stage is the
    identity mapping and train() simply snapshots the randomly initialized
    autoencoder weights."""
    def __init__(self, results_dir, config):
        super(KerasRandomInitCNN, self).__init__(results_dir, config)
    def build_sparsity(self):
        # Identity model: output equals input, i.e. no sparsity is applied.
        inp = Input(shape=(self.config['shape1'], self.config['shape2'], self.config['shape3'], self.config['filters']))
        self.sparsity = Model(inp, inp)
    def train(self, X_train, X_val=None):
        # No optimization happens: persist the random initialization as-is.
        # X_train / X_val are accepted only to keep the base-class interface.
        self.autoencoder_base.save_weights(self.results_dir + '/autoencoder.hdf5')
        pass
|
from agstoolbox.core.ags.ags_editor import AgsEditor
class Release(AgsEditor):
    """a GitHub release object"""
    # NOTE(review): extends AgsEditor, presumably so a release can be handled
    # like an installable editor version — confirm against callers.
    url = None                      # API/browser URL of the release
    id = None                       # GitHub release id
    tag = None                      # git tag the release was cut from
    is_pre_release = False          # True when GitHub marks it as a pre-release
    text_details = None             # release notes / description text
    published_at = None             # publication time as provided by GitHub
    published_at_timestamp = None   # numeric form of published_at
    archive_name = None             # downloadable asset file name
    archive_url = None              # downloadable asset URL
    archive_size = None             # asset size
    archive_id = None               # asset id
|
# method overriding - 2
class Animal:
    """Base class: generic animal behaviors."""
    def animalSound(self):
        print('Animal sound !!')
    def animalType(self):
        print('Carnivorous or Herbivorous or Omnivorous !!')
class Dog(Animal):
    """Overrides animalSound but extends (not replaces) the base behavior."""
    def animalSound(self):
        # Run the base-class version first, then add the Dog-specific line.
        super().animalSound()
        print('Dog sound !!')
d = Dog()
d.animalSound()  # prints the Animal line followed by the Dog line
d.animalType()   # inherited unchanged from Animal
|
"""
LeetCode Problem: 332. Reconstruct Itinerary
Link: https://leetcode.com/problems/reconstruct-itinerary/
Language: Python
Written by: Mostofa Adib Shakib
Time Complexity: O(N)
Space Complexity: O(N)
"""
class Solution:
    def findItinerary(self, tickets: List[List[str]]) -> List[str]:
        """Rebuild the full itinerary starting at 'JFK', always preferring the
        lexicographically smallest next airport (Hierholzer's algorithm for an
        Eulerian path)."""
        self.hashMap = defaultdict(list)
        for origin, destination in tickets:
            self.hashMap[origin].append(destination)
        # Reverse-sort every adjacency list so that pop() always yields the
        # lexicographically smallest unused destination. Duplicate flights
        # (same origin and destination) are kept as separate entries.
        for itinerary in self.hashMap.values():
            itinerary.sort(reverse=True)
        self.result = []
        self.DFS('JFK')
        # Airports were appended post-order; reverse to get the real route.
        return self.result[::-1]

    def DFS(self, origin):
        """Consume every remaining edge out of *origin*, then record it."""
        neighbours = self.hashMap[origin]
        while neighbours:
            # Visiting an edge removes it from the graph.
            self.DFS(neighbours.pop())
        self.result.append(origin)
"""
Implements similar functionality as tf.train.Checkpoint and tf.train.CheckpointManager.
https://gist.github.com/kevinzakka/5d345421f7abefd5dbaf6a77f829e70a.
"""
import logging
import os
import signal
import numpy as np
import os.path as osp
import torch
from glob import glob
def mkdir(s):
    """Create directory *s* (including parents) if it doesn't already exist.

    Args:
        s (str): Path of the directory to create.
    """
    # FIX: the previous exists()-then-makedirs() sequence could raise
    # FileExistsError if another process created the directory between the
    # check and the call; exist_ok=True makes the operation race-free.
    os.makedirs(s, exist_ok=True)
def get_files(d, pattern, sort=True):
    """Return the files in a directory that match a wildcard pattern.

    Args:
        d (str): The path to the directory.
        pattern (str): The wildcard to filter files with.
        sort (bool): Whether to sort numerically by the leading integer in
            each file name (e.g. "00000010.ckpt" sorts after "2.ckpt").
    """
    matches = [p for p in glob(osp.join(d, pattern)) if osp.isfile(p)]
    if sort:
        matches.sort(key=lambda p: int(osp.basename(p).split(".")[0]))
    return matches
class Checkpoint:
    """Save and restore model and optimizer states.

    Mirrors tf.train.Checkpoint: bundles a model (and optionally an
    optimizer) and serializes/deserializes their state dicts as one file.
    """
    def __init__(self, model, optimizer=None):
        """Constructor.

        Args:
            model: Module whose state_dict is saved/restored.
            optimizer: Optional optimizer whose state is saved/restored too.
        """
        self.model = model
        self.optimizer = optimizer
    def restore(self, save_path, device=None):
        """Restore a state from a saved checkpoint.

        Args:
            save_path (str): The filepath to the saved checkpoint.
            device (torch.device): The device on which to
                restore the state.

        Returns:
            bool: True on success, False when the file does not exist.
            A checkpoint that exists but does not match the model raises.
        """
        try:
            state = torch.load(save_path, map_location=device)
            try:
                self.model.load_state_dict(state['model_weights'])
                if self.optimizer is not None:
                    self.optimizer.load_state_dict(state['optim_state'])
                logging.info('Successfully loaded model weights from {}.'.format(save_path))
                return True
            except Exception as e:
                # there was an issue loading the state which means
                # either the model definition and saved weights
                # do not agree or they were not saved in the first
                # place.
                # since this is a severe issue, we raise an error
                # rather than allowing the program to proceed.
                raise e
        except FileNotFoundError as e:
            logging.error(e)
            return False
    def save(self, save_path):
        """Save a state to disk atomically.

        Modified from brentyi/fannypack.

        Args:
            save_path (str): The name of the checkpoint to save.
        """
        state = {'model_weights': self.model.state_dict()}
        if self.optimizer is not None:
            state['optim_state'] = self.optimizer.state_dict()
        # ignore ctrl+c while saving so a half-written file can't be left behind
        try:
            orig_handler = signal.getsignal(signal.SIGINT)
            signal.signal(signal.SIGINT, lambda _sig, _frame: None)
        except ValueError:
            # signal throws a ValueError if we're not in the main thread
            orig_handler = None
        # atomic save: write to a temp file first, then rename into place
        save_dir = osp.dirname(save_path)
        tmp_path = osp.join(save_dir, "tmp-{}.ckpt".format(np.random.randint(1e9)))
        torch.save(state, tmp_path)
        # rename is an atomic operation in python
        # it is POSIX compliant according to docs
        # https://docs.python.org/3/library/os.html#os.rename
        os.rename(tmp_path, save_path)
        logging.info('Saved checkpoint at {}.'.format(save_path))
        # restore SIGINT handler
        if orig_handler is not None:
            signal.signal(signal.SIGINT, orig_handler)
class CheckpointManager:
    """Rolling-window manager for `Checkpoint` saves in one directory."""

    def __init__(self, checkpoint, directory, device, max_to_keep=10):
        """Constructor.

        Args:
            checkpoint (Checkpoint): The checkpoint object to save/restore.
            directory (str): Where checkpoint files are written.
            device (torch.device): Device used when restoring states.
            max_to_keep (int): Upper bound on retained checkpoints; once it
                is exceeded, the oldest files are deleted first.
        """
        assert max_to_keep > 0, "max_to_keep should be a positive integer."
        self.checkpoint = checkpoint
        self.directory = directory
        self.max_to_keep = max_to_keep
        self.device = device
        self.latest_checkpoint = None
        # Make sure the target directory exists before any save happens.
        mkdir(self.directory)

    def restore_or_initialize(self):
        """Restore from the newest checkpoint file, if any.

        Returns:
            int: The global step parsed from the restored file's name, or 0
            when no checkpoint exists or restoring failed.
        """
        ckpts = get_files(self.directory, "*.ckpt")
        if not ckpts:
            return 0
        newest = ckpts[-1]
        if not self.checkpoint.restore(newest, self.device):
            logging.info('Could not restore latest checkpoint file.')
            return 0
        self.latest_checkpoint = newest
        # Checkpoint files are named "<global_step>.ckpt".
        return int(osp.basename(newest).split('.')[0])

    def save(self, global_step):
        """Write a checkpoint named after *global_step*, then prune old ones.

        Args:
            global_step (int): Iteration number used to name the checkpoint.
        """
        path = osp.join(self.directory, "{:09d}.ckpt".format(global_step))
        self.checkpoint.save(path)
        self.latest_checkpoint = path
        self._trim_checkpoints()

    def _trim_checkpoints(self):
        """Delete the oldest checkpoints until `max_to_keep` remain."""
        # Newest first; anything past the first `max_to_keep` entries is stale.
        ckpts = get_files(self.directory, "*.ckpt")[::-1]
        for stale in ckpts[self.max_to_keep:]:
            os.remove(stale)

    @staticmethod
    def load_latest_checkpoint(checkpoint, directory, device):
        """Restore *checkpoint* from the newest file found in *directory*."""
        ckpts = get_files(directory, "*.ckpt")
        if ckpts:
            checkpoint.restore(ckpts[-1], device)
        else:
            logging.error('No checkpoints found in {}.'.format(directory))
|
# Code is based on scikit-learns permutation importance.
import numpy as np
from joblib import Parallel
from sklearn.metrics import check_scoring
from sklearn.utils import Bunch
from sklearn.utils import check_random_state
from sklearn.utils import check_array
from sklearn.utils.fixes import delayed
from sklearn.inspection._permutation_importance import _weights_scorer
from grouped_permutation_importance._adapted_permutation_importance import \
_calculate_permutation_scores
from sklearn.base import clone
from sklearn.metrics import get_scorer
def grouped_permutation_importance(estimator, X, y, *, scoring=None,
                                   n_repeats=5, idxs=None, n_jobs=None,
                                   random_state=None, sample_weight=None,
                                   cv=None, perm_set=None, verbose=0,
                                   min_performance=-1, mode="abs"):
    """Permutation importance computed per *group* of feature columns.

    idxs is a list of column-index groups; each group is permuted together.
    With cv given, the estimator is re-fit per split and importances are
    computed (recursively) on the `perm_set` ('train' or 'test') portion,
    concatenated across splits; splits scoring below `min_performance` are
    skipped. With mode="rel" the cv-based importances are normalized by the
    sum of the group means. Returns a Bunch with importances_mean,
    importances_std and the raw importances matrix.
    """
    if not hasattr(X, "iloc"):
        X = check_array(X, force_all_finite='allow-nan', dtype=None)
    if cv is not None:
        if perm_set not in ["train", "test"]:
            raise AttributeError("Parameter cv needs perm_set and set "
                                 "to 'train' or 'test'.")
        importances = np.empty((len(idxs), 0))
        for train_idx, test_idx in cv.split(X, y):
            # Fresh estimator per split so folds don't leak into each other.
            model = clone(estimator)
            model.fit(X[train_idx], y[train_idx])
            if perm_set == "train":
                idx = train_idx
            else:
                idx = test_idx
            added = True
            if min_performance > 0:
                # NOTE(review): _score_func conventionally takes
                # (y_true, y_pred); here predictions are passed first.
                # Harmless for symmetric metrics — confirm for others.
                perf = get_scorer(scoring). \
                    _score_func(model.predict(X[test_idx]), y[test_idx])
                if perf < min_performance:
                    added = False
            if added:
                # Recurse without cv to score this split's permutations.
                importances = np.concatenate(
                    [importances,
                     grouped_permutation_importance(model, X[idx], y[idx],
                                                    scoring=scoring,
                                                    n_repeats=n_repeats,
                                                    idxs=idxs, n_jobs=n_jobs,
                                                    random_state=None,
                                                    sample_weight=None,
                                                    cv=None,
                                                    mode=mode)["importances"]],
                    axis=1)
            if verbose:
                perf = get_scorer(scoring). \
                    _score_func(model.predict(X[test_idx]), y[test_idx])
                print(f"Test-Score: {perf}")
        if mode == "rel":
            # Normalize so the group means sum to 1.
            importances = importances / np.sum(np.mean(importances, axis=1))
        return Bunch(importances_mean=np.mean(importances, axis=1),
                     importances_std=np.std(importances, axis=1),
                     importances=importances)
    else:
        if perm_set is not None:
            raise AttributeError("Parameter perm_set needs cv.")
        # Precompute random seed from the random state to be used
        # to get a fresh independent RandomState instance for each
        # parallel call to _calculate_permutation_scores, irrespective of
        # the fact that variables are shared or not depending on the active
        # joblib backend (sequential, thread-based or process-based).
        random_state = check_random_state(random_state)
        random_seed = random_state.randint(np.iinfo(np.int32).max + 1)
        scorer = check_scoring(estimator, scoring=scoring)
        baseline_score = _weights_scorer(scorer, estimator, X, y, sample_weight)
        # One permutation job per column group.
        scores = Parallel(n_jobs=n_jobs)(delayed(_calculate_permutation_scores)(
            estimator, X, y, sample_weight, col_idx,
            random_seed, n_repeats, scorer
        ) for col_idx in idxs)
        importances = baseline_score - np.array(scores)
        return Bunch(importances_mean=np.mean(importances, axis=1),
                     importances_std=np.std(importances, axis=1),
                     importances=importances)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 9 22:57:58 2020

Trains a scaled linear-regression weight predictor from the
weight-height dataset and pickles the fitted pipeline.

@author: ashokubuntu
"""
# Importing Packages
import pandas as pd
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import pickle
# Loading Weight Height Data
# NOTE(review): data is read from 'data/...' but the model is written to
# '../models/...' — the two are relative to different directories; confirm
# the intended working directory.
wh = pd.read_csv('data/weight-height.csv')
# Converting Categorical values to Numerical values (Male -> 1, Female -> 0)
wh['Gender'] = wh['Gender'].apply(lambda x : {'Male' : 1, 'Female' : 0}[x])
# Extracting Prediction and Features values (columns [1, 0] as X, column 2 as y)
X = wh.iloc[:, [1, 0]].values
y = wh.iloc[:, 2].values
# Splitting data for training and testing
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25,
                                                    random_state = 42)
# Creating a standard-scaled Linear Regression pipeline
# (the old comment said "Support Vector Machine classifier", which this is not)
weight_predictor = make_pipeline(StandardScaler(), LinearRegression())
weight_predictor.fit(X_train, y_train)
# Dumping Model to a pickle file
pickle.dump(weight_predictor, open("../models/weight_predictor.pkl", "wb"))
from .launcher import gen_launch_element_tree
|
######################################################
#
# Generate Harmonics data
#
######################################################
import numpy as np
def generate(sineCoeffArray, sinePeriodsArray, cosineCoeffArray, cosinePeriodsArray, timeSteps, tStart = 0):
    """Generate a harmonic time series of length *timeSteps*.

    For each t in [tStart, timeSteps) the output is
        sum_j sineCoeff[j]  * sin(t * sinePeriod[j]  * 2*pi / timeSteps)
      + sum_k cosineCoeff[k]* cos(t * cosinePeriod[k]* 2*pi / timeSteps);
    indices below tStart stay 0 (assumes 0 <= tStart <= timeSteps).

    Raises:
        ValueError: when a coefficient array and its period array differ
            in length.
    """
    # ValueError is the idiomatic exception for bad arguments; it is a
    # subclass of Exception, so existing `except Exception` callers still work.
    if (len(sineCoeffArray) != len(sinePeriodsArray)):
        raise ValueError('sineCoeffArray and sinePeriodsArray must be of the same length.')
    if (len(cosineCoeffArray) != len(cosinePeriodsArray)):
        raise ValueError('cosineCoeffArray and cosinePeriodsArray must be of the same length.')
    outputArray = np.zeros(timeSteps)
    T = float(timeSteps)
    omega = 2.0 * np.pi / T
    # Vectorized over time: one numpy pass per harmonic term instead of a
    # Python loop over every time step.
    t = np.arange(tStart, timeSteps)
    for coeff, period in zip(sineCoeffArray, sinePeriodsArray):
        outputArray[tStart:] += coeff * np.sin(t * period * omega)
    for coeff, period in zip(cosineCoeffArray, cosinePeriodsArray):
        outputArray[tStart:] += coeff * np.cos(t * period * omega)
    return outputArray
"""
This script analyzes the partition results
"""
import networkx as nx
import os
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import csv
import numpy as np
import genetic_partition_test as gp
import itertools
from networkx.drawing.nx_agraph import graphviz_layout
import matplotlib.image as mpimg
import seaborn as sns
from collections import Counter
from scipy import stats
def get_node_number(edgelist):
    """Return how many nodes are in the directed graph stored at *edgelist*."""
    graph = nx.read_edgelist(edgelist, nodetype=str, create_using=nx.DiGraph())
    return graph.number_of_nodes()
def load_graph(edgelist):
    """Read *edgelist* into a directed graph with string node labels."""
    return nx.read_edgelist(edgelist, nodetype=str, create_using=nx.DiGraph())
def load_data (filename):
    """Load a tab-delimited data file.

    Rows whose length doesn't match the header are skipped. Returns a dict
    mapping a 1-based solution counter (advanced only on valid rows) to
    {header_column: value}.
    """
    # FIX: mode 'rU' was removed in Python 3.11; the csv docs recommend
    # opening with newline=''. Also use a context manager so the file handle
    # is always closed.
    data = {}
    with open(filename, 'r', newline='') as f:
        data_reader = csv.reader(f, delimiter='\t')
        # Ignore header
        header = next(data_reader)
        # Process each line
        sol = 1
        for row in data_reader:
            if len(row) == len(header):
                data[sol] = dict(zip(header, row))
                sol += 1
    return data
def load_sol (filename):
    """Load a best_solns file, keyed by each row's second column.

    Rows whose length doesn't match the header are skipped. For each valid
    row, columns from index 2 onward are stored as {header_name: value}.
    """
    # FIX: mode 'rU' was removed in Python 3.11; open with newline='' per the
    # csv docs, and close the handle via a context manager.
    data = {}
    with open(filename, 'r', newline='') as f:
        data_reader = csv.reader(f, delimiter='\t')
        # Ignore header
        header = next(data_reader)
        # Process each line
        for row in data_reader:
            if len(row) == len(header):
                data[row[1]] = {el: row[el_idx]
                                for el_idx, el in enumerate(header[2:], 2)}
    return data
def plot_network (G, outfig):
    """Draw graph *G* with a Kamada-Kawai layout, save to *outfig*, and show it.

    The commented-out sections below are alternative layouts (random,
    graphviz dot, flipped, circular) kept for experimentation.
    """
    # pos = nx.kamada_kawai_layout(G)
    # # pos = nx.random_layout(G)
    # plt.figure(num=None, figsize=(3,3), dpi=80)
    # nx.draw(
    # 	G,
    # 	pos=pos,
    # 	horizontalalignment='left',
    # 	verticalalignment='bottom',
    # 	node_color='powderblue'
    # 	)
    # # plt.savefig(outfig, dpi=200)
    # plt.show()
    ### graphviz layout
    pos = nx.kamada_kawai_layout(G)
    print(len(G.nodes()))
    # pos = nx.random_layout(G)
    # pos = graphviz_layout(G, prog='dot')
    plt.figure(num=None, figsize=(3,3), dpi=80)
    nx.draw (G, pos, node_color='powderblue', width=0.25, node_size=30, arrowsize=6, with_labels=False)
    plt.savefig (outfig, dpi=200)
    plt.show()
    ### plot flipped positions
    # print(len(G.nodes()))
    # plt.figure(num=None, figsize=(4,3), dpi=80)
    # flip_xy_pos = {}
    # flipped_pos = {node: (-y, x) for (node, (x, y)) in pos.items()}
    # nx.draw (G, pos=flipped_pos, node_color='powderblue', width=0.25, node_size=30, arrowsize=6,horizontalalignment='left',verticalalignment='bottom')
    # plt.savefig (outfig, dpi=200)
    # plt.show()
    ### plot circular layout
    # plt.figure(figsize=(4,4))
    # pos = nx.circular_layout(G)
    # nx.draw(G, pos=pos, node_size=30, node_color='powderblue', width=0.25, arrowsize=6, connectionstyle='arc3, rad=0.1')
    # # nx.draw_networkx_nodes(G, pos, node_size=20, node_color='powderblue')
    # # nx.draw_networkx_edges(G, pos, width=0.25, arrowsize=6, connectionstyle='arc3, rad=0.1')
    # plt.savefig(outfig, dpi=200)
    # plt.show()
def plot_centrality (G, outfig):
    """ compute and plot closeness centrality of nodes, betweenness centrality of nodes, and degree of nodes

    Plots the z-scores of the three centrality measures (one subplot each,
    nodes in G.nodes() order on the x-axis), saves to *outfig*, and shows
    the figure.
    """
    closenessDict = nx.algorithms.centrality.closeness_centrality (G)
    sorted_closeness = sorted(closenessDict.items(), key=lambda x: x[1])[::-1]
    betweennessDict = nx.algorithms.centrality.betweenness_centrality (G)
    sorted_betweenness = sorted(betweennessDict.items(), key=lambda x: x[1])[::-1]
    degreeDict = nx.algorithms.centrality.degree_centrality (G)
    sorted_degree = sorted(degreeDict.items(), key=lambda x: x[1])[::-1]
    fig = plt.figure(figsize=(5,3))
    nodes = list(G.nodes())
    # NOTE(review): the *_norm min-max values (and the sorted_* lists above)
    # are computed but never used — only the z-scores are plotted.
    y1 = [closenessDict[x] for x in nodes]
    y1_norm = [(y-min(y1))/(max(y1)-min(y1)) for y in y1]
    zscore1 = stats.zscore(y1)
    y2 = [betweennessDict[x] for x in nodes]
    y2_norm = [(y-min(y2))/(max(y2)-min(y2)) for y in y2]
    zscore2 = stats.zscore(y2)
    y3 = [degreeDict[x] for x in nodes]
    y3_norm = [(y-min(y3))/(max(y3)-min(y3)) for y in y3]
    zscore3 = stats.zscore(y3)
    x = range(1, len(nodes)+1)
    ax1 = fig.add_subplot(311)
    ax1.plot(x, zscore1, 'o-', markersize=4, c='k')
    ax2 = fig.add_subplot(312)
    ax2.plot(x, zscore2, 'o-', markersize=4, c='k')
    ax3 = fig.add_subplot(313)
    ax3.plot(x, zscore3, 'o-', markersize=4, c='k')
    # ax1.set_ylim([0, 0.4])
    ax1.set_xticks([])
    # ax2.set_ylim([0, 0.4])
    ax2.set_xticks([])
    # ax3.set_ylim([0, 0.4])
    plt.savefig(outfig, dpi=200)
    plt.show()
def count_nodes (indir1, indir2):
    """ count the number of nodes in graphs

    Plots overlaid histograms of node counts for the 4-input and 5-input
    benchmark circuits found in *indir1* / *indir2*, and saves the figure
    to 'Node distribution.pdf'.

    NOTE(review): relies on a module-level PATH that is not defined in this
    file as shown — confirm it is provided elsewhere before running.
    """
    count1, count2 = [], []
    for benchmark in os.listdir(indir1):
        if benchmark.startswith('.') == False:
            edgelist1 = PATH + 'runs/benchmark/4-input-boolean-circuits/' + benchmark + '/DAG.edgelist'
            node_number1 = get_node_number (edgelist1)
            count1.append( node_number1 )
    for benchmark in os.listdir(indir2):
        if benchmark.startswith('.') == False:
            edgelist2 = PATH + 'runs/benchmark/5-input-boolean-circuits/' + benchmark + '/DAG.edgelist'
            node_number2 = get_node_number (edgelist2)
            count2.append( node_number2 )
    fig = plt.figure(figsize=(5,5))
    ax = fig.add_subplot(111)
    colors = ['orange', 'blue']
    labels = ['4-input Boolean circuits', '5-input Boolean circuits']
    plt.hist([count1, count2], 10, histtype='step', stacked=False, fill=False, label=labels)
    # ax.hist(count1, ec='k', histtype='step', fill=False, facecolor='wheat')
    # ax.hist(count2, ec='k', histtype='step', fill=False, facecolor='g')
    ax.set_xlabel('Number of vertices')
    ax.set_ylabel('Count')
    ax.set_xlim([0, 50])
    plt.legend(loc='upper right', frameon=False)
    plt.gca().yaxis.set_major_locator(mticker.MultipleLocator(4))
    plt.gca().xaxis.set_major_locator(mticker.MultipleLocator(5))
    for spine in ['top', 'right']:
        ax.spines[spine].set_linewidth(0)
    plt.savefig('Node distribution.pdf', dpi=200)
    plt.show()
def partition_stats ():
    """Count how many 5-input benchmarks satisfy the hc/lc constraints.

    For every benchmark: checks whether any original METIS partition is
    loop-free and motif-allowed under the high-constraint ('2,2') and
    low-constraint ('4') settings, and whether the optimized best_solns file
    contains hc/lc solutions. Writes the four per-benchmark counts to
    'Constraint stats-5-input.txt'.
    """
    ori_hc_valid_count, ori_lc_valid_count, hc_valid_count, lc_valid_count = 0, 0, 0, 0
    bm_path = PATH + 'runs/benchmark/5-input-boolean-circuits/'
    rs_path = PATH + 'runs/results/5-input-boolean-circuits/'
    for benchmark in os.listdir(bm_path):
        # determine whether original partition satisfy hc or lc
        if benchmark.startswith('.') == False:
            # check whether original partition satisfy lc/hc
            ori_hc_valid_bm, ori_lc_valid_bm = 0, 0
            edgelist = bm_path + benchmark + '/DAG.edgelist'
            G = load_graph (edgelist)
            in_nodes, out_nodes, nonprimitives = gp.get_nonprimitive_nodes (G)
            G_primitive = gp.get_G_primitive (G, nonprimitives)
            for npart in os.listdir(rs_path + benchmark + '/nparts/'):
                if npart.isdigit():
                    part_sol = rs_path + benchmark + '/nparts/' + npart + '/part_solns.txt'
                    cut, partDict = gp.load_metis_part_sol (part_sol)
                    part_opt = (cut, [gp.get_part(partDict, n) for n in G_primitive.nodes()])
                    matrix, partG = gp.partition_matrix (G_primitive, part_opt)
                    loop_free = gp.check_cycles(partG)
                    # '2,2' is the high-constraint motif limit, '4' the low one.
                    motif_allowed_hc = gp.check_motif_allowed(matrix, '2,2')
                    motif_allowed_lc = gp.check_motif_allowed(matrix, '4')
                    if loop_free and motif_allowed_hc:
                        ori_hc_valid_bm += 1
                    if loop_free and motif_allowed_lc:
                        ori_lc_valid_bm += 1
            # check whether optimized solution satisfy lc/hc
            best_sol = rs_path + benchmark + '/nparts/best_solns.txt'
            if os.path.exists(best_sol):
                data = load_data (best_sol)
                hc_valid_bm, lc_valid_bm = 0, 0
                for sol in data:
                    constraint = data[sol]['Constraint']
                    if constraint == 'hc':
                        hc_valid_bm += 1
                    else:
                        lc_valid_bm += 1
                # A benchmark counts once if it has at least one valid solution.
                if ori_hc_valid_bm > 0:
                    ori_hc_valid_count += 1
                if ori_lc_valid_bm > 0:
                    ori_lc_valid_count += 1
                if hc_valid_bm > 0:
                    hc_valid_count += 1
                if lc_valid_bm > 0:
                    lc_valid_count += 1
            else:
                print('best sol for benchmark', benchmark, 'does not exist')
    f_out = open (PATH+'runs/results/analysis/Constraint stats-5-input.txt', 'w')
    f_out.write('Original Partition\thc\t'+str(ori_hc_valid_count)+'\n')
    f_out.write('Original Partition\tlc\t'+str(ori_lc_valid_count)+'\n')
    f_out.write('Optimized Partition\thc\t'+str(hc_valid_count)+'\n')
    f_out.write('Optimized Partition\tlc\t'+str(lc_valid_count)+'\n')
def compile_best_solutions ():
	""" compile the best partition solution of each 5-input benchmark into one summary file

	For every benchmark, prefer solutions obtained under the high constraint
	('hc'); fall back to low-constraint ('lc') solutions only when no hc
	solution exists.  Among the candidates, pick the one with the smallest
	optimized depth (T_Optimized), breaking ties by the smallest number of
	partitions (Npart).  Benchmarks without a best_solns.txt are reported to
	stdout; benchmarks with an empty solution dict are silently skipped.
	"""
	bm_path = PATH + 'runs/benchmark/5-input-boolean-circuits/'
	rs_path = PATH + 'runs/results/5-input-boolean-circuits/'
	# column order shared by the header and every data row
	fields = ['Npart', 'Sol', 'Nodes', 'Constraint', 'Valid Motif_METIS', 'Valid Motif_Optimized', 'Cycle Free_METIS', 'Cycle Free_Optimized', 'T_Metis', 'T_Optimized', 'cut_Metis', 'cut_Optimized']
	f_out = open (PATH+'runs/results/5-input-boolean-circuits/best_solns.txt', 'w')
	f_out.write('\t'.join(['Category', 'Circuit'] + fields)+'\n')
	for benchmark in os.listdir(bm_path):
		if benchmark.startswith('.'):
			continue  # skip hidden files such as .DS_Store
		best_sol = rs_path + benchmark + '/nparts/best_solns.txt'
		if not os.path.exists(best_sol):
			print('best sol for benchmark', benchmark, 'does not exist')
			continue
		data = load_data (best_sol)
		if data == {}:
			continue
		# bug fix: the original code did `best_sol = data[1]`, which raised
		# KeyError when solution key 1 was absent and was otherwise unused
		hc_sol = [sol for sol in data.keys() if data[sol]['Constraint'] == 'hc']
		lc_sol = [sol for sol in data.keys() if data[sol]['Constraint'] == 'lc']
		# prefer hc solutions; fall back to lc only when no hc solution exists
		pool = hc_sol if hc_sol != [] else lc_sol
		minT = min([data[sol]['T_Optimized'] for sol in pool])
		minN = min([data[sol]['Npart'] for sol in pool if data[sol]['T_Optimized'] == minT])
		candidates = [sol for sol in pool if data[sol]['T_Optimized'] == minT and data[sol]['Npart'] == minN]
		cd = data[candidates[0]]
		f_out.write('\t'.join(['5-input', benchmark] + [cd[f] for f in fields])+'\n')
	f_out.close()
def load_timestep (filename):
	""" parse a 'part improved.txt'-style record file and return all time steps

	filename - path to a tab-separated file whose first line is a header and
	           whose following lines carry a comma-separated list of time
	           steps in the second column

	Returns a flat list of time-step strings with empty entries removed.
	"""
	# use a context manager so the file handle is closed (the original leaked it)
	with open(filename, 'r') as f:
		lines = f.read().strip("\n").split('\n')
	timeList = []
	for line in lines[1:]:
		times = line.split('\t')[1].split(',')
		timeList = timeList + times
	return list(filter(None, timeList))
def load_median_connectivity (input_n):
	""" compute the median degree of connectivity among partitioned subgraphs

	input_n - number of circuit inputs as a string (e.g. '5')

	Returns {benchmark: {npart: median degree}} for every benchmark and
	METIS partition solution found under the results directory.
	"""
	bm_path = PATH + 'runs/benchmark/'+input_n+'-input-boolean-circuits/'
	rs_path = PATH + 'runs/results/'+input_n+'-input-boolean-circuits/'
	connDict = {}
	for bm in os.listdir(rs_path):
		if not bm.isdigit():
			continue
		# load the benchmark DAG and strip non-primitive (I/O) vertices
		G = load_graph (bm_path + bm + '/DAG.edgelist')
		in_nodes, out_nodes, nonprimitives = gp.get_nonprimitive_nodes (G)
		G_primitive = gp.get_G_primitive (G, nonprimitives)
		connDict[bm] = {}
		npart_dir = rs_path + bm + '/nparts/'
		for npart in os.listdir(npart_dir):
			if not npart.isdigit():
				continue
			cut, partDict = gp.load_metis_part_sol (npart_dir + npart + '/part_solns.txt')
			part_opt = (cut, [gp.get_part(partDict, n) for n in G_primitive.nodes()])
			matrix, partG = gp.partition_matrix (G_primitive, part_opt)
			# median of (in-degree + out-degree) across all subgraphs
			connDict[bm][npart] = np.median(matrix.sum(axis=0) + matrix.sum(axis=1).T)
	return connDict
def plot_timestep ():
	""" plot the distribution of timesteps at which improvement is made

	For every 5-input benchmark and initial partition number, load the
	'part improved.txt' records of the high-constraint (hc) and low-constraint
	(lc) optimization runs, then plot:
	  1) number of optimizations vs. average vertices per subgraph
	  2) number of optimizations vs. median subgraph connectivity
	Figures are saved under runs/results/analysis/.  A third plot (timestep
	histograms grouped by N) is present but commented out.
	"""
	bm_path = PATH + 'runs/benchmark/5-input-boolean-circuits/'
	rs_path = PATH + 'runs/results/5-input-boolean-circuits/'
	# timesteps pooled per initial partition number (dicts) and overall (lists)
	hc_timeDict, lc_timeDict = {}, {}
	hc_timeList, lc_timeList = [], []
	connectivityDict = load_median_connectivity ('5')
	hc_avgConn_opt_List, lc_avgConn_opt_List = [], [] # associate average degree of connectivity among subgraphs, and number of optimized steps
	hc_avgNode_opt_List, lc_avgNode_opt_List = [], [] # associate number of nodes in partitioned subgraphs, and number of optimized steps
	for benchmark in os.listdir(rs_path):
		if benchmark.isdigit():
			for npart in os.listdir(rs_path + benchmark + '/nparts/'):
				if npart.isdigit():
					hc_opt_file = rs_path+benchmark+'/nparts/'+npart+'/optimized_hc/'+'part improved.txt'
					lc_opt_file = rs_path+benchmark+'/nparts/'+npart+'/optimized_lc/'+'part improved.txt'
					G = load_graph (bm_path + benchmark + '/DAG.edgelist')
					primN = len(list(G.nodes())) - 6 # number of primitive vertices; assumes 6 non-primitive (I/O) nodes per 5-input circuit -- TODO confirm
					if os.path.exists(hc_opt_file):
						timeList = load_timestep (hc_opt_file)
						hc_timeList = hc_timeList + timeList
						# pool timesteps per initial partition number
						if npart in hc_timeDict:
							hc_timeDict[npart] = hc_timeDict[npart] + timeList
						else:
							hc_timeDict[npart] = timeList
						# record (avg nodes per subgraph, #improvements) and (median connectivity, #improvements)
						hc_avgNode_opt_List.append((primN/int(npart), len(timeList)))
						hc_avgConn_opt_List.append((connectivityDict[benchmark][npart], len(timeList)))
					if os.path.exists(lc_opt_file):
						timeList = load_timestep (lc_opt_file)
						lc_timeList = lc_timeList + timeList
						if npart in lc_timeDict:
							lc_timeDict[npart] = lc_timeDict[npart] + timeList
						else:
							lc_timeDict[npart] = timeList
						lc_avgNode_opt_List.append((primN/int(npart), len(timeList)))
						lc_avgConn_opt_List.append((connectivityDict[benchmark][npart], len(timeList)))
	### plot distribution of optimization at timesteps (grouped by constraints, and N)
	# fig = plt.figure(figsize=(9,9))
	# # ax.hist([int(t) for t in hc_timeList], fc='orange', histtype='step', label='High Constraint')
	# # ax.hist([int(t) for t in lc_timeList], fc='blue', histtype='step', label='Low Constraint')
	# idx = 1
	# for npart in ['3', '4', '5', '6', '7', '8', '9', '10', '11']:
	# ax = fig.add_subplot(3,3,idx)
	# ax.hist([int(t) for t in lc_timeDict[npart]], histtype='step', label = npart)
	# ax.set_ylim([0, 2000])
	# ax.set_xlim([0, 10000])
	# if idx != 7:
	# ax.set_xticks([])
	# ax.set_yticks([])
	# if idx == 7:
	# ax.set_xlabel('Time Step')
	# ax.set_ylabel('Count')
	# ax.set_title('N='+npart)
	# for spine in ['top', 'right']:
	# ax.spines[spine].set_linewidth(0)
	# idx += 1
	# plt.savefig(PATH+'runs/results/analysis/Optimization at Timestep (lc grouped by N).pdf', dpi=200)
	# plt.show()
	### plot average node in subgraphs vs. number of optimized steps
	fig = plt.figure( figsize=(5,5) )
	ax = fig.add_subplot (111)
	x = list(list(zip(*hc_avgNode_opt_List))[0])
	y = list(list(zip(*hc_avgNode_opt_List))[1])
	ax.plot (x, y, 'o', c='#1f77b4', fillstyle='none', label='High Constraint')
	x = list(list(zip(*lc_avgNode_opt_List))[0])
	y = list(list(zip(*lc_avgNode_opt_List))[1])
	ax.plot (x, y, 'o', c='#ff7f0f', fillstyle='none', label='Low Constraint')
	ax.set_xlim([2.5, 7.5])
	ax.set_ylim([0, 80])
	ax.set_xlabel('Average vertices per subgraph')
	ax.set_ylabel('Number of optimization')
	plt.legend(loc='upper right', frameon=False)
	plt.savefig(PATH+'runs/results/analysis/Number of optimization vs. Average Nodes.pdf', dpi=200)
	plt.show()
	### plot number of optimization vs. median degree of connectivity
	fig = plt.figure( figsize=(5,5) )
	ax = fig.add_subplot (111)
	x = list(list(zip(*hc_avgConn_opt_List))[0])
	y = list(list(zip(*hc_avgConn_opt_List))[1])
	ax.plot (x, y, 'o', c='#1f77b4', fillstyle='none', label='High Constraint')
	x = list(list(zip(*lc_avgConn_opt_List))[0])
	y = list(list(zip(*lc_avgConn_opt_List))[1])
	ax.plot (x, y, 'o', c='#ff7f0f', fillstyle='none', label='Low Constraint')
	# ax.set_xlim([2.5, 7.5])
	ax.set_ylim([0, 80])
	ax.set_xlabel('Subgraph median degree of connectivity')
	ax.set_ylabel('Number of optimization')
	plt.legend(loc='upper right', frameon=False)
	plt.savefig(PATH+'runs/results/analysis/Number of optimization vs. Subgraph connectivity.pdf', dpi=200)
	plt.show()
def to_tuple (matrix):
	""" convert a 2D numpy array into a hashable tuple of row tuples """
	rows = []
	for row in matrix:
		rows.append(tuple(row.tolist()))
	return tuple(rows)
def count_motif ():
	""" count the occurrence frequency of cell-communication subnetwork motifs

	For the best partition solution of every 4- and 5-input benchmark, extract
	the neighborhood subnetwork of each cell (the cell plus all cells it
	exchanges signals with), canonicalize the subnetwork's adjacency matrix up
	to vertex permutation, and accumulate a weighted occurrence count per
	unique motif.  Results are written to runs/results/analysis/motif freq.txt.
	"""
	motifDict = {}
	for N in [4, 5]:
		bm_path = PATH + 'runs/benchmark/'+str(N)+'-input-boolean-circuits/'
		rs_path = PATH + 'runs/results/'+str(N)+'-input-boolean-circuits/'
		best_sol = PATH + 'runs/results/'+str(N)+'-input-boolean-circuits/best_solns.txt'
		data = load_sol (best_sol)
		for benchmark in data.keys():
			print(benchmark)
			edgelist = bm_path + benchmark + '/DAG.edgelist'
			G = load_graph (edgelist)
			in_nodes, out_nodes, nonprimitives = gp.get_nonprimitive_nodes (G)
			G_primitive = gp.get_G_primitive (G, nonprimitives)
			# Sol != 0 means the best solution came from the optimizer;
			# Sol == 0 means the raw METIS partition was the best
			if int(data[benchmark]['Sol']) != 0:
				part_sol = rs_path + benchmark + '/nparts/' + data[benchmark]['Npart'] + '/optimized_' + data[benchmark]['Constraint'] + '/part_solns.txt'
				solDict = gp.load_opt_part_sol (part_sol)
				cut = int(solDict[int(data[benchmark]['Sol'])]['cut'])
				part = solDict[int(data[benchmark]['Sol'])]['part']
				part_opt = (cut, [gp.get_part(part, n) for n in G_primitive.nodes()])
				matrix, partG = gp.partition_matrix (G_primitive, part_opt)
			else:
				part_sol = rs_path + benchmark + '/nparts/' + data[benchmark]['Npart'] + '/part_solns.txt'
				cut, partDict = gp.load_metis_part_sol (part_sol)
				part_opt = (cut, [gp.get_part(partDict, n) for n in G_primitive.nodes()])
				matrix, partG = gp.partition_matrix (G_primitive, part_opt)
			# count the occurrence of each subnetwork
			for cell in list(partG.nodes()):
				# collect every cell adjacent to this one (either direction)
				neighbors = []
				for e in list(partG.edges()):
					c1, c2 = e[0], e[1]
					if e[0] == cell: neighbors.append(e[1])
					elif e[1] == cell: neighbors.append(e[0])
				subG_cells = list(set(neighbors)) + [cell]
				subG_matrix, subG_partG = gp.get_part_matrix_subG (matrix, partG, subG_cells)
				dim = subG_matrix.shape[0]
				# print('part matrix', matrix)
				I = np.identity(n=dim) # identity matrix
				# enumerate all permutation-equivalent forms of the subnetwork
				# matrix (pm * M * pm.T); note this is O(dim!) permutations
				dmList = []
				for pm in itertools.permutations(I): # permutation matrix
					pm = np.array(pm)
					# print('perm matrix', pm)
					dm = np.dot(pm, subG_matrix) # permutate by rows
					# print('product', np.dot(dm, pm.T))
					dm = np.dot(dm, pm.T)
					dmList.append (to_tuple(dm)) # append all diagnizable matrix of partition matrix
				# print('all diag matrix', dmList)
				# motif_exist = set(dmList).intersect(set(motifDict.keys()))
				# each occurrence is weighted by 1/dim (a motif of dim cells is
				# seen from dim cells) and normalized by the benchmark count
				if any(dm in motifDict for dm in dmList) == False:
					# print('motif not found in dict', matrix)
					motifDict[to_tuple(subG_matrix)] = (1/dim)/len(list(data.keys()))
					# print('new motifdict', motifDict)
				else:
					motif = set(dmList).intersection(set(motifDict.keys()))
					# print('motif', motif,'already in dict')
					motifDict[tuple(motif)[0]] += (1/dim)/len(list(data.keys()))
	# write output
	f_out = open(PATH + 'runs/results/analysis/motif freq.txt', 'w')
	f_out.write('Motif matrix\tOccur\n')
	for motif in motifDict:
		f_out.write(str(motif)+'\t'+str(motifDict[motif])+'\n')
		print(motif, motifDict[motif])
def load_motif_data (inputfile):
	""" load a motif-frequency file written by count_motif

	inputfile - tab-separated file with a header line, then one line per
	            motif: motif-matrix string <TAB> occurrence frequency

	Returns {motif string: frequency (float)}.
	"""
	# use a context manager so the file handle is closed (the original leaked it)
	with open(inputfile, 'r') as f:
		lines = f.read().strip("\n").split('\n')
	motif_dict = {}
	for line in lines[1:]:
		tokens = line.split('\t')
		motif_dict[tokens[0]] = float(tokens[1])
	return motif_dict
def generate_comm_graph (matrix):
	""" build a directed cell-cell communication graph from an adjacency matrix

	Every nonzero entry (i, j) of the matrix becomes a directed edge i -> j.
	"""
	r_idx, c_idx = np.where(matrix != 0)
	partG = nx.DiGraph()
	partG.add_edges_from(zip(r_idx.tolist(), c_idx.tolist()))
	return partG
def plot_motif_occurence (inputfile):
	""" plot the 10 highest occuring motifs from motif frequency results

	NOTE(review): all plotting below is currently commented out, so this
	function only loads the motif data and computes the sorted frequency
	list without producing any output.
	"""
	data = load_motif_data (inputfile)
	sort_occur = sorted(data, key=data.get, reverse=True) # sort occur from highest to lowest
	# frequency of each motif relative to the total occurrence count
	sort_occur_freq = [data[k]/sum(data.values()) for k in sort_occur]
	## plot motif
	# fig = plt.figure(figsize=(12,12))
	# sp = 1
	# for motif in sort_occur:
	# print(sp, motif)
	# if sp <= 25:
	# ax = fig.add_subplot(5,5,sp)
	# matrix = np.array(eval(motif))
	# partG = generate_comm_graph(matrix)
	# pos = graphviz_layout(partG, prog='dot')
	# nx.draw_networkx_nodes(partG, pos, node_size=30, node_color='#b18ea6')
	# labels={n:n for n in partG.nodes()}
	# nx.draw_networkx_edges(partG, pos)
	# ax.axis('off')
	# sp += 1
	# plt.savefig(PATH + 'runs/results/analysis/motif occur.pdf', dpi=200)
	# plt.show()
	## plot frequency
	# fig = plt.figure(figsize=(8,3))
	# ax = fig.add_subplot(111)
	# x = list(range(1, len(sort_occur_freq)+1))
	# ax.bar(x, sort_occur_freq, color='white', edgecolor='k')
	# plt.axvline(x=25.5, color='r', linestyle='--')
	# ax.set_xlabel('Subgraph Network Motif')
	# ax.set_ylabel('Frequency (%)')
	# plt.legend(loc='upper right', frameon=False)
	# for spine in ['top', 'right']:
	# ax.spines[spine].set_linewidth(0)
	# plt.subplots_adjust(bottom=0.15)
	# plt.savefig(PATH + 'runs/results/analysis/motif frequncy.pdf', dpi=200)
	# plt.show()
	# print(sum(sort_occur_freq[0:25]),len(sort_occur_freq[0:25]), sum(sort_occur_freq))
def load_deltaD (N):
	""" return the depth improvements (METIS depth minus optimized depth)
	across all N-input benchmark circuits """
	best_soln = PATH + 'runs/results/'+str(N)+'-input-boolean-circuits/best_solns.txt'
	data = load_sol (best_soln)
	return [int(data[bm]['T_Metis']) - int(data[bm]['T_Optimized']) for bm in data]
def plot_deltaD ():
	""" plot delta T of all 4- and 5-input circuits

	Draws overlaid step histograms of the depth improvement
	(T_Metis - T_Optimized) for the 4-input and 5-input benchmark sets.
	The figure is shown interactively; saving is commented out.
	"""
	deltaD_4 = load_deltaD (4)
	deltaD_5 = load_deltaD (5)
	fig = plt.figure()
	ax = fig.add_subplot(111)
	colors = ['orange', 'blue']  # NOTE(review): unused; colors fall back to matplotlib defaults
	labels = ['4-input Boolean circuits', '5-input Boolean circuits']
	plt.hist([deltaD_4, deltaD_5], 10, histtype='step', stacked=False, fill=False, label=labels)
	# ax.hist(count1, ec='k', histtype='step', fill=False, facecolor='wheat')
	# ax.hist(count2, ec='k', histtype='step', fill=False, facecolor='g')
	ax.set_xlabel('Delta Depth')
	ax.set_ylabel('Count')
	plt.legend(loc='upper right', frameon=False)
	plt.gca().yaxis.set_major_locator(mticker.MultipleLocator(4))
	plt.gca().xaxis.set_major_locator(mticker.MultipleLocator(5))
	# hide the top and right spines
	for spine in ['top', 'right']:
		ax.spines[spine].set_linewidth(0)
	# plt.savefig('Node distribution.pdf', dpi=200)
	plt.show()
def visualize_subnetworks_unmet_constraint (path, constraint):
	""" for each optimization attempt, visualize the subnetworks that unmet constraints

	Loads the random-graph benchmark n20_p0.03 and its partition solutions,
	renders the original METIS partition, then (for connectivity setting '3'
	only) renders every optimization iteration, highlighting the cells that
	fail the connectivity constraint.

	path       - root directory containing runs/benchmark and runs/results
	constraint - unused  # NOTE(review): parameter is never read
	"""
	bm_path = path + 'runs/benchmark/bionetwork/random_graph/n20_p0.03/'
	sol_path = path + 'runs/results/bionetwork/RG_n20_p0.03/nparts/'
	npart = 5  # NOTE: hard-coded initial partition number
	# for npart in os.listdir(sol_path):
	# if npart.startswith('.') == False and npart != 'best_solns.txt':
	# print ('npart', npart)
	# load graph and metis partition
	edgelist = bm_path + 'DAG.edgelist'
	G = load_graph (edgelist)
	in_nodes, out_nodes, nonprimitives = gp.get_nonprimitive_nodes (G)
	# G_primitive = gp.get_G_primitive (G, nonprimitives)
	G_primitive = G  # the full graph is used here, non-primitive nodes included
	# visualize original partition
	cut, partDict = gp.load_metis_part_sol (sol_path+str(npart)+'/part_solns.txt')
	part_opt = [gp.get_part(partDict, n) for n in G_primitive.nodes()]
	matrix, partG = gp.partition_matrix (G_primitive, part_opt)
	# qs_matrix = gp.calc_qs (G_primitive, part_opt)
	cell_unmet_const, cell_met_const = gp.get_cells_unmet_constraint (matrix, partG, [1], 'FALSE')
	# cell_unmet_const, cell_met_const = gp.get_cells_unmet_qs_constraint (matrix, partG, qs_matrix, [4], 'TRUE')
	gp.visualize_assignment_graphviz (G, part_opt, nonprimitives, 'FALSE', sol_path, 0, [])
	for conn in os.listdir(sol_path+str(npart)+'/optimized'):
		print(conn)
		if conn == '3':  # NOTE: only the connectivity-3 results are visualized
			part_sol = sol_path+str(npart)+'/part_solns.txt'
			# part_sol = sol_path + 'part_solns.txt'
			cut, partDict = gp.load_metis_part_sol (part_sol)
			# f_out = open (sol_path + 'optimized_lc/iteration-2.txt', 'w')
			opt_file = sol_path+str(npart)+'/optimized/'+conn+'/part_solns.txt'
			solDict = gp.load_opt_part_sol (opt_file)
			for iteration in solDict.keys():
				print('iteration', iteration)
				part = solDict[iteration]['part']
				# skip iterations whose partition is identical to the METIS start
				if part != partDict:
					part_opt = [gp.get_part(part, n) for n in G_primitive.nodes()]
					matrix, partG = gp.partition_matrix (G_primitive, part_opt)
					# qs_matrix = gp.calc_qs (G_primitive, part_opt)
					# median_qs_best = np.mean(np.sum(qs_matrix, axis=1))
					# cell_unmet_const, cell_met_const = gp.get_cells_unmet_qs_constraint (matrix, partG, qs_matrix, [4], 'TRUE')
					cell_unmet_const, cell_met_const = gp.get_cells_unmet_constraint (matrix, partG, [int(conn)], 'FALSE')
					if len(cell_unmet_const) == 0: print ('solution')
					# for idx, p in enumerate(part):
					# # print('Partition '+str(part_num)+' '+','.join(part[p]))
					# qs = list(qs_matrix[idx])
					# sumqs = sum(qs)
					# f_out.write('Partition '+str(idx+1)+'\t'+str(sumqs)+'\t'+str(len(part[p]))+'\t'+', '.join([str(int(v)) for v in qs])+'\t'+', '.join(part[p])+'\t'+'\t'+', '.join([v for v in part[p]])+'\n')
					# print('Partition '+str(idx+1)+'\t'+str(sumqs)+'\t'+str(len(part[p]))+'\t'+', '.join([str(int(v)) for v in qs])+'\t'+', '.join(part[p])+'\t'+'\t'+', '.join([v for v in part[p]])+'\n')
					# print(iteration, median_qs_best, solDict[iteration]['T'], len(cell_unmet_const))
					gp.visualize_assignment_graphviz (G, part_opt, nonprimitives, 'FALSE', sol_path+str(npart)+'/optimized/'+conn, iteration, cell_unmet_const)
def compare_runs (path, constraint):
	""" compare optimization runs started from different initial cell numbers

	NOTE(review): the original docstring was copied from
	visualize_subnetworks_unmet_constraint.  This function actually
	summarizes, for each initial partition number in `nparts`, the
	per-iteration QS statistics of the low-constraint optimization, writes
	them to a results file, and plots average QS per cell vs. the fraction
	of cells with >= 3 QS, grouped by end cell number.

	path       - root directory containing runs/benchmark and runs/results
	constraint - unused  # NOTE(review): parameter is never read
	"""
	bm_path = path + 'runs/benchmark/electronic-circuits/alu/'
	# for npart in os.listdir(sol_path):
	# if npart.startswith('.') == False and npart != 'best_solns.txt':
	# print ('npart', npart)
	# load graph and metis partition
	edgelist = bm_path + '/DAG.edgelist'
	G = load_graph (edgelist)
	in_nodes, out_nodes, nonprimitives = gp.get_nonprimitive_nodes (G)
	G_primitive = gp.get_G_primitive (G, nonprimitives)
	nparts = [47, 48, 49, 50]
	dataDict = {}
	f_out = open (path + 'runs/results/electronic-circuits/alu/nparts/Compare Runs Results.txt', 'w')
	f_out.write('Cell number\tIteration\tEnd Cell Number\tMedian QS Per Cell\tNumber of Cells with >=3 QS\tFraction of Cells with >=3 QS\tNetwork Depth\n')
	# plot constrant unmet vs.
	for idx, npart in enumerate(nparts):
		print(npart)
		# NOTE(review): the benchmark graph is loaded from alu/ but solutions
		# are read from md5Core/ -- confirm which circuit this targets
		sol_path = path + 'runs/results/electronic-circuits/md5Core/nparts/'+str(npart)
		part_sol = sol_path + '/part_solns.txt'
		cut, partDict = gp.load_metis_part_sol (part_sol)
		opt_file = sol_path + '/optimized_lc/part_solns.txt'
		if os.path.exists (opt_file):
			x, y = [], []  # NOTE(review): unused in this scope
			timeList = load_timestep (opt_file)
			if timeList != []:
				solDict = gp.load_opt_part_sol (opt_file)
				for iteration in solDict.keys():
					part = solDict[iteration]['part']
					# skip iterations identical to the METIS start
					if part != partDict:
						part_opt = [gp.get_part(part, n) for n in G_primitive.nodes()]
						endN = len(part.keys())  # number of cells after optimization
						matrix, partG = gp.partition_matrix (G_primitive, part_opt)
						qs_matrix = gp.calc_qs (G_primitive, part_opt)
						# NOTE(review): despite the name this is the mean of positive per-cell QS sums
						median_qs_best = np.mean([x for x in np.sum(qs_matrix, axis=1) if x>0])
						cell_unmet_const, cell_met_const = gp.get_cells_unmet_qs_constraint (matrix, partG, qs_matrix, [3], 'TRUE')
						# print(iteration, median_qs_best, solDict[iteration]['T'], len(cell_unmet_const), len(cell_unmet_const)/endN)
						f_out.write('\t'.join([str(npart), str(iteration), str(endN), str(round(median_qs_best, 2)), str(len(cell_unmet_const)), str(round(len(cell_unmet_const)/endN, 2)), str(solDict[iteration]['T'])])+'\n')
						# group per-iteration stats by end cell number
						if endN not in dataDict:
							dataDict[endN] = {}
							dataDict[endN][1] = {'qs': median_qs_best, 'unmet': len(cell_unmet_const), 'unmetf': len(cell_unmet_const)/endN}
						else:
							dataDict[endN][max(dataDict[endN].keys())+1] = {'qs': median_qs_best, 'unmet': len(cell_unmet_const), 'unmetf': len(cell_unmet_const)/endN}
	# one color per end cell number
	colors = sns.color_palette("husl", len(dataDict.keys()))
	fig = plt.figure (figsize=(7,5))
	ax = fig.add_subplot(111)
	for idx, N in enumerate(sorted(dataDict)):
		print(idx, N)
		x, y = [], []
		c = colors[idx]
		for itr in sorted(dataDict[N]):
			x.append (dataDict[N][itr]['qs'])
			y.append (dataDict[N][itr]['unmetf'])
		ax.plot (x, y, marker='o', markersize=3, linestyle='', c=c, label=N)
	ax.set_xlabel('Average QS per cell')
	ax.set_ylabel('Fraction of Cells with >= 3 QS')
	plt.legend(bbox_to_anchor=(1.05, 1))
	fig.subplots_adjust (right=0.7)
	plt.savefig(path + 'runs/results/electronic-circuits/md5Core/nparts/Compare Runs QS vs Fraction of cells with >=3QS.pdf', dpi=200)
	plt.show()
def get_gatenum_distribution (partDict):
	""" count how many partitions contain each gate number

	partDict - {partition id: list of gates in that partition}

	Returns a dict covering every integer from the smallest to the largest
	partition size (inclusive), mapping size -> number of partitions of that
	size; sizes that never occur map to 0.
	"""
	sizes = [len(members) for members in partDict.values()]
	lo, hi = min(sizes), max(sizes)
	return {n: sizes.count(n) for n in range(lo, hi + 1)}
def compare_gatenum_distribution (path, constraint):
	"""
	compare the gate number distribution in each run

	For each initial partition number in `nparts`, load the low-constraint
	optimization solutions, record the gate-number distribution of every
	iteration to a summary file, and plot the normalized distributions
	grouped by end cell number.

	path       - root directory containing runs/benchmark and runs/results
	constraint - unused; kept for signature consistency with the other
	             compare_* functions
	"""
	bm_path = path + 'runs/benchmark/electronic-circuits/md5Core/'
	# load graph and metis partition
	edgelist = bm_path + '/DAG.edgelist'
	G = load_graph (edgelist)
	in_nodes, out_nodes, nonprimitives = gp.get_nonprimitive_nodes (G)
	G_primitive = gp.get_G_primitive (G, nonprimitives)
	nparts = [47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60]
	GateNumDict = {}
	f_out = open (path + 'runs/results/electronic-circuits/md5Core/nparts-run2/Compare Runs Gate Number Distribution.txt', 'w')
	f_out.write('Start cell number\tIteration\tEnd cell number\tGate Number Per Cell\tGate Number Distribution\n')
	for idx, npart in enumerate(nparts):
		print(npart)
		sol_path = path + 'runs/results/electronic-circuits/md5Core/nparts-run2/'+str(npart)
		part_sol = sol_path + '/part_solns.txt'
		cut, partDict = gp.load_metis_part_sol (part_sol)
		opt_file = sol_path + '/optimized_lc/part_solns.txt'
		if os.path.exists (opt_file):
			timeList = load_timestep (opt_file)
			if timeList != []:
				solDict = gp.load_opt_part_sol (opt_file)
				for i_idx, iteration in enumerate(solDict.keys()):
					part = solDict[iteration]['part']
					endN = len(part.keys())  # number of cells after optimization
					gatecounts = get_gatenum_distribution (part)
					x = list(gatecounts.keys())
					y = list(gatecounts.values())
					# bug fix: the original wrote str([str(v for v in x)]),
					# which stringified a generator object (including its
					# memory address) instead of the values; write the
					# comma-joined values, matching compare_qs_distribution
					f_out.write('\t'.join([str(npart), str(iteration), str(endN), ','.join([str(v) for v in x]), ','.join([str(v) for v in y])])+'\n')
					# group per-iteration distributions by end cell number
					if endN not in GateNumDict:
						GateNumDict[endN] = {}
						GateNumDict[endN][1] = gatecounts
					else:
						GateNumDict[endN][max(GateNumDict[endN].keys())+1] = gatecounts
	f_out.close()  # flush results before plotting
	# one color per iteration index
	maxIter = max([max(GateNumDict[N].keys()) for N in GateNumDict])
	colors = sns.color_palette("husl", maxIter)
	fig = plt.figure (figsize=(10,3))
	for idx, N in enumerate(sorted(GateNumDict), 1):
		print(idx, N)
		ax = fig.add_subplot(2,6,idx)
		for i, itr in enumerate(sorted(GateNumDict[N])):
			x = GateNumDict[N][itr].keys()
			y = normalize_data( GateNumDict[N][itr].values() )
			c = colors[i]
			ax.plot (x, y, marker='o', markersize=3, c=c, label=itr)
		ax.set_title(str(N))
		ax.set_xlim([0, 6])
		ax.set_ylim([-0.1, 1.1])
		# keep tick labels only on the 7th panel
		if idx != 7:
			ax.set_xticks([])
			ax.set_yticks([])
		else:
			ax.set_xticks([1,2,3,4,5])
	fig.subplots_adjust (hspace=0.3)
	plt.savefig(path + 'runs/results/electronic-circuits/md5Core/nparts-run2/Compare Runs Gate Number Distribution.pdf', dpi=200)
	plt.show()
def get_qs_distribution (qs_matrix):
	""" count, for each nonzero per-cell QS total (row sums of the QS matrix),
	how many cells carry that total; the result is sorted by QS value """
	qsDist = {}
	for total in np.sum(qs_matrix, axis=1):
		if total != 0:
			qsDist[total] = qsDist.get(total, 0) + 1
	return dict(sorted(qsDist.items()))
def normalize_data (data):
	""" normalize data to within 0-1 region

	data - an iterable of numbers (a list or a dict-values view)

	Returns a list of floats.  If the input is empty the result is empty;
	if all values are equal the spread is zero, so every value maps to 0.0
	instead of raising ZeroDivisionError (as the original did).
	"""
	values = list(data)  # materialize: callers pass dict views
	if not values:
		return []
	# hoisted: min/max computed once instead of per element
	lo, hi = min(values), max(values)
	span = hi - lo
	if span == 0:
		return [0.0 for _ in values]
	return [(v - lo) / span for v in values]
def compare_qs_distribution (path, constraint):
	"""
	compare the qs distribution in each run

	For each initial partition number in `nparts`, load the low-constraint
	optimization solutions of the ALU circuit, record the per-iteration
	distribution of QS totals per cell to a summary file, and plot the
	normalized distributions grouped by end cell number.

	path       - root directory containing runs/benchmark and runs/results
	constraint - unused  # NOTE(review): parameter is never read
	"""
	bm_path = path + 'runs/benchmark/electronic-circuits/alu/'
	# for npart in os.listdir(sol_path):
	# if npart.startswith('.') == False and npart != 'best_solns.txt':
	# print ('npart', npart)
	# load graph and metis partition
	edgelist = bm_path + '/DAG.edgelist'
	G = load_graph (edgelist)
	in_nodes, out_nodes, nonprimitives = gp.get_nonprimitive_nodes (G)
	G_primitive = gp.get_G_primitive (G, nonprimitives)
	nparts = [45]  # initial partition numbers to compare
	QSDict = {}
	f_out = open (path + 'runs/results/electronic-circuits/alu/nparts/Compare QS Distribution.txt', 'w')
	f_out.write('Start cell number\tIteration\tEnd cell number\tQS Number\tQS Distribution by cell\n')
	# plot constrant unmet vs.
	for idx, npart in enumerate(nparts):
		print(npart)
		sol_path = path + 'runs/results/electronic-circuits/alu/nparts/'+str(npart)
		part_sol = sol_path + '/part_solns.txt'
		cut, partDict = gp.load_metis_part_sol (part_sol)
		opt_file = sol_path + '/optimized_lc/part_solns.txt'
		if os.path.exists (opt_file):
			timeList = load_timestep (opt_file)
			if timeList != []:
				solDict = gp.load_opt_part_sol (opt_file)
				for i_idx, iteration in enumerate(solDict.keys()):
					part = solDict[iteration]['part']
					endN = len(part.keys())  # number of cells after optimization
					part_opt = [gp.get_part(part, n) for n in G_primitive.nodes()]
					matrix, partG = gp.partition_matrix (G_primitive, part_opt)
					qs_matrix = gp.calc_qs (G_primitive, part_opt)
					qs_dist = get_qs_distribution (qs_matrix)
					print(list(qs_dist.keys()), list(qs_dist.values()))
					f_out.write('\t'.join([str(npart), str(iteration), str(len(part.keys())), ','.join([str(v) for v in list(qs_dist.keys())]), ','.join([str(v) for v in list(qs_dist.values())])])+'\n')
					# group per-iteration distributions by end cell number
					if endN not in QSDict:
						QSDict[endN] = {}
						QSDict[endN][1] = qs_dist
					else:
						QSDict[endN][max(QSDict[endN].keys())+1] = qs_dist
	# for each ##endN##, plot QS distribution
	maxIter = max([max(QSDict[N].keys()) for N in QSDict])
	colors = sns.color_palette("husl", maxIter)  # one color per iteration index
	fig = plt.figure (figsize=(10,3))
	for idx, N in enumerate(sorted(QSDict), 1):
		print(idx, N)
		ax = fig.add_subplot(2,6,idx)
		for i, itr in enumerate(sorted(QSDict[N])):
			x = QSDict[N][itr].keys()
			y = normalize_data( QSDict[N][itr].values() )
			c = colors[i]
			ax.plot (x, y, marker='o', markersize=3, c=c, label=itr)
		ax.set_title(str(N))
		ax.set_xlim([0, 8])
		ax.set_ylim([-0.1, 1.1])
		# keep tick labels only on the 7th panel
		if idx != 7:
			ax.set_xticks([])
			ax.set_yticks([])
		else:
			ax.set_xticks([1,2,3,4,5,6,7,8])
	fig.subplots_adjust (hspace=0.3)
	plt.savefig(path + 'runs/results/electronic-circuits/alu/nparts/Compare Runs QS Distribution.pdf', dpi=200)
	plt.show()
def generate_edgelist_of_cells (outdir):
	""" for partitioned cells, generate an edgelist

	Loads the ALU benchmark graph and a fixed optimized partition solution,
	writes a 'neighbors.txt' file listing every primitive node and its
	neighbors, and returns a {node: partition id (str)} mapping.

	outdir - root directory containing runs/benchmark and runs/results
	"""
	bm_path = outdir + 'runs/benchmark/electronic-circuits/alu/'
	sol_path = outdir + 'runs/results/electronic-circuits/alu/nparts/45/optimized_lc/'
	part_sol = sol_path + 'part_solns.txt'
	edgelist = bm_path + '/DAG.edgelist'
	G = load_graph (edgelist)
	in_nodes, out_nodes, nonprimitives = gp.get_nonprimitive_nodes (G)
	G_primitive = gp.get_G_primitive (G, nonprimitives)
	solDict = gp.load_opt_part_sol (part_sol)
	iteration = 2  # NOTE: hard-coded to the 2nd optimization iteration
	part = solDict[iteration]['part']
	part_opt = [gp.get_part(part, n) for n in G_primitive.nodes()]
	node_to_partDict = {}
	for n in G_primitive.nodes():
		node_to_partDict[n] = str(gp.get_part(part, n))
	matrix, partG = gp.partition_matrix (G_primitive, part_opt)
	# nx.write_edgelist (partG, sol_path+'part_iteration2.edgelist')
	# use a context manager so the file handle is closed (the original leaked it)
	with open (sol_path + 'neighbors.txt', 'w') as f_out:
		for node in G_primitive.nodes():
			neighbors = G_primitive.neighbors(node)
			f_out.write(node + '\t' + ','.join(neighbors)+'\n')
	return node_to_partDict
def get_cut_edges (g, node_to_partDict):
	""" split the edges of g into those crossing partition boundaries and
	those internal to a single partition

	g                - a graph exposing edges() as (u, v) pairs
	node_to_partDict - {node: partition id}

	Returns (cut_edges, internal_edges), each a list of edge tuples.
	"""
	cut_edges, internal_edges = [], []
	for n1, n2 in g.edges():
		if node_to_partDict[n1] == node_to_partDict[n2]:
			internal_edges.append((n1, n2))
		else:
			cut_edges.append((n1, n2))
	return cut_edges, internal_edges
def _scale_xy (array, x_scaler, y_scaler):
new_array = np.array ([array[0] * x_scaler, array[1] * y_scaler])
return new_array
def plot_cell_edgelist(
	input_edgelist_fp: str,
	part_edgelist_fp: str,
	partition: dict,
	save_file: bool = False,
	output_filename: str = 'test.jpg',
):
	"""Draw the partitioned circuit: cells as large square nodes with the
	gates of each cell laid out inside them, cut edges drawn between cells.

	input_edgelist_fp -- edgelist file of the gate-level DAG
	part_edgelist_fp  -- edgelist file of the cell-level (partition) graph
	partition         -- dict mapping gate name -> cell name
	save_file         -- unused  # NOTE(review): parameter is never read
	output_filename   -- unused  # NOTE(review): output paths are hard-coded below
	"""
	# toggle: when True, draw and save the unrotated network and return early
	original_network = False
	g = nx.read_edgelist(input_edgelist_fp, create_using=nx.DiGraph())
	in_nodes, out_nodes, nonprimitives = gp.get_nonprimitive_nodes (g)
	g = gp.get_G_primitive (g, nonprimitives)
	pg = nx.read_edgelist(part_edgelist_fp)
	cut_edges, internal_edges = get_cut_edges (g, partition)
	plt.figure(num=None, figsize=(15, 15), dpi=80)
	# NOTE(review): hard-coded absolute paths to a local machine
	img_path = "/Users/jgzhang/Work/Densmore_lab/Partition/code_version/v2/genetic-circuit-partitioning/2021.4/runs/results/Logic-gate-nor-us.png"
	graph_path = "/Users/jgzhang/Work/Densmore_lab/Partition/code_version/v2/genetic-circuit-partitioning/2021.4/runs/results/electronic-circuits/alu/"
	img_list = []
	nor_img = mpimg.imread(img_path)
	for _ in pg.nodes():
		img_list.append(nor_img)
	# lay out the cell-level graph hierarchically, then rescale
	positions = nx.nx_agraph.graphviz_layout(pg, prog='dot')
	positions = nx.drawing.layout.rescale_layout_dict (positions, scale=10)
	positions = {key: _scale_xy(value, 1,1) for (key, value) in positions.items()}
	## position of nodes
	# anchor every gate at the position of the cell that contains it
	pos_communities = dict()
	for node, part in partition.items():
		pos_communities[node] = positions[part]
	# position nodes within partition
	partDict = dict()
	for node, part in partition.items():
		try:
			partDict[part] += [node]
		except KeyError:
			partDict[part] = [node]
	pos_nodes = dict()
	for ci, nodes in partDict.items():
		subgraph = g.subgraph(nodes)
		pos_subgraph = nx.planar_layout(subgraph)
		# pos_subgraph = nx.nx_agraph.graphviz_layout(subgraph)
		pos_nodes.update(pos_subgraph)
	# combine position: cell anchor plus a scaled-down within-cell offset
	pos = dict()
	for node in g.nodes():
		print('community position', pos_communities[node])
		print('node position', pos_nodes[node])
		print('new position', np.array(pos_communities[node]) + np.array(pos_nodes[node]))
		pos[node] = np.array(pos_communities[node]) + _scale_xy (pos_nodes[node], 0.5, 0.5)
	# Calculate the number of ranks in here so you can figure out how many
	# colors you need...
	y_pos = sorted(list({position[1] for position in positions.values()}))
	sns.color_palette('Set2', len(y_pos))
	# color each cell by its layout rank (shared y coordinate)
	colors = [y_pos.index(position[1]) for position in positions.values()]
	# plt.title('RCA4 Boolean Logic Network', fontsize=30, ha='center')
	if original_network:
		nx.draw (
			pg,
			pos=positions,
			with_labels=True,
			node_color=colors,
			width=0,
			node_size=1000,
			node_shape='s',
			linewidths=30,
			horizontalalignment='left',
			verticalalignment='bottom',
			alpha=0.2,
		)
		nx.draw_networkx_edges (g, pos=pos, edgelist=cut_edges, edge_color='red')
		nx.draw_networkx_edges (g, pos=pos, edgelist=internal_edges, edge_color='k')
		nx.draw(
			g,
			pos=pos,
			with_labels=True,
			width=0,
			horizontalalignment='left',
			verticalalignment='bottom',
			alpha=0.7,
		)
		plt.draw()
		plt.savefig(graph_path + 'original_network.jpg')
		return
	# I'm going to rotate the graph so it makes more sense in terms of an
	# electrical circuit.
	flip_xy_pos = {}  # NOTE(review): unused
	# rotate 90 degrees: (x, y) -> (-y, x)
	flipped_pos = {node: (-y, x) for (node, (x, y)) in pos.items()}
	flipped_positions = {node: (-y, x) for (node, (x, y)) in positions.items()}
	nx.draw (
		pg,
		pos=flipped_positions,
		with_labels=True,
		node_color=colors,
		width=0,
		node_size=2000,
		node_shape='s',
		linewidths=40,
		horizontalalignment='left',
		verticalalignment='bottom',
		alpha=0.2,
	)
	nx.draw_networkx_edges (g, pos=flipped_pos, edgelist=cut_edges, edge_color='k')
	nx.draw_networkx_edges (g, pos=flipped_pos, edgelist=internal_edges, edge_color='k')
	nx.draw(
		g,
		pos=flipped_pos,
		with_labels=True,
		width=0,
		horizontalalignment='left',
		verticalalignment='bottom',
		alpha=0.7,
	)
	# position nodes within each partition
	# plt.imshow(nor_img)
	# ax = plt.gca()
	# fig = plt.gcf()
	# trans = ax.transData.transform
	# trans2 = fig.transFigure.inverted().transform
	# imsize = 0.05 # this is the image size
	# for index, n in enumerate(pg.nodes()):
	# (x, y) = flipped_pos[n]
	# xx, yy = trans((x, y)) # figure coordinates
	# xa, ya = trans2((xx, yy)) # axes coordinates
	# print(f'{xa=}')
	# print(f'{ya=}')
	# print(f'{imsize=}')
	# print(f'{xa - imsize / 2.0=}')
	# print(f'{ya - imsize / 2.0=}')
	# a = plt.axes([(xa / 0.7975) - 0.17, (ya / 0.805) - 0.155, imsize, imsize])
	# a.imshow(img_list[index])
	# a.set_aspect('equal')
	# a.axis('off')
	# plt.show()
	plt.draw()
	plt.savefig(graph_path + 'flipped_network_with_label.pdf', dpi=300)
def convert_name (inputfile):
	""" convert MD5 gate name to Jai's names

	inputfile - path to a two-column tab-separated file:
	            original name <TAB> converted name (no header line)

	Returns {original name: converted name}.
	"""
	# use a context manager so the file handle is closed (the original leaked it)
	with open(inputfile, 'r') as f:
		lines = f.read().strip("\n").split('\n')
	converted_names = {}
	for line in lines:
		tokens = line.split('\t')
		converted_names[tokens[0]] = tokens[1]
	return converted_names
def parameter_scan_bionetworks (PATH):
    """Scan partition solutions of a random bionetwork benchmark over the
    initial submodule count (target n) and the connectivity constraint,
    collecting and visualizing the solutions that satisfy the motif rule.

    Args:
        PATH: repository root ending in '/'; benchmark and result paths
            are built relative to it.
    """
    bm = PATH + 'runs/benchmark/bionetwork/random_graph/n30_p0.05/DAG.edgelist'
    sol_path = PATH + 'runs/results/bionetwork/RG_n30_p0.05/nparts/'
    G = load_graph (bm)
    in_nodes, out_nodes, nonprimitives = gp.get_nonprimitive_nodes (G)
    G_primitive = gp.get_G_primitive (G, nonprimitives)
    print(nonprimitives)
    sols = []
    # Restricted scan for now; uncomment the listdir loop for a full sweep.
    # for targetn in os.listdir(sol_path):
    for targetn in ['8']:
        if targetn.startswith('.') == False and targetn.isdigit():
            print('target n', targetn)
            part_sol = sol_path + targetn + '/part_solns.txt'
            cut, partDict = gp.load_metis_part_sol (part_sol)
            opt_path = sol_path + targetn + '/optimized/'
            if os.path.exists(opt_path):
                # Restricted scan for now; uncomment for a full sweep.
                # for conn in os.listdir(opt_path):
                for conn in ['7']:
                    if conn.startswith('.') == False and conn.isnumeric():
                        print('connectivity', conn)
                        opt_part_sol = opt_path + conn + '/part_solns.txt'
                        if os.path.exists(opt_part_sol):
                            solDict = gp.load_opt_part_sol (opt_part_sol)
                            for ite in solDict.keys():
                                print(solDict[ite])
                                part = solDict[ite]['part']
                                # part_opt: (cut size, submodule label per node)
                                part_opt = (cut, [gp.get_part(part, n) for n in G.nodes()])
                                for n in list(G.nodes()):
                                    print(n, gp.get_part(part, n))
                                # endN: number of distinct submodules actually used
                                endN = len(set(part_opt[1]))
                                matrix, partG = gp.partition_matrix (G, part_opt[1])
                                # Keep only solutions whose inter-module motif
                                # respects the connectivity constraint.
                                motif_allowed = gp.check_motif_allowed(matrix, conn)
                                if motif_allowed:
                                    print('iteration', ite)
                                    sols.append((endN, int(conn)))
                                    gp.visualize_assignment_graphviz (G, part_opt[1], nonprimitives, 'FALSE', opt_path + str(conn) + '/', 'iter_'+str(ite)+'_N_'+str(endN)+'_conn_'+str(conn), [])
    # plot solutions
    # fig = plt.figure(figsize=(5,5))
    # ax = fig.add_subplot(111)
    # sol_count = []
    # counted = []
    # for sol in sols:
    # 	if sol not in counted:
    # 		count = sols.count(sol)
    # 		sol_count.append ((sol, count))
    # 		counted.append(sol)
    # print(sol_count)
    # x, y, z = [], [], []
    # for sol in sol_count:
    # 	x.append (sol[0][0])
    # 	y.append (sol[0][1])
    # 	z.append (sol[1])
    # print(x)
    # print(y)
    # print(z)
    # ax.scatter(np.array(x), np.array(y), s=np.array(z)*20, alpha=0.4, c='blue', edgecolors='grey', linewidth=1)
    # # ax.set_xlim([0, 10])
    # # ax.set_ylim([0, 7])
    # ax.set_xlabel('Number of Submodules in Valid Solutions')
    # ax.set_ylabel('Maximum Connuections between submodules')
    # plt.savefig(sol_path+'Solution_space.png', dpi=100)
    # plt.show()
def plot_histogram() :
    """Plot a histogram of the number of nodes assigned to each cell
    for one optimized partition solution (hard-coded paths/iteration).
    """
    # NOTE(review): PATH here shadows the module-level PATH used in
    # __main__ with a developer-local absolute path — confirm intended.
    PATH = "/Users/jgzhang/Work/Densmore_lab/Partition/code_version/v2/genetic-circuit-partitioning/2021.4/runs/"
    sol_path = "results/electronic-circuits/alu/nparts/45/optimized_lc/"
    bm_file = PATH + "benchmark/electronic-circuits/alu/DAG.edgelist"
    G = load_graph (bm_file)
    in_nodes, out_nodes, nonprimitives = gp.get_nonprimitive_nodes (G)
    G_primitive = gp.get_G_primitive (G, nonprimitives)
    solDict = gp.load_opt_part_sol (PATH + sol_path + 'part_solns.txt')
    # Inspect one specific optimization iteration.
    iteration = 5
    part = solDict[iteration]['part']
    part_opt = [gp.get_part(part, n) for n in G_primitive.nodes()]
    # gp.visualize_assignment_graphviz (G, part_opt, nonprimitives, 'TRUE', PATH + sol_path, iteration, [])
    # Number of nodes placed in each cell (partition).
    node_count = [len(part[p]) for p in part]
    print(node_count)
    # x: distinct cell sizes; y: how many cells have each size.
    x = sorted(set(node_count))
    y = [node_count.count(n) for n in x]
    fig = plt.figure(figsize=(5,5))
    ax = fig.add_subplot(111)
    ax.bar(x,y)
    ax.set_xlabel('No. nodes/cell')
    ax.set_ylabel('Count')
    plt.savefig(PATH+sol_path+str(iteration)+'_dist_nodecount.pdf', dpi=100)
    plt.show()
if __name__ == '__main__':
    # Entry point: most analyses are kept commented out; enable the one
    # you need and adjust PATH for your machine.
    PATH = '/home/ubuntu/genetic-circuit-partitioning/2021.4/'
    # PATH = '/Users/jgzhang/Work/Densmore_lab/Partition/code_version/v2/genetic-circuit-partitioning/2021.4/'
    # count_nodes (PATH + 'runs/results/4-input-boolean-circuits', PATH + 'runs/results/5-input-boolean-circuits')
    # partition_stats ()
    # compile_best_solutions ()
    # plot_timestep ()
    # count_motif()
    # plot_motif_occurence (PATH + 'runs/results/analysis/motif freq.txt')
    # plot_deltaD ()
    # G = load_graph (PATH + 'runs/benchmark/6-input-boolean-circuits/30/DAG.edgelist')
    # plot_network (G, PATH + 'runs/benchmark/bionetwork/random_graph/n50_p0.02/DAG.pdf')
    # plot_centrality (G, PATH + 'runs/benchmark/6-input-boolean-circuits/centrality.pdf')
    # best_soln = PATH + 'runs/results/5-input-boolean-circuits/best_solns.txt'
    # data = load_sol (best_soln)
    # avgNode = np.mean([(int(data[bm]['Nodes'])-6)/int(data[bm]['Npart']) for bm in data])
    # print(avgNode)
    # partition = generate_edgelist_of_cells (PATH)
    # converted_names = convert_name (PATH + 'runs/results/electronic-circuits/md5Core/Jai_solution/gate_name_conversion.txt')
    visualize_subnetworks_unmet_constraint (PATH, 'lc')
    # compare_runs (PATH, 'lc')
    # compare_gatenum_distribution (PATH, 'lc')
    # compare_qs_distribution (PATH, 'lc')
    # plot_cell_edgelist (PATH+'runs/benchmark/electronic-circuits/alu/DAG.edgelist', PATH+'runs/results/electronic-circuits/alu/nparts/45/optimized_lc/part_iteration2.edgelist', partition)
    # g = nx.read_edgelist(PATH+'runs/benchmark/electronic-circuits/md5Core/DAG.edgelist', create_using=nx.DiGraph())
    # in_nodes, out_nodes, nonprimitives = gp.get_nonprimitive_nodes (g)
    # parameter_scan_bionetworks (PATH)
    # plot_histogram ()
|
a=[]
# Read five values from the user.  NOTE(review): input() returns strings,
# so the quicksort below orders them lexicographically ("10" < "9") —
# confirm whether numeric ordering was intended.
for r in range (5):
    a.append(input("Enter a number : "))
print(a)
def QuickSort(a, p, r):
    """Sort a[p..r] in place via recursive quicksort (CLRS scheme)."""
    if p >= r:
        # Zero or one element: already sorted.
        return
    pivot_index = Partition(a, p, r)
    QuickSort(a, p, pivot_index - 1)
    QuickSort(a, pivot_index + 1, r)
def Partition(a, p, r):
    """Lomuto partition of a[p..r] around the pivot a[r].

    Rearranges a[p..r] in place so elements <= pivot precede it and
    returns the pivot's final index.
    """
    x = a[r]      # pivot value
    i = p - 1     # end of the "<= pivot" region
    # BUG FIX: the scan must cover j = p .. r-1 inclusive, i.e.
    # range(p, r).  The original range(p, r-1) never compared a[r-1],
    # so the sort produced wrong results (e.g. [3, 1, 2] -> [2, 3, 1]).
    for j in range(p, r):
        if a[j] <= x:
            i = i + 1
            temp = a[i]
            a[i] = a[j]
            a[j] = temp
    # Place the pivot just after the "<= pivot" region.
    temp = a[i + 1]
    a[i + 1] = a[r]
    a[r] = temp
    return i + 1
# Sort the five entries read above and show the result.
QuickSort(a,0,4)
print("sorted array :")
print(a)
|
#!/usr/bin/python3
# Copyright (c) 2018-2019 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Gather stats on a rocksdb dump. Prepare the dump files with:
#
# rocksdb-5.7.3/ldb --db=<rocksdb directory> \
# --path=<file in directory> \
# --hex dump > <dump file>
# inspectdb.py <dump file>
#
# Also supports directly piping ldb output to stdin input.
#
# Suggested improvements:
# - use a real histogram implementation (numpy?)
# - directly open the rocksdb database, instead of reading a dump
import argparse
import math
import re
import sys
parser = argparse.ArgumentParser(description="Analyze data from an ldb dump")
parser.add_argument("filename",
                    help="The filename of the ldb dump (stdin if omitted)",
                    nargs='?', type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument("-p","--prefix", help="Only analyze keys with this prefix",
                    default="", dest="prefix")
args = parser.parse_args()
# some dumps look like:
# 0x1234 ==> 0x567890
# others look like:
# '1234' seq:0, type:1 => '567890'
# Two capture groups (hex key, hex value); the optional "0x"/"'" wrappers
# accommodate both ldb output flavours shown above.
pattern = re.compile("(?:0x)?'?([0-9A-Fa-f]*).*=> (?:0x)?'?([0-9A-Fa-f]*)'?")
def key_type(key):
    """Return the leading two hex characters of *key*, used as its type code."""
    prefix = key[0:2]
    return prefix
class Histogram:
    '''
    Overly simple histogram implementation. It buckets values into
    log-base-2 bins (1, 2, 4, 8, ...). It also keeps rough track of any
    items that were too large for the largest bin.
    '''
    def __init__(self):
        # Bin i counts values v with 2**(i-1) < v <= 2**i (bin 0: v <= 1).
        self._bins = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        # Values past the last bin are tracked only in aggregate.
        self._over_count = 0
        self._over_min = sys.maxsize
        self._over_max = 0
    def update(self, value):
        '''Record one value in the histogram.'''
        if value < 1:
            # BUG FIX: math.log() raises ValueError on 0 — reachable,
            # since DBStats feeds len(value)//2 which is 0 for an empty
            # dump value.  Count non-positive sizes in the first bin.
            index = 0
        else:
            index = math.ceil(math.log(value, 2))
        if index < len(self._bins):
            self._bins[index] += 1
        else:
            self._over_count += 1
            self._over_min = min(self._over_min, value)
            self._over_max = max(self._over_max, value)
    def __str__(self):
        '''
        Representation is each bin on a line by itself, with the upper bin
        limit followed by the count of items in that bin.
        '''
        result = ""
        limit = 1
        for v in self._bins:
            result += "<={:>4}: {:>5}\n".format(limit, v)
            limit *= 2
        # BUG FIX: the loop leaves `limit` one doubling past the last
        # bin, so the overflow bucket starts above limit // 2 (the last
        # bin's limit), not above `limit` as originally printed.
        result += " >{:>4}: {:>5} ({} - {})".format(
            limit // 2, self._over_count, self._over_min, self._over_max)
        return result
class DBStats:
    '''
    Overly simple stats about the sizes of values for each key
    type. Stores a count of the number of items for that type, the sum of
    the value sizes for that type, and a histogram of the value sizes.
    '''
    def __init__(self):
        # Maps key-type code -> {"count", "size", "histo"}.
        self._stats = {}
    def update(self, key, value):
        '''Fold one hex key/value pair into the per-type stats.

        Relies on the module-level `args.prefix` filter and `key_type`.
        '''
        if not key.startswith(args.prefix):
            return
        # Type code is taken from the key after stripping the prefix.
        ktype = key_type(key[len(args.prefix):])
        if ktype in self._stats:
            key_stats = self._stats[ktype]
        else:
            key_stats = {"count": 0, "size": 0, "histo": Histogram()}
        key_stats["count"] += 1
        # Values are hex-encoded, so len(value)//2 is the size in bytes.
        key_stats["size"] += len(value)//2
        key_stats["histo"].update(len(value)//2)
        self._stats[ktype] = key_stats
    def __str__(self):
        '''
        Representation is a block of lines for each type, including the
        key type (prefix), count, value size sum, and histogram.
        '''
        result = ""
        for k in self._stats.keys():
            result += "Type: {}{}\n".format(args.prefix, k)
            result += "  count: {:>10}\n".format(self._stats[k]["count"])
            result += "    sum: {:>10}\n".format(self._stats[k]["size"])
            result += "  histo:\n"
            histo_result = str(self._stats[k]["histo"])
            for l in histo_result.split("\n"):
                result += "    {}\n".format(l)
        return result
stats = DBStats()
# Feed every dump line through the key/value pattern; lines that do not
# match (headers, noise) are silently skipped.
for line in args.filename:
    match = pattern.match(line)
    if match:
        stats.update(match.group(1), match.group(2))
print(stats)
|
# -*- coding: utf-8 -*-
# dcf
# ---
# A Python library for generating discounted cashflows.
#
# Author: sonntagsgesicht, based on a fork of Deutsche Postbank [pbrisk]
# Version: 0.7, copyright Friday, 14 January 2022
# Website: https://github.com/sonntagsgesicht/dcf
# License: Apache License 2.0 (see LICENSE file)
|
import cookielib
import requests
import logging
import os
import json
from .config import config, CONFIG_FOLDER
BASE_URL = 'https://leetcode.com'
LOGIN_URL = BASE_URL + '/accounts/login/'
API_URL = BASE_URL + '/api/problems/algorithms/'
# Cookies persist next to the rest of the tool's configuration.
COOKIE_PATH = os.path.join(CONFIG_FOLDER, 'cookies')
# Browser-like headers; the Referer is required by the login endpoint.
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, sdch',
    'Accept-Language': 'en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4,zh-TW;q=0.2',
    'Connection': 'keep-alive',
    'Host': 'leetcode.com',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.130 Safari/537.36',
    'Referer': 'https://leetcode.com/accounts/login/',
}
session = requests.Session()
# LWP cookie jar so login cookies survive between invocations.
# NOTE(review): `cookielib` is the Python 2 name (http.cookiejar in 3).
session.cookies = cookielib.LWPCookieJar(COOKIE_PATH)
try:
    session.cookies.load(ignore_discard=True)
except:
    # Best effort: a missing/corrupt cookie file just means we start
    # logged out.  NOTE(review): bare except also hides unrelated errors.
    pass
logger = logging.getLogger(__name__)

class NetworkError(Exception):
    """Raised for failed HTTP interactions; logs its own message."""
    def __init__(self, message, code=0):
        if message:
            self.message = '%s code: %d' % (message, code)
        else:
            # Empty/None message falls back to a generic description.
            self.message = 'Network error!'
        logger.error(self.message)
def login():
    """Log in to LeetCode using the credentials stored in ``config``.

    Fetches the login page for a CSRF token, posts the credentials, and
    persists the session cookies on success.

    Returns:
        True on successful login, False otherwise (missing credentials
        or a non-200 response).
    """
    # (Removed a redundant local `logger = logging.getLogger(__name__)`
    # that shadowed the module-level logger.)
    if not config.username or not config.password:
        return False
    login_data = {}
    # First GET obtains the csrftoken cookie required by the login form.
    r = retrieve(LOGIN_URL, headers=headers)
    if r.status_code != 200:
        logger.error('login failed')
        return False
    if 'csrftoken' in r.cookies:
        csrftoken = r.cookies['csrftoken']
        login_data['csrfmiddlewaretoken'] = csrftoken
    login_data['login'] = config.username
    login_data['password'] = config.password
    login_data['remember'] = 'on'
    r = retrieve(LOGIN_URL, method='POST', headers=headers, data=login_data)
    if r.status_code != 200:
        logger.error('login failed')
        return False
    logger.info("login success")
    # Persist cookies so future runs skip the login.
    session.cookies.save()
    return True
def is_login():
    """Return True when the saved session still authenticates against the API."""
    response = retrieve(API_URL, headers=headers)
    if response.status_code != 200:
        return False
    payload = json.loads(response.text.encode('utf-8'))
    # A logged-in session reports a non-empty user_name.
    return 'user_name' in payload and payload['user_name'] != ''
def retrieve(url, headers=None, method='GET', data=None):
    """Perform an HTTP request on the shared session.

    Args:
        url: target URL.
        headers: optional header dict.
        method: 'GET' or 'POST'.
        data: form payload for POST.

    Returns:
        The requests Response object.

    Raises:
        ValueError: for an unsupported HTTP method.
        NetworkError: when the underlying request fails.
    """
    try:
        if method == 'GET':
            r = session.get(url, headers=headers)
        elif method == 'POST':
            r = session.post(url, headers=headers, data=data)
        else:
            # BUG FIX: an unknown method previously left `r` unbound and
            # crashed with UnboundLocalError; fail explicitly instead.
            raise ValueError('unsupported HTTP method: %s' % method)
        return r
    except requests.exceptions.RequestException as e:
        raise NetworkError('Network error: url: %s' % url)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcbn.endpoint import endpoint_data
class CreateCenRouteMapRequest(RpcRequest):
	"""RPC request wrapper for the CBN ``CreateCenRouteMap`` API (2017-09-12).

	Auto-generated style: each API query parameter is exposed as a
	get_/set_ pair; ``RepeatList`` parameters are flattened into
	``Name.1``, ``Name.2``, ... query keys.
	"""

	def __init__(self):
		RpcRequest.__init__(self, 'Cbn', '2017-09-12', 'CreateCenRouteMap')
		self.set_method('POST')
		# Populate endpoint data when the SDK core supports endpoint maps.
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
	def get_ResourceOwnerId(self): # Long
		return self.get_query_params().get('ResourceOwnerId')

	def set_ResourceOwnerId(self, ResourceOwnerId):  # Long
		self.add_query_param('ResourceOwnerId', ResourceOwnerId)
	def get_CommunityMatchMode(self): # String
		return self.get_query_params().get('CommunityMatchMode')

	def set_CommunityMatchMode(self, CommunityMatchMode):  # String
		self.add_query_param('CommunityMatchMode', CommunityMatchMode)
	def get_MapResult(self): # String
		return self.get_query_params().get('MapResult')

	def set_MapResult(self, MapResult):  # String
		self.add_query_param('MapResult', MapResult)
	def get_NextPriority(self): # Integer
		return self.get_query_params().get('NextPriority')

	def set_NextPriority(self, NextPriority):  # Integer
		self.add_query_param('NextPriority', NextPriority)
	def get_DestinationCidrBlockss(self): # RepeatList
		return self.get_query_params().get('DestinationCidrBlocks')

	def set_DestinationCidrBlockss(self, DestinationCidrBlocks):  # RepeatList
		for depth1 in range(len(DestinationCidrBlocks)):
			self.add_query_param('DestinationCidrBlocks.' + str(depth1 + 1), DestinationCidrBlocks[depth1])
	def get_SourceInstanceIdss(self): # RepeatList
		return self.get_query_params().get('SourceInstanceIds')

	def set_SourceInstanceIdss(self, SourceInstanceIds):  # RepeatList
		for depth1 in range(len(SourceInstanceIds)):
			self.add_query_param('SourceInstanceIds.' + str(depth1 + 1), SourceInstanceIds[depth1])
	def get_SourceRegionIdss(self): # RepeatList
		return self.get_query_params().get('SourceRegionIds')

	def set_SourceRegionIdss(self, SourceRegionIds):  # RepeatList
		for depth1 in range(len(SourceRegionIds)):
			self.add_query_param('SourceRegionIds.' + str(depth1 + 1), SourceRegionIds[depth1])
	def get_MatchAsnss(self): # RepeatList
		return self.get_query_params().get('MatchAsns')

	def set_MatchAsnss(self, MatchAsns):  # RepeatList
		for depth1 in range(len(MatchAsns)):
			self.add_query_param('MatchAsns.' + str(depth1 + 1), MatchAsns[depth1])
	def get_Preference(self): # Integer
		return self.get_query_params().get('Preference')

	def set_Preference(self, Preference):  # Integer
		self.add_query_param('Preference', Preference)
	def get_OwnerId(self): # Long
		return self.get_query_params().get('OwnerId')

	def set_OwnerId(self, OwnerId):  # Long
		self.add_query_param('OwnerId', OwnerId)
	def get_Priority(self): # Integer
		return self.get_query_params().get('Priority')

	def set_Priority(self, Priority):  # Integer
		self.add_query_param('Priority', Priority)
	def get_DestinationChildInstanceTypess(self): # RepeatList
		return self.get_query_params().get('DestinationChildInstanceTypes')

	def set_DestinationChildInstanceTypess(self, DestinationChildInstanceTypes):  # RepeatList
		for depth1 in range(len(DestinationChildInstanceTypes)):
			self.add_query_param('DestinationChildInstanceTypes.' + str(depth1 + 1), DestinationChildInstanceTypes[depth1])
	def get_SourceRouteTableIdss(self): # RepeatList
		return self.get_query_params().get('SourceRouteTableIds')

	def set_SourceRouteTableIdss(self, SourceRouteTableIds):  # RepeatList
		for depth1 in range(len(SourceRouteTableIds)):
			self.add_query_param('SourceRouteTableIds.' + str(depth1 + 1), SourceRouteTableIds[depth1])
	def get_SourceChildInstanceTypess(self): # RepeatList
		return self.get_query_params().get('SourceChildInstanceTypes')

	def set_SourceChildInstanceTypess(self, SourceChildInstanceTypes):  # RepeatList
		for depth1 in range(len(SourceChildInstanceTypes)):
			self.add_query_param('SourceChildInstanceTypes.' + str(depth1 + 1), SourceChildInstanceTypes[depth1])
	def get_CommunityOperateMode(self): # String
		return self.get_query_params().get('CommunityOperateMode')

	def set_CommunityOperateMode(self, CommunityOperateMode):  # String
		self.add_query_param('CommunityOperateMode', CommunityOperateMode)
	def get_OperateCommunitySets(self): # RepeatList
		return self.get_query_params().get('OperateCommunitySet')

	def set_OperateCommunitySets(self, OperateCommunitySet):  # RepeatList
		for depth1 in range(len(OperateCommunitySet)):
			self.add_query_param('OperateCommunitySet.' + str(depth1 + 1), OperateCommunitySet[depth1])
	def get_RouteTypess(self): # RepeatList
		return self.get_query_params().get('RouteTypes')

	def set_RouteTypess(self, RouteTypes):  # RepeatList
		for depth1 in range(len(RouteTypes)):
			self.add_query_param('RouteTypes.' + str(depth1 + 1), RouteTypes[depth1])
	def get_CidrMatchMode(self): # String
		return self.get_query_params().get('CidrMatchMode')

	def set_CidrMatchMode(self, CidrMatchMode):  # String
		self.add_query_param('CidrMatchMode', CidrMatchMode)
	def get_CenId(self): # String
		return self.get_query_params().get('CenId')

	def set_CenId(self, CenId):  # String
		self.add_query_param('CenId', CenId)
	def get_Description(self): # String
		return self.get_query_params().get('Description')

	def set_Description(self, Description):  # String
		self.add_query_param('Description', Description)
	def get_SourceInstanceIdsReverseMatch(self): # Boolean
		return self.get_query_params().get('SourceInstanceIdsReverseMatch')

	def set_SourceInstanceIdsReverseMatch(self, SourceInstanceIdsReverseMatch):  # Boolean
		self.add_query_param('SourceInstanceIdsReverseMatch', SourceInstanceIdsReverseMatch)
	def get_DestinationRouteTableIdss(self): # RepeatList
		return self.get_query_params().get('DestinationRouteTableIds')

	def set_DestinationRouteTableIdss(self, DestinationRouteTableIds):  # RepeatList
		for depth1 in range(len(DestinationRouteTableIds)):
			self.add_query_param('DestinationRouteTableIds.' + str(depth1 + 1), DestinationRouteTableIds[depth1])
	def get_TransmitDirection(self): # String
		return self.get_query_params().get('TransmitDirection')

	def set_TransmitDirection(self, TransmitDirection):  # String
		self.add_query_param('TransmitDirection', TransmitDirection)
	def get_DestinationInstanceIdss(self): # RepeatList
		return self.get_query_params().get('DestinationInstanceIds')

	def set_DestinationInstanceIdss(self, DestinationInstanceIds):  # RepeatList
		for depth1 in range(len(DestinationInstanceIds)):
			self.add_query_param('DestinationInstanceIds.' + str(depth1 + 1), DestinationInstanceIds[depth1])
	def get_ResourceOwnerAccount(self): # String
		return self.get_query_params().get('ResourceOwnerAccount')

	def set_ResourceOwnerAccount(self, ResourceOwnerAccount):  # String
		self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
	def get_OwnerAccount(self): # String
		return self.get_query_params().get('OwnerAccount')

	def set_OwnerAccount(self, OwnerAccount):  # String
		self.add_query_param('OwnerAccount', OwnerAccount)
	def get_DestinationInstanceIdsReverseMatch(self): # Boolean
		return self.get_query_params().get('DestinationInstanceIdsReverseMatch')

	def set_DestinationInstanceIdsReverseMatch(self, DestinationInstanceIdsReverseMatch):  # Boolean
		self.add_query_param('DestinationInstanceIdsReverseMatch', DestinationInstanceIdsReverseMatch)
	def get_PrependAsPaths(self): # RepeatList
		return self.get_query_params().get('PrependAsPath')

	def set_PrependAsPaths(self, PrependAsPath):  # RepeatList
		for depth1 in range(len(PrependAsPath)):
			self.add_query_param('PrependAsPath.' + str(depth1 + 1), PrependAsPath[depth1])
	def get_AsPathMatchMode(self): # String
		return self.get_query_params().get('AsPathMatchMode')

	def set_AsPathMatchMode(self, AsPathMatchMode):  # String
		self.add_query_param('AsPathMatchMode', AsPathMatchMode)
	def get_MatchCommunitySets(self): # RepeatList
		return self.get_query_params().get('MatchCommunitySet')

	def set_MatchCommunitySets(self, MatchCommunitySet):  # RepeatList
		for depth1 in range(len(MatchCommunitySet)):
			self.add_query_param('MatchCommunitySet.' + str(depth1 + 1), MatchCommunitySet[depth1])
	def get_CenRegionId(self): # String
		return self.get_query_params().get('CenRegionId')

	def set_CenRegionId(self, CenRegionId):  # String
		self.add_query_param('CenRegionId', CenRegionId)
|
from unittest import TestCase
import mobile_codes
class TestCountries(TestCase):
    """Lookups of countries by MCC, alpha-2/3 codes, name and numeric code."""
    def test_mcc(self):
        countries = mobile_codes.mcc(u'302')
        self.assertEqual(len(countries), 1)
        self.assertEqual(countries[0].mcc, u'302')
    def test_mcc_multiple_codes(self):
        # A country can carry a list of MCCs; lookup by any of them works.
        countries = mobile_codes.mcc(u'313')
        self.assertEqual(len(countries), 1)
        self.assertEqual(countries[0].mcc, [u'310', u'311', u'313', u'316'])
        # We even get multiple countries with multiple MCC each
        countries = mobile_codes.mcc(u'310')
        self.assertTrue(len(countries) > 1)
        for country in countries:
            self.assertTrue(len(country.mcc) > 1)
    def test_mcc_multiple_countries(self):
        countries = mobile_codes.mcc(u'505')
        self.assertEqual(len(countries), 2)
    def test_mcc_fail(self):
        # Unknown MCC yields an empty result, not an exception.
        countries = mobile_codes.mcc(u'000')
        self.assertEqual(len(countries), 0)
    def test_alpha2(self):
        country = mobile_codes.alpha2(u'CA')
        self.assertEqual(country.alpha2, u'CA')
    def test_alpha2_fail(self):
        # Unlike mcc(), the code lookups raise KeyError on unknown input.
        self.assertRaises(KeyError, mobile_codes.alpha2, u'XX')
    def test_alpha3(self):
        country = mobile_codes.alpha3(u'CAN')
        self.assertEqual(country.alpha3, u'CAN')
    def test_alpha3_fail(self):
        self.assertRaises(KeyError, mobile_codes.alpha3, u'XYZ')
    def test_name(self):
        # Name lookup is case-insensitive.
        country = mobile_codes.name(u'canada')
        self.assertEqual(country.name, u'Canada')
    def test_name_fail(self):
        self.assertRaises(KeyError, mobile_codes.name, u'Neverland')
    def test_numeric(self):
        country = mobile_codes.numeric(u'124')
        self.assertEqual(country.numeric, u'124')
    def test_numeric_fail(self):
        self.assertRaises(KeyError, mobile_codes.numeric, u'000')
    def test_countries_match_mnc_operators(self):
        # Consistency check: the country table and the operator table must
        # reference exactly the same set of MCCs (modulo known exclusions).
        operators = mobile_codes._mnc_operators()
        operator_mccs = set([o.mcc for o in operators])
        # exclude test / worldwide mcc values
        operator_mccs -= set([u'001', u'901'])
        # exclude:
        # 312 - Northern Michigan University
        operator_mccs -= set([u'312'])
        countries = mobile_codes._countries()
        countries_mccs = []
        for country in countries:
            mcc = country.mcc
            if not mcc:
                continue
            elif isinstance(mcc, list):
                countries_mccs.extend(list(mcc))
            else:
                countries_mccs.append(mcc)
        countries_mccs = set(countries_mccs)
        # No country should have a mcc value, without an operator
        self.assertEqual(countries_mccs - operator_mccs, set())
        # No operator should have a mcc value, without a matching country
        self.assertEqual(operator_mccs - countries_mccs, set())
class TestCountriesNoMCC(TestCase):
    """Countries without any mobile network report mcc as None."""
    def test_alpha2(self):
        country = mobile_codes.alpha2(u'AQ')
        self.assertEqual(country.mcc, None)
    def test_alpha3(self):
        country = mobile_codes.alpha3(u'ATA')
        self.assertEqual(country.mcc, None)
    def test_name(self):
        country = mobile_codes.name(u'antarctica')
        self.assertEqual(country.mcc, None)
    def test_numeric(self):
        country = mobile_codes.numeric(u'010')
        self.assertEqual(country.mcc, None)
class TestCountriesSpecialCases(TestCase):
    """Regression tests for hand-maintained exceptions in the dataset."""
    def test_puerto_rico(self):
        # Allow mainland US 310 as a valid code for Puerto Rico.
        # At least AT&T has cell networks with a mcc of 310 installed
        # in Puerto Rico, see
        # https://github.com/andymckay/mobile-codes/issues/10
        country = mobile_codes.alpha2(u'PR')
        self.assertEqual(country.mcc, [u'310', '330'])
class TestMNCOperators(TestCase):
    """Operator lookups by MCC and by (MCC, MNC) pair."""
    def test_mcc(self):
        operators = mobile_codes.operators(u'302')
        mccs = set([o.mcc for o in operators])
        self.assertEqual(mccs, set([u'302']))
    def test_mcc_fail(self):
        # Unknown MCC yields an empty list, not an exception.
        operators = mobile_codes.operators(u'000')
        self.assertEqual(len(operators), 0)
    def test_mcc_mnc(self):
        operator = mobile_codes.mcc_mnc(u'722', '070')
        self.assertEqual(operator.mcc, u'722')
        self.assertEqual(operator.mnc, u'070')
    def test_mcc_mnc_fail(self):
        self.assertRaises(KeyError, mobile_codes.mcc_mnc, u'000', '001')
class TestSIDOperators(TestCase):
    """Operator lookups by CDMA SID."""
    def test_sid_operators(self):
        operators = mobile_codes.sid_operators(u'1')
        countries = set([operator.country for operator in operators])
        mccs = set()
        for operator in operators:
            mccs = mccs.union(set([mcc for mcc in operator.mcc]))
        # BUG FIX: assertEquals is a deprecated alias that was removed
        # in Python 3.12; use assertEqual.
        self.assertEqual(countries, set(['United States']))
        self.assertEqual(mccs, set([u'313', u'311', u'310', u'316']))
    def test_sid_operators_fail(self):
        operators = mobile_codes.operators(u'000')
        self.assertEqual(len(operators), 0)
|
# The "worth 200 million" AI code (joke chatbot).
# Echo trick: strip the question particle and turn question marks into
# exclamation marks so every question reads back as a confident answer.
# (Runtime strings are intentionally left in Chinese.)
while True:
    print('AI: 你好,我是价值 2 个亿 AI 智能聊天机器人! 有什么想问的的吗?')
    message = input('我: ')
    print('AI: ' + message.replace('吗','').replace('?','!'))
# Fython regression test: a division by zero inside compiled code must
# surface as a SIGFPE-flavoured error.  `s` is the Fython source fixture.
s = """
.a.fy
def boom:
    real x
    subboom(x)
def subboom:
    real inout x
    x = 1 / 0
"""
from fython.test import *
# Clean any artifacts from a previous run before compiling the module.
shell('rm -rf a/ a.* b.*')
writer(s)
w = load('.a', force=1, release=0, verbose=0, run_main=1)
w.verbose()
try:
    w.boom()
except Exception as e:
    print(e)
    # The raised error must mention the floating point exception.
    assert 'sigfpe' in str(e)
|
from HSTB.kluster.__version__ import __version__
|
import json
import xml.etree.ElementTree as etree
from pathlib import Path
from pytiled_parser.common_types import OrderedPair, Size
from pytiled_parser.exception import UnknownFormat
from pytiled_parser.parsers.json.tileset import parse as parse_json_tileset
from pytiled_parser.parsers.tmx.layer import parse as parse_layer
from pytiled_parser.parsers.tmx.properties import parse as parse_properties
from pytiled_parser.parsers.tmx.tileset import parse as parse_tmx_tileset
from pytiled_parser.tiled_map import TiledMap, TilesetDict
from pytiled_parser.util import check_format, parse_color
def parse(file: Path) -> TiledMap:
    """Parse the raw Tiled map into a pytiled_parser type.

    Args:
        file: Path to the map file.

    Returns:
        TiledMap: A parsed TiledMap.

    Raises:
        UnknownFormat: if an external tileset is neither TSX nor JSON.
    """
    with open(file) as map_file:
        tree = etree.parse(map_file)
        raw_map = tree.getroot()
    parent_dir = file.parent
    raw_tilesets = raw_map.findall("./tileset")
    tilesets: TilesetDict = {}
    for raw_tileset in raw_tilesets:
        if raw_tileset.attrib.get("source") is not None:
            # Is an external Tileset
            tileset_path = Path(parent_dir / raw_tileset.attrib["source"])
            parser = check_format(tileset_path)
            with open(tileset_path) as tileset_file:
                if parser == "tmx":
                    raw_tileset_external = etree.parse(tileset_file).getroot()
                    tilesets[int(raw_tileset.attrib["firstgid"])] = parse_tmx_tileset(
                        raw_tileset_external,
                        int(raw_tileset.attrib["firstgid"]),
                        external_path=tileset_path.parent,
                    )
                elif parser == "json":
                    tilesets[int(raw_tileset.attrib["firstgid"])] = parse_json_tileset(
                        json.load(tileset_file),
                        int(raw_tileset.attrib["firstgid"]),
                        external_path=tileset_path.parent,
                    )
                else:
                    raise UnknownFormat(
                        "Unkown Tileset format, please use either the TSX or JSON format."
                    )
        else:
            # Is an embedded Tileset
            tilesets[int(raw_tileset.attrib["firstgid"])] = parse_tmx_tileset(
                raw_tileset, int(raw_tileset.attrib["firstgid"])
            )
    layers = []
    # BUG FIX: Element.getchildren() was removed in Python 3.9; iterating
    # the element directly yields the same children.
    for element in raw_map:
        if element.tag in ["layer", "objectgroup", "imagelayer", "group"]:
            layers.append(parse_layer(element, parent_dir))
    map_ = TiledMap(
        map_file=file,
        infinite=bool(int(raw_map.attrib["infinite"])),
        layers=layers,
        map_size=Size(int(raw_map.attrib["width"]), int(raw_map.attrib["height"])),
        next_layer_id=int(raw_map.attrib["nextlayerid"]),
        next_object_id=int(raw_map.attrib["nextobjectid"]),
        orientation=raw_map.attrib["orientation"],
        render_order=raw_map.attrib["renderorder"],
        tiled_version=raw_map.attrib["tiledversion"],
        tile_size=Size(
            int(raw_map.attrib["tilewidth"]), int(raw_map.attrib["tileheight"])
        ),
        tilesets=tilesets,
        version=raw_map.attrib["version"],
    )
    # Resolve tile objects that carry their own (template) tileset: either
    # merge into an already-loaded tileset or register a fresh one, then
    # rebase the object's gid accordingly.
    layers = [layer for layer in map_.layers if hasattr(layer, "tiled_objects")]
    for my_layer in layers:
        for tiled_object in my_layer.tiled_objects:
            if hasattr(tiled_object, "new_tileset"):
                if tiled_object.new_tileset is not None:
                    already_loaded = None
                    for val in map_.tilesets.values():
                        if val.name == tiled_object.new_tileset.attrib["name"]:
                            already_loaded = val
                            break
                    if not already_loaded:
                        # (Removed stray debug print("here") here.)
                        highest_firstgid = max(map_.tilesets.keys())
                        last_tileset_count = map_.tilesets[highest_firstgid].tile_count
                        new_firstgid = highest_firstgid + last_tileset_count
                        map_.tilesets[new_firstgid] = parse_tmx_tileset(
                            tiled_object.new_tileset,
                            new_firstgid,
                            tiled_object.new_tileset_path,
                        )
                        tiled_object.gid = tiled_object.gid + (new_firstgid - 1)
                    else:
                        tiled_object.gid = tiled_object.gid + (
                            already_loaded.firstgid - 1
                        )
                    tiled_object.new_tileset = None
                    tiled_object.new_tileset_path = None
    if raw_map.attrib.get("backgroundcolor") is not None:
        map_.background_color = parse_color(raw_map.attrib["backgroundcolor"])
    if raw_map.attrib.get("hexsidelength") is not None:
        map_.hex_side_length = int(raw_map.attrib["hexsidelength"])
    properties_element = raw_map.find("./properties")
    # BUG FIX: Element truthiness is based on child count (and deprecated),
    # so an empty <properties> element was silently skipped; test identity.
    if properties_element is not None:
        map_.properties = parse_properties(properties_element)
    if raw_map.attrib.get("staggeraxis") is not None:
        map_.stagger_axis = raw_map.attrib["staggeraxis"]
    if raw_map.attrib.get("staggerindex") is not None:
        map_.stagger_index = raw_map.attrib["staggerindex"]
    return map_
|
from copy import deepcopy
import numpy as np
import networkx as nx
def uniformWeights(G) -> dict:
    """Assign uniform influence weights between users.

    Every incoming edge of a node gets the same influence: the inverse
    of the node's weighted in-degree (translated from the original
    Japanese docstring).
    """
    Ew = dict()
    for node in G:
        incoming = G.in_edges([node], data=True)
        weighted_degree = sum(edata['weight'] for _, _, edata in incoming)
        for src, dst, _ in incoming:
            Ew[(src, dst)] = 1 / weighted_degree
    return Ew
def randomWeights(G) -> dict:
    """Assign each incoming edge a uniform random influence in [0, 1],
    normalised per node by the weighted total of the draws (translated
    from the original Japanese docstring).
    """
    Ew = dict()
    for node in G:
        incoming = G.in_edges([node], data=True)
        # One random draw per incoming edge (same draw order as before).
        draws = [np.random.random() for _ in incoming]
        total = 0
        for idx, (_, _, edata) in enumerate(incoming):
            total += edata['weight'] * draws[idx]
        for idx, (src, dst, _) in enumerate(incoming):
            Ew[(src, dst)] = draws[idx] / total
    return Ew
def runLT(G, S, Ew) -> list:
    '''
    Run one Linear Threshold cascade (translated from Japanese).
    Input:
        G: directed graph
        S: initial seed set of influencer users
        Ew: influence weight of each edge
    Output:
        T: the users activated by the cascade (starts as a copy of S)
    '''
    T = deepcopy(S)
    # Each user draws an activation threshold uniformly at random
    # (same draw order as before: iteration over G).
    threshold = {user: np.random.random() for user in G}
    # Influence accumulated so far by every node.
    W = dict.fromkeys(G.nodes(), 0)
    # Appending to T while iterating it makes newly activated users
    # propagate their own influence in the same sweep.
    for active in T:
        for neighbor in G[active]:
            if neighbor not in T:
                # Several active users may push the same neighbor; their
                # influence accumulates rather than being replaced.
                W[neighbor] += Ew[(active, neighbor)] * G[active][neighbor]['weight']
                if W[neighbor] >= threshold[neighbor]:
                    T.append(neighbor)
    return T
def avgLT(G, S, Ew, iterations) -> float:
    """Return the average cascade size of seed set S over `iterations`
    independent Linear Threshold simulations.

    Args:
        G: directed graph.
        S: initial seed set.
        Ew: edge influence weights.
        iterations: number of Monte-Carlo runs.
    """
    avgSize = 0
    for _ in range(iterations):
        T = runLT(G, S, Ew)
        # BUG FIX: accumulate with += — the original assigned with '=',
        # so it returned only the last run's len(T)/iterations instead
        # of the mean cascade size.
        avgSize += len(T) / iterations
    return avgSize
"""Compound passes for rendering a shadowed scene...
The rendering passes here provide the bulk of the algorithm
for rendering shadowed scenes. You can follow the algorithm
starting at the OverallShadowPass, which manages 3 sets of
sub-passes:
ambient light passes (subPasses)
opaque-ambient-light pass
transparent-ambient-light pass
light pass (perLightPasses)
stencil setup pass
opaque-1-light pass
transparent-1-light pass
regular selection pass (postPasses)
The ambient light passes perform two major functions.
They set up the depth buffer with the scene's geometry
They add all of the ambient and emissive contribution
of the lights in the scene.
Each light then has two or three passes (depending on whether
there are transparent objects in the scene). The first pass
renders the shadow volume objects into the stencil buffer,
which creates a stencil shadow which marks off those areas
of the scene which are not illuminated by the current light.
The second pass renders the opaque geometry blending in the
contribution of the current light, with the stencil buffer
masking of those areas which are shadowed from the light.
If there are transparent objects, eventually we will render
them in much the same way, but at the moment, the depth buffer
is not being updated during the opaque render pass, so the
transparent objects will undergo almost arbitrary lighting.
XXX Obviously that should change.
After the lighting passes are finished, the standard selection
rendering passes can occur.
"""
from OpenGL.GL import *
from OpenGLContext import visitor, doinchildmatrix, frustum
from OpenGLContext.passes import renderpass, rendervisitor
from vrml import cache
from OpenGLContext.scenegraph import basenodes, indexedfaceset
from OpenGLContext.shadow import edgeset
from OpenGLContext.debug import state
from vrml import protofunctions
try:
from OpenGLContext.debug import bufferimage
except ImportError:
bufferimage = None
from OpenGLContext.arrays import *
from math import pi
import sys
import logging
log = logging.getLogger( __name__ )
class OverallShadowPass (renderpass.OverallPass):
    """Pass w/ ambient, light-specific, and selection sub-passes

    If we are doing no visible passes, then we are
    basically just a regular selection pass.

    Otherwise we need to do the ambient rendering
    pass (possibly two if we have transparent objects),
    with this pass finding and registering all
    lights and edge-sets.

    Then for each light:
        Do the stencil-buffer set up pass, which walks
        the list of edge sets (creating and) rendering
        the shadow volume for the current light.
        Do an opaque render with just the single light
        enabled, and the stencil buffer excluding
        shadowed geometry.
        If there are transparent objects, render them
        using the same set up as the opaque render.
    Finally:
        kill off the stencil buffer set up
    """
    # Debug switches: a non-zero passDebug traces every sub-pass to
    # stderr; the debugShadow* flags selectively disable stages of the
    # shadow-volume algorithm while debugging.
    passDebug = 1
    debugShadowSilouhette = 1
    debugShadowVolume = 1
    debugShadowNoStencil = 0
    debugShadowNoBackFaces = 0
    debugShadowNoFrontFaces = 0
    debugShadowNoCaps = 0
    debugShadowNoBoots = 0
    debugShadowNoEdges = 0
    def __init__ (
        self,
        context=None,
        subPasses = (),
        startTime = None,
        perLightPasses = (),
        postPasses = (),
    ):
        """Initialise OverallShadowPass

        context -- rendering context for this pass
        subPasses -- ambient passes, run once before the per-light work
        startTime -- forwarded to renderpass.OverallPass
        perLightPasses -- list of passes to be applied to each
            light in the active light set
        postPasses -- list of passes applied after all of the
            light-specific passes are applied
        """
        super( OverallShadowPass, self).__init__(context, subPasses, startTime)
        self.perLightPasses = perLightPasses
        self.postPasses = postPasses
    def __call__( self ):
        """Render the pass and all sub-passes

        Returns the accumulated visible-change count of every sub-pass.
        """
        if __debug__:
            if self.passDebug:
                sys.stderr.write( """START NEW PASS\n""" )
        changed = 0
        passCount = -1
        # Reset stencil/depth/blend state to known defaults before the
        # ambient passes run.
        # XXX not the right place for this.
        glStencilFunc(GL_ALWAYS, 0, ~0);
        glStencilOp(GL_KEEP,GL_KEEP,GL_KEEP);
        glDepthFunc(GL_LESS);
        glColorMask(GL_TRUE,GL_TRUE,GL_TRUE,GL_TRUE);
        glColor( 1.0, 1.0, 1.0, 1.0 )
        glDisable(GL_BLEND);
        glDisable(GL_STENCIL_TEST);
        glDepthMask(1)
        glBlendFunc(GL_ONE,GL_ZERO);
        glStencilMask(~0)
        # run through the regular pass-types
        for passObject in self.subPasses:
            if __debug__:
                if self.passDebug:
                    sys.stderr.write( 'SUB-PASS %s\n'%( passObject))
            changed += passObject()
            passCount = passObject.passCount
            self.visibleChange = changed
        passCount += 1
        # now run through the per-light passes
        # creating a new pass for each light's
        # version of each pass
        try:
            # self.lightPaths is presumably registered by the ambient
            # sub-passes above (see class docstring) — TODO confirm.
            for light in self.lightPaths:
                self.currentLight = light
                for passClass in self.perLightPasses:
                    passObject = passClass( self, passCount )
                    if __debug__:
                        if self.passDebug:
                            sys.stderr.write( 'SUB-PASS %s\n'%( passObject))
                    passCount += 1
                    changed += passObject()
                    self.visibleChange = changed
        finally:
            # do some cleanup to make sure next pass is visible
            glDisable(GL_BLEND);
            glDisable(GL_STENCIL_TEST);
            glDepthMask(1);
        # selection (and any other post-) passes run after lighting
        for passClass in self.postPasses:
            passObject = passClass( self, passCount )
            if __debug__:
                if self.passDebug:
                    sys.stderr.write( 'SUB-PASS %s\n'%( passObject))
            passCount += 1
            changed += passObject()
            self.visibleChange = changed
        return changed
class AmbientOnly( object ):
    """Mix-in restricting lighting to the ambient/emissive component.

    Lighting stays enabled, but only the ambient contribution is
    applied; per-light (diffuse) contributions are deferred to the
    later light-specific passes.
    """
    lighting = 1
    lightingAmbient = 1  # apply the ambient component of the lights
    lightingDiffuse = 0  # defer diffuse lighting to the per-light passes
class AmbientOpaque( AmbientOnly, renderpass.OpaqueRenderPass ):
    """Opaque rendering pass with only ambient lights

    This will be the only pass which actually writes
    to the depth buffer. It will also be responsible
    for registering each light, and edge-set with the
    appropriate matrices.
    """
    # No body: behavior comes entirely from the AmbientOnly flags and
    # the OpaqueRenderPass machinery.
class AmbientTransparent( AmbientOnly, renderpass.TransparentRenderPass ):
    """Transparent rendering pass with only ambient lights"""
class SpecificLight( object ):
    """Mix-in to run lighting for a specific light

    The overall pass keeps track of currentLight for us
    """
    lightID = GL_LIGHT0 # every per-light pass reuses the same GL light slot
    frustumCulling = 0 # the shadows shouldn't be culled if the objects are off-screen
    def SceneGraph( self, node ):
        """Render lights for a scenegraph"""
        def tryLight( lightPath, ID, visitor):
            """Try to enable a light, returns either
            None or the ID used during enabling."""
            lightPath.transform()
            return lightPath[-1].Light( ID, visitor )
        if self.lighting:
            # enable only the current light, inside its own transform matrix
            doinchildmatrix.doInChildMatrix( tryLight, self.currentLight, self.lightID, self )
    def shouldDraw( self ):
        """Whether we should draw"""
        # draw whenever the earlier passes reported a visible change
        return self.visibleChange or super( SpecificLight,self).shouldDraw()
class LightStencil (SpecificLight, renderpass.OpaqueRenderPass):
    """Sets up the stencil buffer for the current light

    Renders occluders' shadow volumes into the stencil buffer so the
    subsequent per-light passes only illuminate unshadowed areas.
    """
    lighting = 0
    lightingAmbient = 0 # whether ambient lighting should be used
    lightingDiffuse = 0 # whether diffuse lighting should be used (non-ambient)
    stencil = 1
    cacheKey = 'edgeSet' # cache slot used for per-node EdgeSet objects
    def Context( self, node):
        """Setup stencil buffer where we render shadow volumes"""
        # disable depth buffer writing
        glDepthMask(GL_FALSE);
        # force depth-testing on
        glEnable(GL_DEPTH_TEST);
        glDepthFunc( GL_LESS );
        # enable blending
        glEnable(GL_BLEND);
        glBlendFunc(GL_ONE,GL_ONE);
        # clear the stencil buffer
        glClear(GL_STENCIL_BUFFER_BIT);
        # disable color-buffer writing
        glColorMask(0,0,0,0);
        # enable use of the stencil buffer for determining whether to write
        glEnable(GL_STENCIL_TEST);
        glStencilFunc(GL_ALWAYS,0,~0);
        glStencilMask(~0);
        glEnable( GL_CULL_FACE )
##        try:
##            self.DebugSaveDepthBuffer()
##        except:
##            print 'failed to save depth buffer'
    def Rendering( self, node ):
        """Regular rendering isn't desirable...

        Instead of drawing the node's geometry, compile it to a cached
        edge-set and render the shadow volume it casts for the current
        light into the stencil buffer.
        """
        ### should have a way to specify non-occluding geometry...
        node = node.geometry
        # only IndexedFaceSet geometry participates in shadow casting here
        if not isinstance( node, basenodes.IndexedFaceSet ):
            return
        cc = indexedfaceset.ArrayGeometryCompiler( node )
        ag = cc.compile(
            visible=False,lit=False,textured=False,transparent=False,
            mode = self,
        )
        if ag is indexedfaceset.DUMMY_RENDER:
            return None
        # okay, we have an array-geometry object
        edgeSet = self.cache.getData(node,self.cacheKey)
        if not edgeSet:
            if ag.vertices:
                edgeSet = edgeset.EdgeSet(
                    points = ag.vertices.data,
                    ccw = ag.ccw == GL_CCW,
                )
                # cache the edge-set, invalidating whenever the fields it
                # was derived from change
                holder = self.cache.holder(
                    client = node,
                    key = self.cacheKey,
                    data = edgeSet,
                )
                for (n, attr) in [
                    (node, 'coordIndex'),
                    (node, 'ccw'),
                    (node.coord, 'point'),
                ]:
                    if n:
                        holder.depend( n, protofunctions.getField(n,attr) )
            else:
                edgeSet = None
        if not edgeSet:
            return
        # okay, we have an edge-set object...
        volume = edgeSet.volume( self.currentLight, self.currentStack )
        if not volume:
            return
        # now we have a shadow-volume for this light and edge-set
        volume.render( self )
    def DebugSaveDepthBuffer( self, file = "depth_buffer.jpg" ):
        """Debugging helper: snapshot the depth buffer to an image file."""
        if not bufferimage:
            return
        width, height = self.context.getViewPort()
        image = bufferimage.depth(0,0, width, height )
        image.save( file, "JPEG" )
class LightOpaque(SpecificLight, renderpass.OpaqueRenderPass):
    """Opaque rendering pass for a specific light"""
    lighting = 1
    lightingAmbient = 0 # whether ambient lighting should be used
    lightingDiffuse = 1 # whether diffuse lighting should be used (non-ambient)
    def Context( self, node):
        """Set up GL state to add this light's contribution where unshadowed"""
        glCullFace(GL_BACK);
        # only draw where the stencil is still zero (i.e. not in shadow)
        glStencilFunc(GL_EQUAL, 0, ~0);
        # this should be INCR according to the article...
        glStencilOp(GL_KEEP,GL_KEEP,GL_INCR);
        # geometry is already in the depth buffer from the ambient pass
        glDepthFunc(GL_EQUAL);
        # NOTE(review): glDepthMask expects a boolean; ~1 is simply
        # non-zero here — verify the intent (enable vs disable).
        glDepthMask(~1);
        glColorMask(1,1,1,1);
        glColor( 1.,1.,1.)
        # enable blending
        glEnable(GL_BLEND);
        glEnable(GL_LIGHTING);
        # additive blend: sum this light's contribution into the frame
        glBlendFunc(GL_ONE,GL_ONE);
    def SaveStencilDebug( self ):
        """Debugging helper: snapshot the stencil buffer to buffer.jpg"""
        if not bufferimage:
            return
        width, height = self.context.getViewPort()
        image = bufferimage.stencil(0,0, width, height )
        image.save( "buffer.jpg", "JPEG" )
    def SceneGraph( self, node ):
        """Render lights for a scenegraph"""
        def tryLight( lightPath, ID, visitor):
            """Try to enable a light, returns either
            None or the ID used during enabling."""
            lightPath.transform()
            return lightPath[-1].Light( ID, visitor )
        if self.lighting:
            doinchildmatrix.doInChildMatrix( tryLight, self.currentLight, self.lightID, self )
            # NOTE(review): the frustum handling below looks copied from a
            # camera method (the warning mentions SceneGraphCamera) —
            # verify the intended structure against upstream.
            if not self.frustum:
                self.frustum = frustum.Frustum.fromViewingMatrix(normalize = 1)
            else:
                log.warn( """SceneGraphCamera called twice for the same rendering pass %s""", self)
class LightTransparent(SpecificLight, renderpass.TransparentRenderPass):
    """Transparent rendering pass for a specific (stencil-masked) light"""
    lighting = 1
    lightingAmbient = 0 # whether ambient lighting should be used
    lightingDiffuse = 1 # whether diffuse lighting should be used (non-ambient)
    def ContextSetupDisplay( self, node):
        """Extend the base transparent set-up with the light's stencil mask"""
        super( LightTransparent, self).ContextSetupDisplay( node )
        # only draw where the stencil still reads zero (unshadowed)
        glStencilFunc(GL_EQUAL, 0, ~0);
        glStencilOp(GL_KEEP,GL_KEEP,GL_INCR);
        # only add light where the depths match the laid-down geometry
        glDepthFunc(GL_EQUAL);
        glColorMask(1,1,1,1);
### Finalisation tokens
class ShadowPassSet( object ):
    """Callable bundle of render sub-passes.

    Stores the overall-pass class plus the three pass collections and,
    when called with a context, instantiates the overall pass and
    immediately runs it.
    """
    def __init__(
        self,
        overallClass,
        subPasses=(),
        perLightPasses=(),
        postPasses=(),
    ):
        # Record the factory and the pass collections for later invocation.
        self.overallClass = overallClass
        self.subPasses = subPasses
        self.perLightPasses = perLightPasses
        self.postPasses = postPasses
    def __call__(self, context):
        """Instantiate the overall pass for *context* and run it."""
        return self.overallClass(
            context=context,
            subPasses=self.subPasses,
            perLightPasses=self.perLightPasses,
            postPasses=self.postPasses,
        )()
### following object only used for testing
# Ambient-only pass set: renders the scene with ambient light plus the
# selection pass, skipping all shadow-volume work.
ambientRenderPass = renderpass.PassSet(
    OverallShadowPass,
    [
        AmbientOpaque,
        AmbientTransparent,
        renderpass.SelectRenderPass,
    ],
)
### The normal pass-set
# Full shadowed pipeline: ambient set-up passes, then the per-light
# stencil/opaque/transparent passes, then the selection pass.
defaultRenderPasses = ShadowPassSet(
    OverallShadowPass,
    subPasses = [
        AmbientOpaque,
        AmbientTransparent,
    ],
    perLightPasses = [
        LightStencil ,
        LightOpaque,
        LightTransparent,
    ],
    postPasses = [
        renderpass.SelectRenderPass,
    ]
)
|
from django.conf.urls import url
from . import views
# URL routes for the accounts/profile app.
# Fix: `register/`, `login/` and `logout/` were missing the `$` anchor,
# so any URL *starting* with those prefixes (e.g. /register/junk/) also
# matched. All patterns are now terminated, consistent with the rest.
urlpatterns = [
    url(r'^register/$', views.RegistrationFormView.as_view(), name='register'),
    url(r'^orders/$', views.ProfileOrdersView.as_view(), name='orders'),
    url(r'^orders/(?P<pk>\d+)/$', views.ProfileOrderDetailView.as_view(), name='order_detail'),
    url(r'^login/$', views.AuthenticationForm.as_view(), name='login'),
    url(r'^logout/$', views.logout_view, name='logout'),
    url(r'^profile/$', views.ProfileDetail.as_view(), name='detail'),
    url(r'^profile/update/$', views.UpdateProfileForm.as_view(), name='update'),
]
|
import logging

from datetime import timedelta
from itertools import groupby

import sedate
from libres.context.core import ContextServicesMixin
from libres.db.models import Allocation, Reservation, ReservedSlot
from libres.modules import errors, events
from sqlalchemy import func, null
from sqlalchemy.orm import joinedload
from sqlalchemy.sql import and_, false, or_
log = logging.getLogger('libres')
class Queries(ContextServicesMixin):
    """ Contains helper methods independent of the resource (as owned by
    :class:`.scheduler.Scheduler`)

    Some contained methods require the current context (for the session).
    Some contained methods do not require any context, they are marked
    as staticmethods.

    """

    def __init__(self, context):
        self.context = context

    def all_allocations_in_range(self, start, end):
        """ Returns a query of all allocations (any resource) overlapping
        the given start/end range.

        """
        return self.allocations_in_range(
            self.session.query(Allocation), start, end
        )

    @staticmethod
    def allocations_in_range(query, start, end):
        """ Takes an allocation query and limits it to the allocations
        overlapping with start and end.

        """
        return query.filter(
            or_(
                # the allocation begins before the range and reaches into it
                and_(
                    Allocation._start <= start,
                    start <= Allocation._end
                ),
                # the allocation begins inside the range
                and_(
                    start <= Allocation._start,
                    Allocation._start <= end
                )
            )
        )

    @staticmethod
    def availability_by_allocations(allocations):
        """Takes any iterator with allocations and calculates the
        availability. Counts missing mirrors as 100% free and returns a
        value between 0-100 in any case.

        For single allocations check the allocation.availability property.

        """
        total, expected_count, count = 0, 0, 0
        for allocation in allocations:
            total += allocation.availability
            count += 1

            # Sum up the expected number of allocations. Missing allocations
            # indicate mirrors that have not yet been physically created.
            if allocation.is_master:
                expected_count += allocation.quota

        if not expected_count:
            return 0

        # missing mirrors count as 100% available
        missing = expected_count - count
        total += missing * 100

        return total / expected_count

    def availability_by_range(self, start, end, resources):
        """Returns the availability for the given resources in the given
        range.

        The exposure is used to check if the allocation is visible.

        """
        query = self.all_allocations_in_range(start, end)
        query = query.filter(Allocation.mirror_of.in_(resources))
        query = query.options(joinedload(Allocation.reserved_slots))

        # only expose allocations visible in the current context
        allocations = (a for a in query if self.is_allocation_exposed(a))
        return self.availability_by_allocations(allocations)

    def availability_by_day(self, start, end, resources):
        """Availability by range with a twist. Instead of returning a grand
        total, a dictionary is returned with each day in the range as key and
        a tuple of availability and the resources counted for that day.

        WARNING, this function should run as linearly as possible as a lot
        of records might be processed.

        """
        query = self.all_allocations_in_range(start, end)
        query = query.filter(Allocation.mirror_of.in_(resources))
        query = query.options(joinedload(Allocation.reserved_slots))
        # groupby requires the rows sorted by the grouping key
        query = query.order_by(Allocation._start)

        group = groupby(query, key=lambda a: a._start.date())
        days = {}

        for day, allocations in group:
            exposed = []
            members = set()

            for a in (a for a in allocations if self.is_allocation_exposed(a)):
                members.add(a.mirror_of)
                exposed.append(a)

            if not exposed:
                continue

            total = self.availability_by_allocations(exposed)
            days[day] = (total, members)

        return days

    def reservations_by_session(self, session_id):
        """ Returns a query of the reservations of the given session,
        ordered by creation time. An always-empty query is returned for
        a falsy session id.

        """
        # be sure to not query for all reservations. since a query should be
        # returned in any case we just use an impossible clause

        # this is mainly a security feature
        if not session_id:
            log.warning('empty session id')
            # false() renders an always-false SQL clause; raw string
            # filters like "0=1" are unsupported in modern SQLAlchemy
            return self.session.query(Reservation).filter(false())

        query = self.session.query(Reservation)
        query = query.filter(Reservation.session_id == session_id)
        query = query.order_by(Reservation.created)

        return query

    def confirm_reservations_for_session(self, session_id, token=None):
        """ Confirms all reservations of the given session id. Optionally
        confirms only the reservations with the given token. All if None.

        """
        assert session_id

        reservations = self.reservations_by_session(session_id)

        if token:
            reservations = reservations.filter(Reservation.token == token)

        reservations = reservations.all()

        if not reservations:
            raise errors.NoReservationsToConfirm

        # confirming simply means detaching the reservation from the session
        for reservation in reservations:
            reservation.session_id = None

        events.on_reservations_confirmed(
            self.context, reservations, session_id
        )

    def remove_reservation_from_session(self, session_id, token):
        """ Removes the reservation with the given session_id and token. """
        assert token and session_id

        query = self.reservations_by_session(session_id)
        query = query.filter(Reservation.token == token)

        reservation = query.one()
        self.session.delete(reservation)

        # if we get here the token must be valid, we should then check if the
        # token is used in the reserved slots, because with autoapproval these
        # slots may be created straight away.
        slots = self.session.query(ReservedSlot).filter(
            ReservedSlot.reservation_token == token
        )
        slots.delete(synchronize_session='fetch')

        # we also update the timestamp of existing reservations within
        # the same session to ensure that we account for the user's activity
        # properly during the session expiration cronjob. Otherwise it is
        # possible that a user removes the latest reservations only to see
        # the rest of them vanish because his older reservations were
        # already old enough to be counted as expired.
        query = self.session.query(Reservation)
        query = query.filter(Reservation.session_id == session_id)
        query.update({"modified": sedate.utcnow()})

    def find_expired_reservation_sessions(self, expiration_date=None):
        """ Goes through all reservations and returns the session ids of the
        unconfirmed ones which are older than the given expiration date.

        By default (``expiration_date=None``) the expiration date is
        now - 15 minutes.

        Note that this method goes through ALL RESERVATIONS OF THE CURRENT
        SESSION. This is NOT limited to a specific context or scheduler.

        """
        expiration_date = expiration_date or (
            sedate.utcnow() - timedelta(minutes=15)
        )

        # first get the session ids which are expired
        query = self.session.query(
            Reservation.session_id,
            func.max(Reservation.created),
            func.max(Reservation.modified)
        )
        query = query.group_by(Reservation.session_id)

        # != null() because != None is not allowed by PEP8
        query = query.filter(Reservation.session_id != null())

        # only pending reservations are considered
        query = query.filter(Reservation.status == 'pending')

        # the idea is to remove all reservations belonging to sessions whose
        # latest update is expired - either delete the whole session or let
        # all of it be
        expired_sessions = []

        for session_id, created, modified in query.all():
            modified = modified or created

            assert created and modified

            if max(created, modified) < expiration_date:
                expired_sessions.append(session_id)

        return expired_sessions

    def remove_expired_reservation_sessions(self, expiration_date=None):
        """ Removes all reservations which have an expired session id.

        By default the expiration date is now - 15 minutes.
        See :func:`find_expired_reservation_sessions`

        Note that this method goes through ALL RESERVATIONS OF THE CURRENT
        SESSION. This is NOT limited to a specific context or scheduler.

        """
        expired_sessions = self.find_expired_reservation_sessions(
            expiration_date
        )

        # remove those session ids
        if expired_sessions:
            reservations = self.session.query(Reservation)
            reservations = reservations.filter(
                Reservation.session_id.in_(expired_sessions)
            )

            # delete the slots belonging to the expired reservations first
            slots = self.session.query(ReservedSlot)
            slots = slots.filter(
                ReservedSlot.reservation_token.in_(
                    reservations.with_entities(Reservation.token).subquery()
                )
            )

            slots.delete(synchronize_session='fetch')
            reservations.delete(synchronize_session='fetch')

        return expired_sessions
|
class Solution(object):
    def isValid(self, s):
        """Return True when every bracket in s is properly matched.

        :type s: str
        :rtype: bool
        """
        # Map each closer to its required opener; any character outside
        # the six bracket characters invalidates the string outright.
        closers = {')': '(', ']': '[', '}': '{'}
        pending = []
        for ch in s:
            if ch in '([{':
                pending.append(ch)
            elif ch in closers:
                if not pending or pending[-1] != closers[ch]:
                    return False
                pending.pop()
            else:
                # non-bracket characters are rejected
                return False
        # valid only if every opener found its closer
        return not pending
if __name__ == '__main__':
    # Smoke-test the validator.  print(...) is valid in both Python 2
    # (statement with parenthesized expression) and Python 3 (function
    # call); the bare `print v.isValid(s)` form was a Python-3 SyntaxError.
    s = "[]"
    v = Solution()
    print(v.isValid(s))
"""
Contains functions for computing thermal noise in conductive thin objects.
"""
__all__ = [
"compute_current_modes",
"noise_covar",
"noise_covar_dir",
"noise_var",
"visualize_current_modes",
]
import numpy as np
import trimesh
from .suhtools import SuhBasis
from .mesh_conductor import MeshConductor
from .mesh_magnetics import magnetic_field_coupling
def compute_current_modes(
    obj,
    T,
    resistivity,
    thickness,
    mode="AC",
    freqs=np.array((0,)),
    Nmodes=None,
    return_eigenvals=False,
    **kwargs
):
    """
    Calculates the (AC or DC) Johnson noise current modes on the conducting surface.

    Parameters
    ----------
    obj: Trimesh-object or MeshConductor-object
        Represents the boundary on which current density is specified
        or MeshConductor object that wraps the mesh
    T: float
        Temperature in Kelvins
    resistivity: float or array (Nfaces)
        Resistivity value in Ohm/meter
    thickness: float or array (Nfaces)
        Thickness of the surface. NB! Must be small in comparison to observation distance
    mode: 'AC' or 'DC'
        Calculate modes as a function of frequency or just at DC?
    freqs: Nfreqs array
        The frequencies at which the eddy-current modes are computed.
        Ignored in 'DC' mode; defaults to DC (0 Hz) in 'AC' mode.
    Nmodes: int
        How many modes are computed? If None, all Nvertices modes are computed
    return_eigenvals: boolean
        Return also the eigenvalues (the inverse circuit time constants)?
    kwargs: dict
        Passed to Conductor creation if a Trimesh object is passed as 'obj'

    Returns
    -------
    vl: (Nvertices x Nmodes x Nfreqs) (AC) or (Nvertices x Nmodes) (DC) array
        The spectral eddy-current modes
    u: Nmodes array
        The eigenvalues (only if return_eigenvals is True)
    """
    kB = 1.38064852e-23  # Boltzmann constant [J/K]

    if isinstance(obj, MeshConductor):
        obj.resistivity = resistivity
        obj.thickness = thickness
        print(
            "Updated MeshConductor object thickness and resistivity attributes according to function parameters"
        )
    elif isinstance(obj, trimesh.Trimesh):
        obj = MeshConductor(
            mesh_obj=obj,
            resistivity=resistivity,
            thickness=thickness,
            resistance_full_rank=False,
            **kwargs
        )
    else:
        raise TypeError("obj type should be either Trimesh or Conductor")

    # Both branches previously built the same basis with magnetic="AC" /
    # magnetic="DC"; pass the validated mode string straight through.
    if mode in ("AC", "DC"):
        suh = SuhBasis(obj, Nc=Nmodes, magnetic=mode)
    else:
        raise ValueError("Mode should be either 'AC' or 'DC'")

    v = suh.basis
    u = suh.eigenvals

    if mode == "AC":
        Nfreqs = len(freqs)
        vl = np.zeros((len(suh.mesh_conductor.mesh.vertices), v.shape[1], Nfreqs))

        # Scale the eigenmodes with the spectral density of the thermal
        # noise current: sqrt(4 kB T / u) with a single-pole roll-off.
        for i in range(v.shape[1]):
            amp = (
                2
                * np.sqrt(kB * T / u[i])
                * np.sqrt(1 / (1 + (2 * np.pi * freqs / u[i]) ** 2))
            )
            vl[:, i, :] = (
                (
                    np.zeros((Nfreqs, vl.shape[0]))
                    + suh.mesh_conductor.inner2vert @ v[:, i]
                ).T
            ) * amp
    else:  # mode == "DC"
        vl = np.zeros((len(suh.mesh_conductor.mesh.vertices), v.shape[1]))

        # Scale the eigenmodes with the DC spectral density of the
        # thermal noise current.
        for i in range(v.shape[1]):
            amp = 2 * np.sqrt(kB * T / u[i])
            vl[:, i] = suh.mesh_conductor.inner2vert @ v[:, i] * amp

    if return_eigenvals:
        return vl, u
    return vl
def noise_covar(B_coupling, vl, Nmodes=None):
    """
    Calculates (AC or DC) magnetic noise covariance along x, y and z from
    the modes vl.

    Parameters
    ----------
    B_coupling: ndarray (Np, 3, Nvertices)
        Magnetic field coupling matrix from the mesh
    vl: ndarray (Nvertices, Nmodes, Nfreqs) or (Nvertices, Nmodes)
        The Johnson noise current modes on the mesh
    Nmodes: int
        Number of modes to include; all modes in vl when None

    Returns
    -------
    Bcov: ndarray (Np, Np, 3, Nfreqs) or (Np, Np, 3)
        Magnetic noise covariance
    """
    n_modes = vl.shape[1] if Nmodes is None else Nmodes

    # Project the current modes through the field coupling, then contract
    # over the mode index to obtain the point-to-point covariance.
    if vl.ndim == 2:
        coupled = np.einsum("ihj,jl->ilh", B_coupling, vl[:, :n_modes])
        return np.einsum("jih,lih->jlh", coupled, coupled)

    coupled = np.einsum("ihj,jlk->ilhk", B_coupling, vl[:, :n_modes, :])
    return np.einsum("jihk,lihk->jlhk", coupled, coupled)
def noise_var(B_coupling, vl, Nmodes=None):
    """
    Calculates (AC or DC) magnetic noise variance along x, y and z from
    the modes vl.

    Parameters
    ----------
    B_coupling: ndarray (Np, 3, Nvertices)
        Magnetic field coupling matrix from the mesh
    vl: ndarray (Nvertices, Nmodes, Nfreqs) or (Nvertices, Nmodes)
        The Johnson noise current modes on the mesh
    Nmodes: int
        Number of modes to include; all modes in vl when None

    Returns
    -------
    Bcov: ndarray (Np, 3, Nfreqs) or (Np, 3)
        Magnetic noise variance
    """
    n_modes = vl.shape[1] if Nmodes is None else Nmodes

    # Project the modes through the coupling and sum the squared
    # per-mode contributions at each field point.
    if vl.ndim == 2:
        coupled = np.einsum("ihj,jl->ilh", B_coupling, vl[:, :n_modes])
        return np.einsum("ijh,ijh->ih", coupled, coupled)

    coupled = np.einsum("ihj,jlk->ilhk", B_coupling, vl[:, :n_modes, :])
    return np.einsum("ijhk,ijhk->ihk", coupled, coupled)
def noise_covar_dir(B_coupling, vl, Nmodes=None):
    """
    Calculates (AC or DC) magnetic noise covariance between x, y and z
    directions from the modes vl.

    Parameters
    ----------
    B_coupling: ndarray (Np, 3, Nvertices)
        Magnetic field coupling matrix from the mesh
    vl: ndarray (Nvertices, Nmodes, Nfreqs) or (Nvertices, Nmodes)
        The Johnson noise current modes on the mesh
    Nmodes: int
        Number of modes to include; all modes in vl when None

    Returns
    -------
    Bcov: ndarray (Np, 3, 3, Nfreqs) or (Np, 3, 3)
        Magnetic noise covariance between x, y and z field components
    """
    n_modes = vl.shape[1] if Nmodes is None else Nmodes

    # Contract over the mode index, keeping both field-component axes to
    # form the per-point 3x3 directional covariance.
    if vl.ndim == 2:
        coupled = np.einsum("ihj,jl->ilh", B_coupling, vl[:, :n_modes])
        return np.einsum("ihj,ihl->ijl", coupled, coupled)

    coupled = np.einsum("ihj,jlk->ilhk", B_coupling, vl[:, :n_modes, :])
    return np.einsum("ihjk,ihlk->ijlk", coupled, coupled)
def sensornoise_covar(mesh, p, n, w, vl, Nmodes=None):
    """
    Calculates the upper diagonal of (AC or DC) magnetic noise covariance
    on a sensor array described by integration points.
    Assumes same number of integration points for each sensor.

    Parameters
    ----------
    mesh: Trimesh-object
        The boundary on which current density is specified
    p: ndarray (Nsensors, Np, 3)
        Coordinates of the integration points of the Nsensors
    n: ndarray (Nsensors, Np, 3)
        Orientations of the integration points of the Nsensors
    w: ndarray (Nsensors, Np)
        The weights of the integration points
    vl: ndarray (Nvertices, Nmodes, x Nfreqs) or (Nvertices, Nmodes)
        The Johnson noise current modes on the mesh
    Nmodes: int
        How many modes are included? If None, all modes in vl are included

    Returns
    -------
    Bcov: ndarray (Nsensors, Nsensors, Nfreqs) or (Nsensors, Nsensors)
        Magnetic noise covariance (only entries with j >= i are filled;
        the strict lower triangle is left at zero)
    """
    if Nmodes is None:
        Nmodes = vl.shape[1]
    Nsensors = p.shape[0]
    Np = p.shape[1]
    # Compute the magnetic field coupling matrices along the orientations
    # of the integration points
    b = np.zeros((Np, Nsensors, mesh.vertices.shape[0]))
    for i in range(Nsensors):
        B_coupling = magnetic_field_coupling(mesh, p[i], analytic=True)
        # project the 3-component coupling onto each point's orientation
        b[:, i] = np.einsum("ijk,ij->ik", B_coupling, n[i])
    # Compute the magnetic noise covariance on the sensor array using the
    # weights of the integration points
    if vl.ndim == 2:
        # DC case: single covariance matrix
        Bcov = np.zeros((Nsensors, Nsensors))
        for i in range(Nsensors):
            for j in range(i, Nsensors):
                # weighted sensor couplings contracted over the modes
                Bcov[i, j] = (
                    w[i].T
                    @ b[:, i]
                    @ vl[:, 0:Nmodes]
                    @ vl[:, 0:Nmodes].T
                    @ b[:, j].T
                    @ w[j]
                )
    else:
        # AC case: one covariance matrix per frequency (last axis)
        Bcov = np.zeros((Nsensors, Nsensors, vl.shape[2]))
        for i in range(Nsensors):
            for j in range(i, Nsensors):
                temp = w[i].T @ b[:, i]
                bi = np.einsum("ij,jkh->ikh", temp[None, :], vl[:, 0:Nmodes, :])
                temp = w[j].T @ b[:, j]
                bj = np.einsum("ij,jkh->ikh", temp[None, :], vl[:, 0:Nmodes, :])
                # contract over modes per frequency
                Bcov[i, j] = np.einsum("ijh,ijh->h", bi, bj)
    return Bcov
def visualize_current_modes(
    mesh, vl, Nmodes, scale, contours=True, colormap="bwr", dist=0.5
):
    """
    Visualizes current modes up to Nmodes.

    Parameters
    ----------
    mesh: Trimesh mesh object
        The surface mesh
    vl: Nvertices x Nvertices array
        The normalized eddy-current modes vl[:,i]
    Nmodes: int
        Number of modes to be plotted
    scale: float
        Scaling factor
        NOTE(review): not referenced in this implementation — verify
        whether it should scale the plotted scalars.
    contours: boolean
        If True, show contours
    colormap: string
        Which (matplotlib) colormap to use
    dist: float
        Relative spacing between the per-mode mesh copies, as a fraction
        of the mesh extent

    Returns
    -------
    s: mayavi surface object
        Handle of the last plotted mode surface
    """
    from mayavi import mlab

    # Lay the modes out on a roughly square grid of mesh copies.
    N1 = np.floor(np.sqrt(Nmodes))
    dx = (mesh.vertices[:, 0].max() - mesh.vertices[:, 0].min()) * (1 + dist)
    dy = (mesh.vertices[:, 1].max() - mesh.vertices[:, 1].min()) * (1 + dist)

    i = 0
    j = 0
    for n in range(Nmodes):
        print(i, j)

        # translate a copy of the mesh to grid cell (i, j)
        points = mesh.vertices.copy()
        points[:, 0] += i * dx
        points[:, 1] += j * dy

        s = mlab.triangular_mesh(
            *points.T, mesh.faces, scalars=vl[:, n], colormap=colormap
        )

        # symmetric color range so zero maps to the colormap midpoint
        limit = np.max(np.abs(vl[:, n]))

        s.module_manager.scalar_lut_manager.number_of_colors = 256
        s.module_manager.scalar_lut_manager.data_range = np.array([-limit, limit])
        s.actor.mapper.interpolate_scalars_before_mapping = True
        s.enable_contours = contours

        # advance along the row; wrap to the next row after N1 columns
        if i < N1:
            i += 1
        else:
            j += 1
            i = 0

    return s
|
from .ascim import ASCIM
class ASCIMDraw:
    """Draw text and shapes on an ASCIM Image. Operations will be done in-place."""

    def __init__(self, im):
        """Construct a ASCIMDraw object on a canvas.

        :param im: ASCIM Image this ASCIMDraw object applies modifications to.
        :raises TypeError: when ``im`` does not look like an ASCIM Image.
        """
        if hasattr(im, 'size'):
            # HACK: very naïve duck type checking
            self.__image = im
        else:
            raise TypeError('`ASCIMDraw` requires an ASCIM Image as argument.')

    def text(self, box: tuple, text: str, transparency=False):
        """Draw word-wrapped text on the ASCIM Image.

        :param box: box to fit the text in. (left, top, width, height)
            NOTE(review): the height (box[3]) is not enforced by this
            implementation — overlong text may run past it.
        :param text: text to draw on ASCIM Image.
        :param transparency: if set to True, when the ``text`` to be drawn
            overlaps with non-whitespace characters in the original image,
            the character from the latter is kept. Otherwise, the character
            is a space.  (Not referenced in this implementation; presumably
            intended for ``paste`` — TODO confirm.)
        """
        wrapped = ''
        width = 0
        for wd in text.split(' '):
            if width + len(wd) <= box[2]:
                wrapped += wd + ' '
                width += len(wd) + 1
            else:
                # Bug fix: only break the line when something has already
                # been emitted; previously a first word wider than the box
                # produced a spurious leading blank line.
                if wrapped:
                    wrapped += '\n'
                wrapped += wd + ' '
                width = len(wd) + 1
        # remove trailing space on each line, incl. last line
        wrapped = wrapped.replace(' \n', '\n')[:-1]
        self.__image.paste(ASCIM(wrapped), (box[0], box[1]))
|
"""Contains the classes and functions for scraping a yahoo finance summary page."""
from collections import ChainMap
from typing import Dict, Iterable, List, Optional
from pandas import DataFrame
from pendulum.date import Date
from pydantic import BaseModel as Base
from pydantic import Field
from requests_html import HTML
from .cleaner import cleaner, CommonCleaners, table_cleaner
from .lookup import fuzzy_search
from .multidownloader import _download_pages_with_threads, _download_pages_without_threads
from .quote import parse_quote_header_info, Quote
from .requestor import requestor
class SummaryPage(Base):
    """Data scraped from the yahoo finance summary page.

    Attributes:
        symbol (str): Ticker Symbol
        name (str): Ticker Name
        quote (Quote): Quote header section of the page.
        open (float): Open price.
        high (float): Days high.
        low (float): Days low.
        close (float): Days close price.
        change (float): Dollar change in price.
        percent_change (float): Percent change in price.
        previous_close (float): Previous days close price.
        bid_price (float): Bid price.
        bid_size (int): Bid size.
        ask_price (float): Ask price.
        ask_size (int): Ask size.
        fifty_two_week_high (float): High of the fifty two week range.
        fifty_two_week_low (float): Low of the fifty two week range.
        volume (int): Volume.
        average_volume (int): Average Volume.
        market_cap (int): Market capitalization.
        beta_five_year_monthly (float): Five year monthly prices benchmarked against the SPY.
        pe_ratio_ttm (float): Share Price divided by Earnings Per Share trailing twelve months.
        eps_ttm (float): Earnings per share trailing twelve months.
        earnings_date (Date): Estimated earnings report release date.
        forward_dividend_yield (float): Estimated dividend yield.
        forward_dividend_yield_percentage (float): Estimated divided yield percentage.
        exdividend_date (Date): Ex-Dividend Date.
        one_year_target_est (float): One year target estimation.

    Notes:
        This class inherits from the pydantic BaseModel which allows for the use
        of .json() and .dict() for serialization to json strings and dictionaries.

        .json(): Serialize to a JSON object.

        .dict(): Serialize to a dictionary.
    """

    symbol: str
    name: str  # from quote
    quote: Quote
    open: Optional[float]
    high: Optional[float] = Field(alias="days_range")
    low: Optional[float] = Field(alias="days_range")
    close: Optional[float]  # pre-cleaned from quote
    change: Optional[float]  # pre-cleaned from quote
    percent_change: Optional[float]  # pre-cleaned from quote
    previous_close: Optional[float]
    bid_price: Optional[float] = Field(alias="bid")
    bid_size: Optional[int] = Field(alias="bid")
    ask_price: Optional[float] = Field(alias="ask")
    ask_size: Optional[int] = Field(alias="ask")
    fifty_two_week_low: Optional[float] = Field(alias="fifty_two_week_range")
    fifty_two_week_high: Optional[float] = Field(alias="fifty_two_week_range")
    volume: Optional[int]
    average_volume: Optional[int] = Field(alias="avg_volume")
    market_cap: Optional[int]
    beta_five_year_monthly: Optional[float]
    pe_ratio_ttm: Optional[float]
    eps_ttm: Optional[float]
    earnings_date: Optional[Date]
    forward_dividend_yield: Optional[float]
    forward_dividend_yield_percentage: Optional[float] = Field(alias="forward_dividend_yield")
    exdividend_date: Optional[Date]
    one_year_target_est: Optional[float]

    _clean_symbol = cleaner("symbol")(CommonCleaners.clean_symbol)

    # Ranges are formatted "low - high": the FIRST dash-split value is the
    # low, the SECOND the high.  (The validator names were previously
    # swapped relative to the fields they clean.)
    _clean_lows = cleaner("low", "fifty_two_week_low")(
        CommonCleaners.clean_first_value_split_by_dash
    )
    _clean_highs = cleaner("high", "fifty_two_week_high")(
        CommonCleaners.clean_second_value_split_by_dash
    )
    _clean_date = cleaner("earnings_date", "exdividend_date")(CommonCleaners.clean_date)
    _clean_common_values = cleaner(
        "open",
        "previous_close",
        "market_cap",
        "volume",
        "average_volume",
        "pe_ratio_ttm",
        "beta_five_year_monthly",
        "eps_ttm",
        "one_year_target_est",
    )(CommonCleaners.clean_common_values)
    _clean_forward_dividend_yield = cleaner("forward_dividend_yield")(
        CommonCleaners.clean_first_value_split_by_space
    )
    _clean_forward_dividend_yield_percentage = cleaner("forward_dividend_yield_percentage")(
        CommonCleaners.clean_second_value_split_by_space
    )
    # "bid"/"ask" cells look like "price x size".
    _clean_bid_ask_price = cleaner("bid_price", "ask_price")(
        CommonCleaners.clean_first_value_split_by_x
    )
    _clean_bid_ask_volume = cleaner("bid_size", "ask_size")(
        CommonCleaners.clean_second_value_split_by_x
    )

    def __lt__(self, other) -> bool:  # noqa: ANN001
        """Compare SummaryPage objects to allow ordering by symbol."""
        if other.__class__ is self.__class__:
            return self.symbol < other.symbol
        # Follow the comparison protocol: let Python try the reflected
        # operation / raise TypeError instead of silently returning None.
        return NotImplemented
class SummaryPageGroup(Base):
    """Collection of SummaryPage objects covering multiple ticker symbols.

    Attributes:
        pages (SummaryPage):

    Notes:
        Inherits from the pydantic BaseModel, so .json() and .dict() are
        available for serialization to JSON strings and dictionaries.
    """

    pages: List[SummaryPage] = list()

    def append(self, page: SummaryPage) -> None:
        """Add one SummaryPage to the group.

        Args:
            page (SummaryPage): A SummaryPage object to add to the group.

        Raises:
            AttributeError: If page is not a SummaryPage instance.
        """
        if page.__class__ is not SummaryPage:
            raise AttributeError("Can only append SummaryPage objects.")
        self.pages.append(page)

    @property
    def symbols(self: "SummaryPageGroup") -> List[str]:
        """Ticker symbols of every page in the group."""
        return [page.symbol for page in self]

    def sort(self: "SummaryPageGroup") -> None:
        """Order the contained pages by ticker symbol."""
        self.pages = sorted(self.pages)

    @property
    def dataframe(self: "SummaryPageGroup") -> Optional[DataFrame]:
        """Pages as a symbol-indexed pandas DataFrame, or None when empty."""
        serialized = self.dict().get("pages")
        if not serialized:
            return None
        frame = DataFrame.from_dict(serialized)
        frame.set_index("symbol", inplace=True)
        frame.drop("quote", axis=1, inplace=True)
        frame.sort_index(inplace=True)
        return frame  # TODO: none or nan

    def __iter__(self: "SummaryPageGroup") -> Iterable:
        """Iterate over the contained SummaryPage objects."""
        return iter(self.pages)

    def __len__(self: "SummaryPageGroup") -> int:
        """Number of contained SummaryPage objects."""
        return len(self.pages)
def parse_summary_table(html: HTML) -> Optional[Dict]:
    """Extract the quote-summary table data from a page's HTML.

    Returns None when the summary <div> is not present.
    """
    section = html.find("div#quote-summary", first=True)
    return table_cleaner(section) if section else None
def get_summary_page(
    symbol: str,
    use_fuzzy_search: bool = True,
    page_not_found_ok: bool = False,
    **kwargs,  # noqa: ANN003
) -> Optional[SummaryPage]:
    """Fetch and parse the Yahoo Finance summary page for one symbol.

    Args:
        symbol (str): Ticker symbol.
        use_fuzzy_search (bool): If True, validate the symbol with a fuzzy
            lookup before requesting the summary page.
        page_not_found_ok (bool): If True, return None when the page is
            not found instead of raising.
        **kwargs: Passed through (session, proxies, timeout) to the
            requestor function.

    Returns:
        SummaryPage: When data is found.
        None: When no data is found and page_not_found_ok is True.

    Raises:
        AttributeError: When the page is not found and page_not_found_ok
            is False.
    """
    if use_fuzzy_search:
        match = fuzzy_search(symbol, first_ticker=True, **kwargs)
        if match:
            symbol = match.symbol
    url = f"https://finance.yahoo.com/quote/{symbol}?p={symbol}"
    response = requestor(url, **kwargs)
    if response.ok:
        html = HTML(html=response.text, url=url)
        header = parse_quote_header_info(html)
        table = parse_summary_table(html)
        if header and table:
            merged = ChainMap(header.dict(), table)
            merged["symbol"] = symbol
            merged["quote"] = header
            return SummaryPage(**merged)
    if page_not_found_ok:
        return None
    raise AttributeError(f"{symbol} summary page not found.")
def get_multiple_summary_pages(  # pylint: disable=too-many-arguments
    symbols: List[str],
    use_fuzzy_search: bool = True,
    page_not_found_ok: bool = True,
    with_threads: bool = False,
    thread_count: int = 5,
    progress_bar: bool = True,
    **kwargs,  # noqa: ANN003
) -> Optional[SummaryPageGroup]:
    """Fetch summary pages for several symbols, optionally using threads.

    Args:
        symbols (List[str]): Ticker symbols or company names.
        use_fuzzy_search (bool): If True, validate each symbol via a
            fuzzy lookup before requesting data.
        page_not_found_ok (bool): If True, return None when a page is
            not found.
        with_threads (bool): If True, download with a thread pool.
        thread_count (int): Thread count used when with_threads is True.
        progress_bar (bool): If True, show a progress bar.
        **kwargs: Passed through (session, proxies, timeout) to the
            requestor function.

    Returns:
        SummaryPageGroup: When data is found.
        None: When no data is found and page_not_found_ok is True.

    Raises:
        AttributeError: When a page is not found and page_not_found_ok
            is False.
    """
    unique_symbols = list(set(symbols))  # de-duplicate requested symbols
    common_kwargs = dict(
        use_fuzzy_search=use_fuzzy_search,
        page_not_found_ok=page_not_found_ok,
        progress_bar=progress_bar,
        **kwargs,
    )
    if with_threads:
        return _download_pages_with_threads(
            SummaryPageGroup,
            get_summary_page,
            unique_symbols,
            thread_count=thread_count,
            **common_kwargs,
        )
    return _download_pages_without_threads(
        SummaryPageGroup,
        get_summary_page,
        unique_symbols,
        **common_kwargs,
    )
|
from nltk.util import bigrams
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import string
import pandas as pd
import random
#from sentiments import SENTIMENTS
DATAFRAME_PATH = "../../dataset/dataframes/"
def preprocess(lyric: str) -> list:
    """Lower-case a lyric, expand common contractions/slang, tokenize it,
    and drop leftover clitic fragments and punctuation-only tokens."""
    expansions = (
        ("can't", 'can not'),
        ("won't", 'will not'),
        ("'d", ' would'),
        ("gonna", 'going to'),
        ("wanna", 'want to'),
        ("gotta", 'got to'),
        ("'cause", 'because'),
        ("'bout", 'about'),
    )
    text = lyric.lower()
    for contraction, expansion in expansions:
        text = text.replace(contraction, expansion)
    # tokenize, dropping contraction fragments the tokenizer splits off
    fragments = ["'s", "n't", "'m", "'re", "'ll", "'ve", "'d", "'ll"]
    tokens = [t for t in word_tokenize(text) if t not in fragments]
    # remove punctuation-only tokens (standard plus tokenizer artifacts)
    punctuation = set(string.punctuation) | {"...", "''", "'", "`", "``"}
    return [t for t in tokens if t not in punctuation]
def preprocess_unigram(words: list) -> list:
    """Filter English stopwords out of a token list."""
    stops = set(stopwords.words('english'))
    return [token for token in words if token not in stops]
def process_input(user_input: str) -> tuple:
    """Tokenize the input once, then derive stopword-free unigrams and
    raw-token bigrams from it.

    Returns a (unigrams_list, bigrams_list) tuple.
    """
    preprocessed = preprocess(user_input)
    unigrams_list = preprocess_unigram(preprocessed)
    bigrams_list = list(bigrams(preprocessed))
    return unigrams_list, bigrams_list
def load_unigrams_data(sentiment: str) -> pd.DataFrame:
    """Load the per-sentiment unigram statistics CSV."""
    path = DATAFRAME_PATH + sentiment + "_unigram_data.csv"
    column_types = {'word': str, 'totalRepetitions': int, 'fileOcurrences': int, 'weight': float}
    return pd.read_csv(path, dtype=column_types)
def load_bigrams_data(sentiment: str) -> pd.DataFrame:
    """Load the per-sentiment bigram statistics CSV."""
    return pd.read_csv(DATAFRAME_PATH + sentiment + "_bigram_data.csv")
def load_similarities(sentiment: str) -> pd.DataFrame:
    """Load the per-sentiment word-similarity matrix, indexed by word."""
    path = DATAFRAME_PATH + sentiment + "_similarities.csv"
    return pd.read_csv(path, index_col='word')
def create_dict_recursively(dictionary: dict,number: int = 4, depth: int = 2, bigram_df: pd.DataFrame = None):
    """Expand each key of `dictionary` into a tree of follow-up words.

    Each key receives up to `number` successor words from `bigram_df`;
    expansion recurses until `depth` levels have been generated.
    Mutates `dictionary` in place and returns it.
    """
    # depth is decremented first, so the recursive call below (which passes
    # the already-decremented value) produces one fewer level each time.
    depth -= 1
    if(depth == 0):
        return dictionary
    for el in dictionary:
        new_items = next_words_list(el, bigram_df, number)
        new_d = {}
        if(depth > 1):
            for item in new_items:
                new_d[item] = None # placeholder for the nested dict
            dictionary[el] = create_dict_recursively(new_d, number, depth, bigram_df)
        else:
            # last level: store the successor words as a flat list
            dictionary[el] = new_items
    return dictionary
def shuffle_dict(dictionary: dict):
    """Return a copy of `dictionary` with its keys in random order.

    List values are also shuffled (in place, so the caller's lists are
    reordered as a side effect); non-list values are carried over as-is.

    Fix: the local result variable was named `shuffle_dict`, shadowing the
    function itself inside its own body; renamed for clarity.

    Args:
        dictionary: Mapping to reorder.

    Returns:
        dict: A new dict with shuffled key order and shuffled list values.
    """
    keys = list(dictionary.keys())
    random.shuffle(keys)
    shuffled = {}
    for key in keys:
        value = dictionary[key]
        if isinstance(value, list):
            random.shuffle(value)  # NOTE: mutates the caller's list
        shuffled[key] = value
    return shuffled
def next_words_dict(word, bigram_df, number):
    """Map each of the top `number` follow-up words of `word` to None.

    `bigram_df` must have 'word1'/'word2' columns; head(number) picks the
    first matching rows in table order.
    """
    matches = bigram_df[bigram_df['word1'] == word]
    successors = matches.head(number)['word2'].tolist()
    return {successor: None for successor in successors}
def next_words_list(word, bigram_df, number):
    """Top `number` follow-up words of `word` from the bigram table."""
    successors = bigram_df.loc[bigram_df['word1'] == word, 'word2']
    return successors.head(number).tolist()
def flatten_dict(d, parent_key='', sep=' '):
    """Flatten a nested dict, joining key paths with `sep`.

    Fix: `collections.MutableMapping` was removed in Python 3.10; the ABC
    has lived in `collections.abc` since 3.3.

    Args:
        d: Possibly-nested mapping with string keys.
        parent_key: Prefix to prepend to every key (used by recursion).
        sep: Separator placed between nested key components.

    Returns:
        dict: Single-level dict with joined key paths.
    """
    from collections.abc import MutableMapping

    items = []
    for k, v in d.items():
        new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, MutableMapping):
            items.extend(flatten_dict(v, new_key, sep=sep).items())
        else:
            items.append((new_key, v))
    return dict(items)
def convert_dict_to_list(d):
    """Join each key with every element of its value list, producing
    'key element' strings in dict order."""
    return [f"{key} {item}" for key, items in d.items() for item in items]
def convert_values_to_new_range(values: list, new_min: float = 15, new_max: float = 40):
    """Linearly rescale `values` into the range [new_min, new_max].

    Fixes:
      * Guards against a zero input range (all values equal), which
        previously raised ZeroDivisionError; such inputs now map every
        value to new_min.
      * Removed an unused loop counter.

    Args:
        values: Non-empty list of numbers.
        new_min: Lower bound of the target range.
        new_max: Upper bound of the target range.

    Returns:
        list: Rescaled float values, in the same order as the input.
    """
    old_min = min(values)
    old_max = max(values)
    old_range = old_max - old_min
    if old_range == 0:
        # Degenerate input: all values identical — pin them to the bottom
        # of the target range instead of dividing by zero.
        return [float(new_min)] * len(values)
    new_range = new_max - new_min
    return [((v - old_min) * new_range) / old_range + new_min for v in values]
#RECOMMENDATION
def recommend_most_common_words(unigram_df: pd.DataFrame) -> list:
    """Return the highest-weight unigrams as word-cloud entries.

    Fix: the original loop iterated range(0, len-1) and silently dropped
    the final recommendation.

    Args:
        unigram_df: Frame with at least 'word' and 'weight' columns.

    Returns:
        list: Dicts of {'value': word, 'count': rescaled weight}.
    """
    N_UNIGRAM_RECOMMENDATIONS = 30
    top = unigram_df.nlargest(N_UNIGRAM_RECOMMENDATIONS, 'weight')
    words = top['word'].tolist()
    # rescale weights into the display range used by the word cloud
    counts = convert_values_to_new_range(top['weight'].tolist())
    return [{"value": word, "count": count} for word, count in zip(words, counts)]
#RECOMMENDATION
def recommend_most_common_ngrams(ngram_df: pd.DataFrame) -> list:
    """Return the highest-weight bigrams as word-cloud entries.

    Fix: the original loop iterated range(0, len-1) and silently dropped
    the final recommendation.

    Args:
        ngram_df: Frame with 'word1', 'word2' and 'weight' columns.

    Returns:
        list: Dicts of {'value': 'word1 word2', 'count': rescaled weight}.
    """
    N_NGRAM_RECOMMENDATIONS = 50
    top = ngram_df.nlargest(N_NGRAM_RECOMMENDATIONS, 'weight')
    pairs = zip(top['word1'].tolist(), top['word2'].tolist())
    # rescale weights into the display range used by the word cloud
    counts = convert_values_to_new_range(top['weight'].tolist())
    return [
        {"value": first + ' ' + second, "count": count}
        for (first, second), count in zip(pairs, counts)
    ]
#RECOMMENDATION
def recommend_keyed_vectors(unigrams: list, similarities: pd.DataFrame, number: int = 5):
    """For each input word present in the similarity matrix, collect its
    `number` most similar words; the resulting dict is shuffled.

    Returns None (implicit) when `unigrams` is empty.
    """
    if(len(unigrams) <1):
        return
    recommendations = {}
    for word in unigrams:
        if word in similarities:
            column_series = similarities[word]
            # number+1 because the word itself is usually its own top match
            # and is popped below. NOTE(review): sort_values before nlargest
            # is redundant for selection but affects tie ordering; kept.
            most_similar = column_series.sort_values(ascending=False).nlargest(number+1)
            most_similar_words = most_similar.index.values.tolist()
            if(most_similar_words[0] == word):
                most_similar_words.pop(0)
            recommendations[word] = most_similar_words
    shuffled_recommendations = shuffle_dict(recommendations)
    return shuffled_recommendations
# RECOMMENDATION
#RECOMMENDATION BASED ON LAST WORD
def recommend_bigrams(bigrams_input: list, bigram_df: pd.DataFrame, number:int):
    """Suggest multi-word continuations that follow the last word of the
    final input bigram. Returns None when no bigrams were supplied."""
    if not bigrams_input:
        return None
    last_word = bigrams_input[-1][1]
    seeds = next_words_dict(last_word, bigram_df, number)
    tree = create_dict_recursively(seeds, number, 3, bigram_df)
    return convert_dict_to_list(flatten_dict(tree))
def pretty_dict(d, indent=0):
    """Recursively print a nested dict as a tab-indented tree."""
    pad = '\t' * indent
    for key, value in d.items():
        print(pad + str(key))
        if isinstance(value, dict):
            pretty_dict(value, indent + 1)
        else:
            print(pad + '\t' + str(value))
def sentiment_selection(sentiment: str) -> dict:
    """Load the per-sentiment n-gram tables and build the initial word
    and bigram suggestions shown before the user types anything."""
    unigram_table = load_unigrams_data(sentiment)
    bigram_table = load_bigrams_data(sentiment)
    return {
        'most_common_words': recommend_most_common_words(unigram_table),
        'most_common_bigrams': recommend_most_common_ngrams(bigram_table),
    }
def get_recommendations(sentiment: str, user_input: str, n_similar_words: int, n_next_bigrams: int):
    """Build similar-word and next-word suggestions for the user's input.

    Returns a dict with 'most_similar_words' (similarity-matrix based) and
    'next_bigrams' (bigram-model continuations).
    """
    n_words = user_input.split()
    unigrams_input, bigrams_input = process_input(user_input)
    # NOTE(review): unigram_df is loaded but never used below.
    unigram_df = load_unigrams_data(sentiment)
    bigram_df = load_bigrams_data(sentiment)
    similarities_df = load_similarities(sentiment)
    most_similar_words = recommend_keyed_vectors(unigrams_input, similarities_df, n_similar_words)
    if(len(n_words) < 2):
        # NOTE(review): with fewer than two words there are no bigrams, so
        # the unigram *string* list is passed instead; recommend_bigrams
        # then takes element [1] of the last string — i.e. its second
        # character. This looks like a bug; confirm intended behavior.
        next_bigrams = recommend_bigrams(unigrams_input, bigram_df, n_next_bigrams)
        return {
            'most_similar_words': most_similar_words,
            'next_bigrams': next_bigrams
        }
    else:
        next_bigrams = recommend_bigrams(bigrams_input, bigram_df, n_next_bigrams)
        return {
            'most_similar_words': most_similar_words,
            'next_bigrams': next_bigrams}
|
from rest_framework.exceptions import ValidationError
class BaseElementSet(object):
    '''
    A `BaseElementSet` is a collection of unique `BaseElement` instances
    (more likely the derived children classes) and is typically used as a
    metadata data structure attached to some "real" data. For instance,
    given a matrix of gene expressions, the `ObservationSet` (a child of
    BaseElementSet) is the set of samples that were assayed.

    We depend on the native python set data structure and appropriately
    hashable/comparable `BaseElement` instances (and children).

    This essentially copies most of the functionality of the native set class,
    simply passing through the operations, but includes some additional members
    specific to our application.

    Notably, we disallow (i.e. raise exceptions) if there are attempts to create
    duplicate `BaseElement`s, in contrast to native sets which silently
    ignore duplicate elements.

    A serialized representation (where the concrete `BaseElement` type is `Observation`)
    would look like:
    ```
    {
        "multiple": <bool>,
        "elements": [
            <Observation>,
            <Observation>,
            ...
        ]
    }
    ```
    '''

    # the element_typename allows us to keep the logic here, but raise
    # appropriate warnings/exceptions based on the concrete implementation.
    # For instance, an ObservationSet derives from this class. If someone
    # passes duplicate elements, we want to tell them it came from specifying
    # "observations" incorrectly. For a FeatureSet, we would obviously want
    # to warn about incorrect "features". Child classes will set this field
    # in their __init__(self)
    element_typename = None

    def __init__(self, init_elements, multiple=True):
        '''
        Creates a `BaseElementSet` instance.

        `init_elements` is an iterable of `BaseElement` instances
        `multiple` defines whether we should permit multiple `BaseElement`
        instances.

        Raises NotImplementedError if a child class failed to set
        `element_typename`, and ValidationError for singleton violations
        or duplicate elements.
        '''
        if self.element_typename is None:
            raise NotImplementedError('Set the member "element_typename"'
                ' in your child class implementation')

        # Materialize so arbitrary iterables (e.g. generators) survive the
        # two len() calls below.
        init_elements = list(init_elements)

        if (not multiple) and (len(init_elements) > 1):
            raise ValidationError({'elements':
                'The {element_typename}Set was declared to be a singleton, but'
                ' multiple elements were passed to the constructor.'.format(
                    element_typename=self.element_typename.capitalize())
            })

        self.elements = set(init_elements)
        # A shrinking length means the input held duplicates; unlike native
        # sets, we reject them instead of silently dropping.
        if len(self.elements) < len(init_elements):
            raise ValidationError({'elements':
                'Attempted to create an {element_typename}Set with a'
                ' duplicate element.'.format(
                    element_typename=self.element_typename.capitalize())
            })

        self.multiple = multiple

    def add_element(self, new_element):
        '''
        Adds a new `Observation` to the `ObservationSet`
        (or `Feature` to `FeatureSet`)

        Raises ValidationError when adding to a full singleton set or when
        the element is already present.
        '''
        # if it's a singleton (multiple=False), prevent adding more
        # if the set length is already 1.
        if not self.multiple and len(self.elements) == 1:
            raise ValidationError(
                'Tried to add a second {element_type} to a singleton'
                ' {element_type}Set.'.format(
                    element_type=self.element_typename.capitalize()
                )
            )

        # native set.add silently ignores duplicates; detect that via the
        # unchanged length and raise instead.
        prev_length = len(self.elements)
        self.elements.add(new_element)
        if len(self.elements) == prev_length:
            raise ValidationError(
                'Tried to add a duplicate entry to an {element_type}Set.'.format(
                    element_type=self.element_typename.capitalize()
                )
            )

    @staticmethod
    def _get_element_with_id(element_list, id):
        '''
        Utility method to get the list element that has
        the passed id. Returns None when no element matches.
        '''
        for el in element_list:
            if el.id == id:
                return el

    def _set_intersection(self, other):
        '''
        Returns a list of dicts that represent
        the intersection of the input sets. Will be turned into
        the properly typed sets by the child/calling class.

        Raises ValidationError when the same id carries conflicting
        attribute values in the two sets.
        '''
        return_list = []
        intersection_set = self.elements.intersection(other.elements)
        for x in intersection_set:
            attr_dict = {}
            _id = x.id
            el1 = BaseElementSet._get_element_with_id(self.elements, _id)
            el2 = BaseElementSet._get_element_with_id(other.elements, _id)
            if el1 and el2:
                # check that we don't have conflicting info.
                # e.g. if one attribute dict sets a particular attribute
                # to one value and the other is different, reject it.
                # Don't make any assumptions about how that conflict should be handled.
                d1 = el1.attributes
                d2 = el2.attributes
                intersecting_attributes = [x for x in d1 if x in d2]
                for k in intersecting_attributes:
                    if d1[k] != d2[k]:
                        raise ValidationError('When performing an intersection'
                            ' of two sets, encountered a conflict in the attributes.'
                            ' The key "{k}" has differing values of {x} and {y}'.format(
                                k = k,
                                x = d1[k],
                                y = d2[k]
                            )
                        )
                attr_dict.update(el1.attributes)
                attr_dict.update(el2.attributes)
                return_list.append({'id':_id, 'attributes': attr_dict})
        return return_list

    def _set_union(self, other):
        '''
        Return a list of dicts that represent the UNION of the input sets.
        Will be turned into properly typed sets (e.g. ObservationSet, FeatureSet)
        by the calling class (a child class)
        '''
        return_list = []
        # need to check that the intersecting elements don't have any issues like
        # conflicting attributes. `set_intersection` (no underscore) is the
        # public, typed-set variant expected to be provided by child classes.
        intersection_set = self.set_intersection(other)
        union_set = self.elements.union(other.elements)
        for x in union_set:
            _id = x.id
            el = BaseElementSet._get_element_with_id(intersection_set.elements, _id)
            if el: # was part of the intersection set
                return_list.append(el.to_dict())
            else: # was NOT part of the intersection set
                return_list.append({'id':_id, 'attributes': x.attributes})
        return return_list

    def _set_difference(self, other):
        '''
        Returns a set of Observation or Feature instances
        to the calling class of the child, which will be responsible
        for creating a full ObservationSet or FeatureSet
        '''
        return self.elements.difference(other.elements)

    def is_equivalent_to(self, other):
        '''Alias for equality comparison.'''
        return self.__eq__(other)

    def is_proper_subset_of(self, other):
        # `set_difference` (public, typed) is expected from child classes.
        return len(other.set_difference(self)) > 0

    def is_proper_superset_of(self, other):
        return len(self.set_difference(other)) > 0

    def __len__(self):
        return len(self.elements)

    def __eq__(self, other):
        return (self.elements == other.elements) \
            and (self.multiple == other.multiple)

    def __hash__(self):
        # frozenset hashing is order-independent, so equal sets always hash
        # equal. Fix: hashing tuple(self.elements) depended on set iteration
        # order and could yield different hashes for equal instances.
        return hash(frozenset(self.elements))

    def __repr__(self):
        s = ','.join([str(x) for x in self.elements])
        return 'A set of {element_type}s:{{{obs}}}'.format(
            obs=s,
            element_type=self.element_typename.capitalize()
        )

    def to_dict(self):
        '''Serialize to {'multiple': bool, 'elements': [element dicts]}.'''
        d = {}
        d['multiple'] = self.multiple
        d['elements'] = [x.to_dict() for x in self.elements]
        return d
from ass2 import create_graph
from random import random
import sys
import heapq
import time
class Vertex:
    """A graph vertex for Prim's algorithm on an implicit complete graph.

    Attributes:
        adj:  optional adjacency list of (vertex, weight) pairs.
        cost: cheapest known edge weight connecting this vertex to the
              growing tree (starts at 1, an upper bound for random() draws).
        prev: index of the tree vertex providing that cheapest edge.
        idx:  this vertex's index.
    """

    def __init__(self, idx, adj=None):
        # adj is a [list of (vertex, weight) pairs]
        self.adj = adj
        self.cost = 1
        self.prev = None
        self.idx = idx

    def __lt__(self, other):
        """Order vertices by cost.

        Fix: the original defined Python 2's `__cmp__` using the removed
        `cmp` builtin, so any comparison (e.g. via heapq) raised NameError
        on Python 3; `__lt__` restores ordering support.
        """
        return self.cost < other.cost
def randomMST(size):
    """Total weight of a minimum spanning tree of the complete graph on
    `size` vertices whose edge weights are drawn lazily from random().

    Prim's algorithm without a heap: each round removes the cheapest
    fringe vertex, then scans the remaining vertices, relaxing their best
    edge against a fresh random weight to the vertex just added, while
    tracking the minimum for the next round. O(size^2) time — reasonable
    here, since the complete graph has ~size^2 edges anyway.
    """
    # initial node 0
    v = set()      # vertices not yet added to the tree
    minV = None    # cheapest fringe vertex to add next
    ans = 0        # accumulated MST weight
    # create size vertices
    for i in range(size):
        newVertex = Vertex(i)
        # initialize cost of some vertex to 0
        if i == 0:
            newVertex.cost = 0
            minV = newVertex
        v.add(newVertex)
    while (len(v) > 0):
        cur = minV
        v.remove(cur)
        ans += cur.cost
        # Sentinel with default cost 1: random() draws are in [0, 1), so
        # every remaining vertex's relaxed cost beats the sentinel below.
        minV = Vertex(-1)
        for j in v:
            newEdge = random()
            if j.cost > newEdge:
                j.cost = newEdge
                j.prev = cur.idx
            if j.cost < minV.cost:
                minV = j
    return ans
if __name__ == '__main__':
    # size can be between 30 - 50k
    if len(sys.argv) < 2:
        print('Please input a size argument!')
        exit()
    # time the MST construction and report both the weight and the runtime
    started = time.time()
    print(randomMST(int(sys.argv[1])))
    elapsed = time.time() - started
    print('total time to execute: ' + str(elapsed))
    #create_graph(int(sys.argv[1]), None, randomWithinCircle)
|
import pytest
from scraper import nyc
# Argument names for test_parse_trip_id's parametrize call; the order
# matches the tuples in PARSE_TRIP_ID_PARAMS below.
PARSE_TRIP_ID_ARGNAMES = [
    "trip_id",
    "sub_division",
    "effective_date",
    "service_day",
    "origin_time",
    "trip_path",
    "route_id",
    "direction",
    "path_identifier",
]
# Each tuple is a raw trip id followed by the component fields that
# nyc.parse_trip_id is expected to extract from it.
PARSE_TRIP_ID_PARAMS = [
    (
        "AFA19GEN-1037-Sunday-00_010600_1..S03R",
        "A",
        "FA19GEN",
        nyc.ServiceDay.SUNDAY,
        "010600",
        "1..S03R",
        "1",
        "S",
        "03R",
    ),
    (
        "AFA19GEN-GS010-Saturday-00_036400_GS.S01R",
        "A",
        "FA19GEN",
        nyc.ServiceDay.SATURDAY,
        "036400",
        "GS.S01R",
        "GS",
        "S",
        "01R",
    ),
    (
        "BFA19GEN-N058-Sunday-00_096300_N..S36R",
        "B",
        "FA19GEN",
        nyc.ServiceDay.SUNDAY,
        "096300",
        "N..S36R",
        "N",
        "S",
        "36R",
    ),
    (
        "BFA19SUPP-L024-Sunday-99_090000_L..N01R",
        "B",
        "FA19SUPP",
        nyc.ServiceDay.SUNDAY,
        "090000",
        "L..N01R",
        "L",
        "N",
        "01R",
    ),
    (
        "SIR-FA2017-SI017-Weekday-08_121100_SI..N03R",
        "SIR",
        "FA2017",
        nyc.ServiceDay.WEEKDAY,
        "121100",
        "SI..N03R",
        "SI",
        "N",
        "03R",
    ),
]
@pytest.mark.parametrize(",".join(PARSE_TRIP_ID_ARGNAMES), PARSE_TRIP_ID_PARAMS)
def test_parse_trip_id(
    trip_id,
    sub_division,
    effective_date,
    service_day,
    origin_time,
    trip_path,
    route_id,
    direction,
    path_identifier,
):
    """parse_trip_id must split a raw trip id into all component fields."""
    parsed = nyc.parse_trip_id(trip_id)
    expected = {
        "sub_division": sub_division,
        "effective_date": effective_date,
        "service_day": service_day,
        "origin_time": origin_time,
        "trip_path": trip_path,
        "route_id": route_id,
        "direction": direction,
        "path_identifier": path_identifier,
    }
    for field, value in expected.items():
        assert getattr(parsed, field) == value
# (full trip_id, expected abbreviated ids) pairs for test_short_trip_id.
SHORT_TRIP_ID_PARAMS = [
    ("AFA19GEN-1037-Sunday-00_010600_1..S03R", ["010600_1..S", "010600_1..S03R"]),
    ("AFA19GEN-GS010-Saturday-00_036400_GS.S01R", ["036400_GS.S", "036400_GS.S01R"]),
    ("BFA19GEN-N058-Sunday-00_096300_N..S36R", ["096300_N..S", "096300_N..S36R"]),
    ("BFA19SUPP-L024-Sunday-99_090000_L..N01R", ["090000_L..N", "090000_L..N01R"]),
    (
        "SIR-FA2017-SI017-Weekday-08_121100_SI..N03R",
        ["121100_SI..N", "121100_SI..N03R"],
    ),
]
@pytest.mark.parametrize("trip_id,short_trip_ids", SHORT_TRIP_ID_PARAMS)
def test_short_trip_id(trip_id, short_trip_ids):
    """short_trip_ids must yield exactly the expected abbreviations."""
    produced = set(nyc.short_trip_ids(trip_id))
    assert produced == set(short_trip_ids)
|
import ffmpeg
import os
import subprocess
class ffmpeg_image_pipe:
    """Context manager that pipes raw RGB32 frames into an ffmpeg process.

    Usage: `with ffmpeg_image_pipe(w, h, "out.gif", encoder="gif") as proc:`
    then write `w*h*4`-byte frames to `proc.stdin`.

    Fixes:
      * Per-instance options are now a copy of the encoder preset;
        previously `self.options` aliased the shared class-level `encoders`
        entry and `__init__` mutated it, polluting every later instance.
      * getAvailableEncoders/isAnimatedEncoder are proper @staticmethods
        (they took no self and broke when called on an instance).
      * The `removeAudio` flag is honored; it was previously ignored and
        audio was always stripped. The default (True) keeps that behavior.
    """

    # pixel format / codec (and optional filter graph) preset per format
    encoders = {
        'gif': {
            'pix_fmt':'rgb8',
            'codec':"gif",
            'filter_complex': '[0:v]split[x][z];[z]palettegen[y];[x]fifo[x];[x][y]paletteuse'
        },
        'png': {
            'pix_fmt':'rgba',
            'codec':"png"
        },
        'vp8':{
            'pix_fmt':'yuv422p',
            'codec':"libvpx"
        },
        'vp9':{
            'pix_fmt':'yuv422p',
            'codec':"libvpx-vp9"
        }
    }

    @staticmethod
    def getAvailableEncoders():
        """Names of all supported encoders."""
        return list(ffmpeg_image_pipe.encoders.keys())

    @staticmethod
    def isAnimatedEncoder(encoder):
        """True when `encoder` produces an animated output format."""
        return encoder in ['gif', 'vp8', 'vp9']

    def __init__(self,w,h,filename,fps=25,transparent=False,encoder='png',removeAudio = True,quiet=False):
        """Configure (but do not yet spawn) the ffmpeg pipeline.

        Args:
            w, h: frame dimensions in pixels.
            filename: output file path.
            fps: output frame rate.
            transparent: currently unused.
            encoder: one of the keys of `encoders`.
            removeAudio: strip the audio track from the output.
            quiet: suppress ffmpeg's banner/stats/log output.

        Raises:
            ValueError: if `encoder` is not supported.
        """
        self.width = w
        self.height = h
        self.out_filename = filename
        self.fps = float(fps)
        self.quiet = quiet
        # validate encoder
        supported_encoders = list(ffmpeg_image_pipe.encoders.keys())
        if encoder not in supported_encoders:
            raise ValueError('Invalid encoder please choose on of the following {}'.format(supported_encoders))
        self.multi_output = False
        # copy the preset so per-instance tweaks below don't mutate the
        # shared class-level table
        self.options = dict(ffmpeg_image_pipe.encoders[encoder])
        # set FPS
        self.options['r'] = self.fps
        # optionally strip the audio track
        if removeAudio:
            self.options['an'] = None

    def __enter__(self):
        """Spawn ffmpeg reading raw frames from stdin; returns the process."""
        global_args = []
        ffinput = ffmpeg.input('pipe:',framerate=self.fps, format='rawvideo', pix_fmt='rgb32',hwaccel='auto',s='{}x{}'.format(self.width, self.height))
        if self.quiet:
            global_args = ['-hide_banner', '-nostats', '-loglevel', 'fatal']
        self.proc = (
            ffinput
            .output(self.out_filename, **self.options)
            .overwrite_output()
            .global_args(*global_args)
            .run_async(pipe_stdin=True)
        )
        return self.proc

    def __exit__(self, exc_type, exc_value, traceback):
        # Simple cleanup: close stdin so ffmpeg finalizes the file, then wait.
        self.proc.stdin.close()
        self.proc.wait()
|
#!/usr/bin/env python3
"""Generate AST classes from grammar."""
import pkg_resources
from scoff.misc.textx import parse_textx_grammar, build_python_class_text
if __name__ == "__main__":
    # Locate the packaged textX grammar and extract its rule definitions.
    the_grammar = pkg_resources.resource_filename("sm", "state_machine.tx")
    grammar_rules = parse_textx_grammar(the_grammar)
    # Module header for the generated file (escaped quotes become the
    # generated module's docstring).
    dump_txt = """
    \"\"\"Generated AST classes.\"\"\"
    from scoff.ast import ScoffASTObject
    """
    cls_names = []
    # Emit one ScoffASTObject subclass per grammar rule, remembering the
    # generated class names.
    for rule_name, rule_members in grammar_rules.items():
        class_name, class_txt = build_python_class_text(
            rule_name, "ScoffASTObject", *rule_members
        )
        dump_txt += class_txt + "\n\n"
        cls_names.append(class_name)
    # Trailing tuple lets consumers import every generated class at once.
    dump_txt += "GENERATED_AST_CLASSES = ({})".format(", ".join(cls_names))
    print(dump_txt)
|
#!/usr/bin/env python
from itertools import groupby
from operator import attrgetter,itemgetter
import sys
#----------------------------------------------
def printHelp():
    """Return the multi-line usage/help text for this script."""
    s = '''
To Use: Add the Tracer Service to the cmsRun job you want to check for
  stream stalls. Make sure to use the 'printTimstamps' option
    cms.Service("Tracer", printTimestamps = cms.untracked.bool(True))
  After running the job, execute this script and pass the name of the
  log file to the script as the only command line argument.

To Read: The script will then print an 'ASCII art' stall graph which
  consists of the name of the module which either started or stopped
  running on a stream, and the number of modules running on each
  stream at that the moment in time. If the module just started, you
  will also see the amount of time the module spent between finishing
  its prefetching and starting. The state of a module is represented
  by a symbol:
    plus  ("+") the stream has just finished waiting and is starting a module
    minus ("-") the stream just finished running a module
  If a module had to wait more than 0.1 seconds, the end of the line
  will have "STALLED". Once the first 4 events have finished
  processing, the program prints "FINISH INIT". This is useful if one
  wants to ignore stalled caused by startup actions, e.g. reading
  conditions.

  Once the graph is completed, the program outputs the list of modules
  which had the greatest total stall times. The list is sorted by
  total stall time and written in descending order. In addition, the
  list of all stall times for the module is given.'''
    return s
# A wait longer than this is reported as a stall.
kStallThreshold=100 #in milliseconds
# Set to True by chooseParser when the input is Tracer (not StallMonitor) output.
kTracerInput=False
#Stream states
kStarted=0
kFinished=1
kPrefetchEnd=2
#Special names
kSourceFindEvent = "sourceFindEvent"
kSourceDelayedRead ="sourceDelayedRead"
#----------------------------------------------
def parseStallMonitorOutput(f):
    """Parse a StallMonitor-format log into transition records.

    Returns (processingSteps, numStreams, maxNameSize) where
    processingSteps is a list of (moduleName, transition, streamID, time)
    tuples, transition one of kStarted/kFinished/kPrefetchEnd and time
    in milliseconds since begin-of-job. Closes `f` before returning.

    NOTE(review): foundEventToStartFrom is assigned but never used.
    """
    processingSteps = []
    numStreams = 0
    maxNameSize = 0
    foundEventToStartFrom = False
    moduleNames = {}
    for rawl in f:
        l = rawl.strip()
        # '#M <id> <name>' comment lines map module ids to names; all other
        # comment or blank lines are skipped.
        if not l or l[0] == '#':
            if len(l) > 5 and l[0:2] == "#M":
                (id,name)=tuple(l[2:].split())
                moduleNames[id] = name
            continue
        (step,payload) = tuple(l.split(None,1))
        payload=payload.split()

        # Payload format is:
        # <stream id> <..other fields..> <time since begin job>
        stream = int(payload[0])
        time = int(payload[-1])
        trans = None
        numStreams = max(numStreams, stream+1)

        # 'S' = begin of event creation in source
        # 's' = end of event creation in source
        if step == 'S' or step == 's':
            name = kSourceFindEvent
            trans = kStarted
            # The start of an event is the end of the framework part
            if step == 's':
                trans = kFinished
        else:
            # moduleID is the second payload argument for all steps below
            moduleID = payload[1]

            # 'p' = end of module prefetching
            # 'M' = begin of module processing
            # 'm' = end of module processing
            if step == 'p' or step == 'M' or step == 'm':
                trans = kStarted
                if step == 'p':
                    trans = kPrefetchEnd
                elif step == 'm':
                    trans = kFinished
                name = moduleNames[moduleID]

            # Delayed read from source
            # 'R' = begin of delayed read from source
            # 'r' = end of delayed read from source
            if step == 'R' or step == 'r':
                trans = kStarted
                if step == 'r':
                    trans = kFinished
                name = kSourceDelayedRead

        maxNameSize = max(maxNameSize, len(name))
        processingSteps.append((name,trans,stream,time))
    f.close()
    return (processingSteps,numStreams,maxNameSize)
#----------------------------------------------
def getTime(line):
    """Millisecond value of the 'HH:MM:SS.fff' timestamp found in the
    second whitespace-separated field of a Tracer log line."""
    stamp = line.split(" ")[1]
    hours, minutes, seconds = stamp.split(":")
    total_seconds = int(hours) * 60 * 60 + int(minutes) * 60 + float(seconds)
    return int(1000 * total_seconds)  # convert to milliseconds
#----------------------------------------------
def parseTracerOutput(f):
    """Parse a Tracer-service log into transition records.

    Same return contract as parseStallMonitorOutput:
    (processingSteps, numStreams, maxNameSize), with times in
    milliseconds relative to the first timestamp seen. Closes `f`.

    NOTE(review): the local `delayed` flags are assigned but never used.
    """
    processingSteps = []
    numStreams = 0
    maxNameSize = 0
    startTime = 0
    for l in f:
        # Framework source start/stop lines
        if l.find("processing event :") != -1:
            time = getTime(l)
            if startTime == 0:
                startTime = time
            time = time - startTime
            streamIndex = l.find("stream = ")
            stream = int(l[streamIndex+9:l.find(" ",streamIndex+10)])
            name = kSourceFindEvent
            trans = kFinished
            #the start of an event is the end of the framework part
            if l.find("starting:") != -1:
                trans = kStarted
            processingSteps.append((name,trans,stream,time))
            numStreams = max(numStreams, stream+1)
        # Module prefetch-finish / processing start / processing finish lines
        if l.find("processing event for module") != -1:
            time = getTime(l)
            if startTime == 0:
                startTime = time
            time = time - startTime
            trans = kStarted
            stream = 0
            delayed = False
            if l.find("finished:") != -1:
                if l.find("prefetching") != -1:
                    trans = kPrefetchEnd
                else:
                    trans = kFinished
            else:
                if l.find("prefetching") != -1:
                    #skip this since we don't care about prefetch starts
                    continue
            streamIndex = l.find("stream = ")
            stream = int( l[streamIndex+9:l.find(" ",streamIndex+10)])
            # module label is the first single-quoted token on the line
            name = l.split("'")[1]
            maxNameSize = max(maxNameSize, len(name))
            processingSteps.append((name,trans,stream,time))
            numStreams = max(numStreams, stream+1)
        # Delayed-read-from-source start/stop lines
        if l.find("event delayed read from source") != -1:
            time = getTime(l)
            if startTime == 0:
                startTime = time
            time = time - startTime
            trans = kStarted
            stream = 0
            delayed = False
            if l.find("finished:") != -1:
                trans = kFinished
            streamIndex = l.find("stream = ")
            stream = int(l[streamIndex+9:l.find(" ",streamIndex+10)])
            name = kSourceDelayedRead
            maxNameSize = max(maxNameSize, len(name))
            processingSteps.append((name,trans,stream,time))
            numStreams = max(numStreams, stream+1)
    f.close()
    return (processingSteps,numStreams,maxNameSize)
#----------------------------------------------
def chooseParser(inputFile):
    """Sniff the first line of `inputFile` and return the matching parser
    function (StallMonitor vs Tracer); exits on unknown formats.

    Side effect: sets the global kTracerInput flag for Tracer logs.
    NOTE: this script is Python 2 (print statements throughout).
    """
    firstLine = inputFile.readline().rstrip()
    inputFile.seek(0) # Rewind back to beginning
    if firstLine.find("# Step") != -1:
        print "> ... Parsing StallMonitor output."
        return parseStallMonitorOutput
    elif firstLine.find("++") != -1:
        global kTracerInput
        kTracerInput = True
        print "> ... Parsing Tracer output."
        return parseTracerOutput
    else:
        inputFile.close()
        print "Unknown input format."
        exit(1)
#----------------------------------------------
def readLogFile(inputFile):
    """Parse `inputFile` with whichever parser matches its format."""
    return chooseParser(inputFile)(inputFile)
#----------------------------------------------
# Patterns:
#
# source: The source just records how long it was spent doing work,
# not how long it was stalled. We can get a lower bound on the stall
# time by measuring the time the stream was doing no work up till
# the source was run.
# modules: The time between prefetch finished and 'start processing' is
# the time it took to acquire any resources
#
def findStalledModules(processingSteps, numStreams):
    """Collect per-module stall times from the transition records.

    A module "stalls" when more than kStallThreshold ms pass between the
    end of its prefetch and the start of its processing; a delayed source
    read counts stream-idle time before it (a lower bound, per the
    comment block above). Returns {moduleName: [waitTimes...]} for waits
    above the threshold.
    """
    streamTime = [0]*numStreams  # time of the last finish seen per stream
    stalledModules = {}
    # per-stream map: module name -> time its prefetch finished
    modulesActiveOnStream = [{} for x in xrange(numStreams)]
    for n,trans,s,time in processingSteps:
        waitTime = None
        modulesOnStream = modulesActiveOnStream[s]
        if trans == kPrefetchEnd:
            modulesOnStream[n] = time
        if trans == kStarted:
            if n in modulesOnStream:
                waitTime = time - modulesOnStream[n]
            if n == kSourceDelayedRead:
                # idle stream before a delayed read: count the gap
                if 0 == len(modulesOnStream):
                    waitTime = time - streamTime[s]
        if trans == kFinished:
            if n != kSourceDelayedRead and n!=kSourceFindEvent:
                modulesOnStream.pop(n, None)
            streamTime[s] = time
        if waitTime is not None:
            if waitTime > kStallThreshold:
                t = stalledModules.setdefault(n,[])
                t.append(waitTime)
    return stalledModules
#----------------------------------------------
def createAsciiImage(processingSteps, numStreams, maxNameSize):
streamTime = [0]*numStreams
streamState = [0]*numStreams
modulesActiveOnStreams = [{} for x in xrange(numStreams)]
for n,trans,s,time in processingSteps:
modulesActiveOnStream = modulesActiveOnStreams[s]
waitTime = None
if trans == kPrefetchEnd:
modulesActiveOnStream[n] = time
continue
if trans == kStarted:
if n != kSourceFindEvent:
streamState[s] +=1
if n in modulesActiveOnStream:
waitTime = time - modulesActiveOnStream[n]
if n == kSourceDelayedRead:
if streamState[s] == 0:
waitTime = time-streamTime[s]
if trans == kFinished:
if n != kSourceDelayedRead and n!=kSourceFindEvent:
modulesActiveOnStream.pop(n, None)
if n != kSourceFindEvent:
streamState[s] -=1
streamTime[s] = time
states = "%-*s: " % (maxNameSize,n)
if trans == kStarted:
states +="+ "
if trans == kFinished:
states +="- "
for index, state in enumerate(streamState):
if n==kSourceFindEvent and index == s:
states +="* "
else:
states +=str(state)+" "
if waitTime is not None:
states += " %.2f"% (waitTime/1000.)
if waitTime > kStallThreshold:
states += " STALLED "+str(time/1000.)+" "+str(s)
print states
return stalledModules
#----------------------------------------------
def printStalledModulesInOrder(stalledModules):
    # Pretty-print each stalled module with its total stall time and the
    # individual stall times (converted msec -> sec), biggest offender first.
    priorities = []
    maxNameSize = 0
    for name,t in stalledModules.iteritems():
        maxNameSize = max(maxNameSize, len(name))
        # Longest individual stalls first within one module.
        t.sort(reverse=True)
        priorities.append((name,sum(t),t))
    # Order modules by summed stall time, descending (Python 2 cmp-style sort).
    def sumSort(i,j):
        return cmp(i[1],j[1])
    priorities.sort(cmp=sumSort, reverse=True)
    nameColumn = "Stalled Module"
    maxNameSize = max(maxNameSize, len(nameColumn))
    stallColumn = "Tot Stall Time"
    stallColumnLength = len(stallColumn)
    print "%-*s" % (maxNameSize, nameColumn), "%-*s"%(stallColumnLength,stallColumn), " Stall Times"
    for n,s,t in priorities:
        paddedName = "%-*s:" % (maxNameSize,n)
        print paddedName, "%-*.2f"%(stallColumnLength,s/1000.), ", ".join([ "%.2f"%(x/1000.) for x in t])
#--------------------------------------------------------
class Point:
    """A mutable 2-D point used to build the stall/stack graphs."""

    def __init__(self, x_, y_):
        self.x = x_
        self.y = y_

    def __str__(self):
        return "(x: {}, y: {})".format(self.x, self.y)

    # repr and str render identically.
    __repr__ = __str__
#--------------------------------------------------------
def reduceSortedPoints(ps):
    """Merge consecutive points sharing the same x (summing their y), then
    drop entries whose accumulated y is zero.  Assumes ps is sorted by x.
    Note: the merge accumulates into the point objects themselves, and a
    list of fewer than two points is returned untouched (no zero filter).
    """
    if len(ps) < 2:
        return ps
    merged = []
    current = ps[0]
    for nxt in ps[1:]:
        if current.x == nxt.x:
            current.y += nxt.y
        else:
            merged.append(current)
            current = nxt
    merged.append(current)
    return [pt for pt in merged if pt.y != 0]
# -------------------------------------------
def adjacentDiff(*pairLists):
    # Convert (start, length) spans into +1/-1 edge points sorted by x:
    # +1 where a span opens, -1 where it closes; zero-length spans skipped.
    points = []
    for pairList in pairLists:
        points.extend(Point(pair[0], 1) for pair in pairList if pair[1] != 0)
        points.extend(Point(sum(pair), -1) for pair in pairList if pair[1] != 0)
    points.sort(key=attrgetter('x'))
    return points
stackType = 'stack'  # NOTE(review): not referenced anywhere in this chunk -- verify before removing
# --------------------------------------------
class Stack:
    """Accumulates layers of graph edge-points; each update merges the new
    points with the previous layer's points so layers nest cumulatively.
    data holds (graphType, reducedPoints) tuples in insertion order."""

    def __init__(self):
        self.data = []

    def update(self, graphType, points):
        combined = points
        if self.data:
            # += mutates the caller's list, matching the original behaviour.
            combined += self.data[-1][1]
        combined.sort(key=attrgetter('x'))
        self.data.append((graphType, reduceSortedPoints(combined)))
#---------------------------------------------
# StreamInfoElement
class StreamInfoElement:
    """One colored time block on a stream's timeline: start, duration, color."""

    def __init__(self, begin_, delta_, color_):
        self.begin = begin_
        self.delta = delta_
        self.color = color_

    def unpack(self):
        """Return (begin, delta, color) as a tuple."""
        return self.begin, self.delta, self.color
#----------------------------------------------
# Consolidating contiguous blocks with the same color
# drastically reduces the size of the pdf file.
def consolidateContiguousBlocks(numStreams, streamInfo):
    # Merge adjacent same-colored blocks on each stream's timeline; fewer
    # blocks drastically reduces the size of the resulting pdf file.
    merged = [[] for _ in xrange(numStreams)]
    for s in xrange(numStreams):
        begin, delta, color = streamInfo[s][0].unpack()
        for element in streamInfo[s][1:]:
            b, d, c = element.unpack()
            if c == color and begin + delta == b:
                # Same color and exactly contiguous: extend the open block.
                delta += d
            else:
                merged[s].append(StreamInfoElement(begin, delta, color))
                begin, delta, color = b, d, c
        merged[s].append(StreamInfoElement(begin, delta, color))
    return merged
#----------------------------------------------
# Consolidating contiguous blocks with the same color drastically
# reduces the size of the pdf file. Same functionality as the
# previous function, but with slightly different implementation.
def mergeContiguousBlocks(blocks):
    """Collapse runs of (start, length, height) tuples where consecutive
    entries share a height and touch exactly end-to-start.
    Same consolidation idea as consolidateContiguousBlocks, for plain tuples.
    """
    start0, length0, height0 = blocks[0]
    out = []
    for start, length, height in blocks[1:]:
        if height == height0 and start0 + length0 == start:
            length0 += length
        else:
            out.append((start0, length0, height0))
            start0, length0, height0 = start, length, height
    out.append((start0, length0, height0))
    return out
#----------------------------------------------
def createPDFImage(pdfFile, shownStacks, processingSteps, numStreams, stalledModuleInfo):
    # Render a per-stream timeline of module activity to *pdfFile* via
    # matplotlib: green = modules running, red = a stalled module running,
    # orange = source work, blue overlay = more than one concurrent module.
    # When *shownStacks* is true a summed "threads vs time" stack plot is
    # drawn underneath the timeline.
    stalledModuleNames = set([x for x in stalledModuleInfo.iterkeys()])
    streamInfo = [[] for x in xrange(numStreams)]
    modulesActiveOnStreams = [{} for x in xrange(numStreams)]
    streamLastEventEndTimes = [None]*numStreams
    streamRunningTimes = [[] for x in xrange(numStreams)]
    maxNumberOfConcurrentModulesOnAStream = 1
    streamInvertedMessageFromModule = [set() for x in xrange(numStreams)]
    for n,trans,s,time in processingSteps:
        startTime = None
        if streamLastEventEndTimes[s] is None:
            streamLastEventEndTimes[s]=time
        if trans == kStarted:
            if n == kSourceFindEvent:
                # We assume the time from the end of the last event
                # for a stream until the start of a new event for that
                # stream is taken up by the source.
                startTime = streamLastEventEndTimes[s]
                # NOTE(review): set(n) builds a set of the *characters* of the
                # module name, so the "orange" source checks below never match
                # this entry -- probably meant set([n]); confirm before fixing.
                moduleNames = set(n)
            else:
                activeModules = modulesActiveOnStreams[s]
                moduleNames = set(activeModules.iterkeys())
                if n in streamInvertedMessageFromModule[s] and kTracerInput:
                    # This is the rare case where a finished message
                    # is issued before the corresponding started.
                    streamInvertedMessageFromModule[s].remove(n)
                    continue
                activeModules[n] = time
                nModulesRunning = len(activeModules)
                streamRunningTimes[s].append(Point(time,1))
                maxNumberOfConcurrentModulesOnAStream = max(maxNumberOfConcurrentModulesOnAStream, nModulesRunning)
                if nModulesRunning > 1:
                    # Need to create a new time span to avoid overlaps in graph.
                    startTime = min(activeModules.itervalues())
                    for k in activeModules.iterkeys():
                        activeModules[k]=time
        if trans == kFinished:
            if n == kSourceFindEvent:
                streamLastEventEndTimes[s]=time
            else:
                activeModules = modulesActiveOnStreams[s]
                if n not in activeModules and kTracerInput:
                    # This is the rare case where a finished message
                    # is issued before the corresponding started.
                    streamInvertedMessageFromModule[s].add(n)
                    continue
                streamRunningTimes[s].append(Point(time,-1))
                startTime = activeModules[n]
                moduleNames = set(activeModules.iterkeys())
                del activeModules[n]
                nModulesRunning = len(activeModules)
                if nModulesRunning > 0:
                    # Reset start time for remaining modules to this time
                    # to avoid overlapping time ranges when making the plot.
                    for k in activeModules.iterkeys():
                        activeModules[k] = time
        if startTime is not None:
            c="green"
            if (kSourceDelayedRead in moduleNames) or (kSourceFindEvent in moduleNames):
                c = "orange"
            else:
                # NOTE(review): this loop reuses and clobbers the loop
                # variable `n` of the outer processingSteps loop.
                for n in moduleNames:
                    if n in stalledModuleNames:
                        c="red"
                        break
            streamInfo[s].append(StreamInfoElement(startTime, time-startTime, c))
    streamInfo = consolidateContiguousBlocks(numStreams, streamInfo)
    # One subplot row for the timeline alone; a second when stacks are shown.
    nr = 1
    if shownStacks:
        nr += 1
    fig, ax = plt.subplots(nrows=nr, squeeze=True)
    axStack = None
    if shownStacks:
        [xH,yH] = fig.get_size_inches()
        fig.set_size_inches(xH,yH*4/3)
        ax = plt.subplot2grid((4,1),(0,0), rowspan=3)
        axStack = plt.subplot2grid((4,1),(3,0))
    ax.set_xlabel("Time (sec)")
    ax.set_ylabel("Stream ID")
    ax.set_ylim(-0.5,numStreams-0.5)
    ax.yaxis.set_ticks(xrange(numStreams))
    # Row height per concurrent module so overlays stack within one stream row.
    height = 0.8/maxNumberOfConcurrentModulesOnAStream
    allStackTimes={'green': [], 'red': [], 'blue': [], 'orange': []}
    for i,perStreamInfo in enumerate(streamInfo):
        times=[(x.begin/1000., x.delta/1000.) for x in perStreamInfo] # Scale from msec to sec.
        colors=[x.color for x in perStreamInfo]
        ax.broken_barh(times,(i-0.4,height),facecolors=colors,edgecolors=colors,linewidth=0)
        for info in perStreamInfo:
            allStackTimes[info.color].append((info.begin, info.delta))
    # Now superimpose the number of concurrently running modules on to the graph.
    if maxNumberOfConcurrentModulesOnAStream > 1:
        for i,perStreamRunningTimes in enumerate(streamRunningTimes):
            perStreamTimes = sorted(perStreamRunningTimes, key=attrgetter('x'))
            perStreamTimes = reduceSortedPoints(perStreamTimes)
            streamHeight = 0
            preparedTimes = []
            for t1,t2 in zip(perStreamTimes, perStreamTimes[1:]):
                streamHeight += t1.y
                if streamHeight < 2:
                    continue
                preparedTimes.append((t1.x,t2.x-t1.x, streamHeight))
            preparedTimes.sort(key=itemgetter(2))
            preparedTimes = mergeContiguousBlocks(preparedTimes)
            for nthreads, ts in groupby(preparedTimes, itemgetter(2)):
                theTS = [(t[0],t[1]) for t in ts]
                theTimes = [(t[0]/1000.,t[1]/1000.) for t in theTS]
                yspan = (i-0.4+height,height*(nthreads-1))
                ax.broken_barh(theTimes, yspan, facecolors='blue', edgecolors='blue', linewidth=0)
                allStackTimes['blue'].extend(theTS*(nthreads-1))
    if shownStacks:
        print "> ... Generating stack"
        stack = Stack()
        for color in ['green','blue','red','orange']:
            tmp = allStackTimes[color]
            tmp = reduceSortedPoints(adjacentDiff(tmp))
            stack.update(color, tmp)
        for stk in reversed(stack.data):
            color = stk[0]
            # Now arrange list in a manner that it can be grouped by the height of the block
            height = 0
            xs = []
            for p1,p2 in zip(stk[1], stk[1][1:]):
                height += p1.y
                xs.append((p1.x, p2.x-p1.x, height))
            xs.sort(key = itemgetter(2))
            xs = mergeContiguousBlocks(xs)
            for height, xpairs in groupby(xs, itemgetter(2)):
                finalxs = [(e[0]/1000.,e[1]/1000.) for e in xpairs]
                axStack.broken_barh(finalxs, (0, height), facecolors=color, edgecolors=color, linewidth=0)
        axStack.set_xlabel("Time (sec)");
        axStack.set_ylabel("# threads");
        axStack.set_xlim(ax.get_xlim())
        axStack.tick_params(top='off')
    fig.text(0.1, 0.95, "modules running", color = "green", horizontalalignment = 'left')
    fig.text(0.5, 0.95, "stalled module running", color = "red", horizontalalignment = 'center')
    fig.text(0.9, 0.95, "read from input", color = "orange", horizontalalignment = 'right')
    fig.text(0.5, 0.92, "multiple modules running", color = "blue", horizontalalignment = 'center')
    print "> ... Saving to file: '{}'".format(pdfFile)
    plt.savefig(pdfFile)
#=======================================
if __name__=="__main__":
    import argparse
    import re
    import sys
    # Program options
    parser = argparse.ArgumentParser(description='Convert a cmsRun log with Tracer info into a stream stall graph.',
                                     formatter_class=argparse.RawDescriptionHelpFormatter,
                                     epilog=printHelp())
    parser.add_argument('filename',
                        type=argparse.FileType('r'), # open file
                        help='log file to process')
    parser.add_argument('-g', '--graph',
                        nargs='?',
                        metavar="'stall.pdf'",
                        const='stall.pdf',
                        dest='graph',
                        help='''Create pdf file of stream stall graph. If -g is specified
    by itself, the default file name is \'stall.pdf\'. Otherwise, the
    argument to the -g option is the filename.''')
    parser.add_argument('-s', '--stack',
                        action='store_true',
                        help='''Create stack plot, combining all stream-specific info.
    Can be used only when -g is specified.''')
    args = parser.parse_args()
    # Process parsed options
    inputFile = args.filename
    pdfFile = args.graph
    shownStacks = args.stack
    # Graphics mode only when -g is given; matplotlib is imported lazily so
    # the ASCII-art path needs neither matplotlib nor a display.
    doGraphic = False
    if pdfFile is not None:
        doGraphic = True
        import matplotlib
        # Need to force display since problems with CMSSW matplotlib.
        matplotlib.use("PDF")
        import matplotlib.pyplot as plt
        # Validate the output filename and its extension before doing any work.
        if not re.match(r'^[\w\.]+$', pdfFile):
            print "Malformed file name '{}' supplied with the '-g' option.".format(pdfFile)
            print "Only characters 0-9, a-z, A-Z, '_', and '.' are allowed."
            exit(1)
        if '.' in pdfFile:
            extension = pdfFile.split('.')[-1]
            supported_filetypes = plt.figure().canvas.get_supported_filetypes()
            if not extension in supported_filetypes:
                print "A graph cannot be saved to a filename with extension '{}'.".format(extension)
                print "The allowed extensions are:"
                for filetype in supported_filetypes:
                    print "   '.{}'".format(filetype)
                exit(1)
    if pdfFile is None and shownStacks:
        print "The -s (--stack) option can be used only when the -g (--graph) option is specified."
        exit(1)
    # Parse the log, find stalls, then emit either ASCII art or the PDF graph.
    sys.stderr.write(">reading file: '{}'\n".format(inputFile.name))
    processingSteps,numStreams,maxNameSize = readLogFile(inputFile)
    sys.stderr.write(">processing data\n")
    stalledModules = findStalledModules(processingSteps, numStreams)
    if not doGraphic:
        sys.stderr.write(">preparing ASCII art\n")
        createAsciiImage(processingSteps, numStreams, maxNameSize)
    else:
        sys.stderr.write(">creating PDF\n")
        createPDFImage(pdfFile, shownStacks, processingSteps, numStreams, stalledModules)
    printStalledModulesInOrder(stalledModules)
|
import pygame
class Brick:
    """A 32x32 map tile identified by a one-letter ID.

    Fix: render() used to call pygame.image.load on every frame (a disk
    read per brick per frame) and raised an obscure NameError when the ID
    was unknown.  The sprite is now loaded once, lazily, on the first
    render and cached; an unknown ID fails fast with a KeyError.
    """

    # Tile ID -> sprite file.
    _IMAGE_FILES = {
        'g': 'Assets/Images/grass.bmp',
        'b': 'Assets/Images/brick.bmp',
        's': 'Assets/Images/stone.bmp',
        'q': 'Assets/Images/qmark.bmp',
    }

    def __init__(self, x, y, color, ID):
        self.x = x
        self.y = y
        self.width = 32
        self.height = 32
        self.ID = ID
        self.color = color
        self._image = None  # sprite cache, filled on first render

    def render(self, window):
        """Blit this brick's sprite onto *window* at (x, y)."""
        if self._image is None:
            # Lazy load keeps construction cheap and preserves the original
            # "load at first draw" timing.
            self._image = pygame.image.load(self._IMAGE_FILES[self.ID])
        window.blit(self._image, (self.x, self.y))
|
from django.urls import path
from . import views
from .custom_views import product_views, customer_views, order_views
# URL routes for the shop app; view callables live in the custom_views modules.
# NOTE(review): the route names 'productDedails' and 'chekoutOrder' are
# misspelled, but names are resolved by reverse()/templates elsewhere, so they
# are kept as-is; rename only together with every reference.
urlpatterns = [
    path('', views.index, name='index'),
    path('products/', product_views.Products, name='products'),
    path('products/<int:product_id>/', product_views.ProductDetails, name='productDedails'),
    path('cart/', product_views.CartProducts, name='cartProducts'),
    path('checkout/', order_views.CheckoutOrder, name='chekoutOrder'),
    path('proceed-payment/', order_views.ProceedPayment, name='proceedPayment'),
    path('register/', customer_views.RegisterCustomer, name='registerCustomer'),
    path('register/<uuid:token_id>/', customer_views.ConfirmEmail, name='confirmEmail'),
    path('login/', customer_views.LoginCustomer, name='loginCustomer'),
    path('logout/', customer_views.LogoutCustomer, name='logoutCustomer'),
]
#!/usr/bin/env python3
from ..result import ExpectResult
from ..util.include import *
from ..util import py_exec, py_eval
def expect(vals, rules, peval="#", pexec="%", mode="", **kwargs):
    """Check *vals* against *rules* and return a list of ExpectResult.

    When *mode* equals the eval/exec marker, the whole rule is run as a
    single eval/exec check; otherwise rules are walked recursively.
    An empty/falsy rule set yields an empty result list.
    """
    if mode == peval:
        return [run_expect("", rules, "eval", val=vals, **kwargs)]
    if mode == pexec:
        return [run_expect("", rules, "exec", val=vals, **kwargs)]
    results = []
    if rules:
        _expect_recursive("", results, vals, rules, peval=peval, pexec=pexec, **kwargs)
    return results
def _expect_recursive(root: str, results: list[ExpectResult], vals, rules, peval="#", pexec="%", **kwargs):
    """Walk *rules* alongside *vals*, appending one ExpectResult per leaf rule.

    dict rules: keys prefixed with *pexec*/*peval* run the rule via exec/eval
    against the matching value; plain keys recurse (dict/list rule) or compare
    for equality.  list rules: matched element-by-element by index.  scalar
    rules: direct equality against *vals*.  *root* is the dotted node path
    accumulated so far, used only for reporting.
    """
    if isinstance(rules, dict):
        for key, rule in rules.items():
            # NOTE(review): str.lstrip strips a character *set*, so a key whose
            # own name starts with the marker characters loses all of them in
            # the reported node path -- confirm this is intended.
            root_dot_key = "{}.{}".format(root, key.lstrip(peval).lstrip(pexec)).lstrip(".")
            if key.startswith(pexec):
                if key[len(pexec):] not in vals:
                    results.append(ExpectResult(is_pass=False, message="NoSuchKey", node=root_dot_key, val=None, expect=rule))
                else:
                    results.append(run_expect(root_dot_key, rule, "exec", val=vals[key[len(pexec):]], **kwargs))
            elif key.startswith(peval):
                if key[len(peval):] not in vals:
                    results.append(ExpectResult(is_pass=False, message="NoSuchKey", node=root_dot_key, val=None, expect=rule))
                else:
                    results.append(run_expect(root_dot_key, rule, "eval", val=vals[key[len(peval):]], **kwargs))
            elif key not in vals:
                results.append(ExpectResult(is_pass=False, message="NoSuchKey", node=root_dot_key, val=None, expect=rule))
            elif isinstance(rule, dict) or isinstance(rule, list):
                # Containers must match in type before recursing into them.
                if not isinstance(vals[key], type(rule)):
                    results.append(ExpectResult(is_pass=False, message="TypeDiff", node=root_dot_key, val=vals[key], expect=rule))
                else:
                    _expect_recursive(root_dot_key, results, vals[key], rule, peval=peval, pexec=pexec, **kwargs)
            else:
                results.append(run_expect(root_dot_key, rule, "equal", val=vals[key], **kwargs))
    elif isinstance(rules, list):
        for idx, rule in enumerate(rules):
            root_dot_key = "{}.{}".format(root, idx).lstrip(".")
            if idx >= len(vals):
                results.append(ExpectResult(is_pass=False, message="NoSuchKey", node=root_dot_key, val=None, expect=rule))
            elif isinstance(rule, dict) or isinstance(rule, list):
                if not isinstance(vals[idx], type(rule)):
                    results.append(ExpectResult(is_pass=False, message="TypeDiff", node=root_dot_key, val=vals[idx], expect=rule))
                else:
                    _expect_recursive(root_dot_key, results, vals[idx], rule, peval=peval, pexec=pexec, **kwargs)
            else:
                results.append(run_expect(root_dot_key, rule, "equal", val=vals[idx], **kwargs))
    else:
        # Scalar rule: plain equality against the whole value.
        if vals == rules:
            results.append(ExpectResult(is_pass=True, message="OK", node=root, val=vals, expect=rules))
        else:
            results.append(ExpectResult(is_pass=False, message="NotEqual", node=root, val=vals, expect=rules))
def run_expect(root, rule, func, val=None, **kwargs):
    """Evaluate a single leaf rule against *val* and return one ExpectResult.

    *func* selects the check: "eval" / "exec" run the rule through
    expect_eval / expect_exec; anything else is a plain equality check.
    """
    if func == "eval":
        ok, res = expect_eval(rule, val=val, **kwargs)
        if not ok:
            return ExpectResult(is_pass=False, message="EvalFail", node=root, val=val, expect="{} = {}".format(res, rule))
    elif func == "exec":
        ok, res = expect_exec(rule, val=val, **kwargs)
        if not ok:
            return ExpectResult(is_pass=False, message="ExecFail", node=root, val=val, expect="{} = {}".format(res, rule))
    elif val != rule:
        return ExpectResult(is_pass=False, message="NotEqual", node=root, val=val, expect=rule)
    # All checks share the same passing result shape.
    return ExpectResult(is_pass=True, message="OK", node=root, val=val, expect=rule)
def expect_eval(rule, val=None, **kwargs):
    """Run *rule* through py_eval and return (passed, raw_result).

    A bool result stands on its own; any other result passes only when it
    equals *val*.
    """
    outcome = py_eval(rule, val=val, **kwargs)
    if isinstance(outcome, bool):
        return outcome, outcome
    return outcome == val, outcome
def expect_exec(rule, val=None, **kwargs):
    """Run *rule* through py_exec and return (passed, raw_result).

    Mirrors expect_eval: bool results stand alone, anything else passes
    only when equal to *val*.
    """
    outcome = py_exec(rule, val=val, **kwargs)
    if isinstance(outcome, bool):
        return outcome, outcome
    return outcome == val, outcome
def check(rule, val=None, **kwargs):
    """Convenience wrapper: True iff expect_eval passes for *rule*."""
    return expect_eval(rule, val=val, **kwargs)[0]
|
import errno
from pypy.interpreter.error import oefmt
from pypy.module.cpyext.api import cpython_api, CONST_STRING
from pypy.module.cpyext.pyobject import PyObject
from rpython.rlib import rdtoa
from rpython.rlib import rfloat
from rpython.rlib import rposix, jit
from rpython.rlib.rarithmetic import intmask
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.lltypesystem import rffi
# PyOS_double_to_string's "type", if non-NULL, will be set to one of:
Py_DTST_FINITE = 0    # val is a finite number
Py_DTST_INFINITE = 1  # val is +/- infinity
Py_DTST_NAN = 2       # val is not-a-number
# Match the "type" back to values in CPython
DOUBLE_TO_STRING_TYPES_MAP = {
    rfloat.DIST_FINITE: Py_DTST_FINITE,
    rfloat.DIST_INFINITY: Py_DTST_INFINITE,
    rfloat.DIST_NAN: Py_DTST_NAN
}
@cpython_api([CONST_STRING, rffi.CCHARPP, PyObject], rffi.DOUBLE, error=-1.0)
@jit.dont_look_inside       # direct use of _get_errno()
def PyOS_string_to_double(space, s, endptr, w_overflow_exception):
    """Convert a string s to a double, raising a Python
    exception on failure.  The set of accepted strings corresponds to
    the set of strings accepted by Python's float() constructor,
    except that s must not have leading or trailing whitespace.
    The conversion is independent of the current locale.

    If endptr is NULL, convert the whole string.  Raise
    ValueError and return -1.0 if the string is not a valid
    representation of a floating-point number.

    If endptr is not NULL, convert as much of the string as
    possible and set *endptr to point to the first unconverted
    character.  If no initial segment of the string is the valid
    representation of a floating-point number, set *endptr to point
    to the beginning of the string, raise ValueError, and return
    -1.0.

    If s represents a value that is too large to store in a float
    (for example, "1e500" is such a string on many platforms) then
    if overflow_exception is NULL return Py_HUGE_VAL (with
    an appropriate sign) and don't set any exception.  Otherwise,
    overflow_exception must point to a Python exception object;
    raise that exception and return -1.0.  In both cases, set
    *endptr to point to the first character after the converted value.

    If any other error occurs during the conversion (for example an
    out-of-memory error), set the appropriate Python exception and
    return -1.0.
    """
    user_endptr = True
    try:
        if not endptr:
            # Caller did not supply an endptr: allocate a scratch one so the
            # whole-string check below can see where parsing stopped.
            endptr = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw')
            user_endptr = False
        result = rdtoa.dg_strtod(s, endptr)
        # Number of characters consumed, computed as a raw pointer difference.
        endpos = (rffi.cast(rffi.LONG, endptr[0]) -
                  rffi.cast(rffi.LONG, s))
        if endpos == 0 or (not user_endptr and not endptr[0][0] == '\0'):
            raise oefmt(space.w_ValueError,
                        "invalid input at position %d", endpos)
        err = rffi.cast(lltype.Signed, rposix._get_errno())
        if err == errno.ERANGE:
            # Overflow from dg_strtod: clear errno, then either return an
            # infinity silently or raise the caller-supplied exception type.
            rposix._set_errno(rffi.cast(rffi.INT, 0))
            if w_overflow_exception is None:
                if result > 0:
                    return rfloat.INFINITY
                else:
                    return -rfloat.INFINITY
            else:
                raise oefmt(w_overflow_exception, "value too large")
        return result
    finally:
        # Only free the endptr we allocated ourselves.
        if not user_endptr:
            lltype.free(endptr, flavor='raw')
@cpython_api([rffi.DOUBLE, lltype.Char, rffi.INT_real, rffi.INT_real, rffi.INTP], rffi.CCHARP)
def PyOS_double_to_string(space, val, format_code, precision, flags, ptype):
    """Convert a double val to a string using supplied
    format_code, precision, and flags.

    format_code must be one of 'e', 'E', 'f', 'F',
    'g', 'G' or 'r'.  For 'r', the supplied precision
    must be 0 and is ignored.  The 'r' format code specifies the
    standard repr() format.

    flags can be zero or more of the values Py_DTSF_SIGN,
    Py_DTSF_ADD_DOT_0, or Py_DTSF_ALT, or-ed together:

    Py_DTSF_SIGN means to always precede the returned string with a sign
    character, even if val is non-negative.

    Py_DTSF_ADD_DOT_0 means to ensure that the returned string will not look
    like an integer.

    Py_DTSF_ALT means to apply "alternate" formatting rules.  See the
    documentation for the PyOS_snprintf() '#' specifier for
    details.

    If ptype is non-NULL, then the value it points to will be set to one of
    Py_DTST_FINITE, Py_DTST_INFINITE, or Py_DTST_NAN, signifying that
    val is a finite number, an infinite number, or not a number, respectively.

    The return value is a pointer to buffer with the converted string or
    NULL if the conversion failed.  The caller is responsible for freeing the
    returned string by calling PyMem_Free().
    """
    buffer, rtype = rfloat.double_to_string(val, format_code,
                                            intmask(precision),
                                            intmask(flags))
    # Report the float classification through *ptype* when requested.
    if ptype != lltype.nullptr(rffi.INTP.TO):
        ptype[0] = rffi.cast(rffi.INT, DOUBLE_TO_STRING_TYPES_MAP[rtype])
    # Copy into a heap C string; the caller frees it with PyMem_Free().
    bufp = rffi.str2charp(buffer)
    return bufp
|
import random
import tkinter
import math
class Blackjack():
def __init__(self, master):
#Menu
glavniMenu = tkinter.Menu(master)
master.config(menu = glavniMenu)
menuBJ = tkinter.Menu(glavniMenu)
glavniMenu.add_cascade(label = 'Blackjack', menu=menuBJ)
menuBJ.add_cascade(label = 'New Game', command = self.newGame)
menuBJ.add_cascade(label = 'Quit', command = master.destroy)
#zeleno platno (igralna povrsina)
self.platno = tkinter.Canvas(master, width = 700, height = 480, bg = 'green')
self.platno.grid(row = 0, column = 0)
#Crte
self.pokoncaCrta = self.platno.create_line(500, 0, 500, 600, width = 4)
self.lezecaCrta = self.platno.create_line(0, 410, 500, 410, width = 4)
#####Spremenljivke
self.sezKart = ['karte/1.gif', 'karte/2.gif', 'karte/3.gif', 'karte/4.gif', 'karte/5.gif',
'karte/6.gif', 'karte/7.gif', 'karte/8.gif', 'karte/9.gif', 'karte/10.gif',
'karte/11.gif', 'karte/12.gif', 'karte/13.gif', 'karte/4.gif', 'karte/15.gif',
'karte/16.gif', 'karte/17.gif', 'karte/18.gif', 'karte/19.gif', 'karte/20.gif',
'karte/21.gif', 'karte/22.gif', 'karte/23.gif', 'karte/24.gif', 'karte/25.gif',
'karte/26.gif', 'karte/27.gif', 'karte/28.gif', 'karte/29.gif', 'karte/30.gif',
'karte/31.gif', 'karte/32.gif', 'karte/33.gif', 'karte/34.gif', 'karte/35.gif',
'karte/36.gif', 'karte/37.gif', 'karte/38.gif', 'karte/39.gif', 'karte/40.gif',
'karte/41.gif', 'karte/42.gif', 'karte/43.gif', 'karte/44.gif', 'karte/45.gif',
'karte/46.gif', 'karte/47.gif', 'karte/48.gif', 'karte/49.gif', 'karte/50.gif',
'karte/51.gif', 'karte/52.gif']
self.credit = 1000 #toliko dobimo na zacetku
self.vsotaStave = 0 #trenutna stava
self.scorePlayer = 0 #tocke igralca
self.scoreDealer = 0 #tocke dealerja
#seznam igralcevih kart
self.indexIgralceveKarte = 0 #katero karto damo na platno, dve karti dobimo ob inic. izbiramo tretjo
self.sezKartIgralec = ['','',''] #dejanska imena kart za sklicevanje
self.sezKartIgralecPlatno = ['','',''] #da bodo karte vidne na zaslonu
#Pozicja (x,y) za igralceve karte
self.pX = 260
self.pY = 350
#seznam dealerjevih kart
self.indexDealerjeveKarte = 0 #katero karto damo na platno, dve karti dobimo ob inic. izbiramo tretjo
self.sezKartDealer = ['','',''] #dejanska imena kart za sklicevanje
self.sezKartDealerPlatno = ['','',''] #da bodo karte vidne na zaslonu
#Pozicja (x,y) za dealerjeve karte
self.dX = 260
self.dY = 120
#####
#####Napisi
#Zgolj napis kje so igralceve karte
self.napisIgralec = tkinter.Label(text = 'Player:', bg = 'green', fg = 'blue', font = ('Helvetica', 18, 'bold'))
self.napisIgralecNaPlatnu = self.platno.create_window(55, 280, window = self.napisIgralec)
#Zgolj napis kje do Dealerjeve karte
self.napisDealer = tkinter.Label(text = 'Dealer:', bg = 'green', fg = 'red', font = ('Helvetica', 18, 'bold'))
self.napisDealerNaPlatnu = self.platno.create_window(55, 50, window = self.napisDealer)
#Zgolj napis, da je pod tem napisom igralcevo financno stanje
self.napisCredit = tkinter.Label(text = 'Credit:', bg = 'green', font = ('Helvetica', 23, 'bold'))
self.napisCreditNaPlatnu = self.platno.create_window(600, 40, window = self.napisCredit)
#Zgolj napis, da je pod tem napisom igralceva trenutna stava
self.napisCurrentBet = tkinter.Label(text = 'Current Bet:', bg = 'green', font = ('Helvetica', 23, 'bold'))
self.napisCurrentBetNaPlatnu = self.platno.create_window(600, 250, window = self.napisCurrentBet)
#Dejanski napis, ki prikazuje igralcevo financno stanje
self.creditNapis = tkinter.Label(text = '$'+str(self.credit), bg = 'green', font = ('Helvetica', 27, 'bold'))
self.creditNapisPlatno = self.platno.create_window(610, 90, window = self.creditNapis)
#Dejanski napis, ki prikazuje igralcevo trenutno stavo
self.vsotaStaveNapis = tkinter.Label(text = '$'+str(self.vsotaStave), bg = 'green', font = ('Helvetica', 27, 'bold'))
self.vsotaStaveNapisPlatno = self.platno.create_window(610, 295, window = self.vsotaStaveNapis)
##Tukaj so napisi za navodila igralcu
self.pWin = tkinter.Label(text = 'Player Wins!', bg = 'green', font = ('Helvetica', 24, 'bold'))
self.pBlackjack = tkinter.Label(text = 'Blackjack! Player Wins!', bg = 'green', font = ('Helvetica', 21, 'bold'))
self.pBust = tkinter.Label(text = 'Player Busts!', bg = 'green', fg = 'red', font = ('Helvetica', 21, 'bold'))
self.dWin = tkinter.Label(text = 'Delaer Wins!', bg = 'green', fg = 'red', font = ('Helvetica', 23, 'bold'))
self.dBlackjack = tkinter.Label(text = 'Dealer hits Blackjack! You Lose!', bg = 'green', font = ('Helvetica', 25, 'bold'))
self.dBust = tkinter.Label(text = 'Dealer Busts! Player Wins!', bg = 'green', font = ('Helvetica', 25, 'bold'))
self.draw = tkinter.Label(text = 'It is a Draw!', bg = 'green', font = ('Helvetica', 23, 'bold'))
self.hitORstand = tkinter.Label(text = 'Hit or Stand', bg = 'green', font = ('Helvetica', 23, 'bold'))
self.maxReached = tkinter.Label(text = 'Maximum of 5 cards reached!', bg = 'green', fg = 'red', font = ('Helvetica', 15, 'bold'))
self.placeBet = tkinter.Label(text = 'Place your bet and decide wether to Hit or Stand', bg = 'green', font = ('Helvetica', 11, 'bold'))
self.emptyBank = tkinter.Label(text = 'Player ran out of money. Please choose new game.', bg = 'green', font = ('Helvetica', 8, 'bold'))
#####
#####Gumbi
self.gumbHit = tkinter.Button(master, text = 'HIT', command = self.hit, state = 'disabled')
self.gumbHitNaPlatnu = self.platno.create_window(30, 450, window = self.gumbHit)
self.gumbStand = tkinter.Button(master, text = 'STAND', command = self.stand, state = 'disabled')
self.gumbStandNaPlatnu = self.platno.create_window(90, 450, window = self.gumbStand)
self.gumbNaprej = tkinter.Button(master, text = 'Next Round', command = self.naslednjaRoka)
#To bos se potreboval
self.gumbNaprejNaPlatnu = ''
self.gumb10 = tkinter.Button(master, text = '$10', command = self.dodaj10)
self.gumb10NaPlatnu = self.platno.create_window(300, 450, window = self.gumb10)
self.gumb20 = tkinter.Button(master, text = '$20', command = self.dodaj20)
self.gumb20NaPlatnu = self.platno.create_window(360, 450, window = self.gumb20)
self.gumb50 = tkinter.Button(master, text = '$50', command = self.dodaj50)
self.gumb50NaPlatnu = self.platno.create_window(420, 450, window = self.gumb50)
#####
#Prva vrstica
#self.SlikaNaPlatnu11 = self.platno.create_image(60, 120, image = self.karta1)
#self.SlikaNaPlatnu12 = self.platno.create_image(160, 120, image = self.karta1)
#self.SlikaNaPlatnu13 = self.platno.create_image(260, 120, image = self.karta1)
#self.SlikaNaPlatnu14 = self.platno.create_image(360, 120, image = self.karta1)
#self.SlikaNaPlatnu15 = self.platno.create_image(460, 120, image = self.karta1)
#Druga vrstica
#self.SlikaNaPlatnu21 = self.platno.create_image(60, 350, image = self.karta2)
#self.SlikaNaPlatnu22 = self.platno.create_image(160, 350, image = self.karta2)
#self.SlikaNaPlatnu23 = self.platno.create_image(260, 350, image = self.karta2)
#self.SlikaNaPlatnu24 = self.platno.create_image(360, 350, image = self.karta2)
#self.SlikaNaPlatnu25 = self.platno.create_image(460, 350, image = self.karta2)
#Tukaj inicializiramo igro...
random.shuffle(self.sezKart)#premesamo kup kart
##Najprej inicializiramo igralca
self.prvaKartaPlayer = self.sezKart.pop() #izberemo prvo karto igralcu
self.vrednost = self.vrednostKarte(self.prvaKartaPlayer) # dolocimo vrednost prve karte
self.scorePlayer += self.vrednost
self.prvaKartaPlayer = tkinter.PhotoImage(file = self.prvaKartaPlayer) #Playing with fire
self.prvaKartaPlayerNaPlatnu = self.platno.create_image(60, 350, image = self.prvaKartaPlayer)
self.drugaKartaPlayer = self.sezKart.pop() #izberemo drugo karto igralcu
self.vrednost = self.vrednostKarte(self.drugaKartaPlayer) # dolocimo vrednost druge karte
#ce dobimo se enega asa bi presegli 21, torej se as steje kot 1
if self.vrednost == 11 and self.scorePlayer > 10:
self.vrednost = 1
self.scorePlayer += self.vrednost
self.drugaKartaPlayer = tkinter.PhotoImage(file = self.drugaKartaPlayer) #Playing with fire
self.drugaKartaPlayerNaPlatnu = self.platno.create_image(160, 350, image = self.drugaKartaPlayer)
##
##Potem inicializiramo dealerja
self.prvaKartaDealer = self.sezKart.pop() #izberemo prvo karto dealerju
self.vrednost = self.vrednostKarte(self.prvaKartaDealer) # dolocimo vrednost prve karte
self.scoreDealer += self.vrednost
self.prvaKartaDealer = tkinter.PhotoImage(file = self.prvaKartaDealer) #Playing with fire
self.prvaKartaDealerNaPlatnu = self.platno.create_image(60, 120, image = self.prvaKartaDealer)
self.drugaKartaDealer = self.sezKart.pop() #izberemo drugo karto dealerju
self.vrednost = self.vrednostKarte(self.drugaKartaDealer) # dolocimo vrednost druge karte
#ce dobimo se enega asa bi presegli 21, torej se as steje kot 1
if self.vrednost == 11 and self.scoreDealer > 10:
self.vrednost = 1
self.scoreDealer += self.vrednost
self.drugaKartaDealer = tkinter.PhotoImage(file = self.drugaKartaDealer) #Playing with fire
self.drugaKartaDealerNaPlatnu = self.platno.create_image(160, 120, image = self.drugaKartaDealer)
#Po pravilih je druga karta dealerja zakrita
self.zakritaKarta = tkinter.PhotoImage(file = 'karte/back.gif')
self.zakritaKartaNaPlatnu = self.platno.create_image(160, 120, image = self.zakritaKarta)
##
#Na platno postavimo navodila, kaj naj igralec stori
self.navodilaPlatno = self.platno.create_window(270, 230, window = self.placeBet)
def vrednostKarte(self, karta):
if karta == 'karte/1.gif' or karta == 'karte/2.gif' or karta == 'karte/3.gif' or karta == 'karte/4.gif':
return 11
if karta == 'karte/5.gif' or karta == 'karte/6.gif' or karta == 'karte/7.gif' or karta == 'karte/8.gif' or karta == 'karte/9.gif' or karta == 'karte/10.gif' or karta == 'karte/11.gif' or karta == 'karte/12.gif' or karta == 'karte/13.gif' or karta == 'karte/14.gif' or karta == 'karte/15.gif' or karta == 'karte/16.gif' or karta == 'karte/17.gif' or karta == 'karte/18.gif' or karta == 'karte/19.gif' or karta == 'karte/20.gif':
return 10
if karta == 'karte/21.gif' or karta == 'karte/22.gif' or karta == 'karte/23.gif' or karta == 'karte/24.gif':
return 9
if karta == 'karte/25.gif' or karta == 'karte/26.gif' or karta == 'karte/27.gif' or karta == 'karte/28.gif':
return 8
if karta == 'karte/29.gif' or karta == 'karte/30.gif' or karta == 'karte/31.gif' or karta == 'karte/32.gif':
return 7
if karta == 'karte/33.gif' or karta == 'karte/34.gif' or karta == 'karte/35.gif' or karta == 'karte/36.gif':
return 6
if karta == 'karte/37.gif' or karta == 'karte/38.gif' or karta == 'karte/39.gif' or karta == 'karte/40.gif':
return 5
if karta == 'karte/41.gif' or karta == 'karte/42.gif' or karta == 'karte/43.gif' or karta == 'karte/44.gif':
return 4
if karta == 'karte/45.gif' or karta == 'karte/46.gif' or karta == 'karte/47.gif' or karta == 'karte/48.gif':
return 3
if karta == 'karte/49.gif' or karta == 'karte/50.gif' or karta == 'karte/51.gif' or karta == 'karte/52.gif':
return 2
def dodaj10(self):
if self.credit >= 10:
self.gumbHit.config(state = 'normal')
self.gumbStand.config(state = 'normal')
#stava se poveca
self.vsotaStave += 10
self.platno.delete(self.vsotaStaveNapisPlatno)
self.vsotaStaveNapis = tkinter.Label(text = '$'+str(self.vsotaStave), bg = 'green', font = ('Helvetica', 27, 'bold'))
self.vsotaStaveNapisPlatno = self.platno.create_window(610, 285, window = self.vsotaStaveNapis)
#credit se zmanjsa
self.credit -= 10
self.platno.delete(self.creditNapisPlatno)
self.creditNapis = tkinter.Label(text = '$'+str(self.credit), bg = 'green', font = ('Helvetica', 27, 'bold'))
self.creditNapisPlatno = self.platno.create_window(610, 90, window = self.creditNapis)
else:
pass
def dodaj20(self):
if self.credit >= 20 :
self.gumbHit.config(state = 'normal')
self.gumbStand.config(state = 'normal')
#stava se poveca
self.vsotaStave += 20
self.platno.delete(self.vsotaStaveNapisPlatno)
self.vsotaStaveNapis = tkinter.Label(text = '$'+str(self.vsotaStave), bg = 'green', font = ('Helvetica', 27, 'bold'))
self.vsotaStaveNapisPlatno = self.platno.create_window(610, 285, window = self.vsotaStaveNapis)
#credit se zmanjsa
self.credit -= 20
self.platno.delete(self.creditNapisPlatno)
self.creditNapis = tkinter.Label(text = '$'+str(self.credit), bg = 'green', font = ('Helvetica', 27, 'bold'))
self.creditNapisPlatno = self.platno.create_window(610, 90, window = self.creditNapis)
else:
pass
def dodaj50(self):
if self.credit >= 50:
self.gumbHit.config(state = 'normal')
self.gumbStand.config(state = 'normal')
#stava se poveca
self.vsotaStave += 50
self.platno.delete(self.vsotaStaveNapisPlatno)
self.vsotaStaveNapis = tkinter.Label(text = '$'+str(self.vsotaStave), bg = 'green', font = ('Helvetica', 27, 'bold'))
self.vsotaStaveNapisPlatno = self.platno.create_window(610, 285, window = self.vsotaStaveNapis)
#credit se zmanjsa
self.credit -= 50
self.platno.delete(self.creditNapisPlatno)
self.creditNapis = tkinter.Label(text = '$'+str(self.credit), bg = 'green', font = ('Helvetica', 27, 'bold'))
self.creditNapisPlatno = self.platno.create_window(610, 90, window = self.creditNapis)
else:
pass
    def hit(self):
        """Deal one more card to the player and settle immediate outcomes.

        Once the player hits, betting is locked: the chip buttons are
        disabled and Stand becomes available.  The player may hold at most
        five cards (two opening cards + three hits); exactly 21 pays out
        immediately, over 21 loses the bet immediately.
        """
        self.gumbStand.config(state = 'normal')
        self.gumb10.config(state = 'disabled')
        self.gumb20.config(state = 'disabled')
        self.gumb50.config(state = 'disabled')
        if self.indexIgralceveKarte >= 3: #we cannot hold more than 5 cards
            self.platno.delete(self.navodilaPlatno)
            self.navodilaPlatno = self.platno.create_window(310, 230, window = self.maxReached)
            self.gumbHit.config(state = 'disabled')
        else:
            self.platno.delete(self.navodilaPlatno)
            self.navodilaPlatno = self.platno.create_window(310, 230, window = self.hitORstand)
            #draw a card from the shuffled deck
            self.sezKartIgralec[self.indexIgralceveKarte] = self.sezKart.pop()
            #determine the value of the drawn card
            self.vrednost = self.vrednostKarte(self.sezKartIgralec[self.indexIgralceveKarte])
            #an ace counts 11 by default, but counts as 1 when 11 would not help
            if self.vrednost == 11 and self.scorePlayer > 10:
                self.vrednost = 1
            #add the card to the player's running total
            self.scorePlayer += self.vrednost
            #load the card image so it can be displayed
            self.sezKartIgralec[self.indexIgralceveKarte] = tkinter.PhotoImage(file = self.sezKartIgralec[self.indexIgralceveKarte])
            #show the card on the canvas
            self.sezKartIgralecPlatno[self.indexIgralceveKarte] = self.platno.create_image(self.pX, self.pY, image = self.sezKartIgralec[self.indexIgralceveKarte])
            if self.scorePlayer == 21:
                #tell the player what happened
                self.platno.delete(self.navodilaPlatno)
                self.navodilaPlatno = self.platno.create_window(310, 230, window = self.pBlackjack)
                #the player wins, so the bet pays out double
                self.credit += (self.vsotaStave*2)
                self.vsotaStave = 0
                self.platno.delete(self.creditNapisPlatno)
                self.creditNapis = tkinter.Label(text = '$'+str(self.credit), bg = 'green', font = ('Helvetica', 27, 'bold'))
                self.creditNapisPlatno = self.platno.create_window(610, 90, window = self.creditNapis)
                self.platno.delete(self.vsotaStaveNapisPlatno)
                self.vsotaStaveNapis = tkinter.Label(text = '$'+str(self.vsotaStave), bg = 'green', font = ('Helvetica', 27, 'bold'))
                self.vsotaStaveNapisPlatno = self.platno.create_window(610, 285, window = self.vsotaStaveNapis)
                #hand is over: lock all play buttons and offer "next hand"
                self.gumbHit.config(state = 'disabled')
                self.gumbStand.config(state = 'disabled')
                self.gumb10.config(state = 'disabled')
                self.gumb20.config(state = 'disabled')
                self.gumb50.config(state = 'disabled')
                self.gumbNaprejNaPlatnu = self.platno.create_window(610, 400, window = self.gumbNaprej)
                #NOTE(review): after the payout above credit cannot be 0 here,
                #so this bank-empty branch looks unreachable -- confirm.
                if self.credit == 0:
                    self.platno.delete(self.navodilaPlatno)
                    self.navodilaPlatno = self.platno.create_window(270, 230, window = self.emptyBank)
                    self.gumb10.config(state = 'disabled')
                    self.gumb20.config(state = 'disabled')
                    self.gumb50.config(state = 'disabled')
                    self.gumbHit.config(state = 'disabled')
                    self.gumbStand.config(state = 'disabled')
                    #no money left, so remove the "next hand" button again
                    self.platno.delete(self.gumbNaprejNaPlatnu)
            #not elif, but the two conditions are mutually exclusive
            if self.scorePlayer > 21:
                #tell the player what happened
                self.platno.delete(self.navodilaPlatno)
                self.navodilaPlatno = self.platno.create_window(310, 230, window = self.pBust)
                #the player busted, so the current bet is lost
                self.vsotaStave = 0
                self.platno.delete(self.creditNapisPlatno)
                self.creditNapis = tkinter.Label(text = '$'+str(self.credit), bg = 'green', font = ('Helvetica', 27, 'bold'))
                self.creditNapisPlatno = self.platno.create_window(610, 90, window = self.creditNapis)
                self.platno.delete(self.vsotaStaveNapisPlatno)
                self.vsotaStaveNapis = tkinter.Label(text = '$'+str(self.vsotaStave), bg = 'green', font = ('Helvetica', 27, 'bold'))
                self.vsotaStaveNapisPlatno = self.platno.create_window(610, 285, window = self.vsotaStaveNapis)
                #hand is over: lock all play buttons and offer "next hand"
                self.gumbHit.config(state = 'disabled')
                self.gumbStand.config(state = 'disabled')
                self.gumb10.config(state = 'disabled')
                self.gumb20.config(state = 'disabled')
                self.gumb50.config(state = 'disabled')
                self.gumbNaprejNaPlatnu = self.platno.create_window(610, 400, window = self.gumbNaprej)
                if self.credit == 0:
                    #bank is empty: the game cannot continue
                    self.platno.delete(self.navodilaPlatno)
                    self.navodilaPlatno = self.platno.create_window(270, 230, window = self.emptyBank)
                    self.gumb10.config(state = 'disabled')
                    self.gumb20.config(state = 'disabled')
                    self.gumb50.config(state = 'disabled')
                    self.gumbHit.config(state = 'disabled')
                    self.gumbStand.config(state = 'disabled')
                    self.platno.delete(self.gumbNaprejNaPlatnu)
            self.indexIgralceveKarte += 1 #make room for the next card
            self.pX += 100 #shift the position so the next card can be shown
    def stand(self):
        """Finish the hand: reveal the dealer's hole card, play out the
        dealer's draws, settle a dealer bust or blackjack immediately, and
        otherwise hand off to oceniIgro() for the score comparison.
        """
        #self.gumbStand.config(state = 'disabled')
        #self.gumbHit.config(state = 'normal')
        #reveal the dealer's face-down card
        self.platno.delete(self.zakritaKartaNaPlatnu)
        #house rule here: the dealer draws until reaching at least 15
        #(NOTE(review): casinos usually draw to 17 -- confirm intended)
        while self.scoreDealer < 15:
            #draw a card from the shuffled deck
            self.sezKartDealer[self.indexDealerjeveKarte] = self.sezKart.pop()
            #determine the value of the drawn card
            self.vrednost = self.vrednostKarte(self.sezKartDealer[self.indexDealerjeveKarte])
            #count an ace as 1 instead of 11 when that is better
            if self.vrednost == 11 and self.scoreDealer > 10:
                self.vrednost = 1
            self.scoreDealer += self.vrednost
            #load the card image so it can be displayed
            self.sezKartDealer[self.indexDealerjeveKarte] = tkinter.PhotoImage(file = self.sezKartDealer[self.indexDealerjeveKarte])
            #show the card on the canvas
            self.sezKartDealerPlatno[self.indexDealerjeveKarte] = self.platno.create_image(self.dX, self.dY, image = self.sezKartDealer[self.indexDealerjeveKarte])
            self.indexDealerjeveKarte += 1 #make room for the next card
            self.dX += 100 #shift the position so the next card can be shown
        if self.scoreDealer > 21:
            #dealer busted: tell the player what happened
            self.platno.delete(self.navodilaPlatno)
            self.navodilaPlatno = self.platno.create_window(310, 230, window = self.dBust)
            #the player wins, so the bet pays out double
            self.credit += (self.vsotaStave*2)
            self.vsotaStave = 0
            self.platno.delete(self.creditNapisPlatno)
            self.creditNapis = tkinter.Label(text = '$'+str(self.credit), bg = 'green', font = ('Helvetica', 27, 'bold'))
            self.creditNapisPlatno = self.platno.create_window(610, 90, window = self.creditNapis)
            self.platno.delete(self.vsotaStaveNapisPlatno)
            self.vsotaStaveNapis = tkinter.Label(text = '$'+str(self.vsotaStave), bg = 'green', font = ('Helvetica', 27, 'bold'))
            self.vsotaStaveNapisPlatno = self.platno.create_window(610, 285, window = self.vsotaStaveNapis)
            #hand is over: lock all play buttons and offer "next hand"
            self.gumbHit.config(state = 'disabled')
            self.gumbStand.config(state = 'disabled')
            self.gumb10.config(state = 'disabled')
            self.gumb20.config(state = 'disabled')
            self.gumb50.config(state = 'disabled')
            self.gumbNaprejNaPlatnu = self.platno.create_window(610, 400, window = self.gumbNaprej)
            if self.credit == 0:
                #bank is empty: the game cannot continue
                self.platno.delete(self.navodilaPlatno)
                self.navodilaPlatno = self.platno.create_window(270, 230, window = self.emptyBank)
                self.gumb10.config(state = 'disabled')
                self.gumb20.config(state = 'disabled')
                self.gumb50.config(state = 'disabled')
                self.gumbHit.config(state = 'disabled')
                self.gumbStand.config(state = 'disabled')
                self.platno.delete(self.gumbNaprejNaPlatnu)
        elif self.scoreDealer == 21:
            #dealer blackjack: tell the player what happened
            self.platno.delete(self.navodilaPlatno)
            self.navodilaPlatno = self.platno.create_window(310, 230, window = self.dBlackjack)
            #the player loses the current bet
            self.vsotaStave = 0
            self.platno.delete(self.creditNapisPlatno)
            self.creditNapis = tkinter.Label(text = '$'+str(self.credit), bg = 'green', font = ('Helvetica', 27, 'bold'))
            self.creditNapisPlatno = self.platno.create_window(610, 90, window = self.creditNapis)
            self.platno.delete(self.vsotaStaveNapisPlatno)
            self.vsotaStaveNapis = tkinter.Label(text = '$'+str(self.vsotaStave), bg = 'green', font = ('Helvetica', 27, 'bold'))
            self.vsotaStaveNapisPlatno = self.platno.create_window(610, 285, window = self.vsotaStaveNapis)
            #hand is over: lock all play buttons and offer "next hand"
            self.gumbHit.config(state = 'disabled')
            self.gumbStand.config(state = 'disabled')
            self.gumb10.config(state = 'disabled')
            self.gumb20.config(state = 'disabled')
            self.gumb50.config(state = 'disabled')
            self.gumbNaprejNaPlatnu = self.platno.create_window(610, 400, window = self.gumbNaprej)
            if self.credit == 0:
                #bank is empty: the game cannot continue
                self.platno.delete(self.navodilaPlatno)
                self.navodilaPlatno = self.platno.create_window(270, 230, window = self.emptyBank)
                self.gumb10.config(state = 'disabled')
                self.gumb20.config(state = 'disabled')
                self.gumb50.config(state = 'disabled')
                self.gumbHit.config(state = 'disabled')
                self.gumbStand.config(state = 'disabled')
                self.platno.delete(self.gumbNaprejNaPlatnu)
        #NOTE(review): oceniIgro() appears to run for every outcome, so after
        #a dealer 21 its dealer-wins branch re-settles the hand (redundantly
        #redrawing the message) -- confirm this is intended.
        self.oceniIgro()
    def oceniIgro(self):
        '''Score the finished hand by comparing totals.

        Reached from stand(); the cases that were already settled there
        (dealer bust, dealer 21) fall through the elif chain because of the
        scoreDealer <= 21 guard on the dealer-wins branch.  Regardless of
        outcome, all play buttons are disabled at the end.
        '''
        if self.scorePlayer > self.scoreDealer:
            #player wins: show the message and pay out double the bet
            self.platno.delete(self.navodilaPlatno)
            self.navodilaPlatno = self.platno.create_window(310, 230, window = self.pWin)
            self.credit += (self.vsotaStave*2)
            self.vsotaStave = 0
            self.platno.delete(self.creditNapisPlatno)
            self.creditNapis = tkinter.Label(text = '$'+str(self.credit), bg = 'green', font = ('Helvetica', 27, 'bold'))
            self.creditNapisPlatno = self.platno.create_window(610, 90, window = self.creditNapis)
            self.platno.delete(self.vsotaStaveNapisPlatno)
            self.vsotaStaveNapis = tkinter.Label(text = '$'+str(self.vsotaStave), bg = 'green', font = ('Helvetica', 27, 'bold'))
            self.vsotaStaveNapisPlatno = self.platno.create_window(610, 285, window = self.vsotaStaveNapis)
            self.gumbNaprejNaPlatnu = self.platno.create_window(610, 400, window = self.gumbNaprej)
            if self.credit == 0:
                #bank is empty: the game cannot continue
                self.platno.delete(self.navodilaPlatno)
                self.navodilaPlatno = self.platno.create_window(270, 230, window = self.emptyBank)
                self.gumb10.config(state = 'disabled')
                self.gumb20.config(state = 'disabled')
                self.gumb50.config(state = 'disabled')
                self.gumbHit.config(state = 'disabled')
                self.gumbStand.config(state = 'disabled')
                self.platno.delete(self.gumbNaprejNaPlatnu)
        elif self.scorePlayer == self.scoreDealer:
            #push (draw): the player gets part of the bet back
            self.platno.delete(self.navodilaPlatno)
            self.navodilaPlatno = self.platno.create_window(310, 230, window = self.draw)
            #NOTE(review): only half the bet (rounded up, uses the module-level
            #math import) is returned on a push; casinos usually return the
            #full bet -- confirm this house rule is intended.
            self.credit += int(math.ceil(0.5 * self.vsotaStave))
            self.vsotaStave = 0
            self.platno.delete(self.creditNapisPlatno)
            self.creditNapis = tkinter.Label(text = '$'+str(self.credit), bg = 'green', font = ('Helvetica', 27, 'bold'))
            self.creditNapisPlatno = self.platno.create_window(610, 90, window = self.creditNapis)
            self.platno.delete(self.vsotaStaveNapisPlatno)
            self.vsotaStaveNapis = tkinter.Label(text = '$'+str(self.vsotaStave), bg = 'green', font = ('Helvetica', 27, 'bold'))
            self.vsotaStaveNapisPlatno = self.platno.create_window(610, 285, window = self.vsotaStaveNapis)
            self.gumbNaprejNaPlatnu = self.platno.create_window(610, 400, window = self.gumbNaprej)
            if self.credit == 0:
                #bank is empty: the game cannot continue
                self.platno.delete(self.navodilaPlatno)
                self.navodilaPlatno = self.platno.create_window(270, 230, window = self.emptyBank)
                self.gumb10.config(state = 'disabled')
                self.gumb20.config(state = 'disabled')
                self.gumb50.config(state = 'disabled')
                self.gumbHit.config(state = 'disabled')
                self.gumbStand.config(state = 'disabled')
                self.platno.delete(self.gumbNaprejNaPlatnu)
        elif self.scoreDealer > self.scorePlayer and self.scoreDealer <= 21:
            #dealer wins (without busting): the bet is lost
            self.platno.delete(self.navodilaPlatno)
            self.navodilaPlatno = self.platno.create_window(310, 230, window = self.dWin)
            self.vsotaStave = 0
            self.platno.delete(self.creditNapisPlatno)
            self.creditNapis = tkinter.Label(text = '$'+str(self.credit), bg = 'green', font = ('Helvetica', 27, 'bold'))
            self.creditNapisPlatno = self.platno.create_window(610, 90, window = self.creditNapis)
            self.platno.delete(self.vsotaStaveNapisPlatno)
            self.vsotaStaveNapis = tkinter.Label(text = '$'+str(self.vsotaStave), bg = 'green', font = ('Helvetica', 27, 'bold'))
            self.vsotaStaveNapisPlatno = self.platno.create_window(610, 285, window = self.vsotaStaveNapis)
            self.gumbNaprejNaPlatnu = self.platno.create_window(610, 400, window = self.gumbNaprej)
            if self.credit == 0:
                #bank is empty: the game cannot continue
                self.platno.delete(self.navodilaPlatno)
                self.navodilaPlatno = self.platno.create_window(270, 230, window = self.emptyBank)
                self.gumb10.config(state = 'disabled')
                self.gumb20.config(state = 'disabled')
                self.gumb50.config(state = 'disabled')
                self.gumbHit.config(state = 'disabled')
                self.gumbStand.config(state = 'disabled')
                self.platno.delete(self.gumbNaprejNaPlatnu)
        #runs for every outcome, including a dealer bust that matched no branch
        self.gumbHit.config(state = 'disabled')
        self.gumbStand.config(state = 'disabled')
        self.gumb10.config(state = 'disabled')
        self.gumb20.config(state = 'disabled')
        self.gumb50.config(state = 'disabled')
def naslednjaRoka(self):
'''Metoda se izvede ob zakljucku vsake igre. Njen glavni namen je pobrisati stvari, ki so ostale
iz prejsnje igre iz platna, ter ponovno nastaviti nekatere spremenljivke. Npr. seznam kart mora, biti
zopet poln, pa se premesati ga moremo. Poleg tega moramo se ponastaviti tocke igralca in
dealerja na 0.'''
##Najprej pobrisemo vse kar bomo hoteli imeti na platnu na novo
self.platno.delete(self.navodilaPlatno) #To moramo najprej, drugace bodo prekrivanja
#pobrisemo dve karte ob inicializaciji
self.platno.delete(self.prvaKartaPlayer)
self.platno.delete(self.prvaKartaDealer)
self.platno.delete(self.drugaKartaPlayer)
self.platno.delete(self.drugaKartaDealer)
#Ponovno nastavimo tudi tri karte, ki jih dobimo kasneje
for i in range(0, 3):
self.sezKartIgralec[i] = ''
self.sezKartDealer[i] = ''
self.sezKartIgralecPlatno[i] = ''
self.sezKartDealerPlatno[i] = ''
##
##Ponastavimo spremenljivke
self.sezKart = ['karte/1.gif', 'karte/2.gif', 'karte/3.gif', 'karte/4.gif', 'karte/5.gif',
'karte/6.gif', 'karte/7.gif', 'karte/8.gif', 'karte/9.gif', 'karte/10.gif',
'karte/11.gif', 'karte/12.gif', 'karte/13.gif', 'karte/4.gif', 'karte/15.gif',
'karte/16.gif', 'karte/17.gif', 'karte/18.gif', 'karte/19.gif', 'karte/20.gif',
'karte/21.gif', 'karte/22.gif', 'karte/23.gif', 'karte/24.gif', 'karte/25.gif',
'karte/26.gif', 'karte/27.gif', 'karte/28.gif', 'karte/29.gif', 'karte/30.gif',
'karte/31.gif', 'karte/32.gif', 'karte/33.gif', 'karte/34.gif', 'karte/35.gif',
'karte/36.gif', 'karte/37.gif', 'karte/38.gif', 'karte/39.gif', 'karte/40.gif',
'karte/41.gif', 'karte/42.gif', 'karte/43.gif', 'karte/44.gif', 'karte/45.gif',
'karte/46.gif', 'karte/47.gif', 'karte/48.gif', 'karte/49.gif', 'karte/50.gif',
'karte/51.gif', 'karte/52.gif']
self.scorePlayer = 0
self.scoreDealer = 0
self.pX = 260
self.dX = 260
self.indexIgralceveKarte = 0
self.indexDealerjeveKarte = 0
##
##Ponovno inicializiramo igro
random.shuffle(self.sezKart)#premesamo kup kart
##Najprej inicializiramo igralca
self.prvaKartaPlayer = self.sezKart.pop() #izberemo prvo karto igralcu
self.scorePlayer += self.vrednostKarte(self.prvaKartaPlayer) # dolocimo vrednost prve karte
self.prvaKartaPlayer = tkinter.PhotoImage(file = self.prvaKartaPlayer) #Playing with fire
self.prvaKartaPlayerNaPlatnu = self.platno.create_image(60, 350, image = self.prvaKartaPlayer)
self.drugaKartaPlayer = self.sezKart.pop() #izberemo drugo karto igralcu
self.scorePlayer += self.vrednostKarte(self.drugaKartaPlayer) # dolocimo vrednost druge karte
self.drugaKartaPlayer = tkinter.PhotoImage(file = self.drugaKartaPlayer) #Playing with fire
self.drugaKartaPlayerNaPlatnu = self.platno.create_image(160, 350, image = self.drugaKartaPlayer)
##
##Potem inicializiramo dealerja
self.prvaKartaDealer = self.sezKart.pop() #izberemo prvo karto dealerju
self.scoreDealer += self.vrednostKarte(self.prvaKartaDealer) # dolocimo vrednost prve karte
self.prvaKartaDealer = tkinter.PhotoImage(file = self.prvaKartaDealer) #Playing with fire
self.prvaKartaDealerNaPlatnu = self.platno.create_image(60, 120, image = self.prvaKartaDealer)
self.drugaKartaDealer = self.sezKart.pop() #izberemo drugo karto dealerju
self.scoreDealer += self.vrednostKarte(self.drugaKartaDealer) # dolocimo vrednost druge karte
self.drugaKartaDealer = tkinter.PhotoImage(file = self.drugaKartaDealer) #Playing with fire
self.drugaKartaDealerNaPlatnu = self.platno.create_image(160, 120, image = self.drugaKartaDealer)
#Po pravilih je druga karta dealerja zakrita
self.zakritaKarta = tkinter.PhotoImage(file = 'karte/back.gif')
self.zakritaKartaNaPlatnu = self.platno.create_image(160, 120, image = self.zakritaKarta)
##
#Na platno postavimo navodila, kaj naj igralec stori
self.navodilaPlatno = self.platno.create_window(270, 230, window = self.placeBet)
#Ponastavimo se gumbe
self.gumbHit.config(state = 'disabled')
self.gumbStand.config(state = 'disabled')
self.gumb10.config(state = 'normal')
self.gumb20.config(state = 'normal')
self.gumb50.config(state = 'normal')
self.platno.delete(self.gumbNaprejNaPlatnu)
def newGame(self):
'''Metoda se uporablja, ko igralec ostane brez denarja. Lahko pa se kliče
za svež začetek'''
self.credit = 1000
self.platno.delete(self.creditNapisPlatno)
self.creditNapis = tkinter.Label(text = '$'+str(self.credit), bg = 'green', font = ('Helvetica', 27, 'bold'))
self.creditNapisPlatno = self.platno.create_window(610, 90, window = self.creditNapis)
self.vsotaStave = 0
self.platno.delete(self.vsotaStaveNapisPlatno)
self.vsotaStaveNapis = tkinter.Label(text = '$'+str(self.vsotaStave), bg = 'green', font = ('Helvetica', 27, 'bold'))
self.vsotaStaveNapisPlatno = self.platno.create_window(610, 285, window = self.vsotaStaveNapis)
self.naslednjaRoka()
if __name__ == '__main__':
    # Build the main window and hand control to the tkinter event loop.
    glavno_okno = tkinter.Tk()
    glavno_okno.title('Blackjack')
    igra = Blackjack(glavno_okno)
    glavno_okno.mainloop()
|
import h5py
import os
import numpy as np
from sys import argv
from stereo_processing import align_audio, downsize
# NOTE: Python 2 script (print statements without parentheses).
# Merges the audio/depth datasets from the HDF5 file given as argv[1] with
# those of every extra capture file named in argv[2:], then writes the
# combined datasets to data_100t.h5 in the working directory.
path = '/Volumes/seagate/legit_data/'  # where the extra capture files live
current_path = os.getcwd()+'/'
print "opening main file"
with h5py.File(current_path+argv[1], 'r') as main_data:
    # .value loads the whole dataset into memory as a numpy array
    # (legacy h5py API; dataset[()] is the modern spelling).
    main_audio = main_data['audio'].value
    main_depth = main_data['depth'].value
new_audio = [main_audio]
new_depth = [main_depth]
num_new_samples = 0
old_audio_shape = main_audio.shape
old_depth_shape = main_depth.shape
for filename in argv[2:]:
    print "loading %s data" %filename
    with h5py.File(path+filename, 'r') as f:
        print "aligning audio"
        a = f['audio'].value
        # presumably 5000 is the alignment window/offset -- TODO confirm
        # against stereo_processing.align_audio's signature.
        aligned = align_audio(5000, a)
        new_audio.append(aligned)
        print "downsizing depth"
        d = f['depth'].value
        # one 12x16 downsized depth map per aligned audio sample
        # (assumes len(d) == aligned.shape[0] -- TODO confirm)
        downsized = np.empty((aligned.shape[0],12,16))
        counter = 0
        for d_map in d:
            downsized[counter] = downsize(d_map)
            print "done with map", counter
            counter += 1
        new_depth.append(downsized)
        # NOTE(review): counts pre-alignment samples (a), not aligned ones.
        num_new_samples += a.shape[0]
audio_tuple = tuple(new_audio)
depth_tuple = tuple(new_depth)
print "audio concatenation"
all_audio = np.concatenate(audio_tuple)
print "depth concatenation"
all_depth = np.concatenate(depth_tuple)
# Report the before/after shapes, then persist the merged datasets.
print "\n\nold audio shape:", old_audio_shape
print "old depth shape:", old_depth_shape
print "total number of new samples added:",num_new_samples
print "new audio shape:", all_audio.shape
print "new depth shape:", all_depth.shape
print "\n\nsaving new file"
with h5py.File("data_100t.h5", 'w') as d:
    d.create_dataset('audio', data=all_audio)
    d.create_dataset('depth', data=all_depth)
|
# Generated by Django 3.0.3 on 2020-09-02 20:49
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 3.0.3): adds the integer columns 'node' and
    # 'snap' to the dashboard SnapToAnt model.

    dependencies = [
        # Must apply after the XengChannels time-field migration.
        ("dashboard", "0024_xengchannels_time"),
    ]

    operations = [
        migrations.AddField(
            model_name="snaptoant",
            name="node",
            field=models.IntegerField(default=0),
            # default=0 only backfills existing rows; it is not kept on the model.
            preserve_default=False,
        ),
        migrations.AddField(
            model_name="snaptoant",
            name="snap",
            field=models.IntegerField(default=0),
            preserve_default=False,
        ),
    ]
|
import unittest
import copy
import os
from unittest.mock import patch
from mongoengine import connect, disconnect
from google.protobuf.json_format import MessageToDict
from google.protobuf.empty_pb2 import Empty
from spaceone.core.unittest.result import print_message
from spaceone.core.unittest.runner import RichTestRunner
from spaceone.core import config
from spaceone.core import utils
from spaceone.core.service import BaseService
from spaceone.core.locator import Locator
from spaceone.core.pygrpc import BaseAPI
from spaceone.api.statistics.v1 import storage_pb2
from spaceone.statistics.api.v1.storage import Storage
from test.factory.storage_factory import StorageFactory
from spaceone.statistics.connector import PluginConnector
from spaceone.core.model.mongo_model import MongoModel
class _MockStorageService(BaseService):
    """Stub replacement for the Storage service layer.

    Each handler feeds the request parameters straight into StorageFactory
    (deep-copying where the original mutated its input) and returns the
    fabricated model, so the API tests can exercise request parsing and
    protobuf marshalling without real service logic.
    """

    def get(self, params):
        return StorageFactory(**copy.deepcopy(params))

    def register(self, params):
        return StorageFactory(**params)

    def update(self, params):
        return StorageFactory(**copy.deepcopy(params))

    def list(self, params):
        # A page of ten fabricated storages plus the total count.
        return StorageFactory.build_batch(10, **params), 10

    def enable(self, params):
        return StorageFactory(**params)

    def disable(self, params):
        return StorageFactory(**params)

    def deregister(self, params):
        return StorageFactory(**params)

    def update_plugin(self, params):
        return StorageFactory(**params)

    def verify_plugin(self, params):
        return StorageFactory(**params)
class TestStorageAPI(unittest.TestCase):
    """gRPC servicer tests for the statistics Storage API.

    Locator.get_service is patched to return _MockStorageService and
    BaseAPI.parse_request is mocked, so each test exercises only the
    servicer's request handling and protobuf (de)serialization.

    Fixes: corrected copy-pasted print_message labels (schedule ->
    storage), removed the unused config_path in setUpClass, and made
    test_get_storage feed its params through the mocked parse_request
    like the sibling tests do.
    """

    @classmethod
    def setUpClass(cls):
        config.init_conf(package='spaceone.statistics')
        connect('test', host='mongomock://localhost')
        super().setUpClass()

    @classmethod
    def tearDownClass(cls) -> None:
        super().tearDownClass()
        disconnect()

    @patch.object(BaseAPI, '__init__', return_value=None)
    @patch.object(Locator, 'get_service', return_value=_MockStorageService())
    @patch.object(BaseAPI, 'parse_request')
    def test_register_storage(self, mock_parse_request, *args):
        """Registering a storage echoes the request fields back ENABLED."""
        params = {
            'name': utils.generate_id('storage', 4),
            'tags': {
                utils.random_string(5): utils.random_string(5)
            },
            'plugin_info': {
                'plugin_id': utils.generate_id('plugin'),
                'version': '1.1',
                'secret_id': utils.generate_id('secret')
            },
            'user_id': utils.generate_id('user'),
            'domain_id': utils.generate_id('domain')
        }
        mock_parse_request.return_value = (params, {})

        storage_servicer = Storage()
        storage_info = storage_servicer.register(params, {})

        print_message(storage_info, 'test_register_storage')
        storage_data = MessageToDict(storage_info, preserving_proto_field_name=True)

        self.assertIsInstance(storage_info, storage_pb2.StorageInfo)
        self.assertEqual(storage_info.name, params['name'])
        self.assertEqual(storage_data['state'], 'ENABLED')
        # self.assertIsNotNone(storage_info.capability)
        self.assertDictEqual(storage_data['tags'], params['tags'])
        self.assertIsInstance(storage_info.plugin_info, storage_pb2.PluginInfo)  # Check if 'PluginInfo' exists
        self.assertEqual(storage_data['plugin_info']['plugin_id'], params['plugin_info']['plugin_id'])
        self.assertEqual(storage_data['plugin_info']['version'], params['plugin_info']['version'])
        self.assertEqual(storage_data['domain_id'], params['domain_id'])
        self.assertIsNotNone(getattr(storage_info, 'created_at', None))

        print(f'[TEST REGISTER STORAGE] {storage_data}')

    @patch.object(BaseAPI, '__init__', return_value=None)
    @patch.object(Locator, 'get_service', return_value=_MockStorageService())
    @patch.object(BaseAPI, 'parse_request')
    def test_update_storage(self, mock_parse_request, *args):
        """Updating a storage reflects the new name and tags."""
        params = {
            'storage_id': utils.generate_id('storage'),
            'name': 'update-storage-name',
            'tags': {
                'update_key': 'update_value'
            },
            'domain_id': utils.generate_id('domain')
        }
        mock_parse_request.return_value = (params, {})

        storage_servicer = Storage()
        storage_info = storage_servicer.update(params, {})

        # label fixed: was the copy-pasted 'test_update_schedule'
        print_message(storage_info, 'test_update_storage')
        storage_data = MessageToDict(storage_info, preserving_proto_field_name=True)

        self.assertIsInstance(storage_info, storage_pb2.StorageInfo)
        self.assertEqual(storage_data['name'], params['name'])
        self.assertEqual(storage_data['storage_id'], params['storage_id'])
        self.assertDictEqual(storage_data['tags'], params['tags'])

        print(f'[TEST UPDATE STORAGE] {storage_data}')

    @patch.object(BaseAPI, '__init__', return_value=None)
    @patch.object(Locator, 'get_service', return_value=_MockStorageService())
    @patch.object(BaseAPI, 'parse_request')
    def test_get_storage(self, mock_parse_request, *args):
        """Getting a storage returns a StorageInfo message."""
        params = {
            'domain_id': utils.generate_id('domain'),
            'storage_id': utils.generate_id('storage')
        }
        # Consistent with the other tests: feed params through the mocked
        # parse_request (previously it returned ({}, {}) and ignored params).
        mock_parse_request.return_value = (params, {})

        storage_servicer = Storage()
        storage_info = storage_servicer.get(params, {})
        storage_data = MessageToDict(storage_info, preserving_proto_field_name=True)

        # label fixed: was the copy-pasted 'test_get_schedule'
        print_message(storage_info, 'test_get_storage')
        self.assertIsInstance(storage_info, storage_pb2.StorageInfo)

        print(f'[TEST GET STORAGE] {storage_data}')

    @patch.object(BaseAPI, '__init__', return_value=None)
    @patch.object(Locator, 'get_service', return_value=_MockStorageService())
    @patch.object(BaseAPI, 'parse_request')
    def test_list_schedules(self, mock_parse_request, *args):
        """Listing returns a page of 10 storages and the total count."""
        mock_parse_request.return_value = ({}, {})

        storage_servicer = Storage()
        schedules_info = storage_servicer.list({}, {})

        print_message(schedules_info, 'test_list_schedules')

        self.assertIsInstance(schedules_info, storage_pb2.StoragesInfo)
        self.assertIsInstance(schedules_info.results[0], storage_pb2.StorageInfo)
        self.assertEqual(schedules_info.total_count, 10)

    @patch.object(BaseAPI, '__init__', return_value=None)
    @patch.object(Locator, 'get_service', return_value=_MockStorageService())
    @patch.object(BaseAPI, 'parse_request')
    def test_enable_storage(self, mock_parse_request, *args):
        """Enabling a storage yields state ENABLED."""
        params = {
            'storage_id': utils.generate_id('storage'),
            'state': 'ENABLED',
            'domain_id': utils.generate_id('domain')
        }
        mock_parse_request.return_value = (params, {})

        storage_servicer = Storage()
        storage_info = storage_servicer.enable(params, {})
        storage_data = MessageToDict(storage_info, preserving_proto_field_name=True)

        print_message(storage_info, 'test_enable_storage')
        self.assertIsInstance(storage_info, storage_pb2.StorageInfo)
        self.assertEqual(storage_info.state, storage_pb2.StorageInfo.State.ENABLED)

        print(f'[TEST ENABLE STORAGE] {storage_data}')

    @patch.object(BaseAPI, '__init__', return_value=None)
    @patch.object(Locator, 'get_service', return_value=_MockStorageService())
    @patch.object(BaseAPI, 'parse_request')
    def test_disable_storage(self, mock_parse_request, *args):
        """Disabling a storage yields state DISABLED."""
        params = {
            'storage_id': utils.generate_id('storage'),
            'state': 'DISABLED',
            'domain_id': utils.generate_id('domain')
        }
        mock_parse_request.return_value = (params, {})

        storage_servicer = Storage()
        storage_info = storage_servicer.disable(params, {})
        storage_data = MessageToDict(storage_info, preserving_proto_field_name=True)

        print_message(storage_info, 'test_disable_storage')
        self.assertIsInstance(storage_info, storage_pb2.StorageInfo)
        self.assertEqual(storage_info.state, storage_pb2.StorageInfo.State.DISABLED)

        print(f'[TEST DISABLE STORAGE] {storage_data}')

    @patch.object(BaseAPI, '__init__', return_value=None)
    @patch.object(Locator, 'get_service', return_value=_MockStorageService())
    @patch.object(BaseAPI, 'parse_request')
    def test_deregister_storage(self, mock_parse_request, *args):
        """Deregistering a storage round-trips through the servicer."""
        params = {
            'storage_id': utils.generate_id('storage'),
            'domain_id': utils.generate_id('domain')
        }
        mock_parse_request.return_value = (params, {})

        storage_servicer = Storage()
        storage_info = storage_servicer.deregister(params, {})
        storage_data = MessageToDict(storage_info, preserving_proto_field_name=True)

        print_message(storage_info, 'test_deregister_storage')
        # TODO : ASK!!
        # self.assertIsInstance(storage_info, Empty)
        # self.assertEqual(storage_info.state, storage_pb2.StorageInfo.State.DISABLED)

        print(f'[TEST DEREGISTER STORAGE] {storage_data}')

    @patch.object(BaseAPI, '__init__', return_value=None)
    @patch.object(Locator, 'get_service', return_value=_MockStorageService())
    @patch.object(PluginConnector, '__init__', return_value=None)
    @patch.object(PluginConnector, 'initialize', return_value='grpc://plugin.spaceone.dev:50051')
    @patch.object(PluginConnector, 'get_plugin_endpoint', return_value='grpc://plugin.spaceone.dev:50051')
    @patch.object(BaseAPI, 'parse_request')
    def test_update_plugin(self, mock_parse_request, *args):
        """Updating the plugin reflects the new plugin version and tags."""
        params = {
            'storage_id': utils.generate_id('storage'),
            'name': 'storage-plugin-update',
            'plugin_info': {
                'plugin_id': utils.generate_id('storage'),
                'version': '3.0',
                'options': {},
            },
            'tags': {
                'update_key': 'update_value'
            },
            'domain_id': utils.generate_id('domain')
        }
        mock_parse_request.return_value = (params, {})

        storage_servicer = Storage()
        storage_info = storage_servicer.update_plugin(params, {})
        storage_data = MessageToDict(storage_info, preserving_proto_field_name=True)

        print_message(storage_info, 'test_update_storage_plugin')
        self.assertIsInstance(storage_info, storage_pb2.StorageInfo)
        self.assertEqual(storage_info.name, params['name'])
        self.assertDictEqual(storage_data['tags'], params['tags'])
        self.assertEqual(storage_info.plugin_info.version, params['plugin_info']['version'])
        self.assertIsNotNone(storage_info.plugin_info)

        print(f'[TEST UPDATE STORAGE PLUGIN] {storage_data}')

    @patch.object(BaseAPI, '__init__', return_value=None)
    @patch.object(Locator, 'get_service', return_value=_MockStorageService())
    @patch.object(PluginConnector, '__init__', return_value=None)
    @patch.object(PluginConnector, 'initialize', return_value='grpc://plugin.spaceone.dev:50051')
    @patch.object(PluginConnector, 'get_plugin_endpoint', return_value='grpc://plugin.spaceone.dev:50051')
    @patch.object(BaseAPI, 'parse_request')
    def test_verify_plugin(self, mock_parse_request, *args):
        """Verifying the plugin returns an Empty message."""
        params = {
            'storage_id': utils.generate_id('storage'),
            'domain_id': utils.generate_id('domain')
        }
        mock_parse_request.return_value = (params, {})

        storage_servicer = Storage()
        storage_info = storage_servicer.verify_plugin(params, {})
        storage_data = MessageToDict(storage_info, preserving_proto_field_name=True)

        # label fixed: was the copy-pasted 'test_deregister_storage_plugin'
        print_message(storage_info, 'test_verify_plugin')
        self.assertIsInstance(storage_info, Empty)

        print(f'[TEST VERIFY STORAGE PLUGIN] {storage_data}')
if __name__ == "__main__":
    # Run this module's tests with the project's rich (colorized) runner.
    unittest.main(testRunner=RichTestRunner)
|
#!/usr/bin/env python3
#
# Copyright 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittest for serial_utils."""
import unittest
from unittest import mock
from cros.factory.test.utils import serial_utils
from cros.factory.external import serial
# Shared fixtures for the test cases below.
_DEFAULT_DRIVER = 'pl2303'      # USB-serial driver name used in lookups
_DEFAULT_INDEX = '1-1'          # USB port index used in port-index lookups
_DEFAULT_PORT = '/dev/ttyUSB0'  # device node expected from a successful lookup
_SEND_RECEIVE_INTERVAL_SECS = 0.2  # expected default gap between Send/Receive
_RETRY_INTERVAL_SECS = 0.5         # expected default delay before a retry
_COMMAND = 'Command'   # arbitrary command payload
_RESPONSE = '.'        # single-byte canned response
_RECEIVE_SIZE = 1      # default receive size asserted in SendReceive tests
class OpenSerialTest(unittest.TestCase):
  """Tests for serial_utils.OpenSerial."""

  def testOpenSerial(self):
    # Build the fake serial object up front, then patch the constructor so
    # OpenSerial hands it back.
    fake_serial = mock.Mock(serial.Serial)
    fake_serial.isOpen = lambda: True
    with mock.patch('cros.factory.external.serial.Serial') as ctor_mock:
      ctor_mock.return_value = fake_serial
      serial_utils.OpenSerial(port=_DEFAULT_PORT, baudrate=19200)
      ctor_mock.assert_called_once_with(port=_DEFAULT_PORT, baudrate=19200)

  def testOpenSerialNoPort(self):
    # A port is mandatory; calling without one must raise ValueError.
    with self.assertRaises(ValueError):
      serial_utils.OpenSerial()
class FindTtyByDriverTest(unittest.TestCase):
  """Tests for serial_utils.FindTtyByDriver.

  glob.glob and os.path.realpath are patched so each test controls which
  driver every /dev/tty* candidate appears to be bound to.
  """

  @mock.patch('glob.glob')
  @mock.patch('os.path.realpath')
  def testFindTtyByDriver(self, realpath_mock, glob_mock):
    """The first candidate already matches the wanted driver."""
    glob_mock.return_value = ['/dev/ttyUSB0', '/dev/ttyUSB1']
    realpath_mock.return_value = _DEFAULT_DRIVER
    self.assertEqual(_DEFAULT_PORT,
                     serial_utils.FindTtyByDriver(_DEFAULT_DRIVER))
    glob_mock.assert_called_once_with('/dev/tty*')
    # Search stops at the first hit, so only ttyUSB0's driver link is read.
    realpath_mock.assert_called_once_with(
        '/sys/class/tty/ttyUSB0/device/driver')

  @mock.patch('glob.glob')
  @mock.patch('os.path.realpath')
  def testFindTtyByDriverSecondPort(self, realpath_mock, glob_mock):
    """Only the second candidate matches; it should be returned."""
    glob_mock.return_value = ['/dev/ttyUSB0', '/dev/ttyUSB1']
    realpath_mock.side_effect = ['foo', _DEFAULT_DRIVER]
    realpath_calls = [
        mock.call('/sys/class/tty/ttyUSB0/device/driver'),
        mock.call('/sys/class/tty/ttyUSB1/device/driver')]
    self.assertEqual('/dev/ttyUSB1',
                     serial_utils.FindTtyByDriver(_DEFAULT_DRIVER))
    glob_mock.assert_called_once_with('/dev/tty*')
    self.assertEqual(realpath_mock.call_args_list, realpath_calls)

  @mock.patch('glob.glob')
  @mock.patch('os.path.realpath')
  def testFindTtyByDriverNotFound(self, realpath_mock, glob_mock):
    """No candidate matches: every port is probed, then None is returned."""
    glob_mock.return_value = ['/dev/ttyUSB0', '/dev/ttyUSB1']
    realpath_mock.side_effect = ['foo', 'bar']
    realpath_calls = [
        mock.call('/sys/class/tty/ttyUSB0/device/driver'),
        mock.call('/sys/class/tty/ttyUSB1/device/driver')]
    self.assertIsNone(serial_utils.FindTtyByDriver(_DEFAULT_DRIVER))
    glob_mock.assert_called_once_with('/dev/tty*')
    self.assertEqual(realpath_mock.call_args_list, realpath_calls)

  @mock.patch('cros.factory.test.utils.serial_utils.DeviceInterfaceProtocol')
  @mock.patch('glob.glob')
  @mock.patch('os.path.realpath')
  def testFindTtyByDriverInterfaceProtocol(self, realpath_mock, glob_mock,
                                           device_interface_protocol_mock):
    """Both ports match the driver; interface_protocol disambiguates them."""
    glob_mock.return_value = ['/dev/ttyUSB0', '/dev/ttyUSB1']
    realpath_mock.side_effect = [_DEFAULT_DRIVER, _DEFAULT_DRIVER]
    realpath_calls = [
        mock.call('/sys/class/tty/ttyUSB0/device/driver'),
        mock.call('/sys/class/tty/ttyUSB1/device/driver')]
    # ttyUSB0 reports protocol '00', ttyUSB1 reports '01'.
    device_interface_protocol_mock.side_effect = ['00', '01']
    device_interface_protocol_calls = [
        mock.call('/sys/class/tty/ttyUSB0/device'),
        mock.call('/sys/class/tty/ttyUSB1/device')]
    self.assertEqual('/dev/ttyUSB1',
                     serial_utils.FindTtyByDriver(_DEFAULT_DRIVER,
                                                  interface_protocol='01'))
    glob_mock.assert_called_once_with('/dev/tty*')
    self.assertEqual(realpath_mock.call_args_list, realpath_calls)
    self.assertEqual(device_interface_protocol_mock.call_args_list,
                     device_interface_protocol_calls)

  @mock.patch('glob.glob')
  @mock.patch('os.path.realpath')
  def testFindTtyByDriverMultiple(self, realpath_mock, glob_mock):
    """multiple_ports=True returns all matching ports, not just the first."""
    glob_mock.return_value = ['/dev/ttyUSB0', '/dev/ttyUSB1']
    realpath_mock.side_effect = [_DEFAULT_DRIVER, _DEFAULT_DRIVER]
    realpath_calls = [
        mock.call('/sys/class/tty/ttyUSB0/device/driver'),
        mock.call('/sys/class/tty/ttyUSB1/device/driver')]
    self.assertEqual([_DEFAULT_PORT, '/dev/ttyUSB1'],
                     serial_utils.FindTtyByDriver(_DEFAULT_DRIVER,
                                                  multiple_ports=True))
    glob_mock.assert_called_once_with('/dev/tty*')
    self.assertEqual(realpath_mock.call_args_list, realpath_calls)
class FindTtyByPortIndexTest(unittest.TestCase):
  """Tests for serial_utils.FindTtyByPortIndex.

  Per the mocked call sequences below, each candidate's driver symlink is
  resolved first, and only on a driver match is the device path resolved to
  compare the USB port index.
  """

  @mock.patch('glob.glob')
  @mock.patch('os.path.realpath')
  def testFindTtyByPortIndex(self, realpath_mock, glob_mock):
    """First port matches both driver and port index."""
    glob_mock.return_value = ['/dev/ttyUSB0', '/dev/ttyUSB1']
    # First realpath call resolves the driver, second resolves the device
    # path which embeds the port index.
    realpath_mock.side_effect = [_DEFAULT_DRIVER, '/%s/' % _DEFAULT_INDEX]
    realpath_calls = [
        mock.call('/sys/class/tty/ttyUSB0/device/driver'),
        mock.call('/sys/class/tty/ttyUSB0/device')]
    self.assertEqual(_DEFAULT_PORT,
                     serial_utils.FindTtyByPortIndex(_DEFAULT_INDEX,
                                                     _DEFAULT_DRIVER))
    glob_mock.assert_called_once_with('/dev/tty*')
    self.assertEqual(realpath_mock.call_args_list, realpath_calls)

  @mock.patch('glob.glob')
  @mock.patch('os.path.realpath')
  def testFindTtyByPortIndexSecondPort(self, realpath_mock, glob_mock):
    """Driver mismatch on the first port; the second port matches fully."""
    glob_mock.return_value = ['/dev/ttyUSB0', '/dev/ttyUSB1']
    # ttyUSB0's driver doesn't match, so its device path is never resolved.
    realpath_mock.side_effect = ['foo', _DEFAULT_DRIVER,
                                 '/%s/' % _DEFAULT_INDEX]
    realpath_calls = [
        mock.call('/sys/class/tty/ttyUSB0/device/driver'),
        mock.call('/sys/class/tty/ttyUSB1/device/driver'),
        mock.call('/sys/class/tty/ttyUSB1/device')]
    self.assertEqual('/dev/ttyUSB1',
                     serial_utils.FindTtyByPortIndex(_DEFAULT_INDEX,
                                                     _DEFAULT_DRIVER))
    glob_mock.assert_called_once_with('/dev/tty*')
    self.assertEqual(realpath_mock.call_args_list, realpath_calls)

  @mock.patch('glob.glob')
  @mock.patch('os.path.realpath')
  def testFindTtyByPortIndexNotFound(self, realpath_mock, glob_mock):
    """No port has the wanted driver: returns None."""
    glob_mock.return_value = ['/dev/ttyUSB0', '/dev/ttyUSB1']
    realpath_mock.side_effect = ['foo', 'bar']
    realpath_calls = [
        mock.call('/sys/class/tty/ttyUSB0/device/driver'),
        mock.call('/sys/class/tty/ttyUSB1/device/driver')]
    self.assertIsNone(serial_utils.FindTtyByPortIndex(_DEFAULT_INDEX,
                                                      _DEFAULT_DRIVER))
    glob_mock.assert_called_once_with('/dev/tty*')
    self.assertEqual(realpath_mock.call_args_list, realpath_calls)
class SerialDeviceCtorTest(unittest.TestCase):
  """Tests for SerialDevice construction and Connect."""

  def testCtor(self):
    """Defaults: 0.2s send/receive interval, 0.5s retry interval, no log."""
    device = serial_utils.SerialDevice()
    self.assertEqual(0.2, device.send_receive_interval_secs)
    self.assertEqual(0.5, device.retry_interval_secs)
    self.assertFalse(device.log)

  @mock.patch('cros.factory.test.utils.serial_utils.OpenSerial')
  @mock.patch('cros.factory.test.utils.serial_utils.FindTtyByDriver')
  def testConnect(self, find_tty_by_driver_mock, open_serial_mock):
    """Connect(driver=...) resolves the port, then opens it 9600-8-N-1."""
    find_tty_by_driver_mock.return_value = _DEFAULT_PORT
    mock_serial = mock.Mock(serial.Serial)
    open_serial_mock.return_value = mock_serial
    device = serial_utils.SerialDevice()
    device.Connect(driver=_DEFAULT_DRIVER)
    find_tty_by_driver_mock.assert_called_once_with(_DEFAULT_DRIVER)
    # These are the expected default open parameters, incl. 0.5s timeouts.
    open_serial_mock.assert_called_once_with(
        port=_DEFAULT_PORT, baudrate=9600, bytesize=serial.EIGHTBITS,
        parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE,
        timeout=0.5, writeTimeout=0.5)

  def testConnectPortDriverMissing(self):
    """Connect without a port or driver must raise SerialException."""
    device = serial_utils.SerialDevice()
    self.assertRaises(serial.SerialException, device.Connect)

  @mock.patch('cros.factory.test.utils.serial_utils.FindTtyByDriver')
  def testConnectDriverLookupFailure(self, find_tty_by_driver_mock):
    """A driver lookup that yields no port makes Connect raise."""
    find_tty_by_driver_mock.return_value = ''
    device = serial_utils.SerialDevice()
    self.assertRaises(serial.SerialException, device.Connect,
                      driver='UnknownDriver')
    find_tty_by_driver_mock.assert_called_once_with('UnknownDriver')

  @mock.patch('cros.factory.test.utils.serial_utils.OpenSerial')
  def testCtorNoPortLookupIfPortSpecified(self, open_serial_mock):
    """An explicit port bypasses driver lookup entirely."""
    # FindTtyByDriver isn't called.
    open_serial_mock.return_value = None
    device = serial_utils.SerialDevice()
    device.Connect(driver='UnknownDriver', port=_DEFAULT_PORT)
    open_serial_mock.assert_called_once_with(
        port=_DEFAULT_PORT, baudrate=9600, bytesize=serial.EIGHTBITS,
        parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE,
        timeout=0.5, writeTimeout=0.5)
class SerialDeviceSendAndReceiveTest(unittest.TestCase):
  """Tests for SerialDevice.Send / Receive with the pyserial object mocked."""

  def setUp(self):
    self.device = serial_utils.SerialDevice()
    # Mock Serial and inject it.
    self.mock_serial = mock.Mock(serial.Serial)
    self.device._serial = self.mock_serial  # pylint: disable=protected-access

  def tearDown(self):
    del self.device
    # Dropping the device must close the underlying serial connection.
    self.mock_serial.close.assert_called_once_with()

  def testSend(self):
    """Send writes the command and flushes the output buffer."""
    self.device.Send(_COMMAND)
    self.mock_serial.write.assert_called_once_with(_COMMAND)
    self.mock_serial.flush.assert_called_once_with()

  def testSendTimeout(self):
    """A write timeout propagates as SerialTimeoutException."""
    self.mock_serial.write.side_effect = serial.SerialTimeoutException
    # write_timeout is set because Send reads it (presumably for the error
    # message -- confirm against serial_utils).
    self.mock_serial.write_timeout = 0.5
    self.assertRaises(serial.SerialTimeoutException, self.device.Send, _COMMAND)
    self.mock_serial.write.assert_called_once_with(_COMMAND)

  def testSendDisconnected(self):
    """A generic SerialException from write() propagates unchanged."""
    self.mock_serial.write.side_effect = serial.SerialException
    self.assertRaises(serial.SerialException, self.device.Send, _COMMAND)
    self.mock_serial.write.assert_called_once_with(_COMMAND)

  def testReceive(self):
    """Receive with no argument reads a single byte."""
    self.mock_serial.read.return_value = '.'
    self.assertEqual('.', self.device.Receive())
    self.mock_serial.read.assert_called_once_with(1)

  def testReceiveTimeout(self):
    """read() returning nothing raises SerialTimeoutException."""
    self.mock_serial.read.return_value = ''
    self.mock_serial.timeout = 0.5
    self.assertRaises(serial.SerialTimeoutException, self.device.Receive)
    self.mock_serial.read.assert_called_once_with(1)

  def testReceiveShortageTimeout(self):
    """A short read also counts as a timeout."""
    # Requested 5 bytes, got only 4 bytes.
    self.mock_serial.read.return_value = 'None'
    self.mock_serial.timeout = 0.5
    self.assertRaises(serial.SerialTimeoutException, self.device.Receive, 5)
    self.mock_serial.read.assert_called_once_with(5)

  def testReceiveWhatsInBuffer(self):
    """Receive(0) drains exactly what is pending in the input buffer."""
    IN_BUFFER = 'InBuf'
    self.mock_serial.in_waiting = len(IN_BUFFER)
    self.mock_serial.read.return_value = IN_BUFFER
    self.assertEqual(IN_BUFFER, self.device.Receive(0))
    self.mock_serial.read.assert_called_once_with(len(IN_BUFFER))
class SerialDeviceSendReceiveTest(unittest.TestCase):
  """Tests for SerialDevice.SendReceive with Send/Receive/FlushBuffer mocked."""

  def setUp(self):
    self.device = serial_utils.SerialDevice()
    # Mock methods to facilitate SendReceive testing.
    self.device.Send = mock.Mock()
    self.device.Receive = mock.Mock()
    self.device.FlushBuffer = mock.Mock()

  def tearDown(self):
    del self.device

  @mock.patch('time.sleep')
  def testSendReceive(self, sleep_mock):
    """Happy path: flush, send, wait the default interval, read one byte."""
    self.device.Receive.return_value = _RESPONSE
    self.assertEqual(_RESPONSE, self.device.SendReceive(_COMMAND))
    self.device.Send.assert_called_once_with(_COMMAND)
    sleep_mock.assert_called_once_with(_SEND_RECEIVE_INTERVAL_SECS)
    self.device.Receive.assert_called_once_with(_RECEIVE_SIZE)
    self.device.FlushBuffer.assert_called_once_with()

  @mock.patch('time.sleep')
  def testSendReceiveOverrideIntervalSecs(self, sleep_mock):
    """interval_secs overrides the default send/receive gap."""
    override_interval_secs = 1
    self.device.Receive.return_value = _RESPONSE
    self.assertEqual(
        _RESPONSE,
        self.device.SendReceive(_COMMAND,
                                interval_secs=override_interval_secs))
    self.device.Send.assert_called_once_with(_COMMAND)
    sleep_mock.assert_called_once_with(override_interval_secs)
    self.device.Receive.assert_called_once_with(_RECEIVE_SIZE)
    self.device.FlushBuffer.assert_called_once_with()

  @mock.patch('time.sleep')
  def testSendReceiveWriteTimeoutRetrySuccess(self, sleep_mock):
    """First Send times out; the single retry succeeds."""
    # Send timeout at first time & retry ok.
    self.device.Send.side_effect = [serial.SerialTimeoutException, None]
    send_calls = [mock.call(_COMMAND), mock.call(_COMMAND)]
    # Expected sleeps: retry backoff, then the normal send/receive gap.
    sleep_calls = [
        mock.call(_RETRY_INTERVAL_SECS),
        mock.call(_SEND_RECEIVE_INTERVAL_SECS)]
    self.device.Receive.return_value = _RESPONSE
    self.assertEqual(_RESPONSE, self.device.SendReceive(_COMMAND, retry=1))
    self.assertEqual(self.device.Send.call_args_list, send_calls)
    self.assertEqual(sleep_mock.call_args_list, sleep_calls)
    # Buffer is flushed once per attempt (two attempts here).
    self.assertEqual(2, self.device.FlushBuffer.call_count)
    self.device.Receive.assert_called_once_with(_RECEIVE_SIZE)

  @mock.patch('time.sleep')
  def testSendReceiveReadTimeoutRetrySuccess(self, sleep_mock):
    """First Receive times out; the retry re-sends and then reads ok."""
    send_calls = [mock.call(_COMMAND), mock.call(_COMMAND)]
    sleep_calls = [
        mock.call(_SEND_RECEIVE_INTERVAL_SECS),
        mock.call(_RETRY_INTERVAL_SECS),
        mock.call(_SEND_RECEIVE_INTERVAL_SECS)]
    # Read timeout at first time & retry ok.
    self.device.Receive.side_effect = [
        serial.SerialTimeoutException,
        _RESPONSE]
    receive_calls = [mock.call(_RECEIVE_SIZE), mock.call(_RECEIVE_SIZE)]
    self.assertEqual(_RESPONSE, self.device.SendReceive(_COMMAND, retry=1))
    self.assertEqual(self.device.Send.call_args_list, send_calls)
    self.assertEqual(sleep_mock.call_args_list, sleep_calls)
    self.assertEqual(self.device.Receive.call_args_list, receive_calls)
    self.assertEqual(2, self.device.FlushBuffer.call_count)

  @mock.patch('time.sleep')
  def testSendRequestWriteTimeoutRetryFailure(self, sleep_mock):
    """Send times out on every attempt: the exception finally propagates."""
    # Send timeout & retry still fail.
    self.device.Send.side_effect = [
        serial.SerialTimeoutException,
        serial.SerialTimeoutException]
    send_calls = [mock.call(_COMMAND), mock.call(_COMMAND)]
    self.assertRaises(serial.SerialTimeoutException, self.device.SendReceive,
                      _COMMAND, retry=1)
    self.assertEqual(self.device.Send.call_args_list, send_calls)
    sleep_mock.assert_called_once_with(_RETRY_INTERVAL_SECS)
    self.assertEqual(2, self.device.FlushBuffer.call_count)
class SerialDeviceSendExpectReceiveTest(unittest.TestCase):
  """Tests for SerialDevice.SendExpectReceive with SendReceive mocked out."""

  def setUp(self):
    self.device = serial_utils.SerialDevice()
    # Replace SendReceive so each test can dictate its outcome.
    self.device.SendReceive = mock.Mock()

  def tearDown(self):
    del self.device

  def _CheckDelegation(self):
    # SendExpectReceive always delegates to SendReceive with these args.
    self.device.SendReceive.assert_called_once_with(
        _COMMAND, _RECEIVE_SIZE, retry=0, interval_secs=None,
        suppress_log=True)

  def testSendExpectReceive(self):
    # Response matches the expectation -> True.
    self.device.SendReceive.return_value = _RESPONSE
    self.assertTrue(self.device.SendExpectReceive(_COMMAND, _RESPONSE))
    self._CheckDelegation()

  def testSendExpectReceiveMismatch(self):
    # Response differs from the expectation -> False, no exception.
    self.device.SendReceive.return_value = 'x'
    self.assertFalse(self.device.SendExpectReceive(_COMMAND, _RESPONSE))
    self._CheckDelegation()

  def testSendExpectReceiveTimeout(self):
    # A timeout is swallowed and reported as False.
    self.device.SendReceive.side_effect = serial.SerialTimeoutException
    self.assertFalse(self.device.SendExpectReceive(_COMMAND, _RESPONSE))
    self._CheckDelegation()
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
from . import navigation, filters, filter_layout
|
import pytest
import sys
sys.path.append("..")
from utils import recover
def test_recovery():
    """End-to-end check of key recovery against fixed known-answer vectors.

    Requires the fixture files "backup.zip" and "priv.pem" in the working
    directory, decrypted with the passphrase below.
    """
    privkey, chaincode = recover.restore_key_and_chaincode("backup.zip", "priv.pem", "Thefireblocks1!")
    # Expected private key for this particular backup fixture.
    assert(privkey == 0x473d1820ca4bf7cf6b018a8520b1ec0849cb99bce4fff45c5598723f67b3bd52)
    pub = recover.get_public_key(privkey)
    # Compressed public key (33 bytes, hex) derived from the private key.
    assert(pub == "021d84f3b6d7c6888f81c7cc381b658d85319f27e1ea9c93dff128667fb4b82ba0")
    # Extended-key serializations: private (xprv) and public (xpub).
    assert(recover.encode_extended_key(privkey, chaincode, False) == "xprv9s21ZrQH143K24Mfq5zL5MhWK9hUhhGbd45hLXo2Pq2oqzMMo63oStZzF9aunJDs4SsrmoxycAo6xxBTHawSz5sYxEy8TpCkv66Sci373DJ")
    assert(recover.encode_extended_key(pub, chaincode, True) == "xpub661MyMwAqRbcEYS8w7XLSVeEsBXy79zSzH1J8vCdxAZningWLdN3zgtU6QJJZSgiCXT6sq7wa2jCk5t4Vv1r1E4q1venKghAAdyzieufGyX")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (C) 2014 Arulalan.T <arulalant@gmail.com>
#
# This file is part of 'open-tamil/txt2unicode' package examples
#
import sys
sys.path.append('../..')
from tamil.txt2unicode import tscii2unicode
# TSCII-encoded Tamil sample text (renders as mojibake in this UTF-8 file;
# the bytes are meaningful to tscii2unicode).
tscii = """¾¢ÕÅûÙÅ÷
«ÕǢ ¾¢ÕìÌÈû """
uni = tscii2unicode(tscii)
# Use a context manager so the file is closed even if write() raises, and
# write UTF-8 explicitly so the result does not depend on the platform's
# default encoding (Tamil text is not representable in e.g. cp1252).
with open('unicode-result.txt', 'w', encoding='utf-8') as f:
    f.write(uni)
print("tscii", tscii)
print("unicode", uni)
print("converted unicode stored in 'unicode-result.txt' file")
|
import acoustic_array
if __name__ == '__main__':
    # Demo: program a linear phase ramp (8 counts/channel) across all 256
    # channels of the acoustic array, then make that bank the live output.
    # Open the phase controller
    pc = acoustic_array.Controller()
    # The controller has 32 output banks (0-31), and 256 channels (0-255).
    # Each pin on the output of the device controls 16 channels, i.e.
    # FPGA pin 1: channels 0-15
    # FPGA pin 2: channels 16-31
    # ... and so on.
    # Additionally, pins 17-20 are the control outputs to the shift registers:
    # FPGA pin 17: data (DS or pin 14 on 74LVC595)
    # FPGA pin 18: shift clock (SHCP or pin 11 on 74LVC595)
    # FPGA pin 19: output register clock (STCP or pin 12 on 74LVC595)
    # Each shift register only has 8 channels, however, you can get the full 16
    # on each output pin by connecting Q7S (pin 9) of one 74LVC595 to DS (pin
    # 14) of the second. SHCP and STCP should be connected to the same
    # channels on the first register.
    # Note the additional requirements for each register:
    # GND (PIN 8) => ground (G on FPGA)
    # ~MR (PIN 10) => 3.3 V (on FPGA)
    # ~OE (PIN 13) => ground
    # VCC (PIN 16) => 3.3 V
    #
    # At any given instance in time it is reading from one of the 32 `banks' to
    # produce the output. You can also write to any of these banks at any
    # time; for example you might be reading from bank 1, then write to bank
    # 2 and switch over once you're finished writing.
    #
    # All writes to the device take a command character and two 1 byte data
    # characters. If the command character is capitalized, it will return
    # a response, otherwise it will not.
    # The first response byte is an error code, which should always be 0.
    # The second two bytes vary based on the command.
    # Here we will write to and then switch to bank 12, which is arbitrary.
    bank = 12
    # Select address to write to = bank, channel
    # In this case we will write all 256 channels, so channel = 0
    print('Write address:', pc.cmd('A', bank, 0))
    for n in range(256):
        # Both phase and duty cycle range from 0-255, thus 128 is 50% duty
        # Each channel advances by 8 in phase -- smaller changes won't be
        # visible, as the default driver has only 32 updates per cycle.
        phase = (n * 8) % 256
        # 50% duty cycle
        duty = 128
        # This command writes to the current address, and updates the address by one.
        # The returned data is the bank/channel just written.
        # NOTE(review): 'Sucessfully' is a typo in user-visible output; left
        # unchanged because this pass only touches comments.
        print('Sucessfully wrote data to:', pc.cmd('W', phase, duty))
    # Select the output bank
    print('Selected bank:', pc.cmd('B', 0, bank))
|
#
# Copyright (c) 2017 Electronic Arts Inc. All Rights Reserved
#
from __future__ import print_function
from __future__ import absolute_import
from builtins import object
import json
import shutil
import os
import re
import time
import logging
import threading
from sys import platform
from .base import BaseJob
class DeleteFiles(BaseJob):
    """Job that best-effort deletes a list of local files.

    Parameters (JSON): {'files': [path, ...]}. Missing files are skipped
    silently; deletion failures are ignored (see note below).
    """

    def __call__(self, parameters, pipe, log):
        params = json.loads(parameters)
        # .get() tolerates a missing 'files' key, like the original 'in' check.
        for f in params.get('files', []):
            if os.path.exists(f):
                pipe.send('Delete %s' % f)
                # ED: Should this job fail if we cannot delete the files?
                # Keep best-effort semantics, but only swallow OS-level errors;
                # a bare `except:` would also hide KeyboardInterrupt/SystemExit
                # and genuine programming errors.
                try:
                    os.remove(f)
                except OSError:
                    pass

    class Meta(object):
        # NOTE(review): 'local of remote' looks like a typo for 'local or
        # remote'; string left unchanged to avoid altering runtime metadata.
        description = 'Delete local of remote files'
class ExportTake(BaseJob):
    """Job that fans out one CopyFiles child per capture node to export a take.

    Parameters (JSON):
      root_export_path: destination root on the network storage.
      nodes: mapping of node name -> file list for that node.
    """

    def __call__(self, parameters, pipe, log):
        params = json.loads(parameters)
        # Fail fast on malformed parameters (idiomatic 'not in' checks).
        if 'root_export_path' not in params:
            raise Exception('Error in parameters, "root_export_path" not found')
        if 'nodes' not in params:
            raise Exception('Error in parameters, "nodes" not found')
        child_to_launch = []
        # Launch one child job for each node that needs to copy files
        for node_name in params['nodes']:
            file_list = params['nodes'][node_name]
            # Children delete their sources after a fully successful copy.
            child_params = {
                'file_list': file_list,
                'root_export_path': params['root_export_path'],
                'delete_source_files': True,
            }
            child_launch_info = {
                'job_class': 'jobs.archive.CopyFiles',
                'params': json.dumps(child_params),
                'node_name': node_name,
                'req_gpu': False,
            }
            child_to_launch.append(child_launch_info)
        self.yieldToChildren(child_to_launch)

    class Meta(object):
        description = 'This job is used to copy files from a local folder to a network storage'
def safeCopyFile(src, dest, block_size=64*1024*1024, callback=None):
    """Copy src to dest in blocks, replacing any existing destination file.

    Args:
        src: source file path.
        dest: destination file path (removed first if it exists).
        block_size: bytes read per iteration.
        callback: optional callable invoked with the size of each block
            copied (used for progress reporting).
    """
    if os.path.exists(dest):
        os.remove(dest)
    with open(src, 'rb') as fin:
        with open(dest, 'wb') as fout:
            arr = fin.read(block_size)
            # BUG FIX: in binary mode read() returns bytes, so the old
            # `while arr != ""` (bytes vs str) was always True on Python 3,
            # looping forever at EOF. Truthiness handles both str and bytes.
            while arr:
                fout.write(arr)
                if callback:
                    callback(len(arr))
                arr = fin.read(block_size)
def nice_time(s):
    """Render a duration of *s* seconds as a short human-readable string.

    Shows the two most significant units only: 'H hours M minutes',
    'M minutes S seconds', or 'S seconds'.
    """
    hours, rem = divmod(s, 3600)
    minutes, rem = divmod(rem, 60)
    seconds = int(rem)
    if hours > 0:
        return '%d hours %d minutes' % (hours, minutes)
    if minutes > 0:
        return '%d minutes %d seconds' % (minutes, seconds)
    return '%d seconds' % seconds
def nice_size(s):
    """Render a byte count using binary (1024-based) units, truncated."""
    for unit in ('Bytes', 'kB', 'MB', 'GB'):
        if s < 1024:
            return '%d %s' % (s, unit)
        s = s // 1024
    # Anything at TB scale or above stays in TB.
    return '%d TB' % s
class ProgressPercentage(object):
    """Progress callback for block-wise file copies over a batch of files.

    Accumulates total bytes copied across all files and sends a status line
    (percentage, file index, speed, ETA) through the job pipe on each call.
    Instances are callable and thread-safe.
    """

    def __init__(self, src, pipe):
        # Accept either a single filename or a list of filenames.
        if type(src) is list:
            self._filenames = src
        else:
            self._filenames = [src]
        # Total batch size in bytes; missing/unreadable files count as 0.
        self._size = sum([self.safe_get_size(f) for f in self._filenames])
        self._seen_so_far = 0
        self._lock = threading.Lock()
        self._start_time = time.time()
        self._file_index = 0
        self._pipe = pipe

    def safe_get_size(self, filename):
        """Return the size of filename in bytes, or 0 if missing/unreadable."""
        size = 0
        try:
            if os.path.exists(filename):
                size = os.path.getsize(filename)
        except OSError:
            # Narrowed from a bare `except:`: only swallow filesystem errors
            # (e.g. the file vanishing between exists() and getsize()).
            pass
        return size

    def next_file(self):
        """Advance the 1-based file counter shown in progress messages."""
        self._file_index = self._file_index + 1

    def __call__(self, bytes_amount):
        """Record bytes_amount more bytes copied and report progress."""
        with self._lock:
            self._seen_so_far += bytes_amount
            percent = 100.0 * self._seen_so_far / self._size if self._size > 0 else 0
            # Update stats
            elapsed = time.time() - self._start_time
            if elapsed > 0.0:
                copy_speed = self._seen_so_far/elapsed
                # Compute estimated ETA; blank until progress is measurable
                # or once the copy is complete.
                eta = ''
                if copy_speed>0 and self._seen_so_far>0 and self._seen_so_far < self._size:
                    eta = nice_time(max(self._size-self._seen_so_far,0)/copy_speed) + ' remaining'
                self._pipe.send('Copying [%d%%] File %d of %d (%s/s) %s' % (int(percent), self._file_index+1,len(self._filenames),nice_size(int(copy_speed)), eta))
class CopyFiles(BaseJob):
    """Job that copies (src, dest_folder) pairs with progress and size checks.

    Parameters (JSON):
      file_list: list of [src, dest_folder] pairs.
      delete_source_files: if truthy, sources are removed only after ALL
        copies succeeded, so a failed copy never loses originals.
    """

    def __call__(self, parameters, pipe, log):
        params = json.loads(parameters)
        delete_src_files = 'delete_source_files' in params and params['delete_source_files']
        if 'file_list' in params:
            # One tracker for the whole batch so the percentage reflects
            # total bytes, not per-file bytes.
            progress = ProgressPercentage([src for src,dest in params['file_list']], pipe)
            for i,t in enumerate(params['file_list']):
                src,dest = t
                # destination folder
                try:
                    os.makedirs(dest)
                except:
                    pass  # likely already exists; verified just below
                if not os.path.exists(dest):
                    raise Exception('Cannot create folder %s' % dest)
                dest_file = os.path.join(dest,os.path.split(src)[1])
                log.info('Copy %s to %s' % (src, dest_file))
                # Check if file already exists and should be skipped
                skip_file = False
                if os.path.exists(dest_file) and not os.path.exists(src):
                    # Destination present and source gone: presumably a
                    # previous run already moved this file.
                    log.info('Skip existing file: %s' % dest_file)
                    skip_file = True
                if not skip_file:
                    if not os.path.exists(src):
                        raise Exception('Source file does not exist: %s' % src)
                    file_size = os.path.getsize(src)
                    safeCopyFile(src, dest_file, callback=progress)
                    if not os.path.exists(dest_file):
                        raise Exception('Destination file does not exist: %s' % dest_file)
                    # Size comparison guards against truncated copies.
                    if not os.path.getsize(dest_file) == file_size:
                        raise Exception('File size mismatch: %s' % dest_file)
                progress.next_file()
            # Everything copied, delete source files
            if delete_src_files:
                for src,dest in params['file_list']:
                    if os.path.exists(src):
                        log.debug('Deleting %s' % src)
                        try:
                            os.remove(src)
                        except:
                            log.error('Could not delete %s' % src)
        log.info('Done')

    class Meta(object):
        description = 'This job is used to copy files from a local folder to a network storage'
if __name__ == "__main__":
    # Manual smoke test for safeCopyFile. The hard-coded Windows paths mean
    # this only works on a capture machine with matching data on disk.
    # Test SafeCopyFile
    print('Test SafeCopyFile')
    src = r'C:\ava_capture\20170508_094614\26681150_000.avi'
    dest = r'C:\ava_capture\20170508_094614\26681150_000b.avi'
    safeCopyFile(src, dest, 8*1024*1024)
|
#!/usr/bin/env pytest
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test /vsioss
# Author: Even Rouault <even dot rouault at spatialys dot com>
#
###############################################################################
# Copyright (c) 2017, Even Rouault <even dot rouault at spatialys dot com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import stat
import sys
from osgeo import gdal
import gdaltest
import webserver
import pytest
def open_for_read(uri):
    """
    Opens a test file for reading.
    """
    # Third argument (1) enables error reporting so failures can be
    # inspected via gdal.VSIGetLastErrorMsg() in the tests below.
    return gdal.VSIFOpenExL(uri, 'rb', 1)
###############################################################################
def test_visoss_init():
    """Save and clear OSS-related config options before the other tests run."""
    gdaltest.oss_vars = {}
    for var in ('OSS_SECRET_ACCESS_KEY', 'OSS_ACCESS_KEY_ID', 'OSS_TIMESTAMP', 'OSS_HTTPS', 'OSS_VIRTUAL_HOSTING', 'OSS_ENDPOINT'):
        # Remember the user's value (so later cleanup can restore it), then
        # blank it so tests start from a credential-less state.
        gdaltest.oss_vars[var] = gdal.GetConfigOption(var)
        if gdaltest.oss_vars[var] is not None:
            gdal.SetConfigOption(var, "")
    # With no credentials configured, no signed URL can be produced.
    assert gdal.GetSignedURL('/vsioss/foo/bar') is None
###############################################################################
# Error cases
def test_visoss_1():
    """Missing credentials must fail with an error naming the missing option."""
    if not gdaltest.built_against_curl():
        pytest.skip()
    # Missing OSS_SECRET_ACCESS_KEY
    gdal.ErrorReset()
    with gdaltest.error_handler():
        f = open_for_read('/vsioss/foo/bar')
    assert f is None and gdal.VSIGetLastErrorMsg().find('OSS_SECRET_ACCESS_KEY') >= 0
    # Same failure through the streaming filesystem.
    gdal.ErrorReset()
    with gdaltest.error_handler():
        f = open_for_read('/vsioss_streaming/foo/bar')
    assert f is None and gdal.VSIGetLastErrorMsg().find('OSS_SECRET_ACCESS_KEY') >= 0
    gdal.SetConfigOption('OSS_SECRET_ACCESS_KEY', 'OSS_SECRET_ACCESS_KEY')
    # Missing OSS_ACCESS_KEY_ID
    gdal.ErrorReset()
    with gdaltest.error_handler():
        f = open_for_read('/vsioss/foo/bar')
    assert f is None and gdal.VSIGetLastErrorMsg().find('OSS_ACCESS_KEY_ID') >= 0
    gdal.SetConfigOption('OSS_ACCESS_KEY_ID', 'OSS_ACCESS_KEY_ID')
def test_visoss_real_test():
    """Hit the real OSS service with dummy credentials; expect a clean error."""
    if not gdaltest.built_against_curl():
        pytest.skip()
    if gdaltest.skip_on_travis():
        pytest.skip()
    # ERROR 1: The OSS Access Key Id you provided does not exist in our records.
    gdal.ErrorReset()
    with gdaltest.error_handler():
        f = open_for_read('/vsioss/foo/bar.baz')
    if f is not None or gdal.VSIGetLastErrorMsg() == '':
        if f is not None:
            gdal.VSIFCloseL(f)
        if gdal.GetConfigOption('APPVEYOR') is not None:
            # Tolerate unexpected results on AppVeyor (presumably due to
            # its network environment -- confirm if this branch still fires).
            return
        pytest.fail(gdal.VSIGetLastErrorMsg())
    gdal.ErrorReset()
    with gdaltest.error_handler():
        f = open_for_read('/vsioss_streaming/foo/bar.baz')
    assert f is None and gdal.VSIGetLastErrorMsg() != ''
###############################################################################
def test_visoss_start_webserver():
    """Launch the local fake OSS webserver and point /vsioss at it."""
    gdaltest.webserver_process = None
    gdaltest.webserver_port = 0
    if not gdaltest.built_against_curl():
        pytest.skip()
    (gdaltest.webserver_process, gdaltest.webserver_port) = webserver.launch(handler=webserver.DispatcherHttpHandler)
    if gdaltest.webserver_port == 0:
        pytest.skip()
    # Dummy credentials matched by the fake server's Authorization check.
    gdal.SetConfigOption('OSS_SECRET_ACCESS_KEY', 'OSS_SECRET_ACCESS_KEY')
    gdal.SetConfigOption('OSS_ACCESS_KEY_ID', 'OSS_ACCESS_KEY_ID')
    # Fixed timestamp so request signatures are reproducible across runs.
    gdal.SetConfigOption('CPL_OSS_TIMESTAMP', 'my_timestamp')
    gdal.SetConfigOption('OSS_HTTPS', 'NO')
    gdal.SetConfigOption('OSS_VIRTUAL_HOSTING', 'NO')
    gdal.SetConfigOption('OSS_ENDPOINT', '127.0.0.1:%d' % gdaltest.webserver_port)
###############################################################################
def get_oss_fake_bucket_resource_method(request):
    """Webserver handler: check the OSS Authorization header, serve 'foo'."""
    request.protocol_version = 'HTTP/1.1'
    if 'Authorization' not in request.headers:
        sys.stderr.write('Bad headers: %s\n' % str(request.headers))
        request.send_response(403)
        return
    # Signature precomputed for the dummy credentials and fixed timestamp
    # configured in test_visoss_start_webserver.
    expected_authorization = 'OSS OSS_ACCESS_KEY_ID:ZFgKjvMtWUwm9CTeCYoPomhuJiE='
    if request.headers['Authorization'] != expected_authorization:
        sys.stderr.write("Bad Authorization: '%s'\n" % str(request.headers['Authorization']))
        request.send_response(403)
        return
    # Valid request: return the 3-byte body 'foo'.
    request.send_response(200)
    request.send_header('Content-type', 'text/plain')
    request.send_header('Content-Length', 3)
    request.send_header('Connection', 'close')
    request.end_headers()
    request.wfile.write("""foo""".encode('ascii'))
###############################################################################
# Test with a fake OSS server
def test_visoss_2():
if gdaltest.webserver_port == 0:
pytest.skip()
signed_url = gdal.GetSignedURL('/vsioss/oss_fake_bucket/resource',
['START_DATE=20180212T123456Z'])
assert (signed_url in ('http://127.0.0.1:8080/oss_fake_bucket/resource?Expires=1518442496&OSSAccessKeyId=OSS_ACCESS_KEY_ID&Signature=bpFqur6tQMNN7Xe7UHVFFrugmgs%3D',
'http://127.0.0.1:8081/oss_fake_bucket/resource?Expires=1518442496&OSSAccessKeyId=OSS_ACCESS_KEY_ID&Signature=bpFqur6tQMNN7Xe7UHVFFrugmgs%3D'))
handler = webserver.SequentialHandler()
handler.add('GET', '/oss_fake_bucket/resource', custom_method=get_oss_fake_bucket_resource_method)
with webserver.install_http_handler(handler):
f = open_for_read('/vsioss/oss_fake_bucket/resource')
assert f is not None
data = gdal.VSIFReadL(1, 4, f).decode('ascii')
gdal.VSIFCloseL(f)
assert data == 'foo'
handler = webserver.SequentialHandler()
handler.add('GET', '/oss_fake_bucket/resource', custom_method=get_oss_fake_bucket_resource_method)
with webserver.install_http_handler(handler):
f = open_for_read('/vsioss_streaming/oss_fake_bucket/resource')
assert f is not None
data = gdal.VSIFReadL(1, 4, f).decode('ascii')
gdal.VSIFCloseL(f)
assert data == 'foo'
handler = webserver.SequentialHandler()
def method(request):
request.protocol_version = 'HTTP/1.1'
request.send_response(400)
response = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>AccessDenied</Code>
<Message>The bucket you are attempting to access must be addressed using the specified endpoint. Please send all future requests to this endpoint.</Message>
<HostId>unused</HostId>
<Bucket>unuset</Bucket>
<Endpoint>localhost:%d</Endpoint>
</Error>""" % request.server.port
response = '%x\r\n%s\r\n0\r\n\r\n' % (len(response), response)
request.send_header('Content-type', 'application/xml')
request.send_header('Transfer-Encoding', 'chunked')
request.send_header('Connection', 'close')
request.end_headers()
request.wfile.write(response.encode('ascii'))
handler.add('GET', '/oss_fake_bucket/redirect', custom_method=method)
def method(request):
request.protocol_version = 'HTTP/1.1'
if request.headers['Host'].startswith('localhost'):
request.send_response(200)
request.send_header('Content-type', 'text/plain')
request.send_header('Content-Length', 3)
request.send_header('Connection', 'close')
request.end_headers()
request.wfile.write("""foo""".encode('ascii'))
else:
sys.stderr.write('Bad headers: %s\n' % str(request.headers))
request.send_response(403)
handler.add('GET', '/oss_fake_bucket/redirect', custom_method=method)
# Test region and endpoint 'redirects'
with webserver.install_http_handler(handler):
f = open_for_read('/vsioss/oss_fake_bucket/redirect')
assert f is not None
data = gdal.VSIFReadL(1, 4, f).decode('ascii')
gdal.VSIFCloseL(f)
if data != 'foo':
if gdaltest.is_travis_branch('trusty'):
pytest.skip('Skipped on trusty branch, but should be investigated')
pytest.fail(data)
# Test region and endpoint 'redirects'
handler.req_count = 0
with webserver.install_http_handler(handler):
f = open_for_read('/vsioss_streaming/oss_fake_bucket/redirect')
assert f is not None
data = gdal.VSIFReadL(1, 4, f).decode('ascii')
gdal.VSIFCloseL(f)
assert data == 'foo'
handler = webserver.SequentialHandler()
def method(request):
# /vsioss_streaming/ should have remembered the change of region and endpoint
if not request.headers['Host'].startswith('localhost'):
sys.stderr.write('Bad headers: %s\n' % str(request.headers))
request.send_response(403)
request.protocol_version = 'HTTP/1.1'
request.send_response(400)
response = 'bla'
response = '%x\r\n%s\r\n0\r\n\r\n' % (len(response), response)
request.send_header('Content-type', 'application/xml')
request.send_header('Transfer-Encoding', 'chunked')
request.send_header('Connection', 'close')
request.end_headers()
request.wfile.write(response.encode('ascii'))
handler.add('GET', '/oss_fake_bucket/non_xml_error', custom_method=method)
gdal.ErrorReset()
with webserver.install_http_handler(handler):
with gdaltest.error_handler():
f = open_for_read('/vsioss_streaming/oss_fake_bucket/non_xml_error')
assert f is None and gdal.VSIGetLastErrorMsg().find('bla') >= 0
handler = webserver.SequentialHandler()
response = '<?xml version="1.0" encoding="UTF-8"?><oops>'
response = '%x\r\n%s\r\n0\r\n\r\n' % (len(response), response)
handler.add('GET', '/oss_fake_bucket/invalid_xml_error', 400,
{'Content-type': 'application/xml',
'Transfer-Encoding': 'chunked',
'Connection': 'close'}, response)
gdal.ErrorReset()
with webserver.install_http_handler(handler):
with gdaltest.error_handler():
f = open_for_read('/vsioss_streaming/oss_fake_bucket/invalid_xml_error')
assert f is None and gdal.VSIGetLastErrorMsg().find('<oops>') >= 0
handler = webserver.SequentialHandler()
response = '<?xml version="1.0" encoding="UTF-8"?><Error/>'
response = '%x\r\n%s\r\n0\r\n\r\n' % (len(response), response)
handler.add('GET', '/oss_fake_bucket/no_code_in_error', 400,
{'Content-type': 'application/xml',
'Transfer-Encoding': 'chunked',
'Connection': 'close'}, response)
gdal.ErrorReset()
with webserver.install_http_handler(handler):
with gdaltest.error_handler():
f = open_for_read('/vsioss_streaming/oss_fake_bucket/no_code_in_error')
assert f is None and gdal.VSIGetLastErrorMsg().find('<Error/>') >= 0
handler = webserver.SequentialHandler()
response = '<?xml version="1.0" encoding="UTF-8"?><Error><Code>AuthorizationHeaderMalformed</Code></Error>'
response = '%x\r\n%s\r\n0\r\n\r\n' % (len(response), response)
handler.add('GET', '/oss_fake_bucket/no_region_in_AuthorizationHeaderMalformed_error', 400,
{'Content-type': 'application/xml',
'Transfer-Encoding': 'chunked',
'Connection': 'close'}, response)
gdal.ErrorReset()
with webserver.install_http_handler(handler):
with gdaltest.error_handler():
f = open_for_read('/vsioss_streaming/oss_fake_bucket/no_region_in_AuthorizationHeaderMalformed_error')
assert f is None and gdal.VSIGetLastErrorMsg().find('<Error>') >= 0
handler = webserver.SequentialHandler()
response = '<?xml version="1.0" encoding="UTF-8"?><Error><Code>PermanentRedirect</Code></Error>'
response = '%x\r\n%s\r\n0\r\n\r\n' % (len(response), response)
handler.add('GET', '/oss_fake_bucket/no_endpoint_in_PermanentRedirect_error', 400,
{'Content-type': 'application/xml',
'Transfer-Encoding': 'chunked',
'Connection': 'close'}, response)
gdal.ErrorReset()
with webserver.install_http_handler(handler):
with gdaltest.error_handler():
f = open_for_read('/vsioss_streaming/oss_fake_bucket/no_endpoint_in_PermanentRedirect_error')
assert f is None and gdal.VSIGetLastErrorMsg().find('<Error>') >= 0
handler = webserver.SequentialHandler()
response = '<?xml version="1.0" encoding="UTF-8"?><Error><Code>bla</Code></Error>'
response = '%x\r\n%s\r\n0\r\n\r\n' % (len(response), response)
handler.add('GET', '/oss_fake_bucket/no_message_in_error', 400,
{'Content-type': 'application/xml',
'Transfer-Encoding': 'chunked',
'Connection': 'close'}, response)
gdal.ErrorReset()
with webserver.install_http_handler(handler):
with gdaltest.error_handler():
f = open_for_read('/vsioss_streaming/oss_fake_bucket/no_message_in_error')
assert f is None and gdal.VSIGetLastErrorMsg().find('<Error>') >= 0
###############################################################################
# Test ReadDir() with a fake OSS server
def test_visoss_3():
    """Test ReadDir(), VSIStatL() and CPL_VSIL_CURL_NON_CACHED with a fake OSS server."""
    if gdaltest.webserver_port == 0:
        pytest.skip()

    handler = webserver.SequentialHandler()

    def method(request):
        # First listing page: truncated result advertising a NextMarker,
        # so the client must issue a second request with marker=bla.
        request.protocol_version = 'HTTP/1.1'
        request.send_response(200)
        request.send_header('Content-type', 'application/xml')
        response = """<?xml version="1.0" encoding="UTF-8"?>
            <ListBucketResult>
                <Prefix>a_dir/</Prefix>
                <NextMarker>bla</NextMarker>
                <Contents>
                    <Key>a_dir/resource3.bin</Key>
                    <LastModified>1970-01-01T00:00:01.000Z</LastModified>
                    <Size>123456</Size>
                </Contents>
            </ListBucketResult>
            """
        request.send_header('Content-Length', len(response))
        request.end_headers()
        request.wfile.write(response.encode('ascii'))

    handler.add('GET', '/oss_fake_bucket2/?delimiter=%2F&prefix=a_dir%2F', custom_method=method)

    def method(request):
        # Second (final) listing page: one more file plus a common prefix (subdir).
        request.protocol_version = 'HTTP/1.1'
        request.send_response(200)
        request.send_header('Content-type', 'application/xml')
        response = """<?xml version="1.0" encoding="UTF-8"?>
            <ListBucketResult>
                <Prefix>a_dir/</Prefix>
                <Contents>
                    <Key>a_dir/resource4.bin</Key>
                    <LastModified>2015-10-16T12:34:56.000Z</LastModified>
                    <Size>456789</Size>
                </Contents>
                <CommonPrefixes>
                    <Prefix>a_dir/subdir/</Prefix>
                </CommonPrefixes>
            </ListBucketResult>
            """
        request.send_header('Content-Length', len(response))
        request.end_headers()
        request.wfile.write(response.encode('ascii'))

    handler.add('GET', '/oss_fake_bucket2/?delimiter=%2F&marker=bla&prefix=a_dir%2F', custom_method=method)

    with webserver.install_http_handler(handler):
        f = open_for_read('/vsioss/oss_fake_bucket2/a_dir/resource3.bin')
    if f is None:
        if gdaltest.is_travis_branch('trusty'):
            pytest.skip('Skipped on trusty branch, but should be investigated')
        pytest.fail()
    gdal.VSIFCloseL(f)

    # The directory listing fetched as a side effect of the open above must now
    # be fully cached: an empty handler means no network access is expected.
    with webserver.install_http_handler(webserver.SequentialHandler()):
        dir_contents = gdal.ReadDir('/vsioss/oss_fake_bucket2/a_dir')
    assert dir_contents == ['resource3.bin', 'resource4.bin', 'subdir']
    assert gdal.VSIStatL('/vsioss/oss_fake_bucket2/a_dir/resource3.bin').size == 123456
    assert gdal.VSIStatL('/vsioss/oss_fake_bucket2/a_dir/resource3.bin').mtime == 1

    # ReadDir on something known to be a file shouldn't cause network access
    dir_contents = gdal.ReadDir('/vsioss/oss_fake_bucket2/a_dir/resource3.bin')
    assert dir_contents is None

    # Test CPL_VSIL_CURL_NON_CACHED: every spelling of the option that covers
    # /vsioss/oss_non_cached must force a re-fetch on each access.
    for config_option_value in ['/vsioss/oss_non_cached/test.txt',
                                '/vsioss/oss_non_cached',
                                '/vsioss/oss_non_cached:/vsioss/unrelated',
                                '/vsioss/unrelated:/vsioss/oss_non_cached',
                                '/vsioss/unrelated:/vsioss/oss_non_cached:/vsioss/unrelated']:
        with gdaltest.config_option('CPL_VSIL_CURL_NON_CACHED', config_option_value):

            handler = webserver.SequentialHandler()
            handler.add('GET', '/oss_non_cached/test.txt', 200, {}, 'foo')

            with webserver.install_http_handler(handler):
                f = open_for_read('/vsioss/oss_non_cached/test.txt')
                assert f is not None, config_option_value
                data = gdal.VSIFReadL(1, 3, f).decode('ascii')
                gdal.VSIFCloseL(f)
                assert data == 'foo', config_option_value

            # Server content changes: the non-cached path must observe it.
            handler = webserver.SequentialHandler()
            handler.add('GET', '/oss_non_cached/test.txt', 200, {}, 'bar2')

            with webserver.install_http_handler(handler):
                size = gdal.VSIStatL('/vsioss/oss_non_cached/test.txt').size
            assert size == 4, config_option_value

            handler = webserver.SequentialHandler()
            handler.add('GET', '/oss_non_cached/test.txt', 200, {}, 'foo')

            with webserver.install_http_handler(handler):
                size = gdal.VSIStatL('/vsioss/oss_non_cached/test.txt').size
                if size != 3:
                    print(config_option_value)
                    # Report the offending size: the previous code failed with
                    # 'data', a stale payload from an earlier request.
                    pytest.fail('size = %d, expected 3' % size)

            handler = webserver.SequentialHandler()
            handler.add('GET', '/oss_non_cached/test.txt', 200, {}, 'bar2')

            with webserver.install_http_handler(handler):
                f = open_for_read('/vsioss/oss_non_cached/test.txt')
                assert f is not None, config_option_value
                data = gdal.VSIFReadL(1, 4, f).decode('ascii')
                gdal.VSIFCloseL(f)
                assert data == 'bar2', config_option_value

    # Retry without option
    for config_option_value in [None,
                                '/vsioss/oss_non_cached/bar.txt']:
        with gdaltest.config_option('CPL_VSIL_CURL_NON_CACHED', config_option_value):

            handler = webserver.SequentialHandler()
            if config_option_value is None:
                # First iteration only: requests are expected; the second
                # iteration must be served entirely from the cache.
                handler.add('GET', '/oss_non_cached/?delimiter=%2F', 200, {'Content-type': 'application/xml'},
                            """<?xml version="1.0" encoding="UTF-8"?>
                    <ListBucketResult>
                        <Prefix>/</Prefix>
                        <Contents>
                            <Key>/test.txt</Key>
                            <LastModified>1970-01-01T00:00:01.000Z</LastModified>
                            <Size>40</Size>
                        </Contents>
                        <Contents>
                            <Key>/test2.txt</Key>
                            <LastModified>1970-01-01T00:00:01.000Z</LastModified>
                            <Size>40</Size>
                        </Contents>
                    </ListBucketResult>
                    """)
                handler.add('GET', '/oss_non_cached/test.txt', 200, {}, 'foo')

            with webserver.install_http_handler(handler):
                f = open_for_read('/vsioss/oss_non_cached/test.txt')
                assert f is not None, config_option_value
                data = gdal.VSIFReadL(1, 3, f).decode('ascii')
                gdal.VSIFCloseL(f)
                assert data == 'foo', config_option_value

            handler = webserver.SequentialHandler()
            with webserver.install_http_handler(handler):
                f = open_for_read('/vsioss/oss_non_cached/test.txt')
                assert f is not None, config_option_value
                data = gdal.VSIFReadL(1, 4, f).decode('ascii')
                gdal.VSIFCloseL(f)
                # We should still get foo because of caching
                assert data == 'foo', config_option_value

    # List buckets (empty result)
    handler = webserver.SequentialHandler()
    handler.add('GET', '/', 200, {'Content-type': 'application/xml'},
                """<?xml version="1.0" encoding="UTF-8"?>
        <ListAllMyBucketsResult>
        <Buckets>
        </Buckets>
        </ListAllMyBucketsResult>
        """)
    with webserver.install_http_handler(handler):
        dir_contents = gdal.ReadDir('/vsioss/')
    assert dir_contents == ['.']

    gdal.VSICurlClearCache()

    # List buckets
    handler = webserver.SequentialHandler()
    handler.add('GET', '/', 200, {'Content-type': 'application/xml'},
                """<?xml version="1.0" encoding="UTF-8"?>
        <ListAllMyBucketsResult>
        <Buckets>
            <Bucket>
                <Name>mybucket</Name>
            </Bucket>
        </Buckets>
        </ListAllMyBucketsResult>
        """)
    with webserver.install_http_handler(handler):
        dir_contents = gdal.ReadDir('/vsioss/')
    assert dir_contents == ['mybucket']
###############################################################################
# Test simple PUT support with a fake OSS server
def test_visoss_4():
    """Test simple PUT support with a fake OSS server."""
    if gdaltest.webserver_port == 0:
        pytest.skip()

    # Opening the bucket itself for write must fail.
    with webserver.install_http_handler(webserver.SequentialHandler()):
        with gdaltest.error_handler():
            f = gdal.VSIFOpenL('/vsioss/oss_fake_bucket3', 'wb')
    assert f is None

    handler = webserver.SequentialHandler()
    handler.add('GET', '/oss_fake_bucket3/empty_file.bin', 200, {'Connection': 'close'}, 'foo')
    with webserver.install_http_handler(handler):
        assert gdal.VSIStatL('/vsioss/oss_fake_bucket3/empty_file.bin').size == 3

    # Empty file
    handler = webserver.SequentialHandler()

    def method(request):
        # PUT of an empty object: the client must send Content-Length: 0.
        if request.headers['Content-Length'] != '0':
            sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers))
            request.send_response(400)
            return

        request.send_response(200)
        request.send_header('Content-Length', 0)
        request.end_headers()

    handler.add('PUT', '/oss_fake_bucket3/empty_file.bin', custom_method=method)

    with webserver.install_http_handler(handler):
        f = gdal.VSIFOpenL('/vsioss/oss_fake_bucket3/empty_file.bin', 'wb')
        assert f is not None
        gdal.ErrorReset()
        gdal.VSIFCloseL(f)
    assert gdal.GetLastErrorMsg() == ''

    # The overwrite above must have invalidated the cached size (3 -> 0).
    handler = webserver.SequentialHandler()
    handler.add('GET', '/oss_fake_bucket3/empty_file.bin', 200, {'Connection': 'close'}, '')
    with webserver.install_http_handler(handler):
        assert gdal.VSIStatL('/vsioss/oss_fake_bucket3/empty_file.bin').size == 0

    # Invalid seek
    handler = webserver.SequentialHandler()
    with webserver.install_http_handler(handler):
        f = gdal.VSIFOpenL('/vsioss/oss_fake_bucket3/empty_file.bin', 'wb')
        assert f is not None
        with gdaltest.error_handler():
            # Seeking forward on a write-only /vsioss/ handle is unsupported.
            ret = gdal.VSIFSeekL(f, 1, 0)
        assert ret != 0
        gdal.VSIFCloseL(f)

    # Invalid read
    handler = webserver.SequentialHandler()
    with webserver.install_http_handler(handler):
        f = gdal.VSIFOpenL('/vsioss/oss_fake_bucket3/empty_file.bin', 'wb')
        assert f is not None
        with gdaltest.error_handler():
            # Reading from a write-only handle must fail.
            ret = gdal.VSIFReadL(1, 1, f)
        assert not ret
        gdal.VSIFCloseL(f)

    # Error case
    handler = webserver.SequentialHandler()
    handler.add('PUT', '/oss_fake_bucket3/empty_file_error.bin', 403)
    with webserver.install_http_handler(handler):
        f = gdal.VSIFOpenL('/vsioss/oss_fake_bucket3/empty_file_error.bin', 'wb')
        assert f is not None
        gdal.ErrorReset()
        with gdaltest.error_handler():
            # The upload happens on close, so the 403 surfaces here.
            gdal.VSIFCloseL(f)
    assert gdal.GetLastErrorMsg() != ''

    # Nominal case
    with webserver.install_http_handler(webserver.SequentialHandler()):
        f = gdal.VSIFOpenL('/vsioss/oss_fake_bucket3/another_file.bin', 'wb')
        assert f is not None
        # Seeks to the current position / relative zero seeks are accepted.
        assert gdal.VSIFSeekL(f, gdal.VSIFTellL(f), 0) == 0
        assert gdal.VSIFSeekL(f, 0, 1) == 0
        assert gdal.VSIFSeekL(f, 0, 2) == 0
        assert gdal.VSIFWriteL('foo', 1, 3, f) == 3
        assert gdal.VSIFSeekL(f, gdal.VSIFTellL(f), 0) == 0
        assert gdal.VSIFWriteL('bar', 1, 3, f) == 3

    handler = webserver.SequentialHandler()

    def method(request):
        # Final PUT: 6 bytes ('foo' + 'bar'), sent with Expect: 100-continue.
        if request.headers['Content-Length'] != '6':
            sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers))
            request.send_response(400)
            request.send_header('Content-Length', 0)
            request.end_headers()
            return

        # Acknowledge the Expect header so the client sends the body.
        request.wfile.write('HTTP/1.1 100 Continue\r\n\r\n'.encode('ascii'))

        content = request.rfile.read(6).decode('ascii')
        if content != 'foobar':
            sys.stderr.write('Did not get expected content: %s\n' % content)
            request.send_response(400)
            request.send_header('Content-Length', 0)
            request.end_headers()
            return

        request.send_response(200)
        request.send_header('Content-Length', 0)
        request.end_headers()

    handler.add('PUT', '/oss_fake_bucket3/another_file.bin', custom_method=method)

    gdal.ErrorReset()
    with webserver.install_http_handler(handler):
        gdal.VSIFCloseL(f)
    assert gdal.GetLastErrorMsg() == ''
###############################################################################
# Test simple DELETE support with a fake OSS server
def test_visoss_5():
    """Test simple DELETE support with a fake OSS server."""
    if gdaltest.webserver_port == 0:
        pytest.skip()

    # Unlinking a bucket-level path must fail without any request.
    with webserver.install_http_handler(webserver.SequentialHandler()):
        with gdaltest.error_handler():
            ret = gdal.Unlink('/vsioss/foo')
    assert ret != 0

    handler = webserver.SequentialHandler()
    handler.add('GET', '/oss_delete_bucket/delete_file', 200, {'Connection': 'close'}, 'foo')
    with webserver.install_http_handler(handler):
        assert gdal.VSIStatL('/vsioss/oss_delete_bucket/delete_file').size == 3

    # Second stat is served from the cache: empty handler, no request expected.
    with webserver.install_http_handler(webserver.SequentialHandler()):
        assert gdal.VSIStatL('/vsioss/oss_delete_bucket/delete_file').size == 3

    handler = webserver.SequentialHandler()
    handler.add('DELETE', '/oss_delete_bucket/delete_file', 204)
    with webserver.install_http_handler(handler):
        ret = gdal.Unlink('/vsioss/oss_delete_bucket/delete_file')
    assert ret == 0

    # After the delete the cache entry is gone: stat goes back to the network
    # (object GET, then a listing probe) and reports the file as missing.
    handler = webserver.SequentialHandler()
    handler.add('GET', '/oss_delete_bucket/delete_file', 404, {'Connection': 'close'}, 'foo')
    handler.add('GET', '/oss_delete_bucket/?delimiter=%2F&max-keys=100&prefix=delete_file%2F', 404, {'Connection': 'close'}, 'foo')
    with webserver.install_http_handler(handler):
        assert gdal.VSIStatL('/vsioss/oss_delete_bucket/delete_file') is None

    # A 403 on DELETE must be reported as an Unlink() failure.
    handler = webserver.SequentialHandler()
    handler.add('GET', '/oss_delete_bucket/delete_file_error', 200)
    handler.add('DELETE', '/oss_delete_bucket/delete_file_error', 403)
    with webserver.install_http_handler(handler):
        with gdaltest.error_handler():
            ret = gdal.Unlink('/vsioss/oss_delete_bucket/delete_file_error')
    assert ret != 0
###############################################################################
# Test multipart upload with a fake OSS server
def test_visoss_6():
    """Test multipart upload with a fake OSS server."""
    if gdaltest.webserver_port == 0:
        pytest.skip()

    with gdaltest.config_option('VSIOSS_CHUNK_SIZE', '1'):  # 1 MB
        with webserver.install_http_handler(webserver.SequentialHandler()):
            f = gdal.VSIFOpenL('/vsioss/oss_fake_bucket4/large_file.bin', 'wb')
    assert f is not None
    # One byte more than the 1 MB chunk size, so two parts are needed.
    size = 1024 * 1024 + 1
    big_buffer = 'a' * size

    handler = webserver.SequentialHandler()

    def method(request):
        # InitiateMultipartUpload: return the upload id used by later requests.
        request.protocol_version = 'HTTP/1.1'
        response = '<?xml version="1.0" encoding="UTF-8"?><InitiateMultipartUploadResult><UploadId>my_id</UploadId></InitiateMultipartUploadResult>'
        request.send_response(200)
        request.send_header('Content-type', 'application/xml')
        request.send_header('Content-Length', len(response))
        request.end_headers()
        request.wfile.write(response.encode('ascii'))

    handler.add('POST', '/oss_fake_bucket4/large_file.bin?uploads', custom_method=method)

    def method(request):
        # UploadPart #1: expect exactly one full 1 MB chunk.
        if request.headers['Content-Length'] != '1048576':
            sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers))
            request.send_response(400)
            request.send_header('Content-Length', 0)
            request.end_headers()
            return

        request.send_response(200)
        request.send_header('ETag', '"first_etag"')
        request.send_header('Content-Length', 0)
        request.end_headers()

    handler.add('PUT', '/oss_fake_bucket4/large_file.bin?partNumber=1&uploadId=my_id', custom_method=method)

    # Writing size bytes triggers the initiate request plus part 1 right away.
    with webserver.install_http_handler(handler):
        ret = gdal.VSIFWriteL(big_buffer, 1, size, f)
    assert ret == size

    handler = webserver.SequentialHandler()

    def method(request):
        # UploadPart #2: the single remaining byte.
        if request.headers['Content-Length'] != '1':
            sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers))
            request.send_response(400)
            return

        request.send_response(200)
        request.send_header('ETag', '"second_etag"')
        request.send_header('Content-Length', 0)
        request.end_headers()

    handler.add('PUT', '/oss_fake_bucket4/large_file.bin?partNumber=2&uploadId=my_id', custom_method=method)

    def method(request):
        # CompleteMultipartUpload: the 186-byte body must list both parts
        # with their ETags, byte for byte.
        if request.headers['Content-Length'] != '186':
            sys.stderr.write('Did not get expected headers: %s\n' % str(request.headers))
            request.send_response(400)
            request.send_header('Content-Length', 0)
            request.end_headers()
            return

        content = request.rfile.read(186).decode('ascii')
        if content != """<CompleteMultipartUpload>
<Part>
<PartNumber>1</PartNumber><ETag>"first_etag"</ETag></Part>
<Part>
<PartNumber>2</PartNumber><ETag>"second_etag"</ETag></Part>
</CompleteMultipartUpload>
""":
            sys.stderr.write('Did not get expected content: %s\n' % content)
            request.send_response(400)
            request.send_header('Content-Length', 0)
            request.end_headers()
            return

        request.send_response(200)
        request.send_header('Content-Length', 0)
        request.end_headers()

    handler.add('POST', '/oss_fake_bucket4/large_file.bin?uploadId=my_id', custom_method=method)

    # Closing the file uploads part 2 and completes the multipart upload.
    gdal.ErrorReset()
    with webserver.install_http_handler(handler):
        gdal.VSIFCloseL(f)
    assert gdal.GetLastErrorMsg() == ''

    # Simulate failures in the InitiateMultipartUpload stage: HTTP error,
    # empty body, unparsable body, and a body without an UploadId.
    handler = webserver.SequentialHandler()
    handler.add('POST', '/oss_fake_bucket4/large_file_initiate_403_error.bin?uploads', 403)
    handler.add('POST', '/oss_fake_bucket4/large_file_initiate_empty_result.bin?uploads', 200)
    handler.add('POST', '/oss_fake_bucket4/large_file_initiate_invalid_xml_result.bin?uploads', 200, {}, 'foo')
    handler.add('POST', '/oss_fake_bucket4/large_file_initiate_no_uploadId.bin?uploads', 200, {}, '<foo/>')
    with webserver.install_http_handler(handler):
        for filename in ['/vsioss/oss_fake_bucket4/large_file_initiate_403_error.bin',
                         '/vsioss/oss_fake_bucket4/large_file_initiate_empty_result.bin',
                         '/vsioss/oss_fake_bucket4/large_file_initiate_invalid_xml_result.bin',
                         '/vsioss/oss_fake_bucket4/large_file_initiate_no_uploadId.bin']:
            with gdaltest.config_option('VSIOSS_CHUNK_SIZE', '1'):  # 1 MB
                f = gdal.VSIFOpenL(filename, 'wb')
            assert f is not None
            # The write fails because the multipart upload cannot be initiated.
            with gdaltest.error_handler():
                ret = gdal.VSIFWriteL(big_buffer, 1, size, f)
            assert ret == 0
            gdal.ErrorReset()
            gdal.VSIFCloseL(f)
            assert gdal.GetLastErrorMsg() == ''

    # Simulate failures in the UploadPart stage (403, or 200 without an ETag);
    # the upload is then aborted with a DELETE.
    handler = webserver.SequentialHandler()
    handler.add('POST', '/oss_fake_bucket4/large_file_upload_part_403_error.bin?uploads', 200, {},
                '<?xml version="1.0" encoding="UTF-8"?><InitiateMultipartUploadResult><UploadId>my_id</UploadId></InitiateMultipartUploadResult>')
    handler.add('PUT', '/oss_fake_bucket4/large_file_upload_part_403_error.bin?partNumber=1&uploadId=my_id', 403)
    handler.add('DELETE', '/oss_fake_bucket4/large_file_upload_part_403_error.bin?uploadId=my_id', 204)

    handler.add('POST', '/oss_fake_bucket4/large_file_upload_part_no_etag.bin?uploads', 200, {},
                '<?xml version="1.0" encoding="UTF-8"?><InitiateMultipartUploadResult><UploadId>my_id</UploadId></InitiateMultipartUploadResult>')
    handler.add('PUT', '/oss_fake_bucket4/large_file_upload_part_no_etag.bin?partNumber=1&uploadId=my_id', 200)
    handler.add('DELETE', '/oss_fake_bucket4/large_file_upload_part_no_etag.bin?uploadId=my_id', 204)

    with webserver.install_http_handler(handler):
        for filename in ['/vsioss/oss_fake_bucket4/large_file_upload_part_403_error.bin',
                         '/vsioss/oss_fake_bucket4/large_file_upload_part_no_etag.bin']:
            with gdaltest.config_option('VSIOSS_CHUNK_SIZE', '1'):  # 1 MB
                f = gdal.VSIFOpenL(filename, 'wb')
            assert f is not None, filename
            with gdaltest.error_handler():
                ret = gdal.VSIFWriteL(big_buffer, 1, size, f)
            assert ret == 0, filename
            gdal.ErrorReset()
            gdal.VSIFCloseL(f)
            assert gdal.GetLastErrorMsg() == '', filename

    # Simulate failure in AbortMultipart stage
    handler = webserver.SequentialHandler()
    handler.add('POST', '/oss_fake_bucket4/large_file_abortmultipart_403_error.bin?uploads', 200, {},
                '<?xml version="1.0" encoding="UTF-8"?><InitiateMultipartUploadResult><UploadId>my_id</UploadId></InitiateMultipartUploadResult>')
    handler.add('PUT', '/oss_fake_bucket4/large_file_abortmultipart_403_error.bin?partNumber=1&uploadId=my_id', 403)
    handler.add('DELETE', '/oss_fake_bucket4/large_file_abortmultipart_403_error.bin?uploadId=my_id', 403)

    filename = '/vsioss/oss_fake_bucket4/large_file_abortmultipart_403_error.bin'
    with webserver.install_http_handler(handler):
        with gdaltest.config_option('VSIOSS_CHUNK_SIZE', '1'):  # 1 MB
            f = gdal.VSIFOpenL(filename, 'wb')
        assert f is not None, filename
        with gdaltest.error_handler():
            ret = gdal.VSIFWriteL(big_buffer, 1, size, f)
        assert ret == 0, filename
        gdal.ErrorReset()
        # Unlike the cases above, a failed abort must surface as an error.
        with gdaltest.error_handler():
            gdal.VSIFCloseL(f)
        assert gdal.GetLastErrorMsg() != '', filename

    # Simulate failure in CompleteMultipartUpload stage
    handler = webserver.SequentialHandler()
    handler.add('POST', '/oss_fake_bucket4/large_file_completemultipart_403_error.bin?uploads', 200, {},
                '<?xml version="1.0" encoding="UTF-8"?><InitiateMultipartUploadResult><UploadId>my_id</UploadId></InitiateMultipartUploadResult>')
    handler.add('PUT', '/oss_fake_bucket4/large_file_completemultipart_403_error.bin?partNumber=1&uploadId=my_id', 200, {'ETag': 'first_etag'}, '')
    handler.add('PUT', '/oss_fake_bucket4/large_file_completemultipart_403_error.bin?partNumber=2&uploadId=my_id', 200, {'ETag': 'second_etag'}, '')
    handler.add('POST', '/oss_fake_bucket4/large_file_completemultipart_403_error.bin?uploadId=my_id', 403)
    # handler.add('DELETE', '/oss_fake_bucket4/large_file_completemultipart_403_error.bin?uploadId=my_id', 204)

    filename = '/vsioss/oss_fake_bucket4/large_file_completemultipart_403_error.bin'
    with webserver.install_http_handler(handler):
        with gdaltest.config_option('VSIOSS_CHUNK_SIZE', '1'):  # 1 MB
            f = gdal.VSIFOpenL(filename, 'wb')
            assert f is not None, filename
            ret = gdal.VSIFWriteL(big_buffer, 1, size, f)
            assert ret == size, filename
            gdal.ErrorReset()
            with gdaltest.error_handler():
                gdal.VSIFCloseL(f)
            assert gdal.GetLastErrorMsg() != '', filename
###############################################################################
# Test Mkdir() / Rmdir()
def test_visoss_7():
    """Test Mkdir() / Rmdir() with a fake OSS server."""
    if gdaltest.webserver_port == 0:
        pytest.skip()

    # Mkdir first probes that neither the 'dir/' object nor any key under the
    # prefix exists, then PUTs an empty 'dir/' placeholder object.
    handler = webserver.SequentialHandler()
    handler.add('GET', '/oss_bucket_test_mkdir/dir/', 404, {'Connection': 'close'})
    handler.add('GET', '/oss_bucket_test_mkdir/?delimiter=%2F&max-keys=100&prefix=dir%2F', 404, {'Connection': 'close'})
    handler.add('PUT', '/oss_bucket_test_mkdir/dir/', 200)
    with webserver.install_http_handler(handler):
        ret = gdal.Mkdir('/vsioss/oss_bucket_test_mkdir/dir', 0)
    assert ret == 0

    # Try creating already existing directory
    # (416 on the ranged GET presumably signals "exists, zero length" — the
    # probe succeeds, so Mkdir must refuse; TODO confirm against the C++ code)
    handler = webserver.SequentialHandler()
    handler.add('GET', '/oss_bucket_test_mkdir/dir/', 416)
    with webserver.install_http_handler(handler):
        ret = gdal.Mkdir('/vsioss/oss_bucket_test_mkdir/dir', 0)
    assert ret != 0

    handler = webserver.SequentialHandler()
    handler.add('DELETE', '/oss_bucket_test_mkdir/dir/', 204)
    with webserver.install_http_handler(handler):
        ret = gdal.Rmdir('/vsioss/oss_bucket_test_mkdir/dir')
    assert ret == 0

    # Try deleting already deleted directory
    handler = webserver.SequentialHandler()
    handler.add('GET', '/oss_bucket_test_mkdir/dir/', 404)
    handler.add('GET', '/oss_bucket_test_mkdir/?delimiter=%2F&max-keys=100&prefix=dir%2F', 404, {'Connection': 'close'})
    with webserver.install_http_handler(handler):
        ret = gdal.Rmdir('/vsioss/oss_bucket_test_mkdir/dir')
    assert ret != 0

    # Try deleting non-empty directory: the listing under the prefix returns
    # a key, so Rmdir must fail without issuing a DELETE.
    handler = webserver.SequentialHandler()
    handler.add('GET', '/oss_bucket_test_mkdir/dir_nonempty/', 416)
    handler.add('GET', '/oss_bucket_test_mkdir/?delimiter=%2F&max-keys=100&prefix=dir_nonempty%2F', 200,
                {'Content-type': 'application/xml'},
                """<?xml version="1.0" encoding="UTF-8"?>
                <ListBucketResult>
                    <Prefix>dir_nonempty/</Prefix>
                    <Contents>
                        <Key>dir_nonempty/test.txt</Key>
                        <LastModified>1970-01-01T00:00:01.000Z</LastModified>
                        <Size>40</Size>
                    </Contents>
                </ListBucketResult>
                """)
    with webserver.install_http_handler(handler):
        ret = gdal.Rmdir('/vsioss/oss_bucket_test_mkdir/dir_nonempty')
    assert ret != 0
###############################################################################
# Test handling of file and directory with same name
def test_visoss_8():
    """Test handling of a file and a directory bearing the same name."""
    if gdaltest.webserver_port == 0:
        pytest.skip()

    # The listing returns both a key 'test' and a common prefix 'test/'.
    handler = webserver.SequentialHandler()
    handler.add('GET', '/visoss_8/?delimiter=%2F', 200,
                {'Content-type': 'application/xml'},
                """<?xml version="1.0" encoding="UTF-8"?>
                <ListBucketResult>
                    <Prefix></Prefix>
                    <Contents>
                        <Key>test</Key>
                        <LastModified>1970-01-01T00:00:01.000Z</LastModified>
                        <Size>40</Size>
                    </Contents>
                    <CommonPrefixes>
                        <Prefix>test/</Prefix>
                    </CommonPrefixes>
                </ListBucketResult>
                """)
    with webserver.install_http_handler(handler):
        listdir = gdal.ReadDir('/vsioss/visoss_8', 0)
    assert listdir == ['test', 'test/']

    # Both stats below must be answered from the cached listing:
    # the empty SequentialHandler asserts that no request is made.
    handler = webserver.SequentialHandler()
    with webserver.install_http_handler(handler):
        # Without the trailing slash: the object, i.e. not a directory.
        assert not stat.S_ISDIR(gdal.VSIStatL('/vsioss/visoss_8/test').mode)

    handler = webserver.SequentialHandler()
    with webserver.install_http_handler(handler):
        # With the trailing slash: the directory.
        assert stat.S_ISDIR(gdal.VSIStatL('/vsioss/visoss_8/test/').mode)
###############################################################################
def test_visoss_stop_webserver():
    """Shut down the fake OSS webserver started for this test module."""
    if gdaltest.webserver_port == 0:
        pytest.skip()

    # Clearcache needed to close all connections, since the Python server
    # can only handle one connection at a time
    gdal.VSICurlClearCache()

    webserver.server_stop(gdaltest.webserver_process, gdaltest.webserver_port)
###############################################################################
# Nominal cases (require valid credentials)
def test_visoss_extra_1():
    """Nominal OSS cases against a real endpoint (require valid credentials)."""
    if not gdaltest.built_against_curl():
        pytest.skip()

    # Either a bucket name or bucket/filename
    OSS_RESOURCE = gdal.GetConfigOption('OSS_RESOURCE')

    if gdal.GetConfigOption('OSS_SECRET_ACCESS_KEY') is None:
        pytest.skip('Missing OSS_SECRET_ACCESS_KEY')
    elif gdal.GetConfigOption('OSS_ACCESS_KEY_ID') is None:
        pytest.skip('Missing OSS_ACCESS_KEY_ID')
    elif OSS_RESOURCE is None:
        pytest.skip('Missing OSS_RESOURCE')

    if '/' not in OSS_RESOURCE:
        # Bucket-level checks: stat/list the bucket, then exercise a full
        # Mkdir / write / read / Unlink / Rmdir round-trip inside it.
        path = '/vsioss/' + OSS_RESOURCE
        statres = gdal.VSIStatL(path)
        assert statres is not None and stat.S_ISDIR(statres.mode), \
            ('%s is not a valid bucket' % path)

        readdir = gdal.ReadDir(path)
        assert readdir is not None, 'ReadDir() should not return empty list'
        for filename in readdir:
            if filename != '.':
                subpath = path + '/' + filename
                assert gdal.VSIStatL(subpath) is not None, \
                    ('Stat(%s) should not return an error' % subpath)

        unique_id = 'visoss_test'
        subpath = path + '/' + unique_id
        ret = gdal.Mkdir(subpath, 0)
        assert ret >= 0, ('Mkdir(%s) should not return an error' % subpath)

        readdir = gdal.ReadDir(path)
        assert unique_id in readdir, \
            ('ReadDir(%s) should contain %s' % (path, unique_id))

        # Creating the same directory twice must fail.
        ret = gdal.Mkdir(subpath, 0)
        assert ret != 0, ('Mkdir(%s) repeated should return an error' % subpath)

        ret = gdal.Rmdir(subpath)
        assert ret >= 0, ('Rmdir(%s) should not return an error' % subpath)

        readdir = gdal.ReadDir(path)
        assert unique_id not in readdir, \
            ('ReadDir(%s) should not contain %s' % (path, unique_id))

        # Removing an already-removed directory must fail.
        ret = gdal.Rmdir(subpath)
        assert ret != 0, ('Rmdir(%s) repeated should return an error' % subpath)

        ret = gdal.Mkdir(subpath, 0)
        assert ret >= 0, ('Mkdir(%s) should not return an error' % subpath)

        f = gdal.VSIFOpenL(subpath + '/test.txt', 'wb')
        assert f is not None
        gdal.VSIFWriteL('hello', 1, 5, f)
        gdal.VSIFCloseL(f)

        # A non-empty directory cannot be removed.
        ret = gdal.Rmdir(subpath)
        assert ret != 0, \
            ('Rmdir(%s) on non empty directory should return an error' % subpath)

        f = gdal.VSIFOpenL(subpath + '/test.txt', 'rb')
        assert f is not None
        data = gdal.VSIFReadL(1, 5, f).decode('utf-8')
        assert data == 'hello'
        gdal.VSIFCloseL(f)

        ret = gdal.Unlink(subpath + '/test.txt')
        assert ret >= 0, \
            ('Unlink(%s) should not return an error' % (subpath + '/test.txt'))

        ret = gdal.Rmdir(subpath)
        assert ret >= 0, ('Rmdir(%s) should not return an error' % subpath)

        return

    # Object-level checks: read one byte through /vsioss/ ...
    f = open_for_read('/vsioss/' + OSS_RESOURCE)
    assert f is not None, ('cannot open %s' % ('/vsioss/' + OSS_RESOURCE))
    ret = gdal.VSIFReadL(1, 1, f)
    gdal.VSIFCloseL(f)

    assert len(ret) == 1

    # Same with /vsioss_streaming/
    f = open_for_read('/vsioss_streaming/' + OSS_RESOURCE)
    assert f is not None
    ret = gdal.VSIFReadL(1, 1, f)
    gdal.VSIFCloseL(f)

    assert len(ret) == 1

    if False:  # pylint: disable=using-constant-test
        # we actually try to read at read() time and bSetError = false:
        # Invalid bucket : "The specified bucket does not exist"
        gdal.ErrorReset()
        f = open_for_read('/vsioss/not_existing_bucket/foo')
        with gdaltest.error_handler():
            gdal.VSIFReadL(1, 1, f)
        gdal.VSIFCloseL(f)
        assert gdal.VSIGetLastErrorMsg() != ''

    # Invalid resource
    gdal.ErrorReset()
    f = open_for_read('/vsioss_streaming/' + OSS_RESOURCE + '/invalid_resource.baz')
    assert f is None, gdal.VSIGetLastErrorMsg()

    # Test GetSignedURL(): the signed URL must be readable without credentials
    # through the plain /vsicurl_streaming/ handler.
    signed_url = gdal.GetSignedURL('/vsioss/' + OSS_RESOURCE)
    f = open_for_read('/vsicurl_streaming/' + signed_url)
    assert f is not None
    ret = gdal.VSIFReadL(1, 1, f)
    gdal.VSIFCloseL(f)

    assert len(ret) == 1
###############################################################################
def test_visoss_cleanup():
    """Restore the OSS-related configuration options saved at module startup."""
    for name, value in gdaltest.oss_vars.items():
        gdal.SetConfigOption(name, value)
|
from os import chdir
from pathlib import Path
from shutil import copy2 as copy
from subprocess import run
import pytest
@pytest.fixture
def fossil():
    """Absolute path of the fossil executable driven by these tests."""
    fossil_binary = "/home/osboxes/src/fossil-snapshot-20210429/fossil"
    return fossil_binary
@pytest.fixture
def repo_path():
    """Directory where the project's fossil repository lives."""
    repository_dir = "/home/osboxes/proj/pyphlogiston/pyphlogiston/repo"
    return repository_dir
@pytest.fixture
def repo_name():
    """File name used for throwaway fossil repositories."""
    repository_file = "phlogiston.fossil"
    return repository_file
@pytest.fixture
def data_path():
    """Directory holding the sample data files copied into checkouts."""
    sample_data_dir = "/home/osboxes/proj/pyphlogiston/pyphlogiston/data"
    return sample_data_dir
@pytest.fixture
def setup_script(tmp_path, fossil, repo_path, repo_name, data_path):
    """
    Build a fossil working area under tmp_path and return the staging path.

    1. set up a data directory
    2. set up a data/stage directory
    3. set up a data/repo directory
    4. initialize data/repo/phologiston.fossil
    5. open (checkout) the repository into data/stage
    """
    # TMP_PATH would be the install directory.
    # Use pathlib's "/" operator: the previous str(tmp_path) + "data"
    # string concatenation created sibling directories literally named
    # "<tmp_path>data", "<tmp_path>datastage", "<tmp_path>datarepo"
    # instead of a tree under tmp_path.
    # 1, 2, 3:
    data_dir = tmp_path / "data"
    stage_dir = data_dir / "stage"
    repo_dir = data_dir / "repo"
    data_dir.mkdir()
    stage_dir.mkdir()
    repo_dir.mkdir()

    # 4. Initialize the repository inside data/repo.
    # NOTE(review): "phologiston" (sic) is kept as-is; it is used consistently
    # within this fixture, but differs from the repo_name fixture
    # ("phlogiston.fossil") — confirm which spelling is intended.
    chdir(str(repo_dir))
    out = run([fossil, "init", "phologiston.fossil"], capture_output=True)
    assert out.returncode == 0

    # 5. Open the repository with data/stage as the working directory.
    out = run(
        [fossil, "open", f"{repo_dir}/phologiston.fossil", "--workdir", str(stage_dir)],
        capture_output=True,
    )
    assert out.returncode == 0
    return stage_dir
@pytest.fixture
def test_repo(fossil, tmp_path, repo_name):
    """Initialize a throwaway fossil repository under tmp_path; return its path."""
    repo_file = f"{tmp_path}/{repo_name}"
    result = run([fossil, "init", repo_file], capture_output=True)
    assert result.returncode == 0
    return repo_file
@pytest.fixture
def add_files0(data_path):
    """Paths (as strings) of every entry directly inside the data directory."""
    return [str(entry) for entry in Path(data_path).glob("./*")]
def test_fossil_version(fossil):
    """`fossil version` must exit successfully."""
    out = run([fossil, "version"], capture_output=True)
    assert out.returncode == 0
def test_repo_info(fossil, test_repo):
    """`fossil info <repo>` must exit successfully on a fresh repository."""
    out = run([fossil, "info", test_repo], capture_output=True)
    assert out.returncode == 0
def test_add(setup_script, fossil, add_files0):
    """
    6. stick files in data/checkout
    7. do the rest of the fossil thing
    """
    chdir(setup_script)
    # Copy every sample file into the checkout, then stage all of them.
    for src in add_files0:
        copy(src, setup_script)
    out = run([fossil, "add", f"{setup_script}/*"], capture_output=True)
    assert out.returncode == 0
|
def quick_sort(array):
    """Return a new list with the elements of *array* in ascending order.

    Classic recursive quicksort: the first element is the pivot, the rest
    is partitioned into <= pivot and > pivot, each side sorted recursively.
    """
    if len(array) < 2:
        return array
    pivot, rest = array[0], array[1:]
    smaller = [item for item in rest if item <= pivot]
    larger = [item for item in rest if item > pivot]
    return quick_sort(smaller) + [pivot] + quick_sort(larger)


# Demo invocation kept from the original script (result is discarded).
quick_sort([12, 31, 5, 3, 0, 43, 99, 78, 32, 9, 7])
# Generated by Django 3.1.7 on 2021-06-17 16:49
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated initial migration for the order app (Django 3.1.7).
# Creates Order (one row per checkout) and OrderItem (one row per product
# variant inside an order). Do not hand-edit field definitions once this
# migration has been applied anywhere.
class Migration(migrations.Migration):
    initial = True
    dependencies = [
        # userapp's initial migration must run first: the FKs below target
        # userapp.address and userapp.userinfo.
        ('userapp', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # out_trade_num/trade_no look like payment-gateway trade
                # identifiers (payway defaults to alipay) -- NOTE(review):
                # inferred from names; confirm against the checkout views.
                ('out_trade_num', models.UUIDField()),
                ('order_num', models.CharField(max_length=50)),
                ('trade_no', models.CharField(max_length=120)),
                ('status', models.CharField(max_length=20)),
                ('payway', models.CharField(default='alipay', max_length=20)),
                ('address', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='userapp.address')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='userapp.userinfo')),
            ],
        ),
        migrations.CreateModel(
            name='OrderItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # goods/color/size are plain integer references, not FKs --
                # NOTE(review): presumably they point into another app/service.
                ('goodsid', models.PositiveIntegerField()),
                ('colorid', models.PositiveIntegerField()),
                ('sizeid', models.PositiveIntegerField()),
                ('count', models.PositiveIntegerField()),
                ('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='order.order')),
            ],
        ),
    ]
|
# Legacy Django URLconf (pre-1.8 style): `django.conf.urls.defaults`,
# `patterns(...)` and string view references were removed in later Django
# releases, so this file only runs on the old version it was written for.
from django.conf.urls.defaults import *

urlpatterns = patterns('',
    # Landing page: request a private-beta invite.
    url(r'^$', 'privatebeta.views.invite', name='privatebeta_invite'),
    # Activation link; the extra kwargs send the user to /register/ afterwards.
    url(r'^activate/(?P<code>\w+)/$', 'privatebeta.views.activate_invite',
        {'redirect_to': '/register/'}, name='privatebeta_activate_invite'),
    url(r'^sent/$', 'privatebeta.views.sent', name='privatebeta_sent'),
)
|
import numpy as np
import tensorflow as tf
import random
import pickle
######################################## TF-IDF ############################################
from sklearn.feature_extraction.text import TfidfVectorizer
from collections import defaultdict
class TFIDF:
    """Ranks keywords per document "type" using a TF-IDF matrix built from
    pickled, pre-tokenised sentences (one joined document per type)."""

    def __init__(self):
        # Load the pickled corpora.
        # type_dict_khkim.pickle: mapping type -> list of tokenised sentences
        # (assumed from the indexing below -- TODO confirm with the writer).
        a = open('./data/type_dict_khkim.pickle', 'rb')
        real_data = pickle.load(a)
        a = open('./data/pk_type2idx.pkl', 'rb')
        self.type_dict = pickle.load(a)
        # Flatten every sentence of a type into one token list per type.
        self.real_data_by_type = []
        for key in real_data.keys():
            self.real_data_by_type.append(
                [word[0] for sent in real_data[key] for word in sent if word != 'UNK'])
        # One space-joined document per type (18 types in the source data).
        dataset = [' '.join(type_sent) for type_sent in self.real_data_by_type]
        self.tf = TfidfVectorizer(analyzer='word', ngram_range=(1, 1), min_df=1, sublinear_tf=True)
        # Fit on all type documents and keep the dense TF-IDF matrix.
        self.tfidf = self.tf.fit_transform(dataset)
        self.tfidf_matrix = np.array(self.tfidf.toarray())
        # Kept for backward compatibility; keyword() no longer mutates them.
        self.tfidf_max = []
        self.max_idx = []
        # get_feature_names() was removed in scikit-learn 1.2; prefer the
        # replacement accessor when it exists.
        names_accessor = getattr(self.tf, 'get_feature_names_out', None)
        if names_accessor is None:
            names_accessor = self.tf.get_feature_names
        self.feature_names = list(names_accessor())

    def keyword(self, idx):
        """Return the feature names with the ten highest TF-IDF scores for
        document *idx* (more than ten names if scores are tied).

        Bug fix: the original appended into self.tfidf_max / self.max_idx,
        so every call after the first mixed scores and indices from earlier
        documents. Locals are used instead; the first call behaves exactly
        as before.
        """
        scores = np.sort(self.tfidf_matrix[idx])[::-1]  # descending scores
        top_scores = [scores[i] for i in range(10)]     # ten keyword slots
        # A tied score maps to several feature positions, so dedupe while
        # preserving score order.
        ordered_unique = []
        for score in top_scores:
            for feature in np.where(self.tfidf_matrix[idx] == score)[0]:
                if feature not in ordered_unique:
                    ordered_unique.append(feature)
        return [self.feature_names[int(i)] for i in ordered_unique]
from flask import Flask, make_response, request, abort, render_template
from Crypto.Cipher import AES
from Crypto import Random
from Crypto.Util import Padding
import base64
import hashlib
import json
import urllib
import random
app = Flask(__name__)

# Per-process AES-CBC parameters: one random IV and a random 16-byte key,
# generated at startup (Python 2 code: KEY is a byte string here).
# NOTE: `random` is not cryptographically secure -- acceptable for this CTF
# challenge, never for real key material.
IV = Random.new().read(AES.block_size)
random.seed()
KEY = ''.join(chr(random.randint(0,255)) for i in range(16))
PADDING_STYLE = "pkcs7"
# Default cookie data
STARTING_COOKIE_DATA = {'username': 'guest', 'whats_the_answer_to_life_the_universe_and_everything': '', 'security_put_some_text_here': ''}
COOKIE_NAME = "wisdom_of_the_gods"
@app.route("/")
def index():
current_cookie = request.cookies.get(COOKIE_NAME)
has_query = False
answer = ''
# Completely pointless, just a little fun..
if "q" in request.args:
has_query = True
if "flag" in request.args["q"].lower():
answer = "The answer is not that simple..."
else:
answer = magic_8_ball()
# If user doesn't have a cookie set, set them up with one
if current_cookie is None:
encrypted_cookie = encrypt_cookie(STARTING_COOKIE_DATA)
resp = make_response(render_template('index.html', query=has_query))
resp.set_cookie(COOKIE_NAME, encrypted_cookie)
return resp
# Otherwise, attempt to decrypt the cookie. Gives useful info during padding oracle attack.
else:
decrypted_cookie = decrypt_cookie(current_cookie)
resp = make_response(render_template('index.html', query=has_query, answer=answer))
return resp
# If correct cookie has been set, show the user the flag. Otherwise, they get a 403 Forbidden as a hint there is something here
@app.route("/admin")
def admin():
    """Serve the flag only when the decrypted cookie passes check_cookie()."""
    solved = False
    cookie_value = request.cookies.get(COOKIE_NAME)
    if cookie_value is not None:
        solved = check_cookie(decrypt_cookie(cookie_value))
    if solved:
        return render_template('admin.html')
    abort(403)
def magic_8_ball():
    """Return one canned Magic 8-Ball response, chosen uniformly."""
    responses = (
        "As I see it, yes",
        "Ask again later",
        "Better not tell you now",
        "Cannot predict now",
        "Concentrate and ask again",
        "Don't count on it",
        "It is certain",
        "It is decidedly so",
        "Most likely",
        "My reply is no",
        "The gods say no",
        "Outlook good",
        "Outlook not so good",
        "Reply hazy, try again",
        "Signs point to yes",
        "Very doubtful",
        "Without a doubt",
        "Yes",
        "Yes, definitely",
        "You may rely on it",
    )
    return random.choice(responses)
# Encrypt data for the cookie
def encrypt_cookie(cookie_data):
    # Python 2 code: json.dumps returns a str (== bytes), which Padding.pad
    # accepts directly.
    cookie_cipher = AES.new(KEY, AES.MODE_CBC, IV)
    padded = Padding.pad(json.dumps(cookie_data), AES.block_size, style=PADDING_STYLE)
    # Cookie wire format: urlquote(base64(IV || AES-CBC(padded JSON))).
    # The clear IV block at the front is what decrypt_cookie strips again.
    encrypted_cookie = urllib.quote(base64.b64encode(IV + cookie_cipher.encrypt(padded)))
    return encrypted_cookie
# Attempt to decrypt the cookie
def decrypt_cookie(encrypted_cookie):
    cookie_cipher = AES.new(KEY, AES.MODE_CBC, IV)
    # Cookie decryption may fail if user has messed with the data
    try:
        b64_decoded_cookie = base64.b64decode(urllib.unquote(encrypted_cookie))
        # The blob starts with the clear IV block; decrypting it produces one
        # garbage block, but CBC chaining makes every later block correct,
        # so the garbage prefix is simply cut off after unpadding.
        decrypted_cookie = cookie_cipher.decrypt(b64_decoded_cookie)
        unpadded_cookie = Padding.unpad(decrypted_cookie, AES.block_size, style=PADDING_STYLE)
        unpadded_cookie = unpadded_cookie[len(IV):] #remove beginning IV
        return unpadded_cookie
    # If cookie decryption fails for any reason, throw up a 500. Gives useful info to user during padding oracle attack
    # (Python 2 except syntax -- this file predates py3.)
    except Exception, e:
        print str(e)
        abort(500)
# Check if the cookie has all the correct data to solve the challenge
# Username should be 'admin'
# whats_the_answer_to_life_the_universe_and_everything should be 42
# and security_put_some_text_here should just contain something
def check_cookie(cookie_data):
    """Return True only when the decrypted JSON cookie carries all three
    expected admin values; False otherwise."""
    cookie = json.loads(cookie_data)
    if 'username' not in cookie:
        return False
    if cookie.get('username').lower() != 'admin':
        return False
    if cookie.get('whats_the_answer_to_life_the_universe_and_everything') != '42':
        return False
    proof = cookie.get('security_put_some_text_here')
    if proof is None or len(proof) == 0:
        return False
    return True
# Useful feature during debugging so I didn't have to manually remove the cookie every time :)
# will leave in just in case it's useful during the CTF
@app.route("/clear_cookie")
def clear_cookie():
    """Delete the challenge cookie and return an empty response."""
    response = make_response()
    response.delete_cookie(COOKIE_NAME)
    return response
if __name__ == "__main__":
app.run(threaded=True, host='0.0.0.0', port=4738)
|
# Read n "name / grade" line pairs from stdin, average each student's
# grades, and print students averaging at least 4.50 in descending order.
count = int(input())
grades_by_student = {}
for _ in range(count):
    name = input()
    grade = float(input())
    grades_by_student.setdefault(name, []).append(grade)
# Replace each grade list with its average.
for name in grades_by_student:
    marks = grades_by_student[name]
    grades_by_student[name] = sum(marks) / len(marks)
ranked = dict(sorted(grades_by_student.items(), key=lambda kvp: -kvp[1]))
for name, average in ranked.items():
    if average >= 4.50:
        print(f"{name} -> {average:.2f}")
|
from unittest import TestCase
from memory_consumer import Consumer
class ConsumerTest(TestCase):
    """Checks the data-size progression of Consumer over a full iteration."""

    def _sizes(self, consumer):
        # One entry per iteration step: len(consumer.data) after that step.
        return [len(consumer.data) for _ in consumer]

    def test_simple(self):
        self.assertEqual([2, 4, 6, 6, 6],
                         self._sizes(Consumer(size=6, grow=3, hold=2)))

    def test_uneven_multiple(self):
        self.assertEqual([1, 3, 5, 5, 5],
                         self._sizes(Consumer(size=5, grow=3, hold=2)))

    def test_repeat(self):
        self.assertEqual([3, 6, 6, 3, 6, 6],
                         self._sizes(Consumer(size=6, grow=2, hold=1, repeat=2)))

    def test_release(self):
        self.assertEqual([3, 6, 6, 0, 0, 3, 6, 6, 0, 0],
                         self._sizes(Consumer(size=6, grow=2, hold=1, repeat=2, release=2)))

    def test_delete(self):
        self.assertEqual([2, 4, 6, 8, 8],
                         self._sizes(Consumer(size=8, grow=4, hold=1, delete=True)))
|
""":mod:`rmon` --- redis monitor system
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from flask import Flask
app = Flask(__name__, static_folder='assets', static_url_path='/assets')
app.config.from_envvar('RMON_SETTINGS')
from rmon.commons.views import mod as commonsModule
from rmon.clusters.views import mod as clustersModule
app.register_blueprint(commonsModule)
app.register_blueprint(clustersModule)
|
import logging
import xml.etree.ElementTree as ET
from .baseplugin import AbstractClient
from pushno.messages import ProwlValidationMessage
# API endpoint base URL
BASE_URL = "https://api.prowlapp.com/publicapi"
# API endpoint to send messages
MESSAGE_URL = "{}/add".format(BASE_URL)
# API endpoint to validate the user key
VALIDATE_URL = "{}/verify".format(BASE_URL)
# get the logger
log = logging.getLogger(__name__)
class ProwlClient(AbstractClient):
    """
    client class to interact with the Prowl API
    """

    def __init__(self, api_key, provider_key=None):
        AbstractClient.__init__(self)
        self._api_key = api_key
        self._provider_key = provider_key

    def _parse_result(self, r):
        # Prowl answers with XML; the first child element carries the
        # status attributes (code, remaining, ...) and optional text.
        first_child = list(ET.fromstring(r.text))[0]
        result = first_child.attrib
        if first_child.text is not None:
            result["status"] = first_child.text
        return result

    def send(self, message):
        # override token and user in the given message
        message.apikey = self._api_key
        if self._provider_key is not None:
            message.providerkey = self._provider_key
        log.debug("prepared message: {}".format(message.data))
        # ensure that message is correct
        message.validate()
        # finally, send the message
        response = self._s.post(MESSAGE_URL, data=message.data)
        parsed = self._parse_result(response)
        log.debug("response: {}".format(parsed))
        return parsed["code"] == "200", parsed

    def validate_user(self):
        # prepare validation message
        probe = ProwlValidationMessage(
            apikey=self._api_key, providerkey=self._provider_key
        )
        log.debug("prepared message: {}".format(probe.data))
        # ensure that message is correct
        probe.validate()
        # get validation
        response = self._s.get(VALIDATE_URL, params=probe.data)
        parsed = self._parse_result(response)
        log.debug("response: {}".format(parsed))
        return parsed["code"] == "200", parsed
|
import random
class Traveller:
    """Class which represent a travelling salesman

    Author : Thomas Minier
    """

    def __init__(self, id=None):
        self.id = id
        self.path = list()
        self.nbChilds = 0

    def __eq__(self, other):
        """Two travellers are equal when they visit the same cities in order."""
        if type(self) != type(other):
            return False
        return self.path == other.path

    def __repr__(self):
        cities = [city.name for city in self.path]
        # Bug fix: the original format string had three placeholders for
        # two arguments, so repr() always raised IndexError.
        return '<Traveller : {}, cities : {}>'.format(self.id, cities)

    def printPath(self):
        """Print the names of the visited cities, in visit order."""
        print([city.name for city in self.path])

    def addCity(self, city):
        """Append *city* to the end of the traveller's path."""
        self.path.append(city)

    def evaluatePath(self):
        """Evaluate the quality of the path taken by the traveller

        Returns the summed distance between consecutive cities
        (0 for an empty or single-city path).
        """
        pathLength = 0
        if len(self.path) > 0:
            previousCity = self.path[0]
            for ind in range(1, len(self.path)):
                pathLength += previousCity.distanceWith(self.path[ind].name)
                previousCity = self.path[ind]
        return pathLength

    def mutate(self):
        """Apply a mutation to the traveller

        Swaps two distinct cities in the path. Robustness fix: paths with
        fewer than two cities are left untouched (the original crashed on
        random.choice over an empty candidate list).
        """
        if len(self.path) < 2:
            return
        first = random.randrange(len(self.path))
        second = random.choice([i for i in range(len(self.path)) if i != first])
        self.path[first], self.path[second] = self.path[second], self.path[first]

    def breed(self, partner, mutateChance=-1.0):
        """Breed two travellers and produce a new one

        Copies a random prefix ("locust") of self's path and fills the
        remaining slots with the partner's cities.
        """
        children = Traveller('{}.{}'.format(self.id, self.nbChilds))
        # take a sample city sequence from self
        locustInd = random.randint(0, len(self.path))
        locust = self.path[0:locustInd]
        ind = 0
        # fill the path of children with fragments of the parents' paths
        for city in partner.path:
            # Bug fix: the original guard used `ind <= len(locust)`, which
            # could index one past the end of locust on duplicate cities.
            if (ind < len(locust)) and (city in locust):
                children.addCity(locust[ind])
                ind += 1
            else:
                children.addCity(city)
        # try to mutate the children
        if random.random() >= mutateChance:
            children.mutate()
        self.nbChilds += 1
        return children
|
"""
файл функции, выполняющей команды в зависимости от времени
"""
from datetime import datetime
import pg_connect
import dataBase
import sqlite3
import args
"""
отдебажил функцию timer теперь выполнение, начисление очков и прочее работает корректно (upd я ошибался)
"""
def timer():
try:
connect, cursor = pg_connect.connect()
can = True
while True:
try:
if int(datetime.now().strftime('%M')) % 2 == 0 and int(datetime.now().strftime('%S')) == 0:
if can:
cursor.execute("SELECT End_time FROM Users")
end_time = cursor.fetchall()
for i in range(len(end_time)):
if end_time[i][0] != 'None':
end_minutes = int(end_time[i][0])
mitutes_now = int(datetime.now().strftime('%M'))
if mitutes_now == end_minutes or mitutes_now > end_minutes:
cursor.execute("SELECT ID FROM Users WHERE End_time=" + str(end_minutes))
user_id = cursor.fetchall()
for u in range(len(user_id[0])):
money = dataBase.get_task_cost(user_id[0][u])
args.bot.send_message(parse_mode='HTML', chat_id=user_id[0][u],
text='<b>Вы закончили выполнение задания</b>\nВаш '
'заработок: ' + str(money) + args.currency)
dataBase.plus_count_works(user_id[0][u]) # +1 к выполненным заданиям
company = dataBase.get_corp(user_id[0][u])
if company != 0 and company != '0':
owner_id = dataBase.get_owner(company)
dataBase.add_money(owner_id, int(money)/10)
dataBase.add_money(user_id[0][u], money)
dataBase.start_job(user_id[0][u], args.waitStatus, 'None')
print('------------\nchecking')
can = False # чтобы не выполнялось несколько раз в секунду
else:
can = True
except Exception as e:
print(e)
except Exception as e:
print(e)
|
#!/usr/bin/env python3
# Dump Android Verified Boot Signature (c) B.Kerler 2017-2018
import hashlib
import struct
from binascii import hexlify,unhexlify
import sys
import argparse
from Crypto.Util.asn1 import DerSequence
from Crypto.PublicKey import RSA
from Library.libavb import *
version="v1.6"
def extract_hash(pub_key,data):
    """Recover the SHA-256 digest from an RSA signature blob.

    Performs raw RSA (modular exponentiation with the public key), then
    takes the last 32 bytes of the decrypted block as the digest. The byte
    just before the digest must be 0x20 (the ASN.1 length octet for a
    SHA-256 digest); otherwise the signature is rejected.
    """
    hashlen = 32  # SHA256
    sig_int = int(hexlify(data), 16)
    plain_hex = hex(pow(sig_int, pub_key.e, pub_key.n))[2:]
    if len(plain_hex) % 2 != 0:
        plain_hex = "0" + plain_hex
    plain = unhexlify(plain_hex)
    digest = plain[-hashlen:]
    if plain[-0x21:-0x20] != b'\x20' or len(digest) != hashlen:
        raise Exception('Signature error')
    return digest
def dump_signature(data):
    """Parse an Android boot-image signature (a DER-encoded blob).

    Returns [target_name, signed_length, digest, public_key, flag_byte],
    or implicitly None when *data* does not start with a DER SEQUENCE.
    """
    if data[0:2] == b'\x30\x82':
        # 0x30 0x82: DER SEQUENCE whose length uses a 2-byte field.
        slen = struct.unpack('>H', data[2:4])[0]
        total = slen + 4
        cert = struct.unpack('<%ds' % total, data[0:total])[0]
        der = DerSequence()
        der.decode(cert)
        cert0 = DerSequence()
        cert0.decode(bytes(der[1]))
        pk = DerSequence()
        pk.decode(bytes(cert0[0]))
        # Field 6 of the TBS certificate is subjectPublicKeyInfo.
        subjectPublicKeyInfo = pk[6]
        # der[3] holds the authenticated attributes: target name + length.
        meta = DerSequence().decode(bytes(der[3]))
        name = meta[0][2:]
        length = meta[1]
        # 256-byte RSA-2048 signature, skipping the 4-byte DER header.
        signature = bytes(der[4])[4:0x104]
        pub_key = RSA.importKey(subjectPublicKeyInfo)
        hash=extract_hash(pub_key,signature)
        return [name,length,hash,pub_key,bytes(der[3])[1:2]]
class androidboot:
    """Parsed Android boot-image header (bootimg with Qualcomm QCDT field)."""

    def __init__(self):
        # Bug fix: these used to be class attributes, so the mutable `id`
        # list was shared by every instance. They are now per-instance.
        self.magic = "ANDROID!"  # BOOT_MAGIC_SIZE 8
        self.kernel_size = 0
        self.kernel_addr = 0
        self.ramdisk_size = 0
        self.ramdisk_addr = 0
        self.second_addr = 0
        self.second_size = 0
        self.tags_addr = 0
        self.page_size = 0
        self.qcdt_size = 0
        self.os_version = 0
        self.name = ""           # BOOT_NAME_SIZE 16
        self.cmdline = ""        # BOOT_ARGS_SIZE 512
        self.id = []             # uint*8
        self.extra_cmdline = ""  # BOOT_EXTRA_ARGS_SIZE 1024


def getheader(inputfile):
    """Read the first 0x660 bytes of *inputfile* and unpack the boot header.

    Returns a populated androidboot instance.
    """
    param = androidboot()
    with open(inputfile, 'rb') as rf:
        header = rf.read(0x660)
        fields = struct.unpack('<8sIIIIIIIIII16s512s8I1024s', header)
        param.magic = fields[0]
        param.kernel_size = fields[1]
        param.kernel_addr = fields[2]
        param.ramdisk_size = fields[3]
        param.ramdisk_addr = fields[4]
        param.second_size = fields[5]
        param.second_addr = fields[6]
        param.tags_addr = fields[7]
        param.page_size = fields[8]
        param.qcdt_size = fields[9]
        param.os_version = fields[10]
        param.name = fields[11]
        param.cmdline = fields[12]
        # fields[13:21] are the eight 32-bit words of the image id/hash.
        param.id = list(fields[13:21])
        param.extra_cmdline = fields[21]
    return param
def int_to_bytes(x):
    """Big-endian byte representation of non-negative int *x* (b'' for 0)."""
    byte_count = (x.bit_length() + 7) // 8
    return x.to_bytes(byte_count, 'big')
def rotstate(state):
    """Print the AVB verification verdict for a boot state code (0 = ok)."""
    verdict = ("AVB-Status: VERIFIED, 0" if state == 0
               else "AVB-Status: RED, 3 or ORANGE, 1")
    print(verdict)
def main(argv):
    """CLI entry point: verify the signature / AVB hash of a boot image.

    Two paths: images with an "AVBf" footer are checked against AVB 1.x
    vbmeta metadata; older images are checked against the appended DER
    boot signature. Exits via exit(0) on every terminal condition.
    """
    info="Boot Signature Tool "+version+" (c) B.Kerler 2017-2019"
    print("\n"+info)
    print("----------------------------------------------")
    parser = argparse.ArgumentParser(description=info)
    parser.add_argument('--file','-f', dest='filename', default="", action='store', help='boot or recovery image filename')
    parser.add_argument('--vbmeta','-v', dest='vbmetaname', action='store', default='', help='vbmeta partition')
    parser.add_argument('--length', '-l', dest='inject', action='store_true', default=False, help='adapt signature length')
    args = parser.parse_args()
    if args.filename=="":
        print("Usage: verify_signature.py -f [boot.img]")
        exit(0)
    param=getheader(args.filename)
    # Every image section is padded up to the next page boundary.
    kernelsize = int((param.kernel_size + param.page_size - 1) / param.page_size) * param.page_size
    ramdisksize = int((param.ramdisk_size + param.page_size - 1) / param.page_size) * param.page_size
    secondsize = int((param.second_size + param.page_size - 1) / param.page_size) * param.page_size
    qcdtsize = int((param.qcdt_size + param.page_size - 1) / param.page_size) * param.page_size
    print("Kernel=0x%08X,\tlength=0x%08X" % (param.page_size, kernelsize))
    print("Ramdisk=0x%08X,\tlength=0x%08X" % ((param.page_size+kernelsize),ramdisksize))
    print("Second=0x%08X,\tlength=0x%08X" % ((param.page_size+kernelsize+ramdisksize),secondsize))
    print("QCDT=0x%08X,\tlength=0x%08X" % ((param.page_size+kernelsize+ramdisksize+secondsize),qcdtsize))
    length=param.page_size+kernelsize+ramdisksize+secondsize+qcdtsize
    print("Signature start=0x%08X" % length)
    with open(args.filename,'rb') as fr:
        data=fr.read()
    # NOTE(review): `os` is not imported here directly -- it presumably
    # arrives via `from Library.libavb import *`; confirm.
    filesize=os.stat(args.filename).st_size
    footerpos=(filesize//0x1000*0x1000)-AvbFooter.SIZE
    if data[footerpos:footerpos+4]==b"AVBf":
        # --- AVB >= 2.0 path: footer points at the vbmeta blob. ---
        ftr=AvbFooter(data[footerpos:footerpos+AvbFooter.SIZE])
        signature=data[ftr.vbmeta_offset:]
        data=data[0:ftr.vbmeta_offset]
        avbhdr=AvbVBMetaHeader(signature[:AvbVBMetaHeader.SIZE])
        release_string=avbhdr.release_string.replace(b"\x00",b"").decode('utf-8')
        print(f"\nAVB >=2.0 vbmeta detected: {release_string}\n----------------------------------------")
        if " 1.0" not in release_string and " 1.1" not in release_string:
            print("Sorry, only avb version <=1.1 is currently implemented")
            exit(0)
        hashdata=signature[avbhdr.SIZE:]
        imgavbhash=AvbHashDescriptor(hashdata)
        print("Image-Target: \t\t\t\t" + str(imgavbhash.partition_name))
        # digest_size = len(hashlib.new(name=avbhash.hash_algorithm).digest())
        # digest_padding = round_to_pow2(digest_size) - digest_size
        # block_size=4096
        # (hash_level_offsets, tree_size) = calc_hash_level_offsets(avbhash.image_size, block_size, digest_size + digest_padding)
        # root_digest, hash_tree = generate_hash_tree(fr, avbhash.image_size, block_size, avbhash.hash_algorithm, avbhash.salt, digest_padding, hash_level_offsets, tree_size)
        # Recompute the image digest: hash(salt || image bytes).
        ctx=hashlib.new(name=imgavbhash.hash_algorithm)
        ctx.update(imgavbhash.salt)
        ctx.update(data[:imgavbhash.image_size])
        root_digest=ctx.digest()
        print("Salt: \t\t\t\t\t" + str(hexlify(imgavbhash.salt).decode('utf-8')))
        print("Image-Size: \t\t\t\t" + hex(imgavbhash.image_size))
        img_digest=str(hexlify(root_digest).decode('utf-8'))
        img_avb_digest=str(hexlify(imgavbhash.digest).decode('utf-8'))
        print("\nCalced Image-Hash: \t\t\t" + img_digest)
        #print("Calced Hash_Tree: " + str(binascii.hexlify(hash_tree)))
        print("Image-Hash: \t\t\t\t" + img_avb_digest)
        avbmetacontent={}
        vbmeta=None
        # Fall back to a vbmeta.img in the working directory.
        if args.vbmetaname=="":
            if os.path.exists("vbmeta.img"):
                args.vbmetaname="vbmeta.img"
        if args.vbmetaname!="":
            with open(args.vbmetaname,'rb') as vbm:
                vbmeta=vbm.read()
            avbhdr=AvbVBMetaHeader(vbmeta[:AvbVBMetaHeader.SIZE])
            if avbhdr.magic!=b'AVB0':
                print("Unknown vbmeta data")
                exit(0)
            # Local views over the vbmeta blob: authentication block
            # (hash + signature) and auxiliary block (descriptors).
            class authentication_data(object):
                def __init__(self,hdr,data):
                    self.hash=data[0x100+hdr.hash_offset:0x100+hdr.hash_offset+hdr.hash_size]
                    self.signature=data[0x100+hdr.signature_offset:0x100+hdr.signature_offset+hdr.signature_size]
            class auxilary_data(object):
                def __init__(self, hdr, data):
                    self.data=data[0x100+hdr.authentication_data_block_size:0x100+hdr.authentication_data_block_size+hdr.auxiliary_data_block_size]
            authdata=authentication_data(avbhdr,vbmeta)
            auxdata=auxilary_data(avbhdr,vbmeta).data
            auxlen=len(auxdata)
            i=0
            # Walk the descriptor list, collecting per-partition metadata.
            while (i<auxlen):
                desc=AvbDescriptor(auxdata[i:])
                data=auxdata[i:]
                if desc.tag==AvbPropertyDescriptor.TAG:
                    avbproperty=AvbPropertyDescriptor(data)
                    avbmetacontent["property"]=dict(avbproperty=avbproperty)
                elif desc.tag==AvbHashtreeDescriptor.TAG:
                    avbhashtree=AvbHashtreeDescriptor(data)
                    partition_name=avbhashtree.partition_name
                    salt=avbhashtree.salt
                    root_digest=avbhashtree.root_digest
                    avbmetacontent[partition_name]=dict(salt=salt,root_digest=root_digest)
                elif desc.tag==AvbHashDescriptor.TAG:
                    avbhash=AvbHashDescriptor(data)
                    partition_name=avbhash.partition_name
                    salt=avbhash.salt
                    digest=avbhash.digest
                    avbmetacontent[partition_name] = dict(salt=salt,digest=digest)
                elif desc.tag==AvbKernelCmdlineDescriptor.TAG:
                    avbcmdline=AvbKernelCmdlineDescriptor(data)
                    kernel_cmdline=avbcmdline.kernel_cmdline
                    avbmetacontent["cmdline"] = dict(kernel_cmdline=kernel_cmdline)
                elif desc.tag==AvbChainPartitionDescriptor.TAG:
                    avbchainpartition=AvbChainPartitionDescriptor(data)
                    partition_name=avbchainpartition.partition_name
                    public_key=avbchainpartition.public_key
                    avbmetacontent[partition_name] = dict(public_key=public_key)
                i += desc.SIZE+len(desc.data)
            vbmeta_digest=None
            if imgavbhash.partition_name in avbmetacontent:
                if "digest" in avbmetacontent[imgavbhash.partition_name]:
                    digest=avbmetacontent[imgavbhash.partition_name]["digest"]
                    vbmeta_digest = str(hexlify(digest).decode('utf-8'))
                    print("VBMeta-Image-Hash: \t\t\t" + vbmeta_digest)
            else:
                print("Couldn't find "+imgavbhash.partition_name+" in "+args.vbmetaname)
                exit(0)
        if vbmeta!=None:
            # Extract the RSA public key stored in the vbmeta blob.
            pubkeydata=vbmeta[AvbVBMetaHeader.SIZE+avbhdr.authentication_data_block_size+avbhdr.public_key_offset:
                              AvbVBMetaHeader.SIZE+avbhdr.authentication_data_block_size+avbhdr.public_key_offset
                              +avbhdr.public_key_size]
            modlen = struct.unpack(">I",pubkeydata[:4])[0]//4
            n0inv = struct.unpack(">I", pubkeydata[4:8])[0]
            modulus=hexlify(pubkeydata[8:8+modlen]).decode('utf-8')
            print("\nSignature-RSA-Modulus (n):\t"+modulus)
            print("Signature-n0inv: \t\t\t" + str(n0inv))
            # Well-known AOSP test-key modulus.
            if modulus=="d804afe3d3846c7e0d893dc28cd31255e962c9f10f5ecc1672ab447c2c654a94b5162b00bb06ef1307534cf964b9287a1b849888d867a423f9a74bdc4a0ff73a18ae54a815feb0adac35da3bad27bcafe8d32f3734d6512b6c5a27d79606af6bb880cafa30b4b185b34daaaac316341ab8e7c7faf90977ab9793eb44aecf20bcf08011db230c4771b96dd67b604787165693b7c22a9ab04c010c30d89387f0ed6e8bbe305bf6a6afdd807c455e8f91935e44feb88207ee79cabf31736258e3cdc4bcc2111da14abffe277da1f635a35ecadc572f3ef0c95d866af8af66a7edcdb8eda15fba9b851ad509ae944e3bcfcb5cc97980f7cca64aa86ad8d33111f9f602632a1a2dd11a661b1641bdbdf74dc04ae527495f7f58e3272de5c9660e52381638fb16eb533fe6fde9a25e2559d87945ff034c26a2005a8ec251a115f97bf45c819b184735d82d05e9ad0f357415a38e8bcc27da7c5de4fa04d3050bba3ab249452f47c70d413f97804d3fc1b5bb705fa737af482212452ef50f8792e28401f9120f141524ce8999eeb9c417707015eabec66c1f62b3f42d1687fb561e45abae32e45e91ed53665ebdedade612390d83c9e86b6c2da5eec45a66ae8c97d70d6c49c7f5c492318b09ee33daa937b64918f80e6045c83391ef205710be782d8326d6ca61f92fe0bf0530525a121c00a75dcc7c2ec5958ba33bf0432e5edd00db0db33799a9cd9cb743f7354421c28271ab8daab44111ec1e8dfc1482924e836a0a6b355e5de95ccc8cde39d14a5b5f63a964e00acb0bb85a7cc30be6befe8b0f7d348e026674016cca76ac7c67082f3f1aa62c60b3ffda8db8120c007fcc50a15c64a1e25f3265c99cbed60a13873c2a45470cca4282fa8965e789b48ff71ee623a5d059377992d7ce3dfde3a10bcf6c85a065f35cc64a635f6e3a3a2a8b6ab62fbbf8b24b62bc1a912566e369ca60490bf68abe3e7653c27aa8041775f1f303621b85b2b0ef8015b6d44edf71acdb2a04d4b421ba655657e8fa84a27d130eafd79a582aa381848d09a06ac1bbd9f586acbd756109e68c3d77b2ed3020e4001d97e8bfc7001b21b116e741672eec38bce51bb4062331711c49cd764a76368da3898b4a7af487c8150f3739f66d8019ef5ca866ce1b167921dfd73130c421dd345bd21a2b3e5df7eaca058eb7cb492ea0e3f4a74819109c04a7f42874c86f63202b462426191dd12c316d5a29a206a6b241cc0a27960996ac476578685198d6d8a62da0cfece274f282e397d97ed4f80b70433db17b9780d6cbd719bc630bfd4d88fe67acb8cc50b768b35bd61e25fc5f3c8db1337cb349013f71550e51ba6126faeae5b5e8aacfcd969fd6c15f5391ad05de20e751da5b9567edf4ee426570130b70141cc9e019ca5ff51d704b6c0674ecb52e77e174a1a399a0859ef1acd87e":
                print("\n!!!! Image seems to be signed by google test keys, yay !!!!")
        else:
            print("VBMeta info missing... please copy vbmeta.img to the directory.")
        state=3
        if img_digest==img_avb_digest:
            state=0
        # NOTE(review): vbmeta_digest is only assigned inside the
        # args.vbmetaname!="" branch -- with no vbmeta available this line
        # raises NameError; looks like a latent bug, confirm.
        if vbmeta_digest!=None:
            if vbmeta_digest==img_digest:
                state=0
            else:
                state=3
        rotstate(state)
        exit(0)
    else:
        # --- Legacy path: DER boot signature appended after the image. ---
        signature=data[length:]
        data=data[:length]
        sha256 = hashlib.sha256()
        sha256.update(data)
        try:
            target,siglength,hash,pub_key,flag=dump_signature(signature)
        except:
            print("No signature found :/")
            exit(0)
        id=hexlify(data[576:576+32])
        print("\nID: "+id.decode('utf-8'))
        print("Image-Target: "+str(target))
        print("Image-Size: "+hex(length))
        print("Signature-Size: "+hex(siglength))
        # Rebuild the AuthenticatedAttributes DER blob that is hashed
        # together with the image: target name + image length.
        meta=b"\x30"+flag+b"\x13"+bytes(struct.pack('B',len(target)))+target+b"\x02\x04"+bytes(struct.pack(">I",length))
        #print(meta)
        sha256.update(meta)
        digest=sha256.digest()
        print("\nCalced Image-Hash:\t"+hexlify(digest).decode('utf8'))
        print("Signature-Hash:\t\t" + hexlify(hash).decode('utf8'))
        if str(hexlify(digest))==str(hexlify(hash)):
            rotstate(0)
        else:
            rotstate(3)
        modulus=int_to_bytes(pub_key.n)
        exponent=int_to_bytes(pub_key.e)
        mod=str(hexlify(modulus).decode('utf-8'))
        print("\nSignature-RSA-Modulus (n):\t"+mod)
        print("Signature-RSA-Exponent (e):\t" + str(hexlify(exponent).decode('utf-8')))
        # Well-known AOSP test-key modulus (RSA-2048).
        if mod=="e8eb784d2f4d54917a7bb33bdbe76967e4d1e43361a6f482aa62eb10338ba7660feba0a0428999b3e2b84e43c1fdb58ac67dba1514bb4750338e9d2b8a1c2b1311adc9e61b1c9d167ea87ecdce0c93173a4bf680a5cbfc575b10f7436f1cddbbccf7ca4f96ebbb9d33f7d6ed66da4370ced249eefa2cca6a4ff74f8d5ce6ea17990f3550db40cd11b319c84d5573265ae4c63a483a53ed08d9377b2bccaf50c5a10163cfa4a2ed547f6b00be53ce360d47dda2cdd29ccf702346c2370938eda62540046797d13723452b9907b2bd10ae7a1d5f8e14d4ba23534f8dd0fb1484a1c8696aa997543a40146586a76e981e4f937b40beaebaa706a684ce91a96eea49":
            print("\n!!!! Image seems to be signed by google test keys, yay !!!!")
        # TrustZone "root of trust" = sha256(sha256(n||e) || lock_state).
        sha256 = hashlib.sha256()
        sha256.update(modulus+exponent)
        pubkey_hash=sha256.digest()
        locked=pubkey_hash+struct.pack('<I',0x0)
        unlocked = pubkey_hash + struct.pack('<I', 0x1)
        sha256 = hashlib.sha256()
        sha256.update(locked)
        root_of_trust_locked=sha256.digest()
        sha256 = hashlib.sha256()
        sha256.update(unlocked)
        root_of_trust_unlocked=sha256.digest()
        print("\nTZ Root of trust (locked):\t\t" + str(hexlify(root_of_trust_locked).decode('utf-8')))
        print("TZ Root of trust (unlocked):\t" + str(hexlify(root_of_trust_unlocked).decode('utf-8')))
        if (args.inject==True):
            # Patch the length field inside the signature so it matches the
            # computed image length, writing <name>_signed.bin.
            pos = signature.find(target)
            if (pos != -1):
                lenpos = signature.find(struct.pack(">I",length)[0],pos)
                if (lenpos!=-1):
                    with open(args.filename[0:-4]+"_signed.bin",'wb') as wf:
                        wf.write(data)
                        wf.write(signature[0:lenpos])
                        wf.write(struct.pack(">I",length))
                        wf.write(signature[lenpos+4:])
                    print("Successfully injected !")


if __name__ == "__main__":
    main(sys.argv[1:])
|
import logging
import os
import unittest
from utils.users import UsersUtil
class UsersUtilTest(unittest.TestCase):
    """Username/actor parsing tests for UsersUtil with a stubbed actor fetch."""

    def setUp(self):
        def _fake_fetch_actor(actor_url):
            # Canned ActivityPub actor document; avoids any network access.
            return {
                'preferredUsername': 'cianlr',
                'summary': 'cianlr is here'
            }
        os.environ["HOST_NAME"] = "cianisharrypotter.secret"
        self.logger = logging.getLogger(__name__)
        self.util = UsersUtil(self.logger, None)
        self.util._activ_util.fetch_actor = _fake_fetch_actor

    def _assert_parsed(self, raw, expected_user, expected_host):
        parsed_user, parsed_host = self.util.parse_username(raw)
        self.assertEqual(parsed_user, expected_user)
        self.assertEqual(parsed_host, expected_host)

    def test_parse_local_username(self):
        self._assert_parsed('admin', 'admin', None)

    def test_parse_foreign_username(self):
        self._assert_parsed('cianlr@neopets.com', 'cianlr', 'neopets.com')

    def test_parse_prefixed_local_username(self):
        self._assert_parsed('@admin', 'admin', None)

    def test_parse_prefixed_foreign_username(self):
        self._assert_parsed('@cianlr@neopets.com', 'cianlr', 'neopets.com')

    def test_parse_actor(self):
        host, user = self.util.parse_actor('https://neopets.com/@cianlr')
        self.assertEqual(host, 'https://neopets.com')
        self.assertEqual(user, 'cianlr')

    def test_parse_bad_username(self):
        # A doubly-qualified name is invalid and must be logged.
        with self.assertLogs(self.logger, level='WARNING'):
            username, host = self.util.parse_username('a@b@c')
        self.assertIsNone(username)
        self.assertIsNone(host)

    def test_get_or_create_user_from_db_too_many_attempts(self):
        # An attempt number far beyond the retry limit must give up with None.
        resp = self.util.get_or_create_user_from_db(
            None, None, attempt_number=100)
        self.assertIsNone(resp)


if __name__ == '__main__':
    unittest.main()
|
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from wshop.models.fields import NullCharField
class NullCharFieldTest(TestCase):
    """Round-trip and configuration checks for NullCharField."""

    def test_from_db_value_converts_null_to_string(self):
        # Reading SQL NULL must surface as the empty string.
        field = NullCharField()
        result = field.from_db_value(None, expression=None, connection=None, context=None)
        self.assertEqual('', result)

    def test_get_prep_value_converts_empty_string_to_null(self):
        # Writing the empty string must be stored as SQL NULL.
        self.assertEqual(None, NullCharField().get_prep_value(''))

    def test_raises_exception_for_invalid_null_blank_combo(self):
        # Any combination other than null=True/blank=True is rejected.
        for null, blank in ((True, False), (False, True), (False, False)):
            with self.assertRaises(ImproperlyConfigured):
                NullCharField(null=null, blank=blank)
|
import logging
import threading
import time
from cachetools import LRUCache
from celery.events import EventReceiver
import tornado.gen
from tornado.ioloop import PeriodicCallback
from tornado.queues import Queue
logger = logging.getLogger(__name__)
class EventMonitor(threading.Thread):
    # Upper bound on buffered events; the Queue blocks producers beyond it.
    max_events = 100

    def __init__(self, capp, io_loop):
        """Monitors and stores events received from celery.

        capp -- the Celery application to connect to
        io_loop -- tornado IOLoop used to hand events to consumers
        """
        super().__init__()
        self.capp = capp
        self.io_loop = io_loop
        self.events = Queue(self.max_events)

    def run(self):
        # We don't want too frequent retries
        try_interval = 1
        while True:
            try:
                # Exponential backoff; reset to 1 once a connection works.
                try_interval *= 2
                with self.capp.connection() as conn:
                    recv = EventReceiver(conn,
                                         handlers={'*': self.on_event},
                                         app=self.capp)
                    try_interval = 1
                    # Blocks indefinitely, dispatching every event to on_event.
                    recv.capture(limit=None, timeout=None, wakeup=True)
            except (KeyboardInterrupt, SystemExit):
                # Running in a worker thread, so propagate shutdown to main.
                import _thread
                _thread.interrupt_main()
            except Exception as e:
                logger.error('Failed to capture events: "%s", '
                             'trying again in %s seconds.',
                             e, try_interval)
                logger.debug(e, exc_info=True)
                time.sleep(try_interval)

    def on_event(self, event):
        """Called when an event from celery is received."""
        # Transfer control to IOLoop, tornado.queue is not thread-safe
        self.io_loop.add_callback(self.events.put, event)
class EventHandler:
    """Routes celery task events from the EventMonitor to per-task
    listener callbacks, and caches the final event of recently
    finished tasks so late listeners still receive it."""

    # PeriodicCallback interval is in milliseconds: enable events
    # every 5 seconds (the original comment said "seconds").
    events_enable_interval = 5000  # in milliseconds
    # Maximum number of finished items to keep track of
    max_finished_history = 1000
    # celery events that represent a task finishing
    finished_events = (
        'task-succeeded',
        'task-failed',
        'task-rejected',
        'task-revoked',
    )

    def __init__(self, capp, io_loop):
        """Monitors events that are received from celery.
        capp - The celery app
        io_loop - The event loop to use for dispatch
        """
        super().__init__()
        self.capp = capp
        self.timer = PeriodicCallback(self.on_enable_events,
                                      self.events_enable_interval)
        self.monitor = EventMonitor(self.capp, io_loop)
        # task_id -> callback waiting for that task's events
        self.listeners = {}
        # task_id -> final event, for tasks that already finished
        self.finished_tasks = LRUCache(self.max_finished_history)

    @tornado.gen.coroutine
    def start(self):
        """Start event handler.
        Expects to be run as a coroutine.
        """
        self.timer.start()
        logger.debug('Starting celery monitor thread')
        self.monitor.start()
        while True:
            event = yield self.monitor.events.get()
            try:
                task_id = event['uuid']
            except KeyError:
                # Not a task-related event (no uuid field); ignore it.
                continue
            # Record finished tasks in-case they are requested
            # too late or are re-requested.
            if event['type'] in self.finished_events:
                self.finished_tasks[task_id] = event
            try:
                callback = self.listeners[task_id]
            except KeyError:
                pass
            else:
                callback(event)

    def stop(self):
        """Stop the periodic enable-events timer."""
        self.timer.stop()
        # FIXME: can not be stopped gracefully
        # self.monitor.stop()

    def on_enable_events(self):
        """Called periodically to enable events for workers
        launched after the monitor.
        """
        try:
            self.capp.control.enable_events()
        except Exception as e:
            logger.debug('Failed to enable events: %s', e)

    def add_listener(self, task_id, callback):
        """Add event listener for a task with ID `task_id`."""
        try:
            event = self.finished_tasks[task_id]
        except KeyError:
            self.listeners[task_id] = callback
        else:
            # Task has already finished
            callback(event)

    def remove_listener(self, task_id):
        """Remove listener for `task_id`."""
        try:
            del self.listeners[task_id]
        except KeyError:  # may have been cached
            pass
|
from os.path import join, dirname
from os import environ
from watson_developer_cloud import VisualRecognitionV3
# SECURITY: the API key was hard-coded here. Prefer the WATSON_API_KEY
# environment variable; the old key is kept only as a backward-compatible
# fallback and should be revoked/rotated.
visual_recognition = VisualRecognitionV3(
    '2016-05-20',
    api_key=environ.get('WATSON_API_KEY',
                        '4a5dce0273f76cfc7fdebaec7d43f6828a512194'))


def classify(url):
    """Run Watson Visual Recognition on the image at `url` and return
    the raw classification response."""
    return visual_recognition.classify(images_url=url)
|
# -*- coding: utf-8-*-
import os
from robot import config, utils, logging
from watchdog.events import FileSystemEventHandler
logger = logging.getLogger(__name__)
class ConfigMonitor(FileSystemEventHandler):
    """Watchdog handler that reloads the configuration (and the active
    conversation) whenever a YAML config file changes on disk."""

    def __init__(self, conversation):
        FileSystemEventHandler.__init__(self)
        self._conversation = conversation

    # File modified
    def on_modified(self, event):
        """Validate and reload the config when a .yaml/.yml file changes."""
        if event.is_directory:
            return
        filename = event.src_path
        extension = os.path.splitext(filename)[-1].lower()
        if extension in ('.yaml', '.yml'):
            # Only reload when the changed file parses as valid YAML
            # (validyaml returns None on success).
            err = utils.validyaml(filename)
            if err is None:
                logger.info("检测到文件 {} 发生变更".format(filename))
                config.reload()
                self._conversation.reload()
|
import time
class HumanBrain(object):
    """Simulates human-like input timing when sending key presses.

    Requires `self.keyboard` to be set to an object exposing
    press(keyCode)/release(keyCode) before keyPress() is called.
    """

    def __init__(self):
        # Bounds for how long a key is held down.
        self.minKeyPress = 10
        self.maxKeyPress = 100
        # Relative timing deviation to appear less robotic.
        self.timeAwareDeviation = 0.3

    def keyPress(self, keyCode):
        """Send a human like keypress.

        Keyword arguments:
        keyCode -- the real key to be pressed (example Keycode.SEVEN)

        Fix: keyCode was documented but missing from the signature,
        so every call raised NameError.
        """
        self.keyboard.press(keyCode)
        time.sleep(0.1)
        self.keyboard.release(keyCode)
# Multiply two floats and embed the result in a message string.
float_1 = 0.25
float_2 = 40.0
product = float_1 * float_2
big_string = f"The product was {product}"  # float to string
|
import os
import time
import unittest
import twodlearn as tdl
import twodlearn.datasets.cifar10
from twodlearn.templates.supervised import (
LinearClassifier, MlpClassifier, AlexNetClassifier)
TESTS_PATH = os.path.dirname(os.path.abspath(__file__))
TMP_PATH = os.path.join(TESTS_PATH, 'cifar10_data/')
class OptimTest(unittest.TestCase):
    """Smoke-tests the supervised templates on the CIFAR-10 dataset."""

    def _fit_and_time(self, model, dataset):
        # Train for the configured number of steps and report wall time.
        start = time.time()
        model.fit(dataset)
        print('training took:', time.time() - start)

    def test_linear(self):
        dataset = tdl.datasets.cifar10.Cifar10(work_directory=TMP_PATH,
                                               reshape=True)
        model = LinearClassifier(n_inputs=32*32*3, n_classes=10,
                                 logger_path=os.path.join(TMP_PATH, 'loggers'),
                                 options={'train/optim/max_steps': 200})
        self._fit_and_time(model, dataset)

    def test_mlp(self):
        dataset = tdl.datasets.cifar10.Cifar10(work_directory=TMP_PATH,
                                               reshape=True)
        model = MlpClassifier(n_inputs=32*32*3, n_classes=10,
                              n_hidden=[500],
                              logger_path=os.path.join(TMP_PATH, 'loggers'),
                              options={'train/optim/learning_rate': 0.002,
                                       'train/optim/max_steps': 200})
        self._fit_and_time(model, dataset)

    def test_convnet(self):
        dataset = tdl.datasets.cifar10.Cifar10(work_directory=TMP_PATH,
                                               reshape=False)
        model = AlexNetClassifier(
            input_shape=[32, 32, 3],
            n_classes=10,
            n_filters=[32, 64],
            filter_sizes=[[5, 5], [5, 5]],
            pool_sizes=[[2, 2], [2, 2]],
            n_hidden=[1024],
            logger_path=os.path.join(TMP_PATH, 'loggers'),
            options={'train/optim/learning_rate': 0.001,
                     'train/optim/max_steps': 200})
        self._fit_and_time(model, dataset)
if __name__ == "__main__":
unittest.main()
|
import itertools
import numpy as np
from functools import partial
from pyquil import Program, api
from pyquil.paulis import PauliSum, PauliTerm, exponential_map, sZ
from pyquil.gates import *
from scipy.optimize import minimize
import pennylane as qml
from pennylane import numpy as np
np.set_printoptions(precision=3, suppress=True)
import re
def create_circuit(beta, gamma,initial_state,exp_Hm,exp_Hc):
    # Build a p-layer QAOA circuit by appending alternating cost/mixer
    # exponentials to the initial state.
    # NOTE(review): depends on the module-level `p` for the layer count,
    # and applies exp_Hc with beta and exp_Hm with gamma -- the
    # conventional beta/gamma roles are reversed; confirm intended.
    circuit = Program()
    circuit += initial_state
    for i in range(p):
        for term_exp_Hc in exp_Hc:
            circuit += term_exp_Hc(-beta[i])
        for term_exp_Hm in exp_Hm:
            circuit += term_exp_Hm(-gamma[i])
    return circuit
# set p beforehand
p = 2
def QAOA_circ(parameters):# = np.random.uniform(0, np.pi*2, 2*p)):
    """Build a p-layer QAOA circuit in pyquil, replay its gates in
    PennyLane, and return the expectation of the cost Hamiltonian.

    parameters -- flat array of length 2*p: the first p entries are
    betas, the remaining p are gammas.
    """
    # FIX: `n_qubits` was referenced at the end of this function but was
    # never defined in its scope (NameError at runtime); bind it here and
    # pass it through explicitly.
    n_qubits = 2
    beta = parameters[:p]
    gamma = parameters[p:]

    def set_up_QAOA_in_pyquil(beta, gamma, p, n_qubits=2,
                              J=np.array([[0, 1], [0, 0]])):
        # Mixer Hamiltonian: an X term on every qubit.
        Hm = [PauliTerm("X", i, 1.0) for i in range(n_qubits)]
        Hc = []
        # Prepare the hamiltonian for measurement.
        Hamilton = prepare_qaoa_hamiltonian(J, n_qubits)
        # Initial state: uniform superposition via H on every qubit.
        initial_state = Program()
        for i in range(n_qubits):
            initial_state += H(i)
        # Cost Hamiltonian: ZZ couplings weighted by -J[i, j].
        for i in range(n_qubits):
            for j in range(n_qubits):
                Hc.append(PauliTerm("Z", i, -J[i, j]) * PauliTerm("Z", j, 1.0))
        exp_Hm = [exponential_map(term) for term in Hm]
        exp_Hc = [exponential_map(term) for term in Hc]
        qaoa_circuit = create_circuit(beta, gamma, initial_state,
                                      exp_Hm, exp_Hc)
        return Hamilton, qaoa_circuit

    Hamilton, pyquil_circ = set_up_QAOA_in_pyquil(beta, gamma, p,
                                                  n_qubits=n_qubits)
    # Translate the pyquil program gate-by-gate into PennyLane operations
    # by parsing its textual representation line by line.
    for item in str(pyquil_circ).split('\n'):
        if 'H' in item:
            q_1 = item[item.find('H')+2]
            qml.Hadamard(wires=q_1)
        elif 'RZ(' in item:
            temp = item.replace('RZ(', '')
            u_p_1 = temp[:temp.find(')')]
            q_1 = temp[temp.find(')')+2]
            qml.RZ(float(u_p_1), wires=q_1)
        elif 'RX' in item:
            # RX gates are intentionally not translated here.
            pass
        elif 'CNOT' in item:
            temp = item.replace('CNOT ', '')
            qml.CNOT(wires=[temp[0], temp[2]])
    wiress = [i for i in range(n_qubits)]
    return qml.expval.Hermitian(Hamilton, wires=wiress)
|
import os
import glob
import pyopenms
# Collect the fully-qualified name of every public method exposed by
# pyopenms, so that each can later be checked against the set of names
# declared as tested in the unit-test docstrings.
toTest = set()
# pyopenms in the ignore list as it is represented twice in
# the pyopenms package
ignore = ["numpy", "np", "re", "os", "types", "sysinfo", "pyopenms"]
for clz_name, clz in pyopenms.__dict__.items():
    # Skip ignored helper modules and dunder entries of the package.
    if clz_name in ignore or clz_name.startswith("__"):
        continue
    if not hasattr(clz, "__dict__"):
        continue
    for method_name, method in clz.__dict__.items():
        # Single-underscore (private) methods are not part of the API.
        if method_name.startswith("_") and not method_name.startswith("__"):
            continue
        if method_name in [ "__doc__", "__new__", "__file__", "__name__",
                "__package__", "__builtins__", "__copy__"]:
            continue
        toTest.add("%s.%s" % (clz_name, method_name))
def parse_doc(item, collection):
    """Parse an ``@tests ... @end`` section out of *item*'s docstring and
    add each declared ``Class.method`` name to *collection*.

    A line of the form ``.method`` reuses the class named on the
    previous declaration line.  (Python 2 source: uses print statements.)
    """
    if item.__doc__ is not None:
        it = iter(item.__doc__.split("\n"))
        for line in it:
            if not "@tests" in line:
                continue
            # Marker found: consume declaration lines until @end.
            for line in it:
                line = line.strip()
                if "@end" in line:
                    break
                if not line:
                    continue
                clz, method = line.split(".")
                if clz=="":
                    # NOTE(review): `oldclzz` is unbound if the very first
                    # declaration starts with "." -- would raise here.
                    clz = oldclzz
                fullname = "%s.%s" % (clz, method)
                if fullname.endswith("()"):
                    print fullname, "declared with parentesis, fix it"
                    fullname = fullname[:-2]
                collection.add(fullname)
                oldclzz = clz
def collectRecursed(obj, collection):
    """Recursively walk *obj*'s attributes and parse test declarations
    from the docstrings of anything named TEST*/_TEST* (case-insensitive)."""
    if hasattr(obj, "__dict__"):
        for name, item in obj.__dict__.items():
            if name.upper().startswith("TEST") or\
                name.upper().startswith("_TEST"):
                parse_doc(item, collection)
                collectRecursed(item, collection)
# Import every unit-test module and gather all Class.method names that
# its tests declare as covered.
declaredAsTested = set()
for p in glob.glob("tests/unittests/test*.py"):
    module_name= p[:-3].replace("/",".").replace(os.sep, ".")
    module = __import__(module_name).unittests
    collectRecursed(module, declaredAsTested)
# Report API methods that no test declares coverage for...
missing = toTest-declaredAsTested
if missing:
    print
    print len(missing), "tests/test declarations are missing !"
    for name in sorted(missing):
        print " ", name
# ...and declarations that do not match any known API method.
toMuch = declaredAsTested-toTest
if toMuch:
    print
    print len(toMuch), "tests/test declarations do not fit:"
    for name in toMuch:
        print " ", name
|
# -*- coding: utf-8 -*-
#
# (c) 2017 Kilian Kluge
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Resource management was added to EFrame by Kilian Kluge.
"""
import logging
import threading
from PyQt4 import QtCore
class Resources(QtCore.QObject):
    """Coordinates claiming and releasing of hardware resources across
    all registered modules.

    claim()/release() fan the request out to every module and poll every
    100 ms (via QTimer) until all modules have responded or ~5 seconds
    have passed.  Both return a threading.Event that is set once the
    operation has finished (successfully or not); check areClaimed()
    afterwards for the outcome.  remoteClaim()/remoteRelease() are
    thread-safe entry points that re-dispatch onto the Qt event loop via
    signals.
    """

    claim_signal = QtCore.pyqtSignal()
    release_signal = QtCore.pyqtSignal()

    def __init__(self, modules):
        super(Resources, self).__init__()
        self.logger = logging.getLogger("State.Resources")
        self.modules = modules
        # Set when the corresponding claim/release cycle completes.
        self.claimEvent = threading.Event()
        self.releaseEvent = threading.Event()
        # claiming is True while either a claim or a release is running;
        # claimed reflects whether all module resources are held.
        self.claiming = False
        self.claimed = False
        self.remoteClaimRequested = False
        self.remoteReleaseRequested = False
        # Number of 100 ms polling iterations in the current cycle.
        self.responseCounter = 0
        self.releasedEvents = {}
        self.unclaimedModules = []
        self.claimedResponses = {}
        self.claim_signal.connect(self.claim)
        self.release_signal.connect(self.release)

    def areClaimed(self):
        """Return True if all module resources are currently claimed."""
        return self.claimed

    def isClaiming(self):
        """Return True while a claim/release operation is in progress."""
        return self.claiming

    def remoteClaimRequestHandled(self):
        """Return True once a pending remoteClaim() has been processed."""
        return not self.remoteClaimRequested

    def remoteReleaseRequestHandled(self):
        """Return True once a pending remoteRelease() has been processed."""
        return not self.remoteReleaseRequested

    def remoteClaim(self):
        """Thread-safe claim request, dispatched via claim_signal."""
        if self.remoteClaimRequested:
            self.logger.warning(
                "Received request for remoteClaim while previous request "
                "has not yet been handled.")
        else:
            self.remoteClaimRequested = True
            self.claim_signal.emit()

    def remoteRelease(self):
        """Thread-safe release request, dispatched via release_signal."""
        if self.remoteReleaseRequested:
            self.logger.warning(
                "Received request for remoteRelease while previous request "
                "has not yet been handled.")
        else:
            self.remoteReleaseRequested = True
            self.release_signal.emit()

    def claim(self):
        """Ask every module to claim its hardware resources.

        Returns a threading.Event that is set once the attempt finished.
        """
        self.logger.info("Attempting to claim hardware Resources.")
        if self.claiming or self.claimed:
            self.logger.warning("Already claimed Resources (%s) or currently "
                                "claiming/releasing (%s).",
                                self.claimed, self.claiming)
            # Hand back an already-set event so the caller never blocks.
            fakeClaimEvent = threading.Event()
            fakeClaimEvent.set()
            self.remoteClaimRequested = False
            return fakeClaimEvent
        else:
            self.logger.debug("Clearing events and variables.")
            self.claimEvent.clear()
            self.claiming = True
            self.claimed = False
            self.remoteClaimRequested = False
            self.logger.debug("Requesting claimedEvents and claimedFlags.")
            self.claimedResponses = {}
            self.unclaimedModules = []
            for module in self.modules.itervalues():
                self.claimedResponses[module] = module.claimResources()
            self.responseCounter = 0
            self.logger.debug("Starting timer for _waitForClaim.")
            QtCore.QTimer.singleShot(100, self._waitForClaim)
            return self.claimEvent

    def _waitForClaim(self):
        """Poll every 100 ms until all modules responded to the claim."""
        if self.claiming:
            self.logger.debug("Waiting for claim.")
            ready = True
            for module, event in self.claimedResponses.iteritems():
                if event.isSet():
                    if (not module.claimed) and \
                            not (module in self.unclaimedModules):
                        self.logger.warning("Could not claim %s",
                                            module._name)
                        self.logger.debug("Module instance: %s",
                                          module)
                        self.unclaimedModules.append(module)
                else:
                    ready = False
            if ready:
                self.logger.info("All modules responded to claim.")
                self.logger.info("Unclaimed: %s", self.unclaimedModules)
                if not self.unclaimedModules:
                    self.logger.info("Successfully claimed all modules.")
                    self.claimed = True
                    self.claimEvent.set()
                    self.claiming = False
                else:
                    self.logger.debug("Could not claim the following "
                                      "modules' hardware Resources: "
                                      "%s", self.unclaimedModules)
                    self.claimed = False
                    self._releaseAfterUnsuccesfulClaim()
            else:
                self.responseCounter += 1
                self.logger.debug("Claim responseCounter: %d",
                                  self.responseCounter)
                if self.responseCounter > 50:
                    unresponsiveModules = []
                    # FIX: inspect each module's own response event; the
                    # original re-tested the stale loop variable `event`.
                    for module, event in self.claimedResponses.iteritems():
                        if not event.isSet():
                            unresponsiveModules.append(module)
                            event.set()  # signal the module to stop trying
                    self.logger.error("Took longer than 5 seconds to claim "
                                      "resources. Still waiting for: %s",
                                      unresponsiveModules)
                    self._releaseAfterUnsuccesfulClaim()
                else:
                    self.logger.debug("Set timer for _waitForClaim.")
                    QtCore.QTimer.singleShot(100, self._waitForClaim)
        else:
            self.logger.warning("Resource claim was aborted.")
            for event in self.claimedResponses.itervalues():
                event.set()  # signal the modules to stop trying
            # Same clean-up path as an unsuccessful claim (the original
            # duplicated the helper's body here).
            self._releaseAfterUnsuccesfulClaim()

    def release(self):
        """Ask every module to release its hardware resources.

        Returns a threading.Event that is set once the release finished.
        """
        self.logger.info("Attempting to release hardware resources.")
        if self.claiming:
            self.logger.warning("Already claiming/releasing Resources.")
            if self.releaseEvent.isSet():
                # we are likely claiming, so the caller can go on with
                # their work and will fail later
                fakeReleaseEvent = threading.Event()
                fakeReleaseEvent.set()
                self.remoteReleaseRequested = False
                return fakeReleaseEvent
            else:
                # we are releasing, so the caller might as well wait
                return self.releaseEvent
        else:
            self.logger.debug("Clearing events and variables.")
            if not self.claimed:
                self.logger.warning("Attempting to release resources "
                                    "even though not all resources are "
                                    "currently claimed.")
            self.claiming = True
            self.releaseEvent.clear()
            self.remoteReleaseRequested = False
            self.logger.debug("Requesting releasedEvents.")
            self.releasedEvents = {}
            for module in self.modules.itervalues():
                self.releasedEvents[module] = module.releaseResources()
            self.responseCounter = 0
            self.logger.debug("Starting timer for _waitForRelease.")
            QtCore.QTimer.singleShot(100, self._waitForRelease)
            return self.releaseEvent

    def _waitForRelease(self):
        """Poll every 100 ms until all modules responded to the release."""
        if self.claiming:
            ready = True
            for module, event in self.releasedEvents.iteritems():
                if not event.isSet():
                    ready = False
            if ready:
                self.logger.info("All modules responded to release request.")
                self.claimed = False
                self.releaseEvent.set()
                self.claiming = False
                self.logger.debug("Released resources.")
            else:
                # FIX: the original used `=+ 1`, resetting the counter to 1
                # on every poll so the timeout below could never fire.
                self.responseCounter += 1
                self.logger.debug("Release responseCounter: %d",
                                  self.responseCounter)
                if self.responseCounter > 50:
                    unresponsiveModules = []
                    # FIX: scan the release events; the original looped over
                    # claimedResponses and tested a stale `event` variable.
                    for module, event in self.releasedEvents.iteritems():
                        if not event.isSet():
                            unresponsiveModules.append(module)
                            event.set()  # signal the module to stop trying
                    self.logger.error("Took longer than 5 seconds to release "
                                      "resources. Still waiting for: %s",
                                      unresponsiveModules)
                    self.releaseEvent.set()  # unfreeze the calling module
                    self.claimed = False  # consistency
                    self.claiming = False
                    self.logger.debug("Signalled released resources.")
                else:
                    self.logger.debug("Starting timer for _waitForRelease.")
                    QtCore.QTimer.singleShot(100, self._waitForRelease)
        else:
            self.logger.error("Resource release was aborted. This should never "
                              "happen and points to a deeper issue.")
            for event in self.releasedEvents.itervalues():
                event.set()  # signal the modules to stop trying

    def _releaseAfterUnsuccesfulClaim(self):
        """Signal claim completion (unsuccessful) and start a release."""
        self.logger.info("Now trying to release Resources")
        self.claimEvent.set()
        self.claiming = False
        self.release()
|
'''
Detectors entry point.
'''
def get_bounding_boxes(frame, model):
    '''
    Run object detection algorithm and return a list of bounding boxes and other metadata.

    frame -- the image/frame to run detection on.
    model -- one of 'yolo', 'haarcascade' or 'tfoda'.

    Raises ValueError for an unknown model name.  The backend module is
    imported lazily so only the selected detector is loaded.
    '''
    if model == 'yolo':
        from detectors.yolo import get_bounding_boxes as gbb
    elif model == 'haarcascade':
        from detectors.haarcascade import get_bounding_boxes as gbb
    elif model == 'tfoda':
        from detectors.tfoda import get_bounding_boxes as gbb
    else:
        # ValueError is more precise than a bare Exception and remains
        # backward-compatible with callers catching Exception.
        raise ValueError('Invalid detector model, algorithm or API specified (options: yolo, tfoda, haarcascade)')
    return gbb(frame)
|
# Leo colorizer control file for actionscript mode.
# This file is in the public domain.
# Properties for actionscript mode.
# Editor behaviour properties consumed by Leo's colorizer/indenter.
properties = {
    "commentEnd": "*/",
    "commentStart": "/*",
    "doubleBracketIndent": "false",
    "indentCloseBrackets": "}",
    "indentOpenBrackets": "{",
    # NOTE(review): this pattern has unbalanced parentheses; it is copied
    # verbatim from the jEdit mode file and is interpreted by the
    # colorizer, not compiled with Python's re -- verify before "fixing".
    "indentPrevLine": "\\s*(if|while)\\s*(|else|case|default:)[^;]*|for\\s*\\(.*)",
    "lineComment": "//",
    "lineUpClosingBracket": "true",
    "wordBreakChars": ",+-=<>/?^&*",
}
# Attributes dict for actionscript_main ruleset.
actionscript_main_attributes_dict = {
"default": "null",
"digit_re": "",
"escape": "\\",
"highlight_digits": "true",
"ignore_case": "false",
"no_word_sep": "",
}
# Dictionary of attributes dictionaries for actionscript mode.
attributesDictDict = {
"actionscript_main": actionscript_main_attributes_dict,
}
# Keywords dict for actionscript_main ruleset.
actionscript_main_keywords_dict = {
"#endinitclip": "keyword1",
"#include": "literal2",
"#initclip": "keyword1",
"ASSetPropFlags": "literal2",
"Accessibility": "literal2",
"Array": "keyword3",
"BACKSPACE": "literal2",
"Boolean": "literal2",
"CAPSLOCK": "literal2",
"CONTROL": "literal2",
"Camera": "literal2",
"Color": "keyword3",
"ContextMenu": "literal2",
"ContextMenuItem": "literal2",
"CustomActions": "literal2",
"DELETEKEY": "literal2",
"DOWN": "literal2",
"DataGlue": "literal2",
"Date": "literal2",
"E": "literal2",
"END": "literal2",
"ENTER": "literal2",
"ESCAPE": "literal2",
"Error": "literal2",
"Function": "keyword3",
"HOME": "literal2",
"INSERT": "literal2",
"Infinity": "literal2",
"Key": "keyword3",
"LEFT": "literal2",
"LN10": "literal2",
"LN2": "literal2",
"LOG10E": "literal2",
"LOG2E": "literal2",
"LoadVars": "literal2",
"LocalConnection": "literal2",
"MAX_VALUE": "literal2",
"MIN_VALUE": "literal2",
"MMExecute": "keyword3",
"Math": "keyword3",
"Microphone": "literal2",
"Mouse": "keyword3",
"MovieClip": "keyword3",
"MovieClipLoader": "literal2",
"NEGATIVE_INFINITY": "literal2",
"NaN": "literal2",
"NetConnection": "literal2",
"NetServices": "literal2",
"NetStream": "literal2",
"Number": "literal2",
"Object": "keyword3",
"PGDN": "literal2",
"PGUP": "literal2",
"PI": "literal2",
"POSITIVE_INFINITY": "literal2",
"PrintJob": "literal2",
"RIGHT": "literal2",
"SHIFT": "literal2",
"SPACE": "literal2",
"SQRT1_2": "literal2",
"SQRT2": "literal2",
"Selection": "keyword3",
"SharedObject": "literal2",
"Sound": "keyword3",
"Stage": "literal2",
"String": "literal2",
"StyleSheet": "literal2",
"System": "literal2",
"TAB": "literal2",
"TextField": "literal2",
"TextFormat": "literal2",
"TextSnapshot": "literal2",
"UP": "literal2",
"UTC": "literal2",
"Video": "literal2",
"Void": "keyword1",
"XML": "keyword3",
"XMLNode": "keyword3",
"XMLSocket": "keyword3",
"__constructor__": "literal2",
"__proto__": "literal2",
"_accProps": "literal2",
"_alpha": "literal2",
"_currentframe": "literal2",
"_droptarget": "literal2",
"_focusrect": "literal2",
"_framesloaded": "literal2",
"_global": "literal2",
"_height": "literal2",
"_highquality": "keyword2",
"_level": "literal2",
"_lockroot": "literal2",
"_name": "literal2",
"_parent": "literal2",
"_quality": "literal2",
"_root": "literal2",
"_rotation": "literal2",
"_soundbuftime": "literal2",
"_target": "literal2",
"_totalframes": "literal2",
"_url": "literal2",
"_visible": "literal2",
"_width": "literal2",
"_x": "literal2",
"_xmouse": "literal2",
"_xscale": "literal2",
"_y": "literal2",
"_ymouse": "literal2",
"_yscale": "literal2",
"abs": "literal2",
"abstract": "keyword1",
"acos": "literal2",
"activityLevel": "literal2",
"add": "keyword1",
"addItem": "literal2",
"addItemAt": "literal2",
"addListener": "literal2",
"addPage": "literal2",
"addProperty": "literal2",
"addRequestHeader": "literal2",
"addView": "literal2",
"align": "literal2",
"allowDomain": "literal2",
"allowInsecureDomain": "literal2",
"and": "keyword1",
"appendChild": "literal2",
"apply": "literal2",
"arguments": "literal2",
"asin": "literal2",
"atan": "literal2",
"atan2": "literal2",
"attachAudio": "literal2",
"attachMovie": "literal2",
"attachSound": "literal2",
"attributes": "literal2",
"autoSize": "literal2",
"avHardwareDisable": "literal2",
"background": "literal2",
"backgroundColor": "literal2",
"bandwidth": "literal2",
"beginFill": "literal2",
"beginGradientFill": "literal2",
"bindFormatFunction": "literal2",
"bindFormatStrings": "literal2",
"blockIndent": "literal2",
"bold": "literal2",
"boolean": "keyword3",
"border": "literal2",
"borderColor": "literal2",
"bottomScroll": "literal2",
"break": "keyword1",
"bufferLength": "literal2",
"bufferTime": "literal2",
"builtInItems": "literal2",
"bullet": "literal2",
"byte": "keyword3",
"bytesLoaded": "literal2",
"bytesTotal": "literal2",
"call": "literal2",
"callee": "literal2",
"caller": "literal2",
"capabilities": "literal2",
"caption": "literal2",
"case": "keyword1",
"catch": "keyword1",
"ceil": "literal2",
"char": "keyword3",
"charAt": "literal2",
"charCodeAt": "literal2",
"childNodes": "literal2",
"chr": "keyword2",
"class": "keyword1",
"clear": "literal2",
"clearInterval": "literal2",
"cloneNode": "literal2",
"close": "literal2",
"color": "literal2",
"concat": "literal2",
"connect": "literal2",
"const": "keyword1",
"contentType": "literal2",
"continue": "keyword1",
"copy": "literal2",
"cos": "literal2",
"createElement": "literal2",
"createEmptyMovieClip": "literal2",
"createGatewayConnection": "literal2",
"createTextField": "literal2",
"createTextNode": "literal2",
"currentFps": "literal2",
"curveTo": "literal2",
"customItems": "literal2",
"data": "literal2",
"deblocking": "literal2",
"debugger": "keyword1",
"default": "keyword1",
"delete": "keyword1",
"do": "keyword1",
"docTypeDecl": "literal2",
"domain": "literal2",
"double": "keyword3",
"duplicateMovieClip": "literal2",
"duration": "literal2",
"dynamic": "keyword1",
"else": "keyword1",
"embedFonts": "literal2",
"enabled": "literal2",
"endFill": "literal2",
"enum": "keyword1",
"eq": "keyword1",
"escape": "literal2",
"eval": "literal2",
"exactSettings": "literal2",
"exp": "literal2",
"export": "keyword2",
"extends": "keyword1",
"false": "literal2",
"filter": "literal2",
"final": "keyword1",
"finally": "keyword1",
"findText": "literal2",
"firstChild": "literal2",
"float": "keyword3",
"floor": "literal2",
"flush": "literal2",
"focusEnabled": "literal2",
"font": "literal2",
"for": "keyword1",
"fps": "literal2",
"fromCharCode": "literal2",
"fscommand": "literal2",
"function": "keyword1",
"gain": "literal2",
"ge": "keyword1",
"get": "literal2",
"getAscii": "literal2",
"getBeginIndex": "literal2",
"getBounds": "literal2",
"getBytesLoaded": "literal2",
"getBytesTotal": "literal2",
"getCaretIndex": "literal2",
"getCode": "literal2",
"getColumnNames": "literal2",
"getCount": "literal2",
"getDate": "literal2",
"getDay": "literal2",
"getDebug": "literal2",
"getDebugConfig": "literal2",
"getDebugID": "literal2",
"getDepth": "literal2",
"getEndIndex": "literal2",
"getFocus": "literal2",
"getFontList": "literal2",
"getFullYear": "literal2",
"getHours": "literal2",
"getInstanceAtDepth": "literal2",
"getItemAt": "literal2",
"getLength": "literal2",
"getLocal": "literal2",
"getMilliseconds": "literal2",
"getMinutes": "literal2",
"getMonth": "literal2",
"getNewTextFormat": "literal2",
"getNextHighestDepth": "literal2",
"getNumberAvailable": "literal2",
"getPan": "literal2",
"getProgress": "literal2",
"getProperty": "literal2",
"getRGB": "literal2",
"getSWFVersion": "literal2",
"getSeconds": "literal2",
"getSelected": "literal2",
"getSelectedText": "literal2",
"getService": "literal2",
"getSize": "literal2",
"getStyle": "literal2",
"getStyleNames": "literal2",
"getText": "literal2",
"getTextExtent": "literal2",
"getTextFormat": "literal2",
"getTextSnapshot": "literal2",
"getTime": "literal2",
"getTimer": "literal2",
"getTimezoneOffset": "literal2",
"getTransform": "literal2",
"getURL": "literal2",
"getUTCDate": "literal2",
"getUTCDay": "literal2",
"getUTCFullYear": "literal2",
"getUTCHours": "literal2",
"getUTCMilliseconds": "literal2",
"getUTCMinutes": "literal2",
"getUTCMonth": "literal2",
"getUTCSeconds": "literal2",
"getVersion": "literal2",
"getVolume": "literal2",
"getYear": "literal2",
"globalToLocal": "literal2",
"goto": "keyword1",
"gotoAndPlay": "literal2",
"gotoAndStop": "literal2",
"gt": "keyword1",
"hasAccessibility": "literal2",
"hasAudio": "literal2",
"hasAudioEncoder": "literal2",
"hasChildNodes": "literal2",
"hasEmbeddedVideo": "literal2",
"hasMP3": "literal2",
"hasPrinting": "literal2",
"hasScreenBroadcast": "literal2",
"hasScreenPlayback": "literal2",
"hasStreamingAudio": "literal2",
"hasStreamingVideo": "literal2",
"hasVideoEncoder": "literal2",
"height": "literal2",
"hide": "literal2",
"hideBuiltInItems": "literal2",
"hitArea": "literal2",
"hitTest": "literal2",
"hitTestTextNearPos": "literal2",
"hscroll": "literal2",
"html": "literal2",
"htmlText": "literal2",
"id3": "literal2",
"if": "keyword1",
"ifFrameLoaded": "keyword1",
"ignoreWhite": "literal2",
"implements": "keyword1",
"import": "keyword2",
"in": "keyword1",
"indent": "literal2",
"index": "literal2",
"indexOf": "literal2",
"insertBefore": "literal2",
"install": "literal2",
"instanceof": "keyword1",
"int": "keyword3",
"interface": "keyword1",
"isActive": "literal2",
"isDebugger": "literal2",
"isDown": "literal2",
"isFinite": "literal2",
"isFullyPopulated": "literal2",
"isLocal": "literal2",
"isNaN": "literal2",
"isToggled": "literal2",
"italic": "literal2",
"join": "literal2",
"language": "literal2",
"lastChild": "literal2",
"lastIndexOf": "literal2",
"le": "keyword1",
"leading": "literal2",
"leftMargin": "literal2",
"length": "literal2",
"lineStyle": "literal2",
"lineTo": "literal2",
"list": "literal2",
"load": "literal2",
"loadClip": "literal2",
"loadMovie": "literal2",
"loadMovieNum": "literal2",
"loadSound": "literal2",
"loadVariables": "literal2",
"loadVariablesNum": "literal2",
"loaded": "literal2",
"localFileReadDisable": "literal2",
"localToGlobal": "literal2",
"log": "literal2",
"long": "keyword3",
"lt": "keyword1",
"manufacturer": "literal2",
"max": "literal2",
"maxChars": "literal2",
"maxhscroll": "literal2",
"maxscroll": "literal2",
"mbchr": "keyword2",
"mblength": "keyword2",
"mbord": "keyword2",
"mbsubstring": "keyword2",
"menu": "literal2",
"message": "literal2",
"min": "literal2",
"motionLevel": "literal2",
"motionTimeOut": "literal2",
"mouseWheelEnabled": "literal2",
"moveTo": "literal2",
"multiline": "literal2",
"muted": "literal2",
"name": "literal2",
"names": "literal2",
"native": "keyword1",
"ne": "keyword1",
"new": "keyword1",
"newline": "literal2",
"nextFrame": "literal2",
"nextScene": "literal2",
"nextSibling": "literal2",
"nodeName": "literal2",
"nodeType": "literal2",
"nodeValue": "literal2",
"not": "keyword1",
"null": "literal2",
"on": "keyword1",
"onActivity": "literal2",
"onChanged": "literal2",
"onClipEvent": "keyword1",
"onClose": "literal2",
"onConnect": "literal2",
"onData": "literal2",
"onDragOut": "literal2",
"onDragOver": "literal2",
"onEnterFrame": "literal2",
"onID3": "literal2",
"onKeyDown": "literal2",
"onKeyUp": "literal2",
"onKillFocus": "literal2",
"onLoad": "literal2",
"onLoadComplete": "literal2",
"onLoadError": "literal2",
"onLoadInit": "literal2",
"onLoadProgress": "literal2",
"onLoadStart": "literal2",
"onMouseDown": "literal2",
"onMouseMove": "literal2",
"onMouseUp": "literal2",
"onMouseWheel": "literal2",
"onPress": "literal2",
"onRelease": "literal2",
"onReleaseOutside": "literal2",
"onResize": "literal2",
"onRollOut": "literal2",
"onRollOver": "literal2",
"onScroller": "literal2",
"onSelect": "literal2",
"onSetFocus": "literal2",
"onSoundComplete": "literal2",
"onStatus": "literal2",
"onUnload": "literal2",
"onUpdate": "literal2",
"onXML": "literal2",
"or": "keyword1",
"ord": "keyword2",
"os": "literal2",
"package": "keyword2",
"parentNode": "literal2",
"parseCSS": "literal2",
"parseFloat": "literal2",
"parseInt": "literal2",
"parseXML": "literal2",
"password": "literal2",
"pause": "literal2",
"pixelAspectRatio": "literal2",
"play": "literal2",
"playerType": "literal2",
"pop": "literal2",
"position": "literal2",
"pow": "literal2",
"prevFrame": "literal2",
"prevScene": "literal2",
"previousSibling": "literal2",
"print": "literal2",
"printAsBitmap": "literal2",
"printAsBitmapNum": "literal2",
"printNum": "literal2",
"private": "keyword1",
"protected": "keyword1",
"prototype": "literal2",
"public": "keyword1",
"push": "literal2",
"quality": "literal2",
"random": "literal2",
"rate": "literal2",
"registerClass": "literal2",
"removeAll": "literal2",
"removeItemAt": "literal2",
"removeListener": "literal2",
"removeMovieClip": "literal2",
"removeNode": "literal2",
"removeTextField": "literal2",
"replaceItemAt": "literal2",
"replaceSel": "literal2",
"replaceText": "literal2",
"restrict": "literal2",
"return": "keyword1",
"reverse": "literal2",
"rightMargin": "literal2",
"round": "literal2",
"scaleMode": "literal2",
"screenColor": "literal2",
"screenDPI": "literal2",
"screenResolutionX": "literal2",
"screenResolutionY": "literal2",
"scroll": "literal2",
"security": "literal2",
"seek": "literal2",
"selectable": "literal2",
"send": "literal2",
"sendAndLoad": "literal2",
"separatorBefore": "literal2",
"serverString": "literal2",
"setBufferTime": "literal2",
"setClipboard": "literal2",
"setCredentials": "literal2",
"setDate": "literal2",
"setDebug": "literal2",
"setDebugID": "literal2",
"setDefaultGatewayURL": "literal2",
"setDeliveryMode": "literal2",
"setField": "literal2",
"setFocus": "literal2",
"setFullYear": "literal2",
"setGain": "literal2",
"setHours": "literal2",
"setI": "literal2",
"setInterval": "literal2",
"setMask": "literal2",
"setMilliseconds": "literal2",
"setMinutes": "literal2",
"setMode": "literal2",
"setMonth": "literal2",
"setMotionLevel": "literal2",
"setNewTextFormat": "literal2",
"setPan": "literal2",
"setProperty": "literal2",
"setQuality": "literal2",
"setRGB": "literal2",
"setRate": "literal2",
"setSeconds": "literal2",
"setSelectColor": "literal2",
"setSelected": "literal2",
"setSelection": "literal2",
"setSilenceLevel": "literal2",
"setStyle": "literal2",
"setTextFormat": "literal2",
"setTime": "literal2",
"setTransform": "literal2",
"setUTCDate": "literal2",
"setUTCFullYear": "literal2",
"setUTCHours": "literal2",
"setUTCMilliseconds": "literal2",
"setUTCMinutes": "literal2",
"setUTCMonth": "literal2",
"setUTCSeconds": "literal2",
"setUseEchoSuppression": "literal2",
"setVolume": "literal2",
"setYear": "literal2",
"shift": "literal2",
"short": "keyword3",
"show": "literal2",
"showMenu": "literal2",
"showSettings": "literal2",
"silenceLevel": "literal2",
"silenceTimeout": "literal2",
"sin": "literal2",
"size": "literal2",
"slice": "literal2",
"smoothing": "literal2",
"sort": "literal2",
"sortItemsBy": "literal2",
"sortOn": "literal2",
"splice": "literal2",
"split": "literal2",
"sqrt": "literal2",
"start": "literal2",
"startDrag": "literal2",
"static": "keyword1",
"status": "literal2",
"stop": "literal2",
"stopAllSounds": "literal2",
"stopDrag": "literal2",
"styleSheet": "literal2",
"substr": "literal2",
"substring": "literal2",
"super": "literal2",
"swapDepths": "literal2",
"switch": "keyword1",
"synchronized": "keyword1",
"tabChildren": "literal2",
"tabEnabled": "literal2",
"tabIndex": "literal2",
"tabStops": "literal2",
"tan": "literal2",
"target": "literal2",
"targetPath": "literal2",
"tellTarget": "literal2",
"text": "literal2",
"textColor": "literal2",
"textHeight": "literal2",
"textWidth": "literal2",
"this": "literal2",
"throw": "keyword1",
"throws": "keyword1",
"time": "literal2",
"toLowerCase": "literal2",
"toString": "literal2",
"toUpperCase": "literal2",
"toggleHighQuality": "literal2",
"trace": "literal2",
"trackAsMenu": "literal2",
"transient": "keyword1",
"true": "literal2",
"try": "keyword1",
"type": "literal2",
"typeof": "keyword1",
"undefined": "literal2",
"underline": "literal2",
"unescape": "literal2",
"uninstall": "literal2",
"unloadClip": "literal2",
"unloadMovie": "literal2",
"unloadMovieNum": "literal2",
"unshift": "literal2",
"unwatch": "literal2",
"updateAfterEvent": "literal2",
"updateProperties": "literal2",
"url": "literal2",
"useCodepage": "literal2",
"useEchoSuppression": "literal2",
"useHandCursor": "literal2",
"valueOf": "literal2",
"var": "keyword1",
"variable": "literal2",
"version": "literal2",
"visible": "literal2",
"void": "keyword3",
"volatile": "keyword1",
"watch": "literal2",
"while": "keyword1",
"width": "literal2",
"with": "keyword1",
"wordWrap": "literal2",
"xmlDecl": "literal2",
}
# Dictionary of keywords dictionaries for actionscript mode.
keywordsDictDict = {
"actionscript_main": actionscript_main_keywords_dict,
}
# Rules for actionscript_main ruleset.

def actionscript_rule0(colorer, s, i):
    """Colorize /* ... */ block comments (no_line_break=False, so they may span lines)."""
    return colorer.match_span(
        s, i,
        kind="comment1",
        begin="/*",
        end="*/",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
        exclude_match=False,
        no_escape=False,
        no_line_break=False,
        no_word_break=False,
    )
def actionscript_rule1(colorer, s, i):
    """Colorize double-quoted string literals (confined to one line)."""
    return colorer.match_span(
        s, i,
        kind="literal1",
        begin="\"",
        end="\"",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
        exclude_match=False,
        no_escape=False,
        no_line_break=True,
        no_word_break=False,
    )
def actionscript_rule2(colorer, s, i):
    """Colorize single-quoted string literals (confined to one line)."""
    return colorer.match_span(
        s, i,
        kind="literal1",
        begin="'",
        end="'",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
        exclude_match=False,
        no_escape=False,
        no_line_break=True,
        no_word_break=False,
    )
# Used to color "(".
def actionscript_rule3(colorer, s, i):
    """Mark the token preceding '(' with the 'function' kind."""
    return colorer.match_mark_previous(
        s, i,
        kind="function",
        pattern="(",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        exclude_match=True,
    )
def actionscript_rule4(colorer, s, i):
    """Colorize // line comments through end of line."""
    return colorer.match_eol_span(
        s, i,
        kind="comment1",
        seq="//",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
        exclude_match=False,
    )
# Used to color ")". 2011/05/22: kind changed to "operator"; actionscript.xml changed too.
def actionscript_rule5(colorer, s, i):
    """Colorize ')' as an operator."""
    return colorer.match_seq(
        s, i,
        kind="operator",
        seq=")",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
    )
# Used to color "(". 2011/05/22: kind changed to "operator"; actionscript.xml changed too.
def actionscript_rule6(colorer, s, i):
    """Colorize '(' as an operator."""
    return colorer.match_seq(
        s, i,
        kind="operator",
        seq="(",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
    )
def actionscript_rule7(colorer, s, i):
    """Colorize '=' as an operator."""
    return colorer.match_seq(
        s, i,
        kind="operator",
        seq="=",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
    )
def actionscript_rule8(colorer, s, i):
    """Colorize '!' as an operator."""
    return colorer.match_seq(
        s, i,
        kind="operator",
        seq="!",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
    )
def actionscript_rule9(colorer, s, i):
    """Colorize '>=' as an operator (tried before plain '>')."""
    return colorer.match_seq(
        s, i,
        kind="operator",
        seq=">=",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
    )
def actionscript_rule10(colorer, s, i):
    """Colorize '<=' as an operator (tried before plain '<')."""
    return colorer.match_seq(
        s, i,
        kind="operator",
        seq="<=",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
    )
def actionscript_rule11(colorer, s, i):
    """Colorize '+' as an operator."""
    return colorer.match_seq(
        s, i,
        kind="operator",
        seq="+",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
    )
def actionscript_rule12(colorer, s, i):
    """Colorize '-' as an operator."""
    return colorer.match_seq(
        s, i,
        kind="operator",
        seq="-",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
    )
def actionscript_rule13(colorer, s, i):
    """Colorize '/' as an operator (tried after the comment rules)."""
    return colorer.match_seq(
        s, i,
        kind="operator",
        seq="/",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
    )
def actionscript_rule14(colorer, s, i):
    """Colorize '*' as an operator."""
    return colorer.match_seq(
        s, i,
        kind="operator",
        seq="*",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
    )
def actionscript_rule15(colorer, s, i):
    """Colorize '>' as an operator."""
    return colorer.match_seq(
        s, i,
        kind="operator",
        seq=">",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
    )
def actionscript_rule16(colorer, s, i):
    """Colorize '<' as an operator."""
    return colorer.match_seq(
        s, i,
        kind="operator",
        seq="<",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
    )
def actionscript_rule17(colorer, s, i):
    """Colorize '%' as an operator."""
    return colorer.match_seq(
        s, i,
        kind="operator",
        seq="%",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
    )
def actionscript_rule18(colorer, s, i):
    """Colorize '&' as an operator."""
    return colorer.match_seq(
        s, i,
        kind="operator",
        seq="&",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
    )
def actionscript_rule19(colorer, s, i):
    """Colorize '|' as an operator."""
    return colorer.match_seq(
        s, i,
        kind="operator",
        seq="|",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
    )
def actionscript_rule20(colorer, s, i):
    """Colorize '^' as an operator."""
    return colorer.match_seq(
        s, i,
        kind="operator",
        seq="^",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
    )
def actionscript_rule21(colorer, s, i):
    """Colorize '~' as an operator."""
    return colorer.match_seq(
        s, i,
        kind="operator",
        seq="~",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
    )
def actionscript_rule22(colorer, s, i):
    """Colorize '.' as an operator."""
    return colorer.match_seq(
        s, i,
        kind="operator",
        seq=".",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
    )
def actionscript_rule23(colorer, s, i):
    """Colorize '}' as an operator."""
    return colorer.match_seq(
        s, i,
        kind="operator",
        seq="}",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
    )
def actionscript_rule24(colorer, s, i):
    """Colorize '{' as an operator."""
    return colorer.match_seq(
        s, i,
        kind="operator",
        seq="{",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
    )
def actionscript_rule25(colorer, s, i):
    """Colorize ',' as an operator."""
    return colorer.match_seq(
        s, i,
        kind="operator",
        seq=",",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
    )
def actionscript_rule26(colorer, s, i):
    """Colorize ';' as an operator."""
    return colorer.match_seq(
        s, i,
        kind="operator",
        seq=";",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
    )
def actionscript_rule27(colorer, s, i):
    """Colorize ']' as an operator."""
    return colorer.match_seq(
        s, i,
        kind="operator",
        seq="]",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
    )
def actionscript_rule28(colorer, s, i):
    """Colorize '[' as an operator."""
    return colorer.match_seq(
        s, i,
        kind="operator",
        seq="[",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
    )
def actionscript_rule29(colorer, s, i):
    """Colorize '?' as an operator."""
    return colorer.match_seq(
        s, i,
        kind="operator",
        seq="?",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
    )
def actionscript_rule30(colorer, s, i):
    """Mark the token before a ':' as a label (only when the match is at line start)."""
    return colorer.match_mark_previous(
        s, i,
        kind="label",
        pattern=":",
        at_line_start=True,
        at_whitespace_end=False,
        at_word_start=False,
        exclude_match=True,
    )
def actionscript_rule31(colorer, s, i):
    """Colorize ':' as an operator (fallback after the label rule)."""
    return colorer.match_seq(
        s, i,
        kind="operator",
        seq=":",
        at_line_start=False,
        at_whitespace_end=False,
        at_word_start=False,
        delegate="",
    )
def actionscript_rule32(colorer, s, i):
    """Try the ruleset's keyword table at position i (identifier-like tokens)."""
    return colorer.match_keywords(s, i)
# Rules dict for actionscript_main ruleset.
# Every identifier-leading character ('#', digits, '@', letters, '_')
# dispatches to the keyword matcher; punctuation characters dispatch to
# their dedicated seq/span rules.
rulesDict1 = {
    c: [actionscript_rule32]
    for c in "#0123456789@ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz"
}
rulesDict1.update({
    "!": [actionscript_rule8],
    "\"": [actionscript_rule1],
    "%": [actionscript_rule17],
    "&": [actionscript_rule18],
    "'": [actionscript_rule2],
    "(": [actionscript_rule3, actionscript_rule6],
    ")": [actionscript_rule5],
    "*": [actionscript_rule14],
    "+": [actionscript_rule11],
    ",": [actionscript_rule25],
    "-": [actionscript_rule12],
    ".": [actionscript_rule22],
    "/": [actionscript_rule0, actionscript_rule4, actionscript_rule13],
    ":": [actionscript_rule30, actionscript_rule31],
    ";": [actionscript_rule26],
    "<": [actionscript_rule10, actionscript_rule16],
    "=": [actionscript_rule7],
    ">": [actionscript_rule9, actionscript_rule15],
    "?": [actionscript_rule29],
    "[": [actionscript_rule28],
    "]": [actionscript_rule27],
    "^": [actionscript_rule20],
    "{": [actionscript_rule24],
    "|": [actionscript_rule19],
    "}": [actionscript_rule23],
    "~": [actionscript_rule21],
})
# x.rulesDictDict for actionscript mode.
rulesDictDict = dict(
    actionscript_main=rulesDict1,
)
# Import dict for actionscript mode (empty: nothing is imported).
importDict = dict()