max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
apps/Video/admin.py | yxf010/QuanfitaSite | 1 | 12761851 | from django.contrib import admin
from .models import Video
# Register your models here.
class VideoAdmin(admin.ModelAdmin):
    """Admin options for the Video model."""
    # Columns shown on the Video change-list page.
    list_display = ('aid', 'name', 'tags', 'url', 'cover', 'desc', 'add_time')


# Register Video together with its admin options. The original code called
# admin.site.register(Video) before VideoAdmin was even defined, so the
# list_display configuration above was silently never used.
admin.site.register(Video, VideoAdmin)
| 1.648438 | 2 |
db_migration/alembic/versions/20220114_163228_6a036f1cb50c_added_additional_file_attributes.py | ghga-de/internal-file-registry-service | 0 | 12761852 | """Added additional file attributes
Revision ID: <KEY>
Revises: 826d7777c67c
Create Date: 2022-01-14 16:32:28.259435
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "826d7777c67c"
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add file metadata columns and drop registration_date."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): adding NOT NULL columns without a server_default fails on a
    # table that already contains rows -- confirm "fileinfo" is empty when this
    # migration runs, or add defaults.
    op.add_column("fileinfo", sa.Column("creation_date", sa.DateTime(), nullable=False))
    op.add_column("fileinfo", sa.Column("update_date", sa.DateTime(), nullable=False))
    op.add_column("fileinfo", sa.Column("format", sa.String(), nullable=False))
    op.add_column("fileinfo", sa.Column("size", sa.Integer(), nullable=False))
    op.drop_column("fileinfo", "registration_date")
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: restore registration_date, drop the metadata columns."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): restoring a NOT NULL column on a populated table has the
    # same server_default caveat as the upgrade path.
    op.add_column(
        "fileinfo",
        sa.Column(
            "registration_date",
            postgresql.TIMESTAMP(),
            autoincrement=False,
            nullable=False,
        ),
    )
    # Drop in reverse order of the upgrade's additions.
    op.drop_column("fileinfo", "size")
    op.drop_column("fileinfo", "format")
    op.drop_column("fileinfo", "update_date")
    op.drop_column("fileinfo", "creation_date")
    # ### end Alembic commands ###
| 1.53125 | 2 |
cli/polyaxon/utils/cmd.py | polyaxon/cli | 0 | 12761853 | #!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shlex
from subprocess import PIPE
from psutil import Popen
def run_command(cmd, data, location, chw, env=None):
    """Run *cmd* as a subprocess and return its captured stdout.

    Args:
        cmd: command line to execute (split with shlex).
        data: optional bytes piped to the child's stdin.
        location: optional path; used as the working directory when *chw*
            is True, appended to the command line when *chw* is False.
        chw: selects how *location* is interpreted (see above).
        env: optional mapping merged on top of the current environment.
    """
    # Merge the extra environment on top of the current one, if provided.
    cmd_env = None
    if env:
        cmd_env = dict(os.environ)
        cmd_env.update(env)

    cwd = os.getcwd()
    if location is not None:
        if chw is True:
            cwd = location
        elif chw is False:
            cmd = "{0} {1}".format(cmd, location)

    proc = Popen(
        shlex.split(cmd), stdout=PIPE, stdin=PIPE, stderr=PIPE, cwd=cwd, env=cmd_env
    )
    if data is None:
        return proc.communicate()[0].decode("utf-8")
    # NOTE: when stdin data is supplied, the raw (undecoded) stdout is
    # returned, mirroring the historical behaviour of this helper.
    return proc.communicate(input=data)[0]
| 2.328125 | 2 |
petlib/cipher.py | wouterl/petlib | 1 | 12761854 | <reponame>wouterl/petlib<filename>petlib/cipher.py
from .bindings import _FFI, _C
import pytest
_pool = []
def get_intptr():
    """Return a pooled ``int *`` FFI pointer, allocating a fresh one if needed."""
    if not _pool:
        _pool.append(_FFI.new("int *"))
    return _pool.pop()
def return_intptr(ptr):
    # Give the pointer back to the free pool so get_intptr() can reuse it.
    _pool.append(ptr)
class Cipher(object):
    """ A class representing a symmetric cipher and mode.

    Example:
        An example of encryption and decryption using AES in counter mode.

        >>> from os import urandom
        >>> aes = Cipher("AES-128-CTR") # Init AES in Counter mode
        >>> key = urandom(16)
        >>> iv = urandom(16)
        >>>
        >>> # Get a CipherOperation object for encryption
        >>> enc = aes.enc(key, iv)
        >>> ref = b"Hello World"
        >>> ciphertext = enc.update(ref)
        >>> ciphertext += enc.finalize()
        >>>
        >>> # Get a CipherOperation object for decryption
        >>> dec = aes.dec(key, iv)
        >>> plaintext = dec.update(ciphertext)
        >>> plaintext += dec.finalize()
        >>> plaintext == ref # Check resulting plaintext matches reference one.
        True
    """
    # alg: the OpenSSL EVP_CIPHER; gcm: True for AEAD (GCM) modes;
    # _pool: recycled CipherOperation objects (see CipherOperation.__del__).
    __slots__ = ["alg", "gcm", "_pool"]

    def __init__(self, name, _alg=None):
        """Initialize the cipher by name."""
        self._pool = []
        if _alg:
            # A pre-made EVP cipher was supplied (see the aes_*_gcm helpers).
            self.alg = _alg
            self.gcm = True
            return
        else:
            self.alg = _C.EVP_get_cipherbyname(name.encode("utf8"))
            self.gcm = False
        if self.alg == _FFI.NULL:
            raise Exception("Unknown cipher: %s" % name )
        if "gcm" in name.lower():
            self.gcm = True
        if "ccm" in name.lower():
            raise Exception("CCM mode not supported")

    def len_IV(self):
        """Return the Initialization Vector length in bytes."""
        return int(self.alg.iv_len)

    def len_key(self):
        """Return the secret key length in bytes."""
        return int(self.alg.key_len)

    def len_block(self):
        """Return the block size in bytes."""
        return int(self.alg.block_size)

    def get_nid(self):
        """Return the OpenSSL nid of the cipher and mode."""
        return int(self.alg.nid)

    def op(self, key, iv=None, enc=1):
        """Initializes a cipher operation, either encrypt or decrypt
        and returns a CipherOperation object

        Args:
            key (str): the block cipher symmetric key. Length depends on block cipher choice.
            iv (str): an Initialization Vector of up to the block size. (Can be shorter.)
            enc (int): set to 1 to perform encryption, or 0 to perform decryption.
        """
        if iv is None:
            iv = _FFI.NULL
        ok = True
        # Reuse a pooled CipherOperation when available to avoid allocating a
        # new EVP context for every operation.
        if self._pool == []:
            c_op = CipherOperation(enc)
        else:
            c_op = self._pool.pop()
            c_op.init(enc)
        ok &= ( len(key) == int(self.alg.key_len) )
        ok &= ( enc == 0 or enc == 1 )
        if not ok: raise Exception("Cipher exception: Wrong key length or enc mode.")
        if not self.gcm:
            if iv != _FFI.NULL:
                ok &= ( len(iv) == self.len_IV() )
            ok &= ( _C.EVP_CipherInit_ex(c_op.ctx, self.alg, _FFI.NULL, key, iv, enc) )
        else:
            # GCM is initialised in two steps: first select the cipher with no
            # key/IV, then set the IV length, then supply key and IV.
            ok &= ( _C.EVP_CipherInit_ex(c_op.ctx, self.alg, _FFI.NULL, _FFI.NULL, _FFI.NULL, enc) )
            # assert len(iv) <= self.len_block()
            ok &= ( _C.EVP_CIPHER_CTX_ctrl(c_op.ctx, _C.EVP_CTRL_GCM_SET_IVLEN, len(iv), _FFI.NULL))
            # NOTE(review): the SET_IV_FIXED / IV_GEN controls below have their
            # return values ignored -- confirm failures cannot go unnoticed.
            _C.EVP_CIPHER_CTX_ctrl(c_op.ctx, _C.EVP_CTRL_GCM_SET_IV_FIXED, -1, iv);
            _C.EVP_CIPHER_CTX_ctrl(c_op.ctx, _C.EVP_CTRL_GCM_IV_GEN, 0, iv)
            ok &= ( _C.EVP_CipherInit_ex(c_op.ctx, _FFI.NULL, _FFI.NULL, key, iv, enc) )
        if not ok: raise Exception("Cipher exception: Init failed.")
        c_op.cipher = self
        return c_op

    def enc(self, key, iv):
        """Initializes an encryption engine with the cipher with a specific key and Initialization Vector (IV).
        Returns the CipherOperation engine.

        Args:
            key (str): the block cipher symmetric key. Length depends on block cipher choice.
            iv (str): an Initialization Vector of up to the block size. (Can be shorter.)
        """
        return self.op(key, iv, enc=1)

    def dec(self, key, iv):
        """Initializes a decryption engine with the cipher with a specific key and Initialization Vector (IV).
        Returns the CipherOperation engine.

        Args:
            key (str): the block cipher symmetric key. Length depends on block cipher choice.
            iv (str): an Initialization Vector of up to the block size. (Can be shorter.)
        """
        return self.op(key, iv, enc=0)

    #def __del__(self):
    #    pass

    # --------- AES GCM special functions ---------------
    @staticmethod
    def aes_128_gcm():
        """Returns a pre-initalized AES-GCM cipher with 128 bits key size"""
        return Cipher(None, _C.EVP_aes_128_gcm())

    @staticmethod
    def aes_192_gcm():
        """Returns a pre-initalized AES-GCM cipher with 192 bits key size"""
        return Cipher(None, _C.EVP_aes_192_gcm())

    @staticmethod
    def aes_256_gcm():
        """Returns a pre-initalized AES-GCM cipher with 256 bits key size"""
        return Cipher(None, _C.EVP_aes_256_gcm())

    def quick_gcm_enc(self, key, iv, msg, assoc=None, tagl=16):
        """One operation GCM encryption.

        Args:
            key (str): the AES symmetric key. Length depends on block cipher choice.
            iv (str): an Initialization Vector of up to the block size. (Can be shorter.)
            msg (str): the message to encrypt.
            assoc (str): associated data that will be integrity protected, but not encrypted.
            tagl (int): the length of the tag, up to the block length.

        Example:
            Use of `quick_gcm_enc` and `quick_gcm_dec` for AES-GCM operations.

            >>> from os import urandom # Secure OS random source
            >>> aes = Cipher("aes-128-gcm") # Initialize AES-GCM with 128 bit keys
            >>> iv = urandom(16)
            >>> key = urandom(16)
            >>> # Encryption using AES-GCM returns a ciphertext and a tag
            >>> ciphertext, tag = aes.quick_gcm_enc(key, iv, b"Hello")
            >>> # Decrytion using AES-GCM
            >>> p = aes.quick_gcm_dec(key, iv, ciphertext, tag)
            >>> assert p == b'Hello'
        """
        enc = self.enc(key, iv)
        if assoc:
            # Associated data must be fed in before the plaintext.
            enc.update_associated(assoc)
        ciphertext = enc.update(msg)
        ciphertext += enc.finalize()
        tag = enc.get_tag(tagl)
        return (ciphertext, tag)

    def quick_gcm_dec(self, key, iv, cip, tag, assoc=None):
        """One operation GCM decrypt. See usage example in "quick_gcm_enc".
        Throws an exception on failure of decryption

        Args:
            key (str): the AES symmetric key. Length depends on block cipher choice.
            iv (str): an Initialization Vector of up to the block size. (Can be shorter.)
            cip (str): the ciphertext to decrypt.
            tag (int): the integrity tag.
            assoc (str): associated data that will be integrity protected, but not encrypted.
        """
        dec = self.dec(key, iv)
        if assoc:
            dec.update_associated(assoc)
        # The expected tag must be set before finalize() verifies it.
        dec.set_tag(tag)
        plain = dec.update(cip)
        try:
            plain += dec.finalize()
        except:
            # NOTE(review): the bare except converts *any* failure, not only a
            # tag mismatch, into this generic decryption error.
            raise Exception("Cipher: decryption failed.")
        return plain
class CipherOperation(object):
    """A single streaming encryption or decryption operation (EVP context)."""
    # ctx: the OpenSSL EVP context; cipher: owning Cipher (set by Cipher.op);
    # xenc: 1 for encryption, 0 for decryption.
    __slots__ = ["ctx", "cipher", "xenc"]

    def __init__(self, xenc):
        self.ctx = _C.EVP_CIPHER_CTX_new()
        self.init(xenc)

    def init(self, xenc):
        # (Re-)initialise the EVP context; also called when this object is
        # recycled from a Cipher's pool.
        _C.EVP_CIPHER_CTX_init(self.ctx)
        self.cipher = None
        self.xenc = xenc

    def set_padding(self, pad):
        """Sets the padding on or off, accodring to pad (bool).

        Example:
            >>> from os import urandom
            >>> aes = Cipher("AES-128-ECB") # Init AES in Electronic codebook mode
            >>> key = urandom(16)
            >>> iv = None
            >>>
            >>> # Get a CipherOperation object for encryption
            >>> enc = aes.enc(key, iv)
            >>> enc.set_padding(False)
            >>> ref = b"A" * 16
            >>> ciphertext = enc.update(ref)
            >>> ciphertext += enc.finalize()
            >>> len(ciphertext)
            16
            >>> # Get a CipherOperation object for decryption
            >>> dec = aes.dec(key, iv)
            >>> dec.set_padding(False)
            >>> plaintext = dec.update(ciphertext)
            >>> plaintext += dec.finalize()
            >>> plaintext == ref # Check resulting plaintext matches reference one.
            True
        """
        ok = _C.EVP_CIPHER_CTX_set_padding(self.ctx, pad)
        if not ok: raise Exception("Cipher exception: Set padding failed.")

    def update(self, data):
        """Processes some data, and returns a partial result."""
        block_len = self.cipher.alg.block_size # self.cipher.len_block()
        # Worst case output size: the input plus one extra padding block.
        alloc_len = len(data) + block_len + 1
        # outl = _FFI.new("int *")
        outl = get_intptr()
        outl[0] = alloc_len
        out = _FFI.new("unsigned char[]", alloc_len)
        ok = _C.EVP_CipherUpdate(self.ctx, out, outl, data, len(data))
        if not ok: raise Exception("Cipher exception: Update failed.")
        # outl[0] now holds the number of bytes actually written.
        ret = bytes(_FFI.buffer(out)[:int(outl[0])])
        return_intptr(outl)
        return ret

    def finalize(self):
        """Finalizes the operation and may return some additional data.
        Throws an exception if the authenticator tag is different from the expected value.

        Example:
            Example of the exception thrown when an invalid tag is provided.

            >>> from os import urandom
            >>> aes = Cipher.aes_128_gcm() # Define an AES-GCM cipher
            >>> iv = urandom(16)
            >>> key = urandom(16)
            >>> ciphertext, tag = aes.quick_gcm_enc(key, iv, b"Hello")
            >>>
            >>> dec = aes.dec(key, iv) # Get a decryption CipherOperation
            >>> dec.set_tag(urandom(len(tag))) # Provide an invalid tag.
            >>> plaintext = dec.update(ciphertext) # Feed in the ciphertext for decryption.
            >>> try:
            ...     dec.finalize() # Check and Finalize.
            ... except:
            ...     print("Failure")
            Failure

            Throws an exception since integrity check fails due to the invalid tag.
        """
        block_len = self.cipher.len_block()
        alloc_len = block_len
        outl = _FFI.new("int *")
        outl[0] = alloc_len
        out = _FFI.new("unsigned char[]", alloc_len)
        try:
            ok = _C.EVP_CipherFinal_ex(self.ctx, out, outl)
            if not ok: raise Exception("Cipher exception: Finalize failed.")
            if outl[0] == 0:
                return b''
            ret = bytes(_FFI.buffer(out)[:int(outl[0])])
            return ret
        except:
            # NOTE(review): any failure (including the explicit raise above) is
            # re-raised as this generic error, losing the original message.
            raise Exception("Cipher: decryption failed.")

    def update_associated(self, data):
        """Processes some GCM associated data, and returns nothing."""
        if self.xenc == 0:
            # NOTE(review): a dummy all-zero tag is installed before feeding
            # AAD on the decryption path -- presumably required by the OpenSSL
            # version targeted; confirm still needed.
            self.set_tag(b"\00" * 16)
        outl = _FFI.new("int *")
        # A NULL output buffer tells OpenSSL this is associated data.
        ok = ( _C.EVP_CipherUpdate(self.ctx, _FFI.NULL, outl, data, len(data)))
        ok &=( outl[0] == len(data) )
        if not ok: raise Exception("Cipher exception: Update associated data failed.")

    def get_tag(self, tag_len = 16):
        """Get the GCM authentication tag. Execute after finalizing the encryption.

        Example:
            AES-GCM encryption usage:

            >>> from os import urandom
            >>> aes = Cipher.aes_128_gcm() # Initialize AES cipher
            >>> key = urandom(16)
            >>> iv = urandom(16)
            >>> enc = aes.enc(key, iv) # Get an encryption CipherOperation
            >>> enc.update_associated(b"Hello") # Include some associated data
            >>> ciphertext = enc.update(b"World!") # Include some plaintext
            >>> nothing = enc.finalize() # Finalize
            >>> tag = enc.get_tag(16) # Get the AES-GCM tag
        """
        tag = _FFI.new("unsigned char []", tag_len)
        ok = _C.EVP_CIPHER_CTX_ctrl(self.ctx, _C.EVP_CTRL_GCM_GET_TAG, tag_len, tag)
        if not ok: raise Exception("Cipher exception: Cipher control failed.")
        ret = bytes(_FFI.buffer(tag)[:])
        return ret

    def set_tag(self, tag):
        """Specify the GCM authenticator tag. Must be done before finalizing decryption

        Example:
            AES-GCM decryption and check:

            >>> aes = Cipher.aes_128_gcm() # Define an AES-GCM cipher
            >>> ciphertext, tag = (b'dV\\xb9:\\xd0\\xbe', b'pA\\xbe?\\xfc\\xd1&\\x03\\x1438\\xc5\\xf8In\\xaa')
            >>> dec = aes.dec(key=b"A"*16, iv=b"A"*16) # Get a decryption CipherOperation
            >>> dec.update_associated(b"Hello") # Feed in the non-secret assciated data.
            >>> plaintext = dec.update(ciphertext) # Feed in the ciphertext for decryption.
            >>> dec.set_tag(tag) # Provide the AES-GCM tag for integrity.
            >>> nothing = dec.finalize() # Check and finalize.
            >>> assert plaintext == b'World!'
        """
        ok = (_C.EVP_CIPHER_CTX_ctrl(self.ctx, _C.EVP_CTRL_GCM_SET_TAG, len(tag), tag))
        if not ok: raise Exception("Cipher exception: Set tag failed.")

    def __del__(self):
        # Recycle this operation into the owning cipher's pool; only free the
        # underlying context when the object is already pooled.
        # NOTE(review): if Cipher.op() failed before attaching a cipher,
        # self.cipher is None here and this raises during GC -- confirm.
        if self not in self.cipher._pool:
            self.cipher._pool.append(self)
        else:
            _C.EVP_CIPHER_CTX_cleanup(self.ctx)
            _C.EVP_CIPHER_CTX_free(self.ctx)
## When testing ignore extra variables
# pylint: disable=unused-variable,redefined-outer-name
def test_aes_init():
    """AES-128-CBC resolves and exposes the expected parameters."""
    cipher = Cipher("AES-128-CBC")
    assert cipher.alg != _FFI.NULL
    assert cipher.len_IV() == 16
    assert cipher.len_block() == 16
    assert cipher.len_key() == 16
    assert cipher.get_nid() == 419
    del cipher
def test_errors():
    """An unknown cipher name raises an 'Unknown cipher' exception."""
    with pytest.raises(Exception) as excinfo:
        Cipher("AES-128-XXF")
    assert 'Unknown' in str(excinfo.value)
def test_aes_enc():
    """Round-trip a long message through AES-128-CBC via the op() interface."""
    aes = Cipher("AES-128-CBC")
    message = b"Hello World" * 10000
    encryptor = aes.op(key=b"A"*16, iv=b"A"*16)
    ct = encryptor.update(message) + encryptor.finalize()
    decryptor = aes.op(key=b"A"*16, iv=b"A"*16, enc=0)
    pt = decryptor.update(ct) + decryptor.finalize()
    assert pt == message
def test_aes_ctr():
    """Round-trip a long message through AES-128-CTR via the op() interface."""
    aes = Cipher("AES-128-CTR")
    message = b"Hello World" * 10000
    encryptor = aes.op(key=b"A"*16, iv=b"A"*16)
    ct = encryptor.update(message) + encryptor.finalize()
    decryptor = aes.op(key=b"A"*16, iv=b"A"*16, enc=0)
    pt = decryptor.update(ct) + decryptor.finalize()
    assert pt == message
def test_aes_ops():
    """Round-trip via the enc()/dec() convenience wrappers."""
    aes = Cipher("AES-128-CTR")
    message = b"Hello World" * 10000
    encryptor = aes.enc(key=b"A"*16, iv=b"A"*16)
    ct = encryptor.update(message) + encryptor.finalize()
    decryptor = aes.dec(key=b"A"*16, iv=b"A"*16)
    pt = decryptor.update(ct) + decryptor.finalize()
    assert pt == message
def test_aes_gcm_encrypt():
    """AES-128-GCM: empty final block and a 16-byte bytes tag."""
    aes = Cipher.aes_128_gcm()
    assert aes.gcm
    enc_op = aes.op(key=b"A"*16, iv=b"A"*16)
    enc_op.update_associated(b"Hello")
    enc_op.update(b"World!")
    assert enc_op.finalize() == b''
    tag = enc_op.get_tag(16)
    assert len(tag) == 16
    assert isinstance(tag, bytes)
def test_aes_gcm_encrypt_192():
    """AES-192-GCM encryption with a 24-byte key produces a 16-byte tag."""
    aes = Cipher.aes_192_gcm()
    assert aes.gcm
    enc_op = aes.op(key=b"A"*24, iv=b"A"*16)
    enc_op.update_associated(b"Hello")
    enc_op.update(b"World!")
    assert enc_op.finalize() == b''
    tag = enc_op.get_tag(16)
    assert len(tag) == 16
def test_aes_gcm_encrypt_256():
    """AES-256-GCM encryption with a 32-byte key produces a 16-byte tag."""
    aes = Cipher.aes_256_gcm()
    assert aes.gcm
    enc_op = aes.op(key=b"A"*32, iv=b"A"*16)
    enc_op.update_associated(b"Hello")
    enc_op.update(b"World!")
    assert enc_op.finalize() == b''
    tag = enc_op.get_tag(16)
    assert len(tag) == 16
@pytest.fixture
def aesenc():
    """Fixture: AES-128-GCM cipher plus an (op, ciphertext, tag) sample."""
    aes = Cipher.aes_128_gcm()
    assert aes.gcm
    enc_op = aes.op(key=b"A"*16, iv=b"A"*16)
    enc_op.update_associated(b"Hello")
    ct = enc_op.update(b"World!")
    assert enc_op.finalize() == b''
    tag = enc_op.get_tag(16)
    assert len(tag) == 16
    return (aes, enc_op, ct, tag)
def test_gcm_dec(aesenc):
    """Decryption with matching key/iv/assoc/tag recovers the plaintext."""
    aes, _enc, ct, tag = aesenc
    dec_op = aes.dec(key=b"A"*16, iv=b"A"*16)
    dec_op.update_associated(b"Hello")
    pt = dec_op.update(ct)
    dec_op.set_tag(tag)
    dec_op.finalize()
    assert pt == b"World!"
def test_gcm_dec_badassoc(aesenc):
    """Tampered associated data makes finalize() raise."""
    aes, _enc, ct, tag = aesenc
    dec_op = aes.dec(key=b"A"*16, iv=b"A"*16)
    dec_op.update_associated(b"H4llo")
    dec_op.update(ct)
    dec_op.set_tag(tag)
    with pytest.raises(Exception) as excinfo:
        dec_op.finalize()
    assert "Cipher" in str(excinfo.value)
def test_gcm_dec_badkey(aesenc):
    """A wrong key makes finalize() raise."""
    aes, _enc, ct, tag = aesenc
    dec_op = aes.dec(key=b"B"*16, iv=b"A"*16)
    dec_op.update_associated(b"Hello")
    dec_op.update(ct)
    dec_op.set_tag(tag)
    with pytest.raises(Exception) as excinfo:
        dec_op.finalize()
    assert "Cipher" in str(excinfo.value)
def test_gcm_dec_badiv(aesenc):
    """A wrong IV makes finalize() raise."""
    aes, _enc, ct, tag = aesenc
    dec_op = aes.dec(key=b"A"*16, iv=b"B"*16)
    dec_op.update_associated(b"Hello")
    dec_op.update(ct)
    dec_op.set_tag(tag)
    with pytest.raises(Exception) as excinfo:
        dec_op.finalize()
    assert "Cipher" in str(excinfo.value)
def test_aes_gcm_byname():
    """Full GCM round-trip with the cipher constructed by name."""
    aes = Cipher("aes-128-gcm")
    assert aes.gcm

    enc_op = aes.op(key=b"A"*16, iv=b"A"*16)
    enc_op.update_associated(b"Hello")
    ct = enc_op.update(b"World!")
    assert enc_op.finalize() == b''
    tag = enc_op.get_tag(16)
    assert len(tag) == 16

    dec_op = aes.dec(key=b"A"*16, iv=b"A"*16)
    dec_op.update_associated(b"Hello")
    pt = dec_op.update(ct)
    dec_op.set_tag(tag)
    dec_op.finalize()
    assert pt == b"World!"
def test_aes_gcm_different_IV():
    """Same key+IV gives identical ciphertext; a different IV does not."""
    aes = Cipher("aes-128-gcm")

    def encrypt_once(iv):
        # Encrypt the same message/AAD and discard the tag, as the original did.
        e = aes.op(key=b"A"*16, iv=iv)
        e.update_associated(b"Hello")
        ct = e.update(b"World!")
        e.finalize()
        e.get_tag(16)
        return ct

    ct1 = encrypt_once(b"A"*16)
    ct2 = encrypt_once(b"A"*16)
    ct3 = encrypt_once(b"B"*16)
    assert ct1 == ct2
    assert ct1 != ct3
def test_quick():
    """quick_gcm_enc / quick_gcm_dec round-trip without associated data."""
    aes = Cipher("aes-128-gcm")
    ct, tag = aes.quick_gcm_enc(b"A"*16, b"A"*16, b"Hello")
    assert aes.quick_gcm_dec(b"A"*16, b"A"*16, ct, tag) == b"Hello"
def test_quick_assoc():
    """quick_gcm_enc / quick_gcm_dec round-trip with associated data."""
    aes = Cipher("aes-128-gcm")
    ct, tag = aes.quick_gcm_enc(b"A"*16, b"A"*16, b"Hello", assoc=b"blah")
    assert aes.quick_gcm_dec(b"A"*16, b"A"*16, ct, tag, assoc=b"blah") == b"Hello"
def test_ecb():
    """ECB round-trip of a single 16-byte block (no IV)."""
    key = b"\x02" * 16
    data = b"\x01" * 16
    assert len(data) == 16

    enc_op = Cipher("AES-128-ECB").enc(key, None)
    ct = enc_op.update(data) + enc_op.finalize()
    assert len(data) == 16

    dec_op = Cipher("AES-128-ECB").dec(key, None)
    pt = dec_op.update(ct) + dec_op.finalize()
    assert pt == data
# pylint: enable=unused-variable,redefined-outer-name
| 2.734375 | 3 |
Samples/Python/Plural/Plural.py | atkins126/I18N | 43 | 12761855 | <reponame>atkins126/I18N
import gettext

# Bind the "Plural" translation domain to the local "locale" directory and
# make it the default domain for this process.
gettext.bindtextdomain('Plural', 'locale')
gettext.textdomain('Plural')
_ = gettext.gettext

print(_("Plural Sample"))
for value in [0, 1, 2, 3, 4, 5, 11, 21, 101, 111]:
    # count: Amount of files
    message = gettext.ngettext("{count} file", "{count} files", value)
    print(message.format(count=value))
convlstm_autoencoder/convlstm_autoencoder.py | AlbertoCenzato/pytorch_model_zoo | 11 | 12761856 | from typing import List, Tuple
import torch
from torch import nn
from torch import Tensor
from convlstm import ConvLSTM, HiddenState
class ConvLSTMAutoencoder(nn.Module):
    """
    This model is an implementation of the 'autoencoder' convolutional LSTM
    model proposed in 'Convolutional LSTM Network: A Machine Learning Approach
    for Precipitation Nowcasting', Shi et al., 2015, http://arxiv.org/abs/1506.04214
    Instead of one decoding network, as proposed in the paper, this model has two
    decoding networks as in 'Unsupervised Learning of Video Representations using LSTMs',
    Srivastava et al., 2016.
    The encoding network receives a sequence of images and outputs its hidden state that
    should represent a compressed representation of the sequence. Its hidden state is then
    used as initial hidden state for the two decoding networks that use the information
    contained in it to respectively reconstruct the input sequence and to predict future
    frames.
    """

    def __init__(self, input_size: Tuple[int, int], input_dim: int,
                 hidden_dim: List[int], kernel_size: List[Tuple[int, int]],
                 batch_first: bool=True, bias: bool=True, decoding_steps: int=-1):
        """
        Args:
            input_size: spatial size (height, width) of the input frames.
            input_dim: number of channels of each input frame.
            hidden_dim: hidden channels for each encoder layer (one entry per layer).
            kernel_size: convolution kernel for each encoder layer.
            batch_first: if True, inputs/outputs are (batch, seq, ...) instead of (seq, batch, ...).
            bias: whether the ConvLSTM cells use bias terms.
            decoding_steps: number of frames each decoder emits; -1 means
                "same length as the input sequence".
        """
        super(ConvLSTMAutoencoder, self).__init__()
        self.decoding_steps = decoding_steps
        self.input_size = input_size
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.kernel_size = kernel_size
        self.batch_first = batch_first
        self.num_layers = len(hidden_dim)
        # Encoder consumes the whole sequence at once (SEQUENCE mode).
        self.encoder = ConvLSTM(
            input_size=input_size,
            input_dim=input_dim,
            hidden_dim=hidden_dim,
            kernel_size=kernel_size,
            num_layers=self.num_layers,
            batch_first=False,
            bias=bias,
            mode=ConvLSTM.SEQUENCE
        )
        # reverse the order of hidden dimensions and kernels
        decoding_hidden_dim = list(reversed(hidden_dim))
        decoding_kernel_size = list(reversed(kernel_size))
        decoding_hidden_dim .append(input_dim)  # NOTE: we need a num_of_decoding_layers = num_of_encoding_layers+1
        decoding_kernel_size.append((1,1))      # so we add a 1x1 ConvLSTM as last decoding layer
        # Both decoders run one frame at a time (STEP_BY_STEP mode), feeding
        # each output back in as the next input (see _decode).
        self.input_reconstruction = ConvLSTM(
            input_size=input_size,
            input_dim=input_dim,
            hidden_dim=decoding_hidden_dim,
            kernel_size=decoding_kernel_size,
            num_layers=self.num_layers + 1,
            batch_first=False,
            bias=bias,
            mode=ConvLSTM.STEP_BY_STEP
        )
        self.future_prediction = ConvLSTM(
            input_size=input_size,
            input_dim=input_dim,
            hidden_dim=decoding_hidden_dim,
            kernel_size=decoding_kernel_size,
            num_layers=self.num_layers + 1,
            batch_first=False,
            bias=bias,
            mode=ConvLSTM.STEP_BY_STEP
        )

    def forward(self, input_sequence: Tensor) -> Tuple[Tensor, Tensor]:
        """Encode the sequence and return (reconstruction, prediction)."""
        sequence = input_sequence.transpose(0,1) if self.batch_first else input_sequence # always work in sequence-first mode
        sequence_len = sequence.size(0)
        steps = self.decoding_steps if self.decoding_steps != -1 else sequence_len
        # encode
        _, hidden_state = self.encoder(sequence)
        last_frame = sequence[-1, :]
        h_n, c_n = hidden_state
        # Only the last layer's state is used as the compressed representation.
        representation = (h_n[-1], c_n[-1])
        # decode for input reconstruction
        output_seq_recon = ConvLSTMAutoencoder._decode(self.input_reconstruction, last_frame,
                                                       representation, steps)
        # decode for future prediction
        output_seq_pred = ConvLSTMAutoencoder._decode(self.future_prediction, last_frame,
                                                      representation, steps)
        if self.batch_first:  # if input was batch_first restore dimension order
            reconstruction = output_seq_recon.transpose(0,1)
            prediction = output_seq_pred .transpose(0,1)
        else:
            reconstruction = output_seq_recon
            prediction = output_seq_pred
        return (reconstruction, prediction)

    @staticmethod
    def _decode(decoder: ConvLSTM, last_frame: Tensor, representation: HiddenState, steps: int) -> Tensor:
        """Autoregressively unroll *decoder* for *steps* frames.

        The encoder's final state seeds the decoder's first layer; the last
        input frame is the first decoder input, and each output is fed back
        as the next input.
        """
        decoded_sequence = []
        h_n, c_n = representation
        h_0, c_0 = decoder.init_hidden(last_frame.size(0))
        # Seed only layer 0 with the encoder representation; other layers
        # start from the decoder's default initial state.
        h_0[0], c_0[0] = h_n, c_n
        state = (h_0, c_0)
        output = last_frame
        for t in range(steps):
            output, state = decoder(output, state)
            decoded_sequence.append(output)
        # Stack along a new leading time dimension (sequence-first).
        return torch.stack(decoded_sequence, dim=0)
| 3.421875 | 3 |
packages/django-backend/notify/apps.py | ZechyW/cs-toolkit | 1 | 12761857 | <gh_stars>1-10
from django.apps import AppConfig
class NotifyConfig(AppConfig):
    """Django application configuration for the ``notify`` app."""
    name = "notify"

    def ready(self):
        # Import the signal handlers so they are registered once Django has
        # finished loading the app registry.
        # noinspection PyUnresolvedReferences
        import notify.signals
| 1.453125 | 1 |
sendgrid/version.py | sarrionandia/tournatrack | 1 | 12761858 | version_info = (1, 0, 1)
__version__ = '.'.join(str(v) for v in version_info) | 1.695313 | 2 |
vox/win/tests/test_first_mismatch.py | drocco007/vox_linux | 5 | 12761859 | <reponame>drocco007/vox_linux
from vox.win.textbuf import first_mismatch
import pytest
def test_zero_length_strings():
    assert first_mismatch('', '') == 0
def test_should_be_length_of_identical_single_character_strings():
    assert first_mismatch('a', 'a') == 1
def test_should_be_beginning_of_different_single_character_strings():
    assert first_mismatch('b', 'c') == 0
@pytest.mark.parametrize('target', ['d', 'de', 'the', 'quad', ' '])
def test_should_be_beginning_with_zero_length_source(target):
    assert first_mismatch('', target) == 0
@pytest.mark.parametrize('source', ['z', 'ea', 'the', 'quad', ' '])
def test_should_be_beginning_with_zero_length_target(source):
    assert first_mismatch(source, '') == 0
def test_should_be_length_of_identical_two_character_strings():
    assert first_mismatch('az', 'az') == 2
def test_should_be_beginning_of_different_two_character_strings():
    assert first_mismatch('bx', 'cw') == 0
def test_first_mismatch_with_2_character_strings():
    assert first_mismatch('ab', 'a ') == 1
def test_different_length_strings():
    source = 'Returns a subset'
    target = 'Returns a set a subset'
    assert first_mismatch(source, target) == 11
def test_different_length_strings_with_limit():
    source = 'Returns a subset'
    target = 'Returns a set a subset'
    # The limit caps the comparison before the actual mismatch at index 11.
    assert first_mismatch(source, target, 7) == 7
def test_different_length_strings_with_limit_past_mismatch():
    source = 'Returns a subset'
    target = 'Returns a set a subset'
    # A limit beyond the mismatch does not change the result.
    assert first_mismatch(source, target, 15) == 11
| 2.515625 | 3 |
flowtext/models/elmo/utils.py | Oneflow-Inc/text | 1 | 12761860 | <reponame>Oneflow-Inc/text
import collections
import random
import logging
from urllib.parse import urlparse
from urllib.request import Request, urlopen
import os
import shutil
import hashlib
import tempfile
import tarfile
from tqdm import tqdm
import oneflow as flow
from oneflow import Tensor
logger = logging.getLogger("elmo")
def recover(li, ind):
    """Undo a permutation: element i of *li* moves to the rank of ind[i].

    ``ind`` holds, for each current position, its position in the original
    order; the returned list restores that original order.
    """
    order = sorted(range(len(ind)), key=lambda pos: ind[pos])
    return [li[i] for i in order]
def get_lengths_from_binary_sequence_mask(mask: flow.Tensor):
    """Return per-row sequence lengths by summing a 0/1 mask over its last dim."""
    return mask.long().sum(-1)
def sort_batch_by_length(tensor, sequence_lengths):
    """Sort a batch tensor by decreasing sequence length.

    Returns (sorted_tensor, sorted_sequence_lengths, restoration_indices,
    permutation_index); index-selecting the sorted tensor with
    restoration_indices recovers the original batch order.
    """
    if not isinstance(tensor, Tensor) or not isinstance(sequence_lengths, Tensor):
        raise Exception("Both the tensor and sequence length must be flow.Tensor.")
    (sorted_sequence_lengths, permutation_index) = sequence_lengths.sort(
        0, descending=True
    )
    sorted_tensor = tensor.index_select(0, permutation_index)
    # NOTE(review): sequence_lengths is overwritten in place with 0..N-1 here
    # and reused as an index range -- the caller's length tensor is clobbered.
    sequence_lengths.data.copy_(flow.arange(0, sequence_lengths.size(0)))
    index_range = sequence_lengths.clone()
    index_range = flow.Tensor(index_range.long())
    # Invert the permutation: reverse_mapping[i] is where original row i went.
    _, reverse_mapping = permutation_index.sort(0, descending=False)
    restoration_indices = index_range.index_select(0, reverse_mapping)
    return (
        sorted_tensor,
        sorted_sequence_lengths,
        restoration_indices,
        permutation_index,
    )
# TODO: modify after the orthogonal supported.
# def block_orthogonal(tensor: flow.Tensor, split_sizes: List[int], gain: float = 1.0) -> None:
# if isinstance(tensor, Tensor):
# sizes = list(tensor.size())
# if any([a % b != 0 for a, b in zip(sizes, split_sizes)]):
# raise Exception("tensor dimensions must be divisible by their respective "
# "split_sizes. Found size: {} and split_sizes: {}".format(sizes, split_sizes))
# indexes = [list(range(0, max_size, split))
# for max_size, split in zip(sizes, split_sizes)]
# for block_start_indices in itertools.product(*indexes):
# index_and_step_tuples = zip(block_start_indices, split_sizes)
# block_slice = tuple([slice(start_index, start_index + step)
# for start_index, step in index_and_step_tuples])
# tensor[block_slice] = flow.nn.init.orthogonal_(tensor[block_slice].contiguous(), gain=gain)
def get_dropout_mask(dropout_probability: float, tensor_for_masking: Tensor):
    """Return an inverted-dropout mask shaped like *tensor_for_masking*.

    Kept positions are scaled by 1/(1-p) so the expected activation magnitude
    is unchanged at training time.
    """
    binary_mask = tensor_for_masking.clone()
    binary_mask.data.copy_(flow.rand(tensor_for_masking.size()) > dropout_probability)
    dropout_mask = binary_mask.float().div(1.0 - dropout_probability)
    return dropout_mask
def dict2namedtuple(dic):
    """Convert *dic* into an instance of a namedtuple called ``Namespace``."""
    Namespace = collections.namedtuple("Namespace", dic.keys())
    return Namespace(**dic)
def read_list(sents, max_chars=None):
    """Wrap each sentence with <bos>/<eos> markers, truncating long tokens.

    Returns (dataset, textset): dataset holds the (possibly truncated) tokens
    with the boundary markers added, textset the original untruncated tokens.
    """
    dataset = []
    textset = []
    for sent in sents:
        marked = ["<bos>"]
        originals = []
        for token in sent:
            originals.append(token)
            # Reserve two positions for the <bow>/<eow> characters added later.
            if max_chars is not None and len(token) + 2 > max_chars:
                token = token[: max_chars - 2]
            marked.append(token)
        marked.append("<eos>")
        dataset.append(marked)
        textset.append(originals)
    return dataset, textset
def create_one_batch(x, word2id, char2id, config, oov="<oov>", pad="<pad>", sort=True):
    """Build word-id, char-id and mask tensors for one batch of sentences.

    Returns (batch_w, batch_c, lens, masks); batch_w / batch_c are None when
    the corresponding vocabulary is not supplied.
    """
    batch_size = len(x)
    lst = list(range(batch_size))
    if sort:
        # Sort sentences by decreasing length.
        lst.sort(key=lambda l: -len(x[l]))
    x = [x[i] for i in lst]
    # NOTE(review): x has already been permuted by lst, so indexing it by lst
    # again permutes the lengths a second time -- verify this is intended.
    lens = [len(x[i]) for i in lst]
    max_len = max(lens)
    if word2id is not None:
        oov_id, pad_id = word2id.get(oov, None), word2id.get(pad, None)
        assert oov_id is not None and pad_id is not None
        # (batch, max_len) word ids, padded with pad_id.
        batch_w = flow.zeros(batch_size, max_len).fill_(pad_id)
        for i, x_i in enumerate(x):
            for j, x_ij in enumerate(x_i):
                batch_w[i, j] = word2id.get(x_ij, oov_id)
        batch_w = batch_w.long()
    else:
        batch_w = None
    if char2id is not None:
        bow_id, eow_id, oov_id, pad_id = [
            char2id.get(key, None) for key in ("<eow>", "<bow>", oov, pad)
        ]
        # NOTE(review): the unpack order maps bow_id to "<eow>" and eow_id to
        # "<bow>" -- confirm against how the character vocabulary was built.
        assert (
            bow_id is not None
            and eow_id is not None
            and oov_id is not None
            and pad_id is not None
        )
        if config["token_embedder"]["name"].lower() == "cnn":
            # CNN embedder uses a fixed character width per token.
            max_chars = config["token_embedder"]["max_characters_per_token"]
            assert max([len(w) for i in lst for w in x[i]]) + 2 <= max_chars
        elif config["token_embedder"]["name"].lower() == "lstm":
            # LSTM embedder sizes the char dim to the longest token plus markers.
            max_chars = max([len(w) for i in lst for w in x[i]]) + 2
        else:
            raise ValueError(
                "Unknown token_embedder: {0}".format(config["token_embedder"]["name"])
            )
        # (batch, max_len, max_chars) character ids, padded with pad_id.
        batch_c = flow.zeros(batch_size, max_len, max_chars).fill_(pad_id)
        for i, x_i in enumerate(x):
            for j, x_ij in enumerate(x_i):
                batch_c[i, j, 0] = bow_id
                if x_ij == "<bos>" or x_ij == "<eos>":
                    # Sentence boundary markers are encoded as one character.
                    batch_c[i, j, 1] = char2id.get(x_ij)
                    batch_c[i, j, 2] = eow_id
                else:
                    for k, c in enumerate(x_ij):
                        batch_c[i, j, k + 1] = char2id.get(c, oov_id)
                    batch_c[i, j, len(x_ij) + 1] = eow_id
        batch_c = batch_c.long()
    else:
        batch_c = None
    # masks[0]: (batch, max_len) padding mask; masks[1]/masks[2]: flat indices
    # of positions that have a following / preceding token.
    masks = [flow.zeros(batch_size, max_len), [], []]
    for i, x_i in enumerate(x):
        for j in range(len(x_i)):
            masks[0][i, j] = 1
            if j + 1 < len(x_i):
                masks[1].append(i * max_len + j)
            if j > 0:
                masks[2].append(i * max_len + j)
    assert len(masks[1]) <= batch_size * max_len
    assert len(masks[2]) <= batch_size * max_len
    masks[0] = flow.Tensor(masks[0]).long()
    masks[1] = flow.Tensor(masks[1]).long()
    masks[2] = flow.Tensor(masks[2]).long()
    return batch_w, batch_c, lens, masks
def create_batches(
    x,
    batch_size,
    word2id,
    char2id,
    config,
    perm=None,
    shuffle=False,
    sort=True,
    text=None,
):
    """Split dataset *x* into batches of tensors via create_one_batch.

    Returns (batches_w, batches_c, batches_lens, batches_masks, recover_ind),
    with batches_text inserted before recover_ind when *text* is given.
    recover_ind maps the batched order back to the original sentence order
    (see `recover`).
    """
    ind = list(range(len(x)))
    lst = perm or list(range(len(x)))
    if shuffle:
        random.shuffle(lst)
    if sort:
        # Sort globally by decreasing length so batches have similar lengths.
        lst.sort(key=lambda l: -len(x[l]))
    x = [x[i] for i in lst]
    ind = [ind[i] for i in lst]
    if text is not None:
        text = [text[i] for i in lst]
    sum_len = 0.0
    batches_w, batches_c, batches_lens, batches_masks, batches_text, batches_ind = (
        [],
        [],
        [],
        [],
        [],
        [],
    )
    size = batch_size
    # Number of batches, rounding up.
    nbatch = (len(x) - 1) // size + 1
    for i in range(nbatch):
        start_id, end_id = i * size, (i + 1) * size
        bw, bc, blens, bmasks = create_one_batch(
            x[start_id:end_id], word2id, char2id, config, sort=sort
        )
        sum_len += sum(blens)
        batches_w.append(bw)
        batches_c.append(bc)
        batches_lens.append(blens)
        batches_masks.append(bmasks)
        batches_ind.append(ind[start_id:end_id])
        if text is not None:
            batches_text.append(text[start_id:end_id])
    if sort:
        # Shuffle whole batches so training does not always see longest first.
        perm = list(range(nbatch))
        random.shuffle(perm)
        batches_w = [batches_w[i] for i in perm]
        batches_c = [batches_c[i] for i in perm]
        batches_lens = [batches_lens[i] for i in perm]
        batches_masks = [batches_masks[i] for i in perm]
        batches_ind = [batches_ind[i] for i in perm]
        if text is not None:
            batches_text = [batches_text[i] for i in perm]
    logger.info("{} batches, avg len: {:.1f}".format(nbatch, sum_len / len(x)))
    # Flatten the per-batch original indices into one recovery permutation.
    recover_ind = [item for sublist in batches_ind for item in sublist]
    if text is not None:
        return (
            batches_w,
            batches_c,
            batches_lens,
            batches_masks,
            batches_text,
            recover_ind,
        )
    return batches_w, batches_c, batches_lens, batches_masks, recover_ind
def load_state_dict_from_url(url: str, saved_path: str):
    """Download and unpack a pretrained-model tarball.

    :param url: URL of a ``.tar``/``.tar.gz`` archive whose basename is the
        package file name.
    :param saved_path: directory to store the archive and extracted files;
        ``None`` falls back to ``./pretrained_flow``.
    :return: path to the extracted directory (archive name without its
        extension) inside ``saved_path``.
    """
    if saved_path is None:  # fix: compare to None with `is`, not `==`
        saved_path = "./pretrained_flow"
    url_parse = urlparse(url)
    # makedirs(exist_ok=True) also creates missing parents and avoids the
    # race/crash os.mkdir had when the directory already exists.
    os.makedirs(saved_path, exist_ok=True)
    package_name = url_parse.path.split("/")[-1]
    package_path = os.path.join(saved_path, package_name)
    download_url_to_file(url, package_path)
    print(
        "The pretrained-model file saved in '{}'".format(
            os.path.abspath(saved_path)
        )
    )
    with tarfile.open(package_path) as f:
        # SECURITY NOTE: extractall() trusts archive member paths; only use
        # with archives from a trusted source.
        f.extractall(saved_path)
    file_name = package_name.split(".")[0]
    file_path = os.path.join(saved_path, file_name)
    return file_path
def download_url_to_file(url, dst, hash_prefix=None, progress=True):
    """Download ``url`` to the file path ``dst``.

    Data is first written to a temporary file in the destination directory
    and moved into place only on success, so a failed download never
    replaces an existing file.

    :param url: source URL to fetch.
    :param dst: destination file path (``~`` is expanded).
    :param hash_prefix: optional expected prefix of the SHA-256 hex digest;
        a mismatch raises ``RuntimeError``.
    :param progress: show a tqdm progress bar when True.
    """
    file_size = None
    req = Request(url)
    u = urlopen(req)
    meta = u.info()
    # Python 2's mimetools message exposes getheaders(); Python 3's
    # email.message.Message uses get_all().
    if hasattr(meta, "getheaders"):
        content_length = meta.getheaders("Content-Length")
    else:
        content_length = meta.get_all("Content-Length")
    if content_length is not None and len(content_length) > 0:
        file_size = int(content_length[0])
    dst = os.path.expanduser(dst)
    dst_dir = os.path.dirname(dst)
    # Temp file in the same directory so shutil.move is a same-filesystem
    # rename rather than a copy.
    f = tempfile.NamedTemporaryFile(delete=False, dir=dst_dir)
    try:
        if hash_prefix is not None:
            sha256 = hashlib.sha256()
        with tqdm(
            total=file_size,
            disable=not progress,
            unit="B",
            unit_scale=True,
            unit_divisor=1024,
        ) as pbar:
            # Stream in 8 KiB chunks, hashing as we go when requested.
            while True:
                buffer = u.read(8192)
                if len(buffer) == 0:
                    break
                f.write(buffer)
                if hash_prefix is not None:
                    sha256.update(buffer)
                pbar.update(len(buffer))
        f.close()
        if hash_prefix is not None:
            digest = sha256.hexdigest()
            if digest[: len(hash_prefix)] != hash_prefix:
                raise RuntimeError(
                    'invalid hash value (expected "{}", got "{}")'.format(
                        hash_prefix, digest
                    )
                )
        shutil.move(f.name, dst)
    finally:
        # close() is idempotent; the temp file still exists only when the
        # move did not happen (error path), so remove it then.
        f.close()
        if os.path.exists(f.name):
            os.remove(f.name)
| 2.296875 | 2 |
student_input.py | jamesnoria/udh_calculator | 0 | 12761861 | <filename>student_input.py
import sqlite3
import pandas as pd
class StudentInput:
    """Student registration for the first time (only if the DB is empty).

    Opens ./students.db on construction and, when no student row exists,
    interactively prompts for id/password/DNI and stores them.
    """

    def __init__(self):
        """Open the SQLite database and keep a cursor for later queries."""
        self.db = sqlite3.connect('./students.db')
        self.sql = self.db.cursor()

    def student_init(self):
        """Return the first (student_id, password, dni) row, or None."""
        data = self.sql.execute('SELECT * FROM students;')
        return data.fetchone()

    def student_welcome(self):
        """Register a new student when the students table is empty.

        Loops prompting for credentials until the user confirms with 'si',
        then inserts the row and commits.
        """
        df = pd.read_sql_query('SELECT * FROM students;', self.db)
        # if data base is empty:
        if df.empty:
            print('***** Bienvenido a la Calculadora de Promedios *****')
            print(
                'Necesito registrarte por ÚNICA vez para acceder directamente de ahora en adelante')
            while True:
                student_id = input('Código de alumno: ')
                student_pw = input('Contraseña: ')
                student_dni = input('DNI: ')

                print(
                    f'\nEstos son los datos que ingresaste:\nCódigo de alumno: {student_id}\nContraseña: {student_pw}\nDNI: {student_dni}')
                print(
                    '\nEstos datos tienen que estar correctos ya que si no lo estan, TODO el programa no funcionará')
                right_option = input('¿Estas seguro de ingresarlos? (si/no): ')

                if right_option == 'si':
                    # SECURITY FIX: use a parameterized query instead of an
                    # f-string -- the values come straight from input() and
                    # the old code was vulnerable to SQL injection.
                    self.sql.execute(
                        'INSERT INTO students (id, password, dni) VALUES (?, ?, ?);',
                        (student_id, student_pw, student_dni),
                    )
                    self.db.commit()
                    print('\n¡LISTO!, ya estas registrado')
                    break
                else:
                    continue
| 3.984375 | 4 |
notebooks/model_scratchpad.py | a-barton/cdk-model-test | 0 | 12761862 | # %%
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from xgboost import XGBClassifier
import pandas as pd
# %%
data = pd.read_csv("../data/iris.csv")
X = data.drop("class", axis=1)
y = data["class"]
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.33, random_state=42
)

# %%
# FIX: fit the scaler on the training split only and *reuse* it for the
# test split. Calling fit_transform on X_test leaked test statistics and
# scaled the test set differently from the data the model was trained on.
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

# %%
# FIX: same reasoning for the label encoder -- fit once on the training
# labels so train and test share the same class -> integer mapping
# (independent fits could assign different integers per split).
label_encoder = LabelEncoder()
y_train = label_encoder.fit_transform(y_train)
y_test = label_encoder.transform(y_test)

# %%
model = XGBClassifier(
    max_depth=3,
    objective="multi:softprob",
    eval_metric="merror",
    use_label_encoder=False,
)
model.fit(X_train_scaled, y_train)
preds = model.predict(X_test_scaled)
print(model.score(X_test_scaled, y_test))
print(confusion_matrix(y_test, preds))
# %%
| 3.203125 | 3 |
vkge/training/losses.py | acr42/Neural-Variational-Knowledge-Graphs | 11 | 12761863 | <gh_stars>10-100
# -*- coding: utf-8 -*-
import tensorflow as tf
import sys
def logistic_loss(scores, targets):
    """
    Logistic loss as used in [1]

    [1] http://jmlr.org/proceedings/papers/v48/trouillon16.pdf

    :param scores: (N,) Tensor containing scores of examples.
    :param targets: (N,) Tensor containing {0, 1} targets of examples.
    :return: Loss value (sum of per-example sigmoid cross-entropies).
    """
    per_example = tf.nn.sigmoid_cross_entropy_with_logits(
        logits=scores, labels=targets)
    return tf.reduce_sum(per_example)
def hinge_loss(scores, targets, margin=1):
    """
    Hinge loss: sum of relu(margin - scores * (2 * targets - 1)).

    :param scores: (N,) Tensor containing scores of examples.
    :param targets: (N,) Tensor containing {0, 1} targets of examples.
    :param margin: float margin of the hinge.
    :return: Loss value.
    """
    # Map {0, 1} targets to {-1, +1} signs before applying the hinge.
    signs = 2 * targets - 1
    per_example = tf.nn.relu(margin - scores * signs)
    return tf.reduce_sum(per_example)
# Aliases
# Short names so configurations can refer to losses as 'logistic'/'hinge'
# and resolve them through get_function() below.
logistic = logistic_loss
hinge = hinge_loss
def get_function(function_name):
    """Resolve a loss function defined in this module by its name.

    :raises ValueError: when no attribute with that name exists here.
    """
    module = sys.modules[__name__]
    try:
        return getattr(module, function_name)
    except AttributeError:
        raise ValueError('Unknown loss function: {}'.format(function_name))
| 2.734375 | 3 |
Ar_Script/ar_179_测试_键盘事件.py | archerckk/PyTest | 0 | 12761864 | <filename>Ar_Script/ar_179_测试_键盘事件.py
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time

# Demo of selenium keyboard events: type into Baidu's search box, then
# exercise BACK_SPACE, SPACE and Ctrl+A / Ctrl+X / Ctrl+V key chords.
driver=webdriver.Chrome()
driver.get('http://www.baidu.com')
# Locate the search input (id 'kw') and type a (redacted) query.
target=driver.find_element_by_id('kw')
target.send_keys('<PASSWORD>')
# Delete the last typed character, then add a space and more text.
target.send_keys(Keys.BACK_SPACE)
target.send_keys(Keys.SPACE)
target.send_keys('教程')
# Select-all, cut, and paste the query back into the box.
target.send_keys(Keys.CONTROL,'a')
target.send_keys(Keys.CONTROL,'x')
target.send_keys(Keys.CONTROL,'v')
target.click()
# Keep the browser open briefly so the result is visible, then quit.
time.sleep(3)
driver.quit()
| 2.203125 | 2 |
python/pycascading/decorators.py | fakeNetflix/twitter-repo-pycascading | 49 | 12761865 | #
# Copyright 2011 Twitter, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
PyCascading function decorators to be used with user-defined functions.
A user-defined function is a function that gets applied as a filter or an
Each function for each tuple, or the reduce-side function for tuples in a
grouping in an Every Cascading operation.
UDFs can emit a new set of tuples (as in a Function after an Each operation),
keep or filter out tuples (a Filter after an Each), or emit aggregate values
(an Aggregator or Buffer for a group after an Every).
We use globally or locally scoped Python functions to perform these
user-defined operations. When building the data processing pipeline, we can
simply stream data through a Python function with PyCascading if it was
decorated by one of the decorators.
* A udf_'map' function is executed for each input tuple, and returns no, one, or
several new output tuples.
* A 'udf_filter' is a boolean-valued function, which should return true if the
input tuple should be kept for the output, and false if not.
* A 'udf_buffer' is a function that is applied to groups of tuples, and is the
equivalent of a Cascading Buffer. It returns an aggregate after iterating
through the tuples in the group.
Exports the following:
udf
yields
numargs_expected
python_list_expected
python_dict_expected
collects_output
produces_python_list
produces_tuples
udf_filter
udf_map
udf_buffer
"""
__author__ = '<NAME>'
import inspect
from pycascading.pipe import DecoratedFunction
from com.twitter.pycascading import CascadingBaseOperationWrapper
from com.twitter.pycascading import CascadingRecordProducerWrapper
def _function_decorator(args, kwargs, defaults={}):
    """
    A decorator to recursively decorate a function with arbitrary attributes.

    Supports both bare usage (@deco) and parameterized usage (@deco(...)):
    when called with a single routine/DecoratedFunction positional argument
    and no keywords, the target is decorated immediately; otherwise a
    decorator closure is returned.
    """
    merged = dict(defaults)
    merged.update(kwargs)

    def decorate(target):
        # Chain onto an already-decorated function, or wrap the raw one.
        if isinstance(target, DecoratedFunction):
            decorated = target
        else:
            decorated = DecoratedFunction.decorate_function(target)
        decorated.decorators.update(merged)
        return decorated

    bare_usage = (
        len(args) == 1 and not kwargs
        and (inspect.isroutine(args[0]) or isinstance(args[0], DecoratedFunction))
    )
    if bare_usage:
        # Used without ()s; no additional parameters are possible here.
        return decorate(args[0])
    return decorate
def udf(*args, **kwargs):
    """Base decorator marking a Python function as a PyCascading UDF so it
    can receive tuples (Each) or groups of tuples (Every) from Cascading."""
    return _function_decorator(args, kwargs)
def yields(*args, **kwargs):
    """Declare that the UDF is a generator yielding zero or more output
    tuples (instead of returning a single tuple). Without this decorator
    the emit style is auto-detected on the first call; yielding None or
    nothing emits no tuples."""
    defaults = {
        'output_method': CascadingRecordProducerWrapper.OutputMethod.YIELDS,
    }
    return _function_decorator(args, kwargs, defaults)
def numargs_expected(num, *args, **kwargs):
    """Declare that input tuples must contain exactly *num* fields.

    Arguments:
    num -- the exact number of fields that the input tuples must have
    """
    return _function_decorator(args, kwargs, {'numargs_expected': num})
def python_list_expected(*args, **kwargs):
    """PyCascading will pass in the input tuples as Python lists.

    There is some performance penalty as all the incoming tuples need to be
    converted to Python lists.
    """
    # Fix: removed dead code that built an unused 'params' dict here
    # (`params = dict(kwargs); params.update()`); parameter merging is
    # already handled inside _function_decorator.
    return _function_decorator(args, kwargs, {'input_conversion':
        CascadingBaseOperationWrapper.ConvertInputTuples.PYTHON_LIST})
def python_dict_expected(*args, **kwargs):
    """Convert each input tuple into a Python dict before calling the UDF.

    Keys are the Cascading field names, values the tuple values read; the
    per-tuple conversion carries some performance penalty.
    """
    defaults = {
        'input_conversion':
            CascadingBaseOperationWrapper.ConvertInputTuples.PYTHON_DICT,
    }
    return _function_decorator(args, kwargs, defaults)
def collects_output(*args, **kwargs):
    """Pass a Cascading TupleEntryCollector to the UDF, which emits output
    by calling its 'add' method. Fastest option since no conversion happens
    between Python objects and Cascading tuples."""
    defaults = {
        'output_method': CascadingRecordProducerWrapper.OutputMethod.COLLECTS,
    }
    return _function_decorator(args, kwargs, defaults)
def produces_python_list(*args, **kwargs):
    """The UDF emits Python lists; PyCascading converts them to Cascading
    Tuples (some conversion overhead)."""
    defaults = {
        'output_type': CascadingRecordProducerWrapper.OutputType.PYTHON_LIST,
    }
    return _function_decorator(args, kwargs, defaults)
def produces_tuples(*args, **kwargs):
    """The UDF emits native Cascading Tuples/TupleEntrys directly; no
    conversion takes place, so this is a fast output path."""
    defaults = {
        'output_type': CascadingRecordProducerWrapper.OutputType.TUPLE,
    }
    return _function_decorator(args, kwargs, defaults)
def udf_filter(*args, **kwargs):
    """Mark the UDF as a filter: return True to keep the input tuple,
    False to drop it.

    IMPORTANT: this is the opposite of what a Cascading Filter expects but
    matches Python's filter(). The same effect can be achieved with a map
    returning the tuple itself or None.
    """
    return _function_decorator(args, kwargs, {'type': 'filter'})
def udf_map(*args, **kwargs):
    """Mark the UDF as a per-tuple mapper (Cascading Each).

    The function is invoked once per input tuple; the tuple arrives as a
    native TupleEntry unless python_list_expected/python_dict_expected is
    also applied. With collects_output, a TupleEntryCollector is passed as
    the second parameter; otherwise the function may return one tuple or
    yield several as a generator. Return-vs-yield and the output tuple type
    are auto-detected at runtime when not fixed by other decorators.

    'map' here is closer to Python's builtin map() than MapReduce's map:
    each input tuple is transformed by a custom function.

    Arguments:
    produces -- a list of output field names
    """
    return _function_decorator(args, kwargs, {'type': 'map'})
def udf_buffer(*args, **kwargs):
    """Mark the UDF as a Cascading Buffer applied per group (Every after a
    GroupBy).

    The function is called once per grouping, on a reducer, with the value
    of the grouping field and an iterator over the group's tuples. The
    iterator reuses a single static TupleEntry in Cascading, so take
    iterator.getTuple() or deep-copy entries that must outlive the loop.

    Unlike aggregators, the grouping fields are NOT added to the output
    automatically -- the output is exactly what the buffer returns or
    yields -- and only one buffer may follow a GroupBy (whereas several
    aggregators may). A Buffer is effectively a function emitting arbitrary
    tuples per group, not just an aggregate.

    See http://groups.google.com/group/cascading-user/browse_thread/thread/f5e5f56f6500ed53/f55fdd6bba399dcf?lnk=gst&q=scope#f55fdd6bba399dcf
    """
    return _function_decorator(args, kwargs, {'type': 'buffer'})
def unwrap(*args, **kwargs):
    """Unwrap the input tuple into individual function parameters before
    the call. Note: not implemented on the Java side yet."""
    return _function_decorator(args, kwargs, {'parameters': 'unwrap'})
def tuplein(*args, **kwargs):
    """Pass the input to the UDF as a raw tuple parameter (complement of
    'unwrap', which spreads the fields into separate parameters)."""
    return _function_decorator(args, kwargs, { 'parameters' : 'tuple' })
| 2.671875 | 3 |
src/ml_utils.py | masaponto/ml_utilities | 0 | 12761866 | <filename>src/ml_utils.py
#!/usr/bin/env python
import numpy as np
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
def cross_validation(estimator, data_set, k=5, scaling=False):
    '''
    find cross_validation accuracy
    estimator must be implemented fit, predict and Inheritance Baseestimator and ClassifierMixin
    '''
    X, y = shuffle(data_set.data, data_set.target)
    n = data_set.data.shape[0]
    fold_size = n // k
    # Drop the remainder so every fold has exactly fold_size samples.
    usable = n - (n % k)
    scores = []
    for start in range(0, usable, fold_size):
        scores.append(validation(estimator, X, y, fold_size, start, scaling))
    return np.array(scores)
def split_data(X, y, m, index):
    """Carve out the fold [index, index + m) as the test split.

    Everything before and after that window is concatenated into the
    training split. Returns (x_train, y_train, x_test, y_test).
    """
    test_slice = slice(index, index + m)
    x_test = X[test_slice]
    y_test = y[test_slice]
    x_train = np.r_[X[:index], X[index + m:]]
    y_train = np.r_[y[:index], y[index + m:]]
    return x_train, y_train, x_test, y_test
def validation(estimator, X, y, m, index, scaling=False):
    """Fit the estimator on all folds but [index, index+m) and return the
    test-fold score; optionally standardize features using statistics
    computed on the training fold only."""
    x_train, y_train, x_test, y_test = split_data(X, y, m, index)
    if scaling:
        scaler = StandardScaler().fit(x_train)
        x_train = scaler.transform(x_train)
        x_test = scaler.transform(x_test)
    estimator.fit(x_train, y_train)
    return estimator.score(x_test, y_test)
def validation_with_train(estimator, X, y, m, index):
    """Like validation(), but return (test_score, train_score) so over- or
    under-fitting can be inspected per fold."""
    x_train, y_train, x_test, y_test = split_data(X, y, m, index)
    estimator.fit(x_train, y_train)
    return estimator.score(x_test, y_test), estimator.score(x_train, y_train)
def argwrapper(args):
    """Unpack a (callable, *args) tuple and invoke it -- lets Pool.map
    dispatch to functions taking multiple arguments."""
    func, *call_args = args
    return func(*call_args)
def mp_cross_validation(estimator, data_set, k=5, p_num=4, scaling=False):
    '''
    Cross-validation with multi processing

    Runs one fold per task on a multiprocessing.Pool of p_num workers and
    returns a numpy array of the k test scores.
    '''
    from multiprocessing import Pool

    assert(isinstance(k, int))
    assert(k > 0)
    assert(isinstance(p_num, int))
    assert(p_num > 0)

    X, y = shuffle(data_set.data, data_set.target)
    n = data_set.data.shape[0]
    m = n // k
    n = n - (n % k)

    p = Pool(p_num)
    # Fix: pass the shuffled X/y to the workers. Previously the shuffle
    # result was computed but the *unshuffled* data_set arrays were used,
    # unlike the serial cross_validation above. (Also dropped the unused
    # Process import.)
    func_args = [(validation, estimator, X, y, m, index, scaling)
                 for index in range(0, n, m)]
    scores = p.map(argwrapper, func_args)
    p.close()

    return np.array(scores)
def mp_cross_validation_with_train(estimator, data_set, k=5, p_num=4):
    '''
    Cross-validation with multi processing, returning both test and train
    scores per fold as (test_scores, train_scores) numpy arrays.
    '''
    from multiprocessing import Pool

    assert(isinstance(k, int))
    assert(k > 0)
    assert(isinstance(p_num, int))
    assert(p_num > 0)

    X, y = shuffle(data_set.data, data_set.target)
    n = data_set.data.shape[0]
    m = n // k
    n = n - (n % k)

    p = Pool(p_num)
    # Fix: use the shuffled X/y (previously the unshuffled data_set arrays
    # were passed despite shuffling, inconsistent with cross_validation).
    func_args = [(validation_with_train, estimator, X, y, m, index)
                 for index in range(0, n, m)]
    scores = p.map(argwrapper, func_args)
    p.close()

    test_scores = [s[0] for s in scores]
    train_scores = [s[1] for s in scores]
    return np.array(test_scores), np.array(train_scores)
def main():
    """Demo: run serial and multiprocessing cross-validation on the
    'australian' dataset with a 100-unit ELM."""
    from elm import ELM
    from sklearn.preprocessing import normalize
    from sklearn.datasets import fetch_mldata
    # NOTE(review): fetch_mldata was removed in scikit-learn 0.22 (mldata.org
    # is offline); on modern releases this demo needs fetch_openml instead --
    # confirm the pinned scikit-learn version before running.
    data_set = fetch_mldata('australian')

    print(mp_cross_validation(ELM(100), data_set, scaling=True))
    print(cross_validation(ELM(100), data_set, scaling=True))

    data_set.data = normalize(data_set.data)
    print(mp_cross_validation_with_train(ELM(100), data_set))


if __name__ == "__main__":
    main()
| 3.046875 | 3 |
ambari-agent/src/main/python/ambari_agent/DataCleaner.py | flipkart-incubator/incubator-ambari | 2 | 12761867 | <filename>ambari-agent/src/main/python/ambari_agent/DataCleaner.py
#!/usr/bin/env python2.6
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import AmbariConfig
import threading
import os
import time
import re
import logging
logger = logging.getLogger()
class DataCleaner(threading.Thread):
    """Daemon thread that periodically deletes aged agent data files.

    Every cleanup_interval seconds it walks the agent prefix directory and
    removes files matching FILE_NAME_PATTERN older than file_max_age
    seconds. Both settings are read from the [agent] config section and
    clamped to sane minimums (1 day / 1 hour).
    """

    # Fix: dots are escaped so only literal '.txt'/'.pp' suffixes match
    # (the old pattern's bare '.' matched any character).
    FILE_NAME_PATTERN = r'errors-\d+\.txt|output-\d+\.txt|site-\d+\.pp'

    def __init__(self, config):
        threading.Thread.__init__(self)
        self.daemon = True
        logger.info('Data cleanup thread started')
        self.config = config

        self.file_max_age = int(config.get('agent', 'data_cleanup_max_age'))
        if self.file_max_age < 86400:
            logger.warn('The minimum value allowed for data_cleanup_max_age is 1 '
                        'day. Setting data_cleanup_max_age to 86400.')
            self.file_max_age = 86400

        self.cleanup_interval = int(config.get('agent', 'data_cleanup_interval'))
        if self.cleanup_interval < 3600:
            logger.warn('The minimum value allowed for data_cleanup_interval is 1 '
                        'hour. Setting data_cleanup_interval to 3600.')
            # Bug fix: this branch previously assigned 3600 to file_max_age,
            # clobbering the already-validated max-age instead of clamping
            # the interval as the warning message states.
            self.cleanup_interval = 3600

        self.data_dir = config.get('agent', 'prefix')
        self.compiled_pattern = re.compile(self.FILE_NAME_PATTERN)
        self.stopped = False

    def __del__(self):
        logger.info('Data cleanup thread killed.')

    def cleanup(self):
        """Single cleanup pass: remove matching files older than file_max_age."""
        for root, dirs, files in os.walk(self.data_dir):
            for f in files:
                file_path = os.path.join(root, f)
                if self.compiled_pattern.match(f):
                    try:
                        if time.time() - os.path.getmtime(file_path) > self.file_max_age:
                            os.remove(file_path)
                            logger.debug('Removed file: ' + file_path)
                    except Exception:
                        logger.error('Error when removing file: ' + file_path)

    def run(self):
        # Loop until stopped is flipped; sleep between passes.
        while not self.stopped:
            logger.info('Data cleanup started')
            self.cleanup()
            logger.info('Data cleanup finished')
            time.sleep(self.cleanup_interval)
time.sleep(self.cleanup_interval)
def main():
    """Standalone entry point: run the cleaner with the agent's AmbariConfig."""
    data_cleaner = DataCleaner(AmbariConfig.config)
    data_cleaner.start()
    # NOTE(review): run() loops until self.stopped is set, so this join()
    # blocks indefinitely when executed as a script -- confirm intended.
    data_cleaner.join()


if __name__ == "__main__":
    main()
| 2.171875 | 2 |
pyDocStr/document_package.py | LostPy/pydocstr | 1 | 12761868 | import package_to_document
import pyDocStr
import os
# Show which pyDocStr installation is being used and where we run from.
print(pyDocStr.__file__)
current_path = os.getcwd()
print(current_path)
# Generate docstrings for every module of the package (including
# subpackages), writing documented copies into 'package_documented'.
pyDocStr.build_docstrings_package(
    "./pyDocStr/package_to_document",
    new_package_path="./pyDocStr/package_documented",
    subpackages=True,
    level_logger='debug'
)
pyston/converters/__init__.py | druids/django-pyston | 7 | 12761869 | import types
import json
from io import StringIO
from collections import OrderedDict
from defusedxml import ElementTree as ET
from django.core.serializers.json import DjangoJSONEncoder
from django.http.response import HttpResponseBase
from django.template.loader import get_template
from django.utils.encoding import force_text
from django.utils.xmlutils import SimplerXMLGenerator
from django.utils.module_loading import import_string
from django.utils.html import format_html
from pyston.utils.helpers import UniversalBytesIO, serialized_data_to_python
from pyston.utils.datastructures import FieldsetGenerator
from pyston.conf import settings
from .file_generators import CSVGenerator, XLSXGenerator, PDFGenerator, TXTGenerator
def is_collection(data):
    """True when *data* is a list, tuple, set or generator (str/dict are
    deliberately not collections here)."""
    collection_types = (list, tuple, set, types.GeneratorType)
    return isinstance(data, collection_types)
def get_default_converters():
    """
    Register all converters from settings configuration.
    Returns an OrderedDict mapping converter format name to an instance,
    preserving the order declared in settings.CONVERTERS.
    """
    registry = OrderedDict()
    for dotted_path in settings.CONVERTERS:
        instance = import_string(dotted_path)()
        registry[instance.format] = instance
    return registry
def get_default_converter_name(converters=None):
    """
    Gets default converter name: the first key of the registry (insertion
    order), falling back to the settings-built registry when none given.
    """
    if converters is None:
        converters = get_default_converters()
    return list(converters)[0]
def get_converter(result_format, converters=None):
    """
    Gets an converter, returns the class and a content-type.
    Raises ValueError when no converter is registered for result_format.
    """
    if converters is None:
        converters = get_default_converters()
    try:
        return converters[result_format]
    except KeyError:
        raise ValueError('No converter found for type {}'.format(result_format))
def get_converter_name_from_request(request, converters=None, input_serialization=False):
    """
    Function for determining which converter name to use
    for output.

    Matches the request's Accept (or Content-Type, when
    input_serialization=True) against the media types of the registered
    converters via the optional 'mimeparse' package; without mimeparse,
    or without the header in request._rest_context, the first registered
    converter is used.
    """
    try:
        import mimeparse
    except ImportError:
        # mimeparse is optional -- content negotiation is skipped without it.
        mimeparse = None

    context_key = 'accept'
    if input_serialization:
        context_key = 'content_type'

    converters = get_default_converters() if converters is None else converters
    default_converter_name = get_default_converter_name(converters)

    if mimeparse and context_key in request._rest_context:
        supported_mime_types = set()
        converter_map = {}
        preferred_content_type = None
        for name, converter_class in converters.items():
            if name == default_converter_name:
                preferred_content_type = converter_class.media_type
            supported_mime_types.add(converter_class.media_type)
            converter_map[converter_class.media_type] = name
        supported_mime_types = list(supported_mime_types)
        if preferred_content_type:
            # NOTE(review): the default's media type is appended again,
            # presumably so best_match() prefers it on quality ties
            # (mimeparse favors later entries) -- confirm against mimeparse
            # semantics.
            supported_mime_types.append(preferred_content_type)
        try:
            preferred_content_type = mimeparse.best_match(supported_mime_types,
                                                          request._rest_context[context_key])
        except ValueError:
            # Malformed header: fall back to the default converter.
            pass
        default_converter_name = converter_map.get(preferred_content_type, default_converter_name)
    return default_converter_name
def get_converter_from_request(request, converters=None, input_serialization=False):
    """Resolve the converter instance for the request by first negotiating
    its name and then looking it up in the registry."""
    name = get_converter_name_from_request(request, converters, input_serialization)
    return get_converter(name, converters)
def get_supported_mime_types(converters):
    """Collect the media type of every registered converter, in registry order."""
    return [converter.media_type for converter in converters.values()]
class Converter:
    """
    Converter from standard data types to output format (JSON, YAML, Pickle)
    and from input to python objects.

    Subclasses set media_type/format and override _encode (or
    _encode_to_stream) for output and _decode for input.
    """

    charset = 'utf-8'
    media_type = None
    format = None
    allow_tags = False

    @property
    def content_type(self):
        """Full Content-Type header value, e.g. 'text/xml; charset=utf-8'."""
        return '{}; charset={}'.format(self.media_type, self.charset)

    def _encode(self, data, options=None, **kwargs):
        """
        Encodes data to an output string. Subclasses must implement this or
        override _encode_to_stream instead.
        """
        raise NotImplementedError

    def _decode(self, data, **kwargs):
        """
        Decodes string input into Python data.
        """
        raise NotImplementedError

    def _encode_to_stream(self, output_stream, data, options=None, **kwargs):
        """
        Default streaming implementation: encode to a string and write it.
        """
        encoded = self._encode(data, options=options, **kwargs)
        output_stream.write(encoded)

    def _get_output_stream(self, output_stream):
        # Wrap plain streams so bytes/str writes are handled uniformly.
        if isinstance(output_stream, UniversalBytesIO):
            return output_stream
        return UniversalBytesIO(output_stream)

    def encode_to_stream(self, output_stream, data, options=None, **kwargs):
        stream = self._get_output_stream(output_stream)
        self._encode_to_stream(stream, data, options=options, **kwargs)

    def decode(self, data, **kwargs):
        return self._decode(data, **kwargs)
class XMLConverter(Converter):
    """
    Converter for XML.
    Supports only output conversion (``_encode``); ``_decode`` parses input
    with defusedxml's ElementTree for safety against entity attacks.
    """

    media_type = 'text/xml'
    format = 'xml'
    root_element_name = 'response'

    def _to_xml(self, xml, data):
        """Recursively emit ``data`` as XML events on the SAX generator.

        Lazy serializers are materialized first; collections become repeated
        <resource> elements, dict keys become element names, and everything
        else is written as text content.
        """
        from pyston.serializer import LAZY_SERIALIZERS

        if isinstance(data, LAZY_SERIALIZERS):
            self._to_xml(xml, data.serialize())
        elif is_collection(data):
            for item in data:
                xml.startElement('resource', {})
                self._to_xml(xml, item)
                xml.endElement('resource')
        elif isinstance(data, dict):
            # NOTE(review): dict keys are used verbatim as element names --
            # keys that are not valid XML names would produce invalid output;
            # presumably field names are always identifier-like. Confirm.
            for key, value in data.items():
                xml.startElement(key, {})
                self._to_xml(xml, value)
                xml.endElement(key)
        else:
            xml.characters(force_text(data))

    def _encode(self, data, **kwargs):
        """Serialize ``data`` into an XML document rooted at <response>;
        None encodes to the empty string."""
        if data is not None:
            stream = StringIO()

            xml = SimplerXMLGenerator(stream, 'utf-8')
            xml.startDocument()
            xml.startElement(self.root_element_name, {})
            self._to_xml(xml, data)
            xml.endElement(self.root_element_name)
            xml.endDocument()
            return stream.getvalue()
        else:
            return ''

    def _decode(self, data, **kwargs):
        """Parse XML input into an ElementTree Element (defused parser)."""
        return ET.fromstring(data)
class LazyDjangoJSONEncoder(DjangoJSONEncoder):
    """Django JSON encoder extended to handle generators (materialized as
    tuples) and pyston lazy serializers (serialized on demand)."""

    def default(self, o):
        from pyston.serializer import LAZY_SERIALIZERS

        if isinstance(o, types.GeneratorType):
            return tuple(o)
        if isinstance(o, LAZY_SERIALIZERS):
            return o.serialize()
        return super(LazyDjangoJSONEncoder, self).default(o)
class JSONConverter(Converter):
    """
    JSON emitter, understands timestamps (via the Django-aware encoder)
    plus generators and lazy serializers.
    """

    media_type = 'application/json'
    format = 'json'

    def _encode_to_stream(self, output_stream, data, options=None, **kwargs):
        if options is None:
            options = settings.JSON_CONVERTER_OPTIONS
        if data is not None:
            json.dump(data, output_stream, cls=LazyDjangoJSONEncoder,
                      ensure_ascii=False, **options)

    def _decode(self, data, **kwargs):
        return json.loads(data)
class GeneratorConverter(Converter):
    """
    Generator converter is more complicated.
    Contains user readable informations (headers).
    Supports only output.
    Output is flat: nested dicts/collections are rendered into strings.
    It is necessary to set generator_class as class attribute; rows are
    produced lazily as generators and handed to that file generator.
    This class contains little bit low-level implementation.
    """

    # Subclasses must set this to a file generator (CSV, XLSX, ...).
    generator_class = None

    def _render_headers(self, field_name_list):
        """Header row; empty when the fieldset is the single '' placeholder."""
        result = []
        if len(field_name_list) == 1 and '' in field_name_list:
            return result

        for field_name in field_name_list:
            result.append(field_name)
        return result

    def _get_recursive_value_from_row(self, data, key_path):
        """Walk ``key_path`` into ``data``; missing keys and scalar
        dead-ends yield '' so the table stays rectangular. Collections fan
        out into a list of per-item lookups."""
        from pyston.serializer import LAZY_SERIALIZERS

        if isinstance(data, LAZY_SERIALIZERS):
            return self._get_recursive_value_from_row(data.serialize(), key_path)
        elif len(key_path) == 0:
            return data
        elif isinstance(data, dict):
            return self._get_recursive_value_from_row(data.get(key_path[0], ''), key_path[1:])
        elif is_collection(data):
            return [self._get_recursive_value_from_row(val, key_path) for val in data]
        else:
            return ''

    def _render_dict(self, value, first):
        # Top-level dicts are newline-separated 'k: v' lines; nested dicts
        # are parenthesized and comma-separated.
        if first:
            return '\n'.join(('{}: {}'.format(key, self.render_value(val, False)) for key, val in value.items()))
        else:
            return '({})'.format(
                ', '.join(('{}: {}'.format(key, self.render_value(val, False)) for key, val in value.items()))
            )

    def _render_iterable(self, value, first):
        # Same convention as _render_dict: newlines at top level,
        # parenthesized commas when nested.
        if first:
            return '\n'.join((self.render_value(val, False) for val in value))
        else:
            return '({})'.format(', '.join((self.render_value(val, False) for val in value)))

    def render_value(self, value, first=True):
        """Flatten any value to a display string for a single cell."""
        if isinstance(value, dict):
            return self._render_dict(value, first)
        elif is_collection(value):
            return self._render_iterable(value, first)
        else:
            return force_text(value)

    def _get_value_from_row(self, data, field):
        return self.render_value(self._get_recursive_value_from_row(data, field.key_path) or '')

    def _render_row(self, row, field_name_list):
        # One generator of cell strings per row.
        return (self._get_value_from_row(row, field) for field in field_name_list)

    def _render_content(self, field_name_list, converted_data):
        constructed_data = converted_data
        # A single object is rendered as a one-row table.
        if not is_collection(constructed_data):
            constructed_data = [constructed_data]
        return (self._render_row(row, field_name_list) for row in constructed_data)

    def _encode_to_stream(self, output_stream, data, resource=None, requested_fields=None, direct_serialization=False,
                          **kwargs):
        """Build the fieldset (columns) for the resource/requested fields
        and stream headers + rows through the configured file generator."""
        fieldset = FieldsetGenerator(
            resource,
            force_text(requested_fields) if requested_fields is not None else None,
            direct_serialization=direct_serialization
        ).generate()
        self.generator_class().generate(
            self._render_headers(fieldset),
            self._render_content(fieldset, data),
            output_stream
        )
class CSVConverter(GeneratorConverter):
    """Tabular CSV output (output conversion only)."""

    format = 'csv'
    media_type = 'text/csv'
    generator_class = CSVGenerator
    allow_tags = True
class XLSXConverter(GeneratorConverter):
    """XLSX spreadsheet output (output conversion only); requires the
    'xlsxwriter' library at runtime."""

    format = 'xlsx'
    media_type = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
    generator_class = XLSXGenerator
    allow_tags = True
class PDFConverter(GeneratorConverter):
    """PDF output (output conversion only); requires the 'pisa' library
    at runtime."""

    format = 'pdf'
    media_type = 'application/pdf'
    generator_class = PDFGenerator
class TXTConverter(GeneratorConverter):
    """Plain-text table output (output conversion only)."""

    format = 'txt'
    media_type = 'plain/text'
    generator_class = TXTGenerator
    allow_tags = True
class HTMLConverter(Converter):
    """
    Converter for HTML.

    Supports only output conversion and should be used only for debug:
    it renders the JSON-serialized payload (with ``url`` values turned
    into clickable links) inside a browsable HTML template together with
    the resource's forms and the caller's permissions.
    """

    media_type = 'text/html'
    format = 'html'
    template_name = 'pyston/html_converter.html'

    def _get_put_form(self, resource, obj):
        """Return the edit (PUT) form for ``obj``, or None when the resource is
        not an object resource or PUT is not permitted."""
        from pyston.resource import BaseObjectResource  # local import avoids a circular dependency

        return (
            resource._get_form(inst=obj)
            if isinstance(resource, BaseObjectResource) and resource.has_put_permission(obj=obj)
            else None
        )

    def _get_post_form(self, resource, obj):
        """Return the create (POST) form, or None when not applicable/allowed."""
        from pyston.resource import BaseObjectResource  # local import avoids a circular dependency

        return (
            resource._get_form(inst=obj)
            if isinstance(resource, BaseObjectResource) and resource.has_post_permission(obj=obj)
            else None
        )

    def _get_forms(self, resource, obj):
        """Collect the POST/PUT forms rendered into the debug page."""
        return {
            'post': self._get_post_form(resource, obj),
            'put': self._get_put_form(resource, obj),
        }

    def _get_converter(self, resource):
        """Inner converter used to serialize the payload shown on the page."""
        return JSONConverter()

    def _get_permissions(self, resource, obj):
        """Map each HTTP method to whether the current request may use it on ``obj``."""
        return {
            'post': resource.has_post_permission(obj=obj),
            'get': resource.has_get_permission(obj=obj),
            'put': resource.has_put_permission(obj=obj),
            'delete': resource.has_delete_permission(obj=obj),
            'head': resource.has_head_permission(obj=obj),
            'options': resource.has_options_permission(obj=obj),
        } if resource else {}

    def _update_headers(self, http_headers, resource, converter):
        """Overwrite the Content-Type header with the inner converter's content type."""
        http_headers['Content-Type'] = converter.content_type
        return http_headers

    def encode_to_stream(self, output_stream, data, options=None, **kwargs):
        """Render the debug HTML page directly into an HTTP response object."""
        # Bug fix: the original check ``output_stream is not HttpResponseBase``
        # only rejected the class object itself and never validated the stream.
        # The assertion message makes the intent clear: a type check.
        assert isinstance(output_stream, HttpResponseBase), 'Output stream must be http response'

        self._get_output_stream(output_stream).write(
            self._encode(data, response=output_stream, options=options, **kwargs)
        )

    def _convert_url_to_links(self, data):
        """Recursively replace every value stored under a ``url`` key with an
        HTML anchor pointing at that URL."""
        if isinstance(data, list):
            return [self._convert_url_to_links(val) for val in data]
        elif isinstance(data, dict):
            return OrderedDict((
                (key, format_html('<a href=\'{0}\'>{0}</a>', val) if key == 'url' else self._convert_url_to_links(val))
                for key, val in data.items()
            ))
        else:
            return data

    def _encode(self, data, response=None, http_headers=None, resource=None, result=None, **kwargs):
        """Build the HTML page body for ``data`` and return it as a string.

        Side effect: forces ``response.status_code`` to 200 (see comment below).
        """
        from pyston.resource import BaseObjectResource  # local import avoids a circular dependency

        http_headers = {} if http_headers is None else http_headers.copy()
        converter = self._get_converter(resource)

        http_headers = self._update_headers(http_headers, resource, converter)
        obj = (
            resource._get_obj_or_none() if isinstance(resource, BaseObjectResource) and resource.has_permission()
            else None
        )

        kwargs.update({
            'http_headers': http_headers,
            'resource': resource,
        })

        data_stream = UniversalBytesIO()
        converter._encode_to_stream(data_stream, self._convert_url_to_links(serialized_data_to_python(data)), **kwargs)

        context = kwargs.copy()
        context.update({
            'permissions': self._get_permissions(resource, obj),
            'forms': self._get_forms(resource, obj),
            'output': data_stream.getvalue(),
            'name': resource._get_name() if resource and resource.has_permission() else response.status_code
        })

        # All responses get status code 200, because a status-only response
        # (e.g. 204) would not be rendered by the browser.
        response.status_code = 200
        return get_template(self.template_name).render(context, request=resource.request if resource else None)
| 1.9375 | 2 |
motion-track.py | priyablue/motion-track | 0 | 12761870 | <filename>motion-track.py<gh_stars>0
#!/usr/bin/env python
progname = "motion_track.py"
ver = "version 0.96"

"""
motion-track ver 0.95 written by <NAME> <EMAIL>
Raspberry (Pi) - python opencv2 motion tracking using picamera module

This is a raspberry pi python opencv2 motion tracking demonstration program.
It will detect motion in the field of view and use opencv to calculate the
largest contour and return its x,y coordinate.  I will be using this for
a simple RPI robotics project, but thought the code would be useful for
other users as a starting point for a project.  I did quite a bit of
searching on the internet, github, etc but could not find a similar
implementation that returns x,y coordinates of the most dominate moving
object in the frame.  Some of this code is base on a YouTube tutorial by
<NAME> using C here https://www.youtube.com/watch?v=X6rPdRZzgjg

Here is a my YouTube video demonstrating this demo program using a
Raspberry Pi B2 https://youtu.be/09JS7twPBsQ

Requires a Raspberry Pi with a RPI camera module installed and configured
dependencies.  Cut and paste command below into a terminal sesssion to
download and install motion_track demo.  Program will be installed to
~/motion-track-demo folder

curl -L https://raw.github.com/pageauc/motion-track/master/motion-track-install.sh | bash

To Run Demo

cd ~/motion-track-demo
./motion-track.py

"""
print("%s %s using python2 and OpenCV2" % (progname, ver))
print("Loading Please Wait ....")
import os

# Resolve paths relative to this script so config.py is found next to it
# regardless of the current working directory.
mypath=os.path.abspath(__file__)   # Find the full path of this python script
baseDir=mypath[0:mypath.rfind("/")+1]  # get the path location only (excluding script name)
baseFileName=mypath[mypath.rfind("/")+1:mypath.rfind(".")]
progName = os.path.basename(__file__)

# Check for variable file to import; if missing, try to download it from
# GitHub, and exit with instructions when the download fails.
configFilePath = baseDir + "config.py"
if not os.path.exists(configFilePath):
    print("ERROR - Missing config.py file - Could not find Configuration file %s" % (configFilePath))
    import urllib2
    config_url = "https://raw.github.com/pageauc/motion-track/master/config.py"
    print("   Attempting to Download config.py file from %s" % ( config_url ))
    try:
        wgetfile = urllib2.urlopen(config_url)
    # Bug fix: a bare "except:" also swallowed SystemExit/KeyboardInterrupt;
    # narrowed to Exception so Ctrl-C still works during the download.
    except Exception:
        print("ERROR - Download of config.py Failed")
        print("   Try Rerunning the motion-track-install.sh Again.")
        print("   or")
        print("   Perform GitHub curl install per Readme.md")
        print("   and Try Again")
        print("Exiting %s" % ( progName ))
        quit()
    # Use a context manager so the file handle is closed even if the
    # write fails part-way through.
    with open('config.py', 'wb') as f:
        f.write(wgetfile.read())

# Read Configuration variables from config.py file
from config import *

# import the necessary packages
import io
import time
import cv2

from picamera.array import PiRGBArray
from picamera import PiCamera
from threading import Thread
#-----------------------------------------------------------------------------------------------
class PiVideoStream:
    """Threaded wrapper around the PiCamera video port.

    A daemon thread continuously grabs frames so that ``read()`` always
    returns the most recent frame without blocking the caller.
    """

    def __init__(self, resolution=(CAMERA_WIDTH, CAMERA_HEIGHT), framerate=CAMERA_FRAMERATE, rotation=0, hflip=False, vflip=False):
        # Configure the camera hardware and an in-memory capture buffer.
        self.camera = PiCamera()
        self.camera.resolution = resolution
        self.camera.rotation = rotation
        self.camera.framerate = framerate
        self.camera.hflip = hflip
        self.camera.vflip = vflip
        self.rawCapture = PiRGBArray(self.camera, size=resolution)
        self.stream = self.camera.capture_continuous(self.rawCapture,
                                                     format="bgr", use_video_port=True)
        # Latest captured frame and the shutdown flag checked by the worker.
        self.frame = None
        self.stopped = False

    def start(self):
        """Spawn the daemon capture thread; returns self for call chaining."""
        worker = Thread(target=self.update, args=())
        worker.daemon = True
        worker.start()
        return self

    def update(self):
        """Worker loop: keep ``self.frame`` pointing at the newest capture."""
        for raw in self.stream:
            self.frame = raw.array
            # Reset the capture buffer so the next frame starts clean.
            self.rawCapture.truncate(0)
            if self.stopped:
                # Release all camera resources before the thread exits.
                self.stream.close()
                self.rawCapture.close()
                self.camera.close()
                return

    def read(self):
        """Return the most recently captured frame (None until the first grab)."""
        return self.frame

    def stop(self):
        """Ask the worker thread to shut down and release the camera."""
        self.stopped = True
#-----------------------------------------------------------------------------------------------
def show_FPS(start_time, frame_count):
    """Print a processing-rate message every FRAME_COUNTER frames.

    Only active when the module-level ``debug`` flag is set.  Returns the
    (possibly reset) start time and frame counter so the caller can carry
    them between loop iterations.
    """
    if debug:
        if frame_count < FRAME_COUNTER:
            frame_count += 1
        else:
            elapsed = float(time.time() - start_time)
            fps = float(frame_count / elapsed)
            print("Processing at %.2f fps last %i frames" % (fps, frame_count))
            frame_count = 0
            start_time = time.time()
    return start_time, frame_count
#-----------------------------------------------------------------------------------------------
def motion_track():
    """Main loop: grab frames from the threaded camera stream, diff consecutive
    grayscale frames, and track the largest moving contour.

    Reads display/tuning settings from config.py globals (window_on, debug,
    MIN_AREA, BLUR_SIZE, THRESHOLD_SENSITIVITY, ...).  Runs until 'q' is
    pressed in the OpenCV window (only when window_on is set; otherwise the
    loop only ends via Ctrl-C in the caller).
    """
    print("Initializing Camera ....")
    # Save images to an in-program stream
    # Setup video stream on a processor Thread for faster speed
    vs = PiVideoStream().start()
    vs.camera.rotation = CAMERA_ROTATION
    vs.camera.hflip = CAMERA_HFLIP
    vs.camera.vflip = CAMERA_VFLIP
    time.sleep(2.0)  # give the camera time to warm up and the first frame to arrive
    if window_on:
        print("press q to quit opencv display")
    else:
        print("press ctrl-c to quit")
    print("Start Motion Tracking ....")
    cx = 0
    cy = 0
    cw = 0
    ch = 0
    frame_count = 0
    start_time = time.time()
    # initialize image1 using image2 (only done first time)
    image2 = vs.read()
    image1 = image2
    grayimage1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
    first_image = False  # NOTE(review): never read afterwards — looks like dead state
    still_scanning = True
    while still_scanning:
        image2 = vs.read()
        start_time, frame_count = show_FPS(start_time, frame_count)
        # initialize variables
        motion_found = False
        biggest_area = MIN_AREA  # contours smaller than MIN_AREA are ignored
        # At this point the image is available as stream.array
        # Convert to gray scale, which is easier
        grayimage2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
        # Get differences between the two greyed, blurred images
        differenceimage = cv2.absdiff(grayimage1, grayimage2)
        differenceimage = cv2.blur(differenceimage,(BLUR_SIZE,BLUR_SIZE))
        # Get threshold of difference image based on THRESHOLD_SENSITIVITY variable
        retval, thresholdimage = cv2.threshold(differenceimage,THRESHOLD_SENSITIVITY,255,cv2.THRESH_BINARY)
        # Get all the contours found in the thresholdimage
        # (two-value return is the OpenCV 2.x findContours API)
        contours, hierarchy = cv2.findContours(thresholdimage,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
        total_contours = len(contours)
        # save grayimage2 to grayimage1 ready for next image2
        grayimage1 = grayimage2
        # find contour with biggest area
        for c in contours:
            # get area of next contour
            found_area = cv2.contourArea(c)
            # find the middle of largest bounding rectangle
            if found_area > biggest_area:
                motion_found = True
                biggest_area = found_area
                (x, y, w, h) = cv2.boundingRect(c)
                cx = int(x + w/2)   # put circle in middle of width
                cy = int(y + h/6)   # put circle closer to top
                cw = w
                ch = h
        if motion_found:
            # Do Something here with motion data
            if window_on:
                # show small circle at motion location
                if SHOW_CIRCLE:
                    cv2.circle(image2,(cx,cy),CIRCLE_SIZE,(0,255,0), LINE_THICKNESS)
                else:
                    # NOTE(review): rectangle corners mix the centre point (cx,cy)
                    # with x+cw/y+ch; (x,y),(x+cw,y+ch) would be the usual
                    # bounding box — confirm whether the offset is intentional.
                    cv2.rectangle(image2,(cx,cy),(x+cw,y+ch),(0,255,0), LINE_THICKNESS)
            if debug:
                print("Motion at cx=%3i cy=%3i total_Contours=%2i  biggest_area:%3ix%3i=%5i" % (cx ,cy, total_contours, cw, ch, biggest_area))
        if window_on:
            if diff_window_on:
                cv2.imshow('Difference Image',differenceimage)
            if thresh_window_on:
                cv2.imshow('OpenCV Threshold', thresholdimage)
            if WINDOW_BIGGER > 1:  # Note setting a bigger window will slow the FPS
                image2 = cv2.resize( image2,( big_w, big_h ))
            cv2.imshow('Movement Status  (Press q in Window to Quit)', image2)
            # Close Window if q pressed while movement status window selected
            if cv2.waitKey(1) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                vs.stop()
                print("End Motion Tracking")
                still_scanning = False
#-----------------------------------------------------------------------------------------------
if __name__ == '__main__':
    try:
        motion_track()
    finally:
        # Always print the exit banner, even after Ctrl-C or an error.
        border = "+++++++++++++++++++++++++++++++++++"
        for line in ("", border, "%s %s - Exiting" % (progname, ver), border, ""):
            print(line)
| 3.078125 | 3 |
ex39.py | FernandaMakiHirose/programas-jupyter | 0 | 12761871 | <gh_stars>0
# Exercise: read a car's speed; above the 80 km/h limit the driver is fined
# R$7.00 for every km/h over the limit.
SPEED_LIMIT = 80
FINE_PER_KM = 7

velocidade = float(input('Qual é a velocidade atual do carro? '))
if velocidade > SPEED_LIMIT:
    print('Multado!')
    multa = (velocidade - SPEED_LIMIT) * FINE_PER_KM
    print(f'Você deve pagar uma multa de R${multa}')
print('Tenha um bom dia.')
testing/test_attractors.py | arielbro/attractor_learning | 0 | 12761872 | import numpy as np
import logic
from unittest import TestCase
import graphs
import sympy
from collections import namedtuple
import random
from attractors import find_num_attractors_onestage, \
vertex_model_impact_scores, stochastic_vertex_model_impact_scores, find_num_steady_states, \
find_attractors_dubrova, find_attractors_onestage_enumeration, ImpactType, \
vertex_state_impact_scores, stochastic_vertex_state_impact_scores, graph_model_impact_score, \
graph_state_impact_score, stochastic_graph_model_impact_score, stochastic_graph_state_impact_score
import attractors
dubrova_path = "../" + attractors.dubrova_path
# Parameter bundles describing single test scenarios.  ``namedtuple`` accepts
# field names as a sequence just as well as a space-separated string; the
# runtime type-name strings are kept identical to the originals.
ILPAttractorExperimentParameters = namedtuple(
    "AttractorExperimentParameters", ["G", "T", "P", "n_attractors"])
VertexModelImpactExperimentParameters = namedtuple(
    "VertexModelImpactExperimentParameters",
    ["G", "current_attractors", "T", "P", "impact_types", "relative_basins",
     "maximal_bits", "impacts"])
VertexStateImpactExperimentParameters = namedtuple(
    "VertexStateImpactExperimentParameters",
    ["G", "current_attractors", "relative_basins", "max_transient_len",
     "impacts"])
StochasticVertexModelImpactExperimentParameters = namedtuple(
    "StochasticVertexModelImpactExperimentParameters",
    ["G", "current_attractors", "bits_of_change", "relative_basins",
     "impact_type", "impacts"])
StochasticVertexStateImpactExperimentParameters = namedtuple(
    "StochasticVertexStateImpactExperimentParameters", ["G", "impacts"])
GraphModelImpactExperimentParameters = namedtuple(
    "GraphModelImpactExperimentParameters",
    ["G", "current_attractors", "T", "P", "impact_types", "relative_basins",
     "maximal_bits", "impact"])
GraphStateImpactExperimentParameters = namedtuple(
    "GraphStateImpactExperimentParameters",
    ["G", "current_attractors", "relative_basins", "max_transient_len",
     "maximal_bits", "impact"])
StochasticGraphModelImpactExperimentParameters = namedtuple(
    "StochasticGraphModelImpactExperimentParameters",
    ["G", "current_attractors", "bits_of_change", "relative_basins",
     "impact_type", "impact"])
StochasticGraphStateImpactExperimentParameters = namedtuple(
    "StochasticGraphStateImpactExperimentParameters",
    ["G", "bits_of_change", "impact"])
DubrovaExperimentParameters = namedtuple(
    "DubrovaExperimentParameters", ["G", "mutate", "n_attractors"])
class TestAttractors(TestCase):
    def test_num_attractors_onestage(self):
        """Exhaustive check of find_num_attractors_onestage on hand-built toy networks.

        Each experiment pairs a Boolean network G with a maximum attractor
        length T, a cap P on the number of attractors the ILP may search for,
        and the expected attractor count (so expected == min(true count
        reachable within T, P)).  Solver options (sampling, MIP warm start,
        boolean simplification, key slice size) are randomized per experiment.
        """
        experiments = []
        """test on known toy models"""
        # 0, 1
        G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
                           vertex_functions=[sympy.Nand])
        experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=1, n_attractors=0))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=3, n_attractors=1))
        # 2, 3
        G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
                           vertex_functions=[logic.SymmetricThresholdFunction(signs=[-1], threshold=1)])
        experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=1, n_attractors=0))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=3, n_attractors=1))
        # 4, 5
        G = graphs.Network(vertex_names=["A"], edges=[],
                           vertex_functions=[None])
        experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=3, n_attractors=2))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=3, n_attractors=2))
        # 6, 7
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")],
                           vertex_functions=[logic.SymmetricThresholdFunction(signs=[1], threshold=1),
                                             None])
        experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=5, n_attractors=4))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=5, n_attractors=4))
        # 8, 9
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")],
                           vertex_functions=[logic.SymmetricThresholdFunction(signs=[-1], threshold=1),
                                             None])
        experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=1, n_attractors=0))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=3, n_attractors=2))
        # 10, 11
        G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
                           vertex_functions=[sympy.And])
        experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=2, n_attractors=2))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=3, P=1, n_attractors=1))
        # 12, 13
        G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
                           vertex_functions=[None])
        experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=3, n_attractors=2))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=3, n_attractors=2))
        # 14, 15
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")],
                           vertex_functions=[None, None])
        experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=5, n_attractors=4))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=6, n_attractors=4))
        # 16, 17
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")],
                           vertex_functions=[None, True])
        experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=5, n_attractors=2))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=6, n_attractors=2))
        # 18, 19, 20
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")],
                           vertex_functions=[sympy.Nand, sympy.And])
        experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=3, n_attractors=0))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=4, P=2, n_attractors=1))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=4, P=1, n_attractors=1))
        # 21, 22, 23
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")],
                           vertex_functions=[sympy.Nand, sympy.Nand])
        experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=3, n_attractors=2))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=3, n_attractors=3))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=15, P=15, n_attractors=3))
        # 24, 25
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")],
                           vertex_functions=[lambda x: True, lambda x: False])
        experiments.append(ILPAttractorExperimentParameters(G=G, T=4, P=2, n_attractors=1))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=2, n_attractors=1))
        # 26, 27
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")],
                           vertex_functions=[None, sympy.And])
        experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=4, n_attractors=3))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=4, n_attractors=2))
        # 28, 29
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")],
                           vertex_functions=[None, lambda _: True])
        experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=3, n_attractors=1))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=4, P=2, n_attractors=1))
        # 30, 31
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")],
                           vertex_functions=[None, None])
        experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=6, n_attractors=3))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=6, n_attractors=2))
        # 32, 33, 34, 35, 36
        G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "A"), ("C", "A")],
                           vertex_functions=[logic.SymmetricThresholdFunction.from_function(sympy.Nand, 2),
                                             logic.SymmetricThresholdFunction.from_function(sympy.Nand, 1),
                                             logic.SymmetricThresholdFunction.from_function(sympy.Nand, 0)])
        experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=3, n_attractors=3))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=4, n_attractors=3))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=3, n_attractors=3))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=4, n_attractors=4))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=3, P=4, n_attractors=4))
        # 37
        G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "A"), ("C", "A")],
                           vertex_functions=[logic.SymmetricThresholdFunction.from_function(sympy.Nand, 2),
                                             logic.SymmetricThresholdFunction.from_function(sympy.Nand, 1),
                                             None])
        experiments.append(ILPAttractorExperimentParameters(G=G, T=3, P=3, n_attractors=3))
        # 38, 39, 40
        G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand]*3)
        experiments.append(ILPAttractorExperimentParameters(G=G, T=6, P=2, n_attractors=2))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=10, P=10, n_attractors=2))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=5, P=10, n_attractors=1))
        # 41, 42
        # acyclic, should have 2**#input_nodes attractors of length 1
        G = graphs.Network(vertex_names=["v1", "v2", "v3", "v4", "v5", "v6"],
                           edges=[("v1", "v4"), ("v2", "v4"), ("v1", "v5"), ("v4", "v6")],
                           vertex_functions=[sympy.Nand]*6)
        experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=10, n_attractors=8))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=6, P=10, n_attractors=8))
        # 43, 44, 45
        G = graphs.Network(vertex_names=["A1", "B1", "B2", "C1", "C2"],
                           edges=[("A1", "A1"), ("B1", "B2"), ("B2", "B1"), ("C1", "C2"), ("C2", "C1")],
                           vertex_functions=[sympy.And]*5)
        experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=10, n_attractors=8))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=18, n_attractors=18))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=3, P=40, n_attractors=20))  # offsets!
        # 46, 47, 48
        # a failed random graph added as a constant test
        G = graphs.Network(
            vertex_names=['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16',
                          '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31',
                          '32', '33', '34'],
            edges=[('1', '2'), ('2', '16'), ('3', '17'), ('5', '15'), ('6', '29'), ('7', '28'), ('8', '22'),
                   ('9', '28'), ('10', '18'), ('11', '15'), ('12', '24'), ('13', '14'), ('15', '18'), ('16', '26'),
                   ('17', '27'), ('18', '20'), ('19', '23'), ('20', '27'), ('23', '26'), ('24', '29'), ('25', '33'),
                   ('26', '30'), ('27', '32'), ('28', '32'), ('30', '32'), ('31', '34'), ('32', '33'), ('33', '34')],
            vertex_functions=[None, None, sympy.Nand, None, None, None, None, None, None, None, None, None, None, None,
                              sympy.Or, sympy.Nand,
                              sympy.Nand, sympy.Nand, sympy.Nand, None, sympy.Xor, None, sympy.And, sympy.Nand,
                              sympy.Xor, None, sympy.And, sympy.Nand, sympy.And, sympy.Xor, sympy.Or, None, sympy.Or,
                              sympy.And, sympy.And])
        experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=6, n_attractors=6))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=10, n_attractors=10))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=10, n_attractors=10))
        # 49, 50, 51
        # G = graphs.Network.parse_cnet("C:\\Users\\ariel\\Downloads\\Attractors - for Ariel"
        #                               "\\Attractors - for Ariel\\BNS_Dubrova_2011\\MAPK_large2.cnet")
        # experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=15, n_attractors=12))
        # experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=15, n_attractors=14))
        # experiments.append(ILPAttractorExperimentParameters(G=G, T=3, P=15, n_attractors=14))
        G = graphs.Network.parse_cnet("C:\\Users\\ariel\\Downloads\\Attractors - for Ariel"
                                      "\\Attractors - for Ariel\\BNS_Dubrova_2011\\tcr.cnet")
        experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=15, n_attractors=8))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=6, P=15, n_attractors=9))
        experiments.append(ILPAttractorExperimentParameters(G=G, T=7, P=15, n_attractors=9))

        # for _ in range(5):
        #     size = 35
        #     G = graphs.Network(vertex_names=list(range(size)),
        #                        edges=[(i, random.choice(list(range(i+1, size)))) for i in range(size)
        #                               if random.random() < 0.8 and i != size-1],
        #                        vertex_functions=[random.choice([sympy.And, sympy.Nand, sympy.Or, sympy.Xor])
        #                                          for _ in range(size)])
        #     input_nodes = 0
        #     for v in G.vertices:
        #         is_input = True
        #         for e in G.edges:
        #             if e[1] == v:
        #                 is_input = False
        #                 break
        #         if is_input:
        #             input_nodes += 1
        #     attractor_number = 2**input_nodes
        #     experiments.append(ExperimentParameters(G=G, T=1, P=3, n_attractors=min(3, attractor_number)))
        #     experiments.append(ExperimentParameters(G=G, T=2, P=10, n_attractors=min(10, attractor_number)))
        #     experiments.append(ExperimentParameters(G=G, T=10, P=3, n_attractors=min(3, attractor_number)))

        # TODO: figure out how disjoint long attractors work together (multiplying doesn't account for offsets)
        # """test on basic semi-random networks: create connectivity components of acyclis networks and simple cycles"""
        # n_random_experiment = 0
        # while n_random_experiment < 10:
        #     n_components = random.randint(1, 3)
        #     attractor_number = 1
        #     max_attractor_len = 0
        #     cur_graph = None
        #     for n_component in range(n_components):  # TODO: change to graph union method
        #         comp_size = random.randint(1, 5)
        #         V = [i for i in range(comp_size)]
        #         E = []
        #         comp_type =random.choice(["cycle", "acyclic"])
        #         if comp_type == "acyclic":
        #             for i in range(len(V) - 1):  # create only forward facing edges
        #                 for j in range(i+1, len(V)):
        #                     if random.random() <= 0.8:
        #                         E.append((V[i], V[j]))
        #             component_graph = graphs.Network(vertex_names=V, edges=E)
        #             restriction_level = random.choice([graphs.FunctionTypeRestriction.NONE,
        #                                                graphs.FunctionTypeRestriction.SYMMETRIC_THRESHOLD,
        #                                                graphs.FunctionTypeRestriction.SIMPLE_GATES])
        #             component_graph.randomize_functions(function_type_restriction=restriction_level)
        #             input_nodes = 0
        #             for v in V:
        #                 is_input = True
        #                 for e in E:
        #                     if e[1] == v:
        #                         is_input = False
        #                         break
        #                 if is_input:
        #                     input_nodes += 1
        #             attractor_number *= 2**input_nodes
        #             max_attractor_len = max(max_attractor_len, 1)
        #         elif comp_type == "cycle":
        #             """currently supports only a cycle of identity function, using a group theory theorem from
        #             https://www.quora.com/How-many-unique-binary-matrices-are-there-up-to-rotations-translations-and-flips
        #             , can later add negation cycles"""
        #             for i in range(len(V)):
        #                 E.append((V[i], V[(i + 1) % len(V)]))
        #             component_graph = graphs.Network(vertex_names=V, edges=E, vertex_functions=[sympy.And]*len(V))
        #             attractor_number *= binary_necklaces(len(V))
        #             max_attractor_len = max(max_attractor_len, len(V))
        #         cur_graph = component_graph if cur_graph is None else cur_graph + component_graph
        #     if attractor_number * len(cur_graph.vertices) * max_attractor_len <= 250:
        #         experiments.append(ExperimentParameters(G=cur_graph, T=max_attractor_len,
        #                                                 P=attractor_number + 1,
        #                                                 n_attractors=attractor_number))
        #         n_random_experiment += 1

        # Run the ILP solver on every experiment with randomized solver options
        # and compare the reported attractor count against the expectation.
        print "number of experiments (with keys)={}".format(len(experiments))
        for i, experiment in enumerate(experiments):
            print "experiment #{}".format(i)
            print "n={}, T={}, P={}, expected_n_attractors={}".format(len(experiment.G.vertices),
                                                                      experiment.T, experiment.P, experiment.n_attractors)
            # continue
            use_sampling = bool(random.randint(0, 1))
            use_sampling_for_mip_start = bool(random.randint(0, 1))
            simplify = bool(random.randint(0, 1))
            key_slice_size = random.randint(1, 15)
            print "key_slice_size={}".format(key_slice_size)
            n_attractors = find_num_attractors_onestage(G=experiment.G, max_len=experiment.T, max_num=experiment.P,
                                                        use_sat=False, verbose=False,
                                                        sampling_bounds=(3, 3) if use_sampling else None,
                                                        use_sampling_for_mip_start=use_sampling_for_mip_start,
                                                        simplify_general_boolean=simplify,
                                                        key_slice_size=key_slice_size)
            try:
                self.assertEqual(n_attractors, experiment.n_attractors)
            except AssertionError as e:
                print e
                print experiment.G
                raise e
            except Exception as e:
                raise e

        # print "number of experiments (without keys)={}".format(len(experiments))
        # for i, experiment in enumerate(experiments):
        #     print "experiment #{}".format(i)h
        #     print "n={}, T={}, P={}, expected_n_attractors={}".format(len(experiment.G.vertices),
        #                                                   experiment.T, experiment.P, experiment.n_attractors)
        #     # continue
        #     n_attractors = find_num_attractors_onestage(G=experiment.G, max_len=experiment.T, max_num=experiment.P,
        #                                                 use_sat=False, verbose=False,
        #                                                 use_state_keys=False, require_result=experiment.n_attractors)
        #     try:
        #         self.assertEqual(n_attractors, experiment.n_attractors)
        #     except AssertionError as e:
        #         print e
        #         print experiment.G
        #         raise e
    def test_vertex_degeneracy_scores(self):
        """Placeholder: vertex degeneracy scoring is not covered yet.

        Deliberately fails so the missing coverage is visible in test runs.
        """
        self.assertTrue(False)  # TODO: implement degeneracy-score tests
    def test_graph_state_impact_scores(self):
        """Test graph_state_impact_score on small hand-analyzed networks.

        Each GraphStateImpactExperimentParameters bundles a Boolean network
        (attractors found with Dubrova's tool), optional relative attractor
        basin sizes, a bound on the transient length, a bound on the number
        of state bits allowed to change, and the impact score expected from
        manual analysis of the network's dynamics.  Computed and expected
        scores are compared after rounding to 5 decimal places (numeric
        noise was observed in experiment #16 regardless of key slice size).
        """
        experiments = []
        G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
                           vertex_functions=[sympy.Nand])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #0
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                max_transient_len=1,
                                                                maximal_bits=1,
                                                                impact=0))
        # experiment #1
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                max_transient_len=0,
                                                                maximal_bits=1,
                                                                impact=0))
        # experiment #2
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                max_transient_len=30,
                                                                maximal_bits=1,
                                                                impact=0))
        # experiment #3
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                max_transient_len=30,
                                                                maximal_bits=10,
                                                                impact=0))
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")],
                           vertex_functions=[sympy.Nand, None])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #4
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                max_transient_len=1,
                                                                maximal_bits=1,
                                                                impact=0))
        # experiment #5
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                max_transient_len=0,
                                                                maximal_bits=1,
                                                                impact=0))
        # experiment #6
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                max_transient_len=30,
                                                                maximal_bits=1,
                                                                impact=0))
        # experiment #7
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                max_transient_len=30,
                                                                maximal_bits=10,
                                                                impact=0))
        G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
                           vertex_functions=[sympy.And])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #8
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                max_transient_len=5,
                                                                maximal_bits=1,
                                                                impact=1))
        # experiment #9
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                max_transient_len=0,
                                                                maximal_bits=1,
                                                                impact=1))
        # experiment #10
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                max_transient_len=5,
                                                                maximal_bits=5,
                                                                impact=1))
        # experiment #11
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=[0.1, 0.9],
                                                                max_transient_len=5,
                                                                maximal_bits=5,
                                                                impact=1))
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")],
                           vertex_functions=[sympy.And, None])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #12
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=[0.1, 0.9],
                                                                max_transient_len=5,
                                                                maximal_bits=5,
                                                                impact=1))
        G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand, sympy.Nand, sympy.Nand])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #13
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                max_transient_len=0,
                                                                maximal_bits=1,
                                                                impact=1))
        # experiment #14
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=[0.1, 0.9],
                                                                max_transient_len=5,
                                                                maximal_bits=5,
                                                                impact=1))
        G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand, sympy.Nand, sympy.And])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #15
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                max_transient_len=0,
                                                                maximal_bits=1,
                                                                impact=1))
        G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand, sympy.Nand, lambda _: True])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #16
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                max_transient_len=0,
                                                                maximal_bits=1,
                                                                impact=0))
        # experiment #17
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                max_transient_len=0,
                                                                maximal_bits=3,
                                                                impact=0))
        # experiment #18
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                max_transient_len=5,
                                                                maximal_bits=2,
                                                                impact=0))
        G = graphs.Network(vertex_names=["A", "B", "C", "D"], edges=[("B", "A"), ("C", "A"), ("D", "A"),
                                                                     ("A", "B"), ("C", "B"), ("D", "B"),
                                                                     ("A", "C"), ("B", "C"), ("D", "C"),
                                                                     ("A", "D"), ("B", "D"), ("C", "D")],
                           vertex_functions=[lambda a, b, c: a + b + c > 1 for _ in range(4)])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # 0000 and 1111 are stable points, and attract everything with hamming distance <= 1,
        # where 2 bits of change land right into another attractor.
        # Other three two-state attractors are unstable under one bit change, with transient length of 1,
        # Or they can be switched between eachother/stables with 2 (same as 0000/1111 ones, if needed)
        # bits of change.
        # experiment #19
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                max_transient_len=0,
                                                                maximal_bits=1,
                                                                impact=0))
        # experiment #20
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                max_transient_len=1,
                                                                maximal_bits=1,
                                                                impact=3 / 5.0))
        # experiment #21
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                max_transient_len=5,
                                                                maximal_bits=1,
                                                                impact=3 / 5.0))
        # experiment #22
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                max_transient_len=0,
                                                                maximal_bits=2,
                                                                impact=1))
        # experiment #23
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                max_transient_len=3,
                                                                maximal_bits=2,
                                                                impact=1))
        relative_basins = [5 / float(16) if len(attractor) == 1 else 2 / float(16) for
                           attractor in current_attractors]
        # experiment #24
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=relative_basins,
                                                                max_transient_len=5,
                                                                maximal_bits=1,
                                                                impact=6 / 16.0))
        # experiment #25
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=relative_basins,
                                                                max_transient_len=0,
                                                                maximal_bits=2,
                                                                impact=1))
        G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "C")],
                           vertex_functions=[None, sympy.And, sympy.And])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # 000, 110 and 111 are the steady states. First is stable, other can change on
        # right vertex change, B with one step and C immediately.
        # experiment #26
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                max_transient_len=0,
                                                                maximal_bits=1,
                                                                impact=2 / 3.0))
        # experiment #27
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                max_transient_len=0,
                                                                maximal_bits=2,
                                                                impact=2 / 3.0))
        # experiment #28
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                max_transient_len=5,
                                                                maximal_bits=5,
                                                                impact=2 / 3.0))
        relative_len_decider = lambda attractor: 0.5 if [
            int(s) for s in attractor[0]] == [0, 0, 0] else 3 / float(8) if [
            int(s) for s in attractor[0]] == [1, 1, 0] else 1 / float(8)
        relative_basins = [relative_len_decider(att) for att in current_attractors]
        # experiment #29
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=relative_basins,
                                                                max_transient_len=5,
                                                                maximal_bits=2,
                                                                impact=0.5))
        # experiment #30
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=relative_basins,
                                                                max_transient_len=0,
                                                                maximal_bits=1,
                                                                impact=0.5))
        G = graphs.Network(vertex_names=["A", "B", "C", "D"], edges=[("A", "B"), ("B", "C"), ("C", "D"),
                                                                     ("D", "D")],
                           vertex_functions=[None, sympy.And, sympy.And, sympy.And])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # Now 0000 is stable, 1110 changes immediently on last vertex change, 1111 can change in 2, 1, or 0
        # steps on change of second, third or last vertex.
        # experiment #31
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                max_transient_len=0,
                                                                maximal_bits=1,
                                                                impact=2 / 3.0))
        # experiment #32
        experiments.append(GraphStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                max_transient_len=3,
                                                                maximal_bits=3,
                                                                impact=2 / 3.0))
        print "number of experiments (with keys)={}".format(len(experiments))
        for i, experiment in enumerate(experiments):
            print "experiment #{}".format(i)
            print "attractors:"
            print experiment.current_attractors
            print "n={}, relative_basins={}, expected_impacts={}".\
                format(len(experiment.G.vertices), experiment.relative_basins, experiment.impact)
            impact = graph_state_impact_score(G=experiment.G, current_attractors=experiment.current_attractors,
                                              max_transient_len=experiment.max_transient_len,
                                              relative_attractor_basin_sizes=experiment.relative_basins,
                                              key_slice_size=15, maximal_bits_of_change=experiment.maximal_bits)
            # (from vertex version) got numeric problems with test #16 regardless of key_slice
            impact = round(impact, 5)
            experiment_impact = round(experiment.impact, 5)
            print "expected impact:"
            print experiment_impact
            print "got impact:"
            print impact
            try:
                self.assertEqual(impact, experiment_impact)
            except AssertionError as e:
                print e
                print experiment.G
                raise e
def test_vertex_state_impact_scores(self):
# TODO: test stochastic kind
experiments = []
G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
vertex_functions=[sympy.Nand])
current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
# experiment #0
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
relative_basins=None,
max_transient_len=1,
impacts=[0]))
# experiment #1
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
relative_basins=None,
max_transient_len=0,
impacts=[0]))
# experiment #2
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
relative_basins=None,
max_transient_len=30,
impacts=[0]))
# experiment #3
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
relative_basins=[1],
max_transient_len=30,
impacts=[0]))
G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")],
vertex_functions=[sympy.Nand, None])
current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
# experiment #4
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
relative_basins=None,
max_transient_len=30,
impacts=[0, np.nan]))
G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
vertex_functions=[sympy.And])
current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
# experiment #5
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
relative_basins=None,
max_transient_len=30,
impacts=[1]))
# experiment #6
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
relative_basins=None,
max_transient_len=30,
impacts=[1]))
# experiment #7
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
relative_basins=None,
max_transient_len=30,
impacts=[1]))
# experiment #8
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
relative_basins=[0.1, 0.9],
max_transient_len=1,
impacts=[1]))
# experiment #9
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
relative_basins=[0.1, 0.9],
max_transient_len=0,
impacts=[1]))
G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")],
vertex_functions=[sympy.And, None])
current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
# experiment #10
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
relative_basins=None,
max_transient_len=0,
impacts=[1, np.nan]))
# experiment #11
experiments.append(VertexStateImpactExperimentParameters(G=G,
current_attractors=current_attractors,
relative_basins=[0.1, 0.4, 0.4, 0.1],
max_transient_len=0,
impacts=[1, np.nan]))
G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
vertex_functions=[sympy.Nand, sympy.Nand, sympy.Nand])
current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
# experiment #12
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
relative_basins=None,
max_transient_len=0,
impacts=[1] * 3))
G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
vertex_functions=[sympy.Nand, sympy.Nand, sympy.And])
current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
# experiment #13
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
relative_basins=None,
max_transient_len=0,
impacts=[1, 1, 1]))
G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
vertex_functions=[sympy.Nand, sympy.Nand, lambda _: True])
current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
# experiment #14
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
relative_basins=None,
max_transient_len=0,
impacts=[0, 0, 0]))
G = graphs.Network(vertex_names=["A", "B", "C", "D"], edges=[("B", "A"), ("C", "A"), ("D", "A"),
("A", "B"), ("C", "B"), ("D", "B"),
("A", "C"), ("B", "C"), ("D", "C"),
("A", "D"), ("B", "D"), ("C", "D")],
vertex_functions=[lambda a, b, c: a + b + c > 1 for _ in range(4)])
current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
# experiment #15
# 0000 and 1111 are stable points, and attract everything with hamming distance <= 1.
# Other three two-state attractors are unstable under one bit change, with transient length of 1.
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
relative_basins=None,
max_transient_len=0,
impacts=[0] * 4))
# experiment #16
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
relative_basins=None,
max_transient_len=1,
impacts=[3 / 5.0] * 4))
# experiment #17
relative_basins = [5 / float(16) if len(attractor) == 1 else 2 / float(16) for
attractor in current_attractors]
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
relative_basins=relative_basins,
max_transient_len=1,
impacts=[6 / 16.0, 6 / 16.0,
6 / 16.0, 6 / 16.0]))
# experiment #18
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
relative_basins=relative_basins,
max_transient_len=2,
impacts=[6 / 16.0, 6 / 16.0,
6 / 16.0, 6 / 16.0]))
G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "C")],
vertex_functions=[None, sympy.And, sympy.And])
current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
# experiment #19
# 000, 110 and 111 are the steady states. First is stable, other can change on
# right vertex change, B with one step and C immediately.
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
relative_basins=None,
max_transient_len=0,
impacts=[np.nan, 0, 2 / 3.0]))
# experiment #20
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
relative_basins=None,
max_transient_len=1,
impacts=[np.nan, 1 / 3.0, 2/ 3.0]))
relative_len_decider = lambda attractor: 0.5 if [
int(s) for s in attractor[0]] == [0, 0, 0] else 3 / float(8) if [
int(s) for s in attractor[0]] == [1, 1, 0] else 1 / float(8)
relative_basins = [relative_len_decider(att) for att in current_attractors]
# experiment #21
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
relative_basins=relative_basins,
max_transient_len=1,
impacts=[np.nan, 1 / 8.0, 0.5]))
G = graphs.Network(vertex_names=["A", "B", "C", "D"], edges=[("A", "B"), ("B", "C"), ("C", "D"),
("D", "D")],
vertex_functions=[None, sympy.And, sympy.And, sympy.And])
current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
# Now 0000 is stable, 1110 changes immediently on last vertex change, 1111 can change in 2, 1, or 0
# steps on change of second, third or last vertex.
# experiment #22
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
relative_basins=None,
max_transient_len=0,
impacts=[np.nan, 0, 0, 2 / float(3)]))
# experiment #23
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
relative_basins=None,
max_transient_len=1,
impacts=[np.nan, 0, 1 / float(3),
2 / float(3)]))
# experiment #24
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
relative_basins=None,
max_transient_len=2,
impacts=[np.nan, 1 / float(3), 1 / float(3),
2 / float(3)]))
# experiment #25
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
relative_basins=None,
max_transient_len=3,
impacts=[np.nan, 1 / float(3), 1 / float(3),
2 / float(3)]))
# experiment #26
experiments.append(VertexStateImpactExperimentParameters(G=G, current_attractors=current_attractors,
relative_basins=None,
max_transient_len=30,
impacts=[np.nan, 1 / float(3), 1 / float(3),
2 / float(3)]))
print "number of experiments (with keys)={}".format(len(experiments))
for i, experiment in enumerate(experiments):
print "experiment #{}".format(i)
print "attractors:"
print experiment.current_attractors
print "n={}, relative_basins={}, expected_impacts={}".\
format(len(experiment.G.vertices), experiment.relative_basins, experiment.impacts)
impacts = vertex_state_impact_scores(G=experiment.G, current_attractors=experiment.current_attractors,
max_transient_len=experiment.max_transient_len,
relative_attractor_basin_sizes=experiment.relative_basins,
key_slice_size=15)
# got numeric problems with test #16 regardless of key_slice
impacts = [round(x, 5) if not np.isnan(x) else x for x in impacts]
experiment_impacts = [round(x, 5) if not np.isnan(x) else x for x in experiment.impacts]
print "expected impacts:"
print impacts
print "got impacts:"
print experiment_impacts
try:
self.assertEqual(impacts, experiment_impacts)
except AssertionError as e:
print e
print experiment.G
raise e
    def test_graph_model_impact_scores(self):
        """Test graph_model_impact_score on small hand-analyzed networks.

        Each GraphModelImpactExperimentParameters bundles a Boolean network
        (attractors found with Dubrova's tool), bounds T/P on attractor
        length/number, the impact type to score (Invalidation, Addition, or
        Both), a bound on the number of model bits allowed to change,
        optional relative attractor basin sizes, and the impact score
        expected from manual analysis.
        """
        # TODO: also test the resulting models (assure they have the correct number of attractors)
        experiments = []
        G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
                           vertex_functions=[sympy.Nand])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #0
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=3, P=3, impact_types=ImpactType.Invalidation,
                                                                maximal_bits=1,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=1))
        # experiment #1
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=3, P=3, impact_types=ImpactType.Both,
                                                                maximal_bits=1,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=1))
        # experiment #2
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=3, P=3, impact_types=ImpactType.Addition,
                                                                maximal_bits=2,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=2))
        # experiment #3
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=3, P=3, impact_types=ImpactType.Both,
                                                                maximal_bits=2,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=1.5))
        # experiment #4
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=3, P=1, impact_types=ImpactType.Addition,
                                                                maximal_bits=2,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=1))
        # experiment #5
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=3, P=3, impact_types=ImpactType.Both,
                                                                maximal_bits=2,
                                                                current_attractors=current_attractors,
                                                                relative_basins=[1],
                                                                impact=1.5))
        G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
                           vertex_functions=[sympy.And])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #6
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=1, P=1, impact_types=ImpactType.Invalidation,
                                                                maximal_bits=1,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=0.5))
        # experiment #7
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=1, P=1, impact_types=ImpactType.Invalidation,
                                                                maximal_bits=1,
                                                                current_attractors=current_attractors,
                                                                relative_basins=[0.1, 0.9],
                                                                impact=0.9))
        # experiment #8
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=1, P=1, impact_types=ImpactType.Invalidation,
                                                                maximal_bits=2,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=1))
        # experiment #9
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=3, P=3, impact_types=ImpactType.Both,
                                                                maximal_bits=2,
                                                                current_attractors=current_attractors,
                                                                relative_basins=[0.1, 0.9],
                                                                impact=0.75))
        # experiment #10
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=3, P=3, impact_types=ImpactType.Addition,
                                                                maximal_bits=2,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=0.5))
        # experiment #11
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=1, P=1, impact_types=ImpactType.Addition,
                                                                maximal_bits=2,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=0))
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")],
                           vertex_functions=[sympy.And, None])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #12
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=1, P=1, impact_types=ImpactType.Invalidation,
                                                                maximal_bits=2,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=1))
        # experiment #13
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=3, P=3, impact_types=ImpactType.Both,
                                                                maximal_bits=2,
                                                                current_attractors=current_attractors,
                                                                relative_basins=[0.1, 0.4, 0.4, 0.1],
                                                                impact=0.75))
        # experiment #14
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=3, P=3, impact_types=ImpactType.Addition,
                                                                maximal_bits=2,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=0.5))
        # experiment #15
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=3, P=1, impact_types=ImpactType.Addition,
                                                                maximal_bits=2,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=0.25))
        # experiment #16
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=1, P=1, impact_types=ImpactType.Addition,
                                                                maximal_bits=2,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=0))
        G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand, sympy.Nand, sympy.Nand])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #17
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=7, P=5, impact_types=ImpactType.Invalidation,
                                                                maximal_bits=1,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=1))
        # experiment #18
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=7, P=5, impact_types=ImpactType.Invalidation,
                                                                maximal_bits=2,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=1))
        # experiment #19
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=6, P=5, impact_types=ImpactType.Addition,
                                                                maximal_bits=2,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=2))
        # experiment #20
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=6, P=3, impact_types=ImpactType.Both,
                                                                maximal_bits=2,
                                                                current_attractors=current_attractors,
                                                                relative_basins=[0.1, 0.9],
                                                                impact=1.25))
        # experiment #21
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=6, P=5, impact_types=ImpactType.Both,
                                                                maximal_bits=2,
                                                                current_attractors=current_attractors,
                                                                relative_basins=[0.1, 0.9],
                                                                impact=1.5))
        # experiment #22
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=6, P=5, impact_types=ImpactType.Addition,
                                                                maximal_bits=1,
                                                                current_attractors=current_attractors,
                                                                relative_basins=[0.1, 0.9],
                                                                impact=0.5))
        # experiment #23
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=1, P=1, impact_types=ImpactType.Addition,
                                                                maximal_bits=2,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=0.5))
        # experiment #24
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=1, P=5, impact_types=ImpactType.Addition,
                                                                maximal_bits=2,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=1))
        G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand, sympy.Nand, sympy.And])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #25
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=7, P=5, impact_types=ImpactType.Invalidation,
                                                                maximal_bits=1,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=0.75))
        # experiment #26
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=7, P=5, impact_types=ImpactType.Invalidation,
                                                                maximal_bits=2,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=1))
        # experiment #27
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=7, P=5, impact_types=ImpactType.Addition,
                                                                maximal_bits=2,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=0.5))
        # experiment #28
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=7, P=5, impact_types=ImpactType.Both,
                                                                maximal_bits=2,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=0.75))
        # experiment #29
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=7, P=5, impact_types=ImpactType.Addition,
                                                                maximal_bits=3,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=0.5))
        # experiment #30
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=7, P=5, impact_types=ImpactType.Addition,
                                                                maximal_bits=4,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=1))
        # experiment #31
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=1, P=5, impact_types=ImpactType.Addition,
                                                                maximal_bits=4,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=0.5))
        G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand, sympy.Nand, lambda _: True])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #32
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=7, P=5, impact_types=ImpactType.Invalidation,
                                                                maximal_bits=1,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=1))
        # experiment #33
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=7, P=3, impact_types=ImpactType.Addition,
                                                                maximal_bits=1,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=3))
        # experiment #34
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=7, P=6, impact_types=ImpactType.Addition,
                                                                maximal_bits=1,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=3))
        # experiment #35
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=7, P=6, impact_types=ImpactType.Addition,
                                                                maximal_bits=2,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=3))
        # experiment #36
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=1, P=6, impact_types=ImpactType.Addition,
                                                                maximal_bits=2,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=1))
        # experiment #37
        experiments.append(GraphModelImpactExperimentParameters(G=G, T=7, P=6, impact_types=ImpactType.Addition,
                                                                maximal_bits=3,
                                                                current_attractors=current_attractors,
                                                                relative_basins=None,
                                                                impact=4))
        print "number of experiments (with keys)={}".format(len(experiments))
        for i, experiment in enumerate(experiments):
            print "experiment #{}".format(i)
            print "n={}, T={}, P={}, maximal_bits={}, relative_basins={}, expected_impact={}".\
                format(len(experiment.G.vertices),
                       experiment.T, experiment.P, experiment.maximal_bits, experiment.relative_basins,
                       experiment.impact)
            print experiment.current_attractors
            impact = graph_model_impact_score(G=experiment.G, current_attractors=experiment.current_attractors,
                                              max_len=experiment.T,
                                              max_num=experiment.P,
                                              impact_types=experiment.impact_types,
                                              relative_attractor_basin_sizes=experiment.relative_basins,
                                              maximal_bits_of_change=experiment.maximal_bits)
            try:
                self.assertEqual(impact, experiment.impact)
            except AssertionError as e:
                print e
                print experiment.G
                raise e
    def test_vertex_model_impact_scores(self):
        """Exercise vertex_model_impact_scores on small hand-built networks.

        Each VertexModelImpactExperimentParameters fixture pairs a Boolean
        network (attractors pre-computed with Dubrova's tool) with the
        per-vertex impact scores expected for the given impact type,
        attractor length/number bounds (T, P), allowed bits of function
        change, and optional relative attractor basin sizes.  Vertices
        created without a Boolean function (input nodes, function None)
        get np.nan as their expected impact.  Any mismatch prints the
        offending network before re-raising the assertion error.
        """
        # TODO: also test the resulting models (assure they have the correct number of attractors)
        # TODO: test stochastic kind
        experiments = []
        G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
                           vertex_functions=[sympy.Nand])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #0
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=3, P=3, impact_types=ImpactType.Invalidation,
                                                                 maximal_bits=1,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=None,
                                                                 impacts=[1]))
        # experiment #1
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=3, P=3, impact_types=ImpactType.Both,
                                                                 maximal_bits=1,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=None,
                                                                 impacts=[1]))
        # experiment #2
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=3, P=3, impact_types=ImpactType.Addition,
                                                                 maximal_bits=2,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=None,
                                                                 impacts=[2]))
        # experiment #3
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=3, P=3, impact_types=ImpactType.Both,
                                                                 maximal_bits=2,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=None,
                                                                 impacts=[1.5]))
        # experiment #4
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=3, P=1, impact_types=ImpactType.Addition,
                                                                 maximal_bits=2,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=None,
                                                                 impacts=[1]))
        # experiment #5
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=3, P=3, impact_types=ImpactType.Both,
                                                                 maximal_bits=2,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=[1],
                                                                 impacts=[1.5]))
        G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
                           vertex_functions=[sympy.And])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #6
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=1, P=1, impact_types=ImpactType.Invalidation,
                                                                 maximal_bits=1,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=None,
                                                                 impacts=[0.5]))
        # experiment #7
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=1, P=1, impact_types=ImpactType.Invalidation,
                                                                 maximal_bits=1,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=[0.1, 0.9],
                                                                 impacts=[0.9]))
        # experiment #8
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=1, P=1, impact_types=ImpactType.Invalidation,
                                                                 maximal_bits=2,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=None,
                                                                 impacts=[1]))
        # experiment #9
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=3, P=3, impact_types=ImpactType.Both,
                                                                 maximal_bits=2,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=[0.1, 0.9],
                                                                 impacts=[0.75]))
        # experiment #10
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=3, P=3, impact_types=ImpactType.Addition,
                                                                 maximal_bits=2,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=None,
                                                                 impacts=[0.5]))
        # experiment #11
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=1, P=1, impact_types=ImpactType.Addition,
                                                                 maximal_bits=2,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=None,
                                                                 impacts=[0]))
        # B is an input node (no function) - its expected impact is np.nan below.
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")],
                           vertex_functions=[sympy.And, None])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #12
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=1, P=1, impact_types=ImpactType.Invalidation,
                                                                 maximal_bits=2,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=None,
                                                                 impacts=[1, np.nan]))
        # experiment #13
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=3, P=3, impact_types=ImpactType.Both,
                                                                 maximal_bits=2,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=[0.1, 0.4, 0.4, 0.1],
                                                                 impacts=[0.75, np.nan]))
        # experiment #14
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=3, P=3, impact_types=ImpactType.Addition,
                                                                 maximal_bits=2,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=None,
                                                                 impacts=[0.5, np.nan]))
        # experiment #15
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=3, P=1, impact_types=ImpactType.Addition,
                                                                 maximal_bits=2,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=None,
                                                                 impacts=[0.25, np.nan]))
        # experiment #16
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=1, P=1, impact_types=ImpactType.Addition,
                                                                 maximal_bits=2,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=None,
                                                                 impacts=[0, np.nan]))
        # Three-vertex Nand cycle - fully symmetric, so all vertices share one impact.
        G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand, sympy.Nand, sympy.Nand])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #17
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=7, P=5, impact_types=ImpactType.Invalidation,
                                                                 maximal_bits=1,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=None,
                                                                 impacts=[1] * 3))
        # experiment #18
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=7, P=5, impact_types=ImpactType.Invalidation,
                                                                 maximal_bits=2,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=None,
                                                                 impacts=[1] * 3))
        # experiment #19
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=6, P=5, impact_types=ImpactType.Addition,
                                                                 maximal_bits=2,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=None,
                                                                 impacts=[2] * 3))
        # experiment #20
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=6, P=3, impact_types=ImpactType.Both,
                                                                 maximal_bits=2,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=[0.1, 0.9],
                                                                 impacts=[1.25] * 3))
        # experiment #21
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=6, P=5, impact_types=ImpactType.Both,
                                                                 maximal_bits=2,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=[0.1, 0.9],
                                                                 impacts=[1.5] * 3))
        # experiment #22
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=6, P=2, impact_types=ImpactType.Addition,
                                                                 maximal_bits=1,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=None,
                                                                 impacts=[0.5] * 3))
        # experiment #23
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=1, P=1, impact_types=ImpactType.Addition,
                                                                 maximal_bits=2,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=None,
                                                                 impacts=[0.5] * 3))
        # experiment #24
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=1, P=5, impact_types=ImpactType.Addition,
                                                                 maximal_bits=2,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=None,
                                                                 impacts=[1] * 3))
        G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand, sympy.Nand, sympy.And])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #25
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=7, P=5, impact_types=ImpactType.Invalidation,
                                                                 maximal_bits=1,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=None,
                                                                 impacts=[0.75, 0.75, 0.75]))
        # experiment #26
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=7, P=5, impact_types=ImpactType.Invalidation,
                                                                 maximal_bits=2,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=None,
                                                                 impacts=[1, 1, 1]))
        # experiment #27
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=7, P=5, impact_types=ImpactType.Addition,
                                                                 maximal_bits=2,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=None,
                                                                 impacts=[0.5, 0.5, 0.5]))
        # experiment #28
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=7, P=5, impact_types=ImpactType.Both,
                                                                 maximal_bits=2,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=None,
                                                                 impacts=[0.75, 0.75, 0.75]))
        # C's function is the constant True here, which breaks the symmetry.
        G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand, sympy.Nand, lambda _: True])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #29
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=7, P=5, impact_types=ImpactType.Invalidation,
                                                                 maximal_bits=1,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=None,
                                                                 impacts=[1, 1, 1]))
        # experiment #30
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=7, P=5, impact_types=ImpactType.Addition,
                                                                 maximal_bits=1,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=None,
                                                                 impacts=[1, 1, 3]))
        # experiment #31
        experiments.append(VertexModelImpactExperimentParameters(G=G, T=7, P=5, impact_types=ImpactType.Addition,
                                                                 maximal_bits=2,
                                                                 current_attractors=current_attractors,
                                                                 relative_basins=None,
                                                                 impacts=[1, 1, 3]))
        print "number of experiments (with keys)={}".format(len(experiments))
        for i, experiment in enumerate(experiments):
            print "experiment #{}".format(i)
            print "n={}, T={}, P={}, maximal_bits={}, relative_basins={}, expected_impacts={}".\
                format(len(experiment.G.vertices),
                       experiment.T, experiment.P, experiment.maximal_bits, experiment.relative_basins,
                       experiment.impacts)
            print experiment.current_attractors
            impacts = vertex_model_impact_scores(G=experiment.G, current_attractors=experiment.current_attractors,
                                                 max_len=experiment.T,
                                                 max_num=experiment.P,
                                                 impact_types=experiment.impact_types,
                                                 relative_attractor_basin_sizes=experiment.relative_basins,
                                                 maximal_bits_of_change=experiment.maximal_bits)
            try:
                self.assertEqual(impacts, experiment.impacts)
            except AssertionError as e:
                # Dump the failing network before propagating, to ease debugging.
                print e
                print experiment.G
                raise e
def test_stochastic_graph_state_impact_scores(self):
experiments = []
G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
vertex_functions=[sympy.Nand])
# experiment #0
experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=1, impact=0))
G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")],
vertex_functions=[sympy.Nand, None])
# experiment #1
experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=1, impact=0))
G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
vertex_functions=[sympy.And])
# experiment #2
experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=1, impact=1))
G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")],
vertex_functions=[sympy.And, None])
# experiment #3
experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=1, impact=1))
G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
vertex_functions=[sympy.Nand, sympy.Nand, sympy.Nand])
# experiment #4
experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=1, impact=0.5))
# experiment #5
experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=2, impact=0.5))
# experiment #6
experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=3, impact=0))
G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
vertex_functions=[sympy.Nand, sympy.Nand, sympy.And])
# experiment #7
experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=1, impact=1))
# experiment #8
experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=2, impact=0.5))
# experiment #9
experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=3, impact=1))
G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
vertex_functions=[sympy.Nand, sympy.Nand, lambda _: True])
# experiment #10
experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=1, impact=0))
# experiment #11
experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=2, impact=0))
# experiment #12
experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=3, impact=0))
G = graphs.Network(vertex_names=["A", "B", "C", "D"], edges=[("B", "A"), ("C", "A"), ("D", "A"),
("A", "B"), ("C", "B"), ("D", "B"),
("A", "C"), ("B", "C"), ("D", "C"),
("A", "D"), ("B", "D"), ("C", "D")],
vertex_functions=[lambda a, b, c: a + b + c > 1 for _ in range(4)])
# experiment #13
experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=1, impact=3 / 8.0))
# experiment #14
experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=2, impact=1))
# experiment #15
experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=3, impact=1))
# experiment #16
experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=4, impact=10 / 16.0))
G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "C")],
vertex_functions=[None, sympy.And, sympy.And])
# 000, 110 and 111 are the steady states. First is stable, other can change on
# right vertex change, B with one step and C immediately.
# experiment #17
experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=1,
impact=(3 / 8.0 * 0) + (3 / 8.0 * 0.5) +
(1 / 8.0 * 0.5) + (1 / 8.0 * 0)))
# experiment #18
experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=2, impact=1 / 16.0))
G = graphs.Network(vertex_names=["A", "B", "C", "D"], edges=[("A", "B"), ("B", "C"), ("C", "D"),
("D", "D")],
vertex_functions=[None, sympy.And, sympy.And, sympy.And])
# Now 0000 is stable, 1110 changes immediently on last vertex change, 1111 can change in 2, 1, or 0
# steps on change of second, third or last vertex.
# experiment #19
experiments.append(StochasticGraphStateImpactExperimentParameters(G=G, bits_of_change=1,
impact=0.20833333333))
print "number of experiments (with keys)={}".format(len(experiments))
for i, experiment in enumerate(experiments):
print "experiment #{}".format(i)
print "n={}, expected_impact={}".\
format(len(experiment.G.vertices), experiment.impact)
for iteration in range(10):
n_iter = random.randint(700, 1400)
parallel_n_jobs = random.choice([None, 1, 2, 3])
estimated_impact = stochastic_graph_state_impact_score(G=experiment.G, n_iter=n_iter,
bits_of_change=experiment.bits_of_change,
parallel_n_jobs=parallel_n_jobs)
print "estimated_impact={}".format(estimated_impact)
self.assertTrue(abs(estimated_impact - experiment.impact) < 0.1)
def test_stochastic_vertex_state_impact_scores(self):
experiments = []
G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
vertex_functions=[sympy.Nand])
# experiment #0
experiments.append(StochasticVertexStateImpactExperimentParameters(G=G, impacts=[0]))
G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")],
vertex_functions=[sympy.Nand, None])
# experiment #1
experiments.append(StochasticVertexStateImpactExperimentParameters(G=G, impacts=[0, np.nan]))
G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
vertex_functions=[sympy.And])
# experiment #2
experiments.append(StochasticVertexStateImpactExperimentParameters(G=G, impacts=[1]))
G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")],
vertex_functions=[sympy.And, None])
# experiment #3
experiments.append(StochasticVertexStateImpactExperimentParameters(G=G, impacts=[1, np.nan]))
G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
vertex_functions=[sympy.Nand, sympy.Nand, sympy.Nand])
# experiment #4
experiments.append(StochasticVertexStateImpactExperimentParameters(G=G, impacts=[0.5] * 3))
G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
vertex_functions=[sympy.Nand, sympy.Nand, sympy.And])
# experiment #5
experiments.append(StochasticVertexStateImpactExperimentParameters(G=G, impacts=[1, 1, 1]))
G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
vertex_functions=[sympy.Nand, sympy.Nand, lambda _: True])
# experiment #6
experiments.append(StochasticVertexStateImpactExperimentParameters(G=G, impacts=[0, 0, 0]))
G = graphs.Network(vertex_names=["A", "B", "C", "D"], edges=[("B", "A"), ("C", "A"), ("D", "A"),
("A", "B"), ("C", "B"), ("D", "B"),
("A", "C"), ("B", "C"), ("D", "C"),
("A", "D"), ("B", "D"), ("C", "D")],
vertex_functions=[lambda a, b, c: a + b + c > 1 for _ in range(4)])
# experiment #7
experiments.append(StochasticVertexStateImpactExperimentParameters(G=G, impacts=[3 / 8.0] * 4))
G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "C")],
vertex_functions=[None, sympy.And, sympy.And])
# experiment #8
# 000, 110 and 111 are the steady states. First is stable, other can change on
# right vertex change, B with one step and C immediately.
experiments.append(StochasticVertexStateImpactExperimentParameters(G=G,
impacts=[np.nan, 1/8.0, 0.5]))
G = graphs.Network(vertex_names=["A", "B", "C", "D"], edges=[("A", "B"), ("B", "C"), ("C", "D"),
("D", "D")],
vertex_functions=[None, sympy.And, sympy.And, sympy.And])
# Now 0000 is stable, 1110 changes immediently on last vertex change, 1111 can change in 2, 1, or 0
# steps on change of second, third or last vertex.
# experiment #9
experiments.append(StochasticVertexStateImpactExperimentParameters(G=G,
impacts=[np.nan,
1/16.0, 1/16.0,
0.5]))
print "number of experiments (with keys)={}".format(len(experiments))
for i, experiment in enumerate(experiments):
print "experiment #{}".format(i)
print "n={}, expected_impacts={}".\
format(len(experiment.G.vertices), experiment.impacts)
for iteration in range(10):
n_iter = random.randint(700, 1400)
parallel_n_jobs = random.choice([None, 1, 2, 3])
estimated_impacts = stochastic_vertex_state_impact_scores(G=experiment.G, n_iter=n_iter,
parallel_n_jobs=parallel_n_jobs)
print "estimated_impacts={}".format(estimated_impacts)
self.assertTrue(len(experiment.impacts) == len(estimated_impacts))
for calculated_impact, estimated_impact in zip(experiment.impacts, estimated_impacts):
if np.isnan(calculated_impact):
self.assertTrue(np.isnan(estimated_impact))
else:
self.assertTrue(abs(estimated_impact - calculated_impact) < 0.1)
    def test_stochastic_graph_model_impact_scores(self):
        """Exercise stochastic_graph_model_impact_score on small networks.

        Each StochasticGraphModelImpactExperimentParameters fixture pairs a
        Boolean network (attractors pre-computed with Dubrova's tool) with
        the analytically expected graph-level model impact for a given
        number of changed function bits, impact type, and optional relative
        attractor basin sizes.  The stochastic estimate is computed with
        randomized iteration counts and parallelism, both with and without
        Dubrova for attractor re-estimation, and must fall within 0.15 of
        the expected value.
        """
        # TODO: also test the resulting models (assure they have the correct number of attractors)
        experiments = []
        G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
                           vertex_functions=[sympy.Nand])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #0
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=1,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Invalidation,
                                                                          impact=1))
        # experiment #1
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=2,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Invalidation,
                                                                          impact=1))
        # experiment #2
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=1,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Addition,
                                                                          impact=1))
        # experiment #3
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=2,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Addition,
                                                                          impact=2))
        # experiment #4
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=2,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Both,
                                                                          impact=1.5))
        G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
                           vertex_functions=[sympy.And])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #5
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=1,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Invalidation,
                                                                          impact=0.5))
        # experiment #6
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=1,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=[0.1, 0.9],
                                                                          impact_type=ImpactType.Invalidation,
                                                                          impact=0.5))
        # experiment #7
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=2,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Invalidation,
                                                                          impact=1))
        # experiment #8
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=1,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Addition,
                                                                          impact=0))
        # experiment #9
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=2,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Addition,
                                                                          impact=0.5))
        # experiment #10
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=2,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Both,
                                                                          impact=0.75))
        # B is an input node (no function).
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")],
                           vertex_functions=[sympy.And, None])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #11
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=1,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Invalidation,
                                                                          impact=0.5))
        # experiment #12
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=2,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Invalidation,
                                                                          impact=1))
        # experiment #13
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=2,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Addition,
                                                                          impact=0.5))
        # Three-vertex Nand cycle.
        G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand, sympy.Nand, sympy.Nand])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #14
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=1,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Invalidation,
                                                                          impact=1))
        # experiment #15
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=2,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Invalidation,
                                                                          impact=1))
        # experiment #16
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=1,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Addition,
                                                                          impact=0.5))
        # experiment #17
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=2,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Addition,
                                                                          impact=(3 / 15.0) * 2 + (12 / 15.0) * 0.5))
        # experiment #18
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=3,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Addition,
                                                                          impact=0.5))
        # experiment #19
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=4,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Addition,
                                                                          impact=(3 / 15.0) * 1 + (12 / 15.0) * 0.5))
        # experiment #20
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=1,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Both,
                                                                          impact=0.75))
        # experiment #21
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=2,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Both,
                                                                          impact=(3 / 15.0) * 1.5 + (12 / 15.0) * 0.75))
        G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand, sympy.Nand, sympy.And])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #22
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=1,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Invalidation,
                                                                          impact=3 / 4.0))
        # experiment #23
        # Weight each attractor's basin by its length: longer attractors get 3/8, fixed points 1/8.
        basin_sizes = [3 / 8.0 if len(att) > 1 else 1 / 8.0 for att in current_attractors]
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=1,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=basin_sizes,
                                                                          impact_type=ImpactType.Invalidation,
                                                                          impact=7 / 8.0))
        # experiment #24
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=2,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Invalidation,
                                                                          impact=(3 / 15.0) * 1 + (12 / 15.0) *
                                                                                 (0.5 * 3 / 4.0 + 0.5 * 1)))
        # experiment #25
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=1,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Addition,
                                                                          impact=0))
        # experiment #26
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=2,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Addition,
                                                                          impact=(3 / 15.0) * 0.5 + (12 / 15.0) *
                                                                                 (0.5 * 0 + 0.5 * 0.25)))
        # experiment #27
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=1,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Both,
                                                                          impact=7 / 16.0))
        # experiment #28
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=2,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Both,
                                                                          impact=(3 / 15.0) * 0.75 + (12 / 15.0) *
                                                                                 (0.5 * (3/8.0 + 0) + 0.5 * (3/8.0 + 0.125))))
        # C's function is the constant True here.
        G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand, sympy.Nand, lambda _: True])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #29
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=1,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Invalidation,
                                                                          impact=0.5))
        # experiment #30
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=2,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Invalidation,
                                                                          impact=(3 / 15.0) * 1 + (12 / 15.0) * 3 / 4.0))
        # experiment #31
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=1,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Addition,
                                                                          impact=(2 / 3.0 * 0.5 + 1 / 3.0 * 2.5)))
        # experiment #32
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=1,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Both,
                                                                          impact=(2 / 3.0 * 0.5 + 1 / 3.0 * (
                                                                                  0.5 * 1.5 + 0.5 * 1.5))))
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A"), ("B", "B")],
                           vertex_functions=[sympy.And, sympy.And])
        current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
        # experiment #33
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=1,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Invalidation,
                                                                          impact=(1 / 3.0 * 0.5 + 2 / 3.0 * 0.25)))
        # experiment #34
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=1,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=[0.1, 0.9],
                                                                          impact_type=ImpactType.Invalidation,
                                                                          impact=(1 / 3.0 * 0.5 + 2 / 3.0 * 0.25)))
        # experiment #35
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=2,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Invalidation,
                                                                          impact=(1 / 15.0 * 1 +
                                                                                  6 / 15.0 * 3.5 / 6.0 +
                                                                                  8 / 15.0 * 5 / 8.0)))
        # experiment #36
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=1,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Addition,
                                                                          impact=(1 / 3.0 * 0.25 +
                                                                                  2 / 3.0 * 1 / 8.0)))
        # experiment #37
        experiments.append(StochasticGraphModelImpactExperimentParameters(G=G,
                                                                          bits_of_change=2,
                                                                          current_attractors=current_attractors,
                                                                          relative_basins=None,
                                                                          impact_type=ImpactType.Addition,
                                                                          impact=(1 / 15.0 * 0.5 +
                                                                                  6 / 15.0 * 1 / 4.0 +
                                                                                  8 / 15.0 * 2 * 0.5 / 8.0)))
        print "number of experiments (with keys)={}".format(len(experiments))
        for i, experiment in enumerate(experiments):
            print "experiment #{}".format(i)
            print "n={}, bits_of_change={}, relative_basins={}, impact_type={}, expected_impact={}".\
                format(len(experiment.G.vertices),
                       experiment.bits_of_change, experiment.relative_basins, experiment.impact_type,
                       experiment.impact)
            print experiment.current_attractors
            # Exercise both the Dubrova-based and the simulation-based
            # attractor re-estimation code paths.
            for use_dubrova in [False, True]:
                n_iter = random.randint(800, 880)
                attractor_estimation_n_iter = random.randint(50, 55)
                parallel_n_jobs = random.choice([None, 1, 2, 3])
                estimated_impact = stochastic_graph_model_impact_score(
                    G=experiment.G, current_attractors=experiment.current_attractors, n_iter=n_iter, use_dubrova=use_dubrova,
                    bits_of_change=experiment.bits_of_change,
                    relative_attractor_basin_sizes=experiment.relative_basins,
                    attractor_estimation_n_iter=attractor_estimation_n_iter,
                    impact_type=experiment.impact_type,
                    cur_dubrova_path=dubrova_path,
                    parallel_n_jobs=parallel_n_jobs)
                print "estimated_impact={}".format(estimated_impact)
                print "expected_impacts={}".format(experiment.impact)
                self.assertTrue(abs(estimated_impact - experiment.impact) < 0.15)
def test_stochastic_vertex_model_impact_scores(self):
# TODO: also test the resulting models (assure they have the correct number of attractors)
experiments = []
G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
vertex_functions=[sympy.Nand])
current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
# experiment #0
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G,
bits_of_change=1,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Invalidation,
impacts=[1]))
# experiment #1
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G,
bits_of_change=2,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Invalidation,
impacts=[1]))
# experiment #2
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G,
bits_of_change=1,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Addition,
impacts=[1]))
# experiment #3
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G,
bits_of_change=2,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Addition,
impacts=[2]))
# experiment #4
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G,
bits_of_change=2,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Both,
impacts=[1.5]))
G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
vertex_functions=[sympy.And])
current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
# experiment #5
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Invalidation,
impacts=[0.5]))
# experiment #6
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1,
current_attractors=current_attractors,
relative_basins=[0.1, 0.9],
impact_type=ImpactType.Invalidation,
impacts=[0.5]))
# experiment #7
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=2,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Invalidation,
impacts=[1]))
# experiment #8
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Addition,
impacts=[0]))
# experiment #9
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=2,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Addition,
impacts=[0.5]))
# experiment #10
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=2,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Both,
impacts=[0.75]))
G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")],
vertex_functions=[sympy.And, None])
current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
# experiment #11
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Invalidation,
impacts=[0.5, np.nan]))
# experiment #12
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=2,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Invalidation,
impacts=[1, np.nan]))
# experiment #13
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=2,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Addition,
impacts=[0.5, np.nan]))
G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
vertex_functions=[sympy.Nand, sympy.Nand, sympy.Nand])
current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
# experiment #14
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G,bits_of_change=1,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Invalidation,
impacts=[1] * 3))
# experiment #15
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G,bits_of_change=2,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Invalidation,
impacts=[1] * 3))
# experiment #16
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G,bits_of_change=1,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Addition,
impacts=[0.5] * 3))
# experiment #17
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G,bits_of_change=2,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Addition,
impacts=[2] * 3))
# experiment #18
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G,bits_of_change=1,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Both,
impacts=[0.75] * 3))
# experiment #19
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G,bits_of_change=2,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Both,
impacts=[1.5] * 3))
G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
vertex_functions=[sympy.Nand, sympy.Nand, sympy.And])
current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
# experiment #20
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Invalidation,
impacts=[3 / 4.0] * 3))
# experiment #21
basin_sizes = [3 / 8.0 if len(att) > 1 else 1 / 8.0 for att in current_attractors]
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1,
current_attractors=current_attractors,
relative_basins=basin_sizes,
impact_type=ImpactType.Invalidation,
impacts=[7 / 8.0] * 3))
# experiment #22
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=2,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Invalidation,
impacts=[1, 1, 1]))
# experiment #23
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Addition,
impacts=[0] * 3))
# experiment #24
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=2,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Addition,
impacts=[0.5, 0.5, 0.5]))
# experiment #25
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Both,
impacts=[7 / 16.0] * 3))
# experiment #26
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=2,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Both,
impacts=[0.75] * 3))
G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
vertex_functions=[sympy.Nand, sympy.Nand, lambda _: True])
current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
# experiment #27
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Invalidation,
impacts=[0.5] * 3))
# experiment #28
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=2,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Invalidation,
impacts=[1] * 3))
# experiment #29
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Addition,
impacts=[0.5, 0.5, 2.5]))
# experiment #30
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=2,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Addition,
impacts=[1, 1, 1]))
# experiment #31
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Both,
impacts=[0.5, 0.5, 1.5]))
G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A"), ("B", "B")],
vertex_functions=[sympy.And, sympy.And])
current_attractors = find_attractors_dubrova(G, dubrova_path, mutate_input_nodes=True)
# experiment #32
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Invalidation,
impacts=[0.5, 0.25]))
# experiment #33
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1,
current_attractors=current_attractors,
relative_basins=[0.1, 0.9],
impact_type=ImpactType.Invalidation,
impacts=[0.5, 0.25]))
# experiment #34
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=2,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Invalidation,
impacts=[1, 0.5]))
# experiment #35
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=1,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Addition,
impacts=[0.25, 1 / 8.0]))
# experiment #36
experiments.append(StochasticVertexModelImpactExperimentParameters(G=G, bits_of_change=2,
current_attractors=current_attractors,
relative_basins=None,
impact_type=ImpactType.Addition,
impacts=[0.5, 1 / 4.0]))
print "number of experiments (with keys)={}".format(len(experiments))
for i, experiment in enumerate(experiments):
print "experiment #{}".format(i)
print "n={}, bits_of_change={}, relative_basins={}, impact_type={}, expected_impacts={}".\
format(len(experiment.G.vertices),
experiment.bits_of_change, experiment.relative_basins, experiment.impact_type,
experiment.impacts)
print experiment.current_attractors
for use_dubrova in [False, True]:
n_iter = random.randint(400, 440)
attractor_estimation_n_iter = random.randint(30, 35)
parallel_n_jobs = random.choice([None, 1, 2, 3])
estimated_impacts = stochastic_vertex_model_impact_scores(
G=experiment.G, current_attractors=experiment.current_attractors, n_iter=n_iter, use_dubrova=use_dubrova,
bits_of_change=experiment.bits_of_change,
relative_attractor_basin_sizes=experiment.relative_basins,
attractor_estimation_n_iter=attractor_estimation_n_iter,
impact_type=experiment.impact_type,
cur_dubrova_path=dubrova_path,
parallel_n_jobs=parallel_n_jobs)
self.assertTrue(len(experiment.impacts) == len(estimated_impacts))
print "estimated_impacts={}".format(estimated_impacts)
for calculated_impact, estimated_impact in zip(experiment.impacts, estimated_impacts):
if np.isnan(calculated_impact):
self.assertTrue(np.isnan(estimated_impact))
else:
self.assertTrue(abs(estimated_impact - calculated_impact) < 0.15)
    def test_find_num_steady_states(self):
        """test on known toy models

        Each case asserts the number of steady states (length-1 attractors)
        reported by find_num_steady_states, with and without general boolean
        simplification where both are exercised.
        """
        # 0, 1
        # A = not A: oscillates, no steady state
        G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
                           vertex_functions=[sympy.Nand])
        self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=False), 0)
        self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=True), 0)
        # a lone input node: both constant states are steady
        G = graphs.Network(vertex_names=["A"], edges=[],
                           vertex_functions=[None])
        self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=False), 2)
        self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=True), 2)
        # A = A: both constant states are steady
        G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
                           vertex_functions=[sympy.And])
        self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=False), 2)
        self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=True), 2)
        # mixed two-cycle: no steady state
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")],
                           vertex_functions=[sympy.Nand, sympy.And])
        self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=False), 0)
        self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=True), 0)
        # mutual inhibition: two steady states
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")],
                           vertex_functions=[sympy.Nand, sympy.Nand])
        self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=False), 2)
        # constant functions: exactly one steady state
        G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")],
                           vertex_functions=[lambda x: True, lambda x: False])
        self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=False), 1)
        self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=True), 1)
        # negative ring of odd length: no steady state
        G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand]*3)
        self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=False), 0)
        # negative ring of even length: two steady states
        G = graphs.Network(vertex_names=["A", "B", "C", "D"], edges=[("A", "B"), ("B", "C"), ("C", "D"), ("D", "A")],
                           vertex_functions=[sympy.Nand]*4)
        self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=False), 2)
        # acyclic, should have 2**#input_nodes attractors of length 1
        G = graphs.Network(vertex_names=["v1", "v2", "v3", "v4", "v5", "v6"],
                           edges=[("v1", "v4"), ("v2", "v4"), ("v1", "v5"), ("v4", "v6")],
                           vertex_functions=[sympy.Nand]*6)
        self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=False), 8)
        # NOTE(review): this graph is constructed but never asserted on - it is
        # immediately overwritten by the next assignment. Looks like a leftover;
        # confirm and remove (or add the missing assertion).
        G = graphs.Network(vertex_names=["A1", "B1", "B2", "C1", "C2"],
                           edges=[("A1", "A1"), ("B1", "B2"), ("B2", "B1"), ("C1", "C2"), ("C2", "C1")],
                           vertex_functions=[sympy.And]*5)
        G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
                           vertex_functions=[sympy.Nand]*3)
        self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=False), 0)
        # a real cnet model (tcr)
        # NOTE(review): hard-coded absolute Windows path - runs only on the author's machine.
        G = graphs.Network.parse_cnet("C:\\Users\\ariel\\Downloads\\Attractors - for Ariel"
                                      "\\Attractors - for Ariel\\BNS_Dubrova_2011\\tcr.cnet")
        self.assertEqual(find_num_steady_states(G, verbose=False, simplify_general_boolean=False), 8)
def test_find_attractors_dubrova(self):
experiments = []
"""test on known toy models"""
# 0, 1
G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
vertex_functions=[sympy.Nand])
experiments.append(DubrovaExperimentParameters(G=G, mutate=False, n_attractors=1))
experiments.append(DubrovaExperimentParameters(G=G, mutate=True, n_attractors=1))
# 2
G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
vertex_functions=[logic.SymmetricThresholdFunction(signs=[-1], threshold=1)])
experiments.append(DubrovaExperimentParameters(G=G, mutate=False, n_attractors=1))
# 3, 4
G = graphs.Network(vertex_names=["A"], edges=[],
vertex_functions=[None])
experiments.append(DubrovaExperimentParameters(G=G, mutate=False, n_attractors=1))
experiments.append(DubrovaExperimentParameters(G=G, mutate=True, n_attractors=2))
# 5, 6
G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")],
vertex_functions=[sympy.Nand, sympy.And])
experiments.append(DubrovaExperimentParameters(G=G, mutate=False, n_attractors=1))
experiments.append(DubrovaExperimentParameters(G=G, mutate=True, n_attractors=1))
# 7, 8
G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")],
vertex_functions=[lambda x: True, lambda x: False])
experiments.append(DubrovaExperimentParameters(G=G, mutate=False, n_attractors=1))
experiments.append(DubrovaExperimentParameters(G=G, mutate=True, n_attractors=1))
# 9, 10
G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "A"), ("C", "A")],
vertex_functions=[logic.SymmetricThresholdFunction.from_function(sympy.Nand, 2),
logic.SymmetricThresholdFunction.from_function(sympy.Nand, 1),
True])
experiments.append(DubrovaExperimentParameters(G=G, mutate=False, n_attractors=3))
experiments.append(DubrovaExperimentParameters(G=G, mutate=True, n_attractors=3))
# 11, 12
G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "A"), ("C", "A")],
vertex_functions=[logic.SymmetricThresholdFunction.from_function(sympy.Nand, 2),
logic.SymmetricThresholdFunction.from_function(sympy.Nand, 1),
False])
experiments.append(DubrovaExperimentParameters(G=G, mutate=False, n_attractors=1))
experiments.append(DubrovaExperimentParameters(G=G, mutate=True, n_attractors=1))
# 13, 14
G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "A"), ("C", "A")],
vertex_functions=[logic.SymmetricThresholdFunction.from_function(sympy.Nand, 2),
logic.SymmetricThresholdFunction.from_function(sympy.Nand, 1),
None])
experiments.append(DubrovaExperimentParameters(G=G, mutate=False, n_attractors=1))
experiments.append(DubrovaExperimentParameters(G=G, mutate=True, n_attractors=4))
# 15
G = graphs.Network.parse_cnet("C:\\Users\\ariel\\Downloads\\Attractors - for Ariel"
"\\Attractors - for Ariel\\BNS_Dubrova_2011\\tcr.cnet")
# G = graphs.Network.parse_cnet("C:\\Users\\ariel\\Downloads\\Attractors - for Ariel"
# "\\Attractors - for Ariel\\BNS_Dubrova_2011\\MAPK_large.cnet")
experiments.append(DubrovaExperimentParameters(G=G, mutate=False, n_attractors=9))
print "number of experiments (with keys)={}".format(len(experiments))
for i, experiment in enumerate(experiments):
print "experiment #{}".format(i)
print "n={}, mutate={}, expected_n_attractors={}".format(len(experiment.G.vertices),
experiment.mutate, experiment.n_attractors)
# continue
attractors = find_attractors_dubrova(G=experiment.G,
dubrova_path="../bns_dubrova.exe",
mutate_input_nodes=experiment.mutate)
n_attractors = len(attractors)
try:
self.assertEqual(n_attractors, experiment.n_attractors)
except AssertionError as e:
print e
print experiment.G
raise e
except Exception as e:
raise e
print "testing state order in attractor"
# TODO: expand? random graphs, compare ILP attractors with Dubrova's
G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "A")],
vertex_functions=[sympy.And, sympy.Nand, True])
desired_attractor = [[0, 0, 1], [0, 1, 1], [1, 1, 1], [1, 0, 1]]
# repeat manually, (otherwise there's mutual dependence of tests).
possible_attractors = [desired_attractor[shift:] + desired_attractor[:shift] for shift in range(4)]
# print possible_attractors
found_attractors = find_attractors_dubrova(G, dubrova_path="../bns_dubrova.exe", mutate_input_nodes=True)
self.assertTrue(len(found_attractors) == 1)
found_attractor = [[int(v) for v in state] for state in found_attractors[0]]
# print found_attractor
self.assertTrue(any(found_attractor == possible_attractors[i] for i in range(len(possible_attractors))))
G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")],
vertex_functions=[sympy.And, sympy.Nand])
desired_attractor = [[0, 0], [0, 1], [1, 1], [1, 0]]
# repeat manually, (otherwise there's mutual dependence of tests).
possible_attractors = [desired_attractor[shift:] + desired_attractor[:shift] for shift in range(4)]
# print possible_attractors
found_attractors = find_attractors_dubrova(G, dubrova_path="../bns_dubrova.exe", mutate_input_nodes=True)
self.assertTrue(len(found_attractors) == 1)
found_attractor = [[int(v) for v in state] for state in found_attractors[0]]
# print found_attractor
self.assertTrue(any(found_attractor == possible_attractor for possible_attractor in possible_attractors))
def test_find_attractors_enumerate(self):
experiments = []
"""test on known toy models"""
# 0, 1
G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
vertex_functions=[sympy.Nand])
experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=None, n_attractors=0))
experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=None, n_attractors=1))
# 2, 3
G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
vertex_functions=[logic.SymmetricThresholdFunction(signs=[-1], threshold=1)])
experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=None, n_attractors=0))
experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=None, n_attractors=1))
# 4, 5
G = graphs.Network(vertex_names=["A"], edges=[],
vertex_functions=[None])
experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=None, n_attractors=2))
experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=None, n_attractors=2))
# 6, 7
G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "A")],
vertex_functions=[logic.SymmetricThresholdFunction(signs=[-1], threshold=1),
None])
experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=None, n_attractors=0))
experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=None, n_attractors=2))
# 8, 9
G = graphs.Network(vertex_names=["A"], edges=[("A", "A")],
vertex_functions=[sympy.And])
experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=None, n_attractors=2))
experiments.append(ILPAttractorExperimentParameters(G=G, T=3, P=None, n_attractors=2))
# 10, 11
G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")],
vertex_functions=[sympy.Nand, sympy.And])
experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=None, n_attractors=0))
experiments.append(ILPAttractorExperimentParameters(G=G, T=4, P=None, n_attractors=1))
# 12, 13, 14
G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")],
vertex_functions=[sympy.Nand, sympy.Nand])
experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=None, n_attractors=2))
experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=None, n_attractors=3))
experiments.append(ILPAttractorExperimentParameters(G=G, T=15, P=None, n_attractors=3))
# 15, 16
G = graphs.Network(vertex_names=["A", "B"], edges=[("A", "B"), ("B", "A")],
vertex_functions=[lambda x: True, lambda x: False])
experiments.append(ILPAttractorExperimentParameters(G=G, T=4, P=None, n_attractors=1))
experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=None, n_attractors=1))
# 17, 18, 19
G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "A"), ("C", "A")],
vertex_functions=[logic.SymmetricThresholdFunction.from_function(sympy.Nand, 2),
logic.SymmetricThresholdFunction.from_function(sympy.Nand, 1),
logic.SymmetricThresholdFunction.from_function(sympy.Nand, 0)])
experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=None, n_attractors=3))
experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=None, n_attractors=4))
experiments.append(ILPAttractorExperimentParameters(G=G, T=3, P=None, n_attractors=4))
# 20
G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "A"), ("C", "A")],
vertex_functions=[logic.SymmetricThresholdFunction.from_function(sympy.Nand, 2),
logic.SymmetricThresholdFunction.from_function(sympy.Nand, 1),
None])
experiments.append(ILPAttractorExperimentParameters(G=G, T=3, P=None, n_attractors=4))
# 21, 22, 23
G = graphs.Network(vertex_names=["A", "B", "C"], edges=[("A", "B"), ("B", "C"), ("C", "A")],
vertex_functions=[sympy.Nand]*3)
experiments.append(ILPAttractorExperimentParameters(G=G, T=6, P=None, n_attractors=2))
experiments.append(ILPAttractorExperimentParameters(G=G, T=10, P=None, n_attractors=2))
experiments.append(ILPAttractorExperimentParameters(G=G, T=5, P=None, n_attractors=1))
# 24, 25
# acyclic, should have 2**#input_nodes attractors of length 1
G = graphs.Network(vertex_names=["v1", "v2", "v3", "v4", "v5", "v6"],
edges=[("v1", "v4"), ("v2", "v4"), ("v1", "v5"), ("v4", "v6")],
vertex_functions=[sympy.Nand]*6)
experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=None, n_attractors=8))
experiments.append(ILPAttractorExperimentParameters(G=G, T=6, P=None, n_attractors=8))
# 26, 27
G = graphs.Network(vertex_names=["A1", "B1", "B2", "C1", "C2"],
edges=[("A1", "A1"), ("B1", "B2"), ("B2", "B1"), ("C1", "C2"), ("C2", "C1")],
vertex_functions=[sympy.And]*5)
experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=None, n_attractors=8))
experiments.append(ILPAttractorExperimentParameters(G=G, T=3, P=None, n_attractors=20)) # offsets!
# 28, 29
# a failed random graph added as a constant test
G = graphs.Network(
vertex_names=['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16',
'17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31',
'32', '33', '34'],
edges=[('1', '2'), ('2', '16'), ('3', '17'), ('5', '15'), ('6', '29'), ('7', '28'), ('8', '22'),
('9', '28'), ('10', '18'), ('11', '15'), ('12', '24'), ('13', '14'), ('15', '18'), ('16', '26'),
('17', '27'), ('18', '20'), ('19', '23'), ('20', '27'), ('23', '26'), ('24', '29'), ('25', '33'),
('26', '30'), ('27', '32'), ('28', '32'), ('30', '32'), ('31', '34'), ('32', '33'), ('33', '34')],
vertex_functions=[None, None, sympy.Nand, None, None, None, None, None, None, None, None, None, None, None,
sympy.Or, sympy.Nand,
sympy.Nand, sympy.Nand, sympy.Nand, None, sympy.Xor, None, sympy.And, sympy.Nand,
sympy.Xor, None, sympy.And, sympy.Nand, sympy.And, sympy.Xor, sympy.Or, None, sympy.Or,
sympy.And, sympy.And])
experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=None, n_attractors=2**17))
experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=None, n_attractors=2**17))
# 30, 31, 32, 33
G = graphs.Network.parse_cnet("C:\\Users\\ariel\\Downloads\\Attractors - for Ariel"
"\\Attractors - for Ariel\\BNS_Dubrova_2011\\tcr.cnet")
experiments.append(ILPAttractorExperimentParameters(G=G, T=1, P=None, n_attractors=8))
experiments.append(ILPAttractorExperimentParameters(G=G, T=2, P=None, n_attractors=8))
experiments.append(ILPAttractorExperimentParameters(G=G, T=6, P=None, n_attractors=9))
experiments.append(ILPAttractorExperimentParameters(G=G, T=8, P=None, n_attractors=9))
print "number of experiments (with keys)={}".format(len(experiments))
for i, experiment in enumerate(experiments):
print "experiment #{}".format(i)
print "n={}, T={}, expected_n_attractors={}".format(len(experiment.G.vertices),
experiment.T, experiment.n_attractors)
# continue
simplify = bool(random.randint(0, 1))
key_slice_size = random.randint(1, 15)
print "key_slice_size={}".format(key_slice_size)
n_attractors = len(find_attractors_onestage_enumeration(G=experiment.G, max_len=experiment.T,
verbose=False,
simplify_general_boolean=simplify,
key_slice_size=key_slice_size))
try:
self.assertEqual(n_attractors, experiment.n_attractors)
except AssertionError as e:
print e
print experiment.G
raise e
except Exception as e:
raise e
# TODO: add dubrova v.s. ILP testing again.
| 2.25 | 2 |
class Run(dict):
    """A single benchmark run: a dict of run attributes merged from a parent
    serie (fallback values) and the run-specific data (overriding values).

    Only the keys listed in ``Run.attributes`` take part in ``Run``-to-``Run``
    equality checks.
    """

    # The run attributes copied from serie/data and compared by __eq__.
    attributes = ('nx', 'ny', 'nz', 'time', 'NbrOfCores', 'platform', 'configuration', 'repetitions', 'mpiargs', 'tag')

    def __init__(self, serie, data, **kwargs):
        """Build the run from its serie and its per-run data.

        Extra keyword arguments are stored as ordinary dict items. If a
        subclass defines an ``init`` method, it is invoked as a
        post-construction hook with (serie, data).
        """
        super(Run, self).__init__(**kwargs)
        self.data = data
        self.parent = serie
        for x in Run.attributes:
            if x in data:
                self[x] = data[x]       # run-specific value wins
            elif x in serie:
                self[x] = serie[x]      # otherwise fall back to the serie's value
        if hasattr(self, 'init'):
            self.init(serie, data)

    def getReduced(self):
        """Return only the attributes that differ from (or are absent in) the parent serie."""
        return {k: self[k] for k in Run.attributes
                if k in self and (k not in self.parent or self[k] != self.parent[k])}

    def getRunAttributes(self):
        """Return all attributes present on this run as a plain dict."""
        return {k: self[k] for k in Run.attributes if k in self}

    # Compare based on the attributes named in Run.attributes
    def __eq__(self, other):
        if isinstance(other, Run):
            return self.getRunAttributes() == other.getRunAttributes()
        return super(Run, self).__eq__(other)

    def __ne__(self, other):
        # Bug fix: without this override, `!=` fell back to dict's content
        # comparison, which could disagree with the attribute-based __eq__
        # above (a == b and a != b could both be True).
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
| 2.796875 | 3 |
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 15 22:20:52 2018

@author: Srinivas
"""
import numpy as np

# Project Euler #1: sum of all natural numbers below 1000 that are
# multiples of 3 or of 5.
X = np.arange(1, 1000)
mask = (X % 3 == 0) | (X % 5 == 0)
Y = X[mask]
Z = Y.sum()
print(Z)
| 3.34375 | 3 |
plenum/test/checkpoints/test_stashed_messages_processed_on_backup_replica_ordering_resumption.py | cam-parra/indy-plenum | 0 | 12761875 | from plenum.server.replica import Replica
from plenum.test import waits
from plenum.test.delayers import cDelay, chk_delay
from plenum.test.helper import sdk_send_random_requests, assertExp, incoming_3pc_msgs_count
from stp_core.loop.eventually import eventually
# Number of pool nodes used by this test module.
nodeCount = 4
# Checkpoint frequency in 3PC-batches (presumably applied via the
# chkFreqPatched fixture - confirm against the fixture definition).
CHK_FREQ = 5
# LOG_SIZE in checkpoints corresponds to the catch-up lag in checkpoints
# (the test below relies on LOG_SIZE == (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ).
LOG_SIZE = 2 * CHK_FREQ
def test_stashed_messages_processed_on_backup_replica_ordering_resumption(
        looper, chkFreqPatched, reqs_for_checkpoint,
        one_replica_and_others_in_backup_instance,
        sdk_pool_handle, sdk_wallet_client, view_change_done,
        txnPoolNodeSet):
    """
    Verifies resumption of ordering 3PC-batches on a backup replica
    on detection of a lag in checkpoints in case it is detected after
    some 3PC-messages related to the next checkpoint have already been stashed
    as laying outside of the watermarks.
    Please note that to verify this case the config is set up so that
    LOG_SIZE == (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ
    """
    # instId=1 below refers to the backup instance this fixture selects.
    slow_replica, other_replicas = one_replica_and_others_in_backup_instance
    view_no = slow_replica.viewNo

    # Send a request and ensure that the replica orders the batch for it
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)

    # NOTE(review): pp_seq_no 2 after a single request presumably accounts for a
    # batch ordered around the preceding view change (view_change_done) - confirm.
    looper.run(
        eventually(lambda *args: assertExp(slow_replica.last_ordered_3pc == (view_no, 2)),
                   slow_replica,
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))

    # Don't receive Commits from two replicas
    slow_replica.node.nodeIbStasher.delay(
        cDelay(instId=1, sender_filter=other_replicas[0].node.name))
    slow_replica.node.nodeIbStasher.delay(
        cDelay(instId=1, sender_filter=other_replicas[1].node.name))

    # Send a request for which the replica will not be able to order the batch
    # due to an insufficient count of Commits
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
    looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))

    # Receive further Commits from now on
    slow_replica.node.nodeIbStasher.drop_delayeds()
    slow_replica.node.nodeIbStasher.resetDelays()

    # Send requests but in a quantity insufficient
    # for catch-up number of checkpoints
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client,
                             Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP *
                             reqs_for_checkpoint - 3)
    looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))

    # Don't receive Checkpoints
    slow_replica.node.nodeIbStasher.delay(chk_delay(instId=1))

    # Send more requests to reach catch-up number of checkpoints
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client,
                             reqs_for_checkpoint)
    looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))

    # Ensure that there are no 3PC-messages stashed
    # as laying outside of the watermarks
    assert slow_replica.stasher.num_stashed_watermarks == 0

    # Send a request for which the batch will be outside of the watermarks
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
    looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))

    # Ensure that the replica has not ordered any batches
    # after the very first one
    assert slow_replica.last_ordered_3pc == (view_no, 2)

    # Ensure that the watermarks have not been shifted since the view start
    assert slow_replica.h == 0
    assert slow_replica.H == LOG_SIZE

    # Ensure that there are some quorumed stashed checkpoints
    assert slow_replica.stashed_checkpoints_with_quorum()

    # Ensure that now there are 3PC-messages stashed
    # as laying outside of the watermarks
    assert slow_replica.stasher.num_stashed_watermarks == incoming_3pc_msgs_count(len(txnPoolNodeSet))

    # Receive belated Checkpoints
    slow_replica.node.nodeIbStasher.reset_delays_and_process_delayeds()

    # Ensure that the replica has ordered the batch for the last sent request
    looper.run(
        eventually(lambda *args: assertExp(slow_replica.last_ordered_3pc ==
                                           (view_no, (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + 1)),
                   slow_replica,
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))

    # Ensure that the watermarks have been shifted so that the lower watermark
    # now equals to the end of the last stable checkpoint in the instance
    assert slow_replica.h == (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ
    assert slow_replica.H == (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + LOG_SIZE

    # Ensure that now there are no quorumed stashed checkpoints
    assert not slow_replica.stashed_checkpoints_with_quorum()

    # Ensure that now there are no 3PC-messages stashed
    # as laying outside of the watermarks
    assert slow_replica.stasher.num_stashed_watermarks == 0

    # Send a request and ensure that the replica orders the batch for it
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)

    looper.run(
        eventually(lambda *args: assertExp(slow_replica.last_ordered_3pc ==
                                           (view_no, (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + 2)),
                   slow_replica,
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))
| 2.125 | 2 |
mpids/MPInumpy/examples/mpiarray_creation_arange.py | edgargabriel/mpids | 1 | 12761876 | from mpi4py import MPI
import numpy as np
import mpids.MPInumpy as mpi_np
if __name__ == "__main__":
    # Grab the default communicator plus this process' rank and the world size.
    world = MPI.COMM_WORLD
    proc_rank = world.Get_rank()
    num_procs = world.Get_size()

    # Only rank 0 prints the shared usage note so output isn't duplicated.
    usage_note = (
        "Note: creation routines are using their default MPI related kwargs."
        "\nDefault kwargs:"
        " routine(..., comm=MPI.COMM_WORLD, root=0, dist='b')\n"
    )
    if proc_rank == 0:
        print(usage_note)

    # Demonstrate arange: evenly spaced values over the specified interval,
    # distributed across the MPI processes.
    if proc_rank == 0:
        print('From arange(start, stop, step) Routine')
    distributed_range = mpi_np.arange(num_procs * 5)
    print('Local Arange Result Rank {}: {}'.format(proc_rank, distributed_range))
    if proc_rank == 0:
        print()
| 2.546875 | 3 |
muDIC/tests/test_vlab/test_virtualTensileTest.py | diehlpk/muDIC | 7 | 12761877 | <gh_stars>1-10
import logging
from unittest import TestCase
import numpy as np
import muDIC.vlab as vlab
class TestVirtualTensileTest(TestCase):
    """Checks for the virtual-lab synthetic image generation pipeline."""
    # TODO: Rewrite these tests!

    @classmethod
    def setUpClass(cls):
        # Shared fixtures: a dotted speckle image and a tolerance for
        # pixel-wise comparisons.
        logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
        cls.logger = logging.getLogger()
        np.set_printoptions(precision=8)
        cls.img_shape = (500, 500)
        cls.tol = 1e-5
        cls.image = vlab.speckle.dots_speckle(cls.img_shape, n_dots=10000, dot_radius_max=10)

    def test__pass_through_user_img(self):
        """An identity deformation with unit downsampling and no noise must
        return the input image unchanged (within tolerance)."""
        # Bug fix: ``np.float`` was a deprecated alias for the builtin ``float``
        # and is removed in NumPy >= 1.24; using ``float`` is equivalent.
        F = np.eye(2, dtype=float)
        image_deformer = vlab.imageDeformer_from_defGrad(F)

        downsampler = vlab.Downsampler(image_shape=self.img_shape, factor=1, fill=1., pixel_offset_stddev=0.0)

        # Identity noise injector: image passes through untouched.
        noise_injector = lambda img: img

        virtualTest = vlab.SyntheticImageGenerator(speckle_image=self.image, image_deformer=image_deformer,
                                                   downsampler=downsampler,
                                                   noise_injector=noise_injector, n=10)
        deviation = np.abs(virtualTest(1) - self.image)

        if np.max(deviation) > self.tol:
            # Fixed message: the original read "...error is%f" (missing space).
            self.fail("Image changed value or orientation. Largest error is %f" % np.max(deviation))
pdf2dataset/pdf_extract_task.py | icaropires/pdf2dataset | 11 | 12761878 | <reponame>icaropires/pdf2dataset
import io
import os
import numpy as np
import pytesseract
import cv2
import pdftotext
from pdf2image import convert_from_bytes
from pdf2image.exceptions import PDFPageCountError, PDFSyntaxError
from PIL import Image as PilImage
from PIL.Image import DecompressionBombError
from .extract_task import ExtractTask, feature
class Image:
    """Thin wrapper around a PIL image adding the resize/preprocess/OCR
    helpers used by the PDF extraction tasks."""

    def __init__(self, image, image_format=None):
        self.pil_image = image
        # Fall back to the format PIL recorded when none is given explicitly.
        self.image_format = image_format or self.pil_image.format

    @classmethod
    def from_bytes(cls, image_bytes):
        """Build an Image from raw encoded bytes (jpeg/png/...)."""
        image = PilImage.open(io.BytesIO(image_bytes))
        return cls(image)

    def resize(self, size):
        """Return a new Image resized to ``size`` (width, height), keeping
        this image's format."""
        pil_image = self.pil_image.resize(size)
        return type(self)(pil_image, self.image_format)

    def preprocess(self):
        """Return a binarized, eroded copy intended to improve OCR accuracy."""
        # Grayscale -> adaptive threshold -> light erosion.
        image = np.asarray(self.pil_image.convert('L'))

        image = cv2.adaptiveThreshold(
            image, 255,
            cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
            cv2.THRESH_BINARY,
            97, 50
        )

        image = cv2.erode(
            image,
            cv2.getStructuringElement(cv2.MORPH_CROSS, (2, 2)),
            iterations=1
        )

        pil_image = PilImage.fromarray(image)
        return type(self)(pil_image)

    @staticmethod
    def parse_size(image_size_str):
        """Parse a 'WIDTHxHEIGHT' string (case-insensitive) into (int, int).

        Returns None when the input is None; raises ValueError on malformed
        input.
        """
        if image_size_str is None:
            return None

        image_size_str = image_size_str.lower()

        try:
            width, height = map(int, image_size_str.split('x'))
        except ValueError as e:
            raise ValueError(
                f'Invalid image size parameter: {image_size_str}'
            ) from e

        return width, height

    def ocr(self, lang='por'):
        """Run tesseract over the image and return the recognized text."""
        # So pytesseract uses only one core per worker
        os.environ['OMP_THREAD_LIMIT'] = '1'
        return pytesseract.image_to_string(self.pil_image, lang=lang)

    def to_bytes(self):
        """Serialize the image back to encoded bytes in ``self.image_format``."""
        # Bug fix: the original allocated an extra, unused BytesIO right
        # before the ``with`` statement that rebound the same name.
        with io.BytesIO() as image_stream:
            self.pil_image.save(image_stream, self.image_format)
            image_bytes = image_stream.getvalue()

        return image_bytes
class PdfExtractTask(ExtractTask):
    """Extraction task for a single page of a PDF document.

    Each ``@feature``-decorated method produces one column of the resulting
    dataset (page number, path, page image, extracted text).
    """

    class OcrError(Exception):
        # Raised when OCR is requested but the page image is unavailable.
        ...

    # NOTE(review): name looks like a typo for "fixed_features"; kept as-is
    # because the base class / callers may look it up by this exact name.
    fixed_featues = ('path', 'page')

    def __init__(self, path, page, *args,
                 ocr=False, ocr_image_size=None, ocr_lang='por',
                 image_format='jpeg', image_size=None, **kwargs):
        # page: page index within the document at ``path``.
        # ocr: when True, text comes from OCR over the rendered page image
        #   rather than pdftotext's native extraction.
        # image_size: optional 'WIDTHxHEIGHT' string for the emitted image.
        self.page = page
        self.ocr = ocr
        self.ocr_lang = ocr_lang
        self.ocr_image_size = ocr_image_size
        self.image_format = image_format
        self.image_size = Image.parse_size(image_size)
        super().__init__(path, *args, **kwargs)

    def __repr__(self):
        return f'PdfExtractTask({self.path}, {self.page})'

    def _extract_text_ocr(self):
        # Render the page, binarize it, then run tesseract over the result.
        image_bytes, _ = self.get_feature('image_original')

        if not image_bytes:
            raise self.OcrError("Wasn't possible to get page image!")

        image = Image.from_bytes(image_bytes)
        preprocessed = image.preprocess()
        return preprocessed.ocr()

    def _extract_text_native(self):
        # pdftotext pages are 0-indexed, hence ``self.page - 1``.
        with io.BytesIO(self.file_bin) as f:
            pages = pdftotext.PDF(f)
            text = pages[self.page-1]

        return text

    @feature(
        'binary', is_helper=True,
        exceptions=(PDFPageCountError, PDFSyntaxError, DecompressionBombError)
    )
    def get_image_original(self):
        # Render just this page; height capped by ``ocr_image_size``.
        images = convert_from_bytes(
            self.file_bin, first_page=self.page,
            single_file=True, fmt=self.image_format,
            size=(None, self.ocr_image_size)
        )

        image = Image(images[0])
        return image.to_bytes()

    @feature('int16')
    def get_page(self):
        # Page number of this task.
        return self.page

    @feature('string')
    def get_path(self):
        # Path of the source PDF, stringified for the dataset column.
        return str(self.path)

    @feature('binary')
    def get_image(self):
        # Final page image, optionally resized to the user-requested size.
        image_bytes, _ = self.get_feature('image_original')

        if not image_bytes:
            return None

        if self.image_size:
            image = Image.from_bytes(image_bytes)

            size = self.image_size
            image_bytes = image.resize(size).to_bytes()

        return image_bytes

    @feature('string', exceptions=[pdftotext.Error, OcrError])
    def get_text(self):
        # Dispatch between OCR-based and native text extraction.
        if self.ocr:
            return self._extract_text_ocr()

        return self._extract_text_native()
tvm/python/tvm/relay/backend/compile_engine.py | cmu-catalyst/collage | 32 | 12761879 | <gh_stars>10-100
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=len-as-condition,no-else-return,invalid-name
"""Backend code generation engine."""
from __future__ import absolute_import
import logging
import numpy as np
import tvm
from tvm import te, autotvm
from tvm.ir.transform import PassContext
from tvm.runtime import Object
from tvm.support import libinfo
from tvm.target import Target
from .. import function as _function
from .. import ty as _ty
from . import _backend
# for target-specific lowering
from tvm.relay.op import op as _op
#from tvm.relay.analysis import post_order_visit
from tvm import relay
from tvm import topi
from tvm.relay.op.strategy.generic import *
from tvm import te
from tvm.contrib.cudnn import softmax
logger = logging.getLogger("compile_engine")
tuning_logger = logging.getLogger("autotvm")
@tvm._ffi.register_object("relay.LoweredOutput")
class LoweredOutput(Object):
    """Lowered output"""

    def __init__(self, outputs, implement):
        # Construct the underlying C++ node from the output tensors and the
        # op implementation that produced them.
        self.__init_handle_by_constructor__(_backend._make_LoweredOutput, outputs, implement)
@tvm._ffi.register_object("relay.CCacheKey")
class CCacheKey(Object):
    """Key in the CompileEngine.

    Parameters
    ----------
    source_func : tvm.relay.Function
        The source function.

    target : tvm.Target
        The target we want to run the function on.
    """

    def __init__(self, source_func, target):
        # Delegate to the C++ constructor; (function, target) pairs identify
        # cache entries.
        self.__init_handle_by_constructor__(_backend._make_CCacheKey, source_func, target)
@tvm._ffi.register_object("relay.CCacheValue")
class CCacheValue(Object):
    """Value in the CompileEngine, including usage statistics."""
    # Pure FFI proxy: all fields live on the C++ node.
def _get_cache_key(source_func, target):
    """Normalize (source_func, target) into a CCacheKey.

    A relay Function is paired with the (possibly string-specified) target;
    an existing CCacheKey is passed through unchanged.
    """
    if not isinstance(source_func, _function.Function):
        # Anything that is not a Function must already be a cache key.
        if isinstance(source_func, CCacheKey):
            return source_func
        raise TypeError("Expect source_func to be CCacheKey")

    resolved_target = Target(target) if isinstance(target, str) else target
    if not resolved_target:
        raise ValueError("Need target when source_func is a Function")
    return CCacheKey(source_func, resolved_target)
def get_shape(shape):
    """Convert the shape to correct dtype and vars."""

    def _convert(dim):
        # Concrete integer dimensions: keep as-is when 64-bit indices are the
        # build default, otherwise narrow to int32 (with an overflow guard).
        if isinstance(dim, tvm.tir.IntImm):
            if libinfo()["INDEX_DEFAULT_I64"] == "ON":
                return dim
            val = int(dim)
            assert val <= np.iinfo(np.int32).max
            return tvm.tir.IntImm("int32", val)
        # Symbolic "Any" dimensions become fresh int32 variables.
        if isinstance(dim, tvm.tir.Any):
            return te.var("any_dim", "int32")
        # Everything else (already a var, etc.) passes through.
        return dim

    return [_convert(dim) for dim in shape]
def get_valid_implementations(op, attrs, inputs, out_type, target):
    """Get all valid implementations from the op strategy.

    Note that this function doesn't support op with symbolic input shapes.

    Parameters
    ----------
    op : tvm.ir.Op
        Relay operator.

    attrs : object
        The op attribute.

    inputs : List[tvm.te.Tensor]
        Input tensors to the op.

    out_type : relay.Type
        The output type.

    target : tvm.target.Target
        The target to compile the op.

    Returns
    -------
    ret : List[relay.op.OpImplementation]
        The list of all valid op implementations.
    """
    fstrategy = op.get_attr("FTVMStrategy")
    assert fstrategy is not None, (
        "%s doesn't have an FTVMStrategy registered. You can register "
        "one in python with `tvm.relay.op.register_strategy`." % op.name
    )
    # Evaluate the strategy under the target context so target-dependent
    # conditions resolve correctly.
    with target:
        strategy = fstrategy(attrs, inputs, out_type, target)
    analyzer = tvm.arith.Analyzer()
    ret = []
    for spec in strategy.specializations:
        if spec.condition:
            # check if all the clauses in the specialized condition are true
            flag = True
            for clause in spec.condition.clauses:
                # A clause counts as satisfied only when it simplifies to a
                # non-zero integer constant.
                clause = analyzer.canonical_simplify(clause)
                if isinstance(clause, tvm.tir.IntImm) and clause.value:
                    continue
                flag = False
                break
            if flag:
                for impl in spec.implementations:
                    ret.append(impl)
        else:
            # Unconditional specialization: all its implementations apply.
            for impl in spec.implementations:
                ret.append(impl)
    return ret
def select_implementation(op, attrs, inputs, out_type, target, use_autotvm=True):
    """Select the best implementation from the op strategy.

    If use_autotvm is True, it'll first try to find the best implementation
    based on AutoTVM profile results. If no AutoTVM profile result is found,
    it'll choose the implementation with highest plevel.

    If use_autotvm is False, it'll directly choose the implementation with
    highest plevel.

    Note that this function doesn't support op with symbolic input shapes.

    Parameters
    ----------
    op : tvm.ir.Op
        Relay operator.

    attrs : object
        The op attribute.

    inputs : List[tvm.te.Tensor]
        Input tensors to the op.

    out_type : relay.Type
        The output type.

    target : tvm.target.Target
        The target to compile the op.

    use_autotvm : bool
        Whether query AutoTVM to pick the best.

    Returns
    -------
    ret : tuple(relay.op.OpImplementation, List[tvm.te.Tensor])
        The best op implementation and the corresponding output tensors.
    """
    all_impls = get_valid_implementations(op, attrs, inputs, out_type, target)

    # Highest-priority implementation is the fallback choice throughout.
    best_plevel_impl = max(all_impls, key=lambda x: x.plevel)

    # Disable autotvm if auto_scheduler is enabled.
    # (i.e., always return the implementation with the highest priority for auto-scheduler).
    if PassContext.current().config.get("relay.backend.use_auto_scheduler", False):
        use_autotvm = False

    # If not use autotvm, always return the implementation with the highest priority
    if not use_autotvm:
        #logger.info(
        #    "Using %s for %s based on highest priority (%d)",
        #    best_plevel_impl.name,
        #    op.name,
        #    best_plevel_impl.plevel,
        #)
        outs = best_plevel_impl.compute(attrs, inputs, out_type)
        return best_plevel_impl, outs

    # Otherwise, try autotvm templates
    outputs = {}
    workloads = {}
    best_autotvm_impl = None
    best_cfg = None
    dispatch_ctx = autotvm.task.DispatchContext.current
    # Silence AutoTVM warnings while probing every candidate; restored below.
    old_silent = autotvm.GLOBAL_SCOPE.silent
    autotvm.GLOBAL_SCOPE.silent = True
    for impl in all_impls:
        outs = impl.compute(attrs, inputs, out_type)
        outputs[impl] = outs
        workload = autotvm.task.get_workload(outs)
        workloads[impl] = workload
        if workload is None:
            # Not an AutoTVM tunable implementation
            continue
        cfg = dispatch_ctx.query(target, workload)
        if cfg.is_fallback:
            # Skip fallback config
            continue
        #logger.info("Implementation %s for %s has cost %.2e", impl.name, op.name, cfg.cost)
        # Keep the implementation with the lowest measured cost.
        if best_cfg is None or best_cfg.cost > cfg.cost:
            best_autotvm_impl = impl
            best_cfg = cfg
    autotvm.GLOBAL_SCOPE.silent = old_silent
    if best_autotvm_impl:
        # The best autotvm implementation definitely doesn't use fallback config
        #logger.info(
        #    "Using %s for %s based on lowest cost (%.2e)",
        #    best_autotvm_impl.name,
        #    op.name,
        #    best_cfg.cost,
        #)
        return best_autotvm_impl, outputs[best_autotvm_impl]

    # Use the implementation with highest plevel
    if workloads[best_plevel_impl] is not None:
        # Warn once per unique workload that no tuned config was found.
        msg = (
            "Cannot find config for target=%s, workload=%s. A fallback configuration "
            "is used, which may bring great performance regression."
            % (target, workloads[best_plevel_impl])
        )
        if (
            not autotvm.env.GLOBAL_SCOPE.silent
            and msg not in autotvm.task.DispatchContext.warning_messages
        ):
            autotvm.task.DispatchContext.warning_messages.add(msg)
            tuning_logger.warning(msg)
    #logger.info(
    #    "Using %s for %s based on highest priority (%s)",
    #    best_plevel_impl.name,
    #    op.name,
    #    best_plevel_impl.plevel,
    #)
    return best_plevel_impl, outputs[best_plevel_impl]
@tvm._ffi.register_func("relay.backend.target_specific_lowering")
def target_specific_lowering(func, inputMap, target_info=None):
    """Lower a (possibly fused) relay function directly to a backend-library
    implementation (cuDNN / cuBLAS / MKL).

    ``target_info`` encodes "<backend>_<pattern>"; the pattern string selects
    which library kernel to register on an ad-hoc OpStrategy, whose single
    implementation is then computed and returned as a LoweredOutput.
    """
    import sys
    from tvm.relay.op.contrib.tensorrt import partition_for_tensorrt
    #print("\t[Compile_engine.py] Custom lowering?", file=sys.stderr)

    # Eventually, we want to define custom implemenation
    # However, currently, we do not know how to do it.
    # So, for now, let's try the hacky way.
    strategy = _op.OpStrategy()

    # Collect every Call node of the function in post-order.
    calls = []
    def extract_attr(expr, calls):
        if type(expr) == tvm.relay.expr.Call:
            calls.append(expr)
    relay.analysis.post_order_visit(func, lambda expr: extract_attr(expr, calls))

    # target_info = "<backend>_<pattern>"; the pattern itself may contain '_'.
    tokens = target_info.split('_')
    target = tokens[0]
    pattern = '_'.join(tokens[1:])

    def collect_input(inputMap):
        # Flatten the {expr: [tensor, ...]} map into a single tensor list.
        inputs = []
        for key, varray in inputMap.items():
            for val in varray:
                inputs.append(val)
        return inputs

    attrs, ret_type = None, None
    if target == "cudnn":
        # TODO: conv3d, avgpool, batchnorm
        if pattern == "0-Op(nn.softmax)[*]":
            strategy.add_implementation(
                wrap_custom_compute_softmax(topi.cuda.softmax_cudnn),
                wrap_topi_schedule(topi.generic.schedule_extern),
                name="softmax.cudnn",
            )
            # has single op
            attrs = calls[0].attrs
            ret_type = calls[0].checked_type
            inputs = collect_input(inputMap)

        elif pattern == "0-Op(sigmoid)[*]":
            strategy.add_implementation(
                wrap_custom_compute_activation(topi.cuda.sigmoid_cudnn),
                wrap_topi_schedule(topi.generic.schedule_extern),
                name="sigmoid.cudnn",
            )
            # has single op
            attrs = calls[0].attrs
            ret_type = calls[0].checked_type
            inputs = collect_input(inputMap)

        elif pattern == "0-Op(nn.relu)[*]":
            strategy.add_implementation(
                wrap_custom_compute_activation(topi.cuda.relu_cudnn),
                wrap_topi_schedule(topi.generic.schedule_extern),
                name="relu.cudnn",
            )
            # has single op
            attrs = calls[0].attrs
            ret_type = calls[0].checked_type
            inputs = collect_input(inputMap)

        elif pattern == "0-Op(tanh)[*]":
            strategy.add_implementation(
                wrap_custom_compute_activation(topi.cuda.tanh_cudnn),
                wrap_topi_schedule(topi.generic.schedule_extern),
                name="tanh.cudnn",
            )
            # has single op
            attrs = calls[0].attrs
            ret_type = calls[0].checked_type
            inputs = collect_input(inputMap)

        # TODO: not supported yet
        elif pattern == "0-Op(nn.bias_add)[*, *]":
            strategy.add_implementation(
                wrap_custom_compute_biasadd(topi.cuda.biasadd_cudnn),
                wrap_topi_schedule(topi.generic.schedule_extern),
                name="biasadd.cudnn",
            )
            # has single op
            attrs = calls[0].attrs
            ret_type = calls[0].checked_type
            inputs = collect_input(inputMap)

        elif pattern == "0-Op(nn.conv2d)[*, *]":
            strategy.add_implementation(
                wrap_custom_compute_conv2d(
                    topi.cuda.conv2d_cudnn, need_data_layout=True, has_groups=True
                ),
                wrap_topi_schedule(topi.generic.schedule_extern),
                name="conv2d.cudnn",
            )
            # has single op
            attrs = calls[0].attrs
            ret_type = calls[0].checked_type
            inputs = collect_input(inputMap)

        elif pattern == "0-Op(nn.conv3d)[*, *]":
            strategy.add_implementation(
                wrap_compute_conv3d(
                    topi.cuda.conv3d_cudnn, need_layout=True
                ),
                wrap_topi_schedule(topi.generic.schedule_extern),
                name="conv3d.cudnn",
            )
            # has single op
            attrs = calls[0].attrs
            ret_type = calls[0].checked_type
            inputs = collect_input(inputMap)

        elif pattern == "0-Op(nn.max_pool2d)[*]":
            strategy.add_implementation(
                wrap_custom_compute_pool2d(topi.cuda.max_pool2d_cudnn),
                wrap_topi_schedule(topi.generic.schedule_extern),
                name="max_pool2d.cudnn",
            )
            # has single op
            attrs = calls[0].attrs
            ret_type = calls[0].checked_type
            inputs = collect_input(inputMap)

        elif pattern == "0-Op(nn.avg_pool2d)[*]":
            strategy.add_implementation(
                wrap_custom_compute_pool2d(topi.cuda.avg_pool2d_cudnn),
                wrap_topi_schedule(topi.generic.schedule_extern),
                name="avg_pool2d.cudnn",
            )
            # has single op
            attrs = calls[0].attrs
            ret_type = calls[0].checked_type
            inputs = collect_input(inputMap)

            # TODO: not supported yet
            #elif pattern == "bn":
            #strategy.add_implementation(
            #    wrap_custom_compute_maxpool2d(topi.cuda.maxpool2d_cudnn),
            #    wrap_topi_schedule(topi.generic.schedule_extern),
            #    name="bn.cudnn",
            #)
            # has single op
            #attrs = calls[0].attrs
            # Bug fix: this assignment was left live inside an otherwise fully
            # commented-out "bn" block; it bound an unused ``eret_type`` local.
            #eret_type = calls[0].checked_type
            #inputs = collect_input(inputMap)

        # fused ops
        elif pattern == "0-Op(nn.relu)[1-Op(add)[2-Op(nn.conv2d)[*, *], *]]":
            strategy.add_implementation(
                wrap_custom_compute_conv2d_add_relu(
                    topi.cuda.conv2d_add_relu_cudnn, need_data_layout=True, has_groups=True
                ),
                wrap_topi_schedule(topi.generic.schedule_extern),
                name="conv2d+add+relu.cudnn",
            )

            data, kernel, Z, bias = None, None, None, None
            attrs, ret_type = None, None
            # NOTE(review): ``args`` is only rebound in the conv2d branch, so
            # the add/relu branches index into the conv2d call's args —
            # presumably relying on the shared data-flow; verify intent.
            for call in calls:
                call_name = call.op.name
                if "conv2d" in call_name:
                    attrs = call.attrs
                    ret_type = call.checked_type
                    args = call.args
                    data = inputMap[args[0]]
                    kernel = inputMap[args[1]]
                elif "add" in call_name:
                    data2 = inputMap[args[1]]
                elif "relu" in call_name:
                    Z = inputMap[args[0]]
            inputs = [data[0], kernel[0], Z[0], data2[0]]

        # NOTE(review): "biad_add" in the pattern string below looks like a
        # typo for "bias_add"; if so, this branch can never match.
        elif pattern == "0-Op(nn.relu)[1-Op(nn.biad_add)[2-Op(nn.conv2d)[*, *], *]]":
            strategy.add_implementation(
                wrap_custom_compute_conv2d_add_relu(
                    topi.cuda.conv2d_bias_relu_cudnn, need_data_layout=True, has_groups=True
                ),
                wrap_topi_schedule(topi.generic.schedule_extern),
                name="conv2d+bias+relu.cudnn",
            )

            data, kernel, Z, bias = None, None, None, None
            attrs, ret_type = None, None
            for call in calls:
                call_name = call.op.name
                if "conv2d" in call_name:
                    attrs = call.attrs
                    ret_type = call.checked_type
                    args = call.args
                    data = inputMap[args[0]]
                    kernel = inputMap[args[1]]
                elif "bias_add" in call_name:
                    data2 = inputMap[args[1]]
                elif "relu" in call_name:
                    Z = inputMap[args[0]]
            inputs = [data[0], kernel[0], Z[0], data2[0]]

        elif pattern == "0-Op(nn.relu)[1-Op(add)[2-Op(nn.conv3d)[*, *], *]]":
            strategy.add_implementation(
                wrap_custom_compute_conv3d_add_relu(
                    topi.cuda.conv3d_add_relu_cudnn, need_layout=True
                ),
                wrap_topi_schedule(topi.generic.schedule_extern),
                name="conv3d+add+relu.cudnn",
            )

            data, kernel, Z, bias = None, None, None, None
            attrs, ret_type = None, None
            for call in calls:
                call_name = call.op.name
                if "conv3d" in call_name:
                    attrs = call.attrs
                    ret_type = call.checked_type
                    args = call.args
                    data = inputMap[args[0]]
                    kernel = inputMap[args[1]]
                elif "add" in call_name:
                    data2 = inputMap[args[1]]
                elif "relu" in call_name:
                    Z = inputMap[args[0]]
            inputs = [data[0], kernel[0], Z[0], data2[0]]

        elif pattern == "0-Op(nn.relu)[1-Op(nn.conv2d)[*, *]]":
            strategy.add_implementation(
                wrap_custom_compute_conv2d_relu(
                    topi.cuda.conv2d_relu_cudnn, need_data_layout=True, has_groups=True
                ),
                wrap_topi_schedule(topi.generic.schedule_extern),
                name="conv2d_relu.cudnn",
            )

            data, kernel, Z, bias = None, None, None, None
            attrs, ret_type = None, None
            for call in calls:
                call_name = call.op.name
                if "conv2d" in call_name:
                    attrs = call.attrs
                    ret_type = call.checked_type
                    args = call.args
                    data = inputMap[args[0]]
                    kernel = inputMap[args[1]]
                elif "add" in call_name:
                    bias = inputMap[args[1]]
                elif "relu" in call_name:
                    Z = inputMap[args[0]]
            inputs = [data[0], kernel[0]]

        else:
            # Unsupported backend op
            assert False, "{} is currently not supported in {}".format(target_info, target)

    # TODO: matmul vs dense?
    elif target == "cublas":
        if pattern == "0-Op(nn.dense)[*, *]":
            strategy.add_implementation(
                wrap_compute_dense(topi.cuda.dense_cublas),
                wrap_topi_schedule(topi.generic.schedule_extern),
                name="dense.cublas",
            )
            # has single op
            attrs = calls[0].attrs
            ret_type = calls[0].checked_type
            inputs = collect_input(inputMap)

        elif pattern == "0-Op(nn.batch_matmul)[*, *]":
            strategy.add_implementation(
                wrap_compute_batch_matmul(topi.cuda.batch_matmul_cublas),
                wrap_topi_schedule(topi.generic.schedule_extern),
                name="batch_matmul.cublas",
            )
            # has single op
            attrs = calls[0].attrs
            ret_type = calls[0].checked_type
            inputs = collect_input(inputMap)

        else:
            # Unsupported backend op
            assert False, "{} is currently not supported in {}".format(target_info, target)

    elif target == "mkl":
        if pattern == "0-Op(nn.dense)[*, *]":
            from tvm.te import SpecializedCondition
            # has single op
            attrs = calls[0].attrs
            ret_type = calls[0].checked_type
            inputs = collect_input(inputMap)

            same_type = inputs[0].dtype == inputs[1].dtype == ret_type.dtype
            dtype = inputs[0].dtype
            # NOTE(review): ``u8s8s32`` is not defined in this scope — this
            # condition will raise NameError if reached; confirm against the
            # upstream TVM strategy this was copied from.
            with SpecializedCondition(same_type and dtype in ["float32", "float64"] or u8s8s32):
                strategy.add_implementation(
                    wrap_compute_dense(topi.x86.dense_mkl),
                    wrap_topi_schedule(topi.x86.schedule_dense_mkl),
                    name="dense.mkl",
                )

        elif pattern == "0-Op(nn.batch_matmul)[*, *]":
            strategy.add_implementation(
                wrap_compute_batch_matmul(topi.x86.batch_matmul_mkl),
                wrap_topi_schedule(topi.x86.schedule_batch_matmul_mkl),
                name="batch_matmul.mkl",
            )
            # has single op
            attrs = calls[0].attrs
            ret_type = calls[0].checked_type
            inputs = collect_input(inputMap)

        else:
            # Unsupported backend op
            assert False, "{} is currently not supported in {}".format(target_info, target)

    elif target == "tensorrt":
        assert False, f"{target} should be passed to the external compiler"
    elif target == "dnnl":
        assert False, f"{target} should be passed to the external compiler"
    else:
        # Unsupported target
        assert False, "Unsupported target"

    # To compute subgraph
    #    attrs for each op
    #    input for the subgraph
    #    - pattern - will be given
    # May need rewrite?
    #
    # Compute the single registered implementation and return immediately.
    impl, outputs = None, None
    for spec in strategy.specializations:
        #if spec.condition:
        for impl in spec.implementations:
            # attribute, inputs, output_type
            outputs = impl.compute(attrs, inputs, ret_type)
            return LoweredOutput(outputs, impl)

    # Should not reach
    return None
@tvm._ffi.register_func("relay.backend.lower_call")
def lower_call(call, inputs, target):
    """Lower the call expression to op implementation and tensor outputs."""
    assert isinstance(call.op, tvm.ir.Op)
    op = call.op

    # Prepare the call_node->checked_type(). For the call node inputs, we ensure that
    # the shape is Int32. Following code ensures the same for the output as well.
    # TODO(@icemelon9): Support recursive tuple
    ret_type = call.checked_type
    if isinstance(ret_type, _ty.TensorType):
        ret_type = _ty.TensorType(get_shape(ret_type.shape), ret_type.dtype)
    elif isinstance(ret_type, _ty.TupleType):
        new_fields = []
        for field in ret_type.fields:
            if isinstance(field, _ty.TensorType):
                new_fields.append(_ty.TensorType(get_shape(field.shape), field.dtype))
            else:
                new_fields.append(field)
        ret_type = _ty.TupleType(new_fields)

    # Dynamic if either the result type or any argument type is dynamic.
    is_dyn = _ty.is_dynamic(call.checked_type)
    for arg in call.args:
        is_dyn = is_dyn or _ty.is_dynamic(arg.checked_type)

    # check if in the AutoTVM tracing mode, and disable if op is not in wanted list
    env = autotvm.task.TaskExtractEnv.current
    reenable_tracing = False
    if env is not None and env.tracing:
        if env.wanted_relay_ops is not None and op not in env.wanted_relay_ops:
            env.tracing = False
            reenable_tracing = True

    if not is_dyn:
        best_impl, outputs = select_implementation(op, call.attrs, inputs, ret_type, target)
    else:
        # TODO(@icemelon9): Allow tvm to generate multiple kernels for dynamic shapes.
        # AutoTVM is skipped for dynamic shapes: a tuned static config would
        # not generalize.
        best_impl, outputs = select_implementation(
            op, call.attrs, inputs, ret_type, target, use_autotvm=False
        )
    import sys
    #print(f"{op}, {target} --> {best_impl.name}", file=sys.stderr)
    # re-enable AutoTVM tracing
    if reenable_tracing:
        env.tracing = True
    return LoweredOutput(outputs, best_impl)
@tvm._ffi.register_object("relay.CompileEngine")
class CompileEngine(Object):
    """CompileEngine to get lowered code.

    Instances are obtained via :func:`get`; the constructor is intentionally
    unusable because the engine lives on the C++ side.
    """

    def __init__(self):
        raise RuntimeError("Cannot construct a CompileEngine")

    def lower(self, source_func, target=None):
        """Lower a source_func to a CachedFunc.

        Parameters
        ----------
        source_func : Union[tvm.relay.Function, CCacheKey]
            The source relay function.

        target : tvm.Target
            The target platform.

        Returns
        -------
        cached_func: CachedFunc
            The result of lowering.
        """
        # pylint: disable=broad-except, import-outside-toplevel
        try:
            key = _get_cache_key(source_func, target)
            return _backend._CompileEngineLower(self, key)
        except Exception:
            import traceback

            # Re-raise with the offending function's text appended so the
            # failure is diagnosable from the traceback alone.
            msg = traceback.format_exc()
            msg += "Error during compile func\n"
            msg += "--------------------------\n"
            msg += source_func.astext(show_meta_data=False)
            msg += "--------------------------\n"
            raise RuntimeError(msg)

    def lower_shape_func(self, source_func, target=None):
        """Lower a source_func to a shape function (for dynamic shapes)."""
        key = _get_cache_key(source_func, target)
        return _backend._CompileEngineLowerShapeFunc(self, key)

    def jit(self, source_func, target=None):
        """JIT a source_func to a tvm.runtime.PackedFunc.

        Parameters
        ----------
        source_func : Union[tvm.relay.Function, CCacheKey]
            The source relay function.

        target : tvm.Target
            The target platform.

        Returns
        -------
        jited_func: tvm.runtime.PackedFunc
            The result of jited function.
        """
        key = _get_cache_key(source_func, target)
        return _backend._CompileEngineJIT(self, key)

    def clear(self):
        """clear the existing cached functions"""
        _backend._CompileEngineClear(self)

    def items(self):
        """List items in the cache.

        Returns
        -------
        item_list : List[Tuple[CCacheKey, CCacheValue]]
            The list of items.
        """
        # The FFI returns a flat [key, value, key, value, ...] list; re-pair it.
        res = _backend._CompileEngineListItems(self)
        assert len(res) % 2 == 0
        return [(res[2 * i], res[2 * i + 1]) for i in range(len(res) // 2)]

    def shape_func_items(self):
        """List items in the shape_func_cache.

        Returns
        -------
        item_list : List[Tuple[CCacheKey, CCacheValue]]
            The list of shape_func_items.
        """
        res = _backend._CompileEngineListShapeFuncItems(self)
        assert len(res) % 2 == 0
        return [(res[2 * i], res[2 * i + 1]) for i in range(len(res) // 2)]

    def get_current_ccache_key(self):
        """Return the cache key currently being compiled."""
        return _backend._CompileEngineGetCurrentCCacheKey(self)

    @staticmethod
    def _dump_item(k, v):
        """Render one (CCacheKey, CCacheValue) cache entry for dump()."""
        res = "------------------------------------\n"
        res += "target={}\n".format(k.target)
        res += "use_count={}\n".format(v.use_count)
        res += "func_name={}\n".format(v.cached_func.func_name)
        res += "----relay function----\n"
        res += k.source_func.astext() + "\n"
        res += "----tir function----- \n"
        res += "inputs={}\n".format(v.cached_func.inputs)
        res += "outputs={}\n".format(v.cached_func.outputs)
        res += "function: \n"
        res += v.cached_func.funcs.astext() + "\n"
        return res

    def dump(self):
        """Return a string representation of engine dump.

        Returns
        -------
        dump : str
            The dumped string representation
        """
        # The two sections (lowered funcs and shape funcs) share the same
        # per-item layout; _dump_item keeps them in sync.
        items = self.items()
        res = "====================================\n"
        # NOTE(review): "CompilerEngine" typo kept — output text may be parsed
        # or asserted on elsewhere.
        res += "CompilerEngine dump, %d items cached\n" % len(items)
        for k, v in items:
            res += self._dump_item(k, v)
        res += "===================================\n"
        shape_func_items = self.shape_func_items()
        res += "%d shape_func_items cached\n" % len(shape_func_items)
        for k, v in shape_func_items:
            res += self._dump_item(k, v)
        res += "===================================\n"
        return res
def get():
    """Get the global compile engine.

    Returns
    -------
    engine : tvm.relay.backend.CompileEngine
        The compile engine.
    """
    # The engine is a process-wide singleton owned by the C++ runtime.
    return _backend._CompileEngineGlobal()
| 1.710938 | 2 |
local_settings.d/_10_use_suse_theme.py | toabctl/branding | 2 | 12761880 | <filename>local_settings.d/_10_use_suse_theme.py
# Theme registry: (slug, display label, theme directory) triples.
# The SUSE theme is listed first; "default" remains available as a fallback.
AVAILABLE_THEMES = [
    ('suse', 'SUSE', 'themes/suse'),
    ('default', 'Default', 'themes/default'),
]
| 1.21875 | 1 |
djangocms_text_mediumeditor/widgets.py | mgierm/djangocms-text-mediumeditor | 4 | 12761881 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import uuid
from django.forms.widgets import Textarea
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
class MediumEditorWidget(Textarea):
    """Textarea widget that attaches a MediumEditor instance via a rendered
    JS/HTML snippet appended after the textarea markup."""

    def __init__(self, attrs=None):
        # Bug fix: the original called ``super(Textarea, self).__init__``,
        # which skips Textarea itself in the MRO; start from this class.
        super(MediumEditorWidget, self).__init__(attrs)

    def render(self, name, value, attrs=None, renderer=None):
        # Reuse a caller-provided id when present, otherwise generate a
        # unique one so several widgets can coexist on the same page.
        if attrs and "id" in attrs:
            editor_id = attrs["id"]
        else:
            if not attrs:
                attrs = {}
            editor_id = attrs["id"] = "mediumeditor_%s" % (uuid.uuid4(),)

        context = {"editor_id": editor_id}

        # Forward ``renderer`` (previously accepted but dropped) so template
        # based widget rendering uses the engine Django hands us.
        return (
            super(MediumEditorWidget, self).render(name, value, attrs, renderer=renderer)
            + mark_safe(render_to_string("cms/plugins/widgets/mediumeditor.html", context))
        )
| 2.140625 | 2 |
ext/twitter.py | jaco8800/Toonbot | 2 | 12761882 | from discord.ext import commands
from peony import PeonyClient
from datetime import datetime
import discord
import asyncio
import json
from lxml import html
import html as htmlc
import traceback
class Twitter:
""" Twitter stream commands """
    def __init__(self, bot):
        # The cog keeps the bot reference, the tracked-accounts mapping loaded
        # from twitter.json, and a Peony client whose stream-consumer task is
        # stored on the bot so it can be cancelled on unload.
        self.bot = bot
        # Master switch: flipped off on unload so the stream loop exits.
        self.tweetson = True
        with open("twitter.json") as f:
            self.track = json.load(f)
        self.pclient = PeonyClient(**self.bot.credentials['Twitter'])
        self.bot.twitask = self.bot.loop.create_task(self.twat())
    def __unload(self):
        # Cog teardown hook (old discord.py naming convention, presumably):
        # stop emitting tweets and cancel the background stream task.
        self.tweetson = False
        self.bot.twitask.cancel()
    async def _save(self):
        # Persist the tracked-accounts mapping back to twitter.json under the
        # bot-wide config lock to avoid concurrent writes.
        with await self.bot.configlock:
            with open('twitter.json',"w",encoding='utf-8') as f:
                json.dump(self.track,f,ensure_ascii=True,
                sort_keys=True,indent=4, separators=(',',':'))
    async def twat(self):
        """ Twitter tracker function.

        Long-running consumer of the Peony filtered statuses stream: for each
        tweet from a tracked account it builds a Discord embed (with hashtags,
        URLs and mentions rewritten to markdown links) and sends it to the
        channel configured for that account.
        """
        await self.bot.wait_until_ready()
        # Retrieve list of IDs to track
        ids = ",".join([str(i[1]["id"]) for i in self.track.items()])
        footericon = "https://abs.twimg.com/icons/apple-touch-icon-192x192.png"
        ts = self.pclient.stream.statuses.filter.post(follow=ids)
        async with ts as stream:
            print(f"Tracking {len(self.track.items())} twitter users.")
            async for t in stream:
                # Break loop if bot not running.
                if self.bot.is_closed():
                    break
                # if tweet output is disabled, break the loop.
                if not self.tweetson:
                    break
                # discard malformed tweets
                if not hasattr(t,"user"):
                    continue
                # Set destination or discard non-tracked
                u = t.user
                # NOTE(review): ``u.id_str in ids`` is a substring test against
                # the comma-joined id string, which can false-positive when one
                # id is a prefix/suffix of another — confirm this is intended.
                if u.id_str in ids:
                    s = self.track.items()
                    chanid = [i[1]["channel"] for i in s if i[1]["id"] == int(u.id_str)][0]
                    destin = self.bot.get_channel(chanid)
                else:
                    continue
                # discard retweets & adverts
                if hasattr(t,'retweeted_status') or t.text.startswith(("rt",'ad')):
                    continue
                # discard replies
                if t["in_reply_to_status_id"] is not None:
                    continue
                # Truncated tweets carry full text/entities in extended_tweet.
                if t.truncated:
                    txt = htmlc.unescape(t.extended_tweet.full_text)
                    ents = dict(t.entities)
                    ents.update(dict(t.extended_tweet.entities))
                else:
                    ents = t.entities
                    txt = htmlc.unescape(t.text)
                # Keyword filter: drop tweets mentioning "coral".
                if "coral" in txt:
                    continue
                # Rewrite hashtags, bare URLs and mentions into markdown links.
                if "hashtags" in ents:
                    for i in ents["hashtags"]:
                        frnt = f"[#{i.text}]"
                        bk = f"(https://twitter.com/hashtag/{i.text})"
                        rpl = frnt + bk
                        txt = txt.replace(f'#{i.text}',rpl)
                if "urls" in ents:
                    for i in ents["urls"]:
                        txt = txt.replace(i.url,i.expanded_url)
                if "user_mentions" in ents:
                    for i in ents["user_mentions"]:
                        frnt = f"[@{i.screen_name}]"
                        bk = f"(https://twitter.com/{i.screen_name})"
                        rpl = frnt+bk
                        txt = txt.replace(f'@{i.screen_name}',rpl)
                # Build the embed from tweet metadata.
                e = discord.Embed(description=txt)
                if hasattr(u,"url"):
                    e.url = u.url
                if hasattr(u,"profile_link_color"):
                    e.color = int(u.profile_link_color,16)
                e.set_thumbnail(url=u.profile_image_url)
                e.timestamp = datetime.strptime(t.created_at,"%a %b %d %H:%M:%S %z %Y")
                e.set_footer(icon_url=footericon,text="Twitter")
                lk = f"http://www.twitter.com/{u.screen_name}/status/{t.id_str}"
                e.title = f"{u.name} (@{u.screen_name})"
                e.url = lk
                # Extract entities to lists
                photos = []
                videos = []
                def extract_entities(alist):
                    for i in alist:
                        if i.type in ["photo","animated_gif"]:
                            photos.append(i.media_url)
                        elif i.type == "video":
                            videos.append(i.video_info.variants[1].url)
                        else:
                            print("Unrecognised TWITTER MEDIA TYPE")
                            print(i)
                # Media may live on the tweet itself or on a quoted tweet.
                if hasattr(t,"extended_entities") and hasattr (t.extended_entities,"media"):
                    extract_entities(t.extended_entities.media)
                if hasattr(t,"quoted_status"):
                    if hasattr(t.quoted_status,"extended_entities"):
                        if hasattr(t.quoted_status.extended_entities,"media"):
                            extract_entities(t.quoted_status.extended_entities.media)
                # Set image if one image, else add embed field.
                if len(photos) == 1:
                    e.set_image(url=photos[0])
                elif len(photos) > 1:
                    en = enumerate(photos,start=1)
                    v = ", ".join([f"[{i}]({j})" for i, j in en])
                    e.add_field(name="Attached Photos",value=v,inline=True)
                # Add embed field for videos
                # NOTE(review): when there is MORE than one video, the field is
                # added below but no ``destin.send`` call follows — the embed is
                # never sent for multi-video tweets. Looks like a missing send;
                # confirm and fix in a follow-up.
                if videos:
                    if len(videos) > 1:
                        en = enumerate(videos,start=1)
                        v = ", ".join([f"[{i}]({j})" for i, j in en])
                        e.add_field(name="Attached Videos",value=v,inline=True)
                    else:
                        await destin.send(embed=e)
                        await destin.send(videos[0])
                else:
                    await destin.send(embed=e)
@commands.group(aliases=["tweet","tweets","checkdelay","twstatus"],invoke_without_command=True)
@commands.is_owner()
async def twitter(self,ctx):
""" Check delay and status of twitter tracker """
e = discord.Embed(title="Twitter Status",color=0x7EB3CD)
e.set_thumbnail(url="https://i.imgur.com/jSEtorp.png")
if self.tweetson:
e.description = "```diff\n+ ENABLED```"
else:
e.description = "```diff\n- DISABLED```"
e.color = 0xff0000
footer = "Tweets are not currently being output."
e.set_footer(text=footer)
for i in set([i[1]["channel"] for i in self.track.items()]):
# Get Channel name from ID in JSON
fname = f"#{self.bot.get_channel(int(i)).name} Tracker"
# Find all tracks for this channel.
fvalue = "\n".join([c[0] for c in self.track.items() if c[1]["channel"] == i])
e.add_field(name=fname,value=fvalue)
if self.bot.is_owner(ctx.author):
x = self.bot.twitask._state
if x == "PENDING":
v = "✅ Task running."
elif x == "CANCELLED":
v = "⚠ Task Cancelled."
elif x == "FINISHED":
self.bot.twitask.print_stack()
v = "⁉ Task Finished"
z = self.bot.twitask.exception()
else:
v = f"❔ `{self.bot.twitask._state}`"
e.add_field(name="Debug Info",value=v,inline=False)
try:
e.add_field(name="Exception",value=z,inline=False)
except NameError:
pass
await ctx.send(embed=e)
@twitter.command(name="on",aliases=["start"])
@commands.is_owner()
async def _on(self,ctx):
""" Turn tweet output on """
if not self.tweetson:
self.tweetson = True
await ctx.send("<:tweet:332196044769198093> Twitter output has been enabled.")
self.bot.twitask = self.bot.loop.create_task(self.twat())
elif self.bot.twitask._state in ["FINISHED","CANCELLED"]:
e = discord.Embed(color=0x7EB3CD)
e.description = f"<:tweet:332196044769198093> Restarting {self.bot.twitask._state}\
task after exception {self.bot.twitask.exception()}."
await ctx.send(embed=e)
self.bot.twitask = self.bot.loop.create_task(self.twat())
else:
await ctx.send("<:tweet:332196044769198093> Twitter output already enabled.")
@twitter.command(name="off",aliases=["stop"])
@commands.is_owner()
async def _off(self,ctx):
""" Turn tweet output off """
if self.tweetson:
self.tweetson = False
await ctx.send("<:tweet:332196044769198093> Twitter output has been disabled.")
else:
await ctx.send("<:tweet:332196044769198093> Twitter output already disabled.")
@twitter.command(name="add")
@commands.is_owner()
async def _add(self,ctx,username):
""" Add user to track for this channel """
params = {"user_name":username,"submit":"GET+USER+ID"}
async with self.bot.session.get("http://gettwitterid.com/",params=params) as resp:
if resp.status != 200:
await ctx.send("🚫 HTTP Error {resp.status} try again later.")
return
tree = html.fromstring(await resp.text())
try:
id = tree.xpath('.//tr[1]/td[2]/p/text()')[0]
except IndexError:
await ctx.send("🚫 Couldn't find user with that name.")
self.track[username] = {"id":int(id),"channel":ctx.channel.id}
await self._save()
await ctx.send(f"<:tweet:332196044769198093> {username} will be tracked in {ctx.channel.mention} from next restart.")
@twitter.command(name="del")
@commands.is_owner()
async def _del(self,ctx,username):
""" Deletes a user from the twitter tracker """
trk = [{k.lower():k} for k in self.track.keys()]
if username.lower() in trk:
self.track.pop(trk[username.lower()])
await self._save()
def setup(bot):
bot.add_cog(Twitter(bot))
| 2.59375 | 3 |
tests/integration/sphinx/test_alabaster_sidebars.py | pauleveritt/goku | 0 | 12761883 | <gh_stars>0
import pytest
from bs4.element import Tag
# The default values for all theme options, knobs, templates, etc.
# Nothing customized in conf.py or anywhere else.
pytestmark = pytest.mark.sphinx('html', testroot='alabaster-sidebars')
# *** NOTE: We are using ``subdir/subfile.html`` to get some of the
# navigation in the sidebars.
@pytest.mark.parametrize('page', ['subdir/subfile.html', ], indirect=True)
class TestAlabasterSidebars:
""" Turn on the Alabaster-recommended html_sidebars """
def test_about_logo(self, page):
logo: Tag = page.select_one('p.logo')
assert logo
# The href on the link
assert '../index.html' == logo.find('a')['href']
# img path
assert '../_static/python-logo.png' == logo.find('img')['src']
# heading
assert 'Goku Sidebars' == logo.find('h1').text
def test_about_description(self, page):
assert 'description1' == page.select_one('p.blurb').text
def test_github(self, page):
github: Tag = page.find('iframe', attrs=dict(width='200px'))
assert 'github_user1' in github['src']
assert 'github_repo1' in github['src']
assert 'github_type1' in github['src']
assert 'github_count1' in github['src']
def test_about_travis(self, page):
travis: Tag = page.select('a.badge')[0]
assert 'travis-ci.org' in travis['href']
assert 'github_user1' in travis['href']
assert 'badge_branch1' in travis.select_one('img')['alt']
assert 'badge_branch1' in travis.select_one('img')['src']
def test_about_codecov(self, page):
travis: Tag = page.select('a.badge')[1]
assert 'codecov.io' in travis['href']
assert 'github_user1' in travis['href']
assert 'badge_branch1' in travis.select_one('img')['alt']
assert 'badge_branch1' in travis.select_one('img')['src']
def test_donate_heading(self, page):
heading: Tag = page.select_one('h3.donation')
assert heading
def test_donate_url(self, page):
link: Tag = page.find('a', attrs=dict(href='donate_url1'))
assert link
assert 'shields.io' in link.select_one('img')['src']
def test_donate_opencollective(self, page):
url = 'https://opencollective.com/opencollective1/donate'
link: Tag = page.find('a', attrs=dict(href=url))
assert link
assert 'opencollective.com' in link.select_one('img')['src']
def test_donate_tidelift(self, page):
link: Tag = page.find('a', attrs=dict(href='tidelift_url1'))
assert link
assert 'Tidelift Subscription' in link.text
def test_navigation(self, page):
toctree: Tag = page.select_one('ul.current')
assert toctree
# Should have two top-level items in it
nodes = toctree.find_all('li')
assert 3 == len(nodes)
# First
assert ['toctree-l1'] == nodes[0]['class']
assert '../hellopage.html' == nodes[0].find('a')['href']
assert 'Hello Page' == nodes[0].find('a').text
# Second
assert ['toctree-l1', 'current'] == nodes[1]['class']
assert 'index.html' == nodes[1].find('a')['href']
assert 'Subdir' == nodes[1].find('a').text
# Third
assert ['toctree-l2', 'current'] == nodes[2]['class']
assert '#' == nodes[2].find('a')['href']
assert 'Subfile' == nodes[2].find('a').text
def test_extra_nav_links(self, page):
extra: Tag = page.find('a', attrs=dict(href='extra1'))
assert 'Extra' == extra.text
def test_relations_heading(self, page):
# Relations is display: none but let's test it anyway
relations: Tag = page.select_one('div.relations')
assert 'Related Topics' == relations.find('h3').text
# Entries
entries = relations.find_all('a')
assert '../index.html' == entries[0]['href']
assert 'Documentation overview' == entries[0].text
assert 'index.html' == entries[1]['href']
assert 'Subdir' == entries[1].text
assert 'index.html' == entries[2]['href']
assert 'Subdir' == entries[2].text
| 2.078125 | 2 |
LC/82.py | szhu3210/LeetCode_Solutions | 2 | 12761884 | <reponame>szhu3210/LeetCode_Solutions<gh_stars>1-10
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def deleteDuplicates(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
res=ListNode(0)
res.next=head
last=res
while last.next:
probe=last.next
depth=1
while probe and probe.next and probe.val==probe.next.val:
probe=probe.next
depth+=1
if depth>1:
last.next=probe.next
else:
last=last.next
return res.next | 3.5625 | 4 |
tests/unit_tests/data_steward/cdr_cleaner/cleaning_rules/table_suppression_test.py | lrwb-aou/curation | 16 | 12761885 | """
Unit test for table_suppression module
Original Issues: DC-1360
As part of the controlled tier, some table data will be entirely suppressed. When suppression happens, the table
needs to maintain it’s expected schema, but drop all of its data.
Apply table suppression to note, location, provider, and care_site tables.
table schemas should remain intact and match their data_steward/resource_files/schemas/<table>.json schema definition.
Should be added to list of CONTROLLED_TIER_DEID_CLEANING_CLASSES in data_steward/cdr_cleaner/clean_cdr.py
all data should be dropped from the tables
sandboxing not required
"""
# Python imports
import unittest
# Project imports
from cdr_cleaner.cleaning_rules.table_suppression import TableSuppression, tables, TABLE_SUPPRESSION_QUERY
from constants.cdr_cleaner import clean_cdr as clean_consts
import constants.cdr_cleaner.clean_cdr as cdr_consts
class TableSuppressionTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
print('**************************************************************')
print(cls.__name__)
print('**************************************************************')
def setUp(self):
self.project_id = 'test_project'
self.dataset_id = 'test_dataset'
self.sandbox_id = 'test_sandbox'
self.client = None
self.rule_instance = TableSuppression(self.project_id, self.dataset_id,
self.sandbox_id)
self.assertEqual(self.rule_instance.project_id, self.project_id)
self.assertEqual(self.rule_instance.dataset_id, self.dataset_id)
self.assertEqual(self.rule_instance.sandbox_dataset_id, self.sandbox_id)
def test_setup_rule(self):
# Test
self.rule_instance.setup_rule(self.client)
def test_get_query_specs(self):
# Pre conditions
self.assertEqual(self.rule_instance.affected_datasets,
[clean_consts.CONTROLLED_TIER_DEID])
# Test
results_list = self.rule_instance.get_query_specs()
# Post conditions
expected_query_list = []
for table in tables:
query = dict()
query[cdr_consts.QUERY] = TABLE_SUPPRESSION_QUERY.render(
project_id=self.project_id,
dataset_id=self.dataset_id,
table=table,
)
expected_query_list.append(query)
self.assertEqual(results_list, expected_query_list)
| 2.28125 | 2 |
ns-allinone-3.27/ns-3.27/src/propagation/bindings/callbacks_list.py | zack-braun/4607_NS | 93 | 12761886 | callback_classes = [
['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<const ns3::MobilityModel>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
| 1.101563 | 1 |
2016/day8-1.py | ndraper2/adventofcode | 0 | 12761887 | def display_screen(instructions):
grid = [[' ' for i in range(6)] for i in range(50)]
for instruction in instructions:
if instruction.startswith('rect'):
x, y = instruction.split(' ')[-1].split('x')
x, y = int(x), int(y)
for i in range(x):
for j in range(y):
grid[i][j] = 'X'
elif instruction.startswith('rotate'):
words = instruction.split(' ')
shift = int(words[-1])
axis, number = words[2].split('=')
number = int(number)
if axis == 'x':
grid[number] = grid[number][-shift:] + grid[number][:-shift]
elif axis == 'y':
temprow = [grid[i][number] for i in range(50)]
temprow = temprow[-shift:] + temprow[:-shift]
for i in range(50):
grid[i][number] = temprow[i]
count = 0
for i in range(50):
for j in range(6):
if grid[i][j] == 'X':
count +=1
from pprint import pprint
for item in grid:
pprint(''.join(item))
return count
if __name__ == '__main__':
with open('input8-1.txt', 'r') as f:
instructions = f.read().splitlines()
print(display_screen(instructions))
print('ruruceoeil')
| 3.484375 | 3 |
convert.py | sharleynelefevre/Forced-Alignment | 0 | 12761888 | <reponame>sharleynelefevre/Forced-Alignment
#utils
import json
import re
import os
from typing import TextIO,Union
import warnings
#pyannote
from pyannote.core import Annotation,Segment,Timeline,notebook,SlidingWindowFeature,SlidingWindow
def xml_to_GeckoJSON(xml_root,raw_script):
"""
Parameters:
xml_root : root of the xml tree defined by vrbs for forced alignment. root[3] should be SegmentList, a list of speech segments
raw_script : `str` : the script as defined in https://github.com/hbredin/pyannote-db-plumcot/blob/develop/CONTRIBUTING.md#idepisodetxt
Each line is a speech turn and the first (space-separated) token is the normalized speaker id.
Returns:
gecko_json : a JSON `dict` based on the demo file of https://github.com/gong-io/gecko/blob/master/samples/demo.json
should be written to a file using json.dump"""
gecko_json=json.loads("""{
"schemaVersion" : "2.0",
"monologues" : [ ]
}""")
gecko_json["monologues"]=[[] for _ in raw_script.split("\n")]
json_i=0
terms=[]
current_speaker=xml_root[3][0][0].text.strip()[1:-1]
for i,speech_segment in enumerate(xml_root[3]):
for word in speech_segment:
if word.text.strip()[0]=="[":#speaker id -> add new speaker
speaker={
"name" : None,
"id" : current_speaker,#first and last charcater should be []
"vrbs_id" : speech_segment.attrib['spkid']
}
current_speaker=word.text.strip()[1:-1]
gecko_json["monologues"][json_i]={
"speaker":speaker,
"terms":terms
}
json_i+=1
terms=[]
else:
terms.append(
{
"start" : float(word.attrib['stime']),
"end" : float(word.attrib['stime'])+float(word.attrib['dur']),
"text" : word.text,
"type" : "WORD",
"confidence": float(word.attrib['conf'])
})
speaker={
"name" : None,
"id" : current_speaker,#first and last charcater should be []
"vrbs_id" : speech_segment.attrib['spkid']
}
new_monologue={
"speaker":speaker,
"terms":terms
}
if json_i<len(gecko_json["monologues"]):
gecko_json["monologues"][json_i]=new_monologue
else:
gecko_json["monologues"].append(new_monologue)
gecko_json["monologues"].pop(0)
return gecko_json
def gecko_JSON_to_aligned(gecko_JSON, uri=None):
"""
Parameters:
-----------
gecko_JSON : `dict`
loaded from a Gecko-compliant JSON as defined in xml_to_GeckoJSON
uri (uniform resource identifier) : `str`
which identifies the annotation (e.g. episode number)
Defaults to None.
Returns:
--------
aligned: `str`
as defined in README one file per space-separated token.
<file_uri> <speaker_id> <start_time> <end_time> <token> <confidence_score>
"""
aligned=""
for monologue in gecko_JSON["monologues"]:
speaker_ids=monologue["speaker"]["id"].split("@")#defined in https://github.com/hbredin/pyannote-db-plumcot/blob/develop/CONTRIBUTING.md#idepisodetxt
for i,term in enumerate(monologue["terms"]):
for speaker_id in speaker_ids:#most of the time there's only one
if speaker_id!='':#happens with "all@"
aligned+=f'{uri} {speaker_id} {term["start"]} {term["end"]} {term["text"].strip()} {term.get("confidence")}\n'
return aligned
def gecko_JSON_to_Annotation(gecko_JSON, uri=None, modality='speaker',
confidence_threshold=0.0, collar=0.0, expected_min_speech_time=0.0, manual=False):
"""
Parameters:
-----------
gecko_JSON : `dict`
loaded from a Gecko-compliant JSON as defined in xml_to_GeckoJSON
uri (uniform resource identifier) : `str`
which identifies the annotation (e.g. episode number)
Default : None
modality : `str`
modality of the annotation as defined in https://github.com/pyannote/pyannote-core
confidence_threshold : `float`, Optional.
The segments with confidence under confidence_threshold won't be added to UEM file.
Defaults to keep every segment (i.e. 0.0)
collar: `float`, Optional.
Merge tracks with same label and separated by less than `collar` seconds.
Defaults to keep tracks timeline untouched (i.e. 0.0)
expected_min_speech_time: `float`, Optional.
Threshold (in seconds) under which the total duration of speech time is suspicious (warns the user).
Defaults to never suspect anything (i.e. 0.0)
manual : `bool`
Whether the json is coming from a manual correction or straight from
the forced-alignment output.
In the former case, the regions timing is used. `confidence_threshold`
and `collar` are thus irrelevant.
In the latter case (default), the timing of each term is used.
Returns:
--------
annotation: pyannote `Annotation`
for speaker identification/diarization as defined in https://github.com/pyannote/pyannote-core
annotated: pyannote `Timeline`
representing the annotated parts of the gecko_JSON files (depends on confidence_threshold)
"""
annotation = Annotation(uri, modality)
not_annotated = Timeline(uri=uri)
total_speech_time=0.0
for monologue in gecko_JSON["monologues"]:
#defined in https://github.com/hbredin/pyannote-db-plumcot/blob/develop/CONTRIBUTING.md#idepisodetxt
speaker_ids=monologue["speaker"]["id"].split("@")
if manual:
for speaker_id in speaker_ids:#most of the time there's only one
if speaker_id!='':#happens with "all@"
annotation[Segment(monologue["start"],monologue["end"]),speaker_id]=speaker_id
total_speech_time+=monologue["end"]-monologue["start"]
else:
for i,term in enumerate(monologue["terms"]):
for speaker_id in speaker_ids:#most of the time there's only one
if speaker_id!='':#happens with "all@"
annotation[Segment(term["start"],term["end"]),speaker_id]=speaker_id
total_speech_time+=term["end"]-term["start"]
if term["confidence"] <= confidence_threshold:
not_annotated.add(Segment(term["start"],term["end"]))
if total_speech_time<expected_min_speech_time:
warnings.warn(f"total speech time of {uri} is only {total_speech_time})")
if manual:
annotated=Timeline(
[Segment(0.0,monologue["end"])],
uri
)
else:
annotation=annotation.support(collar)
annotated=not_annotated.gaps(support=Segment(0.0,term["end"]))
return annotation, annotated
| 2.578125 | 3 |
intro.py | Sirindil/NIMBH | 0 | 12761889 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 26 22:00:03 2016
@author: Sirindil
"""
import os
import sys
import time
import shlex
import random
import string
import struct
import time
import platform
import subprocess
import ctypes
from ctypes import windll, byref, wintypes, Structure, c_ulong
from ctypes.wintypes import SMALL_RECT
from colorama import init, Fore, Back, Style, Cursor
import win32com.client
import win32api, win32con
import ctypes
from ctypes import wintypes
from colorama import init
import re
from functools import partial
import winsound
#import pywinauto
init(strip=not sys.stdout.isatty()) # strip colors if stdout is redirected
user32 = ctypes.WinDLL('user32', use_last_error=True)
class POINT(Structure):
    """ctypes mirror of the Win32 POINT struct used by GetCursorPos.

    Bug fix: the Win32 POINT members are LONG (signed 32-bit).  The
    original used c_ulong, which corrupts negative coordinates — these
    occur on multi-monitor setups where a secondary display sits to the
    left of / above the primary one.
    """
    _fields_ = [("x", ctypes.c_long), ("y", ctypes.c_long)]
def queryMousePosition():
    """Return the current cursor location as ``{"x": ..., "y": ...}`` in screen pixels."""
    position = POINT()
    windll.user32.GetCursorPos(byref(position))
    return {"x": position.x, "y": position.y}
def click(x, y):
    """Move the cursor to screen coordinates (x, y) and simulate one left click."""
    win32api.SetCursorPos((x, y))
    # A click is a button-down immediately followed by a button-up.
    for flag in (win32con.MOUSEEVENTF_LEFTDOWN, win32con.MOUSEEVENTF_LEFTUP):
        win32api.mouse_event(flag, x, y, 0, 0)
# ANSI terminal escape prefixes and the bell character.
CSI = '\033['  # Control Sequence Introducer (ESC [)
OSC = '\033]'  # Operating System Command (ESC ])
BEL = '\007'   # terminal bell
def clear_line(mode=2):
    """Build the ANSI erase-in-line sequence.

    mode 0 erases to end of line, 1 to start of line, 2 (default) the
    whole line.  Returns the escape string; does not write it anywhere.
    """
    return '\033[{0}K'.format(mode)
# SendInput event-type discriminators (the `type` field of INPUT).
INPUT_MOUSE = 0
INPUT_KEYBOARD = 1
INPUT_HARDWARE = 2

# KEYBDINPUT.dwFlags bits.
KEYEVENTF_EXTENDEDKEY = 0x0001
KEYEVENTF_KEYUP = 0x0002
KEYEVENTF_UNICODE = 0x0004
KEYEVENTF_SCANCODE = 0x0008

# MapVirtualKeyExW translation mode: virtual key -> scan code.
MAPVK_VK_TO_VSC = 0

# Virtual-key codes used below.
# msdn.microsoft.com/en-us/library/dd375731
VK_TAB = 0x09
VK_MENU = 0x12
VK_RETURN = 0x0D
VK_CONTROL = 0x11
VK_D = 0x44

# C struct definitions
# ULONG_PTR is not in ctypes.wintypes on older Pythons; WPARAM has the
# same pointer-sized layout, so it is aliased here.
wintypes.ULONG_PTR = wintypes.WPARAM
class MOUSEINPUT(ctypes.Structure):
    """ctypes mirror of the Win32 MOUSEINPUT struct (mouse event payload).

    Field order and types must match the Windows ABI exactly; do not
    reorder or retype them.
    """
    _fields_ = (("dx",          wintypes.LONG),
                ("dy",          wintypes.LONG),
                ("mouseData",   wintypes.DWORD),
                ("dwFlags",     wintypes.DWORD),
                ("time",        wintypes.DWORD),
                ("dwExtraInfo", wintypes.ULONG_PTR))
class KEYBDINPUT(ctypes.Structure):
    """ctypes mirror of the Win32 KEYBDINPUT struct (keyboard event payload)."""
    _fields_ = (("wVk",         wintypes.WORD),
                ("wScan",       wintypes.WORD),
                ("dwFlags",     wintypes.DWORD),
                ("time",        wintypes.DWORD),
                ("dwExtraInfo", wintypes.ULONG_PTR))

    def __init__(self, *args, **kwds):
        super(KEYBDINPUT, self).__init__(*args, **kwds)
        # some programs use the scan code even if KEYEVENTF_SCANCODE
        # isn't set in dwFlags, so attempt to map the correct code.
        if not self.dwFlags & KEYEVENTF_UNICODE:
            self.wScan = user32.MapVirtualKeyExW(self.wVk,
                                                 MAPVK_VK_TO_VSC, 0)
class HARDWAREINPUT(ctypes.Structure):
    """ctypes mirror of the Win32 HARDWAREINPUT struct (hardware event payload)."""
    _fields_ = (("uMsg",    wintypes.DWORD),
                ("wParamL", wintypes.WORD),
                ("wParamH", wintypes.WORD))
class INPUT(ctypes.Structure):
    """ctypes mirror of the Win32 INPUT struct (a tagged union of event types).

    `type` selects which union member is meaningful (INPUT_MOUSE /
    INPUT_KEYBOARD / INPUT_HARDWARE).  The union is anonymous, so its
    members (ki, mi, hi) are accessed directly on INPUT instances.
    """
    class _INPUT(ctypes.Union):
        _fields_ = (("ki", KEYBDINPUT),
                    ("mi", MOUSEINPUT),
                    ("hi", HARDWAREINPUT))
    _anonymous_ = ("_input",)
    _fields_ = (("type",   wintypes.DWORD),
                ("_input", _INPUT))

# Pointer-to-INPUT type, used in SendInput's argtypes below.
LPINPUT = ctypes.POINTER(INPUT)
def _check_count(result, func, args):
if result == 0:
raise ctypes.WinError(ctypes.get_last_error())
return args
# Wire the errcheck/argtypes onto SendInput so a failed injection raises
# instead of failing silently.
user32.SendInput.errcheck = _check_count
user32.SendInput.argtypes = (wintypes.UINT, # nInputs
                             LPINPUT, # pInputs
                             ctypes.c_int) # cbSize
# Functions
def PressKey(hexKeyCode):
    """Synthesize a key-down event for the given Win32 virtual-key code."""
    event = INPUT(type=INPUT_KEYBOARD,
                  ki=KEYBDINPUT(wVk=hexKeyCode))
    user32.SendInput(1, ctypes.byref(event), ctypes.sizeof(event))
def ReleaseKey(hexKeyCode):
    """Synthesize a key-up event for the given Win32 virtual-key code."""
    event = INPUT(type=INPUT_KEYBOARD,
                  ki=KEYBDINPUT(wVk=hexKeyCode, dwFlags=KEYEVENTF_KEYUP))
    user32.SendInput(1, ctypes.byref(event), ctypes.sizeof(event))
def AltEnter():
    """Send Alt+Enter (console fullscreen toggle).

    Enter is pressed and released while Alt is held; Alt is kept down for
    an extra half second before being released.
    """
    PressKey(VK_MENU)      # Alt down
    PressKey(VK_RETURN)    # Enter down
    ReleaseKey(VK_RETURN)  # Enter up
    time.sleep(0.5)
    ReleaseKey(VK_MENU)    # Alt up
def CtlD():
    """Send Ctrl+D.

    D is pressed and released while Ctrl is held; Ctrl is kept down for an
    extra half second before being released.
    """
    PressKey(VK_CONTROL)  # Ctrl down
    PressKey(VK_D)        # D down
    ReleaseKey(VK_D)      # D up
    time.sleep(0.5)
    ReleaseKey(VK_CONTROL)  # Ctrl up
# NOTE(review): this rebinds the module-level `user32` (previously a
# WinDLL created with use_last_error=True and with SendInput
# errcheck/argtypes configured above) to a plain windll handle, so
# later PressKey/ReleaseKey calls go through an unconfigured SendInput.
# Confirm whether this shadowing is intentional.
user32 = ctypes.windll.user32
# Primary display resolution (width, height) in pixels.
screensize = user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)
def terminalSize():  # Windows only
    """Return the console size as (columns, rows), or None on failure.

    Queries GetConsoleScreenBufferInfo on the stderr handle (-12) via the
    Win32 API.  Returns None when not on Windows, when there is no
    console, or when the API call fails.
    """
    try:
        from ctypes import windll, create_string_buffer
        # Standard handle IDs: stdin=-10, stdout=-11, stderr=-12.
        handle = windll.kernel32.GetStdHandle(-12)
        csbi = create_string_buffer(22)
        res = windll.kernel32.GetConsoleScreenBufferInfo(handle, csbi)
        if res:
            (bufx, bufy, curx, cury, wattr,
             left, top, right, bottom,
             maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
            sizex = right - left + 1
            sizey = bottom - top + 1
            return sizex, sizey
    except Exception:
        # Bug fix: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; narrowed while keeping the
        # silent "return None on failure" contract.
        pass
def get_terminal_size():
    """Return the terminal size as a (columns, rows) tuple.

    Tries the Win32 console API on Windows, then falls back to ``tput``
    (needed for Windows Python inside cygwin's xterm), and finally to
    the conventional 80x25 default.

    originally retrieved from:
    http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
    """
    tuple_xy = None
    if platform.system() == 'Windows':
        tuple_xy = _get_terminal_size_windows()
    if tuple_xy is None:
        # needed for window's python in cygwin's xterm!
        tuple_xy = _get_terminal_size_tput()
    if tuple_xy is None:
        # Bug fix: removed a stray debug print("default") left in the
        # original before falling back to the default size.
        tuple_xy = (80, 25)  # default value
    return tuple_xy
def _get_terminal_size_windows():
    """Return (columns, rows) via the Win32 console API, or None on failure.

    Mirrors terminalSize(): reads GetConsoleScreenBufferInfo on the
    stderr handle (-12).
    """
    try:
        from ctypes import windll, create_string_buffer
        # Standard handle IDs: stdin=-10, stdout=-11, stderr=-12.
        handle = windll.kernel32.GetStdHandle(-12)
        csbi = create_string_buffer(22)
        res = windll.kernel32.GetConsoleScreenBufferInfo(handle, csbi)
        if res:
            (bufx, bufy, curx, cury, wattr,
             left, top, right, bottom,
             maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
            sizex = right - left + 1
            sizey = bottom - top + 1
            return sizex, sizey
    except Exception:
        # Bug fix: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; narrowed while keeping the
        # silent "return None on failure" contract.
        pass
def _get_terminal_size_tput():
    """Return (columns, rows) from the ``tput`` utility, or None on failure.

    src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
    """
    try:
        # Bug fix: the original called subprocess.check_call(), which
        # returns the exit status (0) rather than the command output, so
        # it always produced (0, 0).  check_output() captures stdout.
        cols = int(subprocess.check_output(shlex.split('tput cols')))
        rows = int(subprocess.check_output(shlex.split('tput lines')))
        return (cols, rows)
    except Exception:
        # Narrowed from a bare `except:`; still silently yields None so
        # get_terminal_size() can fall through to its default.
        pass
def clear():
    """Clear the screen and home the cursor by writing ANSI escapes to stderr."""
    # ESC[2J erases the display; ESC[H moves the cursor to row 1, col 1.
    sys.stderr.write("\x1b[2J" "\x1b[H")
mypicture = """\
888888OZZO888888888888MMMMMMMMNNMMMMMMMMMMMMMMMMMMMMMMNNMND8OOZZ$$ZOOOOOOOZZZZZZ
8888888OZO88888888888NMMMMMNNNMMMMMMMMMMMMMMMMMMMMMMMMMMMMNDOOOO$ZZZZZZZZZZZZZ$Z
8888888OOO8888888888NMMMMMMN8888888NNNNMMMMMMMMMMMMMMMMMMMMMN8OO$ZZOOOOOOOOOOOZZ
8888888OZ88888888888MMMMMND8ZZ$7777$O88888DNMMMMMMMMMMMMMMMMMN8OZZZZZZZZZZZZ$Z$$
8888888OOO888888888DMMMNDOZ$7777I???????I77$$DNMMMMMMMMMMMMMMMDOZ$ZOOOOOOOOZZZZZ
8888888OOO888888888NMMNOZ$7II??++++=+++++++?I7ZDNNMMMMMMMMMMMMM8ZZZZOZZZZZZZZ$Z$
88888888O88888OOOOOMMNOZ$7III??++==========++?I$8DDNNMMMMMMMMMMNZZZZOZZZOOZZOZZZ
888888888888888888DNNOZ$$77I??+++============++?IODDNNMMMMMMMMMN8ZZOZOOZZZZZZZZ$
888888888888888888DN8Z$$7I????++=============++??I7$ODMMMMMMMMMMDOZZZZZZZZZZZZZZ
88888888888888NMNDND8NNO$I++====~~~===++++++++???I77Z8NNMMMMMMMMD8OZZZZZZZZZZZZ$
8888888888888MMMMMNNMNNNND87=~~~~~~~~~===++???I???I7ZODNMMMMMMMMNOOOOOOOZZZZZZZ$
DD8888888888DMMMMMNZI?+?$8D8+~~~:::::~~=+????IIIII77$ODNNMMMMMMMM88OOOOOOOOOZZZZ
DD8888888888DDNMMN8I?++==+?+?=~::::+$ODDDDDD88O$77777ODNNMMMMMMMN8888888OOOOOOOO
DDD888888888888NNNO77$7I++=?++=~::~+7$$$ZZOODNNDOZ$$7ZDNNMMMMMMMDDD88888888OOOOO
DDDD88D8DD8DDD8888$ON8O$$II++??=:~~~=+=~==~~~?78D8Z$$$ODNMMMMMMMD88888888888OOOO
NDDDD8DDDDD88D888$78O+~D8D7II7I???=~=+?+===~~~+IODO$77$8DNMMMMMN8888888888888OOO
MNDD8DDDD888DD888$$Z7$7Z$Z7777I7$7+=II7II7II+==?I8O$$$$ONNNNMMMD88888888888888OO
MNNNDDDDDDDDDDD8O7III?+??I7Z7II7$$7?$=::8DNOZ7I?I$$7$$Z8NNNMMMNDD888888888888888
MMNNNDDDDDDDDDD8Z7IIII?II7$ZII$$$$$I++IIOD8ODO7III777$ODNNNNMMNDDDD8888888888888
MMMNNDDDDDDDDDD8$I??????I7?=+?$Z$$7?+++===?I7II???I77$ONMNMNNND888D8888888888888
MMNNNDDDDDDDDDD87I?++++???=::~?ZO$I====++==+?????II77Z8NMMNNMDDDDD88888888888888
MMMNNDNNDDDDDND87?+===~=+I+~===?OZ+====~==+++???II777Z8NMD$$77OD88D8888888888888
MMMNNNNNNNNNNNND7==~:~~+?II=IZD=I$~~~~~~==~===+?I7$$$Z8D8$7I77?8D888D88888888888
MMMMNNNNNNNNNNND7+=~~=+?7I=++++I7I~~~~~~~~~~==+?I7$$ZO8DOO?+I7?8DDDDD88888888888
MMMMMNNNNNNNNNND7+==+?II?~:::=?I?+=~::::~~===++?I7$ZZO8Z$$=:7?8DD8DDDDDDDDDDDDD8
MMMMMMMMNNNNNNND$?++=?I+=~~~:~+?++=~:,,::~==++??I7ZZO8$$$7=:7ZDDD8DD8D8888888888
MMMMMMMMMMNNNNNN7I+=+7$7II??=~~~~===~:,::~=+++?I7$OO8O7I?$I78DDD88DDDD8888888888
MMMMMMMMMMNNNNNN77?=+I$$7$OZ77I+~:~=~::::~==++?I7$O887?+IZ$8DDDDDDDDDDD888888888
MMMMMMMMMMNNNNNNO7I++++??IIIII$Z$?=~~:::~~==+??I7ZODDNO$8DDDDDDDDDDDDD8DD8D88888
MMMMMMMMMMMNMMMMM7I?++====~~:::~~~~~~~~~=====+?I$O8DDNNNNDDDDDDDDDDDDDDDDDD8D888
MMMMMMMMMMMMMMMMM$7I?======~::::~~~~~~~~===+++?7$888DNNNNNDDDDDDDDDDDDDDDDDDDD88
MMMMMMMMMMMMMMMMMNZ$7?++++++=~::~~~~~=~====++?IZOOO8NNNNNNNNNDDDDDDDDDDDDDDDDDD8
MMMMMMMMMMMMMMMMMMZ$I?=~~=====~~~==========+?7$88OODNNNNNNNNNNDDDDDDDDDDDDDDDDD8
MMMMMMMMMMMMMMMMMN7$I+~:::::~=~====+====++?I7$8OOO8NNNNNNNNNNNNDDDDDDDDDDDDDDDD8
MMMMMMMMMMMMMMMMND$?$?==::::::~=++??+???I7$ZOO$$Z8DNNNNNNNNNNNNDDDDDDDDDDDDDDDDD
MMMMMMMMMMMMMMMMMO$??77??+===~++?I77777$$ZZ7777$ZDNNNNNNNNNNNNNNDDDDDDDDDDDDDDDD
MMMMMMNNNMMMMMMMN$$++77?+?I77$$$$$777II?????II7Z8MMMMNNNNNNNNNNNDDDDNDDDDDDDDDDD
MMMMMMNNMMMMMMMMNOZ+=+7======+??????++====+??I$8DMMMMNNNNNNNNNNNDNNNNNDDDDDDDDDD
MMMMMMNNMMMMMMMMMM8+==??=~~~=~~~===~~~~~~==+I7Z8NMMMMMNNNNNNNNNNNNNNNNNNDDDDDDDD
NNMMMMMMMMMMNNNNNMM$=~=??===~====~:::~~~=++?I$ONMMMMMMMMNNNNNNNNNNNNNNNNNDDNDDDD
MMMMNMMMMMMNNNNNNMMMM8$??I7$ZO8888OOZ7III?II$DNNNMMMMMMMMNNNNNNNNNNNNNNNNNDNNDDD
MMMNMMMMMMMMMNNNNNMMMMNNNNNNNNNNNNNNNNNNNNNMMNNMMMMMMMMMMMMNNNNNNNNNNNNNNDDNNDDD
"""
castle = """\
~~~~~~~~~~~~~~~~~~~~~~~~~~~~O?~~+$+?~~~~~~~~~~=?O$~Z7~ZI++~~~~8$Z=$O~$+I?=~~~~~=~==N$$=~IO=$7I=================~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~::8=8$ZI?+I$=?~~~~~~~~~ONN87II7$7?=$+8788?III7ZI+$I~~~~~~~~~~NON+8D8D7$?ZI?~==~~~===~~~~~~~~~~~~:I
~~~~~~~~~~~~~:~:::::::::N8Z$7?+?++I~+~:::::::O8DOMNNMMM=~ZO88D8IMMM7NN?7+~~~~~~~~~~NODDDDD??II+~~~~~~~~~~~~~~~~~~~~~~~:N
~~~~~~~~~~~~::::::::::::D8887I?+I+==$::::::::8NNO$7=O?D7DOMNMDN?8N7ION=NN?~~~~~~~~~NODDOOD$$7?++~~~~~~~~~~~~~~~~~~~~~~ND
:~~~~~~~~~::::::::::::::888$$I??++~:O:~?I=::::DDN=II777~$O8DDND?ZI+~877$$~~~~~~~~$~MD88DOD$Z?+?=~~~~~~~~~~~~~~~~~~~~:MDN
~~~~~~~:::::::::::::::::88O$I???+=++Z$77~~::::DDN+:?=I$~$88N8DNIII?+$I7+?:::~~~~~O?MDDDD8ZZ$7?++~~~~~~~~~~~~~~~~~~~~D7OM
~~~~~~~~::::::::::::::::8OO7??+=~===$777?~::::8DD+++I7$=Z88D8DD?7??I777778D8?~:~:8NMDD88O$Z7I?+~~~~~~~~~~~~~~~~~~~:DMMDN
~~~~~~~:::::::::::::::::OOO$+??=~~~~ONOOD8N:8N8DD+II$$7=Z888DDD?7I?II??$7DDZ8DDDDNDNDDD8ZZZI7+=+~~~~~~~~~~~~~~~~~~:D8NMN
~~~~~~~~::::::::::::::::ZOO7I+==+~~~ZO88ZOOD8D8DD+I77M$=OOONODD+7I7IZ?+OIZDDZZ8DD8DNDD88DZ$II===~::~~~~~~~~:~~~~~~~IMMMM
~~~~~~~~~~~~::::::::::::Z8Z7?+=~Z~~=ONOO88O8DND8D?I?IMZ~OOOD8DD?$I7?M??$?$ZZ8ZDD8DDMDDDDOZ$7IN+=~~:~~~~~~~~~~~~~~::DNMMM
~~~~~~~~~~~~~~~~:::::::=O8Z7?==~=~~+OD8D88DDNN8DD???I77=Z$8O8ONM$DI+I?+$IZZO88ZOODDNDDDOOO7I?I+=OO~~::~~~~~~~~~~~~~N7MDM
==~~~~+I=+=~~~~~~~~~~~:?OOZ7?+=~=~:=OD$888ODDD88NI?I?7I+ODDDDNM??NII??I+II7D8Z88DDDNDD8DOZ$7I?+~Z$+:~~:~I=~:~=8=8MMMMMMM
=====8O8O?$$~~$=$+~=:I$NOOZ7?+=8+~~=8D$OOOZO88O8N+?I?I?~OZZD88N$7?O?+?III$ZOD8ZZZ8DNDDD88O$7++==8MND+~ZD8D=:~=$?8MMMMND8
O88DMNMNNNZ887NMMNDZ88MNOZOI?==N=+~=88Z$ZOOODN8DD???I?I~ZZNMMMMO?$??+?+++$$8D8$ZZO8NDD888Z7I?I=~DMNMMMNMNNI~~IMMMMMMMMMM
8ODDMMMMMNMM8NMMMMMN8ODN88O$?+=+~~==OD7O8Z88DDOON?II??I~OONDNDN?MDI+I+++?778DZZO$ODND8D88O$I?+=:DMMMMMMMMMMNNNMMMMMMMMMM
DNDOMMMONNM7OMMMMMMM7777N8OI??+=~:~=OD$Z$7$Z888DD+?7+II~OODNNNNOMD?I?+=?I$ZONOOZ$88NNDD88OI??+=~DMMMMMMMMMMMMMMMMMMMMMMM
DNDNDNM8NMNNMODMNNDNNNNMO887?=7:::=~Z8Z$Z$8O8DODN??+?$$~NDMMMMO8MOI7I7+?7O88OOODOONNDDDD8$7I7==~NMMMMMMMMMMMMMMMMMMMMMMM
8DN88DDDDNDDONNNDNNDD88DDNDZOII??$Z7MMMDNMMMMMNNDI$7ND8ON88DDNNNO8DO8ZZZODMMMMMMNDMMMMMMNDZZ7ZZ77MMMMMMMMMNNMMMMMMMMMMMM
Z$$ZZOMOMDN778MMMMMNZI7ONND8OO7$$ODDMMMMMMDZZOOOOOOOOIDD8III778MNDZOOOOOO$O8DMMMMMMMMMMMNMD8OZZZOMMMMMMMMMMMMMMMMMMMMMMM
+++INMNMMMM++ZMMMMMZI+ONNND88O7$$$ZONMNMNNMMMMMMMN$O88MD$77777$DNDDODDDMDN8ND8NMMMMMMMNMDMDZOZ77ONMMMMMMMMMMOMMMMMMMMMMM
+++?NMONND87+INMMDO+==ONDDDZ7IZ+=++?8N888NNMMMMM?IM8DNOZ7777II7$8DN8ODDDDN8II7DD8DNNNNNDD8O$I??+DMMMMMMMMMM?++NMMMMMMMMM
++++7DNND8+++==?I=====78DD8Z7I??++=+DN888MMMMM=8ZINDNO$IIII??+???ONNDD8ON8D8DN888DDMNNNDDDZZ$7I+DMMNZ8NNNNZ++?MMMMMMMMMM
++++++88O7I?++++++++===?DD8Z77II???IDN8DDMM$=+8MO7NN8$$77I77777I778DDDNO$ND88D8DDDNNNNNDDDOZ$7??DDI+++I8NOI++$I7NMMMMM8D
++++++++++++++++++++++??DDDZ$77D?IIIDNDD8I=:N+8M?DNO8O$77777777$ZZODDN88ZDO88N88DDNMNNNDNDOZ77I?DDI++?+?+????7N$$MMMMMMM
++++++++++++++++++++++++DDDZZ7INII?IDND7?MD=M+DZDNO8OZ77$7$7777777ZZ8OD8ONONDNDD8NNMNNNNNN8OZ$I???????????????7??$NDDMMM
????????++++++++++++++++DDDOZ$7IIIII$7?D7D8?NINNN$OZ7III7I?II$$III777ZO87N8N8DN8NNDOMNNNND8O$$7I?I??I????I????????IMMMMM
??????????????????+?+?+?DND8Z$7I7I?I+MN8$NO7IONNZ$77???????+???I$$I??I$$ODOMZNNDNNNN88NNNDD8OZ7IIIIIIIIIIIIIIIIIIZZMNMMM
????????????????????????NND8Z$7O77I8ONNDON$7N8NOZ$7?????+?I7$7??????IIIZ7D8DMZNNNN88NMND8ND8ZD$IIIIIIIIII7IIIIIIIIIMMMMM
I?IIIII?????????????????DNN8Z$Z=7I7DONNNOI88MO$$$$$II7$I??II?IIIIIII77I7$Z8N8DMDNMNZNDNNMDNOO8$$7777777777777777IIINMNMM
IIIIIIIIIIIIIIIIIIIIIIIINNN$I$?7$77DONNZ8N8ONI888O$77777777777I7777777ZODDZ$DN$MOMNOMMMNNDNNNZZ77777777777777777777ZNNNM
IIIIIIIIIIIIIIIIIIIIIIIID777?D$ZZ$$O8$?ZNZZO$OOOO7I777ZZ8ZZ$$$Z$$$777I7777$O8$$OD8MOMMMMNDODMDNZ$$$$$$$$$$$$$$$7$$$$77DM
7777777777777777777777O$$I?D8DOOOZZOZ77ZN7N788DDZZZZ$$ZZZZOO8Z$$777III77ZZ8NI$$D$ZNOMMMMNDODD8DNMNZ$$$$$$$$$$$$$$$$$ZMNM
77777777777777777777O7$7INNDDD$OOOZ?N$7$N$$7$$$$I??I$$7??II??III??IIII7IIIII$8ZDZZOM8NMMMD8DD8OOMMNM8ZZZZZZZZZZZZZZZ$OMM
7$$$$$77777777777$7ZIZ77NNNDDDZ8O$7DM$$$O$$$$$$7I???IIII?IIIIIII?I7ZO$IIII77$OO7ZZO8NM$MMD8NDD88ZZNMMNMZZZZZZZZZZZZZZZZD
$$$$$$$$$$$$$$$O$O7Z7I$$MMNNDD7O77O8N$$7DOOZZ$Z7IIIII?I?IIIII7II?IIIII7II7I777$ZOOO8MMM8M88DDD8O8ONDDNNMM8OZOOOZOOZOZZDM
$$$$$$$$$$$$$$Z$7$$NDIZZNNNND877O8O8N$$$ZZZZZ$III$$$Z$IIIIIII77I7III7I777$7$$$ZO8OODMMNNMODNDD88DON8DODNNNNMOOOOOOOOOOOD
ZZZZZZZZZZZOZZ$7$ZZDDIZZZZDNDO7OODO8N8$$ZZZZZ7IIIIIIIIIIIII7ZZ$Z$II7I777I77777$$O8D8NM8MMM8NNDND8OMDDOOO8NMNMMDOOOOOOOOO
ZZZZZZZZOZOZ$$$ZZZ8NNIZZZZZO?8DZOOZM$ZOOOZ$$77I77$IIIIIIIIII777I77I7I77IOZZOO$$$ZZM7NND8MDMNO88888MDD8888ODMMDNMD8O88OOO
ZZOOZODOOZZ7ZZZZOODMOIOOOOO7ZDOZOOOMZOOOZZZZ7IIII7IIIIIIII77III777I777777777$$$$$Z$8D888MD8NMDD888MDN88888888MMNNNNZ8888
OOOOZZOZD7OOOOOOOO8NO7OOO7Z8OD8OOO8OZOOZOZZZ$ZZZZ$7I7I7II7I7I77777777777777777$$$ZZO8$8DM888DMMDDDMDDDDDDD88888MMMNNND88
ODZO8ZO7OOOOOOOOOO8D8IOO+D8O8D888Z$$ZOOOOZZ7II77I77I7ZZZOD$777I7777777$I777777777$$$Z8ZDM8DDDDNMNDM8DDDDDDDDDDDDDNMMMMMN
8Z7$$ZOOOOOOOOOOO88D8I$7O8888DDO8ZZZOOOZZ$7777I777II777777777$ZZZZZZ7I7$$7$$7$$$ZO8OD8DIM8DDDDDDMMMDDDDDDDDDDDDDDDDNNMNM
Z7Z?OOOOOOOOO8O8888O8I$Z88888DDOOOOOOOOZZ77777777$777777I777777777777777ZOZZOOZ7$$$$ZZODN8DDDDDDDMMDDDDDDDDDDDDDDDDDDNMM
"""
welcome = """\
▄█ █▄ ▄████████ ▄█ ▄████████ ▄██████▄ ▄▄▄▄███▄▄▄▄ ▄████████
███ ███ ███ ███ ███ ███ ███ ███ ███ ▄██▀▀▀███▀▀▀██▄ ███ ███
███ ███ ███ █▀ ███ ███ █▀ ███ ███ ███ ███ ███ ███ █▀
███ ███ ▄███▄▄▄ ███ ███ ███ ███ ███ ███ ███ ▄███▄▄▄
███ ███ ▀▀███▀▀▀ ███ ███ ███ ███ ███ ███ ███ ▀▀███▀▀▀
███ ███ ███ █▄ ███ ███ █▄ ███ ███ ███ ███ ███ ███ █▄
███ ▄█▄ ███ ███ ███ ███▌ ▄ ███ ███ ███ ███ ███ ███ ███ ███ ███
▀███▀███▀ ██████████ █████▄▄██ ████████▀ ▀██████▀ ▀█ ███ █▀ ██████████
▀
"""
Welcome = """\
,ggg, gg ,gg
dP""Y8a 88 ,8P ,dPYb,
Yb, `88 88 d8' IP'`Yb
`" 88 88 88 I8 8I
88 88 88 I8 8'
88 88 88 ,ggg, I8 dP ,gggg, ,ggggg, ,ggg,,ggg,,ggg, ,ggg,
88 88 88 i8" "8i I8dP dP" "Yb dP" "Y8ggg ,8" "8P" "8P" "8, i8" "8i
Y8 ,88, 8P I8, ,8I I8P i8' i8' ,8I I8 8I 8I 8I I8, ,8I
Yb,,d8""8b,,dP `YbadP' ,d8b,_ ,d8,_ _,d8, ,d8' ,dP 8I 8I Yb, `YbadP'
"88" "88" 888P"Y8888P'"Y88P""Y8888PPP"Y8888P" 8P' 8I 8I `Y8888P"Y888
"""
to = """\
▄▀▀▀█▀▀▄ ▄▀▀▀▀▄
█ █ ▐ █ █
▐ █ █ █
█ ▀▄ ▄▀
▄▀ ▀▀▀▀
█
▐
"""
To = """\
.
.o8
.o888oo .ooooo.
888 d88' `88b
888 888 888
888 . 888 888
"888" `Y8bod8P'
"""
nimbh = """\
███▄ █ ██▓ ███▄ ▄███▓ ▄▄▄▄ ██░ ██
██ ▀█ █ ▓██▒▓██▒▀█▀ ██▒▓█████▄ ▓██░ ██▒
▓██ ▀█ ██▒▒██▒▓██ ▓██░▒██▒ ▄██▒██▀▀██░
▓██▒ ▐▌██▒░██░▒██ ▒██ ▒██░█▀ ░▓█ ░██
▒██░ ▓██░░██░▒██▒ ░██▒░▓█ ▀█▓░▓█▒░██▓
░ ▒░ ▒ ▒ ░▓ ░ ▒░ ░ ░░▒▓███▀▒ ▒ ░░▒░▒
░ ░░ ░ ▒░ ▒ ░░ ░ ░▒░▒ ░ ▒ ░▒░ ░
░ ░ ░ ▒ ░░ ░ ░ ░ ░ ░░ ░
░ ░ ░ ░ ░ ░ ░
░
"""
Nimbh = """\
... ... . ..
.=*8888n.."%888: @88> . uW8" .uef^"
X ?8888f '8888 %8P .. . : `t888 :d88E
88x. '8888X 8888> . .888: x888 x888. 8888 . `888E
'8888k 8888X '"*8h. .@88u ~`8888~'888X`?888f` 9888.z88N 888E .z8k
"8888 X888X .xH8 ''888E` X888 888X '888> 9888 888E 888E~?888L
`8" X888!:888X 888E X888 888X '888> 9888 888E 888E 888E
=~` X888 X888X 888E X888 888X '888> 9888 888E 888E 888E
:h. X8*` !888X 888E X888 888X '888> 9888 888E 888E 888E
X888xX" '8888..: 888& "*88%""*88" '888!` .8888 888" 888E 888E
:~`888f '*888*" R888" `~ " `"` `%888*%" m888N= 888>
"" `"` "" "` `Y" 888
J88"
@%
:"
"""
fullgreet = """\
,ggg, gg ,gg
dP""Y8a 88 ,8P ,dPYb,
Yb, `88 88 d8' IP'`Yb
`" 88 88 88 I8 8I
88 88 88 I8 8'
88 88 88 ,ggg, I8 dP ,gggg, ,ggggg, ,ggg,,ggg,,ggg, ,ggg,
88 88 88 i8" "8i I8dP dP" "Yb dP" "Y8ggg ,8" "8P" "8P" "8, i8" "8i
Y8 ,88, 8P I8, ,8I I8P i8' i8' ,8I I8 8I 8I 8I I8, ,8I
Yb,,d8""8b,,dP `YbadP' ,d8b,_ ,d8,_ _,d8, ,d8' ,dP 8I 8I Yb, `YbadP'
"88" "88" 888P"Y8888P'"Y88P""Y8888PPP"Y8888P" 8P' 8I 8I `Y8888P"Y888
.
.o8
.o888oo .ooooo.
888 d88' `88b
888 888 888
888 . 888 888
"888" `Y8bod8P'
... ... . ..
.=*8888n.."%888: @88> . uW8" .uef^"
X ?8888f '8888 %8P .. . : `t888 :d88E
88x. '8888X 8888> . .888: x888 x888. 8888 . `888E
'8888k 8888X '"*8h. .@88u ~`8888~'888X`?888f` 9888.z88N 888E .z8k
"8888 X888X .xH8 ''888E` X888 888X '888> 9888 888E 888E~?888L
`8" X888!:888X 888E X888 888X '888> 9888 888E 888E 888E
=~` X888 X888X 888E X888 888X '888> 9888 888E 888E 888E
:h. X8*` !888X 888E X888 888X '888> 9888 888E 888E 888E
X888xX" '8888..: 888& "*88%""*88" '888!` .8888 888" 888E 888E
:~`888f '*888*" R888" `~ " `"` `%888*%" m888N= 888>
"" `"` "" "` `Y" 888
J88"
@%
:"
"""
greet = """\
\ / _ | _ _ _ _ _
\/\/ (/_|(_(_)| | |(/_
_|_ _
| (_)
|\ |. _ _ |_ |_
| \||| | ||_)| |
"""
dead = """\
__ __ _____ _ _ _______ ______ _______ ______ _______ _______ ______
\\_/ | | | | |_____| |_____/ |______ | \\ |______ |_____| | \\
| |_____| |_____| | | | \\_ |______ |_____/ |______ | | |_____/
"""
cinfo = """\
NIMBH
Copyright (c) 2016 <NAME>. All rights reserved.
NIMBH is held under the Attribution-NonCommercial-ShareAlike (CC BY-NC-SA) License.
Version 0.1 Alpha
"""
info = """\
Remember: You may press Control+D or ALT+F4 at any time to exit.
ALT+ENTER toggles fullscreen, although doing this may distort formatting.
Formatting is designed best around a 1920x1080 fullscreen monitor.
Press ALT+TAB to switch in and out of the game.
"""
def maxSize(x):
    """Return the largest number with the same digit count as *x*.

    E.g. 7 -> 9, 42 -> 99, 123 -> 999.  Anything below 10 (including
    zero and negatives) maps to 9, matching the original behaviour.

    Bug fix: the original fell back to ``return 99`` for x >= 10000;
    the result now keeps growing with the digit count instead
    (e.g. 12345 -> 99999).
    """
    # Grow the power-of-ten limit until it exceeds x; limit - 1 is then
    # the largest value with the same number of digits.
    limit = 10
    while x >= limit:
        limit *= 10
    return limit - 1
def randomDigits(y):
    """Return a string of *y* uniformly random decimal digits."""
    picked = [random.choice(string.digits) for _ in range(y)]
    return "".join(picked)
def randomChars(y):
    """Return a string of *y* random ASCII letters (a-z, A-Z)."""
    letters = []
    for _ in range(y):
        letters.append(random.choice(string.ascii_letters))
    return "".join(letters)
def isInt(c):
    """Return True if ``int(c)`` succeeds, else False.

    Floats count as convertible (they truncate), matching the original.
    Bug fix: catches only the errors ``int()`` actually raises instead
    of a bare ``except``, which also swallowed SystemExit and
    KeyboardInterrupt.
    """
    try:
        int(c)
    except (TypeError, ValueError):
        return False
    return True
# Length of the FaceName wide-char buffer in CONSOLE_FONT_INFOEX (wincon.h).
LF_FACESIZE = 32
# Win32 standard-handle id for stdout, passed to GetStdHandle().
STD_OUTPUT_HANDLE = -11
def nprint(s, x=0, c=" "):
    """Print each line of *s* centered to width *x*, padded with *c*,
    ending every line with a carriage return instead of a newline."""
    centered = (line.center(x, c) for line in s.splitlines())
    for row in centered:
        print(row, end="\r")
def replaceNumbers(s):
    """Return *s* with every decimal digit replaced by a random digit.

    Non-digit characters are left untouched and the length is preserved.
    Fix: the pattern is now a raw string so ``\\d`` is a regex class and
    not an (invalid) string escape, which Python 3 deprecates.
    """
    return re.sub(r'\d', lambda m: str(random.randint(0, 9)), s)
class COORD(ctypes.Structure):
    # Mirrors the Win32 COORD struct (wincon.h): a character-cell
    # coordinate pair, used here for the console font size.
    _fields_ = [("X", ctypes.c_short), ("Y", ctypes.c_short)]
class CONSOLE_FONT_INFOEX(ctypes.Structure):
    # Mirrors the Win32 CONSOLE_FONT_INFOEX struct (wincon.h), passed to
    # SetCurrentConsoleFontEx in intro() to select the console font.
    # FaceName is a fixed-size wide-char buffer of LF_FACESIZE characters.
    _fields_ = [("cbSize", ctypes.c_ulong),
                ("nFont", ctypes.c_ulong),
                ("dwFontSize", COORD),
                ("FontFamily", ctypes.c_uint),
                ("FontWeight", ctypes.c_uint),
                ("FaceName", ctypes.c_wchar * LF_FACESIZE)]
def printXY(x, y, text):
    """Write *text* at console row *x*, column *y*, then pause a second.

    Uses ANSI control sequences: ESC 7 saves the cursor position,
    ``ESC [ x ; y f`` jumps to the target cell, and ESC 8 restores the
    saved position, so the cursor ends up back where it started.
    """
    sequence = "\x1b7\x1b[%d;%df%s\x1b8" % (x, y, text)
    sys.stdout.write(sequence)
    time.sleep(1)
    sys.stdout.flush()
def beep(sound):
    # Play "<sound>.wav" from the working directory via the Windows
    # winsound API; SND_FILENAME means the argument is a file path.
    # NOTE: Windows-only, and PlaySound blocks until playback finishes
    # unless SND_ASYNC is added.
    winsound.PlaySound('%s.wav' % sound, winsound.SND_FILENAME)
def blood(decay=15, dur=100, fast=True):
    """Animate digits "bleeding" off the current terminal line in red.

    Starts from a line of zeroes, then each frame re-randomises the
    remaining digits and blanks out drifting "bleed" columns until the
    whole line is blank, at which point the screen is scrolled clear.

    :param decay: per-column budget of extra random blanking attempts
                  (only used when *fast* is True)
    :param dur: maximum number of animation frames
    :param fast: blank extra random digits each step so the effect
                 converges sooner
    """
    sizex, sizey = get_terminal_size()
    positions = []
    text = "0"*sizex
    # Seed the bleed columns.  NOTE(review): ``sizex//2* + decay*2`` parses
    # as ``(sizex//2) * (+decay*2)`` (unary plus) -- possibly a typo for
    # ``sizex//2 + decay*2``; confirm the intended seed count.
    for i in range(sizex//2* + decay*2):
        positions.append(random.randint(0, sizex))
        if len(positions) >= sizex:
            break
    # Column 0 is excluded so the left-drift logic below behaves.
    if 0 in positions:
        positions = [x for x in positions if x != 0]
    for i in range(dur):
        # All characters identical and not "0" means the line is fully
        # blank: scroll the screen clear at ~125 fps and stop.
        if all(x == text[0] for x in text) and text[0] != "0":
            for i in range(sizey):
                start_time = time.time()
                print("")
                if time.time() - start_time < 0.008:
                    time.sleep(0.008 - (time.time() - start_time))
            break  # everything is blank -- animation finished
        lenp = len(positions)
        for index, j in enumerate(positions):
            if all(x == text[0] for x in text) and text[0] != "0":
                break
            found = False
            count = 0
            # Fast mode: blank up to ``decay`` extra random digit cells.
            while found == False and fast == True and count < decay:
                count += 1
                if all(x == text[0] for x in text):
                    found = True
                    break
                pos = random.randint(0,sizex-1)
                if text[pos].isdigit() == True:
                    text = text[:pos] + ' ' + text[pos + 1:]
                    # NOTE(review): ``found == True`` is a comparison, not
                    # an assignment -- its result is discarded; the break
                    # below is what actually exits the loop.
                    found == True
                    break # Not sure why this look won't end without breaking
            # Re-randomise surviving digits, blank this bleed column, and
            # drift the column one step left or right (reflecting at the
            # list ends).
            text = replaceNumbers(text)
            shift = random.randint(0,1)
            text = text[:j] + ' ' + text[j + 1:]
            if shift == 0 and j == 0:
                positions[index] += 1
            elif shift == 1 and j == lenp - 1:
                positions[index] -= 1
            else:
                if shift == 0:
                    positions[index] -= 1
                else:
                    positions[index] += 1
        print(Fore.RED, Style.DIM, text, end="\r")
def youdied(decay=15, dur=100, fast=True):
    """Play the death animation, show the "YOU ARE DEAD" banner, and ask
    the player what to do next.

    The animation phase is the same digit-bleed effect as :func:`blood`
    (slightly slower scroll, 0.01 s per line); afterwards the ``dead``
    banner is printed and the user is prompted.

    :returns: whatever the user typed at the prompt ('q' means quit;
              anything else returns to the main menu).
    """
    sizex, sizey = get_terminal_size()
    positions = []
    text = "0"*sizex
    # NOTE(review): same ``sizex//2* + decay*2`` unary-plus oddity as in
    # blood() -- confirm intended seed count.
    for i in range(sizex//2* + decay*2):
        positions.append(random.randint(0, sizex))
        if len(positions) >= sizex:
            break
    if 0 in positions:
        positions = [x for x in positions if x != 0]
    for i in range(dur):
        # Fully blank line: scroll the screen clear, then stop animating.
        if all(x == text[0] for x in text) and text[0] != "0":
            for i in range(sizey):
                start_time = time.time()
                print("")
                if time.time() - start_time < 0.01:
                    time.sleep(0.01 - (time.time() - start_time))
            break  # everything is blank -- animation finished
        lenp = len(positions)
        for index, j in enumerate(positions):
            if all(x == text[0] for x in text) and text[0] != "0":
                break
            found = False
            count = 0
            # Fast mode: blank up to ``decay`` extra random digit cells.
            while found == False and fast == True and count < decay:
                count += 1
                if all(x == text[0] for x in text):
                    found = True
                    break
                pos = random.randint(0,sizex-1)
                if text[pos].isdigit() == True:
                    text = text[:pos] + ' ' + text[pos + 1:]
                    # NOTE(review): comparison, not assignment; the break
                    # below exits the loop.
                    found == True
                    break # Not sure why this look won't end without breaking
            # Re-randomise digits, blank this column, drift it sideways.
            text = replaceNumbers(text)
            shift = random.randint(0,1)
            text = text[:j] + ' ' + text[j + 1:]
            if shift == 0 and j == 0:
                positions[index] += 1
            elif shift == 1 and j == lenp - 1:
                positions[index] -= 1
            else:
                if shift == 0:
                    positions[index] -= 1
                else:
                    positions[index] += 1
        print(Fore.RED, Style.DIM, text, end="\r")
    # Death screen: bright banner, slow scroll to vertically centre it,
    # then prompt for quit/menu.
    print(Style.BRIGHT)
    nprint(dead, sizex)
    for i in range(sizey//2-2):
        print("")
        time.sleep(0.03)
    print(Fore.WHITE, Style.DIM)
    sys.stdout.write("\r")
    ret = input("Enter 'q' to quit, or anything else to return to the main menu.".center(sizex) + Fore.RED + Style.BRIGHT)
    return ret
def rain(dur=10**5): # pretend you're upside down ;)
    """Animate blue "rain" on one terminal line with periodic lightning.

    Each frame a line of 'o' raindrops has drifting gaps carved into it.
    At five hard-coded frame numbers (time1..time5), and then at random
    intervals afterwards (``nextbolt``), a lightning bolt is drawn: a
    yellow '|'/'\\'/'/' column that wanders one cell per frame.

    :param dur: number of frames to run (effectively forever by default)
    """
    sizex, sizey = get_terminal_size()
    os.system("mode con: cols="+str(sizex)+ "lines="+str(sizey))
    positions = []
    # Frame numbers of the five scripted bolts, then a random schedule.
    time1 = 250
    time2 = 491
    time3 = 599
    time4 = 759
    time5 = 956
    nextbolt = time5 + random.randint(5,sizex)
    # blN = duration (in frames) of each bolt, gaussian around sizey//2.
    bl1 = random.gauss(sizey//2, sizey//4)
    bl2 = random.gauss(sizey//2, sizey//4)
    bl3 = random.gauss(sizey//2, sizey//4)
    bl4 = random.gauss(sizey//2, sizey//4)
    bl5 = random.gauss(sizey//2, sizey//4)
    bln = random.gauss(sizey//2, sizey//4)
    fade = 0
    def lightning(bolt, text):
        # Draw one frame of a bolt at column *bolt*: jitter it by -1/0/+1,
        # pick '|', '\\' or '/' to match the direction of movement, and
        # print the rain line with the bolt cell highlighted in yellow.
        # Returns the bolt's new column for the next frame.
        boltf = bolt
        boltf += random.randint(-1,1)
        if boltf == bolt:
            text = text[:boltf] + '|' + text[boltf + 1:]
        elif boltf > bolt:
            text = text[:boltf] + '\\' + text[boltf + 1:]
        else:
            text = text[:boltf] + '/' + text[boltf + 1:]
        p = Fore.BLUE + text[:boltf] + Fore.YELLOW + Style.BRIGHT + text[boltf] + Fore.BLUE + Style.NORMAL + text[boltf + 1:]
        print(p, end="\r")
        return boltf
    # Seed the gap ("dry spot") positions; duplicates are fine.
    for i in range(sizex*3):
        positions.append(random.randint(0, sizex))
    for i in range(dur):
        text = "o"*sizex
        lenp = len(positions)
        # Carve a blank at every gap position and drift it sideways,
        # reflecting at the ends of the position list.
        for index, j in enumerate(positions):
            shift = random.randint(0,1)
            text = text[:j] + ' ' + text[j + 1:]
            if shift == 0 and j == 0:
                positions[index] += 1
            elif shift == 1 and j == lenp - 1:
                positions[index] -= 1
            else:
                if shift == 0:
                    positions[index] -= 1
                else:
                    positions[index] += 1
        # During a bolt window, lightning() prints the frame; otherwise
        # the plain blue rain line is printed below.  Each bolt spawns at
        # a random column around the middle on its first frame.
        if i >= time1 and i < time1 + bl1:
            if i == time1:
                bolt1 = sizex//2 + random.randint(-sizex//3, sizex//3)
            bolt1 = lightning(bolt1, text)
        elif i >= time2 and i < time2 + bl2:
            if i == time2:
                bolt2 = sizex//2 + random.randint(-sizex//3, sizex//3)
            bolt2 = lightning(bolt2, text)
        elif i >= time3 and i < time3 + bl3:
            if i == time3:
                bolt3 = sizex//2 + random.randint(-sizex//3, sizex//3)
            bolt3 = lightning(bolt3, text)
        elif i >= time4 and i < time4 + bl4:
            if i == time4:
                bolt4 = sizex//2 + random.randint(-sizex//3, sizex//3)
            bolt4 = lightning(bolt4, text)
        elif i >= time5 and i < time5 + bl5:
            if i == time5:
                bolt5 = sizex//2 + random.randint(-sizex//3, sizex//3)
            bolt5 = lightning(bolt5, text)
        elif i >= nextbolt and i < nextbolt + bln:
            if i == nextbolt:
                boltn = sizex//2 + random.randint(-sizex//3, sizex//3)
            boltn = lightning(boltn, text)
            # Halfway through a scheduled bolt, book the next one at an
            # ever-growing random delay (storms thin out over time).
            if i == nextbolt + (sizey)//2:
                nextbolt += sizey + fade + random.randint(1,sizex)
                bln = random.gauss(sizey//2, sizey//4)
                fade += 5
        else:
            print(Fore.BLUE, end="\r")
            print(text, end="\r")
def tendrils():
    """Draw wandering digit "tendrils" for about sizey*2 frames.

    Each frame paints random digits at a growing set of drifting column
    positions on a blank line, rate-limited to roughly 100 fps.
    """
    sizex, sizey = get_terminal_size()
    os.system("mode con: cols="+str(sizex)+ "lines="+str(sizey))
    positions = []
    for i in range(sizey * 2):
        start_time = time.time()
        # Two new tendril columns appear every frame.
        positions.append(random.randint(0, sizex))
        positions.append(random.randint(0, sizex))
        lenp = len(positions)
        text = " " * sizex
        # Stamp a random digit at each column and drift it one step
        # sideways (reflecting at the ends of the position list).
        for index, j in enumerate(positions):
            shift = random.randint(0,1)
            text = text[:j] + str(random.randint(0,9)) + text[j + 1:]
            if shift == 0 and j == 0:
                positions[index] += 1
            elif shift == 1 and j == lenp - 1:
                positions[index] -= 1
            else:
                if shift == 0:
                    positions[index] -= 1
                else:
                    positions[index] += 1
        print(text, end="\r")
        # Sleep out the remainder of the 10 ms frame budget.
        if time.time() - start_time < 0.01:
            time.sleep(0.01 - (time.time() - start_time))
def bloodText1(x, y):
    """Print *y* frames of an *x*-wide random-digit line with drifting
    blank gaps (an un-coloured variant of the blood effect).

    NOTE(review): positions are drawn from randint(0, x) inclusive, so a
    gap at column x appends a space past the end of the slice and the
    line can drift one character wider -- confirm whether intentional.
    """
    positions = []
    for i in range(int(x//2)):
        positions.append(random.randint(0, x))
    lenp = len(positions)
    for i in range(y):
        text = str(randomDigits(x))
        # Blank each gap column and drift it one step sideways,
        # reflecting at the ends of the position list.
        for index, j in enumerate(positions):
            shift = random.randint(0,1)
            text = text[:j] + ' ' + text[j + 1:]
            if shift == 0 and j == 0:
                positions[index] += 1
            elif shift == 1 and j == lenp - 1:
                positions[index] -= 1
            else:
                if shift == 0:
                    positions[index] -= 1
                else:
                    positions[index] += 1
        print(text, end="\r")
def intro():
    """Run the game's intro sequence (Windows console only).

    Sets the console title and font via the Win32 API, goes fullscreen,
    scrolls in the title art, plays the blood-decay transition, then
    shows the copyright/info screens until the player presses enter.
    """
    # NOTE(review): SetConsoleTitleA is the ANSI entry point but receives
    # a Python 3 str -- ctypes will pass a wide pointer; SetConsoleTitleW
    # (or a bytes literal) is presumably what is meant.  Confirm on a
    # Windows build.
    ctypes.windll.kernel32.SetConsoleTitleA("NIMBH")
    # Select a 12x12 Lucida Console font for the console window.
    font = CONSOLE_FONT_INFOEX()
    font.cbSize = ctypes.sizeof(CONSOLE_FONT_INFOEX)
    font.nFont = 12
    font.dwFontSize.X = 12
    font.dwFontSize.Y = 12
    font.FontFamily = 54
    font.FontWeight = 400
    font.FaceName = "Lucida Console"
    handle1 = ctypes.windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
    ctypes.windll.kernel32.SetCurrentConsoleFontEx(
        handle1, ctypes.c_long(False), ctypes.pointer(font))
    AltEnter()
    sizex, sizey = get_terminal_size()
    os.system("mode con: cols="+str(sizex)+ "lines="+str(sizey))
    # Scroll the bordered title art onto the screen line by line.
    count = 0
    fullgreetsize = len(re.findall("\n", fullgreet))
    for i in range((sizey- 1)):
        print("| {0:<{1}} |".format("", sizex-4), end = "\r")
    for line in fullgreet.splitlines():
        count += 1
        print("| {0:<{1}} |".format(line.center(sizex-4), sizex-4), end = "\r")
        time.sleep(0.015)
    for i in range((sizey-fullgreetsize)//2):
        count += 1
        print("| {0:<{1}} |".format("", sizex-4), end = "\r")
        time.sleep(0.03)
    # Dim red, pause, underline the bottom, then run the decay effect.
    # NOTE(review): the trailing commas after print(...) build throwaway
    # 1-tuples (a Python 2 leftover); harmless but dead syntax.
    print(Fore.RED, Style.DIM, end="\r"),
    time.sleep(3)
    print("".center(sizex, "_"), end="\r")
    blood(40)
    clear()
    # Copyright screen: right-aligned credits plus a digit-scrambled
    # rendition of the title art in bright red.
    toPrint = replaceNumbers(fullgreet)
    print(Style.RESET_ALL),
    print(Style.DIM),
    for i in cinfo.splitlines():
        print(i.rjust(sizex))
    print(Fore.RED),
    print(Style.BRIGHT),
    for i in range((sizey - 63)//2-3):
        print("")
    for i in toPrint.splitlines():
        print(i.center(sizex), end = "\r")
    sys.stdout.write('\r')
    sys.stdout.flush()
    for i in range((sizey - 63)//2-1):
        print("")
    print(Fore.BLUE)
    pause = input("Press enter to continue.\n")
    clear()
    # Info screen: centred hotkey tips, then wait for the final enter.
    for i in range((sizey)//2-6):
        print("")
    print(Fore.CYAN)
    print(Style.DIM),
    nprint(info, sizex)
    for i in range((sizey)//2-6):
        print("")
    pause = input(Fore.RED + "Press enter to begin.\n")
if __name__ == "__main__":
    # Entry point: play the intro sequence, wipe the screen, then show
    # the death screen (its quit/menu return value is ignored here).
    intro()
    clear()
    youdied()
| 2.171875 | 2 |
src/petronia/defimpl/configuration/__init__.py | groboclown/petronia | 19 | 12761890 |
"""
Initial extension configuration implementations.
"""
| 1.15625 | 1 |
reward/policy/__init__.py | lgvaz/torchrl | 5 | 12761891 | from .base_policy import BasePolicy
| 1.078125 | 1 |
intercessor/__init__.py | RadicalZephyr/intercessor | 1 | 12761892 | <reponame>RadicalZephyr/intercessor<gh_stars>1-10
__project__ = 'intercessor'
__version__ = '0.0.0'
VERSION = "{0} v{1}".format(__project__, __version__)
import log
from pysistence import make_dict, make_list
def _identity(x):
x
class Interceptor(object):
    """A named before/after stage pair for the event-dispatch chain.

    A stage that is omitted (or passed as a falsy value) defaults to the
    identity function, so a missing stage simply passes the dispatch
    context through unchanged.
    """

    def __init__(self, *, id=None, before=None, after=None):
        self.id = id
        self.before = before if before else _identity
        self.after = after if after else _identity
def fx_handler_to_interceptor(handler_fn):
    """Wrap an effects handler into an :class:`Interceptor`.

    *handler_fn* is called as ``handler_fn(coeffects, event)`` and is
    expected to return a mapping of effects; the wrapper attaches that
    mapping to the context under ``'effects'`` via the persistent dict's
    ``using``.  Only a ``before`` stage is installed.
    """
    def fx_handler_fn(context):
        # Pull the event out of the coeffects gathered for this dispatch.
        coeffects = context['coeffects']
        event = coeffects['event']
        effects = handler_fn(coeffects, event)
        # Return a new (persistent) context with the effects attached.
        return context.using(effects=effects)
    return Interceptor(id="fx-handler", before=fx_handler_fn)
class Intercessor(object):
    """Event dispatcher holding an app db (persistent dict) and a
    registry mapping event names to interceptor chains."""
    def __init__(self):
        # Application state; replaced wholesale when a handler returns a
        # 'db' effect.
        self._db = make_dict()
        # event name -> list of Interceptor objects
        self._registry = {}
    def _make_context(self, event, db, interceptors):
        # Build the initial dispatch context: coeffects (db + event), the
        # queue of interceptors still to run, and the stack of those
        # already run.  NOTE(review): the ``db`` parameter is ignored;
        # self._db is used instead.
        coeffects = make_dict(db=self._db, event=event)
        return make_dict(coeffects=coeffects,
                         queue=make_list(*interceptors),
                         stack=make_list())
    def dispatch(self, event):
        # *event* is a sequence whose first element is the event name.
        if event[0] in self._registry:
            interceptors = self._registry[event[0]]
            context = self._make_context(event, self._db, interceptors)
            # Run every queued interceptor's before stage, moving it from
            # the queue onto the stack.  NOTE(review): the return value of
            # next_interceptor.before(context) is discarded, so a before
            # stage cannot actually thread a new context through the chain.
            while context['queue'].first is not None:
                next_interceptor = context['queue'].first
                context = context.using(queue=context['queue'].rest)
                next_interceptor.before(context)
                context = context.using(stack=context['stack'].cons(next_interceptor))
            # NOTE(review): ``handler`` is not defined anywhere in this
            # method -- this line raises NameError at runtime.  It looks
            # like leftover pre-interceptor code; the effects were
            # presumably meant to come from the context built above.
            ctx = handler[0].before(context)
            fx = ctx['effects']
            # Apply the 'db' effect by swapping in the new app state.
            if 'db' in fx:
                self._db = fx['db']
        else:
            log.info('There is no handler registered for event "{}"'.format(event[0]))
    def reg_event_fx(self, event_name):
        # Decorator factory: register *h* as the effects handler for
        # *event_name*, wrapping it into a one-element interceptor chain.
        # The chain is also stashed on the handler as ``_interceptors`` so
        # with_after() can extend it later.
        def register(h):
            interceptors = [fx_handler_to_interceptor(h)]
            self._registry[event_name] = interceptors
            h._interceptors = interceptors
            return h
        return register
    def with_after(self, after_fn):
        # Decorator factory: prepend an after-stage interceptor to an
        # already-registered handler's chain.  NOTE(review): the id reads
        # 'before-fn' although the stage installed is ``after``.
        def push_interceptor(h):
            interceptor = Interceptor(id='before-fn', after=after_fn)
            h._interceptors.insert(0, interceptor)
            return h
        return push_interceptor
| 2.203125 | 2 |
lid/_not_in_use/evaluation/bills_for_evaluation_set.py | righthan/policy_diffusion | 33 | 12761893 | <filename>lid/_not_in_use/evaluation/bills_for_evaluation_set.py<gh_stars>10-100
from elasticsearch import Elasticsearch
import re
import csv
import urllib2
import urllib
from urllib import urlopen
from tika import parser
import pickle
def create_bills(ls):
'''
args:
ls: list of lists of urls that correspond to matches
returns:
dictionary grouped by matches
'''
k = 0
bill_id = 0
bills = {}
bad_count = 0
for urls in ls:
for url,state in urls:
try:
print "bill_id: " + str(bill_id)
bills[bill_id] = {}
doc = urllib2.urlopen(url).read()
text = parser.from_buffer(doc)['content']
bills[bill_id]['url'] = url
bills[bill_id]['text'] = text
bills[bill_id]['match'] = k
bills[bill_id]['state'] = state
except:
pass
bad_count += 1
print 'bad_count: ', bad_count
bill_id += 1
k += 1
#get more evaluation bills
eval_bills = grab_more_eval_bills()
for more_bills in eval_bills:
print 'bill_group: ' k
k +=1
for text, state in more_bills:
bill_id += 1
print 'bill_id: ', i
bills[bill_id] = {}
bills[bill_id]['text'] = text
bills[bill_id]['state'] = state
bills[bill_id]['match'] = k
try:
for bill in bills.keys():
if bills[bill] == {} or bills[bill]['text'] == '' \
or bills[bill]['text'] == None:
del bills[bill]
except:
pass
return bills
def get_bill_by_id(unique_id):
    # Look up a bill in the "state_bills" Elasticsearch index by its
    # unique_id (built as "<state>_<year>_<billnumber>") and return the
    # raw document text of the first hit.  Raises IndexError when there
    # is no hit -- the caller relies on that to fall back to the URL.
    es = Elasticsearch(['192.168.3.11:9200', '192.168.3.11:9200'], timeout=300)
    match = es.search(index="state_bills", body={"query": {"match": {'unique_id': unique_id}}})
    bill_text = match['hits']['hits'][0]['_source']['bill_document_first']
    return bill_text
def grab_more_eval_bills():
    """Load extra evaluation bills from the CSV manifest (Python 2).

    For each non-Connecticut row, the bill text is fetched from
    Elasticsearch by unique id, falling back to downloading the listed
    URL.  Returns ``[other_bills, guardianship_bills]`` where each entry
    is a list of (text, state) pairs, split on the "Adult Guardianship
    ..." topic.

    NOTE(review): topic_list is built per CSV row, but bills_text /
    state_list skip rows whose fetch fails, so the final
    ``zip(bills_text, state_list, topic_list)`` misaligns topics after
    the first failure -- verify before trusting the split.
    """
    with open('../../data/evaluation_set/bills_for_evaluation_set.csv') as f:
        bills_list = [row for row in csv.reader(f.read().splitlines())]
    bill_ids_list = []
    url_lists = []
    topic_list = []
    # CSV columns used: 0=topic, 1=state, 2=bill number, 3=year, 6=url.
    for i in range(len(bills_list)):
        state = bills_list[i][1]
        if state == 'ct':
            continue
        topic = bills_list[i][0]
        bill_number = bills_list[i][2]
        bill_number = re.sub(' ', '', bill_number)
        year = bills_list[i][3]
        url = bills_list[i][6]
        unique_id = str(state + '_' + year + '_' + bill_number)
        topic_list.append(topic)
        bill_ids_list.append(unique_id)
        url_lists.append(url)
    # Python 2: zip() returns a list, so len() below is valid.
    bills_ids = zip(bill_ids_list, url_lists)
    bad_count = 0
    bills_text = []
    state_list = []
    for i in range(len(bills_ids)):
        try:
            bill_text = get_bill_by_id(bills_ids[i][0])
        except IndexError:
            # Not in Elasticsearch: fall back to downloading + Tika.
            try:
                url = bills_ids[i][1]
                doc = urllib.urlopen(url).read()
                bill_text = parser.from_buffer(doc)['content']
                print url
            except IOError:
                bad_count += 1
                print 'bad_count: ', bad_count
                #skip this case
                continue
        bills_text.append(bill_text)
        # unique_id starts with the two-letter state code.
        state = bills_ids[i][0][0:2]
        state_list.append(state)
    bills_state = zip(bills_text, state_list, topic_list)
    # Split into the guardianship-act group vs. everything else.
    bill_type_1 = []
    bill_type_2 = []
    for bill in bills_state:
        if bill[-1] == 'Adult Guardianship and Protective Proceedings Jurisdiction Act':
            bill_type_1.append((bill[0],bill[1]))
        else:
            bill_type_2.append((bill[0],bill[1]))
    return [bill_type_2, bill_type_1]
def create_save_bills(bill_list):
    """Build the labeled evaluation-set bills from *bill_list*, pickle
    them to ``../../data/evaluation_set/labeled_bills.p``, and return
    the resulting dict."""
    labeled = create_bills(bill_list)
    with open('../../data/evaluation_set/labeled_bills.p', 'wb') as fp:
        pickle.dump(labeled, fp)
    return labeled
if __name__ == '__main__':
#each list in this list of lists contains bills that are matches
similar_bills = [[('http://www.azleg.gov/legtext/52leg/1r/bills/hb2505p.pdf', 'az'),
('http://www.legis.state.ak.us/basis/get_bill_text.asp?hsid=SB0012B&session=29', 'ak' ),
('http://www.capitol.hawaii.gov/session2015/bills/HB9_.PDF', 'hi'),
('http://www.capitol.hawaii.gov/session2015/bills/HB1047_.PDF', 'hi'),
('http://flsenate.gov/Session/Bill/2015/1490/BillText/Filed/HTML','fl'),
('http://ilga.gov/legislation/fulltext.asp?DocName=09900SB1836&GA=99&SessionId=88&DocTypeId=SB&LegID=88673&DocNum=1836&GAID=13&Session=&print=true','il'),
('http://www.legis.la.gov/Legis/ViewDocument.aspx?d=933306', 'la'),
('http://mgaleg.maryland.gov/2015RS/bills/sb/sb0040f.pdf', 'md'),
('http://www.legislature.mi.gov/documents/2015-2016/billintroduced/House/htm/2015-HIB-4167.htm', 'mi'),
('https://www.revisor.mn.gov/bills/text.php?number=HF549&version=0&session=ls89&session_year=2015&session_number=0','mn'),
('http://www.njleg.state.nj.us/2014/Bills/A2500/2354_R2.HTM','nj'),
('http://assembly.state.ny.us/leg/?sh=printbill&bn=A735&term=2015','ny'),
('http://www.ncga.state.nc.us/Sessions/2015/Bills/House/HTML/H270v1.html','nc'),
('https://olis.leg.state.or.us/liz/2015R1/Downloads/MeasureDocument/HB2005/A-Engrossed','or'),
('https://olis.leg.state.or.us/liz/2015R1/Downloads/MeasureDocument/SB947/Introduced','or'),
('http://www.legis.state.pa.us/CFDOCS/Legis/PN/Public/btCheck.cfm?txtType=HTM&sessYr=2015&sessInd=0&billBody=H&billTyp=B&billNbr=0624&pn=0724', 'pa'),
('http://www.scstatehouse.gov/sess121_2015-2016/prever/172_20141203.htm','sc'),
('http://lawfilesext.leg.wa.gov/Biennium/2015-16/Htm/Bills/House%20Bills/1356.htm', 'wa'),
('http://www.legis.state.wv.us/Bill_Status/bills_text.cfm?billdoc=hb2874%20intr.htm&yr=2015&sesstype=RS&i=2874','wv'),
('http://www.legis.state.wv.us/Bill_Status/bills_text.cfm?billdoc=hb2874%20intr.htm&yr=2015&sesstype=RS&i=2874', 'wv'),
# ('ftp://ftp.cga.ct.gov/2015/tob/h/2015HB-06784-R00-HB.htm','ct'),
('http://www.capitol.hawaii.gov/session2015/bills/SB129_.PDF','hi'),
('http://nebraskalegislature.gov/FloorDocs/104/PDF/Intro/LB493.pdf', 'ne'),
('http://www.gencourt.state.nh.us/legislation/2015/HB0600.html', 'nh')],
[('http://alecexposed.org/w/images/2/2d/7K5-No_Sanctuary_Cities_for_Illegal_Immigrants_Act_Exposed.pdf', 'model_legislation'),
('http://www.kslegislature.org/li_2012/b2011_12/measures/documents/hb2578_00_0000.pdf', 'ks'),
('http://flsenate.gov/Session/Bill/2011/0237/BillText/Filed/HTML','fl'),
('http://openstates.org/al/bills/2012rs/SB211/','al'),
('http://le.utah.gov/~2011/bills/static/HB0497.html','ut'),
('http://webserver1.lsb.state.ok.us/cf_pdf/2013-14%20FLR/HFLR/HB1436%20HFLR.PDF','ok')],
[('http://www.alec.org/model-legislation/the-disclosure-of-hydraulic-fracturing-fluid-composition-act/', 'model_legislation'),
('ftp://ftp.legis.state.tx.us/bills/82R/billtext/html/house_bills/HB03300_HB03399/HB03328S.htm', 'tx')],
[('http://www.legislature.mi.gov/(S(ntrjry55mpj5pv55bv1wd155))/documents/2005-2006/billintroduced/House/htm/2005-HIB-5153.htm', 'mi'),
('http://www.schouse.gov/sess116_2005-2006/bills/4301.htm','sc'),
('http://www.lrc.ky.gov/record/06rs/SB38.htm', 'ky'),
('http://www.okhouse.gov/Legislation/BillFiles/hb2615cs%20db.PDF', 'ok'),
('http://state.tn.us/sos/acts/105/pub/pc0210.pdf', 'tn'),
('https://docs.legis.wisconsin.gov/2011/related/proposals/ab69', 'wi'),
('http://legisweb.state.wy.us/2008/Enroll/HB0137.pdf', 'wy'),
('http://www.kansas.gov/government/legislative/bills/2006/366.pdf', 'ks'),
('http://billstatus.ls.state.ms.us/documents/2006/pdf/SB/2400-2499/SB2426SG.pdf', 'mi')],
[('http://www.alec.org/model-legislation/state-withdrawal-from-regional-climate-initiatives/', 'model_legislation'),
('http://www.legislature.mi.gov/documents/2011-2012/resolutionintroduced/House/htm/2011-HIR-0134.htm', 'mi'),
('http://www.nmlegis.gov/Sessions/11%20Regular/memorials/house/HJM024.html', 'nm')],
[('http://alecexposed.org/w/images/9/90/7J1-Campus_Personal_Protection_Act_Exposed.pdf', 'model_legislation'),
('ftp://ftp.legis.state.tx.us/bills/831/billtext/html/house_bills/HB00001_HB00099/HB00056I.htm', 'tx')],
# [
# ('http://essexuu.org/ctstat.html', 'ct'), we don't have connecituc
# ('http://alisondb.legislature.state.al.us/alison/codeofalabama/constitution/1901/CA-170364.htm', 'al')],
[('http://www.legis.state.ak.us/basis/get_bill_text.asp?hsid=HB0162A&session=27', 'ak'),
('https://legiscan.com/AL/text/HB19/id/327641/Alabama-2011-HB19-Enrolled.pdf', 'al'),
('http://www.leg.state.co.us/clics/clics2012a/csl.nsf/fsbillcont3/0039C9417C9D9D5D87257981007F3CC9?open&file=1111_01.pdf', 'co'),
('http://www.capitol.hawaii.gov/session2012/Bills/HB2221_.PDF', 'hi'),
('http://ilga.gov/legislation/fulltext.asp?DocName=09700HB3058&GA=97&SessionId=84&DocTypeId=HB&LegID=60409&DocNum=3058&GAID=11&Session=&print=true', 'il'),
('http://coolice.legis.iowa.gov/Legislation/84thGA/Bills/SenateFiles/Introduced/SF142.html', 'ia'),
('ftp://www.arkleg.state.ar.us/Bills/2011/Public/HB1797.pdf','ar'),
('http://billstatus.ls.state.ms.us/documents/2012/html/HB/0900-0999/HB0921SG.htm', 'ms'),
('http://www.leg.state.nv.us/Session/76th2011/Bills/SB/SB373.pdf', 'nv'),
('http://www.njleg.state.nj.us/2012/Bills/A1000/674_I1.HTM', 'nj'),
('http://webserver1.lsb.state.ok.us/cf_pdf/2011-12%20INT/hB/HB2821%20INT.PDF', 'ok'),
('http://www.legis.state.pa.us/CFDOCS/Legis/PN/Public/btCheck.cfm?txtType=PDF&sessYr=2011&sessInd=0&billBody=H&billTyp=B&billNbr=0934&pn=1003', 'pa'),
('http://www.capitol.tn.gov/Bills/107/Bill/SB0016.pdf', 'tn')],
[('http://www.legislature.idaho.gov/idstat/Title39/T39CH6SECT39-608.htm', 'id'),
('http://www.legis.nd.gov/cencode/t12-1c20.pdf?20150708171557', 'nd')]
]
bills = create_save_bills(similar_bills)
| 2.59375 | 3 |
rabbit_force/app.py | elaredo/rabbit-force-wefox | 19 | 12761894 | """Application class definition"""
import asyncio
import logging
import signal
from collections import namedtuple
import uvloop
from .factories import create_message_sink, create_message_source, \
create_router
from .exceptions import MessageSinkError
LOGGER = logging.getLogger(__name__)
#: Represents a message and the source it was received from
SourceMessagePair = namedtuple("SourceMessagePair", ["source_name", "message"])
SourceMessagePair.source_name.__doc__ = "Name of the message source"
SourceMessagePair.message.__doc__ = "The received message"
# pylint: disable=too-few-public-methods, too-many-instance-attributes
class Application:
"""Rabbit force application"""
def __init__(self, config, *, ignore_replay_storage_errors=False,
ignore_sink_errors=False,
source_connection_timeout=10.0):
"""
Application is the mediator class which is responsible for listening
for messages from the source objects and routing them to the right
message sinks.
.. note::
The application configures itself the first time :meth:`run` is
called. If you want to run the application with a different
configuration then a new Application instance should be created.
:param dict config: Application configuration
:param bool ignore_replay_storage_errors: If True then no exceptions \
will be raised in case of a network error occurs in the replay marker \
storage object
:param bool ignore_sink_errors: If True then no exceptions \
will be raised in case a message sink error occurs
:param source_connection_timeout: The maximum amount of time to wait \
for the message source to re-establish a connection with the server \
when the connection fails. If ``0`` then the message source will try \
to reconnect indefinitely.
:type source_connection_timeout: int, float or None
"""
#: The application's configuration
self.config = config
#: Marks whether to raise exceptions on replay storage errors or not
self.ignore_replay_storage_errors = ignore_replay_storage_errors
#: Marks whether to raise exceptions on message sink errors or not
self.ignore_sink_errors = ignore_sink_errors
#: Maximum allowed connection timeout for message source
self.source_connection_timeout = source_connection_timeout
#: Marks whether the application is already configured or not
self._configured = False
#: A message source object
self._source = None
#: A message sink object
self._sink = None
#: A message router object
self._router = None
#: The currently running message forwarding tasks
self._forwarding_tasks = {}
#: Event loop
self._loop = None
# The main task of the application
self._main_task = None
def run(self):
"""Run the Rabbit force application, listen for and forward messages
until a keyboard interrupt or a termination signal is received"""
# use the uvloop event loop policy
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
# create an event loop and create the main task
self._loop = asyncio.get_event_loop()
self._main_task = asyncio.ensure_future(self._run(), loop=self._loop)
# add SIGTERM handler
self._loop.add_signal_handler(signal.SIGTERM,
self._on_termination_signal,
self._main_task)
# run the task until completion
try:
LOGGER.debug("Starting event loop")
self._loop.run_until_complete(self._main_task)
# on a keyboard interrupt cancel the main task and await its completion
except KeyboardInterrupt:
LOGGER.debug("Received keyboard interrupt")
self._main_task.cancel()
self._loop.run_until_complete(self._main_task)
finally:
LOGGER.debug("Event loop terminated")
@staticmethod
def _on_termination_signal(task):
"""Cancel the *task*"""
LOGGER.debug("Received termination signal")
task.cancel()
async def _run(self):
"""Configure the application and listen for incoming messages until
cancellation"""
LOGGER.info("Configuring application ...")
# configure the application
await self._configure()
LOGGER.debug("Start listening for messages")
# listen for incoming messages
await self._listen_for_messages()
async def _configure(self):
"""Create and configure collaborator objects"""
LOGGER.debug("Creating message source from configuration")
self._source = await create_message_source(
**self.config["source"],
ignore_replay_storage_errors=self.ignore_replay_storage_errors,
connection_timeout=self.source_connection_timeout,
loop=self._loop
)
LOGGER.debug("Creating message sink from configuration")
self._sink = await create_message_sink(
**self.config["sink"],
loop=self._loop
)
LOGGER.debug("Creating message router from configuration")
self._router = create_router(**self.config["router"])
self._configured = True
async def _listen_for_messages(self):
"""Listen for incoming messages and route them to the appropriate
brokers
This method will block until it's cancelled. On cancellation it'll
drain all the pending messages and forwarding tasks.
"""
try:
# open the message source
LOGGER.debug("Opening message source")
await self._source.open()
LOGGER.debug("Waiting for incoming messages")
# consume messages until the message source is not closed, or until
# all the messages are consumed from a closed message source
while not self._source.closed or self._source.has_pending_messages:
try:
# await an incoming message
source_name, message = await self._source.get_message()
LOGGER.debug("Received incoming message from source %r, "
"scheduling message forwarding",
source_name)
# forward the message in non blocking fashion
# (without awaiting the tasks result)
await self._schedule_message_forwarding(source_name,
message)
# on cancellation close the message source but continue to
# consume pending messages until there is no more left
except asyncio.CancelledError:
LOGGER.debug("Canceling wait for incoming messages")
await self._source.close()
LOGGER.info("Shutting down ...")
finally:
# close the source in case it wasn't closed in the inner loop
# (idempotent if already closed)
LOGGER.debug("Closing message source")
await self._source.close()
# if the source is closed and there are no more messages to
# consume, await the completion of scheduled forwaring tasks
LOGGER.debug("Waiting for running forwarding tasks to complete")
await self._wait_scheduled_forwarding_tasks()
# when all the messages are forwarded close the message sink
LOGGER.debug("Closing message sink")
await self._sink.close()
async def _schedule_message_forwarding(self, source_name, message):
"""Create a task for forwarding the *message* from *source_name* and
add it to the map of active forwarding tasks
:param str source_name: Name of the message source
:param dict message: A message
"""
# create a task to forward the message
forwarding_task = asyncio.ensure_future(
self._forward_message(source_name, message),
loop=self._loop
)
# set a callback to consume the tasks result
forwarding_task.add_done_callback(self._forward_message_done)
# add the task and message to the map of running tasks
self._forwarding_tasks[forwarding_task] = \
SourceMessagePair(source_name, message)
async def _wait_scheduled_forwarding_tasks(self):
"""Wait for all the active forwarding tasks to complete"""
# check if there are any running forwarding tasks, and await them
if self._forwarding_tasks:
await asyncio.wait(self._forwarding_tasks, loop=self._loop)
async def _forward_message(self, source_name, message):
"""Forward the *message* from *source_name* with the appropriate route
:param str source_name: Name of the message source
:param dict message: A message
:return: The routing parameters used to forward the message or None \
if no suitable route was found
:rtype: Route or None
"""
# find a matching route for the message
route = self._router.find_route(source_name, message)
# if a route was found for the message then forward it using the
# routing parameters
if route is not None:
await self._sink.consume_message(message,
route.broker_name,
route.exchange_name,
route.routing_key,
route.properties)
# return the message, source_name and the routing parameters
return route
def _forward_message_done(self, future):
    """Consume the result of a completed message forwarding task.

    Logs success/drop, and escalates unexpected errors to the main task.

    :param asyncio.Future future: A future object
    """
    # remove task from the map of running tasks
    source_message_pair = self._forwarding_tasks.pop(future)
    # extract message and source information for logging
    source_name = source_message_pair.source_name
    channel = source_message_pair.message["channel"]
    replay_id = source_message_pair.message["data"]["event"]["replayId"]
    try:
        # result() re-raises any exception raised inside the task
        route = future.result()
        if route:
            LOGGER.info("Forwarded message %r on channel %r "
                        "from %r to %r.",
                        replay_id, channel, source_name, route)
        else:
            LOGGER.warning("Dropped message %r on channel %r from %r, "
                           "no route found.",
                           replay_id, channel, source_name)
    except MessageSinkError as error:
        # sink failures may be tolerated (message dropped) or fatal
        if self.ignore_sink_errors:
            LOGGER.error("Dropped message %r on channel %r from %r. %s",
                         replay_id, channel, source_name, str(error))
        else:
            self._on_unexpected_error(error)
    except Exception as error:  # pylint: disable=broad-except
        # anything else is unexpected and terminates the application
        self._on_unexpected_error(error)
def _on_unexpected_error(self, error):
    """Handle unexpected errors of forwarding tasks.

    Sets the *error* as the exception of the application's main task,
    which terminates the application with that error.
    """
    message = ("An unexpected error occurred. Setting it as the "
               "exception of the main task.")
    LOGGER.debug(message)
    self._main_task.set_exception(error)
# pylint: enable=too-few-public-methods, too-many-instance-attributes
| 2.328125 | 2 |
src/__init__.py | combro2k/pluGET | 0 | 12761895 | if __package__:
from pluGET.utils.consoleoutput import consoleTitle, clearConsole, printMainMenu
from pluGET.utils.utilities import check_requirements
from pluGET.handlers.handle_input import createInputLists, getInput
from pluGET.handlers.handle_config import checkConfig
else:
from utils.consoleoutput import consoleTitle, clearConsole, printMainMenu
from utils.utilities import check_requirements
from handlers.handle_input import createInputLists, getInput
from handlers.handle_config import checkConfig
def mainFunction():
    """Entry point: prepare console and config, then enter the input loop."""
    consoleTitle()
    clearConsole()
    checkConfig()
    check_requirements()
    createInputLists()
    printMainMenu()
    getInput()


# run immediately when the module is imported/executed
mainFunction()
| 1.65625 | 2 |
Module-04-Generators/py09_generator_send_example_3.py | CodingGearsCourses/Python-Advanced-Concepts | 0 | 12761896 | # Copyright 2020 https://www.globaletraining.com/
# Generator send method
def simple_gen(start_number=10):
    """Yield ``i * 2`` forever, where ``i`` starts at *start_number*.

    A plain ``next()`` advances ``i`` by one; a value passed in via
    ``send(value)`` is added to ``i`` instead.

    Bug fix: the original tested ``if x:``, so ``send(0)`` (falsy but a
    legitimately sent value) was treated like ``next()`` and advanced ``i``
    by one.  Testing ``x is not None`` distinguishes "nothing sent" from
    "zero sent".
    """
    i = start_number
    while True:
        x = (yield i * 2)
        if x is not None:  # a value arrived via send()
            i += x
        else:              # plain next(): advance by one
            i += 1
gen1 = simple_gen()

# Drive the generator from the outside; next() is the idiomatic spelling of
# gen1.__next__().  The exact call sequence of the original is preserved.
print(next(gen1))
print(gen1.send(10))
print(next(gen1))
print(gen1.send(20))
for _ in range(13):
    print(next(gen1))
print(gen1.send(20))
print(gen1.send(20))
for _ in range(3):
    print(next(gen1))
src/main.py | Tomaszu97/game-engine | 1 | 12761897 | from pygame import *
from .game_object import *
from .player import *
from .spawner import *
from .decoration import *
from .label import *
from .shared import *
from .enemy import *
from .tiled import *
from .collision_manager import *
from .resource_handler import *
from threading import Thread
import time
import random
import os
import code
import copy
import random
class App():
    """Top-level pygame application: owns the window, the main loop and the
    collision manager.

    NOTE(review): relies on star-imported module state (``window_position``,
    ``window_size``, ``background_color``, ``camera_position``,
    ``all_objects``, ``collision_layer``, ``event_receiver_objects``,
    ``tick``, ``PLAYER``) -- confirm against the ``shared`` module.
    """
    def __init__(self):
        self.children = []
        self.clock = Clock()
        self.running = True
        # position the window before the display surface is created
        os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (window_position[0], window_position[1])
        self.surface = pygame.display.set_mode((window_size[0], window_size[1]), HWSURFACE | DOUBLEBUF)
        #self.surface = pygame.display.set_mode((window_size[0], window_size[1]), HWSURFACE | DOUBLEBUF | FULLSCREEN)
        self.collision_manager = CollisionManager()
        pygame.init()
        pygame.key.set_repeat(200,60)
        # enters the blocking main loop immediately
        self.run()
    def handle_events(self, event):
        """Dispatch a single pygame event; QUIT tears the app down."""
        if event.type == pygame.QUIT:
            self.quit()
        else:
            for x in event_receiver_objects:
                x.on_event(event, self)
    def loop(self):
        """Run one simulation tick for every object and resolve collisions."""
        to_collide = []
        # ``object`` shadows the builtin here -- kept as in the original
        for object in all_objects:
            # w, h = pygame.display.get_surface().get_size()
            # if not ( -object.size.x <= object.position.x <= w and -object.size.y <= object.position.y <= h ) and object.type != PLAYER:
            #     object.kill()
            try:
                object.every_tick()
            except Exception as e:
                # best effort: a broken object must not stop the loop
                print(e)
            if object.layer == collision_layer:
                to_collide.append(object)
        self.collision_manager.handle_all_collisions(to_collide)
    def render(self):
        """Draw every object layer by layer, camera-centred on the player."""
        self.surface.fill(background_color)
        try:
            #TODO do better
            camera_position.x, camera_position.y = [ ( obj.position.x - (window_size[0]/2) + (obj.size.x/2) , obj.position.y - (window_size[1]/2) + (obj.size.y/2) ) for obj in all_objects if obj.type == PLAYER ][0]
            #draw object in layered order
            for layer in range(min(object.layer for object in all_objects), max(object.layer for object in all_objects)+1):
                for object in all_objects:
                    if object.layer == layer:
                        self.surface.blit(object.surface, (object.position.x - camera_position.x, object.position.y - camera_position.y))
        except Exception as e:
            # e.g. no player object / no objects at all; skip this frame
            print(e)
            pass
        pygame.display.flip()
    def quit(self):
        """Stop the main loop and shut pygame down."""
        all_objects.clear()
        self.running = False
        pygame.quit()
    def run(self):
        """Main loop: tick, render, throttle, and pump events."""
        while(self.running):
            self.loop()
            self.render()
            self.clock.tick(tick)
            for event in pygame.event.get():
                self.handle_events(event)
    def exec(self, cmd):
        """Execute an arbitrary Python string in this scope.

        NOTE(review): ``exec`` on externally supplied strings is a code
        execution risk -- confirm callers only pass trusted input.
        """
        exec(cmd)
# Start the application on a background thread, then give pygame a moment to
# initialise before module import continues.
Thread(target=App).start()
time.sleep(1)
###########################################
#TODO music doesnt play if file imported from somewhere
#TODO replace above time.sleep to sth that makes more sense
| 2.671875 | 3 |
glance/tests/functional/db/migrations/test_pike_expand01.py | Steap/glance | 309 | 12761898 | <reponame>Steap/glance<gh_stars>100-1000
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import utils as db_utils
from glance.tests.functional.db import test_migrations
import glance.tests.utils as test_utils
class TestPikeExpand01Mixin(test_migrations.AlembicMigrationsMixin):
    """Checks that the pike_expand01 migration leaves the legacy artifacts
    tables untouched."""
    # tables created by the old Glance "artifacts" feature; the migration
    # under test must not drop them
    artifacts_table_names = [
        'artifact_blob_locations',
        'artifact_properties',
        'artifact_blobs',
        'artifact_dependencies',
        'artifact_tags',
        'artifacts'
    ]
    def _get_revisions(self, config):
        # limit the alembic walk to the pike_expand01 head revision
        return test_migrations.AlembicMigrationsMixin._get_revisions(
            self, config, head='pike_expand01')
    def _pre_upgrade_pike_expand01(self, engine):
        # verify presence of the artifacts tables
        for table_name in self.artifacts_table_names:
            table = db_utils.get_table(engine, table_name)
            self.assertIsNotNone(table)
    def _check_pike_expand01(self, engine, data):
        # should be no changes, so re-run pre-upgrade check
        self._pre_upgrade_pike_expand01(engine)
class TestPikeExpand01MySQL(
    TestPikeExpand01Mixin,
    test_fixtures.OpportunisticDBTestMixin,
    test_utils.BaseTestCase,
):
    # run the mixin's checks against an opportunistically provisioned MySQL
    FIXTURE = test_fixtures.MySQLOpportunisticFixture
| 1.695313 | 2 |
NNEvol/get_best_nn.py | cvazquezlos/NNEvol-python | 1 | 12761899 | <reponame>cvazquezlos/NNEvol-python
def get_best_nn():
    """Return the best neural network found so far.

    Placeholder implementation: nothing has been evolved yet, so there is
    no best network and ``None`` is returned.  (Fixed the non-idiomatic
    ``return(None)`` spelling.)
    """
    return None
contrib/node/src/python/pants/contrib/node/targets/node_preinstalled_module.py | StephanErb/pants | 94 | 12761900 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from pants.base.payload import Payload
from pants.base.payload_field import PrimitiveField
from pants.contrib.node.targets.node_module import NodeModule
class NodePreinstalledModule(NodeModule):
    """A NodeModule which resolves deps by downloading an archived node_modules directory.
    This is basically an example, to demonstrate how additional types of NodeModule targets with
    their own resolvers (in this case NodePreinstalledModuleResolver), which still work with
    NodeTest, can be registered. To be generallly correct, this target type and associated resolver
    would have to use platform- and Node-version-specific node_modules archives, rather than just
    a single dependencies_archive_url used verbatim. Consider NodePreinstalledModule and
    NodePreinstalledModuleResolver subject to future change or removal for now.
    """
    def __init__(self, dependencies_archive_url=None, sources=None,
                 address=None, payload=None, **kwargs):
        """
        :param string dependencies_archive_url: The location of a tar.gz file containing a
          node_modules directory.
        """
        payload = payload or Payload()
        payload.add_fields({
            'dependencies_archive_url': PrimitiveField(dependencies_archive_url),
        })
        super(NodePreinstalledModule, self).__init__(sources=sources, address=address,
                                                     payload=payload, **kwargs)
    @property
    def dependencies_archive_url(self):
        """Where to download the archive containing the node_modules directory.
        :rtype: string
        """
        return self.payload.dependencies_archive_url
| 2.25 | 2 |
drawer/multithread_plot.py | YOYOPIG/master-thesis | 0 | 12761901 | <filename>drawer/multithread_plot.py
import matplotlib.pyplot as plt
import numpy as np
# Grouped bar chart: CPU-only vs GPU-assisted execution time for a range of
# CPU thread counts.  (Removed the unused ``name_dict``/``threads``/``ax``
# locals and the commented-out experiments from the original.)
thread_counts = [1, 8, 16, 24, 32]  # x tick labels (threads per run)
cpu = [0.203837, 0.050556, 0.050278, 0.053056, 0.055000]  # hours, CPU only
gpu = [0.342212, 0.209722, 0.215000, 0.217778, 0.224167]  # hours, with GPU

x = np.arange(len(cpu))  # one bar group per measurement
plt.bar(x - 0.2, cpu, 0.4, label='CPU only')
plt.bar(x + 0.2, gpu, 0.4, label='With GPU')
plt.xticks(x, thread_counts)
plt.legend()
plt.ylabel('Execution time (hr)')
plt.xlabel('CPU thread count')
plt.show()
clmr/data.py | heraclex12/CLMR | 0 | 12761902 | """Wrapper for Torch Dataset class to enable contrastive training
"""
import torch
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio_augmentations import Compose
from typing import Tuple, List
class ContrastiveDataset(Dataset):
    """Wrap a dataset so each returned clip is (optionally) augmented,
    permanently skipping clips shorter than ``input_shape``."""

    def __init__(self, dataset: Dataset, input_shape: List[int], transform: Compose):
        self.dataset = dataset
        self.transform = transform
        self.input_shape = input_shape
        self.ignore_idx = []

    def __getitem__(self, idx) -> Tuple[Tensor, Tensor]:
        # clips already known to be too short are skipped for good
        if idx in self.ignore_idx:
            return self[idx + 1]
        audio, label = self.dataset[idx]
        if audio.shape[1] < self.input_shape[1]:
            # too short: remember this index and fall through to the next clip
            self.ignore_idx.append(idx)
            return self[idx + 1]
        return (self.transform(audio) if self.transform else audio), label

    def __len__(self) -> int:
        return len(self.dataset)

    def concat_clip(self, n: int, audio_length: float) -> Tensor:
        """Split clip *n* into fixed-length chunks (dropping the ragged
        tail), stack them as a batch and optionally augment."""
        audio, _ = self.dataset[n]
        pieces = torch.split(audio, audio_length, dim=1)
        batch = torch.cat(pieces[:-1]).unsqueeze(dim=1)
        if self.transform:
            batch = self.transform(batch)
        return batch
class SiameseContrastiveDataset(Dataset):
    """Pairs of (hum, song) clips with the same skip/augment behaviour as
    ``ContrastiveDataset``: pairs where either clip is shorter than
    ``input_shape`` are skipped permanently."""

    def __init__(self, dataset: Dataset, input_shape: List[int], transform: Compose):
        self.dataset = dataset
        self.transform = transform
        self.input_shape = input_shape
        self.ignore_idx = []

    def __getitem__(self, idx) -> Tuple[Tensor, Tensor]:
        if idx in self.ignore_idx:
            return self[idx + 1]
        hum, song, label = self.dataset[idx]
        # both clips must be long enough, otherwise skip the pair for good
        if hum.shape[1] < self.input_shape[1] or song.shape[1] < self.input_shape[1]:
            self.ignore_idx.append(idx)
            return self[idx + 1]
        if self.transform:
            hum = self.transform(hum)
            song = self.transform(song)
        return hum, song, label

    def __len__(self) -> int:
        return len(self.dataset)

    def _split_clip(self, audio: Tensor, audio_length: float) -> Tensor:
        """Split *audio* into fixed-length chunks (dropping the ragged
        tail), stack them as a batch and optionally augment."""
        batch = torch.split(audio, audio_length, dim=1)
        batch = torch.cat(batch[:-1])
        batch = batch.unsqueeze(dim=1)
        if self.transform:
            batch = self.transform(batch)
        return batch

    def concat_clip(self, n: int, audio_length: float) -> Tuple[Tensor, Tensor]:
        # identical processing for both clips -- the shared helper replaces
        # the previous copy/paste duplication (and the return annotation now
        # matches the actual two-tensor return value)
        hum, song, _ = self.dataset[n]
        return self._split_clip(hum, audio_length), self._split_clip(song, audio_length)
| 2.78125 | 3 |
src/archive/clay_bricks/PatternBrickLibrary/gCode.py | JonasWard/ClayAdventures | 1 | 12761903 | # g code generator for clay extrusions
# NOTE(review): ``gCodeLine`` is declared with ``def`` but only contains an
# ``__init__`` -- it looks like it was meant to be a class (with
# ``generation`` as the base).  As written, calling gCodeLine(...) merely
# defines a local __init__ and returns None.  Confirm intent before use.
def gCodeLine(generation):
    def __init__(self, coordinates, z_val = True, extrusion_value = None, feed_value = None, absolute_relative = None):
        # copy the coordinate components; Z only when requested
        self.X = coordinates.X
        self.Y = coordinates.Y
        if z_val:
            self.Z = coordinates.Z
class GCodeSettings:
    """Container for g-code generation settings.

    Each ``set*`` method flips the corresponding ``*_bool`` flag and stores
    a human-readable settings list.
    """
    def __init__(self):
        self.nozzle_bool = False
        self.feed_rate_bool = False
        self.extrusion_rate_bool = False
        self.layers_bool = False
        self.geometry_bool = False
        self.distance_bool = False
        self.diamond_bool = False

    def setNozzle(self, diameter):
        """Record the nozzle diameter."""
        self.nozzle_bool = True
        self.nozzle_settings = ['diameter: ', str(diameter)]

    def setFeedRate(self, standard, max_body=None, min_pin=None, max_pin=None):
        """Record the base feed rate and any optional rate limits.

        Fix: the original body was truncated (it ended in a bare ``se``
        expression, a guaranteed NameError when ``max_body`` was given).
        The optional limits are now appended to ``feed_rate_settings`` in
        the same label/value style as the base rate -- TODO confirm this
        matches the author's intent.
        """
        self.feed_rate_bool = True
        self.feed_rate_settings = ['base feed rate:', str(standard)]
        if max_body is not None:
            self.feed_rate_settings += ['max body feed rate:', str(max_body)]
        if min_pin is not None:
            self.feed_rate_settings += ['min pin feed rate:', str(min_pin)]
        if max_pin is not None:
            self.feed_rate_settings += ['max pin feed rate:', str(max_pin)]
# class GCodeGenerator(object):
# def __init__(self, paths, relative = False):
# self.paths = paths
# self.relative = relative
# self.extrusion_rate = .3 # per mm
# self.z_offset = 1.1 # in mm
# def distanceCalculation(self, set):
# def startStopRoutine(self, lift_height, extrusion_decrese, wait_times):
# def gCodeStringGeneration(self):
| 2.390625 | 2 |
hard-gists/005ceac0483fc5a581cc/snippet.py | jjhenkel/dockerizeme | 21 | 12761904 | import tensorflow as tf
import numpy as np
import input_data
import Image
from util import tile_raster_images
def sample_prob(probs):
    """Sample a binary tensor: 1 where a uniform draw falls below *probs*."""
    noise = tf.random_uniform(tf.shape(probs))
    return tf.nn.relu(tf.sign(probs - noise))
# Bernoulli RBM on MNIST trained with one-step contrastive divergence (CD-1).
# Python 2 / legacy TensorFlow 0.x API; parameters are held in numpy arrays
# and fed back through placeholders each step.
alpha = 1.0        # learning rate
batchsize = 100
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images,\
    mnist.test.labels
X = tf.placeholder("float", [None, 784])
Y = tf.placeholder("float", [None, 10])
# weights and biases are fed in and updated outside the graph
rbm_w = tf.placeholder("float", [784, 500])
rbm_vb = tf.placeholder("float", [784])
rbm_hb = tf.placeholder("float", [500])
# one Gibbs step: visible -> hidden (h0) -> visible (v1) -> hidden probs (h1)
h0 = sample_prob(tf.nn.sigmoid(tf.matmul(X, rbm_w) + rbm_hb))
v1 = sample_prob(tf.nn.sigmoid(
    tf.matmul(h0, tf.transpose(rbm_w)) + rbm_vb))
h1 = tf.nn.sigmoid(tf.matmul(v1, rbm_w) + rbm_hb)
# CD-1 gradient estimate and the resulting parameter-update tensors
w_positive_grad = tf.matmul(tf.transpose(X), h0)
w_negative_grad = tf.matmul(tf.transpose(v1), h1)
update_w = rbm_w + alpha * \
    (w_positive_grad - w_negative_grad) / tf.to_float(tf.shape(X)[0])
update_vb = rbm_vb + alpha * tf.reduce_mean(X - v1, 0)
update_hb = rbm_hb + alpha * tf.reduce_mean(h0 - h1, 0)
# mean squared reconstruction error, used only for monitoring
h_sample = sample_prob(tf.nn.sigmoid(tf.matmul(X, rbm_w) + rbm_hb))
v_sample = sample_prob(tf.nn.sigmoid(
    tf.matmul(h_sample, tf.transpose(rbm_w)) + rbm_vb))
err = X - v_sample
err_sum = tf.reduce_mean(err * err)
sess = tf.Session()
init = tf.initialize_all_variables()
sess.run(init)
# n_* hold the freshly computed parameters, o_* the previous ones
n_w = np.zeros([784, 500], np.float32)
n_vb = np.zeros([784], np.float32)
n_hb = np.zeros([500], np.float32)
o_w = np.zeros([784, 500], np.float32)
o_vb = np.zeros([784], np.float32)
o_hb = np.zeros([500], np.float32)
print sess.run(
    err_sum, feed_dict={X: trX, rbm_w: o_w, rbm_vb: o_vb, rbm_hb: o_hb})
for start, end in zip(
        range(0, len(trX), batchsize), range(batchsize, len(trX), batchsize)):
    batch = trX[start:end]
    n_w = sess.run(update_w, feed_dict={
        X: batch, rbm_w: o_w, rbm_vb: o_vb, rbm_hb: o_hb})
    n_vb = sess.run(update_vb, feed_dict={
        X: batch, rbm_w: o_w, rbm_vb: o_vb, rbm_hb: o_hb})
    n_hb = sess.run(update_hb, feed_dict={
        X: batch, rbm_w: o_w, rbm_vb: o_vb, rbm_hb: o_hb})
    o_w = n_w
    o_vb = n_vb
    o_hb = n_hb
    if start % 10000 == 0:
        print sess.run(
            err_sum, feed_dict={X: trX, rbm_w: n_w, rbm_vb: n_vb, rbm_hb: n_hb})
        # dump the current filters as a tiled image every 10000 samples
        image = Image.fromarray(
            tile_raster_images(
                X=n_w.T,
                img_shape=(28, 28),
                tile_shape=(25, 20),
                tile_spacing=(1, 1)
            )
        )
        image.save("rbm_%d.png" % (start / 10000))
nifstd/nifstd_tools/hbp_parc_output.py | memartone/pyontutils | 0 | 12761905 | <reponame>memartone/pyontutils
#!/usr/bin/env python3.6
import subprocess
from pathlib import Path
from collections import defaultdict
import rdflib
from ttlser import natsort
from pyontutils.core import qname, makeGraph
from pyontutils.utils import TermColors as tc
from pyontutils.namespaces import NIFRID, ilxtr
from pyontutils.combinators import restriction, annotation
from pyontutils.closed_namespaces import owl, rdf, rdfs, skos
from IPython import embed
current_file = Path(__file__).absolute()
gitf = current_file.parent.parent.parent
def labelkey(line):
    """Sort key: natural sort on the label column (text before the first '|')."""
    label = line.split('|', 1)[0]
    return natsort(label)
def edkey(line):
    """Sort key: natural sort on "<edition> <label>" (first two '|' columns)."""
    ed, label = line.split('|', 2)[:2]
    return natsort(ed + ' ' + label)
def main():
    """Export label|abbrev|curie tables (plus per-edition tables when
    annotations are present) for each parcellation ontology to /tmp."""
    for filename in ('mbaslim', 'hbaslim', 'paxinos-rat-labels', 'waxholm-rat-labels'):
        filepath = gitf / 'NIF-Ontology/ttl/generated/parcellation' / (filename + '.ttl')
        dir_ = filepath.parent.as_posix()
        print(dir_)
        # record the commit that last touched the source file for the output name
        file_commit = subprocess.check_output(['git', 'log', '-n', '1',
                                               '--pretty=format:%H', '--',
                                               filepath.name],
                                              cwd=dir_,
                                              stderr=subprocess.DEVNULL).decode().rstrip()
        graph = rdflib.Graph().parse(filepath.as_posix(), format='ttl')
        g = makeGraph('', graph=graph)
        # collect annotation axioms indexed both by (predicate, object) and by triple
        annos = defaultdict(set)
        anno_trips = defaultdict(set)
        for triple, predicate_objects in annotation.parse(graph=graph):
            for a_p, a_o in predicate_objects:
                annos[a_p, a_o].add(triple)
                anno_trips[triple].add((a_p, a_o))
        anno_trips = {k:v for k, v in anno_trips.items()}
        # lift owl restrictions into plain triples so subjects() sees them
        for lifted_triple in restriction.parse(graph=graph):
            graph.add(lifted_triple)
        out_header = 'label|abbrev|curie|superPart curie\n'
        out = []
        editions_header = 'edition|label|abbrev|curie\n'
        editions = []
        for s in graph.subjects(rdf.type, owl.Class):
            rdfsLabel = next(graph.objects(s, rdfs.label))
            try:
                prefLabel = next(graph.objects(s, skos.prefLabel))
            except StopIteration:
                print(tc.red('WARNING:'), f'skipping {s} {rdfsLabel} since it has no prefLabel')
                continue
            syns = sorted(graph.objects(s, NIFRID.synonym))  # TODO are there cases where we need to recaptulate what we are doing for for abbrevs?
            abbrevs = sorted(graph.objects(s, NIFRID.abbrev))  # FIXME paxinos has more than one
            try:
                if annos:
                    if len(abbrevs) > 1:
                        print(tc.blue('INFO:'), g.qname(s), repr(prefLabel.value), 'has multiple abbrevs', [a.value for a in abbrevs])
                    # prefer latest
                    current_edition = ''
                    for a in abbrevs:
                        for a_p, edition in anno_trips[s, NIFRID.abbrev, a]:
                            if a_p == ilxtr.literalUsedBy:
                                if current_edition < edition:
                                    current_edition = edition
                                    abbrev = a
                    # NOTE(review): if no annotation matches, ``abbrev`` keeps
                    # its value from a previous loop iteration -- confirm.
                else:
                    abbrev = abbrevs[0]
            except IndexError:
                # no abbrevs at all
                abbrev = ''
            try:
                superPart = next(graph.objects(s, ilxtr.labelPartOf))
            except StopIteration:
                superPart = ''
            out.append(f'{prefLabel}|{abbrev}|{g.qname(s)}|{g.qname(superPart)}')
            if annos:
                #asdf = {'ed':{'label':,'abbrev':,'curie':}}
                asdf = defaultdict(dict)
                triple = s, skos.prefLabel, prefLabel
                eds = anno_trips[triple]
                for a_p, a_o in eds:
                    asdf[a_o]['curie'] = g.qname(s)
                    asdf[a_o]['label'] = prefLabel
                for syn in graph.objects(s, NIFRID.synonym):
                    triple = s, NIFRID.synonym, syn
                    eds = anno_trips[triple]
                    for a_p, a_o in eds:
                        asdf[a_o]['curie'] = g.qname(s)
                        if 'label' in asdf[a_o]:
                            print(tc.red('WARNING:'), f'{a_o} already has a label "{asdf[a_o]["label"]}" for "{syn}"')
                        asdf[a_o]['label'] = syn
                for abbrev in graph.objects(s, NIFRID.abbrev):
                    triple = s, NIFRID.abbrev, abbrev
                    eds = anno_trips[triple]
                    #print('aaaaaaaaaaa', g.qname(s), )
                    for a_p, a_o in eds:
                        asdf[a_o]['curie'] = g.qname(s)
                        if 'abbrev' in asdf[a_o]:
                            print(tc.red('WARNING:'), f'{a_o} already has a abbrev "{asdf[a_o]["abbrev"]}" for "{abbrev}"')
                        asdf[a_o]['abbrev'] = abbrev
                #print(asdf)
                for ed, kwargs in sorted(asdf.items()):
                    if 'abbrev' not in kwargs:
                        print('Skipping', ed, 'for\n', kwargs)
                        continue
                    editions.append('{ed}|{label}|{abbrev}|{curie}'.format(ed=g.qname(ed), **kwargs))
        with open('/tmp/' + filename + f'-{file_commit[:8]}.psv', 'wt') as f:
            f.write(out_header + '\n'.join(sorted(out, key=labelkey)))
        if editions:
            with open('/tmp/' + filename + f'-editions-{file_commit[:8]}.psv', 'wt') as f:
                f.write(editions_header + '\n'.join(sorted(editions, key=edkey)))


if __name__ == '__main__':
    main()
| 1.898438 | 2 |
Python/Skrypty/Python - Szkolenie_11-2015/przyklady_rec_python/map_example.py | Elzei/show-off | 0 | 12761906 | <reponame>Elzei/show-off
def show(arg):
    """Print *arg* and return it incremented by one (Python 2)."""
    print arg
    return arg + 1
if __name__ == '__main__':
    data = [ 10, 20, 30, 40 ]
    # Python 2 map() is eager, so show() prints every element here
    result = map(show, data)
    print "-" * 10
    print result
    print "-" * 10
    result = map(lambda x : x * 2 + 1, data)
    print result
tools/ports/zlib.py | Nitrillo/emscripten | 6 | 12761907 | <filename>tools/ports/zlib.py
import os, shutil, logging
TAG = 'version_1'  # emscripten-ports/zlib release tag to fetch
def get(ports, settings, shared): # not currently used; no real need for configure on emscripten users' machines!
    """Fetch and build zlib when USE_ZLIB is enabled; return the built libs."""
    if settings.USE_ZLIB != 1:
        return []
    ports.fetch_project('zlib', 'https://github.com/emscripten-ports/zlib/archive/' + TAG + '.zip', 'zlib-' + TAG)
    return [ports.build_project('zlib', 'zlib-' + TAG,
                                ['sh', './configure'],
                                ['libz.a'])]
def process_args(ports, args, settings, shared):
    """Append zlib include flags to *args* when USE_ZLIB is enabled."""
    if settings.USE_ZLIB == 1:
        get(ports, settings, shared)
        include_dir = os.path.join(shared.Cache.get_path('ports-builds'), 'zlib')
        args += ['-Xclang', '-isystem' + include_dir]
    return args
def show():
    """Human-readable name for the ports listing."""
    return 'zlib (zlib license)'
| 2.0625 | 2 |
h2o-py/tests/testdir_apis/H2O_Module/pyunit_h2olog_and_echo.py | My-Technical-Architect/h2o-3 | 1 | 12761908 | from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
from tests import pyunit_utils
import h2o
def h2olog_and_echo():
    """
    Python API test: h2o.log_and_echo(message=u'')
    """
    try:
        h2o.log_and_echo("Testing h2o.log_and_echo")
    except Exception as e:
        # include the original error so a failure is diagnosable
        # (the original caught ``e`` but never used it)
        assert False, "h2o.log_and_echo() command is not working: %s" % e
# standalone: spin up / connect to an H2O cloud first; otherwise assume a
# suite runner has already done so
if __name__ == "__main__":
    pyunit_utils.standalone_test(h2olog_and_echo)
else:
    h2olog_and_echo()
| 2.328125 | 2 |
app.py | Azariagmt/Ad-campaign-performance | 2 | 12761909 | <reponame>Azariagmt/Ad-campaign-performance<gh_stars>1-10
from flask import Flask, request, render_template
from werkzeug.exceptions import Forbidden, HTTPException, NotFound, RequestTimeout, Unauthorized
import os
app = Flask(__name__)


@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
@app.errorhandler(NotFound)
def page_not_found_handler(e: HTTPException):
    # placeholder 404 body (the template name is shown literally)
    return '<h1>404.html</h1>', 404
@app.errorhandler(Unauthorized)
def unauthorized_handler(e: HTTPException):
    # placeholder 401 body
    return '<h1>401.html</h1>', 401
@app.errorhandler(Forbidden)
def forbidden_handler(e: HTTPException):
    # placeholder 403 body
    return '<h1>403.html</h1>', 403
@app.errorhandler(RequestTimeout)
def request_timeout_handler(e: HTTPException):
    # placeholder 408 body
    return '<h1>408.html</h1>', 408
if __name__ == '__main__':
    os.environ.setdefault('Flask_SETTINGS_MODULE', 'helloworld.settings')
    app.jinja_env.auto_reload = True
    app.config['TEMPLATES_AUTO_RELOAD'] = True
    # Bug fix: PORT was read but never used, so app.run() fell back to
    # Flask's default port 5000.  Pass it through explicitly.
    port = int(os.environ.get("PORT", 33507))
    app.run(debug=True, port=port)
Deep-Learning/Fast-RCNN/Fast-RCNN(version: chen)/utils.py | ZhongHouyu/CVCode | 30 | 12761910 | # -*- coding:utf-8 -*-
# ------------------------
# written by <NAME>
# 2018-10
# ------------------------
import math
import torch
def get_IoU(ground_truth, region):
    """Intersection-over-union of *ground_truth* and *region*.

    NOTE(review): the two boxes are treated inconsistently.
    ``ground_truth`` is read as (xmin, ymin, xmax, ymax) throughout, but
    ``region`` is read as (x, y, w, h) for the intersection corner
    (``region[0] + region[2]``) and as (xmin, ymin, xmax, ymax) inside
    ``outer_area`` (``region[2] - region[0]``).  Only the x overlap is
    checked for emptiness, so a negative y overlap is not caught.  Confirm
    the intended box format before relying on this.
    """
    # xmin, ymin, xmax, ymax
    x1 = max(ground_truth[0], region[0])
    y1 = max(ground_truth[1], region[1])
    x2 = min(ground_truth[2], region[0] + region[2])
    y2 = min(ground_truth[3], region[1] + region[3])
    if x2 - x1 < 0:
        return 0
    inter_area = (x2 - x1 + 1) * (y2 - y1 + 1)
    outer_area = (region[2] - region[0] + 1) * (region[3] - region[1] + 1) \
        + (ground_truth[2] - ground_truth[0] + 1) * (ground_truth[3] - ground_truth[1] + 1) - inter_area
    if outer_area == 0:
        return 0
    iou = inter_area / outer_area
    return iou
def bbox_loss(bbox_output, rois, roi_labels, ground_truths):
    """Bounding-box regression loss: mean (over RoIs) of the summed squared
    difference between predicted deltas and R-CNN regression targets.

    :param bbox_output: predicted deltas, reshaped to (num_classes, 4)
    :param rois: (N, 4) RoI boxes (x, y, w, h) -- TODO confirm format
    :param roi_labels: class label per RoI; 20 marks background (skipped)
    :param ground_truths: target box per RoI, same layout as ``rois``
    """
    # output: (20, 4) ground_truth: (, 4)
    bbox_output = bbox_output.view(-1, 4)
    roi_num = rois.size(0)
    loss = 0
    for i in range(roi_num):
        label = roi_labels[i]
        if label == 20:
            # background RoI contributes no box loss
            continue
        dx, dy, dw, dh = bbox_output[label, :].long()
        Gx, Gy, Gw, Gh = ground_truths[i]
        Px, Py, Pw, Ph = rois[i].long()
        tx = (Gx - Px) / Pw
        ty = (Gy - Py) / Ph
        try:
            tw = math.log(int(Gw) / int(Pw))
            th = math.log(int(Gh) / int(Ph))
        except (ValueError, ZeroDivisionError):
            # fix: was a bare ``except:`` that swallowed *every* error.
            # Only a non-positive ratio (ValueError from math.log) or a
            # zero proposal width/height (ZeroDivisionError) is expected.
            print("******log exception******")
            print(Gw, Pw, Gh, Ph)
            print(Gw / Pw, Gh / Ph)
            continue
        t = torch.FloatTensor([tx, ty, tw, th])
        d = torch.FloatTensor([dx, dy, dw, dh])
        loss += sum((t - d) ** 2)
    return loss / roi_num
def smooth(x):
    """Smooth-L1 (Huber-style) transfer: quadratic near zero, linear elsewhere."""
    magnitude = abs(x)
    if magnitude >= 1:
        return magnitude - 0.5
    return 0.5 * x ** 2
| 1.953125 | 2 |
venv/lib/python3.8/site-packages/statsmodels/multivariate/tests/test_ml_factor.py | johncollinsai/post-high-frequency-data | 6,931 | 12761911 | <reponame>johncollinsai/post-high-frequency-data
import numpy as np
from statsmodels.multivariate.factor import Factor
from numpy.testing import assert_allclose, assert_equal
from scipy.optimize import approx_fprime
import warnings
# A small model for basic testing
def _toy():
uniq = np.r_[4, 9, 16]
load = np.asarray([[3, 1, 2], [2, 5, 8]]).T
par = np.r_[2, 3, 4, 3, 1, 2, 2, 5, 8]
corr = np.asarray([[1, .5, .25], [.5, 1, .5], [.25, .5, 1]])
return uniq, load, corr, par
def test_loglike():
    """The packed parameter vector and the (load, uniq) pair give the same loglike."""
    uniq, load, corr, par = _toy()
    fa = Factor(n_factor=2, corr=corr)
    assert_allclose(fa.loglike((load, uniq)), fa.loglike(par))
def test_score():
    """The analytic score matches a numerical gradient of the loglikelihood."""
    uniq, load, corr, par = _toy()
    fa = Factor(n_factor=2, corr=corr)
    par2 = np.r_[0.1, 0.2, 0.3, 0.4, 0.3, 0.1, 0.2, -0.2, 0, 0.8, 0.5, 0]
    for point in (par, par2):
        numerical = approx_fprime(point, fa.loglike, 1e-8)
        assert_allclose(numerical, fa.score(point), atol=1e-3)
def test_exact():
    # Test if we can recover exact factor-structured matrices with
    # default starting values.
    np.random.seed(23324)
    # Works for larger k_var but slow for routine testing.
    for k_var in 5, 10, 25:
        for n_factor in 1, 2, 3:
            load = np.random.normal(size=(k_var, n_factor))
            uniq = np.linspace(1, 2, k_var)
            c = np.dot(load, load.T)
            c.flat[::c.shape[0]+1] += uniq   # uniquenesses on the diagonal
            s = np.sqrt(np.diag(c))
            c /= np.outer(s, s)              # rescale to a correlation matrix
            fa = Factor(corr=c, n_factor=n_factor, method='ml')
            rslt = fa.fit()
            assert_allclose(rslt.fitted_cov, c, rtol=1e-4, atol=1e-4)
            rslt.summary()  # smoke test
def test_exact_em():
    # Test if we can recover exact factor-structured matrices with
    # default starting values using the EM algorithm.
    np.random.seed(23324)
    # Works for larger k_var but slow for routine testing.
    for k_var in 5, 10, 25:
        for n_factor in 1, 2, 3:
            load = np.random.normal(size=(k_var, n_factor))
            uniq = np.linspace(1, 2, k_var)
            c = np.dot(load, load.T)
            c.flat[::c.shape[0]+1] += uniq   # uniquenesses on the diagonal
            s = np.sqrt(np.diag(c))
            c /= np.outer(s, s)              # rescale to a correlation matrix
            fa = Factor(corr=c, n_factor=n_factor, method='ml')
            load_e, uniq_e = fa._fit_ml_em(2000)
            # rebuild the implied correlation matrix from the EM estimates
            c_e = np.dot(load_e, load_e.T)
            c_e.flat[::c_e.shape[0]+1] += uniq_e
            assert_allclose(c_e, c, rtol=1e-4, atol=1e-4)
def test_fit_ml_em_random_state():
    # Ensure Factor._fit_ml_em doesn't change numpy's singleton random state
    # see #7357
    T = 10
    epsilon = np.random.multivariate_normal(np.zeros(3), np.eye(3), size=T).T
    initial = np.random.get_state()
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", message='Fitting did not converge')
        Factor(endog=epsilon, n_factor=2, method='ml').fit()
    final = np.random.get_state()
    # the state tuple must be unchanged element-by-element
    assert(initial[0] == final[0])
    assert_equal(initial[1], final[1])
    assert(initial[2:] == final[2:])
def test_em():
    """EM estimates agree with the optimizer-based fit to about 1e-2."""
    n_factor = 1
    cor = np.asarray([[1, 0.5, 0.3], [0.5, 1, 0], [0.3, 0, 1]])
    fa = Factor(corr=cor, n_factor=n_factor, method='ml')
    rslt = fa.fit(opt={'gtol': 1e-3})
    # (dropped the unused load_opt/uniq_opt locals from the original)
    load_em, uniq_em = fa._fit_ml_em(1000)
    # rebuild the implied correlation matrix from the EM estimates
    cc = np.dot(load_em, load_em.T)
    cc.flat[::cc.shape[0]+1] += uniq_em
    assert_allclose(cc, rslt.fitted_cov, rtol=1e-2, atol=1e-2)
def test_1factor():
    """
    # R code:
    r = 0.4
    p = 4
    ii = seq(0, p-1)
    ii = outer(ii, ii, "-")
    ii = abs(ii)
    cm = r^ii
    fa = factanal(covmat=cm, factors=1)
    print(fa, digits=10)
    """
    r = 0.4
    p = 4
    ii = np.arange(p)
    cm = r ** np.abs(np.subtract.outer(ii, ii))
    fa = Factor(corr=cm, n_factor=1, method='ml')
    rslt = fa.fit()
    # fix the sign indeterminacy of the loadings before comparing
    if rslt.loadings[0, 0] < 0:
        rslt.loadings[:, 0] *= -1
    # R solution, but our likelihood is higher
    # uniq = np.r_[0.8392472054, 0.5820958187, 0.5820958187, 0.8392472054]
    # load = np.asarray([[0.4009399224, 0.6464550935, 0.6464550935,
    #                     0.4009399224]]).T
    # l1 = fa.loglike(fa._pack(load, uniq))
    # l2 = fa.loglike(fa._pack(rslt.loadings, rslt.uniqueness))
    # So use a smoke test
    uniq = np.r_[0.85290232, 0.60916033, 0.55382266, 0.82610666]
    load = np.asarray([[0.38353316], [0.62517171], [0.66796508],
                       [0.4170052]])
    assert_allclose(load, rslt.loadings, rtol=1e-3, atol=1e-3)
    assert_allclose(uniq, rslt.uniqueness, rtol=1e-3, atol=1e-3)
    assert_equal(rslt.df, 2)
def test_2factor():
    """
    # R code:
    r = 0.4
    p = 6
    ii = seq(0, p-1)
    ii = outer(ii, ii, "-")
    ii = abs(ii)
    cm = r^ii
    factanal(covmat=cm, factors=2)
    """
    r = 0.4
    p = 6
    ii = np.arange(p)
    cm = r ** np.abs(np.subtract.outer(ii, ii))
    fa = Factor(corr=cm, n_factor=2, nobs=100, method='ml')
    rslt = fa.fit()
    # fix the sign indeterminacy of each loading column before comparing
    for j in 0, 1:
        if rslt.loadings[0, j] < 0:
            rslt.loadings[:, j] *= -1
    uniq = np.r_[0.782, 0.367, 0.696, 0.696, 0.367, 0.782]
    assert_allclose(uniq, rslt.uniqueness, rtol=1e-3, atol=1e-3)
    loads = [np.r_[0.323, 0.586, 0.519, 0.519, 0.586, 0.323],
             np.r_[0.337, 0.538, 0.187, -0.187, -0.538, -0.337]]
    for k in 0, 1:
        if np.dot(loads[k], rslt.loadings[:, k]) < 0:
            loads[k] *= -1
        assert_allclose(loads[k], rslt.loadings[:, k], rtol=1e-3, atol=1e-3)
    assert_equal(rslt.df, 4)
    # Smoke test for standard errors
    e = np.asarray([0.11056836, 0.05191071, 0.09836349,
                    0.09836349, 0.05191071, 0.11056836])
    assert_allclose(rslt.uniq_stderr, e, atol=1e-4)
    e = np.asarray([[0.08842151, 0.08842151], [0.06058582, 0.06058582],
                    [0.08339874, 0.08339874], [0.08339874, 0.08339874],
                    [0.06058582, 0.06058582], [0.08842151, 0.08842151]])
    assert_allclose(rslt.load_stderr, e, atol=1e-4)
| 2.03125 | 2 |
usb2mq.py | nodtem66/ca-hub-rpi | 2 | 12761912 | #/usr/bin/python2
"""
udev service for USB
transfer USB data to zeromq pull server via tcp
required pull socket: tcp://localhost:6372
Author: <NAME>. <<EMAIL>>
Python version: 2.7
"""
import argparse
import os
import signal
import struct
import sys
import time
import usb1 as _usb1
import zmq as _zmq
import hardware
from logger import getLogger
# parse the "<bus>:<address>" positional argument identifying the USB device
parser = argparse.ArgumentParser()
parser.add_argument('bus')
args = parser.parse_args()
_args = args.bus.split(':')
LOG = getLogger('usb2mq')
if len(_args) < 2:
    LOG.error('invalid argument %s', args.bus)
    sys.exit(1)
bus, address = _args[0:2]
# write pid into file
#pidfile = open('/opt/ca-hub-rpi/pid/{}-{}'.format(bus, address), 'w')
#pidfile.write(str(os.getpid()))
#pidfile.close()
# init zmq broker: PUSH socket feeding the local pull server
zmq = _zmq.Context()
sender = zmq.socket(_zmq.PUSH)
sender.linger = 250  # ms to flush pending messages on close
sender.connect('tcp://127.0.0.1:6372')
LOG.info('connect zmq pull server')
def send(*arr):
    """Concatenate the byte chunks in *arr* and push them to the pull server.

    Best effort: a busy/unavailable socket (ZMQError) is silently ignored.
    """
    if not sender:
        return
    payload = bytearray()
    for chunk in arr:
        payload += bytearray(chunk)
    try:
        sender.send(struct.pack('>' + str(len(payload)) + 'B', *payload),
                    flags=_zmq.NOBLOCK)
    except _zmq.ZMQError:
        pass
# register signal handler
running = False
def shutdown(signum, frame):
    """SIGINT/SIGTERM handler: announce device removal, release the USB
    device and tear down the zmq context, then exit the process."""
    global running
    LOG.info('Shutting down...')
    if running and not sender.closed:
        running = False
        # message type 2 == device disconnected
        send([2, int(bus), int(address), productId])
    running = False
    if handle is not None:
        handle.releaseInterface(0)
        handle.close()
    if type(device).__name__ == 'USBDevice':
        device.close()
    sender.close()
    zmq.term()
    # hard exit: skip atexit handlers so signal handling can't deadlock
    os._exit(0)
signal.signal(signal.SIGINT, shutdown)
signal.signal(signal.SIGTERM, shutdown)
# get device from bus and address
usb1 = _usb1.USBContext()
device = None
handle = None
productId = hardware.INVALID_PRODUCT_ID
productName = ''
maxPacketSize = 64
try:
    # scan all USB devices for the requested bus/address pair
    for _device in usb1.getDeviceIterator(skip_on_error=True):
        if (_device.getBusNumber() == int(bus) and _device.getDeviceAddress() == int(address)):
            LOG.info('Initialize device bus:%s address:%s', bus, address)
            productName = _device.getProduct()
            maxPacketSize = _device.getMaxPacketSize(hardware.ENDPOINT_ADDRESS)
            LOG.info('%s (%s)', productName, _device.getManufacturer())
            LOG.info('packet size: %d', maxPacketSize)
            productId = hardware.getIdFromProductName(productName)
            device = _device
            break
except (RuntimeError, IOError, _usb1.USBError) as e:
    # message type 3 == error; report upstream, then exit
    LOG.error("Unexpected error 1: %s", str(e))
    send([3, int(bus), int(address), productId], str(e))
    shutdown(0, 0)
if device is None:
    LOG.error('Device can not be initialized!')
    shutdown(0, 0)
if productId == hardware.INVALID_PRODUCT_ID:
    LOG.error('Unsupport USB device')
    shutdown(0, 0)
# transfer callback function
def mainloop():
global handle
global running
# init device
try:
handle = device.open()
handle.claimInterface(0)
send([1, int(bus), int(address), productId])
running = True
#scheduler = sched.scheduler(time.time, time.sleep)
while running:
try:
data = handle.interruptRead(hardware.ENDPOINT_ADDRESS, maxPacketSize)
isValid = False
if productId == hardware.SPO2_PRODUCT_ID:
assert len(data) == 6
isValid = True
time.sleep(1.0/hardware.SPO2_SAMPLING_RATE_HZ/1.5)
elif productId == hardware.ECG_PRODUCT_ID:
assert len(data) == 27
isValid = True
time.sleep(1.0/hardware.ECG_SAMPLING_RATE_HZ/1.5)
if isValid and running:
send([0, int(bus), int(address), productId], data)
except _usb1.USBErrorInterrupted as e:
LOG.error("USB Error: %s", str(e))
send([3, int(bus), int(address), productId], str(e))
shutdown(0, 0)
except (RuntimeError, IOError, _usb1.USBError) as e:
LOG.error("Unexpected error 3: %s", str(e))
send([3, int(bus), int(address), productId], str(e))
if __name__ == '__main__':
    # blocks until shutdown() terminates the process via os._exit()
    mainloop()
| 3.078125 | 3 |
levelworks/levelweb/migrations/0005_alter_student_age.py | benNthen/levelworks-site | 0 | 12761913 | <filename>levelworks/levelweb/migrations/0005_alter_student_age.py
# Generated by Django 3.2.4 on 2021-06-24 01:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('levelweb', '0004_alter_student_age'),
]
operations = [
migrations.AlterField(
model_name='student',
name='age',
field=models.IntegerField(),
),
]
| 1.445313 | 1 |
workon/contrib/unique/models.py | dalou/django-workon | 0 | 12761914 | from django.db import models
class Unique(models.Model):
class Meta:
abstract = True
@classmethod
def get(cls):
instance = cls._meta.default_manager.first()
if not instance:
instance = cls()
return instance
def save(self, *args, **kwargs):
previous = self._meta.default_manager.first()
if previous:
self.pk = previous.pk
super(Unique, self).save(*args, **kwargs)
| 2.515625 | 3 |
mCP437.py | SkyLined/mConsole | 2 | 12761915 | <reponame>SkyLined/mConsole
# Non-unicode strings are assumed to be CP437. We have an indexed table to
# convert CP437 to unicode (index range 0-255 => unicode char) and a dict to
# convert Unicode to CP437 (unicode char => CP437 char). These are used by the
# fsuCP437_to_Unicode and fsUnicode_to_CP437 functions respectively.
# CP437 -> Unicode lookup table: index 0-255 maps to a one-character str.
# String entries map to themselves; integer entries are Unicode code points
# (control-picture glyphs, box drawing, Greek letters, math symbols, ...).
# A conditional expression replaces the fragile `cond and a or b` idiom
# (which only worked because every string entry is non-empty/truthy).
asUnicodeCharMapCP437 = [x if isinstance(x, str) else chr(x) for x in [
  0, 9786, 9787, 9829, 9830, 9827, 9824, 8226,
  9688, 9675, 9689, 9794, 9792, 9834, 9835, 9788,
  9658, 9668, 8597, 8252, 182, 167, 9644, 8616,
  8593, 8595, 8594, 8592, 8735, 8596, 9650, 9660,
  " ", "!", '"', "#", "$", "%", "&", "'",
  "(", ")", "*", "+", ",", "-", ".", "/",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", ":", ";", "<", "=", ">", "?",
  "@", "A", "B", "C", "D", "E", "F", "G",
  "H", "I", "J", "K", "L", "M", "N", "O",
  "P", "Q", "R", "S", "T", "U", "V", "W",
  "X", "Y", "Z", "[", "\\", "]", "^", "_",
  "`", "a", "b", "c", "d", "e", "f", "g",
  "h", "i", "j", "k", "l", "m", "n", "o",
  "p", "q", "r", "s", "t", "u", "v", "w",
  "x", "y", "z", "{", "|", "}", "~", 8962,
  199, 252, 233, 226, 228, 224, 229, 231,
  234, 235, 232, 239, 238, 236, 196, 197,
  201, 230, 198, 244, 246, 242, 251, 249,
  255, 214, 220, 162, 163, 165, 8359, 402,
  225, 237, 243, 250, 241, 209, 170, 186,
  191, 8976, 172, 189, 188, 161, 171, 187,
  9617, 9618, 9619, 9474, 9508, 9569, 9570, 9558,
  9557, 9571, 9553, 9559, 9565, 9564, 9563, 9488,
  9492, 9524, 9516, 9500, 9472, 9532, 9566, 9567,
  9562, 9556, 9577, 9574, 9568, 9552, 9580, 9575,
  9576, 9572, 9573, 9561, 9560, 9554, 9555, 9579,
  9578, 9496, 9484, 9608, 9604, 9612, 9616, 9600,
  945, 946, 915, 960, 931, 963, 956, 964,
  934, 920, 937, 948, 8734, 966, 949, 8745,
  8801, 177, 8805, 8804, 8992, 8993, 247, 8776,
  176, 8729, 183, 8730, 8319, 178, 9632, 160,
]];
# Reverse map: one-character unicode str -> the corresponding CP437 byte.
dsbCP437Byte_by_sUnicodeChar = {};
for uCP437Byte in range(0x100):
  sUnicodeChar = asUnicodeCharMapCP437[uCP437Byte];
  # bytes([n]) yields the single byte b"\xNN". The original bytes(n) is a
  # bug in Python 3: it creates n ZERO bytes, corrupting the reverse map.
  dsbCP437Byte_by_sUnicodeChar[sUnicodeChar] = bytes([uCP437Byte]);
def fsBytesToUnicode(sbCP437Bytes):
  # Convert a CP437 byte sequence (bytes/bytearray) to a unicode str.
  # Iterating bytes in Python 3 yields ints, on which the original
  # ord(sbByte) raised TypeError; index with the int directly, and keep
  # ord() only for str input (backwards compatibility).
  return "".join([
    asUnicodeCharMapCP437[sbByte if isinstance(sbByte, int) else ord(sbByte)]
    for sbByte in sbCP437Bytes
  ]);
fsUnicodeFromBytes = fsBytesToUnicode;
def fsbUnicodeToBytes(sUnicode):
  # Convert a unicode str to CP437 bytes; characters with no CP437
  # equivalent are replaced by b"?".
  return b"".join(
    dsbCP437Byte_by_sUnicodeChar.get(sChar, b"?") for sChar in sUnicode
  );
fsbBytesFromUnicode = fsbUnicodeToBytes;
py/torch_tensorrt/_compile.py | narendasan/TRTorch | 0 | 12761916 | from typing import List, Dict, Any
from torch_tensorrt import _enums
import torch_tensorrt.ts
from torch_tensorrt import logging
import torch
from enum import Enum
class _IRType(Enum):
    """Enum of the intermediate representations (IRs) Torch-TensorRT can
    lower a module through: TorchScript (``ts``) or torch.fx (``fx``).
    """
    ts = 0
    fx = 1
def _module_ir(module: Any, ir: str) -> _IRType:
    """Resolve the IR to compile `module` through, given the requested `ir`.

    Args:
        module: The module/function being compiled.
        ir: Requested IR: "default", "ts"/"torchscript" or "fx".

    Returns:
        The resolved _IRType (currently always _IRType.ts, since fx paths
        raise).

    Raises:
        ValueError: If fx is requested/required (unsupported here) or the
            module/ir combination cannot be handled.
    """
    # Possible module types
    module_is_tsable = any(isinstance(module, t) for t in [torch.nn.Module, torch.jit.ScriptModule, torch.jit.ScriptFunction])
    module_is_fxable = any(isinstance(module, t) for t in [torch.nn.Module, torch.fx.GraphModule])
    ir_targets_torchscript = any([ir == opt for opt in ["torchscript", "ts"]])
    ir_targets_fx = ir == "fx"
    if module_is_tsable and ir_targets_torchscript:
        return _IRType.ts
    elif module_is_fxable and ir_targets_fx:
        # fx is recognized but not supported yet: all sub-branches raise.
        if isinstance(module, torch.fx.GraphModule):
            raise ValueError("Was given a torch.fx.GraphModule, fx is not currently supported by Torch-TensorRT")
        elif ir_targets_fx:
            raise ValueError("Preferred ir was set to \"fx\" which is currently not supported by Torch-TensorRT")
        else:
            raise ValueError("Torch-TensorRT currently does not support fx")
        # return _IRType.fx
    else:
        if ir == "default":
            # Options are listed in order of preference
            if module_is_tsable:
                logging.log(logging.Level.Info, "ir was set to default, using TorchScript as ir")
                return _IRType.ts
            elif module_is_fxable:
                raise ValueError("Was given a torch.fx.GraphModule, fx is not currently supported by Torch-TensorRT")
                #logging.log(logging.Level.Info, "ir was set to default, using TorchScript as fx")
                #return _IRType.fx
            else:
                raise ValueError("Module was provided with in an unsupported format")
        else:
            raise ValueError("Unknown ir was requested")
def compile(module: Any,
            ir="default",
            inputs=[],
            enabled_precisions=set([_enums.dtype.float]),
            **kwargs):
    """Compile `module` to a TensorRT-accelerated module.

    Args:
        module: A torch.nn.Module / torch.jit.ScriptModule /
            torch.jit.ScriptFunction to compile.
        ir: Which IR to lower through ("default", "ts"/"torchscript", "fx").
            Only the TorchScript path is currently supported.
        inputs: Input specs forwarded to the TorchScript backend.
            NOTE(review): mutable default ([]) — not mutated here; kept
            as-is to preserve the public signature.
        enabled_precisions: Precisions TensorRT may select kernels for.
        **kwargs: Forwarded to torch_tensorrt.ts.compile.

    Returns:
        The compiled module (TorchScript path).

    Raises:
        RuntimeError: If the resolved IR is fx or unknown.
    """
    target_ir = _module_ir(module, ir)
    if target_ir == _IRType.ts:
        ts_mod = module
        if isinstance(module, torch.nn.Module):
            # plain nn.Module: script it first so the TS backend can consume it
            logging.log("Module was provided as a torch.nn.Module, trying to script the module with torch.jit.script. In the event of a failure please preconvert your module to TorchScript")
            ts_mod = torch.jit.script(module)
        return torch_tensorrt.ts.compile(ts_mod, inputs=inputs, enabled_precisions=enabled_precisions, **kwargs)
    elif target_ir == _IRType.fx:
        raise RuntimeError("fx is currently not supported")
    else:
        raise RuntimeError("Module is an unknown format or the ir requested is unknown")
def convert_method_to_trt_engine(module: Any,
                                 method_name: str,
                                 ir="default",
                                 inputs=[],
                                 enabled_precisions=set([_enums.dtype.float]),
                                 **kwargs):
    """Convert one method of `module` into a serialized TensorRT engine.

    Args:
        module: A torch.nn.Module / torch.jit.ScriptModule /
            torch.jit.ScriptFunction whose method should be converted.
        method_name: Name of the method to convert (e.g. "forward").
        ir: Which IR to lower through ("default", "ts"/"torchscript", "fx").
            Only the TorchScript path is currently supported.
        inputs: Input specs forwarded to the TorchScript backend.
        enabled_precisions: Precisions TensorRT may select kernels for.
        **kwargs: Forwarded to torch_tensorrt.ts.convert_method_to_trt_engine.

    Raises:
        RuntimeError: If the resolved IR is fx or unknown.
    """
    target_ir = _module_ir(module, ir)
    if target_ir == _IRType.ts:
        ts_mod = module
        if isinstance(module, torch.nn.Module):
            # plain nn.Module: script it first so the TS backend can consume it
            logging.log("Module was provided as a torch.nn.Module, trying to script the module with torch.jit.script. In the event of a failure please preconvert your module to TorchScript")
            ts_mod = torch.jit.script(module)
        return torch_tensorrt.ts.convert_method_to_trt_engine(ts_mod, method_name, inputs=inputs, enabled_precisions=enabled_precisions, **kwargs)
    elif target_ir == _IRType.fx:
        raise RuntimeError("fx is currently not supported")
    else:
        raise RuntimeError("Module is an unknown format or the ir requested is unknown")
4. Plots/2. Figure 3/figure 3.py | phuycke/code-sharing | 2 | 12761917 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>
email: <EMAIL>
GitHub: phuycke
"""
#%%
import matplotlib.pyplot as plt
import mne
import numpy as np
import os
import pandas as pd
import seaborn as sns
from scipy import ndimage
from matplotlib import ticker, rcParams, gridspec
#%%
# global matplotlib text size / font used across all panels
TEXT_SIZE = 15
rcParams['font.family'] = 'Times New Roman'
rcParams['axes.titlesize'] = TEXT_SIZE
rcParams['axes.labelsize'] = TEXT_SIZE
rcParams['xtick.labelsize'] = TEXT_SIZE
rcParams['ytick.labelsize'] = TEXT_SIZE
#%%
# create grid for plots
# 2 rows x 13 columns: top row holds the TFR (A) and topography (B),
# bottom row holds the four regression panels (C left/right, D left/right)
fig = plt.figure(figsize=(10, 9))
gs = gridspec.GridSpec(2, 13)
# TFR plot
fig_3a = plt.subplot(gs[0, :8])
# topoplot
fig_3b = plt.subplot(gs[0, 8:])
# alpha on the fast timescale
fig_3c_l = plt.subplot(gs[1, 0:3]) # novel condition
fig_3c_r = plt.subplot(gs[1, 3:6]) # repeating condition
# alpha on the slow timescale
fig_3d_l = plt.subplot(gs[1, 7:10]) # novel condition
fig_3d_r = plt.subplot(gs[1, 10:13]) # repeating condition
#%%
"""
Figure 3A
"""
# Panel A: time-frequency map of the permutation-test F-statistics
# (stimulus 1 vs. stimulus 8), averaged over the 64 electrodes.
# path to the result of the permutation data
PERM_DATA = r"C:\Users\pieter\OneDrive - UGent\Projects\2019\overtraining - PILOT 3\figures\Publish\Data\Stimulus-locked\Repetition 1 vs. repetition 8"
TIME_DATA = r"C:\Users\pieter\OneDrive - UGent\Projects\2019\overtraining - PILOT 3\figures\TF\Group level\data"
# define frequency bands (log spaced for setting the y-ticks later on)
FREQS = np.logspace(np.log10(4),
                    np.log10(30),
                    15)
# load the time data, and select everything between 0 and 1s
times = np.load(os.path.join(TIME_DATA, "stimulus_times.npy"))
times = times[np.where((times > 0) & (times <= 1))]
# the the difference between x[0] and x[1] for each value in times, and divide
# by 2 if len(times) is larger than 1s, else fix this at 0.0005
time_diff = np.diff(times) / 2. if len(times) > 1 else [0.0005]
# compute the limits of the time window (x-axis)
# start: first value of time (a bit larger than 0) - 0.00048828
# middle: all values except the last + 0.00048828
# final: last value of time (1) + 0.00048828
time_lims = np.concatenate([[times[0] - time_diff[0]], times[:-1] +
                            time_diff, [times[-1] + time_diff[-1]]])
# get the values that should be on the y-axis
yvals = FREQS
# compute the ratio: x[1] = x[0] * ratio (holds for all values)
ratio = yvals[1:] / yvals[:-1]
# compute the limits of the frequencies (y-axis)
# start: first value of yvals (4) / 1.15479362
# middle: the values of yvals
# last: the last value of yvals (30) * 1.15479362
log_yvals = np.concatenate([[yvals[0] / ratio[0]], yvals,
                            [yvals[-1] * ratio[0]]])
# get the limits of the y-axis
# note that yvals_lims is in this case equal to yvals since yvals is
# log-spaced. This would not be true if linspace was used to get frequencies
yval_lims = np.sqrt(log_yvals[:-2] * log_yvals[2:])
time_lims = time_lims[:-1]
# create a meshgrid
# time_mesh: row values are the same, column values differ (time)
# yval_mesh: row values differ (freqs), column values are the same
time_mesh, yval_mesh = np.meshgrid(time_lims, yval_lims)
# load the permutation test result array + check dimensions of the data
f_obs = np.load(os.path.join(PERM_DATA, "f_obs.npy"))
assert f_obs.shape == (64, 15, 1024)
# 64: electrodes, 15: frequencies, 1024: time points
# we average over electrodes to retain the frequency and time information
f_obs_mean = np.mean(f_obs, axis = 0)
# apply a gaussian filter to the data, with SD = 1 for both axes
# (smoothing is for display only; the statistics stay untouched)
gauss = ndimage.filters.gaussian_filter(f_obs_mean,
                                        [1, 1],
                                        mode = 'constant')
# create a pseudocolor plot
fig_3a.pcolormesh(time_mesh,
                  yval_mesh,
                  gauss,
                  cmap = "RdBu_r",
                  shading = "gouraud")
# draw a contour around larger values
# we draw the contour around values that are percentile 97.5 or larger
fig_3a.contour(time_mesh,
               yval_mesh,
               gauss,
               levels = [np.percentile(gauss, 97.5)],
               colors = "black",
               linewidths = 3,
               linestyles = "solid")
# set the y-axis parameters, note that the y-axis needs to be converted to
# log, and that a ticker needs to be called to set the y-axis ticks
fig_3a.set_yscale('log')
fig_3a.get_yaxis().set_major_formatter(ticker.ScalarFormatter())
fig_3a.yaxis.set_minor_formatter(ticker.NullFormatter())
fig_3a.yaxis.set_minor_locator(ticker.NullLocator())
# once the ticks are set, we assign the values of FREQS to the ticks
tick_vals = yvals[np.unique(np.linspace(0, len(yvals) - 1, 15).round().astype('int'))]
fig_3a.set_yticks(tick_vals)
# determine the y-ticks
# (only the classical band edges 4, 8, 13, 19 and 30 Hz get a label)
ticks_str = []
for t in tick_vals:
    if round(t) in [4, 8, 13, 19, 30]:
        ticks_str.append("{0:.2f}".format(t))
    else:
        ticks_str.append(" ")
fig_3a.set_yticklabels(ticks_str)
# set the x-axis parameters: every 100 ms a label is placed
fig_3a.set_xticks(np.arange(0, 1.1, .25))
fig_3a.set_xticklabels([str(int(label)) for label in np.arange(0, 1001, 250)])
# set the general title, and the titles of the x-axis and the y-axis
fig_3a.set_xlabel('Time after stimulus (ms)')
fig_3a.set_ylabel('Frequency (Hz)')
fig_3a.set_title("Stimulus 1 vs. 8: permutation test TFR\nAlpha on the fast timescale (p = 0.001)")
# load the cluster data, and keep only the significant clusters
clust = np.load(os.path.join(PERM_DATA, "clust.npy"), allow_pickle = True)
clust_p_val = np.load(os.path.join(PERM_DATA, "clust_p_val.npy"))
f_obs_plot = np.zeros_like(f_obs)
for c, p_val in zip(clust, clust_p_val):
    if p_val <= 0.05:
        f_obs_plot[tuple(c)] = f_obs[tuple(c)]
# take the average (excluding NaNs) of the significant data
f_obs_plot_mean = np.nanmean(f_obs_plot, axis = 0)
# create a 2D raster of the significant data (no plot) to use for the colorbar
im = fig_3a.imshow(f_obs_plot_mean,
                   extent = [times[0], times[-1],
                             FREQS[0], FREQS[-1]],
                   aspect = "auto",
                   origin = "lower",
                   interpolation = "hanning",
                   cmap = "RdBu_r")
# get the colorbar of the above 2D raster, and paste it on the existing TFR plot
# note that this data is used to create the colorbar, and not the filtered data
# since the values become smaller due to the filtering process. The plot reflects
# the actual data, filtering is only done for visual appeal
cbar = fig.colorbar(im, ax = fig_3a)
# set some colorbar parameters, such as the title, ticks and tick labels
cbar.ax.set_title("F-statistic",
                  fontdict = {"fontsize": TEXT_SIZE})
cbar.ax.get_yaxis().set_ticks(np.arange(0, np.round(np.max(f_obs_plot_mean), 1) + 0.05, 4))
cbar.ax.tick_params(labelsize = TEXT_SIZE - 3)
# big fix: make sure that the 0 is shown on the x-axis of the final plot
fig_3a.set_xbound(0, 1)
#%%
"""
Figure 3B
"""
# Panel B: alpha-band scalp topography of the dB difference (rep. 8 - rep. 1).
# Determines which part of the analysis to run + some plotting parameters
STIM_LOCKED = True
COMPUTE_TFR = False
BAND = [(8, 12, "Alpha")]
TMIN, TMAX = .65, .9
VMIN, VMAX = 0.5, 4.5
rcParams['font.family'] = 'Times New Roman'
rcParams['font.size'] = 8
# these are the subjects that had all 512 epochs recorded and stored safely
full_epochs = ["sub-02", "sub-03", "sub-04", "sub-05", "sub-06", "sub-08",
               "sub-10", "sub-12", "sub-13", "sub-15", "sub-16", "sub-17",
               "sub-18", "sub-19", "sub-20", "sub-21", "sub-22", "sub-23",
               "sub-25", "sub-26", "sub-27", "sub-28", "sub-29", "sub-30"]
# load the TFR data
rep1 = mne.time_frequency.read_tfrs(r"C:\Users\pieter\Downloads\repetition 1 (24 subs)-tfr.h5")[0]
rep8 = mne.time_frequency.read_tfrs(r"C:\Users\pieter\Downloads\repetition 8 (24 subs)-tfr.h5")[0]
# save rep8 in temp, dB transform
# NOTE(review): `temp`/`temp2` alias rep8/rep1 (no copy), so rep8._data and
# rep1._data are modified in place by the lines below — confirm intended.
temp = rep8
temp._data = 10 * np.log10(rep8._data)
# save rep1 in temp2, dB transform
temp2 = rep1
temp2._data = 10 * np.log10(rep1._data)
temp._data -= temp2._data
# check whether the difference does not equal rep_1 or rep_8
# NOTE(review): because temp IS rep8, the second assert compares the array
# with itself and can never fire — verify these sanity checks do what was
# intended.
assert np.all(temp._data != rep1._data)
assert not np.sum(temp._data != rep8._data)
# colorbar tick formatter: render each tick value as LaTeX with 2 decimals
def fmt_float(x, pos):
    """Format colorbar tick value `x` as r'$<x.2f>$'.

    `pos` is unused but required by the matplotlib tick-formatter signature.
    """
    return r'$' + format(x, '.2f') + r'$'
# define the data
avg_tfr = temp
# get the frequency bands
FMIN, FMAX, FNAME = BAND[0]
# make topoplot
# (scalp map of the average power between TMIN-TMAX s and FMIN-FMAX Hz)
avg_tfr.plot_topomap(tmin = TMIN,
                     tmax = TMAX,
                     fmin = FMIN,
                     fmax = FMAX,
                     vmin = VMIN,
                     vmax = VMAX,
                     unit = " ",
                     ch_type = "eeg",
                     cmap = "RdBu_r",
                     outlines = "head",
                     contours = 10,
                     colorbar = True,
                     cbar_fmt = fmt_float,
                     sensors = "ko",
                     axes = fig_3b,
                     title = " ")
# set a title which can be altered
fig_3b.set_title(r"$\alpha$ topography", size = TEXT_SIZE)
#%%
"""
Figure 3C
"""
# Panel C: alpha power regressed on stimulus number (fast timescale),
# separately for the Novel and Recurring conditions.
# where to find the data files
ROOT = r"C:\Users\pieter\OneDrive - UGent\Projects\2019\overtraining - PILOT 3\figures\Publish\Data\Stimulus-locked\Theta, alpha, beta + behavioral data"
# seaborn param
sns.set_style("ticks")
sns.set_context("paper")
# read the data
df = pd.read_csv(os.path.join(ROOT, "theta_alpha_beta_behavioural.csv"))
# change the column names to their appropriate label
df.columns = ['Reaction time (ms)', 'RT_log', 'Accuracy', 'Accuracy_int',
              'Error_int', 'Theta power', 'Alpha power', 'Beta power',
              'Subject nr', 'Repetitions_overall', 'Repetition count',
              'Block_overall', 'Block number', 'Condition', 'Trial_overall',
              'Trial_block', 'Response', 'Stimulus_ID']
x_title, y_title = "Repetition count", "Alpha power"
# Novel condition
g = sns.regplot(x = x_title,
                y = y_title,
                data = df.loc[df["Condition"] == "Novel"],
                x_estimator = np.mean,
                x_ci = "ci",
                ci = 95,
                n_boot = 5000,
                scatter_kws = {"s":15},
                line_kws = {'lw': .75},
                color = "darkgrey",
                ax = fig_3c_l)
# Recurring condition
g = sns.regplot(x = x_title,
                y = y_title,
                data = df.loc[df["Condition"] == "Recurring"],
                x_estimator = np.mean,
                x_ci = "ci",
                ci = 95,
                n_boot = 5000,
                scatter_kws = {"s":15},
                line_kws = {'lw': .75},
                color = "black",
                ax = fig_3c_r)
# figure parameters (left figure)
fig_3c_l.set_title(r"Novel condition", size = TEXT_SIZE)
fig_3c_l.set_ylim([-.5, -.1])
fig_3c_l.set_yticks(np.arange(-.5, -.09, .1))
fig_3c_l.set_xticks(np.arange(1, 9))
fig_3c_l.set_xlim(0.5, 8.5)
fig_3c_l.set_xlabel(r"Stimulus number")
fig_3c_l.set_ylabel(r"$\alpha$ power")
# figure parameters (right figure)
fig_3c_r.set_xlim(0.5, 8.5)
fig_3c_r.set_xticks(np.arange(1, 9))
fig_3c_r.set_ylim([-.5, -.1])
fig_3c_r.set_yticks(np.arange(-.5, -.09, .1))
fig_3c_r.set_yticklabels([])
fig_3c_r.set_title(r"Repeating condition", size = TEXT_SIZE)
fig_3c_r.set_xlabel(r"Stimulus number")
fig_3c_r.set_ylabel(" ")
#%%
"""
Figure 3D
"""
# Panel D: same regressions as panel C, but on block number (slow timescale).
# new variables
x_title, y_title = "Block number", "Alpha power"
# Novel condition
g = sns.regplot(x = x_title,
                y = y_title,
                data = df.loc[df["Condition"] == "Novel"],
                x_estimator = np.mean,
                x_ci = "ci",
                ci = 95,
                n_boot = 5000,
                scatter_kws = {"s":15},
                line_kws = {'lw': .75},
                color = "darkgrey",
                ax = fig_3d_l)
# Recurring condition
g = sns.regplot(x = x_title,
                y = y_title,
                data = df.loc[df["Condition"] == "Recurring"],
                x_estimator = np.mean,
                x_ci = "ci",
                ci = 95,
                n_boot = 5000,
                scatter_kws = {"s":15},
                line_kws = {'lw': .75},
                color = "black",
                ax = fig_3d_r)
# figure parameters (left figure)
fig_3d_l.set_title(r"Novel condition", size = TEXT_SIZE)
fig_3d_l.set_ylim([-.5, -.1])
fig_3d_l.set_yticks(np.arange(-.5, -.09, .1))
fig_3d_l.set_xticks(np.arange(1, 9))
fig_3d_l.set_xlim(0.5, 8.5)
fig_3d_l.set_xlabel(r"Block number")
fig_3d_l.set_ylabel(r"$\alpha$ power")
# figure parameters (right figure)
fig_3d_r.set_xlim(0.5, 8.5)
fig_3d_r.set_xticks(np.arange(1, 9))
fig_3d_r.set_ylim([-.5, -.1])
fig_3d_r.set_yticks(np.arange(-.5, -.09, .1))
fig_3d_r.set_yticklabels([])
fig_3d_r.set_title(r"Repeating condition", size = TEXT_SIZE)
fig_3d_r.set_xlabel(r"Block number")
fig_3d_r.set_ylabel(" ")
#%%
"""
Save figure
"""
# define the Figure dir + set the size of the image
FIG = r"C:\Users\pieter\OneDrive - UGent\Projects\2019\overtraining - PILOT 3\figures\Publish\Correct DPI plots"
# play around until the figure is satisfactory (difficult with high DPI)
plt.subplots_adjust(top=0.932, bottom=0.077, left=0.097, right=0.938,
                    hspace=0.5, wspace=0.35)
# letters indicating the panels
# (coordinates are in the data space of the last-used axes, hence the
# hand-tuned negative values)
plt.text(-245, 5, "A", size = TEXT_SIZE+5)
plt.text(-85, 5, "B", size = TEXT_SIZE+5)
plt.text(-245, -1, "C", size = TEXT_SIZE+5)
plt.text(-115, -1, "D", size = TEXT_SIZE+5)
# dB label for panel B
plt.text(-1.5, 4.6, "dB", size = TEXT_SIZE)
# titles for panels C and D
plt.text(-200, -1.15, r"$\alpha$ power ~ fast timescale", size = TEXT_SIZE)
plt.text(-75, -1.15, r"$\alpha$ power ~ slow timescale", size = TEXT_SIZE)
# save as tiff and pdf
plt.savefig(fname = os.path.join(FIG, "Figure 3.tiff"), dpi = 300)
plt.savefig(fname = os.path.join(FIG, "Figure 3.pdf"), dpi = 300)
plt.close("all")
| 2.171875 | 2 |
unwarp.py | kylemcdonald/FisheyeToEquirectangular | 12 | 12761918 | <reponame>kylemcdonald/FisheyeToEquirectangular<filename>unwarp.py
import os
import argparse
import shutil
import errno
import ffmpeg
import numpy as np
from tqdm import tqdm
from fisheye import FisheyeToEquirectangular
from utils.imutil import imresize, imwrite
def get_tmp_audio(tmp_folder, fn):
    """Return the temp .wav path for video `fn` inside `tmp_folder`.

    Creates `tmp_folder` if it does not exist yet.
    """
    os.makedirs(tmp_folder, exist_ok=True)
    return os.path.join(tmp_folder, os.path.basename(fn) + '.wav')
def get_tmp_video(tmp_folder, fn):
    """Return the temp path for video `fn` inside `tmp_folder`.

    Creates `tmp_folder` if it does not exist yet.
    """
    os.makedirs(tmp_folder, exist_ok=True)
    return os.path.join(tmp_folder, os.path.basename(fn))
def print_meta(fn, meta):
    """Pretty-print the video geometry, audio presence and timing of `meta`."""
    vs = get_stream(meta, 'video')
    has_audio = bool(get_stream(meta, 'audio'))
    print(fn)
    print('  video: {}x{} @ {}'.format(vs['width'], vs['height'], vs['avg_frame_rate']))
    print(f'  audio: {"yes" if has_audio else "no"}')
    for key in ('duration', 'start_time'):
        print(f'  {key}: {vs[key]}')
def get_meta(fn):
    """Probe `fn` with ffprobe and return its metadata dict.

    Raises FileNotFoundError up front so a missing file produces a clear
    error instead of an opaque ffprobe failure.
    """
    if os.path.exists(fn):
        return ffmpeg.probe(fn)
    raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), fn)
def get_stream(meta, codec_type):
    """Return the first stream in `meta` with the given codec_type, or None."""
    return next(
        (s for s in meta['streams'] if s['codec_type'] == codec_type),
        None,
    )
def get_input_process(fn, width, height, fps, target_width, target_height, target_fps, vframes):
    """Start an async ffmpeg process decoding `fn` to raw RGB24 on stdout.

    The stream is conformed to (target_width, target_height, target_fps);
    a truthy `vframes` caps the number of decoded frames.
    NOTE(review): the fps comparison is string-based, so the fps filter is
    only skipped when avg_frame_rate is exactly "<target_fps>/1".
    """
    process = ffmpeg.input(fn)
    if fps != f'{target_fps}/1':
        process = process.filter('fps', fps=target_fps)
    if width != target_width or height != target_height:
        process = process.filter('scale', target_width, target_height)
    extra = {}
    if vframes:
        extra['vframes'] = vframes
    process = (
        process
        .output('pipe:', format='rawvideo', pix_fmt='rgb24', **extra)
        .global_args('-hide_banner', '-nostats', '-loglevel', 'panic')
        .run_async(pipe_stdout=True)
    )
    return process
def main():
    """Command-line entry point.

    Parses arguments, streams the two fisheye inputs through ffmpeg,
    unwarps (or side-by-side stacks) each frame pair, re-encodes the
    result, muxes in any audio tracks, and removes the temp folder.
    """
    parser = argparse.ArgumentParser(
        epilog='Usage: python unwarp.py -l ch01.mp4 -r ch02.mp4 -d 10 -o warped.mp4'
    )
    parser.add_argument('-l', '--left_video', type=str,
                        help='Left video filename', required=True)
    parser.add_argument('--skip_left', type=int,
                        help='Left video frames to skip', default=0)
    parser.add_argument('-r', '--right_video', type=str,
                        help='Right video filename', required=True)
    parser.add_argument('--skip_right', type=int,
                        help='Right video frames to skip', default=0)
    parser.add_argument('-o', '--output', type=str,
                        help='Output video filename', required=True)
    parser.add_argument('--height', type=int,
                        help='Output video height', default=2048)
    parser.add_argument('--frame_rate', type=int,
                        help='Output video frame rate', default=24)
    parser.add_argument('--blending', type=int,
                        help='Blending area in pixels', default=16)
    parser.add_argument('--aperture', type=float,
                        help='Ratio of the camera FOV to image size', default=1)
    parser.add_argument('--preset', type=str,
                        help='ffmpeg output video codec preset', default='veryslow')
    parser.add_argument('--crf', type=int,
                        help='ffmpeg output video codec crf (0 best to 51 worst, 17-28 is good range, default 17)', default=17)
    parser.add_argument('-d', '--duration', type=float,
                        help='Duration in seconds, uses entire video if ommitted')
    parser.add_argument('--vcodec', type=str,
                        help='ffmpeg output video codec', default='libx264')
    parser.add_argument('--fisheye', action='store_true',
                        help='Output raw fisheye pair, do not unwarp')
    parser.add_argument('--tmp_folder', type=str,
                        help='Location of temp folder.', default='.tmp')
    parser.add_argument('--preview', action='store_true',
                        help='Save a .png of the first frame for reference.')
    parser.add_argument('-v', '--verbose', action='store_true')
    args = parser.parse_args()
    # probe both inputs for geometry and frame rate
    left_meta = get_meta(args.left_video)
    left_video_stream = get_stream(left_meta, 'video')
    left_width, left_height = left_video_stream['width'], left_video_stream['height']
    left_fps = left_video_stream['avg_frame_rate']
    right_meta = get_meta(args.right_video)
    right_video_stream = get_stream(right_meta, 'video')
    right_width, right_height = right_video_stream['width'], right_video_stream['height']
    right_fps = right_video_stream['avg_frame_rate']
    if args.verbose:
        print_meta(args.left_video, left_meta)
        print_meta(args.right_video, right_meta)
    # clamp the requested duration to what both inputs can provide
    # (skipped frames shorten each input's usable duration)
    left_duration = float(left_video_stream['duration']) - args.skip_left / args.frame_rate
    right_duration = float(right_video_stream['duration']) - args.skip_right / args.frame_rate
    max_duration = min(left_duration, right_duration)
    if args.duration is None:
        if args.verbose:
            print(f'No duration specified. Using maximum duration {max_duration} seconds')
        args.duration = max_duration
    if args.duration > max_duration:
        if args.verbose:
            print(f'Duration {args.duration} seconds is too long, using maximum duration {max_duration} seconds')
        args.duration = max_duration
    n_frames = int(args.frame_rate * args.duration)
    # both inputs are conformed to the larger of the two resolutions
    input_width = max(left_width, right_width)
    input_height = max(left_height, right_height)
    left_process = get_input_process(args.left_video,
                                     left_width, left_height, left_fps,
                                     input_width, input_height, args.frame_rate,
                                     args.skip_left + n_frames)
    right_process = get_input_process(args.right_video,
                                      right_width, right_height, right_fps,
                                      input_width, input_height, args.frame_rate,
                                      args.skip_right + n_frames)
    # encoder process: reads raw RGB frames on stdin, writes the temp video
    out_process = (
        ffmpeg
        .input('pipe:', format='rawvideo', pix_fmt='rgb24', s=f'{args.height*2}x{args.height}')
        .output(get_tmp_video(args.tmp_folder, args.output),
                preset=args.preset,
                crf=args.crf,
                pix_fmt='yuv420p',
                vcodec=args.vcodec)
        .global_args('-hide_banner', '-nostats', '-loglevel', 'panic')
        .overwrite_output()
        .run_async(pipe_stdin=True)
    )
    # bytes per raw RGB24 frame
    byte_count = input_width * input_height * 3
    unwarp = FisheyeToEquirectangular(args.height, input_width, args.blending)
    # consume and discard the skipped leading frames of each input
    if args.skip_left:
        if args.verbose:
            print(f'Skipping left frames: {args.skip_left}')
        for i in tqdm(range(args.skip_left)):
            left_process.stdout.read(byte_count)
    if args.skip_right:
        if args.verbose:
            print(f'Skipping right frames: {args.skip_right}')
        for i in tqdm(range(args.skip_right)):
            right_process.stdout.read(byte_count)
    if args.verbose:
        print(f'Warping frames: {n_frames}')
    for i in tqdm(range(n_frames)):
        left_bytes = left_process.stdout.read(byte_count)
        right_bytes = right_process.stdout.read(byte_count)
        if not left_bytes:
            if args.verbose:
                print(f'Reached end of {args.left_video}')
            break
        if not right_bytes:
            if args.verbose:
                print(f'Reached end of {args.right_video}')
            break
        left_frame = (
            np
            .frombuffer(left_bytes, np.uint8)
            .reshape([input_height, input_width, 3])
        )
        right_frame = (
            np
            .frombuffer(right_bytes, np.uint8)
            .reshape([input_height, input_width, 3])
        )
        if args.fisheye:
            # debug mode: place the two raw fisheye images side by side
            out_frame = np.hstack((
                imresize(left_frame, output_wh=(args.height, args.height)),
                imresize(right_frame, output_wh=(args.height, args.height))
            ))
        else:
            out_frame = unwarp.unwarp_pair(left_frame, right_frame)
        out_process.stdin.write(
            out_frame
            .astype(np.uint8)
            .tobytes()
        )
        if args.preview and i == 0:
            if args.verbose:
                print('Saving preview frame...')
            imwrite(args.output + '.png', out_frame)
    if args.verbose:
        print('Closing all processes...')
    left_process.stdout.close()
    right_process.stdout.close()
    out_process.stdin.close()
    if args.verbose:
        print('Waiting for all processes to finish...')
    left_process.wait()
    right_process.wait()
    out_process.wait()
    # extract audio from each input (when present), trimmed to account for
    # the skipped leading frames
    filenames = [args.left_video, args.right_video]
    metas = [left_meta, right_meta]
    skips = [args.skip_left, args.skip_right]
    in_audio = []
    for fn, meta, skip in zip(filenames, metas, skips):
        if not get_stream(meta, 'audio'):
            if args.verbose:
                print('No audio from', fn)
            continue
        tmp_fn = get_tmp_audio(args.tmp_folder, fn)
        if args.verbose:
            print('Re-encoding audio from', fn, 'to', tmp_fn)
        (
            ffmpeg
            .input(fn)
            .output(tmp_fn)
            .global_args('-hide_banner', '-nostats', '-loglevel', 'panic')
            .overwrite_output()
            .run()
        )
        skip_seconds = skip / args.frame_rate
        in_audio.append(
            ffmpeg
            .input(tmp_fn)
            .filter('atrim', start=skip_seconds)
            .filter('asetpts', 'PTS-STARTPTS')
        )
    # mux the encoded video with 0, 1 or 2 audio tracks
    video_tmp = get_tmp_video(args.tmp_folder, args.output)
    in_video = ffmpeg.input(video_tmp)
    if len(in_audio) == 0:
        if args.verbose:
            print('No audio channels, using video directly.')
        shutil.copy(video_tmp, args.output)
    if len(in_audio) == 1:
        if args.verbose:
            print('Merging video and single audio channel into', args.output)
        (
            ffmpeg
            .output(in_video.video, in_audio[0], args.output, shortest=None, vcodec='copy')
            .global_args('-hide_banner', '-nostats', '-loglevel', 'panic')
            .overwrite_output()
            .run()
        )
    if len(in_audio) == 2:
        if args.verbose:
            print('Merging video and two audio channels into', args.output)
        (
            ffmpeg
            .filter(in_audio, 'join', inputs=2, channel_layout='stereo')
            .output(in_video.video, args.output, shortest=None, vcodec='copy')
            .global_args('-hide_banner', '-nostats', '-loglevel', 'panic')
            .overwrite_output()
            .run()
        )
    if args.verbose:
        print('Finished encoding')
    if args.verbose:
        print('Removing folder', args.tmp_folder)
    if os.path.exists(args.tmp_folder):
        shutil.rmtree(args.tmp_folder)
shutil.rmtree(args.tmp_folder)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        pass
    finally:
        # killing ffmpeg subprocesses can leave the terminal with echo
        # disabled; restore it on the way out.
        # https://github.com/kkroening/ffmpeg-python/issues/108
        os.system('stty echo')
| 2.25 | 2 |
rllib/utils/replay_buffers/prioritized_replay_buffer.py | willfrey/ray | 1 | 12761919 | <reponame>willfrey/ray
import random
from typing import Any, Dict, List, Optional
import numpy as np
# Import ray before psutil will make sure we use psutil's bundled version
import ray # noqa F401
import psutil # noqa E402
from ray.rllib.execution.segment_tree import SumSegmentTree, MinSegmentTree
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.annotations import override
from ray.rllib.utils.metrics.window_stat import WindowStat
from ray.rllib.utils.replay_buffers.replay_buffer import ReplayBuffer
from ray.rllib.utils.typing import SampleBatchType
from ray.util.annotations import DeveloperAPI
@DeveloperAPI
class PrioritizedReplayBuffer(ReplayBuffer):
"""This buffer implements Prioritized Experience Replay
The algorithm has been described by <NAME> et. al. in "Prioritized
Experience Replay". See https://arxiv.org/pdf/1511.05952.pdf for
the full paper.
"""
    def __init__(
        self,
        capacity: int = 10000,
        storage_unit: str = "timesteps",
        alpha: float = 1.0,
        **kwargs
    ):
        """Initializes a PrioritizedReplayBuffer instance.

        Args:
            capacity: Max number of timesteps to store in the FIFO
                buffer. After reaching this number, older samples will be
                dropped to make space for new ones.
            storage_unit: Either 'timesteps', 'sequences' or
                'episodes'. Specifies how experiences are stored.
            alpha: How much prioritization is used
                (0.0=no prioritization, 1.0=full prioritization).
                Must be > 0.
            **kwargs: Forward compatibility kwargs.
        """
        ReplayBuffer.__init__(self, capacity, storage_unit, **kwargs)
        # alpha == 0 (uniform sampling) is not supported by this buffer.
        assert alpha > 0
        self._alpha = alpha
        # Segment tree must have capacity that is a power of 2
        it_capacity = 1
        while it_capacity < self.capacity:
            it_capacity *= 2
        # Sum tree enables O(log n) proportional sampling; min tree tracks
        # the smallest priority for importance-sampling weight normalization.
        self._it_sum = SumSegmentTree(it_capacity)
        self._it_min = MinSegmentTree(it_capacity)
        # Newly added items receive this priority when no weight is given.
        self._max_priority = 1.0
        # Rolling window over recent priority changes (diagnostics).
        self._prio_change_stats = WindowStat("reprio", 1000)
    @DeveloperAPI
    @override(ReplayBuffer)
    def _add_single_batch(self, item: SampleBatchType, **kwargs) -> None:
        """Add a batch of experiences to self._storage with weight.

        An item consists of either one or more timesteps, a sequence or an
        episode. Differs from add() in that it does not consider the storage
        unit or type of batch and simply stores it.

        Args:
            item: The item to be added.
            **kwargs: Forward compatibility kwargs. May contain `weight`
                (float) as the initial priority; defaults to the max
                priority seen so far.
        """
        weight = kwargs.get("weight", None)
        if weight is None:
            weight = self._max_priority
        # Record priority^alpha in both trees at the slot the item occupies.
        self._it_sum[self._next_idx] = weight ** self._alpha
        self._it_min[self._next_idx] = weight ** self._alpha
        ReplayBuffer._add_single_batch(self, item)
def _sample_proportional(self, num_items: int) -> List[int]:
res = []
for _ in range(num_items):
# TODO(szymon): should we ensure no repeats?
mass = random.random() * self._it_sum.sum(0, len(self._storage))
idx = self._it_sum.find_prefixsum_idx(mass)
res.append(idx)
return res
    @DeveloperAPI
    @override(ReplayBuffer)
    def sample(
        self, num_items: int, beta: float, **kwargs
    ) -> Optional[SampleBatchType]:
        """Sample `num_items` items from this buffer, including prio. weights.

        Samples in the results may be repeated.

        Examples for storage of SamplesBatches:
        - If storage unit `timesteps` has been chosen and batches of
        size 5 have been added, sample(5) will yield a concatenated batch of
        15 timesteps.
        - If storage unit 'sequences' has been chosen and sequences of
        different lengths have been added, sample(5) will yield a concatenated
        batch with a number of timesteps equal to the sum of timesteps in
        the 5 sampled sequences.
        - If storage unit 'episodes' has been chosen and episodes of
        different lengths have been added, sample(5) will yield a concatenated
        batch with a number of timesteps equal to the sum of timesteps in
        the 5 sampled episodes.

        Args:
            num_items: Number of items to sample from this buffer.
            beta: To what degree to use importance weights
                (0 - no corrections, 1 - full correction).
            **kwargs: Forward compatibility kwargs.

        Returns:
            Concatenated SampleBatch of items including "weights" and
            "batch_indexes" fields denoting IS of each sampled
            transition and original idxes in buffer of sampled experiences.
        """
        assert beta >= 0.0
        # Draw indices proportionally to the stored priorities.
        idxes = self._sample_proportional(num_items)
        weights = []
        batch_indexes = []
        # Importance-sampling correction: w_i = (P(i) * N) ** -beta. The
        # item with the smallest sampling probability has the largest
        # weight; it is used below to normalize all weights to <= 1.
        p_min = self._it_min.min() / self._it_sum.sum()
        max_weight = (p_min * len(self)) ** (-beta)
        for idx in idxes:
            p_sample = self._it_sum[idx] / self._it_sum.sum()
            weight = (p_sample * len(self)) ** (-beta)
            count = self._storage[idx].count
            # If zero-padded, count will not be the actual batch size of the
            # data.
            if (
                isinstance(self._storage[idx], SampleBatch)
                and self._storage[idx].zero_padded
            ):
                actual_size = self._storage[idx].max_seq_len
            else:
                actual_size = count
            # Repeat the per-item weight/index once per contained timestep
            # so these fields line up with the concatenated batch rows.
            weights.extend([weight / max_weight] * actual_size)
            batch_indexes.extend([idx] * actual_size)
            self._num_timesteps_sampled += count
        batch = self._encode_sample(idxes)
        # Note: prioritization is not supported in multi agent lockstep
        if isinstance(batch, SampleBatch):
            batch["weights"] = np.array(weights)
            batch["batch_indexes"] = np.array(batch_indexes)
        return batch
@DeveloperAPI
def update_priorities(self, idxes: List[int], priorities: List[float]) -> None:
"""Update priorities of items at given indices.
Sets priority of item at index idxes[i] in buffer
to priorities[i].
Args:
idxes: List of indices of items
priorities: List of updated priorities corresponding to
items at the idxes denoted by variable `idxes`.
"""
# Making sure we don't pass in e.g. a torch tensor.
assert isinstance(
idxes, (list, np.ndarray)
), "ERROR: `idxes` is not a list or np.ndarray, but {}!".format(
type(idxes).__name__
)
assert len(idxes) == len(priorities)
for idx, priority in zip(idxes, priorities):
assert priority > 0
assert 0 <= idx < len(self._storage)
delta = priority ** self._alpha - self._it_sum[idx]
self._prio_change_stats.push(delta)
self._it_sum[idx] = priority ** self._alpha
self._it_min[idx] = priority ** self._alpha
self._max_priority = max(self._max_priority, priority)
@DeveloperAPI
@override(ReplayBuffer)
def stats(self, debug: bool = False) -> Dict:
"""Returns the stats of this buffer.
Args:
debug: If true, adds sample eviction statistics to the
returned stats dict.
Returns:
A dictionary of stats about this buffer.
"""
parent = ReplayBuffer.stats(self, debug)
if debug:
parent.update(self._prio_change_stats.stats())
return parent
@DeveloperAPI
@override(ReplayBuffer)
def get_state(self) -> Dict[str, Any]:
"""Returns all local state.
Returns:
The serializable local state.
"""
# Get parent state.
state = super().get_state()
# Add prio weights.
state.update(
{
"sum_segment_tree": self._it_sum.get_state(),
"min_segment_tree": self._it_min.get_state(),
"max_priority": self._max_priority,
}
)
return state
@DeveloperAPI
@override(ReplayBuffer)
def set_state(self, state: Dict[str, Any]) -> None:
"""Restores all local state to the provided `state`.
Args:
state: The new state to set this buffer. Can be obtained by
calling `self.get_state()`.
"""
super().set_state(state)
self._it_sum.set_state(state["sum_segment_tree"])
self._it_min.set_state(state["min_segment_tree"])
self._max_priority = state["max_priority"]
| 2.296875 | 2 |
src/PythonScripts/ArduinoSerialListener.py | nick11roberts/sleepAdjustmentScripts | 1 | 12761920 | <gh_stars>1-10
# This program anticipates values every 15000ms ranging from 0-100 from the serial port.
import serial
import os
import datetime
# CONSTANTS
# Serial device where the Arduino is attached; readings arrive one per line.
PORT = "/dev/ttyACM0"
SER = serial.Serial(PORT, 9600)
# This is the length of the range used for averaging the data
#ARDUINO_DELAY = 15000
#AVERAGING_ITER_FACTOR = 40
#TIME_LENGTH_OF_AVERAGES = ARDUINO_DELAY * AVERAGING_ITER_FACTOR
#RANGE_ITERATIONS = (TIME_LENGTH_OF_AVERAGES/ARDUINO_DELAY)
# Averages at or below CUTOFF_VAL - 1 count as "low" -- presumably darkness
# from a light sensor; confirm against the attached hardware.
CUTOFF_VAL = 85
NAME = "Nick"
# Main-loop control flag and a rolling window of the last 20 readings.
running = True
dat_mean = 0;
dat_index = [0,0,0,0,
             0,0,0,0,
             0,0,0,0,
             0,0,0,0,
             0,0,0,0]
def average_of_list(num_list):
    """Return the average of the numbers in `num_list`.

    Raises ZeroDivisionError for an empty list, exactly like the original
    hand-rolled loop (which divided by a zero length).
    """
    # Use the built-in sum() instead of accumulating manually.
    return sum(num_list) / len(num_list)
def low():
    # Fill the window with one fresh reading per slot, then average.
    for i in range (len(dat_index)):
        # Will ultimately wait for arduino to provide values
        dat_index[i] = int(SER.readline())
    dat_mean = average_of_list(dat_index)
    # NOTE(review): this binds a *local* dat_mean; without a `global`
    # statement the module-level dat_mean is never updated -- confirm
    # that is intentional.
    print dat_index
    print dat_mean
    print " "
    # True when the averaged reading falls below the cutoff.
    if dat_mean <= CUTOFF_VAL-1:
        is_low = True
    else:
        is_low = False
    return is_low
def text_to_speech(text):
    """Speak `text` aloud via the espeak command-line tool."""
    # NOTE: text is interpolated straight into a shell command; only pass
    # trusted strings.
    command = "espeak -s 155 -a 200 " + text + " "
    return os.system(command)
# The first reading is most likely for calibration, so skip it.
SER.readline()

# Alarm loop: poll until a reading above the cutoff occurs (presumably
# daylight -- confirm), announce the time, then require the passphrase
# before disarming.
while running:
    m = datetime.datetime.now().strftime("%I %M %S")
    if not low():
        # TODO
        # do something
        # like make the user enter the numbers from LOST
        # Perhaps later it can grab text from an online service or an AI? Maybe Reddit?
        text_to_speech("'Master "
                       +NAME+
                       ", the time is "
                       +str(int(m[0:2]))
                       +" "+str(int(m[3:5]))
                       +" : go forth and "
                       +"prepare yourself some coffee : "
                       +"The day awaits : ' ")
        # Block until the user types the disarm code.
        while True:
            in_string = raw_input(" >: ")
            if in_string == '4 8 15 16 23 42':
                break
        # play a beeping sound from an audio file in another task
        text_to_speech("'Just saving the world.' ")
        running = False
| 3.390625 | 3 |
setup.py | li-wjohnson/py-log-symbols | 17 | 12761921 | import sys
from setuptools import setup, find_packages # pylint: disable=no-name-in-module,import-error
def dependencies(file):
    """Read a requirements file and return its lines as a list of strings."""
    with open(file) as handle:
        return handle.read().splitlines()
# Package metadata for log_symbols; install/test requirements are read
# from the requirements files via dependencies().
setup(
    name='log_symbols',
    packages=find_packages(exclude=('tests', 'examples')),
    version='0.0.14',
    license='MIT',
    description='Colored symbols for various log levels for Python',
    long_description='Colored symbols for various log levels for Python. Find the documentation here: https://github.com/manrajgrover/py-log-symbols.',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/manrajgrover/py-log-symbols',
    keywords=[
        'log symbols',
        'symbols',
        'log'
    ],
    install_requires=dependencies('requirements.txt'),
    extras_require={
        # Backport of the enum module for old Pythons.
        ':python_version < "3.4"': [
            'enum34==1.1.6',
        ],
    },
    tests_require=dependencies('requirements-dev.txt'),
    include_package_data=True
)
| 1.640625 | 2 |
DataModify.py | xiayule158/ImageClassification | 0 | 12761922 | """
日期修改
"""
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Print full numpy arrays instead of truncating them with "...".
np.set_printoptions(threshold=np.inf)
# Directory containing the images processed below.
root_dir = '/media/xiayule/bdcp/other'
def modify_date():
    # Load the source image and convert to HSV for color-based masking.
    img_path = os.path.join(root_dir, '3.jpg')
    img = cv2.imread(img_path)
    # _, img1 = cv2.threshold(img, 150, 200, cv2.THRESH_BINARY)
    hue_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # HSV window (OpenCV hue range is 0-180); H in [140, 180] covers the
    # pink/red hues -- presumably the color of the date stamp; confirm.
    l_range = np.array([140, 43, 46])
    h_range = np.array([180, 255, 255])
    th = cv2.inRange(hue_img, l_range, h_range)
    # Copy only the in-range pixels onto a white canvas and display it.
    index1 = th == 255
    img1 = np.zeros(img.shape, np.uint8)
    img1[:, :] = (255, 255, 255)
    img1[index1] = img[index1]
    cv2.namedWindow('1', cv2.WINDOW_NORMAL)
    cv2.imshow('1', img1)
    cv2.waitKey()
    cv2.destroyAllWindows()
def get_print():
    """Extract the reddish stamp pixels of zhuangbei2.jpg onto transparency.

    Pixels whose BGR values fall inside a hand-tuned color window are
    copied to the output; every other pixel is made fully transparent.
    The BGRA result is written to zhuangbei2.png and shown in a window.
    """
    img_path = os.path.join(root_dir, 'zhuangbei2.jpg')
    img = cv2.imread(img_path)
    # Opaque white canvas with an alpha channel.
    dst_img = np.full(img.shape, 255, np.uint8)
    dst_img = cv2.cvtColor(dst_img, cv2.COLOR_BGR2BGRA)
    # Vectorized replacement of the original per-pixel Python loop
    # (O(w*h) Python iterations -> a handful of numpy operations).
    b, g, r = img[..., 0], img[..., 1], img[..., 2]
    # Hand-tuned BGR window for the stamp color -- TODO confirm against
    # other sample images.
    mask = ((b >= 80) & (b < 160)
            & (g >= 80) & (g < 150)
            & (r >= 140) & (r < 240))
    dst_img[mask, :3] = img[mask]
    dst_img[~mask, 3] = 0
    cv2.imwrite(os.path.join(root_dir, 'zhuangbei2.png'), dst_img)
    cv2.namedWindow('1', cv2.WINDOW_NORMAL)
    cv2.imshow('1', dst_img)
    cv2.waitKey()
    cv2.destroyAllWindows()
def get_print1():
    """
    Make the gray/neutral pixels of zhuangbei2.jpg transparent, keeping
    only the colored pixels, and save the result as a BGRA PNG.
    :return:
    """
    img_path = os.path.join(root_dir, 'zhuangbei2.jpg')
    img = cv2.imread(img_path)
    w, h, c = img.shape
    # Opaque white canvas with an alpha channel.
    dst_img = np.ones((w, h, c), np.uint8)*255
    dst_img = cv2.cvtColor(dst_img, cv2.COLOR_BGR2BGRA)
    # Per-pixel scan (slow for large images; could be vectorized).
    for i in range(w):
        for j in range(h):
            pixel = img[i, j, :]
            b, g, r = pixel[0], pixel[1], pixel[2]
            # Mean channel value; int() avoids uint8 wrap-around in the sum.
            m = (int(b)+int(g)+int(r))/3
            if abs(b-m) < 20 and abs(g-m) < 20 and abs(r-m) < 20:
                # Near-gray pixel (all channels close to the mean):
                # make it fully transparent.
                dst_img[i, j, 3] = 0
            else:
                # Colored pixel: copy its BGR values (alpha stays 255).
                dst_img[i, j, 0] = b
                dst_img[i, j, 1] = g
                dst_img[i, j, 2] = r
    cv2.imwrite(os.path.join(root_dir, 'zhuangbei2.png'), dst_img)
    cv2.namedWindow('1', cv2.WINDOW_NORMAL)
    cv2.imshow('1', dst_img)
    cv2.waitKey()
    cv2.destroyAllWindows()
def get_touming():
    """Keep only the near-black pixels of 26.jpg; make the rest transparent.

    ("touming" is Chinese for "transparent".) Dark pixels -- all three
    BGR channels below 50 -- are copied to the output; all other pixels
    get alpha 0. The BGRA result is written to 26_1.png and displayed.
    """
    img_path = os.path.join(root_dir, '26.jpg')
    img = cv2.imread(img_path)
    # Opaque white canvas with an alpha channel.
    dst_img = np.full(img.shape, 255, np.uint8)
    dst_img = cv2.cvtColor(dst_img, cv2.COLOR_BGR2BGRA)
    # Vectorized replacement of the original per-pixel Python loop.
    # (The original 0 <= channel lower bound is always true for uint8.)
    mask = (img < 50).all(axis=2)
    dst_img[mask, :3] = img[mask]
    dst_img[~mask, 3] = 0
    cv2.imwrite(os.path.join(root_dir, '26_1.png'), dst_img)
    cv2.namedWindow('1', cv2.WINDOW_NORMAL)
    cv2.imshow('1', dst_img)
    cv2.waitKey()
    cv2.destroyAllWindows()
def myfunc1(x):
    """Swish-like activation: identity for x >= 0, else 2*x*sigmoid(x)."""
    if x < 0:
        return 2 * x / (1 + np.exp(-x))
    return x
def myfunc1_der1(x):
    """First derivative of myfunc1: 1 for x >= 0, else the quotient rule."""
    if x < 0:
        e = np.exp(-x)
        return 2 * (1 + e + x * e) / (1 + e) ** 2
    return 1
def plot_swish():
    """
    Plot myfunc1 (swish-like activation) and its derivative over [-4, 4].
    :return:
    """
    x = np.linspace(-4, 4, 1001)
    y = np.array([myfunc1(i) for i in x])
    y_d1 = np.array([myfunc1_der1(i) for i in x])
    plt.plot(x, y, x, y_d1)
    plt.show()
def modify_pixel():
    # Brighten the red channel of 51.png by 15 and make pixels with any
    # saturated (255) channel fully transparent.
    img_path = os.path.join(root_dir, '51.png')
    # Cast to int so channel comparisons do not wrap at 255.
    img = cv2.imread(img_path).astype('int')
    w, h, c = img.shape
    dst_img = np.ones((w, h, c), np.uint8) * 255
    dst_img = cv2.cvtColor(dst_img, cv2.COLOR_BGR2BGRA)
    for i in range(w):
        for j in range(h):
            pixel = img[i, j, :]
            b, g, r = pixel[0], pixel[1], pixel[2]
            if b < 255 and g < 255 and r < 255:
                dst_img[i, j, 0] = b
                dst_img[i, j, 1] = g
                # NOTE(review): dst_img is uint8, so r+15 > 255 wraps or
                # raises (depending on numpy version) on assignment -- the
                # clip below comes too late to help. Confirm intended.
                dst_img[i, j, 2] = r+15
                # dst_img[i, j, 3] = [b, g, r]
            else:
                # Any channel at 255: make the pixel fully transparent.
                dst_img[i, j, 3] = 0
    dst_img[dst_img > 255] = 255
    cv2.imwrite(os.path.join(root_dir, '5_1.png'), dst_img)
    cv2.namedWindow('1', cv2.WINDOW_NORMAL)
    cv2.imshow('1', dst_img)
    cv2.waitKey()
    cv2.destroyAllWindows()
if __name__ == r'__main__':
    # Only the transparency extraction runs by default; the other
    # utilities are kept here commented out for manual use.
    get_touming()
    # plot_swish()
    # modify_pixel()
tests/algorithms/test_pbr.py | UCL/scikit-surgeryfredwebapp | 5 | 12761923 | <reponame>UCL/scikit-surgeryfredwebapp<filename>tests/algorithms/test_pbr.py
# coding=utf-8
"""Fiducial Registration Educational Demonstration tests"""
import math
import numpy as np
from scipy.stats import linregress
import pytest
from sksurgeryfred.algorithms.errors import expected_absolute_value
import sksurgeryfred.algorithms.point_based_reg as pbreg
def _make_circle_fiducials(no_fids, centre, radius,
fixed_stddevs, moving_stddevs):
fixed_fids = np.zeros(shape=(no_fids, 3), dtype=np.float64)
moving_fids = np.zeros(shape=(no_fids, 3), dtype=np.float64)
angle_inc = math.pi * 2.0 / float(no_fids)
for fid in range(no_fids):
fixed_fids[fid] = ([radius * math.cos(angle_inc*fid),
radius * math.sin(angle_inc*fid),
0.0] +
np.random.normal(scale=fixed_stddevs) +
centre)
moving_fids[fid] = ([radius * math.cos(angle_inc*fid),
radius * math.sin(angle_inc*fid),
0.0] +
np.random.normal(scale=moving_stddevs) +
centre)
return fixed_fids, moving_fids
def _run_registrations(pbr, no_fids, centre, radius, fixed_stddevs,
                       moving_stddevs, repeats):
    """Runs `repeats` registrations and returns averaged error statistics.

    Returns:
        (mean squared TRE, mean squared FRE, expected squared TRE,
         expected FRE, p-value of a TRE-vs-FRE linear regression)
    """
    tres = np.empty(repeats, dtype=np.float64)
    fres = np.empty(repeats, dtype=np.float64)
    np.random.seed(0)
    for run in range(repeats):
        fixed_fids, moving_fids = _make_circle_fiducials(
            no_fids, centre, radius, fixed_stddevs, moving_stddevs)
        [_success, fres[run], _mean_fle, expected_tre_squared, expected_fre,
         _transformed_target_2d, tres[run], _no_fids] = pbr.register(
             fixed_fids, moving_fids)

    # Mean squared errors over all runs. Note the expected_* values keep
    # whatever the final register() call returned.
    ave_tre = np.average(np.square(tres))
    ave_fre = np.average(np.square(fres))
    _slope, _intercept, _r_value, p_value, _std_err = linregress(tres, fres)
    return ave_tre, ave_fre, expected_tre_squared, expected_fre, p_value
def test_init_with_moving_fle():
    """A non-zero moving FLE is not implemented yet and must raise."""
    std_dev = np.array([1.0, 1.0, 1.0], dtype=np.float64)
    fixed_fle_easv = expected_absolute_value(std_dev)
    moving_fle_easv = expected_absolute_value(std_dev)
    target = np.array([[0.0, 0.0, 0.0]], dtype=np.float64)

    with pytest.raises(NotImplementedError):
        pbreg.PointBasedRegistration(target, fixed_fle_easv, moving_fle_easv)
def test_pbr_3_fids():
    """Registration with 3 fiducials: measured errors match predictions.

    The dead pre-assignments of expected_tre_squared / expected_fre (they
    were immediately overwritten by the helper's return) were removed.
    """
    fixed_fle_std_dev = np.array([1.0, 1.0, 1.0], dtype=np.float64)
    moving_fle_std_dev = np.array([0.0, 0.0, 0.0], dtype=np.float64)
    fixed_fle_easv = expected_absolute_value(fixed_fle_std_dev)
    moving_fle_easv = expected_absolute_value(moving_fle_std_dev)

    target = np.array([[0.0, 0.0, 0.0]], dtype=np.float64)
    pbr = pbreg.PointBasedRegistration(target, fixed_fle_easv, moving_fle_easv)

    centre = np.array([0.0, 0.0, 0.0], dtype=np.float64)
    radius = 20.0
    repeats = 100
    no_fids = 3

    ave_tresq, ave_fresq, expected_tre_squared, expected_fre, p_value = \
        _run_registrations(pbr, no_fids, centre, radius,
                           fixed_fle_std_dev,
                           moving_fle_std_dev, repeats)

    assert np.isclose(ave_tresq, expected_tre_squared, atol=0.0, rtol=0.10)
    assert np.isclose(ave_fresq, expected_fre, atol=0.0, rtol=0.05)
    # TRE and FRE should be statistically uncorrelated.
    assert p_value > 0.05
def test_pbr_10_fids():
    """Registration with 10 fiducials on a small circle."""
    fixed_fle_std_dev = np.array([1.0, 1.0, 1.0], dtype=np.float64)
    moving_fle_std_dev = np.array([0.0, 0.0, 0.0], dtype=np.float64)
    fixed_fle_easv = expected_absolute_value(fixed_fle_std_dev)
    moving_fle_easv = expected_absolute_value(moving_fle_std_dev)

    target = np.array([[0.0, 0.0, 0.0]], dtype=np.float64)
    pbr = pbreg.PointBasedRegistration(target, fixed_fle_easv, moving_fle_easv)

    ave_tresq, ave_fresq, expected_tre_squared, expected_fre, p_value = \
        _run_registrations(pbr, 10,
                           np.array([0.0, 0.0, 0.0], dtype=np.float64),
                           2.0, fixed_fle_std_dev, moving_fle_std_dev, 200)

    assert np.isclose(ave_tresq, expected_tre_squared, atol=0.0, rtol=0.10)
    assert np.isclose(ave_fresq, expected_fre, atol=0.0, rtol=0.05)
    assert p_value > 0.05
def test_pbr_10_fids_offset_target():
    """Registration with 10 fiducials and a target away from the centroid."""
    fixed_fle_std_dev = np.array([1.0, 1.0, 1.0], dtype=np.float64)
    moving_fle_std_dev = np.array([0.0, 0.0, 0.0], dtype=np.float64)
    fixed_fle_easv = expected_absolute_value(fixed_fle_std_dev)
    moving_fle_easv = expected_absolute_value(moving_fle_std_dev)

    target = np.array([[2.0, 1.0, 0.0]], dtype=np.float64)
    pbr = pbreg.PointBasedRegistration(target, fixed_fle_easv, moving_fle_easv)

    ave_tresq, ave_fresq, expected_tre_squared, expected_fre, p_value = \
        _run_registrations(pbr, 10,
                           np.array([0.0, 0.0, 0.0], dtype=np.float64),
                           2.0, fixed_fle_std_dev, moving_fle_std_dev, 200)

    assert np.isclose(ave_tresq, expected_tre_squared, atol=0.0, rtol=0.10)
    assert np.isclose(ave_fresq, expected_fre, atol=0.0, rtol=0.05)
    assert p_value > 0.05
def test_pbr_20_fids_offset_target():
    """20 fiducials, offset target; also checks get_transformed_target()."""
    fixed_fle_std_dev = np.array([1.0, 1.0, 1.0], dtype=np.float64)
    moving_fle_std_dev = np.array([0.0, 0.0, 0.0], dtype=np.float64)
    fixed_fle_easv = expected_absolute_value(fixed_fle_std_dev)
    moving_fle_easv = expected_absolute_value(moving_fle_std_dev)

    target = np.array([[2.0, 1.0, 0.0]], dtype=np.float64)
    pbr = pbreg.PointBasedRegistration(target, fixed_fle_easv, moving_fle_easv)

    # Before any registration there is no transformed target.
    status, transformed_target = pbr.get_transformed_target()
    assert not status
    assert transformed_target is None

    ave_tresq, ave_fresq, expected_tre_squared, expected_fre, p_value = \
        _run_registrations(pbr, 20,
                           np.array([0.0, 0.0, 0.0], dtype=np.float64),
                           20.0, fixed_fle_std_dev, moving_fle_std_dev, 200)

    assert np.isclose(ave_tresq, expected_tre_squared, atol=0.0, rtol=0.10)
    assert np.isclose(ave_fresq, expected_fre, atol=0.0, rtol=0.05)
    assert p_value > 0.05

    # After registration the transformed target exists and lies close to
    # the true target.
    status, transformed_target = pbr.get_transformed_target()
    assert status
    assert np.allclose(np.transpose(transformed_target), target, atol=1.0)
| 2.203125 | 2 |
src/stories/contrib/sentry/django.py | dargor/stories | 1 | 12761924 | import stories.contrib.sentry.breadcrumbs # noqa
from raven.contrib.django.client import DjangoClient # noqa
| 1.140625 | 1 |
fixtures/users.py | mitodl/open-discussions | 12 | 12761925 | """User fixtures"""
# pylint: disable=unused-argument, redefined-outer-name
from io import BytesIO
import pytest
from PIL import Image
from rest_framework.test import APIClient
from rest_framework_jwt.settings import api_settings
from open_discussions.factories import UserFactory
from sites.factories import AuthenticatedSiteFactory
@pytest.fixture
def user(db, use_betamax, request):
    """Create a user"""
    # NOTE(review): unlike staff_user/index_user, this does not request the
    # "configure_betamax" fixture first -- confirm that is intentional.
    if use_betamax:
        return request.getfixturevalue("reddit_user")
    return UserFactory.create()
@pytest.fixture
def staff_user(db, use_betamax, request):
    """Create a staff user"""
    if not use_betamax:
        return UserFactory.create(is_staff=True)
    # Betamax runs replay recorded reddit interactions.
    request.getfixturevalue("configure_betamax")
    return request.getfixturevalue("reddit_staff_user")
@pytest.fixture()
def index_user(db, use_betamax, request):
    """Create a user to be used for indexing"""
    if use_betamax:
        request.getfixturevalue("configure_betamax")
        return request.getfixturevalue("reddit_index_user")
    # Return directly (the redundant temporary variable was dropped),
    # consistent with the other user fixtures in this module.
    return UserFactory.create(is_staff=True)
@pytest.fixture()
def logged_in_user(client, user):
    """Log the user in and yield the user object"""
    # Authenticates the session on the DRF test client.
    client.force_login(user)
    return user
@pytest.fixture()
def logged_in_profile(client):
    """Add a Profile and logged-in User"""
    logged_in = UserFactory.create(username="george")
    client.force_login(logged_in)
    return logged_in.profile
@pytest.fixture
def jwt_token(db, user, client, rf, settings):
    """Creates a JWT token for a regular user"""
    payload = api_settings.JWT_PAYLOAD_HANDLER(user)
    token = api_settings.JWT_ENCODE_HANDLER(payload)
    # Install the token as a cookie on both the API client and the request
    # factory so either can issue authenticated requests.
    client.cookies[settings.OPEN_DISCUSSIONS_COOKIE_NAME] = token
    rf.cookies.load({settings.OPEN_DISCUSSIONS_COOKIE_NAME: token})
    return token
@pytest.fixture
def client(db):
    """
    Similar to the builtin client but this provides the DRF client instead of the Django test client.
    """
    # APIClient adds DRF helpers such as force_authenticate().
    return APIClient()
@pytest.fixture
def user_client(client, user):
    """Version of the client that is authenticated with the user"""
    # Session-based login; no token required.
    client.force_login(user)
    return client
@pytest.fixture
def staff_client(client, staff_user):
    """Version of the client that is authenticated with the staff_user"""
    # Session-based login as a staff user.
    client.force_login(staff_user)
    return client
@pytest.fixture
def authenticated_site(db, settings):
    """The authenticated site"""
    site_key = settings.OPEN_DISCUSSIONS_DEFAULT_SITE_KEY
    return AuthenticatedSiteFactory.create(key=site_key)
@pytest.fixture
def profile_image():
    """Create an in-memory 250x250 red PNG image file."""
    image_file = BytesIO()
    # 255 is the maximum 8-bit channel value; the original passed 256,
    # which is out of range for an RGBA channel and did not produce the
    # intended pure red.
    image = Image.new("RGBA", size=(250, 250), color=(255, 0, 0))
    image.save(image_file, "png")
    image_file.seek(0)
    return image_file
| 2.203125 | 2 |
rh_pathfinding/src/rh_pathfinding/pathfinderserver.py | RhinohawkUAV/rh_ros | 4 | 12761926 | #!/usr/bin/env python
import rospy
from ros.rosPathFinderServer import RosPathFinderServer
if __name__ == '__main__':
    # Instantiate the path finder service, then hand control to ROS, which
    # blocks here servicing callbacks until the node is shut down.
    server = RosPathFinderServer()
    rospy.spin()
| 1.429688 | 1 |
src/pyext/FetchData.py | saijananiganesan/LDAPathwayPrediction | 1 | 12761927 | <gh_stars>1-10
import requests
import os
import pandas as pd
from collections import Counter
class KEGGData(object):
    """Client for downloading KEGG pathway/reaction/enzyme data.

    Every method fetches data from the KEGG REST API and writes the
    result as a text/CSV file under ``self.path``.
    """
    def __init__(self):
        # Base URL of the KEGG REST API and output directory for all files.
        self.url = 'http://rest.kegg.jp'
        self.path = '../../data/'

    def get_response_from_url(self, url):
        """Fetch ``url`` and return the response body as text.

        Prints a warning (but still returns the body) on a non-200 status.
        """
        response = requests.get(url)
        if response.status_code != 200:
            print("Error in fetching data, check if url is {}".format(url))
        return response.text

    def get_all_pathways(self):
        """Download the full pathway listing.

        Writes the raw listing to list_of_pathways.csv and an
        "<id>:<name>" mapping to pathway_ID.csv.

        Returns:
            (dict mapping pathway id to underscore-joined name, its keys)
        """
        text = self.get_response_from_url(self.url + '/list/pathway')
        with open(os.path.join(self.path, "list_of_pathways.csv"), 'w+') as pathway_file:
            pathway_file.write(text)
        pathway_id = {}
        # Bug fix: the original iterated an undefined ``response`` variable
        # here (NameError at runtime); parse the downloaded text instead.
        for line in text.splitlines():
            fields = line.strip('\t').split(':')[1].strip().split()
            pathway_id[fields[0]] = '_'.join(fields[1:])
        # Bug fix: this file was never closed; use a context manager.
        with open(os.path.join(self.path, "pathway_ID.csv"), 'w+') as pathway_map:
            for path_id, name in pathway_id.items():
                pathway_map.write("%s:%s\n" % (path_id, name))
        return pathway_id, pathway_id.keys()

    def get_all_organisms(self):
        """Download the full organism listing to list_of_organisms.csv."""
        text = self.get_response_from_url(self.url + '/list/organism')
        # Bug fix: the file was opened 'r+', which fails when the file does
        # not exist and does not truncate old content; 'w+' is intended.
        with open(os.path.join(self.path, "list_of_organisms.csv"), 'w+') as organism_file:
            organism_file.write(text)

    def get_prokaryotes(self):
        """Extract prokaryote organism codes from the organism listing.

        Returns:
            List of KEGG organism codes (e.g. 'eco').
        """
        with open(os.path.join(self.path, "list_of_organisms.csv"), 'r+') as infile:
            org_list = [line.strip().split()[1]
                        for line in infile if 'Prokaryotes' in line.strip()]
        with open(os.path.join(self.path, "prokaryotes.csv"), 'w+') as outfile:
            for item in org_list:
                outfile.write("%s\n" % item)
        return org_list

    def get_prok_path(self):
        """Collect generic pathway ids across all prokaryotes.

        Writes per-pathway occurrence counts (prok_path_stats.csv) and the
        de-duplicated pathway list (prok_pathways.csv).

        Returns:
            (list of unique 'mapXXXXX' ids, Counter of occurrences)
        """
        prok_path = []
        for org in self.get_prokaryotes():
            text = self.get_response_from_url(self.url + '/list/pathway/' + org)
            for line in text.splitlines():
                path = line.strip('\t').split(':')[1].strip().split()[0]
                # Normalize organism-specific ids ("eco00010") to the
                # generic "map00010" form.
                prok_path.append('map' + path.replace(org, ''))
        prok_path_dict = Counter(prok_path)
        with open(os.path.join(self.path, "prok_path_stats.csv"), 'w+') as stats_file:
            for path_id, count in prok_path_dict.items():
                # Bug fix: keys already carry the "map" prefix; the original
                # wrote "map%s" here and produced ids like "mapmap00010".
                stats_file.write("%s:%s\n" % (path_id, count))
        prok_path_final = list(set(prok_path))
        with open(os.path.join(self.path, "prok_pathways.csv"), 'w+') as prok_file:
            for item in prok_path_final:
                prok_file.write("%s\n" % item)
        return prok_path_final, prok_path_dict

    def get_rxn_list_for_pathways(self):
        """Download the reaction list of every pathway in prok_pathways.csv."""
        with open(os.path.join(self.path, "prok_pathways.csv"), 'r+') as pathway_file, \
                open(os.path.join(self.path, "rxn_pathways.csv"), 'w+') as rxn_file:
            for item in pathway_file:
                url_new = self.url + '/link/rn/' + item.strip()
                rxn_file.write(self.get_response_from_url(url_new))

    def _map_and_rxn(self, line):
        """Split a rxn_pathways.csv line into (map id, reaction id).

        Returns None for blank lines.
        """
        fields = line.strip().split()
        if not fields:
            return None
        return fields[0].split(':')[1], fields[1].split(':')[1]

    def get_ec_for_rxn(self):
        """Write one "map:rxn:ec1_ec2..." line per reaction to ec_gram.csv."""
        with open(os.path.join(self.path, "rxn_pathways.csv"), 'r+') as rxn_file, \
                open(os.path.join(self.path, "ec_gram.csv"), 'w+') as ec_file:
            for i, item in enumerate(rxn_file):
                pair = self._map_and_rxn(item)
                if pair is None:
                    continue
                mapid, rxn = pair
                print(i, rxn)
                text = self.get_response_from_url(self.url + '/link/ec/' + rxn)
                ec = [line.strip().split('\t')[1]
                      for line in text.splitlines() if line.strip().split()]
                ec_file.write(mapid + ':' + rxn + ':' + '_'.join(ec) + '\n')

    def get_ko_for_rxn(self):
        """Write one "map:rxn:ko1_ko2..." line per reaction to ko_gram.csv."""
        with open(os.path.join(self.path, "rxn_pathways.csv"), 'r+') as rxn_file, \
                open(os.path.join(self.path, "ko_gram.csv"), 'w+') as ko_file:
            for i, item in enumerate(rxn_file):
                pair = self._map_and_rxn(item)
                if pair is None:
                    continue
                mapid, rxn = pair
                print(i, rxn)
                text = self.get_response_from_url(self.url + '/link/ko/' + rxn)
                ko = [line.strip().split('\t')[1]
                      for line in text.splitlines() if line.strip().split()]
                ko_file.write(mapid + ':' + rxn + ':' + '_'.join(ko) + '\n')

    def get_ec_for_rxn_table(self):
        """Write one "map,rxn,ec" CSV row per linked EC to ec_table.csv."""
        with open(os.path.join(self.path, "rxn_pathways.csv"), 'r+') as rxn_file, \
                open(os.path.join(self.path, "ec_table.csv"), 'w+') as ec_file:
            for i, item in enumerate(rxn_file):
                pair = self._map_and_rxn(item)
                if pair is None:
                    continue
                mapid, rxn = pair
                print(i, rxn)
                text = self.get_response_from_url(self.url + '/link/ec/' + rxn)
                for line in text.splitlines():
                    if line.strip().split():
                        ec_file.write(mapid + ',' + rxn + ','
                                      + line.strip().split('\t')[1] + '\n')

    def get_ec_for_map_table(self):
        """Write one "map,ec" CSV row per EC linked to a whole pathway."""
        with open(os.path.join(self.path, "prok_pathways.csv"), 'r+') as prok, \
                open(os.path.join(self.path, "ec_map_table.csv"), 'w+') as ec_file:
            for i, item in enumerate(prok):
                if not item.strip().split():
                    continue
                mapid = item.strip().split()[0]
                print(i, mapid)
                text = self.get_response_from_url(self.url + '/link/ec/' + mapid)
                for line in text.splitlines():
                    if line.strip().split():
                        ec_file.write(mapid + ','
                                      + line.strip().split('\t')[1] + '\n')

    def get_ko_for_rxn_table(self):
        """Write one "map,rxn,ko" CSV row per linked KO to ko_table.csv."""
        with open(os.path.join(self.path, "rxn_pathways.csv"), 'r+') as rxn_file, \
                open(os.path.join(self.path, "ko_table.csv"), 'w+') as ko_file:
            for i, item in enumerate(rxn_file):
                pair = self._map_and_rxn(item)
                if pair is None:
                    continue
                mapid, rxn = pair
                print(i, rxn)
                text = self.get_response_from_url(self.url + '/link/ko/' + rxn)
                for line in text.splitlines():
                    if line.strip().split():
                        ko_file.write(mapid + ',' + rxn + ','
                                      + line.strip().split('\t')[1] + '\n')
if __name__ == '__main__':
    # Reuse a single client instance for all download steps.
    kegg = KEGGData()
    kegg.get_all_pathways()
    kegg.get_all_organisms()
    kegg.get_prokaryotes()
    kegg.get_prok_path()
    kegg.get_rxn_list_for_pathways()
    kegg.get_ec_for_map_table()
| 3 | 3 |
examples/official/trial/fashion_mnist_tf_keras/data.py | ybt195/determined | 3 | 12761928 | """
This files mimics keras.dataset download's function.
For parallel and distributed training, we need to account
for multiple processes (one per GPU) per agent.
For more information on data in Determined, read our data-access tutorial.
"""
import gzip
import tempfile
import numpy as np
from tensorflow.python.keras.utils.data_utils import get_file
def load_training_data():
    """Loads the Fashion-MNIST training split.

    Returns:
        Tuple of Numpy arrays: `(x_train, y_train)`.

    License:
        The copyright for Fashion-MNIST is held by Zalando SE.
        Fashion-MNIST is licensed under the [MIT license](
        https://github.com/zalandoresearch/fashion-mnist/blob/master/LICENSE).
    """
    download_directory = tempfile.mkdtemp()
    base = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
    # Download the label and image archives into a scratch directory.
    paths = [
        get_file(fname, origin=base + fname, cache_subdir=download_directory)
        for fname in ("train-labels-idx1-ubyte.gz",
                      "train-images-idx3-ubyte.gz")
    ]

    with gzip.open(paths[0], "rb") as label_file:
        # Skip the first 8 bytes (file header) before the raw labels.
        y_train = np.frombuffer(label_file.read(), np.uint8, offset=8)

    with gzip.open(paths[1], "rb") as image_file:
        # Skip the first 16 bytes (file header); each image is 28x28.
        x_train = np.frombuffer(image_file.read(), np.uint8,
                                offset=16).reshape(len(y_train), 28, 28)

    return x_train, y_train
def load_validation_data():
    """Loads the Fashion-MNIST test split.

    Returns:
        Tuple of Numpy arrays: `(x_test, y_test)`.

    License:
        The copyright for Fashion-MNIST is held by Zalando SE.
        Fashion-MNIST is licensed under the [MIT license](
        https://github.com/zalandoresearch/fashion-mnist/blob/master/LICENSE).
    """
    download_directory = tempfile.mkdtemp()
    base = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
    # Download the label and image archives into a scratch directory.
    paths = [
        get_file(fname, origin=base + fname, cache_subdir=download_directory)
        for fname in ("t10k-labels-idx1-ubyte.gz",
                      "t10k-images-idx3-ubyte.gz")
    ]

    with gzip.open(paths[0], "rb") as label_file:
        # Skip the first 8 bytes (file header) before the raw labels.
        y_test = np.frombuffer(label_file.read(), np.uint8, offset=8)

    with gzip.open(paths[1], "rb") as image_file:
        # Skip the first 16 bytes (file header); each image is 28x28.
        x_test = np.frombuffer(image_file.read(), np.uint8,
                               offset=16).reshape(len(y_test), 28, 28)

    return x_test, y_test
| 2.8125 | 3 |
src/zsl/tasks/zsl/sum_task.py | AtteqCom/zsl | 2 | 12761929 | """
:mod:`zsl.tasks.asl.sum_task`
-----------------------------
Created on 22.12.2012
..moduleauthor:: <NAME>
"""
from __future__ import unicode_literals
from builtins import object
from injector import inject
from zsl import Zsl
from zsl.task.task_data import TaskData
from zsl.task.task_decorator import json_input, json_output
class SumTask(object):
    """Task that sums the numbers given in the JSON payload."""

    @inject(app=Zsl)
    def __init__(self, app):
        self._app = app

    @json_input
    @json_output
    def perform(self, data):
        # type: (TaskData)->str
        payload = data.payload
        self._app.logger.debug("Sum task with data '{0}'.".format(payload))
        result = {"input": payload, "result": sum(payload)}
        return result
| 1.898438 | 2 |
script/extract_surface.py | Gkdnz/SfePy | 0 | 12761930 | <reponame>Gkdnz/SfePy<filename>script/extract_surface.py
#!/usr/bin/env python
# 05.10.2005, c
"""
Given a mesh file, this script extracts its surface and prints it to stdout in
form of a list where each row is [element, face, component]. A component
corresponds to a contiguous surface region - for example, a cubical mesh with a
spherical hole has two surface components. Two surface faces sharing a single
node belong to one component.
With '-m' option, a mesh of the surface is created and saved in
'<original path>/surf_<original mesh file name>.mesh'.
"""
import sys
sys.path.append('.')
from optparse import OptionParser
import numpy as nm
import scipy.sparse as sp
import sfepy
from sfepy.base.base import output
from sfepy.base.ioutils import edit_filename
from sfepy.discrete.fem import Mesh, FEDomain
from sfepy.discrete.common.extmods.cmesh import (create_mesh_graph,
graph_components)
def _get_facets(vertices, offsets, ii, n_fp):
facets = []
for ic in range(n_fp):
facets.append(vertices[offsets[ii] + ic][:, None])
facets = nm.concatenate(facets, axis=1)
return nm.ascontiguousarray(facets.astype(nm.int32))
def get_surface_faces(domain):
    """Finds the surface facets of `domain`.

    Returns an (n_faces, 2) array of [cell, local face index] rows plus a
    list of connectivity arrays, one per facet size present (facets with
    3 and/or 4 vertices).
    """
    cmesh = domain.cmesh
    faces = cmesh.get_surface_facets()
    vertices_f, offs_f = cmesh.get_incident(0, faces,
                                            cmesh.dim - 1, ret_offsets=True)
    # Number of vertices per facet, from consecutive offset differences.
    n_fp = nm.diff(offs_f)

    surf_faces = []
    for facet_size in (3, 4):
        which = nm.where(n_fp == facet_size)[0]
        if which.size:
            surf_faces.append(_get_facets(vertices_f, offs_f, which,
                                          facet_size))

    cells_c, offs_c = cmesh.get_incident(cmesh.dim, faces, cmesh.dim - 1,
                                         ret_offsets=True)
    ids = cmesh.get_local_ids(faces, cmesh.dim - 1, cells_c, offs_c,
                              cmesh.dim)
    lst = nm.c_[cells_c, ids]

    return lst, surf_faces
def surface_graph(surf_faces, n_nod):
    """Builds the sparse node-connectivity graph of the surface mesh."""
    nnz, prow, icol = create_mesh_graph(n_nod, n_nod, len(surf_faces),
                                        surf_faces, surf_faces)
    # Every stored entry carries the constant value 2.
    data = nm.full(nnz, 2, dtype=nm.int32)
    return sp.csr_matrix((data, icol, prow), (n_nod, n_nod))
def surface_components(gr_s, surf_faces):
    """
    Determine surface components given surface mesh connectivity graph.
    """
    n_nod = gr_s.shape[0]
    n_comp, flag = graph_components(n_nod, gr_s.indptr, gr_s.indices)

    # Label each face with the component of its first vertex.
    comps = [flag[face[:, 0]] for face in surf_faces]

    return n_comp, comps
usage = """%prog [options] filename_in|- filename_out|-
'-' is for stdin, stdout
""" + __doc__.rstrip()
def main():
    """Entry point: load a mesh, optionally save its surface mesh, and write
    one '(cell id, local facet id, component id)' row per surface facet to
    the output file (or stdout)."""
    parser = OptionParser(usage=usage, version="%prog " + sfepy.__version__)
    parser.add_option("-m", "--mesh",
                      action="store_true", dest="save_mesh",
                      default=False,
                      help="save surface mesh")
    parser.add_option("-n", "--no-surface",
                      action="store_true", dest="no_surface",
                      default=False,
                      help="do not output surface [default: %default]")
    (options, args) = parser.parse_args()
    if (len(args) == 2):
        filename_in = args[0];
        filename_out = args[1];
    else:
        parser.print_help(),
        return
    if (filename_in == '-'):
        file_in = sys.stdin
    else:
        file_in = open(filename_in, "r");
    # NOTE(review): file_in is never read -- the mesh is loaded via
    # Mesh.from_file(filename_in) below, so this open() only acts as an
    # existence check, and '-' (stdin) is presumably not handled by
    # Mesh.from_file -- TODO confirm.
    mesh = Mesh.from_file(filename_in)
    if (filename_in != '-'):
        file_in.close()
    domain = FEDomain('domain', mesh)
    if options.save_mesh:
        # Save the surface as a standalone mesh next to the input file,
        # named 'surf_<original name>.mesh'.
        region = domain.create_region('surf', 'vertices of surface', 'facet')
        surf_mesh = Mesh.from_region(region, mesh,
                                     localize=True, is_surface=True)
        aux = edit_filename(filename_in, prefix='surf_', new_ext='.mesh')
        surf_mesh.write(aux, io='auto')
    if domain.has_faces():
        domain.fix_element_orientation()
        lst, surf_faces = get_surface_faces(domain)
        if options.no_surface:
            return
        gr_s = surface_graph(surf_faces, mesh.n_nod)
        n_comp, comps = surface_components(gr_s, surf_faces)
        output('number of surface components:', n_comp)
        # Flatten the per-group component ids into a single column aligned
        # with the rows of lst.
        ccs, comps = comps, nm.zeros((0,1), nm.int32)
        for cc in ccs:
            comps = nm.concatenate((comps, cc[:,nm.newaxis]), 0)
        out = nm.concatenate((lst, comps), 1)
        if (filename_out == '-'):
            file_out = sys.stdout
        else:
            file_out = open(filename_out, "w");
        for row in out:
            file_out.write('%d %d %d\n' % (row[0], row[1], row[2]))
        if (filename_out != '-'):
            file_out.close()
if __name__=='__main__':
    main()
| 2.890625 | 3 |
protolite/test/test_encoder.py | thelinuxkid/python-protolite | 6 | 12761931 | import pytest
from protolite import encoder
class decoding(object):
    """Wire schemas keyed by protobuf field number, used to drive
    encoder.decode in the tests below."""

    message_foo = {
        1: {'type': 'string', 'name': 'body', 'scope': 'optional'},
        2: {'type': 'string', 'name': 'messages', 'scope': 'repeated'},
    }

    message_bar = {
        1: {'type': 'enum', 'name': 'type', 'scope': 'optional'},
        4: {'type': 'embedded', 'name': 'message_foo',
            'message': message_foo, 'scope': 'optional'},
    }

    message_baz = {
        1: {'type': 'embedded', 'name': 'message_bar',
            'message': message_bar, 'scope': 'optional'},
        3: {'type': 'uint64', 'name': 'baz_id', 'scope': 'optional'},
    }

    message_sna = {
        1: {'type': 'enum', 'name': 'type', 'scope': 'optional'},
        8: {'type': 'embedded', 'name': 'message_baz',
            'message': message_baz, 'scope': 'optional'},
    }

    foo = {
        1: {'type': 'uint64', 'name': 'foo_id', 'scope': 'optional'},
        2: {'type': 'bool', 'name': 'is_foo', 'scope': 'optional'},
        3: {'type': 'uint32', 'name': 'foo_count', 'scope': 'optional'},
        305: {'type': 'int32', 'name': 'foo_value', 'scope': 'optional'},
    }

    bar = {
        1: {'type': 'uint64', 'name': 'bar_id', 'scope': 'optional'},
        2: {'type': 'float', 'name': 'bar_value', 'scope': 'optional'},
        3: {'type': 'double', 'name': 'bar_result', 'scope': 'optional'},
        5: {'type': 'embedded', 'name': 'foos',
            'message': foo, 'scope': 'repeated'},
    }

    sna = {
        1: {'type': 'uint64', 'name': 'sna_ids', 'scope': 'repeated'},
        2: {'type': 'double', 'name': 'snas', 'scope': 'repeated'},
        3: {'type': 'float', 'name': 'foos', 'scope': 'repeated'},
        4: {'type': 'uint32', 'name': 'counts', 'scope': 'repeated'},
    }
class encoding(object):
    """Wire schemas keyed by message field name, used to drive
    encoder.encode in the tests below (mirrors `decoding`)."""

    message_foo = {
        'body': {'type': 'string', 'field': 1, 'scope': 'optional'},
        'messages': {'type': 'string', 'field': 2, 'scope': 'repeated'},
    }

    message_bar = {
        'type': {'type': 'enum', 'field': 1, 'scope': 'optional'},
        'message_foo': {'type': 'embedded', 'field': 4,
                        'message': message_foo, 'scope': 'optional'},
    }

    message_baz = {
        'message_bar': {'type': 'embedded', 'field': 1,
                        'message': message_bar, 'scope': 'optional'},
        'baz_id': {'type': 'uint64', 'field': 3, 'scope': 'optional'},
    }

    foo = {
        'foo_id': {'type': 'uint64', 'field': 1, 'scope': 'optional'},
        'is_foo': {'type': 'bool', 'field': 2, 'scope': 'optional'},
        'foo_count': {'type': 'uint32', 'field': 3, 'scope': 'optional'},
        'foo_value': {'type': 'int32', 'field': 305, 'scope': 'optional'},
    }

    bar = {
        'bar_id': {'type': 'uint64', 'field': 1, 'scope': 'optional'},
        'bar_value': {'type': 'float', 'field': 2, 'scope': 'optional'},
        'bar_result': {'type': 'double', 'field': 3, 'scope': 'optional'},
        'foos': {'type': 'embedded', 'field': 5,
                 'message': foo, 'scope': 'repeated'},
    }

    message_sna = {
        'type': {'type': 'enum', 'field': 1, 'scope': 'optional'},
        'message_baz': {'type': 'embedded', 'field': 8,
                        'message': message_baz, 'scope': 'optional'},
    }

    sna = {
        'sna_ids': {'type': 'uint64', 'field': 1, 'scope': 'repeated'},
        'snas': {'type': 'double', 'field': 2, 'scope': 'repeated'},
        'foos': {'type': 'float', 'field': 3, 'scope': 'repeated'},
        'counts': {'type': 'uint32', 'field': 4, 'scope': 'repeated'},
    }
def test_decode_key_as_varint():
    """Field numbers big enough to need a multi-byte varint key decode."""
    raw = '\x88\x13\x08'
    expected = {'foo_value': 8}
    assert expected == encoder.decode(decoding.foo, raw)
def test_encode_key_as_varint():
    # Round-trip instead of byte comparison: protolite uses plain dicts, so
    # the encoded field order is nondeterministic.
    original = {'foo_value': 8}
    wire = encoder.encode(encoding.foo, original)
    assert original == encoder.decode(decoding.foo, wire)
def test_decode_int32():
    raw = '\x18\x7f'
    assert {'foo_count': 127} == encoder.decode(decoding.foo, raw)
def test_encode_int32():
    # Round-trip; encoded byte order is nondeterministic (plain dicts).
    original = {'foo_count': 127}
    wire = encoder.encode(encoding.foo, original)
    assert original == encoder.decode(decoding.foo, wire)
def test_decode_uint64():
data = '\x08\x80\xa0\x88\x84\x80\x8a\xa5\xfe\r'
msg = encoder.decode(decoding.bar, data)
want = dict([
('bar_id', 1007843487950966784L),
])
assert want == msg
def test_encode_uint64():
# Don't check against data string since encoder doesn't use OrderedDict
msg = dict([
('bar_id', 1007843487950966784L),
])
data = encoder.encode(encoding.bar, msg)
res = encoder.decode(decoding.bar, data)
assert msg == res
def test_encode_uint64_negative():
    # Encoding a negative value into an unsigned field must fail loudly.
    bad = {'bar_id': -155496620801056360}
    with pytest.raises(ValueError) as einfo:
        encoder.encode(encoding.bar, bad)
    want = 'ValueError: uint64 value cannot be negative: -155496620801056360'
    assert einfo.exconly() == want
def test_decode_bool():
    raw = '\x10\x00'
    assert {'is_foo': False} == encoder.decode(decoding.foo, raw)
def test_encode_bool():
    # Round-trip; encoded byte order is nondeterministic (plain dicts).
    original = {'is_foo': False}
    wire = encoder.encode(encoding.foo, original)
    assert original == encoder.decode(decoding.foo, wire)
def test_decode_enum():
    raw = '\x08\x07'
    assert {'type': 7} == encoder.decode(decoding.message_bar, raw)
def test_encode_enum():
    # Round-trip; encoded byte order is nondeterministic (plain dicts).
    original = {'type': 7}
    wire = encoder.encode(encoding.message_bar, original)
    assert original == encoder.decode(decoding.message_bar, wire)
def test_decode_repeated_varint():
    raw = '\x08\n\x08\x14'
    assert {'sna_ids': [10, 20]} == encoder.decode(decoding.sna, raw)
def test_encode_repeated_varint():
    # Round-trip; encoded byte order is nondeterministic (plain dicts).
    original = {'sna_ids': [10, 20]}
    wire = encoder.encode(encoding.sna, original)
    assert original == encoder.decode(decoding.sna, wire)
def test_encode_repeated_uint_negative():
    # One negative element in a repeated unsigned field must abort encoding.
    bad = {'counts': [1, -2, 3]}
    with pytest.raises(ValueError) as einfo:
        encoder.encode(encoding.sna, bad)
    assert einfo.exconly() == 'ValueError: uint32 value cannot be negative: -2'
def test_decode_64bit():
    raw = '\x19\x00\x00\x00\xe0%\x99^\xc0'
    expected = {'bar_result': -122.39293670654297}
    assert expected == encoder.decode(decoding.bar, raw)
def test_encode_64bit():
    # Round-trip; encoded byte order is nondeterministic (plain dicts).
    original = {'bar_result': -122.39293670654297}
    wire = encoder.encode(encoding.bar, original)
    assert original == encoder.decode(decoding.bar, wire)
def test_decode_64bit_repeated():
    raw = '\x11\x00\x00\x00\xe0%\x99^\xc0\x11\x8fB\x9a\xf4\xdcZm@'
    expected = {'snas': [-122.39293670654297, 234.839472104348218943324]}
    assert expected == encoder.decode(decoding.sna, raw)
def test_encode_64bit_repeated():
    # Round-trip; encoded byte order is nondeterministic (plain dicts).
    original = {'snas': [-122.39293670654297, 234.839472104348218943324]}
    wire = encoder.encode(encoding.sna, original)
    assert original == encoder.decode(decoding.sna, wire)
def test_decode_delimited_length_as_varint():
    """Embedded-message lengths above 127 decode via multi-byte varints."""
    inner = {
        1: {'type': 'string', 'name': 'first_name', 'scope': 'optional'},
    }
    proto = {
        305: {'type': 'embedded', 'name': 'dec_message',
              'message': inner, 'scope': 'optional'},
    }
    raw = '\x8a\x13\xcf\t'
    # We only care that the length varint parses, not about the contents.
    assert {'dec_message': {}} == encoder.decode(proto, raw)
def test_encode_delimited_length_as_varint():
    # Round-trip; encoded byte order is nondeterministic (plain dicts).
    # A large number of string fields forces the embedded message length
    # (and some field keys) to be encoded as multi-byte varints.
    def _index():
        for i in range(0, 22):
            for j in range(32, 127):
                yield j + (127 * i), chr(j) * (i + 1)

    enc_message = {}
    for field, name in _index():
        enc_message[name] = {'type': 'string', 'field': field,
                             'scope': 'optional'}
    enc_proto = {
        'message_foo': {'type': 'embedded', 'field': 305,
                        'message': enc_message, 'scope': 'optional'},
    }

    dec_message = {}
    for field, name in _index():
        dec_message[field] = {'type': 'string', 'name': name,
                              'scope': 'optional'}
    dec_proto = {
        305: {'type': 'embedded', 'name': 'message_foo',
              'message': dec_message, 'scope': 'optional'},
    }

    body = dict((name, str(field)) for field, name in _index())
    msg = {'message_foo': body}
    wire = encoder.encode(enc_proto, msg)
    assert msg == encoder.decode(dec_proto, wire)
def test_decode_embedded():
    raw = '\x08\x08B\x12\n\r\x08\x04"\t\n\x07foobody\x18\xb9`'
    expected = {
        'type': 8,
        'message_baz': {
            'baz_id': 12345,
            'message_bar': {
                'type': 4,
                'message_foo': {'body': 'foobody'},
            },
        },
    }
    assert expected == encoder.decode(decoding.message_sna, raw)
def test_encode_embedded():
    # Round-trip; encoded byte order is nondeterministic (plain dicts).
    original = {
        'type': 8,
        'message_baz': {
            'baz_id': 12345,
            'message_bar': {
                'type': 4,
                'message_foo': {'body': 'foobody'},
            },
        },
    }
    wire = encoder.encode(encoding.message_sna, original)
    assert original == encoder.decode(decoding.message_sna, wire)
def test_decode_string():
    # Wire data for body='hello world': key 0x0a (field 1, length-delimited),
    # length varint 0x0b (11 bytes), then the payload.
    #
    # The previous literal, '\n\hello world', contained the invalid escape
    # '\h' (kept verbatim as backslash + 'h'), so the length byte was 0x5c
    # (92) instead of 0x0b and the test only passed via lenient slicing.
    data = '\n\x0bhello world'
    msg = encoder.decode(decoding.message_foo, data)
    want = dict([
        ('body', 'hello world'),
    ])
    assert want == msg
def test_encode_string():
    # Round-trip; encoded byte order is nondeterministic (plain dicts).
    ascii_msg = {'body': 'hello world'}
    wire = encoder.encode(encoding.message_foo, ascii_msg)
    assert ascii_msg == encoder.decode(decoding.message_foo, wire)

    # Non-ASCII text must survive the round trip as well.
    unicode_msg = {'body': u'\u03b3\u03b5\u03b9\u03b1'}
    wire = encoder.encode(encoding.message_foo, unicode_msg)
    assert unicode_msg == encoder.decode(decoding.message_foo, wire)
def test_decode_embedded_repeated():
    raw = '\x08\x1e*\x02\x08\n*\x02\x08\x14'
    expected = {
        'bar_id': 30,
        'foos': [{'foo_id': 10}, {'foo_id': 20}],
    }
    assert expected == encoder.decode(decoding.bar, raw)
def test_encode_embedded_repeated():
    # Round-trip; encoded byte order is nondeterministic (plain dicts).
    original = {
        'bar_id': 30,
        'foos': [{'foo_id': 10}, {'foo_id': 20}],
    }
    wire = encoder.encode(encoding.bar, original)
    assert original == encoder.decode(decoding.bar, wire)
def test_decode_string_repeated():
    raw = '\x12\x03bar\x12\x03baz'
    expected = {'messages': ['bar', 'baz']}
    assert expected == encoder.decode(decoding.message_foo, raw)
def test_encode_string_repeated():
    # Round-trip; encoded byte order is nondeterministic (plain dicts).
    original = {'messages': ['bar', 'baz']}
    wire = encoder.encode(encoding.message_foo, original)
    assert original == encoder.decode(decoding.message_foo, wire)
def test_decode_32bit():
    raw = '\x15/\xc9\xf4\xc2'
    expected = {'bar_value': -122.39293670654297}
    assert expected == encoder.decode(decoding.bar, raw)
def test_encode_32bit():
    # Round-trip; encoded byte order is nondeterministic (plain dicts).
    original = {'bar_value': -122.39293670654297}
    wire = encoder.encode(encoding.bar, original)
    assert original == encoder.decode(decoding.bar, wire)
def test_decode_32bit_repeated():
    raw = '\x1d/\xc9\xf4\xc2\x1d\xeb\xe2V?'
    expected = {'foos': [-122.39293670654297, 0.8393999934196472]}
    assert expected == encoder.decode(decoding.sna, raw)
def test_encode_32bit_repeated():
    # Round-trip; encoded byte order is nondeterministic (plain dicts).
    original = {'foos': [-122.39293670654297, 0.8393999934196472]}
    wire = encoder.encode(encoding.sna, original)
    assert original == encoder.decode(decoding.sna, wire)
| 2.3125 | 2 |
backhaul/ui/event.py | tmacro/backhaul | 1 | 12761932 | <reponame>tmacro/backhaul
from pyglet.event import EventDispatcher as EventDispatcher
from functools import partial
from ..util.log import Log
from collections import defaultdict
_log = Log('ui.event')
def interact(**kwargs):
    """Drop into an interactive Python console whose locals are *kwargs*."""
    import code
    console = code.InteractiveConsole(locals=kwargs)
    console.interact()
# Simple method rename and auto-adding self to handler args
class EventEmitter():
    """Thin wrapper around pyglet's EventDispatcher.

    Handlers are collected via register() and the underlying dispatcher is
    built lazily on the first emission; new event types can therefore not be
    added once an event has been emitted.
    """
    def __init__(self):
        # Lazily-created pyglet EventDispatcher instance (see _emitter).
        self.__emitter = None
        # Maps event name -> list of handler callables (filled by register()).
        self.__handlers = defaultdict(list)

    @property
    def _emitter(self):
        """Build (once) and return the underlying pyglet dispatcher with all
        registered event types and handlers attached."""
        if self.__emitter is None:
            class _Emitter(EventDispatcher):
                pass
            # Event types must be registered on the dispatcher class before
            # any handlers can be pushed or events dispatched.
            for htype in self.__handlers.keys():
                _Emitter.register_event_type(htype)
            self.__emitter = _Emitter()
            # Each handler is pushed individually (one handler frame each).
            for event, handlers in self.__handlers.items():
                for handler in handlers:
                    self.__emitter.push_handlers(**{event: handler})
        return self.__emitter

    def register(self, **kwargs):
        """Register handler callables keyed by event name.

        Raises if a previously unseen event name is introduced after the
        dispatcher has already been built (i.e. after the first emit).
        """
        for event, handler in kwargs.items():
            if event not in self.__handlers and self.__emitter is not None:
                raise Exception('Cannot add event types after first emission!')
            _log.debug('Registering handler %s for event %s'%(event, handler))
            self.__handlers[event].append(handler)

    def emit(self, event, *args):
        """Dispatch `event`, passing this emitter followed by *args."""
        _log.debug('Emitting Event %s'%event)
        return self._emitter.dispatch_event(event, self, *args)

    def _lazy_emit(self, event, *args):
        # Deferred-emission target bound by lazy_emit().
        _log.debug('Emitting lazy event %s'%event)
        return self.emit(event, *args)

    def lazy_emit(self, event, *args):
        """Return a zero-argument callable that emits `event` when invoked."""
        _log.debug('Creating lazy event %s'%event)
        return partial(self._lazy_emit, event, *args)
insertion_sort.py | matteoalberici4/algorithms | 0 | 12761933 | <reponame>matteoalberici4/algorithms<gh_stars>0
# Insertion sort
def insertion_sort(A: list):
    """Sort A in place, ascending, by bubbling each element leftwards
    with adjacent swaps until it reaches its sorted position."""
    for end in range(1, len(A)):
        pos = end
        while pos and A[pos] < A[pos - 1]:
            A[pos - 1], A[pos] = A[pos], A[pos - 1]
            pos -= 1
# Complexity:
# worst-case: Θ(n^2)
# best-case: Θ(n)
# average-case: Θ(n^2)
# in-place: yes
| 3.796875 | 4 |
osx/devkit/plug-ins/scripted/splitUVCmd.py | leegoonz/Maya-devkit | 10 | 12761934 | #-
# ==========================================================================
# Copyright (C) 1995 - 2006 Autodesk, Inc. and/or its licensors. All
# rights reserved.
#
# The coded instructions, statements, computer programs, and/or related
# material (collectively the "Data") in these files contain unpublished
# information proprietary to Autodesk, Inc. ("Autodesk") and/or its
# licensors, which is protected by U.S. and Canadian federal copyright
# law and by international treaties.
#
# The Data is provided for use exclusively by You. You have the right
# to use, modify, and incorporate this Data into other products for
# purposes authorized by the Autodesk software license agreement,
# without fee.
#
# The copyright notices in the Software and this entire statement,
# including the above license grant, this restriction and the
# following disclaimer, must be included in all copies of the
# Software, in whole or in part, and all derivative works of
# the Software, unless such copies or derivative works are solely
# in the form of machine-executable object code generated by a
# source language processor.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND.
# AUTODESK DOES NOT MAKE AND HEREBY DISCLAIMS ANY EXPRESS OR IMPLIED
# WARRANTIES INCLUDING, BUT NOT LIMITED TO, THE WARRANTIES OF
# NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR
# PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE, OR
# TRADE PRACTICE. IN NO EVENT WILL AUTODESK AND/OR ITS LICENSORS
# BE LIABLE FOR ANY LOST REVENUES, DATA, OR PROFITS, OR SPECIAL,
# DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES, EVEN IF AUTODESK
# AND/OR ITS LICENSORS HAS BEEN ADVISED OF THE POSSIBILITY
# OR PROBABILITY OF SUCH DAMAGES.
#
# ==========================================================================
#+
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
import sys
import polyModifier
def statusError(message):
    """Write a 'Status failed' message to stderr and the Maya script editor,
    then re-raise the active exception (must be called from an except block)."""
    text = "Status failed: %s\n" % message
    sys.stderr.write(text)
    OpenMaya.MGlobal.displayError(text)
    raise
# Command name registered with Maya, the dependency node's type name, and the
# node's unique MTypeId (from the Autodesk devkit example id block).
kPluginCmdName = "spSplitUV"
kPluginNodeTypeName = "spSplitUVNode"
kPluginNodeId = OpenMaya.MTypeId(0x87013)
#####################################################################
## COMMAND ##########################################################
#####################################################################
# Overview:
#
# The purpose of the splitUV command is to unshare (split) any selected UVs
# on a given object.
#
# How it works:
#
# This command is based on the polyModifierCmd. It relies on the polyModifierCmd
# to manage "how" the effects of the splitUV operation are applied (ie. directly
# on the mesh or through a modifier node). See polyModifier.py for more details
#
# To understand the algorithm behind the splitUV operation, refer to splitUVFty
#
# Limitations:
#
# (1) Can only operate on a single mesh at a given time. If there are more than one
# mesh with selected UVs, only the first mesh found in the selection list is
# operated on.
#
class splitUV(polyModifier.polyModifierCmd):
    """polyModifierCmd-based command that unshares (splits) the selected UVs
    on a single mesh. See the overview comment above for how the command,
    node and factory cooperate."""
    def __init__(self):
        polyModifier.polyModifierCmd.__init__(self)
        # Selected UVs
        #
        # Note: The MObject, fComponentList, is only ever accessed on a single call to the plugin.
        #       It is never accessed between calls and is stored on the class for access in the
        #       overridden initModifierNode() method.
        #
        self.__fComponentList = OpenMaya.MObject()
        self.__fSelUVs = OpenMaya.MIntArray()
        # Factory that performs the actual split when modifying directly.
        self.__fSplitUVFactory = splitUVFty()

    def isUndoable(self):
        """This command supports undo."""
        return True

    def doIt(self, args):
        """
        implements the scripted splitUV command.

        Arguments:
            args - the argument list that was passed to the command from MEL
        """
        # Parse the selection list for objects with selected UV components.
        # To simplify things, we only take the first object that we find with
        # selected UVs and operate on that object alone.
        #
        # All other objects are ignored and return warning messages indicating
        # this limitation.
        #
        selList = OpenMaya.MSelectionList()
        OpenMaya.MGlobal.getActiveSelectionList(selList)
        selListIter = OpenMaya.MItSelectionList(selList)

        # The splitUV node only accepts a component list input, so we build
        # a component list using MFnComponentListData.
        #
        # MIntArrays could also be passed into the node to represent the uvIds,
        # but are less storage efficient than component lists, since consecutive
        # components are bundled into a single entry in component lists.
        #
        compListFn = OpenMaya.MFnComponentListData()
        compListFn.create()
        found = False
        foundMultiple = False

        while not selListIter.isDone():
            dagPath = OpenMaya.MDagPath()
            component = OpenMaya.MObject()
            itemMatches = True
            selListIter.getDagPath(dagPath, component)

            # Check for selected UV components
            #
            if itemMatches and (component.apiType() == OpenMaya.MFn.kMeshMapComponent):
                if not found:
                    # The variable 'component' holds all selected components on the selected
                    # object, thus only a single call to MFnComponentListData::add() is needed
                    # to store the selected components for a given object.
                    #
                    compListFn.add(component)

                    # Copy the component list created by MFnComponentListData into our local
                    # component list MObject member.
                    #
                    self.__fComponentList = compListFn.object()

                    # Locally store the actual uvIds of the selected UVs so that this command
                    # can directly modify the mesh in the case when there is no history and
                    # history is turned off.
                    #
                    compFn = OpenMaya.MFnSingleIndexedComponent(component)
                    compFn.getElements(self.__fSelUVs)

                    # Ensure that this DAG path will point to the shape of our object.
                    # Set the DAG path for the polyModifierCmd.
                    #
                    dagPath.extendToShape()
                    self._setMeshNode(dagPath)
                    found = True
                else:
                    # Break once we have found a multiple object holding selected UVs, since
                    # we are not interested in how many multiple objects there are, only
                    # the fact that there are multiple objects.
                    #
                    foundMultiple = True
                    break
            selListIter.next()
        if foundMultiple:
            self.displayWarning("Found more than one object with selected UVs - Only operating on first found object.")

        # Initialize the polyModifierCmd node type - mesh node already set
        #
        self._setModifierNodeType(kPluginNodeId)

        if found:
            if self.__validateUVs():
                # Now, pass control over to the polyModifierCmd._doModifyPoly() method
                # to handle the operation.
                #
                try:
                    self._doModifyPoly()
                except:
                    self.displayError("splitUV command failed!")
                    raise
                else:
                    self.setResult("splitUV command succeeded!")
            else:
                self.displayError("splitUV command failed: Selected UVs are not splittable")
        else:
            self.displayError("splitUV command failed: Unable to find selected UVs")

    def redoIt(self):
        """
        Implements redo for the scripted splitUV command.

        This method is called when the user has undone a command of this type
        and then redoes it.  No arguments are passed in as all of the necessary
        information is cached by the doIt method.
        """
        try:
            self._redoModifyPoly()
            self.setResult("splitUV command succeeded!")
        except:
            self.displayError("splitUV command failed!")
            raise

    def undoIt(self):
        """
        implements undo for the scripted splitUV command.

        This method is called to undo a previous command of this type.  The
        system should be returned to the exact state that it was it previous
        to this command being executed.  That includes the selection state.
        """
        try:
            self._undoModifyPoly()
            self.setResult("splitUV undo succeeded!")
        except:
            self.displayError("splitUV undo failed!")
            raise

    def _initModifierNode(self, modifierNode):
        """Initialize the freshly-created splitUV node by passing the cached
        component list into its 'inputComponents' attribute."""
        # We need to tell the splitUV node which UVs to operate on. By overriding
        # the polyModifierCmd._initModifierNode() method, we can insert our own
        # modifierNode initialization code.
        #
        depNodeFn = OpenMaya.MFnDependencyNode(modifierNode)
        uvListAttr = depNodeFn.attribute("inputComponents")

        # Pass the component list down to the splitUV node
        #
        uvListPlug = OpenMaya.MPlug(modifierNode, uvListAttr)
        uvListPlug.setMObject(self.__fComponentList)

    def _directModifier(self, mesh):
        """Apply the split directly on `mesh` via the factory (the
        no-construction-history code path of polyModifierCmd)."""
        self.__fSplitUVFactory.setMesh(mesh)
        self.__fSplitUVFactory.setUVIds(self.__fSelUVs)

        # Now, perform the splitUV
        #
        self.__fSplitUVFactory.doIt()

    def __validateUVs(self):
        """
        Validate the UVs for the splitUV operation. UVs are valid only if they are shared
        by more than one face. While the splitUVNode is smart enough to not process the
        split if a UV is not splittable, a splitUV node is still created by the polyModifierCmd.
        So call this method to validate the UVs before calling _doModifyPoly().

        validateUVs() will return true so long as there is at least one valid UV. It will
        also prune out any invalid UVs from both the component list and UVId array.
        """
        # Get the mesh that we are operating on
        #
        dagPath = self._getMeshNode()
        mesh = dagPath.node()

        # Get the number of faces sharing the selected UVs
        #
        meshFn = OpenMaya.MFnMesh(mesh)
        polyIter = OpenMaya.MItMeshPolygon(mesh)
        selUVFaceCountArray = OpenMaya.MIntArray()
        # MScriptUtil provides the int* needed by getUVIndex().
        indexParam = OpenMaya.MScriptUtil(0)
        indexPtr = indexParam.asIntPtr()
        count = 0
        selUVsCount = self.__fSelUVs.length()
        # NOTE(review): neither polyIter is reset nor count zeroed between
        # iterations of this loop, so only the first selected UV is actually
        # counted against every face -- looks suspicious; compare with the
        # per-UV reset done in splitUVFty.doIt(). TODO confirm upstream.
        for i in range(selUVsCount):
            while not polyIter.isDone():
                if polyIter.hasUVs():
                    polyVertCount = polyIter.polygonVertexCount()
                    for j in range(polyVertCount):
                        polyIter.getUVIndex(j, indexPtr)
                        UVIndex = indexParam.getInt(indexPtr)
                        if UVIndex == self.__fSelUVs[i]:
                            count += 1
                            break
                polyIter.next()
            selUVFaceCountArray.append(count)

        # Now, check to make sure that at least one UV is being shared by more than one
        # face. So long as we have one UV that we can operate on, we should proceed and let
        # the splitUVNode ignore the UVs which are only shared by one face.
        #
        isValid = False
        validUVIndices = OpenMaya.MIntArray()
        for i in range(selUVsCount):
            if selUVFaceCountArray[i] > 1:
                isValid = True
                validUVIndices.append(i)

        if isValid:
            self.__pruneUVs(validUVIndices)

        return isValid

    def __pruneUVs(self, validUVIndices):
        """
        This method will remove any invalid UVIds from the component list and UVId array.
        The benefit of this is to reduce the amount of extra processing that the node would
        have to perform. It will result in less iterations through the mesh as there are
        less UVs to search for.
        """
        validUVIds = OpenMaya.MIntArray()
        for i in range(validUVIndices.length()):
            uvIndex = validUVIndices[i]
            validUVIds.append(self.__fSelUVs[uvIndex])

        # Replace the local int array of UVIds
        #
        self.__fSelUVs.clear()
        self.__fSelUVs = validUVIds

        # Build the list of valid components
        #
        compFn = OpenMaya.MFnSingleIndexedComponent()
        try:
            compFn.create(OpenMaya.MFn.kMeshMapComponent)
        except:
            statusError("compFn.create( MFn::kMeshMapComponent )")
        try:
            compFn.addElements(validUVIds)
        except:
            statusError("compFn.addElements( validUVIds )")

        # Replace the component list
        #
        component = compFn.object()
        compListFn = OpenMaya.MFnComponentListData()
        compListFn.create()
        try:
            compListFn.add(component)
        except:
            statusError("compListFn.add( component )")
        self.__fComponentList = compListFn.object()
#####################################################################
## FACTORY ##########################################################
#####################################################################
# Overview:
#
# The splitUV factory implements the actual splitUV operation. It takes in
# only two parameters:
#
# 1) A polygonal mesh
# 2) An array of selected UV Ids
#
# The algorithm works as follows:
#
# 1) Parse the mesh for the selected UVs and collect:
#
# (a) Number of faces sharing each UV
# (stored as two arrays: face array, indexing/offset array)
# (b) Associated vertex Id
#
# 2) Create (N-1) new UVIds for each selected UV, where N represents the number of faces
# sharing the UV.
#
# 3) Set each of the new UVs to the same 2D location on the UVmap.
#
# 3) Arbitrarily let the last face in the list of faces sharing this UV to keep the original
# UV.
#
# 4) Assign each other face one of the new UVIds.
#
#
class splitUVFty(polyModifier.polyModifierFty):
    """Factory that implements the actual splitUV algorithm on a mesh.
    See the factory overview comment above for the algorithm description."""
    def __init__(self):
        polyModifier.polyModifierFty.__init__(self)
        # Mesh Node
        # Note: We only make use of this MObject during a single call of
        #       the splitUV plugin. It is never maintained and used between
        #       calls to the plugin as the MObject handle could be invalidated
        #       between calls to the plugin.
        #
        self.__fMesh = OpenMaya.MObject()
        self.__fSelUVs = OpenMaya.MIntArray()
        self.__fSelUVs.clear()

    def setMesh(self, mesh):
        # Set the mesh (MObject) on which doIt() will operate.
        self.__fMesh = mesh

    def setUVIds(self, uvIds):
        # Set the UV ids (MIntArray) that doIt() will split.
        self.__fSelUVs = uvIds

    def doIt(self):
        """
        Performs the actual splitUV operation on the given object and UVs
        """
        ####################################
        # Declare our processing variables #
        ####################################
        # Face Id and Face Offset map to the selected UVs
        #
        selUVFaceIdMap = OpenMaya.MIntArray()
        selUVFaceOffsetMap = OpenMaya.MIntArray()

        # Local Vertex Index map to the selected UVs
        #
        selUVLocalVertIdMap = OpenMaya.MIntArray()

        ##################################################
        # Collect necessary information for the splitUV  #
        #                                                #
        #   - uvSet                                      #
        #   - faceIds / localVertIds per selected UV     #
        ##################################################
        meshFn = OpenMaya.MFnMesh(self.__fMesh)
        selUVSet = meshFn.currentUVSetName()

        # MScriptUtil provides the int* needed by getUVIndex().
        indexParam = OpenMaya.MScriptUtil(0)
        indexPtr = indexParam.asIntPtr()

        offset = 0
        selUVsCount = self.__fSelUVs.length()
        polyIter = OpenMaya.MItMeshPolygon(self.__fMesh)
        for i in range(selUVsCount):
            # Record where this UV's run of sharing faces begins.
            selUVFaceOffsetMap.append(offset)
            polyIter.reset()
            while not polyIter.isDone():
                if polyIter.hasUVs():
                    polyVertCount = polyIter.polygonVertexCount()
                    for j in range(polyVertCount):
                        polyIter.getUVIndex(j, indexPtr)
                        UVIndex = indexParam.getInt(indexPtr)
                        if UVIndex == self.__fSelUVs[i]:
                            selUVFaceIdMap.append(polyIter.index())
                            selUVLocalVertIdMap.append(j)
                            offset += 1
                            break
                polyIter.next()

        # Store total length of the faceId map in the last element of
        # the offset map so that there is a way to get the number of faces
        # sharing each of the selected UVs
        #
        selUVFaceOffsetMap.append(offset)

        ###############################
        # Begin the splitUV operation #
        ###############################
        currentUVCount = meshFn.numUVs(selUVSet)
        for i in range(selUVsCount):
            # Get the current FaceId map offset
            #
            offset = selUVFaceOffsetMap[i]

            # Get the U and V values of the current UV
            #
            uvId = self.__fSelUVs[i]
            uParam = OpenMaya.MScriptUtil(0.0)
            uPtr = uParam.asFloatPtr()
            vParam = OpenMaya.MScriptUtil(0.0)
            vPtr = vParam.asFloatPtr()
            meshFn.getUV(uvId, uPtr, vPtr, selUVSet)
            u = uParam.getFloat(uPtr)
            v = vParam.getFloat(vPtr)

            # Get the number of faces sharing the current UV
            #
            faceCount = selUVFaceOffsetMap[i + 1] - selUVFaceOffsetMap[i]

            # Arbitrarily choose that the last faceId in the list of faces
            # sharing this UV, will keep the original UV.
            #
            for j in range(faceCount-1):
                # New UV at the same (u, v) location, assigned to this face's
                # corner in place of the shared original.
                meshFn.setUV(currentUVCount, u, v, selUVSet)
                localVertId = selUVLocalVertIdMap[offset]
                faceId = selUVFaceIdMap[offset]
                meshFn.assignUV(faceId, localVertId, currentUVCount, selUVSet)
                currentUVCount += 1
                offset += 1
#####################################################################
## NODE #############################################################
#####################################################################
class splitUVNode(polyModifier.polyModifierNode):
    """Dependency-graph node implementing the splitUV modifier.

    Inputs:  uvList (component list of UV ids to split), inMesh.
    Output:  outMesh - a copy of inMesh with each selected UV split so that
             all but one face sharing it gets its own UV.
    The topological work is delegated to the splitUVFty factory.
    """

    # Component list holding the UV ids to split (populated by the command).
    uvList = OpenMaya.MObject()

    def __init__(self):
        polyModifier.polyModifierNode.__init__(self)
        # Factory object that performs the actual splitUV operation.
        self.fSplitUVFactory = splitUVFty()

    def compute(self, plug, data):
        """
        Compute the value of the given output plug based on the values of
        the input attributes.

        Arguments:
            plug - the plug to compute
            data - object that provides access to the attributes for this node
        """
        stateData = 0
        state = OpenMayaMPx.cvar.MPxNode_state
        try:
            stateData = data.outputValue(state)
        except:
            statusError("ERROR getting state")

        # Check for the HasNoEffect/PassThrough flag on the node.
        #
        # (stateData is an enumeration standard in all depend nodes - stored as short)
        #
        # (0 = Normal)
        # (1 = HasNoEffect/PassThrough)
        # (2 = Blocking)
        # ...
        #
        if stateData.asShort() == 1:
            try:
                inputData = data.inputValue(splitUVNode.inMesh)
            except:
                statusError("ERROR getting inMesh")
            try:
                outputData = data.outputValue(splitUVNode.outMesh)
            except:
                statusError("ERROR getting outMesh")
            # Simply redirect the inMesh to the outMesh for the PassThrough effect
            #
            outputData.setMObject(inputData.asMesh())
        else:
            # Check which output attribute we have been asked to
            # compute. If this node doesn't know how to compute it,
            # we must return MS::kUnknownParameter
            #
            if plug == splitUVNode.outMesh:
                try:
                    inputData = data.inputValue(splitUVNode.inMesh)
                except:
                    statusError("ERROR getting inMesh")
                try:
                    outputData = data.outputValue(splitUVNode.outMesh)
                except:
                    statusError("ERROR getting outMesh")
                # Now, we get the value of the uvList and use it to perform
                # the operation on this mesh
                #
                try:
                    inputUVs = data.inputValue(splitUVNode.uvList)
                except:
                    statusError("ERROR getting uvList")
                # Copy the inMesh to the outMesh, and now you can
                # perform operations in-place on the outMesh
                #
                outputData.setMObject(inputData.asMesh())
                mesh = outputData.asMesh()
                # Retrieve the UV list from the component list.
                #
                # Note, we use a component list to store the components
                # because it is more compact memory wise. (ie. comp[81:85]
                # is smaller than comp[81], comp[82],...,comp[85])
                #
                compList = inputUVs.data()
                compListFn = OpenMaya.MFnComponentListData(compList)
                uvIds = OpenMaya.MIntArray()
                # Flatten every kMeshMapComponent into a plain array of UV ids.
                for i in range(compListFn.length()):
                    comp = compListFn[i]
                    if comp.apiType() == OpenMaya.MFn.kMeshMapComponent:
                        uvComp = OpenMaya.MFnSingleIndexedComponent(comp)
                        for j in range(uvComp.elementCount()):
                            uvId = uvComp.element(j)
                            uvIds.append(uvId)
                # Set the mesh object and uvList on the factory
                #
                self.fSplitUVFactory.setMesh(mesh)
                self.fSplitUVFactory.setUVIds(uvIds)
                # Now, perform the splitUV
                #
                try:
                    self.fSplitUVFactory.doIt()
                except:
                    statusError("ERROR in splitUVFty.doIt()")
                # Mark the output mesh as clean
                #
                outputData.setClean()
            else:
                return OpenMaya.kUnknownParameter
        return None
#####################################################################
## REGISTRATION #####################################################
#####################################################################
def cmdCreator():
    # Factory Maya calls to instantiate the splitUV command proxy.
    return OpenMayaMPx.asMPxPtr(splitUV())
def nodeCreator():
    # Factory Maya calls to instantiate the splitUVNode proxy.
    return OpenMayaMPx.asMPxPtr(splitUVNode())
def nodeInitializer():
    """Create the splitUVNode attributes and their dependencies.

    Called once by Maya when the node type is registered.
    """
    # One MFnTypedAttribute is reused to create all three attributes.
    attrFn = OpenMaya.MFnTypedAttribute()
    splitUVNode.uvList = attrFn.create("inputComponents", "ics", OpenMaya.MFnComponentListData.kComponentList)
    attrFn.setStorable(True) # To be stored during file-save
    splitUVNode.inMesh = attrFn.create("inMesh", "im", OpenMaya.MFnMeshData.kMesh)
    attrFn.setStorable(True) # To be stored during file-save
    # Attribute is read-only because it is an output attribute
    #
    splitUVNode.outMesh = attrFn.create("outMesh", "om", OpenMaya.MFnMeshData.kMesh)
    attrFn.setStorable(False)
    attrFn.setWritable(False)
    # Add the attributes we have created to the node
    #
    splitUVNode.addAttribute(splitUVNode.uvList)
    splitUVNode.addAttribute(splitUVNode.inMesh)
    splitUVNode.addAttribute(splitUVNode.outMesh)
    # Set up a dependency between the input and the output. This will cause
    # the output to be marked dirty when the input changes. The output will
    # then be recomputed the next time the value of the output is requested.
    #
    splitUVNode.attributeAffects(splitUVNode.inMesh, splitUVNode.outMesh)
    splitUVNode.attributeAffects(splitUVNode.uvList, splitUVNode.outMesh)
def initializePlugin(mobject):
    """Register the splitUV command and node when Maya loads the plug-in.

    Re-raises on failure so Maya reports the plug-in as not loaded.
    """
    mplugin = OpenMayaMPx.MFnPlugin(mobject, "Autodesk", "1.0", "Any")
    try:
        mplugin.registerCommand(kPluginCmdName, cmdCreator)
    except:
        sys.stderr.write("Failed to register command: %s\n" % kPluginCmdName)
        raise
    try:
        mplugin.registerNode(kPluginNodeTypeName, kPluginNodeId, nodeCreator, nodeInitializer)
    except:
        # Bug fix: message previously lacked the trailing newline, so it ran
        # into the next line of stderr output.
        sys.stderr.write("Failed to register node: %s\n" % kPluginNodeTypeName)
        raise
def uninitializePlugin(mobject):
    """Deregister the splitUV command and node when Maya unloads the plug-in."""
    mplugin = OpenMayaMPx.MFnPlugin(mobject)
    try:
        mplugin.deregisterCommand(kPluginCmdName)
    except:
        sys.stderr.write("Failed to unregister command: %s\n" % kPluginCmdName)
        raise
    try:
        mplugin.deregisterNode(kPluginNodeId)
    except:
        # Bug fix: message previously lacked the trailing newline.
        sys.stderr.write("Failed to deregister node: %s\n" % kPluginNodeTypeName)
        raise
| 0.996094 | 1 |
cosymlib/utils.py | efrembernuz/symeess | 1 | 12761935 | from cosymlib.shape import maps
import numpy as np
import sys
def plot_minimum_distortion_path_shape(shape_label1, shape_label2, num_points=20, output=sys.stdout, show_plot=True):
    """Write the minimum-distortion path between two shapes and optionally plot it.

    :param shape_label1: first shape-measure label (x axis)
    :param shape_label2: second shape-measure label (y axis)
    :param num_points: number of points sampled along the path
    :param output: writable stream the textual map is printed to
    :param show_plot: when True, also display a matplotlib plot of the path
    """
    import matplotlib.pyplot as plt

    path = get_shape_path(shape_label1, shape_label2, num_points)
    shape_map_txt = " {:6} {:6}\n".format(shape_label1, shape_label2)
    # Path is a pair of parallel sequences: path[0] (x) and path[1] (y).
    for x_val, y_val in zip(path[0], path[1]):
        shape_map_txt += '{:6.3f}, {:6.3f}\n'.format(x_val, y_val)

    # Bug fix: honor the `output` parameter instead of always printing to stdout.
    print(shape_map_txt, file=output)

    if show_plot:
        plt.plot(path[0], path[1], 'k', linewidth=2.0)
        plt.xlabel(shape_label1)
        plt.ylabel(shape_label2)
        plt.show()
def get_shape_path(shape_label1, shape_label2, num_points):
    """Thin wrapper over maps.get_shape_map returning the sampled path data."""
    return maps.get_shape_map(shape_label1, shape_label2, num_points)
def plot_molecular_orbital_diagram(molecule, wfnsym, mo_range=None):
    """Plot an MO energy-level diagram labelled by irreducible representation.

    :param molecule: object exposing electronic_structure.alpha_energies
    :param wfnsym: symmetry result exposing IRLab and mo_IRd_a
    :param mo_range: optional (start, stop) slice of orbitals to show
    """
    import matplotlib.pyplot as plt

    labels = wfnsym.IRLab
    # For each orbital keep only the dominant irreducible representation.
    if mo_range is not None:
        ird_a_max = [np.argmax(ird_a_orb) for ird_a_orb in wfnsym.mo_IRd_a][mo_range[0]:mo_range[1]]
        energies = molecule.electronic_structure.alpha_energies[mo_range[0]:mo_range[1]]
    else:
        ird_a_max = [np.argmax(ird_a_orb) for ird_a_orb in wfnsym.mo_IRd_a]
        energies = molecule.electronic_structure.alpha_energies

    ax1 = plt.axes()
    ax1.axes.get_xaxis().set_visible(False)  # Hide x axis
    # ax1.axes.get_yaxis().set_visible(True)

    # Group consecutive energies within 1e-3 as degenerate levels.
    degeneracy = [[energies[0]]]
    for energy in energies[1:]:
        if abs(energy - degeneracy[-1][-1]) < 1e-3:
            degeneracy[-1].append(energy)
        else:
            degeneracy.append([energy])

    # Spread degenerate levels horizontally so their markers do not overlap.
    max_value = 5e-3
    x_center = []
    for ix in degeneracy:
        if len(ix) == 1:
            x_center.append([0])
        else:
            x_center.append(np.linspace(-max_value, max_value, len(ix)))
    x_center = [y for x in x_center for y in x]

    plt.scatter(x_center, energies, s=500, marker="_", linewidth=3)
    # Annotate each level with its irreducible-representation label.
    for i in range(len(energies)):
        plt.text(-max_value * 2, energies[i], labels[ird_a_max[i]])
    plt.show()
def swap_vectors(v1, v2, position):
    """Return copies of v1/v2 whose elements from `position` onward are exchanged.

    The inputs themselves are left untouched; each result is built from
    the corresponding vector's get_copy().
    """
    out_a = v1.get_copy()
    out_b = v2.get_copy()
    # Only indices >= position are swapped (all of them when position <= 0).
    for idx in range(max(position, 0), len(v1)):
        out_a[idx] = v2[idx]
        out_b[idx] = v1[idx]
    return out_a, out_b
def plot_symmetry_energy_evolution(molecules, wfnsym, mo_range=None):
    """Plot how MO energies evolve across a series of molecules, tracking symmetry.

    Orbitals are followed by their dominant irreducible representation; when
    an orbital's symmetry changes between consecutive molecules, rows are
    swapped (via swap_vectors) so each plotted line keeps a consistent irrep.

    :param molecules: sequence of molecule objects
    :param wfnsym: per-molecule symmetry results (IRLab, mo_IRd_a)
    :param mo_range: optional (start, stop) slice of orbitals to track
    """
    import matplotlib.pyplot as plt

    energies = []
    ird_a_max = []
    for idm, molecule in enumerate(molecules):
        labels = wfnsym[idm].IRLab
        if mo_range is not None:
            ird_a_max.append(np.array([np.argmax(ird_a_orb) for ird_a_orb in wfnsym[idm].mo_IRd_a]
                                      [mo_range[0]:mo_range[1]]))
            energies.append(molecule.electronic_structure.alpha_energies[mo_range[0]:mo_range[1]])
        else:
            ird_a_max.append(np.array([np.argmax(ird_a_orb) for ird_a_orb in wfnsym[idm].mo_IRd_a]))
            energies.append(molecule.electronic_structure.alpha_energies)

    # Transpose so each row follows one orbital across all molecules.
    energies_x_orbital = np.array(energies).T
    ird_a_x_orbital = np.array(ird_a_max).T

    # Re-order rows so each tracked orbital keeps the same irrep label along
    # the series; when a mismatch appears, find a later row carrying the old
    # irrep at that column and swap the tails of both rows.
    for i in range(len(ird_a_x_orbital)):
        for j in range(len(ird_a_x_orbital[i])):
            if j == 0:
                old_ird = ird_a_x_orbital[i][0]
            else:
                if old_ird != ird_a_x_orbital[i][j]:
                    for k in range(len(ird_a_x_orbital) - i):
                        if old_ird == ird_a_x_orbital[k + i][j]:
                            ird_a_x_orbital[i], ird_a_x_orbital[k + i] = swap_vectors(ird_a_x_orbital[i],
                                                                                     ird_a_x_orbital[k + i], j)
                            energies_x_orbital[i], energies_x_orbital[k + i] = swap_vectors(energies_x_orbital[i],
                                                                                           energies_x_orbital[k + i],
                                                                                           j)
                            break
                old_ird = ird_a_x_orbital[i][j]

    # One line per orbital, annotated with its irrep label at each point.
    for ide, energy in enumerate(energies_x_orbital):
        x = np.arange(len(energy))
        plt.plot(x, energy, marker='_')
        for i in range(len(energy)):
            plt.text(x[i], energy[i] + abs(energy[i])*0.001, labels[ird_a_x_orbital[ide][i]])
    plt.show()
| 2.46875 | 2 |
doc/steps_to_make/my_code/0101_0101_pycallgraph_asyncio.py | ggservice007/my-happy-flow | 0 | 12761936 | import asyncio
from pycallgraph2 import PyCallGraph
from pycallgraph2.output import GraphvizOutput
async def gen_1():
    """Yield 0..9, sleeping one second before each value (simulated slow I/O)."""
    current = 0
    while current < 10:
        await asyncio.sleep(1)  # Could be a slow HTTP request
        yield current
        current += 1
async def gen_2(it):
    """Re-yield each item from `it` doubled, sleeping one second per item."""
    async for item in it:
        await asyncio.sleep(1)  # Could be a slow HTTP request
        doubled = item * 2
        yield doubled
async def gen_3(it):
    """Re-yield each item from `it` plus three, sleeping one second per item."""
    async for item in it:
        await asyncio.sleep(1)  # Could be a slow HTTP request
        shifted = item + 3
        yield shifted
async def run():
    """Trace the three-stage async generator pipeline and render it as a PNG."""
    file_path = '/'.join([
        'data/output/images',
        '0201_0101_asyncio.png'
    ])
    graphviz = GraphvizOutput()
    graphviz.output_file = file_path
    # Everything executed inside this context is recorded in the call graph.
    with PyCallGraph(output=graphviz):
        # Chain gen_1 -> gen_2 -> gen_3 lazily; values flow on iteration.
        it_1 = gen_1()
        it_2 = gen_2(it_1)
        it_3 = gen_3(it_2)
        async for val in it_3:
            print(val)
if __name__ == '__main__':
    # asyncio.run creates an event loop, runs the pipeline, and closes the loop.
    asyncio.run(run())
| 3.28125 | 3 |
setup.py | alunduil/muniments | 1 | 12761937 | <gh_stars>1-10
# Copyright (C) 2015 by <NAME> <<EMAIL>>
#
# muniments is freely distributable under the terms of an MIT-style license.
# See COPYING or http://www.opensource.org/licenses/mit-license.php.
import os
from setuptools import find_packages
from setuptools import setup
from codecs import open
# Execute information.py so NAME, VERSION, DESCRIPTION, URL, AUTHOR,
# AUTHOR_EMAIL and LICENSE are defined in this module's namespace.
with open(os.path.join(os.path.dirname(__file__), 'muniments', 'information.py'), 'r', encoding = 'utf-8') as fh:
    exec(fh.read(), globals(), locals())

# All keyword arguments for setuptools.setup() are accumulated here.
PARAMS = {}

PARAMS['name'] = NAME # flake8: noqa—provided by exec
PARAMS['version'] = VERSION # flake8: noqa—provided by exec
PARAMS['description'] = DESCRIPTION # flake8: noqa—provided by exec

with open(os.path.join(os.path.dirname(__file__), 'README.rst'), 'r', encoding = 'utf-8') as fh:
    PARAMS['long_description'] = fh.read()

PARAMS['url'] = URL # flake8: noqa—provided by exec
PARAMS['author'] = AUTHOR # flake8: noqa—provided by exec
PARAMS['author_email'] = AUTHOR_EMAIL # flake8: noqa—provided by exec
PARAMS['license'] = LICENSE # flake8: noqa—provided by exec

# Trove classifiers; alternative development statuses kept for easy switching.
PARAMS['classifiers'] = (
    'Development Status :: 1 - Planning',
    # 'Development Status :: 2 - Pre-Alpha',
    # 'Development Status :: 3 - Alpha',
    # 'Development Status :: 4 - Beta',
    # 'Development Status :: 5 - Production/Stable',
    # 'Development Status :: 6 - Mature',
    'Environment :: Console',
    'Environment :: No Input/Output (Daemon)',
    'Environment :: Web Environment',
    'Intended Audience :: End Users/Desktop',
    'Intended Audience :: Information Technology',
    'Intended Audience :: System Administrators',
    'License :: OSI Approved',
    'License :: OSI Approved :: MIT License',
    'Operating System :: POSIX',
    'Operating System :: POSIX :: Linux',
    'Programming Language :: Python :: 3 :: Only',
    'Programming Language :: Python :: Implementation :: CPython',
    'Topic :: Internet',
    'Topic :: Internet :: WWW/HTTP',
    'Topic :: Internet :: WWW/HTTP :: HTTP Servers',
    'Topic :: System',
    'Topic :: System :: Archiving',
    'Topic :: System :: Archiving :: Backup',
    'Topic :: System :: Distributed Computing',
)
PARAMS['keywords'] = (
    'backup',
    'cloud',
    'distributed',
    'scheduled',
)

PARAMS['packages'] = find_packages(exclude = ( 'test_*', ))

PARAMS['install_requires'] = (
    'crumbs',
    'tornado',
)

# ..note::
#     Documentation Requires:
#     * sphinx_rtd_theme
PARAMS['extras_require'] = {}

PARAMS['test_suite'] = 'nose.collector'
PARAMS['tests_require'] = (
    'coverage',
    'nose',
)

# Console entry points installed alongside the package.
PARAMS['entry_points'] = {
    'console_scripts': (
        'muniments = muniments:main',
        'muniments-scheduler = muniments.scheduler.api:main',
    ),
}

# Docs and sample configuration installed under share/doc/<name>-<version>.
PARAMS['data_files'] = (
    ( 'share/doc/{P[name]}-{P[version]}'.format(P = PARAMS), (
        'README.rst',
    )),
    ( 'share/doc/{P[name]}-{P[version]}/conf'.format(P = PARAMS), (
        'conf/logging.ini',
        'conf/muniments.ini',
    )),
)

setup(**PARAMS)
| 1.429688 | 1 |
src/ui/shell/views/car_rental.py | lucassaporetti/car-rental | 1 | 12761938 | from core.enum.menu_type import MenuType
from ui.shell.menu_factory import MenuFactory
class CarRental:
    """Shell controller that drives menu screens until the user exits."""

    def __init__(self):
        self.done = False
        self.ui = MenuFactory.get(MenuType.MAIN)
        self.prev_ui = self.ui

    def change_ui(self, menu_type: MenuType):
        """Switch to the menu identified by menu_type, remembering the old one."""
        self.prev_ui = self.ui
        self.ui = MenuFactory.get(menu_type)

    def run(self):
        """Execute the current menu repeatedly until an exit is requested."""
        while not self.done:
            if not self.ui:
                # No screen to show: terminate the loop.
                self.done = True
                continue
            next_menu = self.ui.execute()
            if next_menu is None or next_menu == MenuType.EXIT_MENU:
                self.done = True
            else:
                self.change_ui(next_menu)
| 2.65625 | 3 |
FreeCodeCamp.org/Dictionary.py | MizaN13/PythonAbc | 0 | 12761939 | <gh_stars>0
# Month abbreviation -> full month name lookup table.
monthConversions = {
    "Jan": "January",
    "Feb": "February",  # bug fix: value was misspelled "Februry"
    "Mar": "March",
    "Apr": "April",
    "May": "May",
    "Jun": "June",
    "Jul": "July",
    "Aug": "August",
    "Sep": "September",
    "Oct": "October",
    "Nov": "November",
    "Dec": "December",
}

# Two equivalent lookups: [] raises KeyError on a miss, .get() returns None.
print(monthConversions["Oct"])
print(monthConversions.get("Dec"))

# Loop Through a Dictionary: iterating a dict yields its keys.
for item in monthConversions:
    print(monthConversions[item])

# from Mosh: translate each phone digit to a word; unmapped characters
# fall back to "!".
phone = input("Phone: ")
digit_mapping = {
    "1": "One",
    "2": "Two",
    "3": "Three",
    "4": "Four",
}
output = ""
for digit in phone:
    output += digit_mapping.get(digit, "!") + " "
print(output)
chapter01/demo_1.2.py | OsbornHu/tensorflow-ml | 0 | 12761940 | #!/usr/bin/python2.7
# -*- coding:utf-8 -*-
# Author: NetworkRanger
# Date: 2018/11/2 下午9:23
# 1.2 TensorFlow 如何工作
import tensorflow as tf
# 1. 导入/生成样本数据集。
# 2. 转换和归一化数据。
# data = tf.nn.batch_norm_with_global_normalization(...)
# 3. 划分样本数据集为训练样本集、测试样本集和验证样本集。
# 4. 设置机器学习参数(超参数)。
learning_rate = 0.01
batch_size = 100
iterations = 1000
# 5. 初始化变量和占位符。
a_var = tf.constant(42)
# x_input = tf.placeholder(tf.float32, [None, input_size])
# y_input = tf.placeholder(tf.float32, [None, num_classses])
# 6. 定义模型结构。
# y_pred = tf.add(tf.mul(x_input, weight_matrix), b_matrix)
# 7. 声明损失函数。
# loss = tf.reduce_mean(tf.square(y_actual - y_pred))
# 8. 初始化模型和训练模型。
# with tf.Session(graph=graph) as session:
# ...
# session.run(...)
# ...
# 9. 评估机器学习模型。
# 10. 调优超参数。
# 11. 发布/预测结果。
| 2.59375 | 3 |
tools/toolsfeatures.py | MiguelSimao/GAN_outlier_detection | 2 | 12761941 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 13 17:38:37 2018
@author: simao
"""
import numpy as np
from scipy import stats
def onehotencoder(tind, *args):
    """One-hot encode a vector of integer class indices.

    :param tind: array of class indices, shape (n,) or (n, 1)
    :param args: optionally a single int giving the number of classes;
                 defaults to max(tind) + 1
    :return: float array of shape (n, maxclasses) with a 1 at each index
    """
    if len(args) == 0:
        maxclasses = max(tind) + 1
    elif len(args) == 1:
        maxclasses = args[0]
    else:
        raise NotImplementedError

    t = np.zeros((tind.shape[0], maxclasses))
    # Bug fix: np.int was removed in NumPy >= 1.24; the builtin int is the
    # documented replacement for this alias.
    t[np.arange(tind.shape[0]), tind.astype(int).reshape((-1,))] = 1
    return t
def onehotnoise(tind, maxclasses, maxprob=0.5):
    """Soft one-hot encoding: the true class gets `maxprob`, the rest split the remainder.

    :param tind: array of class indices
    :param maxclasses: number of output classes
    :param maxprob: probability mass assigned to the true class
    :return: float array of shape (n, maxclasses); each row sums to 1
    """
    indices = tind.astype('int').reshape((-1,))
    low = (1 - maxprob) / (maxclasses - 1)
    # Start every cell at the shared low probability, then raise the target.
    soft = np.full((tind.shape[0], maxclasses), low)
    soft[np.arange(tind.shape[0]), indices] = maxprob
    return soft
def label_noise(t, pmin=0.8, pmax=1.0):
    """Randomize soft labels in place: each row's argmax gets a random high
    probability in [pmin, pmax) and the remainder is spread over other classes.

    :param t: 2-D label array, modified in place and returned
    """
    hot = np.argmax(t, 1)
    rows = t.shape[0]
    high = np.random.uniform(pmin, pmax, (rows,))
    low = (1 - high) / (t.shape[1] - 1)
    for row in range(rows):
        # Fill the whole row with the low value, then restore the hot entry.
        t[row, :] = low[row]
        t[row, hot[row]] = high[row]
    return t
def targetmode(tar_sequence):
    """Return an array of the sequence's modal value, repeated to its length.

    Bug fix: the previous `stats.mode(x)[0][0]` indexing relies on the old
    SciPy ModeResult shape and breaks under SciPy >= 1.11 (keepdims default
    change). np.unique gives the same result: ties resolve to the smallest
    value, matching scipy.stats.mode's documented behavior.
    """
    values, counts = np.unique(tar_sequence, return_counts=True)
    idx = values[np.argmax(counts)]
    return np.tile(idx, len(tar_sequence))
| 2.40625 | 2 |
robotframework-jira/__init__.py | IlfirinPL/robotframework-jira | 2 | 12761942 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals

# Package metadata (placeholders were scrubbed by the dataset tooling).
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "0.0.1"
| 1.007813 | 1 |
scripts/make_pop_table.py | ourresearch/total-impact-webapp | 2 | 12761943 | <gh_stars>1-10
"""
Joins a list of country populations and a list of alpha-2 iso country codes.
Run from ~/projects/total-impact-webapp/scripts
country_populations.csv comes from many sources, compiled by the World Bank:
http://databank.worldbank.org/data/views/reports/tableview.aspx. It's been
slightly modified when some of the codes were wrong.
iso_country_codes comes from http://datahub.io/dataset/iso-3166-1-alpha-2-country-codes/resource/9c3b30dd-f5f3-4bbe-a3cb-d7b2c21d66ce
and had to have around 10 lines modified, updated, or added using wikipedia data
* http://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
* http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
"""
import csv
import json
def dict_by_alpha2():
    """Build a lookup mapping ISO alpha-3 country codes to alpha-2 codes.

    NOTE(review): Python 2 module (print statements elsewhere). The function
    name suggests keying by alpha-2, but rows are (alpha2, alpha3, ...) and
    we key on row[1] (alpha-3) -> row[0] (alpha-2).
    NOTE(review): open mode 'Urb' mixes universal-newline and binary flags;
    confirm it is accepted by the interpreter in use.
    """
    # make a dictionary to lookup alpha2 codes from alpha3 keys
    country_codes = {}
    with open('iso_country_codes.csv', 'Urb') as csvfile:
        rows = csv.reader(csvfile, delimiter=',')
        for row in rows:
            country_codes[row[1]] = row[0]
    return country_codes
def make_population_dict(alpha2_to_alpha2_table):
    """Build a population dict keyed by alpha-2 ISO code.

    :param alpha2_to_alpha2_table: alpha-3 -> alpha-2 lookup from dict_by_alpha2()
    NOTE(review): rows are expected as (name, ?, alpha3, population); the
    alpha-3 code in row[2] is translated to its alpha-2 key.
    """
    # make a population dict keyed by alpha2 iso code
    populations = {}
    with open('country_populations.csv', 'Urb') as csvfile:
        rows = csv.reader(csvfile, delimiter=',')
        for row in rows:
            alpha2_code = alpha2_to_alpha2_table[row[2]]
            populations[alpha2_code] = row[3]
    print populations
    return populations
def make_internet_usage_per_100_dict(alpha2_to_alpha2_table):
    """Build a dict of internet users per 100 people, keyed by alpha-2 code.

    Countries missing from the alpha-2 table are reported and skipped; rows
    with an empty value fall back to the hard-coded world average.
    """
    internet_users = {}
    with open('country_internet_users.csv', 'Urb') as csvfile:
        rows = csv.reader(csvfile, delimiter=',')
        for row in rows:
            try:
                alpha2_code = alpha2_to_alpha2_table[row[1]]
            except KeyError:
                print "this country isn't in the alpha2 table:", row[0], row[1]
                pass
            if row[2]:
                users_per_100 = row[2]
            else:
                # for NAs, use the world avg
                users_per_100 = 38.13233855
            internet_users[alpha2_code] = users_per_100
    print internet_users
    return internet_users
def make_total_internet_users_dict(pop_dict, internet_per_100_dict):
    """Combine population and per-100 usage into total internet users per country.

    :param pop_dict: alpha-2 -> population (string values from the CSV)
    :param internet_per_100_dict: alpha-2 -> users per 100 people
    :return: alpha-2 -> estimated total internet users (int)
    """
    ret = {}
    for country_code, users_per_100 in internet_per_100_dict.iteritems():
        print country_code, ":", users_per_100
        my_population = pop_dict[country_code]
        # Both inputs arrive as strings; convert before the arithmetic.
        ret[country_code] = int(float(users_per_100) * int(my_population) / 100)
    print ret
    return ret
# procedural code: build each table in turn and dump the populations as JSON.
# NOTE(review): "talble" typo below is inside a runtime print string.
# NOTE(review): alpha2_to_alpha3 actually maps alpha-3 -> alpha-2 (see
# dict_by_alpha2); the variable name is misleading.
print "making the ISO alpha2 to alpha3 talble"
alpha2_to_alpha3 = dict_by_alpha2()

print "making the population dict, keyed by alpha2"
pop_dict = make_population_dict(alpha2_to_alpha3)

print "making the internet users per 100 dict, keyed by alpha2"
internet_usage_per_100_dict = make_internet_usage_per_100_dict(alpha2_to_alpha3)

print "making the total internet users dict"
total_internet_users_dict = make_total_internet_users_dict(pop_dict, internet_usage_per_100_dict)

print "saving country_populations.json"
with open("country_populations.json", "w") as outfile:
    json.dump(pop_dict, outfile)

print "success!"
rolepermissions/tests/test_verifications.py | rensg001/django-role-permissions | 0 | 12761944 |
from django.test import TestCase
from django.contrib.auth import get_user_model
from model_mommy import mommy
from rolepermissions.roles import AbstractUserRole
from rolepermissions.checkers import has_role, has_permission, has_object_permission
from rolepermissions.permissions import register_object_checker
class VerRole1(AbstractUserRole):
    """Test role granting permission1 and permission2 (both enabled)."""
    available_permissions = {
        'permission1': True,
        'permission2': True,
    }
class VerRole2(AbstractUserRole):
    """Test role with permission3 enabled and permission4 declared but off."""
    available_permissions = {
        'permission3': True,
        'permission4': False,
    }
class VerRole3(AbstractUserRole):
    """Test role with an explicit role_name and both permissions declared off."""
    role_name = 'ver_new_name'
    available_permissions = {
        'permission5': False,
        'permission6': False,
    }
class HasRoleTests(TestCase):
    """Behaviour of has_role() for a user granted VerRole1 in setUp."""

    def setUp(self):
        # model_mommy creates a persisted user of the active user model.
        self.user = mommy.make(get_user_model())
        VerRole1.assign_role_to_user(self.user)

    def test_user_has_VerRole1(self):
        user = self.user
        self.assertTrue(has_role(user, VerRole1))

    def test_user_does_not_have_VerRole2(self):
        user = self.user
        self.assertFalse(has_role(user, VerRole2))

    def test_user_has_VerRole1_or_VerRole2(self):
        # A list argument matches when ANY of the roles is held.
        user = self.user
        self.assertTrue(has_role(user, [VerRole1, VerRole2]))

    def test_has_role_by_name(self):
        user = self.user
        self.assertTrue(has_role(user, 'ver_role1'))

    def test_user_has_VerRole1_or_VerRole3_by_name(self):
        # VerRole3 is looked up by its explicit role_name, not the class name.
        user = self.user
        VerRole3.assign_role_to_user(user)
        self.assertTrue(has_role(user, ['ver_role1', 'ver_new_name']))

    def test_not_existent_role(self):
        user = self.user
        self.assertFalse(has_role(user, 'not_a_role'))

    def test_none_user_param(self):
        # A None user never has a role.
        self.assertFalse(has_role(None, 'ver_role1'))
class HasPermissionTests(TestCase):
    """Behaviour of has_permission() for a user granted VerRole1 in setUp."""

    def setUp(self):
        self.user = mommy.make(get_user_model())
        VerRole1.assign_role_to_user(self.user)

    def test_has_VerRole1_permission(self):
        user = self.user
        self.assertTrue(has_permission(user, 'permission1'))

    def test_dos_not_have_VerRole1_permission(self):
        # NOTE(review): "dos_not" typo in the method name; renaming would
        # change the reported test id, so it is left as-is.
        user = self.user
        VerRole1.assign_role_to_user(user)
        self.assertFalse(has_permission(user, 'permission3'))

    def test_not_existent_permission(self):
        user = self.user
        self.assertFalse(has_permission(user, 'not_a_permission'))

    def test_user_with_no_role(self):
        # A fresh user with no role assigned has no permissions at all.
        user = mommy.make(get_user_model())
        self.assertFalse(has_permission(user, 'permission1'))

    def test_none_user_param(self):
        self.assertFalse(has_permission(None, 'ver_role1'))
class HasObjectPermissionTests(TestCase):
    """Behaviour of has_object_permission() with a checker registered in setUp."""

    def setUp(self):
        self.user = mommy.make(get_user_model())
        VerRole1.assign_role_to_user(self.user)

        # Registered fresh for every test; the checker simply reflects the
        # truthiness of the object (`obj and True` -> True when obj is truthy,
        # obj itself when falsy).
        @register_object_checker()
        def obj_checker(role, user, obj):
            return obj and True

    def test_has_object_permission(self):
        user = self.user
        self.assertTrue(has_object_permission('obj_checker', user, True))

    def test_does_not_have_object_permission(self):
        user = self.user
        self.assertFalse(has_object_permission('obj_checker', user, False))

    def test_check_none_role_if_user_has_no_role(self):
        # Even a role-less user passes: the checker ignores the role argument.
        user = mommy.make(get_user_model())
        self.assertTrue(has_object_permission('obj_checker', user, True))
| 2.5 | 2 |
w2_regex/find_nums.py | polde-live/python-mich-3 | 0 | 12761945 | import re
fh = open('data.txt')  # NOTE(review): handle is never closed; a with-block would be safer
def sumNums(line):
    """
    Sum every run of digits found in a line.
    """
    return sum(int(token) for token in re.findall('[0-9]+', line))
s = 0
# Accumulate the digit-sum of every line in the file.
for line in fh:
    s += sumNums(line.rstrip())

print ("Sum of numbers in file:\t %d" % s)
| 3.78125 | 4 |
Plot-Data-with-Erros.py | AlexTsagas/Quality-Graphs | 1 | 12761946 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
from scipy.optimize import curve_fit
import matplotlib.colors as mcolors
# Write with LaTeX: route all text rendering through an external LaTeX install.
rc('text', usetex=True)
rc('font', family='serif')
def func(x, a, b):
    """Linear model y = a*x + b used by the curve fits below."""
    return a * x + b
# Data
# Measured field strengths B (units 1e-4 T) and inverse radii 1/r (1/cm),
# each with its uncertainty, for two coil configurations.
B1 = np.array([9.38, 12.46, 15.57])
dB1 = np.array([0.04, 0.04, 0.04])
r1 = np.array([0.217, 0.28, 0.38])
dr1 = np.array([0.024, 0.04, 0.07])

B2 = np.array([9.38, 12.46, 15.57])
dB2 = np.array([0.04, 0.04, 0.04])
r2 = np.array([0.2, 0.2500, 0.33])
dr2 = np.array([0.02, 0.03, 0.06])

# Fitting
x = np.linspace(0.15, 0.4, 5)
# NOTE(review): curve_fit's `sigma` parameter expects the standard deviation
# of the y-data; 1/dB**2 looks like a weight vector instead — confirm intent.
popt1, pcov1 = curve_fit(func, r1, B1, sigma=1./(dB1*dB1))
perr1 = np.sqrt(np.diag(pcov1))
popt2, pcov2 = curve_fit(func, r2, B2, sigma=1./(dB2*dB2))
perr2 = np.sqrt(np.diag(pcov2))

# Plot
fig, ax = plt.subplots(1, 1)

# B1 = B1(1/r1): data points with error bars plus the fitted line.
ax.errorbar(r1, B1, xerr = dr1, yerr = dB1, capsize=3, color='black', elinewidth=1, markeredgewidth=1, linestyle='None', marker='o', label='Calculated \n Values of $B_1$')
ax.plot(x, func(x, *popt1), color='orange', label='$B1 = B1(1/r_1)$', linewidth=1.5)

# B2 = B2(1/r2)
ax.errorbar(r2, B2, xerr = dr2, yerr = dB2, capsize=3, color='black', elinewidth=1, markeredgewidth=1, linestyle='None', marker='s', label='Calculated \n Values of $B_2$')
ax.plot(x, func(x, *popt2), color='royalblue', label='$B2 = B2(1/r_2)$', linewidth=1.5)

# Figure Specifications
ax.set_ylabel('$B$ $(\mathrm{10^{-4}\,\mathrm{T}})$')
ax.set_xlabel('$1/r$ $(\mathrm{1/\mathrm{cm}})$')
ax.legend(loc = 'upper left', prop={'size': 11})

# Show the major grid lines with dark grey lines
ax.grid(b=True, which='major', color='#666666', linestyle='--')

# Show the minor grid lines
ax.minorticks_on()
ax.grid(b=True, which='minor', color='#999999', linestyle='--', alpha=0.2)

# fix quality
fig.tight_layout()
plt.show()

# Print lines' slopes and constant coefficients
print(f"\n\n a1 = {'%0.5f'%popt1[0]} ± {'%0.5f'%perr1[0]}", f",b1 = {'%0.5f'%popt1[1]} ± {'%0.5f'%perr1[1]}")
print(f"\n\n a2 = {'%0.5f'%popt2[0]} ± {'%0.5f'%perr2[0]}", f",b2 = {'%0.5f'%popt2[1]} ± {'%0.5f'%perr2[1]}")
| 2.59375 | 3 |
deeppy/dataset/stl10.py | purushothamgowthu/deeppy | 1,170 | 12761947 | import os
import numpy as np
import logging
from ..base import float_, int_
from .util import dataset_home, download, checksum, archive_extract, checkpoint
log = logging.getLogger(__name__)

# Download location and expected SHA-1 checksum of the STL-10 binary archive.
_URL = 'http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz'
_SHA1 = 'b22ebbd7f3c4384ebc9ba3152939186d3750b902'
class STL10(object):
    '''
    The STL-10 dataset [1]
    http://cs.stanford.edu/~acoates/stl10

    Downloads and unpacks the binary archive on first use, converts it to
    NumPy arrays cached in a single .npz file, and exposes the arrays plus
    the 10 predefined training folds.

    References:
    [1]: An Analysis of Single Layer Networks in Unsupervised Feature Learning,
    <NAME>, <NAME>, <NAME>, AISTATS, 2011.
    '''

    def __init__(self):
        # Dataset constants: class count, split sizes and CHW image shape.
        self.name = 'stl10'
        self.n_classes = 10
        self.n_train = 5000
        self.n_test = 8000
        self.n_unlabeled = 100000
        self.img_shape = (3, 96, 96)
        self.data_dir = os.path.join(dataset_home, self.name)
        self._npz_path = os.path.join(self.data_dir, 'stl10.npz')
        # Download/convert on first use, then load the cached arrays.
        self._install()
        self._arrays, self.folds = self._load()

    def arrays(self, dp_dtypes=False):
        """Return (x_train, y_train, x_test, y_test, x_unlabeled).

        When dp_dtypes is True, cast to deeppy's float_/int_ dtypes.
        """
        x_train, y_train, x_test, y_test, x_unlabeled = self._arrays
        if dp_dtypes:
            x_train = x_train.astype(float_)
            y_train = y_train.astype(int_)
            x_test = x_test.astype(float_)
            y_test = y_test.astype(int_)
            x_unlabeled = x_unlabeled.astype(float_)
        return x_train, y_train, x_test, y_test, x_unlabeled

    def _install(self):
        """Download, verify, unpack and convert the dataset (idempotent)."""
        # The checkpoint file marks a previously completed installation.
        checkpoint_file = os.path.join(self.data_dir, '__install_check')
        with checkpoint(checkpoint_file) as exists:
            if exists:
                return
            log.info('Downloading %s', _URL)
            filepath = download(_URL, self.data_dir)
            if _SHA1 != checksum(filepath, method='sha1'):
                raise RuntimeError('Checksum mismatch for %s.' % _URL)
            log.info('Unpacking %s', filepath)
            archive_extract(filepath, self.data_dir)
            unpack_dir = os.path.join(self.data_dir, 'stl10_binary')
            log.info('Converting data to Numpy arrays')
            filenames = ['train_X.bin', 'train_y.bin', 'test_X.bin',
                         'test_y.bin', 'unlabeled_X.bin']

            def bin2numpy(filepath):
                # '_X' files are images reshaped to (N,) + img_shape;
                # '_y' files stay as flat uint8 label vectors.
                with open(filepath, 'rb') as f:
                    arr = np.fromfile(f, dtype=np.uint8)
                    if '_X' in filepath:
                        arr = np.reshape(arr, (-1,) + self.img_shape)
                    return arr
            filepaths = [os.path.join(unpack_dir, f) for f in filenames]
            x_train, y_train, x_test, y_test, x_unlabeled = map(bin2numpy,
                                                                filepaths)
            # The 10 predefined training folds: each line is a list of
            # space-separated indices into the training split.
            folds = []
            with open(os.path.join(unpack_dir, 'fold_indices.txt'), 'r') as f:
                for line in f:
                    folds.append([int(s) for s in line.strip().split(' ')])
            folds = np.array(folds)
            with open(self._npz_path, 'wb') as f:
                np.savez(f, x_train=x_train, y_train=y_train, x_test=x_test,
                         y_test=y_test, x_unlabeled=x_unlabeled, folds=folds)

    def _load(self):
        """Load the cached .npz; returns ((x_train, ..., x_unlabeled), folds)."""
        # Arrays are extracted inside the with-block while the file is open.
        with open(self._npz_path, 'rb') as f:
            dic = np.load(f)
            return ((dic['x_train'], dic['y_train'], dic['x_test'],
                     dic['y_test'], dic['x_unlabeled']), dic['folds'])
| 2.484375 | 2 |
web_download_manager.py | litebook/litebook | 20 | 12761948 | <reponame>litebook/litebook<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# generated by wxGlade 0.6.3 on Sun Jul 08 15:59:49 2012
import wx
import wx.lib.newevent
import fileDownloader
import urlparse
import sys
import os
import thread
import traceback
import urllib
import platform
import os
# begin wxGlade: extracode
# end wxGlade
# Custom wx event for download-progress reports: (event class, binder).
(DownloadReport,EVT_DRA)=wx.lib.newevent.NewEvent()
#(DownloadUpdateAlert,EVT_DUA)=wx.lib.newevent.NewEvent()

# Cached OS name ('Linux', 'Windows', 'Darwin', ...) used by cur_file_dir().
MYOS = platform.system()
def cur_file_dir():
    """Return the directory containing this script or frozen executable.

    Handles plain scripts and py2exe-frozen binaries with per-OS quirks.
    NOTE(review): Python 2 code (str.decode on a byte path).
    """
    # Resolve a candidate path per platform.
    global MYOS
    if MYOS == 'Linux':
        path = sys.path[0]
    elif MYOS == 'Windows':
        # On Windows argv[0] is reliable even for a frozen executable.
        return os.path.dirname(os.path.abspath(sys.argv[0]))
    else:
        if sys.argv[0].find('/') != -1:
            path = sys.argv[0]
        else:
            path = sys.path[0]
    if isinstance(path,str):
        path=path.decode('utf-8')
    # If `path` is a directory we ran as a script and can return it directly;
    # if it is a file we are a compiled binary, so return its parent directory.
    if os.path.isdir(path):
        return path
    elif os.path.isfile(path):
        return os.path.dirname(path)
##def HumanSize(ffsize):
## fsize=float(ffsize)
## if fsize >= 1000000000.0:
## r=float(fsize)/1000000000.0
## return '%(#).2f' % {'#':r}+' GB'
## else:
## if fsize>=1000000:
## r=float(fsize)/1000000.0
## return '%(#).2f' % {'#':r}+' MB'
## else:
## if fsize>=1000:
## r=float(fsize)/1000.0
## return '%(#).2f' % {'#':r}+' KB'
## else:
## return '< 1KB'
class WebDownloadManager(wx.Frame):
    def __init__(self,parent):
        """
        Frame listing active web downloads with delete/cancel controls.

        NOTE(review): the old docstring mentioned a `savepath` parameter that
        this constructor does not take.
        """
        # begin wxGlade: DownloadManager.__init__
        #kwds["style"] = wx.DEFAULT_FRAME_STYLE
        wx.Frame.__init__(self, parent,-1)
        self.sizer_4_staticbox = wx.StaticBox(self, -1, "")
        self.sizer_3_staticbox = wx.StaticBox(self, -1, u"当前任务")
        self.list_ctrl_1 = wx.ListCtrl(self, -1, style=wx.LC_REPORT|wx.SUNKEN_BORDER)
        #self.button_8 = wx.Button(self, -1, u"添加")
        self.btn_del = wx.Button(self,wx.ID_DELETE,label=u'删除')
        self.btn_cancel = wx.Button(self,wx.ID_CANCEL,label=u'取消')
        # Maps task url -> list of cancellation flags consumed by the worker.
        self.tasklist = {}
##
##        self.Bind(EVT_DRA,self.updateProgress)
        self.Bind(wx.EVT_BUTTON, self.onClose, self.btn_cancel)
        self.Bind(wx.EVT_BUTTON, self.onDel, self.btn_del)
##        self.Bind(wx.EVT_BUTTON, self.inputURL, self.button_8)
        self.Bind(wx.EVT_CLOSE,self.onClose)
##        self.list_ctrl_1.Bind(wx.EVT_LIST_ITEM_SELECTED,self.onSelect)
        self.__set_properties()
        self.__do_layout()
        # end wxGlade

    def __set_properties(self):
        # begin wxGlade: DownloadManager.__set_properties
        _icon = wx.EmptyIcon()
        _icon.CopyFromBitmap(wx.Bitmap(cur_file_dir()+u"/icon/litebook-icon_32x32.png", wx.BITMAP_TYPE_ANY))
        self.SetIcon(_icon)
        self.SetTitle(u"WEB下载管理器")
        self.SetBackgroundColour(wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOW))
        # Columns: title, url, progress, size.
        self.list_ctrl_1.InsertColumn(0,u'书名',width=200)
        self.list_ctrl_1.InsertColumn(1,u'网址',width=300)
        self.list_ctrl_1.InsertColumn(2,u'进度')
        self.list_ctrl_1.InsertColumn(3,u'大小')
        self.SetSize((700,400))
        # end wxGlade

    def __do_layout(self):
        # begin wxGlade: DownloadManager.__do_layout
        sizer_2 = wx.BoxSizer(wx.VERTICAL)
        sizer_4 = wx.StaticBoxSizer(self.sizer_4_staticbox, wx.HORIZONTAL)
        sizer_3 = wx.StaticBoxSizer(self.sizer_3_staticbox, wx.HORIZONTAL)
        sizer_3.Add(self.list_ctrl_1, 1, wx.EXPAND, 0)
        sizer_2.Add(sizer_3, 1, wx.EXPAND, 0)
        sizer_4.Add((20, 20), 1, 0, 0)
        sizer_4.Add(self.btn_del, 0,0, 0)
        sizer_4.Add((20, 20), 0,0, 0)
        sizer_4.Add(self.btn_cancel, 0,0, 0)
        sizer_2.Add(sizer_4, 0, wx.EXPAND|wx.ALIGN_CENTER_VERTICAL, 5)
        self.SetSizer(sizer_2)
        #sizer_2.Fit(self)
        self.Layout()
        # end wxGlade

    def addTask(self,task):
        # Append a row for the task; task is a dict with bkname/url/size.
        # NOTE(review): sys.maxint is Python 2 only (use sys.maxsize on py3).
        ti=self.list_ctrl_1.InsertStringItem(sys.maxint,task['bkname'])
        #self.list_ctrl_1.SetItemData(ti,task['url'])
        self.list_ctrl_1.SetStringItem(ti,1,task['url'])
        self.list_ctrl_1.SetStringItem(ti,2,u'开始下载...')
        self.list_ctrl_1.SetStringItem(ti,3,task['size'])
        self.tasklist[task['url']]=[]

    def findItem(self,url):
        # Linear scan for the row whose url column matches; -1 when absent.
        i=-1
        while True:
            i=self.list_ctrl_1.GetNextItem(i)
            if i==-1: return -1
            if self.list_ctrl_1.GetItem(i,1).GetText()==url:
                return i

    def updateProgress(self,msg,url):
        # Update the progress column of the row for `url`, if still listed.
        item=self.findItem(url)
        if item == -1:
            return
        self.list_ctrl_1.SetStringItem(item,2,msg)

    def _delItemviaData(self,data):
        # Delete the row whose item-data matches; returns the index, or False.
        # NOTE(review): mixed return types (int index vs False) — callers must
        # compare carefully since 0 and False are equal in Python.
        i=-1
        while True:
            i=self.list_ctrl_1.GetNextItem(i)
            if i==-1: return False
            if self.list_ctrl_1.GetItemData(i)==data:
                self.list_ctrl_1.DeleteItem(i)
                return i

    def onDel(self,evt):
        # Collect all selected rows, then delegate deletion to delTask.
        item=-1
        item_list=[]
        while True:
            item=self.list_ctrl_1.GetNextSelected(item)
            if item == -1: break
            item_list.append(item)
        self.delTask(item_list)

    def delTask(self,item_list):
        # NOTE(review): the `break` means only the FIRST selected item's task
        # is flagged for cancellation, and `item` is referenced after the loop
        # (NameError when item_list is empty) — confirm intended behaviour.
        for item in item_list:
            url=self.list_ctrl_1.GetItem(item,1).GetText()
            self.tasklist[url].append(False)
            break
        self.list_ctrl_1.DeleteItem(item)

    def onClose(self,evt):
        # Hide instead of destroying so the manager can be reshown later.
        self.Hide()
# end of class DownloadManager
if __name__ == "__main__":
app = wx.PySimpleApp(0)
wx.InitAllImageHandlers()
frame_1 = WebDownloadManager(None)
app.SetTopWindow(frame_1)
frame_1.Show()
app.MainLoop()
| 1.789063 | 2 |
Geometry/Rect.py | xvzezi/cd2d-python | 0 | 12761949 | <gh_stars>0
# coding=utf-8
###########################
# file: Rect.py
# date: 2021-7-25
# author: Sturmfy
# desc: Basic definition of Rect
# version:
# 2021-7-25 init design
###########################
import sys
# Make the package root importable when this module is run from Geometry/.
sys.path.append("..")

from Grid import Grid
import Shape
import numpy as np
import Point
import Circle
class MapCube(Shape.Shape):
    """Static axis-aligned square obstacle used on the collision grid.

    Corners are stored clockwise from the bottom-left, centered on the
    owning GameObject's transform position.
    """

    def __init__(self, slen=1):
        # type: (int) -> None
        super(MapCube, self).__init__()
        self.SetSize(slen)
        # Map cubes never move, so the grid treats them as static occupants.
        self.is_static = True

    def SetSize(self, slen):
        '''
        Clock-wise corners from left-bottom
        '''
        # type: (int) -> None
        # 1. record init size
        self.side_len = slen
        # 2. record corners
        # NOTE(review): under Python 3 `slen / 2` is float division even for
        # integer sizes — confirm the rest of the system expects float corners.
        self.corners = []
        max_x = self.side_len / 2
        min_x = - max_x
        max_y = max_x
        min_y = - max_y
        self.corners.append(np.array([min_x, min_y]))
        self.corners.append(np.array([min_x, max_y]))
        self.corners.append(np.array([max_x, max_y]))
        self.corners.append(np.array([max_x, min_y]))

    def GetSize(self):
        # Side length as last passed to SetSize.
        return self.side_len

    def GetWorldCorners(self):
        # Corners translated into world space by the owner's position.
        res = []
        c_pos = self.gameObject.tranform.position
        for c in self.corners:
            res.append(c + c_pos)
        return res

    def BoundingRadius(self):
        # Half-diagonal of the square; 1.414 approximates sqrt(2).
        return 1.414 * self.side_len / 2

    def IsPointIn(self, x, y):
        # Strict inequalities: points exactly on the border count as outside.
        c_pos = self.gameObject.tranform.position
        half_l = self.side_len / 2
        min_x = c_pos[0] - half_l
        max_x = c_pos[0] + half_l
        min_y = c_pos[1] - half_l
        max_y = c_pos[1] + half_l
        return min_x < x and x < max_x and min_y < y and y < max_y

    def PaintOnGrid(self, static_grid, dyna_grid):
        # type: (Grid, Grid) -> None
        # only paint, do not return conflicts
        c_pos = self.gameObject.tranform.position + self.center
        static_grid.Add(int(c_pos[0]), int(c_pos[1]), self)
        return None

    def UnpaintOnGrid(self, grid):
        # type: (Grid) -> None
        c_pos = self.gameObject.tranform.position + self.center
        grid.Remove(int(c_pos[0]), int(c_pos[1]), self)
        return None

    def TestCollision(self, otherShape):
        # Two static map cubes never collide; otherwise defer to the other
        # shape's collision test (double dispatch).
        if isinstance(otherShape, MapCube):
            return False
        else:
            return otherShape.TestCollision(self)
| 2.671875 | 3 |
fixtures/fragments/test2.py | jdkato/txtlint | 0 | 12761950 | <gh_stars>0
"""
This module defines pdoc's documentation objects. A documentation object
corresponds to *something* in your Python code that has a docstring or type
annotation. Typically, this only includes modules, classes, functions and
methods. However, `pdoc` adds support for extracting documentation from the
abstract syntax tree, which means that variables (module, class or instance)
are supported too.
There are four main types of documentation objects:
- `Module`
- `Class`
- `Function`
- `Variable`
All documentation types make heavy use of `@functools.cached_property`
decorators.
This means they have a large set of attributes that are lazily computed on
first access.
By convention, all attributes are read-only, although this is not enforced at
runtime.
"""
from __future__ import annotations
import enum
import inspect
import os
import pkgutil
import re
import sys
import textwrap
import traceback
import types
import warnings
from abc import ABCMeta, abstractmethod
from collections.abc import Callable
from functools import wraps
from pathlib import Path
from typing import Any, ClassVar, Generic, TypeVar, Union
from pdoc import doc_ast, doc_pyi, extract
from pdoc.doc_types import (
GenericAlias,
NonUserDefinedCallables,
empty,
resolve_annotations,
safe_eval_type,
)
from ._compat import cache, cached_property, formatannotation, get_origin
def _include_fullname_in_traceback(f):
"""
`Doc.__repr__` should not raise, but it may raise if we screwed up.
Debugging this is a bit tricky, because, well, we can't repr() in the
traceback either then.
This decorator adds location information to the traceback, which helps
tracking down bugs.
"""
@wraps(f)
def wrapper(self):
try:
return f(self)
except Exception as e:
raise RuntimeError(f"Error in {self.fullname}'s repr!") from e
return wrapper
T = TypeVar("T")
class Doc(Generic[T]):
    """
    A base class for all documentation objects.
    """
    modulename: str
    """
    The module that this object is in, for example `pdoc.doc`.
    """
    qualname: str
    """
    The qualified identifier name for this object. For example, if we have the
    following code:
    ```python
    class Foo:
        def bar(self):
            pass
    ```
    The qualname of `Foo`'s `bar` method is `Foo.bar`. The qualname of the
    `Foo` class is just `Foo`.
    See <https://www.python.org/dev/peps/pep-3155/> for details.
    """
    obj: T
    """
    The underlying Python object.
    """
    taken_from: tuple[str, str]
    """
    `(modulename, qualname)` of this doc object's original location.
    In the context of a module, this points to the location it was imported
    from, in the context of classes, this points to the class an attribute is
    inherited from.
    """
    def __init__(
        self, modulename: str, qualname: str, obj: T,
        taken_from: tuple[str, str]
    ):
        """
        Initializes a documentation object, where `modulename` is the name this
        module is defined in, `qualname` contains a dotted path leading to the
        object from the module top-level, and `obj` is the object to document.
        """
        self.modulename = modulename
        self.qualname = qualname
        self.obj = obj
        self.taken_from = taken_from
    @cached_property
    def fullname(self) -> str:
        """
        The full qualified name of this doc object, for example `pdoc.doc.Doc`.
        """
        # qualname is empty for modules
        return f"{self.modulename}.{self.qualname}".rstrip(".")
    @cached_property
    def name(self) -> str:
        """
        The name of this object. For top-level functions and classes, this is
        equal to the qualname attribute.
        """
        return self.fullname.split(".")[-1]
    @cached_property
    def docstring(self) -> str:
        """
        The docstring for this object. It has already been cleaned by
        `inspect.cleandoc`.
        If no docstring can be found, an empty string is returned.
        """
        return _safe_getdoc(self.obj)
    @cached_property
    def source(self) -> str:
        """
        The source code of the Python object as a `str`.
        If the source cannot be obtained (for example, because we are dealing
        with a native C object), an empty string is returned.
        """
        return doc_ast.get_source(self.obj)
    @cached_property
    def source_file(self) -> Path | None:
        """
        The name of the Python source file in which this object was defined.
        `None` for built-in objects.
        """
        try:
            return Path(
                inspect.getsourcefile(self.obj) or inspect.getfile(self.obj)
            ) # type: ignore
        except TypeError:
            return None
    @cached_property
    def source_lines(self) -> tuple[int, int] | None:
        """
        Return a `(start, end)` line number tuple for this object.
        If no source file can be found, `None` is returned.
        """
        try:
            lines, start = inspect.getsourcelines(self.obj) # type: ignore
            return start, start + len(lines) - 1
        except Exception:
            return None
    @cached_property
    def is_inherited(self) -> bool:
        """
        If True, the doc object is inherited from another location.
        This most commonly refers to methods inherited by a subclass,
        but can also apply to variables that are assigned a class defined
        in a different module.
        """
        return (self.modulename, self.qualname) != self.taken_from
    @classmethod
    @property
    def type(cls) -> str:
        """
        The type of the doc object, either `"module"`, `"class"`, `"function"`,
        or `"variable"`.
        """
        return cls.__name__.lower()
    if sys.version_info < (3, 9): # pragma: no cover
        # no @classmethod @property in 3.8
        @property
        def type(self) -> str: # noqa
            return self.__class__.__name__.lower()
    def __lt__(self, other):
        # Sort by fullname with "__init__" stripped, so that a class's
        # constructor sorts in the same place as the class itself.
        assert isinstance(other, Doc)
        return self.fullname.replace("__init__", "").__lt__(
            other.fullname.replace("__init__", "")
        )
class Namespace(Doc[T], metaclass=ABCMeta):
"""
A documentation object that can have children. In other words, either a
module or a class.
"""
    @cached_property
    @abstractmethod
    def _member_objects(self) -> dict[str, Any]:
        """
        A mapping from *all* public and private member names to their Python
        objects (no filtering by leading underscore is applied here).
        """
    @cached_property
    @abstractmethod
    def _var_docstrings(self) -> dict[str, str]:
        """A mapping from some member variable names to their docstrings."""
    @cached_property
    @abstractmethod
    def _var_annotations(self) -> dict[str, Any]:
        """
        A mapping from some member variable names to their type annotations.
        """
    @abstractmethod
    def _taken_from(self, member_name: str, obj: Any) -> tuple[str, str]:
        """
        The location this member was taken from. If unknown,
        `(modulename, qualname)` is returned.
        """
        # Returning this namespace's own (modulename, qualname) marks the
        # member as not inherited (see Doc.is_inherited).
| 2.296875 | 2 |