repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
sprout42/StarStruct | starstruct/elementbitfield.py | """StarStruct element class."""
import struct
import re
from starstruct.element import register, Element
from starstruct.modes import Mode
from starstruct.bitfield import BitField
from starstruct.packedbitfield import PackedBitField
@register
class ElementBitField(Element):
    """
    The bitfield StarStruct element class.

    Translates a BitField (or PackedBitField) value to/from a single unsigned
    integer struct field.
    """

    def __init__(self, field, mode=Mode.Native, alignment=1):
        """Initialize a StarStruct element object."""
        # All of the type checks have already been performed by the class
        # factory
        self.name = field[0]
        # field[2] is the BitField/PackedBitField used to translate values
        self.ref = field[2]

        self._mode = mode
        self._alignment = alignment

        # Validate that the format specifiers are valid struct formats, this
        # doesn't have to be done now because the format will be checked when
        # any struct functions are called, but it's better to inform the user of
        # any errors earlier.
        # The easiest way to perform this check is to create a "Struct" class
        # instance, this will also increase the efficiency of all struct related
        # functions called.
        self.format = mode.value + field[1]
        self._struct = struct.Struct(self.format)

    @staticmethod
    def valid(field):
        """
        Validation function to determine if a field tuple represents a valid
        bitfield element type.

        The basics have already been validated by the Element factory class,
        validate that the struct format is a valid unsigned numeric value.

        Signed fields are not allowed, bit manipulation does not work well
        """
        # NOTE(review): the regex is not anchored at the end, so trailing
        # characters after the format letter are accepted — confirm intended.
        return (len(field) == 3 and
                isinstance(field[1], str) and
                re.match(r'\d*[BHILQN]', field[1]) and
                isinstance(field[2], (BitField, PackedBitField)))

    def validate(self, msg):
        """
        Ensure that the supplied message contains the required information for
        this element object to operate.

        The "bitfield" element requires no further validation.
        """
        pass

    def update(self, mode=None, alignment=None):
        """Change the mode and/or alignment of the struct format."""
        if alignment:
            self._alignment = alignment

        if mode:
            self._mode = mode
            # Replace the leading (single-character) mode prefix
            self.format = mode.value + self.format[1:]
            # recreate the struct with the new format
            self._struct = struct.Struct(self.format)

    def pack(self, msg):
        """Pack the provided values into the supplied buffer."""
        # Turn the bitfield value (single member, name, or iterable of them)
        # into a single number and pack it into the specified format
        data = self._struct.pack(self.ref.pack(msg[self.name]))

        # If the data does not meet the alignment, pad up to the next multiple
        # of the alignment.
        # FIX: the previous implementation added `len(data) % alignment`
        # bytes, which is only correct for alignments of 1 or 2.
        missing_bytes = (self._alignment - (len(data) % self._alignment)) % self._alignment
        if missing_bytes:
            data += b'\x00' * missing_bytes
        return data

    def unpack(self, msg, buf):
        """Unpack data from the supplied buffer using the initialized format."""
        ret = self._struct.unpack_from(buf, 0)

        # Remember to remove any alignment-based padding.
        # FIX: the previous `alignment - 1 - (size % alignment)` skipped
        # (alignment - 1) extra bytes even when the field was already aligned.
        extra_bytes = (self._alignment -
                       (struct.calcsize(self.format) % self._alignment)) % self._alignment
        unused = buf[struct.calcsize(self.format) + extra_bytes:]

        # Convert the returned value to the referenced BitField type
        try:
            member = self.ref.unpack(ret[0])
        except ValueError as e:
            raise ValueError(
                'Value: {0} was not valid for {1}\n\twith msg: {2},\n\tbuf: {3}'.format(
                    ret[0], self.ref, msg, buf
                )).with_traceback(e.__traceback__)

        return (member, unused)

    def make(self, msg):
        """Return the "transformed" value for this element"""
        return self.ref.make(msg[self.name])
|
sprout42/StarStruct | starstruct/tests/test_elementbase.py | <reponame>sprout42/StarStruct
#!/usr/bin/env python3
"""Tests for the elementbase class"""
import unittest
from starstruct.elementbase import ElementBase
# pylint: disable=line-too-long,invalid-name,no-self-use
class TestElementBase(unittest.TestCase):
    """ElementBase module tests"""

    def test_valid(self):
        """Test field formats that are valid ElementBase elements."""
        valid_fields = [
            ('a', 'd'),  # double
            ('b', 'f'),  # float
            ('e', '?'),  # bool: 0, 1
        ]
        for candidate in valid_fields:
            with self.subTest(candidate):  # pylint: disable=no-member
                self.assertTrue(ElementBase.valid(candidate))

    def test_not_valid(self):
        """Test field formats that are not valid ElementBase elements."""
        invalid_fields = [
            ('a', '4x'),   # 4 pad bytes
            ('b', 'z'),    # invalid
            ('c', '1'),    # invalid
            ('d', '9S'),   # invalid (must be lowercase)
            ('e', 'b'),    # signed byte: -128, 127
            ('f', 'H'),    # unsigned short: 0, 65535
            ('g', '10s'),  # 10 byte string
            ('h', 'L'),    # unsigned long: 0, 2^32-1
            ('i', '/'),    # invalid
        ]
        for candidate in invalid_fields:
            with self.subTest(candidate):  # pylint: disable=no-member
                self.assertFalse(ElementBase.valid(candidate))
|
sprout42/StarStruct | starstruct/elementconstant.py | <reponame>sprout42/StarStruct
"""
The constant element class for StarStruct
To be used in the following way:
.. code-block:: python
ExampleMessage = Message('constant_message', [
('regular', 'B'), # Two regular messages
('fill_in_later', 'H'),
('ending_sequence', 'II', (0xAA, 0xBB)), # An ending sequence to a message
# that's always the same
])
:todo: Not sure if alignment is working correctly here, or if it needs to do anything
"""
import struct
from typing import Optional, Tuple
from starstruct.element import register, Element
from starstruct.modes import Mode
@register
class ElementConstant(Element):
    """
    The constant StarStruct element class.

    Always packs the same tuple of values; callers never need to supply a
    value for this field when packing.
    """

    def __init__(self, field, mode=Mode.Native, alignment=1):
        """Initialize a StarStruct constant element object."""
        self.name = field[0]
        self.format = field[1]
        # The constant tuple of values that is always packed
        self.values = field[2]

        self._mode = mode
        self._alignment = alignment

    @property
    def _struct(self):
        # Rebuilt on each access so a mode change via update() takes effect
        return struct.Struct(self._mode.value + self.format)

    @property
    def _packed(self):
        return self._struct.pack(*self.values)

    @staticmethod
    def valid(field: list) -> bool:
        """
        Validate whether this field could supply this element with the correct
        values.
        """
        if len(field) != 3 \
                or not isinstance(field[0], str) \
                or not isinstance(field[1], str) \
                or not isinstance(field[2], tuple):
            return False
        try:
            # FIX: struct.calcsize() raises struct.error for an invalid
            # format string; valid() must report False rather than raise.
            return bool(struct.calcsize(field[1]))
        except struct.error:
            return False

    def validate(self, msg):
        """
        Ensure that the supplied message contains the required information for
        this element object to operate.

        Constants will always be valid
        """
        return True

    def update(self, mode: Optional[Mode] = None, alignment: Optional[int] = None) -> None:
        """change the mode of the struct format"""
        # FIX: alignment previously defaulted to 1, which silently reset a
        # non-default alignment on every update(mode) call; default to None
        # like every other element class.
        if mode:
            self._mode = mode

        if alignment:
            self._alignment = alignment

    def pack(self, msg: dict) -> bytes:
        """
        Pack the provided values into the supplied buffer.

        :param msg: The message specifying the values to pack (ignored; the
            constant values are always packed)
        """
        return self._packed

    def unpack(self, msg: dict, buf: bytes) -> Tuple[bytes, bytes]:
        """Unpack data from the supplied buffer using the initialized format."""
        return (self._struct.unpack_from(buf), buf[self._struct.size:])

    def make(self, msg: dict):
        """
        Return the expected "made" value

        :param msg: The values to make (ignored)
        """
        return self.values
|
sprout42/StarStruct | starstruct/elementpad.py | <filename>starstruct/elementpad.py
"""StarStruct element class."""
import struct
import re
from starstruct.element import register, Element
from starstruct.modes import Mode
@register
class ElementPad(Element):
    """
    The padding StarStruct element class.

    Represents explicit pad bytes ('x' struct format); contributes no value
    to the unpacked message.
    """

    def __init__(self, field, mode=Mode.Native, alignment=1):
        """Initialize a StarStruct element object."""
        # All of the type checks have already been performed by the class
        # factory

        # Pad elements effectively have no name
        self.name = None

        # The ref attribute is required for all elements, but the base element
        # type does not have one
        self.ref = None

        self._mode = mode

        # Strictly speaking, a non-byte aligned message probably wouldn't have
        # explicit padding fields, and if it does they probably wouldn't be
        # unaligned, but however rare that case might be we need to ensure that
        # this field is properly aligned also.
        self._alignment = alignment

        # Validate that the format specifiers are valid struct formats, this
        # doesn't have to be done now because the format will be checked when
        # any struct functions are called, but it's better to inform the user of
        # any errors earlier.
        # The easiest way to perform this check is to create a "Struct" class
        # instance, this will also increase the efficiency of all struct related
        # functions called.
        self.format = mode.value + field[1]
        self._struct = struct.Struct(self.format)

    @staticmethod
    def valid(field):
        """
        Validation function to determine if a field tuple represents a valid
        pad element type.

        The basics have already been validated by the Element factory class,
        validate the specific struct format now.
        """
        return len(field) == 2 \
            and isinstance(field[1], str) \
            and re.match(r'\d*x', field[1])

    def validate(self, msg):
        """
        Ensure that the supplied message contains the required information for
        this element object to operate.

        The "padding" element requires no further validation.
        """
        pass

    def update(self, mode=None, alignment=None):
        """Change the mode and/or alignment of the struct format."""
        if alignment:
            self._alignment = alignment

        if mode:
            self._mode = mode
            # Replace the leading (single-character) mode prefix
            self.format = mode.value + self.format[1:]
            # recreate the struct with the new format
            self._struct = struct.Struct(self.format)

    def pack(self, msg):
        """Pack the provided values into the supplied buffer."""
        # Packing a pure-pad format takes no arguments and yields zero bytes
        data = self._struct.pack()

        # If the data does not meet the alignment, pad up to the next multiple
        # of the alignment.
        # FIX: the previous implementation added `len(data) % alignment`
        # bytes, which is only correct for alignments of 1 or 2.
        missing_bytes = (self._alignment - (len(data) % self._alignment)) % self._alignment
        if missing_bytes:
            data += b'\x00' * missing_bytes
        return data

    def unpack(self, msg, buf):
        """Unpack data from the supplied buffer using the initialized format."""
        # Remember to remove any alignment-based padding.
        # FIX: the previous `alignment - 1 - (size % alignment)` skipped
        # (alignment - 1) extra bytes even when the field was already aligned.
        extra_bytes = (self._alignment -
                       (struct.calcsize(self.format) % self._alignment)) % self._alignment
        unused = buf[struct.calcsize(self.format) + extra_bytes:]
        # Pad fields carry no value
        return (None, unused)

    def make(self, msg):
        """This shouldn't be called, but if called it returns nothing."""
        return None
|
sprout42/StarStruct | starstruct/message.py | """StarStruct class."""
import collections
import struct
import starstruct.modes
from starstruct.element import Element
from starstruct.startuple import StarTuple
# pylint: disable=line-too-long
class Message(object):
    """An object much like NamedTuple, but with additional formatting."""
    # pylint: disable=too-many-branches

    def __init__(self, name, fields, mode=starstruct.modes.Mode.Native, alignment=1):
        """
        Initialize a StarStruct object.

        Creates 2 internal items, a format string which is used to call the
        struct module functions for packing and unpacking data, and a
        namedtuple instance which is used to organize the data provided to the
        pack functions and returned from the unpack functions.
        """
        # The name must be a string, this is provided to the
        # collections.namedtuple constructor when creating the namedtuple class.
        if not name or not isinstance(name, str):
            raise TypeError('invalid name: {}'.format(name))

        self._name = name
        self.mode = mode
        self.alignment = alignment

        # The structure definition must be a list of
        #   ('name', 'format', <optional>)
        # tuples
        if not isinstance(fields, list) \
                or not all(isinstance(x, tuple) for x in fields):
            raise TypeError('invalid fields: {}'.format(fields))

        if not isinstance(mode, starstruct.modes.Mode):
            raise TypeError('invalid mode: {}'.format(mode))

        # Create an ordered dictionary (so element order is preserved) out of
        # the individual message fields.  Ensure that there are no duplicate
        # field names.
        self._elements = collections.OrderedDict()
        for field in fields:
            if field[0] not in self._elements:
                if isinstance(field[0], str):
                    self._elements[field[0]] = Element.factory(field, mode, alignment)
                elif isinstance(field[0], bytes):
                    # Byte-string names are decoded so namedtuple fields are
                    # always str.
                    # NOTE(review): the duplicate check above compares the raw
                    # bytes key against the decoded str keys, so b'x' and 'x'
                    # would not be flagged as duplicates — confirm intended.
                    self._elements[field[0].decode('utf-8')] = Element.factory(field, mode, alignment)
                else:
                    raise NotImplementedError
            else:
                raise TypeError('duplicate field {} in {}'.format(field[0], fields))

        # Validate all of the elements of this message
        for elem in self._elements.values():
            elem.validate(self._elements)
            # Give each element information about the other elements
            elem._elements = self._elements

        # Now that the format has been validated, create a named tuple with the
        # correct fields.  Pad elements have no name and are excluded.
        named_fields = [elem.name for elem in self._elements.values() if elem.name]
        self._tuple = StarTuple(self._name, named_fields, self._elements)

    def update(self, mode=None, alignment=None):
        """ Change the mode of a message. """
        if mode and not isinstance(mode, starstruct.modes.Mode):
            raise TypeError('invalid mode: {}'.format(mode))

        # Change the mode for all elements
        for key in self._elements.keys():
            self._elements[key].update(mode, alignment)

    def is_unpacked(self, other):
        """
        Provide a function that allows checking if an unpacked message tuple
        is an instance of what could be unpacked from a particular message
        object.
        """
        # First check to see if the passed in object is a namedtuple
        # that matches this message type
        if not isinstance(other, self._tuple):
            return False

        # Then check any element values that may be another message type to
        # ensure that the sub-elements are valid types.
        for key in self._elements.keys():
            if hasattr(self._elements[key].format, 'is_unpacked'):
                # If the format for an element is Message object (that has the
                # is_unpacked() function defined), call the is_unpacked()
                # function.
                msg = self._elements[key].format
                if not msg.is_unpacked(getattr(other, key)):
                    return False
            if hasattr(self._elements[key].format, 'keys'):
                # If the format for an element is a dictionary, attempt to
                # extract the correct item with the assumption that the ref
                # attribute identifies the discriminator.
                # Select the correct message object based on the value of the
                # referenced item.
                ref_val = getattr(other, self._elements[key].ref)
                if ref_val not in self._elements[key].format.keys():
                    return False
                msg = self._elements[key].format[ref_val]
                if not msg.is_unpacked(getattr(other, key)):
                    return False
        return True

    def pack(self, obj=None, **kwargs):
        """Pack the provided values using the initialized format."""
        # Handle a positional dictionary argument as well as the more generic kwargs
        if obj and isinstance(obj, dict):
            kwargs = obj
        # Each element packs its own field(s); concatenate in element order
        return b''.join(elem.pack(kwargs) for elem in self._elements.values())

    def unpack_partial(self, buf):
        """
        Unpack a partial message from a buffer.

        This doesn't re-use the "unpack_from" function name from the struct
        module because the parameters and return values are not consistent
        between this function and the struct module.
        """
        # Start from an all-None tuple and fill in fields as they unpack
        msg = self._tuple._make([None] * len(self._tuple._fields))
        for elem in self._elements.values():
            (val, unused) = elem.unpack(msg, buf)
            # Each element consumes its bytes; continue with the remainder
            buf = unused
            # Update the unpacked message with all non-padding elements
            if elem.name:
                msg = msg._replace(**dict([(elem.name, val)]))
        return (msg, buf)

    def unpack(self, buf):
        """Unpack the buffer using the initialized format."""
        (msg, unused) = self.unpack_partial(buf)
        if unused:
            error = 'buffer not fully used by unpack: {}'.format(unused)
            raise ValueError(error)
        return msg

    def make(self, obj=None, **kwargs):
        """
        A utility function that returns a namedtuple based on the current
        object's format for the supplied object.
        """
        if obj is not None:
            if isinstance(obj, dict):
                kwargs = obj
            elif isinstance(obj, tuple):
                # NOTE(review): assumes any tuple passed in is a namedtuple
                # with an _asdict() method — a plain tuple would raise
                # AttributeError here; confirm intended.
                kwargs = obj._asdict()
        msg = self._tuple._make([None] * len(self._tuple._fields))
        # Only attempt to "make" fields that are in the tuple
        for field in self._tuple._fields:
            val = self._elements[field].make(kwargs)
            msg = msg._replace(**dict([(field, val)]))
        # msg.__packed = self.pack(**kwargs)
        return msg

    def __len__(self):
        """Return the packed size of this message in bytes."""
        if self._elements == {}:
            return 0

        size = 0
        for val in self._elements.values():
            if isinstance(val.format, (bytes, str)):
                # Plain struct format string: size comes from calcsize
                size += struct.calcsize(val.format)
            elif isinstance(val.format, (dict, )):
                # Discriminated-union element: all variants must have the
                # same size for the total to be well defined
                lengths = {len(item) for item in val.format.values()}
                if len(lengths) > 1:
                    raise AttributeError('Unable to calculate size due to differing size sub items')
                size += sum(lengths)
            else:
                # presumably a nested Message; len() recurses — confirm
                size += len(val.format)
        return size
|
sprout42/StarStruct | starstruct/elementfixedpoint.py | """StarStruct fixedpoint element class."""
# pylint: disable=line-too-long
import re
import struct
import decimal
from decimal import Decimal
from starstruct.element import register, Element
from starstruct.modes import Mode
# TODO: I think we could probably just do most of this with struct.calcsize
# TODO: I think we could probably just do most of this with struct.calcsize
# Map struct integer format characters to their size in bytes
BITS_FORMAT = {
    ('c', 'b', 'B'): 1,
    ('h', 'H'): 2,
    ('i', 'I', 'l', 'L'): 4,
    ('q', 'Q'): 8
}


def get_bits_length(pack_format):
    """
    Helper function to return the number of bits for the format.

    :param pack_format: a struct integer format string, optionally prefixed
        with a byte-order character
    :raises ValueError: if the format is not a recognized integer format
    """
    # Strip a leading byte-order/mode character.
    # FIX: '!' (network order) is the valid struct prefix; the previous list
    # incorrectly contained '+', which struct never uses.
    if pack_format[0] in ('@', '=', '<', '>', '!'):
        pack_format = pack_format[1:]

    # The format groups are disjoint single characters, so the first match
    # is the only possible match
    for formats, num_bytes in BITS_FORMAT.items():
        if re.match(r'|'.join(formats), pack_format):
            return num_bytes * 8

    err = 'Pack format {0} was not a valid fixed point specifier'
    raise ValueError(err.format(pack_format))


def get_fixed_point(num, pack_format, precision):
    """
    Helper function to get the shifted integer representation of a fixed
    point number.

    :param num: the number to convert (anything Decimal() accepts)
    :param pack_format: struct integer format the value must fit in
    :param precision: number of fractional bits
    :raises ValueError: if num cannot be converted, or does not fit
    """
    if not isinstance(num, Decimal):
        try:
            num = Decimal(num)
        # FIX: previously a bare `except:`; catch only the errors Decimal()
        # actually raises for bad input
        except (decimal.InvalidOperation, TypeError, ValueError) as exc:
            raise ValueError('Num {0} could not be converted to a Decimal'.format(num)) from exc

    bits = get_bits_length(pack_format)

    if bits < precision:
        raise ValueError('Format {1} too small for the given precision of {0}'.format(pack_format, precision))

    if num >= 2 ** (bits - precision):
        # FIX: the message previously reported `8 * (bits - precision)`,
        # multiplying a bit count by 8 as if it were bytes
        raise ValueError('num: {0} must fit in the specified number of available bits {1}'.format(num, bits - precision))

    # Shift the value left by `precision` fractional bits
    num_shifted = int(num * (2 ** precision))
    return num_shifted


def get_fixed_bits(num, pack_format, precision):
    """
    Helper function to pack a fixed point value into bytes using the given
    struct format.
    """
    num_shifted = get_fixed_point(num, pack_format, precision)
    return struct.pack(pack_format, num_shifted)
@register
class ElementFixedPoint(Element):
    """
    A StarStruct element class for fixed point number fields.

    Uses the built in Decimal class

    Example Usage::

        from starstruct.message import Message
        example_precision = 8
        example_struct = starstruct.Message('example', [('my_fixed_point', 'F', 'I', example_precision)])
        my_data = {
            'my_fixed_point': '120.0'
        }
        packed_struct = example_struct.make(my_data)
    """

    def __init__(self, field, mode=Mode.Native, alignment=1):
        """Initialize a StarStruct element object.

        ``field`` is one of:
            ('name', 'F', <struct int format>, precision)
            ('name', 'F', <struct int format>, precision, decimal_prec)
        """
        # TODO: Add checks in the class factory?
        self.name = field[0]

        # the ref attribute is required, but this element doesn't quite have
        # one, instead use the ref field to hold the fixed point format
        # attributes
        self.ref = {}
        # Number of fractional bits
        self.ref['precision'] = field[3]
        # Optional decimal context precision applied when unpacking
        if len(field) == 5:
            self.ref['decimal_prec'] = field[4]
        else:
            self.ref['decimal_prec'] = None

        self._mode = mode
        self._alignment = alignment

        self.format = mode.value + field[2]
        self._struct = struct.Struct(self.format)

    @staticmethod
    def valid(field):
        """
        Validation function to determine if a field tuple represents a valid
        fixedpoint element type.

        The basics have already been validated by the Element factory class,
        validate the specific struct format now.
        """
        return len(field) >= 4 \
            and isinstance(field[1], str) \
            and re.match(r'\d*F', field[1]) \
            and isinstance(field[2], str) \
            and isinstance(field[3], (int, float, Decimal))

    def validate(self, msg):
        """
        Ensure that the supplied message contains the required information for
        this element object to operate.

        The "fixedpoint" element requires no further validation.
        """
        pass

    def update(self, mode=None, alignment=None):
        """Change the mode and/or alignment of the struct format."""
        if alignment:
            self._alignment = alignment

        if mode:
            self._mode = mode
            # Replace the leading (single-character) mode prefix
            self.format = mode.value + self.format[1:]
            # recreate the struct with the new format
            self._struct = struct.Struct(self.format)

    def pack(self, msg):
        """Pack the provided values into the specified buffer."""
        packing_decimal = Decimal(msg[self.name])

        # Shift the decimal up by `precision` fractional bits and pack the
        # resulting integer
        fixed_point = get_fixed_point(packing_decimal, self.format, self.ref['precision'])
        data = self._struct.pack(fixed_point)

        # If the data does not meet the alignment, pad up to the next multiple
        # of the alignment.
        # FIX: the previous implementation added `len(data) % alignment`
        # bytes, which is only correct for alignments of 1 or 2.
        missing_bytes = (self._alignment - (len(data) % self._alignment)) % self._alignment
        if missing_bytes:
            data += b'\x00' * missing_bytes
        return data

    def unpack(self, msg, buf):
        """Unpack data from the supplied buffer using the initialized format."""
        ret = self._struct.unpack_from(buf, 0)[0]

        # Remember to remove any alignment-based padding.
        # FIX: the previous `alignment - 1 - (size % alignment)` skipped
        # (alignment - 1) extra bytes even when the field was already aligned.
        extra_bytes = (self._alignment -
                       (struct.calcsize(self.format) % self._alignment)) % self._alignment
        unused = buf[struct.calcsize(self.format) + extra_bytes:]

        # NOTE: this mutates the process-wide decimal context precision
        if self.ref['decimal_prec']:
            decimal.getcontext().prec = self.ref['decimal_prec']
        else:
            decimal.getcontext().prec = 26

        # Shift the unpacked integer back down by `precision` fractional bits
        ret_decimal = Decimal(ret) / Decimal(2 ** self.ref['precision'])
        return (ret_decimal, unused)

    def make(self, msg):
        """Return the "transformed" value for this element"""
        return msg[self.name]
|
sprout42/StarStruct | starstruct/tests/test_elementlength.py | #!/usr/bin/env python3
"""Tests for the elementlength class"""
import unittest
from starstruct.elementlength import ElementLength
# pylint: disable=line-too-long,invalid-name
class TestElementLength(unittest.TestCase):
    """ElementLength module tests"""

    def test_valid(self):
        """Test field formats that are valid ElementLength elements."""
        valid_fields = [
            ('a', 'B', 'data'),  # unsigned byte: 0, 255
            ('b', 'H', 'data'),  # unsigned short: 0, 65535
            ('d', 'L', 'data'),  # unsigned long: 0, 2^32-1
            ('d', 'Q', 'data'),  # unsigned long long: 0, 2^64-1
        ]
        for candidate in valid_fields:
            with self.subTest(candidate):  # pylint: disable=no-member
                self.assertTrue(ElementLength.valid(candidate))

    def test_not_valid(self):
        """Test field formats that are not valid ElementLength elements."""
        invalid_fields = [
            ('a', '4x', 'data'),   # 4 pad bytes
            ('b', 'z', 'data'),    # invalid
            ('c', '1', 'data'),    # invalid
            ('d', '10s', 'data'),  # 10 byte string
            ('e', '9S', 'data'),   # invalid (must be lowercase)
            ('f', '/', 'data'),    # invalid
            ('g', '?', 'data'),    # bool: 0, 1
            ('h', '10s', 'data'),  # 10 byte string
            ('i', 'b', 'data'),    # signed byte: -128, 127
            ('j', 'h', 'data'),    # signed short: -32768, 32767
            ('k', 'l', 'data'),    # signed long: -2^31, 2^31-1
            ('l', 'B'),            # unsigned byte (no ref string)
        ]
        for candidate in invalid_fields:
            with self.subTest(candidate):  # pylint: disable=no-member
                self.assertFalse(ElementLength.valid(candidate))
|
sprout42/StarStruct | starstruct/tests/test_elementfixedpoint.py | #!/usr/bin/env python3
"""Tests for the elementfixedpoint class"""
import unittest
from decimal import Decimal
from starstruct.elementfixedpoint import ElementFixedPoint, get_fixed_bits
from starstruct.message import Message
from starstruct.modes import Mode
# pylint: disable=no-self-use
class TestElementFixedPointHelpers(unittest.TestCase):
    """Test the helpers for this class"""

    def test_invalid_formats(self):
        """Non-integer struct formats must be rejected."""
        for num, fmt, precision in [(5, 'Z', 1), (8, 'f', 4)]:
            with self.assertRaises(ValueError):
                get_fixed_bits(num, fmt, precision)
        # TODO: Make sure that it won't accept more than one number?

    def test_valid_formats(self):
        """Integer struct formats are accepted without error."""
        get_fixed_bits(5, 'h', 1)
        get_fixed_bits(15, 'L', 0)

    def test_invalid_higher_bits(self):
        """Values that do not fit the available integer bits must raise."""
        bad_args = [
            (257, 'i', 32),
            (256, 'i', 32),
            ('hello', 'i', 3),
            (Decimal('20'), 'h', 17),
            (42, 'i', 33),
        ]
        for num, fmt, precision in bad_args:
            with self.assertRaises(ValueError):
                get_fixed_bits(num, fmt, precision)

    def test_valid_higher_bits(self):
        """Just make sure these all don't fail"""
        get_fixed_bits(255, 'I', 8)
        get_fixed_bits(15.5, 'I', 4)
        get_fixed_bits(22.75, 'i', 11)
        get_fixed_bits(Decimal('13.0'), 'q', 16)

    def test_zero(self):
        """Zero packs to all-zero bytes regardless of precision."""
        self.assertEqual(get_fixed_bits(0, 'H', 8), (0).to_bytes(2, 'big'))

    def test_basic_example(self):
        """0.9375 with 4 fractional bits is 0b1111."""
        self.assertEqual(get_fixed_bits(Decimal('0.9375'), 'B', 4), (15).to_bytes(1, 'little'))

    def test_another_example(self):
        """12.9375 in Q4 fixed point is 0b1100_1111 for every int width."""
        expected = 192 + 15
        self.assertEqual(get_fixed_bits(Decimal('12.9375'), 'h', 4), expected.to_bytes(2, 'little'))
        self.assertEqual(get_fixed_bits(Decimal('12.9375'), 'i', 4), expected.to_bytes(4, 'little'))
        self.assertEqual(get_fixed_bits(Decimal('12.9375'), 'q', 4), expected.to_bytes(8, 'little'))
class TestElementFixedPoint(unittest.TestCase):
    """ElementFixedPoint module tests"""

    def test_valid(self):
        """Test field formats that are valid fixedpoint elements."""
        # ('name', 'F', <int format>, precision)
        test_fields = [
            ('a', 'F', 'i', 8),
            ('b', 'F', 'h', 4),
            ('c', 'F', 'h', 7),
        ]
        for field in test_fields:
            with self.subTest(field):  # pylint: disable=no-member
                out = ElementFixedPoint.valid(field)
                self.assertTrue(out)

    def test_not_valid(self):
        """Test field formats that are not valid fixedpoint elements."""
        test_fields = [
            ('a', 'PF', 16, 8),  # Must have numbers preceding the F
            ('b', '3D', 8, 8),   # D is not a valid prefix. Must be F for fixedpoint
            ('c', 'D', 8.0, 8),  # Must be int, not float
            ('d', 'D', 8, 16),   # bytes must be larger than precision.
            ('e', 'D', 8),       # Must be of length four
        ]
        for field in test_fields:
            with self.subTest(field):  # pylint: disable=no-member
                out = ElementFixedPoint.valid(field)
                self.assertFalse(out)

    def test_valid_pack(self):
        """Test packing valid fixed point values."""
        precision = 8
        field = ('a', 'F', 'i', precision)
        self.assertTrue(ElementFixedPoint.valid(field))
        elem = ElementFixedPoint(field, Mode.Big)

        # Pairs of (input message, value the packed bytes should encode)
        test_values = [
            ({'a': '4'}, 4),
            ({'a': 13.5}, 13.5),
            ({'a': '13.5'}, '13.5'),
            ({'a': '13.500'}, '13.500'),
            ({'a': Decimal('13.500')}, '13.500'),
            ({'a': 1.1 + 2.2}, '3.3'),
        ]

        # Packing shifts the value left by `precision` fractional bits
        multiplier = 2 ** precision
        for (in_val, out_val) in test_values:
            ret = elem.pack(in_val)
            if not isinstance(out_val, Decimal):
                out_val = Decimal(out_val)
            # 'i' in big-endian mode packs to 4 big-endian bytes
            self.assertEqual(ret, int((out_val * multiplier)).to_bytes(4, 'big'))

    def test_valid_make(self):
        """Test full packing."""
        # A message mixing fixedpoint fields with plain struct fields
        my_message = Message('my_msg', [
            ('my_fixed', 'F', 'i', 8),
            ('not_specified_fixed', 'F', 'i', 8),
            ('other_fixed', 'F', 'i', 8, 3),
            ('just_a_num', 'i'),
            ('a_string', '32s'),
            ('this_fixed', 'F', 'i', 5),
        ], Mode.Big)

        data = {
            'my_fixed': 1.1 + 2.2,
            'other_fixed': Decimal('1.1') + Decimal('2.2'),
            'not_specified_fixed': Decimal('1.1') + Decimal('2.2'),
            'just_a_num': 16,
            'a_string': '=====================',
            'this_fixed': '1.9375',
        }

        packed = my_message.pack(data)
        unpacked = my_message.unpack(packed)

        assert unpacked.a_string == data['a_string']
        assert unpacked.just_a_num == data['just_a_num']
        # other_fixed used exact Decimal inputs, so the round trip is exact
        assert unpacked.other_fixed == Decimal('3.3')
        # my_fixed came from float arithmetic, so expect the quantized value
        assert unpacked.my_fixed == Decimal('3.296875')
        assert unpacked.not_specified_fixed == Decimal('3.296875')
        assert unpacked.this_fixed == Decimal(data['this_fixed'])
|
sprout42/StarStruct | starstruct/tests/test_elementescaped.py | <reponame>sprout42/StarStruct
"""Tests for the starstruct class"""
# import pytest
from starstruct.message import Message
# from starstruct.modes import Mode
# pylint: disable=line-too-long,invalid-name
class TestStarStruct:
    """StarStruct module tests"""

    # A small sub-message used as the repeated/escaped payload below
    Repeated = Message('Repeated', [
        ('x', 'B'),
        ('y', 'B'),
        ('z', 'H'),
    ])

    def test_escape_sequence_items(self):
        """Round-trip a list of Repeated messages through an escaped field."""
        # The escape dict supplies start/separator/end byte markers that
        # delimit each Repeated item in the packed stream
        TestStruct = Message('TestStruct', [
            ('escaped_data', self.Repeated, {
                'escape': {
                    'start': b'\xff\x00\xff\x11',
                    'separator': b'\x12\x12',
                    'end': b'\x11\xff\x00\xff',
                },
            }),
        ])

        test_data = {
            'escaped_data': [
                {'x': 7, 'y': 9, 'z': 13},
                {'x': 2, 'y': 8, 'z': 27},
                {'x': 6, 'y': 7, 'z': 11},
            ],
        }

        made = TestStruct.make(test_data)
        assert made.escaped_data[0].x == 7
        assert made.escaped_data[0].y == 9

        # Packing then unpacking must reproduce the made tuple exactly
        packed = TestStruct.pack(test_data)
        unpacked = TestStruct.unpack(packed)
        assert unpacked == made
|
sprout42/StarStruct | starstruct/bitfield.py | import re
import functools
class BitField(object):
    """
    Wraps an Enum whose member values are integer bit flags so that lists or
    sets of members can be packed into, and unpacked from, a single integer
    bitmask.
    """

    def __init__(self, enum):
        """Initialize from an Enum with integer, non-zero member values."""
        if not all(isinstance(member.value, int) for member in enum):
            msg = 'Enum {} members must have integer values'.format(repr(enum))
            raise TypeError(msg)

        # A member with value 0 cannot be represented in a bitmask (OR-ing it
        # in is a no-op and unpack() could never recover it), so reject it.
        # FIX: previously this used `assert enum(0)` to trigger the lookup,
        # which is stripped under `python -O` and inverted the check.
        try:
            zero_member = enum(0)
        except ValueError:
            # A ValueError means the enum has no value for 0, which is fine
            pass
        else:
            msg = 'Cannot construct BitField from {} with a value for 0: {}'
            raise TypeError(msg.format(repr(enum), zero_member))

        self.enum = enum

        # Determine the bit mask and length for this bitfield
        self.bit_mask = functools.reduce(lambda x, y: x | y, [e.value for e in self.enum])
        self.bit_length = self.bit_mask.bit_length()

    def __repr__(self):
        return 'BitField({})'.format(self.enum)

    def __str__(self):
        return 'BitField({})'.format(self.enum)

    def _invalid_value_error(self, item):
        """Build the ValueError for a value that is not in the wrapped enum."""
        enum_name = re.match(r"<enum '(\S+)'>", str(self.enum)).group(1)
        return ValueError('{} is not a valid {}'.format(item, enum_name))

    def find_value(self, item):
        """
        Take a value and determine the enumeration value based on value or
        enum member name.

        :raises ValueError: if item is not a member name, member, or value
            of the wrapped enum
        """
        if isinstance(item, str):
            # To make usage a bit nicer/easier, assume strings are enum
            # member names and attempt to convert them.
            try:
                return getattr(self.enum, item)
            except AttributeError:
                # AttributeError is the normal error if the name is not
                # valid for this enumeration type
                raise self._invalid_value_error(item) from None
        if isinstance(item, self.enum):
            return item
        # Assume that the item is an integer value; convert it to an enum
        # member to ensure it is a valid value for this bitfield
        try:
            return self.enum(item)
        except ValueError:
            raise self._invalid_value_error(item) from None

    def pack(self, arg):
        """
        Take a list (or single value) and bitwise-or all the values together
        """
        value = 0
        if arg is not None:
            # Handle a variety of inputs: list or single, enum or raw.
            # FIX: strings are iterable, so a bare member name like 'READ'
            # was previously iterated character by character; treat strings
            # as single values.
            if isinstance(arg, str) or not hasattr(arg, '__iter__'):
                arg_list = [arg]
            else:
                arg_list = arg

            for item in arg_list:
                value |= self.find_value(item).value
        return value

    def unpack(self, val):
        """
        Take a single number and split it out into all values that are present
        """
        return frozenset(e for e in self.enum if e.value & val)

    def make(self, arg):
        """
        Take an input list and return a frozenset

        useful for testing
        """
        values = []
        if arg is not None:
            # Handle a variety of inputs: list or single, enum or raw.
            # FIX: same string-iteration fix as pack() above.
            if isinstance(arg, str) or not hasattr(arg, '__iter__'):
                arg_list = [arg]
            else:
                arg_list = arg

            for item in arg_list:
                values.append(self.find_value(item))

        # return this list as a frozenset
        return frozenset(values)
|
sprout42/StarStruct | starstruct/tests/test_mode.py | <reponame>sprout42/StarStruct
import pytest
from starstruct import Mode
def test_from_byte_order():
    """Each sys.byteorder-style name maps to the matching Mode member."""
    expected = {
        'little': Mode.Little,
        'big': Mode.Big,
        'native': Mode.Native,
        'network': Mode.Network,
    }
    for order_name, mode in expected.items():
        assert Mode.from_byteorder(order_name) == mode
    # Unknown byte order names are rejected
    with pytest.raises(TypeError):
        Mode.from_byteorder('random thing')
|
sprout42/StarStruct | starstruct/packedbitfield.py | import re
import collections
import functools
import starstruct.bitfield
class PackedBitField(object):
    """
    A class that is used to bitwise pack/unpack one or more enumerations or
    bitfields to/from an integer value
    """
    def __init__(self, *args):
        """
        Build the packed layout from the supplied enums/BitFields.

        Each argument is either an enum class or a BitField; fields are
        packed right-to-left, i.e. the last argument occupies bit offset 0.

        Raises:
            TypeError: if the same enum appears twice (directly or wrapped in
                a BitField), or if any enum member value is not an integer.
        """
        # Ensure that there are no duplicate enum or bitfield types in the list
        member_bitfields = (k for k in args if isinstance(k, starstruct.bitfield.BitField))
        all_enums = args + tuple(b.enum for b in member_bitfields)
        if len(all_enums) != len(set(all_enums)):
            msg = 'Duplicate fields not allowed: {}'.format(args)
            raise TypeError(msg)

        # Ensure that all fields are either bitfields, or enums with all
        # members of each enumeration type are integers
        member_enums = (k for k in args if not isinstance(k, starstruct.bitfield.BitField))
        for key in member_enums:
            if not all(isinstance(member.value, int) for member in key):
                msg = 'Enum {} members must have integer values'.format(repr(key))
                raise TypeError(msg)

        # Allow enum to be a list of enumerations that need bitpacked sequentially
        # NOTE: the [{}] * len(args) placeholder dicts are all the same shared
        # object, but each entry is rebound to a fresh dict in the loop below,
        # so the sharing is harmless here.
        self._fields = collections.OrderedDict(zip(args, [{}] * len(args)))

        # Determine the bits required for the enumeration so they can all be
        # packed correctly. Assume that the furthest right enumeration should
        # have a bit offset of 0.
        total_width = 0
        for key in reversed(self._fields):
            if isinstance(key, starstruct.bitfield.BitField):
                all_bits = key.bit_mask
            else:
                # OR of all member values gives the full bit coverage of the enum
                all_bits = functools.reduce(lambda x, y: x | y, [k.value for k in key])
            self._fields[key] = {
                'offset': total_width,
                'mask': all_bits << total_width,
                'width': all_bits.bit_length(),
            }
            total_width += self._fields[key]['width']

        # Track the bit mask and bit length attributes just like BitField
        self.bit_mask = functools.reduce(lambda x, y: x | y, [v['mask'] for v in self._fields.values()])
        self.bit_length = total_width

    def __repr__(self):
        """Return a repr listing the member fields in pack order."""
        return 'PackedBitField({})'.format(list(self._fields))

    def __str__(self):
        """Same format as __repr__."""
        return 'PackedBitField({})'.format(list(self._fields))

    def find_value(self, item):
        """
        Take a value, determine if it matches one, and only one, of the member fields

        Returns a (member, field) tuple where `member` is the resolved
        enum/BitField value and `field` is the owning enum/BitField.

        Raises:
            ValueError: if `item` matches no member field, or matches more
                than one (ambiguous input).
        """
        # pylint: disable=too-many-branches

        # Split the member fields into bitfields and enums
        member_enums = [k for k in self._fields if not isinstance(k, starstruct.bitfield.BitField)]
        member_bitfields = [k for k in self._fields if isinstance(k, starstruct.bitfield.BitField)]

        # See if the supplied value is an enum or bitfield value
        matches = []
        for key in member_bitfields:
            try:
                matches.append((key.find_value(item), key))
            except ValueError:
                # This just means it isn't a member of this bitfield
                pass

        # Also check for matches in the enums. This helps guard against
        # ambiguous inputs where the bitfield and enum types overlap.
        if isinstance(item, tuple(member_enums)):
            for key in member_enums:
                if isinstance(item, key):
                    # This is guaranteed a unique match, so return now
                    return (item, key)
        elif isinstance(item, str):
            # If it's a string, then check it against the enum fields
            # (bitfields should already have been validated)
            for key in member_enums:
                try:
                    matches.append((getattr(key, item), key))
                except AttributeError:
                    # This is the normal error to throw if the enum name is
                    # not valid for this enumeration type. Check the next enum.
                    pass
        else:
            # Lastly, assume that the item is an integer value, attempt to
            # convert it to one of the enum values to ensure it is a valid
            # value. But if it matches more than one member field, we are
            # unable to pack this properly.
            for key in member_enums:
                try:
                    matches.append((key(item), key))
                except ValueError:
                    # This just means that the value is not valid for a
                    # specific enum type, check all enums for a match before
                    # raising a ValueError
                    pass

        if len(matches) == 1:
            return matches[0]
        elif len(matches) < 1:
            msg = '{} is not a valid {}'.format(item, list(self._fields))
            raise ValueError(msg)
        elif len(matches) > 1:
            msg = '{} is not a unique {}'.format(item, list(self._fields))
            raise ValueError(msg)

    def pack(self, arg):
        """
        Take a list (or single value) and bitwise-or all the values together

        Each input value is resolved via find_value() and shifted to its
        field's bit offset.  Returns the combined integer (0 for None input).
        """
        value = 0
        if arg is not None:
            # Handle a variety of inputs: list or single, enum or raw
            if hasattr(arg, '__iter__'):
                arg_list = arg
            else:
                arg_list = [arg]
            for item in arg_list:
                (enum_val, key) = self.find_value(item)
                value |= (enum_val.value << self._fields[key]['offset'])
        return value

    def unpack(self, val):
        """
        Take a single number and split it out into all values that are present

        Returns a frozenset of the decoded enum/BitField members.

        Raises:
            ValueError: if a field's extracted bits are not a valid value
                for that enum.
        """
        values = []
        for key in self._fields:
            # Isolate this field's bits and shift them down to offset 0
            enum_specific_bits = (val & self._fields[key]['mask']) >> self._fields[key]['offset']
            if isinstance(key, starstruct.bitfield.BitField):
                values.extend(key.unpack(enum_specific_bits))
            else:
                try:
                    values.append(key(enum_specific_bits))
                except ValueError:
                    # NOTE(review): this regex assumes str(key) looks like
                    # "<enum 'Name'>"; if it ever doesn't match, .group(1)
                    # would raise AttributeError instead — confirm.
                    enum_name = re.match(r"<enum '(\S+)'>", str(key)).group(1)
                    msg = '{} is not a valid {}'.format(enum_specific_bits, enum_name)
                    raise ValueError(msg)
        return frozenset(values)

    def make(self, arg):
        """
        Take an input list and return a frozenset

        useful for testing
        """
        values = []
        if arg is not None:
            # Handle a variety of inputs: list or single, enum or raw
            if hasattr(arg, '__iter__'):
                arg_list = arg
            else:
                arg_list = [arg]
            for item in arg_list:
                values.append(self.find_value(item)[0])
        # return this list as a frozenset
        return frozenset(values)
|
sprout42/StarStruct | starstruct/tests/test_elementcallable.py | <filename>starstruct/tests/test_elementcallable.py<gh_stars>1-10
#!/usr/bin/env python3
"""Tests for the starstruct class"""
# import struct
import unittest
from binascii import crc32
import pytest
from starstruct.message import Message
# from starstruct.modes import Mode
# pylint: disable=line-too-long,invalid-name
class TestStarStruct(unittest.TestCase):
    """StarStruct module tests"""

    # Shared sub-messages used as variable-length element types below.
    VarTest = Message('VarTest', [
        ('x', 'B'),
        ('y', 'B'),
    ])

    Repeated = Message('Repeated', [
        ('x', 'B'),
        ('z', 'H'),
    ])

    def test_single_element_with_set(self):
        """Callable element defined with a bare set holding one (func, args) tuple."""
        TestStruct = Message('TestStruct', [
            ('length_in_objects', 'H', 'vardata'),
            ('vardata', self.VarTest, 'length_in_objects'),
        ])
        CRCedMessage = Message('CRCedMessage', [
            ('data', TestStruct),
            # b'data' presumably means "the packed bytes of the data field"
            # — TODO confirm against ElementCallable.
            ('function_data', 'I', {
                (crc32, b'data')
            }),
        ])
        test_data = {
            'data': {
                'length_in_objects': 2,
                'vardata': [
                    {'x': 1, 'y': 2},
                    {'x': 3, 'y': 4},
                ],
            },
        }

        made = CRCedMessage.make(test_data)
        # assert len(made) == 5
        assert len(made.data.vardata) == 2
        assert made.data.vardata[0].x == 1
        assert made.data.vardata[0].y == 2
        # The computed value must equal the CRC of the packed sub-message
        assert made.function_data == crc32(TestStruct.pack(test_data['data']))

    def test_single_element_2(self):
        """Callable element defined with explicit make/pack/unpack entries."""
        TestStruct = Message('TestStruct', [
            ('length_in_objects', 'H', 'vardata'),
            ('vardata', self.VarTest, 'length_in_objects'),
        ])
        CRCedMessage = Message('CRCedMessage', [
            ('data', TestStruct),
            ('function_data', 'I', {
                'pack': (crc32, b'data'),
                'make': (crc32, b'data'),
                'unpack': (crc32, b'data'),
            }),
        ])
        test_data = {
            'data': {
                'length_in_objects': 2,
                'vardata': [
                    {'x': 1, 'y': 2},
                    {'x': 3, 'y': 4},
                ],
            },
        }

        made = CRCedMessage.make(test_data)
        # assert len(made) == 5
        assert len(made.data.vardata) == 2
        assert made.data.vardata[0].x == 1
        assert made.data.vardata[0].y == 2
        assert made.function_data == crc32(TestStruct.pack(test_data['data']))

    def test_one_element(self):
        """Callable element computed over multiple packed sibling fields."""
        def crc32_wrapper(*args):
            # crc32 takes a single buffer, so join all packed pieces first
            return crc32(b''.join(args))

        # Same layout as CRCedMessage minus the CRC, used to compute the
        # expected packed bytes for comparison.
        CompareMessage = Message('CompareMessage', [
            ('length_in_objects', 'H', 'vardata'),
            ('vardata', self.VarTest, 'length_in_objects'),
        ])
        CRCedMessage = Message('TestStruct', [
            ('length_in_objects', 'H', 'vardata'),
            ('vardata', self.VarTest, 'length_in_objects'),
            ('function_data', 'I', {
                (crc32_wrapper, b'length_in_objects', b'vardata')
            }),
        ])
        test_data = {
            'length_in_objects': 2,
            'vardata': [
                {'x': 1, 'y': 2},
                {'x': 3, 'y': 4},
            ],
        }

        made = CRCedMessage.make(test_data)
        # assert len(made) == 5
        assert len(made.vardata) == 2
        assert made.vardata[0].x == 1
        assert made.vardata[0].y == 2
        assert made.function_data == crc32(CompareMessage.pack(test_data))

    def test_adding_element(self):
        """Callable element receiving field *values* (unprefixed str names)."""
        def adder(x, y):
            return x + y

        AdderMessage = Message('AdderMessage', [
            ('item_a', 'H'),
            ('item_b', 'B'),
            ('function_data', 'I', {
                (adder, 'item_a', 'item_b')
            }),
        ])
        test_data = {
            'item_a': 2,
            'item_b': 5,
        }

        made = AdderMessage.make(test_data)
        assert made.item_a == 2
        assert made.item_b == 5
        assert made.function_data == 7

    def test_adding_element_list(self):
        """Callable over several fields; verifies make, pack and unpack round-trip."""
        def adder(*args):
            return sum(args)

        AdderMessage = Message('AdderMessage', [
            ('item_a', 'H'),
            ('item_b', 'B'),
            ('item_c', 'B'),
            ('item_d', 'B'),
            ('item_e', 'B'),
            # Note, there is no item 'e' in the list of arguments
            ('function_data', 'I', {
                (adder, 'item_a', 'item_b', 'item_c', 'item_d')
            }),
        ])

        # Test getting the correct result
        test_data = {
            'item_a': 2,
            'item_b': 5,
            'item_c': 7,
            'item_d': 4,
            'item_e': 6,
        }

        made = AdderMessage.make(test_data)
        assert made.item_a == 2
        assert made.item_b == 5
        assert made.function_data == 2 + 5 + 7 + 4

        # Check packing and unpacking
        packed = AdderMessage.pack(test_data)
        assert packed == b'\x02\x00\x05\x07\x04\x06\x12\x00\x00\x00'
        assert packed == made.pack()

        unpacked = AdderMessage.unpack(packed)
        assert made == unpacked

        # Test with correct result
        test_data_2 = {
            'item_a': 2,
            'item_b': 5,
            'item_c': 7,
            'item_d': 4,
            'item_e': 6,
            'function_data': 2 + 5 + 7 + 4,
        }

        made = AdderMessage.make(test_data_2)
        assert made.item_a == 2
        assert made.item_b == 5
        assert made.function_data == 2 + 5 + 7 + 4

        # Test with incorrect result
        test_data_2 = {
            'item_a': 2,
            'item_b': 5,
            'item_c': 7,
            'item_d': 4,
            'item_e': 6,
            'function_data': -1,
        }

        # A supplied value that disagrees with the computed one must raise
        with pytest.raises(ValueError):
            made = AdderMessage.make(test_data_2)

    def test_no_error_message(self):
        """With the trailing False flag, mismatched values are accepted silently."""
        def adder(*args):
            return sum(args)

        AdderMessage = Message('AdderMessage', [
            ('item_a', 'H'),
            ('item_b', 'B'),
            ('item_c', 'B'),
            ('item_d', 'B'),
            ('item_e', 'B'),
            # Note, there is no item 'e' in the list of arguments
            ('function_data', 'I', {
                (adder, 'item_a', 'item_b', 'item_c', 'item_d')
            }, False),
        ])

        # Test with incorrect result
        test_data_2 = {
            'item_a': 2,
            'item_b': 5,
            'item_c': 7,
            'item_d': 4,
            'item_e': 6,
            'function_data': -1,
        }

        made = AdderMessage.make(test_data_2)
        assert made.function_data == -1

    def test_verifying_unpack(self):
        """unpack() verifies the computed field unless verification is disabled."""
        def adder(*args):
            return sum(args)

        AdderMessage = Message('AdderMessage', [
            ('item_a', 'H'),
            ('item_b', 'B'),
            ('item_c', 'B'),
            ('item_d', 'B'),
            ('item_e', 'B'),
            # Note, there is no item 'e' in the list of arguments
            ('function_data', 'I', {
                (adder, 'item_a', 'item_b', 'item_c', 'item_d')
            }),
        ])

        # Test getting the correct result
        test_data = {
            'item_a': 2,
            'item_b': 5,
            'item_c': 7,
            'item_d': 4,
            'item_e': 6,
        }

        made = AdderMessage.make(test_data)
        assert made.item_a == 2
        assert made.item_b == 5
        assert made.function_data == 2 + 5 + 7 + 4

        # Check packing and unpacking
        packed = AdderMessage.pack(test_data)
        assert packed == b'\x02\x00\x05\x07\x04\x06\x12\x00\x00\x00'
        assert packed == made.pack()

        unpacked = AdderMessage.unpack(packed)
        assert made == unpacked

        # Now we modify the data we are going to unpack, and we should get an error
        modified_packed = b'\x02\x00\x05\x07\x04\x06\x11\x11\x11\x11'
        with pytest.raises(ValueError):
            unpacked = AdderMessage.unpack(modified_packed)

        # Same message but with verification disabled via the False flag
        AdderMessageFalse = Message('AdderMessageFalse', [
            ('item_a', 'H'),
            ('item_b', 'B'),
            ('item_c', 'B'),
            ('item_d', 'B'),
            ('item_e', 'B'),
            # Note, there is no item 'e' in the list of arguments
            ('function_data', 'I', {
                (adder, 'item_a', 'item_b', 'item_c', 'item_d')
            }, False),
        ])

        # Test getting the correct result
        test_data = {
            'item_a': 2,
            'item_b': 5,
            'item_c': 7,
            'item_d': 4,
            'item_e': 6,
        }

        made = AdderMessageFalse.make(test_data)
        assert made.item_a == 2
        assert made.item_b == 5
        assert made.function_data == 2 + 5 + 7 + 4

        # Check packing and unpacking
        packed = AdderMessageFalse.pack(test_data)
        assert packed == b'\x02\x00\x05\x07\x04\x06\x12\x00\x00\x00'
        assert packed == made.pack()

        unpacked = AdderMessageFalse.unpack(packed)
        assert made == unpacked

        # Now we modify the data we are going to unpack, and we should get an error
        modified_packed = b'\x02\x00\x05\x07\x04\x06\x11\x11\x11\x11'

        # This time it won't fail because we set False for this message
        unpacked = AdderMessageFalse.unpack(modified_packed)
        assert unpacked.item_a == 2
|
sprout42/StarStruct | starstruct/elementlength.py | <reponame>sprout42/StarStruct<filename>starstruct/elementlength.py
"""StarStruct element class."""
import struct
import re
from starstruct.element import register, Element
from starstruct.modes import Mode
@register
class ElementLength(Element):
    """
    The length StarStruct element class.

    An unsigned-integer field whose value is the length of another
    (variable-sized) element in the same message, referenced by name.
    A `str` field name counts referenced *objects*; a `bytes` field name
    means the length is a *byte* count supplied by the caller.
    """
    def __init__(self, field, mode=Mode.Native, alignment=1):
        """Initialize a StarStruct element object."""
        # All of the type checks have already been performed by the class
        # factory
        if isinstance(field[0], str):
            # str name: length is the number of objects in the referenced field
            self.name = field[0]
            self.object_length = True
        elif isinstance(field[0], bytes):
            # bytes name: length is a byte count taken from the message itself
            self.name = field[0].decode('utf-8')
            self.object_length = False

        self.ref = field[2]

        self._mode = mode
        self._alignment = alignment

        # Validate that the format specifiers are valid struct formats, this
        # doesn't have to be done now because the format will be checked when
        # any struct functions are called, but it's better to inform the user of
        # any errors earlier.
        # The easiest way to perform this check is to create a "Struct" class
        # instance, this will also increase the efficiency of all struct related
        # functions called.
        self.format = mode.value + field[1]
        self._struct = struct.Struct(self.format)

    @staticmethod
    def valid(field):
        """
        Validation function to determine if a field tuple represents a valid
        length element type.

        The basics have already been validated by the Element factory class,
        validate that the struct format is a valid unsigned numeric value.
        """
        return len(field) == 3 \
            and isinstance(field[1], str) \
            and re.match(r'[BHILQ]', field[1]) \
            and isinstance(field[2], str) and len(field[2])

    def validate(self, msg):
        """
        Ensure that the supplied message contains the required information for
        this element object to operate.

        All elements that are Variable must reference valid Length elements.

        Raises:
            TypeError: if the referenced element is not an ElementVariable,
                or does not reference this element back by name.
        """
        # TODO: Allow referencing multiple elements for byte lengths?
        # Imported here to avoid a circular import at module load time.
        from starstruct.elementvariable import ElementVariable
        if not isinstance(msg[self.ref], ElementVariable):
            err = 'length field {} reference {} invalid type'
            raise TypeError(err.format(self.name, self.ref))
        elif not msg[self.ref].ref == self.name:
            err = 'length field {} reference {} mismatch'
            raise TypeError(err.format(self.name, self.ref))

    def update(self, mode=None, alignment=None):
        """change the mode of the struct format"""
        if alignment:
            self._alignment = alignment

        if mode:
            self._mode = mode
            self.format = mode.value + self.format[1:]
            # recreate the struct with the new format
            self._struct = struct.Struct(self.format)

    def pack(self, msg):
        """Pack the provided values into the supplied buffer."""
        if self.object_length:
            # When packing a length element, use the length of the referenced
            # element not the value of the current element in the supplied
            # object.
            data = self._struct.pack(len(msg[self.ref]))
        else:
            # When packing something via byte length,
            # we use our self to determine the length
            data = self._struct.pack(msg[self.name])

        # If the data does not meet the alignment, add some padding.
        # (-len) % alignment is the number of bytes needed to reach the next
        # alignment boundary (0 when already aligned); the previous
        # `len(data) % alignment` formula padded the wrong amount for most
        # sizes (e.g. 1 byte of padding for a 1-byte field at alignment 4).
        missing_bytes = (-len(data)) % self._alignment
        if missing_bytes:
            data += b'\x00' * missing_bytes
        return data

    def unpack(self, msg, buf):
        """Unpack data from the supplied buffer using the initialized format."""
        ret = self._struct.unpack_from(buf, 0)

        # Remember to remove any alignment-based padding: consume the struct
        # bytes plus however many pad bytes pack() appended to reach the next
        # alignment boundary.  The previous `alignment - 1 - (size % alignment)`
        # formula consumed alignment-1 extra bytes when the size was already
        # aligned (off-by-one for any alignment > 1).
        size = struct.calcsize(self.format)
        unused = buf[size + (-size) % self._alignment:]
        return (ret[0], unused)

    def make(self, msg):
        """Return the length of the referenced array"""
        if self.object_length:
            return len(msg[self.ref])
        else:
            return msg[self.name]
|
sprout42/StarStruct | starstruct/__init__.py | """Package for StarStruct."""
import importlib
import glob
import os
import sys

__project__ = 'StarStruct'
__version__ = '0.9.1'

VERSION = __project__ + '-' + __version__
PYTHON_VERSION = 3, 5

if not sys.version_info >= PYTHON_VERSION:  # pragma: no cover (manual test)
    exit("Python {}.{}+ is required.".format(*PYTHON_VERSION))

# Import all the different elements automatically.
# This makes sure that any new elements added are imported into the project
# and registered.
file_path = os.path.dirname(os.path.abspath(__file__))

added_elements = []
# Match only python source files: a bare 'element*' pattern would also pick
# up stray artifacts (editor backups, compiled files), and stripping the
# last three characters below would then produce a bogus module name and
# make importing the package fail.
for f in glob.glob(os.path.join(file_path, 'element*.py')):
    import_name = os.path.basename(f)[:-3]
    added_elements.append(importlib.import_module('starstruct.' + import_name))

# To find out which elements have been added, just uncomment this statement
# print(added_elements)

# pylint: disable=wrong-import-position
from starstruct.message import Message
from starstruct.modes import Mode

# silence F401 flake8 error
assert Message
assert Mode

from starstruct.startuple import StarTuple
assert StarTuple

from starstruct.bitfield import BitField
assert BitField

from starstruct.packedbitfield import PackedBitField
assert PackedBitField

__all__ = ['Message', 'Mode', 'StarTuple', 'BitField', 'PackedBitField']
|
sprout42/StarStruct | starstruct/tests/test_elementnone.py | <reponame>sprout42/StarStruct<gh_stars>1-10
#!/usr/bin/env python3
"""Tests for the starstruct class"""
import struct
import unittest
from hashlib import md5
from starstruct.message import Message
class TestStarStructNone(unittest.TestCase):
    """StarStruct module tests"""
    # TODO: Clean up these tests, change names, and move a bunch of the items to a helper function

    # Shared sub-message used as the variable-length element type below.
    VarTest = Message('VarTest', [
        ('x', 'B'),
        ('y', 'B'),
    ])

    def test_single_element_1(self):
        """A None element ('salted') feeds the callable but is never packed."""
        def pseudo_salted_md5(salt, original):
            # md5(salt + md5(original)); a None salt behaves like empty bytes
            temp_md5 = md5(original)
            if salt is None:
                salt = b''
            return md5(salt + temp_md5.digest()).digest()

        def pack_salt(data):
            # Re-serialize the unpacked tuple of ints back into bytes
            return b''.join(item.to_bytes(1, 'little') for item in data)

        TestStruct = Message('TestStruct', [
            ('length_in_objects', 'H', 'vardata'),
            ('vardata', self.VarTest, 'length_in_objects'),
        ])
        CRCedMessage = Message('CRCedMessage', [
            ('data', TestStruct),
            ('salted', None),
            ('function_data', '16B', {
                'make': (pseudo_salted_md5, 'salted', b'data'),
                'pack': (pseudo_salted_md5, 'salted', b'data'),
                'unpack': (pack_salt, 'function_data'),
            }, False),
        ])
        test_data = {
            'data': {
                'length_in_objects': 2,
                'vardata': [
                    {'x': 1, 'y': 2},
                    {'x': 3, 'y': 4},
                ],
            },
            'salted': b'random_salter',
        }

        made = CRCedMessage.make(test_data)
        assert len(made.data.vardata) == 2
        assert made.data.vardata[0].x == 1
        assert made.data.vardata[0].y == 2

        no_data = made.pack()
        regular = CRCedMessage.pack(**test_data)
        assert regular == no_data

        # Show that there's no room to have the random salter be packed
        len_data = len(no_data) - 16
        assert no_data[0:len_data] == struct.pack('HBBBB', 2, 1, 2, 3, 4)
        assert md5(
            b'random_salter' +
            md5(no_data[0:len_data]).digest()
        ).digest() == no_data[len_data:]

        unpacked = CRCedMessage.unpack(no_data)
        # The None element cannot be recovered from the wire format
        assert unpacked.salted is None
        assert unpacked.function_data == made.function_data

        # TEMP
        new = unpacked._replace(**{'salted': b'random_salter'})
        assert new.salted == b'random_salter'
        # print(new._asdict())

    def test_single_element_2(self):
        """Identity unpack function: function_data stays a tuple of ints."""
        def pseudo_salted_md5(salt, original):
            temp_md5 = md5(original)
            if salt is None:
                salt = b''
            return md5(salt + temp_md5.digest()).digest()

        def do_nothing(data):
            return data

        TestStruct = Message('TestStruct', [
            ('length_in_objects', 'H', 'vardata'),
            ('vardata', self.VarTest, 'length_in_objects'),
        ])
        CRCedMessage = Message('CRCedMessage', [
            ('data', TestStruct),
            ('salted', None),
            ('function_data', '16B', {
                'make': (pseudo_salted_md5, 'salted', b'data'),
                'pack': (pseudo_salted_md5, 'salted', b'data'),
                'unpack': (do_nothing, 'function_data'),
            }, False),
        ])
        test_data = {
            'data': {
                'length_in_objects': 2,
                'vardata': [
                    {'x': 1, 'y': 2},
                    {'x': 3, 'y': 4},
                ],
            },
            'salted': b'random_salter',
        }

        made = CRCedMessage.make(test_data)
        assert len(made.data.vardata) == 2
        assert made.data.vardata[0].x == 1
        assert made.data.vardata[0].y == 2

        no_data = made.pack()
        regular = CRCedMessage.pack(**test_data)
        assert regular == no_data

        # Show that there's no room to have the random salter be packed
        len_data = len(no_data) - 16
        assert no_data[0:len_data] == struct.pack('HBBBB', 2, 1, 2, 3, 4)
        assert md5(
            b'random_salter' +
            md5(no_data[0:len_data]).digest()
        ).digest() == no_data[len_data:]

        unpacked = CRCedMessage.unpack(no_data)
        assert unpacked.salted is None
        # This is non symmetric for this test, so we can't just check based on the made item
        assert unpacked.function_data == (157, 38, 247, 245, 5, 71, 43, 227, 80, 44, 10, 243, 48, 248, 163, 207)

        # TEMP
        new = unpacked._replace(**{'salted': b'random_salter'})
        assert new.salted == b'random_salter'
        # print(new._asdict())

    def test_single_element_3(self):
        """Transforming unpack function applied to the unpacked field values."""
        def pseudo_salted_md5(salt, original):
            temp_md5 = md5(original)
            if salt is None:
                salt = b''
            return md5(salt + temp_md5.digest()).digest()

        def double(data):
            return [item * 2 for item in data]

        TestStruct = Message('TestStruct', [
            ('length_in_objects', 'H', 'vardata'),
            ('vardata', self.VarTest, 'length_in_objects'),
        ])
        CRCedMessage = Message('CRCedMessage', [
            ('data', TestStruct),
            ('salted', None),
            ('function_data', '16B', {
                'make': (pseudo_salted_md5, 'salted', b'data'),
                'pack': (pseudo_salted_md5, 'salted', b'data'),
                'unpack': (double, 'function_data'),
            }, False),
        ])
        test_data = {
            'data': {
                'length_in_objects': 2,
                'vardata': [
                    {'x': 1, 'y': 2},
                    {'x': 3, 'y': 4},
                ],
            },
            'salted': b'random_salter',
        }

        made = CRCedMessage.make(test_data)
        assert len(made.data.vardata) == 2
        assert made.data.vardata[0].x == 1
        assert made.data.vardata[0].y == 2

        no_data = made.pack()
        regular = CRCedMessage.pack(**test_data)
        assert regular == no_data

        # Show that there's no room to have the random salter be packed
        len_data = len(no_data) - 16
        assert no_data[0:len_data] == struct.pack('HBBBB', 2, 1, 2, 3, 4)
        assert md5(
            b'random_salter' +
            md5(no_data[0:len_data]).digest()
        ).digest() == no_data[len_data:]

        unpacked = CRCedMessage.unpack(no_data)
        assert unpacked.salted is None
        # This is non symmetric for this test, so we can't just check based on the made item
        assert unpacked.function_data == [314, 76, 494, 490, 10, 142, 86, 454, 160, 88, 20, 486, 96, 496, 326, 414]

    def test_single_element_4(self):
        """unpack function applied to the packed bytes of another field (b'data')."""
        def pseudo_salted_md5(salt, original):
            temp_md5 = md5(original)
            if salt is None:
                salt = b''
            return md5(salt + temp_md5.digest()).digest()

        def double(data):
            return [item * 2 for item in data]

        TestStruct = Message('TestStruct', [
            ('length_in_objects', 'H', 'vardata'),
            ('vardata', self.VarTest, 'length_in_objects'),
        ])

        # This one has nothing to do with the original packed message
        CRCedMessage = Message('CRCedMessage', [
            ('data', TestStruct),
            ('salted', None),
            ('function_data', '16B', {
                'make': (pseudo_salted_md5, 'salted', b'data'),
                'pack': (pseudo_salted_md5, 'salted', b'data'),
                'unpack': (double, b'data'),
            }, False),
        ])
        test_data = {
            'data': {
                'length_in_objects': 2,
                'vardata': [
                    {'x': 1, 'y': 2},
                    {'x': 3, 'y': 4},
                ],
            },
            'salted': b'random_salter',
        }

        made = CRCedMessage.make(test_data)
        assert len(made.data.vardata) == 2
        assert made.data.vardata[0].x == 1
        assert made.data.vardata[0].y == 2

        no_data = made.pack()
        regular = CRCedMessage.pack(**test_data)
        assert regular == no_data

        # Show that there's no room to have the random salter be packed
        len_data = len(no_data) - 16
        assert no_data[0:len_data] == struct.pack('HBBBB', 2, 1, 2, 3, 4)
        assert md5(
            b'random_salter' +
            md5(no_data[0:len_data]).digest()
        ).digest() == no_data[len_data:]

        unpacked = CRCedMessage.unpack(no_data)
        assert unpacked.salted is None
        # This is non symmetric for this test, so we can't just check based on the made item
        assert unpacked.function_data == [4, 0, 2, 4, 6, 8]

    def test_nounpack_function(self):
        """Omitting 'unpack' and setting it to (None,) behave identically."""
        def pseudo_salted_md5(salt, original):
            temp_md5 = md5(original)
            if salt is None:
                salt = b''
            return md5(salt + temp_md5.digest()).digest()

        TestStruct = Message('TestStruct', [
            ('length_in_objects', 'H', 'vardata'),
            ('vardata', self.VarTest, 'length_in_objects'),
        ])

        # This one has nothing to do with the original packed message
        ExplicitNone = Message('Explicit', [
            ('data', TestStruct),
            ('salted', None),
            ('function_data', '16B', {
                'make': (pseudo_salted_md5, 'salted', b'data'),
                'pack': (pseudo_salted_md5, 'salted', b'data'),
                'unpack': (None, ),
            }, False),
        ])
        ImplicitNone = Message('Implicit', [
            ('data', TestStruct),
            ('salted', None),
            ('function_data', '16B', {
                'make': (pseudo_salted_md5, 'salted', b'data'),
                'pack': (pseudo_salted_md5, 'salted', b'data'),
            }, False),
        ])
        test_data = {
            'data': {
                'length_in_objects': 2,
                'vardata': [
                    {'x': 1, 'y': 2},
                    {'x': 3, 'y': 4},
                ],
            },
            'salted': b'random_salter',
        }

        made = ExplicitNone.make(test_data)
        assert len(made.data.vardata) == 2
        assert made.data.vardata[0].x == 1
        assert made.data.vardata[0].y == 2

        no_data = made.pack()
        regular = ExplicitNone.pack(**test_data)
        assert regular == no_data

        # Show that there's no room to have the random salter be packed
        len_data = len(no_data) - 16
        assert no_data[0:len_data] == struct.pack('HBBBB', 2, 1, 2, 3, 4)
        assert md5(
            b'random_salter' +
            md5(no_data[0:len_data]).digest()
        ).digest() == no_data[len_data:]

        unpacked = ExplicitNone.unpack(no_data)
        assert unpacked.salted is None
        assert unpacked.function_data == (157, 38, 247, 245, 5, 71, 43, 227, 80, 44, 10, 243, 48, 248, 163, 207)

        made = ImplicitNone.make(test_data)
        assert len(made.data.vardata) == 2
        assert made.data.vardata[0].x == 1
        assert made.data.vardata[0].y == 2

        no_data = made.pack()
        regular = ImplicitNone.pack(**test_data)
        assert regular == no_data

        # Show that there's no room to have the random salter be packed
        len_data = len(no_data) - 16
        assert no_data[0:len_data] == struct.pack('HBBBB', 2, 1, 2, 3, 4)
        assert md5(
            b'random_salter' +
            md5(no_data[0:len_data]).digest()
        ).digest() == no_data[len_data:]

        unpacked = ImplicitNone.unpack(no_data)
        assert unpacked.salted is None
        assert unpacked.function_data == (157, 38, 247, 245, 5, 71, 43, 227, 80, 44, 10, 243, 48, 248, 163, 207)
|
sprout42/StarStruct | starstruct/tests/test_elementvariable.py | <reponame>sprout42/StarStruct
#!/usr/bin/env python3
"""Tests for the starstruct class"""
import enum
import struct
import unittest
import pytest
from starstruct.message import Message
# from starstruct.modes import Mode
class SimpleEnum(enum.Enum):
    """Simple enum class for testing message pack/unpack"""
    # NOTE(review): not referenced by the tests visible in this file —
    # possibly kept for copy-paste parity with the other test modules.
    one = 1
    two = 2
    three = 3
# pylint: disable=line-too-long,invalid-name
class TestStarStruct(unittest.TestCase):
"""StarStruct module tests"""
VarTest = Message('VarTest', [
('x', 'B'),
('y', 'B'),
])
Repeated = Message('Repeated', [
('x', 'B'),
('z', 'H'),
])
def test_no_data(self):
num_repeats = 4
TestStruct = Message('TestStruct', [
('length', 'H', 'vardata'),
('vardata', self.VarTest, 'length'),
('repeated_data', self.Repeated, num_repeats),
])
test_data_no_data = {
'length': 0,
'vardata': [],
'repeated_data': [
],
}
made = TestStruct.make(test_data_no_data)
assert made.length == 0
assert made.vardata == []
assert made.repeated_data == []
packed = TestStruct.pack(test_data_no_data)
assert packed == struct.pack('H', 0) + (struct.pack('B', 0) + struct.pack('H', 0)) * num_repeats
def test_some_data(self):
num_repeats = 3
TestStruct = Message('TestStruct', [
('length', 'H', 'vardata'),
('vardata', self.VarTest, 'length'),
('repeated_data', self.Repeated, num_repeats),
])
test_data = {
'length': 2,
'vardata': [
{'x': 1, 'y': 2},
{'x': 3, 'y': 4},
],
'repeated_data': [
{'x': 7, 'z': 13},
{'x': 2, 'z': 27},
{'x': 6, 'z': 11},
],
}
made = TestStruct.make(test_data)
assert made.length == 2
assert len(made.vardata) == 2
assert len(made.repeated_data) == 3
packed = TestStruct.pack(test_data)
assert packed == struct.pack('H', 2) + \
struct.pack('BB', 1, 2) + \
struct.pack('BB', 3, 4) + \
(struct.pack('B', 7) + struct.pack('H', 13)) + \
(struct.pack('B', 2) + struct.pack('H', 27)) + \
(struct.pack('B', 6) + struct.pack('H', 11))
def test_not_all_fixed_data(self):
num_repeats = 5
TestStruct = Message('TestStruct', [
('length', 'H', 'vardata'),
('vardata', self.VarTest, 'length'),
('repeated_data', self.Repeated, num_repeats),
])
test_data = {
'length': 1,
'vardata': [
{'x': 255, 'y': 127},
],
'repeated_data': [
{'x': 6, 'z': 12},
{'x': 1, 'z': 26},
{'x': 5, 'z': 10},
],
}
made = TestStruct.make(test_data)
assert made.length == 1
assert len(made.vardata) == 1
assert len(made.repeated_data) == 3
packed = TestStruct.pack(test_data)
assert packed == struct.pack('H', 1) + \
struct.pack('BB', 255, 127) + \
(struct.pack('B', 6) + struct.pack('H', 12)) + \
(struct.pack('B', 1) + struct.pack('H', 26)) + \
(struct.pack('B', 5) + struct.pack('H', 10)) + \
(struct.pack('B', 0) + struct.pack('H', 0)) * 2
def test_byte_length_no_data(self):
TestStruct = Message('TestStruct', [
('length_in_objects', 'H', 'vardata'),
('vardata', self.VarTest, 'length_in_objects'),
(b'length_in_bytes', 'H', 'bytesdata'),
('bytesdata', self.VarTest, b'length_in_bytes'),
])
test_data_no_data = {
'length_in_objects': 0,
'vardata': [],
'length_in_bytes': 0,
'bytesdata': [],
}
made = TestStruct.make(test_data_no_data)
assert made.length_in_objects == 0
assert made.vardata == []
assert made.length_in_bytes == 0
assert made.bytesdata == []
packed = TestStruct.pack(test_data_no_data)
assert packed == \
struct.pack('H', 0) + \
struct.pack('H', 0)
def test_byte_length_some_data(self):
TestStruct = Message('TestStruct', [
('length_in_objects', 'H', 'vardata'),
('vardata', self.VarTest, 'length_in_objects'),
(b'length_in_bytes', 'H', 'bytesdata'),
('bytesdata', self.VarTest, b'length_in_bytes'),
])
test_data_no_data = {
'length_in_objects': 1,
'vardata': [
{'x': 255, 'y': 127},
],
'length_in_bytes': 2,
'bytesdata': [
{'x': 254, 'y': 126},
],
}
made = TestStruct.make(test_data_no_data)
assert made.length_in_objects == 1
assert made.vardata == [
self.VarTest.make(
{'x': 255, 'y': 127}
)]
assert made.length_in_bytes == 2
assert made.bytesdata == [
self.VarTest.make(
{'x': 254, 'y': 126}
)]
packed = TestStruct.pack(test_data_no_data)
assert packed == \
struct.pack('H', 1) + \
struct.pack('BB', 255, 127) + \
struct.pack('H', 2) + \
struct.pack('BB', 254, 126)
def test_byte_length_more_data(self):
TestStruct = Message('TestStruct', [
('length_in_objects', 'H', 'vardata'),
('vardata', self.VarTest, 'length_in_objects'),
(b'length_in_bytes', 'H', 'bytesdata'),
('bytesdata', self.VarTest, b'length_in_bytes'),
])
test_data_no_data = {
'length_in_objects': 1,
'vardata': [
{'x': 255, 'y': 127},
],
'length_in_bytes': 10,
'bytesdata': [
{'x': 254, 'y': 126},
{'x': 25, 'y': 16},
{'x': 24, 'y': 26},
{'x': 54, 'y': 17},
{'x': 25, 'y': 12},
],
}
made = TestStruct.make(test_data_no_data)
assert made.length_in_objects == 1
assert made.vardata == [
self.VarTest.make(
{'x': 255, 'y': 127}
)]
assert made.length_in_bytes == 10
assert made.bytesdata == [
self.VarTest.make(
{'x': 254, 'y': 126}
),
self.VarTest.make(
{'x': 25, 'y': 16},
),
self.VarTest.make(
{'x': 24, 'y': 26},
),
self.VarTest.make(
{'x': 54, 'y': 17},
),
self.VarTest.make(
{'x': 25, 'y': 12},
),
]
packed = TestStruct.pack(test_data_no_data)
assert packed == \
struct.pack('H', 1) + \
struct.pack('BB', 255, 127) + \
struct.pack('H', 10) + \
struct.pack('BB', 254, 126) + \
struct.pack('BB', 25, 16) + \
struct.pack('BB', 24, 26) + \
struct.pack('BB', 54, 17) + \
struct.pack('BB', 25, 12)
def test_unpacking_of_correct_size(self):
packed_element = \
struct.pack('H', 1) + \
struct.pack('BB', 255, 127) + \
struct.pack('H', 10) + \
struct.pack('BB', 254, 126) + \
struct.pack('BB', 25, 16) + \
struct.pack('BB', 24, 26) + \
struct.pack('BB', 54, 17) + \
struct.pack('BB', 25, 12)
TestStruct = Message('TestStruct', [
('length_in_objects', 'H', 'vardata'),
('vardata', self.VarTest, 'length_in_objects'),
(b'length_in_bytes', 'H', 'bytesdata'),
('bytesdata', self.VarTest, b'length_in_bytes'),
])
unpacked = TestStruct.unpack(packed_element)
assert unpacked
assert unpacked.length_in_objects == 1
assert unpacked.length_in_bytes == 10
def test_unpacking_of_too_little_bytes(self):
# Only pack four elements, instead of the five
packed_element = \
struct.pack('H', 1) + \
struct.pack('BB', 255, 127) + \
struct.pack('H', 10) + \
struct.pack('BB', 254, 126) + \
struct.pack('BB', 25, 16) + \
struct.pack('BB', 24, 26) + \
struct.pack('BB', 54, 17)
TestStruct = Message('TestStruct', [
('length_in_objects', 'H', 'vardata'),
('vardata', self.VarTest, 'length_in_objects'),
(b'length_in_bytes', 'H', 'bytesdata'),
('bytesdata', self.VarTest, b'length_in_bytes'),
])
with pytest.raises(struct.error):
unpacked = TestStruct.unpack(packed_element)
assert unpacked
def test_unpacking_of_too_many_bytes(self):
packed_element = \
struct.pack('H', 1) + \
struct.pack('BB', 255, 127) + \
struct.pack('H', 10) + \
struct.pack('BB', 254, 126) + \
struct.pack('BB', 25, 16) + \
struct.pack('BB', 24, 26) + \
struct.pack('BB', 24, 26) + \
struct.pack('BB', 24, 26) + \
struct.pack('BB', 24, 26) + \
struct.pack('BB', 24, 26) + \
struct.pack('BB', 24, 26) + \
struct.pack('BB', 54, 17)
TestStruct = Message('TestStruct', [
('length_in_objects', 'H', 'vardata'),
('vardata', self.VarTest, 'length_in_objects'),
(b'length_in_bytes', 'H', 'bytesdata'),
('bytesdata', self.VarTest, b'length_in_bytes'),
])
with pytest.raises(ValueError):
unpacked = TestStruct.unpack(packed_element)
assert unpacked
def test_single_element(self):
TestStruct = Message('TestStruct', [
('length_in_objects', 'H', 'vardata'),
('vardata', self.VarTest, 'length_in_objects'),
('single_data', self.VarTest),
])
test_data = {
'length': 2,
'vardata': [
{'x': 1, 'y': 2},
{'x': 3, 'y': 4},
],
'single_data': [
{'x': 6, 'y': 11},
],
}
made = TestStruct.make(test_data)
assert len(made.vardata) == 2
assert made.single_data.x == 6
assert made.single_data.y == 11
def test_single_element_2(self):
TestStruct = Message('TestStruct', [
('length_in_objects', 'H', 'vardata'),
('vardata', self.VarTest, 'length_in_objects'),
('single_data', self.VarTest),
])
test_data = {
'length': 2,
'vardata': [
{'x': 1, 'y': 2},
{'x': 3, 'y': 4},
],
'single_data': {'x': 6, 'y': 11},
}
made = TestStruct.make(test_data)
assert len(made.vardata) == 2
assert made.single_data.x == 6
assert made.single_data.y == 11
@pytest.mark.skip('Not implemented. Might not be possible')
def test_length_after_item(self):
    """Length field declared *after* the variable data it measures.

    Skipped: unpacking would need the length before the data it counts,
    so this ordering may never be supportable.
    """
    num_repeats = 3
    TestStruct = Message('TestStruct', [
        ('vardata', self.VarTest, 'length'),
        ('length', 'H', 'vardata'),  # length follows the data it counts
        ('repeated_data', self.Repeated, num_repeats),  # fixed repeat count
    ])

    test_data = {
        'length': 2,
        'vardata': [
            {'x': 1, 'y': 2},
            {'x': 3, 'y': 4},
        ],
        'repeated_data': [
            {'x': 7, 'z': 13},
            {'x': 2, 'z': 27},
            {'x': 6, 'z': 11},
        ],
    }

    made = TestStruct.make(test_data)
    assert made.length == 2
    assert made.vardata[0].x == 1
    assert made.vardata[0].y == 2

    packed = TestStruct.pack(test_data)
    unpacked = TestStruct.unpack(packed)
    assert unpacked
|
sprout42/StarStruct | starstruct/tests/test_elementstring.py | #!/usr/bin/env python3
"""Tests for the elementstring class"""
import unittest
import pytest
from starstruct.message import Message
from starstruct.elementstring import ElementString
# pylint: disable=line-too-long,invalid-name
class TestElementString(unittest.TestCase):
    """ElementString module tests"""

    def test_valid(self):
        """Test field formats that are valid ElementString elements."""
        test_fields = [
            ('a', 'c'),    # single character
            ('b', '2c'),   # 2 char string
            ('c', '10s'),  # 10 char string (variable)
            ('d', '5p'),   # 5 char string (fixed)
        ]

        for field in test_fields:
            with self.subTest(field):  # pylint: disable=no-member
                out = ElementString.valid(field)
                self.assertTrue(out)

    def test_not_valid(self):
        """Test field formats that are not valid ElementString elements."""
        test_fields = [
            ('a', '4x'),   # 4 pad bytes
            ('b', 'z'),    # invalid
            ('c', '1'),    # invalid
            ('d', '9S'),   # invalid (must be lowercase)
            ('e', '/'),    # invalid
            ('a', 'b'),    # signed byte: -128, 127
            ('b', 'H'),    # unsigned short: 0, 65535
            ('d', 'L'),    # unsigned long: 0, 2^32-1
            ('e', '?'),    # bool: 0, 1
        ]

        for field in test_fields:
            with self.subTest(field):  # pylint: disable=no-member
                out = ElementString.valid(field)
                self.assertFalse(out)

    def test_make_andk_pack(self):
        """Make and pack/unpack round-trip of each string-like format.

        NOTE(review): "andk" looks like a typo for "and", but renaming
        would change the pytest test id, so it is left as-is.
        """
        TestStruct = Message('TestStruct', [
            ('a', 'c'),    # single character
            ('b', '2c'),   # 2 char string
            ('c', '10s'),  # 10 char string (variable)
            ('d', '9p'),   # 9 ( - 1 ) char string (fixed)
            ('e', '5c'),
        ])

        test_data = {
            'a': 'i',
            'b': 'hi',
            'c': 'short',
            'd': 'long',
            'e': ['l', 'i', 's', 't'],
        }

        made = TestStruct.make(test_data)
        # 'c' formats come back as lists of characters; 's'/'p' as strings,
        # with fixed-width fields NUL-padded to their declared size
        assert made.a == ['i']
        assert made.b == ['h', 'i']
        assert made.c == 'short'
        assert made.d == 'long\x00\x00\x00\x00'
        assert made.e == ['l', 'i', 's', 't', '\x00']

        packed = TestStruct.pack(test_data)
        unpacked = TestStruct.unpack(packed)
        assert made == unpacked

    def test_alignment(self):
        """Packed string fields are padded out to the configured alignment."""
        TestStruct = Message('TestStruct', [
            ('a', 'c'),   # single character
            ('b', '2c'),  # 2 char string
        ])

        test_data = {
            'a': 'a',
            'b': 'no',
        }

        TestStruct.update(alignment=4)
        packed = TestStruct.pack(test_data)
        # each field occupies a 4-byte aligned slot
        assert packed == b'a\x00\x00\x00no\x00\x00'

    def test_bad_values(self):
        """Non-character values (e.g. a list of ints) raise TypeError on make."""
        TestStruct = Message('TestStruct', [
            ('a', 'c'),   # single character
            ('b', '2c'),  # 2 char string
        ])

        test_data = {
            'a': [5],  # an int is not a character
            'b': 'no',
        }

        with pytest.raises(TypeError):
            TestStruct.make(test_data)
|
sprout42/StarStruct | starstruct/startuple.py | <reponame>sprout42/StarStruct
import collections
def StarTuple(name, named_fields, elements):
    """Create a namedtuple subclass augmented with StarStruct helpers.

    :param name: name for the generated tuple type
    :param named_fields: iterable of field names for the tuple
    :param elements: ordered mapping of element name -> element object;
        each element must provide a ``pack(dict) -> bytes`` method
    :returns: a namedtuple class with ``pack``/``__str__`` helpers and
        ``_elements``/``_name`` attributes attached
    :raises ValueError: if any field name collides with a reserved name
    """
    restricted_fields = {
        # Default dunders
        '__getnewargs__',
        '__new__',
        # Bug fix: this entry used to be '__slots__ ' (trailing space),
        # which could never match a real field name.
        '__slots__',
        '__repr__',
        # Default namedtuple helper methods
        '_asdict',
        '_make',
        '_replace',
        # Fields specifier
        '_fields',
        # StarTuple additions (set below)
        'pack',
        '_elements',
        '__str__',
        '_name',
    }

    intersection = restricted_fields.intersection(set(named_fields))
    if intersection:
        raise ValueError('Restricted field used. Bad fields: {0}'.format(intersection))

    named_tuple = collections.namedtuple(name, named_fields)

    # TODO: Auto update and replace!
    def this_pack(self):
        """Pack every element in declaration order and concatenate the bytes."""
        packed = bytes()
        for _, value in self._elements.items():
            packed += value.pack(self._asdict())
        return packed

    def this_str(self):
        """Render a multi-line view with field names aligned in a column."""
        import pprint
        fmt = 'StarTuple: <{0}>\n'.format(str(name))
        # Width of the longest key, for column alignment
        len_of_keys = max((len(key) for key in self._asdict()), default=0)
        for key, value in self._asdict().items():
            fmt += (' {key:%d}: {value}\n' % len_of_keys).format(
                key=key,
                value=pprint.pformat(value, width=150),
            )
        return fmt

    named_tuple.pack = this_pack
    named_tuple.__str__ = this_str
    named_tuple._elements = elements
    named_tuple._name = name

    return named_tuple
|
sprout42/StarStruct | starstruct/tests/test_elementpad.py | #!/usr/bin/env python3
"""Tests for the elementpad class"""
import unittest
from starstruct.elementpad import ElementPad
# pylint: disable=line-too-long,invalid-name
class TestElementPad(unittest.TestCase):
    """ElementPad module tests"""

    def test_valid(self):
        """Pad-byte formats ('x', with or without a count) are accepted."""
        accepted = [
            ('a', '4x'),  # 4 pad bytes
            ('a', 'x'),   # 1 pad byte
        ]
        for candidate in accepted:
            with self.subTest(candidate):  # pylint: disable=no-member
                self.assertTrue(ElementPad.valid(candidate))

    def test_not_valid(self):
        """Anything that is not a pad-byte format is rejected."""
        rejected = [
            ('b', 'z'),    # invalid
            ('c', '1'),    # invalid
            ('d', '9S'),   # invalid (must be lowercase)
            ('e', '/'),    # invalid
            ('a', 'b'),    # signed byte: -128, 127
            ('b', 'H'),    # unsigned short: 0, 65535
            ('c', '10s'),  # 10 byte string
            ('d', 'L'),    # unsigned long: 0, 2^32-1
            ('e', '?'),    # bool: 0, 1
        ]
        for candidate in rejected:
            with self.subTest(candidate):  # pylint: disable=no-member
                self.assertFalse(ElementPad.valid(candidate))
|
sprout42/StarStruct | starstruct/elementvariable.py | <gh_stars>1-10
"""
The variable NamedStruct element class.
Can be used in multiple ways:
1: Variable Lengths, in terms of namedstruct elements
.. code-block:: python
ExampleMessage = Message('VarTest', [('x', 'B'), ('y', 'B')])
message_struct = [
('length_in_objects', 'H', 'vardata'), # length field
('vardata', ExampleMessage, 'length_in_objects') # variable length data
]
The length is the string and you can think of it as "linking" to the
length that is provided in the length field.
.. note:: The length item is specified as a string, not as bytes
2: Variable lengths, in terms of byte size
.. code-block:: python
SomeMessage = namedstruct.Message(...)
message_struct = [
(b'length_in_bytes', 'B', 'vardata'),
('vardata', SomeMessage, b'length_in_bytes'),
]
Now if our program specifies that we should have a length in bytes field
we can say 'length_in_bytes' = 8, while only having 2 SomeMessage objects
(assuming that the length of SomeMessage == 4).
.. note:: The length item is specified as bytes, not as a string
3: Fixed length, in terms of namedstruct elements
.. code-block:: python
RepeatedMessage = Message('Repeated', [('x', 'B'), ('y', 'H')])
message_struct = [
('repeated_data', RepeatedMessage, 3),
]
Now we provide an integer that tells us that there will ALWAYS be that
many messages in this message. You also no longer need to have another
field that specifies the number of these messages.
4: Fixed length, in terms of bytes?
TODO: write this
Might have something that can only fit a certain number of bytes, like a
CAN message, and this would break it up automatically?
"""
# pylint: disable=line-too-long
import struct
from typing import Optional
import starstruct
from starstruct.element import register, Element
from starstruct.modes import Mode
@register
class ElementVariable(Element):
    """
    The variable StarStruct element class.

    :param field: The fields passed into the constructor of the element
    :param mode: The mode in which to pack the bytes
    :param alignment: Number of bytes to align to
    """
    # pylint: disable=too-many-instance-attributes
    # We need to keep track of several different styles of output here

    def __init__(self, field: list, mode: Optional[Mode]=Mode.Native, alignment: Optional[int]=1):
        """Initialize a StarStruct element object."""
        # All of the type checks have already been performed by the class
        # factory
        self.name = field[0]

        # A 3-tuple field carries a repeat reference (field name or fixed
        # count) in field[2]; a 2-tuple field means a single message whose
        # made/unpacked value is not wrapped in a list.
        try:
            self.list_return = True
            self.ref = field[2]
        except IndexError:
            self.list_return = False
            self.ref = 1

        # Variable elements don't use the normal struct format, the format is
        # a StarStruct.Message object, but change the mode to match the
        # current mode.
        self.format = field[1]

        # Set the packing style for the struct
        if isinstance(self.ref, (str, bytes)):
            self.variable_repeat = True
            # Determine whether bytes or objects are the measurement tool:
            # a str reference counts objects, a bytes reference counts bytes.
            if isinstance(self.ref, str):
                self.object_length = True
            elif isinstance(self.ref, bytes):
                self.object_length = False
                # Change our ref to be a string, for NamedTuple
                # pylint: disable=no-member
                self.ref = self.ref.decode('utf-8')
        else:
            self.variable_repeat = False
            # TODO: If we add #4, then we would have to have a check here
            self.object_length = True

        self._mode = mode
        self._alignment = alignment
        self.update(mode, alignment)

    @staticmethod
    def valid(field: tuple) -> bool:
        """
        See :py:func:`starstruct.element.Element.valid`

        :param field: The items to determine the structure of the element
        """
        if len(field) == 2:
            return isinstance(field[1], starstruct.message.Message)
        elif len(field) == 3:
            return isinstance(field[1], starstruct.message.Message) \
                and isinstance(field[2], (str, int, bytes))
        else:
            return False

    def validate(self, msg):
        """
        Ensure that the supplied message contains the required information for
        this element object to operate.

        All elements that are Variable must reference valid Length elements.
        """
        from starstruct.elementlength import ElementLength
        if self.variable_repeat:
            # Handle object length, not byte length
            if self.object_length:
                if not isinstance(msg[self.ref], ElementLength):
                    err = 'variable field {} reference {} invalid type'
                    raise TypeError(err.format(self.name, self.ref))
                elif not msg[self.ref].ref == self.name:
                    err = 'variable field {} reference {} mismatch'
                    raise TypeError(err.format(self.name, self.ref))
            # Handle byte length, not object length
            else:
                # TODO: Validate the object
                pass
        else:
            if not isinstance(self.ref, int):
                err = 'fixed repetition field {} reference {} not an integer'
                raise TypeError(err.format(self.name, self.ref))

    def update(self, mode=None, alignment=None):
        """Change the mode/alignment of the struct format.

        Bug fix: the previous implementation tested ``self._mode`` /
        ``self._alignment`` instead of the arguments, so calling update()
        with an omitted argument overwrote the stored value with None.
        Now (matching ElementBitField.update) only supplied values are
        applied.
        """
        if mode is not None:
            self._mode = mode
        if alignment is not None:
            self._alignment = alignment
        self.format.update(self._mode, self._alignment)

    def pack(self, msg):
        """Pack the provided values into the supplied buffer."""
        # When packing use the length of the current element to determine
        # how many elements to pack, not the length element of the message
        # (which should not be specified manually).
        iterator = msg[self.name]
        if not isinstance(iterator, list):
            iterator = [iterator]
        iterator = [item if not hasattr(item, '_asdict') else item._asdict()
                    for item in iterator]
        if self.variable_repeat:
            if self.object_length:
                # Object-counted: pack every supplied element
                ret = [self.format.pack(dict(elem)) if elem else self.format.pack({})
                       for elem in iterator]
            else:
                # Byte-counted: pack elements until the referenced byte
                # budget is reached; anything beyond the budget is dropped.
                ret = []
                length = 0
                for elem in iterator:
                    temp_elem = self.format.pack(dict(elem))
                    if length + len(temp_elem) <= msg[self.ref]:
                        ret.append(temp_elem)
                        # Bug fix: accumulate the consumed byte count;
                        # previously `length` was never incremented, so the
                        # budget was compared per-element instead of in total.
                        length += len(temp_elem)
        else:
            # Fixed repeat count: pack as many elements as we have been given
            # and fill the rest of the slots with empty (pad) bytes
            empty_byte = struct.pack('x')
            ret = [self.format.pack(iterator[index]) if index < len(iterator) else empty_byte * len(self.format)
                   for index in range(self.ref)]

        # There is no need to make sure that the packed data is properly
        # aligned, because that should already be done by the individual
        # messages that have been packed.
        return b''.join(ret)

    def unpack(self, msg, buf):
        """Unpack data from the supplied buffer using the initialized format."""
        # When unpacking a variable element, reference the already unpacked
        # length field to determine how many elements need unpacked.
        ret = []
        unused = buf
        if self.object_length:
            if self.variable_repeat:
                msg_range = getattr(msg, self.ref)
            else:
                msg_range = self.ref
            for _ in range(msg_range):
                (val, unused) = self.format.unpack_partial(unused)
                ret.append(val)
        else:
            length = 0
            while length < getattr(msg, self.ref):
                (val, unused) = self.format.unpack_partial(unused)
                # NOTE(review): len(val) is the unpacked tuple's field count,
                # not its byte size -- these only coincide when every inner
                # field is one byte wide. TODO confirm intent.
                length += len(val)
                ret.append(val)

        # There is no need to make sure that the unpacked data consumes a
        # properly aligned number of bytes because that should already be done
        # by the individual messages that have been unpacked.
        return (ret, unused)

    def make(self, msg):
        """Return the expected "made" value"""
        if self.list_return:
            ret = []
            for val in msg[self.name]:
                ret.append(self.format.make(val))
        else:
            # Single-element field: accept either a bare value or a
            # one-item list and collapse to a single made object
            if isinstance(msg[self.name], list):
                maker = msg[self.name][0]
            else:
                maker = msg[self.name]
            ret = self.format.make(maker)
        return ret
|
sprout42/StarStruct | starstruct/tests/test_elementbitfield.py | #!/usr/bin/env python3
"""Tests for the elementenum class"""
import unittest
import struct
import enum
from starstruct.bitfield import BitField
from starstruct.packedbitfield import PackedBitField
from starstruct.elementbitfield import ElementBitField
class SimpleEnum(enum.Enum):
    """Simple enum class for testing message pack/unpack"""
    # Values are distinct powers of two, so each member maps to one bit
    # when wrapped in a BitField.
    one = 1
    two = 2
    four = 4
class SimpleEnumWithZero(enum.Enum):
    """Simple enum class for testing message pack/unpack"""
    # Includes a 0-valued member, which BitField rejects (tested below)
    # but PackedBitField accepts.
    zero = 0
    one = 1
    two = 2
# pylint: disable=blacklisted-name
class StrEnum(enum.Enum):
    """string based enum class for testing message pack/unpack"""
    # Non-integer member values: used to verify that BitField and
    # PackedBitField reject such enums at construction time.
    foo = 'foo'
    bar = 'bar'
# pylint: disable=line-too-long,invalid-name,no-self-use
class TestElementBitField(unittest.TestCase):
    """ElementBitField module tests"""

    def test_invalid_enum(self):
        """Enum/field combinations that BitField and PackedBitField reject."""
        # pylint: disable=unused-variable
        # Members must have integer values
        with self.assertRaises(TypeError) as cm:
            test_bitfield = BitField(StrEnum)  # noqa: F841
        msg = 'Enum {} members must have integer values'.format(repr(StrEnum))
        self.assertEqual(str(cm.exception), msg)

        # A 0-valued member has no bit representation in a plain BitField
        with self.assertRaises(TypeError) as cm:
            test_bitfield = BitField(SimpleEnumWithZero)  # noqa: F841
        msg = 'Cannot construct BitField from {} with a value for 0: {}'.format(repr(SimpleEnumWithZero), SimpleEnumWithZero.zero)
        self.assertEqual(str(cm.exception), msg)

        # The same field may not appear twice in a PackedBitField
        with self.assertRaises(TypeError) as cm:
            test_packedbitfield = PackedBitField(SimpleEnumWithZero, SimpleEnumWithZero)  # noqa: F841
        msg = 'Duplicate fields not allowed: {}'.format((SimpleEnumWithZero, SimpleEnumWithZero))
        self.assertEqual(str(cm.exception), msg)

        test_bitfield = BitField(SimpleEnum)
        # An enum and a BitField wrapping that same enum also count as duplicates
        with self.assertRaises(TypeError) as cm:
            test_packedbitfield = PackedBitField(SimpleEnum, test_bitfield)  # noqa: F841
        msg = 'Duplicate fields not allowed: {}'.format((SimpleEnum, test_bitfield))
        self.assertEqual(str(cm.exception), msg)

        with self.assertRaises(TypeError) as cm:
            test_packedbitfield = PackedBitField(StrEnum, test_bitfield)  # noqa: F841
        msg = 'Enum {} members must have integer values'.format(repr(StrEnum))
        self.assertEqual(str(cm.exception), msg)

    def test_not_valid(self):
        """Test field formats that are not valid ElementBitField elements."""
        test_bitfield = BitField(SimpleEnum)
        test_packedbitfield = PackedBitField(SimpleEnumWithZero, test_bitfield)
        test_fields = [
            ('a', 'B', SimpleEnum),            # enum field
            ('a', 'B', StrEnum),               # enum field
            ('a', '4x', test_bitfield),        # 4 pad bytes
            ('b', 'z', test_bitfield),         # invalid
            ('b', 'b', test_bitfield),         # invalid
            ('c', '1', test_bitfield),         # invalid
            ('e', '9s', test_bitfield),        # invalid (no strings allowed)
            ('d', '/', test_bitfield),         # invalid
            ('a', '4x', test_packedbitfield),  # 4 pad bytes
            ('b', 'z', test_packedbitfield),   # invalid
            ('c', '1', test_packedbitfield),   # invalid
            ('e', '9s', test_packedbitfield),  # invalid (no strings allowed)
            ('d', '/', test_packedbitfield),   # invalid
            ('f', 'H'),                        # unsigned short (no class)
        ]
        for field in test_fields:
            with self.subTest(field):  # pylint: disable=no-member
                out = ElementBitField.valid(field)
                self.assertFalse(out)

    def test_valid_pack(self):
        """Test packing valid enum values."""
        test_bitfield = BitField(SimpleEnum)
        field = ('a', 'B', test_bitfield)  # unsigned byte: 0, 256
        out = ElementBitField.valid(field)
        self.assertTrue(out)
        elem = ElementBitField(field)
        # Values may be given as ints, enum members, member names, or mixes
        test_values = [
            ({'a': 2}, b'\x02'),
            ({'a': []}, b'\x00'),
            ({'a': None}, b'\x00'),
            ({'a': [SimpleEnum.one]}, b'\x01'),
            ({'a': ['one']}, b'\x01'),
            ({'a': [SimpleEnum.two]}, b'\x02'),
            ({'a': [SimpleEnum.one, SimpleEnum.two]}, b'\x03'),
            ({'a': [1, SimpleEnum.two]}, b'\x03'),
            ({'a': [1, SimpleEnum.two, 'four']}, b'\x07'),
        ]
        for (in_val, out_val) in test_values:
            with self.subTest((out_val, in_val)):  # pylint: disable=no-member
                ret = elem.pack(in_val)
                self.assertEqual(ret, out_val)

        test_packedbitfield = PackedBitField(SimpleEnumWithZero, test_bitfield)
        field = ('a', 'B', test_packedbitfield)  # unsigned byte: 0, 256
        out = ElementBitField.valid(field)
        self.assertTrue(out)
        # Packed layout: test_bitfield occupies the low 3 bits,
        # SimpleEnumWithZero the next 2 bits
        self.assertEqual(list(test_packedbitfield._fields.keys()), [SimpleEnumWithZero, test_bitfield])
        self.assertEqual(test_packedbitfield._fields[test_bitfield], {'offset': 0, 'mask': 0x07, 'width': 3})
        self.assertEqual(test_packedbitfield._fields[SimpleEnumWithZero], {'offset': 3, 'mask': 0x18, 'width': 2})
        elem = ElementBitField(field)
        test_values = [
            ({'a': []}, b'\x00'),
            ({'a': None}, b'\x00'),
            ({'a': 0}, b'\x00'),  # 0 is a valid SimpleEnumWithZero
            ({'a': 4}, b'\x04'),
            ({'a': [SimpleEnum.one]}, b'\x01'),
            ({'a': [SimpleEnum.two]}, b'\x02'),
            ({'a': [SimpleEnum.one, SimpleEnum.two]}, b'\x03'),
            ({'a': ['zero', SimpleEnumWithZero.one, SimpleEnum.two]}, b'\x0a'),
            ({'a': [SimpleEnumWithZero.two, 'four']}, b'\x14'),
        ]
        for (in_val, out_val) in test_values:
            with self.subTest((out_val, in_val)):  # pylint: disable=no-member
                ret = elem.pack(in_val)
                self.assertEqual(ret, out_val)

    def test_out_of_range_values_pack(self):
        """Test packing invalid enum values."""
        test_bitfield = BitField(SimpleEnum)
        field = ('a', 'B', test_bitfield)  # unsigned byte: 0, 256
        out = ElementBitField.valid(field)
        self.assertTrue(out)
        elem = ElementBitField(field)
        # Each pair is (input, the specific value expected in the error)
        test_values = [
            ({'a': -1}, -1),
            ({'a': 3}, 3),
            ({'a': 0}, 0),
            ({'a': [0, SimpleEnum.one]}, 0),
            ({'a': [SimpleEnum.one, -1]}, -1),
            ({'a': [SimpleEnum.two, 3]}, 3),
            ({'a': ['TWO']}, 'TWO'),
        ]
        msg = '{} is not a valid {}'
        for (in_val, bad_val) in test_values:
            with self.subTest((in_val, bad_val)):  # pylint: disable=no-member
                with self.assertRaises(ValueError) as cm:
                    elem.pack(in_val)
                self.assertEqual(str(cm.exception), msg.format(bad_val, 'SimpleEnum'))

        test_packedbitfield = PackedBitField(SimpleEnumWithZero, test_bitfield)
        field = ('a', 'B', test_packedbitfield)  # unsigned byte: 0, 256
        out = ElementBitField.valid(field)
        self.assertTrue(out)
        elem = ElementBitField(field)
        # 'valid' errors: value matches no packed field at all;
        # 'unique' errors: value is ambiguous between the packed fields
        test_values = [
            ({'a': -1}, 'valid', -1),
            ({'a': 3}, 'valid', 3),
            ({'a': ['one']}, 'unique', 'one'),
            ({'a': [1, SimpleEnum.two]}, 'unique', 1),
            ({'a': [1, SimpleEnum.two, 'four']}, 'unique', 1),
            ({'a': [SimpleEnumWithZero.one, -1]}, 'valid', -1),
            ({'a': ['TWO']}, 'valid', 'TWO'),
        ]
        msg = '{} is not a {} {}'
        for (in_val, err_str, bad_val) in test_values:
            with self.subTest((in_val, err_str, bad_val)):  # pylint: disable=no-member
                with self.assertRaises(ValueError) as cm:
                    elem.pack(in_val)
                self.assertEqual(str(cm.exception), msg.format(bad_val, err_str, [SimpleEnumWithZero, test_bitfield]))

    def test_unpack(self):
        """Test unpacking valid enum values."""
        test_bitfield = BitField(SimpleEnum)
        field = ('a', 'B', test_bitfield)  # unsigned byte: 0, 256
        out = ElementBitField.valid(field)
        self.assertTrue(out)
        elem = ElementBitField(field)
        # Bits with no matching member are ignored (e.g. 0xF8, 0xAA)
        test_values = [
            (b'\x00', frozenset([])),
            (b'\xF8', frozenset([])),
            (b'\x01', frozenset([SimpleEnum.one])),
            (b'\x02', frozenset([SimpleEnum.two])),
            (b'\x03', frozenset([SimpleEnum.one, SimpleEnum.two])),
            (b'\xFF', frozenset([SimpleEnum.one, SimpleEnum.two, SimpleEnum.four])),
            (b'\xAA', frozenset([SimpleEnum.two])),
        ]
        for (in_val, out_val) in test_values:
            with self.subTest((in_val, out_val)):  # pylint: disable=no-member
                (ret, unused) = elem.unpack({}, in_val)
                self.assertEqual(unused, b'')
                self.assertEqual(ret, out_val)

        test_packedbitfield = PackedBitField(SimpleEnumWithZero, test_bitfield)
        field = ('a', 'B', test_packedbitfield)  # unsigned byte: 0, 256
        out = ElementBitField.valid(field)
        self.assertTrue(out)
        elem = ElementBitField(field)
        test_values = [
            (b'\x00', frozenset([SimpleEnumWithZero.zero])),
            (b'\x01', frozenset([SimpleEnumWithZero.zero, SimpleEnum.one])),
            (b'\x02', frozenset([SimpleEnumWithZero.zero, SimpleEnum.two])),
            (b'\x13', frozenset([SimpleEnumWithZero.two, SimpleEnum.one, SimpleEnum.two])),
            (b'\xAA', frozenset([SimpleEnumWithZero.one, SimpleEnum.two])),
        ]
        for (in_val, out_val) in test_values:
            with self.subTest((in_val, out_val)):  # pylint: disable=no-member
                (ret, unused) = elem.unpack({}, in_val)
                self.assertEqual(unused, b'')
                self.assertEqual(ret, out_val)

    def test_out_of_range_values_unpack(self):
        """Test unpacking invalid enum values."""
        test_bitfield = BitField(SimpleEnum)
        test_packedbitfield = PackedBitField(SimpleEnumWithZero, test_bitfield)
        field = ('a', 'B', test_packedbitfield)  # unsigned byte: 0, 256
        out = ElementBitField.valid(field)
        self.assertTrue(out)
        elem = ElementBitField(field)
        # 0xF8 decodes the SimpleEnumWithZero bits to 3, which is no member
        test_values = [
            (b'\xF8', 3),
        ]
        msg = '{} is not a valid {}'
        for (in_val, bad_val) in test_values:
            with self.subTest((in_val, bad_val)):  # pylint: disable=no-member
                int_in_val = struct.unpack('B', in_val)[0]
                # Unpacking via the PackedBitField raises the raw error
                with self.assertRaises(ValueError) as cm:
                    test_packedbitfield.unpack(int_in_val)
                self.assertEqual(str(cm.exception), msg.format(bad_val, 'SimpleEnumWithZero'))

                # The element wraps the same failure with message/buffer context
                with self.assertRaises(ValueError) as cm:
                    elem.unpack({}, in_val)
                unpack_msg = 'Value: {0} was not valid for {1}\n\twith msg: {2},\n\tbuf: {3}'.format(
                    int_in_val, test_packedbitfield, {}, in_val)
                self.assertEqual(str(cm.exception), unpack_msg)
|
NumericalMax/Dashboard | Dashboard/main.py | <gh_stars>10-100
# Author: <NAME>
# Created on: 31.10.2017
# Purpose: Simple Example of how to use bootstrap template and PivotTable in order to build a small Dashboard collecting data from a SQL DB.
# License: License Files can be found in the root folder 'Dashboard'
# The Code is mainly based on implementation from CreativeTim (HTML, Style, Javascript) and Nicolas Kruchten and Datacratic (PivotTable). Please include their MIT-License file also in your project if you use any resource from their project.
from flask import Flask, render_template, redirect, request
import pandas as pd
import mysql.connector
app = Flask(__name__)
# loads query from sql database to pandas df
def load_sql_table(query):
    """Run *query* against the module-level `connector` and return a DataFrame.

    Returns None when the query (or the connection) fails; callers treat
    None as "no data available". This is deliberately best-effort.
    """
    try:
        df = pd.read_sql(query, con=connector)
        return df
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the best-effort behavior
        # without hiding interpreter-level signals.
        return None
@app.route('/')
def start_redirect():
    """Send visitors from the root URL to the dashboard page."""
    dashboard_url = "http://127.0.0.1:5000/dashboard.html"
    return redirect(dashboard_url, code=302)
@app.route('/dashboard.html', methods=['GET', 'POST'])
def start_dashboard():
    """Render the dashboard; on POST, load the selected table as JSON records.

    The selected table name comes from the request form and is only used
    after being validated against the table list reported by the database
    itself, since table names cannot be bound as SQL parameters.
    """
    # Ask for all tables in your SQL Database
    # Request might look different for non MySQL
    # E.g. for SQL Server: sql_statement = "SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='BASE TABLE'"
    # TODO: Modify for your needs
    sql_statement = "SHOW TABLES"
    tables = load_sql_table(sql_statement)

    if request.method == 'POST':
        whichTable = request.form['whichTable']
        # SQL-injection guard: only accept table names the database reported
        # (identifiers cannot be parameterized, so an allow-list is required).
        known_tables = set()
        if tables is not None:
            known_tables = set(tables.iloc[:, 0].astype(str))
        if whichTable in known_tables:
            # Load the requested table from Database
            # TODO: Set your database query for the chosen table (e.g. modify db schema)
            table = load_sql_table("SELECT * FROM " + whichTable)
            if table is not None:
                result = table.reset_index().to_json(orient='records')
                return render_template('dashboard.html', tables=tables, table=result, selectedTable=whichTable)

    # GET request, unknown table name, or failed query: render empty selection
    result = []
    return render_template('dashboard.html', tables=tables, table=result, selectedTable='None')
if __name__ == '__main__':
    # connect to your database
    try:
        # TODO: Use Library of your needs
        # E.g. for SQL Server it might be pyodbc
        # use 127.0.0.1 if localhost
        connector = mysql.connector.connect(user='...', password='...', host='...', database='...')
    except Exception:
        # Bind connector so later references fail gracefully inside
        # load_sql_table instead of raising NameError.
        connector = None
        # Bug fix: `print "..."` is Python 2 statement syntax and a
        # SyntaxError under Python 3; print(...) works on both.
        print("No access to the required database")
    app.run()
|
SergeyShurkhovetckii/Best-Current_Python_telegram_bot | bot.py | <reponame>SergeyShurkhovetckii/Best-Current_Python_telegram_bot<filename>bot.py
import config #Конфинурация для Telegram Bot
import requests # Модуль для обработки URL
from bs4 import BeautifulSoup as BS # Модуль для работы с HTML
import time # Модуль для остановки программы
import telebot
import emoji #Смайлики
from telebot import types
import re
# Scraper: source pages for the exchange rates
Main = "https://kaliningrad.bankiros.ru/currency"
MAin_CB = "https://bankiros.ru/currency/cbrf"
Main_Moex ="https://bankiros.ru/currency/moex/usdrub-tod"
Main_Moex_euro = "https://bankiros.ru/currency/moex/eurrub-tod"
# Headers sent along with each URL (desktop browser user agent)
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36'}
#######
# Download the pages.
# NOTE(review): this runs once at import time, so the rates are frozen
# until the process restarts.
current_alls = requests.get(Main ,headers=headers)
current_CB = requests.get(MAin_CB,headers=headers)
current_moex_USD = requests.get(Main_Moex,headers=headers)
current_moex_EURO = requests.get(Main_Moex_euro,headers=headers)
####################
# Parse each downloaded page
soup_current_all = BS(current_alls.content,'html.parser')
soup_current_CB = BS(current_CB.content,'html.parser')
soup_current_moex_usd = BS(current_moex_USD.content,'html.parser')
soup_current_moex_euro = BS(current_moex_EURO.content,'html.parser')
# Final parsing: pick out the rate/timestamp nodes by CSS class
conver_soup_dollars = soup_current_all.find_all("span",{"class":"conv-val triger-usd"})
conver_soup_euro = soup_current_all.find_all("span",{"class":"conv-val triger-eur"})
conver_soup_pl = soup_current_all.find_all("span",{"class":"conv-val triger-pln"})
conver_soup_time = soup_current_all.find_all("div",{"class":"actual-currency"})
conver_soup_moex_usd = soup_current_moex_usd.find_all("span",{"class":"xxx-font-size-30 xxx-text-bold"})
conver_soup_moex_usd_time = soup_current_moex_usd.find_all("span",{"class":"xxx-trading-preview__date xxx-font-size-14 xxx-text-color-darck-gray"})
conver_soup_moex_euro = soup_current_moex_euro.find_all("span",{"class":"xxx-font-size-30 xxx-text-bold"})
conver_soup_moex_euro_time = soup_current_moex_euro.find_all("span",{"class":"xxx-trading-preview__date xxx-font-size-14 xxx-text-color-darck-gray"})
# bank_name = soup_current_all.find("td",{"class":"currency-value"})
# for i in bank_name.find_all('a'):
#     print(i.text)
# Dollar values: buy / sell / central bank / MOEX rate + MOEX timestamp
USD_BUY = conver_soup_dollars[0].text
USD_SELL = conver_soup_dollars[1].text
USD_CB = conver_soup_dollars[2].text
USD_TR = conver_soup_moex_usd[0].text
USD_TR_time = conver_soup_moex_usd_time[0].text
# Euro values
EURO_BUY = conver_soup_euro[0].text
EURO_SELL = conver_soup_euro[1].text
EURO_CB = conver_soup_euro[2].text
EURO_TR = conver_soup_moex_euro[0].text
EURO_TR_time = conver_soup_moex_euro_time[0].text
# PLN (zloty) values
PL_BUY = conver_soup_pl[0].text
PL_SELL = conver_soup_pl[1].text
# Misc: "last updated" timestamp shown with the rates
actual_time = conver_soup_time[0].text
#########################################
# Начало
bot = telebot.TeleBot(config.token)
@bot.message_handler(commands=['start'])
def get_user_info(message):
    # /start handler: greet the user and show the single entry button (step 1)
    markup_inline =types.InlineKeyboardMarkup(row_width=1)
    btn_inline_1 = types.InlineKeyboardButton(text=" Начать ",callback_data = 'current')
    markup_inline.add(btn_inline_1)
    bot.send_message(message.chat.id, "Привет👋🏻" + message.from_user.first_name + " я бот \n \n Моя задача показывать лучший курс валюты в Калининграде. \n \n Курс валюты по Центральному банку \n \n Курс валюты Московской биржы \n \n \n Для повторного запуска используете комманду /start или напишите в чат /start " ,reply_markup = markup_inline)
@bot.callback_query_handler(func=lambda call:True)
def answer(call):
    # Callback router: each callback_data value selects the next menu or reply
    if call.data== 'current':
        # Step 1: top-level section menu (exchange offices / central bank / MOEX)
        markup_inline_step_2 =types.InlineKeyboardMarkup(row_width=3)
        btn_inline_6_step_2 = types.InlineKeyboardButton(text="Обмена",callback_data = 'BB')
        btn_inline_4_step_2 = types.InlineKeyboardButton(text="Курс ЦБ",callback_data = 'cb')
        btn_inline_5_step_2 = types.InlineKeyboardButton(text="Курс Биржы",callback_data = 'tr')
        markup_inline_step_2.add(btn_inline_4_step_2,btn_inline_5_step_2,btn_inline_6_step_2)
        msg = bot.send_message(call.message.chat.id,"✅Пожалуйста выберете раздел",reply_markup = markup_inline_step_2)

    # Step 2: exchange-office rates -- currency selection
    if call.data== 'BB':
        markup_inline_step_21 =types.InlineKeyboardMarkup(row_width=2)
        btn_inline_1_step_21 = types.InlineKeyboardButton(text="🇺🇸 Доллар ",callback_data = 'dollars')
        btn_inline_2_step_21 = types.InlineKeyboardButton(text="🇪🇺 Евро",callback_data = 'euro')
        btn_inline_3_step_21 = types.InlineKeyboardButton(text="🇵🇱 PL",callback_data = 'pln')
        markup_inline_step_21.add(btn_inline_1_step_21,btn_inline_2_step_21,btn_inline_3_step_21)
        bot.send_message(call.message.chat.id," \n \n ✅Узнать самый выгодный курс в пунтках обмена",reply_markup = markup_inline_step_21)
    # Step 3: dollar buy/sell
    elif call.data =='dollars':
        bot.send_message(call.message.chat.id,"🇺🇸 Покупка|Продажа \n \n☑️ {0} | {1} \n \n \n Время обновления МСК {2} ".format(USD_BUY,USD_SELL,actual_time ))
    # Step 3: euro buy/sell
    elif call.data =='euro':
        bot.send_message(call.message.chat.id,"🇪🇺 Покупка|Продажа \n \n ☑️ {0} | {1} \n \n \n Время обновления МСК {2} ".format(EURO_BUY,EURO_SELL,actual_time ))
    # Step 3: zloty buy/sell
    elif call.data =='pln':
        bot.send_message(call.message.chat.id,"🇵🇱 Покупка|Продажа \n \n ☑️ {0} | {1} \n \n \n Время обновления МСК {2} ".format(PL_BUY,PL_SELL,actual_time ))

    # Step 4: central-bank rates -- currency selection
    if call.data =='cb':
        markup_inline_step_3 =types.InlineKeyboardMarkup(row_width=2)
        btn_inline_1_step_3 = types.InlineKeyboardButton(text="🇺🇸 Доллар ",callback_data = 'dollars_cb')
        btn_inline_2_step_3 = types.InlineKeyboardButton(text="🇪🇺 Евро",callback_data = 'euro_cb')
        markup_inline_step_3.add(btn_inline_1_step_3,btn_inline_2_step_3)
        bot.send_message(call.message.chat.id," \n \n ✅ Узнать курс по Центральному Банку ",reply_markup = markup_inline_step_3)
    # Step 4.1: dollar (central bank)
    elif call.data =='dollars_cb':
        bot.send_message(call.message.chat.id,'\n \n'"🇺🇸 {} ".format(USD_CB))
    # Step 4.2: euro (central bank)
    elif call.data == 'euro_cb':
        bot.send_message(call.message.chat.id,'\n \n'"🇪🇺 {} ".format(EURO_CB))

    # Step 5: Moscow Exchange rates -- currency selection
    if call.data =='tr':
        markup_inline_step_4 =types.InlineKeyboardMarkup(row_width=2)
        btn_inline_1_step_4 = types.InlineKeyboardButton(text="🇺🇸 Доллар ",callback_data = 'dollars_tr')
        btn_inline_2_step_4 = types.InlineKeyboardButton(text="🇪🇺 Евро",callback_data = 'euro_tr')
        markup_inline_step_4.add(btn_inline_1_step_4,btn_inline_2_step_4)
        bot.send_message(call.message.chat.id,"\n \n ✅ Узнать курс Московской Биржы ",reply_markup = markup_inline_step_4)
    # Step 5.1: dollar (MOEX)
    elif call.data =='dollars_tr':
        bot.send_message(call.message.chat.id,'\n \n'"🇺🇸 {0} \n \n Время обновления МСК {1} ".format(USD_TR,USD_TR_time))
    # Step 5.2: euro (MOEX)
    elif call.data == 'euro_tr':
        bot.send_message(call.message.chat.id,'\n \n'"🇪🇺 {0} \n \n Время обновления МСК {1} ".format(EURO_TR,EURO_TR_time))
@bot.message_handler(commands=['help'])
def get_user_help(message):
    # /help handler: author contact and data-source attribution
    bot.send_message(message.chat.id, "Привет👋🏻" + message.from_user.first_name + " мой создатель @S19S93 , вся информация была взята https://kaliningrad.bankiros.ru/ ")
bot.polling(none_stop=True) |
SergeyShurkhovetckii/Best-Current_Python_telegram_bot | config.py | <filename>config.py
token = "<KEY>" |
jacobq/csci5221-viro-project | ext/viro_constant.py | # Manually set L
L = 4  # number of bits in a VID (hand-set to match the example topology)

# OpenFlow Ethernet frame type codes (dl_type) used by VIRO
VIRO_DATA = 0x0802     # must agree with VIRO_TYPE in ../pox/lib/packet/ethernet.py
VIRO_CONTROL = 0x0803

# op code -> symbolic name for every VIRO operation
OP_NAMES = {
    0x0000: 'VIRO_DATA_OP',
    0x1000: 'RDV_PUBLISH',
    0x2000: 'RDV_QUERY',
    0x3000: 'RDV_REPLY',
    0x4000: 'DISCOVERY_ECHO_REQUEST',
    0x5000: 'DISCOVERY_ECHO_REPLY',
    0x6000: 'GW_WITHDRAW',
    0x7000: 'RDV_WITHDRAW'
}

# Inverse mapping: symbolic name -> op code
OP_CODES = dict((op_name, op_code) for op_code, op_name in OP_NAMES.items())

# Remaining fixed header fields
HTYPE = 0x1
PTYPE = 0x0800
HLEN = 0x06
PLEN = 0x04

# 16 hops ought to be more than enough since that's every node in the example topology!
MAX_TTL = 16

# Per the CSCI 5221 Project 2 assignment document:
# "We limit the maximal number of < Gateway; Nexthop > pairs in each level to 3."
MAX_GW_PER_LEVEL = 3
MAX_GW_PER_RDV_REPLY = MAX_GW_PER_LEVEL

# The range of hardware/MAC addresses
# 00:14:4F:F8:00:00 - 00:14:4F:FF:FF:FF
# is registered to Oracle Corporation and appears to have been used
# in some of their old software (Logical Domain Manager?)
# rather than in the manufacturing of NICs.
# See http://docs.oracle.com/cd/E19604-01/821-0406/rangeofmacaddressesassignedtoldoms/index.html
# However, FAKE_SRC_MAC is believed to be basically arbitrary (not specially chosen).
FAKE_SRC_MAC = '00:14:4f:e2:b3:70'
VEIL_MASTER_MAC = '00:00:00:00:00:00'

# Offset/length (bytes) of the operation field: 8 bytes of stuffing
# (fwd + res + dl_type, outside the scope of this assignment) + 6 bytes of header
OPER_OFFSET = 8 + 6
OPER_LEN = 2

# Offset (bytes) of the echo source VID
ECHO_SRC_OFFSET = 8

# All of the following *_TIME parameters are in seconds
ROUND_TIME = 10                # between "bottom up" routing rounds (RDV_PUBLISH / RDV_QUERY)
DISCOVER_TIME = 5              # between neighbor status checks
FAILURE_TIME = 7               # between failure checks
PRINT_REPORT_TIME = 20         # between statistics reports
ROUTING_DEMO_PACKET_TIME = 2   # between sample VIRO_DATA packets demonstrating routing
NEIGHBOR_EXPIRATION_TIME = 3*DISCOVER_TIME  # neighbor entries older than this are removed
|
jacobq/csci5221-viro-project | ext/viro_controller.py | # Copyright 2011 <NAME>
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
An L2 viro POX controller.
"""
import traceback
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.util import *
from pox.lib.recoco import Timer
from viro_constant import L
from viro_module import ViroModule
from viro_switch import ViroSwitch
from viro_veil import *
log = core.getLogger()
class ViroController(object):
    """
    Waits for OpenFlow switches to connect.

    On each connection it builds a ViroModule (routing state) plus a
    ViroSwitch (packet handling) and schedules the recurring VIRO tasks:
    neighbor discovery, failure detection, routing rounds, demo traffic
    and statistics reporting.
    """
    def __init__(self, transparent):
        core.openflow.addListeners(self)
        self.transparent = transparent
    # Runs when the switch/controller is started (not when a link comes up!)
    # NOTE(review): per-switch state (dpid/vid/viro/viro_switch) is stored on
    # the controller itself, so a second switch connecting would overwrite the
    # first — confirm the single-switch-per-controller assumption.
    def _handle_ConnectionUp(self, event):
        """Initialize VIRO state for a newly connected switch and start its timers."""
        log.debug("Connection %s" % (event.connection))
        self.dpid = dpidToStr(event.connection.dpid) # gets the switch dpid identifier
        self.vid = self.get_vid_from_dpid(self.dpid)
        self.viro = ViroModule(self.dpid, self.vid)
        self.viro_switch = ViroSwitch(event.connection, self.transparent, self.viro)
        log.debug("Starting periodic tasks (recurring timers)")
        # Intervals come from viro_constant (star-imported via viro_veil)
        Timer(DISCOVER_TIME, self.discover_neighbors, args=[event], recurring=True)
        Timer(FAILURE_TIME, self.discover_failures, recurring=True)
        Timer(ROUND_TIME, self.viro_switch.start_round, recurring=True)
        Timer(ROUTING_DEMO_PACKET_TIME, self.viro_switch.send_sample_viro_data, recurring=True)
        Timer(PRINT_REPORT_TIME, self.viro_switch.print_switch_stats, recurring=True)
    def get_vid_from_dpid(self, dpid):
        """Derive this switch's L-bit VID string from its dpid string."""
        # To convert a dpid string (assumed to be formatted like a MAC address: xx-xx-xx-xx-xx-xx)
        # starting at "00-00-00-00-00-01" to a vid string (of '1' and '0' characters)
        # starting at "000" we do the following:
        # 1. Remove "-" characters to make the string numeric
        # 2. Convert that string to an integer (assuming base 16)
        # 3. Subtract 1 so that "00-00-00-00-00-01" corresponds with "000"
        # 4. Convert the int back into a string using base 2
        # 5. Zero-pad the result to L bits
        return format(int(dpid.replace('-', ''), 16) - 1, 'b').zfill(L)
    def discover_neighbors(self, event):
        """Flood a DISCOVERY_ECHO_REQUEST so neighbors learn/refresh this node."""
        try:
            r = create_DISCOVER_ECHO_REQUEST(self.vid, self.dpid)
            msg = self.viro_switch.create_openflow_message(of.OFPP_FLOOD, FAKE_SRC_MAC, r, None)
            event.connection.send(msg)
            print "Sending neighbor discovery packets"
        except:
            # Deliberate best-effort: discovery repeats, so one failed round is logged, not fatal.
            print "ERROR: caught exception during neighbor discovery. Not able to send discovery packets?"
            print traceback.format_exc()
    def discover_failures(self):
        """Periodic task: expire neighbors that have not been seen recently."""
        try:
            self.viro.remove_expired_neighbors()
        except:
            # Same best-effort policy as discover_neighbors: log and continue.
            print "ERROR: Caught exception during discover_failures"
            print traceback.format_exc()
def launch(transparent=False):
    """POX entry point: register a VIRO controller/switch component.

    `transparent` may arrive as a string from the POX command line, so it
    is normalized to a bool before being handed to the controller.
    """
    transparent_flag = str_to_bool(transparent)
    core.registerNew(ViroController, transparent_flag)
|
jacobq/csci5221-viro-project | ext/viro_module.py | <reponame>jacobq/csci5221-viro-project
import socket, struct, sys, time, random
from viro_veil import *
class ViroModule(object):
    """Per-switch VIRO routing state: neighbor table, routing table and
    rendezvous (RDV) store, plus the logic that maintains them.

    VIDs appear both as strings of '0'/'1' characters and as ints.
    routing_table maps a bucket level (logical distance) to a list of entry
    dicts with keys 'prefix', 'gateway', 'next_hop', 'port' and — once
    recalculate_default_gw_for_bucket has run — 'default'.
    """
    def __init__(self, my_dpid, my_vid):
        self.dpid = my_dpid
        self.vid = my_vid
        self.L = len(my_vid)            # number of bits in a VID
        self.neighbors = {}             # neighbor vid -> {distance: last-seen timestamp}
        self.rdv_store = {}             # distance -> list of [gateway_vid, next_hop_vid]
        self.rdv_request_tracker = {}   # gateway vid -> list of requester vids
        self.routing_table = {}         # bucket level -> list of entry dicts
    def update_routing_table_based_on_neighbor(self, neighbor_vid, port):
        """Record a directly connected neighbor (ourselves as gateway) in the
        routing table bucket for its logical distance, and refresh the
        neighbor's last-seen time."""
        print "update_routing_table_based_on_neighbor: neighbor_vid =", neighbor_vid, "port =", port
        bucket = delta(neighbor_vid, self.vid)
        # If we don't have any entries at this level -> create a new bucket list
        if bucket not in self.routing_table:
            self.routing_table[bucket] = []
        bucket_info = {
            'prefix': get_prefix(self.vid, bucket),
            'gateway': int(self.vid, 2),
            'next_hop': int(neighbor_vid, 2),
            'port': port
        }
        self.add_bucket_if_not_duplicate(bucket_info, bucket)
        print "Updating the Neighbors list..."
        self.update_neighbors(neighbor_vid, bucket)
        self.print_routing_table()
    def add_bucket_if_not_duplicate(self, bucket_info, k):
        """Append bucket_info to level k unless an equivalent entry exists,
        then re-pick the default gateway for that level."""
        if not is_duplicate_bucket(self.routing_table[k], bucket_info):
            self.routing_table[k].append(bucket_info)
            self.recalculate_default_gw_for_bucket(k)
    # Presumably a gateway has just been added to or removed from the list for this bucket,
    # so we need to do the following:
    # - (Re)compute the logical distance of each gateway
    # - Set a gateway having minimal distance to be the default (and all others not to be the default)
    # - Limit the number of gateways stored to the maximum allowed
    #   as defined by MAX_GW_PER_LEVEL parameter (which is assumed to be > 1).
    #   To do that we remove a gateway whose distance is maximal,
    #   and which was not selected as the default (in the case of all gateways being equidistant)
    def recalculate_default_gw_for_bucket(self, bucket):
        """Re-elect the default gateway for one routing-table level and trim
        the level back down to MAX_GW_PER_LEVEL entries."""
        print "Recalculating default gateway for bucket", bucket
        entries = self.routing_table[bucket]
        min_distance = float("inf")
        min_entry = None
        max_distance = -1
        max_entry = None
        for entry in entries:
            # Clear default flag -- will set again once all distances have been computed
            entry['default'] = False
            # Compute distance
            gw = bin2str(entry['gateway'], self.L)
            distance = delta(gw, self.vid)
            # Update min/max pointers
            if distance > max_distance:
                max_distance = distance
                max_entry = entry
            if distance < min_distance:
                min_distance = distance
                min_entry = entry
        if min_entry is None or max_entry is None:
            print "recalculate_default_gw_for_bucket did not find a min and max distance gateways (no gateways)"
            return
        # DEBUG
        # print "min_distance =", min_distance, "min_entry =", min_entry
        # print "max_distance =", max_distance, "max_entry =", max_entry
        # Set (possibly new) default gateway for this bucket to be one having minimal distance
        min_entry['default'] = True
        # Limit number of entries (assume for now that there will be at most 1 too many)
        if len(entries) > MAX_GW_PER_LEVEL:
            max_gw_index = entries.index(max_entry)
            if not max_entry['default']:
                # Delete gateway at maximal distance (non-equidistant case)
                del entries[max_gw_index]
            else:
                # max_distance == min_distance (equidistant case)
                # So just delete any non-default gateway
                next_gw_index = (max_gw_index + 1) % len(entries)
                del entries[next_gw_index]
        # In case somehow there were more than 1 too many gateways then do this again.
        # If this were expected to happen often then we could do something more efficient for that case,
        # such as sort the entries in order of increasing distance then removing all beyond maximum,
        # but this is not expected to happen. We just have this check here to ensure correctness in case
        # of this unexpected scenario where there is more than 1 gateway that needs to be removed
        # (since this function should be called each time a gateway is added or removed).
        if len(entries) > MAX_GW_PER_LEVEL:
            print "WARNING: Recursively calling recalculate_default_gw_for_bucket; unexpected situation"
            self.recalculate_default_gw_for_bucket(bucket)
    def update_neighbors(self, neighbor_vid, distance):
        """Stamp 'now' as the last-seen time for neighbor_vid at this distance."""
        if neighbor_vid not in self.neighbors:
            self.neighbors[neighbor_vid] = {}
        self.neighbors[neighbor_vid][distance] = time.time()
    # Note: routing_table is a dictionary of k -> entries_list
    # where entries_list is a list of dictionaries
    # e.g. { 1: [{'gateway': ...}, {'gateway': ...}, ...], ...}
    def print_routing_table(self):
        """Dump every routing-table level plus the RDV store to stdout."""
        print '\n----> Routing Table at :', self.vid, '|', self.dpid, ' <----'
        for distance in range(1, self.L + 1):
            if distance in self.routing_table and len(self.routing_table[distance]) > 0:
                for entry in self.routing_table[distance]:
                    print 'Bucket:', distance, \
                        'Port:', entry['port'], \
                        'Prefix:', entry['prefix'],\
                        'Gateway:', bin2str(entry['gateway'], self.L), \
                        'Next hop:', bin2str(entry['next_hop'], self.L), \
                        'Default:', entry['default']
            else:
                print 'Bucket:', distance, '--- E M P T Y ---'
        print 'RDV STORE: ', self.rdv_store, "\n"
    # This function reviews all the entries in the neighbor list and removes
    # entries that are expired (older than NEIGHBOR_EXPIRATION_TIME seconds)
    # It then calls remove_failed_next_hops_from_routing_table
    def remove_expired_neighbors(self):
        """Expire neighbors not heard from within NEIGHBOR_EXPIRATION_TIME and
        purge routing entries that depended on them."""
        print "Now checking to see if any local links / neighbors have gone down"
        now = time.time()
        to_be_deleted = []
        for neighbor_vid, k_to_time in self.neighbors.items():
            for k, time_last_seen in k_to_time.items():
                delta_t = now - time_last_seen
                if delta_t >= NEIGHBOR_EXPIRATION_TIME:
                    print "Going to remove stale entry for neighbor", neighbor_vid, "at distance", k, "since delta_t =", delta_t
                    to_be_deleted.append({'vid': neighbor_vid, 'distance':k})
                else:
                    print "Keeping entry for neighbor", neighbor_vid, "at distance", k, "since delta_t =", delta_t
        self.remove_failed_next_hops_from_routing_table(to_be_deleted)
        # send RDV_WITHDRAW packets?
        for neighbor in to_be_deleted:
            del self.neighbors[neighbor['vid']][neighbor['distance']]
    def remove_failed_next_hops_from_routing_table(self, failed_next_hops):
        """Drop routing entries that relied on the given failed neighbors.

        failed_next_hops is a list of {'vid': ..., 'distance': ...} dicts.
        """
        if len(failed_next_hops) < 1:
            print "No failed next hop entries to remove"
            return
        print "Now removing entries for failed next hops in routing table"
        for next_hop in failed_next_hops:
            vid = next_hop['vid']
            k = next_hop['distance']
            for i, entry in enumerate(self.routing_table[k]):
                # NOTE(review): this converts entry['gateway'] (not 'next_hop')
                # and uses the module-global L rather than self.L — confirm
                # both are intended.
                gw_vid = bin2str(entry['gateway'], L)
                if gw_vid == vid:
                    print "Removing entry from routing table:", entry
                    # NOTE(review): deleting from the list while enumerating it
                    # can skip the entry following each deletion — verify.
                    del self.routing_table[k][i]
                    self.recalculate_default_gw_for_bucket(k)
    def remove_failed_gw(self, packet, gw=None):
        """Remove every routing entry whose gateway or next hop matches the
        failed node (taken from the packet payload unless gw is given), then
        drop any levels left empty."""
        if gw is None:
            payload = bin2str((struct.unpack("!I", packet[24:28]))[0], self.L)
            payload = int(payload, 2)
        else:
            payload = int(gw, 2)
        to_be_deleted = {}
        for level in self.routing_table:
            to_be_deleted[level] = []
            for idx in xrange(0, len(self.routing_table[level])):
                entry = self.routing_table[level][idx]
                if entry['gateway'] == payload or entry['next_hop'] == payload:
                    to_be_deleted[level].append(idx)
        # NOTE(review): indices are deleted in ascending order, so each delete
        # shifts the later recorded indices — with multiple matches per level
        # this can remove the wrong entries or raise IndexError. Verify.
        for level in to_be_deleted:
            for index in to_be_deleted[level]:
                del self.routing_table[level][index]
        bucket_ = []
        for level in self.routing_table:
            if len(self.routing_table[level]) == 0:
                bucket_.append(level)
        for level in bucket_:
            del self.routing_table[level]
        return
    def publish(self, bucket, k):
        """Build an RDV_PUBLISH advertising this bucket to level k's rendezvous
        point. Returns (packet, rdv_vid)."""
        dst = get_rdv_id(k, self.vid)
        packet = create_RDV_PUBLISH(bucket, self.vid, dst)
        print 'Node:', self.vid, 'is publishing neighbor', bin2str(bucket['next_hop'], self.L), 'to rdv:', dst
        return (packet, dst)
    def query(self, k):
        """Build an RDV_QUERY asking level k's rendezvous point for gateways.
        Returns (packet, rdv_vid)."""
        dst = get_rdv_id(k, self.vid)
        packet = create_RDV_QUERY(k, self.vid, dst)
        print 'Node:', self.vid, 'is querying to reach bucket:', k, 'to rdv:', dst
        return packet, dst
    def get_next_hop(self, dst_vid, is_query_or_publish=False):
        """Find the default (next_hop, port) toward dst_vid, bit-flipping the
        destination to try sibling subtrees for publish/query traffic.
        Returns (None, None) when no route exists."""
        next_hop = None
        port = None
        while next_hop is None:
            distance = delta(self.vid, dst_vid)
            if distance == 0:
                break
            if distance in self.routing_table and len(self.routing_table[distance]) > 0:
                for entry in self.routing_table[distance]:
                    if entry['default']:
                        # NOTE(review): str() of the int next_hop yields a
                        # decimal string, unlike bin2str in get_next_hop_rdv —
                        # confirm callers expect this format.
                        next_hop = str(entry['next_hop'])
                        port = int(entry['port'])
                        break
            if next_hop is not None:
                break
            # TODO: This code "smells" bad -- not sure if it's even doing anything important/correct
            if not is_query_or_publish:
                break
            print 'No next hop for destination: ', dst_vid, 'distance: ', distance
            # flip the distance bit to try a sibling subtree
            dst_vid = flip_bit(dst_vid, distance)
        if next_hop is None:
            print 'No route to destination', 'MyVID: ', self.vid, 'DEST: ', dst_vid
        return next_hop, port
    # Adds an entry to rdv_store, and also ensures that there are no duplicates
    def add_if_no_duplicate_rdv_entry(self, distance, new_entry):
        for x in self.rdv_store[distance]:
            if x[0] == new_entry[0] and x[1] == new_entry[1]:
                return
        self.rdv_store[distance].append(new_entry)
    # Adds an entry to rdv_request_tracker, and also ensures that there are no duplicates
    def add_if_no_duplicate_gw_entry(self, gw, new_entry):
        for x in self.rdv_request_tracker[gw]:
            if x == new_entry:
                return
        self.rdv_request_tracker[gw].append(new_entry)
    def process_rdv_publish(self, packet):
        """Store a published [gateway, next_hop] pair under the next hop's
        logical distance from this rendezvous node."""
        src_vid = bin2str((struct.unpack("!I", packet[16:20]))[0], self.L)
        next_hop = bin2str((struct.unpack("!I", packet[24:28]))[0], self.L)
        print "RDV_PUBLISH message received from: ", src_vid
        distance = delta(self.vid, next_hop)
        if distance not in self.rdv_store:
            self.rdv_store[distance] = []
        new_entry = [src_vid, next_hop]
        self.add_if_no_duplicate_rdv_entry(distance, new_entry)
    def process_rdv_query(self, packet):
        """Answer an RDV_QUERY: look up gateways for the requested level and
        return a packed RDV_REPLY, or '' when none are known."""
        src_vid = bin2str((struct.unpack("!I", packet[16:20]))[0], self.L)
        payload = bin2str((struct.unpack("!I", packet[24:28]))[0], self.L)
        k = int(payload, 2)
        print "RDV_QUERY message received from: ", src_vid
        # search in rdv store for the logically closest gateway to reach kth distance away neighbor
        gw_str_list = self.find_gateways_in_rdv_store(k, src_vid)
        print "Got gw_str_list =", gw_str_list
        # if found then form the reply packet and send to src_vid
        if len(gw_str_list) < 1:
            # No gateway found
            print 'Node: ', self.vid, 'has no gateway for the rdv_query packet to reach bucket: ', k, 'for node: ', src_vid
            return ''
        gw_list = []
        for gw_str in gw_str_list:
            gw_list.append(int(gw_str,2))
        # create a RDV_REPLY packet that will be sent back
        reply_packet = create_RDV_REPLY(gw_list, k, self.vid, src_vid)
        # Keeps track of the Nodes that requests each Gateways at
        # specific level
        for gw_str in gw_str_list:
            if gw_str not in self.rdv_request_tracker:
                self.rdv_request_tracker[gw_str] = []
            self.add_if_no_duplicate_gw_entry(gw_str, src_vid)
        return reply_packet
    # k is an integer
    # src_vid is a string of '0's and '1's
    def find_gateways_in_rdv_store(self, k, src_vid):
        """Return up to MAX_GW_PER_RDV_REPLY gateway VID strings for level k,
        closest (by logical distance to src_vid) first."""
        gw_dist = {}
        if k not in self.rdv_store:
            return []
        # Look through rdv store for next_hop entries
        # and build up a map/dictionary of gw_vid -> distance
        # (this eliminates the need to remove duplicates,
        # which might otherwise happen since a gateway may
        # have several edges connecting to a node in other subtree)
        for t in self.rdv_store[k]:
            gw_vid = t[0]
            distance = delta(gw_vid, src_vid)
            gw_dist[gw_vid] = distance
        gw_list = []
        for gw_vid, distance in gw_dist.items():
            gw_list.append({'gw_vid': gw_vid, 'distance': distance})
        if len(gw_list) < 1:
            return []
        # Sort the list of available gateways by distance (closest first)
        gw_list.sort(key=lambda gw: gw['distance'])
        # print "find_gateways_in_rdv_store found these gateways:", gw_list
        # Truncate list so that it has at most MAX_GW_PER_RDV_REPLY entries
        gw_list = gw_list[:MAX_GW_PER_RDV_REPLY]
        # Remove the distance information from the list so it's a list of VIDs again instead of a list of dictionaries
        gw_list = map(lambda x: x['gw_vid'], gw_list)
        return gw_list
    def process_rdv_reply(self, packet):
        """Install the gateways carried in an RDV_REPLY into the level-k bucket
        of the routing table (skipping ourselves and unreachable gateways)."""
        # Fill my routing table using this new information
        [k] = struct.unpack("!I", packet[24:28])
        gw_offset = 28
        num_of_gw = (len(packet) - gw_offset)/4
        gw_list = struct.unpack("!" + "I"*num_of_gw, packet[28:(28+4*num_of_gw)])
        print "RDV_REPLY contained", num_of_gw, "gateway(s):", map(lambda s: bin2str(s, self.L), gw_list)
        for gw in gw_list:
            gw_str = bin2str(gw, self.L)
            if gw_str == self.vid:
                print "(Ignoring gateway in RDV_REPLY because it is us)"
                continue
            if not k in self.routing_table:
                self.routing_table[k] = []
            next_hop, port = self.get_next_hop_rdv(gw_str)
            if next_hop is None:
                print 'ERROR: no next_hop found for the gateway:', gw_str
                print "New routing information couldn't be added!"
                continue
            next_hop_int = int(next_hop, 2)
            bucket_info = {
                'prefix': get_prefix(self.vid, k),
                'gateway': gw,
                'next_hop': next_hop_int,
                'port': port
            }
            self.add_bucket_if_not_duplicate(bucket_info, k)
    def get_next_hop_rdv(self, gw_str):
        """Return the default (next_hop_vid_str, port_str) toward gw_str, or
        (None, None) when no default entry exists at that distance."""
        next_hop = None
        port = None
        distance = delta(self.vid, gw_str)
        if distance in self.routing_table:
            for entry in self.routing_table[distance]:
                if entry['default']:
                    next_hop = bin2str(entry['next_hop'], self.L)
                    port = str(entry['port'])
        return next_hop, port
    # Selects random entry from appropriate level bucket/entry in the routing table
    # Returns gateway and next hop as strings of '0's and '1's
    # and port as an integer
    def choose_gateway_for_forwarding_directive(self, dst_vid):
        print "Choosing gateway to use as forwarding directive for dst_vid =", dst_vid
        distance = delta(dst_vid, self.vid)
        if distance < 1:
            print "WARNING: choose_gateway_for_forwarding_directive was asked to get a gateway to reach ourselves"
        if distance in self.routing_table and len(self.routing_table[distance]) > 0:
            entries = self.routing_table[distance]
            random_index = random.randrange(0, len(entries))
            selected_entry = entries[random_index]
            print "Selected (random) gateway for forwarding directive:", selected_entry
            # NOTE(review): uses the module-global L rather than self.L —
            # equivalent only while L == len(self.vid). Confirm.
            return bin2str(selected_entry['gateway'], L),\
                bin2str(selected_entry['next_hop'], L),\
                selected_entry['port']
        else:
            print "Could not find a gateway for distance =", distance, "for dst_vid=", dst_vid
            return None, None, None
|
jacobq/csci5221-viro-project | ext/viro_veil.py | import socket, struct, sys, random, traceback
from viro_constant import *
def get_dpid_length(dpid):
    """Return the bit length of a "-"-separated dpid string (8 bits per group)."""
    return 8 * len(dpid.split("-"))
# convert a string containing mac address into a byte array
def get_mac_array(mac):
    """Convert a MAC/dpid string into a list of 6 integer byte values.

    Accepts both "-" (dpid style, e.g. "00-00-00-00-00-01") and ":"
    (conventional MAC, e.g. FAKE_SRC_MAC) as separators; the original only
    split on "-", so colon-separated MACs were mis-parsed.
    Missing trailing bytes are left as 0; a malformed count is reported
    (best-effort) rather than raising.
    """
    mac_array = [0] * 6
    mac_bytes = mac.replace(':', '-').split("-")
    if len(mac_bytes) != 6:
        # Single-argument print() keeps the output identical under Python 2.
        print('Error: MalFormed mac, expected 6 bytes, found :  %d bytes in the input array:  %s'
              % (len(mac_bytes), mac))
    for i in range(0, 6):
        if i < len(mac_bytes):
            mac_array[i] = int(mac_bytes[i], 16)
    return mac_array
# convert a byte array into the string format
def get_mac_hex_string(mac_bytes):
    """Render a 6-byte sequence as a colon-separated lowercase hex string."""
    parts = []
    for i in range(6):
        digits = hex(mac_bytes[i]).replace('0x', '')
        parts.append(digits.zfill(2))
    return ':'.join(parts)
# get the prefix of kth bucket (k = dist) for node vid
def get_prefix(vid, dist):
    """Prefix of the dist-th bucket for vid.

    Keeps the top (L - dist) bits, flips the next bit, and pads the
    remaining (dist - 1) positions with '*' wildcards.
    """
    L = len(vid)
    flipped = '1' if vid[L - dist] == '0' else '0'
    return vid[:L - dist] + flipped + '*' * (dist - 1)
# Extract the operation from the packet
def get_operation(packet):
    """Extract the 16-bit VIRO op code from a packed control packet."""
    field = packet[OPER_OFFSET: OPER_OFFSET + OPER_LEN]
    return struct.unpack('!H', field)[0]
# convert operation number into a string
def get_operation_name(operation):
    """Map an op code to its symbolic name, with a fallback for unknown codes."""
    return OP_NAMES.get(operation, 'UNKNOWN OPERATION')
# returns the destination in the string format
def get_dest(packet, L):
    """Return the destination VID (bytes 20-24) as an L-bit binary string."""
    [dest] = struct.unpack("!I", packet[20:24])
    return bin2str(dest, L)
# returns the source in the string format
def get_src(packet, L):
    """Return the source VID (bytes 16-20) as an L-bit binary string."""
    [src] = struct.unpack("!I", packet[16:20])
    return bin2str(src, L)
# returns the ID of the rendezvous point for distance = dist from node = vid
# Right now, this is the first (L - dist + 1) bits of the vid followed by (dist-1) zeros
def get_rdv_id(dist, vid):
    """Rendezvous-point VID for distance dist from node vid.

    Currently the first (L - dist + 1) bits of vid followed by (dist - 1)
    zeros — the original's hash_val placeholder always produced zeros.
    """
    keep = len(vid) - dist + 1
    return vid[:keep] + '0' * (dist - 1)
# check if the bucket is already present in the set or not:
def is_duplicate_bucket(bucket_list, new_bucket):
    """Return True if an entry with the same prefix, gateway and next_hop as
    new_bucket is already present anywhere in bucket_list.

    BUG FIX: the original returned from inside the loop after examining only
    the first element, so duplicates past index 0 were never detected and
    could accumulate in the routing table.
    """
    for bucket in bucket_list:
        if bucket['prefix'] == new_bucket['prefix'] and \
           bucket['gateway'] == new_bucket['gateway'] and \
           bucket['next_hop'] == new_bucket['next_hop']:
            return True
    return False
def pack_header(operation):
    """Pack the fixed VIRO header fields (HTYPE, PTYPE, HLEN, PLEN) plus the op code."""
    fields = (HTYPE, PTYPE, HLEN, PLEN, operation)
    return struct.pack("!HHBBH", *fields)
def pack_mac(data):
    """Pack a MAC/dpid string into 6 network-order bytes."""
    mac_values = get_mac_array(data)
    return pack_bytes(mac_values)
def pack_bytes(data):
    """Concatenate each value in data packed as a single unsigned byte ("!B")."""
    packed = [struct.pack("!B", byte) for byte in data]
    return ''.join(packed)
def create_DISCOVER_ECHO_REQUEST(vid, dst_dpid):
    """Build a DISCOVERY_ECHO_REQUEST: fwd word, reserved/dl_type, VIRO header,
    sender VID, then the target dpid bytes."""
    pieces = [
        struct.pack('!I', 0),                         # fwd (unused for discovery)
        struct.pack('!HH', 0x0000, VIRO_CONTROL),     # reserved + VIRO control dl_type
        pack_header(OP_CODES['DISCOVERY_ECHO_REQUEST']),
        struct.pack("!I", int(vid, 2)),               # Sender VID (32 bits)
        pack_mac(dst_dpid),                           # target switch dpid
    ]
    return ''.join(pieces)
def create_DISCOVER_ECHO_REPLY(vid, dpid):
    """Build a DISCOVERY_ECHO_REPLY carrying this node's VID and dpid."""
    pieces = [
        struct.pack('!I', 0),                         # fwd (the original's int('0', 2) == 0)
        struct.pack('!HH', 0x0000, VIRO_CONTROL),     # reserved + VIRO control dl_type
        pack_header(OP_CODES['DISCOVERY_ECHO_REPLY']),
        struct.pack("!I", int(vid, 2)),               # Sender VID (32 bits)
        pack_mac(dpid),                               # replying switch dpid
    ]
    return ''.join(pieces)
# This function generates/encodes/packs the "Data Packet" described in the
# first subtask of task 2. Conveniently, the fwd_vid and ttl parameters
# are already listed in the arguments.
# Note that "fwd" and "res" are unrelated & outside the scope of this project
# (Guobao said to leave them in place as they're for POX / Open vSwitch, so I will)
# ttl is an integer, and all other arguments are strings of '0's and '1's
def create_VIRO_DATA(src_vid, dst_vid, fwd_vid, ttl, payload):
    """Build the VIRO "Data Packet" (task 2).

    ttl is an int; src_vid, dst_vid, fwd_vid and payload are strings of
    '0'/'1' characters. "fwd" and "res" are POX/Open vSwitch plumbing and
    outside the scope of this project.
    """
    pieces = [
        struct.pack('!I', int(dst_vid, 2)),           # fwd word (POX/OVS)
        struct.pack('!HH', 0x0000, VIRO_CONTROL),     # reserved + dl_type
        pack_header(OP_CODES['VIRO_DATA_OP']),
        struct.pack("!I", int(src_vid, 2)),           # source VID
        struct.pack("!I", int(dst_vid, 2)),           # destination VID
        struct.pack("!I", int(fwd_vid, 2)),           # FWD-VID: forwarding directive
        struct.pack("!BBH", ttl, 0, 0),               # TTL + 3 padding bytes
        struct.pack("!I", int(payload, 2)),           # payload (treated as an integer)
    ]
    return ''.join(pieces)
def create_RDV_PUBLISH(bucket, vid, dst):
    """Build an RDV_PUBLISH advertising bucket['next_hop'] (an int) from vid
    to the rendezvous point dst (both '0'/'1' strings)."""
    pieces = [
        struct.pack('!I', int(dst, 2)),               # fwd word
        struct.pack('!HH', 0x0000, VIRO_CONTROL),     # reserved + dl_type
        pack_header(OP_CODES['RDV_PUBLISH']),
        struct.pack("!I", int(vid, 2)),               # Sender VID (32 bits)
        struct.pack("!I", int(dst, 2)),               # Destination VID (32 bits)
        struct.pack("!I", bucket['next_hop']),        # published next hop (already an int)
    ]
    return ''.join(pieces)
# bucket_dist is an int; other arguments are binary strings
def create_RDV_QUERY(bucket_distance, vid, dst):
    """Build an RDV_QUERY for level bucket_distance (an int); vid and dst are
    '0'/'1' strings."""
    pieces = [
        struct.pack('!I', int(dst, 2)),               # fwd word
        struct.pack('!HH', 0x0000, VIRO_CONTROL),     # reserved + dl_type
        pack_header(OP_CODES['RDV_QUERY']),
        struct.pack("!I", int(vid, 2)),               # Sender VID (32 bits)
        struct.pack("!I", int(dst, 2)),               # Destination VID (32 bits)
        struct.pack("!I", bucket_distance),           # requested bucket level
    ]
    return ''.join(pieces)
# gw_list is a list of integers; other arguments are binary strings
def create_RDV_REPLY(gw_list, bucket_distance, vid, dst):
    """Build an RDV_REPLY carrying bucket_distance plus every gateway in
    gw_list (a list of ints); vid and dst are '0'/'1' strings."""
    head = [
        struct.pack('!I', int(dst, 2)),               # fwd word
        struct.pack('!HH', 0x0000, VIRO_CONTROL),     # reserved + dl_type
        pack_header(OP_CODES['RDV_REPLY']),
        struct.pack("!I", int(vid, 2)),               # Sender VID (32 bits)
        struct.pack("!I", int(dst, 2)),               # Destination VID (32 bits)
        struct.pack("!I", bucket_distance),
    ]
    tail = [struct.pack("!I", gw) for gw in gw_list]  # one word per gateway
    return ''.join(head + tail)
def create_RDV_WITHDRAW(failed_node, vid, dst):
    """Build an RDV_WITHDRAW for failed_node (packed directly, i.e. already an
    int); vid and dst are '0'/'1' strings."""
    pieces = [
        struct.pack('!I', int(dst, 2)),               # fwd word
        struct.pack('!HH', 0x0000, VIRO_CONTROL),     # reserved + dl_type
        pack_header(OP_CODES['RDV_WITHDRAW']),
        struct.pack("!I", int(vid, 2)),               # Sender VID (32 bits)
        struct.pack("!I", int(dst, 2)),               # Destination VID (32 bits)
        struct.pack("!I", failed_node),               # withdrawn node
    ]
    return ''.join(pieces)
def create_GW_WITHDRAW(failed_gw, vid, dst):
    """Build a GW_WITHDRAW announcing the failed gateway failed_gw (a '0'/'1'
    string, unlike create_RDV_WITHDRAW's int argument)."""
    pieces = [
        struct.pack('!I', int(dst, 2)),               # fwd word
        struct.pack('!HH', 0x0000, VIRO_CONTROL),     # reserved + dl_type
        pack_header(OP_CODES['GW_WITHDRAW']),
        struct.pack("!I", int(vid, 2)),               # Sender VID (32 bits)
        struct.pack("!I", int(dst, 2)),               # Destination VID (32 bits)
        struct.pack("!I", int(failed_gw, 2)),         # withdrawn gateway
    ]
    return ''.join(pieces)
# flips the kth bit (from the right) in the dst and returns it.
def flip_bit(dst, distance):
    """Return dst with its distance-th bit (1-based, counted from the right) inverted."""
    i = len(dst) - distance
    inverted = '1' if dst[i] == '0' else '0'
    return dst[:i] + inverted + dst[i + 1:]
def decode_discovery_packet(packet, L, dpid_length):
    """Decode a DISCOVERY_ECHO_* packet.

    Returns a dict with 'op_code' (int), 'sender_vid' (L-bit '0'/'1' string)
    and 'dst_dpid' (int, 0 when the packet is too short to carry it).
    dpid_length is in bits.
    """
    [op_code] = struct.unpack("!H", packet[14:16])
    [svid] = struct.unpack("!I", packet[16:20])
    # Python 2 integer division: bits -> bytes
    dpid_bytes = (dpid_length/8)
    if len(packet) >= (20+dpid_bytes):
        # Rebuild the dpid by concatenating each byte as two hex digits
        dst_dpid_str = ""
        for dst_dpid_byte in struct.unpack("!" + "B"*dpid_bytes, packet[20:(20+dpid_bytes)]):
            dst_dpid_str += hex(dst_dpid_byte).replace("0x", "").zfill(2)
        dst_dpid = int(dst_dpid_str, 16)
    else:
        # Best-effort: report the truncated packet but still return a result
        print "ERROR: decode_discovery_packet did not get dst_dpid"
        dst_dpid = 0
    return {
        'op_code': op_code,
        'sender_vid': bin2str(svid, L),
        'dst_dpid': dst_dpid
    }
# Takes packed string representation of VIRO_DATA_OP packet and L
# Returns dictionary with the fields after the op code
# (i.e. the one specific to VIRO_DATA_OP packets)
# The VIDs are returned as strings of '0's and '1's
# The TTL is returned as an integer
# The payload is also returned as a string of '0's and '1's
def decode_viro_data_packet_contents(packet, L):
    """Decode the VIRO_DATA_OP-specific fields that follow the op code.

    VIDs are returned as L-bit '0'/'1' strings, the TTL as an int and the
    payload as an 8-bit '0'/'1' string.
    """
    try:
        # Ignore encapsulating header bytes 0-15
        [src_vid] = struct.unpack("!I", packet[16:20])
        [dst_vid] = struct.unpack("!I", packet[20:24])
        [fwd_vid] = struct.unpack("!I", packet[24:28])
        [ttl] = struct.unpack("!B", packet[28:29])
        # Ignore padding bytes 29-31
        [payload] = struct.unpack("!I", packet[32:36])
        return {
            'src_vid': bin2str(src_vid, L),
            'dst_vid': bin2str(dst_vid, L),
            'fwd_vid': bin2str(fwd_vid, L),
            'ttl': ttl,
            'payload': bin2str(payload, 8)
        }
    except:
        # Should never happen in our isolated system since no one else is
        # sending us these packets and we guarantee proper format/encoding.
        # Nevertheless we try to detect this error and log as a best practice.
        # NOTE(review): on error this falls through and implicitly returns
        # None — callers must handle a None result.
        print "ERROR: encountered malformed VIRO_DATA_OP packet"
        print traceback.format_exc()
# converts the binary representation of an integer to binary string.
def bin2str(id, L):
    """Return id as a binary string left-padded with zeros to L characters."""
    bits = bin(id).replace('0b', '')
    return '0' * (L - len(bits)) + bits
# logical distance
def delta(vid1, vid2):
    """Logical (VIRO) distance between two VIDs: L minus the length of their
    common prefix; 0 when they are identical."""
    L = len(vid1)
    distance = L
    for i in range(L):
        if vid1[i] != vid2[i]:
            return distance
        distance -= 1
    return distance
def ones_complement_str(s):
    """Flip every '1' in s to '0' and every other character to '1'."""
    return ''.join('0' if c == '1' else '1' for c in s)
######################################
# Debug functions
def print_packet(packet, L, verbose=False):
def hex_value(i, num_bytes=1):
return '0x' + hex(i).replace('0x', '').zfill(2*num_bytes)
# print "print_packet found", len(packet), "bytes:"
if verbose:
print get_pretty_hex(packet, 2, 4)
if verbose:
if (len(packet) >= 4):
[fwd] = struct.unpack("!I", packet[0:4])
print 'fwd:', hex_value(fwd, 4)
if (len(packet) >= 6):
[res] = struct.unpack("!H", packet[4:6])
print 'res:', hex_value(fwd, 2)
if (len(packet) >= 8):
[of_type] = struct.unpack("!H", packet[6:8])
if of_type != VIRO_CONTROL:
print "WARNING: This packet does not have dl_type == VIRO_CONTROL"
print 'of_type:', hex_value(of_type, 2)
if (len(packet) >= 10):
[htype] = struct.unpack("!H", packet[8:10])
print 'HTYPE:', hex_value(htype, 2)
if (len(packet) >= 12):
[ptype] = struct.unpack("!H", packet[10:12])
print 'PTYPE:', hex_value(ptype, 2)
if (len(packet) >= 13):
[hlen] = struct.unpack("!B", packet[12])
print 'HLEN:', hlen
if (len(packet) >= 14):
[plen] = struct.unpack("!B", packet[13])
print 'PLEN:', plen
if (len(packet) >= 16):
# Could use get_operation(packet) here instead if we wanted to have better code reuse...
# ...but this is just for debugging
[op_code] = struct.unpack("!H", packet[14:16])
print 'Type:', get_operation_name(op_code)
if (len(packet) >= 20):
[src_vid] = struct.unpack("!I", packet[16:20])
print 'Source:', bin2str(src_vid, L)
if (len(packet) >= 24):
[dst_vid] = struct.unpack("!I", packet[20:24])
print 'Destination:', bin2str(dst_vid, L)
if (len(packet) > 24):
if op_code == OP_CODES['VIRO_DATA_OP'] and len(packet) >= 32:
[fwd_vid] = struct.unpack("!I", packet[24:28])
print 'Forwarding directive:', bin2str(fwd_vid, L)
[ttl] = struct.unpack("!B", packet[28])
print 'TTL:', ttl
print 'Payload:', "0x" + packet[32:].encode("hex")
else:
print 'Payload:', "0x" + packet[24:].encode("hex")
print "" # add new line to separate this output in the log
def get_pretty_hex(packed_data, nybbles_per_word, words_per_line):
    """Hex-dump packed_data grouped into words and lines for readable debug output."""
    # Breaks sequences/arrays into nice groups
    # http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks-in-python
    def grouped(seq, size):
        size = max(1, size)
        return [seq[pos:pos + size] for pos in range(0, len(seq), size)]
    hex_text = packed_data.encode("hex")  # Python 2 str-to-hex
    words = ' '.join(grouped(hex_text, nybbles_per_word))
    # each word occupies nybbles_per_word characters plus one separator
    return '\n'.join(grouped(words, (nybbles_per_word + 1) * words_per_line))
|
jacobq/csci5221-viro-project | ext/viro_switch.py | import traceback
import pox.openflow.libopenflow_01 as of
from pox.lib.packet import *
from pox.lib.addresses import *
from viro_veil import *
class ViroSwitch(object):
def __init__(self, connection, transparent, viro_module):
self.connection = connection
self.transparent = transparent
self.viro = viro_module
self.dpid = viro_module.dpid
self.vid = viro_module.vid
self.round = 1
self.demo_packet_sequence = self.generate_demo_packet_sequence()
self.demo_sequence_number = 0
# Statistics for performance / verification report
self.switch_stats = {}
for op_code, op_name in OP_NAMES.items():
self.switch_stats[op_name] = {
'sent': 0,
'processed': 0,
'received': 0
}
# When receiving VIRO data packets we can peek at the TTL
# and determine the number of hops used since we know the
# initial TTL
for field in ['total_hops', 'originated', 'consumed', 'ttl_expired']:
self.switch_stats['VIRO_DATA_OP'][field] = 0
# We want to hear PacketIn messages, so we listen
connection.addListeners(self)
def _handle_PacketIn(self, event):
"""
Handle packet in messages from the switch to implement above algorithm.
"""
packet = event.parsed
match = of.ofp_match.from_packet(packet)
# matching the packet type
# print "OpenFlow (Ethernet) packet: ", packet
try:
if match.dl_type == packet.VIRO_TYPE:
payload = packet.payload
my_packet = payload
[packet_type] = struct.unpack("!H", my_packet[6:8])
if (packet_type == VIRO_CONTROL):
self.process_viro_packet(my_packet, match, event) # handling the VIRO REQUEST
op_code = get_operation(my_packet)
self.switch_stats[get_operation_name(op_code)]['received'] += 1
return
else:
print "Ignoring packet since packet_type was not VIRO_CONTROL"
except Exception:
print "Error while processing packet"
print traceback.format_exc()
# Print messages in log when port status changed
# (i.e. when administratively up/down)
# See https://openflow.stanford.edu/display/ONL/POX+Wiki#POXWiki-PortStatus
def _handle_PortStatus (self, event):
if event.added:
action = "added"
elif event.deleted:
action = "removed"
else:
action = "modified"
print "Port %s on (switch %s) has been %s." % (event.port, event.dpid, action)
def process_viro_packet(self, packet, match=None, event=None):
L = len(self.vid)
# print_packet(packet, L, True)
dpid_length = get_dpid_length(self.dpid)
op_code = get_operation(packet)
self.switch_stats[get_operation_name(op_code)]['processed'] += 1
if op_code == OP_CODES['DISCOVERY_ECHO_REQUEST']:
packet_fields = decode_discovery_packet(packet, L, dpid_length)
neighbor_vid = packet_fields['sender_vid']
print "Neighbor discovery request message received from: ", neighbor_vid
# Reply
viro_packet = create_DISCOVER_ECHO_REPLY(self.vid, self.dpid)
msg = self.create_openflow_message(of.OFPP_IN_PORT, FAKE_SRC_MAC, viro_packet, event.port)
self.connection.send(msg)
# print "Neighbor discovery reply message sent"
elif op_code == OP_CODES['DISCOVERY_ECHO_REPLY']:
packet_fields = decode_discovery_packet(packet, L, dpid_length)
neighbor_vid = packet_fields['sender_vid']
neighbor_port = event.port
print "Neighbor discovery reply message received from vid: ", neighbor_vid, "port:", neighbor_port
# Update routing table with this (possibly new) neighbors
self.viro.update_routing_table_based_on_neighbor(neighbor_vid, neighbor_port)
else:
dst_vid = get_dest(packet, L)
src_vid = get_src(packet, L)
# forward the packet if it's not for us
if dst_vid != self.vid:
self.route_viro_packet(packet)
return
if op_code == OP_CODES['RDV_QUERY']:
print "RDV_QUERY message received"
rvdReplyPacket = self.viro.process_rdv_query(packet)
if rvdReplyPacket == '':
return
if src_vid == self.vid:
print "(processing my own RDV_REPLY)"
self.viro.process_rdv_reply(rvdReplyPacket)
else:
msg = self.create_openflow_message(of.OFPP_IN_PORT, FAKE_SRC_MAC, rvdReplyPacket, event.port)
self.connection.send(msg)
print "RDV_REPLY message sent"
elif op_code == OP_CODES['RDV_PUBLISH']:
self.viro.process_rdv_publish(packet)
elif op_code == OP_CODES['RDV_REPLY']:
self.viro.process_rdv_reply(packet)
elif op_code == OP_CODES['VIRO_DATA_OP']:
# The part where it handles VIRO data packet (by printing it then dropping it)
contents = decode_viro_data_packet_contents(packet, L)
print "Received a VIRO data packet:", contents
stats = self.switch_stats['VIRO_DATA_OP']
self.switch_stats['VIRO_DATA_OP']['consumed'] += 1
self.switch_stats['VIRO_DATA_OP']['total_hops'] += MAX_TTL - contents['ttl']
# Send reply back
if self.vid == '0000':
print "(not replying to data packet since we are node 0000)"
else:
print "Sending VIRO data reply"
src_vid = self.vid
dst_vid = contents['src_vid']
payload = ones_complement_str(contents['payload'])
reply_packet = create_VIRO_DATA(src_vid, dst_vid, src_vid, MAX_TTL, payload)
self.process_viro_packet(reply_packet)
self.switch_stats['VIRO_DATA_OP']['originated'] += 1
def create_openflow_message(self, openflow_port, mac, packet, event_port=None):
# Track statistics on sent messages
# (assume that any message created using this function will be immediately sent)
op_code = get_operation(packet)
self.switch_stats[get_operation_name(op_code)]['sent'] += 1
# encapsulating the VIRO packet into an ethernet frame
# dst MAC defaults to ETHER_ANY = 00:00:00:00:00:00
# (currently that's the same as VEIL_MASTER_MAC)
e = ethernet(type=VIRO_DATA, src=EthAddr(mac), dst=EthAddr(VEIL_MASTER_MAC))
e.set_payload(packet)
# composing openFlow message
msg = of.ofp_packet_out()
msg.data = e.pack()
# send the message to the same port as the openflow port
msg.actions.append(of.ofp_action_output(port=openflow_port))
if (event_port is not None):
msg.in_port = event_port
return msg
def start_round(self):
print "vid", self.vid, 'starting round: ', self.round
self.run_round(self.round)
# Advance to next round (for next time), if not already at final round (L)
L = len(self.vid)
self.round += 1
if self.round > L:
self.round = L
self.viro.print_routing_table()
def run_round(self, round):
routing_table = self.viro.routing_table
L = len(self.vid)
# start from round 2 since connectivity in round 1 is already learnt using the physical neighbors
for k in range(2, round + 1):
if not k in routing_table:
routing_table[k] = []
# publish ourself as a gateway to our physical neighbors
for entry in routing_table[k]:
if entry['gateway'] == int(self.vid, 2):
print "Sending RDV_PUBLISH for k =", k
packet, dst = self.viro.publish(entry, k)
self.route_viro_packet(packet)
# If we don't yet have the maximum number of gateways / entries in our routing table, query for more
if len(routing_table[k]) < MAX_GW_PER_LEVEL:
print "Sending RDV_QUERY for k =", k
packet, dst = self.viro.query(k)
self.route_viro_packet(packet)
# This function runs during initialization and just serves to generate
# a set of VIDs that will be used for sending sample data packets for routing demonstration
def generate_demo_packet_sequence(self):
vid_sequence = []
# Just generate sample data from 0000 to 1111
if self.vid == '0000':
vid_sequence.append('1111')
return vid_sequence
# This function gets called periodically by a timer
# It simply steps through a sequence of destination VIDs, sending a message to another one
# each time it is executed. The payload is just a rolling 32-bit counter value
# that doesn't have any meaning in this simple demonstration except to distinguish / identify it
# it in the switches log messages.
def send_sample_viro_data(self):
try:
if len(self.demo_packet_sequence) < 1:
# print "(not configured to send any sample packets)"
return
src_vid = self.vid
dst_vid = self.demo_packet_sequence[self.demo_sequence_number % len(self.demo_packet_sequence)]
# Start with our own VID as the forwarding directive and let the routing function
# select an appropriate forwarding directive
fwd_vid = src_vid
self.demo_sequence_number += 1
payload = bin(self.demo_sequence_number % 2**32).replace("0b", "")
print "Sending sample VIRO data packet to", dst_vid, "with payload", payload
packet = create_VIRO_DATA(src_vid, dst_vid, fwd_vid, MAX_TTL, payload)
self.process_viro_packet(packet)
self.switch_stats['VIRO_DATA_OP']['originated'] += 1
except:
print "ERROR: send_sample_viro_data encountered exception"
print traceback.format_exc()
# If this packet is destined for us then process it.
# Otherwise, if it's a "data packet" then route it using multi-path routing.
# Otherwise use the single-path routing algorithm provided since
# the packet doesn't have the forwarding directive field in its header.
# (We could update the packet format for these or encapsulate them into data packets,
# but this is not necessary for this assignment.)
def route_viro_packet(self, packet):
L = len(self.vid)
dst = get_dest(packet, L)
if (dst == self.vid):
# Packet is for this node, so consume it rather than forward it
print 'I am the destination!'
self.process_viro_packet(packet)
return
op_code = get_operation(packet)
if op_code == OP_CODES['VIRO_DATA_OP']:
self.route_viro_packet_via_forwarding_directive(packet)
else:
print "Using single-path routing for", get_operation_name(op_code), "packet"
self.route_viro_packet_via_default_path(packet)
def route_viro_packet_via_forwarding_directive(self, packet):
packet_fields = decode_viro_data_packet_contents(packet, L)
# print "Routing VIRO data packet:", packet_fields
ttl = packet_fields['ttl']
if ttl < 1:
print "TTL expired: dropping data packet"
self.switch_stats['VIRO_DATA_OP']['ttl_expired'] += 1
return
# Decrease the TTL to ensure that the packet won't get stuck forever in a routing loop
ttl -= 1
dst_vid = packet_fields['dst_vid']
fwd_vid = packet_fields['fwd_vid']
if fwd_vid == self.vid:
# We are the node that the sender selected in its forwarding directive
# so now we need to select a new gateway to use instead.
# Since we look through the routing table to pick a random gateway we go ahead
# and grab the next hop and port rather than looking them up
fwd_vid, next_hop, port = self.viro.choose_gateway_for_forwarding_directive(dst_vid)
else:
# Don't need to change forwarding directive, but do need to find next hop from routing table
# for the forwarding directive that was already specified
next_hop, port = self.viro.get_next_hop(dst_vid)
# Now send the packet to the next hop associated with the VID in the forwarding directive
if next_hop is not None:
# We could just modify the field in the original packet then send but
# since the packed format makes that inconvenient here
# we just create a new packet with the updated values instead.
src_vid = packet_fields['src_vid']
payload = packet_fields['payload']
packet = create_VIRO_DATA(src_vid, dst_vid, fwd_vid, ttl, payload)
self.send_packet_out_port(packet, port)
else:
print "No next hop found, so cannot route packet (using forwarding directive)"
def route_viro_packet_via_default_path(self, packet):
# get next_hop and port
dst_vid = get_dest(packet, L)
op_code = get_operation(packet)
is_query_or_publish = op_code == OP_CODES['RDV_PUBLISH'] or op_code == OP_CODES['RDV_QUERY']
next_hop, port = self.viro.get_next_hop(dst_vid, is_query_or_publish)
if next_hop is not None:
self.send_packet_out_port(packet, port)
else:
print "No next hop found, so cannot route packet (using default/single path)"
def send_packet_out_port(self, packet, port):
msg = self.create_openflow_message(of.OFPP_IN_PORT, FAKE_SRC_MAC, packet, int(port))
self.connection.send(msg)
# Called periodically for performance | debugging information
def print_switch_stats(self):
try:
print '\n---- Statistics for:', self.vid, '|', self.dpid, ' ----'
# Sort operations
stats_items = self.switch_stats.items()
stats_items.sort(key=lambda x: x[0])
for op_name, stats in stats_items:
print op_name
for stat_name, stat_value in stats.items():
if stat_name == "total_hops":
consumed = stats['consumed']
if consumed > 0:
print " average_hops:", stat_value/consumed
else:
print " %s: %s" % (stat_name, stat_value)
print '\n'
except:
print "ERROR: caught exception while trying to print switch statistics"
print traceback.format_exc()
|
addman2/KvantSim | Exercises/EX03-relax/Si/cellpar/plot.dist.py | <reponame>addman2/KvantSim
#!/usr/bin/env python
import os
import numpy as np
from matplotlib import pyplot as plt

# Collect (distance, total energy) pairs from every scf-* run directory
# and plot energy versus distance.
results = []
for d, dn, fn in os.walk("."):
    for run_dir in sorted(dn):  # renamed from 'dir' (shadowed the builtin)
        if "scf-" in run_dir:
            try:
                E = 0
                with open(d + "/" + run_dir + "/scf.out") as f:
                    for line in f:
                        # Quantum ESPRESSO marks the final total energy with '!'
                        if "!" in line:
                            E = float(line.split()[4])
                results.append([float(run_dir[4:]), E])
            except (OSError, ValueError, IndexError):
                # Missing/unfinished scf.out or an unparsable line: skip this
                # run.  (The previous bare except also hid KeyboardInterrupt.)
                pass
results = sorted(results, key=lambda x: x[0])
results = np.array(results).T
plt.plot(results[0], results[1], linewidth=2)
plt.savefig("plot.png")
|
addman2/KvantSim | Exercises/bin/sdos.plot.py | <reponame>addman2/KvantSim
#!/usr/bin/env python
import sys
import numpy as np
from matplotlib import pyplot as plt

# Spin-resolved DOS plot: column 0 = energy, 1 = spin-down DOS, 2 = spin-up DOS.
# Spin-down is plotted as negative so the two channels mirror each other.
E, Ndown, Nup = np.loadtxt(sys.argv[1], usecols = (0,1,2), unpack = True)
plt.plot(E, -Ndown, label="down")
plt.plot(E, Nup, label="up")
# Optional second argument: the Fermi energy, drawn as a vertical red line.
try:
    plt.plot([float(sys.argv[2])] * 2, [max(Nup), -max(Ndown)], "r-")
except (IndexError, ValueError):
    # No (or non-numeric) Fermi energy supplied: skip the marker instead of
    # silencing every possible error with a bare except.
    pass
plt.xlim([min(E), max(E)])
plt.ylim([-max(Ndown), max(Nup)])
plt.plot(plt.xlim(), [0.0]*2, "k-")  # zero line separating the spin channels
plt.legend()
plt.tight_layout()
plt.savefig(sys.argv[1] + ".png")
|
addman2/KvantSim | Exercises/EX03-relax/CO/plotdist/run.dist.py | <gh_stars>0
#!/usr/bin/env python
import os
import numpy as np

# Sweep the C-O bond distance: for each value, render the input template
# and run a pw.x SCF calculation in its own scf-<distance> directory.
source = "CO.scf.in"
min_a = 1.0
max_a = 1.2
stp_a = 15
run_cmd = "mpirun -np 1 pw.x < scf.in > scf.out"

for distance in np.linspace(min_a, max_a, stp_a):
    label = "{:5.3f}".format(distance)
    print("Calculating " + label)
    workdir = "scf-" + label
    if not os.path.exists(workdir):
        os.makedirs(workdir)
    # Substitute the $A$ placeholder in the template with the current value.
    with open(source) as template, open(workdir + "/scf.in", "w") as out:
        for line in template:
            out.write(line.replace("$A$", label))
    os.system("cd " + workdir + ";" + run_cmd)
|
addman2/KvantSim | Exercises/EX01-Si_bulk/scf/conv-test/run.conv-test.py | #!/usr/bin/env python
import os
import numpy as np

# Convergence test for silicon: sweep the plane-wave cutoff, render the
# input template and run pw.x in a per-value scf-<cutoff> directory.
source = "si.scf.in"
min_scf = 8
max_scf = 40
stp_scf = 24
run_cmd = "mpirun -np 1 pw.x < scf.in > scf.out"

for cutoff in np.linspace(min_scf, max_scf, stp_scf):
    label = "{:5.3f}".format(cutoff)
    print("Calculating " + label)
    workdir = "scf-" + label
    if not os.path.exists(workdir):
        os.makedirs(workdir)
    # Substitute the $KATOF$ placeholder with the current cutoff value.
    with open(source) as template, open(workdir + "/scf.in", "w") as out:
        for line in template:
            out.write(line.replace("$KATOF$", label))
    os.system("cd " + workdir + ";" + run_cmd)
|
addman2/KvantSim | Exercises/bin/dos.plot.py | #!/usr/bin/env python
import sys
import numpy as np
from matplotlib import pyplot as plt

# Plot a density of states: column 0 = energy, column 1 = DOS.
E, N = np.loadtxt(sys.argv[1], usecols = (0,1), unpack = True)
plt.plot(E, N)
# Optional second argument: the Fermi energy, drawn as a vertical red line.
try:
    plt.plot([float(sys.argv[2])] * 2, [max(N), 0.0], "r-")
except (IndexError, ValueError):
    # No (or non-numeric) Fermi energy supplied: skip the marker instead of
    # silencing every possible error with a bare except.
    pass
plt.xlim([min(E), max(E)])
plt.ylim([0.0, max(N)])
plt.plot(plt.xlim(), [0.0]*2, "k-")  # baseline at zero
plt.savefig(sys.argv[1].replace("dos", "png"))
|
addman2/KvantSim | Exercises/EX02-Al_bulk/scf/conv-test/plot.conv-test.py |
import os
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

# Collect (cutoff, k-points, total energy) triples from the scf-<co>-<kp>
# directories produced by run.conv-test.py and scatter them in 3D.
results = []
for d, dn, fn in os.walk("."):
    for run_dir in sorted(dn):  # renamed from 'dir' (shadowed the builtin)
        if "scf-" in run_dir:
            try:
                E = 0
                with open(d + "/" + run_dir + "/scf.out") as f:
                    for line in f:
                        # Quantum ESPRESSO marks the final total energy with '!'
                        if "!" in line:
                            E = float(line.split()[4])
                results.append([float(run_dir.split("-")[1]), float(run_dir.split("-")[2]), E])
            except (OSError, ValueError, IndexError):
                # Missing/unfinished scf.out or an unparsable directory name:
                # skip this run.  (A bare except also hid KeyboardInterrupt.)
                pass
X, Y, Z = np.array(results).T
print(X)
print(Y)
print(Z)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X, Y, Z)
# NOTE(review): nothing is saved or shown — a plt.savefig()/plt.show() call
# appears to be missing; confirm how this script is meant to be used.
|
addman2/KvantSim | Exercises/EX02-Al_bulk/scf/conv-test/run.conv-test.py | import os
import numpy as np

# Two-parameter convergence test for aluminium: sweep the plane-wave
# cutoff (CO) and the k-point grid (KP), rendering the template and
# running pw.x in a per-combination scf-<cutoff>-<kp> directory.
source = "al.scf.in"

CO_min = 18
CO_max = 20
CO_stp = 3

KP_min = 10
KP_max = 12
KP_stp = 3

for kp in np.linspace(KP_min, KP_max, KP_stp):
    kp = int(kp)
    for scf in np.linspace(CO_min, CO_max, CO_stp):
        print("Calculating " + str(scf))
        print("Calculating KP = {:3d}".format(kp))
        wd = "scf-" + str(scf) + "-" + str(kp)
        if not os.path.exists(wd):
            os.makedirs(wd)
        # Write the input file once; mode "w" truncates any previous run's
        # file, replacing the old pattern of an `rm -rf` shell call followed
        # by reopening the output in append mode for every template line.
        with open(source) as f, open(wd + "/scf.in", "w") as fa:
            for line in f:
                line = line.replace("$KATOF$", str(scf))
                line = line.replace("$KP$", "{:3d}{:3d}{:3d}".format(kp, kp, kp))
                fa.write(line)
        os.system("cd "+wd+";pw.x <scf.in>scf.out")
|
addman2/KvantSim | Beamer/kpath.py | from ase.io.espresso import read_espresso_in
from ase.dft.kpoints import ibz_points
from ase.dft.kpoints import get_bandpath

# Build a band-structure k-point path (Gamma-X-M-Gamma) for the structure
# described in x.scf.in and print the resulting sampling.
xtal = read_espresso_in("x.scf.in")
high_symmetry = ibz_points["cubic"]
labels = ["Gamma", "X", "M", "Gamma"]
path = [high_symmetry[name] for name in labels]
print(xtal.cell)
kpts, x, X = get_bandpath(path, xtal.cell, npoints=200)
print(kpts)
print(x)
print(X)
|
addman2/KvantSim | Exercises/bin/bands.plot.py | <reponame>addman2/KvantSim<filename>Exercises/bin/bands.plot.py<gh_stars>0
#!/usr/bin/env python
import sys
import os
import numpy as np
from matplotlib import pyplot as plt
bands = []
with open(os.getcwd()+"/"+sys.argv[1]) as f:
for i, line in enumerate(f):
if i%2 == 0:
try:
d = np.array(line.split(), np.float)
bands.append(d)
except:
pass
bands = np.array(bands)
for b in bands.T:
plt.plot(b, "b-", linewidth=2)
plt.savefig(sys.argv[1].replace("bands","png"))
|
akezhanmussa/SimulationCovidApp | main.py | import simulation
import requests
import pydantic
import typing
import yaml
CONFIG_YAML = 'config.yaml'  # path to the application configuration file
class SimulationConfig(pydantic.BaseModel):
    """Settings controlling the generated simulated cases."""
    endpoint: str                           # API path new cases are POSTed to
    lat_range: typing.Tuple[float, float]   # (min, max) latitude bounds
    lon_range: typing.Tuple[float, float]   # (min, max) longitude bounds
    count: int                              # number of cases to attempt to send
class OauthConfig(pydantic.BaseModel):
    """OAuth password-grant credentials used to obtain an access token."""
    endpoint: str      # token endpoint path on the server
    grant_type: str    # e.g. 'password'
    username: str
    password: str
class ServerConfig(pydantic.BaseModel):
    """Target API server and the credentials needed to talk to it."""
    host: str                              # base URL of the API server
    basic_auth: typing.Tuple[str, str]     # (user, password) for HTTP basic auth
    oauth: OauthConfig
class AppConfig(pydantic.BaseModel):
    """Top-level application configuration (mirrors config.yaml)."""
    server: ServerConfig
    simulation: SimulationConfig
def app():
    """Load the configuration, authenticate against the API and push
    simulated COVID cases; raises AssertionError on any non-200 response."""
    with open(CONFIG_YAML) as f:
        # yaml.load without an explicit Loader is deprecated and can execute
        # arbitrary constructors; safe_load parses plain data structures only.
        raw_conf = yaml.safe_load(f)
    conf = AppConfig(**raw_conf)
    simulation.set_api_host(conf.server.host)
    auth_token, token_type = simulation.fetch_authoriztion_token(
        basic_auth=conf.server.basic_auth,
        oauth=conf.server.oauth.dict(exclude={'endpoint'}),
        endpoint=conf.server.oauth.endpoint,
    )
    responses = simulation.send_new_cases(
        auth_token=auth_token,
        token_type=token_type,
        lat_range=conf.simulation.lat_range,
        lon_range=conf.simulation.lon_range,
        endpoint=conf.simulation.endpoint,
        count=conf.simulation.count,
    )
    for response in responses:
        # NOTE(review): assert is stripped under `python -O`; kept to
        # preserve the existing AssertionError contract.
        assert response.status_code == 200
if __name__ == '__main__':
    # Entry point when executed as a script: `python main.py`.
    app()
|
akezhanmussa/SimulationCovidApp | simulation.py | <gh_stars>0
import requests
import random
import typing
import json
import logging
# Module-level API base URL; set once via set_api_host().
back_url = None


def set_api_host(host):
    """Record *host* as the API base URL, unless one was already set."""
    global back_url
    if back_url is not None:
        return
    back_url = host
def fetch_authoriztion_token(
    basic_auth: typing.Tuple[float, float],
    oauth: typing.Dict[str, str],
    endpoint: str,
):
    """POST the OAuth form to the token endpoint.

    Returns ``(access_token, token_type)`` on success, ``(None, None)``
    for any non-200 response.
    """
    url = f'{back_url}/{endpoint}'
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    response = requests.post(url, data=oauth, auth=basic_auth, headers=headers)
    if response.status_code == 200:
        body = response.json()
        return body['access_token'], body['token_type']
    return None, None
def send_new_cases(
    auth_token: str,
    token_type: str,
    lat_range: typing.Tuple[float, float],
    lon_range: typing.Tuple[float, float],
    count: int,
    endpoint: str,
) -> typing.List[requests.Response]:
    """Generate up to *count* random hotspots within the given bounds and
    POST each one to the API; returns the collected responses.

    Candidates falling outside the ranges are skipped, so fewer than
    *count* requests may be sent.
    """
    url = f'{back_url}/{endpoint}'
    auth_header = {'Authorization': f'{token_type} {auth_token}'}
    responses = []
    for _ in range(count):
        # Random offsets of at most 0.2 degrees from the rounded range start.
        lat_offset = random.random() / 5
        lon_offset = random.random() / 5
        new_lat = round(lat_range[0], 1) + lat_offset
        if not (lat_range[0] <= new_lat <= lat_range[1]):
            continue
        new_lon = round(lon_range[0], 1) + lon_offset
        if not (lon_range[0] <= new_lon <= lon_range[1]):
            continue
        print(f'Generated new hotspot with latitude: {new_lat}, longitude: {new_lon}')
        payload = {
            'latitude': new_lat,
            'longitude': new_lon,
        }
        responses.append(requests.api.post(url, json=payload, headers=auth_header))
    return responses
|
brainmorsel/python-dhcp-sprout | ds/dhcp/server.py | import asyncio
from enum import Enum
import logging
import socket
from collections import defaultdict
import ipaddress
import time
from datetime import datetime
import aiopg
from sqlalchemy.dialects import postgresql as pg
import sqlalchemy as sa
import psycopg2
from .proto.packet import Packet
from .proto.opttypes import OptionType
from .proto.dhcpmsg import MessageType
from ds import db
class BindToDeviceError(Exception):
    """Raised when binding a listener socket to a network device fails."""
class _Listener:
    """Non-blocking broadcast UDP socket bound to one interface.

    Incoming datagrams are handed to the ``reader`` coroutine factory;
    outgoing datagrams are queued and flushed when the socket becomes
    writable.
    """
    def __init__(self, interface, reader, loop, *, port=67, server_addr=None, bufsize=4096, wqueue=10):
        # NOTE(review): the server_addr parameter is accepted but never used;
        # self.server_addr is derived from `interface` below — confirm intent.
        self.interface = interface
        self.server_addr = None
        self.bufsize = bufsize
        self.loop = loop
        self._reader = reader
        self._is_writing = False
        self._write_queue = asyncio.Queue(wqueue, loop=loop)
        self.logger = logging.getLogger(__name__)
        # Broadcast is required for DHCP replies; REUSEADDR eases restarts.
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.setblocking(False)
        sock.bind((interface, int(port)))
        self.loop.add_reader(sock.fileno(), self._handle_read)
        self._s = sock
        if interface != '0.0.0.0':
            self.server_addr = interface
        self.logger.info('listener binded to %s:%s', interface, port)
    def _handle_read(self):
        # Event-loop callback: read one datagram and dispatch it to the reader.
        data, address = self._s.recvfrom(self.bufsize)
        self.logger.debug('listener %s: recieved %d octets from %s', self.interface, len(data), address)
        future = self._reader(self, address, data)
        asyncio.ensure_future(future, loop=self.loop)
    def _handle_write(self):
        # Event-loop callback: send one queued datagram; deregister the
        # writer once the queue drains.
        try:
            address, data = self._write_queue.get_nowait()
            sent = self._s.sendto(data, address)
            self.logger.debug('listener %s: sent %d/%d octets to %s', self.interface, sent, len(data), address)
        except asyncio.QueueEmpty:
            self.loop.remove_writer(self._s.fileno())
            self._is_writing = False
    async def _write(self, address, data):
        # Register the write callback lazily, only while data is pending.
        if not self._is_writing:
            self.loop.add_writer(self._s.fileno(), self._handle_write)
            self._is_writing = True
        await self._write_queue.put((address, data))
    async def send(self, address, data):
        """Queue a datagram; 0.0.0.0 destinations become limited broadcast."""
        host, port = address
        if host == '0.0.0.0':
            address = ('255.255.255.255', port)
        await self._write(address, data)
class AsyncServer:
    """Base asyncio DHCP server: binds listeners and dispatches packets
    by BOOTP op code to the handle_request/handle_reply hooks."""
    def __init__(self, loop=None):
        self.loop = loop or asyncio.get_event_loop()
        self._listeners = {}  # interface -> _Listener
        self.logger = logging.getLogger(__name__)
    def bind(self, interface, **kwargs):
        """Start listening on *interface*; kwargs are passed to _Listener."""
        self._listeners[interface] = _Listener(interface, self._handle_packet, self.loop, **kwargs)
    async def _handle_packet(self, listener, address, data):
        # Decode the datagram, dispatch by op, and send any produced reply.
        try:
            pkt = Packet.unpack_from(data)
            self.logger.debug('REQUEST PACKET:\n%s', pkt)
            if pkt.op == Packet.Op.REQUEST:
                reply_pkt = await self.handle_request(pkt, address, listener)
            elif pkt.op == Packet.Op.REPLY:
                reply_pkt = await self.handle_reply(pkt, address, listener)
            else:
                reply_pkt = None
            if reply_pkt:
                reply_pkt.op = Packet.Op.REPLY
                reply_pkt.flags = 0
                data = reply_pkt.pack()
                self.logger.debug('REPLY PACKET:\n%s', reply_pkt)
                await listener.send(address, data)
        except Exception:
            # Log and keep serving: one bad packet must not kill the server.
            self.logger.exception('an error occured when handling input packet from %s (%s)', listener.interface, address)
    async def handle_request(self, pkt, address, listener):
        """Subclass hook: return a reply Packet or None to stay silent."""
        return None
    async def handle_reply(self, pkt, address, listener):
        """Subclass hook: return a reply Packet or None to stay silent."""
        return None
class DBTask(Enum):
    """Commands consumed by DHCPServer.db_task_handling_loop."""
    SHUTDOWN = 0  # stop processing and close the connection pool
    LOAD_OWNERS = 1  # load the full list of MAC bindings
    ADD_STAGING = 2  # add a MAC to the list awaiting an IP binding
    UPDATE_LEASE = 3  # update the last-lease timestamp for a binding
    RELOAD_ITEM = 4  # reload the profile for the given record
    REMOVE_STAGING = 5  # remove a MAC from the staging cache
    REMOVE_ACTIVE = 6  # remove a MAC from the active cache
    RELOAD_PROFILE = 7  # reload all records using the given profile
class DBChannelListener:
    """Subscribes to a PostgreSQL NOTIFY channel on a dedicated connection
    and exposes the notifications through ``self.queue``."""
    def __init__(self, conn_params, channel):
        self.conn_params = conn_params
        self.channel = channel
        self.queue = None  # aiopg notifies queue, populated by start()
    async def start(self):
        """Open the connection and issue LISTEN for the channel."""
        self.conn = await aiopg.connect(**self.conn_params)
        async with self.conn.cursor() as cur:
            await cur.execute('LISTEN {}'.format(self.channel))
        self.queue = self.conn.notifies
    async def stop(self):
        """Close the connection and unblock any reader with a None sentinel."""
        self.conn.close()
        await self.queue.put(None)
class DHCPServer(AsyncServer):
    """DHCP server backed by PostgreSQL, answering only for known MACs.

    In-memory caches:
      * ``maps``         -- mac_addr -> binding dict (profile + assigned IP)
      * ``maps_staging`` -- mac_addr -> relay_ip, awaiting IP assignment
    All DB writes are serialized through the ``db_tasks`` queue (single
    consumer); cache invalidation arrives over a PostgreSQL NOTIFY channel.
    """
    # Joined owner+profile row used to (re)load bindings into the caches.
    sql_select_owner = sa.select([
        db.profile.c.relay_ip,
        db.profile.c.router_ip,
        db.profile.c.network_addr,
        db.profile.c.lease_time,
        db.profile.c.dns_ips,
        db.profile.c.ntp_ips,
        db.owner.c.mac_addr,
        db.owner.c.ip_addr,
        db.owner.c.id,
    ]).select_from(
        db.owner.join(db.profile)
    )
    def __init__(self, db, channel, default_server_addr=None, loop=None):
        super().__init__(loop)
        self.default_server_addr = default_server_addr
        self.db = db
        self.channel = channel
        self.db_tasks = asyncio.Queue(maxsize=1000, loop=loop)
        self.maps = {}
        self.maps_staging = {}
        # Background consumers: DB task queue and NOTIFY channel.
        future = self.db_task_handling_loop()
        asyncio.ensure_future(future, loop=self.loop)
        future = self.db_channel_handling_loop()
        asyncio.ensure_future(future, loop=self.loop)
        self.is_stoping = False
    async def stop(self):
        """Request a graceful shutdown of the DB task loop."""
        self.logger.info('Started grace shutdown...')
        self.is_stoping = True
        await self.db_tasks.put((DBTask.SHUTDOWN, None))
    async def handle_request(self, request, address, listener):
        """Answer DISCOVER/REQUEST for cached MACs; stage unknown ones."""
        if self.is_stoping:
            return None
        if request.message_type is None:
            return None
        if request.message_type not in (MessageType.DISCOVER, MessageType.REQUEST):
            return None
        if not request.hops:
            # only handle requests that came through a relay
            return None
        # NOTE(review): `address` is a (host, port) tuple, so
        # ipaddress.IPv4Address(address) would raise if giaddr were empty;
        # presumably unreachable because relayed packets set giaddr — confirm.
        relay_ip = request.giaddr or ipaddress.IPv4Address(address)
        self.logger.info('%s %s from relay: %s', request.chaddr, request.message_type.name, relay_ip)
        circuit_id = (request.get_circuit_id() or b'').decode('utf-8')
        if request.chaddr in self.maps:
            profile = self.maps[request.chaddr]
            if profile['relay_ip'] != relay_ip and request.chaddr not in self.maps_staging:
                # Known MAC seen behind a different relay: re-stage it.
                self.db_task_add_staging(request.chaddr, relay_ip, circuit_id)
                return None
        elif request.chaddr in self.maps_staging:
            self.logger.debug('%s is awaiting resolution, ignore request', request.chaddr)
            return None
        else:
            self.db_task_add_staging(request.chaddr, relay_ip, circuit_id)
            return None
        server_addr = listener.server_addr or self.default_server_addr
        pkt = request.make_reply(server_addr, profile['ip_addr'])
        if pkt.message_type == MessageType.ACK:
            self.db_task_update_lease(request.chaddr, relay_ip)
        # Fill in the network options from the cached profile.
        pkt.add_option(OptionType.SubnetMask, profile['netmask'])
        if profile.get('router_ip'):
            pkt.add_option(OptionType.Router, profile['router_ip'])
        if profile.get('dns_ips'):
            pkt.add_option(OptionType.DomainNameServers, profile['dns_ips'])
        if profile.get('ntp_ips'):
            pkt.add_option(OptionType.NTPServer, profile['ntp_ips'])
        lease_time = int(profile['lease_time'].total_seconds())
        pkt.add_option(OptionType.IPaddressLeaseTime, lease_time)
        if server_addr:
            pkt.add_option(OptionType.ServerIdentifier, server_addr)
        # Echo back the remaining request options (e.g. relay agent info).
        for option in request._options:
            if option.type not in (
                OptionType.DHCPMessageType, OptionType.RequestedIPaddress,
                OptionType.ParameterRequestList, OptionType.Pad):
                pkt._options.append(option)
        return pkt
    def db_task_add_staging(self, macaddr, relay_ip, circuit_id):
        # Queue an ADD_STAGING task; drop it (with a warning) when full.
        try:
            task = DBTask.ADD_STAGING, (datetime.now(), macaddr, relay_ip, circuit_id)
            self.db_tasks.put_nowait(task)
            self.maps_staging[macaddr] = relay_ip
        except asyncio.QueueFull:
            self.logger.warning('db tasks queue is full, new task droped')
    def db_task_update_lease(self, macaddr, relay_ip):
        # Queue an UPDATE_LEASE task; drop it (with a warning) when full.
        try:
            task = DBTask.UPDATE_LEASE, (datetime.now(), macaddr, relay_ip)
            self.db_tasks.put_nowait(task)
        except asyncio.QueueFull:
            self.logger.warning('db tasks queue is full, new task droped')
    async def db_task_handling_loop(self):
        """Single consumer of ``db_tasks``: applies DB writes and cache edits."""
        async with self.db.acquire() as conn:
            while True:
                task, params = await self.db_tasks.get()
                self.logger.info('handling db task: %s', task.name)
                if task is DBTask.SHUTDOWN:
                    break
                elif task is DBTask.ADD_STAGING:
                    date, mac_addr, relay_ip, circuit_id = params
                    try:
                        # Insert the MAC bound to whichever profile serves
                        # this relay; IntegrityError means it already exists.
                        res = await (await conn.execute(
                            db.owner.insert().from_select(
                                ['mac_addr', 'profile_id', 'description'],
                                sa.select([
                                    sa.literal(mac_addr),
                                    db.profile.c.id,
                                    sa.literal(circuit_id),
                                ]).
                                select_from(db.profile).
                                where(db.profile.c.relay_ip == str(relay_ip))
                            ).returning(db.owner.c.id)
                        )).fetchone()
                        if not res:
                            del self.maps_staging[mac_addr]
                            self.logger.warning('no profile for relay %s', relay_ip)
                    except psycopg2.IntegrityError:
                        pass
                elif task is DBTask.UPDATE_LEASE:
                    date, macaddr, relay_ip = params
                    item = self.maps.get(macaddr)
                    if item:
                        await conn.execute(
                            db.owner.update().
                            values(lease_date=date).
                            where(db.owner.c.id == item['id'])
                        )
                elif task is DBTask.REMOVE_ACTIVE:
                    mac_addr, = params
                    del self.maps[mac_addr]
                elif task is DBTask.REMOVE_STAGING:
                    mac_addr, = params
                    del self.maps_staging[mac_addr]
                elif task is DBTask.RELOAD_ITEM:
                    item_id, = params
                    item = await (await conn.execute(
                        self.sql_select_owner.where(db.owner.c.id == item_id)
                    )).fetchone()
                    self._update_item(item)
                elif task is DBTask.RELOAD_PROFILE:
                    profile_id, = params
                    items = await conn.execute(
                        self.sql_select_owner.
                        where(db.owner.c.profile_id == profile_id).
                        order_by(sa.asc(db.owner.c.modify_date))
                    )
                    async for item in items:
                        self._update_item(item)
        # Reached only after SHUTDOWN: drain and close the pool.
        self.db.close()
        await self.db.wait_closed()
    async def db_load_owners(self):
        """Initial cache fill: load every binding, oldest modifications first."""
        async with self.db.acquire() as conn:
            items = await (await conn.execute(
                self.sql_select_owner.order_by(sa.asc(db.owner.c.modify_date))
            )).fetchall()
            for item in items:
                self._update_item(item)
    def _update_item(self, item):
        # Rows with an IP become active bindings; rows without stay staged.
        if item.ip_addr:
            if item.mac_addr in self.maps_staging:
                del self.maps_staging[item.mac_addr]
            self.maps[item.mac_addr] = dict(item)
            self.maps[item.mac_addr]['netmask'] = item.network_addr.with_netmask.split('/')[1]
        else:
            self.maps_staging[item.mac_addr] = item.relay_ip
    async def db_channel_handling_loop(self):
        """Translate NOTIFY payloads of the form 'ACTION param' into tasks."""
        while True:
            msg = await self.channel.queue.get()
            if msg is None:
                # None is the sentinel queued by DBChannelListener.stop().
                break
            self.logger.info('got channel msg: %s', msg.payload)
            action, param = msg.payload.split(' ', 1)
            if action == 'RELOAD_ITEM':
                item_id = int(param)
                task = DBTask[action], (item_id,)
                await self.db_tasks.put(task)
            elif action in ('REMOVE_STAGING', 'REMOVE_ACTIVE'):
                mac_addr = param.lower()
                task = DBTask[action], (mac_addr,)
                await self.db_tasks.put(task)
            elif action == 'RELOAD_PROFILE':
                profile_id = int(param)
                task = DBTask[action], (profile_id,)
                await self.db_tasks.put(task)
|
brainmorsel/python-dhcp-sprout | ds/web/views.py | import datetime
from aiohttp import web
from aiohttp_jinja2 import template
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql as pg
import psycopg2
from ds import db
from . import forms
@template('index.jinja2')
async def index(request):
    """Render the landing page; the template needs no context."""
    return {}
@template('profile_list.jinja2')
async def profile_list(request):
    """List all profiles with IP-usage counters.

    ``ips_used``  -- number of owner rows per profile (from the join).
    ``ips_total`` -- usable host addresses in the profile's network
                     (broadcast minus network address, minus 2).
    """
    async with request.app.db.acquire() as conn:
        items = await (await conn.execute(
            sa.select([
                db.profile, 'ips_used',
                (sa.func.broadcast(db.profile.c.network_addr) - db.profile.c.network_addr - 2).label('ips_total')
            ]).
            select_from(
                db.profile.
                join(sa.select([
                    db.owner.c.profile_id, sa.func.count(db.owner.c.id).label('ips_used')
                ]).group_by(db.owner.c.profile_id).alias('cnts'))
            ).
            order_by(db.profile.c.name)
        )).fetchall()
    return {'items': items}
def _cast_str_to_inet_arr(ip_list_str):
    """Convert a comma-separated IP string from a form into a SQL ``INET[]``.

    The values must be materialized as a list: psycopg2 cannot adapt a lazy
    ``map`` object (Python 3), so the previous ``sa.cast(map(...), ...)``
    failed when the statement was executed.
    """
    ips = [str(ip) for ip in forms.str_to_ip_list(ip_list_str)]
    return sa.cast(ips, pg.ARRAY(pg.INET))
@template('profile_edit.jinja2')
async def profile_edit(request):
    """Create or edit a profile; on success notify the DHCP daemon.

    With no ``id`` in the URL a new profile is inserted, otherwise the
    existing row is updated.  GET renders the form; POST validates,
    saves, emits a RELOAD_PROFILE notification and redirects.
    """
    tbl = db.profile
    item_id = request.match_info.get('id')
    await request.post()
    async with request.app.db.acquire() as conn:
        async with conn.begin():
            item = await (await conn.execute(
                tbl.select().where(tbl.c.id == item_id)
            )).fetchone()
            form = forms.ProfileEditForm(await request.post(), item)
            if request.method == 'POST' and form.validate():
                params = db.fit_params_dict(form.data, tbl.c.keys())
                # The IP lists arrive as text and must be cast to INET[].
                # (Removed a leftover debug print of params['dns_ips'].)
                params['dns_ips'] = _cast_str_to_inet_arr(params['dns_ips'])
                params['ntp_ips'] = _cast_str_to_inet_arr(params['ntp_ips'])
                if item_id is None:
                    await conn.execute(tbl.insert().values(params))
                else:
                    await conn.execute(
                        tbl.update().values(params).where(tbl.c.id == item_id)
                    )
                # Tell the DHCP daemon (via NOTIFY) to reload this profile.
                await conn.execute(
                    sa.select([sa.func.pg_notify('dhcp_control', 'RELOAD_PROFILE {}'.format(item_id))])
                )
                return web.HTTPFound('/profile/')
    return {'form': form}
async def profile_delete(request):
    """Delete the profile identified by the URL `id`, then return to the list."""
    profile_id = request.match_info.get('id')
    async with request.app.db.acquire() as conn:
        query = db.profile.delete().where(db.profile.c.id == profile_id)
        await conn.execute(query)
    return web.HTTPFound('/profile/')
@template('staging_list.jinja2')
async def staging_list(request):
    """List MACs still awaiting an IP assignment (owner rows with NULL ip)."""
    async with request.app.db.acquire() as conn:
        items = await (await conn.execute(
            sa.select([
                db.owner,
                db.profile.c.name.label('profile_name'),
                db.profile.c.relay_ip,
            ]).
            select_from(
                db.owner.
                join(db.profile)
            ).
            where(db.owner.c.ip_addr == None).
            order_by(sa.desc(db.owner.c.create_date))
        )).fetchall()
    return {'items': items}
async def staging_assign_ip(request):
    """Assign the lowest free IP of the profile's network to a staged MAC,
    notify the DHCP daemon, and redirect (optionally to the edit form)."""
    item_id = int(request.match_info.get('id'))
    async with request.app.db.acquire() as conn:
        async with conn.begin():
            profile_id = await conn.scalar(
                sa.select([db.owner.c.profile_id]).where(db.owner.c.id == item_id)
            )
            # All host addresses of the profile's network (the network and
            # broadcast addresses themselves are excluded by the +1/-1).
            gen = sa.select([
                (sa.cast('0.0.0.0', pg.INET) + sa.func.generate_series(
                    sa.cast(db.profile.c.network_addr, pg.INET) - '0.0.0.0' + 1,
                    sa.func.broadcast(db.profile.c.network_addr) - '0.0.0.0' - 1
                )).label('ip_addr')
            ]).\
            select_from(db.profile.join(db.owner)). \
            where(db.profile.c.id == profile_id)
            # Addresses already taken within the same profile.
            sel = sa.select([db.owner.c.ip_addr]). \
                where(db.owner.c.profile_id == profile_id). \
                where(db.owner.c.ip_addr != None)
            # First free address = candidates minus taken, smallest first.
            ip_addr = gen.except_(sel).order_by('ip_addr').limit(1)
            await conn.execute(
                db.owner.update().values(
                    ip_addr=ip_addr,
                    modify_date=sa.func.now()
                ).
                where(db.owner.c.id == item_id)
            )
            # Tell the DHCP daemon (via NOTIFY) to reload this binding.
            await conn.execute(
                sa.select([sa.func.pg_notify('dhcp_control', 'RELOAD_ITEM {}'.format(item_id))])
            )
    if 'edit' in request.rel_url.query:
        return web.HTTPFound('/assigned/{}/edit?redirect=/staging/'.format(item_id))
    return web.HTTPFound('/staging/')
async def staging_delete(request):
    """Remove a staging owner row and tell the DHCP server to forget its MAC."""
    owners = db.owner
    item_id = request.match_info.get('id')
    async with request.app.db.acquire() as conn:
        async with conn.begin():
            # Grab the MAC before the row disappears; it is needed for the
            # notification payload below.
            mac_addr = await conn.scalar(
                sa.select([owners.c.mac_addr]).where(owners.c.id == item_id)
            )
            await conn.execute(owners.delete().where(owners.c.id == item_id))
            notify = sa.func.pg_notify(
                'dhcp_control', 'REMOVE_STAGING {}'.format(mac_addr))
            await conn.execute(sa.select([notify]))
    return web.HTTPFound('/staging/')
@template('assigned_list.jinja2')
async def assigned_list(request):
    """Render owners that already have an IP address assigned."""
    query = (
        sa.select([
            db.owner,
            db.profile.c.name.label('profile_name'),
            db.profile.c.relay_ip,
        ])
        .select_from(db.owner.join(db.profile))
        .where(db.owner.c.ip_addr != None)  # noqa: E711 — SQL "IS NOT NULL"
        .order_by(sa.desc(db.owner.c.lease_date))
    )
    async with request.app.db.acquire() as conn:
        result = await conn.execute(query)
        items = await result.fetchall()
    return {'items': items}
@template('assigned_edit.jinja2')
async def assigned_edit(request):
    """Show/process the edit form for an assigned owner row.

    GET renders the form pre-filled from the database row; POST validates
    the submitted data and updates the row, then redirects either to the
    URL given in the ``redirect`` query parameter or to the assigned list.
    """
    item_id = request.match_info.get('id')
    async with request.app.db.acquire() as conn:
        item = await (await conn.execute(
            sa.select([
                db.owner,
                db.profile.c.name.label('profile_name'),
                db.profile.c.relay_ip,
            ]).
            select_from(
                db.owner.
                join(db.profile)
            ).
            where(db.owner.c.id == item_id)
        )).fetchone()
        # request.post() reads and caches the form body; the original code
        # awaited it once up-front and then again here — the first call was
        # redundant and has been removed.
        form = forms.AssignedItemEditForm(await request.post(), item)
        if request.method == 'POST' and form.validate():
            # Keep only keys that are real owner-table columns, otherwise
            # SQLAlchemy raises "Unconsumed column names".
            params = db.fit_params_dict(form.data, db.owner.c.keys())
            await conn.execute(
                db.owner.update().values(params).where(db.owner.c.id == item_id)
            )
            if 'redirect' in request.rel_url.query:
                return web.HTTPFound(request.rel_url.query['redirect'])
            return web.HTTPFound('/assigned/')
    return {'item': item, 'form': form}
async def assigned_delete(request):
    """Delete an assigned owner row and tell the DHCP server to drop it."""
    owners = db.owner
    item_id = request.match_info.get('id')
    async with request.app.db.acquire() as conn:
        async with conn.begin():
            # Fetch the MAC first: it is gone after the DELETE but needed
            # for the notification payload.
            mac_addr = await conn.scalar(
                sa.select([owners.c.mac_addr]).where(owners.c.id == item_id)
            )
            await conn.execute(owners.delete().where(owners.c.id == item_id))
            notify = sa.func.pg_notify(
                'dhcp_control', 'REMOVE_ACTIVE {}'.format(mac_addr))
            await conn.execute(sa.select([notify]))
    return web.HTTPFound('/assigned/')
|
brainmorsel/python-dhcp-sprout | ds/db/__init__.py | <filename>ds/db/__init__.py<gh_stars>0
import sys
import ipaddress
import asyncio
from aiopg.sa import create_engine
from sqlalchemy.dialects import postgresql as pg
import sqlalchemy as sa
import psycopg2.extensions
# Shared SQLAlchemy metadata object; all table definitions below register on it.
metadata = sa.MetaData()
def cast_inet(value, cur):
    """psycopg2 typecaster: INET column text -> ipaddress address object."""
    return None if value is None else ipaddress.ip_address(value)
def cast_cidr(value, cur):
    """psycopg2 typecaster: CIDR column text -> ipaddress network object."""
    return None if value is None else ipaddress.ip_network(value)
# Register custom typecasters so psycopg2 returns ipaddress objects for
# Postgres INET/CIDR columns instead of plain strings.  The OIDs below are
# fixed built-in type OIDs, obtainable with the SQL shown in each comment.
# SELECT 'inet'::regtype::oid;
INET_OID = 869
INET = psycopg2.extensions.new_type((INET_OID,), "INET", cast_inet)
psycopg2.extensions.register_type(INET)
# SELECT 'cidr'::regtype::oid;
CIDR_OID = 650
CIDR = psycopg2.extensions.new_type((CIDR_OID,), "CIDR", cast_cidr)
psycopg2.extensions.register_type(CIDR)
# Array-of-inet needs its own OID so INET[] columns cast element-wise.
# select typarray from pg_type where typname = 'inet'; -> 1041
psycopg2.extensions.register_type(
    psycopg2.extensions.new_array_type(
        (1041,), 'INET[]', INET))
def fit_params_dict(params, columns):
    """Return a copy of *params* restricted to keys that name table columns.

    The filtered dict can be passed safely to ``.update()``/``.insert()``
    without triggering "sqlalchemy.exc.CompileError: Unconsumed column
    names...".
    """
    column_names = {str(column) for column in columns}
    shared = column_names.intersection(params)
    return {name: params[name] for name in shared}
# A DHCP "profile": the per-network configuration handed out in leases.
profile = sa.Table(
    'profile', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('create_date', sa.DateTime(timezone=True), nullable=False,
              server_default=sa.func.now()),
    sa.Column('modify_date', sa.DateTime(timezone=True), nullable=False,
              server_default=sa.func.now()),
    sa.Column('name', sa.String, sa.CheckConstraint('length(name) > 0'), nullable=False),
    sa.Column('description', sa.String, nullable=False, server_default=''),
    # Address of the DHCP relay for this network; unique per profile.
    sa.Column('relay_ip', pg.INET, nullable=False),
    sa.Column('router_ip', pg.INET, nullable=True),
    sa.Column('dns_ips', pg.ARRAY(pg.INET), nullable=True),
    sa.Column('ntp_ips', pg.ARRAY(pg.INET), nullable=True),
    sa.Column('lease_time', sa.Interval, nullable=False),
    # The network from which owner addresses are allocated (see
    # staging_assign_ip in the web views).
    sa.Column('network_addr', pg.CIDR, nullable=False),
    sa.UniqueConstraint('relay_ip'),
    sa.UniqueConstraint('name'),
)
# A DHCP client ("owner"): a MAC address bound to a profile, optionally
# holding an assigned IP address.  Rows with ip_addr IS NULL are "staging";
# rows with an address are "assigned" (see the web views).
owner = sa.Table(
    'owner', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('create_date', sa.DateTime(timezone=True), nullable=False,
              server_default=sa.func.now()),
    sa.Column('modify_date', sa.DateTime(timezone=True), nullable=False,
              server_default=sa.func.now()),
    sa.Column('lease_date', sa.DateTime(timezone=True), nullable=False,
              server_default=sa.func.now()),
    sa.Column('profile_id', sa.Integer, sa.ForeignKey('profile.id', ondelete='CASCADE'),
              nullable=False),
    sa.Column('ip_addr', pg.INET, nullable=True),
    sa.Column('mac_addr', pg.MACADDR, nullable=True, index=True),
    sa.Column('description', sa.String, nullable=False, server_default=''),
    # An address / MAC may appear at most once within a profile.
    sa.UniqueConstraint('profile_id', 'ip_addr'),
    sa.UniqueConstraint('profile_id', 'mac_addr'),
)
|
brainmorsel/python-dhcp-sprout | ds/web/forms.py | <gh_stars>0
import ipaddress
from wtforms import Form
from wtforms import StringField
from wtforms import TextAreaField
from wtforms import validators
from wtforms import ValidationError
def str_to_ip_list(ip_list_str):
    """Parse a whitespace-separated string into a list of IPv4Address objects.

    Non-string or empty input yields an empty list.
    """
    if not ip_list_str or not isinstance(ip_list_str, str):
        return []
    return [ipaddress.IPv4Address(token) for token in ip_list_str.split()]
def validate_network_address(form, field):
    """WTForms validator: the field must hold a parseable IPv4 network."""
    try:
        ipaddress.IPv4Network(field.data)
    except ValueError as exc:
        raise ValidationError(str(exc))
def validate_ip_address(form, field):
    """WTForms validator: the field must hold a parseable IPv4 address."""
    try:
        ipaddress.IPv4Address(field.data)
    except ValueError as exc:
        raise ValidationError(str(exc))
def validate_ip_list(form, field):
    """WTForms validator: the field must parse as a list of IPv4 addresses."""
    try:
        str_to_ip_list(field.data)
    except ValueError as exc:
        raise ValidationError(str(exc))
def filter_ip_list(ip_list):
    """WTForms filter: render a list of addresses as newline-separated text.

    Strings pass through unchanged; falsy input becomes an empty string.
    """
    if not ip_list:
        return ''
    if isinstance(ip_list, str):
        return ip_list
    return '\n'.join(str(addr) for addr in ip_list)
class ProfileEditForm(Form):
    # Create/edit form for a DHCP profile row (labels are user-facing and
    # intentionally kept in Russian).  Address fields are validated with the
    # ipaddress-based validators above; list fields are normalized to
    # newline-separated text by filter_ip_list.
    name = StringField('Название', [validators.DataRequired()])
    description = TextAreaField('Описание')
    relay_ip = StringField('Relay IP', [validate_ip_address])
    router_ip = StringField('Router IP', [validate_ip_address])
    network_addr = StringField('Сеть', [validate_network_address])
    lease_time = StringField('Длительность аренды')
    dns_ips = TextAreaField('DNS IPs', [validate_ip_list], filters=[filter_ip_list])
    ntp_ips = TextAreaField('NTP IPs', [validate_ip_list], filters=[filter_ip_list])
class AssignedItemEditForm(Form):
    # Edit form for an assigned owner row — only the description is editable.
    description = TextAreaField('Описание')
|
brainmorsel/python-dhcp-sprout | ds/dhcp/proto/opttypes.py | from enum import IntEnum
class OptionType(IntEnum):
    """DHCP option tag numbers; grouping comments follow the RFC layout."""
    # RFC1497 vendor extensions
    Pad = 0
    End = 255
    SubnetMask = 1
    TimeOffset = 2
    Router = 3
    TimeServer = 4
    NameServer = 5
    DomainNameServers = 6
    LogServer = 7
    CookieServer = 8
    LPRServer = 9
    ImpressServer = 10
    ResourceLocationServer = 11
    HostName = 12
    BootFileSize = 13
    MeritDumpFile = 14
    DomainName = 15
    SwapServer = 16
    RootPath = 17
    ExtensionsPath = 18
    # IP Layer Parameters per Host
    IPForwarding = 19
    NonLocalSourceRouting = 20
    PolicyFilter = 21
    MaxDatagramReassemblySize = 22
    DefaultIPTTL = 23
    PathMTUAgingTimeout = 24
    PathMTUPlateauTable = 25
    # IP Layer Parameters per Interface
    InterfaceMTU = 26
    AllSubnetsAreLocal = 27
    BroadcastAddress = 28
    PerformMaskDiscovery = 29
    MaskSupplier = 30
    PerformRouterDiscovery = 31
    RouterSolicitationAddress = 32
    StaticRoute = 33
    # Link Layer Parameters per Interface
    TrailerEncapsulationOption = 34
    ARPCacheTimeout = 35
    EthernetEncapsulation = 36
    # TCP Parameters
    TCPDefaultTTL = 37
    TCPKeepaliveInterval = 38
    TCPKeepaliveGarbage = 39
    # Application and Service Parameters
    NISDomain = 40
    NIServers = 41
    NTPServer = 42
    VendorSpecificInformation = 43
    NetBIOSNameServer = 44
    NetBIOSDDS = 45
    NetBIOSNodeType = 46
    NetBIOSScope = 47
    XWindowFontServer = 48
    XWindowDisplayManager = 49
    NISPlusDomain = 64
    NISPlusServers = 65
    MobileIPHomeAgent = 68
    SMTPServer = 69
    POP3Server = 70
    NNTPServer = 71
    DefaultWWWServer = 72
    DefaultFingerServer = 73
    DefaultIRCServer = 74
    StreetTalkServer = 75
    StreetTalkDirectoryAssistanceServer = 76
    # DHCP Extensions
    RequestedIPaddress = 50
    IPaddressLeaseTime = 51
    OptionOverload = 52
    DHCPMessageType = 53
    ServerIdentifier = 54
    ParameterRequestList = 55
    DHCPMessage = 56
    MaximumDHCPMessageSize = 57
    RenewalTime = 58
    RebindingTime = 59
    VendorClassIdentifier = 60
    ClientIdentifier = 61
    TFTPServerName = 66
    BootfileName = 67
    # Other:
    ClientFQDN = 81  # RFC 4702
    AgentInformation = 82
class AgentInformationOptionType(IntEnum):
    """Sub-option codes inside the Relay Agent Information option (82)."""
    CircuitID = 1
    RemoteID = 2
|
brainmorsel/python-dhcp-sprout | ds/dhcp/bench.py | import logging
import socket
import threading
import time
from .proto.packet import Packet
from .proto.opttypes import OptionType
from .proto.dhcpmsg import MessageType
def sync_worker(address, on_success, on_fail, oneshot=False, macaddr=None, relay_ip=None):
    """Blocking DHCP REQUEST loop used by the benchmark threads.

    Sends a REQUEST to *address*, waits up to 1 second for a reply, and
    reports each outcome via on_success()/on_fail().  With oneshot=True a
    single round-trip is performed and the decoded reply is printed.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(('0.0.0.0', 0))
    sock.settimeout(1)

    request = Packet(message_type=MessageType.REQUEST)
    request.op = Packet.Op.REQUEST
    request.chaddr = macaddr or 'de:12:44:4c:bb:48'
    if relay_ip:
        # Pretend the request passed through a relay agent.
        request.hops = 1
        request.giaddr = relay_ip
        request.add_option(OptionType.AgentInformation, b'\x01\x04test')

    while True:
        sock.sendto(request.pack(), address)
        try:
            # Note: rebinding *address* means follow-up requests target the
            # responder's source address, exactly as the original did.
            data, address = sock.recvfrom(4096)
        except socket.timeout:
            on_fail()
        else:
            on_success()
            if oneshot:
                print(Packet.unpack_from(data))
        if oneshot:
            break
def start_threaded(address, threads=1, macaddr=None, relay_ip=None):
    """Spawn *threads* benchmark workers and print per-second counters forever."""
    host, port = address.split(':')
    port = int(port)

    success_count = 0
    fail_count = 0

    def inc_success():
        nonlocal success_count
        success_count += 1

    def inc_fail():
        nonlocal fail_count
        fail_count += 1

    worker_args = ((host, port), inc_success, inc_fail, False, macaddr, relay_ip)
    for _ in range(threads):
        threading.Thread(target=sync_worker, args=worker_args, daemon=True).start()

    # Main thread: report and reset the (best-effort, unsynchronized)
    # counters once per second.
    while True:
        time.sleep(1.0)
        print('requests success: %s fail: %s' % (success_count, fail_count))
        success_count = 0
        fail_count = 0
def oneshot(address, macaddr, relay_ip):
    """Perform a single benchmark round-trip and print the decoded reply."""
    host, port = address.split(':')
    sync_worker((host, int(port)), lambda: None, lambda: None,
                oneshot=True, macaddr=macaddr, relay_ip=relay_ip)
|
brainmorsel/python-dhcp-sprout | ds/dhcp/util.py | <filename>ds/dhcp/util.py<gh_stars>0
import binascii
from collections import defaultdict
def mac_to_string(addr_bytes):
    """Format raw MAC bytes as a colon-separated lowercase hex string."""
    return ':'.join(format(octet, '02x') for octet in addr_bytes)
# str.translate table that keeps hex digits and deletes everything else
# (separators such as ':', '-' or '.'); missing keys map to None => dropped.
_cleanup_table = defaultdict(lambda: None, {ord(c): c for c in '0123456789abcdefABCDEF'})


def mac_to_bytes(addr_string):
    """Parse a MAC string in any common notation into its raw bytes."""
    hex_digits = addr_string.translate(_cleanup_table)
    return binascii.unhexlify(hex_digits)
|
brainmorsel/python-dhcp-sprout | ds/web/server.py | <reponame>brainmorsel/python-dhcp-sprout
import os
import base64
import logging
from aiohttp import web
from aiohttp_session import session_middleware
from aiohttp_session.cookie_storage import EncryptedCookieStorage
from cryptography.fernet import Fernet
import aiohttp_jinja2
import jinja2
from . import urls
def root_package_name():
    """Name of the top-level package this module belongs to."""
    return __name__.partition('.')[0]
def root_package_path(relative_path=None):
    """Absolute path of the root package directory, optionally joined with a sub-path."""
    root_module = __import__(root_package_name())
    base = os.path.dirname(os.path.abspath(root_module.__file__))
    if relative_path is None:
        return base
    return os.path.join(base, relative_path)
class WebServer:
    """The admin web UI: an aiohttp application plus its listening socket."""

    def __init__(self, config, db, loop=None):
        self._cfg = config
        self._loop = loop
        self._srv = None
        self._handler = None
        self.log = logging.getLogger(__name__)
        # Fernet key must be 32 bytes.
        cookie_secret = config.get('http', 'cookie_secret', fallback=None)
        # Without a configured secret a fresh random key is generated, so
        # existing sessions become invalid on every restart.
        cookie_secret = base64.urlsafe_b64decode(cookie_secret or Fernet.generate_key())
        middlewares = [
            session_middleware(EncryptedCookieStorage(cookie_secret)),
        ]
        app = web.Application(middlewares=middlewares)
        app.ioloop = loop
        app.db = db
        aiohttp_jinja2.setup(app,
            loader=jinja2.FileSystemLoader(root_package_path('web/templates')))

        def make_prefixed_router(url_prefix=''):
            # Returns an add_route() wrapper that prepends url_prefix.
            def add_route(method, url, *args, **kwargs):
                return app.router.add_route(method, url_prefix + url, *args, **kwargs)
            return add_route
        urls.configure(make_prefixed_router())
        app.router.add_static('/', root_package_path('web/static'), name='static')
        self._app = app

    async def start(self):
        """Bind the HTTP listening socket as configured in [http] bind."""
        host, port = self._cfg.get('http', 'bind', fallback='127.0.0.1:8000').split(':')
        self.log.info('listen on http://%s:%s/', host, port)
        self._handler = self._app.make_handler()
        self._srv = await self._loop.create_server(self._handler, host, port)

    async def stop(self):
        """Drain connections, close the shared db engine and the socket."""
        await self._handler.finish_connections(1.0)
        self._app.db.close()
        self._srv.close()
        await self._srv.wait_closed()
        await self._app.finish()
|
brainmorsel/python-dhcp-sprout | setup.py | <gh_stars>0
from setuptools import setup, find_packages
setup(
name='ds',
version='0.1',
packages=find_packages(),
include_package_data=True,
install_requires=[
'click',
'uvloop',
'aiopg',
'psycopg2',
'sqlalchemy',
'aiohttp',
'aiohttp-jinja2',
'aiohttp-session',
'cryptography',
'wtforms',
],
entry_points='''
[console_scripts]
ds-dhcp-server=ds.cli:dhcp_server
ds-web-server=ds.cli:web_server
ds-cli=ds.cli:cli
''',
)
|
brainmorsel/python-dhcp-sprout | ds/cli.py | import sys
import asyncio
import logging.handlers
import signal
from configparser import ConfigParser
import click
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
import aiopg
import psycopg2.extras
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql as pg
import aiopg.sa
from . import db
from .dhcp.server import DHCPServer, DBChannelListener
from .web.server import WebServer
def config_load(config_file):
    """Read an INI configuration file and return the ConfigParser object."""
    parser = ConfigParser(allow_no_value=True)
    parser.read(config_file)
    return parser
class DiagnosticLogFilter:
    """Logging filter that passes only WARNING-and-above records."""

    def filter(self, record):
        return int(record.levelno >= logging.WARNING)
def config_logging(config, log_level=None):
    """Configure the root logger and optional file handlers from [log] config.

    An explicit *log_level* overrides the configured level.  When
    http_access_file / diagnostic_file are set, rotating file handlers are
    attached for aiohttp access logs and WARNING+ diagnostics respectively.
    """
    effective_level = log_level or config.get('log', 'level', fallback='info')
    fmt = config.get('log', 'format', fallback='%(asctime)s %(levelname)-8s %(name)s %(message)s')
    logging.basicConfig(level=getattr(logging, effective_level.upper()), format=fmt)

    access_path = config.get('log', 'http_access_file', fallback=None)
    if access_path:
        access_handler = logging.handlers.RotatingFileHandler(access_path, encoding='utf-8')
        logging.getLogger('aiohttp.access').addHandler(access_handler)

    diag_path = config.get('log', 'diagnostic_file', fallback=None)
    if diag_path:
        diag_handler = logging.handlers.RotatingFileHandler(diag_path, encoding='utf-8')
        diag_handler.addFilter(DiagnosticLogFilter())
        diag_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)-8s %(name)s %(message)s'))
        logging.getLogger().addHandler(diag_handler)
async def config_db(config):
    """Create an aiopg/SQLAlchemy engine from the [database] config section."""
    db_engine = await aiopg.sa.create_engine(
        **config['database'])
    return db_engine
@click.command()
@click.option('-c', '--config', 'config_file', required=True, type=click.Path(exists=True, dir_okay=False))
@click.option('-l', '--log-level', 'log_level')
def dhcp_server(config_file, log_level):
    # Entry point for the DHCP server daemon: wire up logging, the database
    # engine, the NOTIFY listener and the server, then run the event loop.
    config = config_load(config_file)
    config_logging(config, log_level)
    logger = logging.getLogger(__name__)

    loop = asyncio.get_event_loop()
    try:
        loop.add_signal_handler(signal.SIGTERM, loop.stop)
    except NotImplementedError:
        # signals are not available on Windows
        pass

    logger.info('Init db connection...')
    db_engine = loop.run_until_complete(config_db(config))
    # Listens for pg_notify() events on 'dhcp_control' so the web UI can
    # push changes to this running process.
    channel = DBChannelListener(config['database'], 'dhcp_control')
    loop.run_until_complete(channel.start())

    logger.info('Init dhcp server...')
    server = DHCPServer(db_engine, channel, config.get('dhcp', 'default_server_addr', fallback=None))
    loop.run_until_complete(server.db_load_owners())
    # Bind every whitespace-separated "host[:port]" from [dhcp] binds;
    # the port defaults to 67.
    binds = config.get('dhcp', 'binds').split()
    for bind in binds:
        if ':' in bind:
            host, port = bind.split(':')
        else:
            host, port = bind, 67
        server.bind(host, port=port)

    logger.info('Starting main loop...')
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    finally:
        loop.run_until_complete(server.stop())
        loop.run_until_complete(channel.stop())
        logger.info('Awaiting remaining tasks...')
        pending = asyncio.Task.all_tasks()
        loop.run_until_complete(asyncio.gather(*pending))
        loop.close()
        logger.info('Bye!')
@click.command()
@click.option('-c', '--config', 'config_file', required=True, type=click.Path(exists=True, dir_okay=False))
@click.option('-l', '--log-level', 'log_level')
def web_server(config_file, log_level):
    # Entry point for the admin web UI daemon: mirrors dhcp_server's setup
    # but runs the aiohttp-based WebServer instead.
    config = config_load(config_file)
    config_logging(config, log_level)
    logger = logging.getLogger(__name__)

    loop = asyncio.get_event_loop()
    try:
        loop.add_signal_handler(signal.SIGTERM, loop.stop)
    except NotImplementedError:
        # signals are not available on Windows
        pass

    logger.info('Init db connection...')
    db_engine = loop.run_until_complete(config_db(config))

    logger.info('Init web server...')
    ws = WebServer(config, db_engine, loop=loop)
    loop.run_until_complete(ws.start())

    logger.info('Starting main loop...')
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    finally:
        logger.info('Stopping webserver...')
        loop.run_until_complete(ws.stop())
        logger.info('Closing db connections...')
        db_engine.close()
        loop.run_until_complete(db_engine.wait_closed())
        logger.info('Awaiting remaining tasks...')
        pending = asyncio.Task.all_tasks()
        loop.run_until_complete(asyncio.gather(*pending))
        loop.close()
        logger.info('Bye!')
@click.group()
@click.option('-c', '--config', 'config_file', type=click.Path(exists=True, dir_okay=False))
@click.option('-l', '--log-level', 'log_level')
@click.pass_context
def cli(ctx, config_file, log_level):
    # Root command group: when a config file is given, load it, configure
    # logging, and stash the parsed config for sub-commands.
    # NOTE(review): without -c, ctx.obj stays unset, so sub-commands that
    # read ctx.obj['cfg'] (e.g. "db init") will fail — confirm intended.
    if config_file:
        config = config_load(config_file)
        config_logging(config, log_level)
        ctx.obj = {'cfg': config}
@cli.group('db')
def cli_db():
    # Container group for database maintenance sub-commands (see cli_db_init).
    pass
@cli_db.command('init')
@click.pass_context
def cli_db_init(ctx):
    # Create all tables declared on db.metadata in the configured database.
    cfg = ctx.obj['cfg']
    # NOTE(review): credentials are interpolated verbatim into the URI;
    # special characters in the password would need URL-escaping — confirm.
    db_conn_format = 'postgresql://{user}:{password}@{host}:{port}/{database}'
    db_uri = db_conn_format.format(**cfg['database'])
    engine = sa.create_engine(db_uri)
    with engine.connect() as conn:
        db.metadata.create_all(conn)
@cli.command('dhcp-bench')
@click.option('-T', '--threads', default=1)
@click.option('-1', '--oneshot', default=False, is_flag=True)
@click.option('-m', '--macaddr', default=None)
@click.option('-r', '--relay-ip', default=None)
@click.argument('address')
def dhcp_bench(address, threads, oneshot, macaddr, relay_ip):
    # Load-test a DHCP server: a single verbose round-trip (--oneshot) or a
    # continuous multi-threaded request loop printing per-second stats.
    from .dhcp import bench
    if oneshot:
        bench.oneshot(address, macaddr, relay_ip)
    else:
        bench.start_threaded(address, threads, macaddr, relay_ip)
|
brainmorsel/python-dhcp-sprout | ds/dhcp/proto/enc.py | <filename>ds/dhcp/proto/enc.py
import struct
import ipaddress
# Pre-compiled big-endian unsigned 32-bit integer, shared by the encoders.
ST_UINT32 = struct.Struct('!I')


def default(buffer, offset, type_code, value: bytes):
    """Encode a raw TLV option: [type, length, value...]; return bytes written."""
    length = len(value)
    total = 2 + length
    buffer[offset] = type_code
    buffer[offset + 1] = length
    buffer[offset + 2:offset + total] = value
    return total
def pad(buffer, offset, type_code, value):
    """Encode a single-byte option (Pad/End): just the type code, size 1."""
    buffer[offset] = type_code
    return 1
def uint8(buffer, offset, type_code, value):
    """Encode a one-byte integer option: [type, 1, value]; size 3."""
    buffer[offset] = type_code
    buffer[offset + 1] = 1
    buffer[offset + 2] = int(value)
    return 3
def uint32(buffer, offset, type_code, value):
    """Encode a 4-byte big-endian integer option; raw bytes pass through."""
    if isinstance(value, bytes):
        return default(buffer, offset, type_code, value)
    buffer[offset] = type_code
    buffer[offset + 1] = 4
    ST_UINT32.pack_into(buffer, offset + 2, int(value))
    return 6
def ip_address(buffer, offset, type_code, value):
    """Encode one IPv4 address option as its 4-byte integer form."""
    if isinstance(value, bytes):
        return default(buffer, offset, type_code, value)
    buffer[offset] = type_code
    buffer[offset + 1] = 4
    ST_UINT32.pack_into(buffer, offset + 2, int(ipaddress.IPv4Address(value)))
    return 6
def ip_address_list(buffer, offset, type_code, value):
    """Encode a list of IPv4 addresses as a packed 4-bytes-per-address option."""
    count = len(value)
    buffer[offset] = type_code
    buffer[offset + 1] = 4 * count
    for idx, addr in enumerate(value):
        ST_UINT32.pack_into(buffer, offset + 2 + 4 * idx,
                            int(ipaddress.IPv4Address(addr)))
    return 2 + 4 * count
def string(buffer, offset, type_code, value: str):
    """Encode a text option, UTF-8 encoding str values first."""
    payload = value.encode('utf-8') if isinstance(value, str) else value
    return default(buffer, offset, type_code, payload)
|
brainmorsel/python-dhcp-sprout | ds/dhcp/proto/dec.py | from .dhcpmsg import MessageType
from .opttypes import OptionType
def default(buffer, offset):
    """Decode a TLV option; return (raw value bytes, total option size)."""
    length = buffer[offset + 1]
    payload = buffer[offset + 2:offset + 2 + length]
    return payload, 2 + length
def pad(buffer, offset):
    """Decode a one-byte option (Pad/End): no value, size 1."""
    return None, 1
def parameters_request_list(buffer, offset):
    """Decode option 55 (Parameter Request List) into option codes.

    Known codes become OptionType members; unknown codes stay as raw ints.
    """
    length = buffer[offset + 1]
    codes = []
    for raw_code in buffer[offset + 2:offset + 2 + length]:
        try:
            codes.append(OptionType(raw_code))
        except ValueError:
            codes.append(raw_code)
    return codes, 2 + length
def dchp_message_type(buffer, offset):
    """Decode option 53 (DHCP message type); fixed size of 3 bytes."""
    # NOTE(review): the "dchp" misspelling is kept because option.py
    # registers this decoder under exactly this attribute name.
    return MessageType(buffer[offset + 2]), 3
|
brainmorsel/python-dhcp-sprout | ds/dhcp/proto/option.py | <gh_stars>0
from collections import defaultdict
from . import dec
from . import enc
from .opttypes import OptionType
from .opttypes import AgentInformationOptionType
class Option:
    """A single DHCP option: a (type, value) pair with TLV (de)serialization.

    DECODERS/ENCODERS map option types to codec functions; unrecognized
    types fall back to the raw-bytes codec (dec.default / enc.default).
    """
    TYPE_ENUM = OptionType
    DECODERS = defaultdict(lambda: dec.default, {
        OptionType.Pad: dec.pad,
        OptionType.End: dec.pad,
        OptionType.ParameterRequestList: dec.parameters_request_list,
        OptionType.DHCPMessageType: dec.dchp_message_type,
    })
    ENCODERS = defaultdict(lambda: enc.default, {
        OptionType.Pad: enc.pad,
        OptionType.End: enc.pad,
        OptionType.DHCPMessageType: enc.uint8,
        OptionType.SubnetMask: enc.ip_address,
        OptionType.Router: enc.ip_address,
        OptionType.IPaddressLeaseTime: enc.uint32,
        OptionType.ServerIdentifier: enc.ip_address,
        OptionType.DomainNameServers: enc.ip_address_list,
        OptionType.HostName: enc.string,
        OptionType.NTPServer: enc.ip_address_list,
    })

    def __init__(self, type, value=None, _byte_size=0):
        # _byte_size: encoded size in bytes; filled in by unpack_from().
        self.type = type
        self.value = value
        self._byte_size = _byte_size

    def __repr__(self):
        return '{0} [size {2}]: {1}'.format(str(self.type), str(self.value), self._byte_size)

    @classmethod
    def unpack_from(cls, buffer, offset=0):
        """Decode one option starting at *offset*; unknown codes stay ints."""
        type_code = buffer[offset]
        try:
            type_code = cls.TYPE_ENUM(type_code)
        except ValueError:
            # Not a known option code — keep the raw integer.
            pass
        decoder = cls.DECODERS[type_code]
        value, size = decoder(buffer, offset)
        return cls(type_code, value, _byte_size=size)

    def pack_into(self, buffer, offset):
        """Encode this option into *buffer* at *offset*; return bytes written."""
        encoder = self.ENCODERS[self.type]
        bytes_size = encoder(buffer, offset, self.type, self.value)
        return bytes_size
class AgentInformationSubOption(Option):
    """Sub-option of option 82 (Relay Agent Information); raw-bytes codec only."""
    TYPE_ENUM = AgentInformationOptionType
    DECODERS = defaultdict(lambda: dec.default)
    ENCODERS = defaultdict(lambda: enc.default)
|
brainmorsel/python-dhcp-sprout | ds/dhcp/proto/packet.py | <gh_stars>0
import ipaddress
import struct
from enum import IntEnum
from ..util import mac_to_string
from ..util import mac_to_bytes
from .option import Option
from .option import AgentInformationSubOption
from .opttypes import OptionType
from .opttypes import AgentInformationOptionType
from .dhcpmsg import MessageType
# Length in bytes of an Ethernet MAC address (the only hlen accepted below).
MAC_ADDRESS_LEN = 6
# Output buffer size used by Packet.pack(); presumably the classic 576-byte
# minimum IP datagram a DHCP client must accept — confirm.
MIN_PACKET_LEN = 576


class HardwareAddressType(IntEnum):
    # we only support Ethernet hardware address type
    ETHERNET = 1
class Packet:
    """A DHCP/BOOTP packet: the fixed header fields plus a list of options."""

    class Op(IntEnum):
        REQUEST = 1
        REPLY = 2

    # Fixed BOOTP header layout, network byte order.
    STRUCT = struct.Struct('!4BL2H4L16s64s128s')
    FIELD_NAMES = 'op htype hlen hops xid secs flags ciaddr yiaddr siaddr giaddr chaddr sname file'.split()
    # Header fields stored as ipaddress.IPv4Address instances.
    _IP_FIELDS = ['ciaddr', 'yiaddr', 'siaddr', 'giaddr']
    F_BROADCAST = 0x8000
    # Marks the start of the options area right after the fixed header.
    MAGIC_COOKIE = bytes([99, 130, 83, 99])

    def __init__(self, *, message_type=None, options=None, _field_values=None):
        """Build a packet, either empty or from unpacked header field values.

        _field_values is the tuple produced by STRUCT.unpack_from() (used by
        unpack_from()); without it a zeroed REQUEST-shaped packet is created.
        """
        self._options = options or []
        self.message_type = message_type
        if _field_values:
            if len(_field_values) != len(self.FIELD_NAMES):
                raise ValueError('Values count not match fields count.')
            for field, value in zip(self.FIELD_NAMES, _field_values):
                if field in self._IP_FIELDS:
                    value = ipaddress.IPv4Address(value)
                self.__dict__[field] = value
            self.op = self.Op(self.op)
            self.htype = HardwareAddressType(self.htype)
            if self.hlen != MAC_ADDRESS_LEN:
                raise ValueError('Invalid hardware address size.')
            # chaddr arrives as 16 padded bytes; keep the MAC as a string.
            self.chaddr = mac_to_string(self.chaddr[:MAC_ADDRESS_LEN])
            # sname/file are NUL-padded fixed-size fields; strip the padding.
            self.sname, _, _ = self.sname.partition(b'\0')
            self.file, _, _ = self.file.partition(b'\0')
        else:
            self.op = self.Op.REQUEST
            self.htype = HardwareAddressType.ETHERNET
            self.hlen = MAC_ADDRESS_LEN
            self.hops = 0
            self.xid = 0
            self.secs = 0
            self.flags = 0
            self.chaddr = '00:00:00:00:00:00'
            self.sname = b''
            self.file = b''
            for field in self._IP_FIELDS:
                self.__dict__[field] = ipaddress.IPv4Address(0)

    def __repr__(self):
        parts = ['DHCPPacket:']
        for field in self.FIELD_NAMES:
            parts.append('  {0}: {1}'.format(field, str(self.__dict__[field])))
        if self.message_type:
            parts.append('  MessageType: {0}'.format(str(self.message_type)))
        if self._options:
            parts.append('  Options:')
            for o in self._options:
                parts.append('    {0}'.format(o))
        return '\n'.join(parts)

    def add_option(self, type_code, value):
        """Append an option to the packet."""
        self._options.append(Option(type_code, value))

    def reset_options(self):
        """Drop all options."""
        self._options = []

    def get_circuit_id(self):
        """Return the Circuit ID sub-option value from option 82, or None."""
        opt_value = b''
        for o in self._options:
            if o.type == OptionType.AgentInformation:
                opt_value = o.value
                break
        offset = 0
        while offset < len(opt_value):
            sub_opt = AgentInformationSubOption.unpack_from(opt_value, offset)
            if sub_opt.type == AgentInformationOptionType.CircuitID:
                return sub_opt.value
            # BUGFIX: advance past this sub-option; previously the offset was
            # never incremented, so the scan could not move beyond the first
            # (non-CircuitID) sub-option.
            offset += sub_opt._byte_size
        return None

    @classmethod
    def unpack_from(cls, buffer, offset=0):
        """Decode a packet (header + options) from *buffer* at *offset*."""
        values = cls.STRUCT.unpack_from(buffer, offset=offset)
        options = []
        message_type = None
        offset += cls.STRUCT.size
        if len(buffer) > offset:
            if buffer[offset:offset + 4] != cls.MAGIC_COOKIE:
                raise ValueError('Options magic cookie not matched.')
            offset += 4
            while offset < len(buffer):
                option = Option.unpack_from(buffer, offset)
                offset += option._byte_size
                if option.type == OptionType.End:
                    break
                if option.type == OptionType.Pad:
                    continue
                if option.type == OptionType.DHCPMessageType:
                    # Option 53 is promoted to the message_type attribute
                    # instead of staying in the options list.
                    message_type = MessageType(option.value)
                    continue
                options.append(option)
        return cls(options=options, _field_values=values, message_type=message_type)

    def pack_into(self, buffer, offset=0):
        """Encode the packet into *buffer*; return the offset past the End option."""
        values = (
            self.op,
            self.htype,
            self.hlen,
            self.hops,
            self.xid,
            self.secs,
            self.flags,
            int(ipaddress.IPv4Address(self.ciaddr)),
            int(ipaddress.IPv4Address(self.yiaddr)),
            int(ipaddress.IPv4Address(self.siaddr)),
            int(ipaddress.IPv4Address(self.giaddr)),
            mac_to_bytes(self.chaddr),
            self.sname,
            self.file
        )
        self.STRUCT.pack_into(buffer, offset, *values)
        offset += self.STRUCT.size
        if self._options or self.message_type:
            buffer[offset:offset + 4] = self.MAGIC_COOKIE
            offset += 4
            if self.message_type is not None:
                # Option 53 always comes first in the options area.
                msg_opt = Option(OptionType.DHCPMessageType, self.message_type)
                offset += msg_opt.pack_into(buffer, offset)
            for option in self._options:
                offset += option.pack_into(buffer, offset)
            offset += Option(OptionType.End).pack_into(buffer, offset)
        return offset

    def pack(self):
        """Encode the packet and return it as a trimmed byte buffer."""
        buffer = bytearray(MIN_PACKET_LEN)
        size = self.pack_into(buffer)
        return buffer[:size]

    def make_reply(self, server_addr, offered_addr):
        """Build an OFFER/ACK reply to this DISCOVER/REQUEST packet."""
        if self.message_type == MessageType.DISCOVER:
            pkt = self.__class__(message_type=MessageType.OFFER)
        elif self.message_type == MessageType.REQUEST:
            pkt = self.__class__(message_type=MessageType.ACK)
        else:
            raise ValueError('Can reply only to DISCOVER or REQUEST.')
        pkt.op = self.Op.REPLY
        pkt.xid = self.xid
        pkt.chaddr = self.chaddr
        pkt.siaddr = ipaddress.IPv4Address(server_addr or '0.0.0.0')
        pkt.yiaddr = ipaddress.IPv4Address(offered_addr)
        pkt.giaddr = ipaddress.IPv4Address(self.giaddr)
        pkt.hops = self.hops
        return pkt
|
brainmorsel/python-dhcp-sprout | ds/web/urls.py | <reponame>brainmorsel/python-dhcp-sprout<filename>ds/web/urls.py
from . import views
def configure(add):
    """Register every URL route via the add(method, path, handler) callback."""
    routes = (
        ('GET', '/', views.index),
        ('GET', '/profile/', views.profile_list),
        ('*', '/profile/new', views.profile_edit),
        ('*', '/profile/{id}/edit', views.profile_edit),
        ('*', '/profile/{id}/delete', views.profile_delete),
        ('GET', '/staging/', views.staging_list),
        ('GET', '/staging/{id}/assign-ip', views.staging_assign_ip),
        ('GET', '/staging/{id}/delete', views.staging_delete),
        ('GET', '/assigned/', views.assigned_list),
        ('GET', '/assigned/{id}/delete', views.assigned_delete),
        ('*', '/assigned/{id}/edit', views.assigned_edit),
    )
    for method, path, handler in routes:
        add(method, path, handler)
|
brainmorsel/python-dhcp-sprout | ds/dhcp/proto/dhcpmsg.py | <reponame>brainmorsel/python-dhcp-sprout
from enum import IntEnum
class MessageType(IntEnum):
    """DHCP message type codes carried in option 53."""
    DISCOVER = 1
    OFFER = 2
    REQUEST = 3
    DECLINE = 4
    ACK = 5
    NAK = 6
    RELEASE = 7
    # rfc2132
    INFORM = 8
    # rfc4388
    LEASEQUERY = 10
    LEASEUNASSIGNED = 11
    LEASEUNKNOWN = 12
    LEASEACTIVE = 13
|
0dysseas/news-indicator | newsindicator/utils.py | import json
import os
def print_json_object(obj):
    """Pretty-print *obj* as indented JSON to stdout."""
    print('Printing JSON object')
    print(json.dumps(obj, indent=4))
def get_asset(asset='sources'):
    """Return the absolute path of a bundled asset.

    ``asset='sources'`` selects assets/news_sources.txt; any other value
    selects the icon at assets/news_icon.png.
    """
    absolute_path = os.path.dirname(os.path.abspath(__file__))
    # BUGFIX: the original used "is not" — identity comparison against a
    # string literal, which is implementation-dependent.  Use != instead.
    if asset != 'sources':
        return os.path.join(absolute_path, 'assets/news_icon.png')
    return os.path.join(absolute_path, 'assets/news_sources.txt')
def get_news_sources_from_file():
    """Parse the bundled sources file into a {name: url} dict.

    Lines beginning with '#' and blank lines are skipped; the remaining
    lines are "name = value" pairs, one per line.
    """
    news_sources = dict()
    with open(get_asset(), 'r') as source_file:
        for raw_line in source_file:
            if raw_line.startswith('#') or not raw_line.split():
                continue
            parts = raw_line.split(' = ')
            news_sources[parts[0]] = parts[1].rstrip('\n')
    return news_sources
def delete_redundant_items(json_news, keys_to_del):
    """Remove the given keys from *json_news* in place and return it."""
    for key in keys_to_del:
        del json_news[key]
    return json_news
|
0dysseas/news-indicator | newsindicator/news_indicator.py | import sys
import signal
import logging
import webbrowser
from Queue import Queue
from datetime import datetime
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('AppIndicator3', '0.1')
import notify2
from gi.repository import Gtk, AppIndicator3, GObject
from apscheduler.schedulers.background import BlockingScheduler
from apscheduler.events import EVENT_JOB_EXECUTED
from get_news import DownloadNewsWorker
from utils import get_asset
from about_and_settings_wins import SettingsState, render_settings_window, render_about_window, INTERVALS
# Constants
APP = 'News-Indicator'
JOB_ID = 'news_job'
ICON = get_asset(asset='icon')

try:
    scheduler = BlockingScheduler()
except ImportError:
    # NOTE(review): BlockingScheduler() raising ImportError at call time is
    # unlikely (the import above would already have failed); kept for safety.
    # BUGFIX: removed the unreachable sys.exit(1) that followed this raise.
    raise ImportError('Failed to import Scheduler')
class NewsIndicator(object):
"""
The main Indicator class. It's the Observer B in the Observer pattern and changes state (notifications, interval)
based on the Observable's behavior.
"""
# Class variables to be shared among all NewsIndicator instances
menu = None
update_interval = 10
notifications = True
indicator = AppIndicator3.Indicator.new(APP, ICON, AppIndicator3.IndicatorCategory.OTHER)
indicator.set_status(AppIndicator3.IndicatorStatus.ACTIVE)
def __init__(self):
self.app = APP
self.settings_changed = False
def __repr__(self):
return self.app
@staticmethod
def open_news_url(self, url):
"""
Opens the selected url in the default browser
"""
try:
if not webbrowser.open_new_tab(url):
raise webbrowser.Error
except webbrowser.Error:
print('Unable to open a web browser, try accessing this URL manually instead:\n{0}'.format(url))
@staticmethod
def stop(self):
scheduler.shutdown(wait=False)
Gtk.main_quit()
sys.exit(1)
@staticmethod
def on_about(self):
"""
Callback function for the about menu item
"""
render_about_window()
@staticmethod
def on_settings(self):
"""
Callback function for the settings menu item. Pulls the app state, notifications state & the interval
selected by the user, and updates accordingly the SettingsState class.
"""
# Pull the current app state from the relay Observer object
status, interval, ntfc_status, ntfc_state = settings_state.get_state()
# Pass it to the Observable object in order to render the Settings window
settings_changed, update_interval, ntfc_changed, ntfc_selected = render_settings_window(
status, interval, ntfc_status, ntfc_state, settings_state)
# Register any state changes
settings_state.update_state(settings_changed, update_interval, ntfc_changed, ntfc_selected)
# If the interval has changed, reprogram scheduler to run at the new interval
if settings_state.intrvl_change_trig:
modify_scheduler(JOB_ID, settings_state.settings_interval)
if settings_state.notification_change_trig:
NewsIndicator.notifications = False if not settings_state.notification_state else True
def create_and_update_menu(self, list_of_news):
    """Thin wrapper: rebuild the indicator menu from *list_of_news*."""
    self.create_menu(list_of_news)
def create_menu(self, menu_items):
    """Build the indicator menu from *menu_items*.

    One clickable entry per article dict (keys 'title' and 'url'),
    followed by a separator and the static Settings / About / Exit
    entries. Returns the populated Gtk.Menu.
    """
    self.menu = Gtk.Menu()
    # Iterate directly: the index previously produced by enumerate() was unused.
    for article in menu_items:
        news_entry = Gtk.MenuItem(article['title'])
        news_entry.connect('activate', self.open_news_url, article['url'])
        self.menu.append(news_entry)
    # separator item
    separator = Gtk.SeparatorMenuItem()
    self.menu.append(separator)
    # settings item
    settings_item = Gtk.MenuItem('Settings')
    settings_item.connect('activate', self.on_settings)
    self.menu.append(settings_item)
    # about item
    about_item = Gtk.MenuItem('About')
    about_item.connect('activate', self.on_about)
    self.menu.append(about_item)
    # exit item
    exit_item = Gtk.MenuItem('Exit')
    exit_item.connect('activate', self.stop)
    self.menu.append(exit_item)
    self.menu.show_all()
    self.indicator.set_menu(self.menu)
    return self.menu
def show_notifications(run_time):
    """Pop up a desktop notification announcing the freshly fetched news."""
    # Initialize the d-bus connection and build the notification object.
    notify2.init("News Indicator")
    notification = notify2.Notification(None, icon=ICON)
    # Normal urgency; auto-dismiss after ten seconds.
    notification.set_urgency(notify2.URGENCY_NORMAL)
    notification.set_timeout(10000)
    formatted_time = run_time.time().strftime('%H:%M')
    message = 'Your {} news are here!'.format(formatted_time)
    notification.update('News Indicator', message=message, icon=ICON)
    notification.show()
def listen_for_new_updates(event):
    """
    Listener triggered every time a successful job (news retrieval) is executed.
    Upon its call it creates and renders the main indicator menu.
    """
    # event.retval is the list produced by main(); an empty list skips the rebuild.
    if event.retval:
        news_indicator.create_and_update_menu(event.retval)
        if NewsIndicator.notifications:
            show_notifications(event.scheduled_run_time)
    # NOTE(review): Gtk.main() is re-entered on every job completion; this
    # appears intentional (the listener runs off the Gtk loop) -- confirm
    # against the scheduler's threading model before changing.
    Gtk.main()
def modify_scheduler(job_id, new_interval):
    """Reschedule *job_id* to run at the user-selected interval.

    *new_interval* indexes INTERVALS, whose values look like '10 Minutes'.
    The minute count is taken as the first whitespace-separated token
    instead of a fixed two-character slice, so it stays correct for
    one- or three-digit intervals as well.
    """
    minutes = INTERVALS[new_interval].split()[0]
    scheduler.reschedule_job(job_id, trigger='interval', minutes=int(minutes))
@scheduler.scheduled_job('interval', next_run_time=datetime.now(), minutes=10, id=JOB_ID, name='retrieve_news_job')
def main():
    """Fetch the latest news from all sources and return them as a list.

    Runs as a scheduled job; the return value is delivered to the
    EVENT_JOB_EXECUTED listener.
    """
    news_queue = Queue()
    logging.info('Retrieving news...')
    worker = DownloadNewsWorker(news_queue)
    worker.retrieve_news()
    # Drain everything the worker threads produced into a plain list.
    collected = []
    while not news_queue.empty():
        collected.append(news_queue.get())
    return collected
def run_indicator():
    """Wire up the indicator, default settings state and scheduler, then start."""
    global news_indicator
    global settings_state
    news_indicator = NewsIndicator()
    # Defaults: retrieval every 10 minutes (index 0), notifications enabled.
    settings_state = SettingsState(False, 0, False, True)
    # Let Ctrl+C terminate the process instead of being swallowed by Gtk.
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    scheduler.add_listener(listen_for_new_updates, EVENT_JOB_EXECUTED)
    scheduler.start()
# Script entry point.
if __name__ == '__main__':
    run_indicator()
|
0dysseas/news-indicator | newsindicator/__init__.py | #__version__ used as the starting point for SemVer
__version__ = "2.6.3" |
0dysseas/news-indicator | setup.py | <filename>setup.py
#!/usr/bin/python
# from distutils.core import setup
from setuptools import setup
# Package meta-data.
NAME = 'newsindicator'
DESCRIPTION = 'Linux app indicator that retrieves news from top media outlets'
URL = 'https://github.com/0dysseas/news-indicator'
# Third-party runtime dependencies installed alongside the package.
REQUIRED = [
    'requests', 'APScheduler', 'notify2',
]
# Package definition. The license classifier below is the official trove
# classifier for LGPLv3; the previous value ('GLPL v3.0 License') is not a
# valid classifier and is rejected by PyPI.
setup(name=NAME,
      version='1.0.0',
      description=DESCRIPTION,
      url=URL,
      license='GNU Lesser General Public License v3.0',
      packages=['newsindicator'],
      install_requires=REQUIRED,
      classifiers=[
          'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2.6',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: Implementation :: CPython',
          'Programming Language :: Python :: Implementation :: PyPy'
      ],
      # Desktop entry and icon installed system-wide.
      data_files=[
          ('/usr/share/applications', ['newsindicator.desktop']),
          ('/usr/share/icons', ['newsindicator_icon.png'])
      ],
      package_data={'newsindicator': ['assets/*']},
      scripts=['bin/newsindicator']
      )
0dysseas/news-indicator | newsindicator/about_and_settings_wins.py | import gi
gi.require_version('Gtk', '3.0')
gi.require_version('AppIndicator3', '0.1')
from gi.repository import Gtk
# Text shown in the About dialog.
COMMENT = 'News Indicator is an appindicator that retrieves the latest news articles,' \
          ' on a variety of topics from top media outlets.\n\n\nBuilt with Python and powered by NewsAPI.'
# News retrieval intervals (index -> label shown in the settings combo box).
INTERVALS = {0: '10 Minutes', 1: '15 Minutes', 2: '20 Minutes', 3: '30 Minutes', 4: '60 Minutes'}
class AboutWindow(Gtk.Dialog):
    """Dialog wrapper that builds and displays the application's about box."""

    def __init__(self):
        """Create the Gtk about dialog and show it immediately."""
        super(AboutWindow, self).__init__()
        dialog = Gtk.AboutDialog()
        dialog.set_program_name('News Indicator')
        dialog.set_logo_icon_name(None)
        dialog.set_comments(COMMENT)
        dialog.set_title('')
        dialog.connect('response', self.on_close)
        dialog.show()

    def on_close(self, action, parameter):
        """Destroy the dialog when the user dismisses it."""
        action.destroy()

    def __repr__(self):
        """Represent the window by its (empty) title."""
        return self.get_title()
class SettingsState(object):
    """
    Observer A of the Observer pattern: holds the current app state
    (notification toggle and retrieval interval) and relays it between
    the indicator and the settings window.
    """

    def __init__(self, intrvl_change_trig, settings_interval, ntfc_change_trig, notification_state):
        """Record the initial interval and notification state."""
        # Interval drop-down state.
        self.intrvl_change_trig = intrvl_change_trig
        self.settings_interval = settings_interval
        # Notification switch state.
        self.notification_change_trig = ntfc_change_trig
        self.notification_state = notification_state

    def get_state(self):
        """Return the full state as a 4-tuple (interval first, notifications last)."""
        snapshot = (self.intrvl_change_trig, self.settings_interval,
                    self.notification_change_trig, self.notification_state)
        return snapshot

    def update_state(self, new_settings_instance_trig, new_interval, new_ntfc_instance_trig, new_ntfc_change):
        """Overwrite both the interval and the notification portions of the state."""
        # Interval option first, then the notification switch.
        self.intrvl_change_trig = new_settings_instance_trig
        self.settings_interval = new_interval
        self.notification_change_trig = new_ntfc_instance_trig
        self.notification_state = new_ntfc_change
class Settings(Gtk.ApplicationWindow):
    """
    Observable class, based on the Observer pattern, that renders the settings
    window. It is coupled to the SettingsState class and holds the independent
    functionality (scheduler interval, notification switch).
    """

    def __init__(self, called, interv, ntfc_called, ntfc_state, state):
        # Store the state of the whole Settings window.
        self.settings_called = called
        self.interval = interv
        self.notifications_called = ntfc_called
        self.notifications_state = ntfc_state
        Gtk.Window.__init__(self, title="Settings")
        self.set_size_request(300, 200)
        self.set_border_width(10)
        # Create the outer box
        box_outer = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
        self.add(box_outer)
        # and then the main List box
        listbox = Gtk.ListBox()
        listbox.set_selection_mode(Gtk.SelectionMode.NONE)
        box_outer.pack_start(listbox, True, True, 0)
        # First row: "Enable Notifications" label + on/off switch.
        row = Gtk.ListBoxRow()
        horizontal_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=50)
        row.add(horizontal_box)
        vertical_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        horizontal_box.pack_start(vertical_box, True, True, 0)
        # Create the Notification label
        notification_label = Gtk.Label("Enable Notifications", xalign=0)
        vertical_box.pack_start(notification_label, True, True, 0)
        # and its switch
        switch = Gtk.Switch()
        switch.props.valign = Gtk.Align.CENTER
        # Default notifications state is ON
        switch.set_active(self.notifications_state)
        switch.connect('notify::active', self.on_notification_change)
        horizontal_box.pack_start(switch, False, True, 0)
        listbox.add(row)
        # Second row: "Retrieval Intervals" label + interval combo box.
        row = Gtk.ListBoxRow()
        hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=50)
        row.add(hbox)
        retrieval_label = Gtk.Label("Retrieval Intervals", xalign=0)
        # Create the main combo-box
        combo = Gtk.ComboBoxText()
        # and populate it from the INTERVALS mapping
        for key, value in INTERVALS.items():
            combo.insert(key, str(key), value)
        # Default retrieval time is 10 mins
        combo.set_active(self.interval)
        combo.connect('changed', self.on_interval_change, state)
        hbox.pack_start(retrieval_label, True, True, 0)
        hbox.pack_start(combo, False, True, 0)
        listbox.add(row)

    def on_interval_change(self, combo, state):
        """
        Callback fired when the user changes the retrieval time interval.
        """
        self.settings_called = True
        state.intrvl_change_trig = True
        index = combo.get_active_text()
        active = combo.get_active()
        if index:
            # NOTE(review): re-sets the already-active index, which looks
            # like a no-op -- presumably intended to normalise the widget;
            # confirm before simplifying.
            combo.set_active(active)
            # Store the selected index (maps to minutes via INTERVALS).
            self.interval = active
        return self.interval

    def on_notification_change(self, switch, active):
        """
        Callback fired upon changing the notification state (ON/OFF).
        """
        self.notifications_called = True
        # NOTE(review): reading the state and writing it straight back looks
        # like a no-op; presumably meant to commit the pending state -- confirm.
        state = switch.get_state()
        switch.set_state(state)
        self.notifications_state = state
        return self.notifications_state

    def __repr__(self):
        # Represent the window by its title ("Settings").
        return self.get_title()
def render_settings_window(s_called, s_int, ntfc_called, ntfc_state, s_state):
    """Show the settings window and, once it closes, return its final state."""
    settings_win = Settings(s_called, s_int, ntfc_called, ntfc_state, s_state)
    settings_win.connect("delete-event", Gtk.main_quit)
    settings_win.show_all()
    # Block until the user closes the window.
    Gtk.main()
    return (settings_win.settings_called, settings_win.interval,
            settings_win.notifications_called, settings_win.notifications_state)
def render_about_window():
    """Instantiate the about dialog and hand control to the Gtk loop."""
    AboutWindow()
    Gtk.main()
|
0dysseas/news-indicator | newsindicator/tests/test_assert.py | import unittest
class TestAssert(unittest.TestCase):
    """Sanity-check suite asserting trivially-true string equalities."""

    def test_first(self):
        """'foo' equals itself."""
        self.assertEqual('foo', 'foo')

    def test_second(self):
        """'bar' equals itself."""
        self.assertEqual('bar', 'bar')

    def test_third(self):
        """'foobar' equals itself."""
        self.assertEqual('foobar', 'foobar')
if __name__ == '__main__':
unittest.main() |
0dysseas/news-indicator | newsindicator/get_news.py | <filename>newsindicator/get_news.py
import logging
import os
import sys
from Queue import Queue
from threading import Thread
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('AppIndicator3', '0.1')
import requests
import notify2
from utils import get_news_sources_from_file, delete_redundant_items
# Number of concurrent download worker threads (arbitrary; tune as needed).
NUM_THREADS = 8
# Alert text shown when the NEWS_API_KEY environment variable is missing.
MESSAGE = ' NEWS_API_KEY not found!! \nHave you stored it in ~/.profile?'
logging.basicConfig(level=logging.INFO)
def show_alert_notifications():
    """Pop up an alert notification telling the user the API key is missing."""
    # Initialize the d-bus connection and build the notification object.
    notify2.init("News Indicator")
    alert = notify2.Notification(None)
    # Normal urgency; auto-dismiss after nine seconds.
    alert.set_urgency(notify2.URGENCY_NORMAL)
    alert.set_timeout(9000)
    alert.update('News Indicator', message=MESSAGE)
    alert.show()
class DownloadWorker(Thread):
    """Worker thread that downloads articles from a queue of API endpoints.

    Each worker repeatedly pulls a URL from *input_queue*, fetches the JSON
    payload and pushes trimmed-down article dicts onto *out_queue*.
    """

    def __init__(self, input_queue, out_queue):
        # Run download_content() in this thread once start() is called.
        Thread.__init__(self, target=self.download_content)
        self.input_queue = input_queue
        self.out_queue = out_queue

    def __repr__(self):
        # __repr__ must return a string; previously this returned the queue
        # object itself, which raises TypeError when repr() is called.
        return 'DownloadWorker({!r})'.format(self.input_queue)

    def _form_news_structure(self, json_news):
        """Strip redundant fields and enqueue the leading articles of one source."""
        keys_to_remove = ['status', 'sortBy']
        sub_keys_to_remove = ['description', 'author', 'publishedAt']
        filtered_news_sources_format = delete_redundant_items(json_news, keys_to_remove)
        # Keep only the first two articles from each source (the old comment
        # said "four", contradicting the [:2] slice). The enumerate() index
        # was unused, so iterate directly.
        for article in filtered_news_sources_format['articles'][:2]:
            final_news_sources_format = delete_redundant_items(article, sub_keys_to_remove)
            self.out_queue.put(final_news_sources_format)
        return json_news

    def download_content(self):
        """
        Asynchronously downloads the content from the news sources.
        Runs forever; the thread is daemonized by its creator.
        """
        while True:
            link = self.input_queue.get()
            response = requests.get(link).json()
            self._form_news_structure(response)
            self.input_queue.task_done()
class DownloadNewsWorker(object):
    """Feeds news-source API endpoints to a pool of DownloadWorker threads."""

    def __init__(self, output_queue):
        # Queue that the worker threads fill with processed articles.
        self.output_queue = output_queue

    def __repr__(self):
        # __repr__ must return a string; previously this returned the queue
        # object itself, which raises TypeError when repr() is called.
        return 'DownloadNewsWorker({!r})'.format(self.output_queue)

    def retrieve_news(self):
        """Spawn the worker pool and enqueue every configured news source."""
        try:
            api_key = os.environ['NEWS_API_KEY']
        except KeyError:
            # Without an API key nothing can be fetched: alert the user and bail.
            show_alert_notifications()
            sys.exit(1)
        # Create an input_queue to store all API endpoints
        input_queue = Queue()
        # Worker-thread count is arbitrary and may be tuned for performance.
        for _ in range(NUM_THREADS):
            download_worker = DownloadWorker(input_queue, self.output_queue)
            # Daemonize the worker so the main thread can always exit
            # (attribute form replaces the deprecated setDaemon()).
            download_worker.daemon = True
            download_worker.start()
        news_sources = get_news_sources_from_file()
        # Put each news source into the queue, appending the API key.
        for _, val in news_sources.items():
            news_item = '='.join([val, api_key])
            input_queue.put(news_item)
        input_queue.join()
|
hartono-wen/get-ip-address-with-python | main.py | <gh_stars>0
import requests

# API Specification: http://ip-api.com/docs/api:json
response = requests.get('http://ip-api.com/json')
# Parse the JSON body once instead of re-parsing it for every printed field.
geo = response.json()
print("This computer's IP Address is {0}".format(geo['query']))
print("The timezone of the request origin location is {0}".format(geo['timezone']))
print("The region of the request origin location is {0}".format(geo['regionName']))
print("The country of the request origin location is {0}".format(geo['country']))
print("The ISP used by the request origin location is {0}".format(geo['isp']))
|
Qone2/real-matzip-backend-detection-api-server | app.py | <reponame>Qone2/real-matzip-backend-detection-api-server
import time
from absl import app, logging
import cv2
import numpy as np
import tensorflow as tf
from yolov3_tf2.models import (
YoloV3, YoloV3Tiny
)
from yolov3_tf2.dataset import transform_images, load_tfrecord_dataset
from yolov3_tf2.utils import draw_outputs
from flask import Flask, request, Response, jsonify, send_from_directory, abort
import os
import requests
import json
# customize your API through the following parameters
classes_path = './data/labels/coco.names'
weights_path = './weights/yolov3.tf'
tiny = False  # set to True if using a Yolov3 Tiny model
size = 416  # size images are resized to for model
output_path = './detections/'  # path to output folder where images with detections are saved
num_classes = 80  # number of classes in model
# load in weights and classes
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
    # Grow GPU memory on demand instead of grabbing it all up front.
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
if tiny:
    yolo = YoloV3Tiny(classes=num_classes)
else:
    yolo = YoloV3(classes=num_classes)
# expect_partial() silences warnings about optimizer slots absent at inference.
yolo.load_weights(weights_path).expect_partial()
print('weights loaded')
class_names = [c.strip() for c in open(classes_path).readlines()]
print('classes loaded')
# Initialize Flask application
app = Flask(__name__)
# API that returns JSON with classes found in images
# API that returns JSON with classes found in images
@app.route('/detections/by-image-files', methods=['POST'])
def get_detections_by_image_files():
    """Run detection on every uploaded image and return the results as JSON.

    Uploaded files are saved under ./temp/, decoded, fed through the YOLO
    model, and removed again; an annotated copy of each image is also
    written to output_path.
    """
    raw_images = []
    images = request.files.getlist("images")
    image_names = []
    for image in images:
        image_name = "./temp/" + image.filename
        image_names.append(image_name)
        image.save(os.path.join(os.getcwd(), image_name[2:]))
        img_raw = None
        try:
            img_raw = tf.image.decode_image(
                open(image_name, 'rb').read(), channels=3)
        except tf.errors.InvalidArgumentError:
            # remove temporary images before aborting
            for name in image_names:
                os.remove(name)
            abort(404, "it is not an image file or image file is an unsupported format. try jpg or png")
        except Exception as e:
            # remove temporary images before aborting
            for name in image_names:
                os.remove(name)
            print(e.__class__)
            print(e)
            abort(500)
        raw_images.append(img_raw)
    num = 0
    # create list for final response
    response = []
    for j in range(len(raw_images)):
        # create list of responses for current image
        responses = []
        raw_img = raw_images[j]
        num += 1
        img = tf.expand_dims(raw_img, 0)
        img = transform_images(img, size)
        t1 = time.time()
        boxes, scores, classes, nums = yolo(img)
        t2 = time.time()
        print('time: {}'.format(t2 - t1))
        print('detections:')
        for i in range(nums[0]):
            print('\t{}, {}, {}'.format(class_names[int(classes[0][i])],
                                        np.array(scores[0][i]),
                                        np.array(boxes[0][i])))
            responses.append({
                "class": class_names[int(classes[0][i])],
                "confidence": float("{0:.2f}".format(np.array(scores[0][i])*100)),
                "box": np.array(boxes[0][i]).tolist()
            })
        # [7:] strips the "./temp/" prefix from the stored path.
        response.append({
            "image": image_names[j][7:],
            "detections": responses
        })
        # Write an annotated copy (TF decodes RGB; OpenCV writes BGR).
        img = cv2.cvtColor(raw_img.numpy(), cv2.COLOR_RGB2BGR)
        img = draw_outputs(img, (boxes, scores, classes, nums), class_names)
        cv2.imwrite(output_path + 'detection' + str(num) + '.jpg', img)
        print('output saved to: {}'.format(output_path + 'detection' + str(num) + '.jpg'))
    # remove temporary images
    for name in image_names:
        os.remove(name)
    try:
        return Response(response=json.dumps({"response": response}), mimetype="application/json")
    except FileNotFoundError:
        abort(404)
# API that returns image with detections on it
# API that returns image with detections on it
@app.route('/image/by-image-file', methods= ['POST'])
def get_image_by_image_file():
    """Run detection on one uploaded image and return the annotated PNG.

    The upload is saved under ./temp/, decoded, fed through the YOLO model,
    and removed again; an annotated copy is also written to output_path.
    """
    image = request.files["images"]
    image_name = "./temp/" + image.filename
    image.save(os.path.join(os.getcwd(), image_name[2:]))
    img_raw = None
    try:
        img_raw = tf.image.decode_image(
            open(image_name, 'rb').read(), channels=3)
    except tf.errors.InvalidArgumentError:
        # remove temporary image before aborting
        os.remove(image_name)
        abort(404, "it is not an image file or image file is an unsupported format. try jpg or png")
    except Exception as e:
        # remove temporary image before aborting
        os.remove(image_name)
        print(e.__class__)
        print(e)
        abort(500)
    img = tf.expand_dims(img_raw, 0)
    img = transform_images(img, size)
    t1 = time.time()
    boxes, scores, classes, nums = yolo(img)
    t2 = time.time()
    print('time: {}'.format(t2 - t1))
    print('detections:')
    for i in range(nums[0]):
        print('\t{}, {}, {}'.format(class_names[int(classes[0][i])],
                                    np.array(scores[0][i]),
                                    np.array(boxes[0][i])))
    # Annotate (TF decodes RGB; OpenCV writes BGR).
    img = cv2.cvtColor(img_raw.numpy(), cv2.COLOR_RGB2BGR)
    img = draw_outputs(img, (boxes, scores, classes, nums), class_names)
    cv2.imwrite(output_path + 'detection.jpg', img)
    print('output saved to: {}'.format(output_path + 'detection.jpg'))
    # prepare image for response; ndarray.tostring() was removed in
    # NumPy >= 1.23 -- tobytes() returns the identical byte payload.
    _, img_encoded = cv2.imencode('.png', img)
    response = img_encoded.tobytes()
    # remove temporary image
    os.remove(image_name)
    try:
        return Response(response=response, status=200, mimetype='image/png')
    except FileNotFoundError:
        abort(404)
# API that returns JSON with classes found in images from url list
# API that returns JSON with classes found in images from url list
@app.route('/detections/by-url-list', methods=['POST'])
def get_detections_by_url_list():
    """Run detection on every image URL in the JSON body and return JSON results.

    Expects a body of the form {"images": [url, ...]}; each URL is fetched,
    decoded, fed through the YOLO model, and an annotated copy is written
    to output_path.
    """
    raw_images = []
    image_urls = request.get_json()["images"]
    if not isinstance(image_urls, list):
        abort(400, "can't find image list")
    image_names = []
    # Browser-like user agent so hosts that block generic clients still serve us.
    custom_headers = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36"
    }
    for i, image_url in enumerate(image_urls):
        # Synthetic names ("Image1", ...) label each URL in the response.
        image_name = "Image" + str(i + 1)
        image_names.append(image_name)
        img_raw = None
        try:
            img_raw = tf.image.decode_image(
                requests.get(image_url, headers=custom_headers).content, channels=3)
        except tf.errors.InvalidArgumentError:
            abort(404, "it is not image url or that image is an unsupported format. try jpg or png")
        except requests.exceptions.MissingSchema:
            abort(400, "it is not url form")
        except Exception as e:
            print(e.__class__)
            print(e)
            abort(500)
        raw_images.append(img_raw)
    num = 0
    # create list for final response
    response = []
    for j in range(len(raw_images)):
        # create list of responses for current image
        responses = []
        raw_img = raw_images[j]
        num += 1
        img = tf.expand_dims(raw_img, 0)
        img = transform_images(img, size)
        t1 = time.time()
        boxes, scores, classes, nums = yolo(img)
        t2 = time.time()
        print('time: {}'.format(t2 - t1))
        print('detections:')
        for i in range(nums[0]):
            print('\t{}, {}, {}'.format(class_names[int(classes[0][i])],
                                        np.array(scores[0][i]),
                                        np.array(boxes[0][i])))
            responses.append({
                "class": class_names[int(classes[0][i])],
                "confidence": float("{0:.2f}".format(np.array(scores[0][i])*100)),
                "box": np.array(boxes[0][i]).tolist()
            })
        response.append({
            "image": image_names[j],
            "detections": responses
        })
        # Write an annotated copy (TF decodes RGB; OpenCV writes BGR).
        img = cv2.cvtColor(raw_img.numpy(), cv2.COLOR_RGB2BGR)
        img = draw_outputs(img, (boxes, scores, classes, nums), class_names)
        cv2.imwrite(output_path + 'detection' + str(num) + '.jpg', img)
        print('output saved to: {}'.format(output_path + 'detection' + str(num) + '.jpg'))
    return Response(response=json.dumps({"response": response}), mimetype="application/json")
# API that returns image with detections on it from url
# API that returns image with detections on it from url
@app.route('/image/by-url', methods=['POST'])
def get_image_by_url():
    """Run detection on the first URL in the JSON body; return the annotated PNG.

    Expects a body of the form {"images": [url, ...]}; only the first URL
    is processed. An annotated copy is also written to output_path.
    """
    image_urls = request.get_json()["images"]
    if not isinstance(image_urls, list):
        abort(400, "can't find image list")
    # Browser-like user agent so hosts that block generic clients still serve us.
    custom_headers = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36"
    }
    # (The unused image_names bookkeeping from the sibling endpoint was removed.)
    img_raw = None
    try:
        img_raw = tf.image.decode_image(
            requests.get(image_urls[0], headers=custom_headers).content, channels=3)
    except tf.errors.InvalidArgumentError:
        abort(404, "it is not image url or that image is an unsupported format. try jpg or png")
    except requests.exceptions.MissingSchema:
        abort(400, "it is not url form")
    except Exception as e:
        print(e.__class__)
        print(e)
        abort(500)
    img = tf.expand_dims(img_raw, 0)
    img = transform_images(img, size)
    t1 = time.time()
    boxes, scores, classes, nums = yolo(img)
    t2 = time.time()
    print('time: {}'.format(t2 - t1))
    print('detections:')
    for i in range(nums[0]):
        print('\t{}, {}, {}'.format(class_names[int(classes[0][i])],
                                    np.array(scores[0][i]),
                                    np.array(boxes[0][i])))
    # Annotate (TF decodes RGB; OpenCV writes BGR).
    img = cv2.cvtColor(img_raw.numpy(), cv2.COLOR_RGB2BGR)
    img = draw_outputs(img, (boxes, scores, classes, nums), class_names)
    cv2.imwrite(output_path + 'detection.jpg', img)
    print('output saved to: {}'.format(output_path + 'detection.jpg'))
    # prepare image for response; ndarray.tostring() was removed in
    # NumPy >= 1.23 -- tobytes() returns the identical byte payload.
    _, img_encoded = cv2.imencode('.png', img)
    response = img_encoded.tobytes()
    try:
        return Response(response=response, status=200, mimetype='image/png')
    except FileNotFoundError:
        abort(404)
# Development entry point: Flask's built-in server on all interfaces.
if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0', port=5000)
|
StanfordAHA/garnet | global_buffer/design/glb_header.py | <reponame>StanfordAHA/garnet<gh_stars>10-100
from kratos import PackedStruct, clog2
from global_buffer.design.global_buffer_parameter import GlobalBufferParams
import math
class GlbHeader():
    """Packed-struct and port-name definitions shared across global-buffer modules.

    All field widths are derived from the supplied GlobalBufferParams, so a
    single parameter object keeps every module's struct layout consistent.
    """

    def __init__(self, _params: GlobalBufferParams):
        self._params = _params
        # Data-network configuration: connectivity flag plus pipeline latency.
        self.cfg_data_network_t = PackedStruct("cfg_data_network_t",
                                               [("tile_connected", 1),
                                                ("latency", self._params.latency_width)])
        # Parallel-config network uses the same layout under a separate name.
        self.cfg_pcfg_network_t = PackedStruct("cfg_pcfg_network_t",
                                               [("tile_connected", 1),
                                                ("latency", self._params.latency_width)])
        self.cfg_dma_ctrl_t = PackedStruct("dma_ctrl_t",
                                           [("mode", 2),
                                            ("use_valid", 1),
                                            ("data_mux", 2),
                                            ("num_repeat", clog2(self._params.queue_depth) + 1)])
        # NOTE: Kratos does not support struct of struct now.
        # DMA header is therefore flattened: base addresses, loop dimension,
        # then per-loop-level (range, stride, cycle_stride) triples.
        dma_header_struct_list = [("start_addr", self._params.glb_addr_width),
                                  ("cycle_start_addr", self._params.glb_addr_width)]
        dma_header_struct_list += [("dim", 1 + clog2(self._params.loop_level))]
        for i in range(self._params.loop_level):
            dma_header_struct_list += [(f"range_{i}", self._params.axi_data_width),
                                       (f"stride_{i}", self._params.axi_data_width),
                                       (f"cycle_stride_{i}", self._params.axi_data_width)]
        self.cfg_dma_header_t = PackedStruct("dma_header_t", dma_header_struct_list)
        # pcfg dma header
        self.cfg_pcfg_dma_ctrl_t = PackedStruct("pcfg_dma_ctrl_t", [("mode", 1)])
        self.cfg_pcfg_dma_header_t = PackedStruct("pcfg_dma_header_t",
                                                  [("start_addr", self._params.glb_addr_width),
                                                   ("num_cfg", self._params.max_num_cfg_width)])
        # Field lists for the write / read-request / read-response packets.
        # wr_strb is one byte-enable bit per data byte.
        wr_packet_list = [("wr_en", 1),
                          ("wr_strb", math.ceil(self._params.bank_data_width / 8)),
                          ("wr_addr", self._params.glb_addr_width),
                          ("wr_data", self._params.bank_data_width), ]
        rdrq_packet_list = [("rd_en", 1),
                            ("rd_addr", self._params.glb_addr_width), ]
        rdrs_packet_list = [("rd_data", self._params.bank_data_width),
                            ("rd_data_valid", 1), ]
        # Composite packet types built from the lists above.
        self.packet_t = PackedStruct(
            "packet_t", wr_packet_list + rdrq_packet_list + rdrs_packet_list)
        self.rd_packet_t = PackedStruct(
            "rd_packet_t", rdrq_packet_list + rdrs_packet_list)
        self.rdrq_packet_t = PackedStruct("rdrq_packet_t", rdrq_packet_list)
        self.rdrs_packet_t = PackedStruct("rdrs_packet_t", rdrs_packet_list)
        self.wr_packet_t = PackedStruct("wr_packet_t", wr_packet_list)
        # NOTE: Kratos currently does not support struct of struct.
        # This can become cleaner if it does.
        # Field-name lists let modules wire packets port-by-port.
        self.wr_packet_ports = [name for (name, _) in wr_packet_list]
        self.rdrq_packet_ports = [name for (name, _) in rdrq_packet_list]
        self.rdrs_packet_ports = [name for (name, _) in rdrs_packet_list]
        self.rd_packet_ports = [name for (name, _) in (
            rdrq_packet_list + rdrs_packet_list)]
        self.packet_ports = [name for (name, _) in (
            rdrq_packet_list + rdrs_packet_list + wr_packet_list)]
        # CGRA configuration bus: read/write enables plus address and data.
        self.cgra_cfg_t = PackedStruct("cgra_cfg_t", [("rd_en", 1), ("wr_en", 1), (
            "addr", self._params.cgra_cfg_addr_width), ("data", self._params.cgra_cfg_data_width)])
|
StanfordAHA/garnet | global_buffer/design/glb_bank_ctrl.py | from kratos import Generator, always_comb, concat, const, always_ff, posedge
from global_buffer.design.global_buffer_parameter import GlobalBufferParams
from global_buffer.design.glb_cfg_ifc import GlbConfigInterface
from global_buffer.design.glb_header import GlbHeader
from global_buffer.design.pipeline import Pipeline
class GlbBankCtrl(Generator):
    """Bank controller arbitrating SRAM access between the AXI configuration
    interface and the packet (write / read-request / read-response) traffic.

    Configuration accesses take priority over packet accesses; read data is
    aligned to the combined memory + SRAM-generator pipeline latency.
    """

    def __init__(self, _params: GlobalBufferParams):
        super().__init__("glb_bank_ctrl")
        self._params = _params
        self.header = GlbHeader(self._params)
        self.clk = self.clock("clk")
        self.reset = self.reset("reset")
        # Packet-side write request ports.
        self.packet_wr_en = self.input("packet_wr_en", 1)
        self.packet_wr_addr = self.input(
            "packet_wr_addr", self._params.bank_addr_width)
        self.packet_wr_data = self.input(
            "packet_wr_data", self._params.bank_data_width)
        self.packet_wr_data_bit_sel = self.input(
            "packet_wr_data_bit_sel", self._params.bank_data_width)
        # Packet-side read request / response ports.
        self.packet_rd_en = self.input("packet_rd_en", 1)
        self.packet_rd_addr = self.input(
            "packet_rd_addr", self._params.bank_addr_width)
        self.packet_rd_data = self.output(
            "packet_rd_data", self._params.bank_data_width)
        self.packet_rd_data_valid = self.output("packet_rd_data_valid", 1)
        # SRAM macro interface.
        self.mem_rd_en = self.output("mem_rd_en", 1)
        self.mem_wr_en = self.output("mem_wr_en", 1)
        self.mem_addr = self.output("mem_addr", self._params.bank_addr_width)
        self.mem_data_in = self.output(
            "mem_data_in", self._params.bank_data_width)
        self.mem_data_in_bit_sel = self.output(
            "mem_data_in_bit_sel", self._params.bank_data_width)
        self.mem_data_out = self.input(
            "mem_data_out", self._params.bank_data_width)
        # AXI-width SRAM configuration slave interface.
        self.bank_cfg_ifc = GlbConfigInterface(
            addr_width=self._params.bank_addr_width, data_width=self._params.axi_data_width)
        self.if_sram_cfg_s = self.interface(
            self.bank_cfg_ifc.slave, f"if_sram_cfg_s", is_port=True)
        # Total read latency through memory + SRAM generator pipelines.
        self.bank_ctrl_pipeline_depth = self._params.glb_bank_memory_pipeline_depth + \
            self._params.sram_gen_pipeline_depth + self._params.sram_gen_output_pipeline_depth + 1
        # local variables
        self.sram_cfg_rd_data_r = self.var(
            "sram_cfg_rd_data_r", self._params.axi_data_width)
        self.sram_cfg_rd_addr_sel_d = self.var("sram_cfg_rd_addr_sel_d", 1)
        self.packet_rd_data_r = self.var(
            "packet_rd_data_r", self._params.bank_data_width)
        # Register the combinational / sequential processes.
        self.add_rd_en_pipeline()
        self.add_always(self.mem_signal_logic)
        self.add_always(self.packet_rd_data_ff)
        self.add_always(self.packet_rd_data_logic)
        self.add_sram_cfg_rd_addr_sel_pipeline()
        self.add_always(self.sram_cfg_rd_data_ff)
        self.add_always(self.sram_cfg_rd_data_logic)

    @always_comb
    def mem_signal_logic(self):
        # Priority mux onto the single SRAM port:
        # cfg write > cfg read > packet write > packet read > idle.
        if self.if_sram_cfg_s.wr_en:
            # The bank word is wider than the AXI word; the address bit just
            # below the byte offset selects the low or high AXI half, and the
            # bit-select mask enables only that half.
            if self.if_sram_cfg_s.wr_addr[self._params.bank_byte_offset - 1] == 0:
                # Low half: AXI data in the LSBs, upper bits masked off.
                self.mem_wr_en = 1
                self.mem_rd_en_w = 0
                self.mem_addr = self.if_sram_cfg_s.wr_addr
                self.mem_data_in = concat(const(
                    0, self._params.bank_data_width - self._params.axi_data_width), self.if_sram_cfg_s.wr_data)
                self.mem_data_in_bit_sel = concat(const(0, self._params.bank_data_width - self._params.axi_data_width),
                                                  const(2**self._params.axi_data_width - 1,
                                                        self._params.axi_data_width))
            else:
                # High half: AXI data in the MSBs, lower bits masked off.
                self.mem_wr_en = 1
                self.mem_rd_en_w = 0
                self.mem_addr = self.if_sram_cfg_s.wr_addr
                self.mem_data_in = concat(self.if_sram_cfg_s.wr_data[self._params.bank_data_width
                                                                     - self._params.axi_data_width - 1, 0],
                                          const(0, self._params.axi_data_width))
                self.mem_data_in_bit_sel = concat(const(2**(self._params.bank_data_width - self._params.axi_data_width)
                                                        - 1,
                                                        self._params.bank_data_width - self._params.axi_data_width),
                                                  const(0, self._params.axi_data_width))
        elif self.if_sram_cfg_s.rd_en:
            # Configuration read of a full bank word.
            self.mem_wr_en = 0
            self.mem_rd_en_w = 1
            self.mem_addr = self.if_sram_cfg_s.rd_addr
            self.mem_data_in = 0
            self.mem_data_in_bit_sel = 0
        elif self.packet_wr_en:
            # Packet write with caller-provided per-bit write mask.
            self.mem_wr_en = 1
            self.mem_rd_en_w = 0
            self.mem_addr = self.packet_wr_addr
            self.mem_data_in = self.packet_wr_data
            self.mem_data_in_bit_sel = self.packet_wr_data_bit_sel
        elif self.packet_rd_en:
            self.mem_wr_en = 0
            self.mem_rd_en_w = 1
            self.mem_addr = self.packet_rd_addr
            self.mem_data_in = 0
            self.mem_data_in_bit_sel = 0
        else:
            # Idle: drive everything inactive.
            self.mem_wr_en = 0
            self.mem_rd_en_w = 0
            self.mem_addr = 0
            self.mem_data_in = 0
            self.mem_data_in_bit_sel = 0

    def add_rd_en_pipeline(self):
        """Delay the read-enable strobes by the bank pipeline depth so they
        line up with mem_data_out when it becomes valid."""
        self.mem_rd_en_w = self.var("mem_rd_en_w", 1)
        self.mem_rd_en_d = self.var("mem_rd_en_d", 1)
        self.sram_cfg_rd_en_d = self.var("sram_cfg_rd_en_d", 1)
        self.packet_rd_en_d = self.var("packet_rd_en_d", 1)
        self.wire(self.mem_rd_en_w, self.mem_rd_en)
        self.mem_rd_en_pipeline = Pipeline(width=1,
                                           depth=self.bank_ctrl_pipeline_depth)
        self.add_child("mem_rd_en_pipeline",
                       self.mem_rd_en_pipeline,
                       clk=self.clk,
                       clk_en=const(1, 1),
                       reset=self.reset,
                       in_=self.mem_rd_en_w,
                       out_=self.mem_rd_en_d)
        self.sram_cfg_rd_en_pipeline = Pipeline(width=1,
                                                depth=self.bank_ctrl_pipeline_depth)
        self.add_child("sram_cfg_rd_en_pipeline",
                       self.sram_cfg_rd_en_pipeline,
                       clk=self.clk,
                       clk_en=const(1, 1),
                       reset=self.reset,
                       in_=self.if_sram_cfg_s.rd_en,
                       out_=self.sram_cfg_rd_en_d)
        self.packet_rd_en_pipeline = Pipeline(width=1,
                                              depth=self.bank_ctrl_pipeline_depth)
        self.add_child("packet_rd_en_pipeline",
                       self.packet_rd_en_pipeline,
                       clk=self.clk,
                       clk_en=const(1, 1),
                       reset=self.reset,
                       in_=self.packet_rd_en,
                       out_=self.packet_rd_en_d)

    @always_ff((posedge, "clk"), (posedge, "reset"))
    def packet_rd_data_ff(self):
        # Hold the last packet read data so the output stays stable
        # between reads.
        if self.reset:
            self.packet_rd_data_r = 0
        else:
            self.packet_rd_data_r = self.packet_rd_data

    @always_comb
    def packet_rd_data_logic(self):
        # Forward fresh memory data while the delayed read-enable is high;
        # otherwise replay the held value.
        if self.packet_rd_en_d:
            self.packet_rd_data = self.mem_data_out
        else:
            self.packet_rd_data = self.packet_rd_data_r
        self.packet_rd_data_valid = self.packet_rd_en_d

    def add_sram_cfg_rd_addr_sel_pipeline(self):
        """Delay the half-word-select address bit alongside the read data."""
        self.sram_cfg_rd_addr_sel_d = self.var("sram_cfg_rd_addr_sel_d", 1)
        self.sram_cfg_rd_addr_sel_pipeline = Pipeline(width=1,
                                                      depth=self.bank_ctrl_pipeline_depth)
        self.add_child("sram_cfg_rd_addr_sel_pipeline",
                       self.sram_cfg_rd_addr_sel_pipeline,
                       clk=self.clk,
                       clk_en=const(1, 1),
                       reset=self.reset,
                       in_=self.if_sram_cfg_s.rd_addr[self._params.bank_byte_offset - 1],
                       out_=self.sram_cfg_rd_addr_sel_d)

    @always_ff((posedge, "clk"), (posedge, "reset"))
    def sram_cfg_rd_data_ff(self):
        # Hold the last configuration read data between reads.
        if self.reset:
            self.sram_cfg_rd_data_r = 0
        else:
            self.sram_cfg_rd_data_r = self.if_sram_cfg_s.rd_data

    @always_comb
    def sram_cfg_rd_data_logic(self):
        # Select the AXI-width half of the bank word indicated by the
        # delayed address bit; otherwise replay the held value.
        if self.sram_cfg_rd_en_d:
            if self.sram_cfg_rd_addr_sel_d == 0:
                self.if_sram_cfg_s.rd_data = self.mem_data_out[self._params.axi_data_width - 1, 0]
            else:
                self.if_sram_cfg_s.rd_data = self.mem_data_out[self._params.axi_data_width
                                                               * 2 - 1, self._params.axi_data_width]
        else:
            self.if_sram_cfg_s.rd_data = self.sram_cfg_rd_data_r
        self.if_sram_cfg_s.rd_data_valid = self.mem_rd_en_d & self.sram_cfg_rd_en_d
        # TODO: This can just be simpler as below
        # self.if_sram_cfg_s.rd_data_valid = self.sram_cfg_rd_en_d
|
StanfordAHA/garnet | global_buffer/design/glb_core_proc_router.py | <filename>global_buffer/design/glb_core_proc_router.py
from kratos import Generator, always_ff, always_comb, posedge
from global_buffer.design.global_buffer_parameter import GlobalBufferParams
from global_buffer.design.glb_header import GlbHeader
class GlbCoreProcRouter(Generator):
    """Processor-packet router for one global-buffer tile.

    Sits on the bidirectional tile-to-tile ring (w2e = west-to-east,
    e2w = east-to-west). Pipelines the ring by one cycle, forwards
    write/read-request packets along the ring, peels off packets whose
    address tile-select field matches ``glb_tile_id`` toward the tile
    switch (``pr2sw``), and injects read responses from the switch
    (``sw2pr``) back onto the ring. Which ring direction feeds/receives
    the switch depends on the parity of the tile ID (``is_even``).
    """

    def __init__(self, _params: GlobalBufferParams):
        super().__init__("glb_core_proc_router")
        self._params = _params
        self.header = GlbHeader(self._params)
        self.clk = self.clock("clk")
        self.reset = self.reset("reset")
        # ID of this tile; used to match the address tile-select field.
        self.glb_tile_id = self.input(
            "glb_tile_id", self._params.tile_sel_addr_width)
        # Ring ports: *_wsti/_wsto face west, *_esti/_esto face east.
        self.packet_w2e_wsti = self.input(
            "packet_w2e_wsti", self.header.packet_t)
        self.packet_e2w_wsto = self.output(
            "packet_e2w_wsto", self.header.packet_t)
        self.packet_e2w_esti = self.input(
            "packet_e2w_esti", self.header.packet_t)
        self.packet_w2e_esto = self.output(
            "packet_w2e_esto", self.header.packet_t)
        # Ports toward the tile-local switch.
        self.wr_packet_pr2sw = self.output(
            "wr_packet_pr2sw", self.header.wr_packet_t)
        self.rdrq_packet_pr2sw = self.output(
            "rdrq_packet_pr2sw", self.header.rdrq_packet_t)
        self.rdrs_packet_sw2pr = self.input(
            "rdrs_packet_sw2pr", self.header.rdrs_packet_t)
        # local variables
        # One-cycle delayed copies of the ring inputs and switch response.
        self.packet_w2e_wsti_d1 = self.var(
            "packet_w2e_wsti_d1", self.header.packet_t)
        self.packet_e2w_esti_d1 = self.var(
            "packet_e2w_esti_d1", self.header.packet_t)
        self.rdrs_packet_sw2pr_d1 = self.var(
            "rdrs_packet_sw2pr_d1", self.header.rdrs_packet_t)
        # muxed = direction-selected packet; filtered = zeroed unless the
        # packet is addressed to this tile.
        self.rdrq_packet_pr2sw_filtered = self.var(
            "rdrq_packet_pr2sw_filtered", self.header.rdrq_packet_t)
        self.rdrq_packet_pr2sw_muxed = self.var(
            "rdrq_packet_pr2sw_muxed", self.header.rdrq_packet_t)
        self.wr_packet_pr2sw_filtered = self.var(
            "wr_packet_pr2sw_filtered", self.header.wr_packet_t)
        self.wr_packet_pr2sw_muxed = self.var(
            "wr_packet_pr2sw_muxed", self.header.wr_packet_t)
        # localparam
        # MSB/LSB of the tile-select field within a packet address:
        # [bank_addr | bank_sel | tile_sel].
        self.packet_addr_tile_sel_msb = _params.bank_addr_width + \
            _params.bank_sel_addr_width + _params.tile_sel_addr_width - 1
        self.packet_addr_tile_sel_lsb = _params.bank_addr_width + _params.bank_sel_addr_width
        self.add_is_even_stmt()
        self.add_always(self.packet_pipeline)
        self.add_always(self.rdrs_packet_pipeline)
        self.add_always(self.rq_assign)
        self.add_always(self.rs_assign)

    def add_is_even_stmt(self):
        # is_even is high when the tile ID's LSB is 0 (even-numbered tile).
        self.is_even = self.var("is_even", 1)
        self.wire(self.is_even, self.glb_tile_id[0] == 0)

    @always_ff((posedge, "clk"), (posedge, "reset"))
    def packet_pipeline(self):
        # One-cycle register stage on both ring directions
        # (async active-high reset).
        if self.reset:
            self.packet_w2e_wsti_d1 = 0
            self.packet_e2w_esti_d1 = 0
        else:
            self.packet_w2e_wsti_d1 = self.packet_w2e_wsti
            self.packet_e2w_esti_d1 = self.packet_e2w_esti

    @always_ff((posedge, "clk"), (posedge, "reset"))
    def rdrs_packet_pipeline(self):
        # One-cycle register stage on the switch read-response packet.
        if self.reset:
            self.rdrs_packet_sw2pr_d1 = 0
        else:
            self.rdrs_packet_sw2pr_d1 = self.rdrs_packet_sw2pr

    @always_comb
    def rq_assign(self):
        # TODO: Kratos currently does not support struct of struct
        # packet output
        # Forward pipelined wr/rdrq fields straight through in both
        # ring directions.
        for port in self.header.wr_packet_ports + self.header.rdrq_packet_ports:
            self.packet_w2e_esto[port] = self.packet_w2e_wsti_d1[port]
            self.packet_e2w_wsto[port] = self.packet_e2w_esti_d1[port]
        # packet to core
        # Even tiles tap the w2e direction, odd tiles tap e2w.
        if self.is_even:
            for port in self.header.wr_packet_ports:
                self.wr_packet_pr2sw_muxed[port] = self.packet_w2e_esto[port]
        else:
            for port in self.header.wr_packet_ports:
                self.wr_packet_pr2sw_muxed[port] = self.packet_e2w_wsto[port]
        if self.is_even:
            for port in self.header.rdrq_packet_ports:
                self.rdrq_packet_pr2sw_muxed[port] = self.packet_w2e_esto[port]
        else:
            for port in self.header.rdrq_packet_ports:
                self.rdrq_packet_pr2sw_muxed[port] = self.packet_e2w_wsto[port]
        # Pass the packet to the switch only when its tile-select address
        # field matches this tile's ID; otherwise zero it out.
        if (self.wr_packet_pr2sw_muxed['wr_addr'][self.packet_addr_tile_sel_msb, self.packet_addr_tile_sel_lsb]
                == self.glb_tile_id):
            for port in self.header.wr_packet_ports:
                self.wr_packet_pr2sw_filtered[port] = self.wr_packet_pr2sw_muxed[port]
        else:
            for port in self.header.wr_packet_ports:
                self.wr_packet_pr2sw_filtered[port] = 0
        if (self.rdrq_packet_pr2sw_muxed['rd_addr'][self.packet_addr_tile_sel_msb, self.packet_addr_tile_sel_lsb]
                == self.glb_tile_id):
            for port in self.header.rdrq_packet_ports:
                self.rdrq_packet_pr2sw_filtered[port] = self.rdrq_packet_pr2sw_muxed[port]
        else:
            for port in self.header.rdrq_packet_ports:
                self.rdrq_packet_pr2sw_filtered[port] = 0
        self.wr_packet_pr2sw = self.wr_packet_pr2sw_filtered
        self.rdrq_packet_pr2sw = self.rdrq_packet_pr2sw_filtered

    @always_comb
    def rs_assign(self):
        # Inject the switch's read response onto the ring: even tiles drive
        # the w2e direction, odd tiles drive e2w; otherwise the pipelined
        # ring data passes through unchanged.
        if (self.is_even == 1) & (self.rdrs_packet_sw2pr_d1['rd_data_valid'] == 1):
            for port in self.header.rdrs_packet_ports:
                self.packet_w2e_esto[port] = self.rdrs_packet_sw2pr_d1[port]
        else:
            for port in self.header.rdrs_packet_ports:
                self.packet_w2e_esto[port] = self.packet_w2e_wsti_d1[port]
        if (self.is_even == 0) & (self.rdrs_packet_sw2pr_d1['rd_data_valid'] == 1):
            for port in self.header.rdrs_packet_ports:
                self.packet_e2w_wsto[port] = self.rdrs_packet_sw2pr_d1[port]
        else:
            for port in self.header.rdrs_packet_ports:
                self.packet_e2w_wsto[port] = self.packet_e2w_esti_d1[port]
|
StanfordAHA/garnet | global_buffer/design/global_buffer.py | from kratos import Generator, always_ff, posedge, always_comb, clock_en, clog2, const, concat
from kratos.util import to_magma
from global_buffer.design.glb_tile import GlbTile
from global_buffer.design.glb_cfg_ifc import GlbConfigInterface
from global_buffer.design.global_buffer_parameter import GlobalBufferParams
from global_buffer.design.glb_header import GlbHeader
from gemstone.generator.from_magma import FromMagma
class GlobalBuffer(Generator):
def __init__(self, _params: GlobalBufferParams):
    """Elaborate the top-level global buffer.

    Declares all external ports (processor, AXI config, SRAM config, JTAG
    CGRA config, CGRA stream), the per-tile ring/packet variables, the
    tile-to-tile config interfaces, instantiates one GlbTile per column,
    and registers the boundary/pipeline always-blocks.
    """
    super().__init__("global_buffer")
    self._params = _params
    self.header = GlbHeader(self._params)
    self.clk = self.clock("clk")
    self.stall = self.input("stall", self._params.num_glb_tiles)
    self.reset = self.reset("reset")
    # TODO: Why cgra_stall has same width as num_glb_tiles
    self.cgra_stall_in = self.input(
        "cgra_stall_in", self._params.num_glb_tiles)
    # Per-tile stall fanned out to every CGRA column under that tile.
    self.cgra_stall = self.output(
        "cgra_stall", 1, size=[self._params.num_glb_tiles, self._params.cgra_per_glb], packed=True)
    # Processor read/write port (bank-word wide).
    self.proc_wr_en = self.input(
        "proc_wr_en", 1)
    self.proc_wr_strb = self.input(
        "proc_wr_strb", self._params.bank_data_width // 8)
    self.proc_wr_addr = self.input(
        "proc_wr_addr", self._params.glb_addr_width)
    self.proc_wr_data = self.input(
        "proc_wr_data", self._params.bank_data_width)
    self.proc_rd_en = self.input(
        "proc_rd_en", 1)
    self.proc_rd_addr = self.input(
        "proc_rd_addr", self._params.glb_addr_width)
    self.proc_rd_data = self.output(
        "proc_rd_data", self._params.bank_data_width)
    self.proc_rd_data_valid = self.output(
        "proc_rd_data_valid", 1)
    # AXI configuration port.
    self.if_cfg_wr_en = self.input(
        "if_cfg_wr_en", 1)
    self.if_cfg_wr_addr = self.input(
        "if_cfg_wr_addr", self._params.axi_addr_width)
    self.if_cfg_wr_data = self.input(
        "if_cfg_wr_data", self._params.axi_data_width)
    self.if_cfg_rd_en = self.input(
        "if_cfg_rd_en", 1)
    self.if_cfg_rd_addr = self.input(
        "if_cfg_rd_addr", self._params.axi_addr_width)
    self.if_cfg_rd_data = self.output(
        "if_cfg_rd_data", self._params.axi_data_width)
    self.if_cfg_rd_data_valid = self.output(
        "if_cfg_rd_data_valid", 1)
    # SRAM configuration port (glb-address space, AXI-wide data).
    self.if_sram_cfg_wr_en = self.input(
        "if_sram_cfg_wr_en", 1)
    self.if_sram_cfg_wr_addr = self.input(
        "if_sram_cfg_wr_addr", self._params.glb_addr_width)
    self.if_sram_cfg_wr_data = self.input(
        "if_sram_cfg_wr_data", self._params.axi_data_width)
    self.if_sram_cfg_rd_en = self.input(
        "if_sram_cfg_rd_en", 1)
    self.if_sram_cfg_rd_addr = self.input(
        "if_sram_cfg_rd_addr", self._params.glb_addr_width)
    self.if_sram_cfg_rd_data = self.output(
        "if_sram_cfg_rd_data", self._params.axi_data_width)
    self.if_sram_cfg_rd_data_valid = self.output(
        "if_sram_cfg_rd_data_valid", 1)
    # JTAG CGRA-configuration port from the global controller.
    self.cgra_cfg_jtag_gc2glb_wr_en = self.input(
        "cgra_cfg_jtag_gc2glb_wr_en", 1)
    self.cgra_cfg_jtag_gc2glb_rd_en = self.input(
        "cgra_cfg_jtag_gc2glb_rd_en", 1)
    self.cgra_cfg_jtag_gc2glb_addr = self.input(
        "cgra_cfg_jtag_gc2glb_addr", self._params.cgra_cfg_addr_width)
    self.cgra_cfg_jtag_gc2glb_data = self.input(
        "cgra_cfg_jtag_gc2glb_data", self._params.cgra_cfg_data_width)
    # CGRA streaming data (f2g = fabric-to-glb, g2f = glb-to-fabric).
    self.stream_data_f2g = self.input("stream_data_f2g", self._params.cgra_data_width, size=[
        self._params.num_glb_tiles, self._params.cgra_per_glb], packed=True)
    self.stream_data_valid_f2g = self.input("stream_data_valid_f2g", 1, size=[
        self._params.num_glb_tiles, self._params.cgra_per_glb], packed=True)
    self.stream_data_g2f = self.output("stream_data_g2f", self._params.cgra_data_width, size=[
        self._params.num_glb_tiles, self._params.cgra_per_glb], packed=True)
    self.stream_data_valid_g2f = self.output("stream_data_valid_g2f", 1, size=[
        self._params.num_glb_tiles, self._params.cgra_per_glb], packed=True)
    # Parallel-config outputs driven into the CGRA fabric.
    self.cgra_cfg_g2f_cfg_wr_en = self.output("cgra_cfg_g2f_cfg_wr_en", 1, size=[
        self._params.num_glb_tiles, self._params.cgra_per_glb], packed=True)
    self.cgra_cfg_g2f_cfg_rd_en = self.output("cgra_cfg_g2f_cfg_rd_en", 1, size=[
        self._params.num_glb_tiles, self._params.cgra_per_glb], packed=True)
    self.cgra_cfg_g2f_cfg_addr = self.output("cgra_cfg_g2f_cfg_addr", self._params.cgra_cfg_addr_width, size=[
        self._params.num_glb_tiles, self._params.cgra_per_glb], packed=True)
    self.cgra_cfg_g2f_cfg_data = self.output("cgra_cfg_g2f_cfg_data", self._params.cgra_cfg_data_width, size=[
        self._params.num_glb_tiles, self._params.cgra_per_glb], packed=True)
    # Per-tile start pulses in, interrupt pulses out.
    self.strm_g2f_start_pulse = self.input("strm_g2f_start_pulse", self._params.num_glb_tiles)
    self.strm_f2g_start_pulse = self.input("strm_f2g_start_pulse", self._params.num_glb_tiles)
    self.pcfg_start_pulse = self.input("pcfg_start_pulse", self._params.num_glb_tiles)
    self.strm_f2g_interrupt_pulse = self.output("strm_f2g_interrupt_pulse", self._params.num_glb_tiles)
    self.strm_g2f_interrupt_pulse = self.output("strm_g2f_interrupt_pulse", self._params.num_glb_tiles)
    self.pcfg_g2f_interrupt_pulse = self.output("pcfg_g2f_interrupt_pulse", self._params.num_glb_tiles)
    # local variables
    # *_d suffix = one-cycle-delayed copy registered at the left edge.
    self.cgra_cfg_jtag_gc2glb_wr_en_d = self.var(
        "cgra_cfg_jtag_gc2glb_wr_en_d", 1)
    self.cgra_cfg_jtag_gc2glb_rd_en_d = self.var(
        "cgra_cfg_jtag_gc2glb_rd_en_d", 1)
    self.cgra_cfg_jtag_gc2glb_addr_d = self.var(
        "cgra_cfg_jtag_gc2glb_addr_d", self._params.cgra_cfg_addr_width)
    self.cgra_cfg_jtag_gc2glb_data_d = self.var(
        "cgra_cfg_jtag_gc2glb_data_d", self._params.cgra_cfg_data_width)
    self.proc_packet_d = self.var(
        "proc_packet_d", self.header.packet_t)
    # Per-tile ring packets: esti/wsti are tile inputs, esto/wsto outputs.
    self.proc_packet_e2w_esti = self.var(
        "proc_packet_e2w_esti", self.header.packet_t, size=self._params.num_glb_tiles, packed=True)
    self.proc_packet_w2e_wsti = self.var(
        "proc_packet_w2e_wsti", self.header.packet_t, size=self._params.num_glb_tiles, packed=True)
    self.proc_packet_e2w_wsto = self.var(
        "proc_packet_e2w_wsto", self.header.packet_t, size=self._params.num_glb_tiles, packed=True)
    self.proc_packet_w2e_esto = self.var(
        "proc_packet_w2e_esto", self.header.packet_t, size=self._params.num_glb_tiles, packed=True)
    self.strm_packet_e2w_esti = self.var(
        "strm_packet_e2w_esti", self.header.packet_t, size=self._params.num_glb_tiles, packed=True)
    self.strm_packet_w2e_wsti = self.var(
        "strm_packet_w2e_wsti", self.header.packet_t, size=self._params.num_glb_tiles, packed=True)
    self.strm_packet_e2w_wsto = self.var(
        "strm_packet_e2w_wsto", self.header.packet_t, size=self._params.num_glb_tiles, packed=True)
    self.strm_packet_w2e_esto = self.var(
        "strm_packet_w2e_esto", self.header.packet_t, size=self._params.num_glb_tiles, packed=True)
    self.pcfg_packet_e2w_esti = self.var(
        "pcfg_packet_e2w_esti", self.header.rd_packet_t, size=self._params.num_glb_tiles, packed=True)
    self.pcfg_packet_w2e_wsti = self.var(
        "pcfg_packet_w2e_wsti", self.header.rd_packet_t, size=self._params.num_glb_tiles, packed=True)
    self.pcfg_packet_e2w_wsto = self.var(
        "pcfg_packet_e2w_wsto", self.header.rd_packet_t, size=self._params.num_glb_tiles, packed=True)
    self.pcfg_packet_w2e_esto = self.var(
        "pcfg_packet_w2e_esto", self.header.rd_packet_t, size=self._params.num_glb_tiles, packed=True)
    # Daisy-chained tile-connected flags; bit 0 is tied low below.
    self.cfg_tile_connected = self.var(
        "cfg_tile_connected", self._params.num_glb_tiles + 1)
    self.cfg_pcfg_tile_connected = self.var(
        "cfg_pcfg_tile_connected", self._params.num_glb_tiles + 1)
    self.wire(self.cfg_tile_connected[0], 0)
    self.wire(self.cfg_pcfg_tile_connected[0], 0)
    # JTAG CGRA-config daisy chain (per tile, west in / east out).
    self.cgra_cfg_jtag_wsti_wr_en = self.var(
        "cgra_cfg_jtag_wsti_wr_en", 1, size=self._params.num_glb_tiles, packed=True)
    self.cgra_cfg_jtag_wsti_rd_en = self.var(
        "cgra_cfg_jtag_wsti_rd_en", 1, size=self._params.num_glb_tiles, packed=True)
    self.cgra_cfg_jtag_wsti_addr = self.var(
        "cgra_cfg_jtag_wsti_addr", self._params.cgra_cfg_addr_width, size=self._params.num_glb_tiles, packed=True)
    self.cgra_cfg_jtag_wsti_data = self.var(
        "cgra_cfg_jtag_wsti_data", self._params.cgra_cfg_data_width, size=self._params.num_glb_tiles, packed=True)
    self.cgra_cfg_jtag_esto_wr_en = self.var(
        "cgra_cfg_jtag_esto_wr_en", 1, size=self._params.num_glb_tiles, packed=True)
    self.cgra_cfg_jtag_esto_rd_en = self.var(
        "cgra_cfg_jtag_esto_rd_en", 1, size=self._params.num_glb_tiles, packed=True)
    self.cgra_cfg_jtag_esto_addr = self.var(
        "cgra_cfg_jtag_esto_addr", self._params.cgra_cfg_addr_width, size=self._params.num_glb_tiles, packed=True)
    self.cgra_cfg_jtag_esto_data = self.var(
        "cgra_cfg_jtag_esto_data", self._params.cgra_cfg_data_width, size=self._params.num_glb_tiles, packed=True)
    # Bypass chain used for the JTAG read path.
    self.cgra_cfg_jtag_wsti_rd_en_bypass = self.var("cgra_cfg_jtag_wsti_rd_en_bypass", 1,
                                                    size=self._params.num_glb_tiles, packed=True)
    self.cgra_cfg_jtag_wsti_addr_bypass = self.var("cgra_cfg_jtag_wsti_addr_bypass",
                                                   self._params.cgra_cfg_addr_width,
                                                   size=self._params.num_glb_tiles, packed=True)
    self.cgra_cfg_jtag_esto_rd_en_bypass = self.var("cgra_cfg_jtag_esto_rd_en_bypass", 1,
                                                    size=self._params.num_glb_tiles, packed=True)
    self.cgra_cfg_jtag_esto_addr_bypass = self.var("cgra_cfg_jtag_esto_addr_bypass",
                                                   self._params.cgra_cfg_addr_width,
                                                   size=self._params.num_glb_tiles, packed=True)
    # Parallel-config (pcfg) daisy chain (per tile, west in / east out).
    self.cgra_cfg_pcfg_wsti_wr_en = self.var(
        "cgra_cfg_pcfg_wsti_wr_en", 1, size=self._params.num_glb_tiles, packed=True)
    self.cgra_cfg_pcfg_wsti_rd_en = self.var(
        "cgra_cfg_pcfg_wsti_rd_en", 1, size=self._params.num_glb_tiles, packed=True)
    self.cgra_cfg_pcfg_wsti_addr = self.var(
        "cgra_cfg_pcfg_wsti_addr", self._params.cgra_cfg_addr_width, size=self._params.num_glb_tiles, packed=True)
    self.cgra_cfg_pcfg_wsti_data = self.var(
        "cgra_cfg_pcfg_wsti_data", self._params.cgra_cfg_data_width, size=self._params.num_glb_tiles, packed=True)
    self.cgra_cfg_pcfg_esto_wr_en = self.var(
        "cgra_cfg_pcfg_esto_wr_en", 1, size=self._params.num_glb_tiles, packed=True)
    self.cgra_cfg_pcfg_esto_rd_en = self.var(
        "cgra_cfg_pcfg_esto_rd_en", 1, size=self._params.num_glb_tiles, packed=True)
    self.cgra_cfg_pcfg_esto_addr = self.var(
        "cgra_cfg_pcfg_esto_addr", self._params.cgra_cfg_addr_width, size=self._params.num_glb_tiles, packed=True)
    self.cgra_cfg_pcfg_esto_data = self.var(
        "cgra_cfg_pcfg_esto_data", self._params.cgra_cfg_data_width, size=self._params.num_glb_tiles, packed=True)
    # *_w/*_d pairs: combinational source and its registered copy.
    self.stall_w = self.var("stall_w", self._params.num_glb_tiles)
    self.stall_d = self.var("stall_d", self._params.num_glb_tiles)
    self.wire(self.stall_w, self.stall)
    self.cgra_stall_in_w = self.var(
        "cgra_stall_in_w", self._params.num_glb_tiles)
    self.cgra_stall_in_d = self.var(
        "cgra_stall_in_d", self._params.num_glb_tiles)
    self.wire(self.cgra_stall_in_w, self.cgra_stall_in)
    # Replicate each tile's registered stall bit across its CGRA columns.
    for i in range(self._params.num_glb_tiles):
        self.wire(self.cgra_stall[i], concat(
            *[self.cgra_stall_in_d[i]] * self._params.cgra_per_glb))
    self.strm_g2f_start_pulse_w = self.var("strm_g2f_start_pulse_w", self._params.num_glb_tiles)
    self.strm_g2f_start_pulse_d = self.var("strm_g2f_start_pulse_d", self._params.num_glb_tiles)
    self.wire(self.strm_g2f_start_pulse, self.strm_g2f_start_pulse_w)
    self.strm_f2g_start_pulse_w = self.var("strm_f2g_start_pulse_w", self._params.num_glb_tiles)
    self.strm_f2g_start_pulse_d = self.var("strm_f2g_start_pulse_d", self._params.num_glb_tiles)
    self.wire(self.strm_f2g_start_pulse, self.strm_f2g_start_pulse_w)
    self.pcfg_start_pulse_w = self.var("pcfg_start_pulse_w", self._params.num_glb_tiles)
    self.pcfg_start_pulse_d = self.var("pcfg_start_pulse_d", self._params.num_glb_tiles)
    self.wire(self.pcfg_start_pulse, self.pcfg_start_pulse_w)
    self.strm_f2g_interrupt_pulse_w = self.var("strm_f2g_interrupt_pulse_w", self._params.num_glb_tiles)
    self.strm_f2g_interrupt_pulse_d = self.var("strm_f2g_interrupt_pulse_d", self._params.num_glb_tiles)
    self.wire(self.strm_f2g_interrupt_pulse_d, self.strm_f2g_interrupt_pulse)
    self.strm_g2f_interrupt_pulse_w = self.var("strm_g2f_interrupt_pulse_w", self._params.num_glb_tiles)
    self.strm_g2f_interrupt_pulse_d = self.var("strm_g2f_interrupt_pulse_d", self._params.num_glb_tiles)
    self.wire(self.strm_g2f_interrupt_pulse_d, self.strm_g2f_interrupt_pulse)
    self.pcfg_g2f_interrupt_pulse_w = self.var("pcfg_g2f_interrupt_pulse_w", self._params.num_glb_tiles)
    self.pcfg_g2f_interrupt_pulse_d = self.var("pcfg_g2f_interrupt_pulse_d", self._params.num_glb_tiles)
    self.wire(self.pcfg_g2f_interrupt_pulse_d, self.pcfg_g2f_interrupt_pulse)
    self.cgra_cfg_g2f_cfg_wr_en_w = self.var("cgra_cfg_g2f_cfg_wr_en_w", 1, size=[
        self._params.num_glb_tiles, self._params.cgra_per_glb], packed=True)
    self.cgra_cfg_g2f_cfg_wr_en_d = self.var("cgra_cfg_g2f_cfg_wr_en_d", 1, size=[
        self._params.num_glb_tiles, self._params.cgra_per_glb], packed=True)
    self.wire(self.cgra_cfg_g2f_cfg_wr_en_d, self.cgra_cfg_g2f_cfg_wr_en)
    self.cgra_cfg_g2f_cfg_rd_en_w = self.var("cgra_cfg_g2f_cfg_rd_en_w", 1, size=[
        self._params.num_glb_tiles, self._params.cgra_per_glb], packed=True)
    self.cgra_cfg_g2f_cfg_rd_en_d = self.var("cgra_cfg_g2f_cfg_rd_en_d", 1, size=[
        self._params.num_glb_tiles, self._params.cgra_per_glb], packed=True)
    self.wire(self.cgra_cfg_g2f_cfg_rd_en_d, self.cgra_cfg_g2f_cfg_rd_en)
    self.cgra_cfg_g2f_cfg_addr_w = self.var("cgra_cfg_g2f_cfg_addr_w", self._params.cgra_cfg_addr_width, size=[
        self._params.num_glb_tiles, self._params.cgra_per_glb], packed=True)
    self.cgra_cfg_g2f_cfg_addr_d = self.var("cgra_cfg_g2f_cfg_addr_d", self._params.cgra_cfg_addr_width, size=[
        self._params.num_glb_tiles, self._params.cgra_per_glb], packed=True)
    self.wire(self.cgra_cfg_g2f_cfg_addr_d, self.cgra_cfg_g2f_cfg_addr)
    self.cgra_cfg_g2f_cfg_data_w = self.var("cgra_cfg_g2f_cfg_data_w", self._params.cgra_cfg_data_width, size=[
        self._params.num_glb_tiles, self._params.cgra_per_glb], packed=True)
    self.cgra_cfg_g2f_cfg_data_d = self.var("cgra_cfg_g2f_cfg_data_d", self._params.cgra_cfg_data_width, size=[
        self._params.num_glb_tiles, self._params.cgra_per_glb], packed=True)
    self.wire(self.cgra_cfg_g2f_cfg_data_d, self.cgra_cfg_g2f_cfg_data)
    self.stream_data_f2g_w = self.var("stream_data_f2g_w", self._params.cgra_data_width, size=[
        self._params.num_glb_tiles, self._params.cgra_per_glb], packed=True)
    self.stream_data_f2g_d = self.var("stream_data_f2g_d", self._params.cgra_data_width, size=[
        self._params.num_glb_tiles, self._params.cgra_per_glb], packed=True)
    self.wire(self.stream_data_f2g, self.stream_data_f2g_w)
    self.stream_data_valid_f2g_w = self.var("stream_data_valid_f2g_w", 1, size=[
        self._params.num_glb_tiles, self._params.cgra_per_glb], packed=True)
    self.stream_data_valid_f2g_d = self.var("stream_data_valid_f2g_d", 1, size=[
        self._params.num_glb_tiles, self._params.cgra_per_glb], packed=True)
    self.wire(self.stream_data_valid_f2g, self.stream_data_valid_f2g_w)
    self.stream_data_g2f_w = self.var("stream_data_g2f_w", self._params.cgra_data_width, size=[
        self._params.num_glb_tiles, self._params.cgra_per_glb], packed=True)
    self.stream_data_g2f_d = self.var("stream_data_g2f_d", self._params.cgra_data_width, size=[
        self._params.num_glb_tiles, self._params.cgra_per_glb], packed=True)
    self.wire(self.stream_data_g2f_d, self.stream_data_g2f)
    self.stream_data_valid_g2f_w = self.var("stream_data_valid_g2f_w", 1, size=[
        self._params.num_glb_tiles, self._params.cgra_per_glb], packed=True)
    self.stream_data_valid_g2f_d = self.var("stream_data_valid_g2f_d", 1, size=[
        self._params.num_glb_tiles, self._params.cgra_per_glb], packed=True)
    self.wire(self.stream_data_valid_g2f_d, self.stream_data_valid_g2f)
    # interface
    # num_glb_tiles + 1 config interfaces: index i is tile i's west side,
    # index i+1 its east side; the last one is tied off below.
    if_cfg_tile2tile = GlbConfigInterface(
        addr_width=self._params.axi_addr_width, data_width=self._params.axi_data_width)
    if_sram_cfg_tile2tile = GlbConfigInterface(
        addr_width=self._params.glb_addr_width, data_width=self._params.axi_data_width)
    self.if_cfg_list = []
    self.if_sram_cfg_list = []
    for i in range(self._params.num_glb_tiles + 1):
        self.if_cfg_list.append(self.interface(
            if_cfg_tile2tile, f"if_cfg_tile2tile_{i}"))
        self.if_sram_cfg_list.append(self.interface(
            if_sram_cfg_tile2tile, f"if_sram_cfg_tile2tile_{i}"))
    self.glb_tile = []
    for i in range(self._params.num_glb_tiles):
        self.glb_tile.append(GlbTile(_params=self._params))
    # Tie off the read-return path past the easternmost tile.
    self.wire(self.if_cfg_list[-1].rd_data, 0)
    self.wire(self.if_cfg_list[-1].rd_data_valid, 0)
    self.wire(self.if_sram_cfg_list[-1].rd_data, 0)
    self.wire(self.if_sram_cfg_list[-1].rd_data_valid, 0)
    self.add_glb_tile()
    self.add_always(self.left_edge_proc_ff)
    self.add_always(self.left_edge_cfg_ff)
    self.add_always(self.left_edge_sram_cfg_ff)
    self.add_always(self.left_edge_cgra_cfg_ff)
    self.tile2tile_e2w_wiring()
    self.tile2tile_w2e_wiring()
    self.add_always(self.tile2tile_w2e_cfg_wiring)
    self.add_always(self.interrupt_pipeline)
    self.add_always(self.start_pulse_pipeline)
    self.add_always(self.stall_pipeline)
    self.add_always(self.stream_data_pipeline)
    self.add_always(self.cgra_cfg_pcfg_pipeline)
@always_ff((posedge, "clk"), (posedge, "reset"))
def left_edge_proc_ff(self):
    # Register the external processor port into the ring-entry packet and
    # register the ring's returning read data out to the external port
    # (async active-high reset).
    if self.reset:
        self.proc_packet_d['wr_en'] = 0
        self.proc_packet_d['wr_strb'] = 0
        self.proc_packet_d['wr_addr'] = 0
        self.proc_packet_d['wr_data'] = 0
        self.proc_packet_d['rd_en'] = 0
        self.proc_packet_d['rd_addr'] = 0
        self.proc_packet_d['rd_data'] = 0
        self.proc_packet_d['rd_data_valid'] = 0
        self.proc_rd_data = 0
        self.proc_rd_data_valid = 0
    else:
        self.proc_packet_d['wr_en'] = self.proc_wr_en
        self.proc_packet_d['wr_strb'] = self.proc_wr_strb
        self.proc_packet_d['wr_addr'] = self.proc_wr_addr
        self.proc_packet_d['wr_data'] = self.proc_wr_data
        self.proc_packet_d['rd_en'] = self.proc_rd_en
        self.proc_packet_d['rd_addr'] = self.proc_rd_addr
        # Read-return fields are injected by the tiles, not here.
        self.proc_packet_d['rd_data'] = 0
        self.proc_packet_d['rd_data_valid'] = 0
        # Read data comes back on tile 0's e2w west output.
        self.proc_rd_data = self.proc_packet_e2w_wsto[0]['rd_data']
        self.proc_rd_data_valid = self.proc_packet_e2w_wsto[0]['rd_data_valid']
@always_ff((posedge, "clk"), (posedge, "reset"))
def left_edge_cfg_ff(self):
    # Register the external AXI config port into tile 0's config interface
    # and register the returning read data back out.
    if self.reset:
        self.if_cfg_list[0].wr_en = 0
        self.if_cfg_list[0].wr_addr = 0
        self.if_cfg_list[0].wr_data = 0
        self.if_cfg_list[0].rd_en = 0
        self.if_cfg_list[0].rd_addr = 0
        self.if_cfg_rd_data = 0
        self.if_cfg_rd_data_valid = 0
    else:
        self.if_cfg_list[0].wr_en = self.if_cfg_wr_en
        self.if_cfg_list[0].wr_addr = self.if_cfg_wr_addr
        self.if_cfg_list[0].wr_data = self.if_cfg_wr_data
        self.if_cfg_list[0].rd_en = self.if_cfg_rd_en
        self.if_cfg_list[0].rd_addr = self.if_cfg_rd_addr
        self.if_cfg_rd_data = self.if_cfg_list[0].rd_data
        self.if_cfg_rd_data_valid = self.if_cfg_list[0].rd_data_valid
@always_ff((posedge, "clk"), (posedge, "reset"))
def left_edge_sram_cfg_ff(self):
    # Register the external SRAM config port into tile 0's SRAM-config
    # interface and register the returning read data back out
    # (mirrors left_edge_cfg_ff).
    if self.reset:
        self.if_sram_cfg_list[0].wr_en = 0
        self.if_sram_cfg_list[0].wr_addr = 0
        self.if_sram_cfg_list[0].wr_data = 0
        self.if_sram_cfg_list[0].rd_en = 0
        self.if_sram_cfg_list[0].rd_addr = 0
        self.if_sram_cfg_rd_data = 0
        self.if_sram_cfg_rd_data_valid = 0
    else:
        self.if_sram_cfg_list[0].wr_en = self.if_sram_cfg_wr_en
        self.if_sram_cfg_list[0].wr_addr = self.if_sram_cfg_wr_addr
        self.if_sram_cfg_list[0].wr_data = self.if_sram_cfg_wr_data
        self.if_sram_cfg_list[0].rd_en = self.if_sram_cfg_rd_en
        self.if_sram_cfg_list[0].rd_addr = self.if_sram_cfg_rd_addr
        self.if_sram_cfg_rd_data = self.if_sram_cfg_list[0].rd_data
        self.if_sram_cfg_rd_data_valid = self.if_sram_cfg_list[0].rd_data_valid
@always_ff((posedge, "clk"), (posedge, "reset"))
def left_edge_cgra_cfg_ff(self):
    # One-cycle register stage on the JTAG CGRA-config inputs from the
    # global controller before they enter the tile daisy chain.
    if self.reset:
        self.cgra_cfg_jtag_gc2glb_wr_en_d = 0
        self.cgra_cfg_jtag_gc2glb_rd_en_d = 0
        self.cgra_cfg_jtag_gc2glb_addr_d = 0
        self.cgra_cfg_jtag_gc2glb_data_d = 0
    else:
        self.cgra_cfg_jtag_gc2glb_wr_en_d = self.cgra_cfg_jtag_gc2glb_wr_en
        self.cgra_cfg_jtag_gc2glb_rd_en_d = self.cgra_cfg_jtag_gc2glb_rd_en
        self.cgra_cfg_jtag_gc2glb_addr_d = self.cgra_cfg_jtag_gc2glb_addr
        self.cgra_cfg_jtag_gc2glb_data_d = self.cgra_cfg_jtag_gc2glb_data
def tile2tile_e2w_wiring(self):
    """Wire the east-to-west direction of the tile-to-tile rings.

    The easternmost tile's processor packet loops back from its own
    west-to-east output, while the stream and pcfg channels are tied
    off to zero there. Every other tile's east input comes from the
    east-to-west west output of its eastern neighbor.
    """
    last = self._params.num_glb_tiles - 1
    # Boundary (easternmost) tile.
    self.wire(self.proc_packet_e2w_esti[last],
              self.proc_packet_w2e_esto[last])
    self.wire(self.strm_packet_e2w_esti[last], 0)
    self.wire(self.pcfg_packet_e2w_esti[last], 0)
    # Interior tiles: connect each tile to the neighbor on its east side.
    for tile in range(last):
        east_neighbor = tile + 1
        self.wire(self.proc_packet_e2w_esti[tile],
                  self.proc_packet_e2w_wsto[east_neighbor])
        self.wire(self.strm_packet_e2w_esti[tile],
                  self.strm_packet_e2w_wsto[east_neighbor])
        self.wire(self.pcfg_packet_e2w_esti[tile],
                  self.pcfg_packet_e2w_wsto[east_neighbor])
def tile2tile_w2e_wiring(self):
    """Wire the west-to-east direction of the tile-to-tile rings.

    Tile 0 (westernmost) receives the registered processor packet; its
    stream and pcfg inputs are tied to zero. Every other tile's west
    input is driven by the west-to-east east output of its western
    neighbor.
    """
    # Boundary (westernmost) tile.
    self.wire(self.proc_packet_w2e_wsti[0], self.proc_packet_d)
    self.wire(self.strm_packet_w2e_wsti[0], 0)
    self.wire(self.pcfg_packet_w2e_wsti[0], 0)
    # Index width for the packed-array constants is loop-invariant.
    idx_width = clog2(self._params.num_glb_tiles)
    for tile in range(1, self._params.num_glb_tiles):
        cur = const(tile, idx_width)
        west_neighbor = const(tile - 1, idx_width)
        self.wire(self.proc_packet_w2e_wsti[cur],
                  self.proc_packet_w2e_esto[west_neighbor])
        self.wire(self.strm_packet_w2e_wsti[cur],
                  self.strm_packet_w2e_esto[west_neighbor])
        self.wire(self.pcfg_packet_w2e_wsti[cur],
                  self.pcfg_packet_w2e_esto[west_neighbor])
@always_comb
def tile2tile_w2e_cfg_wiring(self):
    # West-to-east daisy chain for the JTAG and pcfg CGRA-config buses:
    # tile 0 is fed by the registered global-controller inputs (JTAG read
    # travels on the bypass pair; pcfg enters the chain zeroed), every
    # later tile by its western neighbor's east outputs.
    for i in range(0, self._params.num_glb_tiles):
        if i == 0:
            self.cgra_cfg_jtag_wsti_rd_en[i] = 0
            self.cgra_cfg_jtag_wsti_wr_en[i] = self.cgra_cfg_jtag_gc2glb_wr_en_d
            self.cgra_cfg_jtag_wsti_addr[i] = self.cgra_cfg_jtag_gc2glb_addr_d
            self.cgra_cfg_jtag_wsti_data[i] = self.cgra_cfg_jtag_gc2glb_data_d
            self.cgra_cfg_jtag_wsti_rd_en_bypass[i] = self.cgra_cfg_jtag_gc2glb_rd_en_d
            self.cgra_cfg_jtag_wsti_addr_bypass[i] = self.cgra_cfg_jtag_gc2glb_addr_d
            self.cgra_cfg_pcfg_wsti_rd_en[i] = 0
            self.cgra_cfg_pcfg_wsti_wr_en[i] = 0
            self.cgra_cfg_pcfg_wsti_addr[i] = 0
            self.cgra_cfg_pcfg_wsti_data[i] = 0
        else:
            self.cgra_cfg_jtag_wsti_rd_en[i] = self.cgra_cfg_jtag_esto_rd_en[i - 1]
            self.cgra_cfg_jtag_wsti_wr_en[i] = self.cgra_cfg_jtag_esto_wr_en[i - 1]
            self.cgra_cfg_jtag_wsti_addr[i] = self.cgra_cfg_jtag_esto_addr[i - 1]
            self.cgra_cfg_jtag_wsti_data[i] = self.cgra_cfg_jtag_esto_data[i - 1]
            self.cgra_cfg_jtag_wsti_rd_en_bypass[i] = self.cgra_cfg_jtag_esto_rd_en_bypass[i - 1]
            self.cgra_cfg_jtag_wsti_addr_bypass[i] = self.cgra_cfg_jtag_esto_addr_bypass[i - 1]
            self.cgra_cfg_pcfg_wsti_rd_en[i] = self.cgra_cfg_pcfg_esto_rd_en[i - 1]
            self.cgra_cfg_pcfg_wsti_wr_en[i] = self.cgra_cfg_pcfg_esto_wr_en[i - 1]
            self.cgra_cfg_pcfg_wsti_addr[i] = self.cgra_cfg_pcfg_esto_addr[i - 1]
            self.cgra_cfg_pcfg_wsti_data[i] = self.cgra_cfg_pcfg_esto_data[i - 1]
def add_glb_tile(self):
    """Instantiate every GlbTile child and hook up all of its ports.

    For each tile ``i``: connect the clock (gated by the registered
    per-tile stall bit), reset, tile ID, the four packet-ring directions
    for the processor/stream/pcfg channels, the west/east tile-to-tile
    config interfaces (index ``i`` = west side, ``i + 1`` = east side),
    the daisy-chained tile-connected flags, the CGRA stream and
    parallel-config buses, and the start/interrupt pulse wires.
    """
    for i in range(self._params.num_glb_tiles):
        self.add_child(f"glb_tile_gen_{i}",
                       self.glb_tile[i],
                       clk=self.clk,
                       # Stalling a tile gates its clock.
                       clk_en=clock_en(~self.stall_d[i]),
                       reset=self.reset,
                       glb_tile_id=i,
                       # processor packet ring
                       proc_wr_en_e2w_esti=self.proc_packet_e2w_esti[i]['wr_en'],
                       proc_wr_strb_e2w_esti=self.proc_packet_e2w_esti[i]['wr_strb'],
                       proc_wr_addr_e2w_esti=self.proc_packet_e2w_esti[i]['wr_addr'],
                       proc_wr_data_e2w_esti=self.proc_packet_e2w_esti[i]['wr_data'],
                       proc_rd_en_e2w_esti=self.proc_packet_e2w_esti[i]['rd_en'],
                       proc_rd_addr_e2w_esti=self.proc_packet_e2w_esti[i]['rd_addr'],
                       proc_rd_data_e2w_esti=self.proc_packet_e2w_esti[i]['rd_data'],
                       proc_rd_data_valid_e2w_esti=self.proc_packet_e2w_esti[i]['rd_data_valid'],
                       proc_wr_en_w2e_esto=self.proc_packet_w2e_esto[i]['wr_en'],
                       proc_wr_strb_w2e_esto=self.proc_packet_w2e_esto[i]['wr_strb'],
                       proc_wr_addr_w2e_esto=self.proc_packet_w2e_esto[i]['wr_addr'],
                       proc_wr_data_w2e_esto=self.proc_packet_w2e_esto[i]['wr_data'],
                       proc_rd_en_w2e_esto=self.proc_packet_w2e_esto[i]['rd_en'],
                       proc_rd_addr_w2e_esto=self.proc_packet_w2e_esto[i]['rd_addr'],
                       proc_rd_data_w2e_esto=self.proc_packet_w2e_esto[i]['rd_data'],
                       proc_rd_data_valid_w2e_esto=self.proc_packet_w2e_esto[i]['rd_data_valid'],
                       proc_wr_en_w2e_wsti=self.proc_packet_w2e_wsti[i]['wr_en'],
                       proc_wr_strb_w2e_wsti=self.proc_packet_w2e_wsti[i]['wr_strb'],
                       proc_wr_addr_w2e_wsti=self.proc_packet_w2e_wsti[i]['wr_addr'],
                       proc_wr_data_w2e_wsti=self.proc_packet_w2e_wsti[i]['wr_data'],
                       proc_rd_en_w2e_wsti=self.proc_packet_w2e_wsti[i]['rd_en'],
                       proc_rd_addr_w2e_wsti=self.proc_packet_w2e_wsti[i]['rd_addr'],
                       proc_rd_data_w2e_wsti=self.proc_packet_w2e_wsti[i]['rd_data'],
                       proc_rd_data_valid_w2e_wsti=self.proc_packet_w2e_wsti[i]['rd_data_valid'],
                       proc_wr_en_e2w_wsto=self.proc_packet_e2w_wsto[i]['wr_en'],
                       proc_wr_strb_e2w_wsto=self.proc_packet_e2w_wsto[i]['wr_strb'],
                       proc_wr_addr_e2w_wsto=self.proc_packet_e2w_wsto[i]['wr_addr'],
                       proc_wr_data_e2w_wsto=self.proc_packet_e2w_wsto[i]['wr_data'],
                       proc_rd_en_e2w_wsto=self.proc_packet_e2w_wsto[i]['rd_en'],
                       proc_rd_addr_e2w_wsto=self.proc_packet_e2w_wsto[i]['rd_addr'],
                       proc_rd_data_e2w_wsto=self.proc_packet_e2w_wsto[i]['rd_data'],
                       proc_rd_data_valid_e2w_wsto=self.proc_packet_e2w_wsto[i]['rd_data_valid'],
                       # stream packet ring
                       strm_wr_en_e2w_esti=self.strm_packet_e2w_esti[i]['wr_en'],
                       strm_wr_strb_e2w_esti=self.strm_packet_e2w_esti[i]['wr_strb'],
                       strm_wr_addr_e2w_esti=self.strm_packet_e2w_esti[i]['wr_addr'],
                       strm_wr_data_e2w_esti=self.strm_packet_e2w_esti[i]['wr_data'],
                       strm_rd_en_e2w_esti=self.strm_packet_e2w_esti[i]['rd_en'],
                       strm_rd_addr_e2w_esti=self.strm_packet_e2w_esti[i]['rd_addr'],
                       strm_rd_data_e2w_esti=self.strm_packet_e2w_esti[i]['rd_data'],
                       strm_rd_data_valid_e2w_esti=self.strm_packet_e2w_esti[i]['rd_data_valid'],
                       strm_wr_en_w2e_esto=self.strm_packet_w2e_esto[i]['wr_en'],
                       strm_wr_strb_w2e_esto=self.strm_packet_w2e_esto[i]['wr_strb'],
                       strm_wr_addr_w2e_esto=self.strm_packet_w2e_esto[i]['wr_addr'],
                       strm_wr_data_w2e_esto=self.strm_packet_w2e_esto[i]['wr_data'],
                       strm_rd_en_w2e_esto=self.strm_packet_w2e_esto[i]['rd_en'],
                       strm_rd_addr_w2e_esto=self.strm_packet_w2e_esto[i]['rd_addr'],
                       strm_rd_data_w2e_esto=self.strm_packet_w2e_esto[i]['rd_data'],
                       strm_rd_data_valid_w2e_esto=self.strm_packet_w2e_esto[i]['rd_data_valid'],
                       strm_wr_en_w2e_wsti=self.strm_packet_w2e_wsti[i]['wr_en'],
                       strm_wr_strb_w2e_wsti=self.strm_packet_w2e_wsti[i]['wr_strb'],
                       strm_wr_addr_w2e_wsti=self.strm_packet_w2e_wsti[i]['wr_addr'],
                       strm_wr_data_w2e_wsti=self.strm_packet_w2e_wsti[i]['wr_data'],
                       strm_rd_en_w2e_wsti=self.strm_packet_w2e_wsti[i]['rd_en'],
                       strm_rd_addr_w2e_wsti=self.strm_packet_w2e_wsti[i]['rd_addr'],
                       strm_rd_data_w2e_wsti=self.strm_packet_w2e_wsti[i]['rd_data'],
                       strm_rd_data_valid_w2e_wsti=self.strm_packet_w2e_wsti[i]['rd_data_valid'],
                       strm_wr_en_e2w_wsto=self.strm_packet_e2w_wsto[i]['wr_en'],
                       strm_wr_strb_e2w_wsto=self.strm_packet_e2w_wsto[i]['wr_strb'],
                       strm_wr_addr_e2w_wsto=self.strm_packet_e2w_wsto[i]['wr_addr'],
                       strm_wr_data_e2w_wsto=self.strm_packet_e2w_wsto[i]['wr_data'],
                       strm_rd_en_e2w_wsto=self.strm_packet_e2w_wsto[i]['rd_en'],
                       strm_rd_addr_e2w_wsto=self.strm_packet_e2w_wsto[i]['rd_addr'],
                       strm_rd_data_e2w_wsto=self.strm_packet_e2w_wsto[i]['rd_data'],
                       strm_rd_data_valid_e2w_wsto=self.strm_packet_e2w_wsto[i]['rd_data_valid'],
                       # pcfg (read-only) packet ring
                       pcfg_rd_en_e2w_esti=self.pcfg_packet_e2w_esti[i]['rd_en'],
                       pcfg_rd_addr_e2w_esti=self.pcfg_packet_e2w_esti[i]['rd_addr'],
                       pcfg_rd_data_e2w_esti=self.pcfg_packet_e2w_esti[i]['rd_data'],
                       pcfg_rd_data_valid_e2w_esti=self.pcfg_packet_e2w_esti[i]['rd_data_valid'],
                       pcfg_rd_en_w2e_esto=self.pcfg_packet_w2e_esto[i]['rd_en'],
                       pcfg_rd_addr_w2e_esto=self.pcfg_packet_w2e_esto[i]['rd_addr'],
                       pcfg_rd_data_w2e_esto=self.pcfg_packet_w2e_esto[i]['rd_data'],
                       pcfg_rd_data_valid_w2e_esto=self.pcfg_packet_w2e_esto[i]['rd_data_valid'],
                       pcfg_rd_en_w2e_wsti=self.pcfg_packet_w2e_wsti[i]['rd_en'],
                       pcfg_rd_addr_w2e_wsti=self.pcfg_packet_w2e_wsti[i]['rd_addr'],
                       pcfg_rd_data_w2e_wsti=self.pcfg_packet_w2e_wsti[i]['rd_data'],
                       pcfg_rd_data_valid_w2e_wsti=self.pcfg_packet_w2e_wsti[i]['rd_data_valid'],
                       pcfg_rd_en_e2w_wsto=self.pcfg_packet_e2w_wsto[i]['rd_en'],
                       pcfg_rd_addr_e2w_wsto=self.pcfg_packet_e2w_wsto[i]['rd_addr'],
                       pcfg_rd_data_e2w_wsto=self.pcfg_packet_e2w_wsto[i]['rd_data'],
                       pcfg_rd_data_valid_e2w_wsto=self.pcfg_packet_e2w_wsto[i]['rd_data_valid'],
                       # tile-to-tile AXI config interfaces (east master / west slave)
                       if_cfg_est_m_wr_en=self.if_cfg_list[i + 1].wr_en,
                       if_cfg_est_m_wr_addr=self.if_cfg_list[i + 1].wr_addr,
                       if_cfg_est_m_wr_data=self.if_cfg_list[i + 1].wr_data,
                       if_cfg_est_m_rd_en=self.if_cfg_list[i + 1].rd_en,
                       if_cfg_est_m_rd_addr=self.if_cfg_list[i + 1].rd_addr,
                       if_cfg_est_m_rd_data=self.if_cfg_list[i + 1].rd_data,
                       if_cfg_est_m_rd_data_valid=self.if_cfg_list[i + 1].rd_data_valid,
                       if_cfg_wst_s_wr_en=self.if_cfg_list[i].wr_en,
                       if_cfg_wst_s_wr_addr=self.if_cfg_list[i].wr_addr,
                       if_cfg_wst_s_wr_data=self.if_cfg_list[i].wr_data,
                       if_cfg_wst_s_rd_en=self.if_cfg_list[i].rd_en,
                       if_cfg_wst_s_rd_addr=self.if_cfg_list[i].rd_addr,
                       if_cfg_wst_s_rd_data=self.if_cfg_list[i].rd_data,
                       if_cfg_wst_s_rd_data_valid=self.if_cfg_list[i].rd_data_valid,
                       # tile-to-tile SRAM config interfaces
                       if_sram_cfg_est_m_wr_en=self.if_sram_cfg_list[i + 1].wr_en,
                       if_sram_cfg_est_m_wr_addr=self.if_sram_cfg_list[i + 1].wr_addr,
                       if_sram_cfg_est_m_wr_data=self.if_sram_cfg_list[i + 1].wr_data,
                       if_sram_cfg_est_m_rd_en=self.if_sram_cfg_list[i + 1].rd_en,
                       if_sram_cfg_est_m_rd_addr=self.if_sram_cfg_list[i + 1].rd_addr,
                       if_sram_cfg_est_m_rd_data=self.if_sram_cfg_list[i + 1].rd_data,
                       if_sram_cfg_est_m_rd_data_valid=self.if_sram_cfg_list[i + 1].rd_data_valid,
                       if_sram_cfg_wst_s_wr_en=self.if_sram_cfg_list[i].wr_en,
                       if_sram_cfg_wst_s_wr_addr=self.if_sram_cfg_list[i].wr_addr,
                       if_sram_cfg_wst_s_wr_data=self.if_sram_cfg_list[i].wr_data,
                       if_sram_cfg_wst_s_rd_en=self.if_sram_cfg_list[i].rd_en,
                       if_sram_cfg_wst_s_rd_addr=self.if_sram_cfg_list[i].rd_addr,
                       if_sram_cfg_wst_s_rd_data=self.if_sram_cfg_list[i].rd_data,
                       if_sram_cfg_wst_s_rd_data_valid=self.if_sram_cfg_list[i].rd_data_valid,
                       # daisy-chained tile-connected flags
                       cfg_tile_connected_wsti=self.cfg_tile_connected[i],
                       cfg_tile_connected_esto=self.cfg_tile_connected[i + 1],
                       cfg_pcfg_tile_connected_wsti=self.cfg_pcfg_tile_connected[i],
                       cfg_pcfg_tile_connected_esto=self.cfg_pcfg_tile_connected[i + 1],
                       # CGRA stream and parallel-config buses
                       stream_data_f2g=self.stream_data_f2g_d[i],
                       stream_data_valid_f2g=self.stream_data_valid_f2g_d[i],
                       stream_data_g2f=self.stream_data_g2f_w[i],
                       stream_data_valid_g2f=self.stream_data_valid_g2f_w[i],
                       cgra_cfg_g2f_cfg_wr_en=self.cgra_cfg_g2f_cfg_wr_en_w[i],
                       cgra_cfg_g2f_cfg_rd_en=self.cgra_cfg_g2f_cfg_rd_en_w[i],
                       cgra_cfg_g2f_cfg_addr=self.cgra_cfg_g2f_cfg_addr_w[i],
                       cgra_cfg_g2f_cfg_data=self.cgra_cfg_g2f_cfg_data_w[i],
                       cgra_cfg_pcfg_wsti_wr_en=self.cgra_cfg_pcfg_wsti_wr_en[i],
                       cgra_cfg_pcfg_wsti_rd_en=self.cgra_cfg_pcfg_wsti_rd_en[i],
                       cgra_cfg_pcfg_wsti_addr=self.cgra_cfg_pcfg_wsti_addr[i],
                       cgra_cfg_pcfg_wsti_data=self.cgra_cfg_pcfg_wsti_data[i],
                       cgra_cfg_pcfg_esto_wr_en=self.cgra_cfg_pcfg_esto_wr_en[i],
                       cgra_cfg_pcfg_esto_rd_en=self.cgra_cfg_pcfg_esto_rd_en[i],
                       cgra_cfg_pcfg_esto_addr=self.cgra_cfg_pcfg_esto_addr[i],
                       cgra_cfg_pcfg_esto_data=self.cgra_cfg_pcfg_esto_data[i],
                       cgra_cfg_jtag_wsti_wr_en=self.cgra_cfg_jtag_wsti_wr_en[i],
                       cgra_cfg_jtag_wsti_rd_en=self.cgra_cfg_jtag_wsti_rd_en[i],
                       cgra_cfg_jtag_wsti_addr=self.cgra_cfg_jtag_wsti_addr[i],
                       cgra_cfg_jtag_wsti_data=self.cgra_cfg_jtag_wsti_data[i],
                       cgra_cfg_jtag_esto_wr_en=self.cgra_cfg_jtag_esto_wr_en[i],
                       cgra_cfg_jtag_esto_rd_en=self.cgra_cfg_jtag_esto_rd_en[i],
                       cgra_cfg_jtag_esto_addr=self.cgra_cfg_jtag_esto_addr[i],
                       cgra_cfg_jtag_esto_data=self.cgra_cfg_jtag_esto_data[i],
                       cgra_cfg_jtag_wsti_rd_en_bypass=self.cgra_cfg_jtag_wsti_rd_en_bypass[i],
                       cgra_cfg_jtag_wsti_addr_bypass=self.cgra_cfg_jtag_wsti_addr_bypass[i],
                       cgra_cfg_jtag_esto_rd_en_bypass=self.cgra_cfg_jtag_esto_rd_en_bypass[i],
                       cgra_cfg_jtag_esto_addr_bypass=self.cgra_cfg_jtag_esto_addr_bypass[i],
                       # start / interrupt pulses
                       strm_g2f_start_pulse=self.strm_g2f_start_pulse_d[i],
                       strm_f2g_start_pulse=self.strm_f2g_start_pulse_d[i],
                       pcfg_start_pulse=self.pcfg_start_pulse_d[i],
                       strm_f2g_interrupt_pulse=self.strm_f2g_interrupt_pulse_w[i],
                       strm_g2f_interrupt_pulse=self.strm_g2f_interrupt_pulse_w[i],
                       pcfg_g2f_interrupt_pulse=self.pcfg_g2f_interrupt_pulse_w[i])
    # Pipeline register stage for the per-tile interrupt pulses.
    # kratos translates this method's AST into an always_ff RTL block
    # (async active-high reset), so only '#' comments are added here.
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def interrupt_pipeline(self):
        if self.reset:
            # Reset: clear every registered interrupt pulse.
            for i in range(self._params.num_glb_tiles):
                self.strm_f2g_interrupt_pulse_d[i] = 0
                self.strm_g2f_interrupt_pulse_d[i] = 0
                self.pcfg_g2f_interrupt_pulse_d[i] = 0
        else:
            # Register the combinational (_w) pulses into the _d flops.
            for i in range(self._params.num_glb_tiles):
                self.strm_f2g_interrupt_pulse_d[i] = self.strm_f2g_interrupt_pulse_w[i]
                self.strm_g2f_interrupt_pulse_d[i] = self.strm_g2f_interrupt_pulse_w[i]
                self.pcfg_g2f_interrupt_pulse_d[i] = self.pcfg_g2f_interrupt_pulse_w[i]
    # Pipeline register stage for the per-tile start pulses.
    # kratos translates this method's AST into an always_ff RTL block,
    # so only '#' comments are added here.
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def start_pulse_pipeline(self):
        if self.reset:
            # Reset: clear every registered start pulse.
            for i in range(self._params.num_glb_tiles):
                self.strm_g2f_start_pulse_d[i] = 0
                self.strm_f2g_start_pulse_d[i] = 0
                self.pcfg_start_pulse_d[i] = 0
        else:
            # Register the combinational (_w) pulses into the _d flops.
            for i in range(self._params.num_glb_tiles):
                self.strm_g2f_start_pulse_d[i] = self.strm_g2f_start_pulse_w[i]
                self.strm_f2g_start_pulse_d[i] = self.strm_f2g_start_pulse_w[i]
                self.pcfg_start_pulse_d[i] = self.pcfg_start_pulse_w[i]
    # Pipeline register stage for the per-tile stall signals.
    # kratos translates this method's AST into an always_ff RTL block,
    # so only '#' comments are added here.
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def stall_pipeline(self):
        if self.reset:
            # Reset: deassert registered stalls.
            for i in range(self._params.num_glb_tiles):
                self.stall_d[i] = 0
                self.cgra_stall_in_d[i] = 0
        else:
            # Register the combinational (_w) stall values into the _d flops.
            for i in range(self._params.num_glb_tiles):
                self.stall_d[i] = self.stall_w[i]
                self.cgra_stall_in_d[i] = self.cgra_stall_in_w[i]
    # Pipeline register stage for the stream data/valid signals in both
    # directions (g2f: GLB-to-fabric, f2g: fabric-to-GLB).
    # kratos translates this method's AST into an always_ff RTL block,
    # so only '#' comments are added here.
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def stream_data_pipeline(self):
        if self.reset:
            # Reset: clear registered data and deassert valids.
            for i in range(self._params.num_glb_tiles):
                self.stream_data_g2f_d[i] = 0
                self.stream_data_valid_g2f_d[i] = 0
                self.stream_data_f2g_d[i] = 0
                self.stream_data_valid_f2g_d[i] = 0
        else:
            # Register the combinational (_w) values into the _d flops.
            for i in range(self._params.num_glb_tiles):
                self.stream_data_g2f_d[i] = self.stream_data_g2f_w[i]
                self.stream_data_valid_g2f_d[i] = self.stream_data_valid_g2f_w[i]
                self.stream_data_f2g_d[i] = self.stream_data_f2g_w[i]
                self.stream_data_valid_f2g_d[i] = self.stream_data_valid_f2g_w[i]
    # Pipeline register stage for the CGRA parallel-configuration bus
    # (wr_en / rd_en / addr / data) heading from GLB to the fabric.
    # kratos translates this method's AST into an always_ff RTL block,
    # so only '#' comments are added here.
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def cgra_cfg_pcfg_pipeline(self):
        if self.reset:
            # Reset: clear the registered configuration bus.
            for i in range(self._params.num_glb_tiles):
                self.cgra_cfg_g2f_cfg_wr_en_d[i] = 0
                self.cgra_cfg_g2f_cfg_rd_en_d[i] = 0
                self.cgra_cfg_g2f_cfg_addr_d[i] = 0
                self.cgra_cfg_g2f_cfg_data_d[i] = 0
        else:
            # Register the combinational (_w) values into the _d flops.
            for i in range(self._params.num_glb_tiles):
                self.cgra_cfg_g2f_cfg_wr_en_d[i] = self.cgra_cfg_g2f_cfg_wr_en_w[i]
                self.cgra_cfg_g2f_cfg_rd_en_d[i] = self.cgra_cfg_g2f_cfg_rd_en_w[i]
                self.cgra_cfg_g2f_cfg_addr_d[i] = self.cgra_cfg_g2f_cfg_addr_w[i]
                self.cgra_cfg_g2f_cfg_data_d[i] = self.cgra_cfg_g2f_cfg_data_w[i]
def GlobalBufferMagma(params: GlobalBufferParams):
    """Elaborate the kratos GlobalBuffer and wrap it as a magma circuit.

    Args:
        params: global buffer configuration parameters.

    Returns:
        The generated circuit wrapped via FromMagma, with arrays flattened.
    """
    kratos_dut = GlobalBuffer(params)
    magma_circuit = to_magma(kratos_dut, flatten_array=True)
    return FromMagma(magma_circuit)
|
StanfordAHA/garnet | global_buffer/design/glb_cfg_ifc.py | from kratos import Interface
class GlbConfigInterface(Interface):
    """Simple read/write configuration interface with master/slave modports.

    The master drives the request signals (write enable/addr/data and read
    enable/addr); the slave drives the response signals (read data + valid).
    """

    def __init__(self, addr_width: int, data_width: int):
        # Interface name encodes the address/data widths so differently
        # sized instantiations get distinct RTL definitions.
        Interface.__init__(self, f"glb_cfg_ifc_A_{addr_width}_D_{data_width}")

        # Interface signals
        self.wr_en = self.var("wr_en", 1)
        self.wr_addr = self.var("wr_addr", addr_width)
        self.wr_data = self.var("wr_data", data_width)
        self.rd_en = self.var("rd_en", 1)
        self.rd_addr = self.var("rd_addr", addr_width)
        self.rd_data = self.var("rd_data", data_width)
        self.rd_data_valid = self.var("rd_data_valid", 1)

        self.master = self.modport("master")
        self.slave = self.modport("slave")

        # Requests flow master -> slave; responses flow slave -> master.
        driven_by_master = (self.wr_en, self.wr_addr, self.wr_data,
                            self.rd_en, self.rd_addr)
        driven_by_slave = (self.rd_data, self.rd_data_valid)
        for sig in driven_by_master:
            self.master.set_output(sig)
            self.slave.set_input(sig)
        for sig in driven_by_slave:
            self.master.set_input(sig)
            self.slave.set_output(sig)
|
StanfordAHA/garnet | mflowgen/Tile_MemCore/construct.py | <gh_stars>10-100
#! /usr/bin/env python
#=========================================================================
# construct.py
#=========================================================================
# Author :
# Date :
#
import os
import sys
from mflowgen.components import Graph, Step
from shutil import which
def construct():
    """Build the mflowgen graph for the Tile_MemCore physical design flow.

    Assembles RTL generation, SRAM macro generation, synthesis, the full
    Innovus place-and-route sequence, signoff, timing/power analysis and
    DRC/LVS into a single Graph, then parameterizes the nodes.  The order
    in which scripts are inserted into each node's 'order' parameter is
    significant and must be preserved.

    Returns:
        The fully connected, parameterized mflowgen Graph.
    """

    g = Graph()

    #-----------------------------------------------------------------------
    # Parameters
    #-----------------------------------------------------------------------

    adk_name = 'tsmc16'
    adk_view = 'multicorner-multivt'
    pwr_aware = True

    synth_power = False
    # SYNTH_POWER env var selects post-synthesis (vs post-PnR only) power.
    if os.environ.get('SYNTH_POWER') == 'True':
        synth_power = True
    # power domains do not work with post-synth power
    if synth_power:
        pwr_aware = False

    parameters = {
        'construct_path' : __file__,
        'design_name' : 'Tile_MemCore',
        'clock_period' : 1.1,
        'adk' : adk_name,
        'adk_view' : adk_view,
        # Synthesis
        'flatten_effort' : 3,
        'topographical' : True,
        # SRAM macros
        'num_words' : 512,
        'word_size' : 32,
        'mux_size' : 4,
        'corner' : "tt0p8v25c",
        'bc_corner' : "ffg0p88v125c",
        'partial_write' : False,
        # Hold target slack
        'hold_target_slack' : 0.015,
        # Utilization target
        'core_density_target' : 0.68,
        # RTL Generation
        'interconnect_only' : True,
        # Power Domains
        'PWR_AWARE' : pwr_aware,
        # Power analysis
        "use_sdf" : False, # uses sdf but not the way it is in xrun node
        'app_to_run' : 'tests/conv_3_3',
        'saif_instance' : 'testbench/dut',
        'testbench_name' : 'testbench',
        'strip_path' : 'testbench/dut'
    }

    #-----------------------------------------------------------------------
    # Create nodes
    #-----------------------------------------------------------------------

    this_dir = os.path.dirname( os.path.abspath( __file__ ) )

    # ADK step
    g.set_adk( adk_name )
    adk = g.get_adk_step()

    # Custom steps
    rtl = Step( this_dir + '/../common/rtl' )
    genlibdb_constraints = Step( this_dir + '/../common/custom-genlibdb-constraints' )
    constraints = Step( this_dir + '/constraints' )
    gen_sram = Step( this_dir + '/../common/gen_sram_macro' )
    custom_init = Step( this_dir + '/custom-init' )
    custom_genus_scripts = Step( this_dir + '/custom-genus-scripts' )
    custom_flowgen_setup = Step( this_dir + '/custom-flowgen-setup' )
    custom_lvs = Step( this_dir + '/custom-lvs-rules' )
    custom_power = Step( this_dir + '/../common/custom-power-leaf' )
    testbench = Step( this_dir + '/../common/testbench' )
    application = Step( this_dir + '/../common/application' )
    lib2db = Step( this_dir + '/../common/synopsys-dc-lib2db' )
    if synth_power:
        post_synth_power = Step( this_dir + '/../common/tile-post-synth-power' )
    post_pnr_power = Step( this_dir + '/../common/tile-post-pnr-power' )

    # Power aware setup
    if pwr_aware:
        power_domains = Step( this_dir + '/../common/power-domains' )
        pwr_aware_gls = Step( this_dir + '/../common/pwr-aware-gls' )

    # Default steps
    info = Step( 'info', default=True )
    synth = Step( 'cadence-genus-synthesis', default=True )
    iflow = Step( 'cadence-innovus-flowsetup', default=True )
    init = Step( 'cadence-innovus-init', default=True )
    power = Step( 'cadence-innovus-power', default=True )
    place = Step( 'cadence-innovus-place', default=True )
    cts = Step( 'cadence-innovus-cts', default=True )
    postcts_hold = Step( 'cadence-innovus-postcts_hold', default=True )
    route = Step( 'cadence-innovus-route', default=True )
    postroute = Step( 'cadence-innovus-postroute', default=True )
    postroute_hold = Step( 'cadence-innovus-postroute_hold', default=True )
    signoff = Step( 'cadence-innovus-signoff', default=True )
    pt_signoff = Step( 'synopsys-pt-timing-signoff', default=True )
    genlibdb = Step( 'cadence-genus-genlib', default=True )

    # Fall back to Pegasus when Calibre is not installed on this machine.
    if which("calibre") is not None:
        drc = Step( 'mentor-calibre-drc', default=True )
        lvs = Step( 'mentor-calibre-lvs', default=True )
    else:
        drc = Step( 'cadence-pegasus-drc', default=True )
        lvs = Step( 'cadence-pegasus-lvs', default=True )
    debugcalibre = Step( 'cadence-innovus-debug-calibre', default=True )

    # Extra DC input
    synth.extend_inputs(["common.tcl"])
    synth.extend_inputs(["simple_common.tcl"])

    # Add sram macro inputs to downstream nodes
    synth.extend_inputs( ['sram_tt.lib', 'sram.lef'] )
    #pt_signoff.extend_inputs( ['sram_tt.db'] )
    genlibdb.extend_inputs( ['sram_tt.lib'] )

    # These steps need timing and lef info for srams
    sram_steps = \
        [iflow, init, power, place, cts, postcts_hold, route, postroute, postroute_hold, signoff]
    for step in sram_steps:
        step.extend_inputs( ['sram_tt.lib', 'sram_ff.lib', 'sram.lef'] )

    # Need the sram gds to merge into the final layout
    signoff.extend_inputs( ['sram.gds'] )

    # Need SRAM spice file for LVS
    lvs.extend_inputs( ['sram.spi'] )

    # Add extra input edges to innovus steps that need custom tweaks
    init.extend_inputs( custom_init.all_outputs() )
    power.extend_inputs( custom_power.all_outputs() )

    # Add extra input edges to genlibdb for loop-breaking constraints
    genlibdb.extend_inputs( genlibdb_constraints.all_outputs() )
    synth.extend_inputs( custom_genus_scripts.all_outputs() )
    iflow.extend_inputs( custom_flowgen_setup.all_outputs() )

    # Propagate the synthesis SDC to the downstream Innovus nodes.
    synth.extend_outputs( ["sdc"] )
    iflow.extend_inputs( ["sdc"] )
    init.extend_inputs( ["sdc"] )
    power.extend_inputs( ["sdc"] )
    place.extend_inputs( ["sdc"] )
    cts.extend_inputs( ["sdc"] )

    order = synth.get_param( 'order' )
    order.append( 'copy_sdc.tcl' )
    synth.set_param( 'order', order )

    # Power aware setup
    if pwr_aware:
        synth.extend_inputs(['designer-interface.tcl', 'upf_Tile_MemCore.tcl', 'mem-constraints.tcl', 'mem-constraints-2.tcl', 'dc-dont-use-constraints.tcl'])
        init.extend_inputs(['check-clamp-logic-structure.tcl', 'upf_Tile_MemCore.tcl', 'mem-load-upf.tcl', 'dont-touch-constraints.tcl', 'pd-mem-floorplan.tcl', 'mem-add-endcaps-welltaps-setup.tcl', 'pd-add-endcaps-welltaps.tcl', 'mem-power-switches-setup.tcl', 'add-power-switches.tcl'])
        place.extend_inputs(['check-clamp-logic-structure.tcl', 'place-dont-use-constraints.tcl', 'add-aon-tie-cells.tcl'])
        power.extend_inputs(['pd-globalnetconnect.tcl'] )
        cts.extend_inputs(['check-clamp-logic-structure.tcl', 'conn-aon-cells-vdd.tcl'])
        postcts_hold.extend_inputs(['check-clamp-logic-structure.tcl', 'conn-aon-cells-vdd.tcl'] )
        route.extend_inputs(['check-clamp-logic-structure.tcl', 'conn-aon-cells-vdd.tcl'] )
        postroute.extend_inputs(['check-clamp-logic-structure.tcl', 'conn-aon-cells-vdd.tcl'] )
        postroute_hold.extend_inputs(['conn-aon-cells-vdd.tcl'] )
        signoff.extend_inputs(['check-clamp-logic-structure.tcl', 'conn-aon-cells-vdd.tcl', 'pd-generate-lvs-netlist.tcl'] )
        pwr_aware_gls.extend_inputs(['design.vcs.pg.v', 'sram_pwr.v'])

    #-----------------------------------------------------------------------
    # Graph -- Add nodes
    #-----------------------------------------------------------------------

    g.add_step( info )
    g.add_step( rtl )
    g.add_step( gen_sram )
    g.add_step( constraints )
    g.add_step( synth )
    g.add_step( custom_genus_scripts )
    g.add_step( iflow )
    g.add_step( custom_flowgen_setup )
    g.add_step( init )
    g.add_step( custom_init )
    g.add_step( power )
    g.add_step( custom_power )
    g.add_step( place )
    g.add_step( cts )
    g.add_step( postcts_hold )
    g.add_step( route )
    g.add_step( postroute )
    g.add_step( postroute_hold )
    g.add_step( signoff )
    g.add_step( pt_signoff )
    g.add_step( genlibdb_constraints )
    g.add_step( genlibdb )
    g.add_step( lib2db )
    g.add_step( drc )
    g.add_step( lvs )
    g.add_step( custom_lvs )
    g.add_step( debugcalibre )
    g.add_step( application )
    g.add_step( testbench )
    if synth_power:
        g.add_step( post_synth_power )
    g.add_step( post_pnr_power )

    # Power aware step
    if pwr_aware:
        g.add_step( power_domains )
        g.add_step( pwr_aware_gls )

    #-----------------------------------------------------------------------
    # Graph -- Add edges
    #-----------------------------------------------------------------------

    # Connect by name
    g.connect_by_name( adk, gen_sram )
    g.connect_by_name( adk, synth )
    g.connect_by_name( adk, iflow )
    g.connect_by_name( adk, init )
    g.connect_by_name( adk, power )
    g.connect_by_name( adk, place )
    g.connect_by_name( adk, cts )
    g.connect_by_name( adk, postcts_hold )
    g.connect_by_name( adk, route )
    g.connect_by_name( adk, postroute )
    g.connect_by_name( adk, postroute_hold )
    g.connect_by_name( adk, signoff )
    g.connect_by_name( adk, drc )
    g.connect_by_name( adk, lvs )

    # The generated SRAM macro views feed every PnR / analysis step.
    g.connect_by_name( gen_sram, synth )
    g.connect_by_name( gen_sram, iflow )
    g.connect_by_name( gen_sram, init )
    g.connect_by_name( gen_sram, power )
    g.connect_by_name( gen_sram, place )
    g.connect_by_name( gen_sram, cts )
    g.connect_by_name( gen_sram, postcts_hold )
    g.connect_by_name( gen_sram, route )
    g.connect_by_name( gen_sram, postroute )
    g.connect_by_name( gen_sram, postroute_hold )
    g.connect_by_name( gen_sram, signoff )
    g.connect_by_name( gen_sram, genlibdb )
    g.connect_by_name( gen_sram, pt_signoff )
    g.connect_by_name( gen_sram, drc )
    g.connect_by_name( gen_sram, lvs )

    g.connect_by_name( rtl, synth )
    g.connect_by_name( constraints, synth )
    g.connect_by_name( custom_genus_scripts, synth )

    g.connect_by_name( synth, iflow )
    g.connect_by_name( synth, init )
    g.connect_by_name( synth, power )
    g.connect_by_name( synth, place )
    g.connect_by_name( synth, cts )
    g.connect_by_name( custom_flowgen_setup, iflow )

    g.connect_by_name( iflow, init )
    g.connect_by_name( iflow, power )
    g.connect_by_name( iflow, place )
    g.connect_by_name( iflow, cts )
    g.connect_by_name( iflow, postcts_hold )
    g.connect_by_name( iflow, route )
    g.connect_by_name( iflow, postroute )
    g.connect_by_name( iflow, postroute_hold )
    g.connect_by_name( iflow, signoff )

    g.connect_by_name( custom_init, init )
    g.connect_by_name( custom_power, power )
    g.connect_by_name( custom_lvs, lvs )

    # Main PnR chain: init -> power -> place -> cts -> ... -> signoff.
    g.connect_by_name( init, power )
    g.connect_by_name( power, place )
    g.connect_by_name( place, cts )
    g.connect_by_name( cts, postcts_hold )
    g.connect_by_name( postcts_hold, route )
    g.connect_by_name( route, postroute )
    g.connect_by_name( postroute, postroute_hold )
    g.connect_by_name( postroute_hold, signoff )
    g.connect_by_name( signoff, drc )
    g.connect_by_name( signoff, lvs )
    # Explicit connection: port names differ ('-' vs '_') between nodes.
    g.connect(signoff.o('design-merged.gds'), drc.i('design_merged.gds'))
    g.connect(signoff.o('design-merged.gds'), lvs.i('design_merged.gds'))

    g.connect_by_name( signoff, genlibdb )
    g.connect_by_name( adk, genlibdb )
    g.connect_by_name( genlibdb_constraints, genlibdb )
    g.connect_by_name( genlibdb, lib2db )

    g.connect_by_name( adk, pt_signoff )
    g.connect_by_name( signoff, pt_signoff )

    g.connect_by_name( application, testbench )
    if synth_power:
        g.connect_by_name( application, post_synth_power )
        g.connect_by_name( gen_sram, post_synth_power )
        g.connect_by_name( synth, post_synth_power )
        g.connect_by_name( testbench, post_synth_power )
    g.connect_by_name( application, post_pnr_power )
    g.connect_by_name( gen_sram, post_pnr_power )
    g.connect_by_name( signoff, post_pnr_power )
    g.connect_by_name( pt_signoff, post_pnr_power )
    g.connect_by_name( testbench, post_pnr_power )

    g.connect_by_name( adk, debugcalibre )
    g.connect_by_name( synth, debugcalibre )
    g.connect_by_name( iflow, debugcalibre )
    g.connect_by_name( signoff, debugcalibre )
    g.connect_by_name( drc, debugcalibre )
    g.connect_by_name( lvs, debugcalibre )

    # Pwr aware steps:
    if pwr_aware:
        g.connect_by_name( power_domains, synth )
        g.connect_by_name( power_domains, init )
        g.connect_by_name( power_domains, power )
        g.connect_by_name( power_domains, place )
        g.connect_by_name( power_domains, cts )
        g.connect_by_name( power_domains, postcts_hold )
        g.connect_by_name( power_domains, route )
        g.connect_by_name( power_domains, postroute )
        g.connect_by_name( power_domains, postroute_hold )
        g.connect_by_name( power_domains, signoff )
        g.connect_by_name( adk, pwr_aware_gls)
        g.connect_by_name( gen_sram, pwr_aware_gls)
        g.connect_by_name( signoff, pwr_aware_gls)
        #g.connect(power_domains.o('pd-globalnetconnect.tcl'), power.i('globalnetconnect.tcl'))

    #-----------------------------------------------------------------------
    # Parameterize
    #-----------------------------------------------------------------------

    g.update_params( parameters )

    # Update PWR_AWARE variable
    synth.update_params( { 'PWR_AWARE': parameters['PWR_AWARE'] }, True )
    init.update_params( { 'PWR_AWARE': parameters['PWR_AWARE'] }, True )
    power.update_params( { 'PWR_AWARE': parameters['PWR_AWARE'] }, True )

    if pwr_aware:
        pwr_aware_gls.update_params( { 'design_name': parameters['design_name'] }, True )
        # Postconditions guard the power-domain clamping logic at each stage.
        init.extend_postconditions( ["assert 'Clamping logic structure in the SBs and CBs is maintained' in File( 'mflowgen-run.log' )"] )
        place.extend_postconditions( ["assert 'Clamping logic structure in the SBs and CBs is maintained' in File( 'mflowgen-run.log' )"] )
        cts.extend_postconditions( ["assert 'Clamping logic structure in the SBs and CBs is maintained' in File( 'mflowgen-run.log' )"] )
        postcts_hold.extend_postconditions( ["assert 'Clamping logic structure in the SBs and CBs is maintained' in File( 'mflowgen-run.log' )"] )
        route.extend_postconditions( ["assert 'Clamping logic structure in the SBs and CBs is maintained' in File( 'mflowgen-run.log' )"] )
        postroute.extend_postconditions( ["assert 'Clamping logic structure in the SBs and CBs is maintained' in File( 'mflowgen-run.log' )"] )
        signoff.extend_postconditions( ["assert 'Clamping logic structure in the SBs and CBs is maintained' in File( 'mflowgen-run.log' )"] )

    # Core density target param
    init.update_params( { 'core_density_target': parameters['core_density_target'] }, True )
    # Disable pwr aware flow
    #init.update_params( { 'PWR_AWARE': parameters['PWR_AWARE'] }, allow_new=True )
    #power.update_params( { 'PWR_AWARE': parameters['PWR_AWARE'] }, allow_new=True )

    # Since we are adding an additional input script to the generic Innovus
    # steps, we modify the order parameter for that node which determines
    # which scripts get run and when they get run.

    # init -- Add 'edge-blockages.tcl' after 'pin-assignments.tcl'
    order = init.get_param('order') # get the default script run order
    path_group_idx = order.index( 'make-path-groups.tcl' )
    order.insert( path_group_idx + 1, 'additional-path-groups.tcl' )
    pin_idx = order.index( 'pin-assignments.tcl' ) # find pin-assignments.tcl
    order.insert( pin_idx + 1, 'edge-blockages.tcl' ) # add here
    init.update_params( { 'order': order } )

    # Adding new input for genlibdb node to run
    order = genlibdb.get_param('order') # get the default script run order
    read_idx = order.index( 'read_design.tcl' ) # find read_design.tcl
    order.insert( read_idx + 1, 'genlibdb-constraints.tcl' ) # add here
    genlibdb.update_params( { 'order': order } )

    # Pwr aware steps:
    if pwr_aware:
        # init node
        order = init.get_param('order')
        read_idx = order.index( 'floorplan.tcl' ) # find floorplan.tcl
        order.insert( read_idx + 1, 'mem-load-upf.tcl' ) # add here
        order.insert( read_idx + 2, 'pd-mem-floorplan.tcl' ) # add here
        order.insert( read_idx + 3, 'mem-add-endcaps-welltaps-setup.tcl' ) # add here
        order.insert( read_idx + 4, 'pd-add-endcaps-welltaps.tcl' ) # add here
        order.insert( read_idx + 5, 'mem-power-switches-setup.tcl') # add here
        order.insert( read_idx + 6, 'add-power-switches.tcl' ) # add here
        order.remove('add-endcaps-welltaps.tcl')
        order.append('check-clamp-logic-structure.tcl')
        init.update_params( { 'order': order } )

        # power node
        order = power.get_param('order')
        order.insert( 0, 'pd-globalnetconnect.tcl' ) # add here
        order.remove('globalnetconnect.tcl')
        power.update_params( { 'order': order } )

        # place node
        order = place.get_param('order')
        read_idx = order.index( 'main.tcl' ) # find main.tcl
        order.insert(read_idx + 1, 'add-aon-tie-cells.tcl')
        order.insert(read_idx - 1, 'place-dont-use-constraints.tcl')
        order.append('check-clamp-logic-structure.tcl')
        place.update_params( { 'order': order } )

        # cts node
        order = cts.get_param('order')
        order.insert( 0, 'conn-aon-cells-vdd.tcl' ) # add here
        order.append('check-clamp-logic-structure.tcl')
        cts.update_params( { 'order': order } )

        # postcts_hold node
        order = postcts_hold.get_param('order')
        order.insert( 0, 'conn-aon-cells-vdd.tcl' ) # add here
        order.append('check-clamp-logic-structure.tcl')
        postcts_hold.update_params( { 'order': order } )

        # route node
        order = route.get_param('order')
        order.insert( 0, 'conn-aon-cells-vdd.tcl' ) # add here
        order.append('check-clamp-logic-structure.tcl')
        route.update_params( { 'order': order } )

        # postroute node
        order = postroute.get_param('order')
        order.insert( 0, 'conn-aon-cells-vdd.tcl' ) # add here
        order.append('check-clamp-logic-structure.tcl')
        postroute.update_params( { 'order': order } )

        # postroute-hold node
        order = postroute_hold.get_param('order')
        order.insert( 0, 'conn-aon-cells-vdd.tcl' ) # add here
        postroute_hold.update_params( { 'order': order } )

        # signoff node
        order = signoff.get_param('order')
        order.insert( 0, 'conn-aon-cells-vdd.tcl' ) # add here
        order.append('check-clamp-logic-structure.tcl')
        read_idx = order.index( 'generate-results.tcl' ) # find generate_results.tcl
        order.insert(read_idx + 1, 'pd-generate-lvs-netlist.tcl')
        signoff.update_params( { 'order': order } )

    return g
# Build the graph when this construct script is executed directly by mflowgen.
if __name__ == '__main__':
    g = construct()
    # g.plot()
|
StanfordAHA/garnet | mflowgen/glb_top/synopsys-ptpx-gl/parse_report.py | import argparse
import pandas as pd
import re
def gen_power_df(filename: str, instances: list):
    """Parse a power report into a per-instance power DataFrame.

    Only the table between the first two lines starting with
    '---------------------' is parsed.  Each data row is expected to look
    like ``<cell_name> <internal> <switching> <leakage> <total> ...``.
    A row is accumulated into the first entry of ``instances`` whose name
    matches the row's cell name (regex search, so a row is counted at most
    once); rows that match nothing are accumulated into the 'misc' row,
    which the caller must include in ``instances``.

    Args:
        filename: path to the power report file.
        instances: row labels for the result; should contain 'misc'.

    Returns:
        pandas.DataFrame of floats indexed by ``instances`` with columns
        ['internal', 'switching', 'leakage', 'total'].
    """
    power_columns = ['internal', 'switching', 'leakage', 'total']
    power_df = pd.DataFrame(0.0, index=instances, columns=power_columns)
    with open(filename, 'r') as f:
        in_table = False
        for line in f:
            if not in_table:
                # The table body starts after the first separator line.
                if line.startswith('---------------------'):
                    in_table = True
                continue
            if line.startswith('---------------------'):
                break  # second separator line ends the table
            fields = line.split()
            cell_name = fields[0]
            # Convert the four power columns once, up front (the original
            # redundantly re-split the line in the unmatched branch).
            power_values = list(map(float, fields[1:5]))
            for inst in instances:
                # NOTE(review): instance names are used as regex patterns;
                # metacharacters in a name would change matching behavior.
                if re.search(inst, cell_name):
                    power_df.loc[inst] += power_values
                    break  # inst should be included in only one index
            else:
                # If no instance matched, add it to the 'misc' catch-all.
                power_df.loc['misc'] += power_values
    return power_df
def main():
    """CLI entry point: parse a power report and dump per-module power to CSV."""
    parser = argparse.ArgumentParser(description='Power Report Parser')
    parser.add_argument('--filename', '-f', type=str, default="")
    parser.add_argument('--instances', '-i', nargs='+', default=[])
    parser.add_argument('--csv', '-c', type=str, default="power_by_module.csv")
    args = parser.parse_args()
    # gen_power_df needs a 'misc' catch-all row; append it if missing.
    # Bug fix: previously `instances` was only assigned inside the
    # `if 'misc' not in args.instances` branch, so passing 'misc'
    # explicitly raised NameError on the gen_power_df call below.
    instances = args.instances
    if 'misc' not in instances:
        instances = instances + ['misc']
    power_df = gen_power_df(args.filename, instances)
    power_df.to_csv(args.csv)
if __name__ == "__main__":
main()
|
StanfordAHA/garnet | global_buffer/design/glb_core_pcfg_dma.py | <filename>global_buffer/design/glb_core_pcfg_dma.py
from kratos import Generator, always_ff, always_comb, posedge, const, clog2, resize
from global_buffer.design.pipeline import Pipeline
from global_buffer.design.global_buffer_parameter import GlobalBufferParams
from global_buffer.design.glb_header import GlbHeader
import math
class GlbCorePcfgDma(Generator):
    """Parallel-configuration (PCFG) DMA engine for one global buffer core.

    On a start pulse (when DMA mode is enabled), streams ``num_cfg``
    consecutive words from GLB memory starting at ``start_addr`` and
    forwards each read-back word to the CGRA as a configuration write
    (addr/data split out of the read data).  Emits a done pulse after a
    configurable network latency.

    Note: the ``@always_ff``/``@always_comb`` methods are compiled from
    their Python AST by kratos, so they carry only '#' comments.
    """

    def __init__(self, _params: GlobalBufferParams):
        """Declare ports, local signals, and register all RTL processes.

        Args:
            _params: global buffer configuration parameters.
        """
        super().__init__("glb_core_pcfg_dma")
        self._params = _params
        self.header = GlbHeader(self._params)

        # Ports
        self.clk = self.clock("clk")
        self.reset = self.reset("reset")
        self.cgra_cfg_pcfg = self.output(
            "cgra_cfg_pcfg", self.header.cgra_cfg_t)
        self.rdrq_packet = self.output(
            "rdrq_packet", self.header.rdrq_packet_t)
        self.rdrs_packet = self.input("rdrs_packet", self.header.rdrs_packet_t)
        self.cfg_pcfg_dma_ctrl_mode = self.input("cfg_pcfg_dma_ctrl_mode", 1)
        self.cfg_pcfg_dma_header = self.input(
            "cfg_pcfg_dma_header", self.header.cfg_pcfg_dma_header_t)
        self.cfg_pcfg_network_latency = self.input(
            "cfg_pcfg_network_latency", self._params.latency_width)
        self.pcfg_start_pulse = self.input("pcfg_start_pulse", 1)
        self.pcfg_done_pulse = self.output("pcfg_done_pulse", 1)

        # localparam
        # Address increment per word, in bytes.
        self.bank_data_byte_offset = math.ceil(
            self._params.bank_data_width / 8)
        # Fixed round-trip latency (in cycles) from read request to cached
        # read data; used to time the done pulse.
        self.default_latency = (self._params.glb_switch_pipeline_depth
                                + self._params.glb_bank_memory_pipeline_depth
                                + self._params.sram_gen_pipeline_depth
                                + self._params.sram_gen_output_pipeline_depth
                                + 1  # SRAM macro read latency
                                + self._params.glb_switch_pipeline_depth
                                + 2  # FIXME: Unnecessary delay of moving back and forth btw switch and router
                                + 1  # dma cache register delay
                                )

        # local variables (_r: registered, _next/_w: combinational)
        self.is_running_r = self.var("is_running_r", 1)
        self.start_pulse_r = self.var("start_pulse_r", 1)
        self.done_pulse_r = self.var("done_pulse_r", 1)
        self.num_cfg_cnt_r = self.var(
            "num_cfg_cnt_r", self._params.max_num_cfg_width)
        self.num_cfg_cnt_next = self.var(
            "num_cfg_cnt_next", self._params.max_num_cfg_width)
        self.addr_r = self.var("addr_r", self._params.glb_addr_width)
        self.addr_next = self.var("addr_next", self._params.glb_addr_width)
        self.rdrq_packet_rd_en_r = self.var("rdrq_packet_rd_en_r", 1)
        self.rdrq_packet_rd_en_next = self.var("rdrq_packet_rd_en_next", 1)
        self.rdrq_packet_rd_addr_r = self.var(
            "rdrq_packet_rd_addr_r", self._params.glb_addr_width)
        self.rdrq_packet_rd_addr_next = self.var(
            "rdrq_packet_rd_addr_next", self._params.glb_addr_width)
        self.rdrs_packet_rd_data_r = self.var(
            "rdrs_packet_rd_data_r", self._params.bank_data_width)
        self.rdrs_packet_rd_data_valid_r = self.var(
            "rdrs_packet_rd_data_valid_r", 1)

        # Add always statements
        self.add_always(self.start_pulse_ff)
        self.add_always(self.done_pulse_ff)
        self.add_always(self.is_running_ff)
        self.add_always(self.adgn_logic)
        self.add_always(self.adgn_ff)
        self.add_always(self.rdrq_packet_logic)
        self.add_always(self.rdrq_packet_ff)
        self.add_always(self.rdrs_packet_ff)
        self.assign_rdrq_packet()
        self.assign_cgra_cfg_output()
        self.add_pcfg_dma_done_pulse_pipeline()

    # Latch the external start pulse only when DMA mode is on and no
    # transfer is already running.
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def start_pulse_ff(self):
        if self.reset:
            self.start_pulse_r = 0
        elif ((self.cfg_pcfg_dma_ctrl_mode == 1) & (~self.is_running_r) & (self.pcfg_start_pulse)):
            self.start_pulse_r = 1
        else:
            self.start_pulse_r = 0

    # Pulse done for one cycle when the running transfer's counter expires.
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def done_pulse_ff(self):
        if self.reset:
            self.done_pulse_r = 0
        elif ((self.is_running_r) & (self.num_cfg_cnt_r == 0)):
            self.done_pulse_r = 1
        else:
            self.done_pulse_r = 0

    # Transfer-in-progress flag: set on start, cleared when the counter
    # reaches zero.
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def is_running_ff(self):
        if self.reset:
            self.is_running_r = 0
        elif self.start_pulse_r:
            self.is_running_r = 1
        elif ((self.is_running_r == 1) & (self.num_cfg_cnt_r == 0)):
            self.is_running_r = 0

    # TODO: We can merge adgn_logic, adgn_ff, rdrq_packet_logic, rdrq_packet_ff
    # Next-state logic for the word counter and read address: load from the
    # DMA header on start, then decrement/advance one word per cycle.
    @always_comb
    def adgn_logic(self):
        if self.start_pulse_r:
            self.num_cfg_cnt_next = self.cfg_pcfg_dma_header['num_cfg']
            self.addr_next = self.cfg_pcfg_dma_header['start_addr']
        elif ((self.is_running_r == 1) & (self.num_cfg_cnt_r > 0)):
            self.num_cfg_cnt_next = self.num_cfg_cnt_r - 1
            self.addr_next = self.addr_r + self.bank_data_byte_offset
        else:
            self.num_cfg_cnt_next = 0
            self.addr_next = 0

    # Registers for the counter/address next-state values.
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def adgn_ff(self):
        if self.reset:
            self.num_cfg_cnt_r = 0
            self.addr_r = 0
        else:
            self.num_cfg_cnt_r = self.num_cfg_cnt_next
            self.addr_r = self.addr_next

    # Issue one read request per remaining word while the DMA is running.
    @always_comb
    def rdrq_packet_logic(self):
        if (self.is_running_r & (self.num_cfg_cnt_r > 0)):
            self.rdrq_packet_rd_en_next = 1
            self.rdrq_packet_rd_addr_next = self.addr_r
        else:
            self.rdrq_packet_rd_en_next = 0
            self.rdrq_packet_rd_addr_next = 0

    # Registers for the outgoing read-request fields.
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def rdrq_packet_ff(self):
        if self.reset:
            self.rdrq_packet_rd_en_r = 0
            self.rdrq_packet_rd_addr_r = 0
        else:
            self.rdrq_packet_rd_en_r = self.rdrq_packet_rd_en_next
            self.rdrq_packet_rd_addr_r = self.rdrq_packet_rd_addr_next

    # Cache the read-response word for one cycle when it arrives valid.
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def rdrs_packet_ff(self):
        if self.reset:
            self.rdrs_packet_rd_data_r = 0
            self.rdrs_packet_rd_data_valid_r = 0
        elif self.rdrs_packet['rd_data_valid']:
            self.rdrs_packet_rd_data_r = self.rdrs_packet['rd_data']
            self.rdrs_packet_rd_data_valid_r = 1
        else:
            self.rdrs_packet_rd_data_r = 0
            self.rdrs_packet_rd_data_valid_r = 0

    def assign_rdrq_packet(self):
        """Wire the registered read-request fields to the output packet."""
        self.wire(self.rdrq_packet['rd_en'], self.rdrq_packet_rd_en_r)
        self.wire(self.rdrq_packet['rd_addr'], self.rdrq_packet_rd_addr_r)

    def assign_cgra_cfg_output(self):
        """Drive the CGRA config bus from the cached read-back word.

        The read word packs {addr, data}; addr occupies the upper
        cgra_cfg_addr_width bits above the data field.  Every valid word
        becomes a config write (rd_en is tied low).
        """
        self.wire(self.cgra_cfg_pcfg['rd_en'], 0)
        self.wire(self.cgra_cfg_pcfg['wr_en'],
                  self.rdrs_packet_rd_data_valid_r)
        self.wire(self.cgra_cfg_pcfg['addr'],
                  self.rdrs_packet_rd_data_r[self._params.cgra_cfg_data_width + self._params.cgra_cfg_addr_width - 1,
                                             self._params.cgra_cfg_data_width])
        self.wire(self.cgra_cfg_pcfg['data'],
                  self.rdrs_packet_rd_data_r[self._params.cgra_cfg_data_width - 1, 0])

    def add_pcfg_dma_done_pulse_pipeline(self):
        """Delay the done pulse through a tap-selectable pipeline.

        The pipeline is built at the maximum possible latency; the output
        tap is selected at runtime from the configured network latency plus
        the fixed default latency and per-tile offset.
        """
        maximum_latency = 3 * self._params.num_glb_tiles + self.default_latency
        latency_width = clog2(maximum_latency)
        self.done_pulse_d_arr = self.var(
            "done_pulse_d_arr", 1, size=maximum_latency, explicit_array=True)
        self.done_pulse_pipeline = Pipeline(width=1,
                                            depth=maximum_latency,
                                            flatten_output=True)
        self.add_child("done_pulse_pipeline",
                       self.done_pulse_pipeline,
                       clk=self.clk,
                       clk_en=const(1, 1),
                       reset=self.reset,
                       in_=self.done_pulse_r,
                       out_=self.done_pulse_d_arr)
        # Select the pipeline tap that matches the configured latency.
        self.wire(self.pcfg_done_pulse,
                  self.done_pulse_d_arr[resize(self.cfg_pcfg_network_latency, latency_width)
                                        + self.default_latency
                                        + self._params.num_glb_tiles])
|
StanfordAHA/garnet | global_buffer/design/glb_bank_memory.py | from kratos import Generator, always_comb, concat, always_ff, posedge, const
from global_buffer.design.global_buffer_parameter import GlobalBufferParams
from global_buffer.design.pipeline import Pipeline
from global_buffer.design.glb_bank_sram_gen import GlbBankSramGen
from global_buffer.design.glb_bank_sram_stub import GlbBankSramStub
class GlbBankMemory(Generator):
def __init__(self, _params: GlobalBufferParams):
super().__init__("glb_bank_memory")
self._params = _params
self.clk = self.clock("clk")
self.reset = self.reset("reset")
self.ren = self.input("ren", 1)
self.wen = self.input("wen", 1)
self.addr = self.input("addr", self._params.bank_addr_width)
self.data_in = self.input("data_in", self._params.bank_data_width)
self.data_in_bit_sel = self.input(
"data_in_bit_sel", self._params.bank_data_width)
self.data_out = self.output("data_out", self._params.bank_data_width)
# local variables
self.sram_wen = self.var("sram_wen", 1)
self.sram_wen_d = self.var("sram_wen_d", 1)
self.sram_ren = self.var("sram_ren", 1)
self.sram_ren_d = self.var("sram_ren_d", 1)
self.sram_ren_d_vld = self.var("sram_ren_d_vld", 1)
self.sram_cen = self.var("sram_cen", 1)
self.sram_cen_d = self.var("sram_cen_d", 1)
self.sram_addr = self.var(
"sram_addr", self._params.bank_addr_width - self._params.bank_byte_offset)
self.sram_addr_d = self.var(
"sram_addr_d", self._params.bank_addr_width - self._params.bank_byte_offset)
self.sram_data_in = self.var(
"sram_data_in", self._params.bank_data_width)
self.sram_data_in_d = self.var(
"sram_data_in_d", self._params.bank_data_width)
self.sram_data_in_bit_sel = self.var(
"sram_data_in_bit_sel", self._params.bank_data_width)
self.sram_data_in_bit_sel_d = self.var(
"sram_data_in_bit_sel_d", self._params.bank_data_width)
self.sram_data_out = self.var(
"sram_data_out", self._params.bank_data_width)
self.data_out_w = self.var(
"data_out_w", self._params.bank_data_width)
self.data_out_r = self.var(
"data_out_r", self._params.bank_data_width)
self.wire(self.data_out, self.data_out_w)
self.add_glb_bank_memory_pipeline()
self.add_glb_bank_sram_gen()
self.add_always(self.sram_ctrl_logic)
self.add_always(self.data_out_ff)
self.add_always(self.data_out_logic)
def add_glb_bank_memory_pipeline(self):
sram_signals_in = concat(self.sram_ren, self.sram_wen, self.sram_cen,
self.sram_addr, self.sram_data_in, self.sram_data_in_bit_sel)
sram_signals_out = concat(self.sram_ren_d, self.sram_wen_d, self.sram_cen_d,
self.sram_addr_d, self.sram_data_in_d, self.sram_data_in_bit_sel_d)
sram_signals_pipeline = Pipeline(width=sram_signals_in.width,
depth=self._params.glb_bank_memory_pipeline_depth)
self.add_child(f"sram_signals_pipeline",
sram_signals_pipeline,
clk=self.clk,
clk_en=const(1, 1),
reset=self.reset,
in_=sram_signals_in,
out_=sram_signals_out)
self.sram_ren_rsp_pipeline = Pipeline(width=1,
depth=(self._params.sram_gen_pipeline_depth
+ self._params.sram_gen_output_pipeline_depth
+ 1))
self.add_child("sram_ren_rsp_pipeline",
self.sram_ren_rsp_pipeline,
clk=self.clk,
clk_en=const(1, 1),
reset=self.reset,
in_=self.sram_ren_d,
out_=self.sram_ren_d_vld)
def add_glb_bank_sram_gen(self):
if self._params.is_sram_stub:
self.glb_bank_sram_stub = GlbBankSramStub(addr_width=(self._params.bank_addr_width
- self._params.bank_byte_offset),
data_width=self._params.bank_data_width,
_params=self._params)
self.add_child("glb_bank_sram_stub",
self.glb_bank_sram_stub,
CLK=self.clk,
RESET=self.reset,
CEB=(~self.sram_cen_d),
WEB=(~self.sram_wen_d),
A=self.sram_addr_d,
D=self.sram_data_in_d,
BWEB=(~self.sram_data_in_bit_sel_d),
Q=self.sram_data_out)
else:
self.glb_bank_sram_gen = GlbBankSramGen(addr_width=(self._params.bank_addr_width
- self._params.bank_byte_offset),
sram_macro_width=self._params.bank_data_width,
sram_macro_depth=self._params.sram_macro_depth,
_params=self._params)
self.add_child("glb_bank_sram_gen",
self.glb_bank_sram_gen,
CLK=self.clk,
RESET=self.reset,
CEB=(~self.sram_cen_d),
WEB=(~self.sram_wen_d),
A=self.sram_addr_d,
D=self.sram_data_in_d,
BWEB=(~self.sram_data_in_bit_sel_d),
Q=self.sram_data_out)
    @always_comb
    def sram_ctrl_logic(self):
        # Drive the SRAM request directly from the bank interface; the chip
        # enable is asserted for either a write or a read.
        self.sram_wen = self.wen
        self.sram_ren = self.ren
        self.sram_cen = self.wen | self.ren
        # Word address: drop the byte-offset bits from the bank address.
        self.sram_addr = self.addr[self._params.bank_addr_width - 1,
                                   self._params.bank_byte_offset]
        self.sram_data_in = self.data_in
        self.sram_data_in_bit_sel = self.data_in_bit_sel
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def data_out_ff(self):
        # Hold the last driven output so data_out stays stable between reads.
        if self.reset:
            self.data_out_r = 0
        else:
            self.data_out_r = self.data_out_w
    @always_comb
    def data_out_logic(self):
        # Forward fresh SRAM data while the delayed read-valid is high,
        # otherwise replay the registered value.
        if self.sram_ren_d_vld:
            self.data_out_w = self.sram_data_out
        else:
            self.data_out_w = self.data_out_r
|
StanfordAHA/garnet | global_buffer/design/glb_core_sram_cfg_ctrl.py | <gh_stars>10-100
from kratos import Generator, always_ff, always_comb, posedge
from global_buffer.design.global_buffer_parameter import GlobalBufferParams
from global_buffer.design.glb_cfg_ifc import GlbConfigInterface
from global_buffer.design.glb_header import GlbHeader
class GlbCoreSramCfgCtrl(Generator):
    """Per-tile switch on the SRAM configuration chain.

    West-side requests are pipelined one cycle toward the east side. When the
    tile-ID field of the address matches ``glb_tile_id``, the request is also
    fanned out to this tile's banks; bank read responses are muxed with the
    east-side response onto the west-side slave interface.
    """

    def __init__(self, _params: GlobalBufferParams):
        super().__init__("glb_core_sram_cfg_ctrl")
        self._params = _params
        self.header = GlbHeader(self._params)
        self.clk = self.clock("clk")
        self.reset = self.reset("reset")
        # ID used to decode whether a request targets this tile.
        self.glb_tile_id = self.input(
            "glb_tile_id", self._params.tile_sel_addr_width)
        self.sram_cfg_ifc = GlbConfigInterface(
            addr_width=self._params.glb_addr_width, data_width=self._params.axi_data_width)
        self.bank_cfg_ifc = GlbConfigInterface(
            addr_width=self._params.bank_addr_width, data_width=self._params.axi_data_width)
        # config port
        self.if_sram_cfg_est_m = self.interface(
            self.sram_cfg_ifc.master, "if_sram_cfg_est_m", is_port=True)
        self.if_sram_cfg_wst_s = self.interface(
            self.sram_cfg_ifc.slave, "if_sram_cfg_wst_s", is_port=True)
        # bank_config port
        self.if_sram_cfg_core2bank_m = []
        for i in range(self._params.banks_per_tile):
            self.if_sram_cfg_core2bank_m.append(self.interface(
                self.bank_cfg_ifc.master, f"if_sram_cfg_core2bank_m_{i}", is_port=True))
        self.tile_id_match = self.var("tile_id_match", 1)
        self.bank2core_rd_data_valid_w = self.var(
            "bank2core_rd_data_valid_w", 1, size=self._params.banks_per_tile)
        self.bank2core_rd_data_w = self.var(
            "bank2core_rd_data_w", self._params.axi_data_width, size=self._params.banks_per_tile)
        self.rd_data_valid_w = self.var("rd_data_valid_w", 1)
        self.rd_data_w = self.var("rd_data_w", self._params.axi_data_width)
        self.add_always(self.tile_id_match_logic)
        # One bank-decode always block per bank, parameterized by i.
        for i in range(self._params.banks_per_tile):
            self.add_always(self.if_sram_cfg_core2bank_logic, i=i)
        self.add_always(self.rd_data_mux)
        self.add_always(self.sram_cfg_pipeline)

    @always_comb
    def tile_id_match_logic(self):
        # Compare the tile-select slice of the write address with this tile's
        # ID. NOTE(review): only wr_addr is decoded here; rd_addr decoding is
        # handled per-bank below.
        self.tile_id_match = (self.if_sram_cfg_wst_s.wr_addr[self._params.bank_addr_width
                                                             + self._params.bank_sel_addr_width
                                                             + self._params.tile_sel_addr_width - 1,
                                                             self._params.bank_addr_width
                                                             + self._params.bank_sel_addr_width]
                              == self.glb_tile_id)

    @always_comb
    def if_sram_cfg_core2bank_logic(self, i):
        # Forward the request to bank i only when the tile matches and the
        # bank-select slice of the address equals i; otherwise drive idle.
        if self.tile_id_match:
            self.if_sram_cfg_core2bank_m[i].wr_en = ((self.if_sram_cfg_wst_s.wr_addr[self._params.bank_addr_width
                                                                                     + self._params.bank_sel_addr_width
                                                                                     - 1, self._params.bank_addr_width]
                                                      == i) & self.if_sram_cfg_wst_s.wr_en)
            self.if_sram_cfg_core2bank_m[i].wr_addr = self.if_sram_cfg_wst_s.wr_addr[self._params.bank_addr_width - 1,
                                                                                     0]
            self.if_sram_cfg_core2bank_m[i].wr_data = self.if_sram_cfg_wst_s.wr_data
            self.if_sram_cfg_core2bank_m[i].rd_en = ((self.if_sram_cfg_wst_s.rd_addr[self._params.bank_addr_width
                                                                                     + self._params.bank_sel_addr_width
                                                                                     - 1,
                                                                                     self._params.bank_addr_width] == i)
                                                     & self.if_sram_cfg_wst_s.rd_en)
            self.if_sram_cfg_core2bank_m[i].rd_addr = self.if_sram_cfg_wst_s.rd_addr[self._params.bank_addr_width - 1,
                                                                                     0]
        else:
            self.if_sram_cfg_core2bank_m[i].wr_en = 0
            self.if_sram_cfg_core2bank_m[i].wr_addr = 0
            self.if_sram_cfg_core2bank_m[i].wr_data = 0
            self.if_sram_cfg_core2bank_m[i].rd_en = 0
            self.if_sram_cfg_core2bank_m[i].rd_addr = 0

    @always_comb
    def rd_data_mux(self):
        # Default to the east-side response; a valid response from any local
        # bank overrides it (later banks take priority if several are valid).
        self.rd_data_w = self.if_sram_cfg_est_m.rd_data
        self.rd_data_valid_w = self.if_sram_cfg_est_m.rd_data_valid
        for i in range(self._params.banks_per_tile):
            if self.if_sram_cfg_core2bank_m[i].rd_data_valid == 1:
                self.rd_data_w = self.if_sram_cfg_core2bank_m[i].rd_data
                self.rd_data_valid_w = self.if_sram_cfg_core2bank_m[i].rd_data_valid

    @always_ff((posedge, "clk"), (posedge, "reset"))
    def sram_cfg_pipeline(self):
        # One pipeline stage in each direction: requests west -> east,
        # responses (muxed) -> west.
        if self.reset:
            self.if_sram_cfg_est_m.wr_en = 0
            self.if_sram_cfg_est_m.wr_addr = 0
            self.if_sram_cfg_est_m.wr_data = 0
            self.if_sram_cfg_est_m.rd_en = 0
            self.if_sram_cfg_est_m.rd_addr = 0
            self.if_sram_cfg_wst_s.rd_data = 0
            self.if_sram_cfg_wst_s.rd_data_valid = 0
        else:
            self.if_sram_cfg_est_m.wr_en = self.if_sram_cfg_wst_s.wr_en
            self.if_sram_cfg_est_m.wr_addr = self.if_sram_cfg_wst_s.wr_addr
            self.if_sram_cfg_est_m.wr_data = self.if_sram_cfg_wst_s.wr_data
            self.if_sram_cfg_est_m.rd_en = self.if_sram_cfg_wst_s.rd_en
            self.if_sram_cfg_est_m.rd_addr = self.if_sram_cfg_wst_s.rd_addr
            self.if_sram_cfg_wst_s.rd_data = self.rd_data_w
            self.if_sram_cfg_wst_s.rd_data_valid = self.rd_data_valid_w
|
StanfordAHA/garnet | global_buffer/gen_global_buffer_rdl.py | <gh_stars>10-100
from abc import ABC
from kratos import clog2
import os
class RdlNode(ABC):
    """Base node of the SystemRDL component tree.

    Holds a component name, a description, a list of RDL property strings
    (emitted verbatim as ``<prop>;``), and child nodes.
    """

    def __init__(self, name="", desc="", property=None):
        self.name = name
        self._desc = desc
        # property=None avoids the shared-mutable-default pitfall: with the
        # previous `property=[]` default, every node built without an explicit
        # list shared ONE list, so appending to one node's properties mutated
        # all of them.
        self.property = [] if property is None else property
        self.children = []

    @property
    def desc(self):
        """Human-readable description emitted as the RDL ``desc`` property."""
        return self._desc

    @desc.setter
    def desc(self, value):
        self._desc = value
class RdlNonLeafNode(RdlNode):
    """RDL component that may contain children and be replicated ``size`` times."""

    def __init__(self, name="", desc="", size=1, property=None):
        # Normalize here (not just in the base) so this class is safe even
        # against the base's historical mutable `property=[]` default.
        super().__init__(name=name, desc=desc,
                         property=[] if property is None else property)
        self.size = size

    def add_child(self, child):
        """Append a single child component."""
        self.children.append(child)

    def add_children(self, children):
        """Append a list of child components, preserving order."""
        self.children += children
class AddrMap(RdlNonLeafNode):
    """Top-level SystemRDL ``addrmap`` component with the project defaults."""
    type = "addrmap"

    def __init__(self, name, size=1):
        super().__init__(name=name, size=size)
        # 32-bit register and access widths, matching the defaults below.
        self.regwidth = 32
        self.accesswidth = 32
        self.property = [
            "addressing = compact",
            "default regwidth = 32",
            "default sw = rw",
            "default hw = r",
        ]
class RegFile(RdlNonLeafNode):
    """SystemRDL ``regfile`` component: a group of registers."""
    type = "regfile"

    def __init__(self, name, size=1):
        super().__init__(name=name, size=size)
class Reg(RdlNonLeafNode):
    """SystemRDL ``reg`` component: a single register holding fields."""
    type = "reg"

    def __init__(self, name, size=1):
        super().__init__(name=name, size=size)
class Field(RdlNode):
    """SystemRDL ``field`` leaf component with a bit width."""
    type = "field"

    def __init__(self, name, width=1, property=None):
        # Normalize the property list here so this class does not depend on
        # the base's historical mutable `property=[]` default.
        super().__init__(name=name, property=[] if property is None else property)
        self.width = width
class Rdl:
    """Serializer that renders an ``RdlNode`` tree as SystemRDL text."""

    def __init__(self, top):
        self.top = top

    def dump_rdl(self, filename):
        """Write the RDL text for the tree rooted at ``self.top`` to *filename*."""
        if not isinstance(self.top, AddrMap):
            raise Exception("Top RdlNode should be AddrMap Class")
        with open(filename, 'w') as f:
            f.write(self._get_rdl_node_expr(self.top))

    def _get_rdl_node_expr(self, rdl_node, level=0):
        """Recursively render *rdl_node*, indented by *level* tabs.

        A non-leaf node with size > 1 is elaborated once per element with an
        ``_<i>`` suffix on its instance name; every other node is emitted
        once under its own name. (The original duplicated the whole emission
        body across these two cases; it is now shared via _emit_instance.)
        """
        if isinstance(rdl_node, RdlNonLeafNode) and rdl_node.size > 1:
            elab_names = [f"{rdl_node.name}_{i}" for i in range(rdl_node.size)]
        else:
            elab_names = [rdl_node.name]
        expr = ""
        for elab_name in elab_names:
            expr += self._emit_instance(rdl_node, elab_name, level)
        return expr

    def _emit_instance(self, rdl_node, elab_name, level):
        """Render one elaborated instance of *rdl_node* named *elab_name*."""
        indent = "\t" * level
        body_indent = "\t" * (level + 1)
        # AddrMap carries its type name in the header; others are anonymous
        # definitions named at the closing brace.
        if isinstance(rdl_node, AddrMap):
            expr = indent + f"{rdl_node.type} {rdl_node.name}{{\n"
        else:
            expr = indent + f"{rdl_node.type} {{\n"
        expr += body_indent + f"name = \"{rdl_node.name}\";\n"
        if rdl_node.desc:
            expr += body_indent + f"desc = \"{rdl_node.desc}\";\n"
        for e in rdl_node.property:
            expr += body_indent + f"{e};\n"
        # Fields are leaves; everything else recurses into its children.
        if not isinstance(rdl_node, Field):
            for child in rdl_node.children:
                expr += self._get_rdl_node_expr(child, level + 1)
        if isinstance(rdl_node, Field):
            expr += indent + f"}} {elab_name}[{rdl_node.width}] = 0;\n"
        elif not isinstance(rdl_node, AddrMap):
            expr += indent + f"}} {elab_name};\n"
        else:
            expr += indent + "};\n"
        expr += "\n"
        return expr
def _make_dma_ctrl_reg(name, params):
    """Build a store/load DMA control register (mode, use_valid, data_mux,
    num_repeat); shared by the st and ld variants, which were identical."""
    ctrl_r = Reg(name)
    ctrl_r.add_child(Field("mode", 2))
    ctrl_r.add_child(Field("use_valid", 1))
    ctrl_r.add_child(Field("data_mux", 2))
    ctrl_r.add_child(Field("num_repeat", clog2(params.queue_depth) + 1))
    return ctrl_r


def _make_dma_header_rf(name, params):
    """Build a store/load DMA header register file (dim, start_addr,
    cycle_start_addr, range/stride/cycle_stride per loop level)."""
    # With queue_depth == 1 the register file is still suffixed "_0" so the
    # generated register names stay stable across configurations.
    if params.queue_depth == 1:
        rf = RegFile(f"{name}_0", size=params.queue_depth)
    else:
        rf = RegFile(name, size=params.queue_depth)
    # dim reg
    dim_r = Reg("dim")
    dim_r.add_child(Field("dim", width=clog2(params.loop_level) + 1))
    rf.add_child(dim_r)
    # start_addr reg
    start_addr_r = Reg("start_addr")
    start_addr_r.add_child(Field("start_addr", width=params.glb_addr_width))
    rf.add_child(start_addr_r)
    # cycle_start_addr reg
    cycle_start_addr_r = Reg("cycle_start_addr")
    cycle_start_addr_r.add_child(Field("cycle_start_addr", width=params.glb_addr_width))
    rf.add_child(cycle_start_addr_r)
    # per-loop-level range/stride/cycle_stride regs
    range_r = Reg("range", size=params.loop_level)
    range_r.add_child(Field("range", width=params.axi_data_width))
    stride_r = Reg("stride", size=params.loop_level)
    stride_r.add_child(Field("stride", width=params.axi_data_width))
    cycle_stride_r = Reg("cycle_stride", size=params.loop_level)
    cycle_stride_r.add_child(Field("cycle_stride", width=params.axi_data_width))
    rf.add_child(range_r)
    rf.add_child(stride_r)
    rf.add_child(cycle_stride_r)
    return rf


def gen_global_buffer_rdl(name, params):
    """Build the SystemRDL address map for the global buffer CSRs.

    Children are added in the same order as before (addresses are allocated
    compactly in insertion order): data network, pcfg network, store DMA
    ctrl+header, load DMA ctrl+header, pcfg DMA ctrl+header. Returns an
    ``Rdl`` wrapper around the top-level ``AddrMap`` named *name*.
    """
    addr_map = AddrMap(name)
    # Data Network Ctrl Register
    data_network_ctrl = Reg("data_network")
    data_network_ctrl.add_children([Field("tile_connected", 1),
                                    Field("latency", params.latency_width)])
    addr_map.add_child(data_network_ctrl)
    # Pcfg Network Ctrl Register
    pcfg_network_ctrl = Reg("pcfg_network")
    pcfg_network_ctrl.add_children([Field("tile_connected", 1),
                                    Field("latency", params.latency_width)])
    addr_map.add_child(pcfg_network_ctrl)
    # Store DMA ctrl + header (structure shared with load DMA)
    addr_map.add_child(_make_dma_ctrl_reg("st_dma_ctrl", params))
    addr_map.add_child(_make_dma_header_rf("st_dma_header", params))
    # Load DMA ctrl + header
    addr_map.add_child(_make_dma_ctrl_reg("ld_dma_ctrl", params))
    addr_map.add_child(_make_dma_header_rf("ld_dma_header", params))
    # Pcfg DMA Ctrl
    pcfg_dma_ctrl_r = Reg("pcfg_dma_ctrl")
    pcfg_dma_ctrl_r.add_child(Field("mode", 1))
    addr_map.add_child(pcfg_dma_ctrl_r)
    # Pcfg DMA Header RegFile
    pcfg_dma_header_rf = RegFile("pcfg_dma_header")
    start_addr_r = Reg("start_addr")
    start_addr_r.add_child(Field("start_addr", width=params.glb_addr_width))
    pcfg_dma_header_rf.add_child(start_addr_r)
    num_cfg_r = Reg("num_cfg")
    num_cfg_r.add_child(Field("num_cfg", width=params.max_num_cfg_width))
    pcfg_dma_header_rf.add_child(num_cfg_r)
    addr_map.add_child(pcfg_dma_header_rf)
    return Rdl(addr_map)
def gen_glb_pio_wrapper(src_file, dest_file):
    """Copy *src_file* to *dest_file*, dropping every line that contains the
    literal substring ".*" (Verilog implicit wildcard port connections).

    Returns *dest_file*. This replaces the previous
    ``os.system("sed '/\\.\\*/d' ...")`` call, which required a POSIX
    shell/sed, used invalid string escapes, and silently ignored failures.
    """
    with open(src_file) as src:
        kept_lines = [line for line in src if ".*" not in line]
    with open(dest_file, "w") as dest:
        dest.writelines(kept_lines)
    return dest_file
|
StanfordAHA/garnet | global_buffer/design/pipeline.py | from kratos import Generator, always_ff, posedge, resize, clog2, const
class Pipeline(Generator):
    """Parameterized shift-register pipeline.

    ``depth == 0`` degenerates to a plain wire. With ``flatten_output`` every
    stage is exposed on ``out_``; otherwise only the final stage is.
    ``reset_high`` resets every stage to all ones instead of zero (used for
    active-low control signals that must stay deasserted out of reset).
    """

    def __init__(self, width: int, depth: int, flatten_output=False, reset_high=False):
        # Encode the variant in the module name so each configuration is a
        # distinct generated definition.
        name_suffix = ""
        if flatten_output:
            name_suffix += "_array"
        if reset_high:
            name_suffix += "_reset_high"
        super().__init__(f"pipeline_w_{width}_d_{depth}{name_suffix}")
        self.clk = self.clock("clk")
        self.clk_en = self.clock_en("clk_en")
        self.reset = self.reset("reset")
        self.width = width
        self.depth = depth
        self.reset_high = reset_high
        if self.depth == 0:
            # Zero depth: pass-through wire, no registers.
            self.in_ = self.input("in_", self.width)
            self.out_ = self.output("out_", self.width)
            self.wire(self.out_, self.in_)
        else:
            self.depth_width = max(clog2(self.depth), 1)
            self.in_ = self.input("in_", self.width)
            if flatten_output:
                self.out_ = self.output("out_", self.width, size=self.depth)
            else:
                self.out_ = self.output("out_", self.width)
            # NOTE(review): explicit_array is skipped only for the 1x1 case —
            # presumably a kratos array-declaration quirk; confirm before
            # changing.
            if self.depth == 1 and self.width == 1:
                self.pipeline_r = self.var(
                    "pipeline_r", width=self.width, size=self.depth)
            else:
                self.pipeline_r = self.var(
                    "pipeline_r", width=self.width, size=self.depth, explicit_array=True)
            if flatten_output:
                self.wire(self.out_, self.pipeline_r)
            else:
                self.wire(self.out_, self.pipeline_r[self.depth - 1])
            self.add_always(self.pipeline)

    @always_ff((posedge, "clk"), (posedge, "reset"))
    def pipeline(self):
        # Shift in_ through depth stages; reset value is all-ones when
        # reset_high is set, otherwise zero.
        if self.reset:
            for i in range(self.depth):
                if self.reset_high:
                    self.pipeline_r[i] = const(2 ** self.width - 1, self.width)
                else:
                    self.pipeline_r[i] = 0
        elif self.clk_en:
            for i in range(self.depth):
                if i == 0:
                    self.pipeline_r[i] = self.in_
                else:
                    self.pipeline_r[i] = self.pipeline_r[resize(
                        i - 1, self.depth_width)]
|
StanfordAHA/garnet | global_buffer/design/glb_bank_sram_gen.py | <reponame>StanfordAHA/garnet
from kratos import Generator, always_comb, concat, always_ff, posedge, const, resize
from kratos.util import clog2
from global_buffer.design.TS1N16FFCLLSBLVTC2048X64M8SW import TS1N16FFCLLSBLVTC2048X64M8SW
from global_buffer.design.pipeline import Pipeline
from global_buffer.design.global_buffer_parameter import GlobalBufferParams
class GlbBankSramGen(Generator):
    """Composes one logical bank out of multiple SRAM macros.

    The upper address bits select a macro; the active-low chip/write enables
    are demuxed so only the selected macro is driven, and read data from all
    macros is muxed back using a registered copy of the select. Control
    signals go through a reset-high pipeline so idle macros stay disabled
    out of reset.
    """

    def __init__(self, addr_width, sram_macro_width, sram_macro_depth, _params: GlobalBufferParams):
        super().__init__("glb_bank_sram_gen")
        self._params = _params
        self.addr_width = addr_width
        self.sram_macro_width = sram_macro_width
        self.sram_macro_depth = sram_macro_depth
        self.RESET = self.reset("RESET")
        self.CLK = self.clock("CLK")
        # Active-low chip enable / write enable / bit-write enable.
        self.CEB = self.input("CEB", 1)
        self.WEB = self.input("WEB", 1)
        self.BWEB = self.input("BWEB", self.sram_macro_width)
        self.D = self.input("D", self.sram_macro_width)
        self.A = self.input("A", self.addr_width)
        self.Q = self.output("Q", self.sram_macro_width)
        # local parameter
        self.sram_macro_addr_width = clog2(self.sram_macro_depth)
        # Number of macros needed to cover the full address space.
        self.num_sram_macros = 2 ** (self.addr_width
                                     - self.sram_macro_addr_width)
        # local variables
        self.web_demux = self.var("WEB_DEMUX", self.num_sram_macros)
        self.ceb_demux = self.var("CEB_DEMUX", self.num_sram_macros)
        self.a_sram = self.var("A_SRAM", self.sram_macro_addr_width)
        self.q_sram2mux = self.var(
            "Q_SRAM2MUX", self.sram_macro_width, size=self.num_sram_macros)
        self.sram_sel = self.var(
            "SRAM_SEL", self.addr_width - self.sram_macro_addr_width)
        self.Q_w = self.var("Q_w", self.sram_macro_width)
        self.q_sel = self.var(
            "Q_SEL", self.addr_width - self.sram_macro_addr_width)
        # Delayed copies of the control/data signals (post input pipeline).
        self.CEB_d = self.var("CEB_d", 1)
        self.WEB_d = self.var("WEB_d", 1)
        self.BWEB_d = self.var("BWEB_d", self.sram_macro_width)
        self.D_d = self.var("D_d", self.sram_macro_width)
        self.web_demux_d = self.var("WEB_DEMUX_d", self.num_sram_macros)
        self.ceb_demux_d = self.var("CEB_DEMUX_d", self.num_sram_macros)
        self.sram_sel_d = self.var(
            "SRAM_SEL_d", self.addr_width - self.sram_macro_addr_width)
        self.a_sram_d = self.var("A_SRAM_d", self.sram_macro_addr_width)
        # Split the incoming address: macro select (high) / in-macro (low).
        self.wire(self.sram_sel,
                  self.A[self.addr_width - 1, self.sram_macro_addr_width])
        self.wire(self.a_sram, self.A[self.sram_macro_addr_width - 1, 0])
        self.add_pipeline()
        self.add_always(self.q_sel_ff)
        self.add_always(self.sram_ctrl_logic)
        self.add_sram_macro()
        self.wire(self.Q_w, self.q_sram2mux[self.q_sel])

    def add_pipeline(self):
        """Add the input pipelines (active-low controls reset high) and the
        output data pipeline."""
        sram_signals_reset_high_in = concat(
            self.WEB, self.CEB, self.web_demux, self.ceb_demux, self.BWEB)
        sram_signals_reset_high_out = concat(
            self.WEB_d, self.CEB_d, self.web_demux_d, self.ceb_demux_d, self.BWEB_d)
        self.sram_signals_reset_high_pipeline = Pipeline(width=sram_signals_reset_high_in.width,
                                                         depth=self._params.sram_gen_pipeline_depth,
                                                         reset_high=True)
        self.add_child("sram_signals_reset_high_pipeline",
                       self.sram_signals_reset_high_pipeline,
                       clk=self.CLK,
                       clk_en=const(1, 1),
                       reset=self.RESET,
                       in_=sram_signals_reset_high_in,
                       out_=sram_signals_reset_high_out)
        sram_signals_in = concat(self.a_sram, self.sram_sel, self.D)
        sram_signals_out = concat(self.a_sram_d, self.sram_sel_d, self.D_d)
        self.sram_signals_pipeline = Pipeline(width=sram_signals_in.width,
                                              depth=self._params.sram_gen_pipeline_depth)
        self.add_child("sram_signals_pipeline",
                       self.sram_signals_pipeline,
                       clk=self.CLK,
                       clk_en=const(1, 1),
                       reset=self.RESET,
                       in_=sram_signals_in,
                       out_=sram_signals_out)
        self.sram_signals_output_pipeline = Pipeline(width=self.sram_macro_width,
                                                     depth=self._params.sram_gen_output_pipeline_depth)
        self.add_child("sram_signals_output_pipeline",
                       self.sram_signals_output_pipeline,
                       clk=self.CLK,
                       clk_en=const(1, 1),
                       reset=self.RESET,
                       in_=self.Q_w,
                       out_=self.Q)

    @always_ff((posedge, "CLK"), (posedge, "RESET"))
    def q_sel_ff(self):
        # Latch the macro select only on an actual read (CEB low, WEB high)
        # so the read mux keeps pointing at the last-read macro.
        if self.RESET:
            self.q_sel = 0
        else:
            if (self.CEB_d == 0) & (self.WEB_d == 1):
                self.q_sel = self.sram_sel_d

    @always_comb
    def sram_ctrl_logic(self):
        # One-hot (active-low) demux of WEB/CEB to the selected macro; all
        # ones (= all disabled) when the input enable is deasserted.
        if ~self.WEB:
            self.web_demux = ~(const(1, width=self.num_sram_macros) << resize(
                self.sram_sel, self.num_sram_macros))
        else:
            self.web_demux = const(
                2**self.num_sram_macros - 1, self.num_sram_macros)
        if ~self.CEB:
            self.ceb_demux = ~(const(1, width=self.num_sram_macros) << resize(
                self.sram_sel, self.num_sram_macros))
        else:
            self.ceb_demux = const(
                2**self.num_sram_macros - 1, self.num_sram_macros)

    def add_sram_macro(self):
        """Instantiate one TS1N16... macro per address segment.

        NOTE(review): RTSEL/WTSEL are hard-coded timing options for this
        specific macro — confirm against the macro datasheet before reuse.
        """
        for i in range(self.num_sram_macros):
            self.add_child(f"sram_array_{i}",
                           TS1N16FFCLLSBLVTC2048X64M8SW(),
                           CLK=self.CLK,
                           A=self.a_sram_d,
                           BWEB=self.BWEB_d,
                           CEB=self.ceb_demux_d[i],
                           WEB=self.web_demux_d[i],
                           D=self.D_d,
                           Q=self.q_sram2mux[i],
                           RTSEL=const(0b01, 2),
                           WTSEL=const(0b00, 2))
|
StanfordAHA/garnet | global_buffer/global_buffer_main.py | <filename>global_buffer/global_buffer_main.py
import argparse
from global_buffer.design.global_buffer import GlobalBuffer
from global_buffer.design.global_buffer_parameter import gen_global_buffer_params, gen_header_files
from systemRDL.util import gen_rdl_header
import os
import pathlib
import kratos as k
def gen_param_header(top_name, params, output_folder):
    """Emit the SystemVerilog (.svh) and C (.h) parameter headers for
    *params* into *output_folder*, both named after *top_name*."""
    base_path = os.path.join(output_folder, top_name)
    gen_header_files(params=params,
                     svh_filename=base_path + ".svh",
                     h_filename=base_path + ".h",
                     header_name="global_buffer")
def main():
    """CLI driver: build the GlobalBuffer generator and optionally emit the
    parameter headers (-p), the RDL register header (-r), and Verilog (-v)."""
    garnet_home = os.getenv('GARNET_HOME')
    if not garnet_home:
        # Fall back to the repository root (two levels above this file).
        garnet_home = pathlib.Path(__file__).parent.parent.resolve()
    parser = argparse.ArgumentParser(description='Garnet Global Buffer')
    parser.add_argument('--num_glb_tiles', type=int, default=16)
    parser.add_argument('--num_cgra_cols', type=int, default=32)
    parser.add_argument('--glb_tile_mem_size', type=int, default=256)
    parser.add_argument("--sram_stub", action="store_true")
    parser.add_argument("-v", "--verilog", action="store_true")
    parser.add_argument("-p", "--parameter", action="store_true")
    parser.add_argument("-r", "--rdl", action="store_true")
    args = parser.parse_args()
    params = gen_global_buffer_params(num_glb_tiles=args.num_glb_tiles,
                                      num_cgra_cols=args.num_cgra_cols,
                                      # FIXME: we assume num_prr is the same as num_glb_tiles
                                      num_prr=args.num_glb_tiles,
                                      is_sram_stub=args.sram_stub,
                                      glb_tile_mem_size=args.glb_tile_mem_size)
    glb = GlobalBuffer(_params=params)
    if args.parameter:
        gen_param_header(top_name="global_buffer_param",
                         params=params,
                         output_folder=os.path.join(garnet_home, "global_buffer/header"))
    if args.rdl:
        gen_rdl_header(top_name="glb",
                       rdl_file=os.path.join(garnet_home, "global_buffer/systemRDL/glb.rdl"),
                       output_folder=os.path.join(garnet_home, "global_buffer/header"))
    if args.verilog:
        k.verilog(glb, filename=os.path.join(garnet_home, "global_buffer", "global_buffer.sv"))
# Script entry point when run directly as a generator.
if __name__ == "__main__":
    main()
|
StanfordAHA/garnet | global_buffer/design/glb_tile_pcfg_switch.py | <gh_stars>10-100
from kratos import Generator, always_ff, always_comb, posedge
from global_buffer.design.global_buffer_parameter import GlobalBufferParams
from global_buffer.design.glb_header import GlbHeader
class GlbTilePcfgSwitch(Generator):
    """Parallel-configuration (pcfg) switch for one GLB tile.

    Pipelines the JTAG and pcfg config buses west-to-east by one cycle,
    selects between the local DMA-sourced config and the west-side pcfg bus
    according to ``cfg_pcfg_dma_mode``, and drives the per-column CGRA config
    outputs (with a JTAG read-bypass override).
    """

    def __init__(self, _params: GlobalBufferParams):
        super().__init__("glb_tile_pcfg_switch")
        self._params = _params
        self.header = GlbHeader(self._params)
        self.clk = self.clock("clk")
        self.reset = self.reset("reset")
        self.cfg_pcfg_dma_mode = self.input("cfg_pcfg_dma_mode", 1)
        self.cgra_cfg_core2sw = self.input(
            "cgra_cfg_core2sw", self.header.cgra_cfg_t)
        self.cgra_cfg_jtag_wsti = self.input(
            "cgra_cfg_jtag_wsti", self.header.cgra_cfg_t)
        self.cgra_cfg_jtag_esto = self.output(
            "cgra_cfg_jtag_esto", self.header.cgra_cfg_t)
        self.cgra_cfg_pcfg_wsti = self.input(
            "cgra_cfg_pcfg_wsti", self.header.cgra_cfg_t)
        self.cgra_cfg_pcfg_esto = self.output(
            "cgra_cfg_pcfg_esto", self.header.cgra_cfg_t)
        self.cgra_cfg_g2f = self.output(
            "cgra_cfg_g2f", self.header.cgra_cfg_t, size=self._params.cgra_per_glb, packed=True)
        # JTAG read bypass: rd_en/addr forwarded combinationally along the
        # chain, outside the pipelined config struct.
        self.cgra_cfg_jtag_wsti_rd_en_bypass = self.input(
            "cgra_cfg_jtag_wsti_rd_en_bypass", 1)
        self.cgra_cfg_jtag_wsti_addr_bypass = self.input(
            "cgra_cfg_jtag_wsti_addr_bypass", self._params.cgra_cfg_addr_width)
        self.cgra_cfg_jtag_esto_rd_en_bypass = self.output(
            "cgra_cfg_jtag_esto_rd_en_bypass", 1)
        self.cgra_cfg_jtag_esto_addr_bypass = self.output(
            "cgra_cfg_jtag_esto_addr_bypass", self._params.cgra_cfg_addr_width)
        # local variables
        self.cgra_cfg_g2f_w = self.var(
            "cgra_cfg_g2f_w", self.header.cgra_cfg_t, size=self._params.cgra_per_glb)
        self.cgra_cfg_pcfg_muxed = self.var(
            "cgra_cfg_pcfg_muxed", self.header.cgra_cfg_t)
        self.add_always(self.bypass_logic)
        self.add_always(self.cgra_cfg_pcfg_muxed_logic)
        self.add_always(self.pipeline)
        self.add_always(self.cgra_cfg_g2f_logic)
        self.add_always(self.pipeline_cgra_cfg_g2f)

    @always_comb
    def bypass_logic(self):
        # The JTAG read bypass passes straight through (no pipeline stage).
        self.cgra_cfg_jtag_esto_rd_en_bypass = self.cgra_cfg_jtag_wsti_rd_en_bypass
        self.cgra_cfg_jtag_esto_addr_bypass = self.cgra_cfg_jtag_wsti_addr_bypass

    @always_comb
    def cgra_cfg_pcfg_muxed_logic(self):
        # DMA mode: take config from this tile's pcfg DMA; otherwise forward
        # the west-side pcfg chain.
        if self.cfg_pcfg_dma_mode == 1:
            self.cgra_cfg_pcfg_muxed = self.cgra_cfg_core2sw
        else:
            self.cgra_cfg_pcfg_muxed = self.cgra_cfg_pcfg_wsti

    @always_ff((posedge, "clk"), (posedge, "reset"))
    def pipeline(self):
        # One register stage on both east-bound config buses.
        if self.reset:
            self.cgra_cfg_jtag_esto = 0
            self.cgra_cfg_pcfg_esto = 0
        else:
            self.cgra_cfg_jtag_esto = self.cgra_cfg_jtag_wsti
            self.cgra_cfg_pcfg_esto = self.cgra_cfg_pcfg_muxed

    @always_comb
    def cgra_cfg_g2f_logic(self):
        for i in range(self._params.cgra_per_glb):
            if self.cgra_cfg_jtag_esto_rd_en_bypass:
                # JTAG read bypass wins: force a read at the bypass address.
                self.cgra_cfg_g2f_w[i]['wr_en'] = 0
                self.cgra_cfg_g2f_w[i]['rd_en'] = 1
                self.cgra_cfg_g2f_w[i]['addr'] = self.cgra_cfg_jtag_esto_addr_bypass
                self.cgra_cfg_g2f_w[i]['data'] = 0
            else:
                # OR-merge of JTAG and pcfg config (at most one is expected
                # to be active at a time).
                self.cgra_cfg_g2f_w[i] = self.cgra_cfg_jtag_esto | self.cgra_cfg_pcfg_esto

    @always_ff((posedge, "clk"), (posedge, "reset"))
    def pipeline_cgra_cfg_g2f(self):
        # Register the per-column config outputs.
        if self.reset:
            for i in range(self._params.cgra_per_glb):
                self.cgra_cfg_g2f[i] = 0
        else:
            for i in range(self._params.cgra_per_glb):
                self.cgra_cfg_g2f[i] = self.cgra_cfg_g2f_w[i]
|
StanfordAHA/garnet | global_buffer/design/glb_core_store_dma.py | from kratos import Generator, always_ff, always_comb, posedge, concat, const, resize, ext, clog2
from global_buffer.design.glb_loop_iter import GlbLoopIter
from global_buffer.design.glb_sched_gen import GlbSchedGen
from global_buffer.design.glb_addr_gen import GlbAddrGen
from global_buffer.design.pipeline import Pipeline
from global_buffer.design.global_buffer_parameter import GlobalBufferParams
from global_buffer.design.glb_header import GlbHeader
import math
class GlbCoreStoreDma(Generator):
    def __init__(self, _params: GlobalBufferParams):
        """Store-DMA engine: streams CGRA write data into GLB banks.

        16-bit CGRA words are packed into 64-bit bank words (the word and its
        byte strobes are cached until the bank-word address changes), while a
        shared loop iterator plus cycle/data stride generators produce the
        write schedule and addresses from the configured DMA headers.
        """
        super().__init__("glb_core_store_dma")
        self._params = _params
        self.header = GlbHeader(self._params)
        # The packing logic assumes exactly four CGRA words per bank word.
        assert self._params.bank_data_width == self._params.cgra_data_width * 4
        self.clk = self.clock("clk")
        self.clk_en = self.clock_en("clk_en")
        self.reset = self.reset("reset")
        self.data_f2g = self.input(
            "data_f2g", width=self._params.cgra_data_width)
        self.data_valid_f2g = self.input("data_valid_f2g", width=1)
        self.wr_packet = self.output(
            "wr_packet", self.header.wr_packet_t)
        self.cfg_st_dma_num_repeat = self.input("cfg_st_dma_num_repeat", clog2(self._params.queue_depth) + 1)
        self.cfg_st_dma_ctrl_mode = self.input("cfg_st_dma_ctrl_mode", 2)
        self.cfg_st_dma_ctrl_use_valid = self.input("cfg_st_dma_ctrl_use_valid", 1)
        self.cfg_data_network_latency = self.input(
            "cfg_data_network_latency", self._params.latency_width)
        self.cfg_st_dma_header = self.input(
            "cfg_st_dma_header", self.header.cfg_dma_header_t, size=self._params.queue_depth, explicit_array=True)
        self.st_dma_start_pulse = self.input("st_dma_start_pulse", 1)
        self.st_dma_done_pulse = self.output("st_dma_done_pulse", 1)
        # localparam
        # Fixed portion of the path latency from this DMA to the bank.
        self.default_latency = (self._params.glb_bank_memory_pipeline_depth
                                + self._params.sram_gen_pipeline_depth
                                + self._params.glb_switch_pipeline_depth
                                )
        self.cgra_strb_width = self._params.cgra_data_width // 8
        self.cgra_strb_value = 2 ** (self._params.cgra_data_width // 8) - 1
        # local variables
        self.strm_wr_data_w = self.var("strm_wr_data_w", width=self._params.cgra_data_width)
        self.strm_wr_addr_w = self.var("strm_wr_addr_w", width=self._params.glb_addr_width)
        self.last_strm_wr_addr_r = self.var("last_strm_wr_addr_r", width=self._params.glb_addr_width)
        self.strm_wr_en_w = self.var("strm_wr_en_w", width=1)
        self.strm_data_sel = self.var("strm_data_sel", self._params.bank_byte_offset - self._params.cgra_byte_offset)
        self.bank_addr_match = self.var("bank_addr_match", 1)
        self.bank_wr_en = self.var("bank_wr_en", 1)
        self.bank_wr_addr = self.var("bank_wr_addr", width=self._params.glb_addr_width)
        # Cache for assembling a full bank word out of CGRA-word writes.
        self.bank_wr_data_cache_r = self.var("bank_wr_data_cache_r", self._params.bank_data_width)
        self.bank_wr_data_cache_w = self.var("bank_wr_data_cache_w", self._params.bank_data_width)
        self.bank_wr_strb_cache_r = self.var("bank_wr_strb_cache_r", math.ceil(self._params.bank_data_width / 8))
        self.bank_wr_strb_cache_w = self.var("bank_wr_strb_cache_w", math.ceil(self._params.bank_data_width / 8))
        self.done_pulse_w = self.var("done_pulse_w", 1)
        self.st_dma_start_pulse_next = self.var("st_dma_start_pulse_next", 1)
        self.st_dma_start_pulse_r = self.var("st_dma_start_pulse_r", 1)
        self.is_first = self.var("is_first", 1)
        self.is_last = self.var("is_last", 1)
        self.strm_run = self.var("strm_run", 1)
        self.loop_done = self.var("loop_done", 1)
        self.cycle_valid = self.var("cycle_valid", 1)
        self.cycle_valid_muxed = self.var("cycle_valid_muxed", 1)
        self.cycle_count = self.var("cycle_count", self._params.axi_data_width)
        self.cycle_current_addr = self.var("cycle_current_addr", self._params.axi_data_width)
        self.data_current_addr = self.var("data_current_addr", self._params.axi_data_width)
        self.loop_mux_sel = self.var("loop_mux_sel", clog2(self._params.loop_level))
        self.repeat_cnt = self.var("repeat_cnt", clog2(self._params.queue_depth) + 1)
        if self._params.queue_depth != 1:
            self.queue_sel_r = self.var("queue_sel_r", max(1, clog2(self.repeat_cnt.width)))
        # Current dma header
        self.current_dma_header = self.var("current_dma_header", self.header.cfg_dma_header_t)
        # With a single-entry queue there is no selection to make.
        if self._params.queue_depth == 1:
            self.wire(self.cfg_st_dma_header, self.current_dma_header)
        else:
            self.wire(self.cfg_st_dma_header[self.queue_sel_r], self.current_dma_header)
        if self._params.queue_depth != 1:
            self.add_always(self.queue_sel_ff)
        self.add_always(self.repeat_cnt_ff)
        self.add_always(self.is_first_ff)
        self.add_always(self.is_last_ff)
        self.add_always(self.strm_run_ff)
        self.add_always(self.st_dma_start_pulse_logic)
        self.add_always(self.st_dma_start_pulse_ff)
        self.add_always(self.cycle_counter)
        self.add_always(self.cycle_valid_comb)
        self.add_always(self.strm_wr_packet_comb)
        self.add_always(self.last_strm_wr_addr_ff)
        self.add_always(self.strm_data_sel_comb)
        self.add_always(self.bank_wr_packet_cache_comb)
        self.add_always(self.bank_wr_packet_cache_ff)
        self.add_always(self.bank_wr_packet_logic)
        self.add_always(self.wr_packet_logic)
        self.add_always(self.strm_done_pulse_logic)
        self.add_done_pulse_pipeline()
        # Loop iteration shared for cycle and data
        self.loop_iter = GlbLoopIter(self._params)
        self.add_child("loop_iter",
                       self.loop_iter,
                       clk=self.clk,
                       clk_en=self.clk_en,
                       reset=self.reset,
                       step=self.cycle_valid_muxed,
                       mux_sel_out=self.loop_mux_sel,
                       restart=self.loop_done)
        self.wire(self.loop_iter.dim, self.current_dma_header[f"dim"])
        for i in range(self._params.loop_level):
            self.wire(self.loop_iter.ranges[i], self.current_dma_header[f"range_{i}"])
        # Cycle stride: schedules WHEN each write happens.
        self.cycle_stride_sched_gen = GlbSchedGen(self._params)
        self.add_child("cycle_stride_sched_gen",
                       self.cycle_stride_sched_gen,
                       clk=self.clk,
                       clk_en=self.clk_en,
                       reset=self.reset,
                       restart=self.st_dma_start_pulse_r,
                       cycle_count=self.cycle_count,
                       current_addr=self.cycle_current_addr,
                       finished=self.loop_done,
                       valid_output=self.cycle_valid)
        self.cycle_stride_addr_gen = GlbAddrGen(self._params)
        self.add_child("cycle_stride_addr_gen",
                       self.cycle_stride_addr_gen,
                       clk=self.clk,
                       clk_en=self.clk_en,
                       reset=self.reset,
                       restart=self.st_dma_start_pulse_r,
                       step=self.cycle_valid_muxed,
                       mux_sel=self.loop_mux_sel,
                       addr_out=self.cycle_current_addr)
        self.wire(self.cycle_stride_addr_gen.start_addr, ext(
            self.current_dma_header[f"cycle_start_addr"], self._params.axi_data_width))
        for i in range(self._params.loop_level):
            self.wire(self.cycle_stride_addr_gen.strides[i],
                      self.current_dma_header[f"cycle_stride_{i}"])
        # Data stride: generates WHERE each write goes.
        self.data_stride_addr_gen = GlbAddrGen(self._params)
        self.add_child("data_stride_addr_gen",
                       self.data_stride_addr_gen,
                       clk=self.clk,
                       clk_en=self.clk_en,
                       reset=self.reset,
                       restart=self.st_dma_start_pulse_r,
                       step=self.cycle_valid_muxed,
                       mux_sel=self.loop_mux_sel,
                       addr_out=self.data_current_addr)
        self.wire(self.data_stride_addr_gen.start_addr, ext(
            self.current_dma_header[f"start_addr"], self._params.axi_data_width))
        for i in range(self._params.loop_level):
            self.wire(self.data_stride_addr_gen.strides[i], self.current_dma_header[f"stride_{i}"])
@always_ff((posedge, "clk"), (posedge, "reset"))
def repeat_cnt_ff(self):
    # Counts completed stream repetitions on each done pulse.
    # Mode 2 (repeat): saturates at cfg_st_dma_num_repeat.
    # Mode 3 (queue):  additionally capped by the hardware queue depth.
    if self.reset:
        self.repeat_cnt = 0
    elif self.clk_en:
        if self.cfg_st_dma_ctrl_mode == 2:
            if self.st_dma_done_pulse:
                if (self.repeat_cnt + 1) < self.cfg_st_dma_num_repeat:
                    self.repeat_cnt += 1
        elif self.cfg_st_dma_ctrl_mode == 3:
            if self.st_dma_done_pulse:
                if (((self.repeat_cnt + 1) < self.cfg_st_dma_num_repeat)
                        & ((self.repeat_cnt + 1) < self._params.queue_depth)):
                    self.repeat_cnt += 1
@always_ff((posedge, "clk"), (posedge, "reset"))
def queue_sel_ff(self):
    # DMA-header queue pointer, advanced only in queue mode (3);
    # wraps to 0 after the programmed number of repeats.
    if self.reset:
        self.queue_sel_r = 0
    elif self.clk_en:
        if self.cfg_st_dma_ctrl_mode == 3:
            if self.st_dma_done_pulse:
                if (self.repeat_cnt + 1) < self.cfg_st_dma_num_repeat:
                    self.queue_sel_r = self.queue_sel_r + 1
                else:
                    self.queue_sel_r = 0
@always_ff((posedge, "clk"), (posedge, "reset"))
def is_first_ff(self):
    # High from stream start until the first stream word is written;
    # used to suppress a bank write while the cache is still empty.
    if self.reset:
        self.is_first = 0
    elif self.clk_en:
        if self.st_dma_start_pulse_r:
            self.is_first = 1
        elif self.strm_wr_en_w:
            self.is_first = 0
@always_ff((posedge, "clk"), (posedge, "reset"))
def is_last_ff(self):
    # Set when the loop iterator finishes; forces one final bank write
    # to flush the cached data, then clears once that write happens.
    if self.reset:
        self.is_last = 0
    elif self.clk_en:
        if self.loop_done:
            self.is_last = 1
        elif self.bank_wr_en:
            self.is_last = 0
@always_ff((posedge, "clk"), (posedge, "reset"))
def strm_run_ff(self):
    # Stream-active flag: set by the registered start pulse, cleared
    # when the loop iterator reports completion.
    if self.reset:
        self.strm_run = 0
    elif self.clk_en:
        if self.st_dma_start_pulse_r:
            self.strm_run = 1
        elif self.loop_done:
            self.strm_run = 0
@always_comb
def st_dma_start_pulse_logic(self):
    # Next start pulse by control mode: 0 = off, 1 = external pulse when
    # idle, 2/3 = external pulse when idle OR auto-restart on done while
    # repeats remain.
    if self.cfg_st_dma_ctrl_mode == 0:
        self.st_dma_start_pulse_next = 0
    elif self.cfg_st_dma_ctrl_mode == 1:
        self.st_dma_start_pulse_next = (~self.strm_run) & self.st_dma_start_pulse
    elif (self.cfg_st_dma_ctrl_mode == 2) | (self.cfg_st_dma_ctrl_mode == 3):
        self.st_dma_start_pulse_next = (((~self.strm_run) & self.st_dma_start_pulse)
                                        | ((self.st_dma_done_pulse)
                                           & ((self.repeat_cnt + 1) < self.cfg_st_dma_num_repeat)))
    else:
        self.st_dma_start_pulse_next = 0
@always_ff((posedge, "clk"), (posedge, "reset"))
def st_dma_start_pulse_ff(self):
    # Registers the start pulse and self-clears next cycle, producing a
    # single-cycle pulse even if the source stays asserted.
    if self.reset:
        self.st_dma_start_pulse_r = 0
    elif self.clk_en:
        if self.st_dma_start_pulse_r:
            self.st_dma_start_pulse_r = 0
        else:
            self.st_dma_start_pulse_r = self.st_dma_start_pulse_next
@always_ff((posedge, "clk"), (posedge, "reset"))
def cycle_counter(self):
    # Free-running cycle count while the stream runs; zeroed on start
    # and on loop completion. Compared against the schedule generator.
    if self.reset:
        self.cycle_count = 0
    elif self.clk_en:
        if self.st_dma_start_pulse_r:
            self.cycle_count = 0
        elif self.loop_done:
            self.cycle_count = 0
        elif self.strm_run:
            self.cycle_count = self.cycle_count + 1
@always_comb
def cycle_valid_comb(self):
    # Select valid source: fabric-driven data valid when configured,
    # otherwise the internally scheduled cycle valid.
    if self.cfg_st_dma_ctrl_use_valid:
        self.cycle_valid_muxed = self.data_valid_f2g
    else:
        self.cycle_valid_muxed = self.cycle_valid
@always_comb
def strm_wr_packet_comb(self):
    # Assemble the per-word stream write: enable, generated address
    # (resized to the GLB address width), and fabric data.
    self.strm_wr_en_w = self.cycle_valid_muxed
    self.strm_wr_addr_w = resize(self.data_current_addr, self._params.glb_addr_width)
    self.strm_wr_data_w = self.data_f2g
@always_ff((posedge, "clk"), (posedge, "reset"))
def last_strm_wr_addr_ff(self):
    # Remember the most recent stream write address; used to detect a
    # bank-address change and as the address for the flushed bank write.
    if self.reset:
        self.last_strm_wr_addr_r = 0
    elif self.clk_en:
        if self.strm_wr_en_w:
            self.last_strm_wr_addr_r = self.strm_wr_addr_w
@always_comb
def strm_data_sel_comb(self):
    # Word lane within the bank word, taken from the address bits between
    # the CGRA-word offset and the bank-word offset.
    self.strm_data_sel = self.strm_wr_addr_w[self._params.bank_byte_offset - 1, self._params.cgra_byte_offset]
@always_comb
def bank_wr_packet_cache_comb(self):
    # Merge narrow CGRA words into a wide bank word: place each incoming
    # word (and its strobe) in the lane selected by strm_data_sel.
    self.bank_wr_strb_cache_w = self.bank_wr_strb_cache_r
    self.bank_wr_data_cache_w = self.bank_wr_data_cache_r
    # First, if cached data is written to memory, clear it.
    if self.bank_wr_en:
        self.bank_wr_strb_cache_w = 0
        self.bank_wr_data_cache_w = 0
    # Next, save data to cache
    if self.strm_wr_en_w:
        if self.strm_data_sel == 0:
            self.bank_wr_strb_cache_w[self.cgra_strb_width - 1,
                                      0] = const(self.cgra_strb_value, self.cgra_strb_width)
            self.bank_wr_data_cache_w[self._params.cgra_data_width - 1, 0] = self.strm_wr_data_w
        elif self.strm_data_sel == 1:
            self.bank_wr_strb_cache_w[self.cgra_strb_width * 2 - 1,
                                      self.cgra_strb_width] = const(self.cgra_strb_value,
                                                                    self.cgra_strb_width)
            self.bank_wr_data_cache_w[self._params.cgra_data_width * 2 - 1,
                                      self._params.cgra_data_width] = self.strm_wr_data_w
        elif self.strm_data_sel == 2:
            self.bank_wr_strb_cache_w[self.cgra_strb_width * 3 - 1,
                                      self.cgra_strb_width * 2] = const(self.cgra_strb_value,
                                                                        self.cgra_strb_width)
            self.bank_wr_data_cache_w[self._params.cgra_data_width * 3 - 1,
                                      self._params.cgra_data_width * 2] = self.strm_wr_data_w
        elif self.strm_data_sel == 3:
            self.bank_wr_strb_cache_w[self.cgra_strb_width * 4 - 1,
                                      self.cgra_strb_width * 3] = const(self.cgra_strb_value,
                                                                        self.cgra_strb_width)
            self.bank_wr_data_cache_w[self._params.cgra_data_width * 4 - 1,
                                      self._params.cgra_data_width * 3] = self.strm_wr_data_w
        else:
            # Out-of-range lane: keep the cache unchanged.
            self.bank_wr_strb_cache_w = self.bank_wr_strb_cache_r
            self.bank_wr_data_cache_w = self.bank_wr_data_cache_r
@always_ff((posedge, "clk"), (posedge, "reset"))
def bank_wr_packet_cache_ff(self):
    # Register stage for the combinationally merged strobe/data cache.
    if self.reset:
        self.bank_wr_strb_cache_r = 0
        self.bank_wr_data_cache_r = 0
    elif self.clk_en:
        self.bank_wr_strb_cache_r = self.bank_wr_strb_cache_w
        self.bank_wr_data_cache_r = self.bank_wr_data_cache_w
@always_comb
def bank_wr_packet_logic(self):
    # Issue a bank write when a new word lands in a different bank word
    # than the cached one (and it is not the very first word), or when
    # is_last forces a final flush. The write uses the cached address.
    self.bank_addr_match = (self.strm_wr_addr_w[self._params.glb_addr_width - 1, self._params.bank_byte_offset]
                            == self.last_strm_wr_addr_r[self._params.glb_addr_width - 1,
                                                        self._params.bank_byte_offset])
    self.bank_wr_en = ((self.strm_wr_en_w & (~self.bank_addr_match) & (~self.is_first)) | self.is_last)
    self.bank_wr_addr = self.last_strm_wr_addr_r
@always_comb
def wr_packet_logic(self):
    # Drive the outgoing write packet struct from the bank-level signals.
    self.wr_packet['wr_en'] = self.bank_wr_en
    self.wr_packet['wr_strb'] = self.bank_wr_strb_cache_r
    self.wr_packet['wr_data'] = self.bank_wr_data_cache_r
    self.wr_packet['wr_addr'] = self.bank_wr_addr
@always_comb
def strm_done_pulse_logic(self):
    # One-cycle done pulse: loop completion while the stream is running.
    self.done_pulse_w = self.loop_done & self.strm_run
def add_done_pulse_pipeline(self):
    """Delay the done pulse by the configured network latency.

    Instantiates a pipeline deep enough for the worst case (a round trip
    across all GLB tiles plus the default latency) and taps it at
    cfg_data_network_latency + default_latency to produce
    st_dma_done_pulse.
    """
    maximum_latency = 2 * self._params.num_glb_tiles + self.default_latency
    latency_width = clog2(maximum_latency)
    self.done_pulse_d_arr = self.var(
        "done_pulse_d_arr", 1, size=maximum_latency, explicit_array=True)
    self.done_pulse_pipeline = Pipeline(width=1,
                                        depth=maximum_latency,
                                        flatten_output=True)
    self.add_child("done_pulse_pipeline",
                   self.done_pulse_pipeline,
                   clk=self.clk,
                   clk_en=self.clk_en,
                   reset=self.reset,
                   in_=self.done_pulse_w,
                   out_=self.done_pulse_d_arr)
    self.wire(self.st_dma_done_pulse,
              self.done_pulse_d_arr[resize(self.cfg_data_network_latency, latency_width) + self.default_latency])
|
StanfordAHA/garnet | global_buffer/design/glb_sched_gen.py | from kratos import always_ff, always_comb, posedge, Generator, clog2
from global_buffer.design.global_buffer_parameter import GlobalBufferParams
class GlbSchedGen(Generator):
    """Cycle-accurate schedule generator.

    Asserts ``valid_output`` whenever the free-running ``cycle_count``
    equals ``current_addr``, gated so nothing is valid until a
    ``restart`` arms the generator and nothing after ``finished``.
    """

    def __init__(self, _params: GlobalBufferParams):
        # Plain string: the original used an f-string with no placeholders.
        super().__init__("glb_sched_gen")
        self._params = _params
        self.clk = self.clock("clk")
        self.clk_en = self.clock_en("clk_en")
        self.reset = self.reset("reset")
        self.restart = self.input("restart", 1)
        self.cycle_count = self.input("cycle_count", self._params.axi_data_width)
        self.current_addr = self.input("current_addr", self._params.axi_data_width)
        self.finished = self.input("finished", 1)
        self.valid_output = self.output("valid_output", 1)
        # local variables
        self.valid_gate = self.var("valid_gate", 1)
        self.add_always(self.valid_gate_ff)
        self.add_always(self.set_valid_out)

    @always_ff((posedge, "clk"), (posedge, "reset"))
    def valid_gate_ff(self):
        # Gate is closed (1) out of reset; restart opens it (0) until
        # finished closes it again.
        if self.reset:
            self.valid_gate = 1
        elif self.clk_en:
            if self.restart:
                self.valid_gate = 0
            elif self.finished:
                self.valid_gate = 1

    @always_comb
    def set_valid_out(self):
        # Valid only on an exact cycle match while the gate is open.
        self.valid_output = (self.cycle_count == self.current_addr) & (~self.valid_gate)
|
StanfordAHA/garnet | global_buffer/design/glb_loop_iter.py | from kratos import Generator, clog2, always_ff, always_comb, posedge, const
from global_buffer.design.global_buffer_parameter import GlobalBufferParams
class GlbLoopIter(Generator):
    """Nested-loop iteration controller.

    Tracks per-level counters for up to ``loop_level`` nested loops.
    Each ``step`` increments the lowest level that has not reached its
    range; ``mux_sel_out`` reports that level and ``restart`` pulses
    when every active level (below ``dim``) has completed.
    """

    def __init__(self, _params: GlobalBufferParams):
        # Plain string: the original used an f-string with no placeholders.
        super().__init__("glb_loop_iter")
        self._params = _params
        # INPUTS
        self.clk = self.clock("clk")
        self.clk_en = self.clock_en("clk_en")
        self.reset = self.reset("reset")
        self.ranges = self.input("ranges", self._params.axi_data_width,
                                 size=self._params.loop_level,
                                 packed=True, explicit_array=True)
        self.dim = self.input("dim", 1 + clog2(self._params.loop_level))
        self.step = self.input("step", 1)
        self.mux_sel_out = self.output("mux_sel_out", max(clog2(self._params.loop_level), 1))
        self.restart = self.output("restart", 1)
        # local variables
        self.dim_counter = self.var("dim_counter", self._params.axi_data_width,
                                    size=self._params.loop_level,
                                    packed=True,
                                    explicit_array=True)
        self.max_value = self.var("max_value", self._params.loop_level)
        self.mux_sel = self.var("mux_sel", max(clog2(self._params.loop_level), 1))
        self.wire(self.mux_sel_out, self.mux_sel)
        self.not_done = self.var("not_done", 1)
        self.clear = self.var("clear", self._params.loop_level)
        self.inc = self.var("inc", self._params.loop_level)
        self.is_maxed = self.var("is_maxed", 1)
        self.wire(self.is_maxed, (self.dim_counter[self.mux_sel]
                                  == self.ranges[self.mux_sel]) & self.inc[self.mux_sel])
        self.add_code(self.set_mux_sel)
        for i in range(self._params.loop_level):
            self.add_code(self.set_clear, idx=i)
            self.add_code(self.set_inc, idx=i)
            self.add_code(self.dim_counter_update, idx=i)
            self.add_code(self.max_value_update, idx=i)
        self.wire(self.restart, self.step & (~self.not_done))

    @always_comb
    def set_mux_sel(self):
        # Find the lowest active level that has not yet maxed out.
        self.mux_sel = 0
        self.not_done = 0
        for i in range(self._params.loop_level):
            if ~self.not_done:
                if ~self.max_value[i] & (i < self.dim):
                    self.mux_sel = i
                    self.not_done = 1

    @always_comb
    def set_clear(self, idx):
        # Clear every level below the selected one on a step, or all
        # levels when the whole iteration is done.
        self.clear[idx] = 0
        if ((idx < self.mux_sel) | (~self.not_done)) & self.step:
            self.clear[idx] = 1

    @always_comb
    def set_inc(self, idx):
        # Level 0 increments on every step; higher levels only when selected.
        # NOTE(review): const width 5 is hard-coded rather than derived from
        # clog2(loop_level) -- confirm loop_level always fits in 5 bits.
        self.inc[idx] = 0
        if (const(idx, 5) == 0) & self.step & (idx < self.dim):
            self.inc[idx] = 1
        elif (idx == self.mux_sel) & self.step & (idx < self.dim):
            self.inc[idx] = 1

    @always_ff((posedge, "clk"), (posedge, "reset"))
    def dim_counter_update(self, idx):
        # NOTE(review): unlike max_value_update this register is not gated
        # by clk_en -- confirm that is intentional.
        if self.reset:
            self.dim_counter[idx] = 0
        else:
            if self.clear[idx]:
                self.dim_counter[idx] = 0
            elif self.inc[idx]:
                self.dim_counter[idx] = self.dim_counter[self.mux_sel] + 1

    @always_ff((posedge, "clk"), (posedge, "reset"))
    def max_value_update(self, idx):
        # Latch whether this level reached its range on its last increment.
        if self.reset:
            self.max_value[idx] = 0
        elif self.clk_en:
            if self.clear[idx]:
                self.max_value[idx] = 0
            elif self.inc[idx]:
                self.max_value[idx] = self.is_maxed
|
StanfordAHA/garnet | mflowgen/global_controller/construct.py | #! /usr/bin/env python
#=========================================================================
# construct.py
#=========================================================================
# Author :
# Date :
#
import os
import sys
from mflowgen.components import Graph, Step
from shutil import which
def construct():
    """Build and return the mflowgen Graph for the global_controller block.

    Wires the RTL/constraints inputs through a standard Cadence
    synthesis + Innovus PnR flow, with Synopsys PT signoff, genlib/db
    generation, and Calibre (or Pegasus, when Calibre is unavailable)
    DRC/LVS.
    """

    g = Graph()

    # -----------------------------------------------------------------------
    # Parameters
    # -----------------------------------------------------------------------

    adk_name = 'tsmc16'
    adk_view = 'multicorner'

    parameters = {
        'construct_path': __file__,
        'design_name': 'global_controller',
        'clock_period': 1.0,
        'adk': adk_name,
        'adk_view': adk_view,
        # Synthesis
        'flatten_effort': 3,
        'topographical': True,
        # RTL Generation
        'interconnect_only': False,
        # Power Domains (leave this false)
        'PWR_AWARE': False,
        # hold target slack
        'hold_target_slack': 0.030,
        # Utilization target
        'core_density_target': 0.50
    }

    # -----------------------------------------------------------------------
    # Create nodes
    # -----------------------------------------------------------------------

    this_dir = os.path.dirname(os.path.abspath(__file__))

    # ADK step
    g.set_adk(adk_name)
    adk = g.get_adk_step()

    # Custom steps
    rtl = Step(this_dir + '/rtl')
    constraints = Step(this_dir + '/constraints')
    custom_init = Step(this_dir + '/custom-init')
    custom_power = Step(this_dir + '/../common/custom-power-leaf')
    lib2db = Step(this_dir + '/../common/synopsys-dc-lib2db')

    # Default steps
    info = Step('info', default=True)
    synth = Step('cadence-genus-synthesis', default=True)
    iflow = Step('cadence-innovus-flowsetup', default=True)
    init = Step('cadence-innovus-init', default=True)
    power = Step('cadence-innovus-power', default=True)
    place = Step('cadence-innovus-place', default=True)
    cts = Step('cadence-innovus-cts', default=True)
    postcts_hold = Step('cadence-innovus-postcts_hold', default=True)
    route = Step('cadence-innovus-route', default=True)
    postroute = Step('cadence-innovus-postroute', default=True)
    postroute_hold = Step('cadence-innovus-postroute_hold', default=True)
    signoff = Step('cadence-innovus-signoff', default=True)
    pt_signoff = Step('synopsys-pt-timing-signoff', default=True)
    genlib = Step('cadence-genus-genlib', default=True)
    # Fall back to Pegasus for physical verification when Calibre is
    # not on PATH.
    if which("calibre") is not None:
        drc = Step('mentor-calibre-drc', default=True)
        lvs = Step('mentor-calibre-lvs', default=True)
    else:
        drc = Step('cadence-pegasus-drc', default=True)
        lvs = Step('cadence-pegasus-lvs', default=True)
    debugcalibre = Step('cadence-innovus-debug-calibre', default=True)

    # Add extra input edges to innovus steps that need custom tweaks
    init.extend_inputs(custom_init.all_outputs())
    power.extend_inputs(custom_power.all_outputs())

    # -----------------------------------------------------------------------
    # Graph -- Add nodes
    # -----------------------------------------------------------------------

    g.add_step(info)
    g.add_step(rtl)
    g.add_step(constraints)
    g.add_step(synth)
    g.add_step(iflow)
    g.add_step(init)
    g.add_step(custom_init)
    g.add_step(power)
    g.add_step(custom_power)
    g.add_step(place)
    g.add_step(cts)
    g.add_step(postcts_hold)
    g.add_step(route)
    g.add_step(postroute)
    g.add_step(postroute_hold)
    g.add_step(signoff)
    g.add_step(pt_signoff)
    g.add_step(genlib)
    g.add_step(lib2db)
    g.add_step(drc)
    g.add_step(lvs)
    g.add_step(debugcalibre)

    # -----------------------------------------------------------------------
    # Graph -- Add edges
    # -----------------------------------------------------------------------

    # Connect by name
    g.connect_by_name(adk, synth)
    g.connect_by_name(adk, iflow)
    g.connect_by_name(adk, init)
    g.connect_by_name(adk, power)
    g.connect_by_name(adk, place)
    g.connect_by_name(adk, cts)
    g.connect_by_name(adk, postcts_hold)
    g.connect_by_name(adk, route)
    g.connect_by_name(adk, postroute)
    g.connect_by_name(adk, postroute_hold)
    g.connect_by_name(adk, signoff)
    g.connect_by_name(adk, drc)
    g.connect_by_name(adk, lvs)

    g.connect_by_name(rtl, synth)
    g.connect_by_name(constraints, synth)

    g.connect_by_name(synth, iflow)
    g.connect_by_name(synth, init)
    g.connect_by_name(synth, power)
    g.connect_by_name(synth, place)
    g.connect_by_name(synth, cts)

    g.connect_by_name(iflow, init)
    g.connect_by_name(iflow, power)
    g.connect_by_name(iflow, place)
    g.connect_by_name(iflow, cts)
    g.connect_by_name(iflow, postcts_hold)
    g.connect_by_name(iflow, route)
    g.connect_by_name(iflow, postroute)
    g.connect_by_name(iflow, postroute_hold)
    g.connect_by_name(iflow, signoff)

    g.connect_by_name(custom_init, init)
    g.connect_by_name(custom_power, power)

    # Main PnR chain
    g.connect_by_name(init, power)
    g.connect_by_name(power, place)
    g.connect_by_name(place, cts)
    g.connect_by_name(cts, postcts_hold)
    g.connect_by_name(postcts_hold, route)
    g.connect_by_name(route, postroute)
    g.connect_by_name(postroute, postroute_hold)
    g.connect_by_name(postroute_hold, signoff)
    g.connect_by_name(signoff, drc)
    g.connect_by_name(signoff, lvs)
    # Explicit GDS hand-off: output/input port names differ ('-' vs '_').
    g.connect(signoff.o('design-merged.gds'), drc.i('design_merged.gds'))
    g.connect(signoff.o('design-merged.gds'), lvs.i('design_merged.gds'))

    g.connect_by_name(signoff, genlib)
    g.connect_by_name(adk, genlib)
    g.connect_by_name(genlib, lib2db)

    g.connect_by_name(adk, pt_signoff)
    g.connect_by_name(signoff, pt_signoff)

    g.connect_by_name(adk, debugcalibre)
    g.connect_by_name(synth, debugcalibre)
    g.connect_by_name(iflow, debugcalibre)
    g.connect_by_name(signoff, debugcalibre)
    g.connect_by_name(drc, debugcalibre)
    g.connect_by_name(lvs, debugcalibre)

    # -----------------------------------------------------------------------
    # Parameterize
    # -----------------------------------------------------------------------

    g.update_params(parameters)

    # Since we are adding an additional input script to the generic Innovus
    # steps, we modify the order parameter for that node which determines
    # which scripts get run and when they get run.

    # init -- Add 'add-endcaps-welltaps.tcl' after 'floorplan.tcl'
    order = init.get_param('order')  # get the default script run order
    floorplan_idx = order.index('floorplan.tcl')  # find floorplan.tcl
    order.insert(floorplan_idx + 1, 'add-endcaps-welltaps.tcl')  # add here
    init.update_params({'order': order})

    # Add density target parameter (keyword for consistency with the
    # other allow_new update_params calls below)
    init.update_params({'core_density_target': parameters['core_density_target']}, allow_new=True)

    # Increase hold slack on postroute_hold step
    postroute_hold.update_params({'hold_target_slack': parameters['hold_target_slack']}, allow_new=True)

    # GLC uses leaf-level power strategy, which is shared with other blocks
    # that use power domains flow
    power.update_params({'PWR_AWARE': parameters['PWR_AWARE']}, allow_new=True)

    return g
# Script entry point: build the flow graph when run directly.
if __name__ == '__main__':
    g = construct()
    # g.plot()
|
StanfordAHA/garnet | global_buffer/design/glb_addr_gen.py | <reponame>StanfordAHA/garnet
from kratos import always_ff, posedge, Generator, clog2
from global_buffer.design.global_buffer_parameter import GlobalBufferParams
class GlbAddrGen(Generator):
    """Stride-based address generator.

    Accumulates the stride selected by ``mux_sel`` on each ``step``;
    ``addr_out`` is ``start_addr`` plus the running offset, so the base
    can change without restarting the accumulator.
    """

    def __init__(self, _params: GlobalBufferParams):
        # Plain string: the original used an f-string with no placeholders.
        super().__init__("glb_addr_gen")
        self._params = _params
        self.clk = self.clock("clk")
        self.clk_en = self.clock_en("clk_en")
        self.reset = self.reset("reset")
        self.restart = self.input("restart", 1)
        self.strides = self.input("strides", self._params.axi_data_width,
                                  size=self._params.loop_level,
                                  packed=True, explicit_array=True)
        self.start_addr = self.input("start_addr", self._params.axi_data_width)
        self.step = self.input("step", 1)
        self.mux_sel = self.input("mux_sel", max(clog2(self._params.loop_level), 1))
        self.addr_out = self.output("addr_out", self._params.axi_data_width)
        # local variables
        self.current_addr = self.var("current_addr", self._params.axi_data_width)
        # output address
        self.wire(self.addr_out, self.start_addr + self.current_addr)
        self.add_always(self.calculate_address)

    @always_ff((posedge, "clk"), (posedge, "reset"))
    def calculate_address(self):
        # Running offset: cleared on restart, otherwise accumulates the
        # stride of the currently selected loop level on each step.
        if self.reset:
            self.current_addr = 0
        elif self.clk_en:
            if self.restart:
                self.current_addr = 0
            elif self.step:
                self.current_addr = self.current_addr + self.strides[self.mux_sel]
|
StanfordAHA/garnet | global_buffer/design/glb_core_switch.py | <reponame>StanfordAHA/garnet<gh_stars>10-100
from kratos import Generator, always_ff, always_comb, posedge, concat
from global_buffer.design.global_buffer_parameter import GlobalBufferParams
from global_buffer.design.glb_header import GlbHeader
class GlbCoreSwitch(Generator):
def __init__(self, _params: GlobalBufferParams):
    """Declare the switch's ports, pipeline registers, and always blocks.

    The switch routes write / read-request / read-response packets
    between the processor (pr), stream router (sr), DMA, program-config
    router (pcfgr) / DMA, and this tile's bank array, steered by the
    three *_ctrl_mode configuration inputs.
    """
    super().__init__("glb_core_switch")
    self._params = _params
    self.header = GlbHeader(self._params)
    self.clk = self.clock("clk")
    # NOTE(review): clk_en is a plain input here, while sibling modules use
    # self.clock_en("clk_en") -- confirm this is intentional.
    self.clk_en = self.input("clk_en", 1)
    self.reset = self.reset("reset")
    self.glb_tile_id = self.input(
        "glb_tile_id", self._params.tile_sel_addr_width)

    # wr packet
    self.wr_packet_pr2sw = self.input(
        "wr_packet_pr2sw", self.header.wr_packet_t)
    self.wr_packet_sr2sw = self.input(
        "wr_packet_sr2sw", self.header.wr_packet_t)
    self.wr_packet_sw2sr = self.output(
        "wr_packet_sw2sr", self.header.wr_packet_t)
    self.wr_packet_dma2sw = self.input(
        "wr_packet_dma2sw", self.header.wr_packet_t)
    self.wr_packet_sw2bankarr = self.output(
        "wr_packet_sw2bankarr", self.header.wr_packet_t, size=self._params.banks_per_tile)

    # rdrq packet
    self.rdrq_packet_pr2sw = self.input(
        "rdrq_packet_pr2sw", self.header.rdrq_packet_t)
    self.rdrq_packet_sr2sw = self.input(
        "rdrq_packet_sr2sw", self.header.rdrq_packet_t)
    self.rdrq_packet_sw2sr = self.output(
        "rdrq_packet_sw2sr", self.header.rdrq_packet_t)
    self.rdrq_packet_dma2sw = self.input(
        "rdrq_packet_dma2sw", self.header.rdrq_packet_t)
    self.rdrq_packet_pcfgr2sw = self.input(
        "rdrq_packet_pcfgr2sw", self.header.rdrq_packet_t)
    self.rdrq_packet_sw2pcfgr = self.output(
        "rdrq_packet_sw2pcfgr", self.header.rdrq_packet_t)
    self.rdrq_packet_pcfgdma2sw = self.input(
        "rdrq_packet_pcfgdma2sw", self.header.rdrq_packet_t)
    self.rdrq_packet_sw2bankarr = self.output(
        "rdrq_packet_sw2bankarr", self.header.rdrq_packet_t, size=self._params.banks_per_tile)

    # rdrs packet
    self.rdrs_packet_sw2pr = self.output(
        "rdrs_packet_sw2pr", self.header.rdrs_packet_t)
    self.rdrs_packet_sr2sw = self.input(
        "rdrs_packet_sr2sw", self.header.rdrs_packet_t)
    self.rdrs_packet_sw2sr = self.output(
        "rdrs_packet_sw2sr", self.header.rdrs_packet_t)
    self.rdrs_packet_sw2dma = self.output(
        "rdrs_packet_sw2dma", self.header.rdrs_packet_t)
    self.rdrs_packet_pcfgr2sw = self.input(
        "rdrs_packet_pcfgr2sw", self.header.rdrs_packet_t)
    self.rdrs_packet_sw2pcfgr = self.output(
        "rdrs_packet_sw2pcfgr", self.header.rdrs_packet_t)
    self.rdrs_packet_sw2pcfgdma = self.output(
        "rdrs_packet_sw2pcfgdma", self.header.rdrs_packet_t)
    self.rdrs_packet_bankarr2sw = self.input(
        "rdrs_packet_bankarr2sw", self.header.rdrs_packet_t, size=self._params.banks_per_tile)

    # configuration
    self.cfg_st_dma_ctrl_mode = self.input("cfg_st_dma_ctrl_mode", 2)
    self.cfg_ld_dma_ctrl_mode = self.input("cfg_ld_dma_ctrl_mode", 2)
    self.cfg_pcfg_dma_ctrl_mode = self.input("cfg_pcfg_dma_ctrl_mode", 1)

    # local variables
    assert self._params.glb_switch_pipeline_depth == 1  # switch pipeline depth is fixed to 1
    self.wr_packet_sr2sw_d = self.var(
        "wr_packet_sr2sw_d", self.header.wr_packet_t)
    self.wr_packet_pr2sw_d = self.var(
        "wr_packet_pr2sw_d", self.header.wr_packet_t)
    self.wr_packet_dma2sw_d = self.var(
        "wr_packet_dma2sw_d", self.header.wr_packet_t)
    self.wr_packet_sw2bank_muxed = self.var(
        "wr_packet_sw2bank_muxed", self.header.wr_packet_t)
    self.wr_packet_sw2bank_filtered = self.var(
        "wr_packet_sw2bank_filtered", self.header.wr_packet_t)
    self.rdrq_packet_pr2sw_d = self.var(
        "rdrq_packet_pr2sw_d", self.header.rdrq_packet_t)
    self.rdrq_packet_sr2sw_d = self.var(
        "rdrq_packet_sr2sw_d", self.header.rdrq_packet_t)
    self.rdrq_packet_dma2sw_d = self.var(
        "rdrq_packet_dma2sw_d", self.header.rdrq_packet_t)
    self.rdrq_packet_pcfgr2sw_d = self.var(
        "rdrq_packet_pcfgr2sw_d", self.header.rdrq_packet_t)
    self.rdrq_packet_pcfgdma2sw_d = self.var(
        "rdrq_packet_pcfgdma2sw_d", self.header.rdrq_packet_t)
    self.rdrq_packet_sw2bank_muxed = self.var(
        "rdrq_packet_sw2bank_muxed", self.header.rdrq_packet_t)
    self.rdrs_packet_pcfgr2sw_d = self.var(
        "rdrs_packet_pcfgr2sw_d", self.header.rdrs_packet_t)
    self.rdrs_packet_sr2sw_d = self.var(
        "rdrs_packet_sr2sw_d", self.header.rdrs_packet_t)
    # Fix: the original name strings carried a trailing space, which
    # produces a malformed RTL identifier.
    self.rdrs_packet_bankarr2sw_sr_d = self.var(
        "rdrs_packet_bankarr2sw_sr_d", self.header.rdrs_packet_t, size=self._params.banks_per_tile)
    self.rdrs_packet_bankarr2sw_pr_d = self.var(
        "rdrs_packet_bankarr2sw_pr_d", self.header.rdrs_packet_t, size=self._params.banks_per_tile)
    self.rdrs_packet_bankarr2sw_pcfgr_d = self.var(
        "rdrs_packet_bankarr2sw_pcfgr_d", self.header.rdrs_packet_t, size=self._params.banks_per_tile)

    # packet src enum
    self.packet_src_e = self.enum("packet_src_e", {
        "none": 0, "proc": 1, "strm_dma": 2, "strm_rtr": 3, "pcfg_dma": 4, "pcfg_rtr": 5})

    # TODO: Kratos doesn't support array of enum instances yet
    self.rdrq_sel = self.var("rdrq_sel", self.packet_src_e)
    self.rdrq_sel_d = []
    self.rdrq_sel_d_nostall = []
    for i in range(self._params.glb_bank_memory_pipeline_depth
                   + self._params.sram_gen_pipeline_depth
                   + self._params.sram_gen_output_pipeline_depth
                   + self._params.glb_switch_pipeline_depth
                   + 1):
        self.rdrq_sel_d.append(
            self.var(f"rdrq_sel_d{i+1}", self.packet_src_e))
        self.rdrq_sel_d_nostall.append(
            self.var(f"rdrq_sel_d{i+1}_nostall", self.packet_src_e))
    self.rdrq_bank_sel = self.var(
        "rdrq_bank_sel", self._params.bank_sel_addr_width)
    self.rdrq_bank_sel_d = []
    self.rdrq_bank_sel_d_nostall = []
    for i in range(self._params.glb_bank_memory_pipeline_depth
                   + self._params.sram_gen_pipeline_depth
                   + self._params.sram_gen_output_pipeline_depth
                   + self._params.glb_switch_pipeline_depth
                   + 1):
        self.rdrq_bank_sel_d.append(
            self.var(f"rdrq_bank_sel_d{i+1}", self._params.bank_sel_addr_width))
        self.rdrq_bank_sel_d_nostall.append(
            self.var(f"rdrq_bank_sel_d{i+1}_nostall", self._params.bank_sel_addr_width))
    self.wr_bank_sel = self.var(
        "wr_bank_sel", self._params.bank_sel_addr_width)

    # localparam: bit positions of the tile / bank select fields in a
    # packet address
    self.packet_addr_tile_sel_msb = _params.bank_addr_width + \
        _params.bank_sel_addr_width + _params.tile_sel_addr_width - 1
    self.packet_addr_tile_sel_lsb = _params.bank_addr_width + _params.bank_sel_addr_width
    self.packet_addr_bank_sel_msb = _params.bank_addr_width + \
        _params.bank_sel_addr_width - 1
    self.packet_addr_bank_sel_lsb = _params.bank_addr_width

    # Add always statements
    # wr packet
    self.add_always(self.wr_proc_pipeline)
    self.add_always(self.wr_data_pipeline)
    self.add_always(self.wr_sw2bank_logic)
    self.add_always(self.wr_sw2bank_filtered_logic)
    self.add_always(self.wr_sw2bankarr_logic)
    self.add_always(self.wr_sw2sr_logic)
    # rdrq packet
    self.add_always(self.rdrq_proc_pcfg_pipeline)
    self.add_always(self.rdrq_data_pipeline)
    self.add_always(self.rdrq_sel_logic)
    self.add_always(self.rdrq_switch_logic)
    self.add_always(self.rdrq_sw2bank_logic)
    self.add_always(self.rdrq_sw2sr_logic)
    self.add_always(self.rdrq_sw2pcfgr_logic)
    # rdrq_sel pipeline
    for in_, out_ in zip([self.rdrq_sel] + self.rdrq_sel_d[:-1], self.rdrq_sel_d):
        self.add_always(self.rdrq_pipeline, in_=in_, out_=out_,
                        rst=self.packet_src_e.none)
    for in_, out_ in zip([self.rdrq_sel] + self.rdrq_sel_d_nostall[:-1], self.rdrq_sel_d_nostall):
        self.add_always(self.rdrq_pipeline_nostall, in_=in_,
                        out_=out_, rst=self.packet_src_e.none)
    # rdrq_bank_sel pipeline
    for in_, out_ in zip([self.rdrq_bank_sel] + self.rdrq_bank_sel_d[:-1], self.rdrq_bank_sel_d):
        self.add_always(self.rdrq_pipeline, in_=in_, out_=out_, rst=0)
    for in_, out_ in zip([self.rdrq_bank_sel] + self.rdrq_bank_sel_d_nostall[:-1], self.rdrq_bank_sel_d_nostall):
        self.add_always(self.rdrq_pipeline_nostall,
                        in_=in_, out_=out_, rst=0)
    # rdrs packet
    self.add_always(self.rdrs_proc_pcfg_pipeline)
    self.add_always(self.rdrs_data_pipeline)
    self.add_always(self.rdrs_sr2sw_pipieline)
    self.add_always(self.rdrs_sw2dma_logic)
    self.add_always(self.rdrs_sw2sr_logic)
    self.add_always(self.rdrs_sw2pr_logic)
    self.add_always(self.rdrs_pcfgr2sw_pipeline)
    self.add_always(self.rdrs_sw2pcfgdma_logic)
    self.add_always(self.rdrs_sw2pcfgr_logic)
@always_ff((posedge, "clk"), (posedge, "reset"))
def wr_proc_pipeline(self):
    # Processor write path register; not gated by clk_en (proc path never stalls).
    if self.reset:
        self.wr_packet_pr2sw_d = 0
    else:
        self.wr_packet_pr2sw_d = self.wr_packet_pr2sw
@always_ff((posedge, "clk"), (posedge, "reset"))
def wr_data_pipeline(self):
    # Stream DMA / router write path registers; gated by clk_en.
    if self.reset:
        self.wr_packet_dma2sw_d = 0
        self.wr_packet_sr2sw_d = 0
    elif self.clk_en:
        self.wr_packet_dma2sw_d = self.wr_packet_dma2sw
        self.wr_packet_sr2sw_d = self.wr_packet_sr2sw
@always_comb
def wr_sw2bank_logic(self):
    # Write-source priority: processor wins, then DMA (when DMA mode is
    # enabled), otherwise the stream router.
    if self.wr_packet_pr2sw_d['wr_en']:
        self.wr_packet_sw2bank_muxed = self.wr_packet_pr2sw_d
    elif self.cfg_st_dma_ctrl_mode != 0:
        self.wr_packet_sw2bank_muxed = self.wr_packet_dma2sw_d
    else:
        self.wr_packet_sw2bank_muxed = self.wr_packet_sr2sw_d
@always_comb
def wr_sw2bank_filtered_logic(self):
    # Accept the write only when its tile-select field targets this tile;
    # then extract the bank-select field for bank fan-out.
    if self.wr_packet_sw2bank_muxed['wr_addr'][self.packet_addr_tile_sel_msb,
                                               self.packet_addr_tile_sel_lsb] == self.glb_tile_id:
        self.wr_packet_sw2bank_filtered = self.wr_packet_sw2bank_muxed
    else:
        self.wr_packet_sw2bank_filtered = 0
    self.wr_bank_sel = self.wr_packet_sw2bank_filtered['wr_addr'][
        self.packet_addr_bank_sel_msb, self.packet_addr_bank_sel_lsb]
@always_comb
def wr_sw2bankarr_logic(self):
    # One-hot fan-out of the filtered write packet to the selected bank.
    for i in range(self._params.banks_per_tile):
        if self.wr_bank_sel == i:
            self.wr_packet_sw2bankarr[i] = self.wr_packet_sw2bank_filtered
        else:
            self.wr_packet_sw2bankarr[i] = 0
@always_comb
def wr_sw2sr_logic(self):
    # Forward the unregistered write packet onward to the stream router.
    if self.cfg_st_dma_ctrl_mode != 0:
        self.wr_packet_sw2sr = self.wr_packet_dma2sw
    else:
        self.wr_packet_sw2sr = self.wr_packet_sr2sw
@always_ff((posedge, "clk"), (posedge, "reset"))
def rdrq_proc_pcfg_pipeline(self):
    # Proc and pcfg read-request registers; not gated by clk_en.
    if self.reset:
        self.rdrq_packet_pr2sw_d = 0
        self.rdrq_packet_pcfgr2sw_d = 0
        self.rdrq_packet_pcfgdma2sw_d = 0
    else:
        self.rdrq_packet_pr2sw_d = self.rdrq_packet_pr2sw
        self.rdrq_packet_pcfgr2sw_d = self.rdrq_packet_pcfgr2sw
        self.rdrq_packet_pcfgdma2sw_d = self.rdrq_packet_pcfgdma2sw
@always_ff((posedge, "clk"), (posedge, "reset"))
def rdrq_data_pipeline(self):
    # Stream DMA / router read-request registers; gated by clk_en.
    if self.reset:
        self.rdrq_packet_dma2sw_d = 0
        self.rdrq_packet_sr2sw_d = 0
    elif self.clk_en:
        self.rdrq_packet_dma2sw_d = self.rdrq_packet_dma2sw
        self.rdrq_packet_sr2sw_d = self.rdrq_packet_sr2sw
@always_comb
def rdrq_sel_logic(self):
    # Arbitrate the read-request source for this tile. Priority:
    # proc > pcfg (dma or rtr, per mode) > stream (dma or rtr, per mode).
    # A source is eligible only if its rd_en is set and its tile-select
    # field matches this tile.
    if ((self.rdrq_packet_pr2sw_d['rd_en'] == 1)
            & (self.rdrq_packet_pr2sw_d['rd_addr'][self.packet_addr_tile_sel_msb, self.packet_addr_tile_sel_lsb]
               == self.glb_tile_id)):
        self.rdrq_sel = self.packet_src_e.proc
    elif ((self.cfg_pcfg_dma_ctrl_mode == 1)
            & (self.rdrq_packet_pcfgdma2sw_d['rd_en'] == 1)
            & (self.rdrq_packet_pcfgdma2sw_d['rd_addr'][self.packet_addr_tile_sel_msb, self.packet_addr_tile_sel_lsb]
               == self.glb_tile_id)):
        self.rdrq_sel = self.packet_src_e.pcfg_dma
    elif ((self.cfg_pcfg_dma_ctrl_mode == 0)
            & (self.rdrq_packet_pcfgr2sw_d['rd_en'] == 1)
            & (self.rdrq_packet_pcfgr2sw_d['rd_addr'][self.packet_addr_tile_sel_msb, self.packet_addr_tile_sel_lsb]
               == self.glb_tile_id)):
        self.rdrq_sel = self.packet_src_e.pcfg_rtr
    elif ((self.cfg_ld_dma_ctrl_mode != 0)
            & (self.rdrq_packet_dma2sw_d['rd_en'] == 1)
            & (self.rdrq_packet_dma2sw_d['rd_addr'][self.packet_addr_tile_sel_msb, self.packet_addr_tile_sel_lsb]
               == self.glb_tile_id)):
        self.rdrq_sel = self.packet_src_e.strm_dma
    elif ((self.cfg_ld_dma_ctrl_mode == 0)
            & (self.rdrq_packet_sr2sw_d['rd_en'] == 1)
            & (self.rdrq_packet_sr2sw_d['rd_addr'][self.packet_addr_tile_sel_msb, self.packet_addr_tile_sel_lsb]
               == self.glb_tile_id)):
        self.rdrq_sel = self.packet_src_e.strm_rtr
    else:
        self.rdrq_sel = self.packet_src_e.none
@always_comb
def rdrq_switch_logic(self):
    # Mux the registered request of the arbitrated source toward the bank
    # array, and capture its bank-select field.
    if self.rdrq_sel == self.packet_src_e.proc:
        self.rdrq_packet_sw2bank_muxed = self.rdrq_packet_pr2sw_d
    elif self.rdrq_sel == self.packet_src_e.pcfg_dma:
        self.rdrq_packet_sw2bank_muxed = self.rdrq_packet_pcfgdma2sw_d
    elif self.rdrq_sel == self.packet_src_e.pcfg_rtr:
        self.rdrq_packet_sw2bank_muxed = self.rdrq_packet_pcfgr2sw_d
    elif self.rdrq_sel == self.packet_src_e.strm_dma:
        self.rdrq_packet_sw2bank_muxed = self.rdrq_packet_dma2sw_d
    elif self.rdrq_sel == self.packet_src_e.strm_rtr:
        self.rdrq_packet_sw2bank_muxed = self.rdrq_packet_sr2sw_d
    else:
        self.rdrq_packet_sw2bank_muxed = 0
    self.rdrq_bank_sel = self.rdrq_packet_sw2bank_muxed['rd_addr'][
        self.packet_addr_bank_sel_msb, self.packet_addr_bank_sel_lsb]
@always_comb
def rdrq_sw2bank_logic(self):
    # One-hot fan-out of the muxed read request to the selected bank.
    for i in range(self._params.banks_per_tile):
        if self.rdrq_bank_sel == i:
            self.rdrq_packet_sw2bankarr[i] = self.rdrq_packet_sw2bank_muxed
        else:
            self.rdrq_packet_sw2bankarr[i] = 0
@always_comb
def rdrq_sw2sr_logic(self):
    # Forward the unregistered read request onward to the stream router.
    if self.cfg_ld_dma_ctrl_mode != 0:
        self.rdrq_packet_sw2sr = self.rdrq_packet_dma2sw
    else:
        self.rdrq_packet_sw2sr = self.rdrq_packet_sr2sw
@always_comb
def rdrq_sw2pcfgr_logic(self):
    # Forward the unregistered pcfg read request onward to the pcfg router.
    if self.cfg_pcfg_dma_ctrl_mode != 0:
        self.rdrq_packet_sw2pcfgr = self.rdrq_packet_pcfgdma2sw
    else:
        self.rdrq_packet_sw2pcfgr = self.rdrq_packet_pcfgr2sw
@always_ff((posedge, "clk"), (posedge, "reset"))
def rdrs_proc_pcfg_pipeline(self):
    # Register bank read responses for the proc and pcfg paths; not
    # gated by clk_en.
    if self.reset:
        for i in range(self._params.banks_per_tile):
            self.rdrs_packet_bankarr2sw_pr_d[i] = 0
            self.rdrs_packet_bankarr2sw_pcfgr_d[i] = 0
    else:
        for i in range(self._params.banks_per_tile):
            self.rdrs_packet_bankarr2sw_pr_d[i] = self.rdrs_packet_bankarr2sw[i]
            self.rdrs_packet_bankarr2sw_pcfgr_d[i] = self.rdrs_packet_bankarr2sw[i]
@always_ff((posedge, "clk"), (posedge, "reset"))
def rdrs_data_pipeline(self):
    # Register bank read responses for the stream path; gated by clk_en.
    if self.reset:
        for i in range(self._params.banks_per_tile):
            self.rdrs_packet_bankarr2sw_sr_d[i] = 0
    elif self.clk_en:
        for i in range(self._params.banks_per_tile):
            self.rdrs_packet_bankarr2sw_sr_d[i] = self.rdrs_packet_bankarr2sw[i]
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def rdrq_pipeline_nostall(self, in_, out_, rst):
        # Parameterized single-register template (no clk_en gating):
        # out_ <= rst on reset, else out_ <= in_ every cycle.
        # Signal bindings are supplied where this template is instantiated
        # (not visible in this chunk).
        if self.reset:
            out_ = rst
        else:
            out_ = in_
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def rdrq_pipeline(self, in_, out_, rst):
        # Same register template as rdrq_pipeline_nostall, but the update is
        # gated by clk_en (holds its value while clk_en is low).
        if self.reset:
            out_ = rst
        elif self.clk_en:
            out_ = in_
    # rdrs strm
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def rdrs_sr2sw_pipieline(self):
        # clk_en-gated one-cycle delay of the stream-router read response.
        # NOTE(review): method name has a typo ("pipieline"); left as-is
        # because add_always registers it by this name elsewhere in the file.
        if self.reset:
            self.rdrs_packet_sr2sw_d = 0
        elif self.clk_en:
            self.rdrs_packet_sr2sw_d = self.rdrs_packet_sr2sw
    @always_comb
    def rdrs_sw2dma_logic(self):
        # Read responses reach the load DMA only when the load-DMA controller
        # is enabled; otherwise the DMA sees an idle (zero) packet.
        if self.cfg_ld_dma_ctrl_mode != 0:
            self.rdrs_packet_sw2dma = self.rdrs_packet_sr2sw_d
        else:
            self.rdrs_packet_sw2dma = 0
    @always_comb
    def rdrs_sw2sr_logic(self):
        # Route a read response back to the stream router. rdrq_sel_d[-1] is
        # the request-source select delayed to line up with the banks' read
        # latency, so a response goes out only for stream-originated requests.
        if (self.rdrq_sel_d[-1] == self.packet_src_e.strm_rtr) | (self.rdrq_sel_d[-1] == self.packet_src_e.strm_dma):
            self.rdrs_packet_sw2sr = self.rdrs_packet_bankarr2sw_sr_d[self.rdrq_bank_sel_d[-1]]
        else:
            if self.cfg_ld_dma_ctrl_mode != 0:
                self.rdrs_packet_sw2sr = 0
            else:
                # DMA disabled: pass the incoming router response straight through.
                self.rdrs_packet_sw2sr = self.rdrs_packet_sr2sw
    # rdrs proc
    @always_comb
    def rdrs_sw2pr_logic(self):
        # Forward the selected bank's (delayed) response to the processor
        # router only when the latency-matched request source was the
        # processor; otherwise drive idle.
        if self.rdrq_sel_d_nostall[-1] == self.packet_src_e.proc:
            self.rdrs_packet_sw2pr = self.rdrs_packet_bankarr2sw_pr_d[
                self.rdrq_bank_sel_d_nostall[-1]]
        else:
            self.rdrs_packet_sw2pr = 0
    # rdrs pcfg
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def rdrs_pcfgr2sw_pipeline(self):
        # One-cycle delay of the pcfg-router read response (not clk_en-gated).
        if self.reset:
            self.rdrs_packet_pcfgr2sw_d = 0
        else:
            self.rdrs_packet_pcfgr2sw_d = self.rdrs_packet_pcfgr2sw
    @always_comb
    def rdrs_sw2pcfgdma_logic(self):
        # Read responses reach the pcfg DMA only when its controller is
        # enabled; otherwise the DMA sees an idle (zero) packet.
        if self.cfg_pcfg_dma_ctrl_mode != 0:
            self.rdrs_packet_sw2pcfgdma = self.rdrs_packet_pcfgr2sw_d
        else:
            self.rdrs_packet_sw2pcfgdma = 0
    @always_comb
    def rdrs_sw2pcfgr_logic(self):
        # Route a read response back to the pcfg router for pcfg-originated
        # requests (router or DMA), using the latency-matched source/bank
        # selects; otherwise pass through or idle depending on DMA mode.
        if ((self.rdrq_sel_d_nostall[-1] == self.packet_src_e.pcfg_rtr)
                | (self.rdrq_sel_d_nostall[-1] == self.packet_src_e.pcfg_dma)):
            self.rdrs_packet_sw2pcfgr = self.rdrs_packet_bankarr2sw_pcfgr_d[
                self.rdrq_bank_sel_d_nostall[-1]]
        else:
            if self.cfg_pcfg_dma_ctrl_mode != 0:
                self.rdrs_packet_sw2pcfgr = 0
            else:
                self.rdrs_packet_sw2pcfgr = self.rdrs_packet_pcfgr2sw
|
StanfordAHA/garnet | global_buffer/design/glb_core_strm_mux.py | from kratos import Generator, always_ff, always_comb, posedge
from global_buffer.design.global_buffer_parameter import GlobalBufferParams
class GlbCoreStrmMux(Generator):
    """Stream mux between the tile's single DMA stream and cgra_per_glb columns.

    g2f direction: broadcasts the DMA data/valid pair to every column whose
    cfg_data_network_g2f_mux bit is set, through a clk_en-gated register.
    f2g direction: combinationally selects one column's data/valid pair
    (the highest-indexed column with its cfg_data_network_f2g_mux bit set
    wins, since later loop iterations override earlier ones).
    """
    def __init__(self, _params: GlobalBufferParams):
        super().__init__("glb_core_strm_mux")
        self._params = _params
        self.clk = self.clock("clk")
        self.clk_en = self.input("clk_en", 1)
        self.reset = self.reset("reset")
        # DMA-side stream (single channel)
        self.data_g2f_dma = self.input(
            "data_g2f_dma", width=self._params.cgra_data_width)
        self.data_valid_g2f_dma = self.input(
            "data_valid_g2f_dma", width=1)
        # CGRA-side streams (one per column under this tile)
        self.data_g2f = self.output(
            "data_g2f", width=self._params.cgra_data_width, size=self._params.cgra_per_glb, packed=True)
        self.data_valid_g2f = self.output(
            "data_valid_g2f", 1, size=self._params.cgra_per_glb, packed=True)
        self.data_f2g_dma = self.output(
            "data_f2g_dma", width=self._params.cgra_data_width)
        self.data_valid_f2g_dma = self.output(
            "data_valid_f2g_dma", width=1)
        self.data_f2g = self.input(
            "data_f2g", width=self._params.cgra_data_width, size=self._params.cgra_per_glb, packed=True)
        self.data_valid_f2g = self.input(
            "data_valid_f2g", 1, size=self._params.cgra_per_glb, packed=True)
        # per-column mux enables (one-hot is presumably expected for f2g — confirm)
        self.cfg_data_network_g2f_mux = self.input(
            "cfg_data_network_g2f_mux", self._params.cgra_per_glb)
        self.cfg_data_network_f2g_mux = self.input(
            "cfg_data_network_f2g_mux", self._params.cgra_per_glb)
        # local variables
        self.data_g2f_int = self.var(
            "data_g2f_int", width=self._params.cgra_data_width, size=self._params.cgra_per_glb, packed=True)
        self.data_valid_g2f_int = self.var(
            "data_valid_g2f_int", 1, size=self._params.cgra_per_glb, packed=True)
        self.add_always(self.pipeline)
        self.add_always(self.data_g2f_logic)
        self.add_always(self.data_f2g_logic)
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def pipeline(self):
        # clk_en-gated output register on the g2f path.
        if self.reset:
            for i in range(self._params.cgra_per_glb):
                self.data_g2f[i] = 0
                self.data_valid_g2f[i] = 0
        elif self.clk_en:
            for i in range(self._params.cgra_per_glb):
                self.data_g2f[i] = self.data_g2f_int[i]
                self.data_valid_g2f[i] = self.data_valid_g2f_int[i]
    @always_comb
    def data_g2f_logic(self):
        # Drive the DMA stream to every enabled column; disabled columns idle.
        for i in range(self._params.cgra_per_glb):
            if self.cfg_data_network_g2f_mux[i] == 1:
                self.data_g2f_int[i] = self.data_g2f_dma
                self.data_valid_g2f_int[i] = self.data_valid_g2f_dma
            else:
                self.data_g2f_int[i] = 0
                self.data_valid_g2f_int[i] = 0
    @always_comb
    def data_f2g_logic(self):
        # Default to idle, then let each enabled column override; the
        # self-assignment in the else branch keeps the priority chain intact.
        self.data_f2g_dma = 0
        self.data_valid_f2g_dma = 0
        for i in range(self._params.cgra_per_glb):
            if self.cfg_data_network_f2g_mux[i] == 1:
                self.data_f2g_dma = self.data_f2g[i]
                self.data_valid_f2g_dma = self.data_valid_f2g[i]
            else:
                self.data_f2g_dma = self.data_f2g_dma
                self.data_valid_f2g_dma = self.data_valid_f2g_dma
|
StanfordAHA/garnet | global_buffer/design/glb_core.py | <filename>global_buffer/design/glb_core.py
from kratos import Generator
from global_buffer.design.glb_core_pcfg_router import GlbCorePcfgRouter
from global_buffer.design.glb_core_proc_router import GlbCoreProcRouter
from global_buffer.design.glb_core_sram_cfg_ctrl import GlbCoreSramCfgCtrl
from global_buffer.design.glb_core_strm_mux import GlbCoreStrmMux
from global_buffer.design.glb_core_strm_router import GlbCoreStrmRouter
from global_buffer.design.glb_core_switch import GlbCoreSwitch
from global_buffer.design.glb_bank import GlbBank
from global_buffer.design.global_buffer_parameter import GlobalBufferParams
from global_buffer.design.glb_cfg_ifc import GlbConfigInterface
from global_buffer.design.glb_header import GlbHeader
from global_buffer.design.glb_core_store_dma import GlbCoreStoreDma
from global_buffer.design.glb_core_load_dma import GlbCoreLoadDma
from global_buffer.design.glb_core_pcfg_dma import GlbCorePcfgDma
class GlbCore(Generator):
    """Top-level global-buffer tile core.

    Instantiates the tile's memory banks, the three DMA engines
    (store / load / pcfg), the SRAM-config controller, the packet switch,
    and the proc / stream / pcfg routers, and wires them together with the
    tile's east/west ring interfaces.
    """
    def __init__(self, _params: GlobalBufferParams):
        # TODO: configuration wiring to children modules should be a pass
        super().__init__("glb_core")
        self._params = _params
        self.header = GlbHeader(self._params)
        self.clk = self.clock("clk")
        self.clk_en = self.input("clk_en", 1)
        self.reset = self.reset("reset")
        self.glb_tile_id = self.input("glb_tile_id", self._params.tile_sel_addr_width)
        # east/west ring ports: processor, stream, and pcfg packet channels
        self.proc_packet_w2e_wsti = self.input("proc_packet_w2e_wsti", self.header.packet_t)
        self.proc_packet_e2w_wsto = self.output("proc_packet_e2w_wsto", self.header.packet_t)
        self.proc_packet_e2w_esti = self.input("proc_packet_e2w_esti", self.header.packet_t)
        self.proc_packet_w2e_esto = self.output("proc_packet_w2e_esto", self.header.packet_t)
        self.strm_packet_w2e_wsti = self.input("strm_packet_w2e_wsti", self.header.packet_t)
        self.strm_packet_e2w_wsto = self.output("strm_packet_e2w_wsto", self.header.packet_t)
        self.strm_packet_e2w_esti = self.input("strm_packet_e2w_esti", self.header.packet_t)
        self.strm_packet_w2e_esto = self.output("strm_packet_w2e_esto", self.header.packet_t)
        self.pcfg_packet_w2e_wsti = self.input("pcfg_packet_w2e_wsti", self.header.rd_packet_t)
        self.pcfg_packet_e2w_wsto = self.output("pcfg_packet_e2w_wsto", self.header.rd_packet_t)
        self.pcfg_packet_e2w_esti = self.input("pcfg_packet_e2w_esti", self.header.rd_packet_t)
        self.pcfg_packet_w2e_esto = self.output("pcfg_packet_w2e_esto", self.header.rd_packet_t)
        # CGRA-facing stream data ports (one lane per column)
        self.strm_data_f2g = self.input("strm_data_f2g", self._params.cgra_data_width,
                                        size=self._params.cgra_per_glb, packed=True)
        self.strm_data_valid_f2g = self.input("strm_data_valid_f2g", 1, size=self._params.cgra_per_glb, packed=True)
        self.strm_data_g2f = self.output("strm_data_g2f", self._params.cgra_data_width,
                                         size=self._params.cgra_per_glb, packed=True)
        self.strm_data_valid_g2f = self.output("strm_data_valid_g2f", 1, size=self._params.cgra_per_glb, packed=True)
        # config port
        self.sram_cfg_ifc = GlbConfigInterface(addr_width=self._params.glb_addr_width,
                                               data_width=self._params.axi_data_width)
        self.if_sram_cfg_est_m = self.interface(self.sram_cfg_ifc.master, "if_sram_cfg_est_m", is_port=True)
        self.if_sram_cfg_wst_s = self.interface(self.sram_cfg_ifc.slave, "if_sram_cfg_wst_s", is_port=True)
        # configuration registers
        self.cfg_data_network_connected_prev = self.input("cfg_data_network_connected_prev", 1)
        self.cfg_pcfg_network_connected_prev = self.input("cfg_pcfg_network_connected_prev", 1)
        self.cfg_data_network = self.input("cfg_data_network", self.header.cfg_data_network_t)
        self.cfg_pcfg_network = self.input("cfg_pcfg_network", self.header.cfg_pcfg_network_t)
        # st dma
        self.cfg_st_dma_ctrl = self.input("cfg_st_dma_ctrl", self.header.cfg_dma_ctrl_t)
        self.cfg_st_dma_header = self.input("cfg_st_dma_header", self.header.cfg_dma_header_t,
                                            size=self._params.queue_depth)
        # ld dma
        self.cfg_ld_dma_ctrl = self.input("cfg_ld_dma_ctrl", self.header.cfg_dma_ctrl_t)
        self.cfg_ld_dma_header = self.input("cfg_ld_dma_header", self.header.cfg_dma_header_t,
                                            size=self._params.queue_depth)
        # pcfg dma
        self.cfg_pcfg_dma_ctrl = self.input("cfg_pcfg_dma_ctrl", self.header.cfg_pcfg_dma_ctrl_t)
        self.cfg_pcfg_dma_header = self.input("cfg_pcfg_dma_header", self.header.cfg_pcfg_dma_header_t)
        self.cgra_cfg_pcfg = self.output("cgra_cfg_pcfg", self.header.cgra_cfg_t)
        # DMA start/done handshake pulses
        self.ld_dma_start_pulse = self.input("ld_dma_start_pulse", 1)
        self.ld_dma_done_pulse = self.output("ld_dma_done_pulse", 1)
        self.st_dma_start_pulse = self.input("st_dma_start_pulse", 1)
        self.st_dma_done_pulse = self.output("st_dma_done_pulse", 1)
        self.pcfg_start_pulse = self.input("pcfg_start_pulse", 1)
        self.pcfg_done_pulse = self.output("pcfg_done_pulse", 1)
        # internal wires between DMAs, switch, routers, and banks;
        # names follow <payload>_<producer>2<consumer> convention
        self.strm_data_g2f_dma2mux = self.var("strm_data_g2f_dma2mux", self._params.cgra_data_width)
        self.strm_data_valid_g2f_dma2mux = self.var("strm_data_valid_g2f_dma2mux", 1)
        self.strm_data_f2g_mux2dma = self.var("strm_data_f2g_mux2dma", self._params.cgra_data_width)
        self.strm_data_valid_f2g_mux2dma = self.var("strm_data_valid_f2g_mux2dma", 1)
        self.wr_packet_pr2sw = self.var("wr_packet_pr2sw", self.header.wr_packet_t)
        self.wr_packet_sr2sw = self.var("wr_packet_sr2sw", self.header.wr_packet_t)
        self.wr_packet_sw2sr = self.var("wr_packet_sw2sr", self.header.wr_packet_t)
        self.wr_packet_dma2sw = self.var("wr_packet_dma2sw", self.header.wr_packet_t)
        self.wr_packet_sw2bankarr = self.var(
            "wr_packet_sw2bankarr", self.header.wr_packet_t, size=self._params.banks_per_tile)
        self.rdrq_packet_pr2sw = self.var("rdrq_packet_pr2sw", self.header.rdrq_packet_t)
        self.rdrq_packet_sr2sw = self.var("rdrq_packet_sr2sw", self.header.rdrq_packet_t)
        self.rdrq_packet_sw2sr = self.var("rdrq_packet_sw2sr", self.header.rdrq_packet_t)
        self.rdrq_packet_dma2sw = self.var("rdrq_packet_dma2sw", self.header.rdrq_packet_t)
        self.rdrq_packet_pcfgdma2sw = self.var("rdrq_packet_pcfgdma2sw", self.header.rdrq_packet_t)
        self.rdrq_packet_pcfgr2sw = self.var("rdrq_packet_pcfgr2sw", self.header.rdrq_packet_t)
        self.rdrq_packet_sw2pcfgr = self.var("rdrq_packet_sw2pcfgr", self.header.rdrq_packet_t)
        self.rdrq_packet_sw2bankarr = self.var(
            "rdrq_packet_sw2bankarr", self.header.rdrq_packet_t, size=self._params.banks_per_tile)
        self.rdrs_packet_sw2pr = self.var("rdrs_packet_sw2pr", self.header.rdrs_packet_t)
        self.rdrs_packet_sr2sw = self.var("rdrs_packet_sr2sw", self.header.rdrs_packet_t)
        self.rdrs_packet_sw2sr = self.var("rdrs_packet_sw2sr", self.header.rdrs_packet_t)
        self.rdrs_packet_sw2dma = self.var("rdrs_packet_sw2dma", self.header.rdrs_packet_t)
        self.rdrs_packet_pcfgr2sw = self.var("rdrs_packet_pcfgr2sw", self.header.rdrs_packet_t)
        self.rdrs_packet_sw2pcfgr = self.var("rdrs_packet_sw2pcfgr", self.header.rdrs_packet_t)
        self.rdrs_packet_sw2pcfgdma = self.var("rdrs_packet_sw2pcfgdma", self.header.rdrs_packet_t)
        self.rdrs_packet_bankarr2sw = self.var(
            "rdrs_packet_bankarr2sw", self.header.rdrs_packet_t, size=self._params.banks_per_tile)
        # combined packet bundles used to wire the switch to the routers
        self.packet_sr2sw = self.var("packet_sr2sw", self.header.packet_t)
        self.packet_sw2sr = self.var("packet_sw2sr", self.header.packet_t)
        self.rd_packet_pcfgr2sw = self.var("rd_packet_pcfgr2sw", self.header.rd_packet_t)
        self.rd_packet_sw2pcfgr = self.var("rd_packet_sw2pcfgr", self.header.rd_packet_t)
        # per-bank SRAM config interfaces
        self.if_sram_cfg_bank2core = []
        for i in range(self._params.banks_per_tile):
            if_sram_cfg_bank2core = self.interface(GlbConfigInterface(addr_width=self._params.bank_addr_width,
                                                                      data_width=self._params.axi_data_width),
                                                   f"if_sram_cfg_bank2core_{i}")
            self.if_sram_cfg_bank2core.append(if_sram_cfg_bank2core)
        # memory banks
        self.glb_bank_arr = []
        for i in range(self._params.banks_per_tile):
            glb_bank = GlbBank(self._params)
            self.add_child(f"glb_bank_{i}",
                           glb_bank,
                           clk=self.clk,
                           reset=self.reset,
                           wr_packet=self.wr_packet_sw2bankarr[i],
                           rdrq_packet=self.rdrq_packet_sw2bankarr[i],
                           rdrs_packet=self.rdrs_packet_bankarr2sw[i],
                           if_sram_cfg_s=self.if_sram_cfg_bank2core[i])
            self.glb_bank_arr.append(glb_bank)
        # SRAM config controller fans the tile config port out to the banks
        self.glb_core_sram_cfg_ctrl = GlbCoreSramCfgCtrl(self._params)
        self.add_child("glb_core_sram_cfg_ctrl",
                       self.glb_core_sram_cfg_ctrl,
                       clk=self.clk,
                       reset=self.reset,
                       glb_tile_id=self.glb_tile_id,
                       if_sram_cfg_est_m=self.if_sram_cfg_est_m,
                       if_sram_cfg_wst_s=self.if_sram_cfg_wst_s)
        for i in range(self._params.banks_per_tile):
            self.wire(self.glb_core_sram_cfg_ctrl.if_sram_cfg_core2bank_m[i],
                      self.if_sram_cfg_bank2core[i])
        self.add_child("glb_core_store_dma",
                       GlbCoreStoreDma(_params=self._params),
                       clk=self.clk,
                       clk_en=self.clk_en,
                       reset=self.reset,
                       data_f2g=self.strm_data_f2g_mux2dma,
                       data_valid_f2g=self.strm_data_valid_f2g_mux2dma,
                       wr_packet=self.wr_packet_dma2sw,
                       # TODO: How to make this automatic
                       cfg_st_dma_num_repeat=self.cfg_st_dma_ctrl['num_repeat'],
                       cfg_st_dma_ctrl_use_valid=self.cfg_st_dma_ctrl['use_valid'],
                       cfg_st_dma_ctrl_mode=self.cfg_st_dma_ctrl['mode'],
                       cfg_data_network_latency=self.cfg_data_network['latency'],
                       cfg_st_dma_header=self.cfg_st_dma_header,
                       st_dma_start_pulse=self.st_dma_start_pulse,
                       st_dma_done_pulse=self.st_dma_done_pulse)
        self.add_child("glb_core_load_dma",
                       GlbCoreLoadDma(_params=self._params),
                       clk=self.clk,
                       clk_en=self.clk_en,
                       reset=self.reset,
                       data_g2f=self.strm_data_g2f_dma2mux,
                       data_valid_g2f=self.strm_data_valid_g2f_dma2mux,
                       rdrq_packet=self.rdrq_packet_dma2sw,
                       rdrs_packet=self.rdrs_packet_sw2dma,
                       # TODO: How to make this automatic
                       cfg_ld_dma_num_repeat=self.cfg_ld_dma_ctrl['num_repeat'],
                       cfg_ld_dma_ctrl_use_valid=self.cfg_ld_dma_ctrl['use_valid'],
                       cfg_ld_dma_ctrl_mode=self.cfg_ld_dma_ctrl['mode'],
                       cfg_data_network_latency=self.cfg_data_network['latency'],
                       cfg_ld_dma_header=self.cfg_ld_dma_header,
                       ld_dma_start_pulse=self.ld_dma_start_pulse,
                       ld_dma_done_pulse=self.ld_dma_done_pulse)
        self.add_child("glb_core_pcfg_dma",
                       GlbCorePcfgDma(_params=self._params),
                       clk=self.clk,
                       reset=self.reset,
                       cgra_cfg_pcfg=self.cgra_cfg_pcfg,
                       rdrq_packet=self.rdrq_packet_pcfgdma2sw,
                       rdrs_packet=self.rdrs_packet_sw2pcfgdma,
                       # TODO: How to make this automatic
                       cfg_pcfg_dma_ctrl_mode=self.cfg_pcfg_dma_ctrl['mode'],
                       cfg_pcfg_network_latency=self.cfg_pcfg_network['latency'],
                       cfg_pcfg_dma_header=self.cfg_pcfg_dma_header,
                       pcfg_start_pulse=self.pcfg_start_pulse,
                       pcfg_done_pulse=self.pcfg_done_pulse)
        self.add_child("glb_core_strm_mux",
                       GlbCoreStrmMux(_params=self._params),
                       clk=self.clk,
                       clk_en=self.clk_en,
                       reset=self.reset,
                       data_g2f_dma=self.strm_data_g2f_dma2mux,
                       data_valid_g2f_dma=self.strm_data_valid_g2f_dma2mux,
                       data_g2f=self.strm_data_g2f,
                       data_valid_g2f=self.strm_data_valid_g2f,
                       data_f2g_dma=self.strm_data_f2g_mux2dma,
                       data_valid_f2g_dma=self.strm_data_valid_f2g_mux2dma,
                       data_f2g=self.strm_data_f2g,
                       data_valid_f2g=self.strm_data_valid_f2g,
                       cfg_data_network_g2f_mux=self.cfg_ld_dma_ctrl['data_mux'],
                       cfg_data_network_f2g_mux=self.cfg_st_dma_ctrl['data_mux'])
        self.glb_core_switch = GlbCoreSwitch(_params=self._params)
        self.add_child("glb_core_switch",
                       self.glb_core_switch,
                       clk=self.clk,
                       clk_en=self.clk_en,
                       reset=self.reset,
                       glb_tile_id=self.glb_tile_id,
                       wr_packet_pr2sw=self.wr_packet_pr2sw,
                       wr_packet_dma2sw=self.wr_packet_dma2sw,
                       wr_packet_sw2bankarr=self.wr_packet_sw2bankarr,
                       rdrq_packet_pr2sw=self.rdrq_packet_pr2sw,
                       rdrq_packet_dma2sw=self.rdrq_packet_dma2sw,
                       rdrq_packet_pcfgdma2sw=self.rdrq_packet_pcfgdma2sw,
                       rdrq_packet_sw2bankarr=self.rdrq_packet_sw2bankarr,
                       rdrs_packet_sw2pr=self.rdrs_packet_sw2pr,
                       rdrs_packet_sw2dma=self.rdrs_packet_sw2dma,
                       rdrs_packet_sw2pcfgdma=self.rdrs_packet_sw2pcfgdma,
                       rdrs_packet_bankarr2sw=self.rdrs_packet_bankarr2sw,
                       cfg_st_dma_ctrl_mode=self.cfg_st_dma_ctrl['mode'],
                       cfg_ld_dma_ctrl_mode=self.cfg_ld_dma_ctrl['mode'],
                       cfg_pcfg_dma_ctrl_mode=self.cfg_pcfg_dma_ctrl['mode'])
        # The switch exposes wr/rdrq/rdrs sub-packets separately; wire each
        # field into the combined router-facing packet bundles.
        for port in self.header.wr_packet_ports:
            self.wire(
                self.glb_core_switch.wr_packet_sr2sw[port], self.packet_sr2sw[port])
            self.wire(
                self.glb_core_switch.wr_packet_sw2sr[port], self.packet_sw2sr[port])
        for port in self.header.rdrq_packet_ports:
            self.wire(
                self.glb_core_switch.rdrq_packet_sr2sw[port], self.packet_sr2sw[port])
            self.wire(
                self.glb_core_switch.rdrq_packet_sw2sr[port], self.packet_sw2sr[port])
            self.wire(
                self.glb_core_switch.rdrq_packet_pcfgr2sw[port], self.rd_packet_pcfgr2sw[port])
            self.wire(
                self.glb_core_switch.rdrq_packet_sw2pcfgr[port], self.rd_packet_sw2pcfgr[port])
        for port in self.header.rdrs_packet_ports:
            self.wire(
                self.glb_core_switch.rdrs_packet_sr2sw[port], self.packet_sr2sw[port])
            self.wire(
                self.glb_core_switch.rdrs_packet_sw2sr[port], self.packet_sw2sr[port])
            self.wire(
                self.glb_core_switch.rdrs_packet_pcfgr2sw[port], self.rd_packet_pcfgr2sw[port])
            self.wire(
                self.glb_core_switch.rdrs_packet_sw2pcfgr[port], self.rd_packet_sw2pcfgr[port])
        self.add_child("glb_core_proc_router",
                       GlbCoreProcRouter(_params=self._params),
                       clk=self.clk,
                       reset=self.reset,
                       glb_tile_id=self.glb_tile_id,
                       packet_w2e_wsti=self.proc_packet_w2e_wsti,
                       packet_e2w_wsto=self.proc_packet_e2w_wsto,
                       packet_e2w_esti=self.proc_packet_e2w_esti,
                       packet_w2e_esto=self.proc_packet_w2e_esto,
                       wr_packet_pr2sw=self.wr_packet_pr2sw,
                       rdrq_packet_pr2sw=self.rdrq_packet_pr2sw,
                       rdrs_packet_sw2pr=self.rdrs_packet_sw2pr)
        self.add_child("glb_core_strm_router",
                       GlbCoreStrmRouter(_params=self._params),
                       clk=self.clk,
                       clk_en=self.clk_en,
                       reset=self.reset,
                       glb_tile_id=self.glb_tile_id,
                       packet_w2e_wsti=self.strm_packet_w2e_wsti,
                       packet_e2w_wsto=self.strm_packet_e2w_wsto,
                       packet_e2w_esti=self.strm_packet_e2w_esti,
                       packet_w2e_esto=self.strm_packet_w2e_esto,
                       packet_sr2sw=self.packet_sr2sw,
                       packet_sw2sr=self.packet_sw2sr,
                       cfg_tile_connected_prev=self.cfg_data_network_connected_prev,
                       cfg_tile_connected_next=self.cfg_data_network['tile_connected'])
        self.add_child("glb_core_pcfg_router",
                       GlbCorePcfgRouter(_params=self._params),
                       clk=self.clk,
                       reset=self.reset,
                       glb_tile_id=self.glb_tile_id,
                       rd_packet_w2e_wsti=self.pcfg_packet_w2e_wsti,
                       rd_packet_e2w_wsto=self.pcfg_packet_e2w_wsto,
                       rd_packet_e2w_esti=self.pcfg_packet_e2w_esti,
                       rd_packet_w2e_esto=self.pcfg_packet_w2e_esto,
                       rd_packet_sw2pcfgr=self.rd_packet_sw2pcfgr,
                       rd_packet_pcfgr2sw=self.rd_packet_pcfgr2sw,
                       cfg_tile_connected_prev=self.cfg_pcfg_network_connected_prev,
                       cfg_tile_connected_next=self.cfg_pcfg_network['tile_connected'])
|
StanfordAHA/garnet | global_buffer/design/TS1N16FFCLLSBLVTC2048X64M8SW.py | from kratos import Generator, always_ff, posedge
class TS1N16FFCLLSBLVTC2048X64M8SW(Generator):
    """Behavioral model of a 2048x64 single-port SRAM macro.

    Control pins are active-low: CEB (chip enable), WEB (write enable),
    BWEB (per-bit write mask). RTSEL/WTSEL are accepted for pin
    compatibility but are not read by this behavioral model.
    """
    def __init__(self):
        super().__init__("TS1N16FFCLLSBLVTC2048X64M8SW")
        self.CLK = self.clock("CLK")
        self.CEB = self.input("CEB", 1)
        self.WEB = self.input("WEB", 1)
        self.BWEB = self.input("BWEB", 64)
        self.D = self.input("D", 64)
        self.A = self.input("A", 11)
        self.Q = self.output("Q", 64)
        self.RTSEL = self.input("RTSEL", 2)
        self.WTSEL = self.input("WTSEL", 2)
        # 2048-entry x 64-bit backing store
        self.data_array = self.var("data_array", 64, size=2048)
        self.add_always(self.ff)
    @always_ff((posedge, "CLK"))
    def ff(self):
        # Read every enabled cycle; write only unmasked bits when WEB is low.
        # Q updates even on a write cycle (read-before-write ordering here —
        # NOTE(review): confirm this matches the real macro's write-cycle
        # output behavior).
        if self.CEB == 0:
            self.Q = self.data_array[self.A]
            if self.WEB == 0:
                for i in range(64):
                    if self.BWEB[i] == 0:
                        self.data_array[self.A][i] = self.D[i]
|
StanfordAHA/garnet | global_buffer/design/glb_bank_sram_stub.py | <filename>global_buffer/design/glb_bank_sram_stub.py
from kratos import Generator, always_ff, posedge, concat, const, resize, clog2
from global_buffer.design.TS1N16FFCLLSBLVTC2048X64M8SW import TS1N16FFCLLSBLVTC2048X64M8SW
from global_buffer.design.pipeline import Pipeline
from global_buffer.design.global_buffer_parameter import GlobalBufferParams
class GlbBankSramStub(Generator):
    """Simulation stub for a glb bank SRAM.

    Models the SRAM as an array of cgra_data_width-bit words, packing four
    words per logical SRAM row (data_width == 4 * cgra_data_width, enforced
    below). Control pins are active-low (CEB chip enable, WEB write enable,
    BWEB per-bit write mask). Read data passes through a pipeline matching
    the generated macro's latency.
    """
    def __init__(self, addr_width, data_width, _params: GlobalBufferParams):
        super().__init__("glb_bank_sram_stub")
        self._params = _params
        self.addr_width = addr_width
        self.data_width = data_width
        # the row-packing below assumes exactly 4 CGRA words per SRAM row
        assert self.data_width == 4 * self._params.cgra_data_width
        self.RESET = self.reset("RESET")
        self.CLK = self.clock("CLK")
        self.CEB = self.input("CEB", 1)
        self.WEB = self.input("WEB", 1)
        self.BWEB = self.input("BWEB", self.data_width)
        self.D = self.input("D", self.data_width)
        self.A = self.input("A", self.addr_width)
        self.Q = self.output("Q", self.data_width)
        self.Q_w = self.var("Q_w", self.data_width)
        # word-addressed backing store: 4 CGRA words per SRAM row address
        self.mem = self.var("mem", self._params.cgra_data_width, size=2**(self.addr_width + 2), explicit_array=True)
        self.add_always(self.mem_ff)
        self.add_pipeline()
    @always_ff((posedge, "CLK"))
    def mem_ff(self):
        if self.CEB == 0:
            # read: concatenate the row's 4 words (word 3 = MSBs)
            self.Q_w = concat(self.mem[resize((self.A << 2) + 3, self.addr_width + 2)],
                              self.mem[resize((self.A << 2) + 2, self.addr_width + 2)],
                              self.mem[resize((self.A << 2) + 1, self.addr_width + 2)],
                              self.mem[resize((self.A << 2), self.addr_width + 2)])
            if self.WEB == 0:
                # write only the bits whose (active-low) mask bit is clear.
                # Word/bit indices are derived from cgra_data_width instead of
                # a hard-coded 16 so the stub stays consistent with the
                # clog2(cgra_data_width) resize width for any word size.
                for i in range(self.data_width):
                    if self.BWEB[i] == 0:
                        self.mem[resize((self.A << 2) + i // self._params.cgra_data_width,
                                        self.addr_width + 2)][resize(i % self._params.cgra_data_width,
                                                                     clog2(self._params.cgra_data_width))] = self.D[i]
    def add_pipeline(self):
        # delay Q_w by the configured SRAM pipeline depth to mimic the macro
        self.mem_pipeline = Pipeline(width=self.data_width,
                                     depth=(self._params.sram_gen_pipeline_depth
                                            + self._params.sram_gen_output_pipeline_depth))
        self.add_child("mem_pipeline",
                       self.mem_pipeline,
                       clk=self.CLK,
                       clk_en=const(1, 1),
                       reset=self.RESET,
                       in_=self.Q_w,
                       out_=self.Q)
|
StanfordAHA/garnet | global_buffer/design/glb_tile_cfg_ctrl.py | from kratos import Generator, always_comb, always_ff, posedge
from global_buffer.design.glb_cfg_ifc import GlbConfigInterface
from global_buffer.design.global_buffer_parameter import GlobalBufferParams
class GlbTileCfgCtrl(Generator):
    """Per-tile AXI-lite-style config ring controller.

    Decodes read/write requests arriving on the west slave interface: when
    the request's tile-id field matches glb_tile_id the request is handled
    locally via the h2d/d2h pio decoder signals; otherwise it is registered
    one cycle and passed through to the east master interface. Read data
    returns east-to-west, with locally produced data taking priority.
    """
    def __init__(self, _params: GlobalBufferParams):
        super().__init__("glb_tile_cfg_ctrl")
        self._params = _params
        # local parameters
        config = GlbConfigInterface(
            addr_width=self._params.axi_addr_width, data_width=self._params.axi_data_width)
        self.clk = self.clock("clk")
        self.reset = self.reset("reset")
        self.glb_tile_id = self.input(
            "glb_tile_id", _params.tile_sel_addr_width)
        # config port
        self.if_cfg_wst_s = self.interface(
            config.slave, "if_cfg_wst_s", is_port=True)
        self.if_cfg_est_m = self.interface(
            config.master, "if_cfg_est_m", is_port=True)
        # host-to-device pio decoder signals (local register access)
        self.h2d_pio_dec_write_data = self.output(
            "h2d_pio_dec_write_data", _params.axi_data_width)
        self.h2d_pio_dec_address = self.output(
            "h2d_pio_dec_address", _params.axi_addr_reg_width)
        self.h2d_pio_dec_read = self.output("h2d_pio_dec_read", 1)
        self.h2d_pio_dec_write = self.output("h2d_pio_dec_write", 1)
        self.d2h_dec_pio_read_data = self.input(
            "d2h_dec_pio_read_data", _params.axi_data_width)
        self.d2h_dec_pio_ack = self.input("d2h_dec_pio_ack", 1)
        self.d2h_dec_pio_nack = self.input("d2h_dec_pio_nack", 1)
        # local variables
        self.wr_data_internal = self.var(
            "wr_data_internal", _params.axi_data_width)
        self.addr_internal = self.var(
            "addr_internal", _params.axi_addr_reg_width)
        self.read_internal = self.var("read_internal", 1)
        self.write_internal = self.var("write_internal", 1)
        self.rd_en_d1 = self.var("rd_en_d1", 1)
        self.rd_en_d2 = self.var("rd_en_d2", 1)
        self.rd_data_internal = self.var(
            "rd_data_internal", _params.axi_data_width)
        # NOTE(review): rd_data_next / rd_data_valid_next are declared but
        # never referenced in this module's logic — candidates for removal.
        self.rd_data_next = self.var("rd_data_next", _params.axi_data_width)
        self.rd_data_valid_internal = self.var("rd_data_valid_internal", 1)
        # fixed generated-signal name typo ("rd_data_vald_next")
        self.rd_data_valid_next = self.var("rd_data_valid_next", 1)
        self.wr_tile_id_match = self.var("wr_tile_id_match", 1)
        self.rd_tile_id_match = self.var("rd_tile_id_match", 1)
        self.wr_addr_tile_id = self.var(
            "wr_addr_tile_id", _params.tile_sel_addr_width)
        self.rd_addr_tile_id = self.var(
            "rd_addr_tile_id", _params.tile_sel_addr_width)
        # tile-id field sits above the byte offset and register address bits
        tile_id_msb = (_params.axi_addr_reg_width
                       + _params.axi_byte_offset
                       + _params.tile_sel_addr_width
                       - 1)
        tile_id_lsb = _params.axi_addr_reg_width + _params.axi_byte_offset
        self.add_always(self.tile_id_match,
                        tile_id_msb=tile_id_msb, tile_id_lsb=tile_id_lsb)
        self.add_always(self.internal_logic)
        self.add_always(self.w2e_wr_ifc)
        self.add_always(self.w2e_rd_ifc)
        self.add_always(self.e2w_rd_ifc)
        self.add_always(self.rd_en_pipeline)
        self.add_always(self.rd_data_ff)
        # wire outputs
        self.wire_outputs()
    @always_comb
    def tile_id_match(self, tile_id_msb, tile_id_lsb):
        """Check if tile id matches with cfg address"""
        self.wr_addr_tile_id = self.if_cfg_wst_s.wr_addr[tile_id_msb, tile_id_lsb]
        self.rd_addr_tile_id = self.if_cfg_wst_s.rd_addr[tile_id_msb, tile_id_lsb]
        self.wr_tile_id_match = self.glb_tile_id == self.wr_addr_tile_id
        self.rd_tile_id_match = self.glb_tile_id == self.rd_addr_tile_id
    @always_comb
    def internal_logic(self):
        # Decode a locally targeted read/write into the pio decoder signals;
        # defaults keep everything idle when the tile id does not match.
        self.wr_data_internal = 0
        self.addr_internal = 0
        self.read_internal = 0
        self.write_internal = 0
        if self.if_cfg_wst_s.rd_en and self.rd_tile_id_match:
            self.addr_internal = self.if_cfg_wst_s.rd_addr[(self._params.axi_byte_offset
                                                            + self._params.axi_addr_reg_width
                                                            - 1),
                                                           self._params.axi_byte_offset]
            self.read_internal = 1
        if self.if_cfg_wst_s.wr_en and self.wr_tile_id_match:
            # write decode wins addr_internal if rd and wr hit simultaneously
            self.addr_internal = self.if_cfg_wst_s.wr_addr[(self._params.axi_byte_offset
                                                            + self._params.axi_addr_reg_width
                                                            - 1),
                                                           self._params.axi_byte_offset]
            self.wr_data_internal = self.if_cfg_wst_s.wr_data
            self.write_internal = 1
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def rd_en_pipeline(self):
        # two-cycle delay of the local read strobe, matching decoder latency
        if self.reset:
            self.rd_en_d1 = 0
            self.rd_en_d2 = 0
        else:
            self.rd_en_d1 = self.read_internal
            self.rd_en_d2 = self.rd_en_d1
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def rd_data_ff(self):
        # capture decoder read data for one cycle when it acks/nacks
        if self.reset:
            self.rd_data_valid_internal = 0
            self.rd_data_internal = 0
        # TODO: Do we really need rd_en_d2 check?
        # Isn't d2h_dec_pio_ack/nack enough?
        elif (self.rd_en_d2 == 1) & (self.d2h_dec_pio_ack | self.d2h_dec_pio_nack):
            self.rd_data_valid_internal = 1
            self.rd_data_internal = self.d2h_dec_pio_read_data
        else:
            self.rd_data_valid_internal = 0
            self.rd_data_internal = 0
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def w2e_wr_ifc(self):
        # register-and-forward writes not addressed to this tile
        if self.reset:
            self.if_cfg_est_m.wr_en = 0
            self.if_cfg_est_m.wr_addr = 0
            self.if_cfg_est_m.wr_data = 0
        elif (not self.wr_tile_id_match and (self.if_cfg_wst_s.wr_en == 1)):
            # Passthrough cfg signals
            self.if_cfg_est_m.wr_en = self.if_cfg_wst_s.wr_en
            self.if_cfg_est_m.wr_addr = self.if_cfg_wst_s.wr_addr
            self.if_cfg_est_m.wr_data = self.if_cfg_wst_s.wr_data
        else:
            self.if_cfg_est_m.wr_en = 0
            self.if_cfg_est_m.wr_addr = 0
            self.if_cfg_est_m.wr_data = 0
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def w2e_rd_ifc(self):
        # register-and-forward reads not addressed to this tile
        if self.reset:
            self.if_cfg_est_m.rd_en = 0
            self.if_cfg_est_m.rd_addr = 0
        elif (not self.rd_tile_id_match and (self.if_cfg_wst_s.rd_en == 1)):
            self.if_cfg_est_m.rd_en = self.if_cfg_wst_s.rd_en
            self.if_cfg_est_m.rd_addr = self.if_cfg_wst_s.rd_addr
        else:
            self.if_cfg_est_m.rd_en = 0
            self.if_cfg_est_m.rd_addr = 0
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def e2w_rd_ifc(self):
        # return path: local read data has priority over east tiles' data
        if self.reset:
            self.if_cfg_wst_s.rd_data = 0
            self.if_cfg_wst_s.rd_data_valid = 0
        elif self.rd_data_valid_internal:
            self.if_cfg_wst_s.rd_data = self.rd_data_internal
            self.if_cfg_wst_s.rd_data_valid = self.rd_data_valid_internal
        else:
            self.if_cfg_wst_s.rd_data = self.if_cfg_est_m.rd_data
            self.if_cfg_wst_s.rd_data_valid = self.if_cfg_est_m.rd_data_valid
    def wire_outputs(self):
        # assign output wires
        self.wire(self.h2d_pio_dec_write_data, self.wr_data_internal)
        self.wire(self.h2d_pio_dec_address, self.addr_internal)
        self.wire(self.h2d_pio_dec_read, self.read_internal)
        self.wire(self.h2d_pio_dec_write, self.write_internal)
|
StanfordAHA/garnet | global_buffer/design/glb_core_pcfg_router.py | <reponame>StanfordAHA/garnet<gh_stars>10-100
from kratos import Generator, always_ff, always_comb, posedge
from global_buffer.design.global_buffer_parameter import GlobalBufferParams
from global_buffer.design.glb_header import GlbHeader
class GlbCorePcfgRouter(Generator):
    """Ring router for parallel-config read packets.

    Forwards w2e and e2w traffic with a one-cycle register stage, turns
    packets around at segment boundaries (when cfg_tile_connected_* is 0),
    and taps packets off to / injects packets from the local switch.
    Even- and odd-numbered tiles tap opposite directions so a segment's
    traffic visits every tile.
    """
    def __init__(self, _params: GlobalBufferParams):
        super().__init__("glb_core_pcfg_router")
        self._params = _params
        self.header = GlbHeader(self._params)
        self.clk = self.clock("clk")
        self.reset = self.reset("reset")
        self.glb_tile_id = self.input(
            "glb_tile_id", self._params.tile_sel_addr_width)
        # east/west ring ports
        self.rd_packet_w2e_wsti = self.input(
            "rd_packet_w2e_wsti", self.header.rd_packet_t)
        self.rd_packet_e2w_wsto = self.output(
            "rd_packet_e2w_wsto", self.header.rd_packet_t)
        self.rd_packet_e2w_esti = self.input(
            "rd_packet_e2w_esti", self.header.rd_packet_t)
        self.rd_packet_w2e_esto = self.output(
            "rd_packet_w2e_esto", self.header.rd_packet_t)
        # local switch ports
        self.rd_packet_sw2pcfgr = self.input(
            "rd_packet_sw2pcfgr", self.header.rd_packet_t)
        self.rd_packet_pcfgr2sw = self.output(
            "rd_packet_pcfgr2sw", self.header.rd_packet_t)
        # segment-boundary flags (0 => turn the ring around at this tile)
        self.cfg_tile_connected_prev = self.input(
            "cfg_tile_connected_prev", 1)
        self.cfg_tile_connected_next = self.input(
            "cfg_tile_connected_next", 1)
        # local variables
        self.rd_packet_w2e_wsti_turned = self.var(
            "rd_packet_w2e_wsti_turned", self.header.rd_packet_t)
        self.rd_packet_w2e_wsti_turned_d1 = self.var(
            "rd_packet_w2e_wsti_turned_d1", self.header.rd_packet_t)
        self.rd_packet_e2w_esti_turned = self.var(
            "rd_packet_e2w_esti_turned", self.header.rd_packet_t)
        self.rd_packet_e2w_esti_turned_d1 = self.var(
            "rd_packet_e2w_esti_turned_d1", self.header.rd_packet_t)
        self.rd_packet_sw2pcfgr_d1 = self.var(
            "rd_packet_sw2pcfgr_d1", self.header.rd_packet_t)
        self.add_is_even_stmt()
        self.add_always(self.packet_wst_logic)
        self.add_always(self.packet_est_logic)
        self.add_always(self.packet_pipeline)
        self.add_always(self.packet_switch)
    def add_is_even_stmt(self):
        # even/odd tile id decides which ring direction this tile taps
        self.is_even = self.var("is_even", 1)
        self.wire(self.is_even, self.glb_tile_id[0] == 0)
    @always_comb
    def packet_wst_logic(self):
        # at a west segment boundary, loop the outgoing e2w packet back w2e
        if self.cfg_tile_connected_prev:
            self.rd_packet_w2e_wsti_turned = self.rd_packet_w2e_wsti
        else:
            self.rd_packet_w2e_wsti_turned = self.rd_packet_e2w_wsto
    @always_comb
    def packet_est_logic(self):
        # at an east segment boundary, loop the outgoing w2e packet back e2w
        if self.cfg_tile_connected_next:
            self.rd_packet_e2w_esti_turned = self.rd_packet_e2w_esti
        else:
            self.rd_packet_e2w_esti_turned = self.rd_packet_w2e_esto
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def packet_pipeline(self):
        # one register stage per tile hop (not clk_en-gated)
        if self.reset:
            self.rd_packet_w2e_wsti_turned_d1 = 0
            self.rd_packet_e2w_esti_turned_d1 = 0
            self.rd_packet_sw2pcfgr_d1 = 0
        else:
            self.rd_packet_w2e_wsti_turned_d1 = self.rd_packet_w2e_wsti_turned
            self.rd_packet_e2w_esti_turned_d1 = self.rd_packet_e2w_esti_turned
            self.rd_packet_sw2pcfgr_d1 = self.rd_packet_sw2pcfgr
    @always_comb
    def packet_switch(self):
        # packet to core
        # even tiles: tap w2e and inject the local packet both ways;
        # odd tiles: tap e2w and forward registered ring traffic.
        if self.is_even:
            self.rd_packet_pcfgr2sw = self.rd_packet_w2e_wsti_turned
            self.rd_packet_w2e_esto = self.rd_packet_sw2pcfgr_d1
            self.rd_packet_e2w_wsto = self.rd_packet_sw2pcfgr_d1
        else:
            self.rd_packet_pcfgr2sw = self.rd_packet_e2w_esti_turned
            self.rd_packet_w2e_esto = self.rd_packet_w2e_wsti_turned_d1
            self.rd_packet_e2w_wsto = self.rd_packet_e2w_esti_turned_d1
|
StanfordAHA/garnet | global_buffer/design/glb_bank.py | <reponame>StanfordAHA/garnet
from kratos import Generator
from global_buffer.design.glb_bank_memory import GlbBankMemory
from global_buffer.design.glb_bank_ctrl import GlbBankCtrl
from global_buffer.design.global_buffer_parameter import GlobalBufferParams
from global_buffer.design.glb_cfg_ifc import GlbConfigInterface
from global_buffer.design.glb_header import GlbHeader
class GlbBank(Generator):
    """One global-buffer memory bank: controller + memory wrapper.

    Accepts wr/rdrq packets and produces rdrs packets; the controller
    arbitrates packet traffic against the SRAM-config slave interface
    and drives the shared memory port.
    """
    def __init__(self, _params: GlobalBufferParams):
        super().__init__("glb_bank")
        self._params = _params
        self.header = GlbHeader(self._params)
        self.clk = self.clock("clk")
        self.reset = self.reset("reset")
        self.wr_packet = self.input(
            "wr_packet", self.header.wr_packet_t)
        self.rdrq_packet = self.input(
            "rdrq_packet", self.header.rdrq_packet_t)
        self.rdrs_packet = self.output(
            "rdrs_packet", self.header.rdrs_packet_t)
        self.bank_cfg_ifc = GlbConfigInterface(
            addr_width=self._params.bank_addr_width, data_width=self._params.axi_data_width)
        # dropped pointless f-string prefix (no placeholders)
        self.if_sram_cfg_s = self.interface(
            self.bank_cfg_ifc.slave, "if_sram_cfg_s", is_port=True)
        # local variables: the shared memory-port signals between ctrl and mem
        self.mem_rd_en = self.var("mem_rd_en", 1)
        self.mem_wr_en = self.var("mem_wr_en", 1)
        self.mem_addr = self.var("mem_addr", self._params.bank_addr_width)
        self.mem_data_in = self.var(
            "mem_data_in", self._params.bank_data_width)
        self.mem_data_in_bit_sel = self.var(
            "mem_data_in_bit_sel", self._params.bank_data_width)
        self.mem_data_out = self.var(
            "mem_data_out", self._params.bank_data_width)
        # memory core declaration
        self.wr_data_bit_sel = self.var(
            "wr_data_bit_sel", self._params.bank_data_width)
        self.wr_data_bit_sel_logic()
        self.add_glb_bank_ctrl()
        self.add_glb_bank_memory()
    # TODO: Is there a better way to connect?
    def wr_data_bit_sel_logic(self):
        # expand the per-byte write strobe into a per-bit write mask
        for i in range(self._params.bank_data_width):
            self.wire(self.wr_data_bit_sel[i], self.wr_packet['wr_strb'][i // 8])
    def add_glb_bank_memory(self):
        self.glb_bank_memory = GlbBankMemory(_params=self._params)
        self.add_child("glb_bank_memory", self.glb_bank_memory,
                       clk=self.clk,
                       reset=self.reset,
                       ren=self.mem_rd_en,
                       wen=self.mem_wr_en,
                       addr=self.mem_addr,
                       data_in=self.mem_data_in,
                       data_in_bit_sel=self.mem_data_in_bit_sel,
                       data_out=self.mem_data_out)
    def add_glb_bank_ctrl(self):
        self.glb_bank_ctrl = GlbBankCtrl(_params=self._params)
        self.add_child("glb_bank_ctrl", self.glb_bank_ctrl,
                       clk=self.clk,
                       reset=self.reset,
                       packet_wr_en=self.wr_packet['wr_en'],
                       # packet addresses are truncated to this bank's range
                       packet_wr_addr=self.wr_packet['wr_addr'][self._params.bank_addr_width - 1, 0],
                       packet_wr_data=self.wr_packet['wr_data'],
                       packet_wr_data_bit_sel=self.wr_data_bit_sel,
                       packet_rd_en=self.rdrq_packet['rd_en'],
                       packet_rd_addr=self.rdrq_packet['rd_addr'][self._params.bank_addr_width - 1, 0],
                       packet_rd_data=self.rdrs_packet['rd_data'],
                       packet_rd_data_valid=self.rdrs_packet['rd_data_valid'],
                       mem_rd_en=self.mem_rd_en,
                       mem_wr_en=self.mem_wr_en,
                       mem_addr=self.mem_addr,
                       mem_data_in=self.mem_data_in,
                       mem_data_in_bit_sel=self.mem_data_in_bit_sel,
                       mem_data_out=self.mem_data_out,
                       if_sram_cfg_s=self.if_sram_cfg_s)
|
StanfordAHA/garnet | cgra/ifc_struct.py | import magma
class ProcPacketIfc:
    """Processor packet interface, analogous to a SystemVerilog interface.

    The ``slave`` and ``master`` attributes are magma Product types
    (mirror images of each other in port direction) that can be used
    directly as magma ports.  The interface carries a write channel
    (wr_en / per-byte wr_strb / wr_addr / wr_data) and a read channel
    (rd_en / rd_addr with rd_data / rd_data_valid returning).
    """
    def __init__(self, addr_width, data_width):
        # Bus widths; wr_strb carries one bit per data byte (data_width // 8).
        self.addr_width = addr_width
        self.data_width = data_width
        # Slave view: requests are inputs, responses are outputs.
        self.slave = magma.Product.from_fields("ProcPacketIfcSlave", dict(
            wr_en=magma.In(magma.Bit),
            wr_strb=magma.In(magma.Bits[self.data_width // 8]),
            wr_addr=magma.In(magma.Bits[self.addr_width]),
            wr_data=magma.In(magma.Bits[self.data_width]),
            rd_en=magma.In(magma.Bit),
            rd_addr=magma.In(magma.Bits[self.addr_width]),
            rd_data=magma.Out(magma.Bits[self.data_width]),
            rd_data_valid=magma.Out(magma.Bit)))
        # Master view: identical fields with every direction flipped.
        self.master = magma.Product.from_fields("ProcPacketIfcMaster", dict(
            wr_en=magma.Out(magma.Bit),
            wr_strb=magma.Out(magma.Bits[self.data_width // 8]),
            wr_addr=magma.Out(magma.Bits[self.addr_width]),
            wr_data=magma.Out(magma.Bits[self.data_width]),
            rd_en=magma.Out(magma.Bit),
            rd_addr=magma.Out(magma.Bits[self.addr_width]),
            rd_data=magma.In(magma.Bits[self.data_width]),
            rd_data_valid=magma.In(magma.Bit)))
class GlbCfgIfc:
    """Global-buffer configuration interface as magma port types.

    Like ProcPacketIfc, ``slave`` and ``master`` are direction-mirrored
    magma Product types.  Compared to the packet interface it has no
    write strobe but adds wr_clk_en / rd_clk_en clock-enable signals.
    """
    def __init__(self, addr_width, data_width):
        self.addr_width = addr_width
        self.data_width = data_width
        # Slave view: requests in, read response out.
        self.slave = magma.Product.from_fields("GlbCfgIfcSlave", dict(
            wr_en=magma.In(magma.Bit),
            wr_clk_en=magma.In(magma.Bit),
            wr_addr=magma.In(magma.Bits[addr_width]),
            wr_data=magma.In(magma.Bits[data_width]),
            rd_en=magma.In(magma.Bit),
            rd_clk_en=magma.In(magma.Bit),
            rd_addr=magma.In(magma.Bits[addr_width]),
            rd_data=magma.Out(magma.Bits[data_width]),
            rd_data_valid=magma.Out(magma.Bit)))
        # Master view: same fields, all directions flipped.
        self.master = magma.Product.from_fields("GlbCfgIfcMaster", dict(
            wr_en=magma.Out(magma.Bit),
            wr_clk_en=magma.Out(magma.Bit),
            wr_addr=magma.Out(magma.Bits[addr_width]),
            wr_data=magma.Out(magma.Bits[data_width]),
            rd_en=magma.Out(magma.Bit),
            rd_clk_en=magma.Out(magma.Bit),
            rd_addr=magma.Out(magma.Bits[addr_width]),
            rd_data=magma.In(magma.Bits[data_width]),
            rd_data_valid=magma.In(magma.Bit)))
"""
This class returns a axi4-slave class (parameterized by @addr_width and
@data_width) which can be used as the magma ports with these inputs
and outputs
Below is AXI4-Lite interface ports in verilog
input logic [`$axi_addr_width-1`:0] AWADDR,
input logic AWVALID,
output logic AWREADY,
input logic [`$cfg_bus_width-1`:0] WDATA,
input logic WVALID,
output logic WREADY,
input logic [`$axi_addr_width-1`:0] ARADDR,
input logic ARVALID,
output logic ARREADY,
output logic [`$cfg_bus_width-1`:0] RDATA,
output logic [1:0] RRESP,
output logic RVALID,
input logic RREADY,
output logic interrupt,
"""
class AXI4LiteIfc:
    """AXI4-Lite interface (write-address/data/response, read-address/data
    channels) as magma Product port types.

    ``slave`` and ``master`` are direction-mirrored views parameterized
    by *addr_width* and *data_width*; resp fields (bresp/rresp) are the
    standard 2-bit AXI response codes.
    """
    def __init__(self, addr_width, data_width):
        self.addr_width = addr_width
        self.data_width = data_width
        # Slave view: AW/W/AR driven in, B/R driven out.
        self.slave = magma.Product.from_fields("AXI4SlaveType", dict(
            awaddr=magma.In(magma.Bits[addr_width]),
            awvalid=magma.In(magma.Bit),
            awready=magma.Out(magma.Bit),
            wdata=magma.In(magma.Bits[data_width]),
            wvalid=magma.In(magma.Bit),
            wready=magma.Out(magma.Bit),
            bready=magma.In(magma.Bit),
            bresp=magma.Out(magma.Bits[2]),
            bvalid=magma.Out(magma.Bit),
            araddr=magma.In(magma.Bits[addr_width]),
            arvalid=magma.In(magma.Bit),
            arready=magma.Out(magma.Bit),
            rdata=magma.Out(magma.Bits[data_width]),
            rresp=magma.Out(magma.Bits[2]),
            rvalid=magma.Out(magma.Bit),
            rready=magma.In(magma.Bit)))
        # Master view: same fields, all directions flipped.
        self.master = magma.Product.from_fields("AXI4MasterType", dict(
            awaddr=magma.Out(magma.Bits[addr_width]),
            awvalid=magma.Out(magma.Bit),
            awready=magma.In(magma.Bit),
            wdata=magma.Out(magma.Bits[data_width]),
            wvalid=magma.Out(magma.Bit),
            wready=magma.In(magma.Bit),
            bready=magma.Out(magma.Bit),
            bresp=magma.In(magma.Bits[2]),
            bvalid=magma.In(magma.Bit),
            araddr=magma.Out(magma.Bits[addr_width]),
            arvalid=magma.Out(magma.Bit),
            arready=magma.In(magma.Bit),
            rdata=magma.In(magma.Bits[data_width]),
            rresp=magma.In(magma.Bits[2]),
            rvalid=magma.In(magma.Bit),
            rready=magma.Out(magma.Bit)))
|
StanfordAHA/garnet | global_buffer/design/glb_core_load_dma.py | <reponame>StanfordAHA/garnet<gh_stars>10-100
from kratos import Generator, always_ff, always_comb, posedge, resize, clog2, ext
from global_buffer.design.glb_loop_iter import GlbLoopIter
from global_buffer.design.glb_sched_gen import GlbSchedGen
from global_buffer.design.glb_addr_gen import GlbAddrGen
from global_buffer.design.pipeline import Pipeline
from global_buffer.design.global_buffer_parameter import GlobalBufferParams
from global_buffer.design.glb_header import GlbHeader
class GlbCoreLoadDma(Generator):
    """Kratos generator for the global-buffer core load DMA.

    Walks a configurable affine loop nest (GlbLoopIter + address/schedule
    generators), issues bank read requests on ``rdrq_packet``, caches each
    returned bank-wide word from ``rdrs_packet``, and streams cgra-width
    slices of it out on ``data_g2f``/``data_valid_g2f``.  Valid/done/start
    timing is aligned to the configured network latency via Pipeline
    shift registers.
    """
    def __init__(self, _params: GlobalBufferParams):
        super().__init__("glb_core_load_dma")
        self._params = _params
        self.header = GlbHeader(self._params)
        # strm_data_logic below slices a bank word into exactly 4 cgra words.
        assert self._params.bank_data_width == self._params.cgra_data_width * 4
        self.clk = self.clock("clk")
        self.clk_en = self.clock_en("clk_en")
        self.reset = self.reset("reset")
        # Streaming output toward the CGRA fabric.
        self.data_g2f = self.output(
            "data_g2f", width=self._params.cgra_data_width)
        self.data_valid_g2f = self.output("data_valid_g2f", width=1)
        # Bank read request out / read response in.
        self.rdrq_packet = self.output(
            "rdrq_packet", self.header.rdrq_packet_t)
        self.rdrs_packet = self.input("rdrs_packet", self.header.rdrs_packet_t)
        # Configuration inputs.
        self.cfg_ld_dma_num_repeat = self.input("cfg_ld_dma_num_repeat", clog2(self._params.queue_depth) + 1)
        self.cfg_ld_dma_ctrl_use_valid = self.input("cfg_ld_dma_ctrl_use_valid", 1)
        self.cfg_ld_dma_ctrl_mode = self.input("cfg_ld_dma_ctrl_mode", 2)
        self.cfg_data_network_latency = self.input("cfg_data_network_latency", self._params.latency_width)
        # One DMA descriptor per queue slot.
        self.cfg_ld_dma_header = self.input(
            "cfg_ld_dma_header", self.header.cfg_dma_header_t, size=self._params.queue_depth)
        self.ld_dma_start_pulse = self.input("ld_dma_start_pulse", 1)
        self.ld_dma_done_pulse = self.output("ld_dma_done_pulse", 1)
        # local parameter: fixed pipeline latency from request to cached data
        self.default_latency = (self._params.glb_switch_pipeline_depth
                                + self._params.glb_bank_memory_pipeline_depth
                                + self._params.sram_gen_pipeline_depth
                                + self._params.sram_gen_output_pipeline_depth
                                + 1  # SRAM macro read latency
                                + self._params.glb_switch_pipeline_depth
                                + 2  # FIXME: Unnecessary delay of moving back and forth btw switch and router
                                + 1  # load_dma cache register delay
                                )
        # local variables
        self.strm_data = self.var("strm_data", self._params.cgra_data_width)
        self.strm_data_r = self.var(
            "strm_data_r", self._params.cgra_data_width)
        self.strm_data_valid = self.var("strm_data_valid", 1)
        self.strm_data_valid_r = self.var("strm_data_valid_r", 1)
        # Selects which cgra-width slice of the cached bank word to emit.
        self.strm_data_sel = self.var("strm_data_sel", self._params.bank_byte_offset - self._params.cgra_byte_offset)
        self.strm_rd_en_w = self.var("strm_rd_en_w", 1)
        self.strm_rd_addr_w = self.var(
            "strm_rd_addr_w", self._params.glb_addr_width)
        self.last_strm_rd_addr_r = self.var(
            "last_strm_rd_addr_r", self._params.glb_addr_width)
        self.ld_dma_start_pulse_next = self.var("ld_dma_start_pulse_next", 1)
        self.ld_dma_start_pulse_r = self.var("ld_dma_start_pulse_r", 1)
        self.is_first = self.var("is_first", 1)
        self.ld_dma_done_pulse_w = self.var("ld_dma_done_pulse_w", 1)
        self.bank_addr_match = self.var("bank_addr_match", 1)
        self.bank_rdrq_rd_en = self.var("bank_rdrq_rd_en", 1)
        self.bank_rdrq_rd_addr = self.var(
            "bank_rdrq_rd_addr", self._params.glb_addr_width)
        self.bank_rdrs_data_cache_r = self.var(
            "bank_rdrs_data_cache_r", self._params.bank_data_width)
        self.strm_run = self.var("strm_run", 1)
        self.loop_done = self.var("loop_done", 1)
        self.cycle_valid = self.var("cycle_valid", 1)
        self.cycle_count = self.var("cycle_count", self._params.axi_data_width)
        self.cycle_current_addr = self.var("cycle_current_addr", self._params.axi_data_width)
        self.data_current_addr = self.var("data_current_addr", self._params.axi_data_width)
        self.loop_mux_sel = self.var("loop_mux_sel", clog2(self._params.loop_level))
        self.repeat_cnt = self.var("repeat_cnt", clog2(self._params.queue_depth) + 1)
        # Queue select register only exists when there is more than one slot.
        if self._params.queue_depth != 1:
            self.queue_sel_r = self.var("queue_sel_r", max(1, clog2(self.repeat_cnt.width)))
        # Current dma header
        self.current_dma_header = self.var("current_dma_header", self.header.cfg_dma_header_t)
        if self._params.queue_depth == 1:
            self.wire(self.cfg_ld_dma_header, self.current_dma_header)
        else:
            self.wire(self.cfg_ld_dma_header[self.queue_sel_r], self.current_dma_header)
        # Register all sequential / combinational processes.
        if self._params.queue_depth != 1:
            self.add_always(self.queue_sel_ff)
        self.add_always(self.repeat_cnt_ff)
        self.add_always(self.cycle_counter)
        self.add_always(self.is_first_ff)
        self.add_always(self.strm_run_ff)
        self.add_always(self.strm_data_ff)
        self.add_strm_data_start_pulse_pipeline()
        self.add_ld_dma_done_pulse_pipeline()
        self.add_strm_rd_en_pipeline()
        self.add_strm_rd_addr_pipeline()
        self.add_always(self.ld_dma_start_pulse_logic)
        self.add_always(self.ld_dma_start_pulse_ff)
        self.add_always(self.strm_data_mux)
        self.add_always(self.ld_dma_done_pulse_logic)
        self.add_always(self.strm_rdrq_packet_ff)
        self.add_always(self.last_strm_rd_addr_ff)
        self.add_always(self.bank_rdrq_packet_logic)
        self.add_always(self.bank_rdrs_data_cache_ff)
        self.add_always(self.strm_data_logic)
        # Loop iteration shared for cycle and data
        self.loop_iter = GlbLoopIter(self._params)
        self.add_child("loop_iter",
                       self.loop_iter,
                       clk=self.clk,
                       clk_en=self.clk_en,
                       reset=self.reset,
                       step=self.cycle_valid,
                       mux_sel_out=self.loop_mux_sel,
                       restart=self.loop_done)
        self.wire(self.loop_iter.dim, self.current_dma_header[f"dim"])
        for i in range(self._params.loop_level):
            self.wire(self.loop_iter.ranges[i], self.current_dma_header[f"range_{i}"])
        # Cycle stride: schedules *when* each read request is issued.
        self.cycle_stride_sched_gen = GlbSchedGen(self._params)
        self.add_child("cycle_stride_sched_gen",
                       self.cycle_stride_sched_gen,
                       clk=self.clk,
                       clk_en=self.clk_en,
                       reset=self.reset,
                       restart=self.ld_dma_start_pulse_r,
                       cycle_count=self.cycle_count,
                       current_addr=self.cycle_current_addr,
                       finished=self.loop_done,
                       valid_output=self.cycle_valid)
        self.cycle_stride_addr_gen = GlbAddrGen(self._params)
        self.add_child("cycle_stride_addr_gen",
                       self.cycle_stride_addr_gen,
                       clk=self.clk,
                       clk_en=self.clk_en,
                       reset=self.reset,
                       restart=self.ld_dma_start_pulse_r,
                       step=self.cycle_valid,
                       mux_sel=self.loop_mux_sel,
                       addr_out=self.cycle_current_addr)
        self.wire(self.cycle_stride_addr_gen.start_addr, ext(
            self.current_dma_header[f"cycle_start_addr"], self._params.axi_data_width))
        for i in range(self._params.loop_level):
            self.wire(self.cycle_stride_addr_gen.strides[i],
                      self.current_dma_header[f"cycle_stride_{i}"])
        # Data stride: generates *which* glb address each request reads.
        self.data_stride_addr_gen = GlbAddrGen(self._params)
        self.add_child("data_stride_addr_gen",
                       self.data_stride_addr_gen,
                       clk=self.clk,
                       clk_en=self.clk_en,
                       reset=self.reset,
                       restart=self.ld_dma_start_pulse_r,
                       step=self.cycle_valid,
                       mux_sel=self.loop_mux_sel,
                       addr_out=self.data_current_addr)
        self.wire(self.data_stride_addr_gen.start_addr, ext(
            self.current_dma_header[f"start_addr"], self._params.axi_data_width))
        for i in range(self._params.loop_level):
            self.wire(self.data_stride_addr_gen.strides[i], self.current_dma_header[f"stride_{i}"])

    # Advance the descriptor queue slot after each done pulse (mode 3 only).
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def queue_sel_ff(self):
        if self.reset:
            self.queue_sel_r = 0
        elif self.clk_en:
            if self.cfg_ld_dma_ctrl_mode == 3:
                if self.ld_dma_done_pulse:
                    if (self.repeat_cnt + 1) < self.cfg_ld_dma_num_repeat:
                        self.queue_sel_r = self.queue_sel_r + 1
                    else:
                        self.queue_sel_r = 0

    # Count completed DMA runs in the auto-repeat modes (2 and 3).
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def repeat_cnt_ff(self):
        if self.reset:
            self.repeat_cnt = 0
        elif self.clk_en:
            if self.cfg_ld_dma_ctrl_mode == 2:
                if self.ld_dma_done_pulse:
                    if (self.repeat_cnt + 1) < self.cfg_ld_dma_num_repeat:
                        self.repeat_cnt += 1
            elif self.cfg_ld_dma_ctrl_mode == 3:
                if self.ld_dma_done_pulse:
                    # Mode 3 is additionally bounded by the queue depth.
                    if (((self.repeat_cnt + 1) < self.cfg_ld_dma_num_repeat)
                            & ((self.repeat_cnt + 1) < self._params.queue_depth)):
                        self.repeat_cnt += 1

    # High only between a start pulse and the first bank read request.
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def is_first_ff(self):
        if self.reset:
            self.is_first = 0
        elif self.clk_en:
            if self.ld_dma_start_pulse_r:
                self.is_first = 1
            elif self.bank_rdrq_rd_en:
                self.is_first = 0

    # Stream-active flag: set on start, cleared when the loop nest finishes.
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def strm_run_ff(self):
        if self.reset:
            self.strm_run = 0
        elif self.clk_en:
            if self.ld_dma_start_pulse_r:
                self.strm_run = 1
            elif self.loop_done:
                self.strm_run = 0

    # Derive the internal start pulse from the control mode:
    #   0: off, 1: external pulse only, 2/3: also re-start on done while repeating.
    @always_comb
    def ld_dma_start_pulse_logic(self):
        if self.cfg_ld_dma_ctrl_mode == 0:
            self.ld_dma_start_pulse_next = 0
        elif self.cfg_ld_dma_ctrl_mode == 1:
            self.ld_dma_start_pulse_next = (~self.strm_run) & self.ld_dma_start_pulse
        elif (self.cfg_ld_dma_ctrl_mode == 2) | (self.cfg_ld_dma_ctrl_mode == 3):
            self.ld_dma_start_pulse_next = (((~self.strm_run) & self.ld_dma_start_pulse)
                                            | ((self.ld_dma_done_pulse)
                                               & ((self.repeat_cnt + 1) < self.cfg_ld_dma_num_repeat)))
        else:
            self.ld_dma_start_pulse_next = 0

    # Register the start pulse and guarantee it is one cycle wide.
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def ld_dma_start_pulse_ff(self):
        if self.reset:
            self.ld_dma_start_pulse_r = 0
        elif self.clk_en:
            if self.ld_dma_start_pulse_r:
                self.ld_dma_start_pulse_r = 0
            else:
                self.ld_dma_start_pulse_r = self.ld_dma_start_pulse_next

    # Free-running cycle counter for the schedule generator; reset on
    # start and on loop completion.
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def cycle_counter(self):
        if self.reset:
            self.cycle_count = 0
        elif self.clk_en:
            if self.ld_dma_start_pulse_r:
                self.cycle_count = 0
            elif self.loop_done:
                self.cycle_count = 0
            elif self.strm_run:
                self.cycle_count = self.cycle_count + 1

    # One-stage output register on the stream data/valid pair.
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def strm_data_ff(self):
        if self.reset:
            self.strm_data_r = 0
            self.strm_data_valid_r = 0
        elif self.clk_en:
            self.strm_data_r = self.strm_data
            self.strm_data_valid_r = self.strm_data_valid

    # Output valid is either the real per-word valid or just the delayed
    # start pulse, depending on cfg_ld_dma_ctrl_use_valid.
    @always_comb
    def strm_data_mux(self):
        if self.cfg_ld_dma_ctrl_use_valid:
            self.data_g2f = self.strm_data_r
            self.data_valid_g2f = self.strm_data_valid_r
        else:
            self.data_g2f = self.strm_data_r
            self.data_valid_g2f = self.strm_data_start_pulse

    @always_comb
    def ld_dma_done_pulse_logic(self):
        self.ld_dma_done_pulse_w = self.strm_run & self.loop_done

    # Stream read request: fire on every valid schedule cycle at the
    # current data address.
    @always_comb
    def strm_rdrq_packet_ff(self):
        self.strm_rd_en_w = self.cycle_valid
        self.strm_rd_addr_w = resize(self.data_current_addr, self._params.glb_addr_width)

    @always_ff((posedge, "clk"), (posedge, "reset"))
    def last_strm_rd_addr_ff(self):
        if self.reset:
            self.last_strm_rd_addr_r = 0
        elif self.clk_en:
            if self.strm_rd_en_w:
                self.last_strm_rd_addr_r = self.strm_rd_addr_w

    # Only issue a bank read when the bank-word address changed (or on the
    # very first request); otherwise the cached word is reused.
    @always_comb
    def bank_rdrq_packet_logic(self):
        self.bank_addr_match = (self.strm_rd_addr_w[self._params.glb_addr_width - 1, self._params.bank_byte_offset]
                                == self.last_strm_rd_addr_r[self._params.glb_addr_width - 1,
                                                            self._params.bank_byte_offset])
        self.bank_rdrq_rd_en = self.strm_rd_en_w & (self.is_first | (~self.bank_addr_match))
        self.bank_rdrq_rd_addr = self.strm_rd_addr_w
        self.rdrq_packet['rd_en'] = self.bank_rdrq_rd_en
        self.rdrq_packet['rd_addr'] = self.bank_rdrq_rd_addr

    # Cache the most recent valid bank read response.
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def bank_rdrs_data_cache_ff(self):
        if self.reset:
            self.bank_rdrs_data_cache_r = 0
        elif self.clk_en:
            if self.rdrs_packet['rd_data_valid']:
                self.bank_rdrs_data_cache_r = self.rdrs_packet['rd_data']

    # Slice the cached bank word into one of its four cgra-width words.
    @always_comb
    def strm_data_logic(self):
        if self.strm_data_sel == 0:
            self.strm_data = self.bank_rdrs_data_cache_r[self._params.cgra_data_width - 1, 0]
        elif self.strm_data_sel == 1:
            self.strm_data = self.bank_rdrs_data_cache_r[self._params.cgra_data_width * 2 - 1,
                                                         self._params.cgra_data_width * 1]
        elif self.strm_data_sel == 2:
            self.strm_data = self.bank_rdrs_data_cache_r[self._params.cgra_data_width * 3 - 1,
                                                         self._params.cgra_data_width * 2]
        elif self.strm_data_sel == 3:
            self.strm_data = self.bank_rdrs_data_cache_r[self._params.cgra_data_width * 4 - 1,
                                                         self._params.cgra_data_width * 3]
        else:
            self.strm_data = self.bank_rdrs_data_cache_r[self._params.cgra_data_width - 1, 0]

    def add_strm_rd_en_pipeline(self):
        """Delay strm_rd_en by the configured latency to form strm_data_valid."""
        maximum_latency = 2 * self._params.num_glb_tiles + self.default_latency
        latency_width = clog2(maximum_latency)
        self.strm_rd_en_d_arr = self.var(
            "strm_rd_en_d_arr", 1, size=maximum_latency, explicit_array=True)
        self.strm_rd_en_pipeline = Pipeline(width=1,
                                            depth=maximum_latency,
                                            flatten_output=True)
        self.add_child("strm_rd_en_pipeline",
                       self.strm_rd_en_pipeline,
                       clk=self.clk,
                       clk_en=self.clk_en,
                       reset=self.reset,
                       in_=self.strm_rd_en_w,
                       out_=self.strm_rd_en_d_arr)
        self.wire(self.strm_data_valid, self.strm_rd_en_d_arr[resize(
            self.cfg_data_network_latency, latency_width) + self.default_latency])

    def add_strm_rd_addr_pipeline(self):
        """Delay the read address so its word-select bits line up with the data."""
        maximum_latency = 2 * self._params.num_glb_tiles + self.default_latency
        latency_width = clog2(maximum_latency)
        self.strm_rd_addr_d_arr = self.var(
            "strm_rd_addr_d_arr", width=self._params.glb_addr_width, size=maximum_latency, explicit_array=True)
        self.strm_rd_addr_pipeline = Pipeline(width=self._params.glb_addr_width,
                                              depth=maximum_latency,
                                              flatten_output=True)
        self.add_child("strm_rd_addr_pipeline",
                       self.strm_rd_addr_pipeline,
                       clk=self.clk,
                       clk_en=self.clk_en,
                       reset=self.reset,
                       in_=self.strm_rd_addr_w,
                       out_=self.strm_rd_addr_d_arr)
        self.strm_data_sel = self.strm_rd_addr_d_arr[resize(self.cfg_data_network_latency, latency_width)
                                                     + self.default_latency][self._params.bank_byte_offset - 1,
                                                                             self._params.cgra_byte_offset]

    def add_strm_data_start_pulse_pipeline(self):
        """Delay the start pulse to align with the first data word (+1 cycle)."""
        maximum_latency = 2 * self._params.num_glb_tiles + self.default_latency + 2
        latency_width = clog2(maximum_latency)
        self.strm_data_start_pulse_d_arr = self.var(
            "strm_data_start_pulse_d_arr", 1, size=maximum_latency, explicit_array=True)
        self.strm_data_start_pulse_pipeline = Pipeline(width=1,
                                                       depth=maximum_latency,
                                                       flatten_output=True)
        self.add_child("strm_dma_start_pulse_pipeline",
                       self.strm_data_start_pulse_pipeline,
                       clk=self.clk,
                       clk_en=self.clk_en,
                       reset=self.reset,
                       in_=self.ld_dma_start_pulse_r,
                       out_=self.strm_data_start_pulse_d_arr)
        self.strm_data_start_pulse = self.var("strm_data_start_pulse", 1)
        self.wire(self.strm_data_start_pulse,
                  self.strm_data_start_pulse_d_arr[resize(self.cfg_data_network_latency, latency_width)
                                                   + self.default_latency + 1])

    def add_ld_dma_done_pulse_pipeline(self):
        """Delay the internal done pulse to the externally visible timing (+3)."""
        maximum_latency = 2 * self._params.num_glb_tiles + self.default_latency + 3
        latency_width = clog2(maximum_latency)
        self.ld_dma_done_pulse_d_arr = self.var(
            "ld_dma_done_pulse_d_arr", 1, size=maximum_latency, explicit_array=True)
        self.ld_dma_done_pulse_pipeline = Pipeline(width=1,
                                                   depth=maximum_latency,
                                                   flatten_output=True)
        self.add_child("ld_dma_done_pulse_pipeline",
                       self.ld_dma_done_pulse_pipeline,
                       clk=self.clk,
                       clk_en=self.clk_en,
                       reset=self.reset,
                       in_=self.ld_dma_done_pulse_w,
                       out_=self.ld_dma_done_pulse_d_arr)
        self.wire(self.ld_dma_done_pulse,
                  self.ld_dma_done_pulse_d_arr[resize(self.cfg_data_network_latency, latency_width)
                                               + self.default_latency + 3])
|
StanfordAHA/garnet | global_buffer/testvectors/gen_glb_test.py | import random
import argparse
def gen_reg_pair(f_reglist, f_regpair):
    """Convert a register list into an "<addr> <data>" pair file.

    Reads *f_reglist* and keeps only lines starting with "0x".  For each
    such line, the first token (minus the "0x" prefix) is the register
    address and the seventh token is the register bit width; the data
    written is the all-ones pattern for that width (2**bits - 1).

    The output file *f_regpair* begins with the number of register
    entries, followed by one "<addr> <data>" line per register.
    """
    # Read once and filter, instead of scanning the file twice
    # (the original opened the file a second time just to count lines).
    with open(f_reglist, 'r') as reglist:
        reg_lines = [line for line in reglist if line.startswith("0x")]
    with open(f_regpair, 'w') as regpair:
        regpair.write(f"{len(reg_lines)}\n")
        for line in reg_lines:
            word_list = line.split()
            addr = word_list[0][2:]      # strip the "0x" prefix
            bits = word_list[6]          # bit-width column of the reglist
            data = 2**int(bits) - 1      # all-ones test pattern
            regpair.write(f"{addr} {data}\n")
def gen_bs_sample(filename, num):
    """Write *num* random bitstream samples to *filename*.

    The first line holds the sample count.  Each following line is
    "<(reg << 8) | col> <data>", built from a random column (0..31),
    register index (8-bit) and data word (32-bit) drawn from the
    module-level `random` state, in that order.
    """
    with open(filename, 'w') as out:
        out.write(f"{num}\n")
        for _ in range(num):
            # Draw in the same order every time: column, register, data.
            column = random.randrange(0, 32)
            register = random.randrange(0, 2 ** 8)
            payload = random.randrange(0, 2 ** 32)
            address = (register << 8) | column
            out.write(f"{address} {payload}\n")
def gen_data_sample(filename, width, num):
    """Write *num* random *width*-bit words to *filename*.

    Each word is emitted as lowercase hex (no "0x" prefix) followed by a
    single space; no leading count line is written.
    """
    upper = 2 ** width
    with open(filename, 'w') as out:
        for _ in range(num):
            sample = random.randrange(0, upper)
            # f"{x:x}" is byte-identical to hex(x)[2:] for non-negative ints.
            out.write(f"{sample:x} ")
if __name__ == "__main__":
    # CLI driver: each output (config pairs, bitstream samples, data
    # samples) is optional and generated only when its flag is given.
    parser = argparse.ArgumentParser(description='testvector generator')
    parser.add_argument('--data', type=str, default=None)
    parser.add_argument('--width', type=int, default=16)
    parser.add_argument('--num', type=int, default=32)
    parser.add_argument('--config', type=str, default=None)
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--bitstream', type=str, default=None)
    parser.add_argument('--bitstream-size', type=int, default=32)
    args = parser.parse_args()
    # Seed the module RNG so generated vectors are reproducible.
    random.seed(args.seed)
    if args.config:
        # Source reglist path is fixed relative to this script's location.
        gen_reg_pair("../systemRDL/output/glb.reglist", args.config)
    if args.bitstream:
        gen_bs_sample(args.bitstream, args.bitstream_size)
    if args.data:
        gen_data_sample(args.data, args.width, args.num)
|
StanfordAHA/garnet | global_buffer/gls/gen_sdf_cmd.py | <reponame>StanfordAHA/garnet
import argparse
def gen_glb_sdf_cmd(filename, num_glb_tiles, mtm, sdf_top, sdf_tile, log):
    """Write an SDF command file covering the top design and every glb tile.

    Emits one entry (via get_sdf_string) for the top-level scope
    ``top.dut`` and one per tile instance ``top.dut.glb_tile_gen_<i>``,
    each with its own log file under *log*.
    """
    entries = [get_sdf_string(sdf_top, 'top.dut', mtm, f"{log}/glb_top.sdf.log")]
    entries += [
        get_sdf_string(sdf_tile, f"top.dut.glb_tile_gen_{i}", mtm,
                       f"{log}/glb_tile_{i}.sdf.log")
        for i in range(num_glb_tiles)
    ]
    with open(filename, "w") as cmd_file:
        cmd_file.write("".join(entries))
def get_sdf_string(filename, scope, mtm, log):
    """Return one SDF command-file entry for an annotation tool.

    Args:
        filename: path of the SDF file to annotate with.
        scope: design scope the SDF applies to (e.g. "top.dut").
        mtm: min/typ/max control value (e.g. "MAXIMUM").
        log: path of the log file for this annotation.

    Returns:
        The formatted entry, terminated by ";" and a blank line.
    """
    # Bug fix: the `filename` argument was previously ignored and a
    # placeholder string was emitted instead of the SDF file path.
    result = f"SDF_FILE = \"{filename}\",\n"\
             f"SCOPE = \"{scope}\",\n"\
             f"MTM_CONTROL = \"{mtm}\",\n"\
             f"LOG_FILE = \"{log}\";\n\n"
    return result
def gen_glb_sdf_inline(filename, num_glb_tiles, mtm, sdf_top, sdf_tile):
    """Write inline ``-sdf`` simulator options for the top and every glb tile.

    Maps the MTM_CONTROL-style *mtm* value onto the short min/typ/max
    selector and emits one ``-sdf <sel>:<scope>:<file>`` option for the
    top scope plus one per tile instance.
    """
    if mtm == "MAXIMUM":
        mtm = "max"
    # Bug fix: only the misspelled "MINUMUM" was matched before, so a
    # correctly spelled "MINIMUM" silently fell through to "typ".
    # Accept both spellings for backward compatibility.
    elif mtm in ("MINIMUM", "MINUMUM"):
        mtm = "min"
    else:
        mtm = "typ"
    sdf = f" -sdf {mtm}:top.dut:{sdf_top}"
    for i in range(num_glb_tiles):
        sdf += f" -sdf {mtm}:top.dut.glb_tile_gen_{i}:{sdf_tile}"
    with open(filename, "w") as f:
        f.write(sdf)
if __name__ == "__main__":
    # CLI driver: emit either an Xcelium SDF command file or inline -sdf
    # simulator options, depending on --tool.
    parser = argparse.ArgumentParser(description="SDF command file generator")
    parser.add_argument('-f', '--filename', type=str, default="sdf_cmd.cmd")
    parser.add_argument('-n', '--num-glb-tiles', type=int, default=16)
    parser.add_argument('--top', type=str, default="glb.sdf")
    parser.add_argument('--tile', type=str, default="glb_tile.sdf")
    parser.add_argument('-l', '--log', type=str, default="sdf_logs")
    parser.add_argument('-t', '--mtm', type=str, default="MAXIMUM")
    parser.add_argument('--tool', type=str, default="XCELIUM")
    args = parser.parse_args()
    if args.tool == "XCELIUM":
        # Command-file style (takes an extra log directory argument).
        gen_glb_sdf_cmd(args.filename, args.num_glb_tiles, args.mtm, args.top, args.tile, args.log)
    else:
        # Inline -sdf option style for other simulators.
        gen_glb_sdf_inline(args.filename, args.num_glb_tiles, args.mtm, args.top, args.tile)
|
StanfordAHA/garnet | mflowgen/glb_top/construct.py | #! /usr/bin/env python
#=========================================================================
# construct.py
#=========================================================================
# Author :
# Date :
#
import os
import sys
import pathlib
from mflowgen.components import Graph, Step
from shutil import which
def construct():
    """Build and return the mflowgen Graph for the glb_top (global_buffer)
    physical-design flow.

    Assembles RTL generation, simulation (RTL and gate-level), synthesis,
    the Innovus place-and-route chain, signoff, timing signoff, DRC/LVS,
    and per-testvector gate-level simulation + power (ptpx) nodes, then
    wires them together and applies the parameter set below.
    """
    g = Graph()
    #-----------------------------------------------------------------------
    # Parameters
    #-----------------------------------------------------------------------
    adk_name = 'tsmc16'
    adk_view = 'view-standard'
    parameters = {
        'construct_path' : __file__,
        'design_name' : 'global_buffer',
        'clock_period' : 1.25,
        'adk' : adk_name,
        'adk_view' : adk_view,
        # Synthesis
        'flatten_effort' : 3,
        'topographical' : True,
        # hold target slack
        'hold_target_slack' : 0.045,
        # array_width = width of CGRA below GLB; `pin-assignments.tcl` uses
        # these parms to set up per-cgra-column ports connecting glb tile
        # signals in glb_top to corresponding CGRA tile columns below glb_top
        'array_width' : 32,
        'num_glb_tiles' : 16,
        'tool' : "VCS",
        # glb tile memory size (unit: KB)
        'glb_tile_mem_size' : 256,
        'rtl_testvectors' : ["test1", "test2", "test3", "test4"],
        'gls_testvectors' : ["test1.pwr", "test2.pwr"]
    }
    #-----------------------------------------------------------------------
    # Create nodes
    #-----------------------------------------------------------------------
    this_dir = os.path.dirname( os.path.abspath( __file__ ) )
    # ADK step
    g.set_adk( adk_name )
    adk = g.get_adk_step()
    # Custom steps
    rtl = Step( this_dir + '/../common/rtl' )
    sim_compile = Step( this_dir + '/sim-compile' )
    sim_run = Step( this_dir + '/sim-run' )
    sim_gl_compile = Step( this_dir + '/sim-gl-compile' )
    glb_tile = Step( this_dir + '/glb_tile' )
    constraints = Step( this_dir + '/constraints' )
    custom_init = Step( this_dir + '/custom-init' )
    custom_lvs = Step( this_dir + '/custom-lvs-rules' )
    custom_power = Step( this_dir + '/../common/custom-power-hierarchical' )
    lib2db = Step( this_dir + '/../common/synopsys-dc-lib2db' )
    # Default steps
    info = Step( 'info', default=True )
    synth = Step( 'cadence-genus-synthesis', default=True )
    iflow = Step( 'cadence-innovus-flowsetup', default=True )
    init = Step( 'cadence-innovus-init', default=True )
    power = Step( 'cadence-innovus-power', default=True )
    place = Step( 'cadence-innovus-place', default=True )
    cts = Step( 'cadence-innovus-cts', default=True )
    postcts_hold = Step( 'cadence-innovus-postcts_hold', default=True )
    route = Step( 'cadence-innovus-route', default=True )
    postroute = Step( 'cadence-innovus-postroute', default=True )
    postroute_hold = Step( 'cadence-innovus-postroute_hold', default=True )
    signoff = Step( 'cadence-innovus-signoff', default=True )
    pt_signoff = Step( 'synopsys-pt-timing-signoff', default=True )
    genlib = Step( 'cadence-genus-genlib', default=True )
    # Prefer Calibre for DRC/LVS when available, else fall back to Pegasus.
    if which("calibre") is not None:
        drc = Step( 'mentor-calibre-drc', default=True )
        lvs = Step( 'mentor-calibre-lvs', default=True )
    else:
        drc = Step( 'cadence-pegasus-drc', default=True )
        lvs = Step( 'cadence-pegasus-lvs', default=True )
    debugcalibre = Step( 'cadence-innovus-debug-calibre', default=True )
    # Simulator artifacts differ per tool (VCS simv vs Xcelium xcelium.d).
    if parameters['tool'] == 'VCS':
        sim_compile.extend_outputs(['simv', 'simv.daidir'])
        sim_gl_compile.extend_outputs(['simv', 'simv.daidir'])
        sim_run.extend_inputs(['simv', 'simv.daidir'])
    elif parameters['tool'] == 'XCELIUM':
        sim_compile.extend_outputs(['xcelium.d'])
        sim_gl_compile.extend_outputs(['xcelium.d'])
        sim_run.extend_inputs(['xcelium.d'])
    # One gate-level sim + power node pair per gate-level testvector.
    sim_gl_run_nodes = {}
    ptpx_gl_nodes = {}
    for test in parameters["gls_testvectors"]:
        sim_gl_run = Step( this_dir + '/sim-gl-run' )
        ptpx_gl = Step( this_dir + '/synopsys-ptpx-gl' )
        # rename
        sim_gl_run.set_name(f"sim_gl_run_{test}")
        ptpx_gl.set_name(f"ptpx_gl_{test}")
        sim_gl_run_nodes[test] = sim_gl_run
        ptpx_gl_nodes[test] = ptpx_gl
        sim_gl_run.update_params( {'test' : test}, allow_new=True)
        # Gate-level ptpx node
        ptpx_gl.set_param("strip_path", "top/dut")
        ptpx_gl.extend_inputs(glb_tile.all_outputs())
        if parameters['tool'] == 'VCS':
            sim_gl_run.extend_inputs(['simv', 'simv.daidir'])
        elif parameters['tool'] == 'XCELIUM':
            sim_gl_run.extend_inputs(['xcelium.d'])
    # Add header files to outputs
    rtl.extend_outputs( ['header'] )
    rtl.extend_postconditions( ["assert File( 'outputs/header' ) "] )
    # Add (dummy) parameters to the default innovus init step
    init.update_params( {
        'core_width'  : 0,
        'core_height' : 0
    }, allow_new=True )
    # Add glb_tile macro inputs to downstream nodes
    pt_signoff.extend_inputs( ['glb_tile_tt.db'] )
    # These steps need timing info for glb_tiles
    tile_steps = \
        [ synth, iflow, init, power, place, cts, postcts_hold,
          route, postroute, postroute_hold, signoff, genlib ]
    for step in tile_steps:
        step.extend_inputs( ['glb_tile_tt.lib', 'glb_tile.lef'] )
    # Need the glb_tile gds to merge into the final layout
    signoff.extend_inputs( ['glb_tile.gds'] )
    # Need glb_tile lvs.v file for LVS
    lvs.extend_inputs( ['glb_tile.lvs.v'] )
    # Need sram spice file for LVS
    lvs.extend_inputs( ['sram.spi'] )
    # Drop the clock-gating-percentage postcondition from synthesis checks.
    xlist = synth.get_postconditions()
    xlist = \
        [ _ for _ in xlist if 'percent_clock_gated' not in _ ]
    xlist = synth.set_postconditions( xlist )
    # Add extra input edges to innovus steps that need custom tweaks
    init.extend_inputs( custom_init.all_outputs() )
    power.extend_inputs( custom_power.all_outputs() )
    #-----------------------------------------------------------------------
    # Graph -- Add nodes
    #-----------------------------------------------------------------------
    g.add_step( info )
    g.add_step( rtl )
    g.add_step( sim_compile )
    g.add_step( sim_run )
    g.add_step( sim_gl_compile )
    g.add_step( glb_tile )
    g.add_step( constraints )
    g.add_step( synth )
    g.add_step( iflow )
    g.add_step( init )
    g.add_step( custom_init )
    g.add_step( power )
    g.add_step( custom_power )
    g.add_step( place )
    g.add_step( cts )
    g.add_step( postcts_hold )
    g.add_step( route )
    g.add_step( postroute )
    g.add_step( postroute_hold )
    g.add_step( signoff )
    g.add_step( pt_signoff )
    g.add_step( genlib )
    g.add_step( lib2db )
    g.add_step( drc )
    g.add_step( lvs )
    g.add_step( custom_lvs )
    g.add_step( debugcalibre )
    for test in parameters["gls_testvectors"]:
        g.add_step(sim_gl_run_nodes[test])
        g.add_step(ptpx_gl_nodes[test])
    #-----------------------------------------------------------------------
    # Graph -- Add edges
    #-----------------------------------------------------------------------
    # Connect by name
    g.connect_by_name( adk, synth )
    g.connect_by_name( adk, iflow )
    g.connect_by_name( adk, init )
    g.connect_by_name( adk, power )
    g.connect_by_name( adk, place )
    g.connect_by_name( adk, cts )
    g.connect_by_name( adk, postcts_hold )
    g.connect_by_name( adk, route )
    g.connect_by_name( adk, postroute )
    g.connect_by_name( adk, postroute_hold )
    g.connect_by_name( adk, signoff )
    g.connect_by_name( adk, drc )
    g.connect_by_name( adk, lvs )
    # Feed the hardened glb_tile macro into every step that needs it.
    g.connect_by_name( glb_tile, synth )
    g.connect_by_name( glb_tile, iflow )
    g.connect_by_name( glb_tile, init )
    g.connect_by_name( glb_tile, power )
    g.connect_by_name( glb_tile, place )
    g.connect_by_name( glb_tile, cts )
    g.connect_by_name( glb_tile, postcts_hold )
    g.connect_by_name( glb_tile, route )
    g.connect_by_name( glb_tile, postroute )
    g.connect_by_name( glb_tile, postroute_hold )
    g.connect_by_name( glb_tile, signoff )
    g.connect_by_name( glb_tile, pt_signoff )
    g.connect_by_name( glb_tile, genlib )
    g.connect_by_name( glb_tile, drc )
    g.connect_by_name( glb_tile, lvs )
    g.connect_by_name( rtl, sim_compile )
    g.connect_by_name( sim_compile, sim_run )
    g.connect_by_name( rtl, synth )
    g.connect_by_name( constraints, synth )
    # glb_tile can use the same rtl as glb_top
    g.connect_by_name( rtl, glb_tile )
    g.connect_by_name( synth, iflow )
    g.connect_by_name( synth, init )
    g.connect_by_name( synth, power )
    g.connect_by_name( synth, place )
    g.connect_by_name( synth, cts )
    g.connect_by_name( iflow, init )
    g.connect_by_name( iflow, power )
    g.connect_by_name( iflow, place )
    g.connect_by_name( iflow, cts )
    g.connect_by_name( iflow, postcts_hold )
    g.connect_by_name( iflow, route )
    g.connect_by_name( iflow, postroute )
    g.connect_by_name( iflow, postroute_hold )
    g.connect_by_name( iflow, signoff )
    g.connect_by_name( custom_init, init )
    g.connect_by_name( custom_power, power )
    g.connect_by_name( custom_lvs, lvs )
    # Linear place-and-route chain: init -> ... -> signoff.
    g.connect_by_name( init, power )
    g.connect_by_name( power, place )
    g.connect_by_name( place, cts )
    g.connect_by_name( cts, postcts_hold )
    g.connect_by_name( postcts_hold, route )
    g.connect_by_name( route, postroute )
    g.connect_by_name( postroute, postroute_hold )
    g.connect_by_name( postroute_hold, signoff )
    g.connect_by_name( signoff, drc )
    g.connect_by_name( signoff, lvs )
    # Explicit edge: the merged gds has different names on each side.
    g.connect(signoff.o('design-merged.gds'), drc.i('design_merged.gds'))
    g.connect(signoff.o('design-merged.gds'), lvs.i('design_merged.gds'))
    g.connect_by_name( adk, pt_signoff )
    g.connect_by_name( signoff, pt_signoff )
    g.connect_by_name( adk, genlib )
    g.connect_by_name( signoff, genlib )
    g.connect_by_name( rtl, sim_gl_compile )
    g.connect_by_name( adk, sim_gl_compile )
    g.connect_by_name( glb_tile, sim_gl_compile )
    g.connect_by_name( signoff, sim_gl_compile )
    for test in parameters["gls_testvectors"]:
        g.connect_by_name( sim_gl_compile, sim_gl_run_nodes[test] )
    for test in parameters["gls_testvectors"]:
        g.connect_by_name( adk, ptpx_gl_nodes[test] )
        g.connect_by_name( glb_tile, ptpx_gl_nodes[test] )
        g.connect_by_name( signoff, ptpx_gl_nodes[test] )
        g.connect_by_name( sim_gl_run_nodes[test], ptpx_gl_nodes[test] )
    g.connect_by_name( genlib, lib2db )
    g.connect_by_name( adk, debugcalibre )
    g.connect_by_name( synth, debugcalibre )
    g.connect_by_name( iflow, debugcalibre )
    g.connect_by_name( signoff, debugcalibre )
    g.connect_by_name( drc, debugcalibre )
    g.connect_by_name( lvs, debugcalibre )
    #-----------------------------------------------------------------------
    # Parameterize
    #-----------------------------------------------------------------------
    g.update_params( parameters )
    # Since we are adding an additional input script to the generic Innovus
    # steps, we modify the order parameter for that node which determines
    # which scripts get run and when they get run.
    # rtl parameters update
    rtl.update_params( { 'glb_only': True }, allow_new=True )
    # pin assignment parameters update
    init.update_params( { 'array_width': parameters['array_width'] }, allow_new=True )
    init.update_params( { 'num_glb_tiles': parameters['num_glb_tiles'] }, allow_new=True )
    # Change nthreads
    synth.update_params( { 'nthreads': 4 } )
    iflow.update_params( { 'nthreads': 4 } )
    order = init.get_param('order') # get the default script run order
    reporting_idx = order.index( 'reporting.tcl' ) # find reporting.tcl
    # Add dont-touch before reporting
    order.insert ( reporting_idx, 'dont-touch.tcl' )
    init.update_params( { 'order': order } )
    # Increase hold slack on postroute_hold step
    postroute_hold.update_params( { 'hold_target_slack': parameters['hold_target_slack'] }, allow_new=True )
    # useful_skew
    cts.update_params( { 'useful_skew': False }, allow_new=True )
    return g
if __name__ == '__main__':
    # Build the flow graph; uncomment g.plot() to visualize it.
    g = construct()
    # g.plot()
|
StanfordAHA/garnet | cgra/wiring.py | <reponame>StanfordAHA/garnet
def glb_glc_wiring(garnet):
    """Connect global controller ports to their global buffer counterparts.

    Covers the JTAG CGRA-config bus, the GLB-config and SRAM-config
    register interfaces, the stream / parallel-config start and interrupt
    pulses, plus reset and stall.  Returns the mutated ``garnet`` circuit
    so the pass can be chained.
    """
    glc = garnet.global_controller.ports
    glb = garnet.global_buffer.ports

    # (controller port, buffer port) pairs, wired in this exact order.
    connections = [
        # reset / stall
        (glc.reset_out, glb.reset),
        (glc.glb_stall, glb.stall),
        # JTAG CGRA configuration bus (gc -> glb)
        (glc.cgra_config.config_addr, glb.cgra_cfg_jtag_gc2glb_addr),
        (glc.cgra_config.config_data, glb.cgra_cfg_jtag_gc2glb_data),
        (glc.cgra_config.read, glb.cgra_cfg_jtag_gc2glb_rd_en),
        (glc.cgra_config.write, glb.cgra_cfg_jtag_gc2glb_wr_en),
        # GLB configuration register interface
        (glc.glb_cfg.wr_en, glb.if_cfg_wr_en[0]),
        (glc.glb_cfg.wr_addr, glb.if_cfg_wr_addr),
        (glc.glb_cfg.wr_data, glb.if_cfg_wr_data),
        (glc.glb_cfg.rd_en, glb.if_cfg_rd_en[0]),
        (glc.glb_cfg.rd_addr, glb.if_cfg_rd_addr),
        (glc.glb_cfg.rd_data, glb.if_cfg_rd_data),
        (glc.glb_cfg.rd_data_valid, glb.if_cfg_rd_data_valid[0]),
        # GLB SRAM configuration interface
        (glc.sram_cfg.wr_en, glb.if_sram_cfg_wr_en[0]),
        (glc.sram_cfg.wr_addr, glb.if_sram_cfg_wr_addr),
        (glc.sram_cfg.wr_data, glb.if_sram_cfg_wr_data),
        (glc.sram_cfg.rd_en, glb.if_sram_cfg_rd_en[0]),
        (glc.sram_cfg.rd_addr, glb.if_sram_cfg_rd_addr),
        (glc.sram_cfg.rd_data, glb.if_sram_cfg_rd_data),
        (glc.sram_cfg.rd_data_valid, glb.if_sram_cfg_rd_data_valid[0]),
        # stream / parallel-config start pulses
        (glc.strm_g2f_start_pulse, glb.strm_g2f_start_pulse),
        (glc.strm_f2g_start_pulse, glb.strm_f2g_start_pulse),
        (glc.pc_start_pulse, glb.pcfg_start_pulse),
        # interrupt pulses (glb -> gc)
        (glc.strm_f2g_interrupt_pulse, glb.strm_f2g_interrupt_pulse),
        (glc.strm_g2f_interrupt_pulse, glb.strm_g2f_interrupt_pulse),
        (glc.pcfg_g2f_interrupt_pulse, glb.pcfg_g2f_interrupt_pulse),
        # CGRA stall fan-in
        (glc.cgra_stall, glb.cgra_stall_in),
    ]
    for src, dst in connections:
        garnet.wire(src, dst)
    return garnet
def glb_interconnect_wiring(garnet):
    """Connect global-buffer per-column ports to the CGRA interconnect.

    Each of the ``num_glb_tiles`` GLB tiles owns a contiguous slice of
    ``col_per_glb`` interconnect columns.  For every (tile, column) pair
    this wires the parallel-configuration bus, the per-column stall bit,
    and the four 16-/1-bit stream data/valid ports.  Returns ``garnet``
    so the pass can be chained.
    """
    width = garnet.width
    num_glb_tiles = garnet.glb_params.num_glb_tiles
    # The fabric width must split evenly across the GLB tiles.
    assert width % num_glb_tiles == 0
    col_per_glb = width // num_glb_tiles

    glb = garnet.global_buffer.ports
    ic = garnet.interconnect.ports

    # Parallel configuration bus: one config channel per fabric column.
    for tile in range(num_glb_tiles):
        for col in range(col_per_glb):
            cfg = ic.config[tile * col_per_glb + col]
            suffix = f"{tile}_{col}"
            garnet.wire(glb[f"cgra_cfg_g2f_cfg_data_{suffix}"], cfg.config_data)
            garnet.wire(glb[f"cgra_cfg_g2f_cfg_addr_{suffix}"], cfg.config_addr)
            garnet.wire(glb[f"cgra_cfg_g2f_cfg_rd_en_{suffix}"], cfg.read)
            garnet.wire(glb[f"cgra_cfg_g2f_cfg_wr_en_{suffix}"], cfg.write)

    # Per-column stall bit.
    for tile in range(num_glb_tiles):
        for col in range(col_per_glb):
            garnet.wire(glb[f"cgra_stall_{tile}_{col}"][0],
                        ic.stall[tile * col_per_glb + col])

    # Stream data/valid ports, both directions, at the row-0 I/O boundary.
    for tile in range(num_glb_tiles):
        for col in range(col_per_glb):
            x = tile * col_per_glb + col
            garnet.wire(glb[f"stream_data_f2g_{tile}_{col}"],
                        ic[f"io2glb_16_X{x:02X}_Y00"])
            garnet.wire(glb[f"stream_data_valid_f2g_{tile}_{col}"],
                        ic[f"io2glb_1_X{x:02X}_Y00"])
            garnet.wire(glb[f"stream_data_g2f_{tile}_{col}"],
                        ic[f"glb2io_16_X{x:02X}_Y00"])
            garnet.wire(glb[f"stream_data_valid_g2f_{tile}_{col}"],
                        ic[f"glb2io_1_X{x:02X}_Y00"])
    return garnet
"""Useful pass to connect all wires in global controller"""
def glc_interconnect_wiring(garnet):
# global controller <-> interconnect ports connection
garnet.wire(garnet.global_controller.ports.reset_out,
garnet.interconnect.ports.reset)
garnet.wire(garnet.interconnect.ports.read_config_data,
garnet.global_controller.ports.read_data_in)
return garnet
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.