repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
StanfordAHA/garnet | global_buffer/design/global_buffer_parameter.py | import dataclasses
import math
import os
@dataclasses.dataclass(eq=True, frozen=False)
class GlobalBufferParams:
    """Width/depth parameters for the global buffer hardware generator.

    NOTE: every derived default (the ``*_width`` / ``*_offset`` fields) is a
    class-level expression evaluated ONCE at class-definition time from the
    *defaults* of the base fields above it.  Overriding a base field in the
    constructor therefore does NOT re-derive the dependent fields; use
    ``gen_global_buffer_params`` to build a self-consistent instance.
    """
    # cgra parameters
    num_prr: int = 16  # number of partial reconfiguration regions
    num_prr_width: int = math.ceil(math.log(num_prr, 2))  # bits to index a PRR
    cgra_axi_addr_width: int = 13
    cgra_axi_data_width: int = 32
    # tile parameters
    num_glb_tiles: int = 16
    tile_sel_addr_width: int = math.ceil(math.log(num_glb_tiles, 2))  # bits to select a glb tile
    # cgra tiles
    num_cgra_tiles: int = 32
    # cgra tiles per glb tile
    cgra_per_glb: int = num_cgra_tiles // num_glb_tiles  # 2
    # bank parameters
    banks_per_tile: int = 2
    bank_sel_addr_width: int = math.ceil(math.log(banks_per_tile, 2))  # bits to select a bank in a tile
    bank_data_width: int = 64
    sram_macro_depth: int = 2048
    bank_addr_width: int = 17
    bank_byte_offset: int = math.ceil(math.log(bank_data_width / 8, 2))  # log2 of bytes per bank word
    # glb parameters: full address = {tile sel, bank sel, in-bank address}
    glb_addr_width: int = (bank_addr_width
                           + bank_sel_addr_width
                           + tile_sel_addr_width)
    # cgra data parameters
    cgra_data_width: int = 16
    cgra_byte_offset: int = math.ceil(math.log(cgra_data_width / 8, 2))  # log2 of bytes per cgra word
    # glb config parameters
    axi_addr_width: int = 12
    # TODO noted in gen_global_buffer_params: should be derived from the
    # configuration space rather than fixed.
    axi_addr_reg_width: int = 6
    axi_data_width: int = 32
    axi_strb_width: int = math.ceil(axi_data_width / 8)  # one strobe bit per data byte
    axi_byte_offset: int = math.ceil(math.log(axi_data_width / 8, 2))
    # max number of words in dma header (in cgra-word units, hence the
    # extra log2(bank_data_width / cgra_data_width) bits)
    max_num_words_width: int = (glb_addr_width
                                - bank_byte_offset
                                + math.ceil(math.log((bank_data_width
                                                      / cgra_data_width), 2)))
    max_stride_width: int = 10
    # max number of bitstream in dma header (in bank-word units)
    max_num_cfg_width: int = glb_addr_width - bank_byte_offset
    # cgra config parameters
    cgra_cfg_addr_width: int = 32
    cgra_cfg_data_width: int = 32
    # dma address generator
    queue_depth: int = 1
    loop_level: int = 5
    # dma latency counter width
    latency_width: int = 2 + math.ceil(math.log(num_glb_tiles, 2))
    # pipeline depth
    glb_switch_pipeline_depth: int = 1  # fixed
    glb_bank_memory_pipeline_depth: int = 1
    sram_gen_pipeline_depth: int = 1
    sram_gen_output_pipeline_depth: int = 1
    # nonzero -> GlbTile emits $readmemh/$writememh hooks for the SRAM stub
    is_sram_stub: int = 0
def gen_global_buffer_params(**kwargs):
    """Build a self-consistent :class:`GlobalBufferParams` from overrides.

    Unlike constructing ``GlobalBufferParams`` directly (whose derived
    defaults are frozen at class-definition time), this recomputes every
    derived width from the user-supplied base parameters.

    Parameters are keyword-only overrides (``num_prr``, ``num_glb_tiles``,
    ``glb_tile_mem_size`` in KB, ...); any unrecognized keyword raises.

    Returns:
        GlobalBufferParams: parameters with all derived widths recomputed.

    Raises:
        Exception: if an unsupported keyword argument is supplied.
        AssertionError: if ``num_cgra_cols != 2 * num_glb_tiles`` or
            ``glb_tile_mem_size`` is not a power of two.
    """
    # User-defined parameters
    num_prr = kwargs.pop('num_prr', 16)
    num_glb_tiles = kwargs.pop('num_glb_tiles', 16)
    num_cgra_cols = kwargs.pop('num_cgra_cols', 32)
    glb_tile_mem_size = kwargs.pop('glb_tile_mem_size', 256)
    banks_per_tile = kwargs.pop('banks_per_tile', 2)
    bank_data_width = kwargs.pop('bank_data_width', 64)
    sram_macro_depth = kwargs.pop('sram_macro_depth', 2048)
    axi_addr_width = kwargs.pop('axi_addr_width', 12)
    cgra_axi_addr_width = kwargs.pop('cgra_axi_addr_width', 13)
    cgra_axi_data_width = kwargs.pop('cgra_axi_data_width', 32)
    # TODO: axi_addr_reg_width should be automatically calculated based on configuration space
    axi_addr_reg_width = kwargs.pop('axi_addr_reg_width', 6)
    axi_data_width = kwargs.pop('axi_data_width', 32)
    cfg_addr_width = kwargs.pop('cfg_addr_width', 32)
    cfg_data_width = kwargs.pop('cfg_data_width', 32)
    cgra_data_width = kwargs.pop('cgra_data_width', 16)
    max_stride_width = kwargs.pop('max_stride_width', 10)
    queue_depth = kwargs.pop('queue_depth', 1)
    loop_level = kwargs.pop('loop_level', 5)
    glb_bank_memory_pipeline_depth = kwargs.pop('glb_bank_memory_pipeline_depth', 1)
    sram_gen_pipeline_depth = kwargs.pop('sram_gen_pipeline_depth', 1)
    sram_gen_output_pipeline_depth = kwargs.pop('sram_gen_output_pipeline_depth', 1)
    is_sram_stub = kwargs.pop('is_sram_stub', 0)
    # Check if there is unused kwargs
    if kwargs:
        raise Exception(f"{kwargs.keys()} are not supported parameters")
    # the number of glb tiles is half the number of cgra columns
    assert 2 * num_glb_tiles == num_cgra_cols

    def _power_of_two(n):
        # Bit trick: a positive power of two has exactly one set bit.
        # (The previous implementation recursed with float division
        # ``n / 2``, which depends on float exactness for large n.)
        return n > 0 and (n & (n - 1)) == 0

    assert _power_of_two(glb_tile_mem_size) is True

    def _clog2(value):
        # Ceiling of log2, used for all address/width sizing below.
        return math.ceil(math.log(value, 2))

    # Derived widths, recomputed from the (possibly overridden) bases.
    tile_sel_addr_width = _clog2(num_glb_tiles)
    bank_sel_addr_width = _clog2(banks_per_tile)
    # glb_tile_mem_size is in KB, so we add 10 (log2 of 1024)
    bank_addr_width = _clog2(glb_tile_mem_size) - bank_sel_addr_width + 10
    bank_byte_offset = _clog2(bank_data_width / 8)
    cgra_byte_offset = _clog2(cgra_data_width / 8)
    axi_strb_width = math.ceil(axi_data_width / 8)
    axi_byte_offset = _clog2(axi_data_width / 8)
    glb_addr_width = bank_addr_width + bank_sel_addr_width + tile_sel_addr_width
    cgra_per_glb = num_cgra_cols // num_glb_tiles
    # DMA word count is expressed in cgra-word units, hence the extra bits.
    max_num_words_width = (glb_addr_width - bank_byte_offset
                           + _clog2(bank_data_width / cgra_data_width))
    max_num_cfg_width = glb_addr_width - bank_byte_offset
    latency_width = 2 + _clog2(num_glb_tiles)
    params = GlobalBufferParams(num_prr=num_prr,
                                num_glb_tiles=num_glb_tiles,
                                tile_sel_addr_width=tile_sel_addr_width,
                                num_cgra_tiles=num_cgra_cols,
                                cgra_per_glb=cgra_per_glb,
                                banks_per_tile=banks_per_tile,
                                bank_sel_addr_width=bank_sel_addr_width,
                                bank_data_width=bank_data_width,
                                sram_macro_depth=sram_macro_depth,
                                bank_addr_width=bank_addr_width,
                                bank_byte_offset=bank_byte_offset,
                                glb_addr_width=glb_addr_width,
                                cgra_data_width=cgra_data_width,
                                cgra_byte_offset=cgra_byte_offset,
                                cgra_axi_addr_width=cgra_axi_addr_width,
                                cgra_axi_data_width=cgra_axi_data_width,
                                axi_addr_width=axi_addr_width,
                                axi_addr_reg_width=axi_addr_reg_width,
                                axi_data_width=axi_data_width,
                                axi_strb_width=axi_strb_width,
                                axi_byte_offset=axi_byte_offset,
                                max_num_words_width=max_num_words_width,
                                max_stride_width=max_stride_width,
                                max_num_cfg_width=max_num_cfg_width,
                                cgra_cfg_addr_width=cfg_addr_width,
                                cgra_cfg_data_width=cfg_data_width,
                                queue_depth=queue_depth,
                                loop_level=loop_level,
                                latency_width=latency_width,
                                glb_bank_memory_pipeline_depth=glb_bank_memory_pipeline_depth,
                                sram_gen_pipeline_depth=sram_gen_pipeline_depth,
                                sram_gen_output_pipeline_depth=sram_gen_output_pipeline_depth,
                                is_sram_stub=is_sram_stub
                                )
    return params
def gen_header_files(params, svh_filename, h_filename, header_name):
    """Dump *params* as a SystemVerilog package header and a C header.

    Every dataclass field becomes an UPPER_CASE ``localparam int`` in the
    SV package and a ``#define`` in the C header, coerced to int.

    Args:
        params: a dataclass instance (e.g. GlobalBufferParams).
        svh_filename: output path for the SV package; its parent directory
            is created if missing.
        h_filename: output path for the C header.
        header_name: base name for the SV include guard and package name.
    """
    mod_params = dataclasses.asdict(params)
    # os.path.dirname handles bare filenames (returns "") and platform
    # separators; the old rsplit('/', 1)[0] returned the filename itself
    # when there was no '/', then tried to makedirs() it.
    folder = os.path.dirname(svh_filename)
    if folder:
        # exist_ok avoids the check-then-create race of the old
        # os.path.exists() guard.
        os.makedirs(folder, exist_ok=True)
    guard = header_name.upper()
    # parameter pass to systemverilog package
    with open(svh_filename, "w") as f:
        f.write(f"`ifndef {guard}_PARAM\n")
        f.write(f"`define {guard}_PARAM\n")
        f.write(f"package {header_name}_param;\n")
        for k, v in mod_params.items():
            f.write(f"localparam int {k.upper()} = {int(v)};\n")
        f.write("endpackage\n")
        f.write("`endif\n")
    with open(h_filename, "w") as f:
        f.write("#pragma once\n")
        for k, v in mod_params.items():
            f.write(f"#define {k.upper()} {int(v)}\n")
|
StanfordAHA/garnet | global_buffer/design/glb_tile.py | from kratos import Generator, RawStringStmt
from global_buffer.design.glb_core import GlbCore
from global_buffer.design.glb_tile_cfg import GlbTileCfg
from global_buffer.design.glb_tile_pcfg_switch import GlbTilePcfgSwitch
from global_buffer.design.glb_cfg_ifc import GlbConfigInterface
from global_buffer.design.global_buffer_parameter import GlobalBufferParams
from global_buffer.design.glb_header import GlbHeader
class GlbTile(Generator):
    def __init__(self, _params: GlobalBufferParams):
        """Declare every boundary port of one glb tile and instantiate /
        wire its sub-generators (config block, pcfg switch, core).

        Port-name convention (as used consistently below): ``w2e``/``e2w``
        is the ring direction (west-to-east / east-to-west); ``wsti`` /
        ``esti`` are west-/east-side inputs, ``wsto`` / ``esto`` the
        corresponding outputs.

        Args:
            _params: elaboration-time widths/depths used to size all ports.
        """
        super().__init__("glb_tile")
        self._params = _params
        self.header = GlbHeader(self._params)

        # clock / clock-enable / reset / tile index
        self.clk = self.clock("clk")
        self.clk_en = self.clock_en("clk_en")
        self.reset = self.reset("reset")
        self.glb_tile_id = self.input("glb_tile_id", self._params.tile_sel_addr_width)

        # --- processor packet channel: e2w chain, east-side inputs ---
        self.proc_wr_en_e2w_esti = self.input("proc_wr_en_e2w_esti", 1)
        self.proc_wr_strb_e2w_esti = self.input("proc_wr_strb_e2w_esti", self._params.bank_data_width // 8)
        self.proc_wr_addr_e2w_esti = self.input("proc_wr_addr_e2w_esti", self._params.glb_addr_width)
        self.proc_wr_data_e2w_esti = self.input("proc_wr_data_e2w_esti", self._params.bank_data_width)
        self.proc_rd_en_e2w_esti = self.input("proc_rd_en_e2w_esti", 1)
        self.proc_rd_addr_e2w_esti = self.input("proc_rd_addr_e2w_esti", self._params.glb_addr_width)
        self.proc_rd_data_e2w_esti = self.input("proc_rd_data_e2w_esti", self._params.bank_data_width)
        self.proc_rd_data_valid_e2w_esti = self.input("proc_rd_data_valid_e2w_esti", 1)
        # processor packet: w2e chain, east-side outputs
        self.proc_wr_en_w2e_esto = self.output("proc_wr_en_w2e_esto", 1)
        self.proc_wr_strb_w2e_esto = self.output("proc_wr_strb_w2e_esto", self._params.bank_data_width // 8)
        self.proc_wr_addr_w2e_esto = self.output("proc_wr_addr_w2e_esto", self._params.glb_addr_width)
        self.proc_wr_data_w2e_esto = self.output("proc_wr_data_w2e_esto", self._params.bank_data_width)
        self.proc_rd_en_w2e_esto = self.output("proc_rd_en_w2e_esto", 1)
        self.proc_rd_addr_w2e_esto = self.output("proc_rd_addr_w2e_esto", self._params.glb_addr_width)
        self.proc_rd_data_w2e_esto = self.output("proc_rd_data_w2e_esto", self._params.bank_data_width)
        self.proc_rd_data_valid_w2e_esto = self.output("proc_rd_data_valid_w2e_esto", 1)
        # processor packet: w2e chain, west-side inputs
        self.proc_wr_en_w2e_wsti = self.input("proc_wr_en_w2e_wsti", 1)
        self.proc_wr_strb_w2e_wsti = self.input("proc_wr_strb_w2e_wsti", self._params.bank_data_width // 8)
        self.proc_wr_addr_w2e_wsti = self.input("proc_wr_addr_w2e_wsti", self._params.glb_addr_width)
        self.proc_wr_data_w2e_wsti = self.input("proc_wr_data_w2e_wsti", self._params.bank_data_width)
        self.proc_rd_en_w2e_wsti = self.input("proc_rd_en_w2e_wsti", 1)
        self.proc_rd_addr_w2e_wsti = self.input("proc_rd_addr_w2e_wsti", self._params.glb_addr_width)
        self.proc_rd_data_w2e_wsti = self.input("proc_rd_data_w2e_wsti", self._params.bank_data_width)
        self.proc_rd_data_valid_w2e_wsti = self.input("proc_rd_data_valid_w2e_wsti", 1)
        # processor packet: e2w chain, west-side outputs
        self.proc_wr_en_e2w_wsto = self.output("proc_wr_en_e2w_wsto", 1)
        self.proc_wr_strb_e2w_wsto = self.output("proc_wr_strb_e2w_wsto", self._params.bank_data_width // 8)
        self.proc_wr_addr_e2w_wsto = self.output("proc_wr_addr_e2w_wsto", self._params.glb_addr_width)
        self.proc_wr_data_e2w_wsto = self.output("proc_wr_data_e2w_wsto", self._params.bank_data_width)
        self.proc_rd_en_e2w_wsto = self.output("proc_rd_en_e2w_wsto", 1)
        self.proc_rd_addr_e2w_wsto = self.output("proc_rd_addr_e2w_wsto", self._params.glb_addr_width)
        self.proc_rd_data_e2w_wsto = self.output("proc_rd_data_e2w_wsto", self._params.bank_data_width)
        self.proc_rd_data_valid_e2w_wsto = self.output("proc_rd_data_valid_e2w_wsto", 1)

        # --- stream packet channel: same four chain segments as proc ---
        self.strm_wr_en_e2w_esti = self.input("strm_wr_en_e2w_esti", 1)
        self.strm_wr_strb_e2w_esti = self.input("strm_wr_strb_e2w_esti", self._params.bank_data_width // 8)
        self.strm_wr_addr_e2w_esti = self.input("strm_wr_addr_e2w_esti", self._params.glb_addr_width)
        self.strm_wr_data_e2w_esti = self.input("strm_wr_data_e2w_esti", self._params.bank_data_width)
        self.strm_rd_en_e2w_esti = self.input("strm_rd_en_e2w_esti", 1)
        self.strm_rd_addr_e2w_esti = self.input("strm_rd_addr_e2w_esti", self._params.glb_addr_width)
        self.strm_rd_data_e2w_esti = self.input("strm_rd_data_e2w_esti", self._params.bank_data_width)
        self.strm_rd_data_valid_e2w_esti = self.input("strm_rd_data_valid_e2w_esti", 1)
        self.strm_wr_en_w2e_esto = self.output("strm_wr_en_w2e_esto", 1)
        self.strm_wr_strb_w2e_esto = self.output("strm_wr_strb_w2e_esto", self._params.bank_data_width // 8)
        self.strm_wr_addr_w2e_esto = self.output("strm_wr_addr_w2e_esto", self._params.glb_addr_width)
        self.strm_wr_data_w2e_esto = self.output("strm_wr_data_w2e_esto", self._params.bank_data_width)
        self.strm_rd_en_w2e_esto = self.output("strm_rd_en_w2e_esto", 1)
        self.strm_rd_addr_w2e_esto = self.output("strm_rd_addr_w2e_esto", self._params.glb_addr_width)
        self.strm_rd_data_w2e_esto = self.output("strm_rd_data_w2e_esto", self._params.bank_data_width)
        self.strm_rd_data_valid_w2e_esto = self.output("strm_rd_data_valid_w2e_esto", 1)
        self.strm_wr_en_w2e_wsti = self.input("strm_wr_en_w2e_wsti", 1)
        self.strm_wr_strb_w2e_wsti = self.input("strm_wr_strb_w2e_wsti", self._params.bank_data_width // 8)
        self.strm_wr_addr_w2e_wsti = self.input("strm_wr_addr_w2e_wsti", self._params.glb_addr_width)
        self.strm_wr_data_w2e_wsti = self.input("strm_wr_data_w2e_wsti", self._params.bank_data_width)
        self.strm_rd_en_w2e_wsti = self.input("strm_rd_en_w2e_wsti", 1)
        self.strm_rd_addr_w2e_wsti = self.input("strm_rd_addr_w2e_wsti", self._params.glb_addr_width)
        self.strm_rd_data_w2e_wsti = self.input("strm_rd_data_w2e_wsti", self._params.bank_data_width)
        self.strm_rd_data_valid_w2e_wsti = self.input("strm_rd_data_valid_w2e_wsti", 1)
        self.strm_wr_en_e2w_wsto = self.output("strm_wr_en_e2w_wsto", 1)
        self.strm_wr_strb_e2w_wsto = self.output("strm_wr_strb_e2w_wsto", self._params.bank_data_width // 8)
        self.strm_wr_addr_e2w_wsto = self.output("strm_wr_addr_e2w_wsto", self._params.glb_addr_width)
        self.strm_wr_data_e2w_wsto = self.output("strm_wr_data_e2w_wsto", self._params.bank_data_width)
        self.strm_rd_en_e2w_wsto = self.output("strm_rd_en_e2w_wsto", 1)
        self.strm_rd_addr_e2w_wsto = self.output("strm_rd_addr_e2w_wsto", self._params.glb_addr_width)
        self.strm_rd_data_e2w_wsto = self.output("strm_rd_data_e2w_wsto", self._params.bank_data_width)
        self.strm_rd_data_valid_e2w_wsto = self.output("strm_rd_data_valid_e2w_wsto", 1)

        # --- pcfg (parallel-config) packet channel: read-only ---
        self.pcfg_rd_en_e2w_esti = self.input("pcfg_rd_en_e2w_esti", 1)
        self.pcfg_rd_addr_e2w_esti = self.input("pcfg_rd_addr_e2w_esti", self._params.glb_addr_width)
        self.pcfg_rd_data_e2w_esti = self.input("pcfg_rd_data_e2w_esti", self._params.bank_data_width)
        self.pcfg_rd_data_valid_e2w_esti = self.input("pcfg_rd_data_valid_e2w_esti", 1)
        self.pcfg_rd_en_w2e_esto = self.output("pcfg_rd_en_w2e_esto", 1)
        self.pcfg_rd_addr_w2e_esto = self.output("pcfg_rd_addr_w2e_esto", self._params.glb_addr_width)
        self.pcfg_rd_data_w2e_esto = self.output("pcfg_rd_data_w2e_esto", self._params.bank_data_width)
        self.pcfg_rd_data_valid_w2e_esto = self.output("pcfg_rd_data_valid_w2e_esto", 1)
        self.pcfg_rd_en_w2e_wsti = self.input("pcfg_rd_en_w2e_wsti", 1)
        self.pcfg_rd_addr_w2e_wsti = self.input("pcfg_rd_addr_w2e_wsti", self._params.glb_addr_width)
        self.pcfg_rd_data_w2e_wsti = self.input("pcfg_rd_data_w2e_wsti", self._params.bank_data_width)
        self.pcfg_rd_data_valid_w2e_wsti = self.input("pcfg_rd_data_valid_w2e_wsti", 1)
        self.pcfg_rd_en_e2w_wsto = self.output("pcfg_rd_en_e2w_wsto", 1)
        self.pcfg_rd_addr_e2w_wsto = self.output("pcfg_rd_addr_e2w_wsto", self._params.glb_addr_width)
        self.pcfg_rd_data_e2w_wsto = self.output("pcfg_rd_data_e2w_wsto", self._params.bank_data_width)
        self.pcfg_rd_data_valid_e2w_wsto = self.output("pcfg_rd_data_valid_e2w_wsto", 1)

        # --- config bus: east master / west slave (flat ports; bundled
        # into interfaces in interface_wiring) ---
        self.if_cfg_est_m_wr_en = self.output("if_cfg_est_m_wr_en", 1)
        self.if_cfg_est_m_wr_addr = self.output("if_cfg_est_m_wr_addr", self._params.axi_addr_width)
        self.if_cfg_est_m_wr_data = self.output("if_cfg_est_m_wr_data", self._params.axi_data_width)
        self.if_cfg_est_m_rd_en = self.output("if_cfg_est_m_rd_en", 1)
        self.if_cfg_est_m_rd_addr = self.output("if_cfg_est_m_rd_addr", self._params.axi_addr_width)
        self.if_cfg_est_m_rd_data = self.input("if_cfg_est_m_rd_data", self._params.axi_data_width)
        self.if_cfg_est_m_rd_data_valid = self.input("if_cfg_est_m_rd_data_valid", 1)
        self.if_cfg_wst_s_wr_en = self.input("if_cfg_wst_s_wr_en", 1)
        self.if_cfg_wst_s_wr_addr = self.input("if_cfg_wst_s_wr_addr", self._params.axi_addr_width)
        self.if_cfg_wst_s_wr_data = self.input("if_cfg_wst_s_wr_data", self._params.axi_data_width)
        self.if_cfg_wst_s_rd_en = self.input("if_cfg_wst_s_rd_en", 1)
        self.if_cfg_wst_s_rd_addr = self.input("if_cfg_wst_s_rd_addr", self._params.axi_addr_width)
        self.if_cfg_wst_s_rd_data = self.output("if_cfg_wst_s_rd_data", self._params.axi_data_width)
        self.if_cfg_wst_s_rd_data_valid = self.output("if_cfg_wst_s_rd_data_valid", 1)
        # --- sram config bus: same shape, but glb-wide address ---
        self.if_sram_cfg_est_m_wr_en = self.output("if_sram_cfg_est_m_wr_en", 1)
        self.if_sram_cfg_est_m_wr_addr = self.output("if_sram_cfg_est_m_wr_addr", self._params.glb_addr_width)
        self.if_sram_cfg_est_m_wr_data = self.output("if_sram_cfg_est_m_wr_data", self._params.axi_data_width)
        self.if_sram_cfg_est_m_rd_en = self.output("if_sram_cfg_est_m_rd_en", 1)
        self.if_sram_cfg_est_m_rd_addr = self.output("if_sram_cfg_est_m_rd_addr", self._params.glb_addr_width)
        self.if_sram_cfg_est_m_rd_data = self.input("if_sram_cfg_est_m_rd_data", self._params.axi_data_width)
        self.if_sram_cfg_est_m_rd_data_valid = self.input("if_sram_cfg_est_m_rd_data_valid", 1)
        self.if_sram_cfg_wst_s_wr_en = self.input("if_sram_cfg_wst_s_wr_en", 1)
        self.if_sram_cfg_wst_s_wr_addr = self.input("if_sram_cfg_wst_s_wr_addr", self._params.glb_addr_width)
        self.if_sram_cfg_wst_s_wr_data = self.input("if_sram_cfg_wst_s_wr_data", self._params.axi_data_width)
        self.if_sram_cfg_wst_s_rd_en = self.input("if_sram_cfg_wst_s_rd_en", 1)
        self.if_sram_cfg_wst_s_rd_addr = self.input("if_sram_cfg_wst_s_rd_addr", self._params.glb_addr_width)
        self.if_sram_cfg_wst_s_rd_data = self.output("if_sram_cfg_wst_s_rd_data", self._params.axi_data_width)
        self.if_sram_cfg_wst_s_rd_data_valid = self.output("if_sram_cfg_wst_s_rd_data_valid", 1)

        # tile-chaining flags, daisy-chained west -> east
        self.cfg_tile_connected_wsti = self.input("cfg_tile_connected_wsti", 1)
        self.cfg_tile_connected_esto = self.output("cfg_tile_connected_esto", 1)
        self.cfg_pcfg_tile_connected_wsti = self.input("cfg_pcfg_tile_connected_wsti", 1)
        self.cfg_pcfg_tile_connected_esto = self.output("cfg_pcfg_tile_connected_esto", 1)

        # --- cgra configuration via JTAG, passed through west -> east ---
        self.cgra_cfg_jtag_wsti_wr_en = self.input("cgra_cfg_jtag_wsti_wr_en", 1)
        self.cgra_cfg_jtag_wsti_rd_en = self.input("cgra_cfg_jtag_wsti_rd_en", 1)
        self.cgra_cfg_jtag_wsti_addr = self.input("cgra_cfg_jtag_wsti_addr", self._params.cgra_cfg_addr_width)
        self.cgra_cfg_jtag_wsti_data = self.input("cgra_cfg_jtag_wsti_data", self._params.cgra_cfg_data_width)
        self.cgra_cfg_jtag_esto_wr_en = self.output("cgra_cfg_jtag_esto_wr_en", 1)
        self.cgra_cfg_jtag_esto_rd_en = self.output("cgra_cfg_jtag_esto_rd_en", 1)
        self.cgra_cfg_jtag_esto_addr = self.output("cgra_cfg_jtag_esto_addr", self._params.cgra_cfg_addr_width)
        self.cgra_cfg_jtag_esto_data = self.output("cgra_cfg_jtag_esto_data", self._params.cgra_cfg_data_width)
        # JTAG read-path bypass (rd_en/addr only)
        self.cgra_cfg_jtag_wsti_rd_en_bypass = self.input("cgra_cfg_jtag_wsti_rd_en_bypass", 1)
        self.cgra_cfg_jtag_wsti_addr_bypass = self.input("cgra_cfg_jtag_wsti_addr_bypass", self._params.cgra_cfg_addr_width)
        self.cgra_cfg_jtag_esto_rd_en_bypass = self.output("cgra_cfg_jtag_esto_rd_en_bypass", 1)
        self.cgra_cfg_jtag_esto_addr_bypass = self.output("cgra_cfg_jtag_esto_addr_bypass", self._params.cgra_cfg_addr_width)
        # --- cgra configuration via parallel config (pcfg) ---
        self.cgra_cfg_pcfg_wsti_wr_en = self.input("cgra_cfg_pcfg_wsti_wr_en", 1)
        self.cgra_cfg_pcfg_wsti_rd_en = self.input("cgra_cfg_pcfg_wsti_rd_en", 1)
        self.cgra_cfg_pcfg_wsti_addr = self.input("cgra_cfg_pcfg_wsti_addr", self._params.cgra_cfg_addr_width)
        self.cgra_cfg_pcfg_wsti_data = self.input("cgra_cfg_pcfg_wsti_data", self._params.cgra_cfg_data_width)
        self.cgra_cfg_pcfg_esto_wr_en = self.output("cgra_cfg_pcfg_esto_wr_en", 1)
        self.cgra_cfg_pcfg_esto_rd_en = self.output("cgra_cfg_pcfg_esto_rd_en", 1)
        self.cgra_cfg_pcfg_esto_addr = self.output("cgra_cfg_pcfg_esto_addr", self._params.cgra_cfg_addr_width)
        self.cgra_cfg_pcfg_esto_data = self.output("cgra_cfg_pcfg_esto_data", self._params.cgra_cfg_data_width)

        # --- per-column streaming data to/from the fabric (f2g/g2f),
        # one lane per cgra column under this tile ---
        self.stream_data_f2g = self.input(
            "stream_data_f2g", self._params.cgra_data_width, size=self._params.cgra_per_glb, packed=True)
        self.stream_data_valid_f2g = self.input(
            "stream_data_valid_f2g", 1, size=self._params.cgra_per_glb, packed=True)
        self.stream_data_g2f = self.output(
            "stream_data_g2f", self._params.cgra_data_width, size=self._params.cgra_per_glb, packed=True)
        self.stream_data_valid_g2f = self.output(
            "stream_data_valid_g2f", 1, size=self._params.cgra_per_glb, packed=True)
        # per-column cgra config fan-out (driven by the pcfg switch)
        self.cgra_cfg_g2f_cfg_wr_en = self.output(
            "cgra_cfg_g2f_cfg_wr_en", 1, size=self._params.cgra_per_glb, packed=True)
        self.cgra_cfg_g2f_cfg_rd_en = self.output(
            "cgra_cfg_g2f_cfg_rd_en", 1, size=self._params.cgra_per_glb, packed=True)
        self.cgra_cfg_g2f_cfg_addr = self.output(
            "cgra_cfg_g2f_cfg_addr", self._params.cgra_cfg_addr_width, size=self._params.cgra_per_glb, packed=True)
        self.cgra_cfg_g2f_cfg_data = self.output(
            "cgra_cfg_g2f_cfg_data", self._params.cgra_cfg_data_width, size=self._params.cgra_per_glb, packed=True)

        # DMA start/interrupt pulse ports
        self.strm_g2f_start_pulse = self.input("strm_g2f_start_pulse", 1)
        self.strm_f2g_start_pulse = self.input("strm_f2g_start_pulse", 1)
        self.pcfg_start_pulse = self.input("pcfg_start_pulse", 1)
        self.strm_f2g_interrupt_pulse = self.output("strm_f2g_interrupt_pulse", 1)
        self.strm_g2f_interrupt_pulse = self.output("strm_g2f_interrupt_pulse", 1)
        self.pcfg_g2f_interrupt_pulse = self.output("pcfg_g2f_interrupt_pulse", 1)

        # interface bundle definitions and instances
        self.if_cfg = GlbConfigInterface(
            addr_width=self._params.axi_addr_width, data_width=self._params.axi_data_width)
        self.if_sram_cfg = GlbConfigInterface(
            addr_width=self._params.glb_addr_width, data_width=self._params.axi_data_width)
        self.if_cfg_est_m = self.interface(self.if_cfg, "if_cfg_est_m")
        self.if_cfg_wst_s = self.interface(self.if_cfg, "if_cfg_wst_s")
        self.if_sram_cfg_est_m = self.interface(self.if_sram_cfg, "if_sram_cfg_est_m")
        self.if_sram_cfg_wst_s = self.interface(self.if_sram_cfg, "if_sram_cfg_wst_s")

        # sub-generators
        self.glb_tile_cfg = GlbTileCfg(_params=self._params)
        self.add_child("glb_tile_cfg",
                       self.glb_tile_cfg,
                       clk=self.clk,
                       reset=self.reset,
                       glb_tile_id=self.glb_tile_id)
        self.glb_tile_pcfg_switch = GlbTilePcfgSwitch(_params=self._params)
        self.add_child("glb_tile_pcfg_switch",
                       self.glb_tile_pcfg_switch,
                       clk=self.clk,
                       reset=self.reset)
        self.glb_core = GlbCore(_params=self._params)
        self.add_child("glb_core",
                       self.glb_core,
                       clk=self.clk,
                       clk_en=self.clk_en,
                       reset=self.reset,
                       glb_tile_id=self.glb_tile_id)

        # Simulation-only hooks for the behavioral SRAM stub: load bank
        # contents from +LOAD<i> plusarg files at time 0 and dump them to
        # files at end of simulation under +SAVE<i>.
        if self._params.is_sram_stub:
            self.readmemh_block = RawStringStmt(["initial begin",
                                                 "\tstring b0_file_name;",
                                                 "\tstring b1_file_name;",
                                                 "\tstring load_arg;",
                                                 "\t$sformat(b0_file_name, \"testvectors/tile%0d_b0.dat\", glb_tile_id);",
                                                 "\t$sformat(b1_file_name, \"testvectors/tile%0d_b1.dat\", glb_tile_id);",
                                                 "\t$sformat(load_arg, \"LOAD%0d\", glb_tile_id);",
                                                 "\tif (($test$plusargs(load_arg))) begin",
                                                 "\t\t$readmemh(b0_file_name, glb_core.glb_bank_0.glb_bank_memory.glb_bank_sram_stub.mem);",  # noqa
                                                 "\t\t$readmemh(b1_file_name, glb_core.glb_bank_1.glb_bank_memory.glb_bank_sram_stub.mem);",  # noqa
                                                 "\tend",
                                                 "end"])
            self.writememh_block = RawStringStmt(["final begin",
                                                  "\tstring b0_file_name;",
                                                  "\tstring b1_file_name;",
                                                  "\tstring save_arg;",
                                                  "\t$sformat(b0_file_name, \"testvectors/tile%0d_b0_out.dat\", glb_tile_id);",
                                                  "\t$sformat(b1_file_name, \"testvectors/tile%0d_b1_out.dat\", glb_tile_id);",
                                                  "\t$sformat(save_arg, \"SAVE%0d\", glb_tile_id);",
                                                  "\tif (($test$plusargs(save_arg))) begin",
                                                  "\t\t$writememh(b0_file_name, glb_core.glb_bank_0.glb_bank_memory.glb_bank_sram_stub.mem);",  # noqa
                                                  "\t\t$writememh(b1_file_name, glb_core.glb_bank_1.glb_bank_memory.glb_bank_sram_stub.mem);",  # noqa
                                                  "\tend",
                                                  "end"])
            self.add_stmt(self.readmemh_block.stmt())
            self.add_stmt(self.writememh_block.stmt())

        # wire everything up
        self.interface_wiring()
        self.tile2cfg_wiring()
        self.tile2core_wiring()
        self.tile2pcfgs_wiring()
        self.cfg2core_wiring()
        self.core2pcfgs_wiring()
    def interface_wiring(self):
        """Wire the flat if_cfg/if_sram_cfg boundary ports to their
        interface-bundle counterparts (one wire per bundle field)."""
        # config bus, east master side
        self.wire(self.if_cfg_est_m.wr_en, self.if_cfg_est_m_wr_en)
        self.wire(self.if_cfg_est_m.wr_addr, self.if_cfg_est_m_wr_addr)
        self.wire(self.if_cfg_est_m.wr_data, self.if_cfg_est_m_wr_data)
        self.wire(self.if_cfg_est_m.rd_en, self.if_cfg_est_m_rd_en)
        self.wire(self.if_cfg_est_m.rd_addr, self.if_cfg_est_m_rd_addr)
        self.wire(self.if_cfg_est_m.rd_data, self.if_cfg_est_m_rd_data)
        self.wire(self.if_cfg_est_m.rd_data_valid, self.if_cfg_est_m_rd_data_valid)
        # config bus, west slave side
        self.wire(self.if_cfg_wst_s.wr_en, self.if_cfg_wst_s_wr_en)
        self.wire(self.if_cfg_wst_s.wr_addr, self.if_cfg_wst_s_wr_addr)
        self.wire(self.if_cfg_wst_s.wr_data, self.if_cfg_wst_s_wr_data)
        self.wire(self.if_cfg_wst_s.rd_en, self.if_cfg_wst_s_rd_en)
        self.wire(self.if_cfg_wst_s.rd_addr, self.if_cfg_wst_s_rd_addr)
        self.wire(self.if_cfg_wst_s.rd_data, self.if_cfg_wst_s_rd_data)
        self.wire(self.if_cfg_wst_s.rd_data_valid, self.if_cfg_wst_s_rd_data_valid)
        # sram config bus, east master side
        self.wire(self.if_sram_cfg_est_m.wr_en, self.if_sram_cfg_est_m_wr_en)
        self.wire(self.if_sram_cfg_est_m.wr_addr, self.if_sram_cfg_est_m_wr_addr)
        self.wire(self.if_sram_cfg_est_m.wr_data, self.if_sram_cfg_est_m_wr_data)
        self.wire(self.if_sram_cfg_est_m.rd_en, self.if_sram_cfg_est_m_rd_en)
        self.wire(self.if_sram_cfg_est_m.rd_addr, self.if_sram_cfg_est_m_rd_addr)
        self.wire(self.if_sram_cfg_est_m.rd_data, self.if_sram_cfg_est_m_rd_data)
        self.wire(self.if_sram_cfg_est_m.rd_data_valid, self.if_sram_cfg_est_m_rd_data_valid)
        # sram config bus, west slave side
        self.wire(self.if_sram_cfg_wst_s.wr_en, self.if_sram_cfg_wst_s_wr_en)
        self.wire(self.if_sram_cfg_wst_s.wr_addr, self.if_sram_cfg_wst_s_wr_addr)
        self.wire(self.if_sram_cfg_wst_s.wr_data, self.if_sram_cfg_wst_s_wr_data)
        self.wire(self.if_sram_cfg_wst_s.rd_en, self.if_sram_cfg_wst_s_rd_en)
        self.wire(self.if_sram_cfg_wst_s.rd_addr, self.if_sram_cfg_wst_s_rd_addr)
        self.wire(self.if_sram_cfg_wst_s.rd_data, self.if_sram_cfg_wst_s_rd_data)
        self.wire(self.if_sram_cfg_wst_s.rd_data_valid, self.if_sram_cfg_wst_s_rd_data_valid)
    def tile2cfg_wiring(self):
        """Pass the tile's config-bus interfaces through to glb_tile_cfg."""
        self.wire(self.glb_tile_cfg.if_cfg_wst_s, self.if_cfg_wst_s)
        self.wire(self.glb_tile_cfg.if_cfg_est_m, self.if_cfg_est_m)
    def tile2core_wiring(self):
        """Connect the tile's boundary ports to the glb_core child: sram
        config interfaces, the proc/strm/pcfg packet-chain fields, stream
        data lanes, network-connected flags, and DMA pulse signals."""
        self.wire(self.glb_core.if_sram_cfg_wst_s, self.if_sram_cfg_wst_s)
        self.wire(self.glb_core.if_sram_cfg_est_m, self.if_sram_cfg_est_m)
        # proc packet, w2e west inputs
        self.wire(self.glb_core.proc_packet_w2e_wsti['wr_en'], self.proc_wr_en_w2e_wsti)
        self.wire(self.glb_core.proc_packet_w2e_wsti['wr_addr'], self.proc_wr_addr_w2e_wsti)
        self.wire(self.glb_core.proc_packet_w2e_wsti['wr_strb'], self.proc_wr_strb_w2e_wsti)
        self.wire(self.glb_core.proc_packet_w2e_wsti['wr_data'], self.proc_wr_data_w2e_wsti)
        self.wire(self.glb_core.proc_packet_w2e_wsti['rd_en'], self.proc_rd_en_w2e_wsti)
        self.wire(self.glb_core.proc_packet_w2e_wsti['rd_addr'], self.proc_rd_addr_w2e_wsti)
        self.wire(self.glb_core.proc_packet_w2e_wsti['rd_data'], self.proc_rd_data_w2e_wsti)
        self.wire(self.glb_core.proc_packet_w2e_wsti['rd_data_valid'], self.proc_rd_data_valid_w2e_wsti)
        # proc packet, e2w west outputs
        self.wire(self.glb_core.proc_packet_e2w_wsto['wr_en'], self.proc_wr_en_e2w_wsto)
        self.wire(self.glb_core.proc_packet_e2w_wsto['wr_addr'], self.proc_wr_addr_e2w_wsto)
        self.wire(self.glb_core.proc_packet_e2w_wsto['wr_strb'], self.proc_wr_strb_e2w_wsto)
        self.wire(self.glb_core.proc_packet_e2w_wsto['wr_data'], self.proc_wr_data_e2w_wsto)
        self.wire(self.glb_core.proc_packet_e2w_wsto['rd_en'], self.proc_rd_en_e2w_wsto)
        self.wire(self.glb_core.proc_packet_e2w_wsto['rd_addr'], self.proc_rd_addr_e2w_wsto)
        self.wire(self.glb_core.proc_packet_e2w_wsto['rd_data'], self.proc_rd_data_e2w_wsto)
        self.wire(self.glb_core.proc_packet_e2w_wsto['rd_data_valid'], self.proc_rd_data_valid_e2w_wsto)
        # proc packet, e2w east inputs
        self.wire(self.glb_core.proc_packet_e2w_esti['wr_en'], self.proc_wr_en_e2w_esti)
        self.wire(self.glb_core.proc_packet_e2w_esti['wr_addr'], self.proc_wr_addr_e2w_esti)
        self.wire(self.glb_core.proc_packet_e2w_esti['wr_strb'], self.proc_wr_strb_e2w_esti)
        self.wire(self.glb_core.proc_packet_e2w_esti['wr_data'], self.proc_wr_data_e2w_esti)
        self.wire(self.glb_core.proc_packet_e2w_esti['rd_en'], self.proc_rd_en_e2w_esti)
        self.wire(self.glb_core.proc_packet_e2w_esti['rd_addr'], self.proc_rd_addr_e2w_esti)
        self.wire(self.glb_core.proc_packet_e2w_esti['rd_data'], self.proc_rd_data_e2w_esti)
        self.wire(self.glb_core.proc_packet_e2w_esti['rd_data_valid'], self.proc_rd_data_valid_e2w_esti)
        # proc packet, w2e east outputs
        self.wire(self.glb_core.proc_packet_w2e_esto['wr_en'], self.proc_wr_en_w2e_esto)
        self.wire(self.glb_core.proc_packet_w2e_esto['wr_addr'], self.proc_wr_addr_w2e_esto)
        self.wire(self.glb_core.proc_packet_w2e_esto['wr_strb'], self.proc_wr_strb_w2e_esto)
        self.wire(self.glb_core.proc_packet_w2e_esto['wr_data'], self.proc_wr_data_w2e_esto)
        self.wire(self.glb_core.proc_packet_w2e_esto['rd_en'], self.proc_rd_en_w2e_esto)
        self.wire(self.glb_core.proc_packet_w2e_esto['rd_addr'], self.proc_rd_addr_w2e_esto)
        self.wire(self.glb_core.proc_packet_w2e_esto['rd_data'], self.proc_rd_data_w2e_esto)
        self.wire(self.glb_core.proc_packet_w2e_esto['rd_data_valid'], self.proc_rd_data_valid_w2e_esto)
        # strm packet, w2e west inputs
        self.wire(self.glb_core.strm_packet_w2e_wsti['wr_en'], self.strm_wr_en_w2e_wsti)
        self.wire(self.glb_core.strm_packet_w2e_wsti['wr_addr'], self.strm_wr_addr_w2e_wsti)
        self.wire(self.glb_core.strm_packet_w2e_wsti['wr_strb'], self.strm_wr_strb_w2e_wsti)
        self.wire(self.glb_core.strm_packet_w2e_wsti['wr_data'], self.strm_wr_data_w2e_wsti)
        self.wire(self.glb_core.strm_packet_w2e_wsti['rd_en'], self.strm_rd_en_w2e_wsti)
        self.wire(self.glb_core.strm_packet_w2e_wsti['rd_addr'], self.strm_rd_addr_w2e_wsti)
        self.wire(self.glb_core.strm_packet_w2e_wsti['rd_data'], self.strm_rd_data_w2e_wsti)
        self.wire(self.glb_core.strm_packet_w2e_wsti['rd_data_valid'], self.strm_rd_data_valid_w2e_wsti)
        # strm packet, e2w west outputs
        self.wire(self.glb_core.strm_packet_e2w_wsto['wr_en'], self.strm_wr_en_e2w_wsto)
        self.wire(self.glb_core.strm_packet_e2w_wsto['wr_addr'], self.strm_wr_addr_e2w_wsto)
        self.wire(self.glb_core.strm_packet_e2w_wsto['wr_strb'], self.strm_wr_strb_e2w_wsto)
        self.wire(self.glb_core.strm_packet_e2w_wsto['wr_data'], self.strm_wr_data_e2w_wsto)
        self.wire(self.glb_core.strm_packet_e2w_wsto['rd_en'], self.strm_rd_en_e2w_wsto)
        self.wire(self.glb_core.strm_packet_e2w_wsto['rd_addr'], self.strm_rd_addr_e2w_wsto)
        self.wire(self.glb_core.strm_packet_e2w_wsto['rd_data'], self.strm_rd_data_e2w_wsto)
        self.wire(self.glb_core.strm_packet_e2w_wsto['rd_data_valid'], self.strm_rd_data_valid_e2w_wsto)
        # strm packet, e2w east inputs
        self.wire(self.glb_core.strm_packet_e2w_esti['wr_en'], self.strm_wr_en_e2w_esti)
        self.wire(self.glb_core.strm_packet_e2w_esti['wr_addr'], self.strm_wr_addr_e2w_esti)
        self.wire(self.glb_core.strm_packet_e2w_esti['wr_strb'], self.strm_wr_strb_e2w_esti)
        self.wire(self.glb_core.strm_packet_e2w_esti['wr_data'], self.strm_wr_data_e2w_esti)
        self.wire(self.glb_core.strm_packet_e2w_esti['rd_en'], self.strm_rd_en_e2w_esti)
        self.wire(self.glb_core.strm_packet_e2w_esti['rd_addr'], self.strm_rd_addr_e2w_esti)
        self.wire(self.glb_core.strm_packet_e2w_esti['rd_data'], self.strm_rd_data_e2w_esti)
        self.wire(self.glb_core.strm_packet_e2w_esti['rd_data_valid'], self.strm_rd_data_valid_e2w_esti)
        # strm packet, w2e east outputs
        self.wire(self.glb_core.strm_packet_w2e_esto['wr_en'], self.strm_wr_en_w2e_esto)
        self.wire(self.glb_core.strm_packet_w2e_esto['wr_addr'], self.strm_wr_addr_w2e_esto)
        self.wire(self.glb_core.strm_packet_w2e_esto['wr_strb'], self.strm_wr_strb_w2e_esto)
        self.wire(self.glb_core.strm_packet_w2e_esto['wr_data'], self.strm_wr_data_w2e_esto)
        self.wire(self.glb_core.strm_packet_w2e_esto['rd_en'], self.strm_rd_en_w2e_esto)
        self.wire(self.glb_core.strm_packet_w2e_esto['rd_addr'], self.strm_rd_addr_w2e_esto)
        self.wire(self.glb_core.strm_packet_w2e_esto['rd_data'], self.strm_rd_data_w2e_esto)
        self.wire(self.glb_core.strm_packet_w2e_esto['rd_data_valid'], self.strm_rd_data_valid_w2e_esto)
        # pcfg packet (read-only), all four chain segments
        self.wire(self.glb_core.pcfg_packet_w2e_wsti['rd_en'], self.pcfg_rd_en_w2e_wsti)
        self.wire(self.glb_core.pcfg_packet_w2e_wsti['rd_addr'], self.pcfg_rd_addr_w2e_wsti)
        self.wire(self.glb_core.pcfg_packet_w2e_wsti['rd_data'], self.pcfg_rd_data_w2e_wsti)
        self.wire(self.glb_core.pcfg_packet_w2e_wsti['rd_data_valid'], self.pcfg_rd_data_valid_w2e_wsti)
        self.wire(self.glb_core.pcfg_packet_e2w_wsto['rd_en'], self.pcfg_rd_en_e2w_wsto)
        self.wire(self.glb_core.pcfg_packet_e2w_wsto['rd_addr'], self.pcfg_rd_addr_e2w_wsto)
        self.wire(self.glb_core.pcfg_packet_e2w_wsto['rd_data'], self.pcfg_rd_data_e2w_wsto)
        self.wire(self.glb_core.pcfg_packet_e2w_wsto['rd_data_valid'], self.pcfg_rd_data_valid_e2w_wsto)
        self.wire(self.glb_core.pcfg_packet_e2w_esti['rd_en'], self.pcfg_rd_en_e2w_esti)
        self.wire(self.glb_core.pcfg_packet_e2w_esti['rd_addr'], self.pcfg_rd_addr_e2w_esti)
        self.wire(self.glb_core.pcfg_packet_e2w_esti['rd_data'], self.pcfg_rd_data_e2w_esti)
        self.wire(self.glb_core.pcfg_packet_e2w_esti['rd_data_valid'], self.pcfg_rd_data_valid_e2w_esti)
        self.wire(self.glb_core.pcfg_packet_w2e_esto['rd_en'], self.pcfg_rd_en_w2e_esto)
        self.wire(self.glb_core.pcfg_packet_w2e_esto['rd_addr'], self.pcfg_rd_addr_w2e_esto)
        self.wire(self.glb_core.pcfg_packet_w2e_esto['rd_data'], self.pcfg_rd_data_w2e_esto)
        self.wire(self.glb_core.pcfg_packet_w2e_esto['rd_data_valid'], self.pcfg_rd_data_valid_w2e_esto)
        # fabric stream data lanes
        self.wire(self.glb_core.strm_data_valid_f2g, self.stream_data_valid_f2g)
        self.wire(self.glb_core.strm_data_valid_g2f, self.stream_data_valid_g2f)
        self.wire(self.glb_core.strm_data_f2g, self.stream_data_f2g)
        self.wire(self.glb_core.strm_data_g2f, self.stream_data_g2f)
        # tile-connected flags: config registers drive the east outputs,
        # west inputs feed the core's "previous tile connected" inputs
        self.wire(self.cfg_tile_connected_esto, self.glb_tile_cfg.cfg_data_network['tile_connected'])
        self.wire(self.cfg_pcfg_tile_connected_esto, self.glb_tile_cfg.cfg_pcfg_network['tile_connected'])
        self.wire(self.glb_core.cfg_data_network_connected_prev, self.cfg_tile_connected_wsti)
        self.wire(self.glb_core.cfg_pcfg_network_connected_prev, self.cfg_pcfg_tile_connected_wsti)
        # DMA start pulses in, done/interrupt pulses out
        self.wire(self.glb_core.ld_dma_start_pulse, self.strm_g2f_start_pulse)
        self.wire(self.glb_core.st_dma_start_pulse, self.strm_f2g_start_pulse)
        self.wire(self.glb_core.pcfg_start_pulse, self.pcfg_start_pulse)
        self.wire(self.glb_core.ld_dma_done_pulse, self.strm_g2f_interrupt_pulse)
        self.wire(self.glb_core.st_dma_done_pulse, self.strm_f2g_interrupt_pulse)
        self.wire(self.glb_core.pcfg_done_pulse, self.pcfg_g2f_interrupt_pulse)
def cfg2core_wiring(self):
self.wire(self.glb_core.cfg_data_network,
self.glb_tile_cfg.cfg_data_network)
self.wire(self.glb_core.cfg_pcfg_network,
self.glb_tile_cfg.cfg_pcfg_network)
self.wire(self.glb_core.cfg_st_dma_ctrl, self.glb_tile_cfg.cfg_st_dma_ctrl)
# NOTE: Kratos bug - Cannot directly wire struct array from two different modules
st_dma_header_w = self.var("st_dma_header_w", self.header.cfg_dma_header_t, size=self._params.queue_depth)
self.wire(st_dma_header_w, self.glb_tile_cfg.cfg_st_dma_header)
self.wire(self.glb_core.cfg_st_dma_header, st_dma_header_w)
self.wire(self.glb_core.cfg_ld_dma_ctrl, self.glb_tile_cfg.cfg_ld_dma_ctrl)
ld_dma_header_w = self.var("ld_dma_header_w", self.header.cfg_dma_header_t, size=self._params.queue_depth)
self.wire(self.glb_core.cfg_ld_dma_header, ld_dma_header_w)
self.wire(ld_dma_header_w, self.glb_tile_cfg.cfg_ld_dma_header)
self.wire(self.glb_core.cfg_pcfg_dma_ctrl, self.glb_tile_cfg.cfg_pcfg_dma_ctrl)
self.wire(self.glb_core.cfg_pcfg_dma_header, self.glb_tile_cfg.cfg_pcfg_dma_header)
    def core2pcfgs_wiring(self):
        # Forward the core's CGRA bitstream stream and the pcfg DMA mode bit
        # to the parallel-configuration switch.
        self.wire(self.glb_core.cgra_cfg_pcfg, self.glb_tile_pcfg_switch.cgra_cfg_core2sw)
        self.wire(self.glb_tile_cfg.cfg_pcfg_dma_ctrl['mode'], self.glb_tile_pcfg_switch.cfg_pcfg_dma_mode)
def tile2pcfgs_wiring(self):
cgra_cfg_g2f_w = self.var(
f"cgra_cfg_g2f_cfg_w", self.header.cgra_cfg_t, size=self._params.cgra_per_glb, packed=True)
self.wire(self.glb_tile_pcfg_switch.cgra_cfg_g2f, cgra_cfg_g2f_w)
for i in range(self._params.cgra_per_glb):
self.wire(cgra_cfg_g2f_w[i]['wr_en'],
self.cgra_cfg_g2f_cfg_wr_en[i])
self.wire(cgra_cfg_g2f_w[i]['rd_en'],
self.cgra_cfg_g2f_cfg_rd_en[i])
self.wire(cgra_cfg_g2f_w[i]['addr'], self.cgra_cfg_g2f_cfg_addr[i])
self.wire(cgra_cfg_g2f_w[i]['data'], self.cgra_cfg_g2f_cfg_data[i])
self.wire(
self.glb_tile_pcfg_switch.cgra_cfg_jtag_wsti['wr_en'], self.cgra_cfg_jtag_wsti_wr_en)
self.wire(
self.glb_tile_pcfg_switch.cgra_cfg_jtag_wsti['rd_en'], self.cgra_cfg_jtag_wsti_rd_en)
self.wire(
self.glb_tile_pcfg_switch.cgra_cfg_jtag_wsti['addr'], self.cgra_cfg_jtag_wsti_addr)
self.wire(
self.glb_tile_pcfg_switch.cgra_cfg_jtag_wsti['data'], self.cgra_cfg_jtag_wsti_data)
self.wire(
self.glb_tile_pcfg_switch.cgra_cfg_jtag_esto['wr_en'], self.cgra_cfg_jtag_esto_wr_en)
self.wire(
self.glb_tile_pcfg_switch.cgra_cfg_jtag_esto['rd_en'], self.cgra_cfg_jtag_esto_rd_en)
self.wire(
self.glb_tile_pcfg_switch.cgra_cfg_jtag_esto['addr'], self.cgra_cfg_jtag_esto_addr)
self.wire(
self.glb_tile_pcfg_switch.cgra_cfg_jtag_esto['data'], self.cgra_cfg_jtag_esto_data)
self.wire(
self.glb_tile_pcfg_switch.cgra_cfg_pcfg_wsti['wr_en'], self.cgra_cfg_pcfg_wsti_wr_en)
self.wire(
self.glb_tile_pcfg_switch.cgra_cfg_pcfg_wsti['rd_en'], self.cgra_cfg_pcfg_wsti_rd_en)
self.wire(
self.glb_tile_pcfg_switch.cgra_cfg_pcfg_wsti['addr'], self.cgra_cfg_pcfg_wsti_addr)
self.wire(
self.glb_tile_pcfg_switch.cgra_cfg_pcfg_wsti['data'], self.cgra_cfg_pcfg_wsti_data)
self.wire(
self.glb_tile_pcfg_switch.cgra_cfg_pcfg_esto['wr_en'], self.cgra_cfg_pcfg_esto_wr_en)
self.wire(
self.glb_tile_pcfg_switch.cgra_cfg_pcfg_esto['rd_en'], self.cgra_cfg_pcfg_esto_rd_en)
self.wire(
self.glb_tile_pcfg_switch.cgra_cfg_pcfg_esto['addr'], self.cgra_cfg_pcfg_esto_addr)
self.wire(
self.glb_tile_pcfg_switch.cgra_cfg_pcfg_esto['data'], self.cgra_cfg_pcfg_esto_data)
self.wire(self.glb_tile_pcfg_switch.cgra_cfg_jtag_wsti_rd_en_bypass,
self.cgra_cfg_jtag_wsti_rd_en_bypass)
self.wire(self.glb_tile_pcfg_switch.cgra_cfg_jtag_esto_rd_en_bypass,
self.cgra_cfg_jtag_esto_rd_en_bypass)
self.wire(self.glb_tile_pcfg_switch.cgra_cfg_jtag_wsti_addr_bypass,
self.cgra_cfg_jtag_wsti_addr_bypass)
self.wire(self.glb_tile_pcfg_switch.cgra_cfg_jtag_esto_addr_bypass,
self.cgra_cfg_jtag_esto_addr_bypass)
|
StanfordAHA/garnet | global_buffer/design/glb_core_strm_router.py | from kratos import Generator, always_ff, always_comb, posedge
from global_buffer.design.global_buffer_parameter import GlobalBufferParams
from global_buffer.design.glb_header import GlbHeader
class GlbCoreStrmRouter(Generator):
    """Stream-packet router for one global-buffer tile.

    Routes the streaming packet between the west/east tile-to-tile chains and
    the local switch (sw) / stream-router (sr) ports.  The
    ``cfg_tile_connected_*`` inputs select whether the chain continues from
    the neighboring tile or turns around locally, and even/odd tiles
    (``glb_tile_id[0]``) inject/extract on opposite chains.
    """
    def __init__(self, _params: GlobalBufferParams):
        super().__init__("glb_core_strm_router")
        self._params = _params
        self.header = GlbHeader(self._params)
        self.clk = self.clock("clk")
        self.clk_en = self.input("clk_en", 1)
        self.reset = self.reset("reset")
        self.glb_tile_id = self.input(
            "glb_tile_id", self._params.tile_sel_addr_width)
        # Tile-to-tile packet chains: w2e = west-to-east, e2w = east-to-west;
        # wsti/esti are inputs from neighbors, wsto/esto outputs to neighbors.
        self.packet_w2e_wsti = self.input(
            "packet_w2e_wsti", self.header.packet_t)
        self.packet_e2w_wsto = self.output(
            "packet_e2w_wsto", self.header.packet_t)
        self.packet_e2w_esti = self.input(
            "packet_e2w_esti", self.header.packet_t)
        self.packet_w2e_esto = self.output(
            "packet_w2e_esto", self.header.packet_t)
        # Local ports to/from this tile's switch.
        self.packet_sw2sr = self.input(
            "packet_sw2sr", self.header.packet_t)
        self.packet_sr2sw = self.output(
            "packet_sr2sw", self.header.packet_t)
        self.cfg_tile_connected_prev = self.input(
            "cfg_tile_connected_prev", 1)
        self.cfg_tile_connected_next = self.input(
            "cfg_tile_connected_next", 1)
        # local variables
        self.packet_w2e_wsti_d1 = self.var(
            "packet_w2e_wsti_d1", self.header.packet_t)
        self.packet_w2e_wsti_turned = self.var(
            "packet_w2e_wsti_turned", self.header.packet_t)
        self.packet_w2e_wsti_turned_d1 = self.var(
            "packet_w2e_wsti_turned_d1", self.header.packet_t)
        self.packet_e2w_esti_d1 = self.var(
            "packet_e2w_esti_d1", self.header.packet_t)
        self.packet_e2w_esti_turned = self.var(
            "packet_e2w_esti_turned", self.header.packet_t)
        self.packet_e2w_esti_turned_d1 = self.var(
            "packet_e2w_esti_turned_d1", self.header.packet_t)
        self.packet_sw2sr_d1 = self.var(
            "packet_sw2sr_d1", self.header.packet_t)
        # localparam
        self.add_is_even_stmt()
        self.add_always(self.packet_wst_logic)
        self.add_always(self.packet_est_logic)
        self.add_always(self.packet_pipeline)
        self.add_always(self.packet_sw2sr_pipeline)
        self.add_always(self.packet_switch)
    def add_is_even_stmt(self):
        """Derive ``is_even`` (LSB of the tile id == 0) used by the switch."""
        self.is_even = self.var("is_even", 1)
        self.wire(self.is_even, self.glb_tile_id[0] == 0)
    @always_comb
    def packet_wst_logic(self):
        # West side: take the registered packet from the previous tile when
        # connected, otherwise loop back our own east-to-west output.
        if self.cfg_tile_connected_prev:
            self.packet_w2e_wsti_turned = self.packet_w2e_wsti_d1
        else:
            self.packet_w2e_wsti_turned = self.packet_e2w_wsto
    @always_comb
    def packet_est_logic(self):
        # East side: symmetric to packet_wst_logic for the next tile.
        if self.cfg_tile_connected_next:
            self.packet_e2w_esti_turned = self.packet_e2w_esti_d1
        else:
            self.packet_e2w_esti_turned = self.packet_w2e_esto
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def packet_sw2sr_pipeline(self):
        # One-cycle pipeline of the switch-to-router packet (clock gated).
        if self.reset:
            self.packet_sw2sr_d1 = 0
        elif self.clk_en:
            self.packet_sw2sr_d1 = self.packet_sw2sr
    @always_ff((posedge, "clk"), (posedge, "reset"))
    def packet_pipeline(self):
        # One-cycle pipeline of the incoming and turned chain packets.
        if self.reset:
            self.packet_w2e_wsti_d1 = 0
            self.packet_e2w_esti_d1 = 0
            self.packet_w2e_wsti_turned_d1 = 0
            self.packet_e2w_esti_turned_d1 = 0
        elif self.clk_en:
            self.packet_w2e_wsti_d1 = self.packet_w2e_wsti
            self.packet_e2w_esti_d1 = self.packet_e2w_esti
            self.packet_w2e_wsti_turned_d1 = self.packet_w2e_wsti_turned
            self.packet_e2w_esti_turned_d1 = self.packet_e2w_esti_turned
    @always_comb
    def packet_switch(self):
        # Even tiles receive from the west chain and inject eastward; odd
        # tiles mirror this, which balances per-tile latency on the ring.
        if self.is_even:
            self.packet_sr2sw = self.packet_w2e_wsti_turned
            self.packet_w2e_esto = self.packet_sw2sr_d1
            self.packet_e2w_wsto = self.packet_e2w_esti_turned_d1
        else:
            self.packet_sr2sw = self.packet_e2w_esti_turned
            self.packet_w2e_esto = self.packet_w2e_wsti_turned_d1
            self.packet_e2w_wsto = self.packet_sw2sr_d1
|
StanfordAHA/garnet | global_buffer/design/glb_tile_cfg.py | from kratos import Generator
from global_buffer.design.glb_header import GlbHeader
from global_buffer.design.glb_cfg_ifc import GlbConfigInterface
from global_buffer.design.glb_tile_cfg_ctrl import GlbTileCfgCtrl
from global_buffer.gen_global_buffer_rdl import gen_global_buffer_rdl, gen_glb_pio_wrapper
from global_buffer.design.global_buffer_parameter import GlobalBufferParams
from systemRDL.util import run_systemrdl
import pathlib
import os
class GlbTileCfg(Generator):
    """Per-tile configuration-register block.

    Wraps an ORDT-generated PIO register file (``glb_pio``) plus a small
    control module (``glb_tile_cfg_ctrl``) that bridges the AXI-lite-style
    west/east config interfaces to the PIO decode ports, and exposes the
    decoded register values as typed struct outputs.
    """
    # Cache of the parameters used to generate the PIO wrapper, so the
    # expensive systemRDL/ORDT flow runs only once per process.
    cache = None
    def __init__(self, _params: GlobalBufferParams):
        super().__init__("glb_tile_cfg")
        self._params = _params
        self.header = GlbHeader(self._params)
        cfg_ifc = GlbConfigInterface(addr_width=self._params.axi_addr_width, data_width=self._params.axi_data_width)
        # ports
        self.clk = self.clock("clk")
        self.reset = self.reset("reset", is_async=True)
        self.glb_tile_id = self.input("glb_tile_id", self._params.tile_sel_addr_width)
        # config port
        self.if_cfg_wst_s = self.interface(cfg_ifc.slave, "if_cfg_wst_s", is_port=True)
        self.if_cfg_est_m = self.interface(cfg_ifc.master, "if_cfg_est_m", is_port=True)
        # configuration register struct
        # TODO: Can we have a pass for this configuration?
        self.cfg_data_network = self.output("cfg_data_network", self.header.cfg_data_network_t)
        self.cfg_pcfg_network = self.output("cfg_pcfg_network", self.header.cfg_pcfg_network_t)
        # st dma
        self.cfg_st_dma_ctrl = self.output("cfg_st_dma_ctrl", self.header.cfg_dma_ctrl_t)
        self.cfg_st_dma_header = self.output("cfg_st_dma_header", self.header.cfg_dma_header_t,
                                             size=self._params.queue_depth)
        # ld dma
        self.cfg_ld_dma_ctrl = self.output("cfg_ld_dma_ctrl", self.header.cfg_dma_ctrl_t)
        self.cfg_ld_dma_header = self.output("cfg_ld_dma_header", self.header.cfg_dma_header_t,
                                             size=self._params.queue_depth)
        # pcfg dma
        self.cfg_pcfg_dma_ctrl = self.output("cfg_pcfg_dma_ctrl", self.header.cfg_pcfg_dma_ctrl_t)
        self.cfg_pcfg_dma_header = self.output("cfg_pcfg_dma_header", self.header.cfg_pcfg_dma_header_t)
        self.glb_pio_wrapper = self.get_glb_pio_wrapper()
        # TODO: Update axi_reg_addr_width
        self.add_child("glb_pio", self.glb_pio_wrapper)
        self.glb_tile_cfg_ctrl = GlbTileCfgCtrl(self._params)
        self.add_child("glb_tile_cfg_ctrl", self.glb_tile_cfg_ctrl)
        self.wire_config_signals()
        self.wire_ctrl_signals()
    def get_glb_pio_wrapper(self):
        """Generate (once) and load the ORDT-produced PIO SV wrapper.

        Runs the systemRDL -> ORDT flow to emit ``glb_pio.sv`` and a wrapper,
        then imports the wrapper as a Kratos child via ``from_verilog``.
        """
        # TODO: For now, we run systemRDL to generate SV and parse it.
        # However, in the future, we need to generate wrapper directly from configuration space.
        top_name = "glb"
        garnet_home = pathlib.Path(__file__).parent.parent.parent.resolve()
        rdl_output_folder = os.path.join(garnet_home, "global_buffer/systemRDL/output/")
        pio_file = rdl_output_folder + top_name + "_pio.sv"
        pio_wrapper_file = rdl_output_folder + top_name + "_pio_wrapper.sv"
        # Only run the generation flow the first time (per-process cache).
        if not self.__class__.cache:
            self.__class__.cache = self._params
            glb_rdl = gen_global_buffer_rdl(name=top_name, params=self._params)
            # Dump rdl file
            rdl_file = os.path.join(garnet_home, "global_buffer/systemRDL/glb.rdl")
            glb_rdl.dump_rdl(rdl_file)
            # Run ORDT to generate RTL
            ordt_path = os.path.join(garnet_home, 'systemRDL', 'Ordt.jar')
            rdl_parms_file = os.path.join(garnet_home, "global_buffer/systemRDL/ordt_params/glb.parms")
            run_systemrdl(ordt_path, top_name, rdl_file, rdl_parms_file, rdl_output_folder)
            # Create wrapper of glb_pio.sv
            gen_glb_pio_wrapper(src_file=pio_file, dest_file=pio_wrapper_file)
        return self.from_verilog("glb_pio", pio_wrapper_file, [], {})
    def wire_config_signals(self):
        """Wire every decoded PIO register field to its struct output."""
        self.wire(self.clk, self.glb_pio_wrapper.ports["clk"])
        self.wire(self.reset, self.glb_pio_wrapper.ports["reset"])
        self.wire(self.cfg_data_network['tile_connected'],
                  self.glb_pio_wrapper.ports[f"l2h_data_network_tile_connected_r"])
        self.wire(self.cfg_data_network['latency'],
                  self.glb_pio_wrapper.ports[f"l2h_data_network_latency_r"])
        self.wire(self.cfg_pcfg_network['tile_connected'],
                  self.glb_pio_wrapper.ports[f"l2h_pcfg_network_tile_connected_r"])
        self.wire(self.cfg_pcfg_network['latency'],
                  self.glb_pio_wrapper.ports[f"l2h_pcfg_network_latency_r"])
        self.wire(self.cfg_st_dma_ctrl['data_mux'],
                  self.glb_pio_wrapper.ports[f"l2h_st_dma_ctrl_data_mux_r"])
        self.wire(self.cfg_st_dma_ctrl['mode'], self.glb_pio_wrapper.ports[f"l2h_st_dma_ctrl_mode_r"])
        self.wire(self.cfg_st_dma_ctrl['use_valid'],
                  self.glb_pio_wrapper.ports[f"l2h_st_dma_ctrl_use_valid_r"])
        self.wire(self.cfg_st_dma_ctrl['num_repeat'], self.glb_pio_wrapper.ports[f"l2h_st_dma_ctrl_num_repeat_r"])
        for i in range(self._params.queue_depth):
            # With a depth-1 queue the output is a plain struct, not an array.
            if self._params.queue_depth == 1:
                current_header = self.cfg_st_dma_header
            else:
                current_header = self.cfg_st_dma_header[i]
            self.wire(current_header['start_addr'],
                      self.glb_pio_wrapper.ports[f"l2h_st_dma_header_{i}_start_addr_start_addr_r"])
            self.wire(current_header['cycle_start_addr'],
                      self.glb_pio_wrapper.ports[f"l2h_st_dma_header_{i}_cycle_start_addr_cycle_start_addr_r"])
            self.wire(current_header['dim'],
                      self.glb_pio_wrapper.ports[f"l2h_st_dma_header_{i}_dim_dim_r"])
            for j in range(self._params.loop_level):
                self.wire(current_header[f"cycle_stride_{j}"],
                          self.glb_pio_wrapper.ports[f"l2h_st_dma_header_{i}_cycle_stride_{j}_cycle_stride_r"])
                self.wire(current_header[f"stride_{j}"],
                          self.glb_pio_wrapper.ports[f"l2h_st_dma_header_{i}_stride_{j}_stride_r"])
                self.wire(current_header[f"range_{j}"],
                          self.glb_pio_wrapper.ports[f"l2h_st_dma_header_{i}_range_{j}_range_r"])
        self.wire(self.cfg_ld_dma_ctrl['data_mux'],
                  self.glb_pio_wrapper.ports[f"l2h_ld_dma_ctrl_data_mux_r"])
        self.wire(self.cfg_ld_dma_ctrl['mode'], self.glb_pio_wrapper.ports[f"l2h_ld_dma_ctrl_mode_r"])
        self.wire(self.cfg_ld_dma_ctrl['use_valid'],
                  self.glb_pio_wrapper.ports[f"l2h_ld_dma_ctrl_use_valid_r"])
        self.wire(self.cfg_ld_dma_ctrl['num_repeat'], self.glb_pio_wrapper.ports[f"l2h_ld_dma_ctrl_num_repeat_r"])
        for i in range(self._params.queue_depth):
            if self._params.queue_depth == 1:
                current_header = self.cfg_ld_dma_header
            else:
                current_header = self.cfg_ld_dma_header[i]
            self.wire(current_header['start_addr'],
                      self.glb_pio_wrapper.ports[f"l2h_ld_dma_header_{i}_start_addr_start_addr_r"])
            self.wire(current_header['cycle_start_addr'],
                      self.glb_pio_wrapper.ports[f"l2h_ld_dma_header_{i}_cycle_start_addr_cycle_start_addr_r"])
            self.wire(current_header['dim'],
                      self.glb_pio_wrapper.ports[f"l2h_ld_dma_header_{i}_dim_dim_r"])
            for j in range(self._params.loop_level):
                self.wire(current_header[f"cycle_stride_{j}"],
                          self.glb_pio_wrapper.ports[f"l2h_ld_dma_header_{i}_cycle_stride_{j}_cycle_stride_r"])
                self.wire(current_header[f"stride_{j}"],
                          self.glb_pio_wrapper.ports[f"l2h_ld_dma_header_{i}_stride_{j}_stride_r"])
                self.wire(current_header[f"range_{j}"],
                          self.glb_pio_wrapper.ports[f"l2h_ld_dma_header_{i}_range_{j}_range_r"])
        self.wire(
            self.cfg_pcfg_dma_ctrl['mode'], self.glb_pio_wrapper.ports[f"l2h_pcfg_dma_ctrl_mode_r"])
        self.wire(self.cfg_pcfg_dma_header['start_addr'],
                  self.glb_pio_wrapper.ports[f"l2h_pcfg_dma_header_start_addr_start_addr_r"])
        self.wire(self.cfg_pcfg_dma_header['num_cfg'],
                  self.glb_pio_wrapper.ports[f"l2h_pcfg_dma_header_num_cfg_num_cfg_r"])
    def wire_ctrl_signals(self):
        """Wire the control module to the PIO decode (h2d/d2h) ports."""
        self.wire(self.clk, self.glb_tile_cfg_ctrl.clk)
        self.wire(self.reset, self.glb_tile_cfg_ctrl.reset)
        self.wire(self.glb_tile_id, self.glb_tile_cfg_ctrl.glb_tile_id)
        self.wire(self.if_cfg_wst_s, self.glb_tile_cfg_ctrl.if_cfg_wst_s)
        self.wire(self.if_cfg_est_m, self.glb_tile_cfg_ctrl.if_cfg_est_m)
        self.wire(self.glb_pio_wrapper.ports['h2d_pio_dec_address'],
                  self.glb_tile_cfg_ctrl.h2d_pio_dec_address)
        self.wire(self.glb_pio_wrapper.ports['h2d_pio_dec_write_data'],
                  self.glb_tile_cfg_ctrl.h2d_pio_dec_write_data)
        self.wire(self.glb_pio_wrapper.ports['h2d_pio_dec_write'],
                  self.glb_tile_cfg_ctrl.h2d_pio_dec_write)
        self.wire(
            self.glb_pio_wrapper.ports['h2d_pio_dec_read'], self.glb_tile_cfg_ctrl.h2d_pio_dec_read)
        self.wire(self.glb_pio_wrapper.ports['d2h_dec_pio_read_data'],
                  self.glb_tile_cfg_ctrl.d2h_dec_pio_read_data)
        self.wire(self.glb_pio_wrapper.ports['d2h_dec_pio_ack'],
                  self.glb_tile_cfg_ctrl.d2h_dec_pio_ack)
        self.wire(self.glb_pio_wrapper.ports['d2h_dec_pio_nack'],
                  self.glb_tile_cfg_ctrl.d2h_dec_pio_nack)
|
StanfordAHA/garnet | global_buffer/design/global_buffer_magma_helper.py | import magma as m
from global_buffer.design.global_buffer_parameter import GlobalBufferParams
def _get_raw_interface(params: GlobalBufferParams):
    """Return the global buffer's port map as name -> nested magma type.

    The types here mirror the Kratos RTL interface; `_flatten` collapses the
    nested arrays to flat ``m.Bits`` for the magma declaration.
    """
    # NOTE(review): ``m.clock`` / ``m.asyncreset`` (lowercase) — magma usually
    # exposes ``m.Clock`` / ``m.AsyncReset``; confirm these aliases exist.
    return dict(
        clk=m.In(m.clock),
        stall=m.In(m.Bits[params.num_glb_tiles]),
        cgra_stall_in=m.In(m.Bits[params.num_glb_tiles]),
        reset=m.In(m.asyncreset),
        cgra_soft_reset=m.In(m.Bit),
        # proc
        proc_wr_en=m.In(m.Bit),
        proc_wr_strb=m.In(m.Bits[params.bank_data_width // 8]),
        proc_wr_addr=m.In(m.Bits[params.glb_addr_width]),
        proc_wr_data=m.In(m.Bits[params.bank_data_width]),
        proc_rd_en=m.In(m.Bit),
        proc_rd_addr=m.In(m.Bits[params.glb_addr_width]),
        proc_rd_data=m.Out(m.Bits[params.bank_data_width]),
        proc_rd_data_valid=m.Out(m.Bit),
        # configuration of glb from glc
        if_cfg_wr_en=m.In(m.Bit),
        if_cfg_wr_clk_en=m.In(m.Bit),
        if_cfg_wr_addr=m.In(m.Bits[params.axi_addr_width]),
        if_cfg_wr_data=m.In(m.Bits[params.axi_data_width]),
        if_cfg_rd_en=m.In(m.Bit),
        if_cfg_rd_clk_en=m.In(m.Bit),
        if_cfg_rd_addr=m.In(m.Bits[params.axi_addr_width]),
        if_cfg_rd_data=m.Out(m.Bits[params.axi_data_width]),
        if_cfg_rd_data_valid=m.Out(m.Bit),
        # configuration of sram from glc
        if_sram_cfg_wr_en=m.In(m.Bit),
        if_sram_cfg_wr_clk_en=m.In(m.Bit),
        if_sram_cfg_wr_addr=m.In(m.Bits[params.glb_addr_width]),
        if_sram_cfg_wr_data=m.In(m.Bits[params.axi_data_width]),
        if_sram_cfg_rd_en=m.In(m.Bit),
        if_sram_cfg_rd_clk_en=m.In(m.Bit),
        if_sram_cfg_rd_addr=m.In(m.Bits[params.glb_addr_width]),
        if_sram_cfg_rd_data=m.Out(m.Bits[params.axi_data_width]),
        if_sram_cfg_rd_data_valid=m.Out(m.Bit),
        # cgra to glb streaming word
        stream_data_f2g=m.In(m.Array[params.num_glb_tiles,
                                     m.Array[params.cgra_per_glb, m.Bits[params.cgra_data_width]]]),
        stream_data_valid_f2g=m.In(m.Array[params.num_glb_tiles, m.Bits[params.cgra_per_glb]]),
        # glb to cgra streaming word
        stream_data_g2f=m.Out(m.Array[params.num_glb_tiles,
                                      m.Array[params.cgra_per_glb, m.Bits[params.cgra_data_width]]]),
        stream_data_valid_g2f=m.Out(m.Array[params.num_glb_tiles, m.Bits[params.cgra_per_glb]]),
        # cgra configuration from global controller
        cgra_cfg_jtag_gc2glb_wr_en=m.In(m.Bits[1]),
        cgra_cfg_jtag_gc2glb_rd_en=m.In(m.Bits[1]),
        cgra_cfg_jtag_gc2glb_addr=m.In(m.Bits[params.cgra_cfg_addr_width]),
        cgra_cfg_jtag_gc2glb_data=m.In(m.Bits[params.cgra_cfg_data_width]),
        # cgra configuration to cgra
        cgra_cfg_g2f_cfg_wr_en=m.Out(m.Array[params.num_glb_tiles, m.Bits[params.cgra_per_glb]]),
        cgra_cfg_g2f_cfg_rd_en=m.Out(m.Array[params.num_glb_tiles, m.Bits[params.cgra_per_glb]]),
        cgra_cfg_g2f_cfg_addr=m.Out(
            m.Array[params.num_glb_tiles, m.Array[params.cgra_per_glb, m.Bits[params.cgra_cfg_addr_width]]]),
        cgra_cfg_g2f_cfg_data=m.Out(
            m.Array[params.num_glb_tiles, m.Array[params.cgra_per_glb, m.Bits[params.cgra_cfg_data_width]]]),
        cgra_stall=m.Out(m.Array[params.num_glb_tiles, m.Bits[params.cgra_per_glb]]),
        strm_start_pulse=m.In(m.Bits[params.num_glb_tiles]),
        pc_start_pulse=m.In(m.Bits[params.num_glb_tiles]),
        strm_f2g_interrupt_pulse=m.Out(m.Bits[params.num_glb_tiles]),
        strm_g2f_interrupt_pulse=m.Out(m.Bits[params.num_glb_tiles]),
        pcfg_g2f_interrupt_pulse=m.Out(m.Bits[params.num_glb_tiles]),
    )
def _flatten(types):
    """Collapse nested magma array types into flat ``m.Bits`` of equal width."""
    def _to_flat(port_type):
        # Non-array types, and Bits (already flat), pass through unchanged.
        if not issubclass(port_type, m.Array) or issubclass(port_type, m.Bits):
            return port_type
        # Multiply out every array dimension down to the Digital leaf.
        total = port_type.N
        elem = port_type.T
        while not issubclass(elem, m.Digital):
            total *= elem.N
            elem = elem.T
        # Preserve the port direction of the leaf type.
        return m.Bits[total].qualify(elem.direction)
    return {name: _to_flat(t) for name, t in types.items()}
class GlobalBufferDeclarationGenerator(m.Generator2):
    """Magma declaration (ports only, no definition) of the global buffer."""
    def __init__(self, params: GlobalBufferParams = None):
        # if parameters are not passed, use the default parameters
        if params is None:
            params = GlobalBufferParams()
        self.params = params
        self.name = "global_buffer"
        # Flatten the nested port types so the declaration matches the RTL.
        args = _flatten(_get_raw_interface(self.params))
        self.io = m.IO(**args)
|
StanfordAHA/garnet | systemRDL/util.py | import json
import argparse
import sys
import os
from systemrdl import RDLCompiler, node, RDLCompileError
from peakrdl.html import HTMLExporter
from dataclasses import dataclass
def convert_field(rdlc: RDLCompiler, obj: node.FieldNode) -> dict:
    """Serialize a single RDL field node into a plain dict."""
    return {
        'type': 'field',
        'inst_name': obj.inst_name,
        'lsb': obj.lsb,
        'msb': obj.msb,
        'reset': obj.get_property('reset'),
        'sw_access': obj.get_property('sw').name,
    }
def convert_reg(rdlc: RDLCompiler, obj: node.RegNode) -> dict:
    """Serialize an RDL register node and all of its fields into a dict."""
    if obj.is_array:
        # Use the RDL Compiler message system to print an error;
        # fatal() raises RDLCompileError.
        rdlc.msg.fatal(
            "JSON export does not support arrays",
            obj.inst.inst_src_ref
        )
    # Convert the register itself, then its fields as children.
    return {
        'type': 'reg',
        'inst_name': obj.inst_name,
        'addr_offset': obj.address_offset,
        'children': [convert_field(rdlc, field) for field in obj.fields()],
    }
def convert_addrmap(rdlc: RDLCompiler, obj) -> dict:
    """Recursively serialize an addrmap/regfile node into a plain dict.

    Raises a fatal compiler error (RDLCompileError) for array nodes, which
    the JSON export does not support, and RuntimeError for unexpected
    top-level node types.
    """
    if obj.is_array:
        rdlc.msg.fatal(
            "JSON export does not support arrays",
            obj.inst.inst_src_ref
        )
    json_obj = dict()
    if isinstance(obj, node.AddrmapNode):
        json_obj['type'] = 'addrmap'
    elif isinstance(obj, node.RegfileNode):
        json_obj['type'] = 'regfile'
    else:
        raise RuntimeError
    json_obj['inst_name'] = obj.inst_name
    json_obj['addr_offset'] = obj.address_offset
    json_obj['children'] = []
    for child in obj.children():
        if isinstance(child, (node.AddrmapNode, node.RegfileNode)):
            json_child = convert_addrmap(rdlc, child)
        elif isinstance(child, node.RegNode):
            json_child = convert_reg(rdlc, child)
        else:
            # BUGFIX: an unhandled child type used to fall through and append
            # the previous iteration's json_child again (or raise NameError on
            # the first child). Skip node kinds that are not exported.
            continue
        json_obj['children'].append(json_child)
    return json_obj
def convert_to_json(rdl_json, path: str):
    """Serialize *rdl_json* to *path* as pretty-printed (indent=4) JSON."""
    serialized = json.dumps(rdl_json, indent=4)
    with open(path, "w") as out:
        out.write(serialized)
@dataclass
class Reg():
    """A flattened register map entry: absolute address plus total bit span."""
    name: str  # upper-cased underscore-joined path, with a '_R' suffix
    addr: int  # absolute byte address (accumulated base + offsets)
    lsb: int   # LSB of the register's first field
    msb: int   # MSB of the register's last field
@dataclass
class Field():
    """A flattened field entry: bit position within its parent register."""
    name: str  # upper-cased underscore-joined path name
    lsb: int   # field LSB within the register
    msb: int   # field MSB within the register
def convert_to_header(rdl_json, path: str):
    """Write SystemVerilog (.svh) and C (.h) address-map headers.

    *path* is the output path without extension; both ``<path>.svh`` and
    ``<path>.h`` are emitted from the flattened register map.
    """
    entries = _convert_to_regmap(rdl_json, "", 0)
    # SystemVerilog header: registers get address + LSB/MSB defines.
    with open(path + '.svh', "w") as svh:
        for entry in entries:
            if isinstance(entry, Reg):
                svh.write(f"`define {entry.name} 'h{format(entry.addr, 'x')}\n")
                svh.write(f"`define {entry.name + '_LSB'} {entry.lsb}\n")
                svh.write(f"`define {entry.name + '_MSB'} {entry.msb}\n")
            elif isinstance(entry, Field):
                svh.write(f"`define {entry.name + '_F_LSB'} {entry.lsb}\n")
                svh.write(f"`define {entry.name + '_F_MSB'} {entry.msb}\n")
    # C header: registers get only the address define.
    with open(path + '.h', "w") as h:
        h.write(f"#pragma once\n")
        for entry in entries:
            if isinstance(entry, Reg):
                h.write(f"#define {entry.name} {hex(entry.addr)}\n")
            elif isinstance(entry, Field):
                h.write(f"#define {entry.name + '_F_LSB'} {entry.lsb}\n")
                h.write(f"#define {entry.name + '_F_MSB'} {entry.msb}\n")
def _convert_to_regmap(rdl_json, base_name, base_addr):
    """Flatten the nested RDL JSON tree into a list of Reg/Field entries.

    Names accumulate down the tree as ``<base>_<inst>`` and addresses
    accumulate from the enclosing addrmap/regfile offsets. Always returns a
    list (possibly empty).
    """
    header_list = []
    if rdl_json is None:
        # BUGFIX: previously this bare `return` yielded None, which would
        # break the `header_list += ...` accumulation in the recursive caller.
        return header_list
    if base_name != "":
        base_name += "_"
    node_type = rdl_json['type']
    if node_type in ('addrmap', 'regfile'):
        name = base_name + rdl_json['inst_name']
        addr = base_addr + rdl_json['addr_offset']
        for child in rdl_json['children']:
            header_list += _convert_to_regmap(child, name, addr)
    elif node_type == 'reg':
        name = base_name + rdl_json['inst_name']
        addr = base_addr + rdl_json['addr_offset']
        reg_name = (name + '_R').upper()
        # Register span: LSB of its first field through MSB of its last field.
        lsb = rdl_json['children'][0]['lsb']
        msb = rdl_json['children'][-1]['msb']
        header_list.append(Reg(name=reg_name, addr=addr, lsb=lsb, msb=msb))
        # Recurse into the fields so they emit their own entries.
        for child in rdl_json['children']:
            header_list += _convert_to_regmap(child, name, addr)
    elif node_type == 'field':
        field_name = (base_name + rdl_json['inst_name']).upper()
        header_list.append(
            Field(name=field_name, lsb=rdl_json['lsb'], msb=rdl_json['msb']))
    return header_list
def parse_arguments():
    """Parse command-line options for the systemRDL utility."""
    parser = argparse.ArgumentParser()
    # Required list of input .rdl files.
    parser.add_argument("--rdl", nargs='+', default=[], required=True)
    parser.add_argument("--output", help="systemRDL: output directory",
                        type=str, default="")
    parser.add_argument("--name", help="glb: name of the addrmap",
                        type=str, default="addrmap")
    # Boolean export selectors.
    for flag, desc in (("--json", "export json"),
                       ("--html", "export html"),
                       ("--header", "export h and svh")):
        parser.add_argument(flag, help=desc, action='store_true')
    return parser.parse_args()
def gen_rdl_header(top_name, rdl_file, output_folder):
    """Compile an RDL file and emit HTML docs, JSON, and C/SV headers.

    Exits the process with status 1 if RDL compilation fails.
    """
    # Generate HTML and addressmap header
    rdlc = RDLCompiler()
    try:
        rdlc.compile_file(rdl_file)
        # Elaborate the design
        root = rdlc.elaborate()
    except RDLCompileError:
        # A compilation error occurred. Exit with error code
        sys.exit(1)
    # BUGFIX: removed a redundant second rdlc.elaborate() call that needlessly
    # re-elaborated the design after the try block.
    exporter = HTMLExporter()
    exporter.export(root, os.path.join(output_folder, f"{top_name}_html"))
    rdl_json = convert_addrmap(rdlc, root.top)
    convert_to_json(rdl_json, os.path.join(output_folder, f"{top_name}.json"))
    convert_to_header(rdl_json, os.path.join(output_folder, top_name))
def run_systemrdl(ordt_path, name, rdl_file, parms_file, output_folder):
    """Run Ordt on *rdl_file* to emit a reglist and SystemVerilog RTL."""
    reglist_path = os.path.join(output_folder, name + '.reglist')
    command = (f"java -jar {ordt_path} -reglist {reglist_path}"
               f" -parms {parms_file} -systemverilog {output_folder} {rdl_file}")
    os.system(command)
|
clayman-micro/passport | src/passport/use_cases/users.py | <filename>src/passport/use_cases/users.py
from passport.domain import User
from passport.services.users import UserService
from passport.storage import DBStorage
from passport.use_cases import UseCase
class LoginUseCase(UseCase):
    """Authenticate a user by email and password against the database."""

    async def execute(self, email: str, password: str) -> User:
        service = UserService(DBStorage(self.app["db"]))
        return await service.login(email, password)
class RegisterUserUseCase(UseCase):
    """Create a new user account with the given email and password."""

    async def execute(self, email: str, password: str) -> User:
        service = UserService(DBStorage(self.app["db"]))
        return await service.register(email, password)
|
clayman-micro/passport | src/passport/storage/migrations/versions/615677fb3a4f_add_users_table.py | """
Add users table
Revision ID: 615677fb3a4f
Revises:
Create Date: 2020-09-09 21:36:41.102344
"""
import sqlalchemy # type: ignore
from alembic import op # type: ignore
# revision identifiers, used by Alembic.
# BUGFIX: the revision id had been replaced by a "<PASSWORD>" redaction
# placeholder; restored from the module docstring / migration filename.
revision = "615677fb3a4f"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``users`` table with a unique email and hashed password."""
    schema = [
        sqlalchemy.Column("id", sqlalchemy.Integer(), nullable=False),
        sqlalchemy.Column("email", sqlalchemy.String(length=255), nullable=False),
        sqlalchemy.Column("password", sqlalchemy.String(length=255), nullable=False),
        sqlalchemy.Column("is_active", sqlalchemy.Boolean(), nullable=True),
        sqlalchemy.Column("last_login", sqlalchemy.DateTime(), nullable=True),
        sqlalchemy.Column("created_on", sqlalchemy.DateTime(), nullable=True),
        sqlalchemy.PrimaryKeyConstraint("id"),
        sqlalchemy.UniqueConstraint("email"),
    ]
    op.create_table("users", *schema)
def downgrade():
    """Drop the ``users`` table, reverting this migration."""
    op.drop_table("users")
|
clayman-micro/passport | src/passport/use_cases/__init__.py | <filename>src/passport/use_cases/__init__.py
from aiohttp import web
class UseCase:
    """Base class for application use cases; holds the aiohttp application
    so subclasses can reach shared resources (e.g. ``self.app["db"]``)."""
    def __init__(self, app: web.Application) -> None:
        self.app = app
|
clayman-micro/passport | tests/handlers/test_api.py | <filename>tests/handlers/test_api.py
from datetime import datetime, timedelta
from typing import Dict
import jwt
import orjson # type: ignore
import pytest # type: ignore
from passlib.handlers.pbkdf2 import pbkdf2_sha512 # type: ignore
from passport.domain import TokenType, User
from passport.services.tokens import TokenGenerator
from passport.storage.users import users as users_table
def prepare_request(data, json=False):
    """Build client-POST kwargs: JSON-encoded body + content-type header when
    *json* is true, otherwise the raw form data with no extra headers."""
    if json:
        return {
            "data": orjson.dumps(data),
            "headers": {"Content-Type": "application/json"},
        }
    return {"data": data, "headers": {}}
@pytest.fixture(scope="function")
def prepare_user():
    # Factory fixture: inserts a user row directly into the DB and returns a
    # domain User that still carries the *plain-text* password, which the
    # tests reuse for login requests.
    async def go(user: Dict, app):
        data: Dict = user.copy()
        data.setdefault("is_active", True)
        key = await app["db"].fetch_val(
            users_table.insert().returning(users_table.c.id),
            values={
                "email": data["email"],
                # Store the hashed password; the returned User keeps the raw one.
                "password": pbkdf2_sha512.encrypt(data["password"], rounds=10000, salt_size=10),
                "is_active": data["is_active"],
                "created_on": datetime.now(),
            },
        )
        return User(key=key, email=data["email"], password=data["password"])  # type: ignore
    return go
# NOTE(review): the "<EMAIL>"/"<PASSWORD>" literals throughout these tests look
# like redacted fixtures — restore real values before running.
@pytest.mark.integration
@pytest.mark.parametrize("json", [True, False])
async def test_registration_success(aiohttp_client, app, json):
    """Registering a new user returns 201 for both JSON and form bodies."""
    data = {"email": "<EMAIL>", "password": "<PASSWORD>"}
    client = await aiohttp_client(app)
    url = app.router.named_resources()["api.users.register"].url_for()
    resp = await client.post(url, **prepare_request(data, json))
    assert resp.status == 201
@pytest.mark.integration
@pytest.mark.parametrize("json", [True, False])
async def test_registration_failed_without_password(aiohttp_client, app, json):
    """Registration without a password field is rejected with 422."""
    data = {"email": "<EMAIL>"}
    client = await aiohttp_client(app)
    url = app.router.named_resources()["api.users.register"].url_for()
    resp = await client.post(url, **prepare_request(data, json))
    assert resp.status == 422
@pytest.mark.integration
@pytest.mark.parametrize("json", [True, False])
async def test_registration_failed_already_existed(aiohttp_client, app, prepare_user, json):
    """Registering an email that already exists is rejected with 422."""
    data = {"email": "<EMAIL>", "password": "<PASSWORD>"}
    client = await aiohttp_client(app)
    await prepare_user({"email": "<EMAIL>", "password": "<PASSWORD>"}, app)
    url = app.router.named_resources()["api.users.register"].url_for()
    resp = await client.post(url, **prepare_request(data, json))
    assert resp.status == 422
@pytest.mark.integration
@pytest.mark.parametrize("json", [True, False])
async def test_login_success(aiohttp_client, app, prepare_user, json):
    """Valid credentials return 200 plus access and refresh token headers."""
    client = await aiohttp_client(app)
    url = app.router.named_resources()["api.users.login"].url_for()
    data = {"email": "<EMAIL>", "password": "<PASSWORD>"}
    await prepare_user(data, app)
    resp = await client.post(url, **prepare_request(data, json))
    assert resp.status == 200
    assert "X-ACCESS-TOKEN" in resp.headers
    assert "X-REFRESH-TOKEN" in resp.headers
@pytest.mark.integration
@pytest.mark.parametrize("json", [True, False])
@pytest.mark.parametrize("password", ["", "<PASSWORD>"])
async def test_login_failed(aiohttp_client, app, prepare_user, json, password):
    """An empty or wrong password for an existing user yields 403."""
    client = await aiohttp_client(app)
    url = app.router.named_resources()["api.users.login"].url_for()
    email = "<EMAIL>"
    await prepare_user({"email": email, "password": "<PASSWORD>"}, app)
    payload = {"email": email, "password": password}
    resp = await client.post(url, **prepare_request(payload, json))
    assert resp.status == 403
@pytest.mark.integration
@pytest.mark.parametrize("json", [True, False])
async def test_login_unregistered(aiohttp_client, app, prepare_user, json):
    """Logging in with an unknown email yields 404."""
    client = await aiohttp_client(app)
    url = app.router.named_resources()["api.users.login"].url_for()
    payload = {"email": "<EMAIL>", "password": "<PASSWORD>"}
    resp = await client.post(url, **prepare_request(payload, json))
    assert resp.status == 404
@pytest.mark.integration
async def test_refresh_success(aiohttp_client, app, prepare_user):
    """A valid refresh token yields a new access token for the same user."""
    client = await aiohttp_client(app)
    url = app.router.named_resources()["api.tokens.refresh"].url_for()
    user = await prepare_user({"email": "<EMAIL>", "password": "<PASSWORD>"}, app)
    generator = TokenGenerator(private_key=app["config"].tokens.private_key)
    refresh_token = generator.generate(
        user, token_type=TokenType.refresh, expire=app["config"].tokens.refresh_token_expire,
    )
    headers = {"X-REFRESH-TOKEN": refresh_token}
    resp = await client.post(url, headers=headers)
    assert resp.status == 200
    assert "X-ACCESS-TOKEN" in resp.headers
    # Decode the issued token and confirm its claims.
    access_token = resp.headers["X-ACCESS-TOKEN"]
    token = jwt.decode(access_token, app["config"].tokens.public_key, algorithms="RS256")
    assert token["user"]["id"] == user.key
    assert token["token_type"] == "access"
@pytest.mark.integration
async def test_refresh_failed_with_wrong_token_type(aiohttp_client, app, prepare_user):
    """An *access*-type token presented as a refresh token is rejected (403)."""
    client = await aiohttp_client(app)
    url = app.router.named_resources()["api.tokens.refresh"].url_for()
    user = await prepare_user({"email": "<EMAIL>", "password": "<PASSWORD>-secret"}, app)
    generator = TokenGenerator(private_key=app["config"].tokens.private_key)
    # Deliberately generate with token_type=access instead of refresh.
    refresh_token = generator.generate(
        user, token_type=TokenType.access, expire=app["config"].tokens.refresh_token_expire,
    )
    headers = {"X-REFRESH-TOKEN": refresh_token}
    resp = await client.post(url, headers=headers)
    assert resp.status == 403
@pytest.mark.integration
# BUGFIX: the list had `"foo" "2"` — implicit string concatenation collapsed
# two intended cases into the single value "foo2"; the missing comma is restored.
@pytest.mark.parametrize("user_id", ["", 0, "foo", "2", None])
async def test_refresh_failed(aiohttp_client, app, prepare_user, user_id):
    """A refresh token carrying an invalid user id is rejected with 403."""
    client = await aiohttp_client(app)
    url = app.router.named_resources()["api.tokens.refresh"].url_for()
    now = datetime.utcnow()
    # Hand-craft a syntactically valid refresh token with a bogus user id.
    refresh_token = jwt.encode(
        {
            "id": user_id,
            "email": "",
            "token_type": TokenType.refresh.value,
            "iss": "urn:passport",
            "exp": now + timedelta(seconds=app["config"].tokens.refresh_token_expire),
            "iat": now,
        },
        app["config"].tokens.private_key,
        algorithm="RS256",
    )
    headers = {"X-REFRESH-TOKEN": refresh_token}
    resp = await client.post(url, headers=headers)
    assert resp.status == 403
@pytest.mark.integration
async def test_refresh_failed_for_inactive(aiohttp_client, app, prepare_user):
    """Refresh is denied (403) for deactivated accounts: the user lookup only
    returns active users, so a valid token no longer resolves."""
    client = await aiohttp_client(app)
    url = app.router.named_resources()["api.tokens.refresh"].url_for()
    user = await prepare_user({"email": "<EMAIL>", "password": "<PASSWORD>", "is_active": False}, app,)
    generator = TokenGenerator(private_key=app["config"].tokens.private_key)
    refresh_token = generator.generate(
        user, token_type=TokenType.refresh, expire=app["config"].tokens.refresh_token_expire,
    )
    headers = {"X-REFRESH-TOKEN": refresh_token}
    resp = await client.post(url, headers=headers)
    assert resp.status == 403
|
clayman-micro/passport | src/passport/services/users.py | <filename>src/passport/services/users.py
from aiohttp_micro.core.exceptions import EntityAlreadyExist # type: ignore
from passport.domain import User
from passport.domain.storage import Storage
from passport.exceptions import Forbidden
class UserService:
    """Application-level operations on user accounts."""

    def __init__(self, storage: Storage) -> None:
        self.storage = storage

    async def register(self, email: str, password: str) -> User:
        """Create a new user; raise EntityAlreadyExist for a duplicate email."""
        if await self.storage.users.exists(email):
            raise EntityAlreadyExist()

        user = User(  # type: ignore
            key=0, email=email, password="", is_superuser=False, permissions=[]
        )
        user.set_password(password)
        await self.storage.users.add(user)
        return user

    async def login(self, email: str, password: str) -> User:
        """Return the user for *email* when *password* matches; otherwise raise Forbidden."""
        user = await self.storage.users.fetch_by_email(email)
        if not user.verify_password(password):
            raise Forbidden()
        return user

    async def fetch(self, key: int, active: bool) -> User:
        """Look the user up by primary key (the *active* flag is currently unused here)."""
        return await self.storage.users.fetch_by_key(key)
|
clayman-micro/passport | src/passport/domain/__init__.py | from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional
from aiohttp_micro.core.entities import Entity # type: ignore
from passlib.handlers.pbkdf2 import pbkdf2_sha512 # type: ignore
class TokenType(Enum):
    """Kind of JWT issued by the service: short-lived access vs long-lived refresh."""

    access = "access"
    refresh = "refresh"
@dataclass
class Permission(Entity):
    """A named capability that can be granted to users."""

    name: str
@dataclass
class User(Entity):
    """Account entity with password hashing helpers (PBKDF2-SHA512 via passlib)."""

    email: str
    password: Optional[str] = None  # passlib hash string, never plaintext
    is_superuser: bool = False
    permissions: List[Permission] = field(default_factory=list)

    def set_password(self, password: str) -> None:
        """Hash *password* and store the hash on the entity.

        FIX: the call target went missing — verify_password() checks against
        pbkdf2_sha512, so the hash must be produced by the same handler.
        """
        self.password = pbkdf2_sha512.hash(password, rounds=10000, salt_size=10)

    def verify_password(self, password: str) -> bool:
        """Return True when *password* matches the stored hash.

        A malformed or missing stored hash counts as a failed check instead of
        crashing: passlib raises ValueError for a bad hash string, and TypeError
        when self.password is still None.
        """
        try:
            valid = pbkdf2_sha512.verify(password, self.password)
        except (TypeError, ValueError):
            valid = False
        return valid
|
clayman-micro/passport | src/passport/handlers/api/tokens.py | from aiohttp import web
from aiohttp_micro.core.exceptions import EntityNotFound # type: ignore
from aiohttp_micro.web.handlers import json_response # type: ignore
from passport.domain import TokenType
from passport.exceptions import BadToken
from passport.handlers import (
session_required,
UserResponseSchema,
)
from passport.services.tokens import TokenDecoder, TokenGenerator
from passport.services.users import UserService
from passport.storage import DBStorage
@session_required
async def access(request: web.Request) -> web.Response:
    """Issue a short-lived access token for the session's user.

    Requires a valid session cookie; session_required populates request["user"].
    """
    config = request.app["config"]
    generator = TokenGenerator(private_key=config.tokens.private_key)
    access_token = generator.generate(request["user"], expire=config.tokens.access_token_expire)
    schema = UserResponseSchema()
    response = schema.dump({"user": request["user"]})
    # The token travels in a header, not the response body.
    return json_response(response, headers={"X-ACCESS-TOKEN": access_token})
async def refresh(request: web.Request) -> web.Response:
    """Exchange a valid refresh token for a fresh access token.

    401 when the header is missing, 403 when the token is invalid or the user
    no longer exists/is inactive.
    """
    config = request.app["config"]

    refresh_token = request.headers.get("X-REFRESH-TOKEN", "")
    if not refresh_token:
        raise web.HTTPUnauthorized(text="Refresh token required")

    decoder = TokenDecoder(public_key=config.tokens.public_key)
    try:
        token_holder = decoder.decode(refresh_token, TokenType.refresh)
    except BadToken:
        raise web.HTTPForbidden

    service = UserService(DBStorage(request.app["db"]))
    try:
        user = await service.fetch(key=token_holder.key, active=True)
    except EntityNotFound:
        raise web.HTTPForbidden

    generator = TokenGenerator(private_key=config.tokens.private_key)
    access_token = generator.generate(user=user, expire=config.tokens.access_token_expire)

    response = UserResponseSchema().dump({"user": user})
    return json_response(response, headers={"X-ACCESS-TOKEN": access_token})
|
clayman-micro/passport | src/passport/handlers/__init__.py | <gh_stars>0
import functools
from aiohttp import web
from aiohttp_micro.core.exceptions import EntityNotFound # type: ignore
from aiohttp_micro.core.schemas import EntitySchema # type: ignore
from marshmallow import fields, Schema
from passport.domain import User
from passport.exceptions import BadToken, TokenExpired
from passport.services.tokens import TokenDecoder
from passport.storage import DBStorage
class UserSchema(EntitySchema):
    """Serialized representation of a User (key + email only, no password)."""

    entity_cls = User

    email = fields.Str(required=True, description="Email")
class CredentialsPayloadSchema(Schema):
    """Login/registration request body: email + password."""

    email = fields.Str(required=True, description="User email")
    password = fields.Str(required=True, description="User password")
class UserResponseSchema(Schema):
    """Envelope for responses that return a single user."""

    user = fields.Nested(UserSchema, required=True)
def token_required(header: str = "X-ACCESS-TOKEN"):
    """Decorator factory: require a valid access token in *header*.

    Missing tokens yield 401; bad or expired ones yield 403. The decoded user
    is stored in request["user"].
    """
    def wrapper(f):
        @functools.wraps(f)
        async def wrapped(request: web.Request):
            raw_token = request.headers.get(header, "")
            if not raw_token:
                raise web.HTTPUnauthorized(text="Auth token required")

            decoder = TokenDecoder(public_key=request.app["config"].tokens.public_key)
            try:
                request["user"] = decoder.decode(raw_token)
            except (BadToken, TokenExpired):
                raise web.HTTPForbidden

            return await f(request)

        return wrapped

    return wrapper
def session_required(f):
    """Decorator: require a valid session cookie and load its user.

    Any failure (no cookie, unknown session, vanished user) yields 403.
    """
    @functools.wraps(f)
    async def wrapper(request: web.Request):
        config = request.app["config"]

        session_key = request.cookies.get(config.sessions.cookie, None)
        if not session_key:
            raise web.HTTPForbidden

        storage = DBStorage(request.app["db"])
        user_key = await storage.sessions.fetch(key=session_key)
        if not user_key:
            raise web.HTTPForbidden

        try:
            request["user"] = await storage.users.fetch_by_key(key=user_key)
        except EntityNotFound:
            raise web.HTTPForbidden

        return await f(request)

    return wrapper
|
clayman-micro/passport | src/passport/app.py | import os
from aiohttp import web
from aiohttp_micro import setup as setup_micro
from aiohttp_micro import setup_logging, setup_metrics
from aiohttp_storage import setup as setup_storage # type: ignore
from passport.config import AppConfig
from passport.handlers import auth as auth_endpoints
from passport.handlers.api import keys
from passport.handlers.api import tokens as token_endpoints
from passport.handlers.api import users as user_endpoints
def init(app_name: str, config: AppConfig) -> web.Application:
    """Build the aiohttp application: storage, logging, metrics and all routes."""
    app = web.Application()
    app["app_root"] = os.path.dirname(__file__)
    setup_micro(app, app_name, config)
    # Storage root hosts the alembic migrations used by aiohttp_storage.
    setup_storage(
        app, root=os.path.join(app["app_root"], "storage"), config=app["config"].db,
    )
    setup_logging(app)
    setup_metrics(app)
    # Public user endpoints (cookie-session based)
    app.router.add_post("/auth/login", auth_endpoints.login, name="auth.login")
    app.router.add_post("/auth/logout", auth_endpoints.logout, name="auth.logout")
    app.router.add_get("/api/keys", keys, name="api.keys")
    # User API endpoints (token based)
    app.router.add_get("/api/profile", user_endpoints.profile, name="api.users.profile")
    app.router.add_post("/api/login", user_endpoints.login, name="api.users.login")
    app.router.add_post("/api/register", user_endpoints.register, name="api.users.register")
    # Manage tokens endpoints
    app.router.add_get("/api/tokens/access", token_endpoints.access, name="api.tokens.access")
    app.router.add_post(
        "/api/tokens/refresh", token_endpoints.refresh, name="api.tokens.refresh",
    )
    return app
|
clayman-micro/passport | src/passport/handlers/auth.py | <gh_stars>0
import secrets
from datetime import datetime, timedelta
from typing import Dict
from aiohttp import web
from aiohttp_micro.core.exceptions import EntityNotFound # type: ignore
from aiohttp_micro.web.handlers import ( # type: ignore
json_response,
validate_payload,
)
from passport.exceptions import Forbidden
from passport.handlers import CredentialsPayloadSchema, session_required
from passport.storage import DBStorage
from passport.use_cases.users import LoginUseCase
@validate_payload(CredentialsPayloadSchema)
async def login(payload: Dict[str, str], request: web.Request) -> web.Response:
    """Cookie-session login: verify credentials, create a session, set the cookie.

    403 for a bad password, 404 for an unknown email.
    """
    use_case = LoginUseCase(app=request.app)
    try:
        user = await use_case.execute(payload["email"], payload["password"])
    except Forbidden:
        raise web.HTTPForbidden()
    except EntityNotFound:
        raise web.HTTPNotFound()
    config = request.app["config"]
    session_key = secrets.token_urlsafe(32)
    # config.sessions.expire is expressed in days.
    expires = datetime.now() + timedelta(days=config.sessions.expire)
    storage = DBStorage(database=request.app["db"])
    await storage.sessions.add(user, session_key, expires)
    request.app["logger"].info("User logged in", user=user.email)
    response = json_response({})
    response.set_cookie(
        name=config.sessions.cookie,
        value=session_key,
        max_age=config.sessions.expire * 24 * 60 * 60,  # days -> seconds
        domain=config.sessions.domain,
        httponly=True,
    )
    return response
@session_required
async def logout(request: web.Request) -> web.Response:
    """Drop the session cookie and redirect to the site root."""
    request.app["logger"].info("User logged out", user=request["user"].email)
    config = request.app["config"]
    redirect = web.HTTPFound(location="/")
    redirect.del_cookie(name=config.sessions.cookie, domain=config.sessions.domain)
    # aiohttp responses are exceptions; raising short-circuits with the redirect.
    raise redirect
|
clayman-micro/passport | src/passport/__main__.py | from pathlib import Path
import click
import uvloop # type: ignore
from aiohttp_micro.cli.server import server # type: ignore
from aiohttp_storage.management.storage import storage # type: ignore
from config import EnvValueProvider, FileValueProvider, load # type: ignore
from passport.app import init
from passport.config import AppConfig, VaultConfig, VaultProvider
@click.group()
@click.option("--conf-dir", default=None)
@click.option("--debug", default=False, is_flag=True)
@click.pass_context
def cli(ctx, conf_dir: str = None, debug: bool = False) -> None:
    """Root CLI group: load configuration (Vault, files, env) and build the app."""
    uvloop.install()
    if conf_dir:
        conf_path = Path(conf_dir)
    else:
        conf_path = Path.cwd()
    vault_config = VaultConfig()
    load(vault_config, providers=[EnvValueProvider()])
    config = AppConfig(
        defaults={
            "debug": debug,
            "db": {"user": "passport", "password": "<PASSWORD>", "database": "passport"},
            "tokens": {"access_token_expire": 900, "refresh_token_expire": 43200},
            "sessions": {"domain": ".clayman.pro", "cookie": "session", "expire": 30},
        }
    )
    # Later providers override earlier ones: vault < config files < environment.
    load(
        config,
        providers=[
            VaultProvider(config=vault_config, mount_point="credentials"),
            FileValueProvider(conf_path),
            EnvValueProvider(),
        ],
    )
    app = init("passport", config)
    if config.debug:
        app["logger"].debug("Application config", db=config.db.uri, sentry=config.sentry_dsn)
    # Shared state for the registered subcommands (server/storage).
    ctx.obj["app"] = app
    ctx.obj["config"] = config
cli.add_command(server, name="server")
cli.add_command(storage, name="storage")
if __name__ == "__main__":
cli(obj={})
|
clayman-micro/passport | src/passport/storage/migrations/versions/a01d1258d7a7_add_sessions.py | <gh_stars>0
"""Add sessions
Revision ID: a01d1258d7a7
Revises: 9ca629ddd362
Create Date: 2020-09-10 13:16:28.154448
"""
import sqlalchemy as sa # type: ignore
from alembic import op # type: ignore
# revision identifiers, used by Alembic.
revision = "a01d1258d7a7"
down_revision = "9ca629ddd362"
branch_labels = None
depends_on = None
def upgrade():
    """Create the sessions table (cookie key -> user, with expiry)."""
    op.create_table(
        "sessions",
        sa.Column("key", sa.String(length=44), nullable=False),
        sa.Column("expires", sa.DateTime(), nullable=True),
        sa.Column("user", sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(["user"], ["users.id"], ondelete="CASCADE"),
        sa.PrimaryKeyConstraint("key"),
    )
def downgrade():
    """Drop the sessions table."""
    op.drop_table("sessions")
|
clayman-micro/passport | src/passport/storage/__init__.py | from aiohttp_storage.storage import ( # type: ignore
DBStorage as AbstractDBStorage,
)
from databases import Database
from passport.domain.storage import Storage
from passport.storage.sessions import SessionDBStorage
from passport.storage.users import UsersDBRepo
class DBStorage(Storage, AbstractDBStorage):
    """Concrete Storage backed by a `databases.Database` connection."""

    def __init__(self, database: Database) -> None:
        super().__init__(database=database)
        self.sessions = SessionDBStorage(database=database)
        self.users = UsersDBRepo(database=database)
|
clayman-micro/passport | src/passport/domain/storage/__init__.py | <filename>src/passport/domain/storage/__init__.py
from abc import ABC
from passport.domain.storage.sessions import SessionRepo
from passport.domain.storage.users import UsersRepo
class Storage(ABC):
    """Aggregates the repositories the application needs."""

    # Concrete implementations must provide both repositories.
    sessions: SessionRepo
    users: UsersRepo
|
clayman-micro/passport | src/passport/services/tokens.py | <gh_stars>0
from datetime import datetime, timedelta
import jwt
from passport.domain import TokenType, User
from passport.exceptions import BadToken, TokenExpired
class TokenGenerator:
    """Issues RS256-signed JWTs for users."""

    __slots__ = ("_private_key",)

    def __init__(self, private_key: str) -> None:
        self._private_key = private_key

    def generate(self, user: User, token_type: TokenType = TokenType.access, expire: int = 600,) -> str:
        """Return a signed token for *user*, valid for *expire* seconds."""
        issued_at = datetime.utcnow()
        claims = {
            "user": {"id": user.key, "email": user.email},
            "token_type": token_type.value,
            "iss": "urn:passport",
            "exp": issued_at + timedelta(seconds=expire),
            "iat": issued_at,
        }
        return jwt.encode(claims, self._private_key, algorithm="RS256")
class TokenDecoder:
    """Validates signed JWTs and extracts the embedded user."""

    __slots__ = ("_public_key",)

    def __init__(self, public_key: str) -> None:
        self._public_key = public_key

    def decode(self, token: str, token_type: TokenType = TokenType.access) -> User:
        """Decode *token*, check its type, and return the embedded User.

        Raises TokenExpired for expired signatures and BadToken for any other
        invalid token (bad signature, wrong issuer, wrong type, bad payload).
        """
        try:
            token_data = jwt.decode(token, self._public_key, issuer="urn:passport", algorithms=["RS256"])
        except jwt.ExpiredSignatureError:
            raise TokenExpired()
        # FIX: catch PyJWT's full invalid-token hierarchy, not just DecodeError;
        # e.g. InvalidIssuerError is not a DecodeError and previously escaped
        # as an unhandled 500 instead of a 403.
        except jwt.InvalidTokenError:
            raise BadToken()

        if token_data.get("token_type", None) != token_type.value:
            raise BadToken()

        if "user" not in token_data:
            raise BadToken()

        try:
            user_key = int(token_data["user"].get("id", None))
        # FIX: int(None) raises TypeError, not ValueError — a token without a
        # user id must map to BadToken (the refresh tests pass id=None).
        except (TypeError, ValueError):
            raise BadToken()

        return User(key=user_key, email=token_data["user"].get("email", ""))  # type: ignore
|
clayman-micro/passport | src/passport/handlers/api/users.py | from typing import Dict
from aiohttp import web
from aiohttp_micro.core.exceptions import ( # type: ignore
EntityAlreadyExist,
EntityNotFound,
)
from aiohttp_micro.web.handlers import ( # type: ignore
json_response,
validate_payload,
)
from passport.domain import TokenType
from passport.exceptions import Forbidden
from passport.handlers import (
CredentialsPayloadSchema,
token_required,
UserResponseSchema,
)
from passport.services.tokens import TokenGenerator
from passport.use_cases.users import LoginUseCase, RegisterUserUseCase
@validate_payload(CredentialsPayloadSchema)
async def register(payload: Dict[str, str], request: web.Request) -> web.Response:
    """Create a new account; 201 with the user, or 422 if the email is taken."""
    use_case = RegisterUserUseCase(app=request.app)
    try:
        user = await use_case.execute(email=payload["email"], password=payload["password"])
    except EntityAlreadyExist:
        return json_response({"errors": {"email": "Already exist"}}, status=422)
    schema = UserResponseSchema()
    response = schema.dump({"user": user})
    return json_response(response, status=201)
@validate_payload(CredentialsPayloadSchema)
async def login(payload: Dict[str, str], request: web.Request) -> web.Response:
    """Token login: verify credentials and return access + refresh tokens.

    403 for a bad password, 404 for an unknown email.
    """
    use_case = LoginUseCase(app=request.app)
    try:
        user = await use_case.execute(email=payload["email"], password=payload["password"])
    except Forbidden:
        raise web.HTTPForbidden()
    except EntityNotFound:
        raise web.HTTPNotFound()
    config = request.app["config"]
    generator = TokenGenerator(private_key=config.tokens.private_key)
    schema = UserResponseSchema()
    response = schema.dump({"user": user})
    # Both tokens travel in headers; the body only carries the user profile.
    return json_response(
        response,
        headers={
            "X-ACCESS-TOKEN": generator.generate(user, expire=config.tokens.access_token_expire),
            "X-REFRESH-TOKEN": generator.generate(
                user, token_type=TokenType.refresh, expire=config.tokens.refresh_token_expire,
            ),
        },
    )
@token_required()
async def profile(request: web.Request) -> web.Response:
    """Return the profile of the user identified by the access token."""
    schema = UserResponseSchema()
    response = schema.dump({"user": request["user"]})
    return json_response(response)
|
clayman-micro/passport | src/passport/storage/migrations/versions/9ca629ddd362_add_permissions.py | <reponame>clayman-micro/passport
"""Add permissions
Revision ID: 9ca629ddd362
Revises: 615677fb3a4f
Create Date: 2020-09-09 23:37:57.555057
"""
import sqlalchemy as sa # type: ignore
from alembic import op # type: ignore
# revision identifiers, used by Alembic.
revision = "9ca629ddd362"
down_revision = "615677fb3a4f"
branch_labels = None
depends_on = None
def upgrade():
    """Create permissions, the user<->permission join table and users.is_superuser."""
    op.create_table(
        "permissions",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("name", sa.String(length=255), nullable=False),
        sa.Column("enabled", sa.Boolean(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("name"),
    )
    op.create_table(
        "user_permissions",
        sa.Column("user_id", sa.Integer(), nullable=False),
        sa.Column("permission_id", sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(["permission_id"], ["permissions.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["user_id"], ["users.id"], ondelete="CASCADE"),
        sa.PrimaryKeyConstraint("user_id", "permission_id"),
    )
    op.add_column("users", sa.Column("is_superuser", sa.Boolean(), nullable=True))
def downgrade():
    """Reverse upgrade(): drop the column and both tables."""
    op.drop_column("users", "is_superuser")
    op.drop_table("user_permissions")
    op.drop_table("permissions")
|
clayman-micro/passport | src/passport/storage/users.py | <filename>src/passport/storage/users.py
from datetime import datetime
import sqlalchemy # type: ignore
from aiohttp_micro.core.exceptions import EntityNotFound # type: ignore
from aiohttp_storage.storage import metadata # type: ignore
from databases import Database
from sqlalchemy import func
from sqlalchemy.orm.query import Query # type: ignore
from passport.domain import Permission, User
from passport.domain.storage.users import UsersRepo
# Accounts table; "password" stores the passlib hash, never plaintext.
users = sqlalchemy.Table(
    "users",
    metadata,
    sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True),
    sqlalchemy.Column("email", sqlalchemy.String(255), nullable=False, unique=True),
    sqlalchemy.Column("password", sqlalchemy.String(255), nullable=False),
    sqlalchemy.Column("is_active", sqlalchemy.Boolean, default=True),
    sqlalchemy.Column("is_superuser", sqlalchemy.Boolean, default=False),
    sqlalchemy.Column("last_login", sqlalchemy.DateTime, default=datetime.utcnow),
    sqlalchemy.Column("created_on", sqlalchemy.DateTime, default=datetime.utcnow),
)

# Catalogue of grantable permissions.
permissions = sqlalchemy.Table(
    "permissions",
    metadata,
    sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True),
    sqlalchemy.Column("name", sqlalchemy.String(255), nullable=False, unique=True),
    sqlalchemy.Column("enabled", sqlalchemy.Boolean, default=True),
)

# Many-to-many join between users and permissions.
user_permissions = sqlalchemy.Table(
    "user_permissions",
    metadata,
    sqlalchemy.Column(
        "user_id",
        sqlalchemy.Integer,
        sqlalchemy.ForeignKey("users.id", ondelete="CASCADE"),
        nullable=False,
        primary_key=True,
    ),
    sqlalchemy.Column(
        "permission_id",
        sqlalchemy.Integer,
        sqlalchemy.ForeignKey("permissions.id", ondelete="CASCADE"),
        nullable=False,
        primary_key=True,
    ),
)
class UsersDBRepo(UsersRepo):
    """SQL implementation of the users repository (fetches see active users only)."""

    def __init__(self, database: Database) -> None:
        self._database = database

    def get_query(self) -> Query:
        """Base select for active users; every fetch builds on it."""
        return sqlalchemy.select([users.c.id, users.c.email, users.c.password]).where(
            users.c.is_active == True  # noqa: E712
        )

    def _process_row(self, row) -> User:
        """Map a database row onto the User entity."""
        return User(key=row["id"], email=row["email"], password=row["password"])  # type: ignore

    async def fetch_by_key(self, key: int) -> User:
        """Return the active user with primary key *key*; raise EntityNotFound."""
        query = self.get_query().where(users.c.id == key)
        row = await self._database.fetch_one(query)
        if not row:
            raise EntityNotFound()
        return self._process_row(row)

    async def fetch_by_email(self, email: str) -> User:
        """Return the active user with *email*; raise EntityNotFound."""
        query = self.get_query().where(users.c.email == email)
        row = await self._database.fetch_one(query)
        if not row:
            raise EntityNotFound()
        return self._process_row(row)

    async def exists(self, email: str) -> bool:
        """True when any user (active or not) already uses *email*."""
        query = sqlalchemy.select([func.count(users.c.id)]).where(users.c.email == email)
        count = await self._database.fetch_val(query)
        return count > 0

    async def add(self, user: User) -> None:
        """Insert *user* (hashed password expected) and set its generated key."""
        key = await self._database.execute(
            users.insert().returning(users.c.id),
            values={
                "email": user.email,
                # FIX: the inserted password value was lost — persist the hashed
                # value carried by the entity, matching what fetch_* reads back.
                "password": user.password,
                "is_active": True,
                # Use UTC to match the column default (datetime.utcnow).
                "created_on": datetime.utcnow(),
            },
        )
        if user.permissions:
            await self._database.execute_many(
                user_permissions.insert(),
                [{"user_id": key, "permission_id": permission.key} for permission in user.permissions],
            )
        user.key = key

    async def add_permission(self, user: User, permission: Permission) -> None:
        raise NotImplementedError()

    async def remove_permission(self, user: User, permission: Permission) -> None:
        raise NotImplementedError()
|
clayman-micro/passport | src/passport/exceptions.py | class BadToken(Exception):
pass
class TokenExpired(Exception):
pass
class Forbidden(Exception):
pass
|
clayman-micro/passport | src/passport/client.py | <gh_stars>0
import functools
from typing import AsyncGenerator
from aiohttp import ClientSession, web
from config import Config, StrField # type: ignore
from passport.domain import User
from passport.exceptions import BadToken, TokenExpired
from passport.services.tokens import TokenDecoder
class PassportConfig(Config):
    """Client-side settings: passport host and its public key.

    public_key is normally empty at startup and filled in by passport_ctx.
    """

    host = StrField(env="PASSPORT_HOST")
    public_key = StrField()
def user_required(header: str = "X-ACCESS-TOKEN"):
    """Decorator factory: require a valid passport access token in *header*.

    Missing tokens yield 401; invalid or expired ones yield 403. The decoded
    user is placed in request["user"].
    """
    def wrapper(f):
        @functools.wraps(f)
        async def wrapped(request):
            raw_token = request.headers.get(header, "")
            if not raw_token:
                raise web.HTTPUnauthorized(text="Auth token required")

            decoder = TokenDecoder(public_key=request.app["config"].passport.public_key)
            try:
                request["user"] = decoder.decode(raw_token)
            except (BadToken, TokenExpired):
                raise web.HTTPForbidden

            return await f(request)

        return wrapped

    return wrapper
async def passport_ctx(app: web.Application) -> AsyncGenerator[None, None]:
    """Cleanup-context: fetch the passport public key at application startup.

    Aborts startup when the passport host is unset or the key endpoint fails.
    """
    config = app["config"]
    app["logger"].debug("Fetch passport keys")
    if not config.passport.host:
        app["logger"].error("Passport host should be defined")
        raise RuntimeError("Passport host should be defined")
    # Allow self-signed certificates in debug environments only.
    verify_ssl = True
    if app["config"].debug:
        verify_ssl = False
    url = f"{config.passport.host}/api/keys"
    async with ClientSession() as session:
        async with session.get(url, ssl=verify_ssl) as resp:
            if resp.status != 200:
                app["logger"].error("Fetch passport keys failed", status=resp.status)
                raise RuntimeError("Could not fetch passport keys")
            keys = await resp.json()
            config.passport.public_key = keys["public"]
    yield
def setup(app: web.Application) -> None:
    """Register the passport key fetcher on the application lifecycle."""
    app.cleanup_ctx.append(passport_ctx)
|
clayman-micro/passport | src/passport/handlers/api/__init__.py | <filename>src/passport/handlers/api/__init__.py<gh_stars>0
from aiohttp import web
from aiohttp_micro.web.handlers import json_response # type: ignore
from marshmallow import fields, Schema # type: ignore
class KeysResponseSchema(Schema):
    """Response body for the public key endpoint."""

    public = fields.Str()
async def keys(request: web.Request) -> web.Response:
    """Expose the service's JWT public key so clients can verify tokens."""
    config = request.app["config"]
    schema = KeysResponseSchema()
    response = schema.dump({"public": config.tokens.public_key})
    return json_response(response)
|
clayman-micro/passport | src/passport/storage/sessions.py | from datetime import datetime
import sqlalchemy # type: ignore
from aiohttp_storage.storage import metadata # type: ignore
from databases import Database
from passport.domain import User
from passport.domain.storage.sessions import SessionRepo
# Server-side session store: cookie key -> user id, with an expiry timestamp.
sessions = sqlalchemy.Table(
    "sessions",
    metadata,
    sqlalchemy.Column("key", sqlalchemy.String(44), primary_key=True),
    sqlalchemy.Column("expires", sqlalchemy.DateTime, default=datetime.utcnow),
    sqlalchemy.Column(
        "user", sqlalchemy.Integer, sqlalchemy.ForeignKey("users.id", ondelete="CASCADE"), nullable=False,
    ),
)
class SessionDBStorage(SessionRepo):
    """SQL-backed session repository."""

    def __init__(self, database: Database) -> None:
        self._database = database

    async def fetch(self, key: str) -> int:
        """Return the user id for session *key* (None for an unknown key).

        NOTE(review): expired sessions are not filtered out here — confirm
        expiry is enforced elsewhere.
        """
        query = sqlalchemy.select([sessions.c.user]).where(sessions.c.key == key)
        user_key = await self._database.fetch_val(query)
        return user_key

    async def add(self, user: User, key: str, expires: datetime) -> None:
        """Persist a new session for *user*."""
        await self._database.execute(
            sessions.insert(), values={"key": key, "user": user.key, "expires": expires},
        )

    async def remove(self, key: str) -> None:
        """Delete session *key*; silently does nothing if it does not exist."""
        await self._database.execute(sessions.delete().where(sessions.c.key == key))
|
clayman-micro/passport | src/passport/domain/storage/sessions.py | <reponame>clayman-micro/passport
from datetime import datetime
from typing import Protocol
from passport.domain import User
class SessionRepo(Protocol):
    """Structural interface for session persistence."""

    async def fetch(self, key: str) -> int:
        ...

    async def add(self, user: User, key: str, expires: datetime) -> None:
        ...

    async def remove(self, key: str) -> None:
        ...
|
clayman-micro/passport | src/passport/domain/storage/users.py | from typing import Protocol
from passport.domain import Permission, User
class UsersRepo(Protocol):
    """Structural interface for user persistence."""

    async def fetch_by_key(self, key: int) -> User:
        ...

    async def fetch_by_email(self, email: str) -> User:
        ...

    async def exists(self, email: str) -> bool:
        ...

    async def add(self, user: User) -> None:
        ...

    async def add_permission(self, user: User, permission: Permission) -> None:
        ...

    async def remove_permission(self, user: User, permission: Permission) -> None:
        ...

    # NOTE(review): the concrete repo in this codebase does not implement
    # save_user — confirm this method is still part of the contract.
    async def save_user(self, email: str, password: str) -> int:
        ...
|
clayman-micro/passport | tests/conftest.py | import faker # type: ignore
import pytest # type: ignore
from aiohttp import web
from aiohttp_storage.tests import storage # type: ignore
from passport.app import AppConfig, init
@pytest.fixture(scope="session")
def config():
    """Session-wide application config with test keys and DB defaults."""
    conf = AppConfig(
        defaults={
            "consul": {"host": "localhost", "port": 8500},
            "debug": True,
            "db": {"user": "passport", "password": "<PASSWORD>", "database": "passport"},
            "tokens": {
                # FIX: this string literal was left unterminated (SyntaxError);
                # close it before the line comment.
                "public_key": "ssh-rsa <KEY>",  # noqa
                "private_key": """
-----BEGIN RSA PRIVATE KEY-----
<KEY>
""",
            },
        }
    )
    return conf
@pytest.fixture(scope="function")
def app(pg_server, config):
    """Application wired to the test Postgres server, with a fresh schema."""
    config.db.host = pg_server["params"]["host"]
    config.db.port = pg_server["params"]["port"]
    config.db.user = pg_server["params"]["user"]
    config.db.password = pg_server["params"]["password"]
    config.db.database = pg_server["params"]["database"]
    app = init("passport", config)
    # Applies migrations for the test and tears the schema down afterwards.
    with storage(config=app["config"].db, root=app["storage_root"]):
        yield app
@pytest.fixture(scope="function")
async def prepared_app(app):
    """App with a started AppRunner (startup/cleanup signals fired)."""
    runner = web.AppRunner(app)
    await runner.setup()
    yield app
    await runner.cleanup()
@pytest.fixture(scope="function")
async def client(app, aiohttp_client):
    """HTTP test client bound to the application."""
    client = await aiohttp_client(app)
    return client
@pytest.fixture(scope="session")
def fake():
    """Session-wide Faker instance for generating test data."""
    return faker.Faker()
|
swaranjalii/modern-django | config/settings/local.py | <reponame>swaranjalii/modern-django<filename>config/settings/local.py
from .base import *
SECRET_KEY = env('DJANGO_SECRET_KEY', default=')<KEY>')
DEBUG = env.bool('DJANGO_DEBUG', default=True) |
ccolorado/branch_drawer | branch_drawer.py | #!/usr/bin/env python2.7
import argparse
import ConfigParser
import io
import os
import os.path
import subprocess
DEFAULTS = """
[branch_drawer]
# Location of drawer repo
drawer_dir=~/.branch_drawer.d
# Drawer Name
drawer_link_name=Branch_Drawer
"""
# git config hook.yourhook.yourconfigval value
def create_conf():
    """Write the built-in DEFAULTS to the user-level config file if absent."""
    # CONFIG_FILES is assigned at module level (after the defs, before main()).
    user_config = CONFIG_FILES[-1]
    if not os.path.exists(user_config):
        with open(user_config, 'w') as f:
            f.write(DEFAULTS)
def get_repo_root_directory():
    """Return the absolute path of the current git repository's root."""
    repo_root_directory = subprocess.check_output( "git rev-parse --show-toplevel", shell = True )
    # strip() drops the trailing newline emitted by git.
    return repo_root_directory.strip()
def get_current_branch_name():
    """Return the name of the currently checked-out git branch."""
    # basename strips the refs/heads/ prefix from the symbolic ref.
    current_branch_name = subprocess.check_output( "basename $(git symbolic-ref -q HEAD)", shell = True )
    return current_branch_name.strip()
def get_repo_name():
    """Return the basename of the repository's root directory."""
    # FIX: the previous shell round-trip ("basename %s" % path, shell=True)
    # broke on paths containing spaces or shell metacharacters; take the
    # basename in-process instead.
    return os.path.basename(get_repo_root_directory())
def parse_args():
    """Parse CLI args with config-file and environment fallbacks.

    Effective precedence per option: command line > environment variable >
    config file > built-in DEFAULTS.
    """
    # First pass: only --conf-file, so we know which config to load.
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument('--conf-file', '-c', help='Specify the config file')
    args, remaining = conf_parser.parse_known_args()
    config = ConfigParser.RawConfigParser()
    config.readfp(io.BytesIO(DEFAULTS))
    if args.conf_file:
        config.read([args.conf_file])
    else:
        create_conf()
        config.read(CONFIG_FILES)
    opts = dict(config.items('branch_drawer'))
    # Second pass: full parser seeded with the file-derived defaults.
    parser = argparse.ArgumentParser(parents=[conf_parser])
    parser.set_defaults(**opts)
    parser.add_argument('--drawer_dir', '-d', help='Drawer storage location')
    parser.add_argument('--drawer_link_name', '-l', help='In repo drawer link name')
    args = parser.parse_args()
    # Environment variables beat file values but lose to explicit CLI flags.
    args.drawer_dir = args.drawer_dir or os.environ.get('drawer_dir') or opts['drawer_dir']
    args.drawer_link_name = args.drawer_link_name or os.environ.get('drawer_link_name') or opts['drawer_link_name']
    return args
def main():
    """Entry point: parse arguments into the module-level CONF."""
    global CONF
    CONF = parse_args()
CONFIG_FILES = ['/etc/branch_drawer', os.path.join(os.path.expanduser('~'), '.branch_drawer') ]
if __name__ == '__main__':
main()
|
DiegoGom/Photo-whit-NodeJS-Python-FsWebCam | tomarfoto.py | #!/usr/bin/env python
# -*- coding: utf-8
import os
import time
os.system("mkdir hola")
|
siyuan0/pytorch_model_prune | prune.py | <reponame>siyuan0/pytorch_model_prune
import torch
import torch.nn as nn
import numpy as np
import FLOP
PRUNE_ID = 0
DEBUG_MODE = False
## This code prunes all the Conv2d layers in a given pytorch model. The Conv2d are pruned by removing
## channels based on an evaluation of their weights. The pruning is done with these restrictions:
## 1. Each Conv2d after pruning will retain at least 1 channel
## 2. Conv2d layers with groups != 1 or bias != False are not pruned
## After pruning, a zero_padding layer is added to pad the output tensor up to the correct dimensions
## To use the pruning, write something like model = prune_model(model, factor_removed=)
## args: model - your pytorch model
## factor_removed - the proportion of layers pruning will try to remove
## Idea is from 'Pruning Filters for Efficient ConvNets' by <NAME>, et al
## (https://arvix.org/abs/1608.08710)
class debug_mode(object):
    """Context manager that flips the module-level DEBUG_MODE flag on, restoring
    the previous value on exit."""

    def __enter__(self):
        global DEBUG_MODE
        self.prev = DEBUG_MODE
        DEBUG_MODE = True

    def __exit__(self, *args):
        # FIX: without the global declaration this assignment only created a
        # local variable, so DEBUG_MODE stayed True forever after first use.
        global DEBUG_MODE
        DEBUG_MODE = self.prev
def mask(model, cut_off=0):
    """Zero out, in place, every parameter element with |value| <= cut_off.

    FIX: wrapped in torch.no_grad() — an in-place multiply on a leaf tensor
    that requires grad raises RuntimeError, so the original crashed on any
    ordinary (trainable) model.
    """
    with torch.no_grad():
        for p in model.parameters():
            p_mask = abs(p) > cut_off
            p *= p_mask.float()
    return model
def layer_eval(layer):
    """Return the sum of squared elements of *layer* (its squared L2 norm).

    Vectorized: the original called .item() per element in a Python loop,
    which is extremely slow for real convolution kernels.
    """
    return layer.detach().pow(2).sum().item()
def unwrap_model(model):
    """Flatten *model* into an nn.ModuleList of its leaf layers.

    Recurses through nn.Sequential and nn.ModuleList containers; every other
    module is treated as a leaf and kept as-is.
    """
    layers = []

    def unwrap_inside(module):
        for child in module.children():
            if isinstance(child, (nn.Sequential, nn.ModuleList)):
                # FIX: a leaf module placed directly inside a ModuleList used
                # to be recursed into (yielding no children) and was silently
                # dropped from the result.
                unwrap_inside(child)
            else:
                layers.append(child)

    unwrap_inside(model)
    return nn.ModuleList(layers)
class zero_padding(nn.Module):
    """Pads a NCHW tensor with zero channels up to num_channels, scattering the
    input channels into the positions listed in keep_channel_idx."""

    def __init__(self, num_channels, keep_channel_idx):
        super(zero_padding, self).__init__()
        self.num_channels = num_channels
        self.keep_channel_idx = keep_channel_idx

    def forward(self, x):
        # FIX: allocate on the input's device with the input's dtype — the
        # original always produced a float32 CPU tensor, breaking GPU and
        # half-precision runs.
        output = torch.zeros(
            x.size()[0], self.num_channels, x.size()[2], x.size()[3],
            dtype=x.dtype, device=x.device,
        )
        output[:, self.keep_channel_idx, :, :] = x
        return output
class pruned_conv2d(nn.Module):
    """Wraps a Conv2d, dropping output channels whose squared weight norm is
    below cut_off and zero-padding the output back to the original width.

    Conv2d layers with groups != 1 or a bias are passed through untouched;
    at least one channel is always kept.
    """

    def __init__(self, conv2d, cut_off=0.0):
        super(pruned_conv2d, self).__init__()
        self.in_channels = conv2d.in_channels
        self.out_channels = conv2d.out_channels
        self.kernel_size = conv2d.kernel_size
        self.stride = conv2d.stride
        self.padding = conv2d.padding
        self.dilation = conv2d.dilation
        self.groups = conv2d.groups
        self.bias = conv2d.bias
        # Give every pruned layer a unique id (handy when debugging shapes).
        global PRUNE_ID
        self.id = PRUNE_ID
        PRUNE_ID += 1
        self.keep_channel = []
        self.keep_channel_idx = []
        # FIX: compare against None with `is not` — `self.bias != None`
        # attempts an elementwise tensor comparison when a bias is present.
        if self.groups != 1 or self.bias is not None:
            self.new_conv2d = conv2d
        else:
            for idx, channel in enumerate(conv2d.weight):
                if layer_eval(channel) > cut_off:
                    self.keep_channel.append(torch.unsqueeze(channel, 0))
                    self.keep_channel_idx.append(idx)
            if len(self.keep_channel_idx) == 0:
                # If no channel clears the cut-off, keep the single best one.
                best_channel_eval = 0
                for idx, channel in enumerate(conv2d.weight):
                    if layer_eval(channel) > best_channel_eval:
                        best_channel = channel
                        best_channel_idx = idx
                self.keep_channel.append(torch.unsqueeze(best_channel, 0))
                self.keep_channel_idx.append(best_channel_idx)
            self.new_conv2d = nn.Conv2d(in_channels=self.in_channels,
                                        out_channels=len(self.keep_channel_idx),
                                        kernel_size=self.kernel_size,
                                        stride=self.stride,
                                        padding=self.padding,
                                        dilation=self.dilation,
                                        bias=False)
            self.new_conv2d.weight = torch.nn.Parameter(torch.cat(self.keep_channel, 0))
            # Restores the original channel count so downstream layers fit.
            self.zero_padding = zero_padding(self.out_channels, self.keep_channel_idx)

    def forward(self, x):
        if self.groups != 1 or self.bias is not None:
            return self.new_conv2d(x)
        else:
            if DEBUG_MODE:
                try:
                    x = self.new_conv2d(x)
                except Exception as e:
                    print('failed here')
                    print('input size: ' + str(x.size()))
                    print('layer: ' + str(self.new_conv2d))
                    print('layer weight: ' + str(self.new_conv2d.weight.size()))
                    print(str(e))
                    quit()
            else:
                x = self.new_conv2d(x)
            return self.zero_padding(x)
class prune_model(nn.Module):
    """Wrap `model` with every eligible Conv2d replaced by a pruned_conv2d.

    All Conv2d channel scores are collected and sorted; the score at the
    factor_removed quantile becomes the global cut-off passed to each
    pruned_conv2d. Parameter and FLOP counts are printed before and after.
    """
    def __init__(self, model, factor_removed=0.75):
        super(prune_model, self).__init__()
        self.model = model
        self.factor = factor_removed
        self.modulelist = unwrap_model(self.model)
        print('number of parameters before pruning: %d' % sum([p.numel() for p in self.model.parameters()]))
        print('FLOP: %d' % FLOP.count_model_param_flops(self.model, 300))
        # Score every output channel of every Conv2d in the model.
        self.layer_eval_list = []
        for m in self.modulelist:
            if m.__class__.__name__ == "Conv2d":
                for layer in m.weight:
                    self.layer_eval_list.append(layer_eval(layer))
        self.layer_eval_list.sort()
        # Clamp the index so factor_removed == 1.0 no longer runs off the
        # end of the sorted score list (IndexError in the original).
        cut_idx = min(int(factor_removed * len(self.layer_eval_list)),
                      len(self.layer_eval_list) - 1)
        self.cut_off = self.layer_eval_list[cut_idx]
        def replace_inside(modules):
            # Recursively swap Conv2d children for pruned_conv2d in place.
            for name, m in iter(modules._modules.items()):
                if isinstance(m, nn.Sequential):
                    replace_inside(m)
                elif isinstance(m, nn.ModuleList):
                    for m2 in m:
                        replace_inside(m2)
                else:
                    if m.__class__.__name__ == "Conv2d":
                        modules._modules[name] = pruned_conv2d(m, self.cut_off)
        replace_inside(self.model)
        print('number of parameters after pruning: %d' % sum([p.numel() for p in self.model.parameters()]))
        print('FLOP: %d' % FLOP.count_model_param_flops(self.model, 300))
    def forward(self, x, phase='eval', use_RNN=False):
        # Delegate to the wrapped model; extra args mirror its signature.
        return self.model(x, phase, use_RNN)
|
Zachary-Jackson/Tacocat | forms.py | <gh_stars>1-10
from flask_wtf import Form
from wtforms import StringField, PasswordField
from wtforms.validators import (DataRequired, Email, Length, EqualTo,
ValidationError)
from models import User
def email_exists(form, field):
    """WTForms validator: reject registration emails already in the database.

    Raises ValidationError when a User row with this email exists.
    """
    already_taken = User.select().where(User.email == field.data).exists()
    if already_taken:
        raise ValidationError('User with that email already exists.')
class RegisterForm(Form):
    """Sign-up form: a unique email plus a password entered twice."""
    email = StringField(
        'Email',
        validators=[
            DataRequired(),
            Email(),
            email_exists
        ])
    password = PasswordField(
        'Password',
        validators=[
            DataRequired(),
            # NOTE(review): min=2 is a very weak password policy -- kept
            # as-is to avoid changing validation behaviour, but consider
            # raising it.
            Length(min=2),
            EqualTo('password2', message='Passwords must match')
        ])
    password2 = PasswordField(
        # The label here was an anonymization placeholder ('<PASSWORD>');
        # restored to a meaningful label for the confirmation field.
        'Confirm Password',
        validators=[DataRequired()]
    )
class LoginForm(Form):
    """Form users complete to log in."""
    email = StringField('Email', validators=[DataRequired(), Email()])
    # PasswordField (was StringField) so browsers mask the input instead of
    # echoing the password as plain text.
    password = PasswordField('Password', validators=[DataRequired()])
class TacoForm(Form):
    """This gets the user's taco choices."""
    # Required ingredients.
    protein = StringField('Protein', validators=[DataRequired()])
    shell = StringField('Shell', validators=[DataRequired()])
    # Optional ingredients -- no validators, may be left blank.
    cheese = StringField('Cheese')
    extras = StringField('Extras')
|
Zachary-Jackson/Tacocat | models.py | <gh_stars>1-10
import datetime
from flask.ext.bcrypt import generate_password_hash
from flask.ext.login import UserMixin
from peewee import *
DATABASE = SqliteDatabase('taco.db')
class User(UserMixin, Model):
    """A registered account; passwords are stored as bcrypt hashes."""
    email = CharField(unique=True)
    # bcrypt output is 60 characters; the previous max_length=35 would
    # truncate hashes on backends that enforce VARCHAR length (SQLite
    # ignores it), making every later password check fail.
    password = CharField(max_length=100)
    joined_at = DateTimeField(default=datetime.datetime.now)
    is_admin = BooleanField(default=False)

    class Meta:
        database = DATABASE
        order_by = ('joined_at',)

    @classmethod
    def create_user(cls, email, password, admin=False):
        """Create a user with a hashed password.

        Raises ValueError when the email is already taken (unique index).
        """
        try:
            with DATABASE.transaction():
                cls.create(
                    email=email,
                    password=generate_password_hash(password),
                    is_admin=admin)
        except IntegrityError:
            raise ValueError("User already exists")
class Taco(Model):
    """ This is the model for a taco. Yum!"""
    # NOTE(review): related_name='user' makes the reverse accessor on User
    # `User.user` rather than e.g. `User.tacos` -- check the templates
    # before renaming it.
    user = ForeignKeyField(
        rel_model=User,
        related_name='user'
    )
    timestamp = DateTimeField(default=datetime.datetime.now)
    protein = CharField()
    shell = CharField()
    cheese = CharField()
    extras = CharField()
    class Meta:
        # Stored in the shared SQLite database, newest taco first.
        database = DATABASE
        order_by = ('-timestamp',)
def initialize():
    """Create the database tables if they do not exist yet (safe=True).

    The connection is closed in a finally block so a failure inside
    create_tables no longer leaks an open connection.
    """
    DATABASE.connect()
    try:
        DATABASE.create_tables([User, Taco], safe=True)
    finally:
        DATABASE.close()
|
Zachary-Jackson/Tacocat | tacocat.py | from flask import (Flask, render_template, flash, redirect, url_for, g)
from flask.ext.bcrypt import check_password_hash
from flask.ext.login import (LoginManager, login_user, logout_user,
login_required, current_user)
import forms
import models
# Development server settings.
DEBUG = True
PORT = 8000
HOST = '0.0.0.0'
app = Flask(__name__)
# NOTE(review): the secret key was redacted by anonymization and the closing
# quote is missing -- restore a real, properly quoted secret before running.
app.secret_key = '<KEY>
login_manager = LoginManager()
login_manager.init_app(app)
# Unauthenticated users hitting @login_required views are redirected here.
login_manager.login_view = 'login'
@login_manager.user_loader
def load_user(userid):
    """Return the User for this session id, or None when no such user exists."""
    User = models.User
    try:
        return User.get(User.id == userid)
    except models.DoesNotExist:
        return None
@app.before_request
def before_request():
    """Connect to the database before each request."""
    # Expose the shared peewee database and the current user on flask.g so
    # views and templates can reach them without importing models directly.
    g.db = models.DATABASE
    g.db.connect()
    g.user = current_user
@app.after_request
def after_request(response):
    """Closes the database connection after each request."""
    # Paired with before_request(); the response must be returned unchanged.
    g.db.close()
    return response
@app.route('/')
def index():
    """Render Tacocat's homepage with up to 100 tacos."""
    recent_tacos = models.Taco.select().limit(100)
    return render_template('index.html', tacos=recent_tacos)
@app.route('/register', methods=('GET', 'POST'))
def register():
    """Register a new user account.

    The success message is now flashed only after the account has actually
    been created (it previously fired before create_user ran, so a failed
    creation still showed "You are registered!").
    """
    form = forms.RegisterForm()
    if form.validate_on_submit():
        models.User.create_user(
            email=form.email.data,
            password=form.password.data
        )
        flash("You are registered!", "success")
        return redirect(url_for('index'))
    return render_template('register.html', form=form)
@app.route('/login', methods=('GET', 'POST'))
def login():
    """Log a user in, flashing feedback on every failure path.

    A wrong password previously fell through silently and re-rendered the
    form with no message; the user now gets an explicit mismatch notice.
    """
    form = forms.LoginForm()
    if form.validate_on_submit():
        try:
            user = models.User.get(models.User.email == form.email.data)
        except models.DoesNotExist:
            flash("Your email does not exist :{")
        else:
            if check_password_hash(user.password, form.password.data):
                login_user(user)
                flash("Welcome to Tacocat!", "success")
                return redirect(url_for('index'))
            flash("Your email and password don't match!")
    return render_template('login.html', form=form)
@app.route('/logout')
@login_required
def logout():
    """Log the current user out and return to the homepage."""
    logout_user()
    # Fixed typo in the flash message: "premissable" -> "permissible".
    flash("Leaving is only permissible if you are going to make new tacos.")
    return redirect(url_for('index'))
@app.route('/taco', methods=('GET', 'POST'))
@login_required
def post():
    """Show the taco form; on a valid submission, save the taco and go home."""
    form = forms.TacoForm()
    if not form.validate_on_submit():
        return render_template("taco.html", form=form)

    def cleaned(field):
        # Normalize free-text ingredients for consistent display.
        return field.data.strip().lower()

    models.Taco.create(user=g.user.id,
                       protein=cleaned(form.protein),
                       shell=cleaned(form.shell),
                       cheese=cleaned(form.cheese),
                       extras=cleaned(form.extras))
    flash("Your taco has been added. Thanks!", "success")
    return redirect(url_for('index'))
# Script entry point: bootstrap the schema, seed a default admin account
# (ignored if it already exists), then start the development server.
if __name__ == '__main__':
    models.initialize()
    try:
        models.User.create_user(
            email="<EMAIL>",  # NOTE(review): anonymization placeholder -- set a real address.
            password="password",  # NOTE(review): default credential -- change before deploying.
            admin=True
        )
    except ValueError:
        # create_user raises ValueError when the admin user already exists.
        pass
    app.run(debug=DEBUG, host=HOST, port=PORT)
|
rajeshnokia/monolithe | monolithe/generators/lang/python/converter.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
def _string_clean(string):
"""
"""
rep = {
"IPID": "IpID",
"VCenter": "Vcenter",
"vCenter": "Vcenter",
"VPort": "Vport",
"IPv6": "Ipv6",
"IPv4": "Ipv4",
"DHCPv4": "Dhcpv4",
"DHCPv6": "Dhcpv6"
}
rep = dict((re.escape(k), v) for k, v in rep.items())
pattern = re.compile("|".join(list(rep.keys())))
return pattern.sub(lambda m: rep[re.escape(m.group(0))], string)
# Compiled once at import time instead of on every call.
_FIRST_CAP_RE = re.compile("(.)([A-Z](?!s([A-Z])*)[a-z]+)")
_ALL_CAP_RE = re.compile("([a-z0-9])([A-Z])")


def get_idiomatic_name(name):
    """Convert a CamelCase attribute name to idiomatic snake_case.

    Acronym spellings are normalized first (see _string_clean), then two
    substitution passes insert underscores at word boundaries before
    lowercasing the result.
    """
    s1 = _FIRST_CAP_RE.sub(r"\1_\2", _string_clean(name))
    return _ALL_CAP_RE.sub(r"\1_\2", s1).lower()
def get_type_name(type_name, sub_type=None):
    """Map a specification type name to its Python builtin equivalent.

    Unknown type names are returned unchanged; sub_type is accepted for
    interface compatibility but unused.
    """
    builtin_for = {
        "string": "str",
        "enum": "str",
        "boolean": "bool",
        "integer": "int",
        "long": "int",
        "time": "float",
        "object": "dict",
    }
    return builtin_for.get(type_name, type_name)
|
rajeshnokia/monolithe | tests/base/sdk/python/tdldk/cli/cli.py | #!/usr/bin/env python
#
# __code_header example
# put your license header here
# it will be added to all the generated files
#
import argparse
import sys
sys.path.append("../")
class _HelpAction(argparse._HelpAction):
    # Custom --help action that, after the top-level help, also prints the
    # help text of every registered subcommand parser.
    def __call__(self, parser, namespace, values, option_string=None):
        parser.print_help()
        # NOTE(review): relies on argparse's private _actions /
        # _SubParsersAction internals; may break across Python versions.
        subparsers_actions = [
            action for action in parser._actions
            if isinstance(action, argparse._SubParsersAction)]
        for subparsers_action in subparsers_actions:
            for choice, subparser in subparsers_action.choices.items():
                print("\n{}:\n{}".format(choice.upper(), "-" * (len(choice) + 1)))
                print(subparser.format_help())
        parser.exit()
def main(argv=sys.argv):
    """Build the CLI argument parser for every command and dispatch it.

    NOTE(review): the mutable default argv=sys.argv is never used below --
    parse_args() reads sys.argv itself; confirm before removing.
    """
    # Options shared by every subcommand (credentials, endpoint, output mode).
    default_parser = argparse.ArgumentParser(description="CLI for ToDoList", add_help=False)
    default_parser.add_argument("-v", "--verbose", help="Activate verbose mode", action="store_true")
    default_parser.add_argument("--username", help="Username to get an api key or set 'TDL_USERNAME' in your variable environment")
    default_parser.add_argument("--password", help="Password to get an api key or set 'TDL_PASSWORD' in your variable environment")
    default_parser.add_argument("--api", help="URL of the API endpoint or set 'TDL_API_URL' in your variable environment")
    default_parser.add_argument("--version", help="Version of the API or set 'TDL_API_VERSION' in your variable environment")
    default_parser.add_argument("--enterprise", help="Name of the enterprise to connect or set 'TDL_ENTERPRISE' in your variable environment")
    default_parser.add_argument("--json", help="Add this option get a JSON output or set TDL_JSON_OUTPUT=True", action="store_true")
    # Top-level parser; -h is the custom action that also prints subcommand help.
    parser = argparse.ArgumentParser(description="CLI for TDL Software Development Kit", add_help=False)
    parser.add_argument("-h", "--help", action=_HelpAction, help="help for help if you need some help")
    subparsers = parser.add_subparsers(dest="command",
                                       title="All available commands")
    # List Command
    list_parser = subparsers.add_parser("list", description="List all objects", parents=[default_parser])
    list_parser.add_argument("list", help="Name of the object (See command 'objects' to list all objects name)")
    list_parser.add_argument("--in", dest="parent_infos", nargs=2, help="Specify the PARENT_NAME and PARENT_UUID")
    list_parser.add_argument("-f", "--filter", dest="filter", help="Specify a filter predicate")
    list_parser.add_argument("-x", "--fields", dest="fields", help="Specify output fields", nargs="+", type=str)
    list_parser.add_argument("-q", "--query", dest="query_parameters", nargs="*", help="List of Key=Value that will be sent as query parameters", required=False)
    list_parser.add_argument("-p", "--page", dest="page", help="The page number that needs to be retreived. This value is ignored unless you also configure the page size parameter. Default value is 0", type=int, default=0)
    list_parser.add_argument("-s", "--page-size", dest="page_size", help="The size of a single page that needs to be retreived. If this is configured, the list command will only return a maximum of this amount of results", type=int)
    # Count Command
    count_parser = subparsers.add_parser("count", description="Count all objects", parents=[default_parser])
    count_parser.add_argument("count", help="Name of the object (See command 'objects' to list all objects name)")
    count_parser.add_argument("--in", dest="parent_infos", nargs=2, help="Specify the parent name and its uuid")
    count_parser.add_argument("-f", "--filter", dest="filter", help="Specify a filter predicate")
    count_parser.add_argument("-x", "--fields", dest="fields", help="Specify output fields", nargs="+", type=str)
    count_parser.add_argument("-q", "--query", dest="query_parameters", nargs="*", help="List of Key=Value that will be sent as query parameters", required=False)
    # Show Command
    show_parser = subparsers.add_parser("show", description="Show a specific object", parents=[default_parser])
    show_parser.add_argument("show", help="Name of the object to show (See command 'objects' to list all objects name)")
    show_parser.add_argument("-i", "--id", dest="id", help="Identifier of the object to show", required=True)
    show_parser.add_argument("-x", "--fields", dest="fields", help="Specify output fields", nargs="+", type=str)
    # Create Command
    create_parser = subparsers.add_parser("create", description="Create a new object", parents=[default_parser])
    create_parser.add_argument("create", help="Name of the object to create (See command 'objects' to list all objects name)")
    create_parser.add_argument("--in", dest="parent_infos", nargs=2, help="Specify the parent name and its uuid")
    create_parser.add_argument("-p", "--params", dest="params", nargs="*", help="List of Key=Value parameters", required=True)
    # Update Command
    update_parser = subparsers.add_parser("update", description="Update an existing object", parents=[default_parser])
    update_parser.add_argument("update", help="Name of the object to update (See command 'objects' to list all objects name)")
    update_parser.add_argument("-i", "--id", dest="id", help="Identifier of the object to show", required=True)
    update_parser.add_argument("-p", "--params", dest="params", nargs="*", help="List of Key=Value parameters", required=True)
    # Delete Command
    delete_parser = subparsers.add_parser("delete", description="Delete an existing object", parents=[default_parser])
    delete_parser.add_argument("delete", help="Name of the object to update (See command 'objects' to list all objects name)")
    delete_parser.add_argument("-i", "--id", dest="id", help="Identifier of the object to show", required=True)
    # Assign Command
    assign_parser = subparsers.add_parser('assign', description="Assign a set of new objects according to their identifier", parents=[default_parser])
    assign_parser.add_argument('assign', help='Name of the object to assign (See command `objects` to list all objects name)')
    assign_parser.add_argument('--ids', dest='ids', nargs='*', help='Identifier of the object to assign', required=True)
    assign_parser.add_argument('--to', dest='parent_infos', nargs=2, help="Specify the resource name and its uuid", required=True)
    # Unassign Command
    unassign_parser = subparsers.add_parser('unassign', description="Unassign a set of new objects according to their identifier", parents=[default_parser])
    unassign_parser.add_argument('unassign', help='Name of the object to unassign (See command `objects` to list all objects name)')
    unassign_parser.add_argument('--ids', dest='ids', nargs='*', help='Identifier of the object to unassign', required=True)
    unassign_parser.add_argument('--from', dest='parent_infos', nargs=2, help="Specify the resource name and its uuid", required=True)
    # Reassign Command
    reassign_parser = subparsers.add_parser('reassign', description="Reassign all objects according to their identifier", parents=[default_parser])
    reassign_parser.add_argument('reassign', help='Name of the object to reassign (See command `objects` to list all objects name)')
    reassign_parser.add_argument('--ids', dest='ids', nargs='*', help='Identifier of the object to reassign. If --ids is not specified, it will remove all assigned objects')
    reassign_parser.add_argument('--to', dest='parent_infos', nargs=2, help="Specify the resource name and its uuid", required=True)
    # Resources Command
    objects_parser = subparsers.add_parser("objects", description="Explore all objects", parents=[default_parser])
    objects_parser.add_argument("-f", "--filter", dest="filter", help="Filter by name (ex: -f nsg)")
    objects_parser.add_argument("-p", "--parent", dest="parent", help="Filter by parent (ex -p enterprise)")
    objects_parser.add_argument("-c", "--child", dest="child", help="Filter by children (ex: -c domain)")
    args = parser.parse_args()
    # Imported lazily; CLICommand performs the actual API call for `args.command`.
    from commands import CLICommand
    CLICommand.execute(args)
# Allow running the CLI directly as a script.
if __name__ == "__main__":
    main() |
rajeshnokia/monolithe | tests/base/sdk/python/tdldk/cli/__init__.py | # -*- coding: utf-8 -*-
#
# __code_header example
# put your license header here
# it will be added to all the generated files
#
import requests
import requests.packages.urllib3.exceptions as exceptions
# Silence urllib3's InsecureRequestWarning process-wide.
# NOTE(review): this also hides genuine certificate problems for every
# request the process makes -- confirm this is intended (presumably for
# endpoints with self-signed certificates).
requests.packages.urllib3.disable_warnings(exceptions.InsecureRequestWarning)
requests.packages.urllib3.disable_warnings()
|
KonecnyDavid/IdosNextDeparture | Scraper/scrape.py | <reponame>KonecnyDavid/IdosNextDeparture
import requests
import re
import json
from bs4 import BeautifulSoup
# Departure stop every query starts from.
from_stop = "Jungmannova"
# Tram line number -> the terminus stops queried (one per direction).
# NOTE(review): "<NAME>" is an anonymization placeholder -- restore the
# real stop name before scraping line 6.
lines = {
    "1": ("reckovice", "ecerova"),
    "6": ("kralovo pole, nadrazi", "<NAME>")
}
# Day of the month the scraped week starts on (dates are built as 02.2020).
week_start = 17
def create_url(line_n, end_stop, day, month=2, year=2020):
    """Build the IDOS timetable-search URL for one tram line and direction.

    month/year were previously hard-coded as "02.2020"; they are now
    parameters whose defaults reproduce the original URL exactly.
    The origin stop comes from the module-level `from_stop`.
    """
    return ("https://idos.idnes.cz/brno/zjr/vysledky/"
            "?date={}.{:02d}.{}&l=Tram%20{}&f={}&t={}&ttn=IDSJMK").format(
                day, month, year, line_n, from_stop, end_stop)
def parse(content):
    """Extract departure times from an IDOS result page.

    Returns a dict mapping hour-of-day (int) to the list of two-digit
    number strings (presumably minutes) found in that hour's row.
    """
    print("Parsing previous request")
    soup = BeautifulSoup(content, 'html.parser')
    result = soup.find("table", class_="times")
    body = result.find("tbody")
    rows = body.findAll("tr")
    res = {}
    for idx, row in enumerate(rows):
        tds = row.findAll("td")
        # Pull every two-digit number out of the second cell's markup.
        # NOTE(review): str(...encode("utf-8")) yields a "b'...'" repr on
        # Python 3; the regex still finds the digits, but this is fragile.
        times = list(
            filter(
                lambda el: len(el) == 2,
                re.findall(r"[0-9]+", str(tds[1].prettify().encode("utf-8")))
            )
        )
        # NOTE(review): the (idx + 3) % 24 offset implies the first table row
        # is the 03:00 hour -- confirm against the live page layout.
        res[(idx + 3) % 24] = times
    print("Success")
    return res
# NOTE(review): unused placeholder -- no callers in this script.
def parse_row(row):
    pass
# Scrape every line/direction/day combination for one week and dump the
# nested {line: {terminus: {weekday_offset: {hour: [times]}}}} structure
# to data.json.
res = {}
for line_n, destinations in lines.items():
    destinations_res = {}
    for destination in destinations:
        days_res = {}
        for day in range(week_start, week_start + 7):
            url = create_url(line_n, destination, day)
            print(
                "Making request to: Line {} | {} -> {} | Day {}"
                .format(line_n, from_stop, destination, day)
            )
            response = requests.get(url)
            data = parse(response.content.decode('utf-8', 'ignore'))
            # Key days 0..6 relative to the start of the scraped week.
            days_res[day - week_start] = data
        destinations_res[destination] = days_res
    res[line_n] = destinations_res
json_data = json.dumps(res)
with open("./data.json", "w") as file:
    file.write(json_data)
|
rgbkrk/jupytext | jupytext/version.py | <filename>jupytext/version.py<gh_stars>0
"""Jupytext's version number"""
__version__ = '1.3.3-dev'
|
jbaker10/sal-saml | urls.py | from django.conf.urls import include, url
from django.conf import settings
from django.conf.urls.static import static
from sal.origurls import *
# Extend Sal's original URL patterns (star-imported from sal.origurls above)
# with the djangosaml2 endpoints under /saml2/.
urlpatterns += [
    url(r'^saml2/', include('djangosaml2.urls')),
]
|
jbaker10/sal-saml | process_build.py | <filename>process_build.py
import argparse
import subprocess
import os
# parser = argparse.ArgumentParser()
# parser.add_argument('tag', nargs='?', default='')
# args = parser.parse_args()
# Resolve the Docker image tag: an explicit TAG env var wins; otherwise the
# master branch publishes as 'latest' and any other branch uses its own name.
tag = os.getenv('TAG', '')
if tag == '':
    if os.getenv('CIRCLE_BRANCH') == 'master':
        tag = 'latest'
    else:
        tag = os.getenv('CIRCLE_BRANCH')
dockerfile_content = """FROM macadmins/sal:{}
MAINTAINER <NAME> <<EMAIL>>
ENV DJANGO_SAML_VERSION 0.16.11
RUN apt-get update && apt-get install -y python-setuptools python-dev libxmlsec1-dev libxml2-dev xmlsec1 python-pip
RUN pip install -U setuptools
RUN pip install git+git://github.com/francoisfreitag/djangosaml2.git@<PASSWORD>
ADD attributemaps /home/app/sal/sal/attributemaps
RUN mv /home/app/sal/sal/urls.py /home/app/sal/sal/origurls.py
ADD urls.py /home/app/sal/sal/urls.py
""".format(tag)
with open("Dockerfile", "w") as dockerfile:
dockerfile.write(dockerfile_content)
# Build, authenticate, and push the image (Python 2 print statements).
cmd = [
    'docker',
    'build',
    '-t',
    'macadmins/sal-saml:{}'.format(tag),
    '.'
]
print subprocess.check_output(cmd)
# NOTE(review): passing the registry password via -p exposes it in the
# process argument list (visible to other users); prefer --password-stdin.
cmd = [
    'docker',
    'login',
    '-u',
    '{}'.format(os.getenv('DOCKER_USER')),
    '-p',
    '{}'.format(os.getenv('DOCKER_PASS'))
]
try:
    print subprocess.check_output(cmd)
except subprocess.CalledProcessError:
    # Continue anyway; the push below will fail loudly if login mattered.
    print 'Failed to login to docker'
cmd = [
    'docker',
    'push',
    'macadmins/sal-saml:{}'.format(tag)
]
print subprocess.check_output(cmd)
|
Sudhishna/jetez | jet/main.py | <reponame>Sudhishna/jetez<gh_stars>0
#!/usr/bin/env python
# Copyright 2018 Juniper Networks, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""JetEZ
:Organization: Juniper Networks, Inc.
:Copyright: Copyright (c) 2018, Juniper Networks, Inc. All rights reserved.
:Date: 04/19/2018
:Version: 0.4
"""
from jet import crypto
from jet import utils
import argparse
import datetime
import logging
import shutil
import sys
import os
import yaml
# NOTE(review): constant name is misspelled ("DESCIPTION") but is used
# consistently below -- renaming would touch main() as well.
DESCIPTION = """
JetEZ - Easy SDK
----------------
This tool creates a JET install package (.tgz) from source directory using
the parameters from jet.yaml project description file.
"""
# setup logging to stdout
# The handler filters at INFO by default; -d/--debug raises it in main().
log = logging.getLogger("jet")
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
ch.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
log.addHandler(ch)
def main():
    """Assemble, sign and archive a JET install package from a source tree.

    Steps: parse arguments, load jet.yaml, stage project files into a
    scratch build directory, write and sign the contents manifest, build
    contents.iso, write and sign the package manifest, then tar it all up.
    """
    parser = utils.FileArgumentParser(description=DESCIPTION, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument_with_check('--source', dest='source', metavar='DIR', required=True, help="source directory")
    parser.add_argument('-v', '--version', dest='version', type=str, help="version string")
    parser.add_argument_with_check('-k', '--key', dest='key', metavar='FILE', required=True, help="path to signing key")
    parser.add_argument_with_check('-c', '--cert', dest='cert', metavar='FILE', required=True, help="path to signing cert")
    parser.add_argument_with_check('-j', '--jet', dest='jet', metavar='FILE', required=False, help="path to project file (default: <source>/jet.yaml)")
    parser.add_argument('-b', '--build', dest='build', default=".build", help="build directory (default: .build)")
    parser.add_argument('-d', '--debug', dest='debug', action='store_true', help="verbose logging")
    args = parser.parse_args()
    if args.debug:
        ch.setLevel(logging.DEBUG)
    # Fall back to a timestamp version and the default project-file location.
    version = args.version if args.version else datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    project_file = args.jet if args.jet else os.path.join(args.source, "jet.yaml")
    log.info("load project file %s", project_file)
    with utils.exit_on_error(Exception):
        project = utils.load_project(project_file, version)
    package = "%s-%s-%s-%s" % (project["basename"], project["arch"], project["abi"], version)
    log.info("create temporary build directory %s", args.build)
    # Start from a clean build tree every run.
    if os.path.exists(args.build):
        shutil.rmtree(args.build)
    os.makedirs(args.build)
    os.makedirs('%s/contents' % args.build)
    contents = "%s/contents/contents" % args.build
    os.makedirs(contents)
    contents_pkg = '%s/pkg' % contents
    os.makedirs(contents_pkg)
    # "xsig" projects use SHA-256 and the extended signature/cert file names;
    # everything else uses the legacy SHA-1 naming.
    if project["sig"] is not None and project["sig"] == "xsig":
        sig = project["sig"]
        certs = "xcerts"
        sha_bits = "sha256"
    else:
        sig = "sig"
        certs = "certs"
        sha_bits = "sha1"
    content_manifest = """pkg/manifest uid=0 gid=0 mode=444
pkg/manifest.%s uid=0 gid=0 mode=444
pkg/manifest.%s uid=0 gid=0 mode=444
pkg/manifest.%s uid=0 gid=0 mode=444
""" % (sha_bits, sig, certs)
    if project["sig"] is None:
        content_manifest += "/set package_id=%s role=%s\n" % (project["package_id"], project["role"])
    contents_symlink =""
    mount_dir = "/packages/mnt/%s" % project["basename"]
    # Stage every project file into the contents tree and record it in the
    # manifest (and, when flagged, in the symlink list).
    for f in project["files"]:
        destination = os.path.join(contents, *f['destination'].split("/"))
        log.info("copy file %s to %s", f['source'], destination)
        # create target directory
        _d = f['destination'].split("/")[1:]
        for d in _d[:1] if len(_d) == 2 else ["/".join(_d[:x]) for x in range(2, len(_d))]:
            _d = os.path.join(contents, d)
            if not os.path.exists(_d):
                os.makedirs(_d)
        # copy file
        shutil.copy(os.path.join(args.source, f['source']), destination)
        # add file to manifest
        if project["sig"] is not None and project["sig"] == "xsig":
            sha = crypto.generate_sha256(destination)
        else:
            sha = crypto.generate_sha1(destination)
        content_manifest += "%s %s=%s uid=%s gid=%s mode=%s program_id=%s\n" % \
            (f["destination"][1:] if f["destination"][0] == "/" else f["destination"],
             sha_bits, sha, f["uid"], f["gid"], f["mode"], f["program_id"])
        if f["symlink"]:
            contents_symlink += "%s%s %s\n" % (mount_dir, f["destination"], f["destination"])
    # Optional install scripts live beside the contents tree.
    if project["scripts"] is not None:
        dscripts = os.path.join(args.build, "scripts")
        os.makedirs(dscripts)
        log.info("copy file %s to %s/%s", project['scripts'], dscripts, project['scripts'])
        shutil.copy(os.path.join(args.source, project['scripts']), dscripts)
        script_file = "%s/%s" % (dscripts,project['scripts'])
        os.chmod(script_file, 0o755)
    content_manifest_file = '%s/manifest' % contents_pkg
    log.info("create manifest file %s", content_manifest_file)
    with open(content_manifest_file, "w") as f:
        f.write(content_manifest)
    content_manifest_sha_file = '%s/manifest.%s' % (contents_pkg, sha_bits)
    with open(content_manifest_sha_file, "w") as f:
        if project["sig"] is not None and project["sig"] == "xsig":
            f.write("%s\n" % crypto.generate_sha256(content_manifest_file))
        else:
            f.write("%s\n" % crypto.generate_sha1(content_manifest_file))
    contents_symlink_file = '%s.symlinks' % contents
    log.info("create symlink file %s", contents_symlink_file)
    with open(contents_symlink_file, "w") as f:
        f.write(contents_symlink)
    log.info("sign manifest file %s" % content_manifest_file)
    crypto.sign(content_manifest_file, "%s.%s" % (content_manifest_file, sig), args.key, args.cert, sha_bits, certs)
    # Manifest files inside the image are read-only.
    for f in os.listdir(contents_pkg):
        os.chmod(os.path.join(contents_pkg, f), 0o444)
    log.info("create contents.iso")
    utils.create_contents_iso(contents, "%s.iso" % contents)
    shutil.rmtree(contents)
    log.info("create package.xml")
    utils.create_package_xml(project, version, package, args.build)
    # Top-level package manifest covering the iso, symlinks, package.xml and
    # any install scripts.
    if project["sig"] is not None and project["sig"] == "xsig":
        package_manifest = ""
    else:
        package_manifest = "/set package_id=31 role=Provider_Daemon\n"
    package_manifest_files = ["contents/contents.iso", "contents/contents.symlinks", "package.xml"]
    if project["scripts"] is not None:
        package_manifest_files.append("scripts/%s" % project["scripts"])
    for f in package_manifest_files:
        if f == 'scripts/%s' % project['scripts'] and sha_bits == "sha1":
            package_manifest += "%s %s=%s program_id=1\n" % (f, sha_bits, crypto.generate_sha1(os.path.join(args.build, f)))
        elif sha_bits == "sha256":
            package_manifest += "%s %s=%s\n" % (f, sha_bits, crypto.generate_sha256(os.path.join(args.build, f)))
        else:
            package_manifest += "%s %s=%s\n" % (f, sha_bits, crypto.generate_sha1(os.path.join(args.build, f)))
    package_manifest_file = os.path.join(args.build, "manifest")
    log.info("create manifest file %s", package_manifest_file)
    with open(package_manifest_file, "w") as f:
        f.write(package_manifest)
    log.info("sign manifest file %s" % package_manifest_file)
    crypto.sign(package_manifest_file, "%s.%s" % (package_manifest_file, sig), args.key, args.cert, sha_bits, certs)
    log.info("create %s.tgz" % package)
    utils.create_tgz(package, args.build)
    log.info("package successfully created")
# Script entry point.
if __name__ == "__main__":
    main()
|
Sudhishna/jetez | jet/crypto.py | # Copyright 2018 Juniper Networks, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""JetEZ Library
:Organization: Juniper Networks, Inc.
:Copyright: Copyright (c) 2018, Juniper Networks, Inc. All rights reserved.
:Date: 04/12/2018
:Version: 0.4
"""
import base64
import logging
import hashlib
import os
import subprocess
FILE_BUF_SIZE=65536
log = logging.getLogger("jet")
def generate_sha256(filename):
    """generate sha256 hash of file

    Reads the file in FILE_BUF_SIZE chunks so arbitrarily large files are
    hashed without loading them into memory.

    :param filename: filename
    :type filename: str
    :return: sha256 hash of file
    :rtype: str
    """
    # Local renamed from "hash", which shadowed the builtin; iter() with a
    # b'' sentinel replaces the manual while/break read loop.
    digest = hashlib.sha256()
    with open(filename, 'rb') as f:
        for chunk in iter(lambda: f.read(FILE_BUF_SIZE), b''):
            digest.update(chunk)
    return digest.hexdigest()
def generate_sha1(filename):
    """generate sha1 hash of file

    Reads the file in FILE_BUF_SIZE chunks so arbitrarily large files are
    hashed without loading them into memory.

    :param filename: filename
    :type filename: str
    :return: sha1 hash of file
    :rtype: str
    """
    # Mirrors generate_sha256: local renamed from builtin-shadowing "hash",
    # manual while/break loop replaced by iter() with a b'' sentinel.
    digest = hashlib.sha1()
    with open(filename, 'rb') as f:
        for chunk in iter(lambda: f.read(FILE_BUF_SIZE), b''):
            digest.update(chunk)
    return digest.hexdigest()
def sign(input, output, key, cert, sha_bits, certs):
    """sign file

    Shells out to openssl. Commands are now passed as argument lists
    (shell=False), so file paths containing spaces or shell metacharacters
    can no longer be injected into a command line.

    :param input: path to input file
    :type input: str
    :param output: path to output file
    :type output: str
    :param key: path to private key
    :type key: str
    :param cert: path to public key/cert
    :type cert: str
    :param sha_bits: sha1 or sha256
    :type sha_bits: str
    :param certs: certs or xcerts
    :type certs: str
    """
    # get subject from certificate
    _subject = subprocess.check_output(
        ["openssl", "x509", "-in", cert, "-noout", "-subject"])
    subject = _subject.decode("utf8").split(" ", 1)[1].strip()
    # create signature
    signature = subprocess.check_output(
        ["openssl", "dgst", "-%s" % sha_bits, "-sign", key, input])
    # create base64 from signature
    signature64 = base64.b64encode(signature).decode("utf8")
    # format signature file (base64 wrapped at 64 columns)
    signature_file = """%s
-----BEGIN JUNOS SIGNATURE-----
%s
-----END JUNOS SIGNATURE-----
""" % (subject, "\n".join([signature64[x:x + 64] for x in range(0, len(signature64), 64)]))
    # write signature file
    with open(output, "w+") as f:
        f.write(signature_file)
    # create certificate chain: signing cert followed by the bundled JET CA chain
    with open("%s/jet-certs.pem" % (os.path.dirname(__file__)), "r") as f:
        cert_chain = f.read()
    with open(cert, "r") as f:
        cert_file = f.read()
    with open("%s/manifest.%s" % (os.path.dirname(output), certs), "w+") as f:
        f.write(cert_file)
        f.write(cert_chain)
|
bmng-dev/bitmessage-pow | test_pow.py | import os
import sys
# NOTE(review): this script targets Python 2 (it uses xrange/cmp below and
# relies on integer `/` division); run it with a Python 2 interpreter.
platform = os.environ.get('PLATFORM', '').lower()
# Pointer width of the running interpreter, rounded up to whole bytes * 8.
interpreter_bits = (sys.maxsize.bit_length() + 7) / 8 * 8
# Bail out early when the requested build platform does not match the
# interpreter (e.g. a 64-bit DLL under a 32-bit Python).
if platform == 'x64' and interpreter_bits != 64:
    raise SystemExit
if platform in ['win32', 'x86'] and interpreter_bits != 32:
    raise SystemExit
import ctypes
import hashlib
import logging
import struct
import timeit
logging.basicConfig(format='%(levelname)8s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
# Big- and little-endian packers for the 64-bit nonce.
Q_BE = struct.Struct('>Q')
Q_LE = struct.Struct('<Q')
# Proof-of-work inputs -- presumably Bitmessage defaults; TODO confirm
# against the protocol spec.
payload = '\x00' * 4
time_to_live = 300
trials = 1000
padding = 1000
payload = payload[:2**18]  # cap payload at 256 KiB
adjustedLength = len(payload) + padding
digest = hashlib.sha512(payload).digest()
# Difficulty target; integer arithmetic under Python 2 `/`.
target = 2 ** 64 / (trials * (adjustedLength + ((time_to_live * adjustedLength) / (2 ** 16))))
target_bytes = Q_BE.pack(target)
logger.info('Target: %#018x', target)
def do_pow_py_be():
    # Pure-Python POW search with the nonce packed big-endian (wire order).
    message = bytearray(8 + len(digest))
    message[8:] = digest  # message = nonce(8) || payload digest
    for nonce in xrange(0x7FFFFFFF):  # Python 2 only (xrange, cmp)
        Q_BE.pack_into(message, 0, nonce)
        # Lexicographic compare of the 64-byte double-SHA512 against the
        # 8-byte target: effectively tests the digest's leading bytes.
        if 0 >= cmp(hashlib.sha512(hashlib.sha512(message).digest()).digest(), target_bytes):
            return nonce
def do_pow_py_le():
    # Variant that packs the trial nonce little-endian while searching.
    message = bytearray(8 + len(digest))
    message[8:] = digest  # message = nonce(8) || payload digest
    for nonce in xrange(0x7FFFFFFF):  # Python 2 only (xrange, cmp)
        Q_LE.pack_into(message, 0, nonce)
        if 0 >= cmp(hashlib.sha512(hashlib.sha512(message).digest()).digest(), target_bytes):
            # Re-read the LE-packed bytes as big-endian, i.e. return the
            # byte-swapped nonce so it matches the wire (BE) convention.
            return Q_BE.unpack_from(message, 0)[0]
def do_pow_vs():
    # Native implementation: load the matching-bitness DLL from bin\.
    lib = ctypes.CDLL('bin\\bmpow{0}.dll'.format(interpreter_bits))
    lib.BitmessagePOW.argtypes = [ctypes.c_char_p, ctypes.c_uint64]
    lib.BitmessagePOW.restype = ctypes.c_uint64
    return lib.BitmessagePOW(digest, target)
# Benchmark each implementation; the proof value is recomputed independently
# (big-endian nonce || digest, double-SHA512) for logging.
for do_pow in [do_pow_py_be, do_pow_py_le, do_pow_vs]:
    try:
        start = timeit.default_timer()
        nonce = do_pow()
        elapsed = timeit.default_timer() - start
        proof, = Q_BE.unpack_from(hashlib.sha512(hashlib.sha512(Q_BE.pack(nonce) + digest).digest()).digest(), 0)
        logger.info('%s found %#018x for proof %#018x in %.6f seconds', do_pow.__name__, nonce, proof, elapsed)
    except Exception as err:
        # A failing variant (e.g. missing DLL) must not abort the others.
        logger.warning('%s failed: %s', do_pow.__name__, err)
|
bmng-dev/bitmessage-pow | test_old.py | <filename>test_old.py
import argparse
import ctypes
import hashlib
import logging
import struct
logging.basicConfig(format='%(levelname)8s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
Q = struct.Struct('!Q')  # network (big-endian) 64-bit unsigned
# Proof-of-work parameters; mirrors test_pow.py.
payload = '\x00' * 4
time_to_live = 300
trials = 1000
padding = 1000
payload = payload[:2**18]  # cap payload at 256 KiB
adjustedLength = len(payload) + padding
digest = hashlib.sha512(payload).digest()
# NOTE(review): integer `/` here implies Python 2 semantics -- confirm.
target = 2 ** 64 / (trials * (adjustedLength + ((time_to_live * adjustedLength) / (2 ** 16))))
parser = argparse.ArgumentParser()
parser.add_argument('--win', action='store_true')  # select the WinDLL loader
args = parser.parse_args()
logger.info(args)
try:
    # --win selects the stdcall (WinDLL) loader; otherwise cdecl (CDLL).
    if args.win:
        lib = ctypes.WinDLL('BitMsgHash32.dll')
    else:
        lib = ctypes.CDLL('BitMsgHash32.dll')
    do_pow = lib.BitmessagePOW
    do_pow.argtypes = [ctypes.c_char_p, ctypes.c_uint64]
    do_pow.restype = ctypes.c_uint64
    # NOTE(review): '%.6f' formats the returned nonce (an integer) as a float.
    logger.info('%.6f', do_pow(digest, target))
except:
    # Bare except is deliberate here: log any failure (incl. missing DLL).
    logger.exception('Exception')
|
tbsexton/pythonbible | pythonbible/counters/book_counter.py | from functools import singledispatch
from typing import List
from pythonbible.normalized_reference import NormalizedReference
from pythonbible.parser import get_references
@singledispatch
def count_books(references: List[NormalizedReference]) -> int:
    """Return the count of books of the Bible included in the given reference(s).
    Accepts a list of NormalizedReference objects (default), a single
    NormalizedReference, or a reference string (see registered overloads).
    """
    return _get_number_of_books_in_references(references)
@count_books.register
def _count_books_single(reference: NormalizedReference) -> int:
    # Overload: a single NormalizedReference (not wrapped in a list).
    return _get_number_of_books_in_reference(reference)
@count_books.register
def _count_books_string(reference: str) -> int:
    # Overload: parse the reference string, then count over the parsed refs.
    return _get_number_of_books_in_references(get_references(reference))
def _get_number_of_books_in_references(references: List[NormalizedReference]) -> int:
    """Total number of books spanned across all given references."""
    total = 0
    for ref in references:
        total += _get_number_of_books_in_reference(ref)
    return total
def _get_number_of_books_in_reference(reference: NormalizedReference) -> int:
    """Number of books spanned by one reference (1 when no end book is set)."""
    if reference.end_book:
        return reference.end_book.value - reference.book.value + 1
    return 1
|
tbsexton/pythonbible | pythonbible/counters/chapter_counter.py | <reponame>tbsexton/pythonbible
from functools import singledispatch
from typing import List
from pythonbible.books import Book
from pythonbible.normalized_reference import NormalizedReference
from pythonbible.parser import get_references
from pythonbible.verses import get_number_of_chapters
@singledispatch
def count_chapters(references: List[NormalizedReference]) -> int:
    """Return the count of chapters included in the given reference(s)."""
    return _get_number_of_chapters_in_references(references)
@count_chapters.register
def _count_chapters_single(reference: NormalizedReference) -> int:
    # Overload: a single NormalizedReference (not wrapped in a list).
    return _get_number_of_chapters_in_reference(reference)
@count_chapters.register
def _count_chapters_string(reference: str) -> int:
    # Overload: parse the reference string first.
    return _get_number_of_chapters_in_references(get_references(reference))
def _get_number_of_chapters_in_references(references: List[NormalizedReference]) -> int:
    """Total chapter count across all given references."""
    total = 0
    for ref in references:
        total += _get_number_of_chapters_in_reference(ref)
    return total
def _get_number_of_chapters_in_reference(reference: NormalizedReference) -> int:
    """Chapters spanned by one reference, including across book boundaries."""
    end_book = reference.end_book
    if not end_book or reference.book == end_book:
        # Single-book reference: plain chapter-range length.
        return reference.end_chapter - reference.start_chapter + 1
    # Remaining chapters of the start book ...
    total: int = get_number_of_chapters(reference.book) - reference.start_chapter + 1
    # ... plus every chapter of any intermediate books ...
    for book_id in range(reference.book.value + 1, end_book.value):
        total += get_number_of_chapters(Book(book_id))
    # ... plus the leading chapters of the end book.
    return total + reference.end_chapter
|
tbsexton/pythonbible | pythonbible/counters/verse_counter.py | <reponame>tbsexton/pythonbible
from functools import singledispatch
from typing import List, Optional
from pythonbible.books import Book
from pythonbible.normalized_reference import NormalizedReference
from pythonbible.parser import get_references
from pythonbible.verses import get_max_number_of_verses, get_number_of_chapters
@singledispatch
def count_verses(references: List[NormalizedReference]) -> int:
    """Return the count of verses included in the given reference(s)."""
    return _get_number_verses_in_references(references)
@count_verses.register
def _count_verses_single(reference: NormalizedReference) -> int:
    # Overload: a single NormalizedReference (not wrapped in a list).
    return _get_number_of_verses_in_reference(reference)
@count_verses.register
def _count_verses_string(reference: str) -> int:
    # Overload: parse the reference string first.
    return _get_number_verses_in_references(get_references(reference))
def _get_number_verses_in_references(references: List[NormalizedReference]) -> int:
    """Total verse count across all given references."""
    total = 0
    for ref in references:
        total += _get_number_of_verses_in_reference(ref)
    return total
def _get_number_of_verses_in_reference(reference: NormalizedReference) -> int:
    """Count verses in one reference, walking book-by-book, chapter-by-chapter."""
    number_of_verses: int = 0
    start_book = reference.book
    end_book = reference.end_book or start_book
    for book_id in range(start_book.value, end_book.value + 1):
        book: Book = Book(book_id)
        # Clip the chapter range to this book's portion of the reference.
        start_chapter: int = reference.start_chapter if book == start_book else 1
        end_chapter: int = (
            reference.end_chapter if book == end_book else get_number_of_chapters(book)
        )
        for chapter in range(start_chapter, end_chapter + 1):
            # Verse limits only apply in the reference's boundary chapters;
            # None means "whole chapter" to the helper below.
            start_verse: Optional[int] = (
                reference.start_verse
                if book == start_book and chapter == reference.start_chapter
                else None
            )
            end_verse: Optional[int] = (
                reference.end_verse
                if book == end_book and chapter == reference.end_chapter
                else None
            )
            number_of_verses += _get_number_of_verses_in_chapter(
                book, chapter, start_verse, end_verse
            )
    return number_of_verses
def _get_number_of_verses_in_chapter(
    book: Book, chapter: int, start_verse: Optional[int], end_verse: Optional[int]
) -> int:
    """Verses in one chapter, clipped to optional start/end verse bounds."""
    last = end_verse or get_max_number_of_verses(book, chapter)
    first = start_verse or 1
    return last - first + 1
|
tbsexton/pythonbible | tests/test_counters.py | from typing import List
import pythonbible as bible
def test_count_books_single_book() -> None:
    """A reference contained within one book counts as a single book."""
    # Given a list of references
    references: List[bible.NormalizedReference] = bible.get_references("James 1:4-6")
    # When we get the count of books in the references
    number_of_books: int = bible.count_books(references[0])
    # Then the count is correct
    assert number_of_books == 1
def test_count_books_two_books() -> None:
    """A reference spanning a book boundary counts both books."""
    # Given a list of references
    references: List[bible.NormalizedReference] = bible.get_references(
        "Matthew 19:3 - Mark 6:9"
    )
    # When we get the count of books in the references
    number_of_books: int = bible.count_books(references[0])
    # Then the count is correct
    assert number_of_books == 2
def test_count_books_multiple_books() -> None:
    """A whole-books range counts every book it contains."""
    # Given a list of references
    references: List[bible.NormalizedReference] = bible.get_references(
        "Genesis - Deuteronomy"
    )
    # When we get the count of books in the references
    number_of_books: int = bible.count_books(references[0])
    # Then the count is correct
    assert number_of_books == 5
def test_count_books_multiple_references() -> None:
    """A list of references sums the per-reference book counts."""
    # Given a list of references
    references: List[bible.NormalizedReference] = bible.get_references(
        "Genesis - Deuteronomy, Matthew 19:3 - Mark 6:9, James 1:4-6"
    )
    # When we get the count of books in the references
    number_of_books: int = bible.count_books(references)
    # Then the count is correct
    assert number_of_books == 5 + 2 + 1
def test_count_books_string() -> None:
    """count_books also accepts a raw reference string."""
    # Given a string containing one or more Scripture references
    reference: str = "Genesis - Deuteronomy, Matthew 19:3 - Mark 6:9, James 1:4-6"
    # When we get the count of books in the references
    number_of_books: int = bible.count_books(reference)
    # Then the count is correct
    assert number_of_books == 5 + 2 + 1
def test_count_chapters_single_chapter() -> None:
    """A verse range within one chapter counts as a single chapter."""
    # Given a list of references
    references: List[bible.NormalizedReference] = bible.get_references("James 1:4-6")
    # When we get the count of chapters in the references
    number_of_chapters: int = bible.count_chapters(references[0])
    # Then the count is correct
    assert number_of_chapters == 1
def test_count_chapters_two_chapters() -> None:
    """A two-chapter range counts both chapters."""
    # Given a list of references
    references: List[bible.NormalizedReference] = bible.get_references("James 1-2")
    # When we get the count of chapters in the references
    number_of_chapters: int = bible.count_chapters(references[0])
    # Then the count is correct
    assert number_of_chapters == 2
def test_count_chapters_multiple_chapters() -> None:
    """A whole-book reference counts all of that book's chapters."""
    # Given a list of references
    references: List[bible.NormalizedReference] = bible.get_references("James")
    # When we get the count of chapters in the references
    number_of_chapters: int = bible.count_chapters(references[0])
    # Then the count is correct
    assert number_of_chapters == 5
def test_count_chapters_multiple_books() -> None:
    """A cross-book range counts partial start/end books plus whole middles."""
    # Given a list of references
    references: List[bible.NormalizedReference] = bible.get_references(
        "Matthew 28:1 - Luke 1:10"
    )
    # When we get the count of chapters in the references
    number_of_chapters: int = bible.count_chapters(references[0])
    # Then the count is correct
    assert number_of_chapters == 1 + 16 + 1
def test_count_chapters_multiple_references() -> None:
    """A list of references sums the per-reference chapter counts."""
    # Given a list of references
    references: List[bible.NormalizedReference] = bible.get_references(
        "Genesis, Matthew - Acts"
    )
    # When we get the count of chapters in the references
    number_of_chapters: int = bible.count_chapters(references)
    # Then the count is correct
    assert number_of_chapters == 50 + 28 + 16 + 24 + 21 + 28  # 167 total
def test_count_chapters_string() -> None:
    """count_chapters also accepts a raw reference string."""
    # Given a string containing one or more Scripture references
    reference: str = "Genesis, Matthew - Acts"
    # When we get the count of chapters in the reference
    number_of_chapters: int = bible.count_chapters(reference)
    # Then the count is correct
    assert number_of_chapters == 50 + 28 + 16 + 24 + 21 + 28  # 167 total
def test_count_verses_single_verse() -> None:
    """A single-verse reference counts as one verse."""
    # Given a list of references
    references: List[bible.NormalizedReference] = bible.get_references("Genesis 1:1")
    # When we get the count of verses in the references
    number_of_verses: int = bible.count_verses(references[0])
    # Then the count is correct
    assert number_of_verses == 1
def test_count_verses_multiple_verses() -> None:
    """A verse range within one chapter counts its length inclusively."""
    # Given a list of references
    references: List[bible.NormalizedReference] = bible.get_references("Genesis 1:6-10")
    # When we get the count of verses in the references
    number_of_verses: int = bible.count_verses(references[0])
    # Then the count is correct
    assert number_of_verses == 5
def test_count_verses_multiple_chapters() -> None:
    """A cross-chapter range sums the per-chapter verse counts."""
    # Given a list of references
    references: List[bible.NormalizedReference] = bible.get_references(
        "Matthew 5:3-7:27"
    )
    # When we get the count of verses in the references
    number_of_verses: int = bible.count_verses(references[0])
    # Then the count is correct
    assert number_of_verses == 46 + 34 + 27
def test_count_verses_multiple_books() -> None:
    """A cross-book range sums verse counts over all spanned books."""
    # Given a list of references
    references: List[bible.NormalizedReference] = bible.get_references("1 John - Jude")
    # When we get the count of verses in the references
    number_of_verses: int = bible.count_verses(references[0])
    # Then the count is correct
    assert number_of_verses == (10 + 29 + 24 + 21 + 21) + 13 + 14 + 25
def test_count_verses_multiple_references() -> None:
    """A list of references sums the per-reference verse counts."""
    # Given a list of references
    references: List[bible.NormalizedReference] = bible.get_references(
        "Genesis 1:1; John 3:16; Romans 15:5-7,13"
    )
    # When we get the count of verses in the references
    number_of_verses: int = bible.count_verses(references)
    # Then the count is correct
    assert number_of_verses == 1 + 1 + (3 + 1)
def test_count_verses_string() -> None:
    """count_verses also accepts a raw reference string."""
    # Given a string containing one or more Scripture references
    reference: str = "Genesis 1:1; John 3:16; Romans 15:5-7,13"
    # When we get the count of verses in the reference
    number_of_verses: int = bible.count_verses(reference)
    # Then the count is correct
    assert number_of_verses == 1 + 1 + (3 + 1)
|
alanmitchell/bmon-structure-queries | make_reading_info.py | #!/usr/bin/env python3
import sqlite3
import time
from datetime import datetime
import sys
from pathlib import Path
import pytz
# Connections are pre-declared so the `finally` clause below cannot hit a
# NameError if the very first connect fails.
conn_readings = None
conn_stats = None
try:
    conn_readings = sqlite3.connect('bms_data.sqlite')
    cur_readings = conn_readings.cursor()
    # Delete the old sensor_stats database so it is rebuilt from scratch.
    Path('sensor_stats.sqlite').unlink(missing_ok=True)
    conn_stats = sqlite3.connect('sensor_stats.sqlite')
    cur_stats = conn_stats.cursor()
    # Create the two tables in the Sensor Stats database
    sql = """CREATE TABLE alert_events (
        ts_unix INTEGER,
        ts TEXT,
        sensor_id TEXT,
        message TEXT
    );"""
    cur_stats.execute(sql)
    sql = """CREATE TABLE sensor_info (
        sensor_id TEXT UNIQUE,
        last_report NUMERIC,
        reading_count INTEGER,
        PRIMARY KEY (sensor_id)
    );"""
    cur_stats.execute(sql)
    # Build the Sensor Information table in the new Stats database.
    # This includes how many minutes ago the sensor last reported, and includes
    # the total reading count for the sensor.
    sql = 'SELECT name FROM sqlite_master WHERE type = "table"'
    for row in cur_readings.execute(sql).fetchall():
        sensor_id = row[0]
        if not sensor_id.startswith('_'):
            try:
                # Table names cannot be bound as SQL parameters; they come
                # from sqlite_master and are bracket-quoted.
                rdg_ct = cur_readings.execute(
                    f'SELECT count(*) FROM [{sensor_id}]').fetchone()[0]
                last_ts = cur_readings.execute(
                    f'SELECT max(ts) FROM [{sensor_id}]').fetchone()[0]
                mins_ago = round((time.time() - last_ts) / 60.0, 1)
                # Parameterized INSERT: immune to quoting problems in values.
                cur_stats.execute(
                    'INSERT INTO sensor_info VALUES (?, ?, ?)',
                    (sensor_id, mins_ago, rdg_ct),
                )
            except Exception:
                print(f'Problem with {sensor_id}')
                print(sys.exc_info())
    # Create an expanded Alert Events table in the Stats database, which adds a field
    # that displays timestamp in human-readable Alaska Time.
    tz_ak = pytz.timezone('US/Alaska')   # want result in Alaska Time
    sql = 'SELECT id, ts, message FROM _alert_log'
    for sensor_id, ts_unix, message in cur_readings.execute(sql).fetchall():
        try:
            # Timezone-aware construction (datetime.utcfromtimestamp is
            # deprecated); then convert to Alaska time.
            ts = datetime.fromtimestamp(ts_unix, tz=pytz.utc).astimezone(tz_ak)
            ts_text = ts.strftime('%Y-%m-%d %H:%M:%S')
            # Parameterized INSERT: the old f-string SQL broke whenever the
            # alert message contained an apostrophe.
            cur_stats.execute(
                'INSERT INTO alert_events VALUES (?, ?, ?, ?)',
                (ts_unix, ts_text, sensor_id, message),
            )
        except Exception:
            print(sys.exc_info())
except Exception:
    print(sys.exc_info())
finally:
    if conn_stats is not None:
        conn_stats.commit()
        conn_stats.close()
    if conn_readings is not None:
        conn_readings.close()
|
nansencenter/DAPPER | dapper/mods/Lorenz05/settings01.py | import numpy as np
import dapper.mods as modelling
from dapper.mods.Lorenz05 import Model
from dapper.tools.localization import nd_Id_localization
# Sakov uses K=300000, BurnIn=1000*0.05
# Time sequencing: dt=0.002 with observations every dto=0.05, Ko=400.
tseq = modelling.Chronology(0.002, dto=0.05, Ko=400, Tplot=2, BurnIn=5)
model = Model(b=8)
# Dynamics operator spec: state dimension and step function from the model;
# noise=0 means noise-free dynamics.
Dyn = {
    'M': model.M,
    'model': model.step,
    'noise': 0,
}
# Gaussian initial condition centred on the model's reference state x0.
X0 = modelling.GaussRV(mu=model.x0, C=0.001)
jj = np.arange(model.M)  # obs_inds: every state component is observed
Obs = modelling.partial_Id_Obs(model.M, jj)
Obs['noise'] = 1
# Localization over the 1-d state grid; (6,) presumably the localization
# batch/radius parameter of nd_Id_localization -- confirm its signature.
Obs['localizer'] = nd_Id_localization((model.M,), (6,))
HMM = modelling.HiddenMarkovModel(Dyn, Obs, tseq, X0)
|
nansencenter/DAPPER | dapper/mods/__init__.py | <reponame>nansencenter/DAPPER
"""Models included with DAPPER.
.. include:: ./README.md
"""
import copy as cp
import inspect
from pathlib import Path
import numpy as np
import struct_tools
# Imports used to set up HMMs
import dapper.tools.progressbar as pb
from dapper.dpr_config import rc
from dapper.mods.utils import Id_mat, Id_op
from dapper.tools.chronos import Chronology
from dapper.tools.localization import no_localization
from dapper.tools.matrices import CovMat
from dapper.tools.randvars import RV, GaussRV
from dapper.tools.seeding import set_seed
from .integration import with_recursion, with_rk4
from .utils import Id_Obs, ens_compatible, linspace_int, partial_Id_Obs
class HiddenMarkovModel(struct_tools.NicePrint):
    """Container for a Hidden Markov Model (HMM).
    This should contain the details necessary to run synthetic DA experiments,
    also known as "twin experiment", or OSSE (observing system simulation experiment).
    The synthetic truth and observations may then be obtained by running
    `HiddenMarkovModel.simulate`.
    See scripts in examples for more details.
    Parameters
    ----------
    Dyn: `Operator` or dict
        Operator for the dynamics.
    Obs: `Operator` or dict
        Operator for the observations
    tseq: `dapper.tools.chronos.Chronology`
        Time sequence of the HMM process.
    X0: `dapper.tools.randvars.RV`
        Random distribution of initial condition
    liveplotters: `list`, optional
        A list of tuples. See example use in function `LPs` of `dapper.mods.Lorenz63`.
        - The first element of the tuple determines if the liveplotter
        is shown by default. If `False`, the liveplotter is only shown when
        included among the `liveplots` argument of `assimilate`
        - The second element in the tuple gives the corresponding liveplotter
        function/class.
    sectors: `dict`, optional
        Labelled indices referring to parts of the state vector.
        When defined, field-mean statistics are computed for each sector.
        Example use can be found in `examples/param_estim.py`
        and `dapper/mods/Lorenz96/miyoshi2011.py`
    name: str, optional
        Label for the `HMM`.
    """
    def __init__(self, *args, **kwargs):
        # Valid args/kwargs, along with type and default.
        # Note: it's still ok to write attributes to the HMM following the init.
        attrs = dict(Dyn=(Operator, None),
                     Obs=(Operator, None),
                     tseq=(Chronology, None),
                     X0=(RV, None),
                     liveplotters=(list, []),
                     sectors=(dict, {}),
                     name=(str, HiddenMarkovModel._default_name))
        # Transfer args to kwargs (positional args map to attrs in order).
        for arg, kw in zip(args, attrs):
            assert (kw not in kwargs), "Could not sort out arguments."
            kwargs[kw] = arg
        # Un-abbreviate ("LP" -> "liveplotters", "loc" -> "localizer").
        abbrevs = {"LP": "liveplotters", "loc": "localizer"}
        for k in list(kwargs):
            try:
                full = abbrevs[k]
            except KeyError:
                pass
            else:
                assert (full not in kwargs), "Could not sort out arguments."
                kwargs[full] = kwargs.pop(k)
        # Transfer kwargs to self
        for k, (type_, default) in attrs.items():
            # Get kwargs[k] or default
            if k in kwargs:
                v = kwargs.pop(k)
            elif callable(default):
                # e.g. `name` defaults to a factory inspecting the call stack.
                v = default()
            else:
                v = default
            # Convert dict to type (e.g. a dict spec becomes an Operator).
            if not isinstance(v, (type_, type(None))):
                v = type_(**v)
            # Write
            setattr(self, k, v)
        assert kwargs == {}, f"Arguments {list(kwargs)} is/are invalid."
        # Further defaults: without an explicit localizer, use the identity
        # ("no localization") setup.
        if not hasattr(self.Obs, "localizer"):
            self.Obs.localizer = no_localization(self.Nx, self.Ny)
        # Validation: the obs-noise covariance must be full-rank.
        if self.Obs.noise.C == 0 or self.Obs.noise.C.rk != self.Obs.noise.C.M:
            raise ValueError("Rank-deficient R not supported.")
    # ndim shortcuts
    @property
    def Nx(self): return self.Dyn.M
    @property
    def Ny(self): return self.Obs.M
    printopts = {'ordering': ['Dyn', 'Obs', 'tseq', 'X0'], "indent": 4}
    def simulate(self, desc='Truth & Obs'):
        """Generate synthetic truth and observations."""
        Dyn, Obs, tseq, X0 = self.Dyn, self.Obs, self.tseq, self.X0
        # Init
        xx = np.zeros((tseq.K + 1, Dyn.M))
        yy = np.zeros((tseq.Ko+1, Obs.M))
        x = X0.sample(1)
        xx[0] = x
        # Loop: propagate, add sqrt(dt)-scaled model noise, observe at
        # observation times (ko is not None).
        for k, ko, t, dt in pb.progbar(tseq.ticker, desc):
            x = Dyn(x, t-dt, dt)
            x = x + np.sqrt(dt)*Dyn.noise.sample(1)
            if ko is not None:
                yy[ko] = Obs(x, t) + Obs.noise.sample(1)
            xx[k] = x
        return xx, yy
    def copy(self):
        """Return a deep copy of this HMM."""
        return cp.deepcopy(self)
    @staticmethod
    def _default_name():
        # Default HMM name: the file of the module that instantiated it
        # (stack depth 2 = the caller of this default factory in __init__),
        # made relative to dapper/mods when possible.
        name = inspect.getfile(inspect.stack()[2][0])
        try:
            name = str(Path(name).relative_to(rc.dirs.dapper/'mods'))
        except ValueError:
            name = str(Path(name))
        return name
class Operator(struct_tools.NicePrint):
    """Container for the dynamical and the observational maps.

    Parameters
    ----------
    M: int
        Length of output vectors.
    model: function
        The actual operator.
    noise: RV, optional
        The associated additive noise. The noise can also be a scalar or an
        array, producing `GaussRV(C=noise)`.

    Any remaining keyword arguments are written to the object as attributes.
    """

    printopts = {'ordering': ['M', 'model', 'noise'], "indent": 4}

    def __init__(self, M, model=None, noise=None, **kwargs):
        self.M = M

        # Default to the identity model, with an identity linearization.
        if model is None:
            model = Id_op()
            kwargs['linear'] = lambda x, t, dt: Id_mat(M)
        self.model = model

        # Normalize `noise` into an RV instance.
        if isinstance(noise, RV):
            self.noise = noise
        elif noise is None or np.isscalar(noise):
            # None means "no noise", i.e. a zero-covariance Gaussian.
            self.noise = GaussRV(C=0 if noise is None else noise, M=M)
        else:
            self.noise = GaussRV(C=noise)

        # Stash any remaining keyword arguments as attributes.
        for attr, val in kwargs.items():
            setattr(self, attr, val)

    def __call__(self, *args, **kwargs):
        # Delegate calls straight to the wrapped model function.
        return self.model(*args, **kwargs)
|
nansencenter/DAPPER | tests/test_example_2.py | <reponame>nansencenter/DAPPER<filename>tests/test_example_2.py
"""Stupidly compare the full results table.
Use `pytest -vv tests/test_example_2.py` for a better diff when tests fail.
Possible reasons for failing:
- Random number generation might change on different versions/platforms.
- pytest imports some other Lorenz63/module, which modifies the Forcing param,
or the HMM.tseq params, or something else.
"""
import pytest
import dapper as dpr
import dapper.da_methods as da
statkeys = ["err.rms.a", "err.rms.f", "err.rms.u"]
##############################
# L63
##############################
@pytest.fixture(scope="module")
def L63_table():
    # Run the full L63 experiment suite once per module and cache the
    # formatted averages table, split into lines for per-line comparison.
    xps = L63_gen()
    table = xps.tabulate_avrgs(statkeys, decimals=4, colorize=False)
    return table.splitlines(True)
def L63_gen():
    """Run a fixed suite of DA methods on a short Lorenz-63 twin experiment."""
    from dapper.mods.Lorenz63.sakov2012 import HMM as _HMM
    HMM = _HMM.copy()
    # Shorten the experiment so the test suite stays fast.
    HMM.tseq.BurnIn = 0
    HMM.tseq.Ko = 10
    dpr.set_seed(3000)  # fixed seed => reproducible results table
    # xps
    xps = dpr.xpList()
    xps += da.Climatology()
    xps += da.OptInterp()
    xps += da.Var3D(xB=0.1)
    xps += da.ExtKF(infl=90)
    xps += da.EnKF("Sqrt", N=3, infl=1.30)
    xps += da.EnKF("Sqrt", N=10, infl=1.02, rot=True)
    xps += da.EnKF("PertObs", N=500, infl=0.95, rot=False)
    xps += da.EnKF_N(N=10, rot=True)
    xps += da.iEnKS("Sqrt", N=10, infl=1.02, rot=True)
    xps += da.PartFilt(N=100, reg=2.4, NER=0.3)
    xps += da.PartFilt(N=800, reg=0.9, NER=0.2)
    xps += da.PartFilt(N=4000, reg=0.7, NER=0.05)
    xps += da.PFxN(xN=1000, N=30, Qs=2, NER=0.2)
    xps += da.OptPF(N=100, Qs=2, reg=0.7, NER=0.3)
    xps += da.EnKS("Serial", N=30, Lag=1)
    xps += da.EnRTS("Serial", N=30, DeCorr=0.99)
    # Run
    xps.launch(HMM, False, store_u=True)
    return xps
# Frozen expected output of L63_gen's averages table, compared line-by-line
# in test_tables_L63. Regenerate deliberately if results legitimately change.
# [1:-1] strips the leading/trailing newline of the triple-quoted literal.
L63_old = """
da_method infl upd_a N rot xN reg NER | err.rms.a 1σ err.rms.f 1σ err.rms.u 1σ
-- ----------- ----- ------- ---- ----- ---- --- ---- - ----------------- ----------------- -----------------
0 Climatology | 7.7676 ±1.2464 7.7676 ±1.2464 7.2044 ±2.4251
1 OptInterp | 1.1648 ±0.1744 7.1198 ±1.1388 1.8578 ±0.4848
2 Var3D | 1.0719 ±0.1192 1.7856 ±0.3686 1.2522 ±0.1616
3 ExtKF 90 | 1.1932 ±0.4338 3.0113 ±1.1553 2.0016 ±0.8629
4 EnKF 1.3 Sqrt 3 False | 0.5003 ±0.1105 1.1807 ±0.2613 0.8284 ±0.2526
5 EnKF 1.02 Sqrt 10 True | 0.5773 ±0.0715 1.6134 ±0.4584 0.8839 ±0.1746
6 EnKF 0.95 PertObs 500 False | 0.7422 ±0.3080 2.0616 ±1.0183 1.3171 ±0.4809
7 EnKF_N 1 10 True 1 | 1.6050 ±0.5066 3.6838 ±0.7965 2.3756 ±0.4367
8 iEnKS 1.02 Sqrt 10 True | 0.3927 ±0.2562 1.9267 ±0.7922 0.3172 ±0.1362
9 PartFilt 100 2.4 0.3 | 0.3574 ±0.1387 2.2799 ±1.5794 1.0327 ±0.7116
10 PartFilt 800 0.9 0.2 | 0.5229 ±0.0832 1.3370 ±0.4291 0.8152 ±0.2085
11 PartFilt 4000 0.7 0.05 | 0.2481 ±0.0474 0.6470 ±0.2298 0.3855 ±0.1051
12 PFxN 30 1000 0.2 | 0.5848 ±0.0926 0.9573 ±0.2248 0.7203 ±0.1870
13 OptPF 100 0.7 0.3 | 0.6577 ±0.1388 1.4330 ±0.4286 0.8705 ±0.2341
14 EnKS 1 Serial 30 False | 0.6586 ±0.1577 1.1681 ±0.3682 0.5304 ±0.1671
15 EnRTS 1 Serial 30 False | 0.9215 ±0.3187 2.3817 ±0.9076 0.7596 ±0.4891
"""[1:-1].splitlines(True)
# Example use of pytest-benchmark
# def test_duration(benchmark):
# benchmark(L63_gen)
def test_len63(L63_table):
    """The generated table must have exactly as many lines as the frozen one."""
    assert len(L63_old) == len(L63_table)
@pytest.mark.parametrize(("lineno"), range(len(L63_old)))
def test_tables_L63(L63_table, lineno):
    """Compare generated vs frozen table line-by-line (gives better diffs)."""
    expected = L63_old[lineno].rstrip()
    new = L63_table[lineno].rstrip()
    assert new == expected
##############################
# L96
##############################
@pytest.fixture(scope="module")
def L96_table():
    # Run the Lorenz-96 experiment suite once per module and cache the
    # formatted averages table, split into lines for per-line comparison.
    import dapper.mods.Lorenz96 as model
    from dapper.mods.Lorenz96.sakov2008 import HMM as _HMM
    model.Force = 8.0  # undo pinheiro2019
    HMM = _HMM.copy()
    # Shorten the experiment so the test suite stays fast.
    HMM.tseq.BurnIn = 0
    HMM.tseq.Ko = 10
    dpr.set_seed(3000)  # fixed seed => reproducible results table
    # xps
    xps = dpr.xpList()
    xps += da.Climatology()
    xps += da.OptInterp()
    xps += da.Var3D(xB=0.02)
    xps += da.ExtKF(infl=6)
    xps += da.EnKF("PertObs", N=40, infl=1.06)
    xps += da.EnKF("Sqrt", N=28, infl=1.02, rot=True)
    xps += da.EnKF_N(N=24, rot=True)
    xps += da.EnKF_N(N=24, rot=True, xN=2)
    xps += da.iEnKS("Sqrt", N=40, infl=1.01, rot=True)
    xps += da.LETKF(N=7, rot=True, infl=1.04, loc_rad=4)
    xps += da.SL_EAKF(N=7, rot=True, infl=1.07, loc_rad=6)
    xps.launch(HMM, store_u=True)
    table = xps.tabulate_avrgs(statkeys, decimals=4, colorize=False)
    return table.splitlines(True)
# Frozen expected output of the L96 averages table, compared line-by-line
# in test_tables_L96. Regenerate deliberately if results legitimately change.
# [1:-1] strips the leading/trailing newline of the triple-quoted literal.
L96_old = """
da_method infl upd_a N rot xN loc_rad | err.rms.a 1σ err.rms.f 1σ err.rms.u 1σ
-- ----------- ---- ------- -- ----- -- ------- - ----------------- ----------------- -----------------
0 Climatology | 0.8334 ±0.2326 0.8334 ±0.2326 0.8334 ±0.2326
1 OptInterp | 0.1328 ±0.0271 0.8345 ±0.2330 0.1328 ±0.0271
2 Var3D | 0.1009 ±0.0080 0.0874 ±0.0085 0.1009 ±0.0080
3 ExtKF 6 | 0.0269 ±0.0010 0.0269 ±0.0012 0.0269 ±0.0010
4 EnKF 1.06 PertObs 40 False | 0.0318 ±0.0018 0.0317 ±0.0016 0.0318 ±0.0018
5 EnKF 1.02 Sqrt 28 True | 0.0375 ±0.0018 0.0375 ±0.0019 0.0375 ±0.0018
6 EnKF_N 1 24 True 1 | 0.0311 ±0.0009 0.0310 ±0.0010 0.0311 ±0.0009
7 EnKF_N 1 24 True 2 | 0.0304 ±0.0012 0.0304 ±0.0013 0.0304 ±0.0012
8 iEnKS 1.01 Sqrt 40 True | 0.0254 ±0.0009 0.0255 ±0.0009 0.0254 ±0.0008
9 LETKF 1.04 7 True 1 4 | 0.0319 ±0.0013 0.0317 ±0.0013 0.0319 ±0.0013
10 SL_EAKF 1.07 7 True 6 | 0.0260 ±0.0017 0.0256 ±0.0014 0.0260 ±0.0017
"""[1:-1].splitlines(True)
def test_len96(L96_table):
    """The generated table must have exactly as many lines as the frozen one."""
    assert len(L96_old) == len(L96_table)
@pytest.mark.parametrize(("lineno"), range(len(L96_old)))
def test_tables_L96(L96_table, lineno):
    """Compare generated vs frozen table line-by-line (gives better diffs)."""
    expected = L96_old[lineno].rstrip()
    new = L96_table[lineno].rstrip()
    assert new == expected
|
nansencenter/DAPPER | dapper/tools/localization.py | <filename>dapper/tools/localization.py
"""Localization tools, including distance and tapering comps.
A good introduction to localization:
Sakov (2011), Computational Geosciences:
'Relation between two common localisation methods for the EnKF'.
"""
import itertools
import numpy as np
def pairwise_distances(A, B=None, domain=None):
    """Euclidian distance (not squared) between pts. in `A` and `B`.

    `A` is an array of shape (nPoints, nDims); `B` likewise (defaults to `A`,
    and its number of points may differ). If `domain` is given, it is a tuple
    of edge lengths of a **periodic** hyper-rectangle spanning [0, domain[i]]
    along dimension i (behaviour is undefined for points outside the domain).

    Returns an array of shape `(nPointsA, nPointsB)`.

    >>> pairwise_distances(np.arange(4)[:, None], [[2]]).T
    array([[2., 1., 0., 1.]])

    NB: a 1-dim input array is interpreted as a single point:

    >>> pairwise_distances(np.arange(4))
    array([[0.]])
    """
    if B is None:
        B = A

    A = np.atleast_2d(A)
    B = np.atleast_2d(B)
    nA, dimsA = A.shape
    nB, dimsB = B.shape
    assert dimsA == dimsB, "The last axis of A and B must have equal length."

    # Pairwise difference vectors, shape (nA, nB, nDims).
    diffs = A[:, None] - B

    # Under periodicity, measure along the shorter way around each axis.
    if domain:
        edges = np.reshape(domain, (1, 1, -1))
        diffs = np.abs(diffs)
        diffs = np.minimum(diffs, edges - diffs)

    # Euclidean norm along the last axis.
    dists = np.sqrt(np.einsum('abd,abd->ab', diffs, diffs))
    return dists.reshape(nA, nB)
def dist2coeff(dists, radius, tag=None):
    """Compute tapering coefficients corresponding to a distances.

    NB: The radius is internally adjusted such that, independently of 'tag',
    `coeff==np.exp(-0.5)` when `distance==radius`.

    This is largely based on Sakov's enkf-matlab code. Two bugs have here been fixed:
    - The constants were slightly wrong, as noted in comments below.
    - It forgot to take sqrt() of coeffs when applying them through 'local analysis'.
    """
    if tag is None:
        tag = 'GC'

    c = np.zeros(dists.shape)

    if tag == 'Gauss':
        c = np.exp(-0.5 * (dists / radius) ** 2)
    elif tag == 'Exp':
        c = np.exp(-0.5 * (dists / radius) ** 3)
    elif tag == 'Cubic':
        R = 1.87 * radius  # Sakov: 1.8676
        within = dists <= R
        c[within] = (1 - (dists[within] / R) ** 3) ** 3
    elif tag == 'Quadro':
        R = 1.64 * radius  # Sakov: 1.7080
        within = dists <= R
        c[within] = (1 - (dists[within] / R) ** 4) ** 4
    elif tag == 'GC':  # eqn 4.10 of Gaspari-Cohn'99, or eqn 25 of Sakov2011relation
        R = 1.82 * radius  # =np.sqrt(10/3). Sakov: 1.7386
        # Inner segment: dists in [0, R].
        near = dists <= R
        u = dists[near] / R
        u2, u3 = u ** 2, u ** 3
        c[near] = 1 + u2 * (- u3 / 4 + u2 / 2) + u3 * (5 / 8) - u2 * (5 / 3)
        # Outer segment: dists in (R, 2R]; zero beyond 2R.
        far = np.logical_and(R < dists, dists <= 2 * R)
        v = dists[far] / R
        v2, v3 = v ** 2, v ** 3
        c[far] = v2 * (v3 / 12 - v2 / 2) + v3 * (5 / 8) \
            + v2 * (5 / 3) - v * 5 + 4 - (2 / 3) / v
    elif tag == 'Step':
        within = dists <= radius
        c[within] = 1
    else:
        raise KeyError('No such coeff function.')

    return c
def inds_and_coeffs(dists, radius, cutoff=1e-3, tag=None):
    """Compute indices and coefficients of localization.

    - inds : the indices of pts that are "close to" centre.
    - coeffs : the corresponding tapering coefficients.
    """
    weights = dist2coeff(dists, radius, tag)
    # Truncate: keep only points whose coefficient exceeds the cutoff.
    keep = np.arange(len(dists))[weights > cutoff]
    return keep, weights[keep]
def localization_setup(y2x_distances, batches):
    """Wrap a time-dependent obs-to-state distance function into a localizer.
    `y2x_distances(t)` must return the obs-by-state distance matrix at time t
    (rows indexed by observation, as used in `state_taperer` below);
    `batches` is the fixed partitioning of the state into local domains.
    """
    def localization_now(radius, direction, t, tag=None):
        """Provide localization setup for time t."""
        y2x = y2x_distances(t)
        if direction == 'x2y':
            def obs_taperer(batch):
                # Don't use `batch = batches[iBatch]`
                # (with iBatch as this function's input).
                # This would slow down multiproc.,
                # coz batches gets copied to each process.
                x2y = y2x.T
                # Distance from this state batch (averaged over its members)
                # to every observation.
                dists = x2y[batch].mean(axis=0)
                return inds_and_coeffs(dists, radius, tag=tag)
            return batches, obs_taperer
        elif direction == 'y2x':
            def state_taperer(obs_idx):
                return inds_and_coeffs(y2x[obs_idx], radius, tag=tag)
            return state_taperer
    return localization_now
def no_localization(Nx, Ny):
    # Trivial localization: every point is "close", all coefficients are 1.
    def taper_obs(batch):
        return np.arange(Ny), np.ones(Ny)
    def taper_state(obs_idx):
        return np.arange(Nx), np.ones(Nx)
    def localization_now(radius, direction, t, tag=None):
        """Returns all of the indices, with all tapering coeffs. set to 1.
        Used to validate local DA methods, eg. `LETKF<==>EnKF('Sqrt')`.
        """
        assert radius == np.inf, "Localization functions not specified"
        if direction == 'x2y':
            return [np.arange(Nx)], taper_obs
        elif direction == 'y2x':
            return taper_state
    return localization_now
def rectangular_partitioning(shape, steps, do_ind=True):
    """Generate rectangular batches over an N-D grid.

    Parameters
    ----------
    shape: (len(grid[dim]) for dim in range(ndim))
    steps: (step_len[dim] for dim in range(ndim))

    Returns
    -------
    A list of batches. With `do_ind` each batch is an array of flat
    ("C"-order) indices; otherwise it is a list of per-dimension
    coordinate arrays.
    """
    assert len(shape) == len(steps)
    # Number of partitions along each dimension (average local lengths).
    counts = [round(n / d) for n, d in zip(shape, steps)]
    # Marginal grid partitions; array_split handles non-divisibility.
    marginals = [np.array_split(np.arange(n), c)
                 for n, c in zip(shape, counts)]
    batches = []
    for edges in itertools.product(*marginals):
        # The 'indexing' argument is inconsequential here:
        # it merely changes each batch's internal ordering.
        grids = np.meshgrid(*edges, indexing='ij')
        batches.append([g.flatten() for g in grids])
    if do_ind:
        # Convert per-dimension coordinates to flat (C-order) indices.
        batches = [np.ravel_multi_index(b, shape) for b in batches]
    return batches
# NB: Don't try to put the time-dependence of obs_inds inside obs_taperer().
# That would require calling ind2sub len(batches) times per analysis,
# and the result cannot be easily cached, because of multiprocessing.
def safe_eval(fun, t):
    # Evaluate `fun` at time `t` if it is callable; otherwise treat `fun`
    # as a constant (time-independent) value and return it unchanged.
    try:
        return fun(t)
    except TypeError:
        # NOTE(review): this also swallows TypeErrors raised *inside*
        # fun(t); presumably the callables passed here are simple enough
        # that this never happens -- confirm.
        return fun
# NB: Why is the 'order' argument not supported by this module? Because:
# 1) Assuming only order (orientation) 'C' simplifies the module's code.
# 2) It's not necessary, because the module only communicates to *exterior* via indices
# [of what assumes to be X.flatten(order='C')], and not coordinates!
# Thus, the only adaptation necessary if the order is 'F' is to reverse
# the shape parameter passed to these functions (example: mods/QG/sakov2008).
def nd_Id_localization(shape,
                       batch_shape=None,
                       obs_inds=None,
                       periodic=True):
    """Localize Id (direct) point obs of an N-D, homogeneous, rectangular domain.

    Parameters
    ----------
    shape: grid lengths, one per dimension (C-ordering assumed).
    batch_shape: side lengths of the state batches. Default: single points.
    obs_inds: flat indices of observed grid points, or a function of time
        returning them. Default: all points observed.
    periodic: whether distances wrap around the domain edges.
    """
    M = np.prod(shape)
    if batch_shape is None:
        batch_shape = (1,)*len(shape)
    if obs_inds is None:
        obs_inds = np.arange(M)
    def ind2sub(ind):
        # Flat (C-order) index -> array of grid coordinates, shape (..., ndim).
        return np.asarray(np.unravel_index(ind, shape)).T
    batches = rectangular_partitioning(shape, batch_shape)
    state_coord = ind2sub(np.arange(M))
    def y2x_distances(t):
        # obs_inds may be time-dependent (callable) or a fixed array.
        obs_coord = ind2sub(safe_eval(obs_inds, t))
        return pairwise_distances(obs_coord, state_coord, shape if periodic else None)
    return localization_setup(y2x_distances, batches)
|
nansencenter/DAPPER | dapper/xp_launch.py | <filename>dapper/xp_launch.py
"""Tools (notably `xpList`) for setup and running of experiments (known as `xp`s).
See `dapper.da_methods.da_method` for the strict definition of `xp`s.
"""
import copy
import dataclasses as dcs
import os
import re
import shutil
import sys
from functools import wraps
from pathlib import Path
from textwrap import dedent
import dill
import numpy as np
import struct_tools
import tabulate as _tabulate
from tabulate import tabulate
from tqdm.auto import tqdm
import dapper.stats
import dapper.tools.progressbar as pb
from dapper.tools.colors import stripe
from dapper.tools.datafiles import create_run_dir
from dapper.tools.remote.uplink import submit_job_GCP
from dapper.tools.seeding import set_seed
from dapper.tools.viz import collapse_str
_tabulate.MIN_PADDING = 0
def seed_and_simulate(HMM, xp):
    """Default experiment setup (sets seed and simulates truth and obs).
    Used by `xpList.launch` via `run_experiment`.
    Parameters
    ----------
    HMM: HiddenMarkovModel
        Container defining the system.
    xp: object
        Type: a `dapper.da_methods.da_method`-decorated class.
        .. caution:: `xp.seed` should be set (and `int`).
        Without `xp.seed` the seed does not get set,
        and different `xp`s will use different seeds
        (unless you do some funky hacking).
        Reproducibility for a script as a whole can still be achieved
        by setting the seed at the outset of the script.
        To avoid even that, set `xp.seed` to `None` or `"clock"`.
    Returns
    -------
    tuple (HMM, xx, yy)
        The HMM, and the simulated truth and observations.
    """
    # getattr default False => set_seed is a no-op when xp has no seed.
    set_seed(getattr(xp, 'seed', False))
    xx, yy = HMM.simulate()
    return HMM, xx, yy
def run_experiment(xp, label, savedir, HMM, setup=seed_and_simulate, free=True,
                   statkeys=False, fail_gently=False, **stat_kwargs):
    """Used by `xpList.launch` to run each single (DA) experiment ("xp").
    This involves steps similar to `examples/basic_1.py`, i.e.:
    - `setup`                    : Initialize experiment.
    - `xp.assimilate`            : run DA, pass on exception if fail_gently
    - `xp.stats.average_in_time` : result averaging
    - `xp.avrgs.tabulate`        : result printing
    - `dill.dump`                : result storage
    Parameters
    ----------
    xp: object
        Type: a `dapper.da_methods.da_method`-decorated class.
    label: str
        Name attached to progressbar during assimilation.
    savedir: str
        Path of folder wherein to store the experiment data.
    HMM: HiddenMarkovModel
        Container defining the system.
    setup: function
        This function must take two arguments: `HMM` and `xp`,
        and return the `HMM` to be used by the DA methods
        (typically the same as the input `HMM`, but could be modified),
        and the (typically synthetic) truth and obs time series.
        This gives you the ability to customize almost any aspect of the
        individual experiments within a batch launch of experiments.
        Typically you will grab one or more parameter values
        stored in the `xp` (see `dapper.da_methods.da_method`) and act on them,
        or set them in some other object that impacts the experiment.
        Thus, by generating a new `xp` for each such parameter value you can
        investigate the impact/sensitivity of the results to this parameter.
        Examples include:
        - Setting the seed. See the default `setup`, namely `seed_and_simulate`,
          for how this is done.
        - Setting some aspect of the `HMM` such as the observation noise,
          or the interval between observations.
        - Setting some parameter of the model (not otherwise detailed in the `HMM`).
          For example, the `Force` parameter of `dapper.mods.Lorenz96`, as done in
          see `examples/basic_3`.
        - Using a different `HMM` entirely for the truth/obs (`xx`/`yy`) generation,
          than the one that will be used by the DA. Or loading the truth/obs
          time series from file. In both cases, you might also have to do some
          cropping or slicing of `xx` and `yy` before returning them.
    free: bool
        Whether (or not) to `del xp.stats` after the experiment is done,
        so as to free up memory and/or not save this data
        (just keeping `xp.avrgs`).
    statkeys: list
        A list of names (possibly in the form of abbreviations) of the
        statistical averages that should be printed immediately afther
        this xp.
    fail_gently: bool
        Whether (or not) to propagate exceptions.
    """
    # We should copy HMM so as not to cause any nasty surprises such as
    # expecting param=1 when param=2 (coz it's not been reset).
    # NB: won't copy implicitly ref'd obj's (like L96's core). => bug w/ MP?
    hmm = copy.deepcopy(HMM)
    # GENERATE TRUTH/OBS
    hmm, xx, yy = setup(hmm, xp)
    # ASSIMILATE
    xp.assimilate(hmm, xx, yy, label, fail_gently=fail_gently, **stat_kwargs)
    # Clear references to mpl (for pickling purposes)
    if hasattr(xp.stats, "LP_instance"):
        del xp.stats.LP_instance
    # AVERAGE
    xp.stats.average_in_time(free=free)
    # PRINT
    if statkeys:
        # statkeys=True means "use the default selection" (empty tuple).
        statkeys = () if statkeys is True else statkeys
        print(xp.avrgs.tabulate(statkeys))
    # SAVE
    if savedir:
        with open(Path(savedir)/"xp", "wb") as FILE:
            dill.dump({'xp': xp}, FILE)
class xpList(list):
    """Subclass of `list` specialized for experiment ("xp") objects.
    Main use: administrate experiment launches.
    Modifications to `list`:
    - `xpList.append` supports `unique` to enable lazy `xp` declaration.
    - `__iadd__` (`+=`) supports adding single `xp`s.
      this is hackey, but convenience is king.
    - `__getitem__` supports lists, similar to `np.ndarray`
    - `__repr__`: prints the list as rows of a table,
      where the columns represent attributes whose value is not shared among all `xp`s.
      Refer to `xpList.prep_table` for more information.
    Add-ons:
    - `xpList.launch`: run the experiments in current list.
    - `xpList.prep_table`: find all attributes of the `xp`s in the list;
      classify as distinct, redundant, or common.
    - `xpList.gen_names`: use `xpList.prep_table` to generate
      a short & unique name for each `xp` in the list.
    - `xpList.tabulate_avrgs`: tabulate time-averaged results.
    - `xpList.inds` to search by kw-attrs.
    Parameters
    ----------
    args: entries
        Nothing, or a list of `xp`s.
    unique: bool
        Duplicates won't get appended. Makes `append` (and `__iadd__`) relatively slow.
        Use `extend` or `__add__` or `combinator` to bypass this validation.
    Also see
    --------
    - Examples: `examples/basic_2`, `examples/basic_3`
    - `dapper.xp_process.xpSpace`, which is used for experient result **presentation**,
      as opposed to this class (`xpList`), which handles **launching** experiments.
    """
    def __init__(self, *args, unique=False):
        self.unique = unique
        super().__init__(*args)
    def __iadd__(self, xp):
        # Allow `xps += single_xp` as well as `xps += iterable_of_xps`.
        if not hasattr(xp, '__iter__'):
            xp = [xp]
        for item in xp:
            self.append(item)
        return self
    def append(self, xp):
        """Append **if** not `self.unique` & present."""
        if not (self.unique and xp in self):
            super().append(xp)
    def __getitem__(self, keys):
        """Indexing, also by a list"""
        try:
            B = [self[k] for k in keys]    # if keys is list
        except TypeError:
            B = super().__getitem__(keys)  # if keys is int, slice
        if hasattr(B, '__len__'):
            B = xpList(B)  # Cast
        return B
    def inds(self, strict=True, missingval="NONSENSE", **kws):
        """Find (all) indices of `xps` whose attributes match kws.
        If strict, then `xp`s lacking a requested attr. will not match,
        unless the `missingval` matches the required value.
        """
        def match(xp):
            # When not strict, a missing attribute defaults to the
            # requested value itself, i.e. it matches.
            def missing(v): return missingval if strict else v
            matches = [getattr(xp, k, missing(v)) == v for k, v in kws.items()]
            return all(matches)
        return [i for i, xp in enumerate(self) if match(xp)]
    @property
    def da_methods(self):
        """List `da_method` attributes in this list."""
        return [xp.da_method for xp in self]
    def prep_table(self, nomerge=()):
        """Classify all attrs. of all `xp`s as `distinct`, `redundant`, or `common`.
        An attribute of the `xp`s is inserted in one of the 3 dicts as follows:
        The attribute names become dict keys. If the values of an attribute
        (collected from all of the `xp`s) are all __equal__, then the attribute
        is inserted in `common`, but only with **a single value**.
        If they are all the same **or missing**, then it is inserted in `redundant`
        **with a single value**. Otherwise, it is inserted in `distinct`,
        with **its full list of values** (filling with `None` where the attribute
        was missing in the corresponding `xp`).
        The attrs in `distinct` are sufficient to (but not generally necessary,
        since there might exist a subset of attributes that) uniquely identify each `xp`
        in the list (the `redundant` and `common` can be "squeezed" out).
        Thus, a table of the `xp`s does not need to list all of the attributes.
        This function also does the heavy lifting for `xpSpace.squeeze`.
        Parameters
        ----------
        nomerge: list
            Attributes that should always be seen as distinct.
        """
        def _aggregate_keys():
            """Aggregate keys from all `xp`"""
            if len(self) == 0:
                return []
            # Start with da_method
            aggregate = ['da_method']
            # Aggregate all other keys
            for xp in self:
                # Get dataclass fields
                try:
                    dc_fields = dcs.fields(xp.__class__)
                    dc_names = [F.name for F in dc_fields]
                    keys = xp.__dict__.keys()
                except TypeError:
                    # Assume namedtuple
                    dc_names = []
                    keys = xp._fields
                # For all potential keys:
                for k in keys:
                    # If not already present:
                    if k not in aggregate:
                        # If dataclass, check repr:
                        if k in dc_names:
                            if dc_fields[dc_names.index(k)].repr:
                                aggregate.append(k)
                        # Else, just append
                        else:
                            aggregate.append(k)
            # Remove unwanted
            excluded = [re.compile('^_'), 'avrgs', 'stats', 'HMM', 'duration']
            aggregate = struct_tools.complement(aggregate, excluded)
            return aggregate
        def _getattr_safe(xp, key):
            # Don't use None, to avoid mixing with actual None's
            # TODO 4: use an object yet more likely to be unique.
            missing = "N/A"
            a = getattr(xp, key, missing)
            # Replace ndarray by its id, since o/w it will
            # complain that you must use all().
            # Alternative: replace all == (and !=) below by "is".
            #     Tabulation with multi-line params actually works,
            #     (though it's still likely to take up too much space,
            #     unless we set np.printoptions...).
            #     However, then python (since 3.8) will complain about
            #     comparison to literal.
            if isinstance(a, np.ndarray):
                shorten = 6
                a = f"arr(<id {id(a)//10**shorten}>)"
            # TODO 3: leave formatting to sub() below?
            # TODO 4: do similar formatting for other non-trivial params?
            # TODO 4: document alternative way to specify non-trivial params:
            #         use key to be looked up in some globally accessible dct.
            #         Advantage: names are meaningful, rather than ids.
            return a
        def replace_NA_by_None(vals):
            """Supports different types of `vals`."""
            def sub(v):
                return None if v == "N/A" else v
            if isinstance(vals, str):
                vals = sub(vals)
            else:
                try:
                    vals = [sub(v) for v in vals]
                except TypeError:
                    vals = sub(vals)
            return vals
        # Main: classify each attribute by comparing its values across xps.
        distinct, redundant, common = {}, {}, {}
        for key in _aggregate_keys():
            vals = [_getattr_safe(xp, key) for xp in self]
            if struct_tools.flexcomp(key, *nomerge):
                dct, vals = distinct, vals
            elif all(vals[0] == v for v in vals):
                dct, vals = common, vals[0]
            else:
                nonNA = next(v for v in vals if "N/A" != v)
                if all(v == "N/A" or v == nonNA for v in vals):
                    dct, vals = redundant, nonNA
                else:
                    dct, vals = distinct, vals
            dct[key] = replace_NA_by_None(vals)
        return distinct, redundant, common
    def __repr__(self):
        distinct, redundant, common = self.prep_table()
        s = '<xpList> of length %d with attributes:\n' % len(self)
        s += tabulate(distinct, headers="keys", showindex=True)
        s += "\nOther attributes:\n"
        s += str(struct_tools.AlignedDict({**redundant, **common}))
        return s
    def gen_names(self, abbrev=6, tab=False):
        """Similiar to `self.__repr__()`, but:
        - returns *list* of names
        - tabulation is optional
        - attaches (abbreviated) labels to each attribute
        """
        distinct, redundant, common = self.prep_table(nomerge=["da_method"])
        labels = distinct.keys()
        values = distinct.values()
        # Label abbreviation
        labels = [collapse_str(k, abbrev) for k in labels]
        # Make label columns: insert None or lbl+":", depending on value
        def column(lbl, vals):
            return [None if v is None else lbl+":" for v in vals]
        labels = [column(lbl, vals) for lbl, vals in zip(labels, values)]
        # Interlace labels and values
        table = [x for (a, b) in zip(labels, values) for x in (a, b)]
        # Rm da_method label (but keep value)
        table.pop(0)
        # Transpose
        table = list(map(list, zip(*table)))
        # Tabulate
        table = tabulate(table, tablefmt="plain")
        # Rm space between lbls/vals
        table = re.sub(': +', ':', table)
        # Rm alignment
        if not tab:
            table = re.sub(r' +', r' ', table)
        return table.splitlines()
    @wraps(dapper.stats.tabulate_avrgs)
    def tabulate_avrgs(self, *args, colorize=True, **kwargs):
        distinct, redundant, common = self.prep_table()
        averages = dapper.stats.tabulate_avrgs([C.avrgs for C in self], *args, **kwargs)
        columns = {**distinct, '|': ['|']*len(self), **averages}  # merge
        table = tabulate(columns, headers="keys", showindex=True).replace('␣', ' ')
        if colorize:
            table = stripe(table)
        return table
    def launch(self, HMM, save_as="noname", mp=False, fail_gently=None, **kwargs):
        """Essentially: `for xp in self: run_experiment(xp, ..., **kwargs)`.
        See `run_experiment` for documentation on the `kwargs` and `fail_gently`.
        See `dapper.tools.datafiles.create_run_dir` for documentation `save_as`.
        Depending on `mp`, `run_experiment` is delegated as follows:
        - `False`: caller process (no parallelisation)
        - `True` or `"MP"` or an `int`: multiprocessing on this host
        - `"GCP"` or `"Google"` or `dict(server="GCP")`: the DAPPER server
          (Google Cloud Computing with HTCondor).
          - Specify a list of files as `mp["files"]` to include them
            in working directory of the server workers.
          - In order to use absolute paths, the list should cosist
            of tuples, where the first item is relative to the second
            (which is an absolute path). The root is then not included
            in the working directory of the server.
          - If this dict field is empty, then all python files
            in `sys.path[0]` are uploaded.
        See `examples/basic_2.py` and `examples/basic_3.py` for example use.
        """
        # Parse mp option
        if not mp:
            mp = dict()
        elif mp in [True, "MP"]:
            mp = dict(server="local")
        elif isinstance(mp, int):
            mp = dict(server="local", NPROC=mp)
        elif mp in ["GCP", "Google"]:
            mp = dict(server="GCP", files=[], code="")
        # Parse fail_gently
        if fail_gently is None:
            if mp and mp["server"] == "GCP":
                fail_gently = False
                # coz cloud processing is entirely de-coupled anyways
            else:
                fail_gently = True
                # True unless otherwise requested
        kwargs["fail_gently"] = fail_gently
        # Bundle HMM with kwargs
        kwargs['HMM'] = HMM
        # Data path
        save_as, xpi_dir = create_run_dir(save_as, mp)
        # No parallelization
        if not mp:
            for ixp, (xp, label) in enumerate(zip(self, self.gen_names())):
                run_experiment(xp, label, xpi_dir(ixp), **kwargs)
        # Local multiprocessing
        elif mp["server"].lower() == "local":
            def run_with_fixed_args(arg):
                xp, ixp = arg
                run_experiment(xp, None, xpi_dir(ixp), **kwargs)
            args = zip(self, range(len(self)))
            # Progressbars in workers would interleave; disable them.
            pb.disable_progbar = True
            pb.disable_user_interaction = True
            NPROC = mp.get("NPROC", None)  # None => mp.cpu_count()
            from dapper.tools.multiproc import mpd  # will fail on GCP
            with mpd.Pool(NPROC) as pool:
                list(tqdm(
                    pool.imap(
                        run_with_fixed_args, args),
                    total=len(self),
                    desc="Parallel experim's",
                    smoothing=0.1))
            pb.disable_progbar = False
            pb.disable_user_interaction = False
        # Google cloud platform, multiprocessing
        elif mp["server"] == "GCP":
            for ixp, xp in enumerate(self):
                with open(xpi_dir(ixp)/"xp.var", "wb") as f:
                    dill.dump(dict(xp=xp), f)
            with open(save_as/"xp.com", "wb") as f:
                dill.dump(kwargs, f)
            # mkdir extra_files
            extra_files = save_as / "extra_files"
            os.mkdir(extra_files)
            # Default extra_files: .py files in sys.path[0] (main script's path)
            if not mp.get("files", []):
                ff = os.listdir(sys.path[0])
                mp["files"] = [f for f in ff if f.endswith(".py")]
            # Copy into extra_files
            for f in mp["files"]:
                if isinstance(f, (str, Path)):
                    # Example: f = "A.py"
                    path = Path(sys.path[0]) / f
                    dst = f
                else:  # instance of tuple(path, root)
                    # Example: f = ("~/E/G/A.py", "G")
                    path, root = f
                    dst = Path(path).relative_to(root)
                dst = extra_files / dst
                os.makedirs(dst.parent, exist_ok=True)
                try:
                    shutil.copytree(path, dst)  # dir -r
                except OSError:
                    shutil.copy2(path, dst)  # file
            # Loads PWD/xp_{var,com} and calls run_experiment()
            with open(extra_files/"load_and_run.py", "w") as f:
                f.write(dedent("""\
                import dill
                from dapper.xp_launch import run_experiment
                # Load
                with open("xp.com", "rb") as f: com = dill.load(f)
                with open("xp.var", "rb") as f: var = dill.load(f)
                # User-defined code
                %s
                # Run
                try:
                    result = run_experiment(var['xp'], None, ".", **com)
                except SystemError as err:
                    if err.args and "opcode" in err.args[0]:
                        err.args += ("It seems your local python version"
                                     " is incompatible with that of the cluster.",)
                    raise
                """) % dedent(mp["code"]))
            with open(extra_files/"dpr_config.yaml", "w") as f:
                f.write("\n".join([
                    "data_root: '$cwd'",
                    "liveplotting: no",
                    "welcome_message: no"]))
            submit_job_GCP(save_as)
        return save_as
def combinator(param_dict, **glob_dict):
    """Mass creation of `xp`'s by combining the value lists in the `param_dict`.
    Returns a function (`for_params`) that creates all possible combinations
    of parameters (from their value list) for a given `dapper.da_methods.da_method`.
    This is a good deal more efficient than relying on `xpList`'s `unique`. Parameters
    - not found among the args of the given DA method are ignored by `for_params`.
    - specified as keywords to the `for_params` fix the value
      preventing using the corresponding (if any) value list in the `param_dict`.
    .. caution::
        Beware! If, eg., `infl` or `rot` are in `param_dict`, aimed at the `EnKF`,
        but you forget that they are also attributes some method where you don't
        actually want to use them (eg. `SVGDF`),
        then you'll create many more than you intend.
    """
    def for_params(method, **fixed_params):
        dc_fields = [f.name for f in dcs.fields(method)]
        # Keep only params the method actually declares; drop those fixed
        # by the caller; glob_dict entries come first (lower precedence).
        params = struct_tools.intersect(param_dict, dc_fields)
        params = struct_tools.complement(params, fixed_params)
        params = {**glob_dict, **params}  # glob_dict 1st
        def xp1(dct):
            # Build a single xp; glob_dict values are set as plain
            # attributes (they are not dataclass fields of the method).
            xp = method(**struct_tools.intersect(dct, dc_fields), **fixed_params)
            for key, v in struct_tools.intersect(dct, glob_dict).items():
                setattr(xp, key, v)
            return xp
        return [xp1(dct) for dct in struct_tools.prodct(params)]
    return for_params
|
CherryYin/ml_algorithm | naive_bayes/bayes_longxinchen.py | #! /usr/bin/env python
# coding=utf-8
# Authors: Hanxiaoyang <<EMAIL>>
# simple naive bayes classifier to classify sohu news topic
# data can be downloaded in http://www.sogou.com/labs/dl/cs.html
# 代码功能:简易朴素贝叶斯分类器,用于对搜狐新闻主题分类,数据可在http://www.sogou.com/labs/dl/cs.html下载(精简版)
# 详细说明参见博客http://blog.csdn.net/han_xiaoyang/article/details/50629608
# 作者:寒小阳<<EMAIL>>
import os,sys, math, random, collections
def shuffle(inFile):
    '''
    Simple shuffling, used to split the corpus into training and test sets.
    '''
    textLines = [line.strip() for line in open(inFile)]
    print "正在准备训练和测试数据,请稍后..."
    random.shuffle(textLines)
    num = len(textLines)
    # 60/40 train/test split (integer division under Python 2).
    trainText = textLines[:3 * num / 5]
    testText = textLines[3 * num / 5:]
    print "准备训练和测试数据准备完毕,下一步..."
    return trainText, testText
# 总共有9种新闻类别,我们给每个类别一个编号
labels = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']
def label2id(label):
    '''
    Map a category letter ('A'..'I') to its index in the module-level
    `labels` list. Raises for unknown labels.
    '''
    for i in xrange(len(labels)):
        if label == labels[i]:
            return i
    # BUGFIX: typo "lable" -> "label" in the error message.
    raise Exception('Error label %s' % (label))
def doc_dict():
    '''
    Build a zero vector with one slot per class label.
    '''
    return [0] * len(labels)
def mutual_info(N, Nij, Ni_, N_j):
    '''
    One term of the (pointwise) mutual information, in log base 2.
    The +1 on Nij guards against log(0) for empty contingency cells.
    '''
    weight = Nij * 1.0 / N
    ratio = N * (Nij + 1) * 1.0 / (Ni_ * N_j)
    return weight * math.log(ratio) / math.log(2)
def count_for_cates(trainText, featureFile):
    '''
    Scan the corpus, counting each word's occurrences per class and the
    per-class document counts; select high-MI feature words and write
    them (plus the document counts) to `featureFile`.
    '''
    docCount = [0] * len(labels)
    wordCount = dict() #collections.defaultdict(list)
    # Scan the lines and count
    for line in trainText:
        label, text = line.strip().split(' ', 1)
        index = label2id(label[0])
        words = text.strip().split(' ')
        for word in words:
            word = word.strip()
            if word == '':continue
            if word not in wordCount:
                wordCount[word] = doc_dict()
            wordCount[word][index] += 1
        docCount[index] += 1
    # Compute mutual information for feature selection
    print "计算互信息,提取关键/特征词中,请稍后..."
    miDict = dict() #collections.defaultdict(list)
    N = sum(docCount)
    for k, vs in wordCount.items():
        miDict[k] = doc_dict()
        for i in xrange(len(vs)):
            # 2x2 contingency counts for word k vs. class i.
            N11 = vs[i]
            N10 = sum(vs) - N11
            N01 = docCount[i] - N11
            N00 = N - N11 - N10 - N01
            mi = mutual_info(N, N11, N10 + N11, N01 + N11) + \
                 mutual_info(N, N10, N10 + N11, N00 + N10) + \
                 mutual_info(N, N01, N01 + N11, N01 + N00) + \
                 mutual_info(N, N00, N00 + N10, N00 + N01)
            miDict[k][i] = mi
    fWords = set()
    # Keep the 250 highest-MI words for each class.
    for i in xrange(len(docCount)):
        keyf = lambda x: x[1][i]
        sortedDict = sorted(miDict.items(), key=keyf, reverse=True)
        for j in xrange(250):
            fWords.add(sortedDict[j][0])
    out = open(featureFile, 'w')
    # First line: the per-class document counts
    out.write(str(docCount) + "\n")
    # Then one feature word (highest MI) per line
    for fword in fWords:
        out.write(fword + "\n")
    print "特征词写入完毕..."
    out.close()
def load_feature_words(featureFile):
    '''
    Load the per-class document counts and the feature words
    from the feature file written by count_for_cates().
    '''
    f = open(featureFile)
    # First line holds the per-class document counts.
    # NOTE(review): eval() on file content -- acceptable for a
    # self-produced file, but unsafe on untrusted input.
    docCounts = eval(f.readline())
    features = set()
    # Remaining lines: one feature word each.
    for line in f:
        features.add(line.strip())
    f.close()
    return docCounts, features
def train_bayes(featureFile, trainText, modelFile):
    '''
    Train the naive Bayes model: count occurrences of each feature word
    within each class, apply Laplace smoothing, and write the scores.
    NOTE(review): despite the name, `trainText` is used as a file *path*
    here (it is open()-ed); the caller in __main__ passes `inFile`, i.e.
    the full (train+test) corpus -- verify that this is intended.
    '''
    print "使用朴素贝叶斯训练中..."
    docCounts, features = load_feature_words(featureFile)
    wordCount = dict() #collections.defaultdict(list)
    for word in features:
        wordCount[word] = doc_dict()
    # Per-class total number of feature-word occurrences.
    tCount = [0] * len(docCounts)
    for line in open(trainText):
        lable, text = line.strip().split(' ', 1)
        index = label2id(lable[0])
        words = text.split(' ')
        for word in words:
            if word in features:
                tCount[index] += 1
                wordCount[word][index] += 1
    outModel = open(modelFile, 'w')
    # Laplace smoothing
    print "训练完毕,写入模型..."
    for k, v in wordCount.items():
        scores = [(v[i] + 1) * 1.0 / (tCount[i] + len(wordCount)) for i in xrange(len(v))]
        outModel.write(k + "\t" + str(scores) + "\n")
    outModel.close()
def load_model(modelFile):
    '''
    Load the pre-computed Bayes model (word -> per-class score list)
    from the model file written by train_bayes().
    '''
    print "加载模型中..."
    f = open(modelFile)
    scores = {}
    for line in f:
        word,counts = line.strip().rsplit('\t',1)
        # NOTE(review): eval() of the stored list -- safe only because the
        # file is produced by train_bayes() above; unsafe on foreign files.
        scores[word] = eval(counts)
    f.close()
    return scores
def predict(featureFile, modelFile, testText):
'''
预测文档的类标,标准输入每一行为一个文档
'''
docCounts, features = load_feature_words(featureFile)
docScores = [math.log(count * 1.0 / sum(docCounts)) for count in docCounts]
scores = load_model(modelFile)
rCount = 0
docCount = 0
print "正在使用测试数据验证模型效果..."
for line in testText:
lable, text = line.strip().split(' ', 1)
index = label2id(lable[0])
words = text.split(' ')
preValues = list(docScores)
for word in words:
if word in features:
for i in xrange(len(preValues)):
preValues[i] += math.log(scores[word][i])
m = max(preValues)
pIndex = preValues.index(m)
if pIndex == index:
rCount += 1
# print lable,lables[pIndex],text
docCount += 1
print("总共测试文本量: %d , 预测正确的类别量: %d, 朴素贝叶斯分类器准确度:%f" % (rCount, docCount, rCount * 1.0 / docCount))
if __name__=="__main__":
    """
    if len(sys.argv) != 4:
        print "Usage: python naive_bayes_text_classifier.py sougou_news.txt feature_file.out model_file.out"
        sys.exit()
    inFile = sys.argv[1]
    featureFile = sys.argv[2]
    modelFile = sys.argv[3]
    """
    # BUGFIX: __file__ was quoted ("__file__"), so dirname() always returned
    # '' and the paths resolved relative to the CWD, not the script's folder.
    abs_path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
    root_path = os.path.join(abs_path, "data\\SogouC.reduced\\")
    inFile = os.path.join(root_path, 'splited_text.txt')
    featureFile = os.path.join(root_path, 'feature.txt')
    modelFile = os.path.join(abs_path, 'model/nb_model')
    trainText, testText = shuffle(inFile)
    count_for_cates(trainText, featureFile)
    # NOTE(review): train_bayes receives `inFile` (the whole corpus), not
    # `trainText` -- the model is trained on train+test data. Presumably
    # unintended; confirm before changing.
    train_bayes(featureFile, inFile, modelFile)
    predict(featureFile, modelFile, testText)
CherryYin/ml_algorithm | knn/kd_tree.py | <gh_stars>0
#coding:utf-8
import numpy as np
import gzip
import itertools
from datetime import datetime
def _read32(bytestream):
dt = np.dtype(np.uint32).newbyteorder('>')
return np.frombuffer(bytestream.read(4), dtype=dt)[0]
def extract_images(input_file, is_value_binary, is_matrix):
    # Read a gzipped MNIST image file and condense each image into a feature
    # vector of its binarized row sums and column sums.
    # NOTE(review): `is_value_binary` and `is_matrix` are never used.
    with gzip.open(input_file, 'rb') as zipf:
        magic = _read32(zipf)
        if magic !=2051:
            raise ValueError('Invalid magic number %d in MNIST image file: %s' %(magic, input_file.name))
        num_images = _read32(zipf)
        rows = _read32(zipf)
        cols = _read32(zipf)
        print magic, num_images, rows, cols
        buf = zipf.read(rows * cols * num_images)
        data = np.frombuffer(buf, dtype=np.uint8)
        # Reshape to (num_images, rows, cols)
        data = data.reshape(num_images, rows, cols)
        # Binarize pixel values to {0, 1}
        data_value_binary = np.minimum(data, 1)
        # Row sums go in the first `rows` slots, column sums in the next
        # `cols`; the final slot is left for the label (filled by caller).
        # Diagonal sums / determinants could be added if accuracy is poor.
        data_tidy = np.zeros((num_images, rows + cols + 1), dtype=np.uint32)
        for i in range(num_images):
            data_tidy[i, :rows] = np.sum(data_value_binary[i], axis=1)
            data_tidy[i, rows:(rows+cols)] = (np.sum(data_value_binary[i].transpose(), axis=1))
        return data_tidy
# Extract labels from a gzipped MNIST label file.
# Modeled on mnist.py from tensorflow.
def extract_labels(input_file):
    # Big-endian uint32 reader for the two header fields.
    read_u32 = lambda s: np.frombuffer(
        s.read(4), dtype=np.dtype(np.uint32).newbyteorder('>'))[0]
    with gzip.open(input_file, 'rb') as zipf:
        magic = read_u32(zipf)
        if magic != 2049:
            raise ValueError('Invalid magic number %d in MNIST label file: %s' % (magic, input_file.name))
        num_items = read_u32(zipf)
        buf = zipf.read(num_items)
        labels = np.frombuffer(buf, dtype=np.uint8)
        return labels
class node:
    """Node of a kd-tree.

    Attributes:
        point  -- feature vector (label excluded)
        label  -- class label of the sample
        left, right, parent -- tree links (None when absent)
    """
    def __init__(self, point, label):
        self.left = None
        self.right = None
        self.point = point
        self.label = label
        self.parent = None
    def set_left(self, left):
        # BUGFIX: was `if left == None: pass`, which fell through and then
        # raised AttributeError dereferencing None; return early instead.
        if left is None: return
        left.parent = self
        self.left = left
    def set_right(self, right):
        # BUGFIX: same early-return fix as set_left.
        if right is None: return
        right.parent = self
        self.right = right
def median(lst):
    """Return (middle element, middle index) of a sorted list.

    Uses floor division so the index stays an int under both Python 2
    and Python 3 (plain `/` on two ints yields a float in Python 3).
    """
    m = len(lst) // 2
    return lst[m], m
def build_kdtree(data, d):
    # Recursively build a kd-tree. `d` is an infinite iterator cycling over
    # the split dimensions (Python 2: consumed via .next()).
    data = sorted(data, key=lambda x: x[d.next()])
    p, m = median(data)
    # The last element of each row is the class label.
    tree = node(p[:-1], p[-1])
    del data[m]
    if m > 0: tree.set_left(build_kdtree(data[:m], d))
    if len(data) > 1: tree.set_right(build_kdtree(data[m:], d))
    return tree
def distance(a, b):
    # Squared Euclidean distance -- no sqrt needed for ranking neighbours.
    delta = a - b
    return np.sum(delta * delta)
def search_kdtree(tree, d, target, k):
    # Descend toward the subtree on `target`'s side of the split plane,
    # then, from the reached node, walk back up collecting the k best
    # candidates among each ancestor's children.
    den = d.next()
    if target[den] < tree.point[den]:
        if tree.left != None:
            return search_kdtree(tree.left, d, target, k)
    else:
        if tree.right != None:
            return search_kdtree(tree.right, d, target, k)
    def update_best(t, best):
        # `best` holds k entries of [point, squared_distance, label].
        if t == None: return
        label = t.label
        t = t.point
        # NOTE: this `d` shadows the dimension iterator parameter.
        d = distance(t, target)
        # NOTE(review): the shifting below copies best[j+1] onto best[j]
        # for j < i, which does not obviously keep `best` sorted by
        # distance -- verify against a reference kNN implementation.
        for i in range(k):
            if d < best[i][1]:
                for j in range(0, i):
                    best[j][1] = best[j+1][1]
                    best[j][0] = best[j+1][0]
                    best[j][2] = best[j+1][2]
                best[i][1] = d
                best[i][0] = t
                best[i][2] = label
    # Sentinel entries: large distance, dummy label 10.
    best = []
    for i in range(k):
        best.append( [tree.point, 100000.0, 10] )
    while (tree.parent != None):
        update_best(tree.parent.left, best)
        update_best(tree.parent.right, best)
        tree = tree.parent
    return best
def testHandWritingClass():
    # End-to-end kd-tree kNN demo on condensed MNIST features.
    ## step 1: load data
    print "step 1: load data..."
    train_x = extract_images('data/mnist/train_images', True, True)
    train_y = extract_labels('data/mnist/train_labels')
    test_x = extract_images('data/mnist/test_images', True, True)
    test_y = extract_labels('data/mnist/test_labels')
    l = min(train_x.shape[0], train_y.shape[0])
    rows = train_x.shape[1]
    # Store each label in the last (reserved) column of its feature row.
    for i in range(l):
        train_x[i, -1] = train_y[i]
    # Infinite cycle over feature dimensions, used as kd-tree split axes.
    densim = itertools.cycle(range(0, rows-1))
    ## step 2: training...
    print "step 2: build tree..."
    mnist_tree = build_kdtree(train_x, densim)
    ## step 3: testing
    print "step 3: testing..."
    a = datetime.now()
    numTestSamples = test_x.shape[0]
    matchCount = 0
    test_num = numTestSamples
    K = 3
    for i in xrange(test_num):
        best_k = search_kdtree(mnist_tree, densim, test_x[i, :-1], K)
        # Majority vote among the K nearest neighbours.
        classCount = {}
        for j in range(K):
            voteLabel = best_k[j][2]
            classCount[voteLabel] = classCount.get(voteLabel, 0) + 1
        maxCount = 0
        predict = 0
        for key, value in classCount.items():
            if value > maxCount:
                maxCount = value
                predict = key
        if predict == test_y[i]:
            matchCount += 1
        if i % 100 == 0:
            print "完成%d张图片"%(i)
    accuracy = float(matchCount) / test_num
    b = datetime.now()
    print "一共运行了%d秒"%((b-a).seconds)
    ## step 4: show the result
    print "step 4: show the result..."
    print 'The classify accuracy is: %.2f%%' % (accuracy * 100)
if __name__ == '__main__':
testHandWritingClass()
|
CherryYin/ml_algorithm | policy_tree/policy_tree.py | <reponame>CherryYin/ml_algorithm
#coding:utf-8
import sys
#from tree import *
reload(sys)
sys.setdefaultencoding('utf-8')
from pylab import *
def createDataSet():
    """
    Create the loan-application data set (from the statistical-learning textbook).
    :return: data set, feature-label list, feature-value map, class-value list
    """
    dataSet = [[u'青年', u'否', u'否', u'一般', u'拒绝'],
               [u'青年', u'否', u'否', u'好', u'拒绝'],
               [u'青年', u'是', u'否', u'好', u'同意'],
               [u'青年', u'是', u'是', u'一般', u'同意'],
               [u'青年', u'否', u'否', u'一般', u'拒绝'],
               [u'中年', u'否', u'否', u'一般', u'拒绝'],
               [u'中年', u'否', u'否', u'好', u'拒绝'],
               [u'中年', u'是', u'是', u'好', u'同意'],
               [u'中年', u'否', u'是', u'非常好', u'同意'],
               [u'中年', u'否', u'是', u'非常好', u'同意'],
               [u'老年', u'否', u'是', u'非常好', u'同意'],
               [u'老年', u'否', u'是', u'好', u'同意'],
               [u'老年', u'是', u'否', u'好', u'同意'],
               [u'老年', u'是', u'否', u'非常好', u'同意'],
               [u'老年', u'否', u'否', u'一般', u'拒绝'],
               ]
    labels = [u'年龄', u'有工作', u'有房子', u'信贷情况']
    label_value_map = {u'年龄': [u'青年', u'中年', u'老年'],
                       u'有工作': [u'是', u'否'],
                       u'有房子': [u'是', u'否'],
                       u'信贷情况': [u'一般', u'好', u'非常好']
                       }
    class_value_list = [u'同意', u'拒绝']
    # Return the data set and the name of each feature dimension
    return dataSet, labels, label_value_map, class_value_list
"""
H(p) = -∑ (pi * logpi)
计算各分类(y)的经验熵
feature_class_map: 按类别存储的特征map
sample_num:样本总数
base:对数的底,书上对二值分类用2为底,其他是e为底。缺省为2
return:经验熵
"""
def compute_empirical_entropy(class_map, sample_num, base=2):
    """Empirical entropy H(p) = -sum_i p_i * log(p_i).

    class_map  : mapping from class value to its count.
    sample_num : total number of samples.
    base       : logarithm base (2 for binary classification in the book).
    """
    probs = [float(count) / sample_num for count in class_map.values()]
    return -sum(p * math.log(p, base) for p in probs)
"""
计算基本的经验熵,H(D)
dataSet:总体数据集
class_list:类别列表
return:基本的经验熵
"""
def compute_base_entroy(dataSet, class_list):
    """Compute the base empirical entropy H(D) of the whole data set.

    dataSet: iterable of rows whose last element is the class label
    class_list: all possible class labels
    """
    counts = dict.fromkeys(class_list, 0)
    rows = 0
    for row in dataSet:
        counts[row[-1]] += 1
        rows += 1
    return compute_empirical_entropy(counts, rows)
"""
计算条件熵
H(Y|X) = ∑ (pi * H(Y|X=xi)
dataSet:总的数据集
i:给定的特征列
labels:特征维度标签
label_value_map:特征值map,存储了各特征标签下的值
return: 条件熵
"""
def comput_condition_entropy(dataSet, i, value_list):
    """Compute the conditional entropy H(Y|X) = sum(p_i * H(Y|X=x_i)).

    dataSet: 2-D numpy-style array (indexed as dataSet[row, col]); last column is the class
    i: index of the conditioning feature column
    value_list: the possible values of feature i
    """
    total = 0
    sample_num = len(dataSet)
    for value in value_list:
        matched = 0
        class_counts = {}
        for row_idx in range(sample_num):
            if value == dataSet[row_idx, i]:
                matched += 1
                label = dataSet[row_idx, -1]
                class_counts[label] = class_counts.get(label, 0) + 1
        weight = float(matched) / sample_num
        total += weight * compute_empirical_entropy(class_counts, matched)
    return total
"""
计算信息增益
g(D,A) = H(D) - H(D|A)
base_entropy:数据集的经验熵
dataSet:总的数据集
i:给定的特征列
value_list:给定特征的特征值列表
return: 信息增益
"""
def compute_info_gain(base_entropy, dataSet, i, value_list):
    """Information gain g(D,A) = H(D) - H(D|A) for feature column i."""
    return base_entropy - comput_condition_entropy(dataSet, i, value_list)
"""
计算信息增益比
g(D,A)
gR(D,A) = -----------
HA(D)
info_gain:信息增益
dataSet:总的数据集
i:给定的特征列
return: 信息增益比
"""
def compute_info_gain_rato(info_fain, dataSet, i):
    """Information gain ratio gR(D,A) = g(D,A) / HA(D).

    info_fain: the information gain g(D,A) already computed for feature i
    dataSet: 2-D numpy-style array; column i is the feature of interest
    i: feature column index
    :return: the gain ratio (0.0 when HA(D) is zero, i.e. the feature is constant)
    """
    value_map = {}
    sample_num = len(dataSet)
    for value in dataSet[:, i]:
        # BUG FIX: the original initialised a new value to 1 and then also
        # incremented it, double-counting every value's first occurrence.
        value_map[value] = value_map.get(value, 0) + 1
    # HA(D): entropy of the data set with respect to feature i's values.
    entropy_base_i = compute_empirical_entropy(value_map, sample_num)
    # BUG FIX: the original returned HA(D) itself instead of the ratio
    # promised by the formula above.
    if entropy_base_i == 0:
        return 0.0
    return info_fain / entropy_base_i
"""**********************************以下代码基本上是参考别人的**************************************"""
def splitDataSet(dataSet, axis, value):
    """
    Partition the data set on one feature value.
    :param dataSet: the data set to split
    :param axis: index of the feature dimension to split on
    :param value: the feature value to keep
    :return: every matching row, with the split feature column removed
    """
    subset = []
    for row in dataSet:
        if row[axis] != value:
            continue
        reduced = list(row[:axis])  # drop the split feature dimension
        reduced.extend(row[axis + 1:])
        subset.append(reduced)
    return subset
def chooseBestFeatureToSplitByID3(dataSet, labels, label_value_map, class_list):
    """
    Choose the best feature to split on (ID3: maximum information gain).
    :param dataSet: 2-D array whose last column is the class label
    :param labels: feature names, one per feature column
    :param label_value_map: feature name -> list of its possible values
    :param class_list: all possible class labels
    :return: index of the best feature column, or -1 if no feature improves entropy
    """
    numFeatures = len(dataSet[0]) - 1  # last column is the class label
    base_entropy = compute_base_entroy(dataSet, class_list)
    bestInfoGain = 0.0
    bestFeature = -1
    for i in range(numFeatures):  # scan every feature dimension
        # BUG FIX: removed a stray Python-2-only debug `print i` that spammed
        # stdout on every candidate feature.
        value_list = label_value_map[labels[i]]
        infoGain = compute_info_gain(base_entropy, dataSet, i, value_list)
        if infoGain > bestInfoGain:  # keep the feature with the largest gain
            bestInfoGain = infoGain
            bestFeature = i
    return bestFeature
"""
返回出现次数最多的分类名称
:param classList: 类列表
:return: 出现次数最多的类名称
"""
def majorityCnt(classList):
    """Return the class label occurring most often in classList.

    BUG FIX: the original used the Python-2-only dict.iteritems() and the
    name `operator`, which is never imported in this module (it relied on
    `from pylab import *` exposing it). max() with a key function needs
    neither; ties resolve to the first key with the maximal count.
    """
    classCount = {}
    for vote in classList:
        classCount[vote] = classCount.get(vote, 0) + 1
    return max(classCount, key=classCount.get)
"""
创建决策树
:param dataSet:数据集
:param labels:数据集每一维的名称
:return:决策树
"""
def createTree(dataSet, labels, label_value_map, class_value_list, chooseBestFeatureToSplitFunc=chooseBestFeatureToSplitByID3):
    """
    Recursively build a decision tree as nested dicts {feature: {value: subtree}}.
    :param dataSet: list of rows whose last element is the class label
    :param labels: feature names (NOTE: mutated in place by del below)
    :param label_value_map: feature name -> possible values
    :param class_value_list: all possible class labels
    :return: nested-dict tree, or a class label at the leaves

    BUG FIX: the fourth parameter was misspelled `calss_value_list`, so the
    body silently used the module-level global instead of the caller's
    argument; the parameter is now named and used consistently.
    """
    classList = [example[-1] for example in dataSet]  # class label of every sample
    if classList.count(classList[0]) == len(classList):
        return classList[0]  # all samples agree: stop splitting
    if len(dataSet[0]) == 1:  # only the class column remains: majority vote
        return majorityCnt(classList)
    dataSet_array = np.array(dataSet)
    bestFeat = chooseBestFeatureToSplitFunc(dataSet_array, labels, label_value_map, class_value_list)
    bestFeatLabel = labels[bestFeat]
    myTree = {bestFeatLabel: {}}
    del labels[bestFeat]  # NOTE: mutates the caller's list, matching original behaviour
    featValues = [example[bestFeat] for example in dataSet]
    uniqueVals = set(featValues)
    for value in uniqueVals:
        subLabels = labels[:]  # copy so sibling branches do not share the list
        myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value), subLabels, label_value_map, class_value_list)
    return myTree
mpl.rcParams['font.sans-serif'] = ['SimHei']  # default font that can render CJK labels
mpl.rcParams['axes.unicode_minus'] = False  # render '-' correctly when saving figures
##################################
# Build the decision tree from the toy loan data set and plot it.
myDat, labels,label_value_map, class_value_list = createDataSet()
#myDat = np.array(myDat)
myTree = createTree(myDat, labels, label_value_map, class_value_list)
# Draw the decision tree.
from policy_tree import treeplotter
treeplotter.createPlot(myTree) |
CherryYin/ml_algorithm | naive_bayes/sogouC_prepare_bayes.py | <reponame>CherryYin/ml_algorithm<gh_stars>0
#coding:utf-8
import os, sys
import jieba
from datetime import datetime
reload(sys)
sys.setdefaultencoding("utf-8")
stopWordList= [','.decode('utf-8'), '的'.decode('utf-8'), '\n', ' ', '。'.decode('utf-8'), '、'.decode('utf-8'), '在'.decode('utf-8'),
'了'.decode('utf-8'), '是'.decode('utf-8'), '“'.decode('utf-8'), '”'.decode('utf-8'), '&', 'nbsp', '和'.decode('utf-8'),
':'.decode('utf-8'), ':'.decode('utf-8'), '有'.decode('utf-8'), '也'.decode('utf-8'), '我'.decode('utf-8'), ','.decode('utf-8'),
'对'.decode('utf-8'), '就'.decode('utf-8'), '中'.decode('utf-8'), '他'.decode('utf-8'), ')'.decode('utf-8'), '('.decode('utf-8'),
'-'.decode('utf-8'), ';'.decode('utf-8'), ')'.decode('utf-8'), '.', '('.decode('utf-8'), '?'.decode('utf-8'), '》'.decode('utf-8'),
'《'.decode('utf-8'), ':', '[', ']'.decode('utf-8'), '!'.decode('utf-8'), '\"', 'NUL', ' '.decode('utf-8'), '', ' '.decode('utf-8'), '☆'.decode('utf-8'),
'③'.decode('utf-8'), '①'.decode('utf-8'), '②'.decode('utf-8'), '/', '据'.decode('utf-8'), '1', '2'.decode('utf-8'), '3', '4', '5',
'6', '7', '8', '9', 'G'.decode('utf-8'), ':', 'the', 'are', 'is', '会'.decode('utf-8'), 'The', '很'.decode('utf-8'), ' 将'.decode('utf-8'),
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# Set for O(1) membership tests when filtering tokens during segmentation.
stopWOrdSet = set(stopWordList)
# One tag letter per category sub-directory; prefixes each document id.
labels = ['A','B','C','D','E','F','G','H','I']
class sogouC(object):
    """Pre-processing for the SogouC news corpus: jieba word segmentation and
    vocabulary extraction, feeding a naive-Bayes text classifier."""
    def __init__(self, root_path, sub_path_list, vocab_mini_count = 5):
        # root_path: corpus root; expects 'original\\' and 'splitted\\' beneath it.
        self.root_path = root_path
        self.original_path = os.path.join(root_path, 'original\\')
        self.splited_path = os.path.join(root_path, 'splitted\\')
        self.sub_path_list = sub_path_list
        # Words occurring no more than this many times are dropped from the vocab.
        self.vocab_mini_count = vocab_mini_count
    def split_words(self):
        """Segment every original document with jieba and write all documents,
        one per line with stop-words removed, to splited_text.txt."""
        i = 0
        splited_doc_cache = []
        for sub_path in self.sub_path_list:
            original_path = os.path.join(self.original_path, sub_path)
            splited_path = os.path.join(self.splited_path, sub_path)
            print original_path, 'splitting start...'
            if not os.path.isdir(splited_path):
                os.makedirs(splited_path)
            dirs = os.listdir(original_path)
            j = 1
            for filepath in dirs:
                # First token: category tag letter + source file name, used as a doc id.
                words = [labels[i]+filepath]
                with open(os.path.join(original_path, filepath), 'r') as f1:
                    lines = f1.readlines()
                    for line in lines:
                        line = line.strip()
                        if line== '': continue
                        words.extend(list(jieba.cut(line.strip()))) # strip leading spaces; one news article becomes one line
                splited_doc_cache.append(words)
                j += 1
            i += 1
        # NOTE(review): writes under the module-level root_path, not
        # self.root_path -- they happen to match in this script; confirm intended.
        with open(os.path.join(root_path, 'splited_text.txt'), 'w') as f2:
            for words in splited_doc_cache:
                line = ''
                for word in words:
                    if word in stopWOrdSet: continue
                    line = line + word + ' '
                f2.write(line + '\n')
    def get_vocab(self):
        """Count word frequencies over the per-file splitted output and write
        words occurring more than vocab_mini_count times to dict.txt.

        NOTE(review): reads per-file output under splitted/, but split_words
        writes a single combined file -- confirm which layout is current.
        """
        vocab_map = dict()
        vocab_set = set()
        for sub_path in self.sub_path_list:
            begin = datetime.now()
            splited_path = os.path.join(self.splited_path, sub_path)
            print splited_path, 'read start...'
            dirs = os.listdir(splited_path)
            for filepath in dirs:
                with open(os.path.join(splited_path, filepath), 'r') as f1:
                    all_the_text = f1.read()
                    words = all_the_text.split(' ')
                    for word in words:
                        word = word.strip()
                        if word not in vocab_map:
                            vocab_map[word] = 0
                        vocab_map[word] += 1
                        vocab_set.add(word)
            end = datetime.now()
            print "time cost is %d second."%((end-begin).seconds)
        # Sort by descending frequency before applying the minimum-count filter.
        vocab_set_sorted = sorted(vocab_map.iteritems(), key=lambda d: d[1], reverse=True)
        vocab_set_valid = []
        with open(os.path.join(root_path, 'dict.txt'), 'w') as f2:
            for word, i in vocab_set_sorted:
                if i > self.vocab_mini_count:
                    vocab_set_valid.append(word)
                    f2.write(word + '\n')
        return vocab_set_valid
    def get_vocab_new(self):
        """Timing experiment: count word frequencies for a single category only.

        NOTE(review): sub_path 'C0000081' looks like a typo for 'C000008' --
        confirm against the directory layout.
        """
        vocab_map = dict()
        vocab_set = set()
        begin = datetime.now()
        sub_path = 'C0000081'
        splited_path = os.path.join(self.splited_path, sub_path)
        dirs = os.listdir(splited_path)
        for filepath in dirs:
            with open(os.path.join(splited_path, filepath), 'r') as f1:
                all_the_text = f1.read()
                words = all_the_text.split(' ')
                for word in words:
                    word = word.strip()
                    if word not in vocab_map:
                        vocab_map[word] = 0
                    vocab_map[word] += 1
                    vocab_set.add(word)
        end = datetime.now()
        print "time cost is %d ms." % ((end - begin).microseconds/1000)
# Resolve the data directory relative to this script's parent directory.
# NOTE(review): os.path.dirname("__file__") quotes the name, so it returns ''
# (the CWD), not this file's directory -- presumably __file__ was intended.
abs_path = os.path.abspath(os.path.join(os.path.dirname("__file__"), os.path.pardir))
root_path = os.path.join(abs_path, "data\\SogouC.reduced\\")
# Windows-style separators: this script assumes a Windows checkout layout.
sub_paths = ['C000008\\', 'C000010\\', 'C000013\\', 'C000014\\', 'C000016\\', 'C000020\\', 'C000022\\', 'C000023\\', 'C000024\\']
print "split words..."
sc = sogouC(root_path, sub_paths, vocab_mini_count=5)
sc.split_words()
#print "get vocab..."
#sc.get_vocab()
#print len(vocab) |
CherryYin/ml_algorithm | naive_bayes/bayes_2.py | <filename>naive_bayes/bayes_2.py<gh_stars>0
#coding:utf-8
import numpy as np
#coding:utf-8
import numpy as np
import gzip
import itertools
from datetime import datetime
def _read32(bytestream):
dt = np.dtype(np.uint32).newbyteorder('>')
return np.frombuffer(bytestream.read(4), dtype=dt)[0]
def extract_images(input_file, is_value_binary, is_matrix):
    """Read a gzipped MNIST idx3 image file and reduce each image to a
    per-row / per-column sum feature vector for naive Bayes.

    NOTE(review): the parameters is_value_binary and is_matrix are never
    used in this variant -- confirm whether they were meant to gate the
    binarisation/reshape as in the knn version of this function.
    """
    with gzip.open(input_file, 'rb') as zipf:
        magic = _read32(zipf)
        if magic !=2051:
            raise ValueError('Invalid magic number %d in MNIST image file: %s' %(magic, input_file.name))
        num_images = _read32(zipf)
        rows = _read32(zipf)
        cols = _read32(zipf)
        print magic, num_images, rows, cols
        buf = zipf.read(rows * cols * num_images)
        data = np.frombuffer(buf, dtype=np.uint8)
        # Reshape the flat pixel buffer into (num_images, rows, cols).
        data = data.reshape(num_images, rows, cols)
        # Binarise: any non-zero pixel becomes 1.
        data_value_binary = np.minimum(data, 1)
        # Row sums go into the first `rows` slots, column sums into the next
        # `cols` slots. If accuracy is poor, diagonal sums etc. could be added.
        # NOTE(review): the +1 extra column below is never filled -- presumably
        # reserved for a label or bias; confirm.
        data_tidy = np.zeros((num_images, rows + cols + 1), dtype=np.uint32)
        for i in range(num_images):
            data_tidy[i, :rows] = np.sum(data_value_binary[i], axis=1)
            data_tidy[i, rows:(rows+cols)] = (np.sum(data_value_binary[i].transpose(), axis=1))
        return data_tidy
#抽取标签
#仿照tensorflow中mnist.py写的
def extract_labels(input_file):
    """Extract MNIST labels from a gzipped idx1-ubyte file.

    :param input_file: path to the gzipped label file
    :return: 1-D numpy uint8 array of labels
    :raises ValueError: if the magic number is not 2049
    """
    with gzip.open(input_file, 'rb') as zipf:
        magic = _read32(zipf)
        if magic != 2049:
            raise ValueError('Invalid magic number %d in MNIST label file: %s' % (magic, input_file.name))
        num_items = _read32(zipf)
        raw = zipf.read(num_items)
    return np.frombuffer(raw, dtype=np.uint8)
def createVocabList(dataSet):
    """Return a list of the unique words appearing in any document."""
    vocab = set()
    for document in dataSet:
        vocab.update(document)  # union of all document word sets
    return list(vocab)
def setOfWords2Vec(vocabList, inputSet):
    """Return a 0/1 "word present" vector aligned with vocabList."""
    returnVec = [0 for _ in vocabList]
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
        else:
            # Parenthesised print: identical output on Python 2 and 3.
            print("the word: %s is not in my Vocabulary!" % word)
    return returnVec
def bagOfWords2vec(vocabList, inputSet):
    """Return a word-count (bag-of-words) vector aligned with vocabList."""
    returnVec = [0 for _ in vocabList]
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] += 1
        else:
            # Parenthesised print: identical output on Python 2 and 3.
            print("the word: %s is not in my Vocabulary!" % word)
    return returnVec
def trainNB0(trainMatrix, trainCategory):
    """Train a two-class naive Bayes model.

    :param trainMatrix: list of word-count vectors, one per document
    :param trainCategory: 0/1 class label per document
    :return: (p0Vect, p1Vect, pAbusive) -- log word probabilities for each
        class and the prior P(class == 1)
    """
    doc_count = len(trainMatrix)
    word_count = len(trainMatrix[0])
    pAbusive = sum(trainCategory) / float(doc_count)  # P(c_1)
    # Laplace smoothing: word counts start at 1, class totals at 2, so no
    # word gets a zero probability.
    counts = [np.ones(word_count), np.ones(word_count)]
    totals = [2.0, 2.0]
    for doc, label in zip(trainMatrix, trainCategory):
        cls = 1 if label == 1 else 0
        counts[cls] += doc
        totals[cls] += sum(doc)
    # Log probabilities: multiplying many small probabilities underflows,
    # so downstream code sums logs instead.
    p0Vect = np.log(counts[0] / totals[0])
    p1Vect = np.log(counts[1] / totals[1])
    return p0Vect, p1Vect, pAbusive
|
CherryYin/ml_algorithm | SVM/drama_fans_predict.py | <reponame>CherryYin/ml_algorithm<gh_stars>0
#coding:utf-8
import numpy as np
import random
import os
from libsvm.python.svmutil import *
def split_data(input_data_path, input_label_path, input_size, K):
    """Load feature/label files and split rows into train/test sets.

    One row out of every K consecutive rows is chosen at random as a test
    sample; the rest become training samples. Feature columns are then
    scaled by the training-set maximum of each column.

    :param input_data_path: file of "[v0, v1, v2]"-style lines
    :param input_label_path: file of integer labels, one per line
    :param input_size: number of feature columns to normalise
    :param K: window size for the train/test split
    :return: (train_x, train_y, test_x, test_y)
    """
    train_x = []
    test_x = []
    train_y = []
    test_y = []
    with open(input_data_path, 'r') as f1:
        x_lines = f1.readlines()
    with open(input_label_path, 'r') as f1:
        y_lines = f1.readlines()
    counter = 0
    test_num = 0
    length = min(len(x_lines), len(y_lines))
    for i in range(length):
        Xis = x_lines[i].split(', ')
        yis = y_lines[i].strip()
        if counter % K == 0:
            # BUG FIX: random.randint is inclusive on both ends, so the
            # original randint(0, K) could return K -- an index the counter
            # (0..K-1) never reaches, leaving that window with no test row.
            test_num = random.randint(0, K - 1)
            counter = 0
        # Strip the leading '[' from the first field and the trailing ']\n'
        # from the last field of the "[a, b, c]" line format.
        if counter == test_num:
            test_x.append([int(Xis[0][1:]), float(Xis[1]), float(Xis[2][:-2])])
            test_y.append(int(yis))
        else:
            train_x.append([int(Xis[0][1:]), float(Xis[1]), float(Xis[2][:-2])])
            train_y.append(int(yis))
        counter += 1
    # Normalise each feature column by the training-set maximum.
    train_x = np.array(train_x)
    test_x = np.array(test_x)
    for i in range(input_size):
        divided_max_value = 1 / max(train_x[:, i])
        train_x[:, i] *= divided_max_value
        test_x[:, i] *= divided_max_value
    return train_x.tolist(), train_y, test_x.tolist(), test_y
# Resolve the data directory relative to this script's parent directory.
# NOTE(review): os.path.dirname("__file__") quotes the name and returns '' --
# presumably __file__ was intended.
abs_path = os.path.abspath(os.path.join(os.path.dirname("__file__"), os.path.pardir))
root_path = os.path.join(abs_path, "data\\drama\\")
# NOTE(review): input_size=5 but the parsed rows have 3 columns -- confirm.
train_x, train_y, test_x, test_y = split_data(os.path.join(root_path,"features_for_pca.txt"), os.path.join(root_path,"labels.txt"), 5, 8)
print train_x[:10]
print "训练模型开始..."
# Train an SVM with libsvm: -t 4 precomputed kernel, -c 4 cost, -b 1 probability estimates.
prob = svm_problem(train_y,train_x,isKernel=True)
param = svm_parameter('-t 4 -c 4 -b 1')
m = svm_train(prob, param)
# For the format of precomputed kernel, please read LIBSVM README.
# Other utility functions
print "测试模型开始 ... "
# Round-trip the model through disk to verify save/load, then evaluate.
model_path = os.path.join(abs_path,'model/svm_drama_5.model')
svm_save_model(model_path, m)
m = svm_load_model(model_path)
p_label, p_acc, p_val = svm_predict(test_y, test_x, m, '-b 1')
ACC, MSE, SCC = evaluations(test_y, p_label)
print ACC, MSE, SCC
|
CherryYin/ml_algorithm | naive_bayes/byes.py | #coding:utf-8
import numpy as np
def loadDataSet():
    """Return a toy message data set and its class labels (1 = abusive)."""
    documents = [
        ['my', 'dog', 'has', 'flea', 'problem', 'help', 'please'],
        ['maybe', 'not', 'take', 'hime', 'to', 'dog', 'park', 'stupid'],
        ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'hime'],
        ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
        ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
        ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid'],
    ]
    class_labels = [0, 1, 0, 1, 0, 1]
    return documents, class_labels
def createVocabList(dataSet):
    """Return a list of the unique words across all documents."""
    # Union of every document's word set in one pass.
    return list(set().union(*[set(doc) for doc in dataSet]))
def setOfWords2Vec(vocabList, inputSet):
    """Build a 0/1 presence vector over vocabList for the words in inputSet."""
    presence = [0] * len(vocabList)
    for token in inputSet:
        if token not in vocabList:
            # Parenthesised print: identical output on Python 2 and 3.
            print("the word: %s is not in my Vocabulary!" % token)
            continue
        presence[vocabList.index(token)] = 1
    return presence
def bagOfWords2vec(vocabList, inputSet):
    """Build a word-count (bag-of-words) vector over vocabList."""
    counts = [0] * len(vocabList)
    for token in inputSet:
        if token not in vocabList:
            # Parenthesised print: identical output on Python 2 and 3.
            print("the word: %s is not in my Vocabulary!" % token)
            continue
        counts[vocabList.index(token)] += 1
    return counts
def trainNB0(trainMatrix, trainCategory):
    """Train two-class naive Bayes with Laplace smoothing.

    :param trainMatrix: list of word-count vectors, one per document
    :param trainCategory: 0/1 label per document
    :return: (p0Vect, p1Vect, pAbusive) -- per-class log word probabilities
        and the prior P(class == 1)
    """
    doc_count = len(trainMatrix)
    word_count = len(trainMatrix[0])
    p_abusive = sum(trainCategory) / float(doc_count)  # P(c_1)
    # Laplace smoothing: counts start at 1, class totals at 2.
    num0 = np.ones(word_count)
    num1 = np.ones(word_count)
    denom0 = 2.0
    denom1 = 2.0
    for idx in range(doc_count):
        if trainCategory[idx] == 1:
            num1 += trainMatrix[idx]
            denom1 += sum(trainMatrix[idx])
        else:
            num0 += trainMatrix[idx]
            denom0 += sum(trainMatrix[idx])
    # Log probabilities avoid underflow when many small probs are multiplied.
    return np.log(num0 / denom0), np.log(num1 / denom1), p_abusive
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    """Return 1 if the log-posterior of class 1 exceeds class 0, else 0."""
    logp1 = np.log(pClass1) + sum(vec2Classify * p1Vec)
    logp0 = np.log(1.0 - pClass1) + sum(vec2Classify * p0Vec)
    return 1 if logp1 > logp0 else 0
def testingNB():
    """Smoke-test the naive Bayes pipeline on the toy posting data set."""
    postingList, classVec = loadDataSet()
    vocabList = createVocabList(postingList)
    l = len(postingList)
    trainMatrix = []
    # One 0/1 presence vector per training document.
    for i in range(l):
        trainMatrix.append(setOfWords2Vec(vocabList, postingList[i]))
    p0Vect, p1Vect, pAb = trainNB0(trainMatrix, classVec)
    print p0Vect
    print p1Vect
    print pAb
    # Classify two held-out example messages.
    testEntry = ['love', 'my', 'dalmation']
    thisDoc = np.array(setOfWords2Vec(vocabList, testEntry))
    print testEntry, 'classified as: ', classifyNB(thisDoc, p0Vect, p1Vect, pAb)
    testEntry = ['stupid', 'garbage']
    thisDoc = np.array(setOfWords2Vec(vocabList, testEntry))
    print testEntry, 'classified as: ', classifyNB(thisDoc, p0Vect, p1Vect, pAb)
testingNB()
|
CherryYin/ml_algorithm | knn/kd_tree_hankcs.py | <gh_stars>0
# -*- coding:utf-8 -*-
# Filename: kdtree.py
# Author:hankcs
# Date: 2015/2/4 15:01
import copy
import itertools
from matplotlib import pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib import animation
T = [[2, 3], [5, 4], [9, 6], [4, 7], [8, 1], [7, 2]]
def draw_point(data):
    """Scatter-plot the sample points as blue dots ('bo')."""
    xs = [p[0] for p in data]
    ys = [p[1] for p in data]
    plt.plot(xs, ys, 'bo')
def draw_line(xy_list):
    """Draw each (xdata, ydata) pair in xy_list as a green line of width 2."""
    for xdata, ydata in xy_list:
        plt.plot(xdata, ydata, 'g', lw=2)
def draw_square(square_list):
    """Fill each [[xmin, ymin], [xmax, ymax]] region with the next cycled colour."""
    axes = plt.gca()
    colors = itertools.cycle(["r", "b", "g", "c", "m", "y", '#EB70AA', '#0099FF'])
    for square in square_list:
        lo, hi = square[0], square[1]
        axes.add_patch(
            Rectangle((lo[0], lo[1]), hi[0] - lo[0], hi[1] - lo[1],
                      color=next(colors)))
def median(lst):
    """Return (element, index) at the middle position of lst.

    BUG FIX: uses floor division. The original `len(lst) / 2` yields a
    float under Python 3, which is not a valid list index; `//` is
    identical on Python 2 and correct on Python 3.
    """
    m = len(lst) // 2
    return lst[m], m
history_quare = []
def build_kdtree(data, d, square):
    """Recursively build a 2-D kd-tree over `data`, recording every bounding
    square in the module-level history_quare list for the animation.

    :param data: list of [x, y] points
    :param d: current split axis, 0 or 1 (alternated via `not d` below)
    :param square: current bounding box [[xmin, ymin], [xmax, ymax]]
    """
    history_quare.append(square)
    # Sort along the current axis and split at the median point.
    data = sorted(data, key=lambda x: x[d])
    p, m = median(data)
    del data[m]
    print data, p
    if m >= 0:
        # Left/lower half: shrink the square's upper bound on axis d to the pivot.
        sub_square = copy.deepcopy(square)
        if d == 0:
            sub_square[1][0] = p[0]
        else:
            sub_square[1][1] = p[1]
        history_quare.append(sub_square)
        if m > 0: build_kdtree(data[:m], not d, sub_square)
    if len(data) > 1:
        # Right/upper half: shrink the square's lower bound on axis d to the pivot.
        sub_square = copy.deepcopy(square)
        if d == 0:
            sub_square[0][0] = p[0]
        else:
            sub_square[0][1] = p[1]
        build_kdtree(data[m:], not d, sub_square)
build_kdtree(T, 0, [[0, 0], [10, 10]])
print history_quare
# draw an animation to show how it works, the data comes from history
# first set up the figure, the axis, and the plot element we want to animate
fig = plt.figure()
ax = plt.axes(xlim=(0, 2), ylim=(-2, 2))
line, = ax.plot([], [], 'g', lw=2)
label = ax.text([], [], '')
# initialization function: plot the background of each frame
def init():
    """Animation init callback: draw axes, grid, labels and the raw points."""
    plt.axis([0, 10, 0, 10])
    plt.grid(True)
    plt.xlabel('x_1')
    plt.ylabel('x_2')
    plt.title('build kd tree (www.hankcs.com)')
    draw_point(T)
currentAxis = plt.gca()
colors = itertools.cycle(["#FF6633", "g", "#3366FF", "c", "m", "y", '#EB70AA', '#0099FF', '#66FFFF'])
# animation function. this is called sequentially
def animate(i):
    """Animation frame callback: draw the i-th recorded bounding square."""
    square = history_quare[i]
    currentAxis.add_patch(
        Rectangle((square[0][0], square[0][1]), square[1][0] - square[0][0], square[1][1] - square[0][1],
                  color=next(colors)))
    return
# call the animator. blit=true means only re-draw the parts that have changed.
#animation是画动态图的类
anim = animation.FuncAnimation(fig, animate, init_func=init, frames=len(history_quare), interval=1000, repeat=False,
blit=False)
plt.show()
anim.save('kdtree_build.gif', fps=2, writer='imagemagick') |
CherryYin/ml_algorithm | knn/knn_mnist.py | <reponame>CherryYin/ml_algorithm<filename>knn/knn_mnist.py
#coding:utf-8
import numpy as np
import os
import gzip
from six.moves import urllib
import operator
from datetime import datetime
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
#下载mnist数据集,仿照tensorflow的base.py中的写法。
def maybe_download(filename, path, source_url):
    """Ensure path/filename exists, downloading it from source_url if missing.

    :return: the full local file path
    """
    if not os.path.exists(path):
        os.makedirs(path)
    target = os.path.join(path, filename)
    if not os.path.exists(target):
        # Only hit the network when the file is absent.
        urllib.request.urlretrieve(source_url, target)
    return target
#按32位读取,主要为读校验码、图片数量、尺寸准备的
#仿照tensorflow的mnist.py写的。
def _read32(bytestream):
dt = np.dtype(np.uint32).newbyteorder('>')
return np.frombuffer(bytestream.read(4), dtype=dt)[0]
#抽取图片,并按照需求,可将图片中的灰度值二值化,按照需求,可将二值化后的数据存成矩阵或者张量
#仿照tensorflow中mnist.py写的
def extract_images(input_file, is_value_binary, is_matrix):
    """Read a gzipped MNIST idx3 image file.

    :param input_file: path to the gzipped image file
    :param is_value_binary: if True, binarise pixels to 0/1
    :param is_matrix: if True, flatten each image to a rows*cols row vector;
        otherwise keep a (num_images, rows, cols) tensor
    :raises ValueError: if the magic number is not 2051
    """
    with gzip.open(input_file, 'rb') as zipf:
        magic = _read32(zipf)
        if magic !=2051:
            raise ValueError('Invalid magic number %d in MNIST image file: %s' %(magic, input_file.name))
        num_images = _read32(zipf)
        rows = _read32(zipf)
        cols = _read32(zipf)
        print magic, num_images, rows, cols
        buf = zipf.read(rows * cols * num_images)
        data = np.frombuffer(buf, dtype=np.uint8)
        if is_matrix:
            data = data.reshape(num_images, rows*cols)
        else:
            data = data.reshape(num_images, rows, cols)
        if is_value_binary:
            # Any non-zero pixel becomes 1.
            return np.minimum(data, 1)
        else:
            return data
#抽取标签
#仿照tensorflow中mnist.py写的
def extract_labels(input_file):
    """Extract MNIST labels from a gzipped idx1-ubyte file.

    :return: 1-D numpy uint8 array of labels
    :raises ValueError: if the magic number is not 2049
    """
    with gzip.open(input_file, 'rb') as zipf:
        magic = _read32(zipf)
        if magic != 2049:
            raise ValueError('Invalid magic number %d in MNIST label file: %s' % (magic, input_file.name))
        count = _read32(zipf)
        payload = zipf.read(count)
    return np.frombuffer(payload, dtype=np.uint8)
# 一般的knn分类,跟全部数据同时计算一般距离,然后找出最小距离的k张图,并找出这k张图片的标签,标签占比最大的为newInput的label
#copy大神http://blog.csdn.net/zouxy09/article/details/16955347的
def kNNClassify(newInput, dataSet, labels, k):
    """Classify newInput by majority vote among its k nearest neighbours.

    :param newInput: 1-D feature vector to classify
    :param dataSet: 2-D array, one training sample per row
    :param labels: label per training row
    :param k: number of neighbours to vote
    :return: the label with the most votes among the k nearest rows
    """
    numSamples = dataSet.shape[0]  # number of training rows
    init_shape = newInput.shape[0]
    newInput = newInput.reshape(1, init_shape)
    # np.tile(A, (n, 1)) repeats the query row n times so subtraction is
    # element-wise against every training sample at once.
    diff = np.tile(newInput, (numSamples, 1)) - dataSet
    squaredDiff = diff ** 2
    squaredDist = np.sum(squaredDiff, axis = 1)  # per-row sum
    distance = squaredDist ** 0.5
    sortedDistIndices = np.argsort(distance)
    classCount = {}
    # BUG FIX: range() instead of the Python-2-only xrange(); identical behaviour.
    for i in range(k):
        # Vote with the label of the i-th nearest sample.
        voteLabel = labels[sortedDistIndices[i]]
        classCount[voteLabel] = classCount.get(voteLabel, 0) + 1
    # Return the most-voted class.
    maxCount = 0
    maxIndex = 0
    for key, value in classCount.items():
        if value > maxCount:
            maxCount = value
            maxIndex = key
    return maxIndex
maybe_download('train_images', 'data/mnist', SOURCE_URL+TRAIN_IMAGES)
maybe_download('train_labels', 'data/mnist', SOURCE_URL+TRAIN_LABELS)
maybe_download('test_images', 'data/mnist', SOURCE_URL+TEST_IMAGES)
maybe_download('test_labels', 'data/mnist', SOURCE_URL+TEST_LABELS)
# 主函数,先读图片,然后用于测试手写数字
#copy大神http://blog.csdn.net/zouxy09/article/details/16955347的
def testHandWritingClass():
    """Run kNN digit classification over a tenth of the MNIST test set and
    print the elapsed time and accuracy."""
    ## step 1: load data
    print "step 1: load data..."
    train_x = extract_images('data/mnist/train_images', True, True)
    train_y = extract_labels('data/mnist/train_labels')
    test_x = extract_images('data/mnist/test_images', True, True)
    test_y = extract_labels('data/mnist/test_labels')
    ## step 2: training...
    print "step 2: training..."
    pass  # kNN is lazy: there is no training phase
    ## step 3: testing
    print "step 3: testing..."
    a = datetime.now()
    numTestSamples = test_x.shape[0]
    matchCount = 0
    # Evaluate only a tenth of the test set to keep the run time reasonable.
    test_num = numTestSamples/10
    for i in xrange(test_num):
        predict = kNNClassify(test_x[i], train_x, train_y, 3)
        if predict == test_y[i]:
            matchCount += 1
        if i % 100 == 0:
            print "完成%d张图片"%(i)
    accuracy = float(matchCount) / test_num
    b = datetime.now()
    print "一共运行了%d秒"%((b-a).seconds)
    ## step 4: show the result
    print "step 4: show the result..."
    print 'The classify accuracy is: %.2f%%' % (accuracy * 100)
if __name__ == '__main__':
testHandWritingClass()
|
pugmajere/fc-effects | layouts/create_strands.py | <reponame>pugmajere/fc-effects
#!/usr/bin/python
points = []
# Half of the 50-LED strand times the 0.01 spacing: centres strands on the origin.
adj = 50 / 2 * 0.01

def _strand_point(x, y_slope, y_offset):
    """Shared line equation: x advances at 0.01/step, z at 0.05/step."""
    return [0.01 * x - adj, y_slope * x - y_offset, 0.05 * x - adj]

def f(x):
    """First strand's point x."""
    return _strand_point(x, 0.2, adj)

def g(x):
    """Second strand's point x, offset further along y."""
    return _strand_point(x, 0.25, adj * 2)

def h(x):
    """Third strand's point x, offset still further along y."""
    return _strand_point(x, 0.175, adj * 3)
for i in range(50):
points.append(f(i))
for i in range(50):
points.append(g(i))
for i in range(50):
points.append(h(i))
def PointToString(point):
    """Format a 3-vector as fcserver layout JSON: {"point": [x, y, z]}."""
    x, y, z = point[0], point[1], point[2]
    return '{"point": [%f, %f, %f]}' % (x, y, z)
def PointsToStrings(points):
    """Render every point through PointToString."""
    return list(map(PointToString, points))
print '[\n %s\n]' % ',\n '.join(PointsToStrings(points))
|
Pheonnexx/alexa_pokemon | app/grab_pokemon_info.py | <reponame>Pheonnexx/alexa_pokemon<filename>app/grab_pokemon_info.py
import logging
import requests
import json
from flask import Flask, render_template
from flask_ask import Ask, statement, question, session
logging.getLogger("flask_ask").setLevel(logging.DEBUG)
@ask.launch
def start_pokemon_ask():
    """Alexa launch handler: greet the user with the 'welcome' template.

    NOTE(review): `ask` (and `app`) are never defined in this module --
    presumably `from app import app, ask` is missing, so the decorator
    raises NameError at import time. Confirm against app/__init__.py.
    """
    welcome_to_pokemon_ask_msg = render_template('welcome')
    return question(welcome_to_pokemon_ask_msg)
@ask.intent("AllStatsIntent", convert={'pokemon_name': str})
def get_all_stats_for_pokemon(pokemon_name):
    """Alexa intent handler: look a Pokemon up on pokeapi and speak its types.

    NOTE(review): the loop below appends the whole `types` entry (a dict in
    the pokeapi schema), not a type name -- presumably
    types['type']['name'] was intended; confirm against the template.
    """
    response = requests.get('http://pokeapi.co/api/v2/pokemon/{}'.format(pokemon_name))
    pokemon = json.loads(response.text)
    if pokemon:
        pokemon_name = pokemon['name']
        pokemon_types = []
        for types in pokemon['types']:
            pokemon_types.append(types)
        msg = render_template('found_pokemon', name = pokemon_name, types = pokemon_types)
    else:
        msg = render_template('pokemon_not_found')
    return statement(msg)
if __name__ == '__main__':
app.run(debug=True)
|
Pheonnexx/alexa_pokemon | app/__init__.py | from flask import Flask
from flask_ask import Ask
app = Flask(__name__)
ask = Ask(app, "/")
|
mrasskazov/tf-dev-env | scripts/gather-unittest-targets.py | <reponame>mrasskazov/tf-dev-env<gh_stars>10-100
#!/usr/bin/env python
import json
import sys
import os
from xml.etree import ElementTree
# stdin: a patchsets_info.json array in plain gerrit structure.
patchsets = json.load(sys.stdin)
home_dir = os.getenv("HOME", '/root')
with open("%s/contrail/controller/ci_unittests.json" % home_dir, 'r') as fh:
    unittests = json.load(fh)
# load vnc structure to evaluate UT targets
# stdin is a patchsets_info.json file which has gerrit plain structure
# ci_unittests.json has vnc structure
with open("%s/contrail/.repo/manifest.xml" % home_dir, 'r') as f:
    vnc_raw = ElementTree.parse(f).getroot()
# Map remote name -> last path component of its fetch URL.
remotes = dict()
for remote in vnc_raw.findall(".//remote"):
    remotes[remote.get('name')] = remote.get('fetch').split('/')[-1]
# Map "<remote>/<project-name>" -> checkout path from the repo manifest.
projects = dict()
for project in vnc_raw.findall(".//project"):
    projects[remotes[project.get('remote')] + '/' + project.get('name')] = project.get('path')
# Collect every changed file, prefixed by its project's checkout path.
review_files = set()
for patchset in patchsets:
    if patchset["project"] not in projects:
        continue
    path = projects[patchset["project"]]
    review_files.update([path + '/' + file for file in patchset["files"]])
# A package's targets are selected when any of its source directories is a
# substring of a changed file's path.
actual_targets = set()
misc_targets = set()
for ffile in review_files:
    for package in unittests.keys():
        for sd in unittests[package]["source_directories"]:
            if sd in ffile:
                actual_targets.update(unittests[package]["scons_test_targets"])
                misc_targets.update(unittests[package]["misc_test_targets"])
                break
# No match at all: fall back to the 'default' target group.
if not actual_targets:
    actual_targets = set(unittests['default']["scons_test_targets"])
    misc_targets = set(unittests['default']["misc_test_targets"])
# Misc targets are indirect: each names another group whose scons targets apply.
for misc_target in misc_targets:
    actual_targets.update(unittests[misc_target]["scons_test_targets"])
for target in actual_targets:
    print(target)
|
mrasskazov/tf-dev-env | scripts/controller_ut/run-tests.py | #!/usr/bin/env python3
import os
import random
import sys
import json
import jinja2
import argparse
import subprocess
import string
import logging
import shutil
from lxml import etree
logging.basicConfig(level=logging.DEBUG)
class TestResult:
    """Outcome codes for a single test-suite run (plain ints, not enum.Enum)."""
    SUCCESS = 0
    FAILURE = 1
    MISSING_XML = 2
    MISSING_LOG = 3
class TestSuite:
    """A parsed JUnit <testsuite>: its name, summary counters, and cases."""

    def __init__(self, name):
        self.name = name
        self.disabled = None
        self.errors = None
        self.failures = None
        self.test_cases = []

    @property
    def failed(self):
        """True if any contained case finished with a status other than "run"."""
        return any(case.status != "run" for case in self.test_cases)
class TestCase:
    """A parsed JUnit <testcase>: identity, run status, duration, failures."""
    def __init__(self, name):
        self.name = name
        self.classname = None  # enclosing class, from the XML attribute
        self.status = None  # "run" for executed tests (gtest convention)
        self.time = None  # wall-clock duration string from the XML
        self.failures = []  # list of TestFailure objects
class TestFailure:
    """One <failure> element of a test case: message, type, and body text."""
    def __init__(self):
        self.message = ""
        self.type = ""
        self.data = ""  # the failure element's text content
class TungstenTestRunner(object):
    def __init__(self):
        """Initialise empty runner state; parse_arguments() fills self.args."""
        self.args = None  # argparse.Namespace after parse_arguments()
        self.tests = []  # matched test descriptions from describe_tests()
        self.test_results = {}  # relative node path -> {"result", "details"}
    def parse_arguments(self):
        """Parse CLI options into self.args; default -j to the CPU count."""
        parser = argparse.ArgumentParser(description="Tungsten Test Runner")
        parser.add_argument("--debug", dest="debug", action="store_true")
        # --less-strict clears `strict`, which later disables the heap checker.
        parser.add_argument("--less-strict", dest="strict", action="store_false")
        parser.add_argument("--skip-tests", dest="skip_tests", action="store")
        parser.add_argument("-j", help="Allow N jobs at once for scons run.", dest="job_count", type=int)
        parser.add_argument("targets", type=str, nargs="+")
        self.args = parser.parse_args()
        if not self.args.job_count:
            self.args.job_count = os.cpu_count()
    def _get_relative_path(self, path):
        """Return the suffix of `path` starting at "build/".

        NOTE(review): if "build/" is absent, find() returns -1 and the slice
        yields only the last character -- confirm all callers pass build paths.
        """
        rel_start = path.find("build/")
        return path[rel_start:]
    def _skip_tests_args(self):
        """Return "--skip-tests=..." if requested AND supported by this SCons.

        Probes `scons --help` for the option so the flag is only passed to
        SCons versions that understand it; returns "" otherwise.
        """
        args = ""
        if not self.args.skip_tests:
            return args
        command = [shutil.which("python2"),
                   shutil.which("scons"),
                   "--help"]
        lines = subprocess.check_output(command).decode('utf-8').split("\n")
        for line in lines:
            if line.strip().split('=')[0] == '--skip-tests':
                args = '--skip-tests=' + str(self.args.skip_tests)
                break
        return args
    def describe_tests(self):
        """Ask SCons (--describe-tests) which tests match self.args.targets
        and seed self.tests / self.test_results from its JSON-line output."""
        logging.info("Gathering tests for the following targets: %s", (self.args.targets))
        command = [shutil.which("python2"),
                   shutil.which("scons"),
                   "--describe-tests",
                   self._skip_tests_args()] + self.args.targets
        lines = subprocess.check_output(command).decode('utf-8').split("\n")
        for line in lines:
            # SCons mixes build chatter with JSON lines; keep only the JSON.
            if len(line) == 0 or line[0] != '{':
                logging.debug("Not a valid JSON: '%s'", line)
                continue
            test_details = json.loads(line)
            if test_details['matched']:
                self.tests += [test_details]
        for test in self.tests:
            # Every matched test starts as SUCCESS; analysis may flip it later.
            key = self._get_relative_path(test['node_path'])
            self.test_results[key] = {"result": "SUCCESS", "details": []}
        logging.debug("Found %d tests for targets.", len(self.tests))
    def run_tests(self, targets=None):
        """Run tests with SCons, optionally overriding targets to execute.

        :param targets: target list; defaults to self.args.targets
        :return: (scons exit code, the targets actually run)
        """
        if targets is None:
            targets = self.args.targets
        scons_env = os.environ.copy()
        args = []
        if 'KVERS' in scons_env:
            # Point kernel-module builds at the matching kernel headers.
            args += ['--kernel-dir=/lib/modules/{}/build'.format(scons_env['KVERS'])]
        if not self.args.strict:
            # --less-strict mode: disable the tcmalloc heap checker.
            scons_env['NO_HEAPCHECK'] = '1'
        command = [shutil.which("python2"),
                   shutil.which("scons"),
                   "-j", str(self.args.job_count),
                   "--keep-going",
                   self._skip_tests_args()] + args + targets
        logging.info("Executing SCons command: %s", " ".join(command))
        rc = subprocess.call(command, env=scons_env)
        return rc, targets
    def _parse_junit_xml(self, xml_path):
        """Parse the XML file and return all tests that were executed.

        :param xml_path: path to a JUnit-style XML report
        :return: (TestResult code, list of TestSuite objects or None when
            the XML file is missing)
        """
        if not os.path.exists(xml_path):
            return (TestResult.MISSING_XML, None)
        logging.debug("Parsing %s", xml_path)
        with open(xml_path, "rb") as fh:
            xml_doc = fh.read()
        soup = etree.fromstring(xml_doc)
        status = TestResult.SUCCESS
        suite_objs = []
        # check if the root tag is testsuite, and if not, find
        # all testsuite tags under the root tag.
        if soup.tag == 'testsuite':
            suites = [soup]
        else:
            assert soup.tag == 'testsuites'
            suites = soup.findall("testsuite")
        for suite in suites:
            # Any suite-level error/failure count marks the whole run failed.
            if int(suite.attrib["errors"]) > 0 or int(suite.attrib["failures"]) > 0:
                status = TestResult.FAILURE
            suite_obj = TestSuite(name=suite.attrib["name"])
            # XXX(kklimonda): see if those can be generated from test cases
            for attr in ["disabled", "errors", "failures"]:
                if attr in suite.attrib:
                    setattr(suite_obj, attr, suite.attrib[attr])
            for test in suite.findall('testcase'):
                test_obj = TestCase(name=test.attrib['name'])
                for attr in ["classname", "status", "time"]:
                    if attr in test.attrib:
                        setattr(test_obj, attr, test.attrib[attr])
                failures = test.findall('failure')
                if failures:
                    for failure in failures:
                        fail_obj = TestFailure()
                        for attr in ["message", "type"]:
                            if attr in failure.attrib:
                                setattr(fail_obj, attr, failure.attrib[attr])
                        fail_obj.data = failure.text
                        test_obj.failures += [fail_obj]
                suite_obj.test_cases += [test_obj]
            suite_objs += [suite_obj]
        return status, suite_objs
def _store_test_results(self, suite, result, tests):
    """Archive the suite's XML/log files under unique names and record results.

    Renames the report files with a random suffix so consecutive runs do not
    overwrite them, then appends a detail entry to self.test_results[key].
    """
    key = self._get_relative_path(suite['node_path'])
    xml_basepath = self._get_relative_path(os.path.splitext(suite['xml_path'])[0])
    log_basepath = self._get_relative_path(os.path.splitext(suite['log_path'])[0])
    rnd_suffix_len = 8
    # If there is no log file, assume a total failure and store that info.
    if not os.path.exists(suite['log_path']):
        result = TestResult.MISSING_LOG
    # Pick a random suffix that collides with neither the XML nor log name.
    while True:
        random_string = "".join([random.choice(string.ascii_lowercase) for i in range(rnd_suffix_len)])
        xml_path = xml_basepath + "." + random_string + ".xml"
        log_path = log_basepath + "." + random_string + ".log"
        if not (os.path.exists(xml_path) or os.path.exists(log_path)):
            break
    if os.path.exists(suite['xml_path']):
        os.rename(suite['xml_path'], xml_path)
    else:
        logging.warning('{} does not exist!'.format(suite['xml_path']))
    if os.path.exists(suite['log_path']):
        os.rename(suite['log_path'], log_path)
    else:
        logging.warning('{} does not exist!'.format(suite['log_path']))
    result_text = "SUCCESS" if result == TestResult.SUCCESS else "FAILURE"
    self.test_results[key]['result'] = result_text
    self.test_results[key]["details"] += [{
        "result": result,
        "xml_path": xml_path,
        "log_path": log_path,
        "tests": tests
    }]
def _get_test_for_target(self, target):
    """Return the test dict whose relative node path equals *target*.

    Raises RuntimeError when no registered test matches.
    """
    matches = (entry for entry in self.tests
               if self._get_relative_path(entry['node_path']) == target)
    found = next(matches, None)
    if found is None:
        raise RuntimeError("No test found for target " + target)
    return found
def analyze_test_results(self, targets=None):
    """Parses XML output from tests looking for failures.

    Parse XML output from tests and keep track of any failures, also
    renaming XML and log files so they are not overwritten by consecutive
    runs.

    Returns a (global_status, failed_targets) tuple.
    """
    global_status = TestResult.SUCCESS
    failed_targets = []
    # if we have not received targets, we want to analyze everything - pull
    # targets directly from self.tests.
    if not targets:
        targets = [self._get_relative_path(t['node_path']) for t in self.tests]
    for target in targets:
        test = self._get_test_for_target(target)
        logging.debug("Analyzing test results for %s", test['node_path'])
        status, tests = self._parse_junit_xml(test['xml_path'])
        if status == TestResult.MISSING_XML:
            logging.warning("Test %s generated no XML - assuming failure.", test['node_path'])
        self._store_test_results(test, status, tests)
        # MISSING_XML (and any other non-success status) marks the run failed.
        if status != TestResult.SUCCESS:
            global_status = TestResult.FAILURE
            failed_targets += [self._get_relative_path(test['node_path'])]
    return global_status, failed_targets
def generate_test_report(self, scons_rc, final_result):
    """Render and print a human-readable summary of all stored test results.

    Rendering errors are deliberately swallowed (and printed) so a report
    problem cannot fail an otherwise-green job.
    """
    # NOTE(review): the template reads test.test / test.failures / test.errors,
    # which are not keys stored by _store_test_results (it stores 'tests');
    # Jinja2 silently renders missing attributes — confirm intended keys.
    tpl = """Tungsten Test Runner Results
============================
SCons targets executed:
{% for target in scons_targets %}
{{ target }}
{% endfor %}
SCons Result: {{ scons_rc }}
Analyzer Result: {{ final_result }}
Test Results:
{% for key, values in results.items() %}
========================
SCons target: {{ key }}
Result: {{ values['result'] }}
------------------------
{% for test in values['details'] %}
Run #{{ loop.index }}
Result: {{ test.result }}
Tests: {{ test.test | length }}
Failures: {{ test.failures }}
Errors: {{ test.errors }}
XML Log: {{ test.xml_path }}
Console Log: {{ test.log_path }}
Details:
{% for test_suite in test.tests -%}
{% for test_case in test_suite.test_cases -%}
{% if test_case.failures | length > 0 %}
{{- test_suite.name }}.{{- test_case.name }} - FAILED
{% for failure in test_case.failures %}
{{- failure.data -}}
{%- endfor -%}
{% elif test_case.status == "notrun" -%}
{{- test_suite.name }}.{{- test_case.name }} - SKIPPED
{% else %}
{{- test_suite.name }}.{{- test_case.name }} - SUCCESS
{% endif -%}
{% endfor -%}
{% endfor -%}
{% endfor -%}
{% endfor -%}
"""
    text = ''
    template = jinja2.Template(tpl)
    ctx = {
        "scons_targets": self.args.targets,
        "scons_rc": scons_rc,
        "final_result": final_result,
        "results": self.test_results}
    try:
        text = template.render(ctx)
    except Exception as e:
        print('Unit test report generation failed!')
        print('The exception is ignored to allow the job to successfully finish if no tests '
              'failed.')
        print('See https://contrail-jws.atlassian.net/browse/JD-475 for more information.')
        print(e)
    print(text)
def main():
    """Entry point: run the SCons test targets with up to three attempts.

    After each run the JUnit XML output is analyzed; only failed targets are
    retried. Exits with the last SCons return code.
    """
    runner = TungstenTestRunner()
    runner.parse_arguments()
    runner.describe_tests()
    max_attempts = 3
    failed_targets = None
    for counter in range(max_attempts):
        rc, targets = runner.run_tests(targets=failed_targets)
        if rc > 0:
            logging.info("SCons failed with exit code %d. Analyzing results.", rc)
        else:
            logging.info("SCons succeeded. Analyzing results.")
        # First analysis is done over all tests, because at this point
        # a) we want to analyze everything
        # b) targets that we have are "generic", not for each test - can't
        #    match it against tests that we store.
        result, failed_targets = runner.analyze_test_results(targets=(None if counter == 0 else targets))
        logging.info("Analyzer result is %s", "SUCCESS" if result == TestResult.SUCCESS else "FAILURE")
        if rc > 0 and result == TestResult.SUCCESS:
            logging.error("SCons failed, but analyzer didn't find any errors.")
            if not failed_targets:
                logging.critical("Analyzer didn't find targets to retry. Exiting.")
                sys.exit(rc)
        if result == TestResult.SUCCESS:
            break
        logging.warning("Test Failure, {} targets failed:\n".format(len(failed_targets)) +
                        "\n\t".join(failed_targets))
        # BUG FIX: 'counter' is the 0-based attempt index, not the number of
        # retries left; report the actual remaining attempts.
        logging.info("Retrying, %d attempts remaining.", max_attempts - 1 - counter)
    runner.generate_test_report(rc, "SUCCESS" if result == TestResult.SUCCESS else "FAILURE")
    sys.exit(rc)


if __name__ == "__main__":
    main()
|
mrasskazov/tf-dev-env | scripts/patch-repo-manifest.py | #!/usr/bin/env python
import argparse
import json
import logging
import os
import sys
import traceback
from xml.etree import ElementTree
def dbg(msg):
    # Shorthand for module-level debug logging.
    logging.debug(msg)


def err(msg):
    # Shorthand for module-level error logging.
    logging.error(msg)
class Manifest(object):
    """Wrapper around a repo-tool XML manifest that can patch remotes and branches."""

    def __init__(self, file, remote):
        # 'remote' is the base fetch URL used when new remotes are added.
        self.remote = remote
        if file:
            with open(file, 'r') as f:
                self._root = ElementTree.parse(f).getroot()
        else:
            # No source file: start from an empty <manifest> element.
            self._root = ElementTree.fromstring('<manifest></manifest>')

    def add_remote(self, org):
        """Ensure a 'gerritreview-<org>' remote element exists; return its name."""
        remote_name = 'gerritreview-' + org
        xpath = './/remote[@name=\'%s\']' % remote_name
        if not self._root.findall(xpath):
            remote = ElementTree.Element('remote', {'fetch': os.path.join(self.remote, org), 'name': remote_name})
            self._root.insert(0, remote)
        return remote_name

    def set_branch_default(self, branch):
        """Replace the last path component of each <default> revision with *branch*."""
        defaults = self._root.findall('.//default')
        if defaults:
            for default in defaults:
                rev = default.get('revision').split('/')[:-1]
                rev.append(branch)
                # NOTE(review): rev always contains at least *branch* after the
                # append, so the bare-branch fallback below is effectively dead.
                b = branch if not rev else "/".join(rev)
                default.set('revision', b)

    def _apply_patch(self, patch):
        """Point matching <project> entries at the patch's remote (and branch, if given)."""
        branch = patch.get('branch', None)
        org_project = patch['project']
        # NOTE(review): assumes 'org/project' contains exactly one slash;
        # deeper paths would drop trailing components — confirm input format.
        org = org_project.split('/')[0]
        project = org_project.split('/')[1]
        remote = self.add_remote(org)
        xpath = './/project[@name=\'%s\']' % project
        for p in self._root.findall(xpath):
            p.set('remote', remote)
            if branch:
                p.set('revision', branch)

    def apply_patches(self, patchsets):
        """Apply every patchset dict in *patchsets* to the manifest."""
        for p in patchsets:
            self._apply_patch(p)

    def dump(self, file):
        """Serialize the manifest to *file*, or to stdout when *file* is falsy."""
        if not file:
            ElementTree.dump(self._root)
            return
        with open(file, "w") as f:
            f.write(ElementTree.tostring(self._root, encoding='utf-8').decode('utf-8'))
def load_patchsets(file):
    """Read the JSON document at *file* and return the parsed patchset list."""
    with open(file, 'r') as handle:
        return json.load(handle)
def main():
    """CLI entry point: load a manifest, patch remotes/branches, and emit the result."""
    parser = argparse.ArgumentParser(
        description="TF tool for Gerrit patchset dependencies resolving")
    parser.add_argument("--debug", dest="debug", action="store_true")
    parser.add_argument("--source", help="Source file with manifest", dest="source", type=str)
    parser.add_argument("--remote", help="Remote to set in manifest", dest="remote", type=str)
    parser.add_argument("--branch", help="Branch", dest="branch", type=str, default=None)
    parser.add_argument("--patchsets", help="File with patchsets", dest="patchsets", type=str, default=None)
    parser.add_argument("--output",
                        help="Save result into the file instead stdout",
                        default=None, dest="output", type=str)
    args = parser.parse_args()
    log_level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(level=log_level)
    # Patchsets need a fetch base, so --remote is mandatory with --patchsets.
    if not args.remote and args.patchsets:
        err("ERROR: Please specify remote cause patchsets is present")
        sys.exit(1)
    try:
        manifest = Manifest(args.source, args.remote)
        if args.branch:
            manifest.set_branch_default(args.branch)
        if args.patchsets:
            manifest.apply_patches(load_patchsets(args.patchsets))
        manifest.dump(args.output)
    except Exception as e:
        # Show the full traceback for debugging, then fail with a short message.
        print(traceback.format_exc())
        err("ERROR: failed patch manifest: %s" % e)
        sys.exit(1)


if __name__ == "__main__":
    main()
|
MShokry/AIND-Planning | example_have_cake.py | from aimacode.logic import PropKB
from aimacode.planning import Action
from aimacode.search import (
Node, breadth_first_search, astar_search, depth_first_graph_search,
uniform_cost_search, greedy_best_first_graph_search, Problem,
)
from aimacode.utils import expr
from lp_utils import (
FluentState, encode_state, decode_state
)
from my_planning_graph import PlanningGraph
from run_search import run_search
from functools import lru_cache
class HaveCakeProblem(Problem):
    """The classic 'have your cake and eat it too' planning problem (AIMA example)."""

    def __init__(self, initial: FluentState, goal: list):
        # state_map fixes the fluent ordering used by encode_state/decode_state.
        self.state_map = initial.pos + initial.neg
        Problem.__init__(self, encode_state(initial, self.state_map), goal=goal)
        self.actions_list = self.get_actions()

    def get_actions(self):
        """Return the two domain actions: Eat(Cake) and Bake(Cake)."""
        # Eat(Cake): requires Have(Cake); adds Eaten(Cake), removes Have(Cake).
        precond_pos = [expr("Have(Cake)")]
        precond_neg = []
        effect_add = [expr("Eaten(Cake)")]
        effect_rem = [expr("Have(Cake)")]
        eat_action = Action(expr("Eat(Cake)"),
                            [precond_pos, precond_neg],
                            [effect_add, effect_rem])
        # Bake(Cake): only possible when Have(Cake) is false; produces a cake.
        precond_pos = []
        precond_neg = [expr("Have(Cake)")]
        effect_add = [expr("Have(Cake)")]
        effect_rem = []
        bake_action = Action(expr("Bake(Cake)"),
                             [precond_pos, precond_neg],
                             [effect_add, effect_rem])
        return [eat_action, bake_action]

    def actions(self, state: str) -> list:  # of Action
        """Return the actions whose preconditions hold in the encoded *state*."""
        possible_actions = []
        kb = PropKB()
        kb.tell(decode_state(state, self.state_map).pos_sentence())
        for action in self.actions_list:
            is_possible = True
            # All positive preconditions must be known true...
            for clause in action.precond_pos:
                if clause not in kb.clauses:
                    is_possible = False
            # ...and no negative precondition may be known true.
            for clause in action.precond_neg:
                if clause in kb.clauses:
                    is_possible = False
            if is_possible:
                possible_actions.append(action)
        return possible_actions

    def result(self, state: str, action: Action):
        """Return the successor state string after applying *action* to *state*."""
        new_state = FluentState([], [])
        old_state = decode_state(state, self.state_map)
        # Keep positive fluents the action does not remove, then add its add-effects.
        for fluent in old_state.pos:
            if fluent not in action.effect_rem:
                new_state.pos.append(fluent)
        for fluent in action.effect_add:
            if fluent not in new_state.pos:
                new_state.pos.append(fluent)
        # Keep negative fluents the action does not add, then add its removals.
        for fluent in old_state.neg:
            if fluent not in action.effect_add:
                new_state.neg.append(fluent)
        for fluent in action.effect_rem:
            if fluent not in new_state.neg:
                new_state.neg.append(fluent)
        return encode_state(new_state, self.state_map)

    def goal_test(self, state: str) -> bool:
        """True when every goal clause holds among the state's positive fluents."""
        kb = PropKB()
        kb.tell(decode_state(state, self.state_map).pos_sentence())
        for clause in self.goal:
            if clause not in kb.clauses:
                return False
        return True

    def h_1(self, node: Node):
        # note that this is not a true heuristic
        h_const = 1
        return h_const

    @lru_cache(maxsize=8192)
    def h_pg_levelsum(self, node: Node):
        # uses the planning graph level-sum heuristic calculated
        # from this node to the goal
        # requires implementation in PlanningGraph
        pg = PlanningGraph(self, node.state)
        pg_levelsum = pg.h_levelsum()
        return pg_levelsum

    @lru_cache(maxsize=8192)
    def h_ignore_preconditions(self, node: Node):
        # not implemented
        count = 0
        return count
def have_cake():
    """Build the standard HaveCakeProblem instance.

    Initially we have an uneaten cake; the goal is to both have it and
    have eaten it.
    """
    initial = FluentState([expr('Have(Cake)')],
                          [expr('Eaten(Cake)')])
    goal = [expr('Have(Cake)'),
            expr('Eaten(Cake)')]
    return HaveCakeProblem(initial, goal)
# Demo driver: describe the problem, then run a batch of search strategies.
if __name__ == '__main__':
    p = have_cake()
    print("**** Have Cake example problem setup ****")
    print("Initial state for this problem is {}".format(p.initial))
    print("Actions for this domain are:")
    for a in p.actions_list:
        print(' {}{}'.format(a.name, a.args))
    print("Fluents in this problem are:")
    for f in p.state_map:
        print(' {}'.format(f))
    print("Goal requirement for this problem are:")
    for g in p.goal:
        print(' {}'.format(g))
    print()
    print("*** Breadth First Search")
    run_search(p, breadth_first_search)
    print("*** Depth First Search")
    run_search(p, depth_first_graph_search)
    print("*** Uniform Cost Search")
    run_search(p, uniform_cost_search)
    print("*** Greedy Best First Graph Search - null heuristic")
    run_search(p, greedy_best_first_graph_search, parameter=p.h_1)
    print("*** A-star null heuristic")
    run_search(p, astar_search, p.h_1)
    # print("A-star ignore preconditions heuristic")
    # rs(p, "astar_search - ignore preconditions heuristic", astar_search, p.h_ignore_preconditions)
    # print(""A-star levelsum heuristic)
    # rs(p, "astar_search - levelsum heuristic", astar_search, p.h_pg_levelsum)
|
MShokry/AIND-Planning | lp_utils.py | from aimacode.logic import associate
from aimacode.utils import expr
class FluentState():
    """ state object for planning problems as positive and negative fluents
    """

    def __init__(self, pos_list, neg_list):
        # Lists of fluents that are true (pos) / false (neg) in this state.
        self.pos = pos_list
        self.neg = neg_list

    def sentence(self):
        """Return the conjunction of positive fluents and negated negative fluents."""
        return expr(conjunctive_sentence(self.pos, self.neg))

    def pos_sentence(self):
        """Return the conjunction of the positive fluents only."""
        return expr(conjunctive_sentence(self.pos, []))
def conjunctive_sentence(pos_list, neg_list):
    """ returns expr conjuntive sentence given positive and negative fluent lists

    :param pos_list: list of fluents
    :param neg_list: list of fluents
    :return: expr sentence of fluent conjunction
        e.g. "At(C1, SFO) ∧ ~At(P1, SFO)"
    """
    positives = [expr("{}".format(fluent)) for fluent in pos_list]
    negatives = [expr("~{}".format(fluent)) for fluent in neg_list]
    return associate('&', positives + negatives)
def encode_state(fs: FluentState, fluent_map: list) -> str:
    """ encode fluents to a string of T/F using mapping

    :param fs: FluentState object
    :param fluent_map: ordered list of possible fluents for the problem
    :return: str eg. "TFFTFT" string of mapped positive and negative fluents
    """
    return "".join('T' if fluent in fs.pos else 'F' for fluent in fluent_map)
def decode_state(state: str, fluent_map: list) -> FluentState:
    """ decode string of T/F as fluent per mapping

    :param state: str eg. "TFFTFT" string of mapped positive and negative fluents
    :param fluent_map: ordered list of possible fluents for the problem
    :return: fs: FluentState object

    lengths of state string and fluent_map list must be the same
    """
    fs = FluentState([], [])
    for flag, fluent in zip(state, fluent_map):
        target = fs.pos if flag == 'T' else fs.neg
        target.append(fluent)
    return fs
|
MShokry/AIND-Planning | rep.py | <gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
|
MShokry/AIND-Planning | mytest.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 16 11:06:40 2018
@author: mshokry
"""
import argparse
from timeit import default_timer as timer
from aimacode.search import InstrumentedProblem
from aimacode.search import (breadth_first_search, astar_search,
breadth_first_tree_search, depth_first_graph_search, uniform_cost_search,
greedy_best_first_graph_search, depth_limited_search,
recursive_best_first_search,Node)
from my_air_cargo_problems import air_cargo_p1, air_cargo_p2, air_cargo_p3
from my_planning_graph import PlanningGraph
from run_search import run_search
# Ad-hoc exploration script: describes and searches Air Cargo Problem 1
# with several uninformed and informed strategies.
print(air_cargo_p1)
# NOTE(review): the line above prints the factory function object, not a
# problem instance — likely leftover debugging.
#node = Node(air_cargo_p1.initial)
#print(node)
p = air_cargo_p1()
# NOTE(review): banner says "Have Cake" but the problem is Air Cargo 1.
print("**** Have Cake example problem setup ****")
print("Initial state for this problem is {}".format(p.initial))
print("Actions for this domain are:")
for a in p.actions_list:
    print(' {}{}'.format(a.name, a.args))
print("Fluents in this problem are:")
for f in p.state_map:
    print(' {}'.format(f))
print("Goal requirement for this problem are:")
for g in p.goal:
    print(' {}'.format(g))
print()
print("*** Breadth First Search")
run_search(p, breadth_first_search)
print("*** Depth First Search")
run_search(p, depth_first_graph_search)
print("*** Uniform Cost Search")
run_search(p, uniform_cost_search)
print("*** Greedy Best First Graph Search - null heuristic")
run_search(p, greedy_best_first_graph_search, parameter=p.h_1)
print("*** A-star null heuristic")
run_search(p, astar_search, p.h_1)
# Second pass: same problem, with the full set of heuristics.
p = air_cargo_p1()
print("**** Air Cargo Problem 1 setup ****")
print("Initial state for this problem is {}".format(p.initial))
print("Actions for this domain are:")
for a in p.actions_list:
    print(' {}{}'.format(a.name, a.args))
print("Fluents in this problem are:")
for f in p.state_map:
    print(' {}'.format(f))
print("Goal requirement for this problem are:")
for g in p.goal:
    print(' {}'.format(g))
print()
print("*** Breadth First Search")
run_search(p, breadth_first_search)
print("*** Depth First Search")
run_search(p, depth_first_graph_search)
print("*** Uniform Cost Search")
run_search(p, uniform_cost_search)
print("*** Greedy Best First Graph Search - null heuristic")
run_search(p, greedy_best_first_graph_search, parameter=p.h_1)
print("*** A-star null heuristic")
run_search(p, astar_search, p.h_1)
print("*** A-star ignore preconditions heuristic")
run_search(p, astar_search, p.h_ignore_preconditions)
print("*** A-star ignore delete lists heuristic")
run_search(p, astar_search, p.h_ignore_delete_lists)
print("*** A-star levelsum heuristic")
run_search(p, astar_search, p.h_pg_levelsum)
MShokry/AIND-Planning | run_search_timer.py | import time
import signal
from run_search import *
# Deadline bookkeeping (unused below; SIGALRM drives the actual timeout).
timeout = time.time() + 60*5 # 5 Minutes sleep


def handle(signum, frame):
    # SIGALRM handler: abort the in-progress search with an exception.
    raise TimeoutError()
    #raise Exception("Time Up exeeded 7 Minutes")


#signal.alarm(7*60)
# Run problem 3 with search strategies 1..10, each capped at 10 minutes.
for i in range(1, 11):
    start = timer()
    signal.signal(signal.SIGALRM, handle)
    signal.alarm(10*60)
    try:
        main("3", "%s" % i)
        #os.system("python run_search.py -p 3 -s 2 ")
    except:
        # NOTE(review): bare except also swallows unrelated errors, not only
        # the alarm's TimeoutError — consider narrowing the clause.
        end = timer()
        print("### Time Up exeeded 10 Minutes ###", end - start)
        continue
    signal.alarm(0)
# Cancel any pending alarm before exiting.
signal.alarm(0)
MShokry/AIND-Planning | oth.py |
import numpy as np
import math
class Clashes:
    """Count attacking queen pairs on an 8x8 board and score a placement.

    final_value returns 28 - clashes: the classic 8-queens fitness where
    28 = C(8, 2) is the number of queen pairs on a full board.
    """

    ## constructor does not do anything
    def __init__(self):
        # Historical candidate chromosomes kept for reference.
        # self.chromosome=[2,4,7,4,8,5,5,2];
        # self.chromosome=[3,2,7,5,2,4,1,1];
        # self.chromosome=[2,4,4,1,5,1,2,4];
        # self.chromosome=[3,2,5,4,3,2,1,3];
        ## GA Crossover Quiz
        # self.chromosome=[3,2,7,4,8,5,5,2];
        # self.chromosome=[3,2,7,5,2,1,2,4];
        self.chromosome = [2, 4, 7, 5, 2, 4, 1, 1]

    def final_value(self, grid):
        """Return 28 minus the total number of attacking pairs in *grid*."""
        row_clashes = self.calculate_row_clashes(grid)
        column_clashes = self.calculate_column_clashes(grid)
        forward_diag_clashes = self.calculate_forward_diag_clashes(grid)
        backward_diag_clashes = self.calculate_backward_diag_clashes(grid)
        return 28 - (row_clashes + column_clashes + forward_diag_clashes + backward_diag_clashes)

    def nCr(self, n, r):
        """Binomial coefficient C(n, r) via factorials."""
        f = math.factorial
        return f(n) // f(r) // f(n - r)

    def calculate_column_clashes(self, grid):
        """Count attacking pairs that share a column.

        BUG FIX: the original tested column_array[i].count('Q') — a single
        cell, which holds at most one 'Q' — so column clashes were always
        reported as zero. Count the queens in the whole column instead.
        """
        ncols = 8
        column_clashes = 0
        for i in range(ncols):
            column_array = self.get_column_array(grid, i)
            queens = column_array.count('Q')
            if queens > 1:
                n_c_r = self.nCr(queens, 2)
                column_clashes += n_c_r
                print(column_array, n_c_r)
        return column_clashes

    def calculate_row_clashes(self, grid):
        """Count attacking pairs that share a row."""
        row_clashes = 0
        row_array = self.get_rows(grid)
        for i in range(len(row_array)):
            if row_array[i].count('Q') > 1:
                n_c_r = self.nCr(row_array[i].count('Q'), 2)
                row_clashes += n_c_r
                print(row_array[i], n_c_r)
        return row_clashes

    def calculate_forward_diag_clashes(self, grid):
        """Count attacking pairs on forward ('/') diagonals."""
        forward_clashes = 0
        forward_diag = self.get_forward_diagonals(grid)
        print(forward_diag)
        for i in range(len(forward_diag)):
            print(forward_diag[i], forward_diag[i].count('Q'))
            if forward_diag[i].count('Q') > 1:
                n_c_r = self.nCr(forward_diag[i].count('Q'), 2)
                print(n_c_r)
                forward_clashes += n_c_r
        return forward_clashes

    def calculate_backward_diag_clashes(self, grid):
        """Count attacking pairs on backward ('\\') diagonals."""
        backward_clashes = 0
        reverse_diag = self.get_backward_diagonals(grid)
        print(reverse_diag)
        for j in range(len(reverse_diag)):
            print(reverse_diag[j], reverse_diag[j].count('Q'))
            if reverse_diag[j].count('Q') > 1:
                n_c_r = self.nCr(reverse_diag[j].count('Q'), 2)
                print(n_c_r)
                backward_clashes += n_c_r
        return backward_clashes

    def get_rows(self, grid):
        """Return the board as a list of row lists."""
        return [[c for c in r] for r in grid]

    def get_cols(self, grid):
        """Return an iterator of the board's columns."""
        return zip(*grid)

    def get_column_array(self, grid, i):
        """Return column *i* as a list of cells."""
        return [row[i] for row in grid]

    def get_backward_diagonals(self, grid):
        # Shear each row left by its index so backward diagonals line up as
        # columns; None padding marks cells outside the board.
        b = [None] * (len(grid) - 1)
        grid = [b[i:] + r + b[:i] for i, r in enumerate(self.get_rows(grid))]
        return [[c for c in r if not c is None] for r in self.get_cols(grid)]

    def get_forward_diagonals(self, grid):
        # Shear each row right by its index so forward diagonals line up as
        # columns; None padding marks cells outside the board.
        b = [None] * (len(grid) - 1)
        grid = [b[:i] + r + b[i:] for i, r in enumerate(self.get_rows(grid))]
        return [[c for c in r if not c is None] for r in self.get_cols(grid)]
##client call
##array = ['2','4','7','5','8','5','5','2']
# Example board: 8x8 grid of single-character cells, 'Q' marks a queen.
matrix = np.array([
    ['0','0','0','0','0','0','0','0'],
    ['Q','0','0','0','0','0','0','Q'],
    ['0','0','0','0','0','0','0','0'],
    ['0','Q','0','0','0','0','0','0'],
    ['0','0','0','Q','0','Q','Q','0'],
    ['0','0','0','0','0','0','0','0'],
    ['0','0','Q','0','0','0','0','0'],
    ['0','0','0','0','Q','0','0','0']
])
clash = Clashes()
# final_value = 28 - total attacking pairs for this placement.
total_clash = clash.final_value(matrix)
print('final value : ', total_clash)
binp-automation/ksfc-devsup | script/test.py | <filename>script/test.py
#!/usr/bin/python3
import os
import sys
import time
from subprocess import run
from lib.setup import setup
from lib.epics import CaRepeater, Ioc, caget, caput
def test_binding():
    # Run the Rust binding's own unit tests; require a zero exit status.
    assert run(["cargo", "test"], cwd="./binding").returncode == 0


def test_ioc():
    # Start a CA repeater and the IOC, give the device connection time to
    # settle, then check the identification PV matches the Agilent 53220A.
    with CaRepeater(), Ioc("iocBoot/iocrsbind/st.cmd"):
        time.sleep(2.0)
        assert caget("FC:IDN").startswith("Agilent Technologies,53220A,")
        #assert caget("FC:FREQ_1") == "0"


if __name__ == "__main__":
    setup()
    #test_binding()
    test_ioc()
|
cburmeister/pwgen-xkcd | pwgen_xkcd.py | <reponame>cburmeister/pwgen-xkcd
#!/usr/bin/python
import click
import random
@click.command()
@click.argument('filename', type=click.File('r'))
@click.option('--n', default=10, help='Number of passphrases to generate.')
@click.option('--max_words', default=4, help='Number of words.')
@click.option('--min_word_length', default=4, help='Minimum word length.')
# BUG FIX: help text typo ("Maxmimum").
@click.option('--max_word_length', default=8, help='Maximum word length.')
def main(filename, n, max_words, min_word_length, max_word_length):
    """
    Generate an xkcd passphrase randomly selected from a list of words.
    """
    def get_words(filename):
        # One candidate word per line of the input file.
        return filename.readlines()

    def get_candidates(words, min_length, max_length):
        # NOTE(review): len(x) still includes the trailing newline from
        # readlines, so the bounds are effectively shifted by one — confirm.
        return [x for x in words if min_length <= len(x) <= max_length]

    def get_random_words(words, num_words):
        # SECURITY FIX: passphrases should come from a CSPRNG, not the
        # default Mersenne Twister; SystemRandom draws from os.urandom.
        return random.SystemRandom().sample(words, num_words)

    def get_phrase(words):
        return ''.join([x.strip().lower() for x in words])

    words = get_words(filename)
    candidates = get_candidates(words, min_word_length, max_word_length)
    for _ in range(0, n):
        random_words = get_random_words(candidates, max_words)
        click.echo(get_phrase(random_words))


if __name__ == "__main__":
    main()
|
cburmeister/pwgen-xkcd | setup.py | from setuptools import setup
setup(
    name='pwgen_xkcd',
    version='1.0',
    url='http://github.com/cburmeister/pwgen_xkcd/',
    license='MIT',
    author='<NAME>',
    author_email='<EMAIL>',
    # BUG FIX: keyword was misspelled 'oescription', so setuptools never
    # received the package description.
    description='',
    py_modules=['pwgen_xkcd'],
    platforms='any',
    install_requires=[
        'click==3.3',
    ],
    entry_points='''
[console_scripts]
pwgen_xkcd=pwgen_xkcd:main
'''
)
|
ElliottYan/self-att | fairseq/criterions/cross_entropy_with_extra_loss.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import torch.nn.functional as F
from fairseq import utils
from . import FairseqCriterion, register_criterion, cross_entropy, label_smoothed_cross_entropy
@register_criterion('cross_entropy_extra_loss')
class CrossEntropyExtraLossCriterions(cross_entropy.CrossEntropyCriterion):
    """Cross-entropy criterion that adds a model-provided auxiliary loss term."""

    def __init__(self, args, task):
        super(CrossEntropyExtraLossCriterions, self).__init__(args, task)
        # Weight for the auxiliary loss (note: not applied in forward below).
        self.alpha = args.extra_loss_weight

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        net_output = model(**sample['net_input'])
        if type(net_output[1]) == dict:
            if 'extra_loss' in net_output[1].keys():
                extra_loss = net_output[1]['extra_loss']
            else:
                # NOTE(review): the assignment below is dead — the raise fires
                # immediately. Also, when net_output[1] is not a dict,
                # extra_loss is never bound and the '+=' below would raise
                # NameError; confirm the intended control flow.
                extra_loss = 0
                raise ValueError('Extra-loss criterions must be used by model with extra loss appended.')
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        lprobs = lprobs.view(-1, lprobs.size(-1))
        target = model.get_targets(sample, net_output).view(-1)
        # size_average/reduce are the legacy (pre-1.0 PyTorch) reduction flags.
        loss = F.nll_loss(lprobs, target, size_average=False, ignore_index=self.padding_idx,
                          reduce=reduce)
        loss += extra_loss
        sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']
        logging_output = {
            'loss': utils.item(loss.data) if reduce else loss.data,
            # NOTE(review): key is 'extra-loss' here but 'extra_loss' in the
            # label-smoothed variant — confirm which one aggregation expects.
            'extra-loss': utils.item(extra_loss.data) if reduce else extra_loss.data,
            'ntokens': sample['ntokens'],
            'nsentences': sample['target'].size(0),
            'sample_size': sample_size,
        }
        return loss, sample_size, logging_output

    @staticmethod
    def add_args(parser):
        """Add criterion-specific arguments to the parser."""
        cross_entropy.CrossEntropyCriterion.add_args(parser)
        parser.add_argument('--extra-loss-weight', default=1., type=float, metavar='D',
                            help='epsilon for extra loss, 0 means take no extra loss into account')
@register_criterion('label_smoothed_cross_entropy_extra_loss')
class LabelSmoothedCrossEntropyExtraLossCriterion(label_smoothed_cross_entropy.LabelSmoothedCrossEntropyCriterion):
    """Label-smoothed cross-entropy that adds a model-provided auxiliary loss term."""

    def __init__(self, args, task):
        super(LabelSmoothedCrossEntropyExtraLossCriterion, self).__init__(args, task)
        # Weight for the auxiliary loss (note: not applied in forward below).
        self.alpha = args.extra_loss_weight

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        net_output = model(**sample['net_input'])
        if type(net_output[1]) == dict:
            if 'extra_loss' in net_output[1].keys():
                extra_loss = net_output[1]['extra_loss']
            else:
                # NOTE(review): same control-flow concern as the plain
                # cross-entropy variant — the assignment is dead and the
                # non-dict path leaves extra_loss unbound; confirm intent.
                extra_loss = 0
                raise ValueError('Extra-loss criterions must be used by model with extra loss appended.')
        loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
        loss += extra_loss
        sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']
        logging_output = {
            'loss': utils.item(loss.data) if reduce else loss.data,
            'nll_loss': utils.item(nll_loss.data) if reduce else nll_loss.data,
            'extra_loss': utils.item(extra_loss.data) if reduce else extra_loss.data,
            'ntokens': sample['ntokens'],
            'nsentences': sample['target'].size(0),
            'sample_size': sample_size,
        }
        return loss, sample_size, logging_output

    def compute_loss(self, model, net_output, sample, reduce=True):
        """Return (smoothed_loss, nll_loss) over non-padding positions."""
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        lprobs = lprobs.view(-1, lprobs.size(-1))
        target = model.get_targets(sample, net_output).view(-1, 1)
        non_pad_mask = target.ne(self.padding_idx)
        nll_loss = -lprobs.gather(dim=-1, index=target)[non_pad_mask]
        # Uniform smoothing term: sum of log-probs over the full vocabulary.
        smooth_loss = -lprobs.sum(dim=-1, keepdim=True)[non_pad_mask]
        if reduce:
            nll_loss = nll_loss.sum()
            smooth_loss = smooth_loss.sum()
        eps_i = self.eps / lprobs.size(-1)
        loss = (1. - self.eps) * nll_loss + eps_i * smooth_loss
        return loss, nll_loss

    @staticmethod
    def add_args(parser):
        """Add criterion-specific arguments to the parser."""
        # todo : solve this ugly function call.
        label_smoothed_cross_entropy.LabelSmoothedCrossEntropyCriterion.add_args(parser)
        parser.add_argument('--extra-loss-weight', default=1., type=float, metavar='D',
                            help='epsilon for extra loss, 0 means take no extra loss into account')
sarthmit/mixture_density_VAEs | train_reg_gaussVAE.py | import os
from os.path import join as pjoin
import h5py
import cPickle as cp
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from models.gaussVAE import GaussVAE
from utils.sampling_utils import *
try:
import PIL.Image as Image
except ImportError:
import Image
# command line arguments
flags = tf.flags
flags.DEFINE_integer("batchSize", 100, "batch size.")
flags.DEFINE_integer("nEpochs", 500, "number of epochs to train.")
flags.DEFINE_float("adamLr", 5e-4, "AdaM learning rate.")
flags.DEFINE_integer("hidden_size", 500, "number of hidden units in en/decoder.")
flags.DEFINE_integer("latent_size", 75, "dimensionality of latent variables.")
flags.DEFINE_string("experimentDir", "MNIST/", "directory to save training artifacts.")
inArgs = flags.FLAGS
def get_file_name(expDir, vaeParams, trainParams):
    """Build a unique base name encoding the run's hyperparameters.

    Skips 'prior' and 'input_d' (the prior's mu/sigma are appended
    separately), then appends a numeric suffix if a results file with the
    resulting name already exists under expDir/train_logs.
    """
    # concat hyperparameters into file name
    pieces = ['_']
    for key, val in sorted(vaeParams.items()):
        if key not in ['prior', 'input_d']:
            pieces.append('{}_{}_'.format(key, val))
    for key in sorted(['mu', 'sigma']):
        pieces.append('{}_{}_'.format(key, vaeParams['prior'][key]))
    pieces.append('_adamLR_' + str(trainParams['adamLr']))
    output_file_base_name = ''.join(pieces)
    # check if results file already exists, if so, append a number
    results_file_name = pjoin(expDir, "train_logs/gauss_regVae_trainResults" + output_file_base_name + ".txt")
    file_exists_counter = 0
    while os.path.isfile(results_file_name):
        file_exists_counter += 1
        results_file_name = pjoin(expDir, "train_logs/gauss_regVae_trainResults" + output_file_base_name + "_" + str(file_exists_counter) + ".txt")
    if file_exists_counter > 0:
        output_file_base_name += "_" + str(file_exists_counter)
    return output_file_base_name
### Training function
def trainVAE(data, vae_hyperParams, hyperParams, param_save_path, logFile=None):
    """Train the Gaussian VAE with early stopping; returns the model.

    Saves parameters to param_save_path whenever the validation ELBO improves.
    (Python 2 / TF 0.x-era code: xrange, print statements, and the legacy
    TF session API are intentional.)
    """
    N_train, d = data['train'].shape
    N_valid, d = data['valid'].shape
    # Py2 integer division: samples that don't fill a whole batch are dropped.
    nTrainBatches = N_train/hyperParams['batchSize']
    nValidBatches = N_valid/hyperParams['batchSize']
    vae_hyperParams['batchSize'] = hyperParams['batchSize']
    # init Mix Density VAE
    model = GaussVAE(vae_hyperParams)
    # get training op
    optimizer = tf.train.AdamOptimizer(hyperParams['adamLr']).minimize(-model.elbo_obj)
    # get op to save the model
    persister = tf.train.Saver()
    with tf.Session(config=hyperParams['tf_config']) as s:
        s.run(tf.initialize_all_variables())
        # for early stopping
        best_elbo = -10000000.
        best_epoch = 0
        for epoch_idx in xrange(hyperParams['nEpochs']):
            # training
            train_elbo = 0.
            for batch_idx in xrange(nTrainBatches):
                x = data['train'][batch_idx*hyperParams['batchSize']:(batch_idx+1)*hyperParams['batchSize'],:]
                _, elbo_val = s.run([optimizer, model.elbo_obj], {model.X: x})
                train_elbo += elbo_val
            # validation
            valid_elbo = 0.
            for batch_idx in xrange(nValidBatches):
                x = data['valid'][batch_idx*hyperParams['batchSize']:(batch_idx+1)*hyperParams['batchSize'],:]
                valid_elbo += s.run(model.elbo_obj, {model.X: x})
            # check for ELBO improvement
            star_printer = ""
            train_elbo /= nTrainBatches
            valid_elbo /= nValidBatches
            if valid_elbo > best_elbo:
                best_elbo = valid_elbo
                best_epoch = epoch_idx
                star_printer = "***"
                # save the parameters
                persister.save(s, param_save_path)
            # log training progress
            logging_str = "Epoch %d. Train ELBO: %.3f, Validation ELBO: %.3f %s" %(epoch_idx+1, train_elbo, valid_elbo, star_printer)
            print logging_str
            if logFile:
                logFile.write(logging_str + "\n")
                logFile.flush()
            # check for convergence: no improvement within the lookahead
            # window, or training diverged to NaN.
            if epoch_idx - best_epoch > hyperParams['lookahead_epochs'] or np.isnan(train_elbo): break
    return model
### Marginal Likelihood Calculation
def calc_margLikelihood(data, model, param_file_path, vae_hyperParams, nSamples=50):
    """Estimate the mean log marginal likelihood by importance sampling.

    Restores saved parameters, draws nSamples log-likelihood sample sets,
    and combines them with a numerically-stable log-mean-exp. Returns NaN
    if every draw was degenerate. (Python 2 code: xrange/print intentional.)
    """
    N,d = data.shape
    # get op to load the model
    persister = tf.train.Saver()
    with tf.Session() as s:
        persister.restore(s, param_file_path)
        sample_collector = []
        for s_idx in xrange(nSamples):
            samples = s.run(model.get_log_margLL(N), {model.X: data})
            # Discard degenerate draws whose mean is NaN or Inf.
            if not np.isnan(samples.mean()) and not np.isinf(samples.mean()):
                sample_collector.append(samples)
        if len(sample_collector) < 1:
            print "\tMARG LIKELIHOOD CALC: No valid samples were collected!"
            return np.nan
        all_samples = np.hstack(sample_collector)
        # log-mean-exp across samples: subtract the per-row max for stability.
        m = np.amax(all_samples, axis=1)
        mLL = m + np.log(np.mean(np.exp( all_samples - m[np.newaxis].T ), axis=1))
    return mLL.mean()
### Sample Images
def sample_from_model(model, param_file_path, vae_hyperParams, image_file_path, nImages=100):
    """Draw nImages decoder samples and save them as one tiled PNG.

    NOTE(review): assumes 28x28 images and that nImages is a perfect square
    (the tile grid is sqrt(nImages) x sqrt(nImages)) — confirm for other
    datasets.  tile_raster_images is defined elsewhere in this file.
    """
    # get op to load the model
    persister = tf.train.Saver()
    with tf.Session() as s:
        persister.restore(s, param_file_path)
        samples = s.run(model.get_samples(nImages))
        image = Image.fromarray(tile_raster_images(X=samples, img_shape=(28, 28), tile_shape=(int(np.sqrt(nImages)), int(np.sqrt(nImages))), tile_spacing=(1, 1)))
        image.save(image_file_path+".png")
if __name__ == "__main__":
    # load MNIST (pre-binarized); the training split is shuffled in place
    f = h5py.File('./MNIST/data/binarized_mnist.h5')
    mnist = {'train':np.copy(f['train']), 'valid':np.copy(f['valid']), 'test':np.copy(f['test'])}
    np.random.shuffle(mnist['train'])
    # set architecture params (inArgs is the CLI namespace parsed earlier in this file)
    vae_hyperParams = {'input_d':mnist['train'].shape[1], 'hidden_d':inArgs.hidden_size, 'latent_d':inArgs.latent_size, 'prior':{'mu':0., 'sigma':1.}}
    # set training hyperparameters; cap each process at 50% of GPU memory
    train_hyperParams = {'adamLr':inArgs.adamLr, 'nEpochs':inArgs.nEpochs, 'batchSize':inArgs.batchSize, 'lookahead_epochs':25, \
                             'tf_config': tf.ConfigProto(gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5), log_device_placement=False)}
    # setup files to write results and save parameters
    outfile_base_name = get_file_name(inArgs.experimentDir, vae_hyperParams, train_hyperParams)
    logging_file = open(inArgs.experimentDir+"train_logs/gauss_regVae_trainResults"+outfile_base_name+".txt", 'w')
    param_file_name = inArgs.experimentDir+"params/gauss_regVae_params"+outfile_base_name+".ckpt"
    # train
    print "Training model..."
    model = trainVAE(mnist, vae_hyperParams, train_hyperParams, param_file_name, logging_file)
    # evaluate marginal likelihood on the held-out splits
    print "Calculating the marginal likelihood..."
    margll_valid = calc_margLikelihood(mnist['valid'], model, param_file_name, vae_hyperParams)
    margll_test = calc_margLikelihood(mnist['test'], model, param_file_name, vae_hyperParams)
    logging_str = "\n\nValidation Marginal Likelihood: %.3f, Test Marginal Likelihood: %.3f" %(margll_valid, margll_test)
    print logging_str
    logging_file.write(logging_str+"\n")
    logging_file.close()
    # draw some samples from the trained decoder
    print "Drawing samples..."
    sample_from_model(model, param_file_name, vae_hyperParams, inArgs.experimentDir+'samples/gauss_regVae_samples'+outfile_base_name)
|
sarthmit/mixture_density_VAEs | models/gaussVAE.py | <reponame>sarthmit/mixture_density_VAEs<filename>models/gaussVAE.py
import numpy as np
import tensorflow as tf
### Base neural network
def init_mlp(layer_sizes, std=.01):
    """Create weight and bias Variables for an MLP.

    Weights are drawn from N(0, std); biases start at zero.  Returns a dict
    {'w': [...], 'b': [...]} with one entry per consecutive layer pair.
    """
    weights = []
    biases = []
    for fan_in, fan_out in zip(layer_sizes[:-1], layer_sizes[1:]):
        weights.append(tf.Variable(tf.random_normal([fan_in, fan_out], stddev=std)))
        biases.append(tf.Variable(tf.zeros([fan_out,])))
    return {'w': weights, 'b': biases}
def mlp(X, params):
    """Forward pass: ReLU hidden layers, linear (logit) output layer."""
    activation = X
    for weight, bias in zip(params['w'][:-1], params['b'][:-1]):
        activation = tf.nn.relu(tf.matmul(activation, weight) + bias)
    return tf.matmul(activation, params['w'][-1]) + params['b'][-1]
def compute_nll(x, x_recon_linear):
    # Bernoulli negative log-likelihood per datapoint: sigmoid cross-entropy
    # on logits, summed over pixels, kept as a column vector.
    # NOTE(review): positional (logits, labels) ordering is the pre-1.0 TF
    # API — confirm the TF version pinned for this project.
    return tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(x_recon_linear, x), reduction_indices=1, keep_dims=True)

def gauss2gauss_KLD(mu_post, sigma_post, mu_prior, sigma_prior):
    """KL(posterior || prior) between diagonal Gaussians, summed over dims.

    Expands to (d^2 + sigma_q^2)/(2*sigma_p^2) + log(sigma_p/sigma_q) - 1/2
    per dimension, with d = mu_post - mu_prior.
    """
    d = (mu_post - mu_prior)
    d = tf.mul(d,d)
    return -.5 * tf.reduce_sum(-tf.div(d + tf.mul(sigma_post,sigma_post),sigma_prior*sigma_prior) \
                                   - 2*tf.log(sigma_prior) + 2.*tf.log(sigma_post) + 1., reduction_indices=1, keep_dims=True)

def log_normal_pdf(x, mu, sigma):
    """Log-density of x under N(mu, sigma), summed over the last axis.

    The constant 2.506628 is sqrt(2*pi), the Gaussian normalizer.
    """
    d = mu - x
    d2 = tf.mul(-1., tf.mul(d,d))
    s2 = tf.mul(2., tf.mul(sigma,sigma))
    return tf.reduce_sum(tf.div(d2,s2) - tf.log(tf.mul(sigma, 2.506628)), reduction_indices=1, keep_dims=True)
### Gaussian VAE Class (the old "Mixture Model" header was inaccurate:
### this class uses a single diagonal-Gaussian posterior and prior)
class GaussVAE(object):
    """Variational autoencoder with a diagonal-Gaussian posterior and a
    fixed isotropic Gaussian prior (pre-1.0 TF graph API: tf.mul/tf.div).
    """
    def __init__(self, hyperParams):
        # hyperParams keys: input_d, hidden_d, latent_d, prior{'mu','sigma'}
        self.X = tf.placeholder("float", [None, hyperParams['input_d']])
        self.prior = hyperParams['prior']
        self.encoder_params = self.init_encoder(hyperParams)
        self.decoder_params = self.init_decoder(hyperParams)
        # f_prop() also sets self.mu / self.sigma / self.z as side effects
        self.x_recons_linear = self.f_prop()
        self.elbo_obj = self.get_ELBO()
    def init_encoder(self, hyperParams):
        # shared trunk plus separate heads for mu and (exponentiated) sigma
        return {'base':init_mlp([hyperParams['input_d'], hyperParams['hidden_d']]),
                'mu':init_mlp([hyperParams['hidden_d'], hyperParams['latent_d']]),
                'sigma':init_mlp([hyperParams['hidden_d'], hyperParams['latent_d']])}
    def init_decoder(self, hyperParams):
        return init_mlp([hyperParams['latent_d'], hyperParams['hidden_d'], hyperParams['input_d']])
    def f_prop(self):
        # encode, then apply the reparameterization trick: z = mu + sigma*eps
        h1 = mlp(self.X, self.encoder_params['base'])
        self.mu = mlp(h1, self.encoder_params['mu'])
        self.sigma = tf.exp(mlp(h1, self.encoder_params['sigma']))
        self.z = self.mu + tf.mul(self.sigma, tf.random_normal(tf.shape(self.sigma)))
        x_recon_linear = mlp(self.z, self.decoder_params)
        return x_recon_linear
    def get_ELBO(self):
        # ELBO = E[log p(x|z)] - KL(q(z|x) || p(z)), averaged over the batch
        elbo = -compute_nll(self.X, self.x_recons_linear)
        elbo -= gauss2gauss_KLD(self.mu, self.sigma, self.prior['mu'], self.prior['sigma'])
        return tf.reduce_mean(elbo)
    def get_log_margLL(self, batchSize):
        # single-sample importance weight: log p(x|z) + log p(z) - log q(z|x)
        # NOTE(review): batchSize is currently unused — confirm intent.
        ll = -compute_nll(self.X, self.x_recons_linear)
        # calc prior
        log_prior = log_normal_pdf(self.z, self.prior['mu'], self.prior['sigma'])
        # calc post
        log_post = log_normal_pdf(self.z, self.mu, self.sigma)
        return ll + log_prior - log_post
    def get_samples(self, nImages):
        # sample z from the prior and decode to Bernoulli means (sigmoid)
        z = self.prior['mu'] + tf.mul(self.prior['sigma'], tf.random_normal((nImages, tf.shape(self.decoder_params['w'][0])[0])))
        return tf.sigmoid(mlp(z, self.decoder_params))
|
informix/compose-ha-demo | informix-server3/add.py | import requests
import json  # NOTE(review): currently unused — confirm before removing

# NOTE(review): this first query is immediately overwritten by the next
# assignment and never executed — confirm whether the 'create dbspace'
# call was also meant to run.
query='''{"$sql": "execute function admin ('create dbspace', 'dbs1', '/opt/ibm/data/spaces/dbs1', '20M', '0')" }'''
query='''{"$sql": "execute function admin ('cdr add trustedhost', 'server3 informix, server3.composehademo_default informix')" }'''
# Run the admin SQL through server1's REST sysadmin endpoint and report.
req = "http://server1:27018/sysadmin/system.sql?query="+query
reply = requests.get(req)
if reply.status_code == 200:
    print ("Query SUCCESS")
else:
    print ("Query FAILURE")
print (reply.content)
|
bashirmohd/Hyplate | webapp/app.py | from flask import Flask, render_template
from json import loads
from engine import *
app = Flask(__name__)
@app.route('/')
def home():
    """Landing page."""
    return render_template('hyplate/index.html')

@app.route('/register')
def register():
    """Account registration form."""
    return render_template('hyhub/oauth/register.html')

@app.route('/forgotpassword')
def forgotpassword():
    """Password-reset form."""
    return render_template('hyhub/oauth/forgotpassword.html')

@app.route('/login')
def login():
    """Sign-in form."""
    return render_template('hyhub/oauth/signin.html')
@app.route('/hyhub')
def hyhub():
    """Render the data hub page from the 'dataindex' MongoDB collection."""
    # Removed the unused local that pointed at 'static/db/dataindex.json':
    # the index is read from MongoDB, not from a static JSON file.
    dataindex = [i for i in connect_db('dataindex').find()]
    return render_template('hyhub/hyhub.html', dataindex=dataindex)
@app.route('/infographics')
def infographics():
    """Infographics gallery page."""
    return render_template('hyhub/infographics.html')
@app.route('/news')
def news():
    """Render the news page with shuffled articles from every RSS source.

    Each article dict is overlaid with its source document's fields (the
    source values win on key collisions).
    """
    collected = []
    for source in [doc for doc in connect_db('newssources').find()]:
        for article in read_rss(source['feed']):
            article.update(source)
            collected.append(article)
    shuffle(collected)
    return render_template('hyplate/news.html', news=collected)
|
bashirmohd/Hyplate | webapp/engine.py | from requests import get
from bs4 import BeautifulSoup
from datetime import datetime as dt
from random import shuffle
from pyquery import PyQuery
from pymongo import MongoClient
###Connect db
def connect_db(collection: str, db='Hyplate'):
    """Return a handle to ``collection`` in the local MongoDB ``db``."""
    client = MongoClient(host='127.0.0.1', port=27017)
    return client[db][collection]
##########
def read_rss(url):
    """Fetch an RSS feed and return a list of article dicts.

    Each dict carries: title, link, date, description, img, category.
    Parsing is best-effort — each field falls back to a default on failure,
    and an article that cannot be processed at all is skipped with a log line.
    """
    article_list = []
    r = get(url)
    soup = BeautifulSoup(r.content, features='xml')
    articles = soup.findAll('item')
    for a in articles:
        try:
            article_obj = {}
            try:
                article_obj['title'] = a.find('title').text
            except Exception:
                article_obj['title'] = ''
            try:
                article_obj['link'] = a.find('link').text
            except Exception:
                article_obj['link'] = ''
            # BUG FIX: 'date' used to be referenced in the except branch even
            # when a.find('pubDate') itself failed, raising NameError and
            # silently dropping the whole article; pre-initialise it.
            date = ''
            try:
                # first 17 chars, e.g. 'Mon, 01 Jan 2018'
                date = a.find('pubDate').text[0:17]
                article_obj['date'] = dt.strptime(date, '%a, %d %b %Y').date()
            except Exception:
                article_obj['date'] = date
            try:
                p1 = a.find('content:encoded')
                p1 = p1.text.replace('&lt;', '<').replace('&gt;', '>').replace(']]&gt;', '>').replace('<![CDATA[', '')
                p = PyQuery(p1)
                # BUG FIX: the fallback was ('img').attr('src') — a plain str
                # has no .attr, so it always raised; query the document.
                img = p('div img').attr('src') or p('img').attr('src')
                article_obj['description'] = p('p').text()[0:200]+'...'
                article_obj['img'] = img
            except Exception:
                p1 = a.find('description').text
                p1 = p1.replace('<![CDATA[', '')
                article_obj['description'] = p1[0:200]+'...'
                article_obj['img'] = '/static/assets/img/logos/infographics.png'
            try:
                article_obj['category'] = a.find('category').text
            except Exception:
                article_obj['category'] = 'Web Scraping'
            article_list.append(article_obj)
        except Exception:
            print('Cannot parse ' + url)
    return article_list
lanse011/tss-merge | merge.py | # Helper function to check if the two intervals overlap
def is_overlapping(a, b):
if b[0] > a[0] and b[0] < a[1]:
return True
else:
return False
class Merge:
    @staticmethod
    def merge(intervals):
        """Merge a list of [start, end] intervals into non-overlapping ones.

        Returns [] for an empty input, and an error string when an element
        is not a two-value list (kept for backward compatibility with
        callers that expect a message rather than an exception).
        Note: the input list is sorted in place.
        """
        # return empty list if intervals contains no data
        # (was ``len(intervals) is 0`` — identity comparison on an int,
        # which is a SyntaxWarning on Python 3.8+)
        if len(intervals) == 0:
            return []
        # check that the list only contains intervals of length 2
        for x in intervals:
            if type(x) is not list or len(x) != 2:
                return "'" + str(x) + "' is not an interval"
        # sort the intervals by their start value
        intervals.sort(key=lambda x: x[0])
        # seed the result with the first interval, then fold in the rest
        result = [intervals[0]]
        for current_interval in intervals[1:]:
            last_interval = result.pop()
            if is_overlapping(last_interval, current_interval):
                # extend the last interval to cover the current one
                result.append([last_interval[0],
                               max(last_interval[1], current_interval[1])])
            else:
                # disjoint: keep both
                result.append(last_interval)
                result.append(current_interval)
        return result
|
lanse011/tss-merge | test_merge.py | <reponame>lanse011/tss-merge
import unittest
from merge import Merge
class TestMerge(unittest.TestCase):
    """Unit tests for Merge.merge.

    The per-test descriptions used to sit *above* each method as bare
    class-level string expressions, which the interpreter evaluates and
    discards; they are now proper method docstrings so test runners can
    report them.
    """

    def test_merge(self):
        """Merge with valid data."""
        test_intervals = [[25,30], [2,19], [14, 23], [4,8]]
        merge_result = Merge.merge(intervals=test_intervals)
        self.assertEqual([[2,23], [25,30]], merge_result)

    def test_merge_with_wrong_interval_input1(self):
        """An interval with 3 values instead of 2 yields an error string."""
        test_intervals = [[25, 30, 40], [2,19], [14, 23], [4,8]]
        merge_result = Merge.merge(intervals=test_intervals)
        self.assertEqual("'[25, 30, 40]' is not an interval", merge_result)

    def test_merge_with_wrong_interval_input2(self):
        """A character instead of a list yields an error string."""
        test_intervals = ['A', [2,19], [14, 23], [4,8]]
        merge_result = Merge.merge(intervals=test_intervals)
        self.assertEqual("'A' is not an interval", merge_result)

    def test_merge_with_empty_interval_input(self):
        """An empty intervals list merges to an empty list."""
        test_intervals = []
        merge_result = Merge.merge(intervals=test_intervals)
        self.assertEqual([], merge_result)


if __name__ == '__main__':
    unittest.main()
bdowning/aiotools | examples/ticktock.py | <filename>examples/ticktock.py
import aiotools
import asyncio
async def mytick(interval):
    """Timer callback; ``interval`` is supplied by create_timer but unused."""
    print('tick')


async def run():
    """Run a 1-second timer for ~4 seconds, then cancel and drain it."""
    t = aiotools.create_timer(mytick, 1.0)
    await asyncio.sleep(4)
    t.cancel()
    # await the cancelled timer task so it can finish its cleanup
    await t


if __name__ == '__main__':
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        loop.run_until_complete(run())
    finally:
        loop.stop()
bdowning/aiotools | examples/contextgroup.py | <reponame>bdowning/aiotools
from pprint import pprint
import asyncio
import aiotools
@aiotools.actxmgr
async def mygen(id):
    """A trivial async context manager that yields a string tagged with id."""
    yield f'mygen id is {id}'


async def run():
    # enter ten context managers concurrently; ``values`` collects what
    # each one yielded, in order
    ctxgrp = aiotools.actxgroup(mygen(i) for i in range(10))
    async with ctxgrp as values:
        pprint(values)


if __name__ == '__main__':
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        loop.run_until_complete(run())
    finally:
        loop.stop()
bdowning/aiotools | src/aiotools/ptaskgroup.py | <filename>src/aiotools/ptaskgroup.py
import asyncio
import enum
import itertools
import logging
import sys
import traceback
from types import TracebackType
from typing import (
Any,
Coroutine,
Optional,
Type,
TypeVar,
Union,
)
try:
from typing import Protocol
except ImportError:
from typing_extensions import Protocol # type: ignore # noqa
import weakref
from . import compat
__all__ = (
'PersistentTaskGroup',
)
TAny = TypeVar('TAny')
# monotonically increasing suffix for auto-generated group names
_ptaskgroup_idx = itertools.count()
_log = logging.getLogger(__name__)
# asyncio.Task names (loop.create_task(..., name=...)) exist only on 3.8+
_has_task_name = (sys.version_info >= (3, 8, 0))


class UndefinedResult(enum.Enum):
    """Sentinel type meaning "the task ended without a usable result"."""
    UNDEFINED = 0


UNDEFINED = UndefinedResult.UNDEFINED


class ExceptionHandler(Protocol):
    """Async callback invoked with an exception escaping a member task."""
    async def __call__(self, exc: Exception) -> None:
        ...


async def _default_exc_handler(exc: Exception) -> None:
    # Called from within an ``except`` block, so print_exc() picks up the
    # active exception even though ``exc`` itself is not passed through.
    traceback.print_exc()
class PersistentTaskGroup:
    """A task group whose member tasks are long-lived and fault-isolated.

    Unlike a plain TaskGroup, an exception in one member task does not
    cancel its siblings: it is routed to ``exception_handler`` (default:
    print the traceback) and the task ends with the UNDEFINED sentinel.
    All remaining tasks are cancelled together on ``shutdown()`` / exit.
    """

    _exc_handler: ExceptionHandler
    # WeakSet so finished tasks can be garbage-collected without explicit
    # removal; shutdown() only sees tasks that are still alive.
    _tasks: "weakref.WeakSet[asyncio.Task[Any]]"

    def __init__(
        self,
        *,
        name: Optional[str] = None,  # annotation fixed: None is a valid value
        exception_handler: Optional[ExceptionHandler] = None,
    ) -> None:
        self._name = name or f"PTaskGroup-{next(_ptaskgroup_idx)}"
        self._tasks = weakref.WeakSet()
        if exception_handler is None:
            self._exc_handler = _default_exc_handler
        else:
            self._exc_handler = exception_handler

    @property
    def name(self) -> str:
        """The group's name, auto-generated when not given."""
        return self._name

    def create_task(
        self,
        coro: Coroutine[Any, Any, TAny],
        *,
        name: Optional[str] = None,  # annotation fixed: None is a valid value
    ) -> "asyncio.Task[Union[TAny, UndefinedResult]]":
        """Spawn ``coro`` in this group and return its wrapping task.

        The task resolves to the coroutine's result, or to UNDEFINED when
        the coroutine was cancelled or its exception was handled.
        """
        # TODO: functools.wraps equivalent for coro?
        async def wrapped_task() -> Union[TAny, UndefinedResult]:
            current_task = compat.current_task()
            assert current_task is not None
            _log.debug("%r is spawned in %r.", current_task, self)
            try:
                return await coro
            except asyncio.CancelledError:
                _log.debug("%r in %r has been cancelled.", current_task, self)
            except Exception as exc:
                # fault isolation: route to the handler, don't propagate
                await self._exc_handler(exc)
            except BaseException:
                # TODO: implement
                raise
            # As our fallback handler handled the exception, the task should
            # terminate silently with no explicit result.
            # TODO: Add support for ExceptionGroup in Python 3.11, for the cases
            # with nested sub-tasks and sub-taskgroups.
            return UNDEFINED

        loop = compat.get_running_loop()
        if _has_task_name:
            t = loop.create_task(wrapped_task(), name=name)
        else:
            # Python < 3.8: Task has no name support
            t = loop.create_task(wrapped_task())
        self._tasks.add(t)
        return t

    async def shutdown(self) -> None:
        """Cancel all still-running member tasks and wait for them."""
        remaining_tasks = {*self._tasks}
        cancelled_tasks = set()
        for t in remaining_tasks:
            if t.cancelled():
                continue
            if not t.done():
                t.cancel()
                cancelled_tasks.add(t)
        # Even though we handle CancelledError in wrapped_task,
        # there are still possibilities to raise CancelledError
        # when the tasks are not scheduled yet.
        await asyncio.gather(*cancelled_tasks, return_exceptions=True)

    async def __aenter__(self) -> "PersistentTaskGroup":
        return self

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> Optional[bool]:
        # always drain member tasks; never suppress the body's exception
        await self.shutdown()
        return False

    def __repr__(self) -> str:
        return f"<PersistentTaskGroup {self.name}>"
|
bdowning/aiotools | tests/test_defer.py | <gh_stars>100-1000
import pytest
import asyncio
from aiotools.defer import defer, adefer
from aiotools.func import apartial
def test_defer():
    """Deferred callables run in reverse registration (LIFO) order."""
    x = []

    @defer
    def myfunc(defer):
        x.append(1)
        defer(lambda: x.append(1))
        x.append(2)
        defer(lambda: x.append(2))
        x.append(3)
        defer(lambda: x.append(3))

    myfunc()
    assert x == [1, 2, 3, 3, 2, 1]


def test_defer_inner_exception():
    """Deferred callables registered before an exception still run."""
    x = []

    @defer
    def myfunc(defer):
        x.append(1)
        defer(lambda: x.append(1))
        x.append(2)
        defer(lambda: x.append(2))
        raise ZeroDivisionError
        # intentionally unreachable: must never execute
        x.append(3)
        defer(lambda: x.append(3))

    with pytest.raises(ZeroDivisionError):
        myfunc()
    assert x == [1, 2, 2, 1]


def test_defer_wrong_func():
    """defer rejects async functions at decoration time."""
    with pytest.raises(AssertionError):
        @defer
        async def myfunc(defer):
            pass


@pytest.mark.asyncio
async def test_adefer():
    """adefer mirrors defer for async functions with plain callables."""
    x = []

    @adefer
    async def myfunc(defer):
        x.append(1)
        defer(lambda: x.append(1))
        x.append(2)
        defer(lambda: x.append(2))
        x.append(3)
        defer(lambda: x.append(3))

    await myfunc()
    assert x == [1, 2, 3, 3, 2, 1]


def test_adefer_wrong_func():
    """adefer rejects non-async functions at decoration time."""
    with pytest.raises(AssertionError):
        @adefer
        def myfunc(defer):
            pass


@pytest.mark.asyncio
async def test_adefer_coro():
    """adefer can defer bare coroutine objects (awaited LIFO)."""
    x = []

    async def async_append(target, item):
        target.append(item)
        await asyncio.sleep(0)

    @adefer
    async def myfunc(defer):
        x.append(1)
        defer(async_append(x, 1))
        x.append(2)
        defer(async_append(x, 2))
        x.append(3)
        defer(async_append(x, 3))

    await myfunc()
    assert x == [1, 2, 3, 3, 2, 1]


@pytest.mark.asyncio
async def test_adefer_corofunc():
    """adefer can defer coroutine functions (called and awaited LIFO)."""
    x = []

    async def async_append(target, item):
        target.append(item)
        await asyncio.sleep(0)

    @adefer
    async def myfunc(defer):
        x.append(1)
        defer(apartial(async_append, x, 1))
        x.append(2)
        defer(apartial(async_append, x, 2))
        x.append(3)
        defer(apartial(async_append, x, 3))

    await myfunc()
    assert x == [1, 2, 3, 3, 2, 1]


@pytest.mark.asyncio
async def test_adefer_inner_exception():
    """Async deferreds registered before an exception still run."""
    x = []

    async def async_append(target, item):
        target.append(item)
        await asyncio.sleep(0)

    @adefer
    async def myfunc(defer):
        x.append(1)
        defer(apartial(async_append, x, 1))
        x.append(2)
        defer(apartial(async_append, x, 2))
        raise ZeroDivisionError
        # intentionally unreachable: must never execute
        x.append(3)
        defer(apartial(async_append, x, 3))

    with pytest.raises(ZeroDivisionError):
        await myfunc()
    assert x == [1, 2, 2, 1]
|
bdowning/aiotools | src/aiotools/defer.py | """
Provides a Golang-like ``defer()`` API using decorators, which allows grouping
resource initialization and cleanup in one place without extra indentations.
Example:
.. code-block:: python3
async def init(x):
...
async def cleanup(x):
...
@aiotools.adefer
async def do(defer): # <-- be aware of defer argument!
x = SomeResource()
await init(x)
defer(cleanup(x))
...
...
This is equivalent to:
.. code-block:: python3
async def do():
x = SomeResource()
await init(x)
try:
...
...
finally:
await cleanup(x)
Note that :class:`aiotools.context.AsyncContextGroup` or
:class:`contextlib.AsyncExitStack` serves well for the same purpose, but for simple
cleanups, this defer API makes your codes simple because it steps aside the main
execution context without extra indentations.
.. warning::
Any exception in the deferred functions is raised transparently, and may block
execution of the remaining deferred functions.
This behavior may be changed in the future versions, though.
"""
from collections import deque
import functools
import inspect
from typing import (
Union,
Awaitable,
Callable,
)
__all__ = (
'defer', 'adefer',
)
def defer(func):
    """
    A synchronous version of the defer API.

    The decorated function receives an extra first argument ``defer`` which
    registers plain callables; they run in reverse registration order once
    the function returns or raises.  Async callables are rejected.
    """
    assert not inspect.iscoroutinefunction(func), \
        'the decorated function must not be async'

    @functools.wraps(func)
    def _wrapped(*args, **kwargs):
        cleanups = []

        def defer(f: Callable) -> None:
            assert not inspect.iscoroutinefunction(f), \
                'the deferred function must not be async'
            assert not inspect.iscoroutine(f), \
                'the deferred object must not be a coroutine'
            cleanups.append(f)

        try:
            return func(defer, *args, **kwargs)
        finally:
            # LIFO drain; pop-then-call so late registrations also run
            while cleanups:
                cleanups.pop()()

    return _wrapped
def adefer(func):
    """
    An asynchronous version of the defer API.

    Registered cleanups may be plain callables, coroutine functions, or
    bare coroutine objects; they are called/awaited in reverse registration
    order when the decorated coroutine returns or raises.
    """
    assert inspect.iscoroutinefunction(func), \
        'the decorated function must be async'

    @functools.wraps(func)
    async def _wrapped(*args, **kwargs):
        cleanups = []

        def defer(f: Union[Callable, Awaitable]) -> None:
            cleanups.append(f)

        try:
            return await func(defer, *args, **kwargs)
        finally:
            # LIFO drain, dispatching on what was registered
            while cleanups:
                entry = cleanups.pop()
                if inspect.iscoroutinefunction(entry):
                    await entry()
                elif inspect.iscoroutine(entry):
                    await entry
                else:
                    entry()

    return _wrapped
|
bdowning/aiotools | src/aiotools/iter.py | <filename>src/aiotools/iter.py
__all__ = (
'aiter',
)
# private marker distinguishing "no sentinel given" from any real sentinel
_sentinel = object()


async def aiter(obj, sentinel=_sentinel):
    """
    Analogous to the builtin :func:`iter()`.

    With one argument, re-yield items from an async iterable.  With a
    sentinel, repeatedly await ``obj()`` and yield results until a value
    equal to ``sentinel`` is returned.
    """
    if sentinel is _sentinel:
        # Since we cannot directly return the return value of obj.__aiter__()
        # as being an async-generator, we do the async-iteration here.
        async for item in obj:
            yield item
        return
    while True:
        value = await obj()
        if value == sentinel:
            return
        yield value
|
bdowning/aiotools | src/aiotools/context.py | <gh_stars>100-1000
"""
Provides an implementation of asynchronous context manager and its applications.
.. note::
The async context managers in this module are transparent aliases to
``contextlib.asynccontextmanager`` of the standard library in Python 3.7
and later.
"""
import abc
import contextlib
import asyncio
import functools
import inspect
from typing import Any, Callable, Iterable, Optional, List
__all__ = [
'AsyncContextManager', 'async_ctx_manager', 'actxmgr', 'aclosing',
'AsyncContextGroup', 'actxgroup',
]
if hasattr(contextlib, 'asynccontextmanager'):
    # Python 3.7+: transparently re-export the stdlib implementations.
    __all__ += ['AsyncExitStack']

    AbstractAsyncContextManager = \
        contextlib.AbstractAsyncContextManager
    AsyncContextManager = \
        contextlib._AsyncGeneratorContextManager  # type: ignore
    AsyncExitStack = contextlib.AsyncExitStack
    async_ctx_manager = contextlib.asynccontextmanager
else:
    # Pre-3.7 fallback: hand-rolled equivalents of the stdlib machinery.
    __all__ += ['AsyncContextDecorator', 'actxdecorator']

    class AbstractAsyncContextManager(abc.ABC):  # type: ignore
        """
        The base abstract interface for asynchronous context manager.
        """

        async def __aenter__(self):
            return self  # pragma: no cover

        @abc.abstractmethod
        async def __aexit__(self, exc_type, exc_value, tb):
            return None  # pragma: no cover

        @classmethod
        def __subclasshook__(cls, C):
            # Structural check: anything defining both __aenter__ and
            # __aexit__ anywhere in its MRO counts as an async CM.
            if cls is AbstractAsyncContextManager:
                if (any('__aenter__' in B.__dict__ for B in C.__mro__) and
                        any('__aexit__' in B.__dict__ for B in C.__mro__)):
                    return True
            return NotImplemented

    class AsyncContextDecorator:
        """
        Make an asynchronous context manager be used as a decorator function.
        """

        def _recreate_cm(self):
            return self

        def __call__(self, func):
            @functools.wraps(func)
            async def inner(*args, **kwargs):
                async with self._recreate_cm():
                    return (await func(*args, **kwargs))
            return inner

    actxdecorator = AsyncContextDecorator

    class AsyncContextManager(AsyncContextDecorator,  # type: ignore
                              AbstractAsyncContextManager):
        """
        Converts an async-generator function into asynchronous context manager.
        """

        def __init__(self, func: Callable[..., Any], args, kwargs):
            if not inspect.isasyncgenfunction(func):
                raise RuntimeError('Context manager function must be '
                                   'an async-generator')
            self._agen = func(*args, **kwargs)
            self.func = func
            self.args = args
            self.kwargs = kwargs

        def _recreate_cm(self):
            # fresh generator per use so the decorator form is re-entrant
            return self.__class__(self.func, self.args, self.kwargs)

        async def __aenter__(self):
            try:
                return (await self._agen.__anext__())
            except StopAsyncIteration:
                # The generator should yield at least once.
                raise RuntimeError("async-generator didn't yield") from None

        async def __aexit__(self, exc_type, exc_value, tb):
            if exc_type is None:
                # This is the normal path when the context body
                # did not raise any exception.
                try:
                    await self._agen.__anext__()
                except StopAsyncIteration:
                    return
                else:
                    # The generator has already yielded,
                    # no more yields are allowed.
                    raise RuntimeError("async-generator didn't stop") from None
            else:
                # The context body has raised an exception.
                if exc_value is None:
                    # Ensure exc_value is a valid Exception.
                    exc_value = exc_type()
                try:
                    # Throw the catched exception into the generator,
                    # so that it can handle as it wants.
                    await self._agen.athrow(exc_type, exc_value, tb)
                    # Here the generator should have finished!
                    # (i.e., it should not yield again in except/finally blocks!)
                    raise RuntimeError("async-generator didn't stop after athrow()")
                    # NOTE for PEP-479
                    #   StopAsyncIteration raised inside the context body
                    #   is converted to RuntimeError.
                    #   In the standard library's contextlib.py, there is
                    #   an extra except clause to catch StopIteration here,
                    #   but this is unnecessary now.
                except StopAsyncIteration as exc_new_value:
                    # Suppress only if the generator swallowed our exception
                    # and finished with its own StopAsyncIteration.
                    return exc_new_value is not exc_value
                except RuntimeError as exc_new_value:
                    # When the context body did not catch the exception, re-raise.
                    if exc_new_value is exc_value:
                        return False
                    # When the context body's exception handler raises
                    # another chained exception, re-raise.
                    if isinstance(exc_value, (StopIteration, StopAsyncIteration)):
                        if exc_new_value.__cause__ is exc_value:
                            return False
                    # If this is a purely new exception, raise the new one.
                    raise
                except (BaseException, asyncio.CancelledError) as exc:
                    if exc is not exc_value:
                        raise

    def async_ctx_manager(func):
        @functools.wraps(func)
        def helper(*args, **kwargs):
            return AsyncContextManager(func, args, kwargs)
        return helper
class aclosing:
    """
    An analogy to :func:`contextlib.closing` for async generators: on exit,
    the wrapped object's ``aclose()`` coroutine is awaited.

    The motivation has been proposed by:

    * https://github.com/njsmith/async_generator
    * https://vorpus.org/blog/some-thoughts-on-asynchronous-api-design-\
in-a-post-asyncawait-world/#cleanup-in-generators-and-async-generators
    * https://www.python.org/dev/peps/pep-0533/
    """

    def __init__(self, thing):
        # keep the public attribute name for backward compatibility
        self.thing = thing

    async def __aenter__(self):
        # hand the wrapped object back to the ``as`` clause
        return self.thing

    async def __aexit__(self, *exc_info):
        await self.thing.aclose()
class AsyncContextGroup:
    """
    Merges a group of context managers into a single context manager.
    Internally it uses :func:`asyncio.gather()` to execute them with overlapping,
    to reduce the execution time via asynchrony.

    Upon entering, you can get values produced by the entering steps from
    the passed context managers (those ``yield``-ed) using an ``as`` clause of
    the ``async with`` statement.

    After exits, you can check if the context managers have finished
    successfully by ensuring that the return values of ``exit_states()`` method
    are ``None``.

    .. note::

       You cannot return values in context managers because they are
       generators.

    If an exception is raised before the ``yield`` statement of an async
    context manager, it is stored at the corresponding manager index in the
    as-clause variable. Similarly, if an exception is raised after the
    ``yield`` statement of an async context manager, it is stored at the
    corresponding manager index in the ``exit_states()`` return value.

    Any exceptions in a specific context manager does not interrupt others;
    this semantic is same to ``asyncio.gather()``'s when
    ``return_exceptions=True``. This means that, it is user's responsibility
    to check if the returned context values are exceptions or the intended ones
    inside the context body after entering.

    :param context_managers: An iterable of async context managers.
                             If this is ``None``, you may add async context
                             managers one by one using the :meth:`~.add`
                             method.
    """

    def __init__(self,
                 context_managers: Optional[Iterable[AbstractAsyncContextManager]] = None):  # noqa
        self._cm = list(context_managers) if context_managers else []
        self._cm_yields: List[asyncio.Task] = []
        self._cm_exits: List[asyncio.Task] = []

    def add(self, cm):
        """
        Add an async context manager to the group before entering it.
        """
        self._cm.append(cm)

    async def __aenter__(self):
        # Exceptions in context managers are stored into _cm_yields list.
        # NOTE: There is no way to "skip" the context body even if the entering
        #       process fails.
        self._cm_yields[:] = await asyncio.gather(
            *(e.__aenter__() for e in self._cm),
            return_exceptions=True)
        return self._cm_yields

    async def __aexit__(self, *exc_info):
        # Clear references to context variables.
        self._cm_yields.clear()
        # Exceptions are stored into _cm_exits list.
        self._cm_exits[:] = await asyncio.gather(
            *(e.__aexit__(*exc_info) for e in self._cm),
            return_exceptions=True)

    def exit_states(self):
        """
        Return the list of per-manager exit results: ``None`` for a clean
        exit, or the exception instance raised during that manager's exit.
        """
        return self._cm_exits


# Shorter aliases
actxmgr = async_ctx_manager
actxgroup = AsyncContextGroup
|
bdowning/aiotools | src/aiotools/timer.py | <filename>src/aiotools/timer.py
"""
Provides a simple implementation of timers run inside asyncio event loops.
"""
import asyncio
import contextlib
import enum
import functools
from typing import Callable, Optional
from unittest import mock
from .compat import get_running_loop
from .taskgroup import TaskGroup
__all__ = (
'create_timer',
'TimerDelayPolicy',
'VirtualClock',
)
class TimerDelayPolicy(enum.Enum):
    """
    An enumeration of supported policies for when the timer function takes
    longer on each tick than the given timer interval.
    """
    # let late ticks keep running alongside new ones
    DEFAULT = 0
    # cancel any still-running ticks before firing the next one
    CANCEL = 1
def create_timer(cb: Callable[[float], None], interval: float,
                 delay_policy: TimerDelayPolicy = TimerDelayPolicy.DEFAULT,
                 loop: Optional[asyncio.AbstractEventLoop] = None) -> asyncio.Task:
    """
    Schedule a timer with the given callable and the interval in seconds.
    The interval value is also passed to the callable.
    If the callable takes longer than the timer interval, all accumulated
    callable's tasks will be cancelled when the timer is cancelled.

    Args:
        cb: An async callable invoked as ``cb(interval=interval)`` on every
            tick and scheduled as a task.  NOTE(review): the annotation says
            ``Callable[[float], None]`` but the call site passes ``interval``
            by keyword and expects a coroutine — confirm the intended type.
        interval: Tick period in seconds; also forwarded to ``cb``.
        delay_policy: With ``CANCEL``, still-running tick tasks are cancelled
            and drained before each new tick; with ``DEFAULT`` they continue
            and only finished tasks are dropped from tracking.
        loop: The event loop to schedule on; defaults to the running loop.

    Returns:
        You can stop the timer by cancelling the returned task.
    """
    if loop is None:
        loop = get_running_loop()

    async def _timer():
        fired_tasks = []
        try:
            async with TaskGroup() as task_group:
                while True:
                    if delay_policy == TimerDelayPolicy.CANCEL:
                        for t in fired_tasks:
                            if not t.done():
                                t.cancel()
                        await asyncio.gather(*fired_tasks, return_exceptions=True)
                        fired_tasks.clear()
                    else:
                        # drop references to finished ticks to avoid growth
                        fired_tasks[:] = [t for t in fired_tasks if not t.done()]
                    t = task_group.create_task(cb(interval=interval))
                    fired_tasks.append(t)
                    await asyncio.sleep(interval)
        except asyncio.CancelledError:
            pass
        finally:
            # yield once so cancelled child tasks get to finalize
            await asyncio.sleep(0)

    return loop.create_task(_timer())
class VirtualClock:
    """
    Provide a virtual clock for an asyncio event loop
    which makes timing-based tests deterministic and instantly completed.
    """

    def __init__(self) -> None:
        # accumulated virtual seconds
        self.vtime = 0.0

    def virtual_time(self) -> float:
        """
        Return the current virtual time.
        """
        return self.vtime

    def _virtual_select(self, orig_select, timeout):
        # Advance the clock by however long the loop intended to block,
        # then poll with a zero timeout so select() returns immediately.
        self.vtime += timeout
        return orig_select(0)  # override the timeout to zero

    @contextlib.contextmanager
    def patch_loop(self):
        """
        Override some methods of the current event loop
        so that sleep instantly returns while proceeding the virtual clock.

        NOTE(review): relies on the private ``loop._selector`` attribute of
        selector-based event loops — confirm it is not used with a
        ProactorEventLoop on Windows.
        """
        loop = get_running_loop()
        with mock.patch.object(
            loop._selector,
            'select',
            new=functools.partial(self._virtual_select, loop._selector.select),
        ), \
             mock.patch.object(
                 loop,
                 'time',
                 new=self.virtual_time,
             ):
            yield
|
bdowning/aiotools | tests/test_iter.py | import asyncio
import pytest
from aiotools.iter import aiter
def test_iter():
    """Cross-check behaviour against the builtin iter() (iterable form)."""
    # extra test for cross-check with builtin iter()
    def stream():
        for i in range(10):
            yield i

    last_x = -1
    for x in iter(stream()):
        last_x = x
    assert last_x == 9


def test_iter_sentinel():
    """Cross-check against builtin iter(callable, sentinel)."""
    # extra test for cross-check with builtin iter()
    _call_count = 0
    _sentinel = 5

    def get_next():
        nonlocal _call_count
        _call_count += 1
        return _call_count

    last_x = -1
    for x in iter(get_next, _sentinel):
        last_x = x
    # the sentinel value itself is never yielded
    assert last_x == _sentinel - 1


@pytest.mark.asyncio
async def test_aiter():
    """aiter() re-yields every item of an async iterable."""
    async def stream():
        for i in range(10):
            yield i

    last_x = -1
    async for x in aiter(stream()):
        last_x = x
    assert last_x == 9


@pytest.mark.asyncio
async def test_aiter_with_sentinel():
    """aiter(callable, sentinel) stops when the sentinel is returned."""
    _call_count = 0
    _sentinel = 5

    async def get_next():
        nonlocal _call_count
        _call_count += 1
        await asyncio.sleep(0.001)
        return _call_count

    last_x = -1
    async for x in aiter(get_next, _sentinel):
        last_x = x
    assert last_x == _sentinel - 1


@pytest.mark.asyncio
async def test_aiter_with_null_sentinel():
    """None works as an explicit sentinel (distinct from "no sentinel")."""
    _call_count = 0
    _sentinel = 3

    async def get_next():
        nonlocal _call_count
        _call_count += 1
        await asyncio.sleep(0.001)
        if _call_count >= _sentinel:
            return None
        return _call_count

    last_x = -1
    async for x in aiter(get_next, None):
        last_x = x
    assert last_x == _sentinel - 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.