repo | file | code | file_length | avg_line_length | max_line_length | extension_type
|---|---|---|---|---|---|---|
LOSTIN | LOSTIN-main/GNN-LSTM/make_master_file.py | ### Script for writing the meta-information of graph property
### prediction datasets into master.csv.
import pandas as pd
dataset_list = []
dataset_dict = {}
### add cdfg_lut
name = 'vgraph'
dataset_dict[name] = {'eval metric': 'rmse'}
dataset_dict[name]['download_name'] = 'vgraph'
dataset_dict[name]['version'] = 1
dataset_dict[name]['add_inverse_edge'] = False
dataset_dict[name]['split'] = 'scaffold'
dataset_dict[name]['num tasks'] = 1
dataset_dict[name]['has_node_attr'] = True
dataset_dict[name]['has_edge_attr'] = False
dataset_dict[name]['task type'] = 'regression'
dataset_dict[name]['num classes'] = -1
dataset_dict[name]['additional node files'] = 'None'
dataset_dict[name]['additional edge files'] = 'None'
dataset_dict[name]['binary'] = False
df = pd.DataFrame(dataset_dict)
# saving the dataframe
df.to_csv('master.csv') | 852 | 29.464286 | 67 | py |
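A quick sanity check is to read the file back the way `dataset_pyg.py` below does, with dataset names as columns and attributes as rows. A minimal sketch:

```python
import pandas as pd

# Columns are dataset names; index_col=0 restores the attribute index.
master = pd.read_csv('master.csv', index_col=0)
meta = master['vgraph']
print(meta['eval metric'])  # rmse
print(meta['task type'])    # regression
```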
LOSTIN | LOSTIN-main/GNN-LSTM/read_graph_pyg.py | import pandas as pd
import torch
from torch_geometric.data import Data
import os.path as osp
import numpy as np
from read_graph_raw import read_csv_graph_raw
from tqdm import tqdm
def read_graph_pyg(raw_dir, add_inverse_edge = False, additional_node_files = [], additional_edge_files = [], binary = False):
graph_list = read_csv_graph_raw(raw_dir, add_inverse_edge, additional_node_files = additional_node_files, additional_edge_files = additional_edge_files)
pyg_graph_list = []
print('Converting graphs into PyG objects...')
for graph in tqdm(graph_list):
g = Data()
g.__num_nodes__ = graph['num_nodes']
g.edge_index = torch.from_numpy(graph['edge_index'])
del graph['num_nodes']
del graph['edge_index']
if graph['edge_feat'] is not None:
g.edge_attr = torch.from_numpy(graph['edge_feat'])
del graph['edge_feat']
if graph['node_feat'] is not None:
g.x = torch.from_numpy(graph['node_feat'])
del graph['node_feat']
for key in additional_node_files:
g[key] = torch.from_numpy(graph[key])
del graph[key]
for key in additional_edge_files:
g[key] = torch.from_numpy(graph[key])
del graph[key]
pyg_graph_list.append(g)
return pyg_graph_list
if __name__ == '__main__':
pass | 1,387 | 27.916667 | 156 | py |
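For reference, a minimal usage sketch of the loader above. The raw directory path is an assumption; `read_csv_graph_raw` (next file) expects `edge.csv.gz`, `num-node-list.csv.gz`, and `num-edge-list.csv.gz` inside it.

```python
from read_graph_pyg import read_graph_pyg

# Hypothetical raw directory layout for the 'vgraph' dataset.
graphs = read_graph_pyg('dataset/vgraph/raw', add_inverse_edge=False)
print(len(graphs), graphs[0].num_nodes, graphs[0].edge_index.shape)
```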
LOSTIN | LOSTIN-main/GNN-LSTM/read_graph_raw.py | import pandas as pd
import os.path as osp
import os
import numpy as np
from tqdm import tqdm
### Reading raw CSV files from a directory.
### For homogeneous graphs.
def read_csv_graph_raw(raw_dir, add_inverse_edge = False, additional_node_files = [], additional_edge_files = []):
'''
    raw_dir: path to the raw directory
    add_inverse_edge (bool): whether to add inverse edges
    return: graph_list, a list of graphs.
    Each graph is a dictionary containing edge_index, edge_feat, node_feat, and num_nodes.
    edge_feat and node_feat are optional: if a graph does not have them, they are set to None.
    additional_node_files and additional_edge_files must be in the raw directory.
    - The file name should be {additional_node_file, additional_edge_file}.csv.gz
    - The length should be num_nodes or num_edges
    additional_node_files must start with 'node_'
    additional_edge_files must start with 'edge_'
'''
print('Loading necessary files...')
print('This might take a while.')
# loading necessary files
try:
edge = pd.read_csv(osp.join(raw_dir, 'edge.csv.gz'), compression='gzip', header = None).values.T.astype(np.int64) # (2, num_edge) numpy array
num_node_list = pd.read_csv(osp.join(raw_dir, 'num-node-list.csv.gz'), compression='gzip', header = None).astype(np.int64)[0].tolist() # (num_graph, ) python list
        num_edge_list = pd.read_csv(osp.join(raw_dir, 'num-edge-list.csv.gz'), compression='gzip', header = None).astype(np.int64)[0].tolist() # (num_graph, ) python list
except FileNotFoundError:
        raise RuntimeError('Missing required raw file (edge.csv.gz, num-node-list.csv.gz or num-edge-list.csv.gz)')
try:
node_feat = pd.read_csv(osp.join(raw_dir, 'node-feat.csv.gz'), compression='gzip', header = None).values
if 'int' in str(node_feat.dtype):
node_feat = node_feat.astype(np.int64)
else:
# float
node_feat = node_feat.astype(np.float32)
except FileNotFoundError:
node_feat = None
try:
edge_feat = pd.read_csv(osp.join(raw_dir, 'edge-feat.csv.gz'), compression='gzip', header = None).values
if 'int' in str(edge_feat.dtype):
edge_feat = edge_feat.astype(np.int64)
else:
#float
edge_feat = edge_feat.astype(np.float32)
except FileNotFoundError:
edge_feat = None
additional_node_info = {}
for additional_file in additional_node_files:
assert(additional_file[:5] == 'node_')
# hack for ogbn-proteins
if additional_file == 'node_species' and osp.exists(osp.join(raw_dir, 'species.csv.gz')):
os.rename(osp.join(raw_dir, 'species.csv.gz'), osp.join(raw_dir, 'node_species.csv.gz'))
temp = pd.read_csv(osp.join(raw_dir, additional_file + '.csv.gz'), compression='gzip', header = None).values
if 'int' in str(temp.dtype):
additional_node_info[additional_file] = temp.astype(np.int64)
else:
# float
additional_node_info[additional_file] = temp.astype(np.float32)
additional_edge_info = {}
for additional_file in additional_edge_files:
assert(additional_file[:5] == 'edge_')
temp = pd.read_csv(osp.join(raw_dir, additional_file + '.csv.gz'), compression='gzip', header = None).values
if 'int' in str(temp.dtype):
additional_edge_info[additional_file] = temp.astype(np.int64)
else:
# float
additional_edge_info[additional_file] = temp.astype(np.float32)
graph_list = []
num_node_accum = 0
num_edge_accum = 0
print('Processing graphs...')
for num_node, num_edge in tqdm(zip(num_node_list, num_edge_list), total=len(num_node_list)):
graph = dict()
### handling edge
if add_inverse_edge:
### duplicate edge
duplicated_edge = np.repeat(edge[:, num_edge_accum:num_edge_accum+num_edge], 2, axis = 1)
duplicated_edge[0, 1::2] = duplicated_edge[1,0::2]
duplicated_edge[1, 1::2] = duplicated_edge[0,0::2]
graph['edge_index'] = duplicated_edge
if edge_feat is not None:
graph['edge_feat'] = np.repeat(edge_feat[num_edge_accum:num_edge_accum+num_edge], 2, axis = 0)
else:
graph['edge_feat'] = None
for key, value in additional_edge_info.items():
graph[key] = np.repeat(value[num_edge_accum:num_edge_accum+num_edge], 2, axis = 0)
else:
graph['edge_index'] = edge[:, num_edge_accum:num_edge_accum+num_edge]
if edge_feat is not None:
graph['edge_feat'] = edge_feat[num_edge_accum:num_edge_accum+num_edge]
else:
graph['edge_feat'] = None
for key, value in additional_edge_info.items():
graph[key] = value[num_edge_accum:num_edge_accum+num_edge]
num_edge_accum += num_edge
### handling node
if node_feat is not None:
graph['node_feat'] = node_feat[num_node_accum:num_node_accum+num_node]
else:
graph['node_feat'] = None
for key, value in additional_node_info.items():
graph[key] = value[num_node_accum:num_node_accum+num_node]
graph['num_nodes'] = num_node
num_node_accum += num_node
graph_list.append(graph)
return graph_list | 5,396 | 36.741259 | 170 | py |
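The `add_inverse_edge` branch interleaves each edge with its reverse using strided assignment; a small self-contained check of that indexing:

```python
import numpy as np

edge = np.array([[0, 1],
                 [1, 2]])          # directed edges 0->1 and 1->2
dup = np.repeat(edge, 2, axis=1)   # [[0 0 1 1], [1 1 2 2]]
dup[0, 1::2] = dup[1, 0::2]        # odd columns receive the reversed edges
dup[1, 1::2] = dup[0, 0::2]
print(dup)                         # [[0 1 1 2], [1 0 2 1]]
```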
LOSTIN | LOSTIN-main/GNN-LSTM/main_gnn_customized_inference.py | ### Libraries
# torchtext 0.6.0
import numpy as np
import argparse
from tqdm import tqdm
# Libraries
import matplotlib.pyplot as plt
import pandas as pd
import torch
# Preliminaries
from torchtext.data import Field, TabularDataset, BucketIterator
# Models
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
from torch.nn import ReLU, Linear, BatchNorm1d, ModuleList
# Training
import torch.optim as optim
# graph loading dependency
from torch_geometric.data import DataLoader
from dataset_pyg import PygGraphPropPredDataset
from gnn import GNN
# Hybrid LSTM + GNN model
class Hybridmodel(nn.Module):
def __init__(self, input_dim, emb_dim, hidden_dim=64, graph_emb=11, model_name='gin', num_layer=5):
super(Hybridmodel, self).__init__()
self.embedding = nn.Embedding(input_dim, emb_dim)
self.lstm = nn.LSTM(input_size=emb_dim, hidden_size=hidden_dim, num_layers=2,
batch_first=True, bidirectional=False)
self.gmodel = GNN(gnn_type = model_name, num_tasks = 1, num_layer = num_layer, emb_dim = graph_emb, drop_ratio = 0.2, virtual_node = False)
self.linear=ModuleList()
self.linear.append(Linear(hidden_dim + graph_emb, 100))
self.linear.append(Linear(100,100))
self.linear.append(Linear(100,1))
self.norm=ModuleList()
self.norm.append(BatchNorm1d(100))
self.norm.append(BatchNorm1d(100))
def forward(self, text, text_len, graph_batch):
text_emb = self.embedding(text)
packed_input = pack_padded_sequence(text_emb, text_len, batch_first=True, enforce_sorted=False)
packed_output, _ = self.lstm(packed_input)
output, _ = pad_packed_sequence(packed_output, batch_first=True)
out = output[:, -1, :]
g_emb = self.gmodel(graph_batch)
combined_emb = torch.cat((out, g_emb[text[:,0]-7]),1)
flow_fea=F.relu(self.linear[0](combined_emb))
flow_fea=self.norm[0](flow_fea)
flow_fea=F.dropout(flow_fea,p=0.2,training=self.training)
flow_fea=F.relu(self.linear[1](flow_fea))
flow_fea=self.norm[1](flow_fea)
flow_fea=F.dropout(flow_fea,p=0.2,training=self.training)
flow_out=self.linear[2](flow_fea)
flow_out = torch.squeeze(flow_out, 1)
return flow_out
def load_checkpoint(load_path, model, optimizer, device):
if load_path==None:
return
state_dict = torch.load(load_path, map_location=device)
print(f'Model loaded from <== {load_path}')
model.load_state_dict(state_dict['model_state_dict'])
optimizer.load_state_dict(state_dict['optimizer_state_dict'])
return state_dict['valid_loss']
def main():
# arguments
parser = argparse.ArgumentParser(description='LSTM baseline for flow perf prediction')
parser.add_argument('--device', type=int, default=0, help='which gpu to use if any (default: 0)')
parser.add_argument('--batch_size', type=int, default=32, help='input batch size for training (default: 32)')
parser.add_argument('--epochs', type=int, default=1, help='number of epochs to train')
    parser.add_argument('--emb_dim', type=int, default=20, help='dimensionality of the input embedding of transformations (default: 20)')
parser.add_argument('--graph_emb', type=int, default=32, help='dimensionality of hidden units in GNNs')
parser.add_argument('--dest_folder', type=str, default='model_ckt/area', help='Destination folder that saves the model')
parser.add_argument('--data_folder', type=str, default='lstm/data_area', help='The folder that saves the data')
parser.add_argument('--model_name', type=str, default='gin', help='GNN model name')
    parser.add_argument('--num_layer', type=int, default=5, help='number of GNN layers (default: 5)')
args = parser.parse_args()
device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
# Load graphs
pyg_dataset = PygGraphPropPredDataset(name = 'vgraph')
graph_loader = DataLoader(pyg_dataset, batch_size=32, shuffle=False)
# Fields
area_field = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
flow_field = Field(lower=True, include_lengths=True, batch_first=True)
fields = [ ('flow', flow_field), ('area', area_field)]
print("Loading data ...")
# TabularDataset
train, valid, test = TabularDataset.splits(path=args.data_folder, train='train.csv', validation='valid.csv', test='test.csv',
format='CSV', fields=fields, skip_header=True)
# Iterators
train_iter = BucketIterator(train, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
#valid_iter = BucketIterator(valid, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
test_iter = BucketIterator(test, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=False, sort_within_batch=False)
# Vocabulary
flow_field.build_vocab(train, min_freq=1, specials_first = False)
learning_rate=2e-3
weight_decay=2e-6
model = Hybridmodel(input_dim=len(flow_field.vocab), emb_dim=args.emb_dim, graph_emb=args.graph_emb, model_name=args.model_name, num_layer=args.num_layer).to(device)
optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
load_checkpoint(args.dest_folder + '/model_sum_'+args.model_name+str(args.num_layer)+'_batch_32.pt', model, optimizer, device)
y_pred = []
y_true = []
relative_error = []
flow_l = []
design = []
for graph_batch in graph_loader:
graph_batch = graph_batch.to(device)
model.eval()
with torch.no_grad():
for ((flow, flow_len), labels), _ in tqdm(test_iter, desc="Iteration"):
labels = labels.to(device)
flow = flow.to(device)
flow_len = flow_len.to("cpu")
output = model(flow, flow_len, graph_batch)
y_pred.extend(output.tolist())
y_true.extend(labels.tolist())
rmae = np.abs(np.divide(np.subtract(output.tolist(), labels.tolist()), labels.tolist()))
relative_error.extend(rmae)
flow_l.extend(flow_len.tolist())
design.extend((flow[:,0]-7).tolist())
output = pd.DataFrame({'design_name':design, 'flow_length':flow_l, 'labels': y_true, 'prediction': y_pred, 'relative error': relative_error})
output.to_csv('inference_'+args.dest_folder.split('/')[1]+'_'+args.model_name+str(args.num_layer)+'.csv',index=False)
print(np.mean(relative_error))
if __name__ == "__main__":
main() | 6,775 | 37.942529 | 169 | py |
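The hybrid model joins the two branches by indexing the per-design graph embeddings with the first token of each flow (the design name). The offset 7 is dataset-specific, presumably the number of vocabulary entries that precede the design-name tokens. A toy sketch of the join with made-up shapes:

```python
import torch

lstm_out  = torch.randn(4, 64)           # last-step LSTM features for 4 flows
g_emb     = torch.randn(11, 32)          # one embedding per design graph
first_tok = torch.tensor([7, 9, 8, 17])  # first token of each flow = design id

combined = torch.cat((lstm_out, g_emb[first_tok - 7]), dim=1)
print(combined.shape)                    # torch.Size([4, 96])
```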
LOSTIN | LOSTIN-main/GNN-LSTM/gnn.py | import torch
from torch_geometric.nn import MessagePassing,BatchNorm
from torch_geometric.nn import global_add_pool, global_mean_pool, global_max_pool, GlobalAttention, Set2Set
import torch.nn.functional as F
from torch_geometric.nn.inits import uniform
from torch.nn import Sequential, ReLU, Linear, ModuleList
from conv import GNN_node, GNN_node_Virtualnode
from torch_scatter import scatter_mean
class GNN(torch.nn.Module):
def __init__(self, num_tasks, num_layer = 5, emb_dim = 100,
gnn_type = 'gin', virtual_node = True, residual = False, drop_ratio = 0.5, JK = "last", graph_pooling = "sum"):
'''
num_tasks (int): number of labels to be predicted
virtual_node (bool): whether to add virtual node or not
'''
super(GNN, self).__init__()
self.num_layer = num_layer
self.drop_ratio = drop_ratio
self.JK = JK
self.emb_dim = emb_dim
self.num_tasks = num_tasks
self.graph_pooling = graph_pooling
if self.num_layer < 2:
raise ValueError("Number of GNN layers must be greater than 1.")
### GNN to generate node embeddings
if virtual_node:
self.gnn_node = GNN_node_Virtualnode(num_layer, emb_dim, JK = JK, drop_ratio = drop_ratio, residual = residual, gnn_type = gnn_type)
else:
self.gnn_node = GNN_node(num_layer, emb_dim, JK = JK, drop_ratio = drop_ratio, residual = residual, gnn_type = gnn_type)
### Pooling function to generate whole-graph embeddings
if self.graph_pooling == "sum":
self.pool = global_add_pool
elif self.graph_pooling == "mean":
self.pool = global_mean_pool
elif self.graph_pooling == "max":
self.pool = global_max_pool
elif self.graph_pooling == "attention":
self.pool = GlobalAttention(gate_nn = torch.nn.Sequential(torch.nn.Linear(emb_dim, 2*emb_dim), torch.nn.BatchNorm1d(2*emb_dim), torch.nn.ReLU(), torch.nn.Linear(2*emb_dim, 1)))
elif self.graph_pooling == "set2set":
self.pool = Set2Set(emb_dim, processing_steps = 2)
else:
raise ValueError("Invalid graph pooling type.")
self.graph_pred_linear=ModuleList()
self.graph_norm=ModuleList()
if graph_pooling == "set2set":
self.graph_pred_linear.append(Linear(2*emb_dim, emb_dim))
else:
self.graph_pred_linear.append(Linear(emb_dim, emb_dim))
def forward(self, batched_data):
h_node = self.gnn_node(batched_data)
h_graph = self.pool(h_node, batched_data.batch)
return self.graph_pred_linear[0](h_graph)
if __name__ == '__main__':
GNN(num_tasks = 10) | 2,744 | 36.60274 | 188 | py |
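The pooling functions reduce node embeddings to one vector per graph using the `batch` assignment vector; a tiny check of the default `sum` pooling:

```python
import torch
from torch_geometric.nn import global_add_pool

x = torch.tensor([[1.], [2.], [3.], [4.]])  # four node embeddings
batch = torch.tensor([0, 0, 1, 1])          # nodes 0-1 -> graph 0, nodes 2-3 -> graph 1
print(global_add_pool(x, batch))            # tensor([[3.], [7.]])
```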
LOSTIN | LOSTIN-main/GNN-LSTM/conv.py | import torch
from torch_geometric.nn import MessagePassing
import torch.nn.functional as F
from torch_geometric.nn import global_mean_pool, global_add_pool
from node_encoder import NodeEncoder,EdgeEncoder
from torch_geometric.utils import degree
import math
### GIN convolution along the graph structure
class GINConv(MessagePassing):
def __init__(self, emb_dim):
'''
emb_dim (int): node embedding dimensionality
'''
super(GINConv, self).__init__(aggr = "add")
#self.mlp = torch.nn.Sequential(torch.nn.Linear(emb_dim, 2*emb_dim), torch.nn.BatchNorm1d(2*emb_dim), torch.nn.ReLU(), torch.nn.Linear(2*emb_dim, emb_dim))
self.mlp = torch.nn.Sequential(torch.nn.Linear(emb_dim, emb_dim))
self.eps = torch.nn.Parameter(torch.Tensor([0]))
self.edge_encoder = EdgeEncoder(emb_dim = emb_dim)
def forward(self, x, edge_index, edge_attr):
edge_embedding = self.edge_encoder(edge_attr)
        out = self.mlp((1 + self.eps) * x + self.propagate(edge_index, x=x, edge_attr=edge_embedding))
return out
def message(self, x_j, edge_attr):
return F.relu(x_j + edge_attr)
def update(self, aggr_out):
return aggr_out
### GCN convolution along the graph structure
class GCNConv(MessagePassing):
def __init__(self, emb_dim):
super(GCNConv, self).__init__(aggr='add')
self.linear = torch.nn.Linear(emb_dim, emb_dim)
self.root_emb = torch.nn.Embedding(1, emb_dim)
self.edge_encoder = EdgeEncoder(emb_dim = emb_dim)
def forward(self, x, edge_index, edge_attr):
x = self.linear(x)
edge_embedding = self.edge_encoder(edge_attr)
row, col = edge_index
#edge_weight = torch.ones((edge_index.size(1), ), device=edge_index.device)
deg = degree(row, x.size(0), dtype = x.dtype) + 1
deg_inv_sqrt = deg.pow(-0.5)
deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
return self.propagate(edge_index, x=x, edge_attr = edge_embedding, norm=norm) + F.relu(x + self.root_emb.weight) * 1./deg.view(-1,1)
def message(self, x_j, edge_attr, norm):
return norm.view(-1, 1) * F.relu(x_j + edge_attr)
def update(self, aggr_out):
return aggr_out
### GNN to generate node embedding
class GNN_node(torch.nn.Module):
"""
Output:
node representations
"""
def __init__(self, num_layer, emb_dim, drop_ratio = 0.5, JK = "last", residual = False, gnn_type = 'gin'):
'''
emb_dim (int): node embedding dimensionality
num_layer (int): number of GNN message passing layers
'''
super(GNN_node, self).__init__()
self.num_layer = num_layer
self.drop_ratio = drop_ratio
self.JK = JK
### add residual connection or not
self.residual = residual
if self.num_layer < 2:
raise ValueError("Number of GNN layers must be greater than 1.")
self.node_encoder = NodeEncoder(emb_dim)
###List of GNNs
self.convs = torch.nn.ModuleList()
self.batch_norms = torch.nn.ModuleList()
for layer in range(num_layer):
if gnn_type == 'gin':
self.convs.append(GINConv(emb_dim))
elif gnn_type == 'gcn':
self.convs.append(GCNConv(emb_dim))
else:
raise ValueError('Undefined GNN type called {}'.format(gnn_type))
self.batch_norms.append(torch.nn.BatchNorm1d(emb_dim))
def forward(self, batched_data):
x, edge_index, edge_attr, batch = batched_data.x, batched_data.edge_index, batched_data.edge_attr, batched_data.batch
### computing input node embedding
h_list = [self.node_encoder(x)]
for layer in range(self.num_layer):
h = self.convs[layer](h_list[layer], edge_index, edge_attr)
h = self.batch_norms[layer](h)
if layer == self.num_layer - 1:
#remove relu for the last layer
h = F.dropout(h, self.drop_ratio, training = self.training)
else:
h = F.dropout(F.relu(h), self.drop_ratio, training = self.training)
if self.residual:
h += h_list[layer]
h_list.append(h)
### Different implementations of Jk-concat
if self.JK == "last":
node_representation = h_list[-1]
elif self.JK == "sum":
node_representation = 0
for layer in range(self.num_layer):
node_representation += h_list[layer]
return node_representation
### Virtual GNN to generate node embedding
class GNN_node_Virtualnode(torch.nn.Module):
"""
Output:
node representations
"""
def __init__(self, num_layer, emb_dim, drop_ratio = 0.5, JK = "last", residual = False, gnn_type = 'gin'):
'''
emb_dim (int): node embedding dimensionality
'''
super(GNN_node_Virtualnode, self).__init__()
self.num_layer = num_layer
self.drop_ratio = drop_ratio
self.JK = JK
### add residual connection or not
self.residual = residual
if self.num_layer < 2:
raise ValueError("Number of GNN layers must be greater than 1.")
self.node_encoder = NodeEncoder(emb_dim)
### set the initial virtual node embedding to 0.
self.virtualnode_embedding = torch.nn.Embedding(1, emb_dim)
torch.nn.init.constant_(self.virtualnode_embedding.weight.data, 0)
### List of GNNs
self.convs = torch.nn.ModuleList()
### batch norms applied to node embeddings
self.batch_norms = torch.nn.ModuleList()
### List of MLPs to transform virtual node at every layer
self.mlp_virtualnode_list = torch.nn.ModuleList()
for layer in range(num_layer):
if gnn_type == 'gin':
self.convs.append(GINConv(emb_dim))
elif gnn_type == 'gcn':
self.convs.append(GCNConv(emb_dim))
else:
raise ValueError('Undefined GNN type called {}'.format(gnn_type))
self.batch_norms.append(torch.nn.BatchNorm1d(emb_dim))
for layer in range(num_layer - 1):
self.mlp_virtualnode_list.append(torch.nn.Sequential(torch.nn.Linear(emb_dim, 2*emb_dim), torch.nn.BatchNorm1d(2*emb_dim), torch.nn.ReLU(), \
torch.nn.Linear(2*emb_dim, emb_dim), torch.nn.BatchNorm1d(emb_dim), torch.nn.ReLU()))
def forward(self, batched_data):
x, edge_index, edge_attr, batch = batched_data.x, batched_data.edge_index, batched_data.edge_attr, batched_data.batch
### virtual node embeddings for graphs
virtualnode_embedding = self.virtualnode_embedding(torch.zeros(batch[-1].item() + 1).to(edge_index.dtype).to(edge_index.device))
h_list = [self.node_encoder(x)]
for layer in range(self.num_layer):
### add message from virtual nodes to graph nodes
h_list[layer] = h_list[layer] + virtualnode_embedding[batch]
### Message passing among graph nodes
h = self.convs[layer](h_list[layer], edge_index, edge_attr)
h = self.batch_norms[layer](h)
if layer == self.num_layer - 1:
#remove relu for the last layer
h = F.dropout(h, self.drop_ratio, training = self.training)
else:
h = F.dropout(F.relu(h), self.drop_ratio, training = self.training)
if self.residual:
h = h + h_list[layer]
h_list.append(h)
### update the virtual nodes
if layer < self.num_layer - 1:
### add message from graph nodes to virtual nodes
virtualnode_embedding_temp = global_add_pool(h_list[layer], batch) + virtualnode_embedding
### transform virtual nodes using MLP
if self.residual:
virtualnode_embedding = virtualnode_embedding + F.dropout(self.mlp_virtualnode_list[layer](virtualnode_embedding_temp), self.drop_ratio, training = self.training)
else:
virtualnode_embedding = F.dropout(self.mlp_virtualnode_list[layer](virtualnode_embedding_temp), self.drop_ratio, training = self.training)
### Different implementations of Jk-concat
if self.JK == "last":
node_representation = h_list[-1]
elif self.JK == "sum":
node_representation = 0
for layer in range(self.num_layer):
node_representation += h_list[layer]
return node_representation
if __name__ == "__main__":
pass
| 8,791 | 35.481328 | 182 | py |
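`GCNConv` above computes the symmetric normalization `d_i^{-1/2} * d_j^{-1/2}` per edge, with degrees shifted by one for the implicit self-loop term. The coefficients can be checked in isolation:

```python
import torch
from torch_geometric.utils import degree

edge_index = torch.tensor([[0, 1, 1, 2],
                           [1, 0, 2, 1]])
row, col = edge_index
deg = degree(row, num_nodes=3, dtype=torch.float) + 1  # +1 mirrors the self-loop term
deg_inv_sqrt = deg.pow(-0.5)
deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]           # one coefficient per edge
print(norm)
```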
LOSTIN | LOSTIN-main/GNN-LSTM/main_gnn_customized_area.py | ### Libraries
import numpy as np
import argparse
from tqdm import tqdm
import matplotlib.pyplot as plt
import pandas as pd
import torch
# Preliminaries
# torchtext 0.6.0
from torchtext.data import Field, TabularDataset, BucketIterator
# Models
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
from torch.nn import ReLU, Linear, BatchNorm1d, ModuleList
# Training
import torch.optim as optim
# Evaluation
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
# graph loading dependency
from torch_geometric.data import DataLoader
from dataset_pyg import PygGraphPropPredDataset
from gnn import GNN
### dir
data_folder = 'lstm/data_area'
destination_folder = 'model_ckt/area'
# Hybrid model
class Hybridmodel(nn.Module):
def __init__(self, input_dim, emb_dim, hidden_dim=64, graph_emb=11):
super(Hybridmodel, self).__init__()
self.embedding = nn.Embedding(input_dim, emb_dim)
self.lstm = nn.LSTM(input_size=emb_dim, hidden_size=hidden_dim, num_layers=2,
batch_first=True, bidirectional=False)
self.gmodel = GNN(gnn_type = 'gin', num_tasks = 1, num_layer = 2, emb_dim = graph_emb, drop_ratio = 0.5, virtual_node = False)
self.linear=ModuleList()
self.linear.append(Linear(hidden_dim + graph_emb, 100))
self.linear.append(Linear(100,100))
self.linear.append(Linear(100,1))
self.norm=ModuleList()
self.norm.append(BatchNorm1d(100))
self.norm.append(BatchNorm1d(100))
def forward(self, text, text_len, graph_batch):
text_emb = self.embedding(text)
packed_input = pack_padded_sequence(text_emb, text_len, batch_first=True, enforce_sorted=False)
packed_output, _ = self.lstm(packed_input)
output, _ = pad_packed_sequence(packed_output, batch_first=True)
out = output[:, -1, :]
g_emb = self.gmodel(graph_batch)
combined_emb = torch.cat((out, g_emb[text[:,0]-7]),1)
flow_fea=F.relu(self.linear[0](combined_emb))
flow_fea=self.norm[0](flow_fea)
flow_fea=F.dropout(flow_fea,p=0.4,training=self.training)
flow_fea=F.relu(self.linear[1](flow_fea))
flow_fea=self.norm[1](flow_fea)
flow_fea=F.dropout(flow_fea,p=0.4,training=self.training)
flow_out=self.linear[2](flow_fea)
flow_out = torch.squeeze(flow_out, 1)
return flow_out
# Save Functions
def save_checkpoint(save_path, model, optimizer, valid_loss):
if save_path == None:
return
state_dict = {'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'valid_loss': valid_loss}
torch.save(state_dict, save_path)
print(f'Model saved to ==> {save_path}')
def save_metrics(save_path, train_loss_list, valid_loss_list, global_steps_list):
if save_path == None:
return
state_dict = {'train_loss_list': train_loss_list,
'valid_loss_list': valid_loss_list,
'global_steps_list': global_steps_list}
torch.save(state_dict, save_path)
    print(f'Metrics saved to ==> {save_path}')
# Training Function
def training(model, device,
optimizer,
train_loader,
valid_loader,
graph_loader,
num_epochs,
eval_every,
args,
criterion = nn.MSELoss(),
file_path = destination_folder,
best_valid_loss = float("Inf"), best_train_loss = float("Inf")):
# initialize running values
running_loss = 0.0
valid_running_loss = 0.0
global_step = 0
train_loss_list = []
valid_loss_list = []
global_steps_list = []
# read graphs
for graph_batch in graph_loader:
graph_batch = graph_batch.to(device)
# training loop
model.train()
for epoch in range(num_epochs):
for ((flow, flow_len), labels), _ in tqdm(train_loader, desc="Iteration"):
labels = labels.to(device)
flow = flow.to(device)
flow_len = flow_len.to("cpu")
output = model(flow, flow_len, graph_batch)
loss = criterion(output, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# update running values
running_loss += loss.item()
global_step += 1
# evaluation step
if global_step % eval_every == 0:
model.eval()
with torch.no_grad():
# validation loop
for ((flow, flow_len), labels), _ in valid_loader:
labels = labels.to(device)
flow = flow.to(device)
flow_len = flow_len.to("cpu")
output = model(flow, flow_len, graph_batch)
loss = criterion(output, labels)
valid_running_loss += loss.item()
# evaluation
average_train_loss = running_loss / eval_every
average_valid_loss = valid_running_loss / len(valid_loader)
train_loss_list.append(average_train_loss)
valid_loss_list.append(average_valid_loss)
global_steps_list.append(global_step)
# resetting running values
running_loss = 0.0
valid_running_loss = 0.0
model.train()
# print progress
print('Epoch [{}/{}], Step [{}/{}], Train Loss: {:.4f}, Valid Loss: {:.4f}'
.format(epoch+1, num_epochs, global_step, num_epochs*len(train_loader),
average_train_loss, average_valid_loss))
# checkpoint
if best_valid_loss + best_train_loss > average_valid_loss + average_train_loss:
best_valid_loss = average_valid_loss
best_train_loss = average_train_loss
save_checkpoint(file_path + '/model_batch_'+str(args.batch_size)+'.pt', model, optimizer, best_valid_loss)
save_metrics(file_path + '/metrics_batch_'+str(args.batch_size)+'.pt', train_loss_list, valid_loss_list, global_steps_list)
save_metrics(file_path + '/metrics_batch_'+str(args.batch_size)+'.pt', train_loss_list, valid_loss_list, global_steps_list)
print('Finished Training!')
def main():
# arguments
parser = argparse.ArgumentParser(description='Customized model for flow perf prediction')
parser.add_argument('--device', type=int, default=0, help='which gpu to use if any (default: 0)')
parser.add_argument('--batch_size', type=int, default=32, help='input batch size for training (default: 32)')
    parser.add_argument('--epochs', type=int, default=1, help='number of epochs to train (default: 1)')
parser.add_argument('--emb_dim', type=int, default=20, help='dimensionality of input embedding of transformations')
parser.add_argument('--graph_emb', type=int, default=32, help='dimensionality of hidden units in GNNs')
args = parser.parse_args()
device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
# Load graphs
pyg_dataset = PygGraphPropPredDataset(name = 'vgraph')
graph_loader = DataLoader(pyg_dataset, batch_size=32, shuffle=False)
# Fields
area_field = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
flow_field = Field(lower=True, include_lengths=True, batch_first=True)
fields = [ ('flow', flow_field), ('area', area_field)]
# TabularDataset
print('Data loading ...')
train, valid, test = TabularDataset.splits(path=data_folder, train='train.csv', validation='valid.csv', test='test.csv',
format='CSV', fields=fields, skip_header=True)
# Iterators
train_iter = BucketIterator(train, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
valid_iter = BucketIterator(valid, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
#test_iter = BucketIterator(test, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
# Vocabulary
flow_field.build_vocab(train, min_freq=1, specials_first = False)
learning_rate=2e-3
weight_decay=2e-6
model = Hybridmodel(input_dim=len(flow_field.vocab), emb_dim=args.emb_dim, graph_emb=args.graph_emb).to(device)
optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
training(model = model, device = device, optimizer = optimizer, \
train_loader = train_iter, valid_loader = valid_iter, graph_loader = graph_loader, eval_every = len(train_iter), \
num_epochs=args.epochs, args = args)
if __name__ == "__main__":
main() | 9,153 | 36.670782 | 148 | py |
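Both hybrid scripts read `output[:, -1, :]` after `pad_packed_sequence`, which returns padding (zeros by default) for any sequence shorter than the batch maximum; sorting batches by length mitigates this but does not remove it. A length-aware gather is the general-purpose alternative; a minimal sketch, not the original implementation:

```python
import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

lstm = torch.nn.LSTM(input_size=8, hidden_size=16, batch_first=True)
x = torch.randn(3, 5, 8)                   # batch of 3, padded to length 5
lengths = torch.tensor([5, 3, 2])
packed = pack_padded_sequence(x, lengths, batch_first=True, enforce_sorted=False)
out, _ = pad_packed_sequence(lstm(packed)[0], batch_first=True)

idx = (lengths - 1).view(-1, 1, 1).expand(-1, 1, out.size(2))
last_valid = out.gather(1, idx).squeeze(1)  # (3, 16): final valid state per sequence
```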
LOSTIN | LOSTIN-main/GNN-LSTM/dataset_pyg.py | from torch_geometric.data import InMemoryDataset
import pandas as pd
import shutil, os
import os.path as osp
import torch
import numpy as np
from read_graph_pyg import read_graph_pyg
class PygGraphPropPredDataset(InMemoryDataset):
def __init__(self, name, root = 'dataset', transform=None, pre_transform = None, meta_dict = None):
'''
- name (str): name of the dataset
- root (str): root directory to store the dataset folder
- transform, pre_transform (optional): transform/pre-transform graph objects
        - meta_dict: dictionary that stores all the meta-information about the data. Default is None;
                     when provided, its information is used instead. Useful for debugging and for external contributors.
'''
self.name = name ## original name, e.g., ogbg-molhiv
if meta_dict is None:
self.dir_name = '_'.join(name.split('-'))
# check if previously-downloaded folder exists.
# If so, use that one.
if osp.exists(osp.join(root, self.dir_name + '_pyg')):
self.dir_name = self.dir_name + '_pyg'
self.original_root = root
self.root = osp.join(root, self.dir_name)
master = pd.read_csv(os.path.join(os.path.dirname(__file__), 'master.csv'), index_col = 0)
            if self.name not in master:
error_mssg = 'Invalid dataset name {}.\n'.format(self.name)
error_mssg += 'Available datasets are as follows:\n'
error_mssg += '\n'.join(master.keys())
raise ValueError(error_mssg)
self.meta_info = master[self.name]
else:
self.dir_name = meta_dict['dir_path']
self.original_root = ''
self.root = meta_dict['dir_path']
self.meta_info = meta_dict
self.download_name = self.meta_info['download_name'] ## name of downloaded file, e.g., tox21
self.num_tasks = int(self.meta_info['num tasks'])
self.eval_metric = self.meta_info['eval metric']
self.task_type = self.meta_info['task type']
self.__num_classes__ = int(self.meta_info['num classes'])
self.binary = self.meta_info['binary'] == 'True'
super(PygGraphPropPredDataset, self).__init__(self.root, transform, pre_transform)
self.data, self.slices = torch.load(self.processed_paths[0])
def get_idx_split(self, split_type = None):
if split_type is None:
split_type = self.meta_info['split']
path = osp.join(self.root, 'split', split_type)
# short-cut if split_dict.pt exists
if os.path.isfile(os.path.join(path, 'split_dict.pt')):
return torch.load(os.path.join(path, 'split_dict.pt'))
train_idx = pd.read_csv(osp.join(path, 'train.csv.gz'), compression='gzip', header = None).values.T[0]
valid_idx = pd.read_csv(osp.join(path, 'valid.csv.gz'), compression='gzip', header = None).values.T[0]
test_idx = pd.read_csv(osp.join(path, 'test.csv.gz'), compression='gzip', header = None).values.T[0]
return {'train': torch.tensor(train_idx, dtype = torch.long), 'valid': torch.tensor(valid_idx, dtype = torch.long), 'test': torch.tensor(test_idx, dtype = torch.long)}
@property
def num_classes(self):
return self.__num_classes__
@property
def raw_file_names(self):
if self.binary:
return ['data.npz']
else:
file_names = ['edge']
if self.meta_info['has_node_attr'] == 'True':
file_names.append('node-feat')
if self.meta_info['has_edge_attr'] == 'True':
file_names.append('edge-feat')
return [file_name + '.csv.gz' for file_name in file_names]
@property
def processed_file_names(self):
return 'geometric_data_processed.pt'
def process(self):
### read pyg graph list
add_inverse_edge = self.meta_info['add_inverse_edge'] == 'True'
if self.meta_info['additional node files'] == 'None':
additional_node_files = []
else:
additional_node_files = self.meta_info['additional node files'].split(',')
if self.meta_info['additional edge files'] == 'None':
additional_edge_files = []
else:
additional_edge_files = self.meta_info['additional edge files'].split(',')
data_list = read_graph_pyg(self.raw_dir, add_inverse_edge = add_inverse_edge, additional_node_files = additional_node_files, additional_edge_files = additional_edge_files, binary=self.binary)
if self.task_type == 'subtoken prediction':
graph_label_notparsed = pd.read_csv(osp.join(self.raw_dir, 'graph-label.csv.gz'), compression='gzip', header = None).values
graph_label = [str(graph_label_notparsed[i][0]).split(' ') for i in range(len(graph_label_notparsed))]
for i, g in enumerate(data_list):
g.y = graph_label[i]
else:
if self.binary:
graph_label = np.load(osp.join(self.raw_dir, 'graph-label.npz'))['graph_label']
else:
graph_label = pd.read_csv(osp.join(self.raw_dir, 'graph-label.csv.gz'), compression='gzip', header = None).values
has_nan = np.isnan(graph_label).any()
for i, g in enumerate(data_list):
if 'classification' in self.task_type:
if has_nan:
g.y = torch.from_numpy(graph_label[i]).view(1,-1).to(torch.float32)
else:
g.y = torch.from_numpy(graph_label[i]).view(1,-1).to(torch.long)
else:
g.y = torch.from_numpy(graph_label[i]).view(1,-1).to(torch.float32)
if self.pre_transform is not None:
data_list = [self.pre_transform(data) for data in data_list]
data, slices = self.collate(data_list)
# self.data = data
self.slices = slices
print('Saving...')
torch.save((data, slices), self.processed_paths[0])
if __name__ == '__main__':
pyg_dataset = PygGraphPropPredDataset(name = 'vgraph')
print(pyg_dataset.num_classes)
print(pyg_dataset[0])
#print(pyg_dataset[1].x)
#print(pyg_dataset.slices)
from torch_geometric.loader import DataLoader
loader = DataLoader(pyg_dataset, batch_size=32, shuffle=False)
for batch in loader:
#print(batch.edge_index[:,2781])
print(batch)
#print(batch.y)
print(len(batch.y))
#break
| 6,691 | 38.364706 | 199 | py |
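Beyond the `__main__` demo above, the scaffold split declared in master.csv can be consumed as follows, assuming the gzipped index files exist under `dataset/vgraph/split/scaffold`. A sketch:

```python
dataset = PygGraphPropPredDataset(name='vgraph')
split = dataset.get_idx_split()      # {'train': ..., 'valid': ..., 'test': ...}
train_set = dataset[split['train']]  # index-tensor slicing of an InMemoryDataset
print(len(train_set), dataset.num_tasks, dataset.eval_metric)
```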
LOSTIN | LOSTIN-main/LSTM/LSTM_inference.py | ### Libraries
# torchtext 0.6.0
import numpy as np
import argparse
from tqdm import tqdm
# Libraries
import matplotlib.pyplot as plt
import pandas as pd
import torch
# Preliminaries
from torchtext.data import Field, TabularDataset, BucketIterator
# Models
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
from torch.nn import ReLU, Linear, BatchNorm1d, ModuleList
# Training
import torch.optim as optim
# LSTM model
class LSTM(nn.Module):
def __init__(self, input_dim, emb_dim, hidden_dim=128):
super(LSTM, self).__init__()
self.embedding = nn.Embedding(input_dim, emb_dim)
self.lstm_1 = nn.LSTM(input_size=emb_dim, hidden_size=hidden_dim, num_layers=1,
batch_first=True, bidirectional=False)
self.lstm_2 = nn.LSTM(input_size=hidden_dim, hidden_size=hidden_dim, num_layers=1,
batch_first=True, bidirectional=False)
self.linear=ModuleList()
self.linear.append(Linear(hidden_dim,30))
self.linear.append(Linear(30,30))
self.linear.append(Linear(30,1))
self.norm=ModuleList()
self.norm.append(BatchNorm1d(30))
self.norm.append(BatchNorm1d(30))
def forward(self, text, text_len):
text_emb = self.embedding(text)
packed_input = pack_padded_sequence(text_emb, text_len, batch_first=True, enforce_sorted=False)
packed_output, (h1, c1) = self.lstm_1(packed_input)
packed_output, _ = self.lstm_2(packed_output, (h1, c1))
output, _ = pad_packed_sequence(packed_output, batch_first=True)
out = output[:, -1, :]
#print(output.shape)
#print(out)
#flow_fea = F.dropout(out,p=0.5,training=self.training)
flow_fea=F.relu(self.linear[0](out))
flow_fea=self.norm[0](flow_fea)
#flow_fea=F.dropout(flow_fea,p=0.4,training=self.training)
flow_fea=F.relu(self.linear[1](flow_fea))
flow_fea=self.norm[1](flow_fea)
flow_fea=F.dropout(flow_fea,p=0.2,training=self.training)
flow_out=self.linear[2](flow_fea)
flow_out = torch.squeeze(flow_out, 1)
return flow_out
# Save and Load Functions
def save_checkpoint(save_path, model, optimizer, valid_loss):
if save_path == None:
return
state_dict = {'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'valid_loss': valid_loss}
torch.save(state_dict, save_path)
print(f'Model saved to ==> {save_path}')
def load_checkpoint(load_path, model, optimizer, device):
if load_path==None:
return
state_dict = torch.load(load_path, map_location=device)
print(f'Model loaded from <== {load_path}')
model.load_state_dict(state_dict['model_state_dict'])
optimizer.load_state_dict(state_dict['optimizer_state_dict'])
return state_dict['valid_loss']
def save_metrics(save_path, train_loss_list, valid_loss_list, global_steps_list):
if save_path == None:
return
state_dict = {'train_loss_list': train_loss_list,
'valid_loss_list': valid_loss_list,
'global_steps_list': global_steps_list}
torch.save(state_dict, save_path)
    print(f'Metrics saved to ==> {save_path}')
def load_metrics(load_path, device):
    if load_path is None:
        return
    state_dict = torch.load(load_path, map_location=device)
    print(f'Metrics loaded from <== {load_path}')
    return state_dict['train_loss_list'], state_dict['valid_loss_list'], state_dict['global_steps_list']
def main():
# arguments
parser = argparse.ArgumentParser(description='LSTM baseline for flow perf prediction')
parser.add_argument('--device', type=int, default=0, help='which gpu to use if any (default: 0)')
parser.add_argument('--batch_size', type=int, default=32, help='input batch size for training (default: 32)')
    parser.add_argument('--epochs', type=int, default=1, help='number of epochs to train (default: 1)')
    parser.add_argument('--emb_dim', type=int, default=20, help='dimensionality of the flow token embedding (default: 20)')
parser.add_argument('--dest_folder', type=str, default='data_area', help='Destination folder that saves the model')
args = parser.parse_args()
device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
# Fields
area_field = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
flow_field = Field(lower=True, include_lengths=True, batch_first=True)
fields = [ ('flow', flow_field), ('area', area_field)]
print("Loading data ...")
# TabularDataset
train, valid, test = TabularDataset.splits(path=args.dest_folder, train='train.csv', validation='valid.csv', test='test.csv',
format='CSV', fields=fields, skip_header=True)
# Iterators
train_iter = BucketIterator(train, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
#valid_iter = BucketIterator(valid, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
test_iter = BucketIterator(test, batch_size=args.batch_size, device=device, sort=False, sort_within_batch=False)
# Vocabulary
flow_field.build_vocab(train, min_freq=1, specials_first = False)
learning_rate=2e-3
weight_decay=2e-6
model = LSTM(input_dim=len(flow_field.vocab),emb_dim=args.emb_dim).to(device)
optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
load_checkpoint(args.dest_folder + '/model.pt', model, optimizer, device)
y_pred = []
y_true = []
relative_error = []
flow_l = []
design = []
model.eval()
with torch.no_grad():
for ((flow, flow_len), labels), _ in tqdm(test_iter, desc="Iteration"):
labels = labels.to(device)
flow = flow.to(device)
flow_len = flow_len.to("cpu")
output = model(flow, flow_len)
y_pred.extend(output.tolist())
y_true.extend(labels.tolist())
rmae = np.abs(np.divide(np.subtract(output.tolist(), labels.tolist()), labels.tolist()))
relative_error.extend(rmae)
flow_l.extend(flow_len.tolist())
design.extend((flow[:,0]-7).tolist())
output = pd.DataFrame({'design_name':design, 'flow_length':flow_l, 'labels': y_true, 'prediction': y_pred, 'relative error': relative_error})
output.to_csv('lstm_'+args.dest_folder+'.csv',index=False)
print(np.mean(relative_error))
if __name__ == "__main__":
main() | 6,857 | 33.29 | 149 | py |
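The torchtext pipeline expects train/valid/test CSVs with a `flow` column, a whitespace-separated token sequence whose first token is the design name, plus a label column (`area` or `delay`). A hypothetical generator for illustration; the transformation names below are placeholders, not the dataset's actual vocabulary:

```python
import pandas as pd

rows = [
    ('adder rewrite refactor balance', 1372.5),  # hypothetical flow and area label
    ('adder balance rewrite rewrite', 1401.0),
]
pd.DataFrame(rows, columns=['flow', 'area']).to_csv('data_area/train.csv', index=False)
```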
LOSTIN | LOSTIN-main/LSTM/LSTM_area.py | ### Libraries
# torchtext 0.6.0
import numpy as np
import argparse
from tqdm import tqdm
# Libraries
import matplotlib.pyplot as plt
import pandas as pd
import torch
# Preliminaries
from torchtext.data import Field, TabularDataset, BucketIterator
# Models
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
from torch.nn import ReLU, Linear, BatchNorm1d, ModuleList
# Training
import torch.optim as optim
# Evaluation
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
destination_folder = 'data_area'
# LSTM model
class LSTM(nn.Module):
def __init__(self, input_dim, emb_dim, hidden_dim=128):
super(LSTM, self).__init__()
self.embedding = nn.Embedding(input_dim, emb_dim)
self.lstm_1 = nn.LSTM(input_size=emb_dim, hidden_size=hidden_dim, num_layers=1,
batch_first=True, bidirectional=False)
self.lstm_2 = nn.LSTM(input_size=hidden_dim, hidden_size=hidden_dim, num_layers=1,
batch_first=True, bidirectional=False)
self.linear=ModuleList()
self.linear.append(Linear(hidden_dim,30))
self.linear.append(Linear(30,30))
self.linear.append(Linear(30,1))
self.norm=ModuleList()
self.norm.append(BatchNorm1d(30))
self.norm.append(BatchNorm1d(30))
def forward(self, text, text_len):
text_emb = self.embedding(text)
packed_input = pack_padded_sequence(text_emb, text_len, batch_first=True, enforce_sorted=False)
packed_output, (h1, c1) = self.lstm_1(packed_input)
packed_output, _ = self.lstm_2(packed_output, (h1, c1))
output, _ = pad_packed_sequence(packed_output, batch_first=True)
out = output[:, -1, :]
#print(output.shape)
#print(out)
#flow_fea = F.dropout(out,p=0.5,training=self.training)
flow_fea=F.relu(self.linear[0](out))
flow_fea=self.norm[0](flow_fea)
#flow_fea=F.dropout(flow_fea,p=0.4,training=self.training)
flow_fea=F.relu(self.linear[1](flow_fea))
flow_fea=self.norm[1](flow_fea)
flow_fea=F.dropout(flow_fea,p=0.2,training=self.training)
flow_out=self.linear[2](flow_fea)
flow_out = torch.squeeze(flow_out, 1)
return flow_out
# Save and Load Functions
def save_checkpoint(save_path, model, optimizer, valid_loss):
if save_path == None:
return
state_dict = {'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'valid_loss': valid_loss}
torch.save(state_dict, save_path)
print(f'Model saved to ==> {save_path}')
def load_checkpoint(load_path, model, optimizer):
if load_path==None:
return
state_dict = torch.load(load_path)
print(f'Model loaded from <== {load_path}')
model.load_state_dict(state_dict['model_state_dict'])
optimizer.load_state_dict(state_dict['optimizer_state_dict'])
return state_dict['valid_loss']
def save_metrics(save_path, train_loss_list, valid_loss_list, global_steps_list):
if save_path == None:
return
state_dict = {'train_loss_list': train_loss_list,
'valid_loss_list': valid_loss_list,
'global_steps_list': global_steps_list}
torch.save(state_dict, save_path)
    print(f'Metrics saved to ==> {save_path}')
def load_metrics(load_path):
if load_path==None:
return
state_dict = torch.load(load_path)
    print(f'Metrics loaded from <== {load_path}')
return state_dict['train_loss_list'], state_dict['valid_loss_list'], state_dict['global_steps_list']
# Training Function
def training(model, device,
optimizer,
train_loader,
valid_loader,
num_epochs,
eval_every,
criterion = nn.MSELoss(),
file_path = destination_folder,
best_valid_loss = float("Inf"), best_train_loss = float("Inf")):
# initialize running values
running_loss = 0.0
valid_running_loss = 0.0
global_step = 0
train_loss_list = []
valid_loss_list = []
global_steps_list = []
# training loop
model.train()
for epoch in range(num_epochs):
for ((flow, flow_len), labels), _ in tqdm(train_loader, desc="Iteration"):
labels = labels.to(device)
flow = flow.to(device)
flow_len = flow_len.to("cpu")
output = model(flow, flow_len)
loss = criterion(output, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# update running values
running_loss += loss.item()
global_step += 1
# evaluation step
if global_step % eval_every == 0:
model.eval()
with torch.no_grad():
# validation loop
for ((flow, flow_len), labels), _ in valid_loader:
labels = labels.to(device)
flow = flow.to(device)
flow_len = flow_len.to("cpu")
output = model(flow, flow_len)
loss = criterion(output, labels)
valid_running_loss += loss.item()
# evaluation
average_train_loss = running_loss / eval_every
average_valid_loss = valid_running_loss / len(valid_loader)
train_loss_list.append(average_train_loss)
valid_loss_list.append(average_valid_loss)
global_steps_list.append(global_step)
# resetting running values
running_loss = 0.0
valid_running_loss = 0.0
model.train()
# print progress
print('Epoch [{}/{}], Step [{}/{}], Train Loss: {:.4f}, Valid Loss: {:.4f}'
.format(epoch+1, num_epochs, global_step, num_epochs*len(train_loader),
average_train_loss, average_valid_loss))
# checkpoint
if best_valid_loss + best_train_loss > average_valid_loss + average_train_loss:
best_valid_loss = average_valid_loss
best_train_loss = average_train_loss
save_checkpoint(file_path + '/model.pt', model, optimizer, best_valid_loss)
save_metrics(file_path + '/metrics.pt', train_loss_list, valid_loss_list, global_steps_list)
save_metrics(file_path + '/metrics.pt', train_loss_list, valid_loss_list, global_steps_list)
print('Finished Training!')
def main():
# arguments
parser = argparse.ArgumentParser(description='LSTM baseline for flow perf prediction')
parser.add_argument('--device', type=int, default=0, help='which gpu to use if any (default: 0)')
parser.add_argument('--batch_size', type=int, default=32, help='input batch size for training (default: 32)')
    parser.add_argument('--epochs', type=int, default=1, help='number of epochs to train (default: 1)')
    parser.add_argument('--emb_dim', type=int, default=20, help='dimensionality of the flow token embedding (default: 20)')
args = parser.parse_args()
device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
# Fields
area_field = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
flow_field = Field(lower=True, include_lengths=True, batch_first=True)
fields = [ ('flow', flow_field), ('area', area_field)]
# TabularDataset
train, valid, test = TabularDataset.splits(path=destination_folder, train='train.csv', validation='valid.csv', test='test.csv',
format='CSV', fields=fields, skip_header=True)
# Iterators
train_iter = BucketIterator(train, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
valid_iter = BucketIterator(valid, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
#test_iter = BucketIterator(test, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
# Vocabulary
flow_field.build_vocab(train, min_freq=1, specials_first = False)
learning_rate=2e-3
weight_decay=2e-6
model = LSTM(input_dim=len(flow_field.vocab),emb_dim=args.emb_dim).to(device)
optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
training(model = model, device = device, optimizer = optimizer, \
train_loader = train_iter, valid_loader = valid_iter, eval_every = len(train_iter), \
num_epochs=args.epochs)
if __name__ == "__main__":
main() | 9,043 | 34.328125 | 148 | py |
LOSTIN | LOSTIN-main/LSTM/LSTM_delay.py | ### Libraries
# torchtext 0.6.0
import numpy as np
import argparse
from tqdm import tqdm
# Libraries
import matplotlib.pyplot as plt
import pandas as pd
import torch
# Preliminaries
from torchtext.data import Field, TabularDataset, BucketIterator
# Models
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import torch.nn.functional as F
from torch.nn import ReLU, Linear, BatchNorm1d, ModuleList
# Training
import torch.optim as optim
# Evaluation
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
destination_folder = 'data_delay'
# LSTM model
class LSTM(nn.Module):
def __init__(self, input_dim, emb_dim, hidden_dim=128):
super(LSTM, self).__init__()
self.embedding = nn.Embedding(input_dim, emb_dim)
self.lstm_1 = nn.LSTM(input_size=emb_dim, hidden_size=hidden_dim, num_layers=1,
batch_first=True, bidirectional=False)
self.lstm_2 = nn.LSTM(input_size=hidden_dim, hidden_size=hidden_dim, num_layers=1,
batch_first=True, bidirectional=False)
self.linear=ModuleList()
self.linear.append(Linear(hidden_dim,30))
self.linear.append(Linear(30,30))
self.linear.append(Linear(30,1))
self.norm=ModuleList()
self.norm.append(BatchNorm1d(30))
self.norm.append(BatchNorm1d(30))
def forward(self, text, text_len):
text_emb = self.embedding(text)
packed_input = pack_padded_sequence(text_emb, text_len, batch_first=True, enforce_sorted=False)
packed_output, (h1, c1) = self.lstm_1(packed_input)
packed_output, _ = self.lstm_2(packed_output, (h1, c1))
output, _ = pad_packed_sequence(packed_output, batch_first=True)
out = output[:, -1, :]
#print(output.shape)
#print(out)
#flow_fea = F.dropout(out,p=0.5,training=self.training)
flow_fea=F.relu(self.linear[0](out))
flow_fea=self.norm[0](flow_fea)
#flow_fea=F.dropout(flow_fea,p=0.4,training=self.training)
flow_fea=F.relu(self.linear[1](flow_fea))
flow_fea=self.norm[1](flow_fea)
flow_fea=F.dropout(flow_fea,p=0.2,training=self.training)
flow_out=self.linear[2](flow_fea)
flow_out = torch.squeeze(flow_out, 1)
return flow_out
# Save and Load Functions
def save_checkpoint(save_path, model, optimizer, valid_loss):
if save_path == None:
return
state_dict = {'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'valid_loss': valid_loss}
torch.save(state_dict, save_path)
print(f'Model saved to ==> {save_path}')
def load_checkpoint(load_path, model, optimizer):
if load_path==None:
return
state_dict = torch.load(load_path)
print(f'Model loaded from <== {load_path}')
model.load_state_dict(state_dict['model_state_dict'])
optimizer.load_state_dict(state_dict['optimizer_state_dict'])
return state_dict['valid_loss']
def save_metrics(save_path, train_loss_list, valid_loss_list, global_steps_list):
if save_path == None:
return
state_dict = {'train_loss_list': train_loss_list,
'valid_loss_list': valid_loss_list,
'global_steps_list': global_steps_list}
torch.save(state_dict, save_path)
    print(f'Metrics saved to ==> {save_path}')
def load_metrics(load_path):
if load_path==None:
return
state_dict = torch.load(load_path)
    print(f'Metrics loaded from <== {load_path}')
return state_dict['train_loss_list'], state_dict['valid_loss_list'], state_dict['global_steps_list']
# Training Function
def training(model, device,
optimizer,
train_loader,
valid_loader,
num_epochs,
eval_every,
criterion = nn.MSELoss(),
file_path = destination_folder,
best_valid_loss = float("Inf"),best_train_loss = float("Inf")):
# initialize running values
running_loss = 0.0
valid_running_loss = 0.0
global_step = 0
train_loss_list = []
valid_loss_list = []
global_steps_list = []
# training loop
model.train()
for epoch in range(num_epochs):
for ((flow, flow_len), labels), _ in tqdm(train_loader, desc="Iteration"):
labels = labels.to(device)
flow = flow.to(device)
flow_len = flow_len.to("cpu")
output = model(flow, flow_len)
loss = criterion(output, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# update running values
running_loss += loss.item()
global_step += 1
# evaluation step
if global_step % eval_every == 0:
model.eval()
with torch.no_grad():
# validation loop
for ((flow, flow_len), labels), _ in valid_loader:
labels = labels.to(device)
flow = flow.to(device)
flow_len = flow_len.to("cpu")
output = model(flow, flow_len)
loss = criterion(output, labels)
valid_running_loss += loss.item()
# evaluation
average_train_loss = running_loss / eval_every
average_valid_loss = valid_running_loss / len(valid_loader)
train_loss_list.append(average_train_loss)
valid_loss_list.append(average_valid_loss)
global_steps_list.append(global_step)
# resetting running values
running_loss = 0.0
valid_running_loss = 0.0
model.train()
# print progress
print('Epoch [{}/{}], Step [{}/{}], Train Loss: {:.4f}, Valid Loss: {:.4f}'
.format(epoch+1, num_epochs, global_step, num_epochs*len(train_loader),
average_train_loss, average_valid_loss))
# checkpoint
if best_valid_loss + best_train_loss > average_valid_loss + average_train_loss:
best_valid_loss = average_valid_loss
best_train_loss = average_train_loss
save_checkpoint(file_path + '/model.pt', model, optimizer, best_valid_loss)
save_metrics(file_path + '/metrics.pt', train_loss_list, valid_loss_list, global_steps_list)
save_metrics(file_path + '/metrics.pt', train_loss_list, valid_loss_list, global_steps_list)
print('Finished Training!')
def main():
# arguments
parser = argparse.ArgumentParser(description='LSTM baseline for flow perf prediction')
parser.add_argument('--device', type=int, default=0, help='which gpu to use if any (default: 0)')
parser.add_argument('--batch_size', type=int, default=32, help='input batch size for training (default: 32)')
    parser.add_argument('--epochs', type=int, default=1, help='number of epochs to train (default: 1)')
    parser.add_argument('--emb_dim', type=int, default=20, help='dimensionality of the flow token embedding (default: 20)')
args = parser.parse_args()
device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
# Fields
delay_field = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
flow_field = Field(lower=True, include_lengths=True, batch_first=True)
fields = [ ('flow', flow_field), ('delay', delay_field)]
# TabularDataset
train, valid, test = TabularDataset.splits(path=destination_folder, train='train.csv', validation='valid.csv', test='test.csv',
format='CSV', fields=fields, skip_header=True)
# Iterators
train_iter = BucketIterator(train, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
valid_iter = BucketIterator(valid, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
#test_iter = BucketIterator(test, batch_size=args.batch_size, sort_key=lambda x: len(x.flow), device=device, sort=True, sort_within_batch=True)
# Vocabulary
flow_field.build_vocab(train, min_freq=1, specials_first = False)
learning_rate=2e-3
weight_decay=2e-6
model = LSTM(input_dim=len(flow_field.vocab),emb_dim=args.emb_dim).to(device)
optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
training(model = model, device = device, optimizer = optimizer, \
train_loader = train_iter, valid_loader = valid_iter, eval_every = len(train_iter), \
num_epochs=args.epochs)
if __name__ == "__main__":
main() | 9,049 | 34.490196 | 148 | py |
LOSTIN | LOSTIN-main/verilog2graph/verilog_cleanser.py | ### Remove backslashes and square brackets from Verilog files so they are compatible with the parser
path='epfl/'
new_path='epfl_new/'
filename=['adder.v', 'arbiter.v', 'bar.v', 'div.v', 'log2.v', 'max.v', 'multiplier.v', 'sin.v', 'sqrt.v', 'square.v', 'voter.v']
for fname in filename:
    # read verilog file
    with open(path + fname, "r") as f:
        design = f.readlines()
    # strip square brackets and backslashes (escaped identifiers) from each line
    new_design = [line.replace('[', '').replace(']', '').replace('\\', '') for line in design]
    # save modified design
    new_fname = 'new_' + fname
    with open(new_path + new_fname, "w") as f:
        f.writelines(new_design)
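# a quick sanity check on a hypothetical input line:
#   'assign \\sum[3] = a[3] ^ b[3];'  ->  'assign sum3 = a3 ^ b3;'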
| 663 | 30.619048 | 128 | py |
LOSTIN | LOSTIN-main/verilog2graph/parser.py | from lark import Lark, Transformer, v_args
from typing import Dict, List, Tuple, Optional, Union
verilog_netlist_grammar = r"""
start: description*
?description: module
?module: "module" identifier list_of_ports? ";" module_item* "endmodule"
list_of_ports: "(" port ("," port)* ")"
?port: identifier
| named_port_connection
?module_item: input_declaration
| output_declaration
| net_declaration
| continuous_assign
| module_instantiation
input_declaration: "input" range? list_of_variables ";"
output_declaration: "output" range? list_of_variables ";"
net_declaration: "wire" range? list_of_variables ";"
continuous_assign: "assign" list_of_assignments ";"
list_of_assignments: assignment ("," assignment)*
assignment: lvalue "=" expression
?lvalue: identifier
| identifier_indexed
| identifier_sliced
| concatenation
concatenation: "{" expression ("," expression)* "}"
grouping: "(" lvalue ")"
and_oper: expression "&" expression
or_oper: expression "|" expression
not_oper: "~" lvalue
?expression: identifier
| identifier_indexed
| identifier_sliced
| concatenation
| number
| and_oper
| or_oper
| not_oper
| grouping
identifier_indexed: identifier "[" number "]"
identifier_sliced: identifier range
module_instantiation: identifier module_instance ("," module_instance)* ";"
module_instance: identifier "(" list_of_module_connections? ")"
list_of_module_connections: module_port_connection ("," module_port_connection)*
| named_port_connection ("," named_port_connection)*
module_port_connection: expression
named_port_connection: "." identifier "(" expression ")"
identifier: CNAME
?range: "[" number ":" number "]"
?list_of_variables: identifier ("," identifier)*
string: ESCAPED_STRING
// FIXME TODO: Use INT
unsigned_hex_str: HEXDIGIT+
signed_hex_str: ( "-" | "+" ) unsigned_hex_str
number:
| unsigned_hex_str -> number
| signed_hex_str -> number
| unsigned_hex_str base unsigned_hex_str -> number_explicit_length
| base unsigned_hex_str -> number_implicit_length
base: BASE
    BASE: "'b" | "'B" | "'h" | "'H" | "'o" | "'O" | "'d" | "'D"
COMMENT_SLASH: /\/\*(\*(?!\/)|[^*])*\*\//
COMMENT_BRACE: /\(\*(\*(?!\))|[^*])*\*\)/
NEWLINE: /\\?\r?\n/
%import common.WORD
%import common.ESCAPED_STRING
%import common.CNAME
//%import common.SIGNED_NUMBER
//%import common.INT
//%import common.SIGNED_INT
%import common.WS
%import common.HEXDIGIT
%ignore WS
%ignore COMMENT_SLASH
%ignore COMMENT_BRACE
%ignore NEWLINE
"""
class Number:
def __init__(self, length: Optional[int], base: Optional[str], mantissa: str):
assert isinstance(mantissa, str), "Mantissa is expected to be a string."
assert length is None or isinstance(length, int)
self.length = length
self.base = base
self.mantissa = mantissa
def as_integer(self):
base_map = {
'h': 16,
'b': 2,
'd': 10,
'o': 8
}
if self.base is None:
int_base = 10
else:
base = self.base.lower()
assert base in base_map, "Unknown base: '{}'".format(base)
int_base = base_map[base]
return int(self.mantissa, base=int_base)
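    # e.g. Number(8, 'h', 'ff').as_integer() == 255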
def __int__(self):
return self.as_integer()
def as_bits_lsb_first(self):
"""
Get integer value as a list of bits.
If the length of the Number is not None then the list is either extended or truncated to the given length.
Extension is sign extended.
:return:
"""
value = self.as_integer()
x = value
bits = []
        while x != 0 and x != -1:  # stop at -1 too: floor division never drives a negative value to 0
bits.append(x & 1)
x //= 2
if self.length is not None:
if len(bits) < self.length:
sign = 1 if value < 0 else 0
# Extend.
bits.extend([sign] * (self.length - len(bits)))
elif len(bits) > self.length:
# Truncate
bits = bits[0:self.length]
return bits
def as_bits_msb_first(self):
return list(reversed(self.as_bits_lsb_first()))
def __repr__(self):
if self.base is None:
return "{}".format(self.as_integer())
elif self.length is None:
return "'{}{}".format(self.base, self.mantissa)
else:
return "{}'{}{}".format(self.length, self.base, self.mantissa)
def test_class_number():
assert Number(None, None, '12').as_bits_lsb_first() == [0, 0, 1, 1]
assert Number(None, None, '12').as_bits_msb_first() == [1, 1, 0, 0]
assert Number(5, None, '12').as_bits_msb_first() == [0, 1, 1, 0, 0]
assert Number(3, None, '12').as_bits_msb_first() == [1, 0, 0]
assert Number(3, 'h', 'c').as_bits_msb_first() == [1, 0, 0]
class Range:
def __init__(self, start, end):
self.start = start
self.end = end
def to_indices(self):
"""
Convert to list of indices in the range.
:return:
"""
return list(reversed(range(self.end.as_integer(), self.start.as_integer() + 1)))
def __repr__(self):
return "[{}:{}]".format(self.start, self.end)
class Vec:
def __init__(self, name: str, range: Range):
self.name = name
self.range = range
def __repr__(self):
return "{}{}".format(self.name, self.range)
# class PortConnection:
#
# def __init__(self, port_name: str, signal_name: str):
# self.port_name = port_name
# self.signal_name = signal_name
#
# def __repr__(self):
# return ".{}({})".format(self.port_name, self.signal_name)
class Identifier:
def __init__(self, name: str):
self.name = name
def __repr__(self):
return self.name
class IdentifierIndexed:
def __init__(self, name: str, index):
self.name = name
self.index = index
def __repr__(self):
return "{}[{}]".format(self.name, self.index)
class IdentifierSliced:
def __init__(self, name: str, range: Range):
self.name = name
self.range = range
def __repr__(self):
return "{}{}".format(self.name, self.range)
class Concatenation:
def __init__(self, elements: List[Union[Identifier, IdentifierIndexed, IdentifierSliced]]):
self.elements = elements
def __repr__(self):
return "Concatenation()".format(", ".join([str(e) for e in self.elements]))
class ModuleInstance:
def __init__(self, module_name: str, instance_name: str, ports: Dict[str, str]):
self.module_name = module_name
self.instance_name = instance_name
self.ports = ports
def __repr__(self):
return "ModuleInstance({}, {}, {})".format(self.module_name, self.instance_name, self.ports)
class NetDeclaration:
def __init__(self, net_name: str, range: Range):
self.net_name = net_name
self.range = range
def __repr__(self):
if self.range is not None:
return "NetDeclaration({} {})".format(self.net_name, self.range)
else:
return "NetDeclaration({})".format(self.net_name)
class OutputDeclaration(NetDeclaration):
def __repr__(self):
if self.range is not None:
return "OutputDeclaration({} {})".format(self.net_name, self.range)
else:
return "OutputDeclaration({})".format(self.net_name)
class InputDeclaration(NetDeclaration):
def __repr__(self):
if self.range is not None:
return "InputDeclaration({} {})".format(self.net_name, self.range)
else:
return "InputDeclaration({})".format(self.net_name)
class ContinuousAssign:
def __init__(self, assignments: List[Tuple[str, str]]):
assert isinstance(assignments, list)
self.assignments = assignments
def __repr__(self):
return "ContinuousAssign({})" \
.format(", ".join(("{} = {}".format(l, r) for l, r in self.assignments)))
class Module:
def __init__(self, module_name: str, port_list: List[str], module_items: List):
self.module_name = module_name
self.port_list = port_list
self.module_items = module_items
self.net_declarations = []
self.output_declarations = []
self.input_declarations = []
self.module_instances = []
self.assignments = []
self.sub_modules = []
for it in module_items:
if isinstance(it, OutputDeclaration):
self.output_declarations.append(it)
elif isinstance(it, InputDeclaration):
self.input_declarations.append(it)
elif isinstance(it, NetDeclaration):
self.net_declarations.append(it)
elif isinstance(it, ModuleInstance):
self.module_instances.append(it)
elif isinstance(it, ContinuousAssign):
self.assignments.append(it)
elif isinstance(it, Module):
self.sub_modules.append(it)
def __repr__(self):
return "Module({}, {}, {})".format(self.module_name, self.port_list, self.module_items)
class Netlist:
def __init__(self, modules: List[Module]):
self.modules = modules
def __repr__(self):
return "Netlist({})".format(self.modules)
class VerilogTransformer(Transformer):
list_of_ports = list
def unsigned_hex_str(self, hexstr):
return "".join((str(h) for h in hexstr))
@v_args(inline=True)
def signed_hex_str(self, sign, hexstr):
return sign + hexstr
@v_args(inline=True)
def identifier(self, identifier):
return str(identifier)
@v_args(inline=True)
def base(self, base):
return str(base)[1]
@v_args(inline=True)
def identifier_sliced(self, name, range: Range):
return IdentifierSliced(name, range)
@v_args(inline=True)
def identifier_indexed(self, name, index):
return IdentifierIndexed(name, index)
@v_args(inline=True)
def named_port_connection(self, port_name: str, expression):
return {port_name: expression}
@v_args(inline=True)
def assignment(self, left, right):
return left, right
def list_of_assignments(self, args) -> List:
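        # NOTE: only the first assignment is kept, flattened to [lvalue, expression];
        # downstream consumers (e.g. graph.py's generate_verilog_graph) rely on exactly this shape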
return list(args[0])
def continuous_assign(self, assignments):
return ContinuousAssign(assignments)
@v_args(inline=True)
def module(self, module_name, list_of_ports, *module_items):
# TODO: What happens if list_of_ports is not present?
items = []
for it in module_items:
if isinstance(it, list):
items.extend(it)
else:
items.append(it)
return Module(module_name, list_of_ports, items)
@v_args(inline=True)
def module_instantiation(self, module_name, *module_instances) -> List[ModuleInstance]:
instances = []
for module_instance in module_instances:
instance_name, ports = module_instance
instances.append(ModuleInstance(module_name, instance_name, ports))
return instances
def net_declaration(self, args) -> List[NetDeclaration]:
if len(args) > 0 and isinstance(args[0], Range):
_range = args[0]
variable_names = args[1:]
else:
_range = None
variable_names = args
declarations = []
for name in variable_names:
declarations.append(NetDeclaration(name, _range))
return declarations
def output_declaration(self, args) -> List[OutputDeclaration]:
if len(args) > 0 and isinstance(args[0], Range):
_range = args[0]
variable_names = args[1:]
else:
_range = None
variable_names = args
declarations = []
for name in variable_names:
declarations.append(OutputDeclaration(name, _range))
return declarations
def input_declaration(self, args) -> List[InputDeclaration]:
if len(args) > 0 and isinstance(args[0], Range):
_range = args[0]
variable_names = args[1:]
else:
_range = None
variable_names = args
declarations = []
for name in variable_names:
declarations.append(InputDeclaration(name, _range))
return declarations
def list_of_module_connections(self, module_connections):
connections = dict()
for conn in module_connections:
connections.update(**conn)
return connections
@v_args(inline=True)
def module_instance(self, instance_name, module_connections):
return (instance_name, module_connections)
@v_args(inline=True)
def range(self, start, end):
return Range(start, end)
@v_args(inline=True)
def number(self, string):
return Number(None, None, string)
@v_args(inline=True)
def number_explicit_length(self, length, base, mantissa):
length = int(length)
return Number(length, base, mantissa)
@v_args(inline=True)
def number_implicit_length(self, base, mantissa):
return Number(None, base, mantissa)
def concatenation(self, l) -> Concatenation:
result = []
for x in l:
if isinstance(x, Concatenation):
result.extend(x.elements)
else:
result.append(x)
return Concatenation(result)
def start(self, description):
if isinstance(description, list):
return Netlist(description)
else:
return Netlist([description])
def parse_verilog(data: str) -> Netlist:
"""
Parse a string containing data of a verilog file.
:param data: Raw verilog string.
:return:
"""
verilog_parser = Lark(verilog_netlist_grammar,
parser='lalr',
lexer='standard',
transformer=VerilogTransformer()
)
netlist = verilog_parser.parse(data)
assert isinstance(netlist.modules, list)
return netlist
def test_parse_verilog1():
data = r"""
module blabla(port1, port_2);
input [0:1234] asdf;
output [1:3] qwer;
wire [1234:45] mywire;
assign a = b;
assign {a, b[1], c[0: 39]} = {x, y[5], z[1:40]};
assign {a, b[1], c[0: 39]} = {x, y[5], 1'h0 };
(* asdjfasld ajsewkea 3903na ;lds *)
wire zero_set;
OR _blabla_ ( .A(netname), .B (qwer) );
OR blabla2 ( .A(netname), .B (1'b0) );
wire zero_res;
(* src = "alu_shift.v:23" *)
wire zero_set;
NOT _072_ (
.A(func_i[2]),
.Y(_008_)
);
endmodule
"""
netlist = parse_verilog(data)
# print(netlist.pretty())
def test_parse_verilog2():
from . import test_data
data = test_data.verilog_netlist()
netlist = parse_verilog(data)
# print(netlist)
# print(netlist.pretty())
| 15,344 | 26.450805 | 114 | py |
LOSTIN | LOSTIN-main/verilog2graph/verilog2graph.py | import networkx as nx
import json
from parser import parse_verilog
from graph import Graph
### convert to networkx graphs
def network_to_networkx(network):
"""method to export a pathpy Network to a networkx compatible graph
Parameters
----------
network: Network
Returns
-------
networkx Graph or DiGraph
"""
# keys to exclude since they are handled differently in networkx
excluded_node_props = {"degree", "inweight", "outweight", "indegree", "outdegree"}
try:
import networkx as nx
except ImportError:
        raise ImportError("To export a network to networkx it must be installed")
directed = network.directed
if directed:
graph = nx.DiGraph()
else:
graph = nx.Graph()
for node_id, node_props in network.nodes.items():
valid_props = {k: v for k, v in node_props.items() if k not in excluded_node_props}
graph.add_node(node_id, **valid_props)
for edge, edge_props in network.edges.items():
graph.add_edge(*edge, **edge_props)
return graph
### remove wire nodes
def graph_optimize(G):
wire_nodes=[]
for n in G.nodes():
if n.startswith('n'):
wire_nodes.append(n)
# get in edges of wire nodes
in_edge=G.in_edges(n)
# get the parent of wire nodes
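            # (assumes each wire net has exactly one driver, so the single in-edge gives its source)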
for e in in_edge:
source_node=e[0]
# get out edges of wire ndoes
out_edge=G.out_edges(n)
# get the children of wire nodes
target_node=[]
for e in out_edge:
target_node.append(e[1])
# add new edges
for target in target_node:
G.add_edges_from([(source_node, target)])
# remove wire nets
for n in wire_nodes:
G.remove_node(n)
#print(len(wire_nodes))
return G
### save the graph into json
def json_save(G, fname):
f = open(fname + '.json', 'w')
G_dict = dict(nodes=[[n, G.nodes[n]] for n in G.nodes()], \
edges=[(e[0], e[1], G.edges[e]) for e in G.edges()])
json.dump(G_dict, f)
f.close()
### load the graph from json
def json_load(fname):
f = open(fname + '.json', 'r')
G = nx.DiGraph()
d = json.load(f)
f.close()
G.add_nodes_from(d['nodes'])
G.add_edges_from(d['edges'])
return G
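# round-trip sanity check (hypothetical):
#   json_save(G, 'tmp'); assert set(json_load('tmp').edges()) == set(G.edges())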
###
path = 'epfl_new/'
filename = ['adder.v', 'arbiter.v', 'bar.v', 'div.v', 'log2.v', 'max.v', 'multiplier.v', 'sin.v', 'sqrt.v', 'square.v', 'voter.v']
graph_path='epfl_graph/'
for fname in filename:
top = parse_verilog(open(path + 'new_' + fname).read())
top_module = top.modules[0]
graph_oper = Graph()
graph_oper.generate_verilog_graph(top_module)
# convert to networkx graph
G = network_to_networkx(graph_oper._graph)
G_new = graph_optimize(G)
# save graphs into json
json_save(G_new, graph_path+fname.split('.')[0])
# print(fname)
| 2,965 | 25.247788 | 130 | py |
LOSTIN | LOSTIN-main/verilog2graph/graph.py | import pathpy as pp
import igraph
import lark
from collections import deque
# Graph class
class Graph:
def __init__(self):
self._graph = pp.Network(directed=True)
self._num_ANDs = 0
self._num_ORs = 0
self._num_NOTs = 0
def create_node(self, node_name, node_type):
node_dict = {
'node_type': node_type,
'neighbors': []
}
self._graph.add_node(node_name, node_attributes=node_dict)
def remove_node(self, node_name):
self._graph.remove_node(node_name)
# Assume unweighted edge at the moment...
def connect(self, node_l, node_r):
self._graph.add_edge(v=node_l, w=node_r)
def disconnect(self, node_l, node_r):
self._graph.remove_edge(v=node_l, w=node_r)
    def _get_operator_node(self, oper_type):
        if oper_type == 'and_oper':
            node_name = f'&_{self._num_ANDs}'
            self._num_ANDs += 1
        elif oper_type == 'or_oper':
            node_name = f'|_{self._num_ORs}'
            self._num_ORs += 1
        elif oper_type == 'not_oper':
            node_name = f'~_{self._num_NOTs}'
            self._num_NOTs += 1
        else:
            raise NotImplementedError
        return node_name
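    # e.g. the third AND gate created gets the unique node name '&_2'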
def generate_verilog_graph(self, module, sub_modules=None):
# Initialization
inputs, outputs, netlist, table = {}, {}, {}, {}
self._name = module.module_name
# Create temporary input, output, net dictionaries
for v_in in module.input_declarations:
if isinstance(v_in.net_name, lark.tree.Tree):
for node in v_in.net_name.children:
inputs[node] = []
self.create_node(node, 'input')
else:
inputs[v_in.net_name] = []
self.create_node(v_in.net_name, 'input')
for v_out in module.output_declarations:
if isinstance(v_out.net_name, lark.tree.Tree):
for node in v_out.net_name.children:
outputs[node] = []
self.create_node(node, 'output')
else:
outputs[v_out.net_name] = []
self.create_node(v_out.net_name, 'output')
for v_net in module.net_declarations:
if isinstance(v_net.net_name, lark.tree.Tree):
for node in v_net.net_name.children:
netlist[node] = []
else:
netlist[v_net.net_name] = []
# Create component look-up table if sub-modules are there
if sub_modules:
for sub_module in sub_modules:
table[sub_module.module_name] = {}
for in_port in sub_module.input_declarations:
table[sub_module.module_name][in_port.net_name] = 'in'
for out_port in sub_module.output_declarations:
table[sub_module.module_name][out_port.net_name] = 'out'
# Create instance nodes + update dictionaries
for v_inst in module.module_instances:
self.create_node(v_inst.instance_name, v_inst.module_name)
for port, net in v_inst.ports.items():
port_polarity = table[v_inst.module_name][port]
if net in netlist.keys():
netlist[net].append((v_inst.instance_name, port_polarity))
continue
if net in inputs.keys():
inputs[net].append(v_inst.instance_name)
continue
if net in outputs.keys():
outputs[net].append(v_inst.instance_name)
continue
# Operator based
for assignment in module.assignments:
unpacked = assignment.assignments[0]
assigned_node = unpacked[0]
expression = unpacked[1]
tree_q = deque()
tree_q.append((assigned_node, expression))
while tree_q:
parent_node, tree = tree_q.popleft()
                operator_node = self._get_operator_node(tree.data)
self.create_node(operator_node, tree.data)
self.connect(operator_node, parent_node)
                children_nodes = tree.children
for children_node in children_nodes:
if isinstance(children_node, lark.tree.Tree):
tree_q.append((operator_node, children_node))
else:
self.connect(children_node, operator_node)
# Complete edge connections using temporary dictionaries
if sub_modules:
for net, item_list in netlist.items():
item_len = len(item_list)
for current_idx, (current_node, current_polarity) in enumerate(item_list):
dynamic_idx = current_idx+1
while dynamic_idx < item_len:
neighbor_node, _ = item_list[dynamic_idx]
if current_polarity == 'out':
self.connect(current_node, neighbor_node)
else:
self.connect(neighbor_node, current_node)
dynamic_idx += 1
for in_net, item_list in inputs.items():
for node in item_list:
self.connect(in_net, node)
for out_net, item_list in outputs.items():
for node in item_list:
self.connect(node, out_net)
def visualize_graph(self, image, visual_style):
g = igraph.Graph(directed=True)
for e in self._graph.edges:
if g.vcount()== 0 or e[0] not in g.vs()["name"]:
g.add_vertex(e[0])
if g.vcount()== 0 or e[1] not in g.vs()["name"]:
g.add_vertex(e[1])
g.add_edge(e[0], e[1])
visual_style["vertex_label"] = g.vs["name"]
igraph.plot(g, f'{image}.png', **visual_style)
| 6,086 | 35.668675 | 90 | py |
LOSTIN | LOSTIN-main/CNN/utils.py | import os
import re
import datetime
import numpy as np
from subprocess import check_output
#abc_binary='./abc'
#abc_command='read adder.v; strash;rw; rw; rf; rfz; b; rf; b; rw; rfz; read OCL.lib;map -v;ps;'
#proc = check_output([abc_binary, '-c', abc_command])
def run_abc(input_file, command):
abc_binary='./abc'
abc_command='read '+input_file+'; strash; '+command+' read 7nm_lvt_ff.lib; map -v; ps;'
try:
proc = check_output([abc_binary, '-c', abc_command])
return proc
except Exception as e:
return None
# parse delay and area from the stats command of ABC
def get_metrics(stats):
lines = stats.decode("utf-8").split('\n')
for i in range(len(lines)):
if len(lines[i])>5 and lines[i][:5]=='Area ':
break
line=lines[i+1].split(':')[-1].strip()
#print(line)
ob = re.search(r'Delay *= *[0-9]+.?[0-9]*', line)
delay = float(ob.group().split('=')[1].strip())
ob = re.search(r'Area *= *[0-9]+.?[0-9]*', line)
area = float(ob.group().split('=')[1].strip())
return delay, area
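# minimal usage sketch (assumes ./abc and 7nm_lvt_ff.lib exist in the working directory):
#   stats = run_abc('epfl/adder.v', 'rw; rf; b;')
#   if stats is not None:
#       delay, area = get_metrics(stats)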
# parse delay, area, and more stats from the stats command of ABC
def get_cnn_metrics(stats):
lines = stats.decode("utf-8").split('\n')
line = lines[-2]
ob = re.search(r'delay *= *[0-9]+.?[0-9]*', line)
delay = float(ob.group().split('=')[1].strip())
ob = re.search(r'area *= *[0-9]+.?[0-9]*', line)
area = float(ob.group().split('=')[1].strip())
ob = re.search(r'nd *= *[0-9]*', line)
nd = int(ob.group().split('=')[1].strip())
ob = re.search(r'edge *= *[0-9]*', line)
edge = int(ob.group().split('=')[1].strip())
ob = re.search(r'lev *= *[0-9]*', line)
lev = int(ob.group().split('=')[1].strip())
ob = re.search(r'i/o *= *[0-9]* */ *[0-9]*', line)
io = ob.group().split('=')[1].strip().split('/')
i = int(io[0])
o = int(io[1])
return delay, area, edge, nd, lev, i, o
| 1,941 | 28.876923 | 96 | py |
LOSTIN | LOSTIN-main/CNN/cnn_data_gen.py | import utils
import pandas as pd
import numpy as np
import argparse
import pprint as pp
from os import listdir
from os.path import isfile, join
import torch
from torch.utils.data import TensorDataset, DataLoader
def main(args):
ff_10 = pd.read_csv('flow_10.csv',header=None)
ff_15 = pd.read_csv('flow_15.csv',header=None)
ff_20 = pd.read_csv('flow_20.csv',header=None)
ff_25 = pd.read_csv('flow_25.csv',header=None)
keyword = args['key']
label_dir = 'dataset-ground-truth'
label_list = [f for f in listdir(label_dir) if isfile(join(label_dir, f))]
verilog_list = ['div', 'max', 'multiplier', 'sin', 'square', 'voter', 'adder', 'arbiter', 'bar', 'log2', 'sqrt']
stat_list = []
dataset_x = []
dataset_y = []
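    # feature layout (built below): each sample is a 1 x 26 x 7 array; row 0 holds the
    # circuit stats [n_in, n_out, nd, lev, edge, area, delay], and rows 1..25 one-hot
    # encode the synthesis commands (b, rf, rfz, rw, rwz, resub, resub -z); unused rows stay zero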
# Collect all of the data from the abc first
    for verilog in verilog_list:
        v_file = f'epfl/{verilog}.v'
        stat = utils.run_abc(v_file, '')
        delay, area, edge, nd, lev, n_in, n_out = utils.get_cnn_metrics(stat)
        stat_list.append((delay, area, edge, nd, lev, n_in, n_out))
print("Acquired all of the data from abc!")
# Main loop
    for idx, verilog in enumerate(verilog_list):
        print("Begin processing the data for the verilog file: ", verilog)
        # unpack as n_in/n_out so the inner `for i in range(...)` loops cannot clobber the stats
        delay, area, edge, nd, lev, n_in, n_out = stat_list[idx]
label_file_10, label_file_15, label_file_20, label_file_25 = '', '', '', ''
for f in label_list:
if (keyword in f) and (verilog in f):
if '10' in f:
label_file_10 = f
elif '15' in f:
label_file_15 = f
elif '20' in f:
label_file_20 = f
elif '25' in f:
label_file_25 = f
print("Label 10 file: ", label_file_10)
print("Label 15 file: ", label_file_15)
print("Label 20 file: ", label_file_20)
print("Label 25 file: ", label_file_25)
label_10 = pd.read_csv(f'{label_dir}/{label_file_10}', header=None)
label_15 = pd.read_csv(f'{label_dir}/{label_file_15}', header=None)
label_20 = pd.read_csv(f'{label_dir}/{label_file_20}', header=None)
label_25 = pd.read_csv(f'{label_dir}/{label_file_25}', header=None)
# Processing Length 10 Flow
for i in range(50000):
commands = ff_10[0][i].split(';')
data = np.zeros([1, 26, 7])
            data[0][0] = [n_in, n_out, nd, lev, edge, area, delay]
for j in range(10):
if commands[j] == 'b':
data[0][j+1][0] = 1.0
elif commands[j] == 'rf':
data[0][j+1][1] = 1.0
elif commands[j] == 'rfz':
data[0][j+1][2] = 1.0
elif commands[j] == 'rw':
data[0][j+1][3] = 1.0
elif commands[j] == 'rwz':
data[0][j+1][4] = 1.0
elif commands[j] == 'resub':
data[0][j+1][5] = 1.0
elif commands[j] == 'resub -z':
data[0][j+1][6] = 1.0
else:
raise NotImplementedError
dataset_x.append(data)
dataset_y.append([label_10[0][i]])
print("Completed processing for flow-length 10")
# Processing Length 15 Flow
for i in range(50000):
commands = ff_15[0][i].split(';')
data = np.zeros([1, 26, 7])
            data[0][0] = [n_in, n_out, nd, lev, edge, area, delay]
for j in range(15):
if commands[j] == 'b':
data[0][j+1][0] = 1.0
elif commands[j] == 'rf':
data[0][j+1][1] = 1.0
elif commands[j] == 'rfz':
data[0][j+1][2] = 1.0
elif commands[j] == 'rw':
data[0][j+1][3] = 1.0
elif commands[j] == 'rwz':
data[0][j+1][4] = 1.0
elif commands[j] == 'resub':
data[0][j+1][5] = 1.0
elif commands[j] == 'resub -z':
data[0][j+1][6] = 1.0
else:
raise NotImplementedError
dataset_x.append(data)
dataset_y.append([label_15[0][i]])
print("Completed processing for flow-length 15")
# Processing Length 20 Flow
for i in range(100000):
commands = ff_20[0][i].split(';')
data = np.zeros([1, 26, 7])
            data[0][0] = [n_in, n_out, nd, lev, edge, area, delay]
for j in range(20):
if commands[j] == 'b':
data[0][j+1][0] = 1.0
elif commands[j] == 'rf':
data[0][j+1][1] = 1.0
elif commands[j] == 'rfz':
data[0][j+1][2] = 1.0
elif commands[j] == 'rw':
data[0][j+1][3] = 1.0
elif commands[j] == 'rwz':
data[0][j+1][4] = 1.0
elif commands[j] == 'resub':
data[0][j+1][5] = 1.0
elif commands[j] == 'resub -z':
data[0][j+1][6] = 1.0
else:
raise NotImplementedError
dataset_x.append(data)
dataset_y.append([label_20[0][i]])
print("Completed processing for flow-length 20")
# Processing Length 25 Flow
for i in range(100000):
commands = ff_25[0][i].split(';')
data = np.zeros([1, 26, 7])
            data[0][0] = [n_in, n_out, nd, lev, edge, area, delay]
for j in range(25):
if commands[j] == 'b':
data[0][j+1][0] = 1.0
elif commands[j] == 'rf':
data[0][j+1][1] = 1.0
elif commands[j] == 'rfz':
data[0][j+1][2] = 1.0
elif commands[j] == 'rw':
data[0][j+1][3] = 1.0
elif commands[j] == 'rwz':
data[0][j+1][4] = 1.0
elif commands[j] == 'resub':
data[0][j+1][5] = 1.0
elif commands[j] == 'resub -z':
data[0][j+1][6] = 1.0
else:
raise NotImplementedError
dataset_x.append(data)
dataset_y.append([label_25[0][i]])
print("Completed processing for flow-length 25")
tensor_x = torch.Tensor(dataset_x)
tensor_y = torch.Tensor(dataset_y)
my_dataset = TensorDataset(tensor_x, tensor_y)
dir_upper, dir_lower = args['dataset'], args['key']
torch.save(my_dataset, f'{dir_upper}/{dir_lower}.pt')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Parser')
parser.add_argument('--key', help='Select area / delay', default='area')
parser.add_argument('--dataset', help='the save directory of dataset', default='cnn_dataset')
args = vars(parser.parse_args())
main(args)
| 7,046 | 33.208738 | 116 | py |
LOSTIN | LOSTIN-main/CNN/train_cnn.py | import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data as Data
from torch.utils.data import TensorDataset, DataLoader
import numpy as np
import argparse
# Keras -> PyTorch Implementation
# Modification: Classification -> Regression
class CNN_Regression(nn.Module):
def __init__(self):
super(CNN_Regression, self).__init__()
self.conv_1 = nn.Conv2d(1, 32, (1, 2))
self.conv_2 = nn.Conv2d(32, 64, (1, 2))
self.pool = nn.MaxPool2d(1, 1)
self.dropout_1 = nn.Dropout(0.3)
self.fc_1 = nn.Linear(64*26*5, 64)
self.dropout_2 = nn.Dropout(0.4)
self.fc_2 = nn.Linear(64, 1)
def forward(self, x):
elu = nn.ELU()
selu = nn.SELU()
x1 = self.conv_1(x)
x2 = self.conv_2(elu(x1))
x3 = self.pool(elu(x2))
x4 = self.dropout_1(x3)
x5 = self.fc_1(torch.flatten(x4, start_dim=1))
x6 = self.dropout_2(selu(x5))
out = self.fc_2(x6)
return out
def RMAE(output, target):
diff = ((target - output) / target).abs().sum()
rmae = diff / len(output)
return rmae
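# worked example: output = [9, 11], target = [10, 10]
#   -> RMAE = (|10 - 9|/10 + |10 - 11|/10) / 2 = 0.1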
def main(args):
model = CNN_Regression()
optimizer = torch.optim.RMSprop(model.parameters(), lr=0.05)
criterion = torch.nn.MSELoss()
    epochs = 20
batch_size = 32
data_key = args['data']
data_set = torch.load(f'cnn_dataset/{data_key}.pt')
device = torch.device("cuda:" + str(args['device'])) if torch.cuda.is_available() else torch.device("cpu")
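    # each design contributes 300k samples (50k + 50k + 100k + 100k flows per cnn_data_gen.py),
    # so indices [0, 1.8M) cover the first six designs; the remaining 1.5M samples come from
    # the five held-out designs and are used purely for testing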
data_0, dataset_test_0 = TensorDataset(*data_set[0:1800000]), TensorDataset(*data_set[1800000:3300000])
dataset_ratio = [660000, 165000, 975000]
dataset_train, dataset_valid, dataset_test_1 = torch.utils.data.random_split(data_0, dataset_ratio)
data_set_train = DataLoader(dataset_train, batch_size=batch_size, shuffle=True)
data_set_valid = DataLoader(dataset_valid, batch_size=batch_size, shuffle=True)
data_set_test = DataLoader(dataset_test_0 + dataset_test_1, batch_size=batch_size, shuffle=True)
checkpoint_path = args['ckpt']
test_accuracy_curve = []
if torch.cuda.is_available():
print("CUDA is available! Running on GPU")
model = model.to(device)
else:
print("Running on CPU")
# Training routine
    for epoch_idx in range(epochs):
print("Training Epoch #: ", epoch_idx+1)
print()
for idx, (data_batch, label_batch) in enumerate(data_set_train):
if torch.cuda.is_available():
data_batch = data_batch.to(device)
label_batch = label_batch.to(device)
out = model.forward(data_batch)
loss = criterion(out, label_batch)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if idx % 200 == 0:
batch_acc = RMAE(out, label_batch)
print("RMAE: ", batch_acc)
print("Training done! Now evaluating..")
print()
# Training accuracy
count, train_sum = 0, 0
for data_batch, label_batch in data_set_train:
if torch.cuda.is_available():
data_batch = data_batch.to(device)
label_batch = label_batch.to(device)
with torch.no_grad():
out = model.forward(data_batch)
batch_acc = RMAE(out, label_batch)
count += 1
train_sum += batch_acc
train_sum = train_sum / count
print("Training Result: ", train_sum, "%")
# Validation accuracy
count, valid_sum = 0, 0
for data_batch, label_batch in data_set_valid:
if torch.cuda.is_available():
data_batch = data_batch.to(device)
label_batch = label_batch.to(device)
with torch.no_grad():
out = model.forward(data_batch)
batch_acc = RMAE(out, label_batch)
count += 1
valid_sum += batch_acc
valid_sum = valid_sum / count
print("Validation Result: ", valid_sum, "%")
# Testing accuracy
count, test_sum = 0, 0
for data_batch, label_batch in data_set_test:
if torch.cuda.is_available():
data_batch = data_batch.to(device)
label_batch = label_batch.to(device)
with torch.no_grad():
out = model.forward(data_batch)
batch_acc = RMAE(out, label_batch)
count += 1
test_sum += batch_acc
test_sum = test_sum / count
test_accuracy_curve.append(test_sum)
print("Testing Result: ", test_sum, "%")
# Save the result
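        # test_sum was appended above, so `test_sum <= min(...)` holds exactly when this epoch is the best so far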
if test_sum <= min(test_accuracy_curve):
torch.save({'model_state_dict': model.state_dict(),
'train_accuracy': train_sum,
'valid_accuracy': valid_sum,
'test_accuracy': test_sum,
}, f'{checkpoint_path}/{data_key}_result.pth')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='this is just parser - nothing special')
parser.add_argument('--device', default='0')
parser.add_argument('--data', help='dataset file', default='area')
parser.add_argument('--ckpt', help='output checkpoint path', default='cnn_checkpoints')
args = vars(parser.parse_args())
main(args)
| 5,484 | 31.264706 | 110 | py |
LOSTIN | LOSTIN-main/dataset-generation/utils.py | import os
import re
import datetime
import numpy as np
from subprocess import check_output
#abc_binary='./abc'
#abc_command='read adder.v; strash;rw; rw; rf; rfz; b; rf; b; rw; rfz; read OCL.lib;map -v;ps;'
#proc = check_output([abc_binary, '-c', abc_command])
def run_abc(input_file, command):
abc_binary='./abc'
abc_command='read '+input_file+'; strash; '+command+' read 7nm_lvt_ff.lib; map -v; ps;'
try:
proc = check_output([abc_binary, '-c', abc_command])
return proc
except Exception as e:
return None
# parse delay and area from the stats command of ABC
def get_metrics(stats):
lines = stats.decode("utf-8").split('\n')
for i in range(len(lines)):
if len(lines[i])>5 and lines[i][:5]=='Area ':
break
line=lines[i+1].split(':')[-1].strip()
#print(line)
ob = re.search(r'Delay *= *[0-9]+.?[0-9]*', line)
delay = float(ob.group().split('=')[1].strip())
ob = re.search(r'Area *= *[0-9]+.?[0-9]*', line)
area = float(ob.group().split('=')[1].strip())
return delay, area
# parse delay, area, and more stats from the stats command of ABC
def get_cnn_metrics(stats):
lines = stats.decode("utf-8").split('\n')
line = lines[-2]
ob = re.search(r'delay *= *[0-9]+.?[0-9]*', line)
delay = float(ob.group().split('=')[1].strip())
ob = re.search(r'area *= *[0-9]+.?[0-9]*', line)
area = float(ob.group().split('=')[1].strip())
ob = re.search(r'nd *= *[0-9]*', line)
nd = int(ob.group().split('=')[1].strip())
ob = re.search(r'edge *= *[0-9]*', line)
edge = int(ob.group().split('=')[1].strip())
ob = re.search(r'lev *= *[0-9]*', line)
lev = int(ob.group().split('=')[1].strip())
ob = re.search(r'i/o *= *[0-9]* */ *[0-9]*', line)
io = ob.group().split('=')[1].strip().split('/')
i = int(io[0])
o = int(io[1])
return delay, area, edge, nd, lev, i, o
| 1,941 | 28.876923 | 96 | py |
LOSTIN | LOSTIN-main/dataset-generation/run_abc_syn.py | import utils
import pandas as pd
import numpy as np
import argparse
import pprint as pp
def main(args):
if int(args['flow_length'])==10:
ff=pd.read_csv('flow_10.csv',header=None)
elif int(args['flow_length'])==15:
ff=pd.read_csv('flow_15.csv',header=None)
elif int(args['flow_length'])==20:
ff=pd.read_csv('flow_20.csv',header=None)
else:
ff=pd.read_csv('flow_25.csv',header=None)
ff=np.array(ff)
Delays=[0 for i in range(len(ff))]
Areas=[0 for i in range(len(ff))]
# mark failure flows
e=[]
for i in range(len(ff)):
stat=utils.run_abc(str(args['input']),str(ff[i][0]))
if stat == None:
e.append(i)
continue
delay, area = utils.get_metrics(stat)
Delays[i]=delay
Areas[i]=area
if np.mod(i,100)==0:
print(i)
        if np.mod(i+1,10000)==0 and i!=len(ff)-1:  # dump partial results every 10k flows (the final dump happens after the loop)
result_area=pd.DataFrame(Areas)
result_delay=pd.DataFrame(Delays)
filename=str(args['input']).split('/')[-1].split('.')[0]
result_area.to_csv('area_ground_truth_'+filename+'_flow_'+str(args['flow_length'])+'_part_'+str(int(i/10000))+'.csv',index=False,header=False)
result_delay.to_csv('delay_ground_truth_'+filename+'_flow_'+str(args['flow_length'])+'_part_'+str(int(i/10000))+'.csv',index=False,header=False)
result_area=pd.DataFrame(Areas)
result_delay=pd.DataFrame(Delays)
filename=str(args['input']).split('/')[-1].split('.')[0]
result_area.to_csv('area_ground_truth_'+filename+'_flow_'+str(args['flow_length'])+'.csv',index=False,header=False)
result_delay.to_csv('delay_ground_truth_'+filename+'_flow_'+str(args['flow_length'])+'.csv',index=False,header=False)
if len(e)>0:
error=pd.DataFrame(e)
error.to_csv('failures_'+filename+'_flow_'+str(args['flow_length'])+'.csv',index=False,header=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='provide arguments for the actor critic')
parser.add_argument('--flow-length', help='the number of optimizations in each synthesis flow', default=10)
parser.add_argument('--input', help='the name of input verilog file', default='epfl/div.v')
args = vars(parser.parse_args())
pp.pprint(args)
main(args)
| 2,333 | 35.46875 | 156 | py |
cbm-project | cbm-project-master/driver.py | from igraph import summary
from src.data_read import load, build_graphs
from src.data_synthetic import generate_temporal_graph
from src.transitions import egonet_transitions
def synthetic_data(n=500, p=0.25, anomalies=0.05, anomaly_type='clique', num_graphs=10):
graphs = generate_temporal_graph(n, p, anomalies=anomalies, anomaly_type=anomaly_type, num_graphs=num_graphs)
return graphs, f'synthetic_{anomaly_type}'
def real_data(name='enron_truncated_smallestest', extension='csv', resolution=5):
filename = f'./data/{name}.{extension}'
n, data = load(filename)
graphs = build_graphs(n, data, resolution=resolution, equal_temperament=True)
return graphs, name
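# expected csv schema per line (see src/data_read.py): 'YYYY-MM-DD,sender,recipient'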
def main(filename='', datatype='', verbose=False):
# parameters for synthetic data
n = 500
#p = 0.0025
#anomalies = 0.05
p = 0.01
anomalies = 0.03
anomaly_type = 'clique'
num_graphs = 5
# parameters for real data
name, extension = filename.split('.') if filename != '' else ('', '')
resolution = 5
graphs, dataset = synthetic_data(n=n, p=p, anomalies=anomalies, anomaly_type=anomaly_type, num_graphs=num_graphs) if datatype == 'synthetic' \
else real_data(name=name, extension=extension, resolution=resolution)
###
#temp = []
#for graph in graphs:
# temp.append(graph.to_graph_tool())
#graphs = temp
###
if verbose:
print('index\tnodes\tedges')
for idx, g in enumerate(graphs):
print(idx, end='', flush=True)
summary(g)
embeddings = egonet_transitions(n, graphs)
with open(f'./embeddings/{dataset}_embeddings.txt', 'w') as outfile:
for (ego, embedding) in embeddings:
outfile.write(f'{ego}\t{embedding}\n')
if __name__ == '__main__':
main(filename='', \
datatype='synthetic', \
verbose=True)
| 1,881 | 30.898305 | 146 | py |
cbm-project | cbm-project-master/src/data_synthetic.py | import igraph as ig
from random import random, seed
# n:
# number of nodes in the shared nodeset
# this will include both genuine and anomalous nodes
# p:
# probability of randomly connecting any two nodes
# this does not apply to anomalous nodes
# anomalies:
# percentage of the total nodes that should be anomalous
# this should be a floating-point number between 0.0 and 1.0
# anomaly_type:
# determines the behavior characterizing the anomalous nodes
# possibilities: ['clique', 'cycle', ...]
# num_graphs:
# number of graphs in the temporal sequence
def generate_temporal_graph(n, p, anomalies=0.05, anomaly_type='clique', num_graphs=10, save=True):
assert 0 <= p and p <= 1
graphs = []
nodes = list(range(n))
regular = nodes[:n - int(n*anomalies)]
anomalous = nodes[n - int(n*anomalies):]
for t in range(num_graphs):
edgelist = set()
for idx, u in enumerate(regular):
for v in regular[idx:]:
if random() < p:
edgelist.add((u, v))
graph = ig.Graph(n)
graph.add_edges(edgelist)
graphs.append(graph)
if anomaly_type == 'clique':
graphs = generate_temporal_cliques(graphs, anomalous)
    elif anomaly_type == 'cycle':
graphs = generate_temporal_cycles(graphs, anomalous)
else:
print(anomaly_type)
raise NotImplementedError
for idx, graph in enumerate(graphs):
graph.simplify()
graph.write_edgelist(f'data/synthetic_t{idx}_p{str(p).replace(".", "")}_a{str(anomalies).replace(".", "")}.edgelist')
#for i in range(len(graph.vs)):
# graph.vs[i]['name'] = i
with open(f'data/synthetic_ground_truth_p{str(p).replace(".", "")}_a{str(anomalies).replace(".", "")}.nodes', 'w') as outfile:
for v in regular:
outfile.write(f'{v} 0\n')
for v in anomalous:
outfile.write(f'{v} 1\n')
return graphs
def generate_temporal_cliques(graphs, anomalous):
flag = False
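    # `flag` alternates each snapshot, so the anomalous clique appears in every second graph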
for graph in graphs:
if flag:
edgelist = {(u, v) for u in anomalous for v in anomalous if u != v}
else:
            edgelist = set()
graph.add_edges(edgelist)
flag = not flag
return graphs
def generate_temporal_cycles(graphs, anomalous):
    # TODO: temporal-cycle anomalies are not implemented yet
    raise NotImplementedError
| 2,321 | 30.808219 | 130 | py |
cbm-project | cbm-project-master/src/data_read.py | from os.path import join
import datetime
import graph_tool.all as gt
# renames the vertices so they are integers in the range [0, ... n-1]
def reindex(edgelist):
vertices = list({u for u, v, t in edgelist} | {v for u, v, t in edgelist})
f = {v: i for (v, i) in zip(vertices, range(len(vertices)))}
edgelist = [(f[u], f[v], t) for u, v, t in edgelist]
return len(vertices), edgelist
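# e.g. reindex([('a','b',3), ('b','c',7)]) -> (3, [(0,1,3), (1,2,7)]),
# up to which integer label each original vertex name receives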
# reindex the timestamps to be in the range [0, ... T-1]
def retime(edgelist):
edgelist = sorted(edgelist, key=lambda x : x[2])
times = [t for (u, v, t) in edgelist]
f = {t: idx for (t, idx) in zip(times, range(len(times)))}
return [(u, v, f[t]) for (u, v, t) in edgelist]
# returns a list of triplets (u, v, t)
# where the edge (u, v) occurs at time t
def load(filename):
edgelist = []
with open(filename, 'r') as infile:
extension = filename.split('.')[-1]
if 'csv' in extension:
count = 0
name_dict = {}
for line in infile:
t, u, v = line.strip().split(',')
year, month, day = map(int, t.split('-'))
t = datetime.date(year, month, day).toordinal()
if u not in name_dict.keys():
name_dict[u] = count
count += 1
if v not in name_dict.keys():
name_dict[v] = count
count += 1
edgelist.append((name_dict[u], name_dict[v], t))
n = len(name_dict)
elif 'edges' in extension:
for line in infile:
u, v, t = line.strip().split(',')
edgelist.append((u, v, int(t)))
n, edgelist = reindex(edgelist)
else:
raise IOError
return n, edgelist
# returns a list of graph_tool graphs
def build_graphs(n, edgelist, resolution=10, equal_temperament=False):
if resolution <= 1:
G = gt.Graph(directed=False)
G.add_edge_list([(u, v) for (u, v, t) in edgelist])
return [G]
if equal_temperament:
edgelist = retime(edgelist)
#nodes = {u for u, v, t in edgelist} | {v for u, v, t in edgelist}
times = [t for u, v, t in edgelist]
t_min, t_max = (min(times), max(times))
delta = (t_max - t_min)/resolution
timesteps = [t_min + delta*k for k in range(1, resolution)]
first = [[(u, v) for u, v, t in edgelist if t <= timesteps[0]]]
    # half-open bins: edges falling exactly on a boundary go to the earlier bin only
    middle = [[(u, v) for u, v, t in edgelist if timesteps[idx] < t <= timesteps[idx+1]] \
              for idx in range(len(timesteps)-1)]
last = [[(u, v) for u, v, t in edgelist if t > timesteps[-1]]]
edge_sequence = first + middle + last
graphs = [gt.Graph(directed=False) for edgelist in edge_sequence]
for idx, G in enumerate(graphs):
G.add_vertex(n=n)
G.add_edge_list(edge_sequence[idx])
return graphs
# returns a list of graph_tool graphs
# if size <= 1, load all the graphs from the 'graphs.adj' file
# if size >= 2, load only the graphs with that many vertices
def load_enumerated_graphs(path='enumerated_graphs', size=0):
graphs = []
with open(join(path, f'graph{size if size > 1 else "s"}.adj')) as infile:
for line in infile:
line = line.strip().split(' ')
if line[0] == 'Graph':
n = int(line[-1].strip('.'))
G = gt.Graph(directed=False)
G.add_vertex(n)
for _ in range(n):
line = next(infile).strip().strip(';').split(' ')
u = int(line[0])
for v in line[2:]:
if v != '':
v = int(v)
G.add_edge(u, v)
graphs.append(G)
return graphs
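# the .adj files are assumed to be in showg/nauty adjacency-list style, e.g.:
#   Graph 3, order 3.
#     0 : 1 2;
#     1 : 0;
#     2 : 0;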
# pairs up the enumerated graphs of the same size
def load_graph_pairs(sizes=[2, 3, 4, 5]):
enums = {size: load_enumerated_graphs(size=size) for size in sizes}
transitions = [(size, g, h) for size in enums for g in enums[size] for h in enums[size] if g.num_edges() > 0 or h.num_edges() > 0]
return transitions
| 4,101 | 36.981481 | 134 | py |
cbm-project | cbm-project-master/src/transitions.py | import numpy as np
import igraph as ig
import graph_tool.all as gt
from graph_tool.topology import isomorphism, subgraph_isomorphism
from tqdm import tqdm
from pqdm.processes import pqdm
from multiprocessing import Pool
from itertools import chain, combinations
from src.data_read import load_graph_pairs
def powerset(iterable):
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
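# e.g. list(powerset([1, 2])) == [(), (1,), (2,), (1, 2)]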
def process_iso(task):
idx, g, h, pregraph, postgraph = task
#print(g.vertices(), h.vertices(), pregraph.vertices(), postgraph.vertices())
if isomorphism(g, pregraph) and isomorphism(h, postgraph):
return idx, 1
else:
return idx, 0
def process_transitions(task):
ego, G, H, enumerated = task
G_vp = G.new_vertex_property('bool')
H_vp = H.new_vertex_property('bool')
G_nbhd = {w for w in G.vertex(ego).out_neighbors()} | {ego}
H_nbhd = {w for w in H.vertex(ego).out_neighbors()} | {ego}
    # construct a shared vertex set for the egonets
for v in G.vertices():
if v in G_nbhd or v in H_nbhd:
G_vp[v] = True
else:
G_vp[v] = False
for v in H.vertices():
if v in H_nbhd or v in G_nbhd:
H_vp[v] = True
else:
H_vp[v] = False
G_egonet = gt.GraphView(G, vfilt=G_vp, directed=False)
H_egonet = gt.GraphView(H, vfilt=H_vp, directed=False)
G_ego_vert = powerset(G_egonet.vertices())
H_ego_vert = powerset(H_egonet.vertices())
G_subgraphs = []
H_subgraphs = []
    for vertices in powerset(G_egonet.vertices()):
        if len(vertices) != 0:
            vp = G_egonet.new_vertex_property('bool')
            for v in vertices:  # mark the chosen vertices, otherwise the filtered view is empty
                vp[v] = True
            G_subgraphs.append(gt.GraphView(G_egonet, vfilt=vp, directed=False))
    for vertices in powerset(H_egonet.vertices()):
        if len(vertices) != 0:
            vp = H_egonet.new_vertex_property('bool')
            for v in vertices:
                vp[v] = True
            H_subgraphs.append(gt.GraphView(H_egonet, vfilt=vp, directed=False))
transitions = [(g, h) for g in G_subgraphs for h in H_subgraphs \
if g.num_vertices() == h.num_vertices() \
and np.array_equal(g.get_vertices(), h.get_vertices())]
tasks = [(idx, g, h, pregraph, postgraph) for (g, h) in transitions for (idx, (size, pregraph, postgraph)) in enumerate(enumerated)]
#results = pqdm(tasks, process_iso, n_jobs=cpus, desc='ISOMORPHISMS', colour='red', leave=False)
results = []
#with Pool(2) as pool:
#for result in tqdm(map(process_iso, tasks), total=len(tasks), colour='red', leave=False):
# results.append(result)
#for idx, count in results:
# counts[idx] += count
counts = [0 for _ in enumerated]
for g, h in tqdm(transitions, desc='ISOMORPHISMS', total=len(transitions), colour='red', leave=False):
for idx, (size, pregraph, postgraph) in enumerate(enumerated):
if isomorphism(g, pregraph) and isomorphism(h, postgraph):
counts[idx] += 1
return counts
# count the transitions
#for idx, (size, pregraph, postgraph) in tqdm(enumerate(enumerated), desc='ISOMORPHISMS', total=len(enumerated), colour='red', leave=False):
# assert pregraph.num_vertices() == size and postgraph.num_vertices() == size
# G_iso = subgraph_isomorphism(pregraph, G_egonet, induced=True)
# H_iso = subgraph_isomorphism(postgraph, H_egonet, induced=True)
# if len(G_iso) > 0 and len(H_iso) > 0:
# for phi in G_iso:
# for psi in H_iso:
# if np.array_equal(phi.get_array(), psi.get_array()):
# counts[idx] += 1
#return counts
#for z in G_egonet.vertices():
# print(z)
#print(G_iso[0].get_array())
#for z in G_iso[0]:
# print(z in G_egonet.vertices())
#print(g_iso, h_iso)
def process_ego(task):
ego, graphs, pairs = task
#tasks = [(ego, graphs[t], graphs[t+1], pairs) for t in range(len(graphs) - 1)]
counts = []
for G, H in tqdm([(graphs[t], graphs[t+1]) for t in range(len(graphs) - 1)], desc='TIMES', total=len(graphs) - 1, colour='yellow', leave=False, disable=True):
G_nbhd = set(G.vs[ego].neighbors()) | {G.vs[ego]}
H_nbhd = set(H.vs[ego].neighbors()) | {H.vs[ego]}
        # construct a shared vertex set for the egonets
shared_nbhd = G_nbhd | H_nbhd
G_egonet = G.subgraph(shared_nbhd)
H_egonet = H.subgraph(shared_nbhd)
enumerated_verts = powerset(shared_nbhd)
G_subgraphs = []
H_subgraphs = []
counts_t = [0 for _ in pairs]
for idx, (size, pregraph, postgraph) in tqdm(enumerate(pairs), desc='ISOMORPHISMS', total=len(pairs), colour='red', leave=False, disable=False):
for phi in G_egonet.get_subisomorphisms_vf2(pregraph):
for psi in H_egonet.get_subisomorphisms_vf2(postgraph):
if phi == psi:
counts_t[idx] += 1
counts.append(counts_t)
return (ego, np.asarray(counts).mean(axis=0).tolist())
# TODO
# n: the number of vertices in the shared vertex set
# graph_tool ensures that a graph with n vertices has vertex names {0, 1, ... n-1}
# graphs: a temporal sequence of graphs on a shared vertex set
def egonet_transitions(n, graphs):
pairs = load_graph_pairs(sizes=[2, 3])
pairs = [(size, ig.Graph.from_graph_tool(g), ig.Graph.from_graph_tool(h)) for (size, g, h) in pairs]
tasks = [(ego, graphs, pairs) for ego in range(n)]
embeddings = []
with Pool(4) as pool:
for embedding in tqdm(pool.imap(process_ego, tasks), desc='NODES', total=len(tasks), colour='green', leave=True, disable=False):
embeddings.append(embedding)
#with Pool(2) as pool:
# for embedding in tqdm(pool.imap(process_ego, tasks), total=len(tasks), colour='green', leave=False):
# embeddings.append(embedding)
#embeddings = pqdm(tasks, process_ego, n_jobs=cpus//2, desc='EGOS', colour='green')
return embeddings
#tqdm.set_lock(RLock())
#pool = Pool(cpus//4, initializer=tqdm.set_lock, initargs=(tqdm.get_lock(),))
#pool.map(process_ego, tasks)
#return
##for ego in tqdm(range(n), desc='ITERATING OVER NODES', total=n, colour='green'):
#for ego in range(n):
# pass
#return
| 6,352 | 35.302857 | 162 | py |
cbm-project | cbm-project-master/embeddings/dbscan.py | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import matplotlib.patches as mpatches
from kneed import KneeLocator
from sklearn.neighbors import NearestNeighbors
from sklearn.manifold import SpectralEmbedding
from sklearn.metrics import davies_bouldin_score
from sklearn.cluster import DBSCAN, KMeans, SpectralClustering
from sklearn.decomposition import PCA
DIR_PLOTS = os.path.join('..', "outputs", "plots")
def add_value_labels(ax, spacing=5, fontsize=8):
"""Add labels to the end of each bar in a bar chart.
Arguments:
ax (matplotlib.axes.Axes): The matplotlib object containing the axes
of the plot to annotate.
spacing (int): The distance between the labels and the bars.
"""
# place a label for each bar
for rect in ax.patches:
# get X and Y placement of label from rect.
y_value = rect.get_height()
x_value = rect.get_x() + rect.get_width() / 2
# vertical alignment for positive values
va = 'bottom'
# if value of bar is negative: Place label below bar
if y_value < 0:
spacing *= -1 # invert space to place label below
va = 'top' # vertically align label at top
# use Y value as label and format number with one decimal place
label = "{:.1f}".format(y_value)
# Create annotation
ax.annotate(
text=label,
xy=(x_value, y_value),
fontsize=fontsize,
xytext=(0, spacing), # vertically shift label by `space`
textcoords="offset points", # interpret `xytext` as offset in points
ha='center', # horizontally center label
va=va # vertically align label differently for
# positive and negative values.
)
def find_elbow(embedding, knee_id="", n_neighbors=4):
"""
Given an embedding, finds and plots the nearest-neighbor distances in that space.
It returns the coordinates of the 'knee' of this curve, point which indicates the
optimal 'maximum distance' for a clustering algorithm such as DBSCAN to determine outliers.
"""
knn = NearestNeighbors(n_neighbors=n_neighbors)
knn_fit = knn.fit(embedding)
distances, _ = knn_fit.kneighbors(embedding)
distances = np.sort(distances, axis=0)[:,1]
kneedle = KneeLocator(np.linspace(0, len(distances)-1, len(distances)), distances, S=1.0, curve="convex", direction="increasing")
knee_x, knee_y = kneedle.elbow, distances[int(kneedle.elbow)]
print('Knee: the Maximum Curvature point is (x = {}, y = {})'.format(knee_x, knee_y))
kneedle.plot_knee()
PLT_SUBPLOT = "knee"
os.makedirs(os.path.join(DIR_PLOTS, PLT_SUBPLOT), exist_ok=True)
knee_fname = os.path.join(DIR_PLOTS, PLT_SUBPLOT, "{}.pdf".format(knee_id))
plt.savefig(knee_fname)
plt.close()
print("Saved knee plot as '{}'".format(knee_fname))
return knee_x, knee_y
def plot_scatter(embedding_df, clustering_df):
COLORS = list(mcolors.TABLEAU_COLORS.values()) + list(mcolors.CSS4_COLORS.values())
color_map, cluster_freqs = dict(), dict()
    frequencies_iter = clustering_df['cluster'].value_counts().items()
for idx, (cluster, freq) in enumerate(frequencies_iter):
color = COLORS[idx % len(COLORS)]
color_map[cluster] = color
cluster_freqs[cluster] = freq
x = cluster_freqs.keys()
heights = cluster_freqs.values()
fig, ax = plt.subplots()
marker_size = 5 / np.log10(embedding_df["emb_x"].shape[0]) + 4
    ax = embedding_df.plot.scatter("emb_x", "emb_y",
ax=ax,
color=clustering_df['cluster'].map(color_map),
s=marker_size,
# cmap='viridis',
)
    if -1 in color_map:
        outlier_patch = mpatches.Patch(color=color_map[-1], label='DBSCAN Outliers')
        ax.legend(handles=[outlier_patch])
fig.savefig('scatter.pdf')
fig, ax = plt.subplots()
ax.bar(x, height=heights, color=list(color_map.values()))
add_value_labels(ax, fontsize=6)
fig.savefig('bars.pdf')
return
embeddings = {}
with open('lucas_embeddings.txt') as infile:
for line in infile:
vertex, vector = line.strip().split('\t')
embeddings[int(vertex)] = list(map(float, vector.replace(',', '').replace('[', '').replace(']', '').split(' ')))
X = np.asarray([vector for vector in embeddings.values()])
x, y = find_elbow(X)
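# use the knee distance as DBSCAN's eps: points whose nearest-neighbor distance
# exceeds it fall outside every dense region and get the outlier label -1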
clustering = DBSCAN(eps=y, min_samples=2).fit(X)
clustering_dict = {'vertex': list(range(len(clustering.labels_))), 'cluster': clustering.labels_}
clustering_df = pd.DataFrame(clustering_dict)
pca = PCA(n_components=2)
Y = pca.fit_transform(X)
embedding_df = pd.DataFrame(X)
pca_df = pd.DataFrame(Y)
pca_df.columns = ['emb_x', 'emb_y']
plot_scatter(pca_df, clustering_df)
| 4,970 | 38.452381 | 133 | py |
pynamical | pynamical-main/setup.py | """Install pynamical."""
import os
from setuptools import setup
# provide a long description using reStructuredText
LONG_DESCRIPTION = r"""
**pynamical** is a Python package for modeling, simulating, visualizing, and animating discrete
nonlinear dynamical systems and chaos. pynamical uses pandas, numpy, and numba for fast simulation,
and matplotlib for beautiful visualizations and animations to explore system behavior. Compatible
with Python 2 and 3. See the examples and demos on `GitHub`_.
You can read/cite the journal article about pynamical: Boeing, G. 2016.
"`Visual Analysis of Nonlinear Dynamical Systems: Chaos, Fractals, Self-Similarity and the Limits of Prediction`_."
*Systems*, 4 (4), 37. doi:10.3390/systems4040037.
.. _GitHub: https://github.com/gboeing/pynamical
.. _Visual Analysis of Nonlinear Dynamical Systems\: Chaos, Fractals, Self-Similarity and the Limits of Prediction: http://geoffboeing.com/publications/nonlinear-chaos-fractals-prediction/
"""
# list of classifiers from the PyPI classifiers trove
CLASSIFIERS = [
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: GIS",
"Topic :: Scientific/Engineering :: Visualization",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Information Analysis",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
]
# only specify install_requires if not in RTD environment
if os.getenv("READTHEDOCS") == "True":
INSTALL_REQUIRES = []
else:
with open("requirements.txt") as f:
INSTALL_REQUIRES = [line.strip() for line in f.readlines()]
setup(
name="pynamical",
version="0.3.2",
description="Model, simulate, and visualize discrete nonlinear dynamical systems",
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
url="https://github.com/gboeing/pynamical",
author="Geoff Boeing",
author_email="boeing@usc.edu",
license="MIT",
platforms="any",
packages=["pynamical"],
python_requires=">=3.8",
install_requires=INSTALL_REQUIRES,
)
| 2,454 | 37.968254 | 188 | py |
pynamical | pynamical-main/tests/test_pynamical.py | """
pynamical tests
"""
import matplotlib as mpl
mpl.use("Agg") # use agg backend so you don't need a display on travis
import matplotlib.cm as cm
import matplotlib.font_manager as fm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
from numba import jit
from pynamical import bifurcation_plot
from pynamical import cobweb_plot
from pynamical import cubic_map
from pynamical import logistic_map
from pynamical import phase_diagram
from pynamical import phase_diagram_3d
from pynamical import simulate
from pynamical import singer_map
_img_folder = ".temp"
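# background for these tests: the logistic map iterates x_{t+1} = r * x_t * (1 - x_t);
# simulate() sweeps `num_rates` growth rates and returns a (num_gens x num_rates) DataFrame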
def test_simulate():
pops = simulate(
model=logistic_map,
num_gens=200,
rate_min=3.7,
rate_max=3.9,
num_rates=100,
num_discard=100,
jit=True,
)
assert type(pops) == pd.DataFrame
assert pops.shape == (200, 100)
pops = simulate(
model=logistic_map,
num_gens=200,
rate_min=3.7,
rate_max=3.9,
num_rates=100,
num_discard=100,
jit=False,
)
assert type(pops) == pd.DataFrame
assert pops.shape == (200, 100)
def test_bifurcation_plot():
pops = simulate(
model=logistic_map, num_gens=200, rate_min=0, rate_max=4, num_rates=100, num_discard=100
)
assert type(pops) == pd.DataFrame
assert pops.shape == (200, 100)
# returns None
bifurcation_plot(pops, save=True, folder=_img_folder, filename="")
def test_phase_diagram():
pops = simulate(
model=singer_map, num_gens=200, rate_min=3.6, rate_max=4.0, num_rates=50, num_discard=100
)
assert type(pops) == pd.DataFrame
assert pops.shape == (200, 50)
# returns None
phase_diagram(
pops,
xmin=0.25,
xmax=0.75,
ymin=0.8,
ymax=1.01,
size=7,
discard_gens=10,
color="b",
color_reverse=True,
legend=True,
save=False,
folder=_img_folder,
filename="",
)
fig_ax = phase_diagram(
pops,
xmin=0.25,
xmax=0.75,
ymin=0.8,
ymax=1.01,
size=7,
discard_gens=10,
color=["b", "g"],
legend=True,
save=False,
show=False,
folder=_img_folder,
filename="",
)
assert type(fig_ax) == tuple
def test_phase_diagram_3d():
pops = simulate(model=cubic_map, num_gens=200, rate_min=3.5, num_rates=30, num_discard=100)
assert type(pops) == pd.DataFrame
assert pops.shape == (200, 30)
# returns None
phase_diagram_3d(
pops,
xmin=-1,
xmax=1,
ymin=-1,
ymax=1,
zmin=-1,
zmax=1,
alpha=0.2,
color="viridis",
azim=330,
legend=True,
save=False,
folder=_img_folder,
filename="",
)
# returns None
phase_diagram_3d(
pops,
xmin=-1,
xmax=1,
ymin=-1,
ymax=1,
zmin=-1,
zmax=1,
alpha=0.2,
color="inferno",
azim=330,
legend=True,
remove_ticks=False,
save=False,
folder=_img_folder,
filename="",
)
def test_cobweb_plot():
# returns None
cobweb_plot(r=3.9, save=False, folder=_img_folder, filename="")
| 3,335 | 19.981132 | 97 | py |
pynamical | pynamical-main/docs/source/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Configure pynamical docs."""
# pynamical documentation build configuration file, created by
# sphinx-quickstart on Tue Jan 31 13:34:32 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# go up two levels from /docs/source to the package root
sys.path.insert(0, os.path.abspath("../.."))
# mock import these packages because readthedocs doesn't have them installed
autodoc_mock_imports = [
"pandas",
"numpy",
"numba",
"matplotlib",
"mpl_toolkits",
"matplotlib.pyplot",
"matplotlib.font_manager",
"matplotlib.cm",
"mpl_toolkits.mplot3d",
]
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "pynamical"
copyright = "2015-2022, Geoff Boeing"
author = "Geoff Boeing"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
version = release = "0.3.2"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "pynamicaldoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "pynamical.tex", "pynamical Documentation", "Geoff Boeing", "manual"),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "pynamical", "pynamical Documentation", [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"pynamical",
"pynamical Documentation",
author,
"pynamical",
"One line description of project.",
"Miscellaneous",
),
]
| 5,062 | 29.871951 | 87 | py |
pynamical | pynamical-main/pynamical/pynamical.py | """pynamical core."""
import os
import matplotlib.cm as cm
import matplotlib.font_manager as fm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from numba import jit
def get_title_font(family="Helvetica", style="normal", size=20, weight="normal", stretch="normal"):
"""
Define fonts to use for image titles.
Arguments
---------
family : string
style : string
size : numeric
weight : string
stretch : string
Returns
-------
matplotlib.font_manager.FontProperties
"""
if family == "Helvetica":
family = ["Helvetica", "Arial", "sans-serif"]
fp = fm.FontProperties(family=family, style=style, size=size, weight=weight, stretch=stretch)
return fp
def get_label_font(family="Helvetica", style="normal", size=16, weight="normal", stretch="normal"):
"""
Define fonts to use for image axis labels.
Arguments
---------
family : string
style : string
size : numeric
weight : string
stretch : string
Returns
-------
matplotlib.font_manager.FontProperties
"""
if family == "Helvetica":
family = ["Helvetica", "Arial", "sans-serif"]
fp = fm.FontProperties(family=family, style=style, size=size, weight=weight, stretch=stretch)
return fp
def save_fig(filename="image", folder="images", dpi=300, bbox_inches="tight", pad=0.1):
"""
Save the current figure as a file to disk.
Arguments
---------
filename: string
filename of image file to be saved
folder: string
folder in which to save the image file
dpi: int
resolution at which to save the image
bbox_inches: string
tell matplotlib to figure out the tight bbox of the figure
pad: float
inches to pad around the figure
Returns
-------
None
"""
if not os.path.exists(folder):
os.makedirs(folder)
plt.savefig(
"{}/{}.png".format(folder, filename), dpi=dpi, bbox_inches=bbox_inches, pad_inches=pad
)
def save_and_show(
fig, ax, save, show, filename="image", folder="images", dpi=300, bbox_inches="tight", pad=0.1
):
"""
Consistently handle plot completion.
Save then either display or return the figure.
Arguments
---------
fig: matplotlib figure
ax: matplotlib axis
save: bool
whether to save the image to disk, or not
show: bool
whether to display the image or instead just return figure and axis
filename: string
filename of image file to be saved
folder: string
folder in which to save the image file
dpi: int
resolution at which to save the image
bbox_inches: string
tell matplotlib to figure out the tight bbox of the figure
pad: float
inches to pad around the figure
Returns
-------
tuple
(fig, ax) if show=False, otherwise returns None
"""
if save:
save_fig(filename=filename, folder=folder, dpi=dpi, bbox_inches=bbox_inches, pad=pad)
if show:
plt.show()
else:
return fig, ax
@jit(cache=True, nopython=True) # pragma: no cover
def logistic_map(pop, rate):
"""
Define the equation for the logistic map.
Arguments
---------
pop: float
current population value at time t
rate: float
growth rate parameter values
Returns
-------
float
scalar result of logistic map at time t+1
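    Example
    -------
    Illustrative sanity check (assumes standard float printing):
    >>> logistic_map(0.5, 3.9)
    0.975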
"""
return pop * rate * (1 - pop)
@jit(cache=True, nopython=True) # pragma: no cover
def cubic_map(pop, rate):
"""
Define the equation for the cubic map.
Arguments
---------
pop: float
current population value at time t
rate: float
growth rate parameter values
Returns
-------
float
scalar result of cubic map at time t+1
"""
return rate * pop**3 + pop * (1 - rate)
@jit(cache=True, nopython=True) # pragma: no cover
def singer_map(pop, rate):
"""
Define the equation for the singer map.
Arguments
---------
pop: float
current population value at time t
rate: float
growth rate parameter values
Returns
-------
float
scalar result of singer map at time t+1
"""
return rate * (7.86 * pop - 23.31 * pop**2 + 28.75 * pop**3 - 13.3 * pop**4)
def simulate(
model=logistic_map,
num_gens=50,
rate_min=0.5,
rate_max=4,
num_rates=8,
num_discard=0,
initial_pop=0.5,
jit=True,
):
"""
    Simulate a model.
    Call the simulator (either JIT compiled or not) to create a DataFrame with
columns for each growth rate, row labels for each time step, and values
computed by the model.
Arguments
---------
model: function
the function defining an iterated map to simulate; default is the
logistic map
num_gens: int
number of iterations to run the model
rate_min: float
the first growth rate for the model, between 0 and 4
rate_max: float
the last growth rate for the model, between 0 and 4
num_rates: int
how many growth rates between min and max to run the model on
num_discard: int
number of generations to discard before keeping population values
initial_pop: float
starting population when you run the model, between 0 and 1
jit: bool
if True, use jit compiled simulator function to speed up simulation,
if False, use uncompiled simulator function
Returns
-------
DataFrame
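    Example
    -------
    A minimal illustrative run (sizes kept small so it finishes fast):
    >>> pops = simulate(model=logistic_map, num_gens=3, rate_min=2.0,
    ...                 rate_max=3.0, num_rates=2, jit=False)
    >>> pops.shape
    (3, 2)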
"""
if jit:
return simulate_jit(
model=model,
num_gens=num_gens,
rate_min=rate_min,
rate_max=rate_max,
num_rates=num_rates,
num_discard=num_discard,
initial_pop=initial_pop,
)
else:
return simulate_no_compile(
model=model,
num_gens=num_gens,
rate_min=rate_min,
rate_max=rate_max,
num_rates=num_rates,
num_discard=num_discard,
initial_pop=initial_pop,
)
def simulate_no_compile(model, num_gens, rate_min, rate_max, num_rates, num_discard, initial_pop):
"""
Create a DataFrame with columns for each growth rate.
    Rows are labeled by time step and values are computed by the model
    (without JIT compilation).
Arguments
---------
model: function
the function defining an iterated map to simulate
num_gens: int
number of iterations to run the model
rate_min: float
the first growth rate for the model, between 0 and 4
rate_max: float
the last growth rate for the model, between 0 and 4
num_rates: int
how many growth rates between min and max to run the model on
num_discard: int
number of generations to discard before keeping population values
initial_pop: float
starting population when you run the model, between 0 and 1
Returns
-------
DataFrame
"""
pops = []
rates = np.linspace(rate_min, rate_max, num_rates)
# for each rate, run the function repeatedly, starting at the initial_pop
for rate in rates:
pop = initial_pop
# first run it num_discard times and ignore the results
for _ in range(num_discard):
pop = model(pop, rate)
# now that those gens are discarded, run it num_gens times
for _ in range(num_gens):
pops.append([rate, pop])
pop = model(pop, rate)
# return a DataFrame with one column for each growth rate and one row for
# each timestep (aka generation)
df = pd.DataFrame(data=pops, columns=["rate", "pop"])
df.index = pd.MultiIndex.from_arrays([num_rates * list(range(num_gens)), df["rate"].values])
return df.drop(labels="rate", axis=1).unstack()["pop"]
def simulate_jit(model, num_gens, rate_min, rate_max, num_rates, num_discard, initial_pop):
"""
Create a DataFrame with columns for each growth rate.
    Rows are labeled by time step and values are computed by the model (with
    JIT compilation). You can't pass a jitted function to another jitted
    function unless you turn off 'nopython' mode (which makes it slow). In
    other words, you can't pass different model functions directly to the
    simulate function. Instead, use a closure: make_jit_simulator returns a
    jitted simulator that sees the jitted model function through its
    enclosing scope, so the model never needs to be passed in as an
    argument.
Arguments
---------
model: function
the function defining an iterated map to simulate
num_gens: int
number of iterations to run the model
rate_min: float
the first growth rate for the model, between 0 and 4
rate_max: float
the last growth rate for the model, between 0 and 4
num_rates: int
how many growth rates between min and max to run the model on
num_discard: int
number of generations to discard before keeping population values
initial_pop: float
starting population when you run the model, between 0 and 1
Returns
-------
DataFrame
"""
# make the jitted simulator
jit_simulator = make_jit_simulator(
model=model,
num_gens=num_gens,
rate_min=rate_min,
rate_max=rate_max,
num_rates=num_rates,
num_discard=num_discard,
initial_pop=initial_pop,
)
# run the jit_simulator to create the pops to pass to the DataFrame
pops = jit_simulator()
# return a DataFrame with one column for each growth rate and one row for
# each timestep (aka generation)
df = pd.DataFrame(data=pops, columns=["rate", "pop"])
df.index = pd.MultiIndex.from_arrays([num_rates * list(range(num_gens)), df["rate"].values])
return df.drop(labels="rate", axis=1).unstack()["pop"]
def make_jit_simulator(model, num_gens, rate_min, rate_max, num_rates, num_discard, initial_pop):
"""
Create a jitted simulator function.
    The returned simulator sees the jitted model function through its
    enclosing (closure) scope, so the model is not passed as an argument.
Arguments
---------
model: function
the function defining an iterated map to simulate
num_gens: int
number of iterations to run the model
rate_min: float
the first growth rate for the model, between 0 and 4
rate_max: float
the last growth rate for the model, between 0 and 4
num_rates: int
how many growth rates between min and max to run the model on
num_discard: int
number of generations to discard before keeping population values
initial_pop: float
starting population when you run the model, between 0 and 1
Returns
-------
function
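    Example
    -------
    Illustrative: the returned simulator takes no model argument because the
    model is captured by the closure.
    >>> sim = make_jit_simulator(logistic_map, num_gens=2, rate_min=2.0,
    ...                          rate_max=2.0, num_rates=1, num_discard=0,
    ...                          initial_pop=0.5)
    >>> sim().shape
    (2, 2)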
"""
@jit(cache=True, nopython=True) # pragma: no cover
def jit_simulator(
num_gens=num_gens,
rate_min=rate_min,
rate_max=rate_max,
num_rates=num_rates,
num_discard=num_discard,
initial_pop=initial_pop,
):
pops = np.empty(shape=(num_gens * num_rates, 2), dtype=np.float64)
rates = np.linspace(rate_min, rate_max, num_rates)
# for each rate, run the function repeatedly, starting at initial_pop
for rate_num, rate in zip(range(len(rates)), rates):
pop = initial_pop
# first run it num_discard times and ignore the results
for _ in range(num_discard):
pop = model(pop, rate)
# now that those gens are discarded, run it num_gens times
for gen_num in range(num_gens):
row_num = gen_num + num_gens * rate_num
pops[row_num] = [rate, pop]
pop = model(pop, rate)
return pops
return jit_simulator
def get_bifurcation_plot_points(pops):
"""
Convert a DataFrame of values from the model into a set of xy points.
You can plot these points as a bifurcation diagram.
Arguments
---------
pops: DataFrame
population data output from the model
Returns
-------
DataFrame
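    Example
    -------
    Illustrative: 3 generations x 2 growth rates flatten into 6 xy points.
    >>> pops = simulate(num_gens=3, num_rates=2, jit=False)
    >>> len(get_bifurcation_plot_points(pops))
    6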
"""
# create a new DataFrame to contain our xy points
xy_points = pd.DataFrame(columns=["x", "y"])
# for each column in the populations DataFrame
for rate in pops.columns:
# append the growth rate as the x column and all the population values
# as the y column
to_append = pd.DataFrame({"x": rate, "y": pops[rate]})
xy_points = pd.concat([xy_points, to_append])
# reset the index and drop old index before returning the xy point data
xy_points = xy_points.reset_index().drop(labels="index", axis=1)
return xy_points
def bifurcation_plot(
pops,
xmin=0,
xmax=4,
ymin=0,
ymax=1,
figsize=(10, 6),
title="Bifurcation Diagram",
xlabel="Growth Rate",
ylabel="Population",
color="#003399",
filename="image",
save=True,
show=True,
title_font=None,
label_font=None,
folder="images",
dpi=300,
bbox_inches="tight",
pad=0.1,
):
"""
Plot the results of the model as a bifurcation diagram.
Arguments
---------
pops: DataFrame
population data output from the model
xmin: float
minimum value on the x axis
xmax: float
maximum value on the x axis
ymin: float
minimum value on the y axis
ymax: float
maximum value on the y axis
figsize: tuple
(width, height) of figure
title: string
title of the plot
xlabel: string
label of the x axis
ylabel: string
label of the y axis
color: string
color of the points in the scatter plot
filename: string
name of image file to be saved, if applicable
save: bool
whether to save the image to disk or not
show: bool
whether to display the image on screen or not
title_font: matplotlib.font_manager.FontProperties
font properties for figure title
label_font: matplotlib.font_manager.FontProperties
font properties for axis labels
folder: string
folder in which to save the image file
dpi: int
resolution at which to save the image
bbox_inches: string
tell matplotlib to figure out the tight bbox of the figure
pad: float
inches to pad around the figure
Returns
-------
tuple
(fig, ax) if show=False, otherwise returns None
"""
if title_font is None:
title_font = get_title_font()
if label_font is None:
label_font = get_label_font()
# create a new matplotlib figure and axis and set its size
fig, ax = plt.subplots(figsize=figsize)
# plot the xy data
points = get_bifurcation_plot_points(pops)
_ = ax.scatter(points["x"], points["y"], c=color, edgecolor="None", alpha=1, s=1)
# set x and y limits, title, and x and y labels
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
    ax.set_title(title, fontproperties=title_font)
    ax.set_xlabel(xlabel, fontproperties=label_font)
    ax.set_ylabel(ylabel, fontproperties=label_font)
return save_and_show(
fig=fig,
ax=ax,
save=save,
show=show,
filename=filename,
folder=folder,
dpi=dpi,
bbox_inches=bbox_inches,
pad=pad,
)
def get_phase_colors(color_request, length=1, color_reverse=False, default_color="#003399"):
"""
Return a list of colors based on a request.
Request could be a list, string color name, or string colormap name.
Arguments
---------
color_request: string or list
what color the caller wants, could be a list, string color name, or
string colormap name
length: int
how many total colors to return in the list
color_reverse: bool
reverse the returned list of colors if True
default_color: string
if the list is shorter than the specified length, pad it out with
default_color
Returns
-------
list
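    Example
    -------
    Illustrative: a single color is padded to the requested length, while a
    colormap name is sampled for distinct colors.
    >>> get_phase_colors('#003399', length=3)
    ['#003399', '#003399', '#003399']
    >>> len(get_phase_colors('viridis', length=5))
    5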
"""
color_list = []
if isinstance(color_request, list):
# if they passed a list, then just use it
color_list = color_request
elif isinstance(color_request, str):
# if they passed a string, it could be a color name or a colormap name
if len(color_request) == 1 or color_request.startswith("#"):
# if it's only 1 character long or starts with a #, then it's a
# color name or hex code
color_list = [color_request]
default_color = color_request
else:
# if it's more than 1 character and doesn't start with # then it's
# the name of a colormap
color_map = cm.get_cmap(color_request)
color_list = color_map([x / float(length) for x in range(length)]).tolist()
# make sure list is same length as specified in length argument - if not,
# pad it out with default_color, that way, each scatterplot gets a color
color_list = (
color_list + [default_color for n in range(length - len(color_list))]
if len(color_list) < length
else color_list
)
# if the color_reverse=True, reverse list of colors before returning it
if color_reverse:
color_list.reverse()
return color_list
def get_phase_diagram_points(pops, discard_gens=1, dimensions=2):
"""
Convert a DataFrame of values from the model into a set of xy(z) points.
Arguments
---------
pops: DataFrame
population data output from the model
discard_gens: int
number of rows to discard before keeping points to plot
dimensions: int
{2, 3}, number of dimensions specifying if we want points for a 2-D or
3-D plot: (t, t+1) vs (t, t+1, t+2)
Returns
-------
DataFrame
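    Example
    -------
    Illustrative: 4 generations yield 2 consecutive (t, t+1) points once the
    first row is discarded.
    >>> pops = simulate(num_gens=4, num_rates=1, jit=False)
    >>> len(get_phase_diagram_points(pops, discard_gens=1))
    2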
"""
    # drop the first row by default because every run has the same starting
    # value and it leaves a visual artifact; if specified by the argument,
    # drop the initial n rows to show only the eventual attractor the
    # system settles on
if discard_gens > 0 and len(pops) > discard_gens:
discard_gens = np.arange(0, discard_gens)
pops = pops.drop(labels=pops.index[discard_gens])
pops = pops.reset_index().drop(labels="index", axis=1)
# a point is defined by the name of its model run then its spatial coords
points = []
point_columns = ["name", "x", "y", "z"]
# for each column in the populations DataFrame, where the label is the
# 'name' of the model run
for name in pops.columns:
# for each row in the column
for label, _ in pops.iterrows():
            # we can only create points up to row len(pops)-(dimensions-1)
            # because we need future time steps to create each point
if label < len(pops) - (dimensions - 1):
point = [name]
for n in range(dimensions):
# append the value at the current time (aka row) as x, t+1
# as y (and t+2 as z if dimensions=3)
point.append(pops[name][label + n])
# append this point to the list of points
points.append(point)
# convert the list of points to a MultiIndex DataFrame
# with a level in the index called 'name' to represent each model run
df = pd.DataFrame(points, columns=point_columns[0 : dimensions + 1])
df.index = pd.MultiIndex.from_tuples(list(zip(df["name"], df.index)), names=["name", ""])
df = df.drop(labels="name", axis=1)
return df
def phase_diagram(
pops,
discard_gens=0,
figsize=(6, 6),
xmin=0,
xmax=1,
ymin=0,
ymax=1,
title="",
xlabel="Population (t)",
ylabel="Population (t + 1)",
marker=".",
size=5,
alpha=0.7,
color="#003399",
color_reverse=False,
legend=False,
filename="image",
save=True,
show=True,
title_font=None,
label_font=None,
folder="images",
dpi=300,
bbox_inches="tight",
pad=0.1,
):
"""
Draw a 2D phase diagram for one or more time series.
Plot the value at time t on the x-axis and the value at t+1 on the y-axis.
Arguments
---------
pops: DataFrame
population data output from the model
discard_gens: int
number of rows to discard before keeping points to plot
figsize: tuple
(width, height) of figure
xmin: float
minimum value on the x axis
xmax: float
maximum value on the x axis
ymin: float
minimum value on the y axis
ymax: float
maximum value on the y axis
title: string
title of the plot
xlabel: string
label of the x axis
ylabel: string
label of the y axis
marker: string
the type of point to use in the plot
size: float
the size of the marker
alpha: float
the opacity of the marker
color: string
color of the points in the scatter plot
color_reverse: bool
reverse the returned list of colors if True
legend: bool
if we should display a legend or not
filename: string
name of image file to be saved, if applicable
save: bool
whether to save the image to disk or not
show: bool
whether to display the image on screen or not
title_font: matplotlib.font_manager.FontProperties
font properties for figure title
label_font: matplotlib.font_manager.FontProperties
font properties for axis labels
folder: string
folder in which to save the image file
dpi: int
resolution at which to save the image
bbox_inches: string
tell matplotlib to figure out the tight bbox of the figure
pad: float
inches to pad around the figure
Returns
-------
tuple
(fig, ax) if show=False, otherwise returns None
"""
if title_font is None:
title_font = get_title_font()
if label_font is None:
label_font = get_label_font()
# first get the xy points to plot
points = get_phase_diagram_points(pops, discard_gens, dimensions=2)
plots = []
# get_phase_diagram_points() returns a MultiIndexed DataFrame, each run of
# the model has its own 'name' in the index
index = points.index.get_level_values("name")
names = np.unique(index)
# create a new matplotlib figure and axis and set its size
fig, ax = plt.subplots(figsize=figsize)
# set the plot title, x- and y-axis limits, and x- and y-axis labels
    ax.set_title(title, fontproperties=title_font)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
    ax.set_xlabel(xlabel, fontproperties=label_font)
    ax.set_ylabel(ylabel, fontproperties=label_font)
# make sure we have a list of colors as long as the number of model runs
color_list = get_phase_colors(color, len(names), color_reverse)
# plot xy data for each run of the model that appears in the MultiIndex
for n in range(len(names)):
xy = points.iloc[index == names[n]]
plots.append(
ax.scatter(
xy["x"],
xy["y"],
marker=marker,
c=[color_list[n]],
edgecolor="none",
s=size,
alpha=alpha,
)
)
# add a legend if argument is True
if legend:
ax.legend(plots, names.tolist(), loc=1, frameon=True, framealpha=1)
if filename == "":
filename = (
title.replace(" ", "-")
.replace("=", "-")
.replace(",", "-")
.replace(".", "")
.replace("--", "-")
)
return save_and_show(
fig=fig,
ax=ax,
save=save,
show=show,
filename=filename,
folder=folder,
dpi=dpi,
bbox_inches=bbox_inches,
pad=pad,
)
def phase_diagram_3d(
pops,
discard_gens=0,
figsize=(10, 8),
xmin=0,
xmax=1,
ymin=0,
ymax=1,
zmin=0,
zmax=1,
remove_ticks=True,
title="",
elev=25,
azim=240,
dist=10,
xlabel="Population (t)",
ylabel="Population (t + 1)",
zlabel="Population (t + 2)",
marker=".",
size=5,
alpha=0.7,
color="#003399",
color_reverse=False,
legend=False,
legend_bbox_to_anchor=None,
filename="image",
save=True,
show=True,
title_font=None,
label_font=None,
folder="images",
dpi=300,
bbox_inches="tight",
pad=0.1,
):
"""
Draw a 3D phase diagram for one or more time series.
Plot the value at time t on the x-axis, the value at t+1 on the y-axis,
and the value of t+2 on the z-axis.
Arguments
---------
    pops: DataFrame
population data output from the model
discard_gens: int
number of rows to discard before keeping points to plot
figsize: tuple
(width, height) of figure
xmin: float
minimum value on the x axis
xmax: float
maximum value on the x axis
ymin: float
minimum value on the y axis
ymax: float
maximum value on the y axis
zmin: float
minimum value on the z axis
zmax: float
maximum value on the z axis
remove_ticks: bool
remove axis ticks or not
title: string
title of the plot
elev: float
the elevation of the viewing perspective
azim: float
the azimuth of the viewing perspective
dist: float
the distance of the viewing perspective
xlabel: string
label of the x axis
ylabel: string
label of the y axis
zlabel: string
label of the z axis
marker: string
the type of point to use in the plot
size: float
the size of the marker
alpha: float
the opacity of the marker
color: string
color of the points in the scatter plot
color_reverse: bool
reverse the returned list of colors if True
legend: bool
if we should display a legend or not
legend_bbox_to_anchor: float
amount to offset the legend from its natural position
filename: string
name of image file to be saved, if applicable
save: bool
whether to save the image to disk or not
show: bool
whether to display the image on screen or not
title_font: matplotlib.font_manager.FontProperties
font properties for figure title
label_font: matplotlib.font_manager.FontProperties
font properties for axis labels
folder: string
folder in which to save the image file
dpi: int
resolution at which to save the image
bbox_inches: string
tell matplotlib to figure out the tight bbox of the figure
pad: float
inches to pad around the figure
Returns
-------
tuple
(fig, ax) if show=False, otherwise returns None
"""
if title_font is None:
title_font = get_title_font()
if label_font is None:
label_font = get_label_font()
# first get the xyz points to plot
points = get_phase_diagram_points(pops, discard_gens, dimensions=3)
plots = []
# get_phase_diagram_points() returns a MultiIndexed DataFrame, each run of
# the model has its own 'name' in the index
index = points.index.get_level_values("name")
names = np.unique(index)
# create new figure, set its size, and create an axis with 3-D projection
fig = plt.figure(figsize=figsize)
ax = fig.gca(projection="3d")
ax.xaxis.set_pane_color((1, 1, 1, 1))
ax.yaxis.set_pane_color((1, 1, 1, 1))
ax.zaxis.set_pane_color((1, 1, 1, 1))
# configure the perspective from which to view the 3D plot
ax.elev = elev
ax.azim = azim
ax.dist = dist
# set the plot title, axis limits, and axis labels
    ax.set_title(title, fontproperties=title_font)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.set_zlim(zmin, zmax)
    ax.set_xlabel(xlabel, fontproperties=label_font)
    ax.set_ylabel(ylabel, fontproperties=label_font)
    ax.set_zlabel(zlabel, fontproperties=label_font)
# remove all ticks if argument is True
if remove_ticks:
ax.tick_params(
reset=True,
axis="both",
which="both",
pad=0,
width=0,
length=0,
bottom=False,
top=False,
left=False,
right=False,
labelbottom=False,
labeltop=False,
labelleft=False,
labelright=False,
)
else:
ax.tick_params(reset=True)
# make sure we have a list of colors as long as the number of model runs
color_list = get_phase_colors(color, len(names), color_reverse)
# plot xyz data for each run of the model that appears in the MultiIndex
for n in range(len(names)):
xyz = points.iloc[index == names[n]]
plots.append(
ax.scatter(
xyz["x"],
xyz["y"],
xyz["z"],
marker=marker,
c=[color_list[n]],
edgecolor=[color_list[n]],
s=size,
alpha=alpha,
)
)
# add a legend if argument is True
if legend:
ax.legend(
plots,
names.tolist(),
loc=1,
frameon=True,
framealpha=1,
bbox_to_anchor=legend_bbox_to_anchor,
)
if filename == "":
filename = (
title.replace(" ", "-")
.replace("=", "-")
.replace(",", "-")
.replace(".", "")
.replace("--", "-")
)
return save_and_show(
fig=fig,
ax=ax,
save=save,
show=show,
filename=filename,
folder=folder,
dpi=dpi,
bbox_inches=bbox_inches,
pad=pad,
)
def get_cobweb_points(model, r, x, n):
"""
Calculate the vertices of cobweb lines for a cobweb plot.
Steps in the calculation:
    1) Start from the initial population value x (e.g., 0.5).
2) Start on the x-axis at the point (x, 0).
3) Draw a vertical line to the red function curve: this point has the
coordinates (x, f(x)).
4) Draw a horizontal line from this point to the gray diagonal line: this
point has the coordinates (f(x), f(x)).
5) Draw a vertical line from this point to the red function curve: this
point has the coordinates (f(x), f(f(x))).
6) Repeat steps 4 and 5 recursively n times.
Arguments
---------
model: function
defining an iterated map to simulate
r: float
growth rate parameter value to pass to the map
x: float
starting population value
n: int
number of iterations to run
Returns
-------
tuple
cobweb_x_vals, cobweb_y_vals
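    Example
    -------
    Illustrative: starting at the fixed point x=0.5 with r=2, every cobweb
    vertex stays at 0.5.
    >>> xs, ys = get_cobweb_points(logistic_map, r=2.0, x=0.5, n=1)
    >>> list(zip(xs, ys))
    [(0.5, 0), (0.5, 0.5), (0.5, 0.5), (0.5, 0.5)]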
"""
cobweb_points = [(x, 0)]
for _ in range(n):
y1 = model(x, r)
cobweb_points.append((x, y1))
cobweb_points.append((y1, y1))
y2 = model(y1, r)
cobweb_points.append((y1, y2))
x = y1
return zip(*cobweb_points)
def get_function_points(model, r, n, start, end):
"""
Calculate model results for n population values.
Values are evenly spaced between start and end values.
Arguments
---------
model: function
defining an iterated map to simulate
r: float
growth rate parameter value to pass to the map
n: int
number of iterations to run
start: float
lower limit of the function range
end: float
upper limit of the function range
Returns
-------
tuple
x_vals, y_vals
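    Example
    -------
    Illustrative: sample the logistic map at three evenly spaced points.
    >>> x_vals, y_vals = get_function_points(logistic_map, 2.0, 3, 0, 1)
    >>> y_vals
    [0.0, 0.5, 0.0]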
"""
x_vals = np.linspace(start, end, n)
y_vals = [model(x, r) for x in x_vals]
return x_vals, y_vals
def cobweb_plot(
model=logistic_map,
r=0,
function_n=1000,
cobweb_n=100,
cobweb_x=0.5,
num_discard=0,
title="",
filename="",
show=True,
save=True,
start=0,
end=1,
figsize=(6, 6),
diagonal_linewidth=1.35,
cobweb_linewidth=1,
function_linewidth=1.5,
title_font=None,
label_font=None,
folder="images",
dpi=300,
bbox_inches="tight",
pad=0.1,
):
"""
Draw a cobweb plot.
Run the map once each for 1000 population values evenly spaced between 0
and 1. This gives us the results of the equation (y values) across the
entire range of possible population values (x values). The gray diagonal
line is just a plot of y=x.
Arguments
---------
model: function
defining an iterated map to simulate
r: float
growth rate parameter value to pass to the map
function_n: int
number of iterations of the function to run
cobweb_n: int
number of iterations of the cobweb line to run
num_discard: int
how many initial iterations of the cobweb line to throw away
title: string
title of the plot
filename: string
name of image file to be saved, if applicable
save: bool
whether to save the image to disk or not
show: bool
whether to display the image on screen or not
start: float
lower limit of the function range
end: float
upper limit of the function range
figsize: tuple
(width, height) of figure
diagonal_linewidth: float
width of y=x line
cobweb_linewidth: float
width of cobweb line
function_linewidth: float
width of function line
title_font: matplotlib.font_manager.FontProperties
font properties for figure title
label_font: matplotlib.font_manager.FontProperties
font properties for axis labels
folder: string
folder in which to save the image file
dpi: int
resolution at which to save the image
bbox_inches: string
tell matplotlib to figure out the tight bbox of the figure
pad: float
inches to pad around the figure
Returns
-------
tuple
(fig, ax) if show=False, otherwise returns None
"""
if title_font is None:
title_font = get_title_font()
if label_font is None:
label_font = get_label_font()
func_x_vals, func_y_vals = get_function_points(
model=model, r=r, n=function_n, start=start, end=end
)
cobweb_x_vals, cobweb_y_vals = get_cobweb_points(model=model, r=r, x=cobweb_x, n=cobweb_n)
cobweb_x_vals = cobweb_x_vals[num_discard:]
cobweb_y_vals = cobweb_y_vals[num_discard:]
fig, ax = plt.subplots(figsize=figsize)
# diagonal line
_ = ax.plot((0, 1), (0, 1), color="gray", linewidth=diagonal_linewidth)
# function line
_ = ax.scatter(
func_x_vals, func_y_vals, color="#cc0000", edgecolor="None", s=function_linewidth
)
# cobweb line
_ = ax.plot(cobweb_x_vals, cobweb_y_vals, color="#003399", linewidth=cobweb_linewidth)
ax.set_ylim((0, 1))
ax.set_xlim((0, 1))
if title == "":
title = "Cobweb Plot, r={}".format(r)
    ax.set_title(title, fontproperties=title_font)
if filename == "":
filename = "cobweb-plot-r{}-x{}".format(r, cobweb_x).replace(".", "")
return save_and_show(
fig=fig,
ax=ax,
save=save,
show=show,
filename=filename,
folder=folder,
dpi=dpi,
bbox_inches=bbox_inches,
pad=pad,
)
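if __name__ == "__main__":
    # Illustrative usage sketch (not part of the public API): simulate the
    # logistic map across the full range of growth rates, then draw the
    # classic bifurcation diagram on screen without saving it to disk.
    pops = simulate(num_gens=100, rate_min=0, rate_max=4, num_rates=1000,
                    num_discard=100)
    bifurcation_plot(pops, save=False)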
| 35,585 | 27.537289 | 99 | py |
pynamical | pynamical-main/pynamical/__init__.py | """Expose the pynamical API."""
from .pynamical import *
__version__ = "0.3.2"
| 81 | 12.666667 | 31 | py |
clipspy | clipspy-master/setup.py | # Copyright (c) 2016-2023, Matteo Cafasso
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import fileinput
from setuptools import find_packages, setup
CWD = os.path.dirname(__file__)
def package_version():
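    # scan clips/__init__.py for a line of the form: __version__ = '1.0.0'
    # and return the quoted value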
module_path = os.path.join(CWD, 'clips', '__init__.py')
for line in fileinput.input(module_path):
if line.startswith('__version__'):
return line.split('=')[-1].strip().replace('\'', '')
setup(
name="clipspy",
version=package_version(),
author="Matteo Cafasso",
author_email="noxdafox@gmail.com",
description=("CLIPS Python bindings"),
license="BSD",
long_description=open(os.path.join(CWD, 'README.rst')).read(),
packages=find_packages(),
ext_package="clips",
setup_requires=["cffi>=1.0.0"],
install_requires=["cffi>=1.0.0"],
cffi_modules=["clips/clips_build.py:ffibuilder"],
include_dirs=["/usr/include/clips", "/usr/local/include/clips"],
data_files=[('lib', ['lib/clips.cdef'])],
keywords="clips python cffi expert-system",
url="https://github.com/noxdafox/clipspy",
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: Implementation :: PyPy",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License"
]
)
| 2,819 | 39.285714 | 80 | py |
clipspy | clipspy-master/test/environment_test.py | import os
import unittest
from tempfile import mkstemp
from clips import CLIPSError
from clips import Environment, Symbol, LoggingRouter, ImpliedFact, InstanceName
DEFRULE_FACT = """
(defrule fact-rule
?fact <- (test-fact)
=>
(python_method ?fact))
"""
DEFRULE_INSTANCE = """
(defrule instance-rule
?instance <- (object (is-a TEST)
(name ?instance-name))
=>
(python_method ?instance)
(python_method ?instance-name))
"""
DEFFUNCTION = """
(deffunction test-fact-function ()
(bind ?facts (python_fact_method))
(python_method ?facts))
"""
DEFCLASS = """(defclass TEST (is-a USER))"""
def python_function(*value):
return value
def python_types():
return None, True, False
def python_objects(obj):
return obj
def python_error():
raise Exception("BOOM!")
class TempFile:
"""Cross-platform temporary file."""
name = None
def __enter__(self):
fobj, self.name = mkstemp()
os.close(fobj)
return self
def __exit__(self, *_):
os.remove(self.name)
class ObjectTest:
def __init__(self, value):
self.value = value
class TestEnvironment(unittest.TestCase):
def setUp(self):
self.values = []
self.env = Environment()
self.env.add_router(LoggingRouter())
self.env.define_function(python_function)
self.env.define_function(python_function,
name='python-function-renamed')
self.env.define_function(python_error)
self.env.define_function(python_types)
self.env.define_function(python_objects)
self.env.define_function(self.python_method)
self.env.define_function(self.python_fact_method)
self.env.build(DEFCLASS)
self.env.build(DEFFUNCTION)
self.env.build(DEFRULE_FACT)
self.env.build(DEFRULE_INSTANCE)
def tearDown(self):
for router in tuple(self.env.routers()):
router.delete()
def python_method(self, *values):
self.values += values
def python_fact_method(self):
"""Returns a list with one fact."""
return [self.env.assert_string('(test-fact 5)')]
def test_eval_python_function(self):
"""Python function is evaluated correctly."""
expected = (0, 1.1, "2", Symbol('three'), InstanceName('four'))
ret = self.env.eval('(python_function 0 1.1 "2" three [four])')
self.assertEqual(ret, expected)
expected = (0, 1.1, "2", Symbol('three'))
ret = self.env.eval('(python-function-renamed 0 1.1 "2" three)')
self.assertEqual(ret, expected)
expected = (Symbol('nil'), Symbol('TRUE'), Symbol('FALSE'))
ret = self.env.eval('(python_types)')
self.assertEqual(ret, expected)
def test_eval_python_error(self):
"""Errors in Python functions are correctly set."""
self.assertIsNone(self.env.error_state)
with self.assertRaises(CLIPSError):
self.env.eval('(python_error)')
self.assertTrue("[PYCODEFUN1]" in str(self.env.error_state))
self.env.clear_error_state()
self.assertIsNone(self.env.error_state)
def test_eval_python_method(self):
"""Python method is evaluated correctly."""
expected = [0, 1.1, "2", Symbol('three')]
ret = self.env.eval('(python_method 0 1.1 "2" three)')
self.assertEqual(ret, Symbol('nil'))
self.assertEqual(self.values, expected)
def test_call_python_object(self):
"""Python objects are correctly marshalled."""
test_object = ObjectTest(42)
ret = self.env.call('python_objects', test_object)
self.assertEqual(ret, test_object)
def test_rule_python_fact(self):
"""Facts are forwarded to Python """
fact = self.env.assert_string('(test-fact)')
self.env.run()
self.assertEqual(self.values[0], fact)
def test_rule_python_instance(self):
"""Instances are forwarded to Python """
defclass = self.env.find_class('TEST')
inst = defclass.make_instance('test')
self.env.run()
self.assertEqual(self.values[0], inst)
self.assertEqual(self.values[1], inst.name)
def test_facts_function(self):
"""Python functions can return list of facts"""
function = self.env.find_function('test-fact-function')
function()
self.assertTrue(isinstance(self.values[0], ImpliedFact))
def test_batch_star(self):
"""Commands are evaluated from file."""
with TempFile() as tmp:
with open(tmp.name, 'wb') as tmpfile:
tmpfile.write(b"(assert (test-fact))\n")
self.env.batch_star(tmp.name)
self.assertTrue(
'test-fact' in (f.template.name for f in self.env.facts()))
def test_save_load(self):
"""Constructs are saved and loaded."""
with TempFile() as tmp:
self.env.save(tmp.name)
self.env.clear()
self.env.load(tmp.name)
self.assertTrue('fact-rule' in
(r.name for r in self.env.rules()))
with TempFile() as tmp:
self.env.save(tmp.name, binary=True)
self.env.clear()
self.env.load(tmp.name, binary=True)
self.assertTrue('fact-rule' in
(r.name for r in self.env.rules()))
| 5,415 | 27.505263 | 79 | py |
clipspy | clipspy-master/test/functions_test.py | import unittest
from clips import Environment, Symbol, CLIPSError
DEFFUNCTION1 = """(deffunction function-sum (?a ?b) (+ ?a ?b))"""
DEFFUNCTION2 = """(deffunction function-sub (?a ?b) (- ?a ?b))"""
DEFGENERIC1 = """(defgeneric generic-sum)"""
DEFGENERIC2 = """(defgeneric generic-sub)"""
DEFMETHOD = """
(defmethod generic-sum ((?a INTEGER) (?b INTEGER)) (+ ?a ?b))
"""
class TestFunctions(unittest.TestCase):
def setUp(self):
self.env = Environment()
self.env.build(DEFMETHOD)
self.env.build(DEFGENERIC1)
self.env.build(DEFGENERIC2)
self.env.build(DEFFUNCTION1)
self.env.build(DEFFUNCTION2)
def test_function_call(self):
"""Test function call."""
function = self.env.find_function('function-sum')
self.assertEqual(function(1, 2), 3)
function = self.env.find_generic('generic-sum')
self.assertEqual(function(1, 2), 3)
self.assertEqual(self.env.call('function-sum', 1, 2), 3)
self.assertEqual(self.env.call('generic-sum', 1, 2), 3)
self.assertEqual(
self.env.call('create$', 1, 2.0, "three", Symbol('four')),
(1, 2.0, 'three', 'four'))
def test_function(self):
"""Deffunction object test."""
func = self.env.find_function("function-sub")
self.assertTrue(func in self.env.functions())
self.assertEqual(func.name, "function-sub")
self.assertEqual(func.module.name, "MAIN")
self.assertTrue('deffunction' in str(func))
self.assertTrue('deffunction' in repr(func))
self.assertTrue(func.deletable)
self.assertFalse(func.watch)
func.watch = True
self.assertTrue(func.watch)
func.undefine()
with self.assertRaises(LookupError):
self.env.find_function("function-sub")
with self.assertRaises(CLIPSError):
print(func)
def test_generic(self):
"""Defgeneric object test."""
func = self.env.find_generic("generic-sum")
self.assertTrue(func in self.env.generics())
self.assertEqual(func.name, "generic-sum")
self.assertEqual(func.module.name, "MAIN")
self.assertTrue('defgeneric' in str(func))
self.assertTrue('defgeneric' in repr(func))
self.assertTrue(func.deletable)
self.assertFalse(func.watch)
func.watch = True
self.assertTrue(func.watch)
func.undefine()
with self.assertRaises(LookupError):
self.env.find_function("generic-sum")
with self.assertRaises(CLIPSError):
print(func)
def test_method(self):
"""Defgeneric object test."""
restr = (2, 2, 2, 6, 9, Symbol('FALSE'), 1, Symbol('INTEGER'),
Symbol('FALSE'), 1, Symbol('INTEGER'))
func = self.env.find_generic("generic-sum")
method = tuple(func.methods())[0]
self.assertTrue('defmethod' in str(method))
self.assertTrue('defmethod' in repr(method))
self.assertTrue(method.deletable)
self.assertFalse(method.watch)
self.assertEqual(method.description, "1 (INTEGER) (INTEGER)")
self.assertEqual(method.restrictions, restr)
method.watch = True
self.assertTrue(method.watch)
method.undefine()
self.assertTrue(method not in func.methods())
| 3,363 | 30.148148 | 70 | py |
clipspy | clipspy-master/test/agenda_test.py | import unittest
from clips import Environment, CLIPSError, Strategy, SalienceEvaluation
DEFTEMPLATE = """(deftemplate template-fact
(slot template-slot))
"""
DEFRULE = """(defrule MAIN::rule-name
(declare (salience 10))
(implied-fact implied-value)
=>
(assert (rule-fired)))
"""
DEFTEMPLATERULE = """(defrule MAIN::rule-name
(implied-fact implied-value)
(template-fact (template-slot template-value))
=>
(assert (rule-fired)))
"""
DEFOTHERRULE = """(defrule MAIN::other-rule-name
(declare (salience 20))
(implied-fact implied-value)
=>
(assert (rule-fired)))
"""
class TestAgenda(unittest.TestCase):
def setUp(self):
self.env = Environment()
self.env.build(DEFTEMPLATE)
self.env.build(DEFRULE)
def test_agenda_strategy(self):
"""Agenda strategy getting/setting."""
for strategy in Strategy:
self.env.strategy = strategy
self.assertEqual(self.env.strategy, strategy)
def test_agenda_salience_evaluation(self):
"""Agenda salience_evaluation getting/setting."""
for salience_evaluation in SalienceEvaluation:
self.env.salience_evaluation = salience_evaluation
self.assertEqual(
self.env.salience_evaluation, salience_evaluation)
def test_agenda_activation(self):
"""Agenda activation test."""
self.env.assert_string('(implied-fact implied-value)')
self.assertTrue(self.env.agenda_changed)
activation = tuple(self.env.activations())[0]
self.assertEqual(activation.name, 'rule-name')
self.assertEqual(activation.salience, 10)
self.assertEqual(str(activation), '10 rule-name: f-1')
self.assertEqual(repr(activation), 'Activation: 10 rule-name: f-1')
activation.delete()
self.assertFalse(activation in self.env.activations())
with self.assertRaises(CLIPSError):
activation.salience = 10
def test_agenda_run(self):
"""Agenda rules are fired on run."""
self.env.assert_string('(implied-fact implied-value)')
self.env.run()
fact_names = (f.template.name for f in self.env.facts())
self.assertTrue('rule-fired' in fact_names)
def test_agenda_activation_order(self):
"""Agenda activations order change if salience or strategy change."""
self.env.build(DEFOTHERRULE)
self.env.assert_string('(implied-fact implied-value)')
self.assertTrue(self.env.agenda_changed)
activations = tuple(self.env.activations())
self.assertEqual(tuple(a.name for a in activations),
(u'other-rule-name', u'rule-name'))
activations[1].salience = 30
self.assertEqual(activations[1].salience, 30)
self.assertFalse(self.env.agenda_changed)
self.env.reorder()
self.assertTrue(self.env.agenda_changed)
activations = tuple(self.env.activations())
self.assertEqual(tuple(a.name for a in activations),
(u'rule-name', u'other-rule-name'))
self.env.refresh()
self.assertTrue(self.env.agenda_changed)
self.env.clear()
activations = tuple(self.env.activations())
self.assertEqual(len(activations), 0)
class TestRules(unittest.TestCase):
def setUp(self):
self.env = Environment()
self.env.build(DEFTEMPLATE)
self.env.build(DEFTEMPLATERULE)
def test_rule_build(self):
"""Simple Rule build."""
rule = self.env.find_rule('rule-name')
self.assertTrue(rule in self.env.rules())
self.assertEqual(rule.module.name, 'MAIN')
self.assertTrue(rule.deletable)
self.assertEqual(str(rule), ' '.join(DEFTEMPLATERULE.split()))
self.assertEqual(repr(rule),
"Rule: %s" % ' '.join(DEFTEMPLATERULE.split()))
self.assertFalse(rule.watch_firings)
rule.watch_firings = True
self.assertTrue(rule.watch_firings)
self.assertFalse(rule.watch_activations)
rule.watch_activations = True
self.assertTrue(rule.watch_activations)
rule.undefine()
with self.assertRaises(LookupError):
self.env.find_rule('rule-name')
with self.assertRaises(CLIPSError):
print(rule)
def test_rule_matches(self):
"""Partial rule matches."""
rule = self.env.find_rule('rule-name')
self.env.assert_string('(implied-fact implied-value)')
self.assertEqual(rule.matches(), (1, 0, 0))
rule.undefine()
def test_rule_activation(self):
"""Rule activation."""
rule = self.env.find_rule('rule-name')
self.env.assert_string('(implied-fact implied-value)')
self.env.assert_string(
'(template-fact (template-slot template-value))')
self.assertEqual(rule.matches(), (2, 1, 1))
self.env.run()
rule.refresh()
fact_names = (f.template.name for f in self.env.facts())
self.assertTrue('rule-fired' in fact_names)
| 5,094 | 29.147929 | 77 | py |
clipspy | clipspy-master/test/classes_test.py | import os
import unittest
from tempfile import mkstemp
from clips import Environment, Symbol, InstanceName
from clips import CLIPSError, ClassDefaultMode, LoggingRouter
DEFCLASSES = [
"""
(defclass AbstractClass (is-a USER)
(role abstract))
""",
"""(defclass InheritClass (is-a AbstractClass))""",
"""
(defclass ConcreteClass (is-a USER)
(slot Slot (type SYMBOL) (allowed-values value another-value)))
""",
"""
(defclass MessageHandlerClass (is-a USER)
(slot One)
(slot Two))
""",
"""
(defmessage-handler MessageHandlerClass test-handler ()
(+ ?self:One ?self:Two))
"""
]
DEFINSTANCES = """(definstances MAIN::defined-instances
(c1 of ConcreteClass (Slot a-slot)))
"""
class TempFile:
"""Cross-platform temporary file."""
name = None
def __enter__(self):
fobj, self.name = mkstemp()
os.close(fobj)
return self
def __exit__(self, *_):
os.remove(self.name)
class TestClasses(unittest.TestCase):
def setUp(self):
self.env = Environment()
self.env.add_router(LoggingRouter())
for defclass in DEFCLASSES:
self.env.build(defclass)
def test_classes(self):
"""Classes wrapper test."""
self.assertEqual(
self.env.default_mode, ClassDefaultMode.CONVENIENCE_MODE)
self.env.default_mode = ClassDefaultMode.CONSERVATION_MODE
self.assertEqual(
self.env.default_mode, ClassDefaultMode.CONSERVATION_MODE)
defclass = self.env.find_class('USER')
self.assertTrue(defclass in self.env.classes())
with self.assertRaises(LookupError):
self.env.find_class('NonExisting')
defclass = self.env.find_class('ConcreteClass')
defclass.make_instance('some-instance')
defclass.make_instance('test-instance')
instance = self.env.find_instance('test-instance')
self.assertTrue(instance in self.env.instances())
with self.assertRaises(LookupError):
self.env.find_instance('non-existing-instance')
self.assertTrue(self.env.instances_changed)
self.assertFalse(self.env.instances_changed)
with TempFile() as tmp:
saved = self.env.save_instances(tmp.name)
self.env.reset()
loaded = self.env.load_instances(tmp.name)
self.assertEqual(saved, loaded)
with TempFile() as tmp:
saved = self.env.save_instances(tmp.name)
self.env.reset()
loaded = self.env.restore_instances(tmp.name)
self.assertEqual(saved, loaded)
with TempFile() as tmp:
saved = self.env.save_instances(tmp.name, binary=True)
self.env.reset()
loaded = self.env.load_instances(tmp.name)
self.assertEqual(saved, loaded)
def test_abstract_class(self):
"""Abstract class test."""
superclass = self.env.find_class('USER')
subclass = self.env.find_class('InheritClass')
defclass = self.env.find_class('AbstractClass')
self.assertTrue(defclass.abstract)
self.assertFalse(defclass.reactive)
self.assertEqual(defclass.name, 'AbstractClass')
self.assertEqual(defclass.module.name, 'MAIN')
self.assertTrue(defclass.deletable)
self.assertTrue(defclass.subclass(superclass))
self.assertTrue(defclass.superclass(subclass))
self.assertEqual(tuple(defclass.subclasses()), (subclass, ))
self.assertEqual(tuple(defclass.superclasses()), (superclass, ))
with self.assertRaises(CLIPSError):
defclass.make_instance('foobar')
defclass.undefine()
def test_concrete_class(self):
"""Concrete class test."""
defclass = self.env.find_class('ConcreteClass')
self.assertFalse(defclass.abstract)
self.assertTrue(defclass.reactive)
self.assertEqual(defclass.name, 'ConcreteClass')
self.assertEqual(defclass.module.name, 'MAIN')
self.assertTrue(defclass.deletable)
self.assertFalse(defclass.watch_instances)
defclass.watch_instances = True
self.assertTrue(defclass.watch_instances)
self.assertFalse(defclass.watch_slots)
defclass.watch_slots = True
self.assertTrue(defclass.watch_slots)
defclass.undefine()
def test_slot(self):
"""Slot test."""
defclass = self.env.find_class('ConcreteClass')
slot = tuple(defclass.slots())[0]
self.assertFalse(slot.public)
self.assertTrue(slot.writable)
self.assertTrue(slot.accessible)
self.assertTrue(slot.initializable)
self.assertEqual(slot.name, 'Slot')
self.assertEqual(slot.types, ('SYMBOL', ))
self.assertEqual(slot.sources, (defclass.name, ))
self.assertEqual(slot.range, Symbol('FALSE'))
self.assertEqual(slot.facets, ('SGL', 'STC', 'INH', 'RW', 'LCL', 'RCT',
'EXC', 'PRV', 'RW', 'put-Slot'))
self.assertEqual(slot.cardinality, ())
self.assertEqual(slot.default_value, Symbol('value'))
self.assertEqual(slot.allowed_values, ('value', 'another-value'))
self.assertEqual(tuple(slot.allowed_classes()), ())
def test_make_instance(self):
"""Instance test."""
defclass = self.env.find_class('ConcreteClass')
instance_name = self.env.eval(
'(make-instance test-name-instance of ConcreteClass)')
self.assertEqual(instance_name, 'test-name-instance')
self.assertTrue(isinstance(instance_name, InstanceName))
instance = defclass.make_instance()
self.assertEqual(instance.name, 'gen1')
instance = defclass.make_instance('test-instance', Slot=Symbol('value'))
self.assertTrue(instance in defclass.instances())
self.assertEqual(instance.name, 'test-instance')
self.assertEqual(instance.instance_class, defclass)
self.assertEqual(instance['Slot'], Symbol('value'))
self.assertEqual(
str(instance), '[test-instance] of ConcreteClass (Slot value)')
self.assertEqual(
repr(instance),
'Instance: [test-instance] of ConcreteClass (Slot value)')
self.assertEqual(dict(instance), {'Slot': Symbol('value')})
instance.delete()
with self.assertRaises(LookupError):
self.env.find_instance('test-instance')
instance = defclass.make_instance('test-instance')
instance.unmake()
with self.assertRaises(LookupError):
self.env.find_instance('test-instance')
def test_make_instance_errors(self):
"""Instance errors."""
defclass = self.env.find_class('ConcreteClass')
with self.assertRaises(KeyError):
defclass.make_instance('some-instance', NonExistingSlot=1)
with self.assertRaises(TypeError):
defclass.make_instance('some-instance', Slot="wrong type")
with self.assertRaises(ValueError):
defclass.make_instance('some-instance', Slot=Symbol('wrong-value'))
def test_modify_instance(self):
"""Instance slot modification test."""
defclass = self.env.find_class('ConcreteClass')
defclass.make_instance('some-instance')
instance = defclass.make_instance('test-instance', Slot=Symbol('value'))
instance.modify_slots(Slot=Symbol('another-value'))
self.assertEqual(instance['Slot'], Symbol('another-value'))
instance.delete()
def test_message_handler(self):
"""MessageHandler test."""
defclass = self.env.find_class('MessageHandlerClass')
handler = defclass.find_message_handler('test-handler')
expected_str = "(defmessage-handler MAIN::MessageHandlerClass " + \
"test-handler () (+ ?self:One ?self:Two))"
self.assertTrue(handler.deletable)
self.assertEqual(handler.type, 'primary')
self.assertEqual(handler.name, 'test-handler')
self.assertTrue(handler in defclass.message_handlers())
self.assertEqual(str(handler), expected_str)
self.assertEqual(repr(handler), 'MessageHandler: ' + expected_str)
self.assertFalse(handler.watch)
handler.watch = True
self.assertTrue(handler.watch)
handler.undefine()
def test_message_handler_instance(self):
"""MessageHandler instance test."""
defclass = self.env.find_class('MessageHandlerClass')
instance = defclass.make_instance('test-instance', One=1, Two=2)
self.assertEqual(instance.send('test-handler'), 3)
def test_defined_instances(self):
"""DefinedInstances tests."""
self.env.build(DEFINSTANCES)
definstances = self.env.find_defined_instances('defined-instances')
listed = list(self.env.defined_instances())
self.assertEqual(definstances, listed[0])
self.assertEqual(definstances.name, 'defined-instances')
self.assertEqual(
str(definstances),
'(definstances MAIN::defined-instances (c1 of ConcreteClass (Slot a-slot)))')
self.assertEqual(definstances.module.name, 'MAIN')
self.assertTrue(definstances.deletable)
definstances.undefine()
with self.assertRaises(LookupError):
self.env.find_defined_instances('defined-instances')
with self.assertRaises(CLIPSError):
print(definstances)
| 9,513 | 33.722628 | 89 | py |
clipspy | clipspy-master/test/facts_test.py | import os
import unittest
from tempfile import mkstemp
from clips import Environment, Symbol, CLIPSError, TemplateSlotDefaultType
DEFTEMPLATE = """(deftemplate MAIN::template-fact
(slot int (type INTEGER) (allowed-values 0 1 2 3 4 5 6 7 8 9))
(slot float (type FLOAT))
(slot str (type STRING))
(slot symbol (type SYMBOL))
(multislot multifield))
"""
DEFFACTS = """(deffacts MAIN::defined-facts
(template-fact (int 1) (str "a-string")))
"""
IMPL_STR = '(implied-fact 1 2.3 "4" five)'
IMPL_RPR = 'ImpliedFact: (implied-fact 1 2.3 "4" five)'
TMPL_STR = '(template-fact (int 1) (float 2.2) (str "4") (symbol five) ' + \
'(multifield 1 2))'
TMPL_RPR = 'TemplateFact: (template-fact (int 1) (float 2.2) ' + \
'(str "4") (symbol five) (multifield 1 2))'
class TempFile:
"""Cross-platform temporary file."""
name = None
def __enter__(self):
fobj, self.name = mkstemp()
os.close(fobj)
return self
def __exit__(self, *_):
os.remove(self.name)
class TestFacts(unittest.TestCase):
def setUp(self):
self.env = Environment()
self.env.build(DEFTEMPLATE)
def test_facts(self):
"""Facts wrapper test."""
template = self.env.find_template('template-fact')
self.assertTrue(template in self.env.templates())
fact = self.env.assert_string('(implied-fact)')
self.assertTrue(fact in self.env.facts())
self.env.load_facts('(one-fact) (two-facts)')
self.assertTrue('(two-facts)' in (str(f)
for f in self.env.facts()))
with TempFile() as tmp:
saved = self.env.save_facts(tmp.name)
self.env.reset()
loaded = self.env.load_facts(tmp.name)
self.assertEqual(saved, loaded)
def test_implied_fact(self):
"""ImpliedFacts are asserted."""
expected = (1, 2.3, '4', Symbol('five'))
fact = self.env.assert_string('(implied-fact 1 2.3 "4" five)')
self.assertEqual(fact[0], 1)
self.assertEqual(len(fact), 4)
self.assertEqual(fact.index, 1)
self.assertEqual(tuple(fact), expected)
self.assertEqual(str(fact), IMPL_STR)
self.assertEqual(repr(fact), IMPL_RPR)
self.assertTrue(fact in tuple(self.env.facts()))
def test_template_fact(self):
"""TemplateFacts are asserted."""
expected = {'int': 1,
'float': 2.2,
'str': '4',
'symbol': Symbol('five'),
'multifield': (1, 2)}
template = self.env.find_template('template-fact')
fact = template.assert_fact(**expected)
self.assertEqual(len(fact), 5)
self.assertEqual(fact.index, 1)
self.assertEqual(fact['int'], 1)
self.assertEqual(dict(fact), expected)
self.assertEqual(str(fact), TMPL_STR)
self.assertEqual(repr(fact), TMPL_RPR)
self.assertTrue(fact in tuple(self.env.facts()))
def test_template_fact_errors(self):
"""TemplateFacts errors."""
with self.assertRaises(LookupError):
self.env.find_template('non-existing-template')
template = self.env.find_template('template-fact')
with self.assertRaises(KeyError):
template.assert_fact(non_existing_slot=1)
with self.assertRaises(TypeError):
template.assert_fact(int=1.0)
with self.assertRaises(ValueError):
template.assert_fact(int=10)
def test_fact_duplication(self):
"""Test fact duplication."""
fact = self.env.assert_string('(implied-fact)')
new_fact = self.env.assert_string('(implied-fact)')
self.assertEqual(fact, new_fact)
self.assertEqual(len(tuple(self.env.facts())), 1)
self.env.fact_duplication = True
new_fact = self.env.assert_string('(implied-fact)')
self.assertNotEqual(fact, new_fact)
self.assertEqual(len(tuple(self.env.facts())), 2)
def test_modify_fact(self):
"""Asserted TemplateFacts can be modified."""
template = self.env.find_template('template-fact')
fact = template.assert_fact(**{'int': 1,
'float': 2.2,
'str': '4',
'symbol': Symbol('five'),
'multifield': (1, 2)})
fact.modify_slots(symbol=Symbol('six'))
self.assertEqual(fact['symbol'], Symbol('six'))
def test_retract_fact(self):
"""Retracted fact is not anymore in the fact list."""
fact = self.env.assert_string('(implied-fact)')
self.assertTrue(fact in list(self.env.facts()))
fact.retract()
self.assertFalse(fact in list(self.env.facts()))
def test_implied_fact_template(self):
"""ImpliedFact template properties."""
fact = self.env.assert_string('(implied-fact 1 2.3 "4" five)')
template = fact.template
self.assertTrue(template.implied)
self.assertEqual(template.name, 'implied-fact')
self.assertEqual(template.module.name, 'MAIN')
self.assertEqual(template.slots, ())
self.assertEqual(str(template), '')
self.assertEqual(repr(template), 'Template: ')
self.assertFalse(template.deletable)
with self.assertRaises(CLIPSError):
template.undefine()
def test_template_fact_template(self):
"""TemplateFact template properties."""
template = self.env.find_template('template-fact')
self.assertEqual(template.name, 'template-fact')
self.assertEqual(template.module.name, 'MAIN')
self.assertEqual(len(tuple(template.slots)), 5)
self.assertEqual(str(template), ' '.join(DEFTEMPLATE.split()))
self.assertEqual(repr(template),
'Template: ' + ' '.join(DEFTEMPLATE.split()))
self.assertTrue(template.deletable)
template.undefine()
with self.assertRaises(LookupError):
self.env.find_template('template-fact')
with self.assertRaises(CLIPSError):
print(template)
def test_template_fact_slot(self):
"""TemplateFact template Slot."""
template = self.env.find_template('template-fact')
slots = {s.name: s for s in template.slots}
self.assertEqual(slots['int'].name, 'int')
self.assertFalse(slots['int'].multifield)
self.assertTrue(slots['multifield'].multifield)
self.assertEqual(slots['int'].types, ('INTEGER', ))
self.assertEqual(slots['float'].types, ('FLOAT', ))
self.assertEqual(slots['str'].types, ('STRING', ))
self.assertEqual(slots['symbol'].types, ('SYMBOL', ))
self.assertEqual(slots['int'].range, ('-oo', '+oo'))
self.assertEqual(slots['float'].cardinality, ())
self.assertEqual(slots['str'].default_type,
TemplateSlotDefaultType.STATIC_DEFAULT)
self.assertEqual(slots['str'].default_value, '')
self.assertEqual(slots['int'].allowed_values,
(0, 1, 2, 3, 4, 5, 6, 7, 8, 9))
def test_defined_facts(self):
"""DefinedFacts tests."""
self.env.build(DEFFACTS)
deffacts = self.env.find_defined_facts('defined-facts')
listed = list(self.env.defined_facts())
self.assertEqual(deffacts, listed[0])
self.assertEqual(deffacts.name, 'defined-facts')
self.assertEqual(
str(deffacts),
'(deffacts MAIN::defined-facts (template-fact (int 1) (str "a-string")))')
self.assertEqual(deffacts.module.name, 'MAIN')
self.assertTrue(deffacts.deletable)
deffacts.undefine()
with self.assertRaises(LookupError):
self.env.find_defined_facts('defined-facts')
with self.assertRaises(CLIPSError):
print(deffacts)
| 7,966 | 34.887387 | 86 | py |
clipspy | clipspy-master/test/modules_test.py | import unittest
from clips import Environment, Symbol, CLIPSError
DEFGLOBAL = """
(defglobal
?*a* = 1
?*b* = 2)
"""
DEFMODULE = """(defmodule TEST)"""
class TestModules(unittest.TestCase):
def setUp(self):
self.env = Environment()
self.env.build(DEFGLOBAL)
def tearDown(self):
self.env = None
def test_modules(self):
"""Modules wrapper class test."""
self.env.build(DEFMODULE)
# reset MAIN module
module = self.env.find_module('MAIN')
self.env.current_module = module
module = self.env.find_module('TEST')
self.env.current_module = module
self.assertEqual(self.env.current_module,
self.env.find_module('TEST'))
self.assertTrue(module in self.env.modules())
self.assertEqual(self.env.current_module, module)
with self.assertRaises(LookupError):
self.env.find_module("NONEXISTING")
def test_global(self):
"""Defglobal object test."""
glbl = self.env.find_global("b")
self.assertTrue(glbl in self.env.globals())
self.assertEqual(glbl.value, 2)
glbl.value = 3
self.assertEqual(glbl.value, 3)
self.assertTrue(self.env.globals_changed)
self.assertEqual(glbl.name, "b")
self.assertEqual(glbl.module.name, "MAIN")
self.assertTrue('defglobal' in str(glbl))
self.assertTrue('defglobal' in repr(glbl))
self.assertTrue(glbl.deletable)
self.assertFalse(glbl.watch)
glbl.watch = True
self.assertTrue(glbl.watch)
glbl.undefine()
with self.assertRaises(LookupError):
self.env.find_global("b")
with self.assertRaises(CLIPSError):
print(glbl)
def test_module(self):
"""Module object test."""
module = self.env.current_module
self.assertEqual(module.name, 'MAIN')
self.assertEqual(str(module), '')
self.assertEqual(repr(module), 'Module: ')
| 2,026 | 24.658228 | 57 | py |
clipspy | clipspy-master/clips/classes.py | # Copyright (c) 2016-2023, Matteo Cafasso
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This module contains the definition of:
* Classes namespace class
* Class class
* Instance class
* ClassSlot class
* MessageHandler class
* DefinedInstances class
"""
import os
import clips
from clips.modules import Module
from clips.common import PutSlotError, PUT_SLOT_ERROR
from clips.common import CLIPSError, SaveMode, ClassDefaultMode
from clips.common import environment_builder, environment_modifier
from clips._clips import lib, ffi
class Instance:
"""A Class Instance is an occurrence of an object.
A Class Instance represents its data as a dictionary
where each slot name is a key.
Class Instance slot values can be modified.
The Instance will be re-evaluated against the rule network once modified.
"""
__slots__ = '_env', '_ist'
def __init__(self, env: ffi.CData, ist: ffi.CData):
self._env = env
self._ist = ist
lib.RetainInstance(self._ist)
def __del__(self):
try:
lib.ReleaseInstance(self._ist)
except (AttributeError, TypeError):
pass # mostly happening during interpreter shutdown
def __hash__(self):
return hash(self._ist)
def __eq__(self, ist):
return self._ist == ist._ist
def __str__(self):
return ' '.join(instance_pp_string(self._env, self._ist).split())
def __repr__(self):
string = ' '.join(instance_pp_string(self._env, self._ist).split())
return "%s: %s" % (self.__class__.__name__, string)
def __iter__(self):
slot_names = (s.name for s in self.instance_class.slots())
return ((n, slot_value(self._env, self._ist, n)) for n in slot_names)
def __getitem__(self, slot):
return slot_value(self._env, self._ist, slot)
@property
def name(self) -> str:
"""Instance name."""
return ffi.string(lib.InstanceName(self._ist)).decode()
@property
def instance_class(self) -> 'Class':
"""Instance class."""
defclass = lib.InstanceClass(self._ist)
name = ffi.string(lib.DefclassName(defclass)).decode()
return Class(self._env, name)
def modify_slots(self, **slots):
"""Modify one or more slot values of the Instance.
Instance must exist within the CLIPS engine.
Equivalent to the CLIPS (modify-instance) function.
"""
modifier = environment_modifier(self._env, 'instance')
ret = lib.IMSetInstance(modifier, self._ist)
if ret != lib.IME_NO_ERROR:
raise CLIPSError(self._env, code=ret)
for slot, slot_val in slots.items():
value = clips.values.clips_value(self._env, value=slot_val)
ret = lib.IMPutSlot(modifier, str(slot).encode(), value)
if ret != PutSlotError.PSE_NO_ERROR:
raise PUT_SLOT_ERROR[ret](slot)
if lib.IMModify(modifier) is ffi.NULL:
raise CLIPSError(self._env, code=lib.IMError(self._env))
def send(self, message: str, arguments: str = None) -> type:
"""Send a message to the Instance.
The output value of the message handler is returned.
Equivalent to the CLIPS (send) function.
"""
output = clips.values.clips_value(self._env)
instance = clips.values.clips_value(self._env, value=self)
args = arguments.encode() if arguments is not None else ffi.NULL
lib.Send(self._env, instance, message.encode(), args, output)
return clips.values.python_value(self._env, output)
def delete(self):
"""Directly delete the instance."""
ret = lib.DeleteInstance(self._ist)
if ret != lib.UIE_NO_ERROR:
raise CLIPSError(self._env, code=ret)
def unmake(self):
"""This method is equivalent to delete except that it uses
message-passing instead of directly deleting the instance.
"""
ret = lib.UnmakeInstance(self._ist)
if ret != lib.UIE_NO_ERROR:
raise CLIPSError(self._env, code=ret)
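# A hedged usage sketch of the dict-like Instance API above. The
# 'ConcreteClass' defclass, its 'Slot' and the slot values are assumptions:
# an equivalent class must have been built in the environment beforehand.
def _instance_usage_sketch(environment):
    defclass = environment.find_class('ConcreteClass')
    instance = defclass.make_instance('an-instance', Slot=clips.Symbol('value'))
    slots = dict(instance)  # __iter__ yields (slot-name, value) pairs
    instance.modify_slots(Slot=clips.Symbol('another-value'))
    instance.unmake()  # delete through message-passing
    return slots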
class Class:
"""A Class is a template for creating instances of objects.
In CLIPS, Classes are defined via the (defclass) statement.
Classes allow to create new instances
to be added within the CLIPS environment.
"""
__slots__ = '_env', '_name'
def __init__(self, env: ffi.CData, name: str):
self._env = env
self._name = name.encode()
def __hash__(self):
return hash(self._ptr())
def __eq__(self, cls):
return self._ptr() == cls._ptr()
def __str__(self):
string = lib.DefclassPPForm(self._ptr())
string = ffi.string(string).decode() if string != ffi.NULL else ''
return ' '.join(string.split())
def __repr__(self):
string = lib.DefclassPPForm(self._ptr())
string = ffi.string(string).decode() if string != ffi.NULL else ''
return "%s: %s" % (self.__class__.__name__, ' '.join(string.split()))
def _ptr(self) -> ffi.CData:
cls = lib.FindDefclass(self._env, self._name)
if cls == ffi.NULL:
raise CLIPSError(self._env, 'Class <%s> not defined' % self.name)
return cls
@property
def abstract(self) -> bool:
"""True if the class is abstract."""
return lib.ClassAbstractP(self._ptr())
@property
def reactive(self) -> bool:
"""True if the class is reactive."""
return lib.ClassReactiveP(self._ptr())
@property
def name(self) -> str:
"""Class name."""
return ffi.string(lib.DefclassName(self._ptr())).decode()
@property
def module(self) -> Module:
"""The module in which the Class is defined.
Equivalent to the CLIPS (defclass-module) function.
"""
name = ffi.string(lib.DefclassModule(self._ptr())).decode()
return Module(self._env, name)
@property
def deletable(self) -> bool:
"""True if the Class can be deleted."""
return lib.DefclassIsDeletable(self._ptr())
@property
def watch_instances(self) -> bool:
"""Whether or not the Class Instances are being watched."""
return lib.DefclassGetWatchInstances(self._ptr())
@watch_instances.setter
def watch_instances(self, flag: bool):
"""Whether or not the Class Instances are being watched."""
lib.DefclassSetWatchInstances(self._ptr(), flag)
@property
def watch_slots(self) -> bool:
"""Whether or not the Class Slots are being watched."""
return lib.DefclassGetWatchSlots(self._ptr())
@watch_slots.setter
def watch_slots(self, flag: bool):
"""Whether or not the Class Slots are being watched."""
lib.DefclassSetWatchSlots(self._ptr(), flag)
def make_instance(self, instance_name: str = None, **slots) -> Instance:
"""Make a new Instance from this Class.
Equivalent to the CLIPS (make-instance) function.
"""
builder = environment_builder(self._env, 'instance')
ret = lib.IBSetDefclass(builder, lib.DefclassName(self._ptr()))
if ret != lib.IBE_NO_ERROR:
raise CLIPSError(self._env, code=ret)
for slot, slot_val in slots.items():
value = clips.values.clips_value(self._env, value=slot_val)
ret = lib.IBPutSlot(builder, str(slot).encode(), value)
if ret != PutSlotError.PSE_NO_ERROR:
raise PUT_SLOT_ERROR[ret](slot)
instance = lib.IBMake(
builder, instance_name.encode()
if instance_name is not None else ffi.NULL)
if instance != ffi.NULL:
return Instance(self._env, instance)
else:
            raise CLIPSError(self._env, code=lib.IBError(self._env))
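    # Hedged sketch of how make_instance maps keyword arguments onto slots
    # (the 'Point' class and its 'x'/'y' slots are illustrative):
    #
    #     point_cls = env.find_class('Point')
    #     point = point_cls.make_instance('p1', x=1, y=2)
    #     assert point['x'] == 1
    #
    # Each keyword is validated by the instance builder: an unknown slot
    # name raises KeyError, a wrong value type raises TypeError.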
def subclass(self, defclass: 'Class') -> bool:
"""True if the Class is a subclass of the given one."""
return lib.SubclassP(self._ptr(), defclass._ptr())
def superclass(self, defclass: 'Class') -> bool:
"""True if the Class is a superclass of the given one."""
return lib.SuperclassP(self._ptr(), defclass._ptr())
def slots(self, inherited: bool = False) -> iter:
"""Iterate over the Slots of the class."""
value = clips.values.clips_value(self._env)
lib.ClassSlots(self._ptr(), value, inherited)
return (ClassSlot(self._env, self.name, n)
for n in clips.values.python_value(self._env, value))
def instances(self) -> iter:
"""Iterate over the instances of the class."""
ist = lib.GetNextInstanceInClass(self._ptr(), ffi.NULL)
while ist != ffi.NULL:
yield Instance(self._env, ist)
ist = lib.GetNextInstanceInClass(self._ptr(), ist)
def subclasses(self, inherited: bool = False) -> iter:
"""Iterate over the subclasses of the class.
Equivalent to the CLIPS (class-subclasses) function.
"""
value = clips.values.clips_value(self._env)
lib.ClassSubclasses(self._ptr(), value, inherited)
for defclass in classes(
self._env, clips.values.python_value(self._env, value)):
yield defclass
    def superclasses(self, inherited: bool = False) -> iter:
        """Iterate over the superclasses of the class.
        Equivalent to the CLIPS (class-superclasses) function.
        """
        value = clips.values.clips_value(self._env)
        lib.ClassSuperclasses(self._ptr(), value, inherited)
for defclass in classes(
self._env, clips.values.python_value(self._env, value)):
yield defclass
def message_handlers(self) -> iter:
"""Iterate over the message handlers of the class."""
index = lib.GetNextDefmessageHandler(self._ptr(), 0)
while index != 0:
yield MessageHandler(self._env, self.name, index)
index = lib.GetNextDefmessageHandler(self._ptr(), index)
def find_message_handler(
self, name: str, handler_type: str = 'primary') -> 'MessageHandler':
"""Returns the MessageHandler given its name and type."""
ident = lib.FindDefmessageHandler(
self._ptr(), name.encode(), handler_type.encode())
if ident == 0:
raise CLIPSError(self._env)
return MessageHandler(self._env, self.name, ident)
def undefine(self):
"""Undefine the Class.
Equivalent to the CLIPS (undefclass) command.
The object becomes unusable after this method has been called.
"""
if not lib.Undefclass(self._ptr(), self._env):
raise CLIPSError(self._env)
class ClassSlot:
"""A Class Instances organize the information within Slots.
Slots might restrict the type or amount of data they store.
"""
__slots__ = '_env', '_cls', '_name'
def __init__(self, env: ffi.CData, cls: str, name: str):
self._env = env
self._cls = cls.encode()
self._name = name.encode()
def __hash__(self):
return hash(self._ptr()) + hash(self._name)
def __eq__(self, cls):
return self._ptr() == cls._ptr() and self._name == cls._name
def __str__(self):
return self.name
def __repr__(self):
return "%s: %s" % (self.__class__.__name__, self.name)
def _ptr(self) -> ffi.CData:
cls = lib.FindDefclass(self._env, self._cls)
if cls == ffi.NULL:
raise CLIPSError(
self._env, 'Class <%s> not defined' % self._cls.decode())
return cls
@property
def name(self):
"""The Slot name."""
return self._name.decode()
@property
def public(self) -> bool:
"""True if the Slot is public."""
return lib.SlotPublicP(self._ptr(), self._name)
@property
def initializable(self) -> bool:
"""True if the Slot is initializable."""
return lib.SlotInitableP(self._ptr(), self._name)
@property
def writable(self) -> bool:
"""True if the Slot is writable."""
return lib.SlotWritableP(self._ptr(), self._name)
@property
def accessible(self) -> bool:
"""True if the Slot is directly accessible."""
return lib.SlotDirectAccessP(self._ptr(), self._name)
@property
def types(self) -> tuple:
"""A tuple containing the value types for this Slot.
Equivalent to the CLIPS (slot-types) function.
"""
value = clips.values.clips_value(self._env)
if lib.SlotTypes(self._ptr(), self._name, value):
return clips.values.python_value(self._env, value)
else:
raise CLIPSError(self._env)
@property
def sources(self) -> tuple:
"""A tuple containing the names of the Class sources for this Slot.
Equivalent to the CLIPS (slot-sources) function.
"""
value = clips.values.clips_value(self._env)
if lib.SlotSources(self._ptr(), self._name, value):
return clips.values.python_value(self._env, value)
else:
raise CLIPSError(self._env)
@property
def range(self) -> tuple:
"""A tuple containing the numeric range for this Slot.
Equivalent to the CLIPS (slot-range) function.
"""
value = clips.values.clips_value(self._env)
if lib.SlotRange(self._ptr(), self._name, value):
return clips.values.python_value(self._env, value)
else:
raise CLIPSError(self._env)
@property
def facets(self) -> tuple:
"""A tuple containing the facets for this Slot.
Equivalent to the CLIPS (slot-facets) function.
"""
value = clips.values.clips_value(self._env)
if lib.SlotFacets(self._ptr(), self._name, value):
return clips.values.python_value(self._env, value)
else:
raise CLIPSError(self._env)
@property
def cardinality(self) -> tuple:
"""A tuple containing the cardinality for this Slot.
Equivalent to the CLIPS slot-cardinality function.
"""
value = clips.values.clips_value(self._env)
if lib.SlotCardinality(self._ptr(), self._name, value):
return clips.values.python_value(self._env, value)
else:
raise CLIPSError(self._env)
@property
def default_value(self) -> type:
"""The default value for this Slot.
Equivalent to the CLIPS (slot-default-value) function.
"""
value = clips.values.clips_value(self._env)
if lib.SlotDefaultValue(self._ptr(), self._name, value):
return clips.values.python_value(self._env, value)
else:
raise CLIPSError(self._env)
@property
def allowed_values(self) -> tuple:
"""A tuple containing the allowed values for this Slot.
Equivalent to the CLIPS (slot-allowed-values) function.
"""
value = clips.values.clips_value(self._env)
if lib.SlotAllowedValues(self._ptr(), self._name, value):
return clips.values.python_value(self._env, value)
else:
raise CLIPSError(self._env)
def allowed_classes(self) -> iter:
"""Iterate over the allowed classes for this slot.
Equivalent to the CLIPS (slot-allowed-classes) function.
"""
        value = clips.values.clips_value(self._env)
        lib.SlotAllowedClasses(self._ptr(), self._name, value)
        allowed = clips.values.python_value(self._env, value)
        if isinstance(allowed, tuple):
            for defclass in classes(self._env, allowed):
                yield defclass
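# Hedged sketch of slot introspection through ClassSlot. 'ConcreteClass' is
# an assumption: any defclass already built in the environment will do.
def _slot_introspection_sketch(environment):
    defclass = environment.find_class('ConcreteClass')
    # Map each local slot to a few of its facets.
    return {slot.name: (slot.types, slot.writable, slot.default_value)
            for slot in defclass.slots()}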
class MessageHandler:
"""MessageHandlers are the CLIPS equivalent of instance methods in Python.
"""
__slots__ = '_env', '_cls', '_idx'
def __init__(self, env: ffi.CData, cls: str, idx: int):
self._env = env
self._cls = cls.encode()
self._idx = idx
def __hash__(self):
return hash(self._ptr()) + self._idx
def __eq__(self, cls):
return self._ptr() == cls._ptr() and self._idx == cls._idx
def __str__(self):
string = lib.DefmessageHandlerPPForm(self._ptr(), self._idx)
string = ffi.string(string).decode() if string != ffi.NULL else ''
return ' '.join(string.split())
def __repr__(self):
string = lib.DefmessageHandlerPPForm(self._ptr(), self._idx)
string = ffi.string(string).decode() if string != ffi.NULL else ''
return "%s: %s" % (self.__class__.__name__, ' '.join(string.split()))
def _ptr(self) -> ffi.CData:
cls = lib.FindDefclass(self._env, self._cls)
if cls == ffi.NULL:
raise CLIPSError(
self._env, 'Class <%s> not defined' % self._cls.decode())
return cls
@property
def name(self) -> str:
"""MessageHandler name."""
return ffi.string(lib.DefmessageHandlerName(
self._ptr(), self._idx)).decode()
@property
def type(self) -> str:
"""MessageHandler type."""
return ffi.string(lib.DefmessageHandlerType(
self._ptr(), self._idx)).decode()
@property
def watch(self) -> bool:
"""True if the MessageHandler is being watched."""
return lib.DefmessageHandlerGetWatch(self._ptr(), self._idx)
@watch.setter
def watch(self, flag: bool):
"""True if the MessageHandler is being watched."""
lib.DefmessageHandlerSetWatch(self._ptr(), self._idx, flag)
@property
def deletable(self) -> bool:
"""True if the MessageHandler can be deleted."""
return lib.DefmessageHandlerIsDeletable(self._ptr(), self._idx)
def undefine(self):
"""Undefine the MessageHandler.
Equivalent to the CLIPS (undefmessage-handler) function.
The object becomes unusable after this method has been called.
"""
if not lib.UndefmessageHandler(self._ptr(), self._idx, self._env):
raise CLIPSError(self._env)
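# Hedged sketch of message dispatch. The 'MessageHandlerClass' defclass and
# its 'test-handler' primary handler are assumptions that must already exist.
def _message_handler_sketch(environment):
    defclass = environment.find_class('MessageHandlerClass')
    handler = defclass.find_message_handler('test-handler')
    instance = defclass.make_instance('an-instance')
    return instance.send(handler.name)  # run the handler on the instance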
class DefinedInstances:
"""The DefinedInstances constitute a set of a priori
or initial knowledge specified as a collection of instances of user
defined classes.
When the CLIPS environment is reset, every instance specified
within a definstances construct in the CLIPS knowledge base
is added to the DefinedInstances list.
"""
__slots__ = '_env', '_name'
def __init__(self, env: ffi.CData, name: str):
self._env = env
self._name = name.encode()
def __hash__(self):
return hash(self._ptr())
def __eq__(self, dis):
return self._ptr() == dis._ptr()
def __str__(self):
string = lib.DefinstancesPPForm(self._ptr())
string = ffi.string(string).decode() if string != ffi.NULL else ''
return ' '.join(string.split())
def __repr__(self):
string = lib.DefinstancesPPForm(self._ptr())
string = ffi.string(string).decode() if string != ffi.NULL else ''
return "%s: %s" % (self.__class__.__name__, ' '.join(string.split()))
def _ptr(self) -> ffi.CData:
dfc = lib.FindDefinstances(self._env, self._name)
if dfc == ffi.NULL:
raise CLIPSError(
self._env, 'DefinedInstances <%s> not defined' % self.name)
return dfc
@property
def name(self) -> str:
"""The DefinedInstances name."""
return self._name.decode()
@property
def module(self) -> Module:
"""The module in which the DefinedInstances is defined.
        Equivalent to the CLIPS (definstances-module) function.
"""
name = ffi.string(lib.DefinstancesModule(self._ptr())).decode()
return Module(self._env, name)
@property
def deletable(self) -> bool:
"""True if the DefinedInstances can be undefined."""
return lib.DefinstancesIsDeletable(self._ptr())
def undefine(self):
"""Undefine the DefinedInstances.
Equivalent to the CLIPS (undefinstances) function.
The object becomes unusable after this method has been called.
"""
if not lib.Undefinstances(self._ptr(), self._env):
raise CLIPSError(self._env)
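# Hedged sketch: instances listed in a (definstances) construct are created
# when the environment is reset. The construct text and 'ConcreteClass' are
# illustrative.
def _defined_instances_sketch(environment):
    environment.build('(definstances sample-instances (i1 of ConcreteClass))')
    environment.reset()  # creates i1
    return environment.find_instance('i1')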
class Classes:
"""Classes and Instances namespace class.
.. note::
All the Classes methods are accessible through the Environment class.
"""
__slots__ = ['_env']
def __init__(self, env: ffi.CData):
self._env = env
@property
def default_mode(self) -> ClassDefaultMode:
"""Return the current class defaults mode.
Equivalent to the CLIPS (get-class-defaults-mode) function.
"""
return ClassDefaultMode(lib.GetClassDefaultsMode(self._env))
@default_mode.setter
def default_mode(self, value: ClassDefaultMode):
"""Return the current class defaults mode.
Equivalent to the CLIPS (get-class-defaults-mode) command.
"""
lib.SetClassDefaultsMode(self._env, value)
@property
def instances_changed(self) -> bool:
"""True if any instance has changed since last check."""
value = lib.GetInstancesChanged(self._env)
lib.SetInstancesChanged(self._env, False)
return value
def classes(self) -> iter:
"""Iterate over the defined Classes."""
defclass = lib.GetNextDefclass(self._env, ffi.NULL)
while defclass != ffi.NULL:
name = ffi.string(lib.DefclassName(defclass)).decode()
yield Class(self._env, name)
defclass = lib.GetNextDefclass(self._env, defclass)
def find_class(self, name: str) -> Class:
"""Find the Class by the given name."""
defclass = lib.FindDefclass(self._env, name.encode())
if defclass == ffi.NULL:
raise LookupError("Class '%s' not found" % name)
return Class(self._env, name)
def defined_instances(self) -> iter:
"""Iterate over the DefinedInstances."""
definstances = lib.GetNextDefinstances(self._env, ffi.NULL)
while definstances != ffi.NULL:
name = ffi.string(lib.DefinstancesName(definstances)).decode()
yield DefinedInstances(self._env, name)
definstances = lib.GetNextDefinstances(self._env, definstances)
def find_defined_instances(self, name: str) -> DefinedInstances:
"""Find the DefinedInstances by its name."""
dfs = lib.FindDefinstances(self._env, name.encode())
if dfs == ffi.NULL:
raise LookupError("DefinedInstances '%s' not found" % name)
return DefinedInstances(self._env, name)
def instances(self) -> iter:
"""Iterate over the defined Instancees."""
definstance = lib.GetNextInstance(self._env, ffi.NULL)
while definstance != ffi.NULL:
yield Instance(self._env, definstance)
definstance = lib.GetNextInstance(self._env, definstance)
def find_instance(self, name: str, module: Module = None) -> Instance:
"""Find the Instance by the given name."""
        module = module._ptr() if module is not None else ffi.NULL
        definstance = lib.FindInstance(self._env, module, name.encode(), True)
if definstance == ffi.NULL:
raise LookupError("Instance '%s' not found" % name)
return Instance(self._env, definstance)
def load_instances(self, instances: str) -> int:
"""Load a set of instances into the CLIPS data base.
Equivalent to the CLIPS (load-instances) function.
Instances can be loaded from a string, a file or a binary file.
"""
instances = instances.encode()
if os.path.exists(instances):
try:
return self._load_instances_binary(instances)
except CLIPSError:
return self._load_instances_text(instances)
else:
return self._load_instances_string(instances)
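    # Hedged usage sketch for load_instances (instance text is illustrative):
    #
    #     env.load_instances('([i1] of ConcreteClass)')  # from a string
    #     env.load_instances('instances.clp')            # from a file path
    #
    # The os.path.exists() check above picks the loader; for files, binary
    # loading is attempted first and text loading is the fallback.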
def _load_instances_binary(self, instances: str) -> int:
ret = lib.BinaryLoadInstances(self._env, instances)
if ret == -1:
raise CLIPSError(self._env)
return ret
def _load_instances_text(self, instances: str) -> int:
ret = lib.LoadInstances(self._env, instances)
if ret == -1:
raise CLIPSError(self._env)
return ret
def _load_instances_string(self, instances: str) -> int:
ret = lib.LoadInstancesFromString(self._env, instances, len(instances))
if ret == -1:
raise CLIPSError(self._env)
return ret
def restore_instances(self, instances: str) -> int:
"""Restore a set of instances into the CLIPS data base.
Equivalent to the CLIPS (restore-instances) function.
Instances can be passed as a set of strings or as a file.
"""
instances = instances.encode()
if os.path.exists(instances):
ret = lib.RestoreInstances(self._env, instances)
if ret == -1:
raise CLIPSError(self._env)
else:
ret = lib.RestoreInstancesFromString(
self._env, instances, len(instances))
if ret == -1:
raise CLIPSError(self._env)
return ret
def save_instances(self, path: str, binary: bool = False,
mode: SaveMode = SaveMode.LOCAL_SAVE) -> int:
"""Save the instances in the system to the specified file.
If binary is True, the instances will be saved in binary format.
Equivalent to the CLIPS (save-instances) function.
"""
if binary:
ret = lib.BinarySaveInstances(self._env, path.encode(), mode)
else:
ret = lib.SaveInstances(self._env, path.encode(), mode)
if ret == 0:
raise CLIPSError(self._env)
return ret
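# Hedged sketch of a save/restore round-trip through the namespace above.
# The path is caller-supplied; both calls return an instance count.
def _save_restore_sketch(environment, path):
    saved = environment.save_instances(path)  # text format by default
    environment.reset()
    restored = environment.restore_instances(path)
    return saved, restored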
def slot_value(env: ffi.CData, ist: ffi.CData, slot: str) -> type:
value = clips.values.clips_value(env)
ret = lib.DirectGetSlot(ist, slot.encode(), value)
if ret != lib.GSE_NO_ERROR:
raise CLIPSError(env, code=ret)
return clips.values.python_value(env, value)
def classes(env: ffi.CData, names: (list, tuple)) -> iter:
for name in names:
defclass = lib.FindDefclass(env, name.encode())
if defclass == ffi.NULL:
raise CLIPSError(env)
yield Class(env, name)
def instance_pp_string(env: ffi.CData, ist: ffi.CData) -> str:
builder = environment_builder(env, 'string')
lib.SBReset(builder)
lib.InstancePPForm(ist, builder)
return ffi.string(builder.contents).decode()
| 28,087 | 30.418345 | 80 | py |
clipspy | clipspy-master/clips/environment.py | # Copyright (c) 2016-2023, Matteo Cafasso
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import clips
from clips.facts import Facts
from clips.agenda import Agenda
from clips.classes import Classes
from clips.modules import Modules
from clips.functions import Functions
from clips.routers import Routers, ErrorRouter
from clips.common import CLIPSError
from clips.common import initialize_environment_data, delete_environment_data
from clips._clips import lib
class Environment:
"""The environment class encapsulates an independent CLIPS engine
with its own data structures.
"""
__slots__ = ('_env', '_facts', '_agenda', '_classes',
'_modules', '_functions', '_routers', '_namespaces')
def __init__(self):
self._env = lib.CreateEnvironment()
initialize_environment_data(self._env)
self._facts = Facts(self._env)
self._agenda = Agenda(self._env)
self._classes = Classes(self._env)
self._modules = Modules(self._env)
self._functions = Functions(self._env)
self._routers = Routers(self._env)
self._routers.add_router(ErrorRouter())
# mapping between the namespace and the methods it exposes
self._namespaces = {m: n for n in (self._facts,
self._agenda,
self._classes,
self._modules,
self._functions,
self._routers)
for m in dir(n) if not m.startswith('_')}
def __del__(self):
try:
delete_environment_data(self._env)
lib.DestroyEnvironment(self._env)
except (AttributeError, KeyError, TypeError):
pass # mostly happening during interpreter shutdown
def __getattr__(self, attr):
try:
return getattr(self._namespaces[attr], attr)
except (KeyError, AttributeError):
raise AttributeError("'%s' object has no attribute '%s'" %
(self.__class__.__name__, attr))
def __setattr__(self, attr, value):
if attr in self.__slots__:
super(Environment, self).__setattr__(attr, value)
return
try:
setattr(self._namespaces[attr], attr, value)
except (KeyError, AttributeError):
raise AttributeError("'%s' object has no attribute '%s'" %
(self.__class__.__name__, attr))
def __dir__(self):
return dir(self.__class__) + list(self._namespaces.keys())
def load(self, path: str, binary: bool = False):
"""Load a set of constructs into the CLIPS data base.
If constructs were saved in binary format,
the binary parameter should be set to True.
Equivalent to the CLIPS (load) function.
"""
if binary:
if not lib.Bload(self._env, path.encode()):
raise CLIPSError(self._env)
else:
ret = lib.Load(self._env, path.encode())
if ret != lib.LE_NO_ERROR:
raise CLIPSError(self._env, code=ret)
    def save(self, path: str, binary: bool = False):
        """Save a set of constructs into the CLIPS data base.
        If binary is True, the constructs will be saved in binary format.
        Equivalent to the CLIPS (save) function.
"""
if binary:
ret = lib.Bsave(self._env, path.encode())
else:
ret = lib.Save(self._env, path.encode())
if ret == 0:
raise CLIPSError(self._env)
def batch_star(self, path: str):
"""Evaluate the commands contained in the specific path.
Equivalent to the CLIPS (batch*) function.
"""
if lib.BatchStar(self._env, path.encode()) != 1:
raise CLIPSError(self._env)
def build(self, construct: str):
"""Build a single construct in CLIPS.
Equivalent to the CLIPS (build) function.
"""
ret = lib.Build(self._env, construct.encode())
if ret != lib.BE_NO_ERROR:
raise CLIPSError(self._env, code=ret)
def eval(self, expression: str) -> type:
"""Evaluate an expression returning its value.
Equivalent to the CLIPS (eval) function.
"""
value = clips.values.clips_value(self._env)
ret = lib.Eval(self._env, expression.encode(), value)
if ret != lib.EE_NO_ERROR:
raise CLIPSError(self._env, code=ret)
return clips.values.python_value(self._env, value)
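    # Hedged sketch pairing build() and eval() (the construct text is
    # illustrative):
    #
    #     env.build('(deffunction add2 (?a ?b) (+ ?a ?b))')
    #     assert env.eval('(add2 1 2)') == 3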
def reset(self):
"""Reset the CLIPS environment.
Equivalent to the CLIPS (reset) function.
"""
if lib.Reset(self._env):
raise CLIPSError(self._env)
def clear(self):
"""Clear the CLIPS environment.
Equivalent to the CLIPS (clear) function.
"""
if not lib.Clear(self._env):
raise CLIPSError(self._env)
| 6,501 | 33.956989 | 80 | py |
clipspy | clipspy-master/clips/modules.py | # Copyright (c) 2016-2023, Matteo Cafasso
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This module contains the definition of:
* Modules namespace class
* Module class
* Global class
"""
import clips
from clips.common import CLIPSError
from clips._clips import lib, ffi
class Module:
"""Modules are namespaces restricting the CLIPS constructs scope."""
__slots__ = '_env', '_name'
def __init__(self, env: ffi.CData, name: str):
self._env = env
self._name = name.encode()
def __hash__(self):
return hash(self._ptr())
def __eq__(self, mdl):
return self._ptr() == mdl._ptr()
def __str__(self):
string = lib.DefmodulePPForm(self._ptr())
string = ffi.string(string).decode() if string != ffi.NULL else ''
return ' '.join(string.split())
def __repr__(self):
string = lib.DefmodulePPForm(self._ptr())
string = ffi.string(string).decode() if string != ffi.NULL else ''
return "%s: %s" % (self.__class__.__name__, ' '.join(string.split()))
def _ptr(self) -> ffi.CData:
module = lib.FindDefmodule(self._env, self._name)
if module == ffi.NULL:
raise CLIPSError(self._env, 'Module <%s> not defined' % self.name)
return module
@property
def name(self) -> str:
"""Module name."""
return self._name.decode()
class Global:
"""A CLIPS global variable.
In CLIPS, Globals are defined via the (defglobal) statement.
"""
__slots__ = '_env', '_name'
def __init__(self, env: ffi.CData, name: str):
self._env = env
self._name = name.encode()
def __hash__(self):
return hash(self._ptr())
def __eq__(self, glb):
return self._ptr() == glb._ptr()
def __str__(self):
string = lib.DefglobalPPForm(self._ptr())
string = ffi.string(string).decode() if string != ffi.NULL else ''
return ' '.join(string.split())
def __repr__(self):
string = lib.DefglobalPPForm(self._ptr())
string = ffi.string(string).decode() if string != ffi.NULL else ''
return "%s: %s" % (self.__class__.__name__, ' '.join(string.split()))
def _ptr(self) -> ffi.CData:
glb = lib.FindDefglobal(self._env, self._name)
if glb == ffi.NULL:
raise CLIPSError(
self._env, 'Global <%s> not defined' % self.name)
return glb
@property
def value(self) -> type:
"""Global value."""
value = clips.values.clips_value(self._env)
lib.DefglobalGetValue(self._ptr(), value)
return clips.values.python_value(self._env, value)
@value.setter
def value(self, value: type):
"""Global value."""
value = clips.values.clips_value(self._env, value=value)
lib.DefglobalSetValue(self._ptr(), value)
@property
def name(self) -> str:
"""Global name."""
return self._name.decode()
@property
def module(self) -> Module:
"""The module in which the Global is defined.
Equivalent to the CLIPS (defglobal-module) function.
"""
name = ffi.string(lib.DefglobalModule(self._ptr())).decode()
return Module(self._env, name)
@property
def deletable(self) -> bool:
"""True if the Global can be deleted."""
return lib.DefglobalIsDeletable(self._ptr())
@property
def watch(self) -> bool:
"""Whether or not the Global is being watched."""
return lib.DefglobalGetWatch(self._ptr())
@watch.setter
def watch(self, flag: bool):
"""Whether or not the Global is being watched."""
lib.DefglobalSetWatch(self._ptr(), flag)
def undefine(self):
"""Undefine the Global.
Equivalent to the CLIPS (undefglobal) function.
The object becomes unusable after this method has been called.
"""
if not lib.Undefglobal(self._ptr(), self._env):
raise CLIPSError(self._env)
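# Hedged usage sketch for Global. The '(defglobal ?*counter* = 0)' construct
# is illustrative and must have been built in the environment first.
def _global_usage_sketch(environment):
    glbl = environment.find_global('counter')
    glbl.value = glbl.value + 1  # read the CLIPS value, write it back
    return glbl.value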
class Modules:
"""Globals and Modules namespace class.
.. note::
All the Modules methods are accessible through the Environment class.
"""
__slots__ = ['_env']
def __init__(self, env: ffi.CData):
self._env = env
@property
def current_module(self) -> Module:
"""The current module.
Equivalent to the CLIPS (get-current-module) function.
"""
module = lib.GetCurrentModule(self._env)
name = ffi.string(lib.DefmoduleName(module)).decode()
return Module(self._env, name)
@current_module.setter
def current_module(self, module: Module):
"""The current module.
        Equivalent to the CLIPS (set-current-module) function.
"""
lib.SetCurrentModule(self._env, module._ptr())
@property
def reset_globals(self) -> bool:
"""True if Globals reset behaviour is enabled."""
return lib.GetResetGlobals(self._env)
@reset_globals.setter
def reset_globals(self, value: bool):
"""True if Globals reset behaviour is enabled."""
lib.SetResetGlobals(self._env, value)
@property
def globals_changed(self) -> bool:
"""True if any Global has changed since last check."""
value = lib.GetGlobalsChanged(self._env)
lib.SetGlobalsChanged(self._env, False)
return value
def globals(self) -> iter:
"""Iterates over the defined Globals."""
defglobal = lib.GetNextDefglobal(self._env, ffi.NULL)
while defglobal != ffi.NULL:
name = ffi.string(lib.DefglobalName(defglobal)).decode()
yield Global(self._env, name)
defglobal = lib.GetNextDefglobal(self._env, defglobal)
    def find_global(self, name: str) -> Global:
"""Find the Global by its name."""
defglobal = lib.FindDefglobal(self._env, name.encode())
if defglobal == ffi.NULL:
raise LookupError("Global '%s' not found" % name)
return Global(self._env, name)
def modules(self) -> iter:
"""Iterates over the defined Modules."""
defmodule = lib.GetNextDefmodule(self._env, ffi.NULL)
while defmodule != ffi.NULL:
name = ffi.string(lib.DefmoduleName(defmodule)).decode()
yield Module(self._env, name)
defmodule = lib.GetNextDefmodule(self._env, defmodule)
def find_module(self, name: str) -> Module:
"""Find the Module by its name."""
defmodule = lib.FindDefmodule(self._env, name.encode())
if defmodule == ffi.NULL:
raise LookupError("Module '%s' not found" % name)
return Module(self._env, name)
| 8,139 | 29.0369 | 80 | py |
clipspy | clipspy-master/clips/routers.py | # Copyright (c) 2016-2023, Matteo Cafasso
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This module contains the definition of:
* Router class
* LoggingRouter class
* Routers namespace class
"""
import logging
import clips
from clips import common
from clips._clips import lib, ffi
class Router:
__slots__ = '_env', '_name', '_userdata', '_priority'
def __init__(self, name: str, priority: int):
self._env = None
self._name = name
self._priority = priority
self._userdata = ffi.new_handle(self)
@property
def name(self) -> str:
"""The Router name."""
return self._name
@property
def priority(self) -> int:
"""The Router priority."""
return self._priority
def query(self, _name: str) -> bool:
"""This method should return True if the provided logical name
is handled by the Router.
"""
return False
def write(self, _name: str, _message: str):
"""If the query method returns True for the given logical name,
this method will be called with the forwarded message.
"""
return None
def read(self, _name: str) -> int:
"""Callback implementation for the `Environment.read_router`
function.
"""
return 0
def unread(self, _name: str, _char: int) -> int:
"""Callback implementation for the `Environment.unread_router`
function.
"""
return 0
def exit(self, _exitcode: int):
return None
def activate(self):
"""Activate the Router."""
if not lib.ActivateRouter(self._env, self._name.encode()):
raise RuntimeError("Unable to activate router %s" % self._name)
def deactivate(self):
"""Deactivate the Router."""
if not lib.DeactivateRouter(self._env, self._name.encode()):
raise RuntimeError("Unable to deactivate router %s" % self._name)
def delete(self):
"""Delete the Router."""
clips.common.environment_data(self._env, 'routers').pop(self.name, None)
if not lib.DeleteRouter(self._env, self._name.encode()):
raise RuntimeError("Unable to delete router %s" % self._name)
def share_message(self, name: str, message: str):
"""Share the captured message with other Routers."""
self.deactivate()
lib.WriteString(self._env, name.encode(), message.encode())
self.activate()
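# Hedged sketch of a custom Router: the subclass name and the captured
# buffer are illustrative. Once registered via Environment.add_router(),
# it receives everything CLIPS writes to the 'stdout' logical name.
class _CaptureRouter(Router):
    def __init__(self):
        super().__init__('capture-router', 30)
        self.captured = ''
    def query(self, name: str) -> bool:
        return name == 'stdout'
    def write(self, name: str, message: str):
        self.captured += message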
class ErrorRouter(Router):
"""Router capturing error messages for CLIPSError exceptions."""
__slots__ = '_env', '_name', '_userdata', '_priority', '_last_message'
def __init__(self):
super().__init__('python-error-router', 40)
self._last_message = ''
@property
def last_message(self) -> str:
ret = self._last_message
self._last_message = ''
return ret
def query(self, name: str):
return True if name == 'stderr' else False
def write(self, name: str, message: str):
self._last_message += message
self.share_message(name, message)
class LoggingRouter(Router):
"""Python logging Router.
A helper Router to get Python standard logging facilities
integrated with CLIPS.
It captures CLIPS output and re-directs it to Python logging library.
"""
__slots__ = '_env', '_name', '_userdata', '_priority', '_message'
LOGGERS = {'stdout': logging.info,
'stderr': logging.error,
'stdwrn': logging.warning}
def __init__(self):
super().__init__('python-logging-router', 30)
self._message = ''
def query(self, name: str) -> bool:
"""Capture log from CLIPS output routers."""
return name in self.LOGGERS
def write(self, name: str, message: str):
"""If the message is a new-line terminate sentence,
log it at according to the mapped level.
Otherwise, append it to the message string.
"""
if message == '\n':
self.log_message(name)
else:
self._message += message
if self._message.rstrip(' ').endswith('\n'):
self.log_message(name)
def log_message(self, name: str):
if self._message:
self.LOGGERS[name](self._message.lstrip('\n').rstrip('\n'))
self._message = ''
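# Hedged usage sketch for LoggingRouter (environment construction shown
# for illustration only):
#
#     import clips
#     env = clips.Environment()
#     env.add_router(LoggingRouter())  # CLIPS output now goes to logging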
class Routers:
"""Routers namespace class.
.. note::
All the Routers methods are accessible through the Environment class.
"""
__slots__ = ['_env']
def __init__(self, env):
self._env = env
def routers(self) -> iter:
"""The User defined routers installed within the Environment."""
return common.environment_data(self._env, 'routers').values()
def read_router(self, router_name: str) -> int:
"""Query the Router by the given name calling its `read` callback."""
return lib.ReadRouter(self._env, router_name.encode())
def unread_router(self, router_name: str, characters: int) -> int:
"""Query the Router by the given name calling its `unread` callback."""
return lib.UnReadRouter(self._env, router_name.encode(), characters)
def write_router(self, router_name: str, *args):
"""Send the given arguments to the given Router for writing."""
for arg in args:
            if isinstance(arg, str):
                lib.WriteString(self._env, router_name.encode(), arg.encode())
            else:
                value = clips.values.clips_value(self._env, value=arg)
lib.WriteCLIPSValue(self._env, router_name.encode(), value)
def add_router(self, router: Router):
"""Add the given Router to the Environment."""
name = router.name
router._env = self._env
common.environment_data(self._env, 'routers')[name] = router
lib.AddRouter(self._env,
name.encode(),
router.priority,
lib.query_function,
lib.write_function,
lib.read_function,
lib.unread_function,
lib.exit_function,
router._userdata)
@ffi.def_extern()
def query_function(env: ffi.CData, name: ffi.CData, context: ffi.CData):
router = ffi.from_handle(context)
return bool(router.query(ffi.string(name).decode()))
@ffi.def_extern()
def write_function(env: ffi.CData, name: ffi.CData,
message: ffi.CData, context: ffi.CData):
router = ffi.from_handle(context)
try:
router.write(ffi.string(name).decode(), ffi.string(message).decode())
except BaseException:
pass
@ffi.def_extern()
def read_function(env: ffi.CData, name: ffi.CData, context: ffi.CData):
router = ffi.from_handle(context)
try:
return int(router.read(ffi.string(name).decode()))
except BaseException:
return 0
@ffi.def_extern()
def unread_function(env: ffi.CData, char: ffi.CData,
name: ffi.CData, context: ffi.CData):
router = ffi.from_handle(context)
try:
return int(router.unread(ffi.string(name).decode(), char))
except BaseException:
return 0
@ffi.def_extern()
def exit_function(env: ffi.CData, exitcode: int, context: ffi.CData):
router = ffi.from_handle(context)
try:
router.exit(exitcode)
except BaseException:
pass
| 8,877 | 29.613793 | 80 | py |
clipspy | clipspy-master/clips/functions.py | # Copyright (c) 2016-2023, Matteo Cafasso
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This module contains the definition of:
* Function class
* Generic class
* Method class
* Functions namespace class
"""
import traceback
from typing import Union
import clips
from clips.modules import Module
from clips.common import CLIPSError, environment_builder, environment_data
from clips._clips import lib, ffi
class Function:
"""A CLIPS user defined Function.
In CLIPS, Functions are defined via the (deffunction) statement.
"""
__slots__ = '_env', '_name'
def __init__(self, env: ffi.CData, name: str):
self._env = env
self._name = name.encode()
def __hash__(self):
return hash(self._ptr())
def __eq__(self, fnc):
return self._ptr() == fnc._ptr()
def __str__(self):
string = lib.DeffunctionPPForm(self._ptr())
string = ffi.string(string).decode() if string != ffi.NULL else ''
return ' '.join(string.split())
def __repr__(self):
string = lib.DeffunctionPPForm(self._ptr())
string = ffi.string(string).decode() if string != ffi.NULL else ''
return "%s: %s" % (self.__class__.__name__, ' '.join(string.split()))
def __call__(self, *arguments):
"""Call the CLIPS function with the given arguments."""
value = clips.values.clips_value(self._env)
builder = environment_builder(self._env, 'function')
lib.FCBReset(builder)
for argument in arguments:
lib.FCBAppend(
builder, clips.values.clips_value(self._env, value=argument))
ret = lib.FCBCall(builder, lib.DeffunctionName(self._ptr()), value)
if ret != lib.FCBE_NO_ERROR:
raise CLIPSError(self._env, code=ret)
return clips.values.python_value(self._env, value)
def _ptr(self) -> ffi.CData:
dfc = lib.FindDeffunction(self._env, self._name)
if dfc == ffi.NULL:
raise CLIPSError(
self._env, 'Function <%s> not defined' % self.name)
return dfc
@property
def name(self) -> str:
"""Function name."""
return self._name.decode()
@property
def module(self) -> Module:
"""The module in which the Function is defined.
        Equivalent to the CLIPS (deffunction-module) function.
"""
name = ffi.string(lib.DeffunctionModule(self._ptr())).decode()
return Module(self._env, name)
@property
def deletable(self) -> bool:
"""True if the Function can be deleted."""
return lib.DeffunctionIsDeletable(self._ptr())
@property
def watch(self) -> bool:
"""Whether or not the Function is being watched."""
return lib.DeffunctionGetWatch(self._ptr())
@watch.setter
def watch(self, flag: bool):
"""Whether or not the Function is being watched."""
lib.DeffunctionSetWatch(self._ptr(), flag)
def undefine(self):
"""Undefine the Function.
Equivalent to the CLIPS (undeffunction) command.
The object becomes unusable after this method has been called.
"""
if not lib.Undeffunction(self._ptr(), self._env):
raise CLIPSError(self._env)
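# Hedged sketch of calling a deffunction from Python. The construct text is
# illustrative; find_function is assumed to be exposed by the Functions
# namespace in this module, mirroring the other find_* helpers.
def _function_call_sketch(environment):
    environment.build('(deffunction square (?x) (* ?x ?x))')
    square = environment.find_function('square')
    return square(3)  # -> 9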
class Generic:
"""A CLIPS Generic Function.
In CLIPS, Generic Functions are defined via the (defgeneric) statement.
"""
__slots__ = '_env', '_name'
def __init__(self, env: ffi.CData, name: str):
self._env = env
self._name = name.encode()
def __hash__(self):
return hash(self._ptr())
def __eq__(self, gnc):
return self._ptr() == gnc._ptr()
def __str__(self):
string = lib.DefgenericPPForm(self._ptr())
string = ffi.string(string).decode() if string != ffi.NULL else ''
return ' '.join(string.split())
def __repr__(self):
string = lib.DefgenericPPForm(self._ptr())
string = ffi.string(string).decode() if string != ffi.NULL else ''
return "%s: %s" % (self.__class__.__name__, ' '.join(string.split()))
def __call__(self, *arguments):
"""Call the CLIPS Generic function with the given arguments."""
value = clips.values.clips_value(self._env)
builder = environment_builder(self._env, 'function')
lib.FCBReset(builder)
for argument in arguments:
lib.FCBAppend(
builder, clips.values.clips_value(self._env, value=argument))
ret = lib.FCBCall(builder, lib.DefgenericName(self._ptr()), value)
if ret != lib.FCBE_NO_ERROR:
raise CLIPSError(self._env, code=ret)
return clips.values.python_value(self._env, value)
def _ptr(self) -> ffi.CData:
gnc = lib.FindDefgeneric(self._env, self._name)
if gnc == ffi.NULL:
raise CLIPSError(
self._env, 'Generic <%s> not defined' % self.name)
return gnc
@property
def name(self) -> str:
"""Generic name."""
return self._name.decode()
@property
def module(self) -> Module:
"""The module in which the Generic is defined.
        Equivalent to the CLIPS (defgeneric-module) function.
"""
name = ffi.string(lib.DefgenericModule(self._ptr())).decode()
return Module(self._env, name)
@property
def deletable(self) -> bool:
"""True if the Generic can be deleted."""
return lib.DefgenericIsDeletable(self._ptr())
@property
def watch(self) -> bool:
"""Whether or not the Generic is being watched."""
return lib.DefgenericGetWatch(self._ptr())
@watch.setter
def watch(self, flag: bool):
"""Whether or not the Generic is being watched."""
lib.DefgenericSetWatch(self._ptr(), flag)
def methods(self) -> iter:
"""Iterates over the defined Methods."""
index = lib.GetNextDefmethod(self._ptr(), 0)
while index != 0:
yield Method(self._env, self.name, index)
index = lib.GetNextDefmethod(self._ptr(), index)
def undefine(self):
"""Undefine the Generic.
Equivalent to the CLIPS (undefgeneric) command.
The object becomes unusable after this method has been called.
"""
if not lib.Undefgeneric(self._ptr(), self._env):
raise CLIPSError(self._env)
class Method(object):
"""Methods implement the generic logic
according to the input parameter types.
"""
__slots__ = '_env', '_gnc', '_idx'
def __init__(self, env: ffi.CData, gnc: str, idx: int):
self._env = env
self._gnc = gnc.encode()
self._idx = idx
def __hash__(self):
return hash(self._ptr()) + self._idx
def __eq__(self, gnc):
return self._ptr() == gnc._ptr() and self._idx == gnc._idx
def __str__(self):
string = lib.DefmethodPPForm(self._ptr(), self._idx)
string = ffi.string(string).decode() if string != ffi.NULL else ''
return ' '.join(string.split())
def __repr__(self):
string = lib.DefmethodPPForm(self._ptr(), self._idx)
string = ffi.string(string).decode() if string != ffi.NULL else ''
return "%s: %s" % (self.__class__.__name__, ' '.join(string.split()))
def _ptr(self) -> ffi.CData:
gnc = lib.FindDefgeneric(self._env, self._gnc)
if gnc == ffi.NULL:
raise CLIPSError(
self._env, 'Generic <%s> not defined' % self._gnc)
return gnc
@property
def watch(self) -> bool:
"""Whether or not the Method is being watched."""
return lib.DefmethodGetWatch(self._ptr(), self._idx)
@watch.setter
def watch(self, flag: bool):
"""Whether or not the Method is being watched."""
lib.DefmethodSetWatch(self._ptr(), self._idx, flag)
@property
def deletable(self):
"""True if the Template can be undefined."""
return lib.DefmethodIsDeletable(self._ptr(), self._idx)
@property
def restrictions(self) -> tuple:
value = clips.values.clips_value(self._env)
lib.GetMethodRestrictions(self._ptr(), self._idx, value)
return clips.values.python_value(self._env, value)
@property
def description(self) -> str:
builder = environment_builder(self._env, 'string')
lib.SBReset(builder)
lib.DefmethodDescription(self._ptr(), self._idx, builder)
return ffi.string(builder.contents).decode()
def undefine(self):
"""Undefine the Method.
Equivalent to the CLIPS (undefmethod) command.
The object becomes unusable after this method has been called.
"""
if not lib.Undefmethod(self._ptr(), self._idx, self._env):
raise CLIPSError(self._env)
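# Usage sketch for Generic and Method: a defgeneric groups defmethods
# dispatched on parameter restrictions (the constructs below are
# illustrative, env is a clips.Environment):
#
#     env.build('(defmethod sum ((?a INTEGER) (?b INTEGER)) (+ ?a ?b))')
#     env.build('(defmethod sum ((?a STRING) (?b STRING)) (str-cat ?a ?b))')
#     gnc = env.find_generic('sum')
#     assert gnc(1, 2) == 3 and gnc("a", "b") == "ab"
#     for method in gnc.methods():
#         print(method.restrictions)  # per-parameter restrictions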
class Functions:
"""Functions, Generics and Methods namespace class.
.. note::
All the Functions methods are accessible through the Environment class.
"""
__slots__ = ['_env']
def __init__(self, env: ffi.CData):
self._env = env
@property
def error_state(self) -> Union[None, CLIPSError]:
"""Get the CLIPS environment error state.
Equivalent to the CLIPS (get-error) function.
"""
value = clips.values.clips_udf_value(self._env)
lib.GetErrorFunction(self._env, ffi.NULL, value)
state = clips.values.python_value(self._env, value)
if isinstance(state, clips.Symbol):
return None
else:
return CLIPSError(self._env, message=state)
def clear_error_state(self):
"""Clear the CLIPS environment error state.
Equivalent to the CLIPS (clear-error) function.
"""
lib.ClearErrorValue(self._env)
def call(self, function: str, *arguments) -> type:
"""Call the CLIPS function with the given arguments."""
value = clips.values.clips_value(self._env)
builder = environment_builder(self._env, 'function')
lib.FCBReset(builder)
for argument in arguments:
lib.FCBAppend(
builder, clips.values.clips_value(self._env, value=argument))
ret = lib.FCBCall(builder, function.encode(), value)
if ret != lib.FCBE_NO_ERROR:
raise CLIPSError(self._env, code=ret)
return clips.values.python_value(self._env, value)
def functions(self):
"""Iterates over the defined Globals."""
deffunction = lib.GetNextDeffunction(self._env, ffi.NULL)
while deffunction != ffi.NULL:
name = ffi.string(lib.DeffunctionName(deffunction)).decode()
yield Function(self._env, name)
deffunction = lib.GetNextDeffunction(self._env, deffunction)
def find_function(self, name: str) -> Function:
"""Find the Function by its name."""
deffunction = lib.FindDeffunction(self._env, name.encode())
if deffunction == ffi.NULL:
raise LookupError("Function '%s' not found" % name)
return Function(self._env, name)
def generics(self) -> iter:
"""Iterates over the defined Generics."""
defgeneric = lib.GetNextDefgeneric(self._env, ffi.NULL)
while defgeneric != ffi.NULL:
name = ffi.string(lib.DefgenericName(defgeneric)).decode()
yield Generic(self._env, name)
            defgeneric = lib.GetNextDefgeneric(self._env, defgeneric)
def find_generic(self, name: str) -> Generic:
"""Find the Generic by its name."""
defgeneric = lib.FindDefgeneric(self._env, name.encode())
if defgeneric == ffi.NULL:
raise LookupError("Generic '%s' not found" % name)
return Generic(self._env, name)
def define_function(self, function: callable, name: str = None):
"""Define the Python function within the CLIPS environment.
If a name is given, it will be the function name within CLIPS.
Otherwise, the name of the Python function will be used.
The Python function will be accessible within CLIPS via its name
as if it was defined via the `deffunction` construct.
"""
name = name if name is not None else function.__name__
user_functions = environment_data(self._env, 'user_functions')
user_functions.functions[name] = function
ret = lib.Build(self._env, DEFFUNCTION.format(name).encode())
if ret != lib.BE_NO_ERROR:
raise CLIPSError(self._env, code=ret)
@ffi.def_extern()
def python_function(env: ffi.CData, context: ffi.CData, output: ffi.CData):
    """C callback bridging (python-function) calls back to the Python
    callable registered via define_function: the first UDF argument is
    the function name, the remaining ones are forwarded positionally.
    """
    arguments = []
value = clips.values.clips_udf_value(env)
if lib.UDFFirstArgument(context, lib.SYMBOL_BIT, value):
funcname = clips.values.python_value(env, value)
else:
lib.UDFThrowError(context)
return
while lib.UDFHasNextArgument(context):
if lib.UDFNextArgument(context, clips.values.ANY_TYPE_BITS, value):
arguments.append(clips.values.python_value(env, value))
else:
lib.UDFThrowError(context)
return
try:
user_functions = environment_data(env, 'user_functions')
ret = user_functions.functions[funcname](*arguments)
except Exception as error:
message = "[PYCODEFUN1] %r" % error
string = "\n".join((message, traceback.format_exc()))
lib.WriteString(env, 'stdwrn'.encode(), string.encode())
clips.values.clips_udf_value(env, message, value)
lib.SetErrorValue(env, value.header)
lib.UDFThrowError(context)
else:
clips.values.clips_udf_value(env, ret, output)
DEFFUNCTION = """
(deffunction {0} ($?args)
(python-function {0} (expand$ ?args)))
"""
| 15,175 | 30.290722 | 80 | py |
clipspy | clipspy-master/clips/agenda.py | # Copyright (c) 2016-2023, Matteo Cafasso
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This module contains the definition of:
* Agenda class
* Rule class
* Activation class
"""
import clips
from clips.modules import Module
from clips.common import environment_builder
from clips.common import CLIPSError, Strategy, SalienceEvaluation, Verbosity
from clips._clips import lib, ffi
class Rule:
"""A CLIPS rule.
In CLIPS, Rules are defined via the (defrule) statement.
"""
__slots__ = '_env', '_name'
def __init__(self, env: ffi.CData, name: str):
self._env = env
self._name = name.encode()
def __hash__(self):
return hash(self._ptr())
def __eq__(self, rule):
return self._ptr() == rule._ptr()
def __str__(self):
string = lib.DefrulePPForm(self._ptr())
string = ffi.string(string).decode() if string != ffi.NULL else ''
return ' '.join(string.split())
def __repr__(self):
string = lib.DefrulePPForm(self._ptr())
string = ffi.string(string).decode() if string != ffi.NULL else ''
return "%s: %s" % (self.__class__.__name__, ' '.join(string.split()))
def _ptr(self) -> ffi.CData:
rule = lib.FindDefrule(self._env, self._name)
if rule == ffi.NULL:
raise CLIPSError(self._env, 'Rule <%s> not defined' % self.name)
return rule
@property
def name(self) -> str:
"""Rule name."""
return self._name.decode()
@property
def module(self) -> Module:
"""The module in which the Rule is defined.
Equivalent to the CLIPS (defrule-module) function.
"""
name = ffi.string(lib.DefruleModule(self._ptr())).decode()
return Module(self._env, name)
@property
def deletable(self) -> bool:
"""True if the Rule can be deleted."""
return lib.DefruleIsDeletable(self._ptr())
@property
def watch_firings(self) -> bool:
"""Whether or not the Rule firings are being watched."""
return lib.DefruleGetWatchFirings(self._ptr())
@watch_firings.setter
def watch_firings(self, flag: bool):
"""Whether or not the Rule firings are being watched."""
lib.DefruleSetWatchFirings(self._ptr(), flag)
@property
def watch_activations(self) -> bool:
"""Whether or not the Rule Activations are being watched."""
return lib.DefruleGetWatchActivations(self._ptr())
@watch_activations.setter
def watch_activations(self, flag: bool):
"""Whether or not the Rule Activations are being watched."""
lib.DefruleSetWatchActivations(self._ptr(), flag)
def matches(self, verbosity: Verbosity = Verbosity.TERSE):
"""Shows partial matches and activations.
Returns a tuple containing the combined sum of the matches
for each pattern, the combined sum of partial matches
and the number of activations.
The verbosity parameter controls how much to output:
* Verbosity.VERBOSE: detailed matches are printed to stdout
* Verbosity.SUCCINT: a brief description is printed to stdout
* Verbosity.TERSE: (default) nothing is printed to stdout
"""
value = clips.values.clips_value(self._env)
lib.Matches(self._ptr(), verbosity, value)
return clips.values.python_value(self._env, value)
def refresh(self):
"""Refresh the Rule.
Equivalent to the CLIPS (refresh) function.
"""
lib.Refresh(self._ptr())
def add_breakpoint(self):
"""Add a breakpoint for the Rule.
Equivalent to the CLIPS (add-break) function.
"""
lib.SetBreak(self._ptr())
def remove_breakpoint(self):
"""Remove a breakpoint for the Rule.
Equivalent to the CLIPS (remove-break) function.
"""
        if not lib.RemoveBreak(self._ptr()):
            raise CLIPSError(self._env, 'No breakpoint set')
def undefine(self):
"""Undefine the Rule.
Equivalent to the CLIPS (undefrule) function.
The object becomes unusable after this method has been called.
"""
if not lib.Undefrule(self._ptr(), self._env):
raise CLIPSError(self._env)
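# Usage sketch for Rule (the defrule below is illustrative, env is a
# clips.Environment):
#
#     env.build('(defrule hello (greeting ?n) => (println "hello " ?n))')
#     rule = env.find_rule('hello')
#     rule.add_breakpoint()  # execution stops before this rule fires
#     print(rule.matches())  # (matches, partial matches, activations)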
class Activation:
"""When all the constraints of a Rule are satisfied,
the Rule becomes active.
Activations are organized within the CLIPS Agenda.
"""
def __init__(self, env: ffi.CData, act: ffi.CData):
self._env = env
self._act = act
self._pp = activation_pp_string(self._env, self._act)
self._rule_name = ffi.string(lib.ActivationRuleName(self._act))
def __hash__(self):
return hash(self._act)
def __eq__(self, act):
return self._act == act._act
def __str__(self):
return ' '.join(self._pp.split())
def __repr__(self):
return "%s: %s" % (self.__class__.__name__, ' '.join(self._pp.split()))
def _assert_is_active(self):
"""As the engine does not provide means to find activations,
the existence of the pointer in the activations list is tested instead.
"""
activations = []
activation = lib.GetNextActivation(self._env, ffi.NULL)
while activation != ffi.NULL:
activations.append(activation)
activation = lib.GetNextActivation(self._env, activation)
if self._act not in activations:
raise CLIPSError(
self._env, "Activation %s not in the agenda" % self.name)
@property
def name(self) -> str:
"""Activation Rule name."""
return self._rule_name.decode()
@property
def salience(self) -> int:
"""Activation salience value."""
self._assert_is_active()
return lib.ActivationGetSalience(self._act)
@salience.setter
def salience(self, salience: int):
"""Activation salience value."""
self._assert_is_active()
lib.ActivationSetSalience(self._act, salience)
def delete(self):
"""Remove the activation from the agenda."""
self._assert_is_active()
lib.DeleteActivation(self._act)
class Agenda:
"""In CLIPS, when all the conditions to activate a rule are met,
The Rule action is placed within the Agenda.
The CLIPS Agenda is responsible of sorting the Rule Activations
according to their salience and the conflict resolution strategy.
.. note::
All the Agenda methods are accessible through the Environment class.
"""
def __init__(self, env: ffi.CData):
self._env = env
@property
def agenda_changed(self) -> bool:
"""True if any rule activation changes have occurred."""
value = lib.GetAgendaChanged(self._env)
lib.SetAgendaChanged(self._env, False)
return value
@property
def focus(self) -> Module:
"""The module associated with the current focus.
Equivalent to the CLIPS (get-focus) function.
"""
name = ffi.string(lib.DefmoduleName(lib.GetFocus(self._env))).decode()
return Module(self._env, name)
@focus.setter
def focus(self, module: Module):
"""The module associated with the current focus.
        Equivalent to the CLIPS (focus) function.
        """
        lib.Focus(module._ptr())
@property
def strategy(self) -> Strategy:
"""The current conflict resolution strategy.
Equivalent to the CLIPS (get-strategy) function.
"""
return Strategy(lib.GetStrategy(self._env))
@strategy.setter
def strategy(self, value: Strategy):
"""The current conflict resolution strategy.
        Equivalent to the CLIPS (set-strategy) function.
"""
lib.SetStrategy(self._env, Strategy(value))
@property
def salience_evaluation(self) -> SalienceEvaluation:
"""The salience evaluation behavior.
Equivalent to the CLIPS (get-salience-evaluation) command.
"""
return SalienceEvaluation(lib.GetSalienceEvaluation(self._env))
@salience_evaluation.setter
def salience_evaluation(self, value: SalienceEvaluation):
"""The salience evaluation behavior.
        Equivalent to the CLIPS (set-salience-evaluation) function.
"""
lib.SetSalienceEvaluation(self._env, SalienceEvaluation(value))
def rules(self) -> iter:
"""Iterate over the defined Rules."""
rule = lib.GetNextDefrule(self._env, ffi.NULL)
while rule != ffi.NULL:
name = ffi.string(lib.DefruleName(rule)).decode()
yield Rule(self._env, name)
rule = lib.GetNextDefrule(self._env, rule)
def find_rule(self, name: str) -> Rule:
"""Find a Rule by name."""
defrule = lib.FindDefrule(self._env, name.encode())
if defrule == ffi.NULL:
raise LookupError("Rule '%s' not found" % name)
return Rule(self._env, name)
def reorder(self, module: Module = None):
"""Reorder the Activations in the Agenda.
If no Module is specified, the agendas of all modules are reordered.
To be called after changing the conflict resolution strategy.
"""
if module is not None:
lib.ReorderAgenda(module._ptr())
else:
lib.ReorderAllAgendas(self._env)
def refresh(self, module: Module = None):
"""Recompute the salience values of the Activations on the Agenda
and then reorder the agenda.
Equivalent to the CLIPS (refresh-agenda) function.
If no Module is specified, the agendas of all modules are refreshed.
"""
if module is not None:
lib.RefreshAgenda(module._ptr())
else:
lib.RefreshAllAgendas(self._env)
def activations(self) -> iter:
"""Iterate over the Activations in the Agenda."""
activation = lib.GetNextActivation(self._env, ffi.NULL)
while activation != ffi.NULL:
yield Activation(self._env, activation)
activation = lib.GetNextActivation(self._env, activation)
def delete_activations(self):
"""Delete all activations in the agenda."""
if not lib.DeleteActivation(self._env, ffi.NULL):
raise CLIPSError(self._env)
def clear_focus(self):
"""Remove all modules from the focus stack.
Equivalent to the CLIPS (clear-focus-stack) function.
"""
lib.ClearFocusStack(self._env)
def run(self, limit: int = None) -> int:
"""Runs the activations in the agenda.
If limit is not None, the first activations up to limit will be run.
Returns the number of activation which were run.
"""
return lib.Run(self._env, limit if limit is not None else -1)
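# Usage sketch for the Agenda, whose methods are exposed through the
# Environment class (see the note above; the fact below is illustrative):
#
#     env.assert_string('(greeting "world")')
#     for activation in env.activations():
#         activation.salience = 10  # reprioritize before running
#     fired = env.run()  # returns the number of rules fired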
def activation_pp_string(env: ffi.CData, ist: ffi.CData) -> str:
builder = environment_builder(env, 'string')
lib.SBReset(builder)
lib.ActivationPPForm(ist, builder)
return ffi.string(builder.contents).decode()
| 12,531 | 29.417476 | 80 | py |
clipspy | clipspy-master/clips/common.py | # Copyright (c) 2016-2023, Matteo Cafasso
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from enum import IntEnum
from collections import namedtuple
from clips._clips import lib, ffi
class CLIPSError(RuntimeError):
"""An error occurred within the CLIPS Environment."""
def __init__(self, env: ffi.CData, message: str = None, code: int = None):
if message is None:
routers = environment_data(env, 'routers')
message = routers['python-error-router'].last_message
message = message.lstrip('\n').rstrip('\n').replace('\n', ' ')
super(CLIPSError, self).__init__(message)
self.code = code
class CLIPSType(IntEnum):
FLOAT = 0
INTEGER = 1
SYMBOL = 2
STRING = 3
MULTIFIELD = 4
EXTERNAL_ADDRESS = 5
FACT_ADDRESS = 6
INSTANCE_ADDRESS = 7
INSTANCE_NAME = 8
VOID = 9
class SaveMode(IntEnum):
LOCAL_SAVE = lib.LOCAL_SAVE
VISIBLE_SAVE = lib.VISIBLE_SAVE
class ClassDefaultMode(IntEnum):
CONVENIENCE_MODE = 0
CONSERVATION_MODE = 1
class Strategy(IntEnum):
DEPTH = 0
BREADTH = 1
LEX = 2
MEA = 3
COMPLEXITY = 4
SIMPLICITY = 5
RANDOM = 6
class SalienceEvaluation(IntEnum):
WHEN_DEFINED = lib.WHEN_DEFINED
WHEN_ACTIVATED = lib.WHEN_ACTIVATED
EVERY_CYCLE = lib.EVERY_CYCLE
class Verbosity(IntEnum):
VERBOSE = 0
SUCCINT = 1
TERSE = 2
class TemplateSlotDefaultType(IntEnum):
NO_DEFAULT = lib.NO_DEFAULT
STATIC_DEFAULT = lib.STATIC_DEFAULT
DYNAMIC_DEFAULT = lib.DYNAMIC_DEFAULT
class PutSlotError(IntEnum):
PSE_NO_ERROR = lib.PSE_NO_ERROR
PSE_NULL_POINTER_ERROR = lib.PSE_NULL_POINTER_ERROR
PSE_INVALID_TARGET_ERROR = lib.PSE_INVALID_TARGET_ERROR
PSE_SLOT_NOT_FOUND_ERROR = lib.PSE_SLOT_NOT_FOUND_ERROR
PSE_TYPE_ERROR = lib.PSE_TYPE_ERROR
PSE_RANGE_ERROR = lib.PSE_RANGE_ERROR
PSE_ALLOWED_VALUES_ERROR = lib.PSE_ALLOWED_VALUES_ERROR
PSE_CARDINALITY_ERROR = lib.PSE_CARDINALITY_ERROR
PSE_ALLOWED_CLASSES_ERROR = lib.PSE_ALLOWED_CLASSES_ERROR
PUT_SLOT_ERROR = {PutSlotError.PSE_NULL_POINTER_ERROR:
lambda s: RuntimeError("Internal error '%s'" % s),
PutSlotError.PSE_INVALID_TARGET_ERROR:
lambda s: ValueError("invalid target for slot '%s'" % s),
PutSlotError.PSE_SLOT_NOT_FOUND_ERROR:
lambda s: KeyError("slot '%s' does not exist" % s),
PutSlotError.PSE_TYPE_ERROR:
lambda s: TypeError("invalid type for slot '%s'" % s),
PutSlotError.PSE_RANGE_ERROR:
lambda s: ValueError("invalid range for slot '%s'" % s),
PutSlotError.PSE_ALLOWED_VALUES_ERROR:
lambda s: ValueError("value not allowed for slot '%s'" % s),
PutSlotError.PSE_CARDINALITY_ERROR:
lambda s: IndexError("invalid cardinality for slot '%s'" % s),
PutSlotError.PSE_ALLOWED_CLASSES_ERROR:
lambda s: ValueError("class not allowed for slot '%s'" % s)}
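# Sketch of how the PUT_SLOT_ERROR table is consumed (see for instance
# TemplateFact.modify_slots in clips.facts): a non-zero PutSlotError code
# returned by the engine is mapped onto an idiomatic Python exception for
# the offending slot.
#
#     ret = lib.FBPutSlot(builder, b'age', value)
#     if ret != PutSlotError.PSE_NO_ERROR:
#         raise PUT_SLOT_ERROR[ret]('age')  # e.g. TypeError on PSE_TYPE_ERROR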
def initialize_environment_data(env: ffi.CData) -> 'EnvData':
    """Create the per-Environment builders, modifiers and user data."""
fact = lib.CreateFactBuilder(env, ffi.NULL)
if fact is ffi.NULL:
raise CLIPSError(env, code=lib.FBError(env))
    instance = lib.CreateInstanceBuilder(env, ffi.NULL)
    if instance is ffi.NULL:
        raise CLIPSError(env, code=lib.IBError(env))
    function = lib.CreateFunctionCallBuilder(env, 0)
    if function is ffi.NULL:
        raise CLIPSError(env)
multifield = lib.CreateMultifieldBuilder(env, 0)
if multifield is ffi.NULL:
raise CLIPSError(env)
string = lib.CreateStringBuilder(env, 0)
if string is ffi.NULL:
raise CLIPSError(env)
builders = EnvBuilders(fact, instance, function, string, multifield)
fact = lib.CreateFactModifier(env, ffi.NULL)
if fact is ffi.NULL:
raise CLIPSError(env, code=lib.FMError(env))
instance = lib.CreateInstanceModifier(env, ffi.NULL)
if instance is ffi.NULL:
raise CLIPSError(env, code=lib.FMError(env))
modifiers = EnvModifiers(fact, instance)
functions = UserFunctions({}, {})
ENVIRONMENT_DATA[env] = EnvData(builders, modifiers, {}, functions)
lib.DefinePythonFunction(env)
return ENVIRONMENT_DATA[env]
def delete_environment_data(env: ffi.CData):
    """Dispose of the per-Environment builders and modifiers."""
data = ENVIRONMENT_DATA.pop(env, None)
if data is not None:
fact, instance, function, string, multifield = data.builders
lib.FBDispose(fact)
lib.IBDispose(instance)
lib.FCBDispose(function)
lib.SBDispose(string)
lib.MBDispose(multifield)
fact, instance = data.modifiers
lib.FMDispose(fact)
lib.IMDispose(instance)
def environment_data(env: ffi.CData, name: str) -> type:
"""Retrieve Environment specific data."""
return getattr(ENVIRONMENT_DATA[env], name)
def environment_builder(env: ffi.CData, name: str) -> ffi.CData:
"""Retrieve Environment specific builder."""
return getattr(ENVIRONMENT_DATA[env].builders, name)
def environment_modifier(env: ffi.CData, name: str) -> ffi.CData:
"""Retrieve Environment specific modifier."""
return getattr(ENVIRONMENT_DATA[env].modifiers, name)
ENVIRONMENT_DATA = {}
EnvData = namedtuple('EnvData', ('builders',
'modifiers',
'routers',
'user_functions'))
EnvBuilders = namedtuple('EnvBuilders', ('fact',
'instance',
'function',
'string',
'multifield'))
EnvModifiers = namedtuple('EnvModifiers', ('fact',
'instance'))
UserFunctions = namedtuple('UserFunctions', ('functions',
'external_addresses'))
| 7,437 | 34.084906 | 80 | py |
clipspy | clipspy-master/clips/__init__.py | # Copyright (c) 2016-2023, Matteo Cafasso
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = 'Matteo Cafasso'
__version__ = '1.0.1'
__license__ = 'BSD-3'
__all__ = ('CLIPSError',
'Environment',
'Router',
'LoggingRouter',
'ImpliedFact',
'TemplateFact',
'Template',
'Instance',
'Class',
'Strategy',
'SalienceEvaluation',
'Verbosity',
'ClassDefaultMode',
'TemplateSlotDefaultType',
'Symbol',
'InstanceName',
'SaveMode')
from clips.environment import Environment
from clips.classes import Instance, Class
from clips.values import Symbol, InstanceName
from clips.routers import Router, LoggingRouter
from clips.facts import ImpliedFact, TemplateFact, Template
from clips.common import SaveMode, Strategy, SalienceEvaluation, Verbosity
from clips.common import CLIPSError, ClassDefaultMode, TemplateSlotDefaultType
| 2,474 | 38.285714 | 80 | py |
clipspy | clipspy-master/clips/facts.py | # Copyright (c) 2016-2023, Matteo Cafasso
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This module contains the definition of:
* ImpliedFact class
* TemplateFact class
* Template class
* TemplateSlot class
* DefinedFacts class
* Facts namespace class
"""
import os
from itertools import chain
import clips
from clips.modules import Module
from clips.common import PutSlotError, PUT_SLOT_ERROR
from clips.common import environment_builder, environment_modifier
from clips.common import CLIPSError, SaveMode, TemplateSlotDefaultType
from clips._clips import lib, ffi
class Fact:
"""CLIPS Fact base class."""
__slots__ = '_env', '_fact'
def __init__(self, env: ffi.CData, fact: ffi.CData):
self._env = env
self._fact = fact
lib.RetainFact(self._fact)
def __del__(self):
try:
lib.ReleaseFact(self._env, self._fact)
except (AttributeError, TypeError):
pass # mostly happening during interpreter shutdown
def __hash__(self):
return hash(self._fact)
def __eq__(self, fact):
return self._fact == fact._fact
def __str__(self):
return ' '.join(fact_pp_string(self._env, self._fact).split())
def __repr__(self):
string = ' '.join(fact_pp_string(self._env, self._fact).split())
return "%s: %s" % (self.__class__.__name__, string)
@property
def index(self) -> int:
"""The fact index."""
return lib.FactIndex(self._fact)
@property
def exists(self) -> bool:
"""True if the fact has been asserted within CLIPS.
Equivalent to the CLIPS (fact-existp) function.
"""
return lib.FactExistp(self._fact)
@property
def template(self) -> 'Template':
"""The associated Template."""
template = lib.FactDeftemplate(self._fact)
name = ffi.string(lib.DeftemplateName(template)).decode()
return Template(self._env, name)
def retract(self):
"""Retract the fact from the CLIPS environment."""
ret = lib.Retract(self._fact)
if ret != lib.RE_NO_ERROR:
raise CLIPSError(self._env, code=ret)
class ImpliedFact(Fact):
"""An Implied Fact or Ordered Fact represents its data as a list
of elements similarly as for a Multifield.
    Implied Facts cannot be built or modified.
They can be asserted via the Environment.assert_string() method.
"""
def __iter__(self):
return chain(slot_value(self._env, self._fact))
def __len__(self):
return len(slot_value(self._env, self._fact))
def __getitem__(self, index):
return slot_value(self._env, self._fact)[index]
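# Usage sketch for ImpliedFact: ordered facts behave like sequences
# (the fact below is illustrative, env is a clips.Environment):
#
#     fact = env.assert_string('(point 1 2 3)')
#     assert len(fact) == 3 and fact[0] == 1
#     assert tuple(fact) == (1, 2, 3)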
class TemplateFact(Fact):
"""A Template or Unordered Fact represents its data as a dictionary
where each slot name is a key.
TemplateFact slot values can be modified.
The Fact will be re-evaluated against the rule network once modified.
"""
def __iter__(self):
return chain(slot_values(self._env, self._fact))
def __len__(self):
slots = slot_values(self._env, self._fact)
return len(tuple(slots))
def __getitem__(self, key):
try:
return slot_value(self._env, self._fact, slot=str(key))
except CLIPSError as error:
if error.code == lib.GSE_SLOT_NOT_FOUND_ERROR:
raise KeyError("'%s'" % key)
else:
raise error
def modify_slots(self, **slots):
"""Modify one or more slot values of the Fact.
Fact must be asserted within the CLIPS engine.
Equivalent to the CLIPS (modify) function.
"""
modifier = environment_modifier(self._env, 'fact')
ret = lib.FMSetFact(modifier, self._fact)
if ret != lib.FME_NO_ERROR:
raise CLIPSError(self._env, code=ret)
for slot, slot_val in slots.items():
value = clips.values.clips_value(self._env, value=slot_val)
ret = lib.FMPutSlot(modifier, str(slot).encode(), value)
if ret != PutSlotError.PSE_NO_ERROR:
raise PUT_SLOT_ERROR[ret](slot)
if lib.FMModify(modifier) is ffi.NULL:
raise CLIPSError(self._env, code=lib.FBError(self._env))
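# Usage sketch for TemplateFact: unordered facts behave like mappings and
# can be modified in place (the deftemplate below is illustrative):
#
#     env.build('(deftemplate person (slot name) (slot age))')
#     fact = env.assert_string('(person (name "John") (age 30))')
#     assert fact['age'] == 30
#     fact.modify_slots(age=31)  # re-evaluated against the rule network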
class Template:
"""A Fact Template is a formal representation of the fact data structure.
In CLIPS, Templates are defined via the (deftemplate) function.
Templates allow to assert new facts within the CLIPS environment.
    Implied facts are associated with implied templates. Implied templates
have a limited set of features.
"""
__slots__ = '_env', '_name'
def __init__(self, env: ffi.CData, name: str):
self._env = env
self._name = name.encode()
def __hash__(self):
return hash(self._ptr())
def __eq__(self, tpl):
return self._ptr() == tpl._ptr()
def __str__(self):
string = lib.DeftemplatePPForm(self._ptr())
string = ffi.string(string).decode() if string != ffi.NULL else ''
return ' '.join(string.split())
def __repr__(self):
string = lib.DeftemplatePPForm(self._ptr())
string = ffi.string(string).decode() if string != ffi.NULL else ''
return "%s: %s" % (self.__class__.__name__, ' '.join(string.split()))
def _ptr(self) -> ffi.CData:
tpl = lib.FindDeftemplate(self._env, self._name)
if tpl == ffi.NULL:
raise CLIPSError(self._env, 'Template <%s> not defined' % self.name)
return tpl
@property
def implied(self) -> bool:
"""True if the Template is implied."""
return lib.ImpliedDeftemplate(self._ptr())
@property
def name(self) -> str:
"""Template name."""
return self._name.decode()
@property
def module(self) -> Module:
"""The module in which the Template is defined.
Python equivalent of the CLIPS deftemplate-module command.
"""
name = ffi.string(lib.DeftemplateModule(self._ptr())).decode()
return Module(self._env, name)
@property
def deletable(self) -> bool:
"""True if the Template can be undefined."""
return lib.DeftemplateIsDeletable(self._ptr())
@property
def slots(self) -> tuple:
"""The slots of the template."""
if self.implied:
return ()
value = clips.values.clips_value(self._env)
lib.DeftemplateSlotNames(self._ptr(), value)
return tuple(TemplateSlot(self._env, self.name, n)
for n in clips.values.python_value(self._env, value))
@property
def watch(self) -> bool:
"""Whether or not the Template is being watched."""
        return lib.DeftemplateGetWatch(self._ptr())
@watch.setter
def watch(self, flag: bool):
"""Whether or not the Template is being watched."""
        lib.DeftemplateSetWatch(self._ptr(), flag)
def facts(self) -> iter:
"""Iterate over the asserted Facts belonging to this Template."""
fact = lib.GetNextFactInTemplate(self._ptr(), ffi.NULL)
while fact != ffi.NULL:
yield new_fact(self._ptr(), fact)
fact = lib.GetNextFactInTemplate(self._ptr(), fact)
def assert_fact(self, **slots) -> TemplateFact:
"""Assert a new fact with the given slot values.
Only deftemplates that have been explicitly defined can be asserted
with this function.
Equivalent to the CLIPS (assert) function.
"""
builder = environment_builder(self._env, 'fact')
ret = lib.FBSetDeftemplate(builder, self._name)
if ret != lib.FBE_NO_ERROR:
raise CLIPSError(self._env, code=ret)
for slot, slot_val in slots.items():
value = clips.values.clips_value(self._env, value=slot_val)
ret = lib.FBPutSlot(builder, str(slot).encode(), value)
if ret != PutSlotError.PSE_NO_ERROR:
raise PUT_SLOT_ERROR[ret](slot)
fact = lib.FBAssert(builder)
if fact != ffi.NULL:
return TemplateFact(self._env, fact)
else:
raise CLIPSError(self._env, code=lib.FBError(self._env))
def undefine(self):
"""Undefine the Template.
Equivalent to the CLIPS (undeftemplate) function.
The object becomes unusable after this method has been called.
"""
if not lib.Undeftemplate(self._ptr(), self._env):
raise CLIPSError(self._env)
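# Usage sketch for Template.assert_fact: keyword arguments map to slot
# values (using the illustrative person template sketched above):
#
#     template = env.find_template('person')
#     fact = template.assert_fact(name='Jane', age=25)
#     assert fact['name'] == 'Jane'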
class TemplateSlot:
"""Template Facts organize the information within Slots.
Slots might restrict the type or amount of data they store.
"""
__slots__ = '_env', '_tpl', '_name'
def __init__(self, env: ffi.CData, tpl: str, name: str):
self._env = env
self._tpl = tpl.encode()
self._name = name.encode()
def __hash__(self):
return hash(self._ptr()) + hash(self._name)
def __eq__(self, slot):
return self._ptr() == slot._ptr() and self._name == slot._name
def __str__(self):
return self.name
def __repr__(self):
return "%s: %s" % (self.__class__.__name__, self.name)
def _ptr(self) -> ffi.CData:
tpl = lib.FindDeftemplate(self._env, self._tpl)
if tpl == ffi.NULL:
raise CLIPSError(
self._env, 'Template <%s> not defined' % self._tpl.decode())
return tpl
@property
def name(self) -> str:
"""The slot name."""
return self._name.decode()
@property
def multifield(self) -> bool:
"""True if the slot is a multifield slot."""
return bool(lib.DeftemplateSlotMultiP(self._ptr(), self._name))
@property
def types(self) -> tuple:
"""A tuple containing the value types for this Slot.
Equivalent to the CLIPS (deftemplate-slot-types) function.
"""
value = clips.values.clips_value(self._env)
if lib.DeftemplateSlotTypes(self._ptr(), self._name, value):
return clips.values.python_value(self._env, value)
raise CLIPSError(self._env)
@property
def range(self) -> tuple:
"""A tuple containing the numeric range for this Slot.
Equivalent to the CLIPS (deftemplate-slot-range) function.
"""
value = clips.values.clips_value(self._env)
if lib.DeftemplateSlotRange(self._ptr(), self._name, value):
return clips.values.python_value(self._env, value)
raise CLIPSError(self._env)
@property
def cardinality(self) -> tuple:
"""A tuple containing the cardinality for this Slot.
Equivalent to the CLIPS (deftemplate-slot-cardinality) function.
"""
value = clips.values.clips_value(self._env)
if lib.DeftemplateSlotCardinality(self._ptr(), self._name, value):
return clips.values.python_value(self._env, value)
raise CLIPSError(self._env)
@property
def default_type(self) -> TemplateSlotDefaultType:
"""The default value type for this Slot.
Equivalent to the CLIPS (deftemplate-slot-defaultp) function.
"""
return TemplateSlotDefaultType(
lib.DeftemplateSlotDefaultP(self._ptr(), self._name))
@property
def default_value(self) -> type:
"""The default value for this Slot.
Equivalent to the CLIPS (deftemplate-slot-default-value) function.
"""
value = clips.values.clips_value(self._env)
if lib.DeftemplateSlotDefaultValue(self._ptr(), self._name, value):
return clips.values.python_value(self._env, value)
raise CLIPSError(self._env)
@property
def allowed_values(self) -> tuple:
"""A tuple containing the allowed values for this Slot.
Equivalent to the CLIPS (slot-allowed-values) function.
"""
value = clips.values.clips_value(self._env)
if lib.DeftemplateSlotAllowedValues(self._ptr(), self._name, value):
return clips.values.python_value(self._env, value)
raise CLIPSError(self._env)
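# Usage sketch for TemplateSlot introspection (the constrained slot below
# is illustrative):
#
#     env.build('(deftemplate job (slot level (type INTEGER) (range 1 10)))')
#     slot = env.find_template('job').slots[0]
#     print(slot.types)  # ('INTEGER',)
#     print(slot.range)  # (1, 10)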
class DefinedFacts:
"""The DefinedFacts constitute a set of a priori
    or initial knowledge specified as a collection of facts.
When the CLIPS environment is reset, every fact specified
within a deffacts construct in the CLIPS knowledge base
is added to the DefinedFacts list.
"""
__slots__ = '_env', '_name'
def __init__(self, env: ffi.CData, name: str):
self._env = env
self._name = name.encode()
def __hash__(self):
return hash(self._ptr())
def __eq__(self, dfc):
return self._ptr() == dfc._ptr()
def __str__(self):
string = lib.DeffactsPPForm(self._ptr())
string = ffi.string(string).decode() if string != ffi.NULL else ''
return ' '.join(string.split())
def __repr__(self):
string = lib.DeffactsPPForm(self._ptr())
string = ffi.string(string).decode() if string != ffi.NULL else ''
return "%s: %s" % (self.__class__.__name__, ' '.join(string.split()))
def _ptr(self) -> ffi.CData:
dfc = lib.FindDeffacts(self._env, self._name)
if dfc == ffi.NULL:
raise CLIPSError(
self._env, 'DefinedFacts <%s> not defined' % self.name)
return dfc
@property
def name(self) -> str:
"""DefinedFacts name."""
return self._name.decode()
@property
def module(self) -> Module:
"""The module in which the DefinedFacts is defined.
Python equivalent of the CLIPS (deffacts-module) command.
"""
name = ffi.string(lib.DeffactsModule(self._ptr())).decode()
return Module(self._env, name)
@property
def deletable(self) -> bool:
"""True if the DefinedFacts can be undefined."""
return lib.DeffactsIsDeletable(self._ptr())
def undefine(self):
"""Undefine the DefinedFacts.
Equivalent to the CLIPS (undeffacts) function.
The object becomes unusable after this method has been called.
"""
if not lib.Undeffacts(self._ptr(), self._env):
raise CLIPSError(self._env)
class Facts:
"""Facts and Templates namespace class.
.. note::
All the Facts methods are accessible through the Environment class.
"""
__slots__ = ['_env']
def __init__(self, env):
self._env = env
@property
def fact_duplication(self) -> bool:
"""Whether or not duplicate facts are allowed."""
return lib.GetFactDuplication(self._env)
@fact_duplication.setter
    def fact_duplication(self, duplication: bool):
        lib.SetFactDuplication(self._env, duplication)
def facts(self) -> iter:
"""Iterate over the asserted Facts."""
fact = lib.GetNextFact(self._env, ffi.NULL)
while fact != ffi.NULL:
yield new_fact(self._env, fact)
fact = lib.GetNextFact(self._env, fact)
def templates(self) -> iter:
"""Iterate over the defined Templates."""
template = lib.GetNextDeftemplate(self._env, ffi.NULL)
while template != ffi.NULL:
name = ffi.string(lib.DeftemplateName(template)).decode()
yield Template(self._env, name)
template = lib.GetNextDeftemplate(self._env, template)
def find_template(self, name: str) -> Template:
"""Find the Template by its name."""
tpl = lib.FindDeftemplate(self._env, name.encode())
if tpl == ffi.NULL:
raise LookupError("Template '%s' not found" % name)
return Template(self._env, name)
def defined_facts(self) -> iter:
"""Iterate over the DefinedFacts."""
deffacts = lib.GetNextDeffacts(self._env, ffi.NULL)
while deffacts != ffi.NULL:
name = ffi.string(lib.DeffactsName(deffacts)).decode()
yield DefinedFacts(self._env, name)
deffacts = lib.GetNextDeffacts(self._env, deffacts)
def find_defined_facts(self, name: str) -> DefinedFacts:
"""Find the DefinedFacts by its name."""
dfs = lib.FindDeffacts(self._env, name.encode())
if dfs == ffi.NULL:
raise LookupError("DefinedFacts '%s' not found" % name)
return DefinedFacts(self._env, name)
def assert_string(self, string: str) -> (ImpliedFact, TemplateFact):
"""Assert a fact as string."""
fact = lib.AssertString(self._env, string.encode())
if fact == ffi.NULL:
raise CLIPSError(
self._env, code=lib.GetAssertStringError(self._env))
return new_fact(self._env, fact)
def load_facts(self, facts: str):
"""Load a set of facts into the CLIPS data base.
Equivalent to the CLIPS (load-facts) function.
Facts can be loaded from a string or from a text file.
"""
facts = facts.encode()
if os.path.exists(facts):
if not lib.LoadFacts(self._env, facts):
raise CLIPSError(self._env)
else:
if not lib.LoadFactsFromString(self._env, facts, len(facts)):
raise CLIPSError(self._env)
def save_facts(self, path, mode=SaveMode.LOCAL_SAVE):
"""Save the facts in the system to the specified file.
Equivalent to the CLIPS (save-facts) function.
"""
if not lib.SaveFacts(self._env, path.encode(), mode):
raise CLIPSError(self._env)
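# Usage sketch for load_facts/save_facts: facts can be exchanged through a
# string or a file (the path below is illustrative):
#
#     env.load_facts('(point 1 2 3) (point 4 5 6)')
#     env.save_facts('/tmp/facts.clp')
#     env.reset()  # drop the current facts
#     env.load_facts('/tmp/facts.clp')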
def new_fact(env: ffi.CData, fact: ffi.CData) -> (ImpliedFact, TemplateFact):
if lib.ImpliedDeftemplate(lib.FactDeftemplate(fact)):
return ImpliedFact(env, fact)
else:
return TemplateFact(env, fact)
def slot_value(env: ffi.CData, fact: ffi.CData, slot: str = None) -> type:
value = clips.values.clips_value(env)
slot = slot.encode() if slot is not None else ffi.NULL
implied = lib.ImpliedDeftemplate(lib.FactDeftemplate(fact))
if not implied and slot == ffi.NULL:
        raise ValueError('a slot name is required for template facts')
ret = lib.GetFactSlot(fact, slot, value)
if ret != lib.GSE_NO_ERROR:
raise CLIPSError(env, code=ret)
return clips.values.python_value(env, value)
def slot_values(env: ffi.CData, fact: ffi.CData) -> iter:
value = clips.values.clips_value(env)
lib.FactSlotNames(fact, value)
return ((s, slot_value(env, fact, slot=s))
for s in clips.values.python_value(env, value))
def fact_pp_string(env: ffi.CData, fact: ffi.CData) -> str:
builder = environment_builder(env, 'string')
lib.SBReset(builder)
lib.FactPPForm(fact, builder, False)
return ffi.string(builder.contents).decode()
| 20,258 | 29.464662 | 80 | py |
clipspy | clipspy-master/clips/values.py | # Copyright (c) 2016-2023, Matteo Cafasso
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
from clips import common
from clips.classes import Instance
from clips.facts import new_fact, ImpliedFact, TemplateFact
from clips._clips import lib, ffi # pylint: disable=E0611
class Symbol(str):
"""Python equivalent of a CLIPS SYMBOL."""
def __new__(cls, symbol):
return str.__new__(cls, sys.intern(symbol))
class InstanceName(Symbol):
"""Python equivalent of a CLIPS INSTANCE_NAME."""
def python_value(env, value: ffi.CData) -> type:
"""Convert a CLIPSValue or UDFValue into Python."""
return PYTHON_VALUES[value.header.type](env, value)
def clips_value(env: ffi.CData, value: type = ffi.NULL) -> ffi.CData:
"""Convert a Python value into CLIPS.
If no value is provided, an empty value is returned.
"""
val = ffi.new("CLIPSValue *")
if value is not ffi.NULL:
constructor = CLIPS_VALUES.get(type(value), clips_external_address)
val.value = constructor(env, value)
return val
def clips_udf_value(env: ffi.CData, value: type = ffi.NULL,
udf_value: ffi.CData = ffi.NULL) -> ffi.CData:
"""Convert a Python value into a CLIPS UDFValue.
If no value is provided, an empty value is returned.
"""
if udf_value is ffi.NULL:
return ffi.new("UDFValue *")
constructor = CLIPS_VALUES.get(type(value), clips_external_address)
udf_value.value = constructor(env, value)
return udf_value
def multifield_value(env: ffi.CData, values: (list, tuple)) -> ffi.CData:
"""Convert a Python list or tuple into a CLIPS multifield."""
if not values:
return lib.EmptyMultifield(env)
builder = common.environment_builder(env, 'multifield')
lib.MBReset(builder)
for value in values:
lib.MBAppend(builder, clips_value(env, value))
return lib.MBCreate(builder)
def clips_external_address(env: ffi.CData, value: type) -> ffi.CData:
"""Convert a Python object into a CLIPSExternalAddress."""
handle = ffi.new_handle(value)
# Hold reference to CData handle
user_functions = common.environment_data(env, 'user_functions')
user_functions.external_addresses[value] = handle
return lib.CreateCExternalAddress(env, handle)
def python_external_address(env: ffi.CData, value: ffi.CData) -> type:
"""Convert a CLIPSExternalAddress into a Python object."""
obj = ffi.from_handle(value.externalAddressValue.contents)
# Remove reference to CData handle
user_functions = common.environment_data(env, 'user_functions')
del user_functions.external_addresses[obj]
return obj
PYTHON_VALUES = {common.CLIPSType.FLOAT:
lambda e, v: float(v.floatValue.contents),
common.CLIPSType.INTEGER:
lambda e, v: int(v.integerValue.contents),
common.CLIPSType.SYMBOL:
lambda e, v: Symbol(
ffi.string(v.lexemeValue.contents).decode()),
common.CLIPSType.STRING:
lambda e, v: ffi.string(v.lexemeValue.contents).decode(),
common.CLIPSType.MULTIFIELD:
lambda e, v: tuple(
python_value(e, v.multifieldValue.contents + i)
for i in range(v.multifieldValue.length)),
common.CLIPSType.FACT_ADDRESS:
lambda e, v: new_fact(e, v.factValue),
common.CLIPSType.INSTANCE_ADDRESS:
lambda e, v: Instance(e, v.instanceValue),
common.CLIPSType.INSTANCE_NAME:
lambda e, v: InstanceName(
ffi.string(v.lexemeValue.contents).decode()),
common.CLIPSType.EXTERNAL_ADDRESS: python_external_address,
common.CLIPSType.VOID: lambda e, v: None}
CLIPS_VALUES = {int: lib.CreateInteger,
float: lib.CreateFloat,
list: multifield_value,
tuple: multifield_value,
bool: lib.CreateBoolean,
type(None): lambda e, v: lib.CreateSymbol(e, b'nil'),
str: lambda e, v: lib.CreateString(e, v.encode()),
Instance: lambda e, v: v._ist,
ImpliedFact: lambda e, v: v._fact,
TemplateFact: lambda e, v: v._fact,
Symbol: lambda e, v: lib.CreateSymbol(e, v.encode()),
InstanceName:
lambda e, v: lib.CreateInstanceName(e, v.encode())}
ANY_TYPE_BITS = (lib.FLOAT_BIT | lib.INTEGER_BIT | lib.SYMBOL_BIT |
lib.STRING_BIT | lib.MULTIFIELD_BIT |
lib.EXTERNAL_ADDRESS_BIT | lib.FACT_ADDRESS_BIT |
lib.INSTANCE_ADDRESS_BIT | lib.INSTANCE_NAME_BIT
| lib.VOID_BIT | lib.BOOLEAN_BIT)
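# For reference, the two-way mapping established by the tables above:
#
#     int <-> INTEGER
#     float <-> FLOAT
#     str <-> STRING
#     Symbol <-> SYMBOL
#     list/tuple <-> MULTIFIELD
#     InstanceName <-> INSTANCE_NAME
#     None -> nil (SYMBOL)
#     any other Python object <-> EXTERNAL_ADDRESS (via ffi handles)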
| 6,285 | 37.09697 | 80 | py |
clipspy | clipspy-master/clips/clips_build.py | # Copyright (c) 2016-2023, Matteo Cafasso
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from cffi import FFI
ffibuilder = FFI()
CLIPS_SOURCE = """
#include <clips.h>
/* Return true if the template is implied. */
bool ImpliedDeftemplate(Deftemplate *template)
{
return template->implied;
}
/* User Defined Functions support. */
static void python_function(Environment *env, UDFContext *udfc, UDFValue *out);
int DefinePythonFunction(Environment *environment)
{
return AddUDF(
environment, "python-function",
NULL, UNBOUNDED, UNBOUNDED, NULL,
python_function, "python_function", NULL);
}
"""
with open("lib/clips.cdef") as cdef_file:
CLIPS_CDEF = cdef_file.read()
ffibuilder.set_source("_clips",
CLIPS_SOURCE,
libraries=["clips"])
ffibuilder.cdef(CLIPS_CDEF)
if __name__ == "__main__":
ffibuilder.compile(verbose=True)
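# Build sketch: running this module compiles the _clips extension against
# an installed libclips (the command below is illustrative):
#
#     $ python clips_build.py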
| 2,364 | 32.309859 | 80 | py |
clipspy | clipspy-master/doc/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# clipspy documentation build configuration file, created by
# sphinx-quickstart on Sat Oct 7 00:14:23 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import fileinput
from unittest.mock import MagicMock
module_dir = os.path.abspath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
sys.path.insert(0, module_dir)
sys.modules['clips._clips'] = MagicMock()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'clipspy'
copyright = '2016-2023, Matteo Cafasso'
author = 'Matteo Cafasso'
CWD = os.path.dirname(__file__)
def package_version():
module_path = os.path.join(CWD, '..', 'clips', '__init__.py')
for line in fileinput.input(module_path):
if line.startswith('__version__'):
return line.split('=')[-1].strip().replace('\'', '')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = package_version()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
html_theme_options = {
'page_width': '80%',
'github_user': 'noxdafox',
'github_repo': 'clipspy',
'show_related': True
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'clipspy v0.0.8'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'clipspydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'clipspy.tex', 'clipspy Documentation',
'Matteo Cafasso', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'clipspy', 'clipspy Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'clipspy', 'clipspy Documentation',
     author, 'clipspy', 'Python CFFI bindings for the CLIPS rule engine.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| 10,419 | 27.547945 | 80 | py |
ScatterPlot | ScatterPlot-master/Scatter plot.py | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 14:37:55 2020
@author: dongdong
"""
import numpy as np
import matplotlib.pyplot as plt
library_lng = 120.425947
library_lat = 36.078474
longitude=[]
latitude=[]
for i in range(1, 7):  # the number of durians starts at 6 and grows by 6 each iteration
    longitude = []
    latitude = []
    for j in range(i * 6):
        n = np.random.randint(2, 7) * 100
        longitude.append(library_lng + (np.random.rand(1) - 0.5) * 2 / n)
        latitude.append(library_lat + (np.random.rand(1) - 0.5) * 2 / n)
figure = plt.figure(figsize = [16, 10])
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
ax = figure.add_subplot()
ax.set_title('Number of durians:' + str(6 * i), fontproperties = 'SimHei', fontsize = 35)
ax.scatter(longitude, latitude, s=280, alpha=0.9, marker='v', color ='#ffd700')
ax.plot(library_lng, library_lat, '.', color = 'r', markersize = 33)
ax.set_xlabel('Longitude', fontproperties = 'SimHei', fontsize = 30)
ax.set_ylabel('Latitude', fontproperties = 'SimHei', fontsize = 30)
ax.ticklabel_format(useOffset=False)
    ax.axis([120.42004, 120.437593, 36.071997, 36.082411])
ax.grid()
plt.savefig('D:/Durian/Durians Distribution' + str(i))
#plt.show() | 1,269 | 36.352941 | 93 | py |
TAM | TAM-main/tam.py | import numpy as np
from entropy import *
def est_entropy(x, A = None):
'''helper function for estimating (conditional) entropy H(x|A).
Parameters
----------
x : np.array
        Data of the R.V. whose (conditional) entropy is estimated
A : 2d np.array
Data matrix of conditioning set
Can be None, then return marginal entropy
Returns
----------
h : float
estimated (conditional) entropy
'''
A = A if (A is None) or (len(A.shape) == 2) else A[:,np.newaxis]
if len(np.unique(x)) == 1:
return 0
elif (A is None) or (A.shape[1] == 0):
        vals, counts = np.unique(x, return_counts = True)
        k = min(len(vals), 100000)
fin = hist_to_fin(counts)
entropy = Entropy(k = k)
entro = entropy.estimate(fin)*np.log(2)
return entro
else:
logk = np.apply_along_axis(lambda y: np.log(len(np.unique(y))), 0, A).sum()
k = np.round(np.exp(min(logk, np.log(100000))))
if k == 1:
partial = 0
else:
counts = np.unique(A, return_counts = True, axis=0)[1]
fin = hist_to_fin(counts)
entropy = Entropy(k = k)
partial = entropy.estimate(fin)*np.log(2)
logk += np.log(len(np.unique(x)))
k = np.round(np.exp(min(logk, np.log(100000))))
counts = np.unique(np.c_[x, A], return_counts = True, axis=0)[1]
fin = hist_to_fin(counts)
entropy = Entropy(k = k)
total = entropy.estimate(fin)*np.log(2)
return total - partial
def findPPS(x, A, kappa):
'''PPS procedure to find Markov boundary.
Parameters
----------
x : np.array
        Data of the target R.V. whose Markov boundary is sought
A : 2d np.array
Data matrix of candidate set
kappa : float
PPS threshold
Returns
----------
pps : np.array
Index of estimated Markov boundary in A
'''
if A.size == 0:
return np.array([], dtype=int)
pps = []
entro_now = est_entropy(x)
rest_pps = np.arange(A.shape[1])
while True:
entro_cond = np.array([est_entropy(x, A = np.c_[A[:,pps], A[:,pp]]) for pp in rest_pps])
mutInfor = entro_now - entro_cond
if max(mutInfor) < kappa:
break
pps.append(rest_pps[np.argmax(mutInfor)])
entro_now = entro_cond[np.argmax(mutInfor)]
rest_pps = np.setdiff1d(np.arange(A.shape[1]), pps)
if len(rest_pps) == 0:
break
return np.array(pps, dtype=int)
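# Illustrative call (not part of the original file): the returned indices are
# the columns of A forming the estimated Markov boundary of x.
# mb = findPPS(X[:, 0], X[:, 1:], kappa=0.05)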
class TAM():
def __init__(self, X):
'''Main class of TAM DAG learning algorithm.
Parameters
----------
X : 2d np.array
data matrix
'''
self.X = X
self.n, self.d = X.shape
self.ancestor = np.array([], dtype=int)
self.descendant = np.arange(self.d)
self.layers = []
def train(self, kappa, omega):
'''Run the TAM algorithm to learn the DAG.
Parameters
----------
kappa : float
PPS threshold
omega : float
Mutual information testing threshold
        Attributes set (the method itself returns None)
----------
self.Gr : np.array
binary adj matrix of DAG
self.layers : list of np.array
layer decomposition of DAG
'''
self.Gr = np.zeros((self.d, self.d))
while len(self.descendant) > 0:
            # call findPPS() to find Markov boundary and estimate conditional entropy
ppss = {}
condentro = []
for j in self.descendant:
pps = findPPS(self.X[:,j], self.X[:,self.ancestor], kappa)
ppss[j] = self.ancestor[pps]
condentro.append(est_entropy(self.X[:,j], self.X[:,ppss[j]]))
condentro = np.array(condentro)
tau = self.descendant[np.argsort(condentro)]
condentro = np.sort(condentro)
# TAM step
cond = np.array([], dtype=int)
mask = np.array([], dtype=int)
while len(tau) > 0:
cond = np.r_[cond, tau[0]]
condentro = condentro[1:]
tau = tau[1:]
# testing
if len(tau) == 0:
break
condentro2 = []
for j in tau:
condentro2.append(est_entropy(self.X[:,j], self.X[:, np.r_[ppss[j], cond]]))
condentro2 = np.array(condentro2)
# masking
index = (condentro - condentro2 > omega)
mask = np.r_[mask, tau[index]]
condentro = condentro[~index]
tau = tau[~index]
# update layers and graphs
self.layers.append(cond)
self.ancestor = np.r_[self.ancestor, cond]
self.descendant = np.setdiff1d(self.descendant, self.ancestor)
for j in cond:
self.Gr[ppss[j], j] = 1
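# Illustrative usage sketch (not part of the original file; `simulate_dag` and
# `sample_from_mod` come from the accompanying utils.py, and the kappa/omega
# thresholds below are arbitrary):
# G_true = simulate_dag(d=10, s0=10, graph_type='ER')
# X = sample_from_mod(G_true, n=5000, prob=0.2)
# model = TAM(X)
# model.train(kappa=0.05, omega=0.05)
# print(model.Gr)      # estimated binary adjacency matrix
# print(model.layers)  # topological layer decomposition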
| 5,134 | 31.5 | 96 | py |
TAM | TAM-main/utils.py | import numpy as np
import igraph as ig
import networkx as nx
def compute_caus_order(G):
d = G.shape[0]
remain = list(range(d))
caus_order = np.empty(d, dtype = int)
for i in range(d-1):
root = min(np.where(G.sum(axis=0) == 0)[0])
caus_order[i] = remain[root]
del remain[root]
G = np.delete(G, root, axis = 0)
G = np.delete(G, root, axis = 1)
caus_order[d-1] = remain[0]
return caus_order
def find_pa(G, node):
return np.where(G[:,node] == 1)[0]
def test_order(est_order, G):
ed_from, ed_to = np.where(G==1)
order_index = np.argsort(est_order)
count = 0
for i in range(len(ed_from)):
if order_index[ed_from[i]] > order_index[ed_to[i]]:
count += 1
return int(count==0), count
def simulate_dag(d, s0, graph_type, permute=True):
'''Simulate random DAG with some expected number of edges.
Parameters
----------
d : int
num of nodes
s0 : int
expected num of edges
graph_type : str
'ER', 'SF', 'Tree', 'MC'
Returns
----------
B : np.array
binary adj matrix of DAG
'''
max_num_edge = d * (d - 1) / 2
if graph_type == 'ER':
# Erdos-Renyi
edge_from, edge_to = np.nonzero(np.triu(np.ones(d), k = 1))
        edges = np.random.choice(len(edge_from), int(min(s0, max_num_edge)), replace = False)
edge_from = edge_from[edges]
edge_to = edge_to[edges]
B = np.zeros((d, d))
B[edge_from, edge_to] = 1
if permute:
rand_sort = np.arange(d)
np.random.shuffle(rand_sort)
B = B[rand_sort, :]
B = B[:, rand_sort]
elif graph_type == 'SF':
# Scale-free, Barabasi-Albert
G = ig.Graph.Barabasi(n=d, m=int(round(min(s0, max_num_edge) / d)), directed=True)
B = np.array(G.get_adjacency().data)
elif graph_type == 'Tree':
# Tree graph
        B = np.tril(nx.to_numpy_array(nx.generators.trees.random_tree(d)))  # to_numpy_matrix was removed in networkx 3.x
elif graph_type == 'MC':
# Markov chain
B = np.eye(d, k = 1)
if permute:
rand_sort = np.arange(d)
np.random.shuffle(rand_sort)
B = B[rand_sort, :]
B = B[:, rand_sort]
return B
def sample_from_bino_addtive(G, n, prob):
d = G.shape[0]
X = np.empty((n,d))
caus_order = compute_caus_order(G)
for node in caus_order:
pa_of_node = np.where(G[:,node] == 1)[0]
if len(pa_of_node) == 0:
X[:,node] = np.random.binomial(1, prob, n)
else:
X[:,node] = np.random.binomial(1, prob, n) + X[:,pa_of_node].sum(axis=1)
return X
def sample_from_mod(G, n, prob, shuffle=True):
d = G.shape[0]
X = np.empty((n,d))
caus_order = compute_caus_order(G)
for node in caus_order:
pt = np.random.choice([prob, 1-prob]) if shuffle else prob
pa_of_node = np.where(G[:,node] == 1)[0]
if len(pa_of_node) == 0:
X[:,node] = np.random.binomial(1, pt, n)
else:
S = X[:,pa_of_node].sum(axis=1)
Yt = np.mod(S, 2)
ind = np.random.binomial(1, pt, n)
X[:,node] = (Yt ** ind) * ((1-Yt) ** (1-ind))
return X
def count_accuracy(B_true, B_est):
"""Compute various accuracy metrics for B_est.
true positive = predicted association exists in condition in correct direction
reverse = predicted association exists in condition in opposite direction
false positive = predicted association does not exist in condition
Args:
B_true (np.ndarray): [d, d] ground truth graph, {0, 1}
B_est (np.ndarray): [d, d] estimate, {0, 1, -1}, -1 is undirected edge in CPDAG
Returns:
fdr: (reverse + false positive) / prediction positive
tpr: (true positive) / condition positive
fpr: (reverse + false positive) / condition negative
shd: undirected extra + undirected missing + reverse
nnz: prediction positive
"""
d = B_true.shape[0]
# linear index of nonzeros
pred_und = np.flatnonzero(B_est == -1)
pred = np.flatnonzero(B_est == 1)
cond = np.flatnonzero(B_true)
cond_reversed = np.flatnonzero(B_true.T)
cond_skeleton = np.concatenate([cond, cond_reversed])
# true pos
true_pos = np.intersect1d(pred, cond, assume_unique=True)
# treat undirected edge favorably
true_pos_und = np.intersect1d(pred_und, cond_skeleton, assume_unique=True)
true_pos = np.concatenate([true_pos, true_pos_und])
# false pos
false_pos = np.setdiff1d(pred, cond_skeleton, assume_unique=True)
false_pos_und = np.setdiff1d(pred_und, cond_skeleton, assume_unique=True)
false_pos = np.concatenate([false_pos, false_pos_und])
# reverse
extra = np.setdiff1d(pred, cond, assume_unique=True)
reverse = np.intersect1d(extra, cond_reversed, assume_unique=True)
# compute ratio
pred_size = len(pred) + len(pred_und)
cond_neg_size = 0.5 * d * (d - 1) - len(cond)
fdr = float(len(reverse) + len(false_pos)) / max(pred_size, 1)
tpr = float(len(true_pos)) / max(len(cond), 1)
fpr = float(len(reverse) + len(false_pos)) / max(cond_neg_size, 1)
# structural hamming distance
pred_lower = np.flatnonzero(np.tril(B_est + B_est.T))
cond_lower = np.flatnonzero(np.tril(B_true + B_true.T))
extra_lower = np.setdiff1d(pred_lower, cond_lower, assume_unique=True)
missing_lower = np.setdiff1d(cond_lower, pred_lower, assume_unique=True)
shd = len(extra_lower) + len(missing_lower) + len(reverse)
return {'fdr': fdr, 'tpr': tpr, 'fpr': fpr, 'shd': shd, 'nnz': pred_size}
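# Illustrative round trip over these helpers (not part of the original file):
# B_true = simulate_dag(d=20, s0=20, graph_type='ER')
# X = sample_from_mod(B_true, n=5000, prob=0.2)
# ok, violations = test_order(compute_caus_order(B_true), B_true)  # ok == 1
# metrics = count_accuracy(B_true, B_true)                         # shd == 0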
| 5,659 | 33.512195 | 90 | py |
TAM | TAM-main/entropy.py | #!/usr/bin/env python3
"""
Main libraray for entropy estimation
"""
import linecache
from math import log, floor
import numpy as np
class Entropy():
"""
Entropy estimator
"""
def __init__(self, k, L=None, M=None, N=None):
"""
Args:
k: int, required
alphabet size
L: int
Polynoimal degree. Default c0*log(k)
M: float
M/n is the right-end of approximation interval. Default c1*log(k)
N: int
Threshold to apply polynomial estimator. Default c2*log(k)
"""
self.k = k
        self.degree = L if L is not None else floor(1.6*log(k))
        self.ratio = M if M is not None else 3.5*log(k)
        self.n_threshold = N if N is not None else floor(1.6*log(k))
def estimate(self, fin):
"""
Polynomial estimator from a given fingerprint
Args:
fin: list of tuples (frequency, count)
fingerprint of samples, fin[i] is the number of symbols that appeared exactly i times
Return:
the estimated entropy (bits)
"""
# get total sample size
num = get_sample_size(fin)
# get linear estimator coefficients
a_coeffs = read_coeffs(self.degree)
        g_coeffs = np.empty(self.n_threshold+1)
        for j in range(self.n_threshold+1):
start = self.degree if j > self.degree else j
g_coeffs[j] = a_coeffs[start]
for i in range(start, 0, -1):
g_coeffs[j] = a_coeffs[i-1] + g_coeffs[j] * (j-i+1) / self.ratio
g_coeffs[j] = (g_coeffs[j] * self.ratio + log(num/self.ratio) * j)/num
# get estimate
h_estimate = 0
sym_num = 0
for freq, cnt in fin:
sym_num += cnt
            if freq > self.n_threshold: # plug-in
p_hat = 1.*freq/num
h_estimate += (-p_hat*log(p_hat)+0.5/num)*cnt
else: # polynomial
h_estimate += g_coeffs[freq]*cnt
h_estimate += 1.0*g_coeffs[0] * (self.k-sym_num)
h_estimate = h_estimate if h_estimate > 0 else 0
return h_estimate/log(2)
def estimate_Miller_Madow(self, fin):
"""
Miller Madow estimator from a given fingerprint
Args:
fin: list of tuples (frequency, count)
fingerprint of samples, fin[i] is the number of symbols that appeared exactly i times
Return:
the estimated entropy (bits)
"""
num = get_sample_size(fin)
h_estimate = 0
sym_num = 0
for freq, cnt in fin:
sym_num += cnt
if freq > 0:
p_hat = 1.*freq/num
h_estimate += (-p_hat*log(p_hat))*cnt
return (h_estimate+1.0*(sym_num-1)/num/2)/log(2)
def estimate_plug(self, fin):
"""
        Plug-in (empirical) entropy estimator from a given fingerprint
Args:
fin: list of tuples (frequency, count)
fingerprint of samples, fin[i] is the number of symbols that appeared exactly i times
Return:
the estimated entropy (bits)
"""
num = get_sample_size(fin)
h_estimate = 0
sym_num = 0
for freq, cnt in fin:
sym_num += cnt
if freq > 0:
p_hat = 1.*freq/num
h_estimate += (-p_hat*log(p_hat))*cnt
return h_estimate/log(2)
def get_sample_size(fin):
"""
get total sample size from a given fingerprint
"""
num = 0
for freq, cnt in fin:
num += freq * cnt
return num
def read_coeffs(degree):
"""
    read the linear estimator coefficients for a given polynomial degree
"""
line = linecache.getline("coeffs.txt", degree)
    return np.asarray(line.split()[1:]).astype(float)  # np.float was removed in NumPy 1.24
def sample_to_fin(samples):
"""
Return a fingerprint from samples
"""
return hist_to_fin(sample_to_hist(samples))
def hist_to_fin(hist):
"""
Return a fingerprint from histogram
"""
fin = Counter()
for freq in hist:
fin[freq] += 1
return fin.items()
def sample_to_hist(samples):
"""
Return a histogram of samples
"""
freq = Counter()
for symbol in samples:
freq[symbol] += 1
return np.asarray(list(freq.values()))
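# Illustrative usage sketch (not part of the original file; assumes a
# 'coeffs.txt' file with precomputed polynomial coefficients is present):
# samples = [0, 1, 1, 2, 2, 2]
# est = Entropy(k=3)
# print(est.estimate(sample_to_fin(samples)))       # polynomial estimate, bits
# print(est.estimate_plug(sample_to_fin(samples)))  # plug-in estimate, bits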
class Counter(dict):
"""
Class for counting items
"""
def __missing__(self, key):
return 0 | 4,355 | 25.888889 | 93 | py |
scalene | scalene-master/setup.py | from setuptools import setup, find_packages
from setuptools.extension import Extension
from scalene.scalene_version import scalene_version
from os import path, environ
import platform
import sys
if sys.platform == 'darwin':
import sysconfig
mdt = 'MACOSX_DEPLOYMENT_TARGET'
target = environ[mdt] if mdt in environ else sysconfig.get_config_var(mdt)
# target >= 10.9 is required for gcc/clang to find libstdc++ headers
if [int(n) for n in target.split('.')] < [10, 9]:
from os import execve
newenv = environ.copy()
newenv[mdt] = '10.9'
execve(sys.executable, [sys.executable] + sys.argv, newenv)
def clang_version():
import re
pat = re.compile('Clang ([0-9]+)')
match = pat.search(platform.python_compiler())
version = int(match.group(1))
return version
def multiarch_args():
"""Returns args requesting multi-architecture support, if applicable."""
# On MacOS we build "universal2" packages, for both x86_64 and arm64/M1
if sys.platform == 'darwin':
args = ['-arch', 'x86_64']
# ARM support was added in XCode 12, which requires MacOS 10.15.4
if clang_version() >= 12: # XCode 12
if [int(n) for n in platform.mac_ver()[0].split('.')] >= [10, 15, 4]:
args += ['-arch', 'arm64', '-arch', 'arm64e']
return args
return []
def extra_compile_args():
"""Returns extra compiler args for platform."""
if sys.platform == 'win32':
return ['/std:c++14'] # for Visual Studio C++
return ['-std=c++14'] + multiarch_args()
def make_command():
# return 'nmake' if sys.platform == 'win32' else 'make' # 'nmake' isn't found on github actions' VM
return 'make'
def dll_suffix():
"""Returns the file suffix ("extension") of a DLL"""
if (sys.platform == 'win32'): return '.dll'
if (sys.platform == 'darwin'): return '.dylib'
return '.so'
def read_file(name):
"""Returns a file's contents"""
with open(path.join(path.dirname(__file__), name), encoding="utf-8") as f:
return f.read()
import setuptools.command.egg_info
class EggInfoCommand(setuptools.command.egg_info.egg_info):
"""Custom command to download vendor libs before creating the egg_info."""
def run(self):
if sys.platform != 'win32':
self.spawn([make_command(), 'vendor-deps'])
super().run()
# Force building platform-specific wheel to avoid the Windows wheel
# (which doesn't include libscalene, and thus would be considered "pure")
# being used for other platforms.
from wheel.bdist_wheel import bdist_wheel as orig_bdist_wheel
class BdistWheelCommand(orig_bdist_wheel):
def finalize_options(self):
orig_bdist_wheel.finalize_options(self)
self.root_is_pure = False
import setuptools.command.build_ext
class BuildExtCommand(setuptools.command.build_ext.build_ext):
"""Custom command that runs 'make' to generate libscalene."""
def run(self):
super().run()
# No build of DLL for Windows currently.
if sys.platform != 'win32':
self.build_libscalene()
def build_libscalene(self):
scalene_temp = path.join(self.build_temp, 'scalene')
scalene_lib = path.join(self.build_lib, 'scalene')
libscalene = 'libscalene' + dll_suffix()
self.mkpath(scalene_temp)
self.mkpath(scalene_lib)
self.spawn([make_command(), 'OUTDIR=' + scalene_temp,
'ARCH=' + ' '.join(multiarch_args())])
self.copy_file(path.join(scalene_temp, libscalene),
path.join(scalene_lib, libscalene))
if self.inplace:
self.copy_file(path.join(scalene_lib, libscalene),
path.join('scalene', libscalene))
get_line_atomic = Extension('scalene.get_line_atomic',
include_dirs=['.', 'vendor/Heap-Layers', 'vendor/Heap-Layers/utility'],
sources=['src/source/get_line_atomic.cpp'],
extra_compile_args=extra_compile_args(),
extra_link_args=multiarch_args(),
py_limited_api=True, # for binary compatibility
language="c++"
)
pywhere = Extension('scalene.pywhere',
include_dirs=['.', 'src', 'src/include'],
sources = ['src/source/pywhere.cpp'],
extra_compile_args=extra_compile_args(),
extra_link_args=multiarch_args(),
py_limited_api=False,
language="c++")
# If we're testing packaging, build using a ".devN" suffix in the version number,
# so that we can upload new files (as testpypi/pypi don't allow re-uploading files with
# the same name as previously uploaded).
# Numbering scheme: https://www.python.org/dev/peps/pep-0440
dev_build = ('.dev' + environ['DEV_BUILD']) if 'DEV_BUILD' in environ else ''
setup(
name="scalene",
version=scalene_version + dev_build,
description="Scalene: A high-resolution, low-overhead CPU, GPU, and memory profiler for Python",
keywords="performance memory profiler",
long_description=read_file("README.md"),
long_description_content_type="text/markdown",
url="https://github.com/plasma-umass/scalene",
author="Emery Berger",
author_email="emery@cs.umass.edu",
license="Apache License 2.0",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: IPython",
"Framework :: Jupyter",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Topic :: Software Development",
"Topic :: Software Development :: Debuggers",
"Programming Language :: Python",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"License :: OSI Approved :: Apache Software License",
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows :: Windows 10"
],
packages=find_packages(),
cmdclass={
'bdist_wheel': BdistWheelCommand,
'egg_info': EggInfoCommand,
'build_ext': BuildExtCommand,
},
install_requires=[
"rich>=9.2.0",
"cloudpickle>=1.5.0",
"pynvml>=11.0.0,<375.99999",
"numpy"
],
ext_modules=([get_line_atomic, pywhere] if sys.platform != 'win32' else []),
setup_requires=['setuptools_scm'],
include_package_data=True,
entry_points={"console_scripts": ["scalene = scalene.__main__:main"]},
python_requires=">=3.8",
)
| 6,575 | 37.45614 | 103 | py |
scalene | scalene-master/benchmarks/julia1_nopil.py | import sys
# Disable the @profile decorator if none has been declared.
try:
# Python 2
import __builtin__ as builtins
except ImportError:
# Python 3
import builtins
try:
builtins.profile
except AttributeError:
# No line profiler, provide a pass-through version
def profile(func): return func
builtins.profile = profile
# Pasted from Chapter 2, High Performance Python - O'Reilly Media;
# minor modifications for Python 3 by Emery Berger
"""Julia set generator without optional PIL-based image drawing"""
import time
# area of complex space to investigate
x1, x2, y1, y2 = -1.8, 1.8, -1.8, 1.8
c_real, c_imag = -0.62772, -.42193
@profile
def calculate_z_serial_purepython(maxiter, zs, cs):
"""Calculate output list using Julia update rule"""
output = [0] * len(zs)
for i in range(len(zs)):
n = 0
z = zs[i]
c = cs[i]
while abs(z) < 2 and n < maxiter:
z = z * z + c
n += 1
output[i] = n
return output
@profile
def calc_pure_python(desired_width, max_iterations):
"""Create a list of complex coordinates (zs) and complex
parameters (cs), build Julia set, and display"""
x_step = (float(x2 - x1) / float(desired_width))
y_step = (float(y1 - y2) / float(desired_width))
x = []
y = []
ycoord = y2
while ycoord > y1:
y.append(ycoord)
ycoord += y_step
xcoord = x1
while xcoord < x2:
x.append(xcoord)
xcoord += x_step
# Build a list of coordinates and the initial condition for each cell.
# Note that our initial condition is a constant and could easily be removed;
# we use it to simulate a real-world scenario with several inputs to
# our function.
zs = []
cs = []
for ycoord in y:
for xcoord in x:
zs.append(complex(xcoord, ycoord))
cs.append(complex(c_real, c_imag))
print("Length of x:", len(x))
print("Total elements:", len(zs))
start_time = time.process_time()
output = calculate_z_serial_purepython(max_iterations, zs, cs)
end_time = time.process_time()
secs = end_time - start_time
sys.stdout.flush()
sys.stderr.flush()
output_str = "calculate_z_serial_purepython took " + str(secs) + " seconds"
print(output_str, file=sys.stderr)
sys.stderr.flush()
# This sum is expected for a 1000^2 grid with 300 iterations.
# It catches minor errors we might introduce when we're
# working on a fixed set of inputs.
### assert sum(output) == 33219980
if __name__ == "__main__":
# Calculate the Julia set using a pure Python solution with
# reasonable defaults for a laptop
calc_pure_python(desired_width=1000, max_iterations=300)
sys.exit(-1) # To force output from py-spy
| 2,797 | 29.086022 | 80 | py |
scalene | scalene-master/benchmarks/benchmark.py | import os
import sys
import re
import subprocess
import traceback
import statistics
python = "python3"
progname = os.path.join(os.path.dirname(__file__), "julia1_nopil.py")
number_of_runs = 1 # We take the average of this many runs.
# Output timing string from the benchmark.
result_regexp = re.compile(r"calculate_z_serial_purepython took ([0-9]*\.[0-9]+) seconds")
# Characteristics of the tools.
line_level = {}
cpu_profiler = {}
separate_profiler = {}
memory_profiler = {}
unmodified_code = {}
timing = {}
line_level["baseline"] = None
line_level["cProfile"] = False
line_level["Profile"] = False
line_level["line_profiler"] = True
line_level["pyinstrument"] = False
line_level["yappi_cputime"] = False
line_level["yappi_wallclock"] = False
line_level["pprofile_deterministic"] = True
line_level["pprofile_statistical"] = True
line_level["py_spy"] = True
line_level["memory_profiler"] = True
line_level["scalene_cpu"] = True
line_level["scalene_cpu_memory"] = True
cpu_profiler["baseline"] = None
cpu_profiler["cProfile"] = True
cpu_profiler["Profile"] = True
cpu_profiler["pyinstrument"] = True
cpu_profiler["line_profiler"] = True
cpu_profiler["yappi_cputime"] = True
cpu_profiler["yappi_wallclock"] = True
cpu_profiler["pprofile_deterministic"] = True
cpu_profiler["pprofile_statistical"] = True
cpu_profiler["py_spy"] = True
cpu_profiler["memory_profiler"] = False
cpu_profiler["scalene_cpu"] = True
cpu_profiler["scalene_cpu_memory"] = True
separate_profiler["baseline"] = None
separate_profiler["cProfile"] = False
separate_profiler["Profile"] = False
separate_profiler["pyinstrument"] = False
separate_profiler["line_profiler"] = False
separate_profiler["yappi_cputime"] = False
separate_profiler["yappi_wallclock"] = False
separate_profiler["pprofile_deterministic"] = False
separate_profiler["pprofile_statistical"] = False
separate_profiler["py_spy"] = False
separate_profiler["memory_profiler"] = False
separate_profiler["scalene_cpu"] = True
separate_profiler["scalene_cpu_memory"] = True
memory_profiler["baseline"] = None
memory_profiler["cProfile"] = False
memory_profiler["Profile"] = False
memory_profiler["pyinstrument"] = False
memory_profiler["line_profiler"] = False
memory_profiler["yappi_cputime"] = False
memory_profiler["yappi_wallclock"] = False
memory_profiler["pprofile_deterministic"] = False
memory_profiler["pprofile_statistical"] = False
memory_profiler["py_spy"] = False
memory_profiler["memory_profiler"] = True
memory_profiler["scalene_cpu"] = False
memory_profiler["scalene_cpu_memory"] = True
unmodified_code["baseline"] = None
unmodified_code["cProfile"] = True
unmodified_code["Profile"] = True
unmodified_code["pyinstrument"] = True
unmodified_code["line_profiler"] = False
unmodified_code["yappi_cputime"] = True
unmodified_code["yappi_wallclock"] = True
unmodified_code["pprofile_deterministic"] = True
unmodified_code["pprofile_statistical"] = True
unmodified_code["py_spy"] = True
unmodified_code["memory_profiler"] = False
unmodified_code["scalene_cpu"] = True
unmodified_code["scalene_cpu_memory"] = True
# how the profilers measure time
# - wall clock only
# - virtual (process) time only
# - either one
WallClock = 1
VirtualTime = 2
Either = 3
timing["baseline"] = None
timing["cProfile"] = WallClock
timing["Profile"] = VirtualTime
timing["pyinstrument"] = WallClock
timing["line_profiler"] = WallClock
timing["yappi_cputime"] = Either
timing["yappi_wallclock"] = Either
timing["pprofile_deterministic"] = WallClock
timing["pprofile_statistical"] = WallClock
timing["py_spy"] = Either
timing["memory_profiler"] = None
timing["scalene_cpu"] = Either
timing["scalene_cpu_memory"] = Either
# Command lines for the various tools.
baseline = f"{python} {progname}"
cprofile = f"{python} -m cProfile {progname}"
profile = f"{python} -m profile {progname}"
pyinstrument = f"pyinstrument {progname}"
line_profiler = f"{python} -m kernprof -l -v {progname}"
pprofile_deterministic = f"pprofile {progname}"
pprofile_statistical = f"pprofile --statistic 0.001 {progname}" # Same as Scalene
yappi_cputime = f"yappi {progname}"
yappi_wallclock = f"yappi -c wall {progname}"
py_spy = f"py-spy record -f raw -o foo.txt -- python3.7 {progname}"
scalene_cpu = f"{python} -m scalene {progname}"
scalene_cpu_memory = f"{python} -m scalene {progname}" # see below for environment variables
benchmarks = [(baseline, "baseline", "_original program_"), (cprofile, "cProfile", "`cProfile`"), (profile, "Profile", "`Profile`"), (pyinstrument, "pyinstrument", "`pyinstrument`"), (line_profiler, "line_profiler", "`line_profiler`"), (pprofile_deterministic, "pprofile_deterministic", "`pprofile` _(deterministic)_"), (pprofile_statistical, "pprofile_statistical", "`pprofile` _(statistical)_"), (yappi_cputime, "yappi_cputime", "`yappi` _(CPU)_"), (yappi_wallclock, "yappi_wallclock", "`yappi` _(wallclock)_"), (scalene_cpu, "scalene_cpu", "`scalene` _(CPU only)_"), (scalene_cpu_memory, "scalene_cpu_memory", "`scalene` _(CPU + memory)_")]
# benchmarks = [(baseline, "baseline", "_original program_"), (pprofile_deterministic, "`pprofile` _(deterministic)_")]
# benchmarks = [(baseline, "baseline", "_original program_"), (pprofile_statistical, "pprofile_statistical", "`pprofile` _(statistical)_")]
benchmarks = [(baseline, "baseline", "_original program_"), (py_spy, "py_spy", "`py-spy`"), (scalene_cpu, "scalene_cpu", "`scalene` _(CPU only)_"), (scalene_cpu_memory, "scalene_cpu_memory", "`scalene` _(CPU + memory)_")]
average_time = {}
check = ":heavy_check_mark:"
print("| | Time | Slowdown | Line-level? | CPU? | Python vs. C? | Memory? | Unmodified code? |")
print("| :--- | ---: | ---: | :---: | :---: | :---: | :---: | :---: |")
for bench in benchmarks:
print(bench)
times = []
for i in range(0, number_of_runs):
my_env = os.environ.copy()
if bench[1] == "scalene_cpu_memory":
my_env["PYTHONMALLOC"] = "malloc"
if sys.platform == 'darwin':
my_env["DYLD_INSERT_LIBRARIES"] = "./libscalene.dylib"
if sys.platform == 'linux':
my_env["LD_PRELOAD"] = "./libscalene.so"
result = subprocess.run(bench[0].split(), env = my_env, stderr = subprocess.STDOUT, stdout = subprocess.PIPE)
output = result.stdout.decode('utf-8')
print(output)
match = result_regexp.search(output)
if match is not None:
times.append(round(100 * float(match.group(1))) / 100.0)
else:
print("failed run")
average_time[bench[1]] = statistics.mean(times) # sum_time / (number_of_runs * 1.0)
print(str(average_time[bench[1]]))
if bench[1] == "baseline":
print(f"| {bench[2]} | {average_time[bench[1]]}s | 1.0x | | | | | |")
print("| | | | | |")
else:
try:
if bench[1].find("scalene") >= 0:
if bench[1].find("scalene_cpu") >= 0:
print("| | | | | |")
print(f"| {bench[2]} | {average_time[bench[1]]}s | **{round(100 * average_time[bench[1]] / average_time['baseline']) / 100}x** | {check if line_level[bench[1]] else 'function-level'} | {check if cpu_profiler[bench[1]] else ''} | {check if separate_profiler[bench[1]] else ''} | {check if memory_profiler[bench[1]] else ''} | {check if unmodified_code[bench[1]] else 'needs `@profile` decorators'} |")
else:
print(f"| {bench[2]} | {average_time[bench[1]]}s | {round(100 * average_time[bench[1]] / average_time['baseline']) / 100}x | {check if line_level[bench[1]] else 'function-level'} | {check if cpu_profiler[bench[1]] else ''} | {check if separate_profiler[bench[1]] else ''} | {check if memory_profiler[bench[1]] else ''} | {check if unmodified_code[bench[1]] else 'needs `@profile` decorators'} |")
except Exception as err:
traceback.print_exc()
print("err = " + str(err))
print("WOOPS")
# print(bench[1] + " = " + str(sum_time / 5.0))
| 8,083 | 43.417582 | 643 | py |
scalene | scalene-master/benchmarks/pystone.py | #! /usr/bin/env python3
"""
"PYSTONE" Benchmark Program
Version: Python/1.1 (corresponds to C/1.1 plus 2 Pystone fixes)
Author: Reinhold P. Weicker, CACM Vol 27, No 10, 10/84 pg. 1013.
Translated from ADA to C by Rick Richardson.
Every method to preserve ADA-likeness has been used,
at the expense of C-ness.
Translated from C to Python by Guido van Rossum.
Version History:
Version 1.1 corrects two bugs in version 1.0:
First, it leaked memory: in Proc1(), NextRecord ends
up having a pointer to itself. I have corrected this
by zapping NextRecord.PtrComp at the end of Proc1().
Second, Proc3() used the operator != to compare a
record to None. This is rather inefficient and not
true to the intention of the original benchmark (where
a pointer comparison to None is intended; the !=
operator attempts to find a method __cmp__ to do value
comparison of the record). Version 1.1 runs 5-10
percent faster than version 1.0, so benchmark figures
of different versions can't be compared directly.
"""
LOOPS = 500000
import time # from time import clock
__version__ = "1.1"
[Ident1, Ident2, Ident3, Ident4, Ident5] = range(1, 6)
class Record:
def __init__(self, PtrComp = None, Discr = 0, EnumComp = 0,
IntComp = 0, StringComp = 0):
self.PtrComp = PtrComp
self.Discr = Discr
self.EnumComp = EnumComp
self.IntComp = IntComp
self.StringComp = StringComp
def copy(self):
return Record(self.PtrComp, self.Discr, self.EnumComp,
self.IntComp, self.StringComp)
TRUE = 1
FALSE = 0
def main(loops=LOOPS):
benchtime, stones = pystones(loops)
print("Pystone(%s) time for %d passes = %g" % \
(__version__, loops, benchtime))
print("This machine benchmarks at %g pystones/second" % stones)
def pystones(loops=LOOPS):
return Proc0(loops)
IntGlob = 0
BoolGlob = FALSE
Char1Glob = '\0'
Char2Glob = '\0'
Array1Glob = [0]*51
Array2Glob = [x[:] for x in [Array1Glob]*51]
PtrGlb = None
PtrGlbNext = None
def Proc0(loops=LOOPS):
global IntGlob
global BoolGlob
global Char1Glob
global Char2Glob
global Array1Glob
global Array2Glob
global PtrGlb
global PtrGlbNext
starttime = time.perf_counter()
for i in range(loops):
pass
nulltime = time.perf_counter() - starttime
PtrGlbNext = Record()
PtrGlb = Record()
PtrGlb.PtrComp = PtrGlbNext
PtrGlb.Discr = Ident1
PtrGlb.EnumComp = Ident3
PtrGlb.IntComp = 40
PtrGlb.StringComp = "DHRYSTONE PROGRAM, SOME STRING"
String1Loc = "DHRYSTONE PROGRAM, 1'ST STRING"
Array2Glob[8][7] = 10
starttime = time.perf_counter()
for i in range(loops):
Proc5()
Proc4()
IntLoc1 = 2
IntLoc2 = 3
String2Loc = "DHRYSTONE PROGRAM, 2'ND STRING"
EnumLoc = Ident2
BoolGlob = not Func2(String1Loc, String2Loc)
while IntLoc1 < IntLoc2:
IntLoc3 = 5 * IntLoc1 - IntLoc2
IntLoc3 = Proc7(IntLoc1, IntLoc2)
IntLoc1 = IntLoc1 + 1
Proc8(Array1Glob, Array2Glob, IntLoc1, IntLoc3)
PtrGlb = Proc1(PtrGlb)
CharIndex = 'A'
while CharIndex <= Char2Glob:
if EnumLoc == Func1(CharIndex, 'C'):
EnumLoc = Proc6(Ident1)
CharIndex = chr(ord(CharIndex)+1)
IntLoc3 = IntLoc2 * IntLoc1
IntLoc2 = IntLoc3 / IntLoc1
IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1
IntLoc1 = Proc2(IntLoc1)
benchtime = time.perf_counter() - starttime - nulltime
if benchtime == 0.0:
loopsPerBenchtime = 0.0
else:
loopsPerBenchtime = (loops / benchtime)
return benchtime, loopsPerBenchtime
def Proc1(PtrParIn):
PtrParIn.PtrComp = NextRecord = PtrGlb.copy()
PtrParIn.IntComp = 5
NextRecord.IntComp = PtrParIn.IntComp
NextRecord.PtrComp = PtrParIn.PtrComp
NextRecord.PtrComp = Proc3(NextRecord.PtrComp)
if NextRecord.Discr == Ident1:
NextRecord.IntComp = 6
NextRecord.EnumComp = Proc6(PtrParIn.EnumComp)
NextRecord.PtrComp = PtrGlb.PtrComp
NextRecord.IntComp = Proc7(NextRecord.IntComp, 10)
else:
PtrParIn = NextRecord.copy()
NextRecord.PtrComp = None
return PtrParIn
def Proc2(IntParIO):
IntLoc = IntParIO + 10
while 1:
if Char1Glob == 'A':
IntLoc = IntLoc - 1
IntParIO = IntLoc - IntGlob
EnumLoc = Ident1
if EnumLoc == Ident1:
break
return IntParIO
def Proc3(PtrParOut):
global IntGlob
if PtrGlb is not None:
PtrParOut = PtrGlb.PtrComp
else:
IntGlob = 100
PtrGlb.IntComp = Proc7(10, IntGlob)
return PtrParOut
def Proc4():
global Char2Glob
BoolLoc = Char1Glob == 'A'
BoolLoc = BoolLoc or BoolGlob
Char2Glob = 'B'
def Proc5():
global Char1Glob
global BoolGlob
Char1Glob = 'A'
BoolGlob = FALSE
def Proc6(EnumParIn):
EnumParOut = EnumParIn
if not Func3(EnumParIn):
EnumParOut = Ident4
if EnumParIn == Ident1:
EnumParOut = Ident1
elif EnumParIn == Ident2:
if IntGlob > 100:
EnumParOut = Ident1
else:
EnumParOut = Ident4
elif EnumParIn == Ident3:
EnumParOut = Ident2
elif EnumParIn == Ident4:
pass
elif EnumParIn == Ident5:
EnumParOut = Ident3
return EnumParOut
def Proc7(IntParI1, IntParI2):
IntLoc = IntParI1 + 2
IntParOut = IntParI2 + IntLoc
return IntParOut
def Proc8(Array1Par, Array2Par, IntParI1, IntParI2):
global IntGlob
IntLoc = IntParI1 + 5
Array1Par[IntLoc] = IntParI2
Array1Par[IntLoc+1] = Array1Par[IntLoc]
Array1Par[IntLoc+30] = IntLoc
for IntIndex in range(IntLoc, IntLoc+2):
Array2Par[IntLoc][IntIndex] = IntLoc
Array2Par[IntLoc][IntLoc-1] = Array2Par[IntLoc][IntLoc-1] + 1
Array2Par[IntLoc+20][IntLoc] = Array1Par[IntLoc]
IntGlob = 5
def Func1(CharPar1, CharPar2):
CharLoc1 = CharPar1
CharLoc2 = CharLoc1
if CharLoc2 != CharPar2:
return Ident1
else:
return Ident2
def Func2(StrParI1, StrParI2):
IntLoc = 1
while IntLoc <= 1:
if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1:
CharLoc = 'A'
IntLoc = IntLoc + 1
if CharLoc >= 'W' and CharLoc <= 'Z':
IntLoc = 7
if CharLoc == 'X':
return TRUE
else:
if StrParI1 > StrParI2:
IntLoc = IntLoc + 7
return TRUE
else:
return FALSE
def Func3(EnumParIn):
EnumLoc = EnumParIn
if EnumLoc == Ident3: return TRUE
return FALSE
if __name__ == '__main__':
import sys
def error(msg):
print(msg, end=' ', file=sys.stderr)
print("usage: %s [number_of_loops]" % sys.argv[0], file=sys.stderr)
sys.exit(100)
nargs = len(sys.argv) - 1
if nargs > 1:
error("%d arguments are too many;" % nargs)
elif nargs == 1:
try: loops = int(sys.argv[1])
except ValueError:
error("Invalid argument %r;" % sys.argv[1])
else:
loops = LOOPS
main(loops)
| 7,444 | 26.472325 | 75 | py |
scalene | scalene-master/test/testpyt.py | # -*- coding: utf-8 -*-
import random
import torch
class DynamicNet(torch.nn.Module):
def __init__(self, D_in, H, D_out):
"""
In the constructor we construct three nn.Linear instances that we will use
in the forward pass.
"""
super(DynamicNet, self).__init__()
self.input_linear = torch.nn.Linear(D_in, H)
self.middle_linear = torch.nn.Linear(H, H)
self.output_linear = torch.nn.Linear(H, D_out)
def forward(self, x):
"""
For the forward pass of the model, we randomly choose either 0, 1, 2, or 3
and reuse the middle_linear Module that many times to compute hidden layer
representations.
Since each forward pass builds a dynamic computation graph, we can use normal
Python control-flow operators like loops or conditional statements when
defining the forward pass of the model.
Here we also see that it is perfectly safe to reuse the same Module many
times when defining a computational graph. This is a big improvement from Lua
Torch, where each Module could be used only once.
"""
h_relu = self.input_linear(x).clamp(min=0)
for _ in range(random.randint(0, 3)):
h_relu = self.middle_linear(h_relu).clamp(min=0)
y_pred = self.output_linear(h_relu)
return y_pred
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold inputs and outputs
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
# Construct our model by instantiating the class defined above
model = DynamicNet(D_in, H, D_out)
# Construct our loss function and an Optimizer. Training this strange model with
# vanilla stochastic gradient descent is tough, so we use momentum
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9)
for t in range(500):
# Forward pass: Compute predicted y by passing x to the model
y_pred = model(x)
# Compute and print loss
loss = criterion(y_pred, y)
if t % 100 == 99:
print(t, loss.item())
# Zero gradients, perform a backward pass, and update the weights.
optimizer.zero_grad()
loss.backward()
optimizer.step()
| 2,339 | 34.454545 | 85 | py |
scalene | scalene-master/test/test-pprofile.py | import time
import argparse
def do_work_fn(x, i):
return (x >> 2) | (i & x)
def inline_loop(x, its):
for i in range(its): # 9500000
x = x | (x >> 2) | (i & x)
return x
def fn_call_loop(x, its):
    for i in range(its):  # 500000
x = x | do_work_fn(x, i)
return x
def main():
parser = argparse.ArgumentParser(description='Test time breakdown.')
parser.add_argument('--inline', dest='inline', type=int, default=9500000, help="inline iterations")
parser.add_argument('--fn_call', dest='fn_call', type=int, default=500000, help="function call iterations")
args = parser.parse_args()
x = 0
start_fn_call = time.perf_counter()
x = fn_call_loop(x, args.fn_call)
elapsed_fn_call = time.perf_counter() - start_fn_call
print(f"elapsed fn call = {elapsed_fn_call}")
start_inline_loop = time.perf_counter()
x = inline_loop(x, args.inline)
elapsed_inline_loop = time.perf_counter() - start_inline_loop
print(f"elapsed inline loop = {elapsed_inline_loop}")
print(f"ratio fn_call/total = {100*(elapsed_fn_call/(elapsed_fn_call+elapsed_inline_loop)):.2f}%")
print(f"ratio inline/total = {100*(elapsed_inline_loop/(elapsed_fn_call+elapsed_inline_loop)):.2f}%")
if __name__ == '__main__':
main()
# prof = pprofile.StatisticalProfile()
#with prof():
# main()
# prof.print_stats()
# prof.callgrind(sys.stdout)
| 1,418 | 30.533333 | 111 | py |
scalene | scalene-master/test/test-size.py | from __future__ import print_function
from sys import getsizeof, stderr
from itertools import chain
from collections import deque
try:
from reprlib import repr
except ImportError:
pass
def total_size(o, handlers={}, verbose=False):
""" Returns the approximate memory footprint an object and all of its contents.
Automatically finds the contents of the following builtin containers and
their subclasses: tuple, list, deque, dict, set and frozenset.
To search other containers, add handlers to iterate over their contents:
handlers = {SomeContainerClass: iter,
OtherContainerClass: OtherContainerClass.get_elements}
"""
dict_handler = lambda d: chain.from_iterable(d.items())
all_handlers = {tuple: iter,
list: iter,
deque: iter,
dict: dict_handler,
set: iter,
frozenset: iter,
}
all_handlers.update(handlers) # user handlers take precedence
seen = set() # track which object id's have already been seen
default_size = getsizeof(0) # estimate sizeof object without __sizeof__
def sizeof(o):
if id(o) in seen: # do not double count the same object
return 0
seen.add(id(o))
s = getsizeof(o, default_size)
if verbose:
print(s, type(o), repr(o), file=stderr)
for typ, handler in all_handlers.items():
if isinstance(o, typ):
s += sum(map(sizeof, handler(o)))
break
return s
return sizeof(o)
#@profile
def doit():
print("HERE WE GO")
q1 = list(range(0,2000))
q2 = list(range(0,20000))
q3 = list(range(0,200000))
r = range(0,2000000)
q4 = []
for i in r:
q4.append(i)
# q4 = list(r)
z = 2000000 * getsizeof(1)
print(z)
print("q4", total_size(q4)/(1024*1024))
del q4
#print("q1", total_size(q1)/(1024*1024))
#print("q2", total_size(q2)/(1024*1024))
#print("q3", total_size(q3)/(1024*1024))
for i in range(12):
doit()
| 2,145 | 29.225352 | 86 | py |
scalene | scalene-master/test/testflask-driver.py | import os
import time
from random import random
from requests import get
count = 1
while True:
    print(count)
    count += 1
get(f"http://localhost:5000/{random()}")
| 171 | 13.333333 | 44 | py |
scalene | scalene-master/test/test-martinheinz.py | from decimal import *
def exp(x):
getcontext().prec += 2
i, lasts, s, fact, num = 0, 0, 1, 1, 1
while s != lasts:
lasts = s
i += 1
fact *= i
num *= x
s += num / fact
getcontext().prec -= 2
print(+s)
return +s
import time
start = time.time()
print("Original:")
d1_orig = exp(Decimal(150))
d2_orig = exp(Decimal(400))
d3_orig = exp(Decimal(3000))
elapsed_original = time.time() - start
print("Elapsed time, original (s): ", elapsed_original)
def exp_opt(x):
getcontext().prec += 2
i, lasts, s, fact, num = 0, 0, 1, 1, 1
nf = Decimal(1) ### = num / fact
while s != lasts:
lasts = s
i += 1
# was: fact *= i
# was: num *= x
nf *= (x / i) ### update nf to be num / fact
s += nf ### was: s += num / fact
getcontext().prec -= 2
print(+s)
return +s
start = time.time()
print("Optimized:")
d1_opt = exp_opt(Decimal(150))
d2_opt = exp_opt(Decimal(400))
d3_opt = exp_opt(Decimal(3000))
elapsed_optimized = time.time() - start
print("Elapsed time, optimized (s): ", elapsed_optimized)
print("Improvement: ", elapsed_original / elapsed_optimized)
assert d1_orig == d1_opt
assert d2_orig == d2_opt
assert d3_orig == d3_opt
print("All equivalent? ", d1_orig == d1_opt and d2_orig == d2_opt and d3_orig == d3_opt)
| 1,355 | 19.861538 | 88 | py |
scalene | scalene-master/test/multiprocessing_test.py | import logging
import multiprocessing
from time import sleep, perf_counter
# import faulthandler
# faulthandler.enable()
# import signal
# import os
# multiprocessing.log_to_stderr(logging.DEBUG)
# from multiprocessing.spawn import spawn_main
# import scalene.replacement_pjoin
# Stolen from https://stackoverflow.com/questions/15347174/python-finding-prime-factors
class Integer(object):
def __init__(self, x):
self.x = x
def largest_prime_factor(n):
for i in range(10):
x = [Integer(i * i) for i in range(80000)]
# sleep(1)
a = x[50]
print("\033[91mprogress ", n, i, a.x, '\033[0m')
print("Done")
# range_obj = range (65535588555555555, 65535588555555557)
range_obj = range(4)
if __name__ == "__main__":
# import __main__
# x = [largest_prime_factor(i) for i in range_obj]
t0 = perf_counter()
handles = [multiprocessing.Process(target=largest_prime_factor, args=(i,)) for i in range_obj]
# handles = [multiprocessing.Process(target=largest_prime_factor, args=(1000000181,))]
for handle in handles:
print("Starting", handle)
handle.start()
# multiprocessing.popen_fork.Popen
# try:
for handle in handles:
print("Joining", handle)
handle.join()
# except KeyboardInterrupt:
# for handle in handles:
# try:
# os.kill(handle.pid, signal.SIGSEGV)
# except:
# pass
# exit(1)
dt = perf_counter() - t0
print(f"Total time: {dt}")
| 1,539 | 28.615385 | 98 | py |
scalene | scalene-master/test/torchtest.py | import torch
import math
def torchtest():
dtype = torch.float
#device = torch.device("cpu")
device = torch.device("cuda:0") # Uncomment this to run on GPU
# device = torch.device("cuda") # Uncomment this to run on GPU
# Create Tensors to hold input and outputs.
# By default, requires_grad=False, which indicates that we do not need to
# compute gradients with respect to these Tensors during the backward pass.
# x = torch.linspace(-math.pi, math.pi, 2000, device=device, dtype=dtype)
q = torch.linspace(-math.pi, math.pi, 5000000, device=device, dtype=dtype)
x = torch.linspace(-math.pi, math.pi, 5000000, device=device, dtype=dtype)
y = torch.sin(x)
# Create random Tensors for weights. For a third order polynomial, we need
# 4 weights: y = a + b x + c x^2 + d x^3
# Setting requires_grad=True indicates that we want to compute gradients with
# respect to these Tensors during the backward pass.
a = torch.randn((), device=device, dtype=dtype, requires_grad=True)
b = torch.randn((), device=device, dtype=dtype, requires_grad=True)
c = torch.randn((), device=device, dtype=dtype, requires_grad=True)
d = torch.randn((), device=device, dtype=dtype, requires_grad=True)
learning_rate = 1e-6
for t in range(2000):
# Forward pass: compute predicted y using operations on Tensors.
y_pred = a + b * x + c * x ** 2 + d * x ** 3
# Compute and print loss using operations on Tensors.
# Now loss is a Tensor of shape (1,)
# loss.item() gets the scalar value held in the loss.
# loss = (y_pred - y).pow(2).sum()
loss = (y_pred - y).sum()
if t % 100 == 99:
print(t, loss.item())
# Use autograd to compute the backward pass. This call will compute the
# gradient of loss with respect to all Tensors with requires_grad=True.
# After this call a.grad, b.grad. c.grad and d.grad will be Tensors holding
# the gradient of the loss with respect to a, b, c, d respectively.
loss.backward()
# Manually update weights using gradient descent. Wrap in torch.no_grad()
# because weights have requires_grad=True, but we don't need to track this
# in autograd.
with torch.no_grad():
a -= learning_rate * a.grad
b -= learning_rate * b.grad
c -= learning_rate * c.grad
d -= learning_rate * d.grad
# Manually zero the gradients after updating weights
a.grad = None
b.grad = None
c.grad = None
d.grad = None
print(f'Result: y = {a.item()} + {b.item()} x + {c.item()} x^2 + {d.item()} x^3')
torchtest()
| 2,736 | 41.765625 | 85 | py |
scalene | scalene-master/test/new_mp_test.py | import multiprocessing
from time import sleep, perf_counter
# from multiprocessing.spawn import spawn_main
# import scalene.replacement_pjoin
# Stolen from https://stackoverflow.com/questions/15347174/python-finding-prime-factors
class Integer(object):
def __init__(self, x):
self.x = x
def largest_prime_factor(n):
for i in range(10):
x = [Integer(i * i) for i in range(80000)]
# sleep(1)
a = x[50]
print("\033[91mprogress ", n, i, a.x, '\033[0m')
print("Done")
# range_obj = range (65535588555555555, 65535588555555557)
range_obj = range(4)
if __name__ == "__main__":
# import __main__
# x = [largest_prime_factor(i) for i in range_obj]
t0 = perf_counter()
handles = [multiprocessing.Process(target=largest_prime_factor, args=(i,)) for i in range_obj]
# handles = [multiprocessing.Process(target=largest_prime_factor, args=(1000000181,))]
for handle in handles:
# print("Starting", handle)
handle.start()
# multiprocessing.popen_fork.Popen
for handle in handles:
# print("Joining", handle)
handle.join()
dt = perf_counter() - t0
print(f"Total time: {dt}") | 1,181 | 35.9375 | 98 | py |
scalene | scalene-master/test/test-memory.py | import numpy as np
import sys
x = np.ones((1,1))
print(sys.getsizeof(x) / 1048576)
x = np.ones((1000,1000))
print(sys.getsizeof(x) / 1048576)
x = np.ones((1000,2000))
print(sys.getsizeof(x) / 1048576)
x = np.ones((1000,20000))
print(sys.getsizeof(x) / 1048576)
@profile
def allocate():
for i in range(100):
x = np.ones((1000,1000))
x = np.ones((1,1))
x = np.ones((1,1))
x = np.ones((1,1))
x = np.ones((1000,2000))
x = np.ones((1,1))
x = np.ones((1,1))
x = np.ones((1,1))
x = np.ones((1000,20000))
x = 1
x += 1
x += 1
x += 1
allocate()
| 650 | 17.6 | 33 | py |
scalene | scalene-master/test/pool-test.py | import multiprocessing
pool = multiprocessing.Pool(processes=1)
pool.terminate()
| 81 | 19.5 | 40 | py |
scalene | scalene-master/test/threads-test.py | import threading
import sys
import numpy as np
# Disable the @profile decorator if none has been declared.
try:
# Python 2
import __builtin__ as builtins
except ImportError:
# Python 3
import builtins
try:
builtins.profile
except AttributeError:
# No line profiler, provide a pass-through version
def profile(func): return func
builtins.profile = profile
class MyThread(threading.Thread):
@profile
def run(self):
z = 0
        z = np.random.uniform(0, 100, size=2 * 50000000)
print("thread1")
class MyThread2(threading.Thread):
@profile
def run(self):
z = 0
for i in range(50000000 // 2):
z += 1
print("thread2")
use_threads = True
# use_threads = False
if use_threads:
t1 = MyThread()
t2 = MyThread2()
t1.start()
t2.start()
t1.join()
t2.join()
else:
t1 = MyThread()
t1.run()
t2 = MyThread2()
t2.run()
| 972 | 16.690909 | 59 | py |
scalene | scalene-master/test/testflask.py | from flask import Flask
app = Flask(__name__)
cache = {}
@app.route("/<page>")
def index(page):
if page not in cache:
cache[page] = f"<h1>Welcome to {page}</h1>"
return cache[page]
if __name__ == "__main__":
app.run()
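# Exercise this server with testflask-driver.py (in this same test directory),
# which issues repeated GET requests against http://localhost:5000/.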
| 248 | 13.647059 | 51 | py |
scalene | scalene-master/test/test_sparkline.py | import pytest
from scalene.sparkline import SparkLine
@pytest.fixture(name="sl")
def sparkline() -> SparkLine:
return SparkLine()
def test_get_bars(sl):
bar = sl._get_bars()
assert bar == "▁▂▃▄▅▆▇█"
def test_get_bars___in_wsl(sl, monkeypatch):
monkeypatch.setenv("WSL_DISTRO_NAME", "Some WSL distro name")
bar = sl._get_bars()
assert bar == "▄▄■■■▀▀▀"
def test_get_bars__in_wsl_and_windows_terminal(sl, monkeypatch):
monkeypatch.setenv("WSL_DISTRO_NAME", "Some WSL distro name")
monkeypatch.setenv("WT_PROFILE_ID", "Some Windows Terminal id")
bar = sl._get_bars()
assert bar == "▁▂▃▄▅▆▇█"
def test_generate(sl):
numbers = [1, 2, 3, 4, 5, 6, 7, 8]
result = sl.generate(numbers)
assert result == (1, 8, "▁▂▃▄▅▆▇█")
def test_generate__up_and_down(sl):
numbers = [1, 2, 3, 4, 5, 6, 7, 8, 7, 6, 5, 4, 3, 2, 1]
result = sl.generate(numbers)
assert result == (1, 8, "▁▂▃▄▅▆▇█▇▆▅▄▃▂▁")
def test_generate__all_zeroes(sl):
numbers = [0, 0, 0]
result = sl.generate(numbers)
assert result == (0, 0, '')
def test_generate__with_negative_values(sl):
numbers = [1, 2, 3, -4, 5, -6, 7, 8]
result = sl.generate(numbers)
assert result == (0.0, 8.0, '▂▃▄▁▆▁██')
def test_generate__with_min(sl):
numbers = [1, 2, 3, 4, 5, 6, 7, 8]
result = sl.generate(numbers, minimum=0)
assert result == (0, 8.0, '▂▃▄▅▆▇██')
def test_generate__with_max_same_as_actual_max(sl):
numbers = [1, 2, 3, 4, 5, 6, 7, 8]
result = sl.generate(numbers, maximum=8)
assert result == (1.0, 8, '▁▂▃▄▅▆▇█')
def test_generate__with_max_below_actual_max(sl):
numbers = [1, 2, 3, 4, 5, 6, 7, 8]
result = sl.generate(numbers, maximum=6)
assert result == (1.0, 6, '▁▂▄▅▇███')
| 1,784 | 19.755814 | 67 | py |
scalene | scalene-master/test/testtf.py | import tensorflow as tf
from time import perf_counter
def config():
num_threads = 16
tf.config.threading.set_inter_op_parallelism_threads(
num_threads
)
tf.config.threading.set_intra_op_parallelism_threads(
num_threads
)
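# Note: config() is defined but never called below; invoke it before
# run_benchmark() if the 16-thread inter/intra-op parallelism settings are
# actually wanted.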
def run_benchmark():
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10)
])
predictions = model(x_train[:1]).numpy()
print("predictions", predictions)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer='adam',
loss=loss_fn,
metrics=['accuracy'])
t0 = perf_counter()
model.fit(x_train, y_train, epochs=5)
model.evaluate(x_test, y_test, verbose=2)
dt = perf_counter() - t0
print(f"Total time: {dt}")
run_benchmark()
| 1,116 | 26.925 | 77 | py |
scalene | scalene-master/test/small_mp_test.py | import multiprocessing
import faulthandler
import os
import signal
from time import sleep
import threading
def do_very_little():
sleep(1)
print("In subprocess")
print(threading.enumerate())
if __name__ == "__main__":
print("Starting")
p = multiprocessing.Process(target=do_very_little)
p.start()
print("Joining")
p.join()
print("Joined", p)
print("exiting") | 400 | 18.095238 | 54 | py |
scalene | scalene-master/test/testme.py | import numpy as np
#import math
from numpy import linalg as LA
arr = [i for i in range(1,1000)]
def doit1(x):
# x = [i*i for i in range(1,1000)][0]
y = 1
# w, v = LA.eig(np.diag(arr)) # (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)))
x = [i*i for i in range(0,100000)][99999]
y1 = [i*i for i in range(0,200000)][199999]
z1 = [i for i in range(0,300000)][299999]
z = x * y
# z = np.multiply(x, y)
return z
def doit2(x):
i = 0
# zarr = [math.cos(13) for i in range(1,100000)]
# z = zarr[0]
z = 0.1
while i < 100000:
# z = math.cos(13)
# z = np.multiply(x,x)
# z = np.multiply(z,z)
# z = np.multiply(z,z)
z = z * z
z = x * x
z = z * z
z = z * z
i += 1
return z
def doit3(x):
z = x + 1
z = x + 1
z = x + 1
z = x + z
z = x + z
# z = np.cos(x)
return z
def stuff():
y = np.random.randint(1, 100, size=5000000)[4999999]
x = 1.01
for i in range(1,10):
print(i)
for j in range(1,10):
x = doit1(x)
x = doit2(x)
x = doit3(x)
x = 1.01
return x
import sys
print("TESTME")
print(sys.argv)
stuff()
| 1,207 | 18.483871 | 68 | py |
scalene | scalene-master/test/test_timers.py | import signal
import time
start = -1
loop = 10
def callback(*args):
global loop
global start
print(time.perf_counter() - start)
start = time.perf_counter()
loop -= 1
signal.signal(signal.SIGALRM, callback)
start = time.perf_counter()
signal.setitimer(signal.ITIMER_REAL, 5, 1)
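# setitimer(ITIMER_REAL, 5, 1) delivers the first SIGALRM after 5 seconds and
# one every second thereafter; the handler prints the wall-clock gap between
# consecutive firings, so the output should start near 5 and settle near 1.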
i = 0
while loop > 0:
i += 1
time.sleep(0.1) | 354 | 15.136364 | 42 | py |
scalene | scalene-master/test/issues/test-issue74.py | import gevent
def calc(a):
x = 0
for i in range(1000000):
x += 1
gevent.sleep(a)
g1 = gevent.spawn(calc, 1)
g2 = gevent.spawn(calc, 2)
g3 = gevent.spawn(calc, 3)
g1.start()
g2.start()
g3.start()
g1.join()
g2.join()
g3.join()
| 247 | 12.777778 | 28 | py |
scalene | scalene-master/test/issues/test-issue130.py | from pyproj import Proj
import time
time.sleep(1)
time.sleep(0.1)
| 67 | 10.333333 | 23 | py |
scalene | scalene-master/test/issues/test-issue156.py | import numpy as np
class A:
def __init__(self, n):
self.arr = np.random.rand(n)
self.lst = [1] * n
print(n)
if __name__ == '__main__':
a = A(50_000_000)
| 188 | 14.75 | 36 | py |
scalene | scalene-master/test/issues/test-issue256.py | ret_value = dict()
for k in range(10**7):
temp = k*2
ret_value[k] = temp
| 82 | 12.833333 | 23 | py |
scalene | scalene-master/test/issues/test-issue193.py | import time
def test_cls_in_locals():
cls = "This value is not a class"
time.sleep(0.5)
if __name__ == "__main__":
test_cls_in_locals()
| 151 | 14.2 | 37 | py |
scalene | scalene-master/test/issues/test-issue124.py | #!/usr/bin/env python3
import time
time.sleep(5)
x = 0
for i in range(10000000):
x += 1
print("done")
| 111 | 7.615385 | 25 | py |
scalene | scalene-master/test/issues/test-issue31.py | import numpy as np
def main1():
# Before optimization
x = np.array(range(10**7))
y = np.array(np.random.uniform(0, 100, size=10**8))
def main2():
# After optimization, spurious `np.array` removed.
x = np.array(range(10**7))
y = np.random.uniform(0, 100, size=10**8)
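# In main1 the outer np.array copies the ndarray that np.random.uniform has
# already materialized, so roughly 800 MB is held twice before the temporary
# is freed; main2 drops the wrapper and should roughly halve peak memory.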
main1()
main2()
| 311 | 17.352941 | 55 | py |
scalene | scalene-master/test/issues/test-issue266.py | import pandas as pd
import numpy as np
import gc
def f():
print('called f')
#Uses around 4GB of memory when looped once
df = np.ones(500000000)
#Uses around 20GB of memory when looped 5 times
for i in range(0,5):
f()
| 239 | 17.461538 | 47 | py |
scalene | scalene-master/test/issues/test-issue167.py | import time
import numpy as np
import pandas as pd
# This assumes memory_profiler is installed if you want to use "@profile"
# on a function; if not, the pass-through decorator below makes it a no-op.
if 'profile' not in dir():
def profile(fn):
return fn
SIZE = 10_000_000
@profile
def get_mean_for_indicator_poor(df, indicator):
# poor way to use a groupby here, causes big allocation
gpby = df.groupby('indicator')
means = gpby.mean() # means by column
means_for_ind = means.loc[indicator]
total = means_for_ind.sum()
return total
@profile
def get_mean_for_indicator_better(df, indicator, rnd_cols):
# more memory efficient and faster way to solve this challenge
df_sub = df.query('indicator==@indicator')[rnd_cols]
means_for_ind = df_sub.mean() # means by column
total = means_for_ind.sum() # sum of rows
return total
@profile
def run():
arr = np.random.random((SIZE, 10))
print(f"{arr.shape} shape for our array")
df = pd.DataFrame(arr)
rnd_cols = [f"c_{n}" for n in df.columns]
df.columns = rnd_cols
# make a big dataframe with an indicator column and lots of random data
df2 = pd.DataFrame({'indicator' : np.random.randint(0, 10, SIZE)})
# deliberately overwrite the first df
df = pd.concat((df2, df), axis=1) # PART OF DEMO - unexpected copy=True forces an expensive copy
print("Head of our df:")
print(df.head())
print("Print results to check that we get the result")
indicator = 2
print(f"Mean for indicator {indicator} on better implementation {get_mean_for_indicator_better(df, indicator, rnd_cols):0.5f}")
print(f"Mean for indicator {indicator} on poor implementation: {get_mean_for_indicator_poor(df, indicator):0.5f}")
if __name__ == "__main__":
run() | 1,804 | 31.818182 | 131 | py |
scalene | scalene-master/test/original/bm_spectral_norm.py | """
MathWorld: "Hundred-Dollar, Hundred-Digit Challenge Problems", Challenge #3.
http://mathworld.wolfram.com/Hundred-DollarHundred-DigitChallengeProblems.html
The Computer Language Benchmarks Game
http://benchmarksgame.alioth.debian.org/u64q/spectralnorm-description.html#spectralnorm
Contributed by Sebastien Loisel
Fixed by Isaac Gouy
Sped up by Josh Goldfoot
Dirtily sped up by Simon Descarpentries
Concurrency by Jason Stitt
"""
from six.moves import xrange, zip as izip
DEFAULT_N = 130
def eval_A(i, j):
return 1.0 / ((i + j) * (i + j + 1) // 2 + i + 1)
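# eval_A(i, j) indexes the infinite matrix of the spectral-norm challenge:
# with k = i + j, the entry is 1 / (k*(k+1)/2 + i + 1), i.e. the reciprocal
# of an integer counted off along the antidiagonals; for example,
# eval_A(0, 0) == 1.0 and eval_A(1, 0) == 1/3.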
def eval_times_u(func, u):
    return [func((i, u)) for i in xrange(len(u))]
def eval_AtA_times_u(u):
return eval_times_u(part_At_times_u, eval_times_u(part_A_times_u, u))
def part_A_times_u(i_u):
i, u = i_u
partial_sum = 0
for j, u_j in enumerate(u):
partial_sum += eval_A(i, j) * u_j
return partial_sum
def part_At_times_u(i_u):
i, u = i_u
partial_sum = 0
for j, u_j in enumerate(u):
partial_sum += eval_A(j, i) * u_j
return partial_sum
def bench_spectral_norm(loops):
range_it = xrange(loops)
# t0 = pyperf.perf_counter()
for _ in range_it:
u = [1] * DEFAULT_N
for dummy in xrange(10):
v = eval_AtA_times_u(u)
u = eval_AtA_times_u(v)
vBv = vv = 0
for ue, ve in izip(u, v):
vBv += ue * ve
vv += ve * ve
return # pyperf.perf_counter() - t0
if __name__ == "__main__":
bench_spectral_norm(10)
# runner = pyperf.Runner()
# runner.metadata['description'] = (
# 'MathWorld: "Hundred-Dollar, Hundred-Digit Challenge Problems", '
# 'Challenge #3.')
# runner.bench_time_func('spectral_norm', bench_spectral_norm)
| 1,773 | 22.653333 | 87 | py |
scalene | scalene-master/test/original/bm_raytrace.py | """
This file contains definitions for a simple raytracer.
Copyright Callum and Tony Garnock-Jones, 2008.
This file may be freely redistributed under the MIT license,
http://www.opensource.org/licenses/mit-license.php
From http://www.lshift.net/blog/2008/10/29/toy-raytracer-in-python
"""
import array
import math
import pyperf
from six.moves import xrange
DEFAULT_WIDTH = 100
DEFAULT_HEIGHT = 100
EPSILON = 0.00001
class Vector(object):
def __init__(self, initx, inity, initz):
self.x = initx
self.y = inity
self.z = initz
def __str__(self):
return '(%s,%s,%s)' % (self.x, self.y, self.z)
def __repr__(self):
return 'Vector(%s,%s,%s)' % (self.x, self.y, self.z)
def magnitude(self):
return math.sqrt(self.dot(self))
def __add__(self, other):
if other.isPoint():
return Point(self.x + other.x, self.y + other.y, self.z + other.z)
else:
return Vector(self.x + other.x, self.y + other.y, self.z + other.z)
def __sub__(self, other):
other.mustBeVector()
return Vector(self.x - other.x, self.y - other.y, self.z - other.z)
def scale(self, factor):
return Vector(factor * self.x, factor * self.y, factor * self.z)
def dot(self, other):
other.mustBeVector()
return (self.x * other.x) + (self.y * other.y) + (self.z * other.z)
def cross(self, other):
other.mustBeVector()
return Vector(self.y * other.z - self.z * other.y,
self.z * other.x - self.x * other.z,
self.x * other.y - self.y * other.x)
def normalized(self):
return self.scale(1.0 / self.magnitude())
def negated(self):
return self.scale(-1)
def __eq__(self, other):
return (self.x == other.x) and (self.y == other.y) and (self.z == other.z)
def isVector(self):
return True
def isPoint(self):
return False
def mustBeVector(self):
return self
def mustBePoint(self):
        raise TypeError('Vectors are not points!')
def reflectThrough(self, normal):
d = normal.scale(self.dot(normal))
return self - d.scale(2)
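# reflectThrough computes the mirror reflection r = v - 2*(v . n)*n for a
# unit normal n; the asserts below sanity-check it against the axis vectors.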
Vector.ZERO = Vector(0, 0, 0)
Vector.RIGHT = Vector(1, 0, 0)
Vector.UP = Vector(0, 1, 0)
Vector.OUT = Vector(0, 0, 1)
assert Vector.RIGHT.reflectThrough(Vector.UP) == Vector.RIGHT
assert Vector(-1, -1, 0).reflectThrough(Vector.UP) == Vector(-1, 1, 0)
class Point(object):
def __init__(self, initx, inity, initz):
self.x = initx
self.y = inity
self.z = initz
def __str__(self):
return '(%s,%s,%s)' % (self.x, self.y, self.z)
def __repr__(self):
return 'Point(%s,%s,%s)' % (self.x, self.y, self.z)
def __add__(self, other):
other.mustBeVector()
return Point(self.x + other.x, self.y + other.y, self.z + other.z)
def __sub__(self, other):
if other.isPoint():
return Vector(self.x - other.x, self.y - other.y, self.z - other.z)
else:
return Point(self.x - other.x, self.y - other.y, self.z - other.z)
def isVector(self):
return False
def isPoint(self):
return True
def mustBeVector(self):
        raise TypeError('Points are not vectors!')
def mustBePoint(self):
return self
class Sphere(object):
def __init__(self, centre, radius):
centre.mustBePoint()
self.centre = centre
self.radius = radius
def __repr__(self):
return 'Sphere(%s,%s)' % (repr(self.centre), self.radius)
def intersectionTime(self, ray):
cp = self.centre - ray.point
v = cp.dot(ray.vector)
discriminant = (self.radius * self.radius) - (cp.dot(cp) - v * v)
if discriminant < 0:
return None
else:
return v - math.sqrt(discriminant)
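    # Geometric ray/sphere test: v projects the origin-to-centre vector onto
    # the ray direction, and the discriminant compares the squared radius
    # against the squared perpendicular distance; the nearer intersection
    # time is returned (possibly negative, filtered by firstIntersection).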
def normalAt(self, p):
return (p - self.centre).normalized()
class Halfspace(object):
def __init__(self, point, normal):
self.point = point
self.normal = normal.normalized()
def __repr__(self):
return 'Halfspace(%s,%s)' % (repr(self.point), repr(self.normal))
def intersectionTime(self, ray):
v = ray.vector.dot(self.normal)
if v:
return 1 / -v
else:
return None
def normalAt(self, p):
return self.normal
class Ray(object):
def __init__(self, point, vector):
self.point = point
self.vector = vector.normalized()
def __repr__(self):
return 'Ray(%s,%s)' % (repr(self.point), repr(self.vector))
def pointAtTime(self, t):
return self.point + self.vector.scale(t)
Point.ZERO = Point(0, 0, 0)
class Canvas(object):
def __init__(self, width, height):
self.bytes = array.array('B', [0] * (width * height * 3))
for i in xrange(width * height):
self.bytes[i * 3 + 2] = 255
self.width = width
self.height = height
def plot(self, x, y, r, g, b):
i = ((self.height - y - 1) * self.width + x) * 3
self.bytes[i] = max(0, min(255, int(r * 255)))
self.bytes[i + 1] = max(0, min(255, int(g * 255)))
self.bytes[i + 2] = max(0, min(255, int(b * 255)))
def write_ppm(self, filename):
header = 'P6 %d %d 255\n' % (self.width, self.height)
with open(filename, "wb") as fp:
fp.write(header.encode('ascii'))
            fp.write(self.bytes.tobytes())
def firstIntersection(intersections):
result = None
for i in intersections:
candidateT = i[1]
if candidateT is not None and candidateT > -EPSILON:
if result is None or candidateT < result[1]:
result = i
return result
class Scene(object):
def __init__(self):
self.objects = []
self.lightPoints = []
self.position = Point(0, 1.8, 10)
self.lookingAt = Point.ZERO
self.fieldOfView = 45
self.recursionDepth = 0
def moveTo(self, p):
self.position = p
def lookAt(self, p):
self.lookingAt = p
def addObject(self, object, surface):
self.objects.append((object, surface))
def addLight(self, p):
self.lightPoints.append(p)
def render(self, canvas):
fovRadians = math.pi * (self.fieldOfView / 2.0) / 180.0
halfWidth = math.tan(fovRadians)
halfHeight = 0.75 * halfWidth
width = halfWidth * 2
height = halfHeight * 2
pixelWidth = width / (canvas.width - 1)
pixelHeight = height / (canvas.height - 1)
eye = Ray(self.position, self.lookingAt - self.position)
vpRight = eye.vector.cross(Vector.UP).normalized()
vpUp = vpRight.cross(eye.vector).normalized()
for y in xrange(canvas.height):
for x in xrange(canvas.width):
xcomp = vpRight.scale(x * pixelWidth - halfWidth)
ycomp = vpUp.scale(y * pixelHeight - halfHeight)
ray = Ray(eye.point, eye.vector + xcomp + ycomp)
colour = self.rayColour(ray)
canvas.plot(x, y, *colour)
def rayColour(self, ray):
if self.recursionDepth > 3:
return (0, 0, 0)
try:
self.recursionDepth = self.recursionDepth + 1
intersections = [(o, o.intersectionTime(ray), s)
for (o, s) in self.objects]
i = firstIntersection(intersections)
if i is None:
return (0, 0, 0) # the background colour
else:
(o, t, s) = i
p = ray.pointAtTime(t)
return s.colourAt(self, ray, p, o.normalAt(p))
finally:
self.recursionDepth = self.recursionDepth - 1
def _lightIsVisible(self, l, p):
for (o, s) in self.objects:
t = o.intersectionTime(Ray(p, l - p))
if t is not None and t > EPSILON:
return False
return True
def visibleLights(self, p):
result = []
for l in self.lightPoints:
if self._lightIsVisible(l, p):
result.append(l)
return result
def addColours(a, scale, b):
return (a[0] + scale * b[0],
a[1] + scale * b[1],
a[2] + scale * b[2])
class SimpleSurface(object):
def __init__(self, **kwargs):
self.baseColour = kwargs.get('baseColour', (1, 1, 1))
self.specularCoefficient = kwargs.get('specularCoefficient', 0.2)
self.lambertCoefficient = kwargs.get('lambertCoefficient', 0.6)
self.ambientCoefficient = 1.0 - self.specularCoefficient - self.lambertCoefficient
def baseColourAt(self, p):
return self.baseColour
def colourAt(self, scene, ray, p, normal):
b = self.baseColourAt(p)
c = (0, 0, 0)
if self.specularCoefficient > 0:
reflectedRay = Ray(p, ray.vector.reflectThrough(normal))
reflectedColour = scene.rayColour(reflectedRay)
c = addColours(c, self.specularCoefficient, reflectedColour)
if self.lambertCoefficient > 0:
lambertAmount = 0
for lightPoint in scene.visibleLights(p):
contribution = (lightPoint - p).normalized().dot(normal)
if contribution > 0:
lambertAmount = lambertAmount + contribution
lambertAmount = min(1, lambertAmount)
c = addColours(c, self.lambertCoefficient * lambertAmount, b)
if self.ambientCoefficient > 0:
c = addColours(c, self.ambientCoefficient, b)
return c
class CheckerboardSurface(SimpleSurface):
def __init__(self, **kwargs):
SimpleSurface.__init__(self, **kwargs)
self.otherColour = kwargs.get('otherColour', (0, 0, 0))
self.checkSize = kwargs.get('checkSize', 1)
def baseColourAt(self, p):
v = p - Point.ZERO
v.scale(1.0 / self.checkSize)
if ((int(abs(v.x) + 0.5)
+ int(abs(v.y) + 0.5)
+ int(abs(v.z) + 0.5)) % 2):
return self.otherColour
else:
return self.baseColour
def bench_raytrace(loops, width, height, filename):
range_it = xrange(loops)
t0 = pyperf.perf_counter()
for i in range_it:
canvas = Canvas(width, height)
s = Scene()
s.addLight(Point(30, 30, 10))
s.addLight(Point(-10, 100, 30))
s.lookAt(Point(0, 3, 0))
s.addObject(Sphere(Point(1, 3, -10), 2),
SimpleSurface(baseColour=(1, 1, 0)))
for y in xrange(6):
s.addObject(Sphere(Point(-3 - y * 0.4, 2.3, -5), 0.4),
SimpleSurface(baseColour=(y / 6.0, 1 - y / 6.0, 0.5)))
s.addObject(Halfspace(Point(0, 0, 0), Vector.UP),
CheckerboardSurface())
s.render(canvas)
dt = pyperf.perf_counter() - t0
if filename:
canvas.write_ppm(filename)
return dt
def add_cmdline_args(cmd, args):
cmd.append("--width=%s" % args.width)
cmd.append("--height=%s" % args.height)
if args.filename:
cmd.extend(("--filename", args.filename))
if __name__ == "__main__":
runner = pyperf.Runner(add_cmdline_args=add_cmdline_args)
cmd = runner.argparser
cmd.add_argument("--width",
type=int, default=DEFAULT_WIDTH,
help="Image width (default: %s)" % DEFAULT_WIDTH)
cmd.add_argument("--height",
type=int, default=DEFAULT_HEIGHT,
help="Image height (default: %s)" % DEFAULT_HEIGHT)
cmd.add_argument("--filename", metavar="FILENAME.PPM",
help="Output filename of the PPM picture")
args = runner.parse_args()
runner.metadata['description'] = "Simple raytracer"
runner.metadata['raytrace_width'] = args.width
runner.metadata['raytrace_height'] = args.height
bench_raytrace(5, args.width, args.height, args.filename)
# runner.bench_time_func('raytrace', bench_raytrace,
# args.width, args.height,
# args.filename)
| 12,142 | 28.473301 | 90 | py |
scalene | scalene-master/test/original/bm_scimark.py | from array import array
import math
import pyperf
from six.moves import xrange
class Array2D(object):
def __init__(self, w, h, data=None):
self.width = w
self.height = h
self.data = array('d', [0]) * (w * h)
if data is not None:
self.setup(data)
def _idx(self, x, y):
if 0 <= x < self.width and 0 <= y < self.height:
return y * self.width + x
raise IndexError
def __getitem__(self, x_y):
(x, y) = x_y
return self.data[self._idx(x, y)]
def __setitem__(self, x_y, val):
(x, y) = x_y
self.data[self._idx(x, y)] = val
def setup(self, data):
for y in xrange(self.height):
for x in xrange(self.width):
self[x, y] = data[y][x]
return self
def indexes(self):
for y in xrange(self.height):
for x in xrange(self.width):
yield x, y
def copy_data_from(self, other):
self.data[:] = other.data[:]
class Random(object):
MDIG = 32
ONE = 1
m1 = (ONE << (MDIG - 2)) + ((ONE << (MDIG - 2)) - ONE)
m2 = ONE << MDIG // 2
dm1 = 1.0 / float(m1)
def __init__(self, seed):
self.initialize(seed)
self.left = 0.0
self.right = 1.0
self.width = 1.0
self.haveRange = False
def initialize(self, seed):
self.seed = seed
seed = abs(seed)
jseed = min(seed, self.m1)
if (jseed % 2 == 0):
jseed -= 1
k0 = 9069 % self.m2
        k1 = 9069 // self.m2
        j0 = jseed % self.m2
        j1 = jseed // self.m2
self.m = array('d', [0]) * 17
for iloop in xrange(17):
jseed = j0 * k0
            j1 = (jseed // self.m2 + j0 * k1 + j1 * k0) % (self.m2 // 2)
j0 = jseed % self.m2
self.m[iloop] = j0 + self.m2 * j1
self.i = 4
self.j = 16
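    # This appears to be SciMark's lagged-Fibonacci generator: a 17-entry
    # state table with lags 4 and 16, from which nextDouble() forms
    # differences modulo m1 and scales them into [0, 1).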
def nextDouble(self):
I, J, m = self.i, self.j, self.m
k = m[I] - m[J]
if (k < 0):
k += self.m1
self.m[J] = k
if (I == 0):
I = 16
else:
I -= 1
self.i = I
if (J == 0):
J = 16
else:
J -= 1
self.j = J
if (self.haveRange):
return self.left + self.dm1 * float(k) * self.width
else:
return self.dm1 * float(k)
def RandomMatrix(self, a):
for x, y in a.indexes():
a[x, y] = self.nextDouble()
return a
def RandomVector(self, n):
return array('d', [self.nextDouble() for i in xrange(n)])
def copy_vector(vec):
# Copy a vector created by Random.RandomVector()
vec2 = array('d')
vec2[:] = vec[:]
return vec2
class ArrayList(Array2D):
def __init__(self, w, h, data=None):
self.width = w
self.height = h
self.data = [array('d', [0]) * w for y in xrange(h)]
if data is not None:
self.setup(data)
def __getitem__(self, idx):
if isinstance(idx, tuple):
return self.data[idx[1]][idx[0]]
else:
return self.data[idx]
def __setitem__(self, idx, val):
if isinstance(idx, tuple):
self.data[idx[1]][idx[0]] = val
else:
self.data[idx] = val
def copy_data_from(self, other):
for l1, l2 in zip(self.data, other.data):
l1[:] = l2
def SOR_execute(omega, G, cycles, Array):
for p in xrange(cycles):
for y in xrange(1, G.height - 1):
for x in xrange(1, G.width - 1):
G[x, y] = (omega * 0.25 * (G[x, y - 1] + G[x, y + 1] + G[x - 1, y]
+ G[x + 1, y])
+ (1.0 - omega) * G[x, y])
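# Successive over-relaxation: each interior cell becomes a weighted average
# of its four neighbours (weight omega) and its previous value (weight
# 1 - omega); bench_SOR below drives this with omega = 1.25.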
def bench_SOR(loops, n, cycles, Array):
range_it = xrange(loops)
t0 = pyperf.perf_counter()
for _ in range_it:
G = Array(n, n)
SOR_execute(1.25, G, cycles, Array)
return pyperf.perf_counter() - t0
def SparseCompRow_matmult(M, y, val, row, col, x, num_iterations):
range_it = xrange(num_iterations)
t0 = pyperf.perf_counter()
for _ in range_it:
for r in xrange(M):
sa = 0.0
for i in xrange(row[r], row[r + 1]):
sa += x[col[i]] * val[i]
y[r] = sa
return pyperf.perf_counter() - t0
def bench_SparseMatMult(cycles, N, nz):
x = array('d', [0]) * N
y = array('d', [0]) * N
nr = nz // N
anz = nr * N
val = array('d', [0]) * anz
col = array('i', [0]) * nz
row = array('i', [0]) * (N + 1)
row[0] = 0
for r in xrange(N):
rowr = row[r]
step = r // nr
row[r + 1] = rowr + nr
if step < 1:
step = 1
for i in xrange(nr):
col[rowr + i] = i * step
return SparseCompRow_matmult(N, y, val, row, col, x, cycles)
def MonteCarlo(Num_samples):
rnd = Random(113)
under_curve = 0
for count in xrange(Num_samples):
x = rnd.nextDouble()
y = rnd.nextDouble()
if x * x + y * y <= 1.0:
under_curve += 1
return float(under_curve) / Num_samples * 4.0
def bench_MonteCarlo(loops, Num_samples):
range_it = xrange(loops)
t0 = pyperf.perf_counter()
for _ in range_it:
MonteCarlo(Num_samples)
return pyperf.perf_counter() - t0
def LU_factor(A, pivot):
M, N = A.height, A.width
minMN = min(M, N)
for j in xrange(minMN):
jp = j
t = abs(A[j][j])
for i in xrange(j + 1, M):
ab = abs(A[i][j])
if ab > t:
jp = i
t = ab
pivot[j] = jp
if A[jp][j] == 0:
raise Exception("factorization failed because of zero pivot")
if jp != j:
A[j], A[jp] = A[jp], A[j]
if j < M - 1:
recp = 1.0 / A[j][j]
for k in xrange(j + 1, M):
A[k][j] *= recp
if j < minMN - 1:
for ii in xrange(j + 1, M):
for jj in xrange(j + 1, N):
A[ii][jj] -= A[ii][j] * A[j][jj]
def LU(lu, A, pivot):
lu.copy_data_from(A)
LU_factor(lu, pivot)
def bench_LU(cycles, N):
rnd = Random(7)
A = rnd.RandomMatrix(ArrayList(N, N))
lu = ArrayList(N, N)
pivot = array('i', [0]) * N
range_it = xrange(cycles)
t0 = pyperf.perf_counter()
for _ in range_it:
LU(lu, A, pivot)
return pyperf.perf_counter() - t0
def int_log2(n):
k = 1
log = 0
while k < n:
k *= 2
log += 1
if n != 1 << log:
raise Exception("FFT: Data length is not a power of 2: %s" % n)
return log
def FFT_num_flops(N):
return (5.0 * N - 2) * int_log2(N) + 2 * (N + 1)
def FFT_transform_internal(N, data, direction):
n = N // 2
bit = 0
dual = 1
if n == 1:
return
logn = int_log2(n)
if N == 0:
return
FFT_bitreverse(N, data)
# apply fft recursion
# this loop executed int_log2(N) times
bit = 0
while bit < logn:
w_real = 1.0
w_imag = 0.0
theta = 2.0 * direction * math.pi / (2.0 * float(dual))
s = math.sin(theta)
t = math.sin(theta / 2.0)
s2 = 2.0 * t * t
for b in range(0, n, 2 * dual):
i = 2 * b
j = 2 * (b + dual)
wd_real = data[j]
wd_imag = data[j + 1]
data[j] = data[i] - wd_real
data[j + 1] = data[i + 1] - wd_imag
data[i] += wd_real
data[i + 1] += wd_imag
for a in xrange(1, dual):
tmp_real = w_real - s * w_imag - s2 * w_real
tmp_imag = w_imag + s * w_real - s2 * w_imag
w_real = tmp_real
w_imag = tmp_imag
for b in range(0, n, 2 * dual):
i = 2 * (b + a)
j = 2 * (b + a + dual)
z1_real = data[j]
z1_imag = data[j + 1]
wd_real = w_real * z1_real - w_imag * z1_imag
wd_imag = w_real * z1_imag + w_imag * z1_real
data[j] = data[i] - wd_real
data[j + 1] = data[i + 1] - wd_imag
data[i] += wd_real
data[i + 1] += wd_imag
bit += 1
dual *= 2
def FFT_bitreverse(N, data):
n = N // 2
nm1 = n - 1
j = 0
for i in range(nm1):
ii = i << 1
jj = j << 1
k = n >> 1
if i < j:
tmp_real = data[ii]
tmp_imag = data[ii + 1]
data[ii] = data[jj]
data[ii + 1] = data[jj + 1]
data[jj] = tmp_real
data[jj + 1] = tmp_imag
while k <= j:
j -= k
k >>= 1
j += k
def FFT_transform(N, data):
FFT_transform_internal(N, data, -1)
def FFT_inverse(N, data):
    n = N // 2
norm = 0.0
FFT_transform_internal(N, data, +1)
norm = 1 / float(n)
for i in xrange(N):
data[i] *= norm
def bench_FFT(loops, N, cycles):
twoN = 2 * N
init_vec = Random(7).RandomVector(twoN)
range_it = xrange(loops)
t0 = pyperf.perf_counter()
for _ in range_it:
x = copy_vector(init_vec)
for i in xrange(cycles):
FFT_transform(twoN, x)
FFT_inverse(twoN, x)
return pyperf.perf_counter() - t0
def add_cmdline_args(cmd, args):
if args.benchmark:
cmd.append(args.benchmark)
BENCHMARKS = {
# function name => arguments
'sor': (bench_SOR, 100, 10, Array2D),
'sparse_mat_mult': (bench_SparseMatMult, 1000, 50 * 1000),
'monte_carlo': (bench_MonteCarlo, 100 * 1000,),
'lu': (bench_LU, 100,),
'fft': (bench_FFT, 1024, 50),
}
if __name__ == "__main__":
runner = pyperf.Runner(add_cmdline_args=add_cmdline_args)
runner.argparser.add_argument("benchmark", nargs='?',
choices=sorted(BENCHMARKS))
args = runner.parse_args()
if args.benchmark:
benchmarks = (args.benchmark,)
else:
benchmarks = sorted(BENCHMARKS)
for bench in benchmarks:
name = 'scimark_%s' % bench
print(name)
args = BENCHMARKS[bench]
(args[0])(10, *args[1:])
# runner.bench_time_func(name, *args)
| 10,284 | 23.546539 | 82 | py |
scalene | scalene-master/test/original/bm_sympy.py | import pyperf
from sympy import expand, symbols, integrate, tan, summation
from sympy.core.cache import clear_cache
def bench_expand():
x, y, z = symbols('x y z')
expand((1 + x + y + z) ** 20)
def bench_integrate():
x, y = symbols('x y')
f = (1 / tan(x)) ** 10
return integrate(f, x)
def bench_sum():
x, i = symbols('x i')
summation(x ** i / i, (i, 1, 400))
def bench_str():
x, y, z = symbols('x y z')
str(expand((x + 2 * y + 3 * z) ** 30))
def bench_sympy(loops, func):
timer = pyperf.perf_counter
dt = 0
for _ in range(loops):
# Don't benchmark clear_cache(), exclude it of the benchmark
clear_cache()
t0 = timer()
func()
dt += (timer() - t0)
return dt
BENCHMARKS = ("expand", "integrate", "sum", "str")
def add_cmdline_args(cmd, args):
if args.benchmark:
cmd.append(args.benchmark)
if __name__ == "__main__":
runner = pyperf.Runner(add_cmdline_args=add_cmdline_args)
runner.metadata['description'] = "SymPy benchmark"
runner.argparser.add_argument("benchmark", nargs='?',
choices=BENCHMARKS)
import gc
gc.disable()
args = runner.parse_args()
if args.benchmark:
benchmarks = (args.benchmark,)
else:
benchmarks = BENCHMARKS
for bench in benchmarks:
name = 'sympy_%s' % bench
func = globals()['bench_' + bench]
func()
# runner.bench_time_func(name, bench_sympy, func)
| 1,512 | 20.309859 | 68 | py |
scalene | scalene-master/test/original/bm_richards.py | """
based on a Java version:
Based on original version written in BCPL by Dr Martin Richards
in 1981 at Cambridge University Computer Laboratory, England
and a C++ version derived from a Smalltalk version written by
L Peter Deutsch.
Java version: Copyright (C) 1995 Sun Microsystems, Inc.
Translation from C++, Mario Wolczko
Outer loop added by Alex Jacoby
"""
from __future__ import print_function
import pyperf
from six.moves import xrange
# Task IDs
I_IDLE = 1
I_WORK = 2
I_HANDLERA = 3
I_HANDLERB = 4
I_DEVA = 5
I_DEVB = 6
# Packet types
K_DEV = 1000
K_WORK = 1001
# Packet
BUFSIZE = 4
BUFSIZE_RANGE = range(BUFSIZE)
class Packet(object):
def __init__(self, l, i, k):
self.link = l
self.ident = i
self.kind = k
self.datum = 0
self.data = [0] * BUFSIZE
def append_to(self, lst):
self.link = None
if lst is None:
return self
else:
p = lst
next = p.link
while next is not None:
p = next
next = p.link
p.link = self
return lst
# Task Records
class TaskRec(object):
pass
class DeviceTaskRec(TaskRec):
def __init__(self):
self.pending = None
class IdleTaskRec(TaskRec):
def __init__(self):
self.control = 1
self.count = 10000
class HandlerTaskRec(TaskRec):
def __init__(self):
self.work_in = None
self.device_in = None
def workInAdd(self, p):
self.work_in = p.append_to(self.work_in)
return self.work_in
def deviceInAdd(self, p):
self.device_in = p.append_to(self.device_in)
return self.device_in
class WorkerTaskRec(TaskRec):
def __init__(self):
self.destination = I_HANDLERA
self.count = 0
# Task
class TaskState(object):
def __init__(self):
self.packet_pending = True
self.task_waiting = False
self.task_holding = False
def packetPending(self):
self.packet_pending = True
self.task_waiting = False
self.task_holding = False
return self
def waiting(self):
self.packet_pending = False
self.task_waiting = True
self.task_holding = False
return self
def running(self):
self.packet_pending = False
self.task_waiting = False
self.task_holding = False
return self
def waitingWithPacket(self):
self.packet_pending = True
self.task_waiting = True
self.task_holding = False
return self
def isPacketPending(self):
return self.packet_pending
def isTaskWaiting(self):
return self.task_waiting
def isTaskHolding(self):
return self.task_holding
def isTaskHoldingOrWaiting(self):
return self.task_holding or (not self.packet_pending and self.task_waiting)
def isWaitingWithPacket(self):
return self.packet_pending and self.task_waiting and not self.task_holding
tracing = False
layout = 0
def trace(a):
global layout
layout -= 1
if layout <= 0:
print()
layout = 50
print(a, end='')
TASKTABSIZE = 10
class TaskWorkArea(object):
def __init__(self):
self.taskTab = [None] * TASKTABSIZE
self.taskList = None
self.holdCount = 0
self.qpktCount = 0
taskWorkArea = TaskWorkArea()
class Task(TaskState):
def __init__(self, i, p, w, initialState, r):
self.link = taskWorkArea.taskList
self.ident = i
self.priority = p
self.input = w
self.packet_pending = initialState.isPacketPending()
self.task_waiting = initialState.isTaskWaiting()
self.task_holding = initialState.isTaskHolding()
self.handle = r
taskWorkArea.taskList = self
taskWorkArea.taskTab[i] = self
def fn(self, pkt, r):
raise NotImplementedError
def addPacket(self, p, old):
if self.input is None:
self.input = p
self.packet_pending = True
if self.priority > old.priority:
return self
else:
p.append_to(self.input)
return old
def runTask(self):
if self.isWaitingWithPacket():
msg = self.input
self.input = msg.link
if self.input is None:
self.running()
else:
self.packetPending()
else:
msg = None
return self.fn(msg, self.handle)
def waitTask(self):
self.task_waiting = True
return self
def hold(self):
taskWorkArea.holdCount += 1
self.task_holding = True
return self.link
def release(self, i):
t = self.findtcb(i)
t.task_holding = False
if t.priority > self.priority:
return t
else:
return self
def qpkt(self, pkt):
t = self.findtcb(pkt.ident)
taskWorkArea.qpktCount += 1
pkt.link = None
pkt.ident = self.ident
return t.addPacket(pkt, self)
def findtcb(self, id):
t = taskWorkArea.taskTab[id]
if t is None:
raise Exception("Bad task id %d" % id)
return t
# DeviceTask
class DeviceTask(Task):
def __init__(self, i, p, w, s, r):
Task.__init__(self, i, p, w, s, r)
def fn(self, pkt, r):
d = r
assert isinstance(d, DeviceTaskRec)
if pkt is None:
pkt = d.pending
if pkt is None:
return self.waitTask()
else:
d.pending = None
return self.qpkt(pkt)
else:
d.pending = pkt
if tracing:
trace(pkt.datum)
return self.hold()
class HandlerTask(Task):
def __init__(self, i, p, w, s, r):
Task.__init__(self, i, p, w, s, r)
def fn(self, pkt, r):
h = r
assert isinstance(h, HandlerTaskRec)
if pkt is not None:
if pkt.kind == K_WORK:
h.workInAdd(pkt)
else:
h.deviceInAdd(pkt)
work = h.work_in
if work is None:
return self.waitTask()
count = work.datum
if count >= BUFSIZE:
h.work_in = work.link
return self.qpkt(work)
dev = h.device_in
if dev is None:
return self.waitTask()
h.device_in = dev.link
dev.datum = work.data[count]
work.datum = count + 1
return self.qpkt(dev)
# IdleTask
class IdleTask(Task):
def __init__(self, i, p, w, s, r):
Task.__init__(self, i, 0, None, s, r)
def fn(self, pkt, r):
i = r
assert isinstance(i, IdleTaskRec)
i.count -= 1
if i.count == 0:
return self.hold()
elif i.control & 1 == 0:
i.control //= 2
return self.release(I_DEVA)
else:
i.control = i.control // 2 ^ 0xd008
return self.release(I_DEVB)
# WorkTask
A = ord('A')
class WorkTask(Task):
def __init__(self, i, p, w, s, r):
Task.__init__(self, i, p, w, s, r)
def fn(self, pkt, r):
w = r
assert isinstance(w, WorkerTaskRec)
if pkt is None:
return self.waitTask()
if w.destination == I_HANDLERA:
dest = I_HANDLERB
else:
dest = I_HANDLERA
w.destination = dest
pkt.ident = dest
pkt.datum = 0
for i in BUFSIZE_RANGE: # xrange(BUFSIZE)
w.count += 1
if w.count > 26:
w.count = 1
pkt.data[i] = A + w.count - 1
return self.qpkt(pkt)
def schedule():
t = taskWorkArea.taskList
while t is not None:
if tracing:
print("tcb =", t.ident)
if t.isTaskHoldingOrWaiting():
t = t.link
else:
if tracing:
trace(chr(ord("0") + t.ident))
t = t.runTask()
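# The scheduler repeatedly takes the first task that is neither holding nor
# waiting without a packet, runs it, and follows whichever task runTask()
# hands back, until the end of the task list is reached.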
class Richards(object):
def run(self, iterations):
for i in xrange(iterations):
taskWorkArea.holdCount = 0
taskWorkArea.qpktCount = 0
IdleTask(I_IDLE, 1, 10000, TaskState().running(), IdleTaskRec())
wkq = Packet(None, 0, K_WORK)
wkq = Packet(wkq, 0, K_WORK)
WorkTask(I_WORK, 1000, wkq, TaskState(
).waitingWithPacket(), WorkerTaskRec())
wkq = Packet(None, I_DEVA, K_DEV)
wkq = Packet(wkq, I_DEVA, K_DEV)
wkq = Packet(wkq, I_DEVA, K_DEV)
HandlerTask(I_HANDLERA, 2000, wkq, TaskState(
).waitingWithPacket(), HandlerTaskRec())
wkq = Packet(None, I_DEVB, K_DEV)
wkq = Packet(wkq, I_DEVB, K_DEV)
wkq = Packet(wkq, I_DEVB, K_DEV)
HandlerTask(I_HANDLERB, 3000, wkq, TaskState(
).waitingWithPacket(), HandlerTaskRec())
wkq = None
DeviceTask(I_DEVA, 4000, wkq,
TaskState().waiting(), DeviceTaskRec())
DeviceTask(I_DEVB, 5000, wkq,
TaskState().waiting(), DeviceTaskRec())
schedule()
if taskWorkArea.holdCount == 9297 and taskWorkArea.qpktCount == 23246:
pass
else:
return False
return True
if __name__ == "__main__":
runner = pyperf.Runner()
runner.metadata['description'] = "The Richards benchmark"
richard = Richards()
richard.run(1)
# runner.bench_func('richards', richard.run, 1)
| 9,629 | 21.5 | 83 | py |
scalene | scalene-master/test/original/bm_mdp.py | import collections
from collections import defaultdict
from fractions import Fraction
import pyperf
# Disable @profile if not defined.
try:
# Python 2
import __builtin__ as builtins
except ImportError:
# Python 3
import builtins
try:
builtins.profile
except AttributeError:
# No line profiler, provide a pass-through version
def profile(func): return func
builtins.profile = profile
@profile
def topoSort(roots, getParents):
"""Return a topological sorting of nodes in a graph.
roots - list of root nodes to search from
getParents - function which returns the parents of a given node
"""
results = []
visited = set()
# Use iterative version to avoid stack limits for large datasets
stack = [(node, 0) for node in roots]
while stack:
current, state = stack.pop()
if state == 0:
# before recursing
if current not in visited:
visited.add(current)
stack.append((current, 1))
stack.extend((parent, 0) for parent in getParents(current))
else:
# after recursing
assert(current in visited)
results.append(current)
return results
@profile
def getDamages(L, A, D, B, stab, te):
x = (2 * L) // 5
x = ((x + 2) * A * B) // (D * 50) + 2
if stab:
x += x // 2
x = int(x * te)
return [(x * z) // 255 for z in range(217, 256)]
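# getDamages looks like the Generation-1 Pokemon damage formula: level,
# attack, defense, move power, STAB and type effectiveness produce a base
# value that is then spread across the 217..255 random damage multipliers.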
@profile
def getCritDist(L, p, A1, A2, D1, D2, B, stab, te):
p = min(p, Fraction(1))
norm = getDamages(L, A1, D1, B, stab, te)
crit = getDamages(L * 2, A2, D2, B, stab, te)
dist = defaultdict(Fraction)
for mult, vals in zip([1 - p, p], [norm, crit]):
mult /= len(vals)
for x in vals:
dist[x] += mult
return dist
@profile
def plus12(x):
return x + x // 8
stats_t = collections.namedtuple('stats_t', ['atk', 'df', 'speed', 'spec'])
NOMODS = stats_t(0, 0, 0, 0)
fixeddata_t = collections.namedtuple(
'fixeddata_t', ['maxhp', 'stats', 'lvl', 'badges', 'basespeed'])
halfstate_t = collections.namedtuple(
'halfstate_t', ['fixed', 'hp', 'status', 'statmods', 'stats'])
@profile
def applyHPChange(hstate, change):
hp = min(hstate.fixed.maxhp, max(0, hstate.hp + change))
return hstate._replace(hp=hp)
@profile
def applyBadgeBoosts(badges, stats):
return stats_t(*[(plus12(x) if b else x) for x, b in zip(stats, badges)])
attack_stats_t = collections.namedtuple(
'attack_stats_t', ['power', 'isspec', 'stab', 'te', 'crit'])
attack_data = {
'Ember': attack_stats_t(40, True, True, 0.5, False),
'Dig': attack_stats_t(100, False, False, 1, False),
'Slash': attack_stats_t(70, False, False, 1, True),
'Water Gun': attack_stats_t(40, True, True, 2, False),
'Bubblebeam': attack_stats_t(65, True, True, 2, False),
}
@profile
def _applyActionSide1(state, act):
me, them, extra = state
if act == 'Super Potion':
me = applyHPChange(me, 50)
return {(me, them, extra): Fraction(1)}
mdata = attack_data[act]
aind = 3 if mdata.isspec else 0
dind = 3 if mdata.isspec else 1
pdiv = 64 if mdata.crit else 512
dmg_dist = getCritDist(me.fixed.lvl, Fraction(me.fixed.basespeed, pdiv),
me.stats[aind], me.fixed.stats[aind], them.stats[
dind], them.fixed.stats[dind],
mdata.power, mdata.stab, mdata.te)
dist = defaultdict(Fraction)
for dmg, p in dmg_dist.items():
them2 = applyHPChange(them, -dmg)
dist[me, them2, extra] += p
return dist
@profile
def _applyAction(state, side, act):
if side == 0:
return _applyActionSide1(state, act)
else:
me, them, extra = state
dist = _applyActionSide1((them, me, extra), act)
return {(k[1], k[0], k[2]): v for k, v in dist.items()}
class Battle(object):
@profile
def __init__(self):
self.successors = {}
self.min = defaultdict(float)
self.max = defaultdict(lambda: 1.0)
self.frozen = set()
self.win = 4, True
self.loss = 4, False
self.max[self.loss] = 0.0
self.min[self.win] = 1.0
self.frozen.update([self.win, self.loss])
@profile
def _getSuccessorsA(self, statep):
st, state = statep
for action in ['Dig', 'Super Potion']:
yield (1, state, action)
@profile
def _applyActionPair(self, state, side1, act1, side2, act2, dist, pmult):
for newstate, p in _applyAction(state, side1, act1).items():
if newstate[0].hp == 0:
newstatep = self.loss
elif newstate[1].hp == 0:
newstatep = self.win
else:
newstatep = 2, newstate, side2, act2
dist[newstatep] += p * pmult
@profile
def _getSuccessorsB(self, statep):
st, state, action = statep
dist = defaultdict(Fraction)
for eact, p in [('Water Gun', Fraction(64, 130)),
('Bubblebeam', Fraction(66, 130))]:
priority1 = state[0].stats.speed + \
10000 * (action == 'Super Potion')
priority2 = state[1].stats.speed + 10000 * (action == 'X Defend')
if priority1 > priority2:
self._applyActionPair(state, 0, action, 1, eact, dist, p)
elif priority1 < priority2:
self._applyActionPair(state, 1, eact, 0, action, dist, p)
else:
self._applyActionPair(state, 0, action, 1, eact, dist, p / 2)
self._applyActionPair(state, 1, eact, 0, action, dist, p / 2)
return {k: float(p) for k, p in dist.items() if p > 0}
@profile
def _getSuccessorsC(self, statep):
st, state, side, action = statep
dist = defaultdict(Fraction)
for newstate, p in _applyAction(state, side, action).items():
if newstate[0].hp == 0:
newstatep = self.loss
elif newstate[1].hp == 0:
newstatep = self.win
else:
newstatep = 0, newstate
dist[newstatep] += p
return {k: float(p) for k, p in dist.items() if p > 0}
@profile
def getSuccessors(self, statep):
try:
return self.successors[statep]
except KeyError:
st = statep[0]
if st == 0:
result = list(self._getSuccessorsA(statep))
else:
if st == 1:
dist = self._getSuccessorsB(statep)
elif st == 2:
dist = self._getSuccessorsC(statep)
result = sorted(dist.items(), key=lambda t: (-t[1], t[0]))
self.successors[statep] = result
return result
@profile
def getSuccessorsList(self, statep):
if statep[0] == 4:
return []
temp = self.getSuccessors(statep)
if statep[0] != 0:
temp = list(zip(*temp))[0] if temp else []
return temp
@profile
def evaluate(self, tolerance=0.15):
badges = 1, 0, 0, 0
starfixed = fixeddata_t(59, stats_t(40, 44, 56, 50), 11, NOMODS, 115)
starhalf = halfstate_t(starfixed, 59, 0, NOMODS,
stats_t(40, 44, 56, 50))
charfixed = fixeddata_t(63, stats_t(39, 34, 46, 38), 26, badges, 65)
charhalf = halfstate_t(charfixed, 63, 0, NOMODS, applyBadgeBoosts(
badges, stats_t(39, 34, 46, 38)))
initial_state = charhalf, starhalf, 0
initial_statep = 0, initial_state
dmin, dmax, frozen = self.min, self.max, self.frozen
stateps = topoSort([initial_statep], self.getSuccessorsList)
itercount = 0
while dmax[initial_statep] - dmin[initial_statep] > tolerance:
itercount += 1
for sp in stateps:
if sp in frozen:
continue
if sp[0] == 0:
# choice node
dmin[sp] = max(dmin[sp2] for sp2 in self.getSuccessors(sp))
dmax[sp] = max(dmax[sp2] for sp2 in self.getSuccessors(sp))
else:
dmin[sp] = sum(dmin[sp2] * p for sp2,
p in self.getSuccessors(sp))
dmax[sp] = sum(dmax[sp2] * p for sp2,
p in self.getSuccessors(sp))
if dmin[sp] >= dmax[sp]:
dmax[sp] = dmin[sp] = (dmin[sp] + dmax[sp]) / 2
frozen.add(sp)
return (dmax[initial_statep] + dmin[initial_statep]) / 2
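# evaluate() is an interval form of value iteration: dmin/dmax bracket each
# state's win probability, states are swept in child-first order from
# topoSort, and a state freezes once its bounds meet; the loop stops when the
# root's interval is narrower than `tolerance`.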
@profile
def bench_mdp(loops):
expected = 0.89873589887
max_diff = 1e-6
range_it = range(loops)
t0 = pyperf.perf_counter()
for _ in range_it:
result = Battle().evaluate(0.192)
dt = pyperf.perf_counter() - t0
if abs(result - expected) > max_diff:
raise Exception("invalid result: got %s, expected %s "
"(diff: %s, max diff: %s)"
% (result, expected, result - expected, max_diff))
return dt
if __name__ == "__main__":
import time
start = time.perf_counter()
runner = pyperf.Runner()
runner.metadata['description'] = "MDP benchmark"
bench_mdp(1)
stop = time.perf_counter()
print("Time elapsed: ", stop - start)
| 9,396 | 29.911184 | 79 | py |
scalene | scalene-master/test/original/bm_pyflate.py | #!/usr/bin/env python
"""
Copyright 2006--2007-01-21 Paul Sladen
http://www.paul.sladen.org/projects/compression/
You may use and distribute this code under any DFSG-compatible
license (eg. BSD, GNU GPLv2).
Stand-alone pure-Python DEFLATE (gzip) and bzip2 decoder/decompressor.
This is probably most useful for research purposes/index building; there
is certainly some room for improvement in the Huffman bit-matcher.
With the as-written implementation, there was a known bug in BWT
decoding to do with repeated strings. This has been worked around;
see 'bwt_reverse()'. Correct output is produced in all test cases
but ideally the problem would be found...
"""
import hashlib
import os
import pyperf
import six
from six.moves import xrange
class BitfieldBase(object):
def __init__(self, x):
if isinstance(x, BitfieldBase):
self.f = x.f
self.bits = x.bits
self.bitfield = x.bitfield
            self.count = x.count
else:
self.f = x
self.bits = 0
self.bitfield = 0x0
self.count = 0
def _read(self, n):
s = self.f.read(n)
if not s:
raise "Length Error"
self.count += len(s)
return s
def needbits(self, n):
while self.bits < n:
self._more()
def _mask(self, n):
return (1 << n) - 1
def toskip(self):
return self.bits & 0x7
def align(self):
self.readbits(self.toskip())
def dropbits(self, n=8):
while n >= self.bits and n > 7:
n -= self.bits
self.bits = 0
            n -= len(self._read(n >> 3)) << 3
if n:
self.readbits(n)
# No return value
def dropbytes(self, n=1):
self.dropbits(n << 3)
def tell(self):
return self.count - ((self.bits + 7) >> 3), 7 - ((self.bits - 1) & 0x7)
def tellbits(self):
bytes, bits = self.tell()
return (bytes << 3) + bits
class Bitfield(BitfieldBase):
def _more(self):
c = self._read(1)
self.bitfield += ord(c) << self.bits
self.bits += 8
def snoopbits(self, n=8):
if n > self.bits:
self.needbits(n)
return self.bitfield & self._mask(n)
def readbits(self, n=8):
if n > self.bits:
self.needbits(n)
r = self.bitfield & self._mask(n)
self.bits -= n
self.bitfield >>= n
return r
class RBitfield(BitfieldBase):
def _more(self):
c = self._read(1)
self.bitfield <<= 8
self.bitfield += ord(c)
self.bits += 8
def snoopbits(self, n=8):
if n > self.bits:
self.needbits(n)
return (self.bitfield >> (self.bits - n)) & self._mask(n)
def readbits(self, n=8):
if n > self.bits:
self.needbits(n)
r = (self.bitfield >> (self.bits - n)) & self._mask(n)
self.bits -= n
self.bitfield &= ~(self._mask(n) << self.bits)
return r
def printbits(v, n):
o = ''
for i in range(n):
if v & 1:
o = '1' + o
else:
o = '0' + o
v >>= 1
return o
class HuffmanLength(object):
def __init__(self, code, bits=0):
self.code = code
self.bits = bits
self.symbol = None
self.reverse_symbol = None
def __repr__(self):
return repr((self.code, self.bits, self.symbol, self.reverse_symbol))
@staticmethod
def _sort_func(obj):
return (obj.bits, obj.code)
def reverse_bits(v, n):
a = 1 << 0
b = 1 << (n - 1)
z = 0
for i in range(n - 1, -1, -2):
z |= (v >> i) & a
z |= (v << i) & b
a <<= 1
b >>= 1
return z
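# reverse_bits swaps bit 0 with bit n-1, bit 1 with bit n-2, and so on, by
# walking the masks a and b towards each other; e.g. reverse_bits(0b1101, 4)
# == 0b1011. DEFLATE writes Huffman codes most-significant-bit first into an
# LSB-first byte stream, hence the bit-reversed reverse_symbol lookups.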
def reverse_bytes(v, n):
a = 0xff << 0
b = 0xff << (n - 8)
z = 0
for i in range(n - 8, -8, -16):
z |= (v >> i) & a
z |= (v << i) & b
a <<= 8
b >>= 8
return z
class HuffmanTable(object):
def __init__(self, bootstrap):
l = []
start, bits = bootstrap[0]
for finish, endbits in bootstrap[1:]:
if bits:
for code in range(start, finish):
l.append(HuffmanLength(code, bits))
start, bits = finish, endbits
if endbits == -1:
break
l.sort(key=HuffmanLength._sort_func)
self.table = l
def populate_huffman_symbols(self):
bits, symbol = -1, -1
for x in self.table:
symbol += 1
if x.bits != bits:
symbol <<= (x.bits - bits)
bits = x.bits
x.symbol = symbol
x.reverse_symbol = reverse_bits(symbol, bits)
def tables_by_bits(self):
d = {}
for x in self.table:
try:
d[x.bits].append(x)
            except KeyError:
                d[x.bits] = [x]
        return d
def min_max_bits(self):
self.min_bits, self.max_bits = 16, -1
for x in self.table:
if x.bits < self.min_bits:
self.min_bits = x.bits
if x.bits > self.max_bits:
self.max_bits = x.bits
def _find_symbol(self, bits, symbol, table):
for h in table:
if h.bits == bits and h.reverse_symbol == symbol:
return h.code
return -1
def find_next_symbol(self, field, reversed=True):
cached_length = -1
cached = None
for x in self.table:
if cached_length != x.bits:
cached = field.snoopbits(x.bits)
cached_length = x.bits
if (reversed and x.reverse_symbol == cached) or (not reversed and x.symbol == cached):
field.readbits(x.bits)
return x.code
raise Exception("unfound symbol, even after end of table @%r"
% field.tell())
for bits in range(self.min_bits, self.max_bits + 1):
r = self._find_symbol(bits, field.snoopbits(bits), self.table)
if 0 <= r:
field.readbits(bits)
return r
elif bits == self.max_bits:
raise "unfound symbol, even after max_bits"
class OrderedHuffmanTable(HuffmanTable):
def __init__(self, lengths):
l = len(lengths)
z = list(zip(range(l), lengths)) + [(l, -1)]
HuffmanTable.__init__(self, z)
def code_length_orders(i):
return (16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3,
13, 2, 14, 1, 15)[i]
def distance_base(i):
return (1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193,
12289, 16385, 24577)[i]
def length_base(i):
return (3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35,
43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258)[i - 257]
def extra_distance_bits(n):
if 0 <= n <= 1:
return 0
elif 2 <= n <= 29:
return (n >> 1) - 1
else:
raise "illegal distance code"
def extra_length_bits(n):
if 257 <= n <= 260 or n == 285:
return 0
elif 261 <= n <= 284:
return ((n - 257) >> 2) - 1
else:
raise "illegal length code"
def move_to_front(l, c):
l[:] = l[c:c + 1] + l[0:c] + l[c + 1:]
def bwt_transform(L):
# Semi-inefficient way to get the character counts
if six.PY3:
F = bytes(sorted(L))
else:
F = b''.join(sorted(L))
base = []
for i in range(256):
base.append(F.find(six.int2byte(i)))
pointers = [-1] * len(L)
for i, symbol in enumerate(six.iterbytes(L)):
pointers[base[symbol]] = i
base[symbol] += 1
return pointers
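# bwt_transform builds the standard inverse-BWT successor vector: F is the
# sorted last column, base[] records where each byte value starts in F, and
# pointers[] maps each row's rank back to its position in L; bwt_reverse
# follows this chain to reconstruct the original data.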
def bwt_reverse(L, end):
out = []
if len(L):
T = bwt_transform(L)
        # STRANGENESS WARNING: There was a bug somewhere here in that
# if the output of the BWT resolves to a perfect copy of N
# identical strings (think exact multiples of 255 'X' here),
# then a loop is formed. When decoded, the output string would
# be cut off after the first loop, typically '\0\0\0\0\xfb'.
# The previous loop construct was:
#
# next = T[end]
# while next != end:
# out += L[next]
# next = T[next]
# out += L[next]
#
# For the moment, I've instead replaced it with a check to see
# if there has been enough output generated. I didn't figured
# out where the off-by-one-ism is yet---that actually produced
# the cyclic loop.
for i in xrange(len(L)):
end = T[end]
out.append(L[end])
if six.PY3:
return bytes(out)
else:
return b"".join(out)
def compute_used(b):
huffman_used_map = b.readbits(16)
map_mask = 1 << 15
used = []
while map_mask > 0:
if huffman_used_map & map_mask:
huffman_used_bitmap = b.readbits(16)
bit_mask = 1 << 15
while bit_mask > 0:
if huffman_used_bitmap & bit_mask:
pass
used += [bool(huffman_used_bitmap & bit_mask)]
bit_mask >>= 1
else:
used += [False] * 16
map_mask >>= 1
return used
def compute_selectors_list(b, huffman_groups):
selectors_used = b.readbits(15)
mtf = list(range(huffman_groups))
selectors_list = []
for i in range(selectors_used):
# zero-terminated bit runs (0..62) of MTF'ed huffman table
c = 0
while b.readbits(1):
c += 1
if c >= huffman_groups:
raise "Bzip2 chosen selector greater than number of groups (max 6)"
if c >= 0:
move_to_front(mtf, c)
selectors_list.append(mtf[0])
return selectors_list
def compute_tables(b, huffman_groups, symbols_in_use):
groups_lengths = []
for j in range(huffman_groups):
length = b.readbits(5)
lengths = []
for i in range(symbols_in_use):
if not 0 <= length <= 20:
raise "Bzip2 Huffman length code outside range 0..20"
while b.readbits(1):
length -= (b.readbits(1) * 2) - 1
lengths += [length]
groups_lengths += [lengths]
tables = []
for g in groups_lengths:
codes = OrderedHuffmanTable(g)
codes.populate_huffman_symbols()
codes.min_max_bits()
tables.append(codes)
return tables
def decode_huffman_block(b, out):
randomised = b.readbits(1)
if randomised:
raise "Bzip2 randomised support not implemented"
pointer = b.readbits(24)
used = compute_used(b)
huffman_groups = b.readbits(3)
if not 2 <= huffman_groups <= 6:
raise Exception("Bzip2: Number of Huffman groups not in range 2..6")
selectors_list = compute_selectors_list(b, huffman_groups)
symbols_in_use = sum(used) + 2 # remember RUN[AB] RLE symbols
tables = compute_tables(b, huffman_groups, symbols_in_use)
favourites = [six.int2byte(i) for i, x in enumerate(used) if x]
selector_pointer = 0
decoded = 0
# Main Huffman loop
repeat = repeat_power = 0
buffer = []
t = None
while True:
decoded -= 1
if decoded <= 0:
decoded = 50 # Huffman table re-evaluate/switch length
if selector_pointer <= len(selectors_list):
t = tables[selectors_list[selector_pointer]]
selector_pointer += 1
r = t.find_next_symbol(b, False)
if 0 <= r <= 1:
if repeat == 0:
repeat_power = 1
repeat += repeat_power << r
repeat_power <<= 1
continue
elif repeat > 0:
# Remember kids: If there is only one repeated
# real symbol, it is encoded with *zero* Huffman
# bits and not output... so buffer[-1] doesn't work.
buffer.append(favourites[0] * repeat)
repeat = 0
if r == symbols_in_use - 1:
break
else:
o = favourites[r - 1]
move_to_front(favourites, r - 1)
buffer.append(o)
pass
nt = nearly_there = bwt_reverse(b"".join(buffer), pointer)
i = 0
# Pointless/irritating run-length encoding step
while i < len(nearly_there):
if i < len(nearly_there) - 4 and nt[i] == nt[i + 1] == nt[i + 2] == nt[i + 3]:
out.append(nearly_there[i:i + 1] * (ord(nearly_there[i + 4:i + 5]) + 4))
i += 5
else:
out.append(nearly_there[i:i + 1])
i += 1
# Sixteen bits of magic have been removed by the time we start decoding
def bzip2_main(input):
b = RBitfield(input)
method = b.readbits(8)
if method != ord('h'):
raise Exception(
"Unknown (not type 'h'uffman Bzip2) compression method")
blocksize = b.readbits(8)
if ord('1') <= blocksize <= ord('9'):
blocksize = blocksize - ord('0')
else:
raise Exception("Unknown (not size '0'-'9') Bzip2 blocksize")
out = []
while True:
blocktype = b.readbits(48)
b.readbits(32) # crc
if blocktype == 0x314159265359: # (pi)
decode_huffman_block(b, out)
elif blocktype == 0x177245385090: # sqrt(pi)
b.align()
break
else:
raise Exception("Illegal Bzip2 blocktype")
return b''.join(out)
# Sixteen bits of magic have been removed by the time we start decoding
def gzip_main(field):
b = Bitfield(field)
method = b.readbits(8)
if method != 8:
raise Exception("Unknown (not type eight DEFLATE) compression method")
# Use flags, drop modification time, extra flags and OS creator type.
flags = b.readbits(8)
b.readbits(32) # mtime
b.readbits(8) # extra_flags
b.readbits(8) # os_type
if flags & 0x04: # structured GZ_FEXTRA miscellaneous data
xlen = b.readbits(16)
b.dropbytes(xlen)
while flags & 0x08: # original GZ_FNAME filename
if not b.readbits(8):
break
while flags & 0x10: # human readable GZ_FCOMMENT
if not b.readbits(8):
break
if flags & 0x02: # header-only GZ_FHCRC checksum
b.readbits(16)
out = []
while True:
lastbit = b.readbits(1)
blocktype = b.readbits(2)
if blocktype == 0:
b.align()
length = b.readbits(16)
if length & b.readbits(16):
raise Exception("stored block lengths do not match each other")
for i in range(length):
out.append(six.int2byte(b.readbits(8)))
elif blocktype == 1 or blocktype == 2: # Huffman
main_literals, main_distances = None, None
if blocktype == 1: # Static Huffman
static_huffman_bootstrap = [
(0, 8), (144, 9), (256, 7), (280, 8), (288, -1)]
static_huffman_lengths_bootstrap = [(0, 5), (32, -1)]
main_literals = HuffmanTable(static_huffman_bootstrap)
main_distances = HuffmanTable(static_huffman_lengths_bootstrap)
elif blocktype == 2: # Dynamic Huffman
literals = b.readbits(5) + 257
distances = b.readbits(5) + 1
code_lengths_length = b.readbits(4) + 4
l = [0] * 19
for i in range(code_lengths_length):
l[code_length_orders(i)] = b.readbits(3)
dynamic_codes = OrderedHuffmanTable(l)
dynamic_codes.populate_huffman_symbols()
dynamic_codes.min_max_bits()
# Decode the code_lengths for both tables at once,
# then split the list later
code_lengths = []
n = 0
while n < (literals + distances):
r = dynamic_codes.find_next_symbol(b)
if 0 <= r <= 15: # literal bitlength for this code
count = 1
what = r
elif r == 16: # repeat last code
count = 3 + b.readbits(2)
# Is this supposed to default to '0' if in the zeroth
# position?
what = code_lengths[-1]
elif r == 17: # repeat zero
count = 3 + b.readbits(3)
what = 0
elif r == 18: # repeat zero lots
count = 11 + b.readbits(7)
what = 0
else:
raise Exception(
"next code length is outside of the range 0 <= r <= 18")
code_lengths += [what] * count
n += count
main_literals = OrderedHuffmanTable(code_lengths[:literals])
main_distances = OrderedHuffmanTable(code_lengths[literals:])
# Common path for both Static and Dynamic Huffman decode now
main_literals.populate_huffman_symbols()
main_distances.populate_huffman_symbols()
main_literals.min_max_bits()
main_distances.min_max_bits()
literal_count = 0
while True:
r = main_literals.find_next_symbol(b)
if 0 <= r <= 255:
literal_count += 1
out.append(six.int2byte(r))
elif r == 256:
if literal_count > 0:
literal_count = 0
break
elif 257 <= r <= 285: # dictionary lookup
if literal_count > 0:
literal_count = 0
length_extra = b.readbits(extra_length_bits(r))
length = length_base(r) + length_extra
r1 = main_distances.find_next_symbol(b)
if 0 <= r1 <= 29:
distance = distance_base(
r1) + b.readbits(extra_distance_bits(r1))
while length > distance:
out += out[-distance:]
length -= distance
if length == distance:
out += out[-distance:]
else:
out += out[-distance:length - distance]
elif 30 <= r1 <= 31:
raise Exception("illegal unused distance symbol "
"in use @%r" % b.tell())
elif 286 <= r <= 287:
raise Exception("illegal unused literal/length symbol "
"in use @%r" % b.tell())
elif blocktype == 3:
raise Exception("illegal unused blocktype in use @%r" % b.tell())
if lastbit:
break
b.align()
b.readbits(32) # crc
b.readbits(32) # final_length
return "".join(out)
def bench_pyflake(loops, filename):
input_fp = open(filename, 'rb')
range_it = xrange(loops)
t0 = pyperf.perf_counter()
for _ in range_it:
input_fp.seek(0)
field = RBitfield(input_fp)
magic = field.readbits(16)
if magic == 0x1f8b: # GZip
out = gzip_main(field)
elif magic == 0x425a: # BZip2
out = bzip2_main(field)
else:
raise Exception("Unknown file magic %x, not a gzip/bzip2 file"
% hex(magic))
dt = pyperf.perf_counter() - t0
input_fp.close()
if hashlib.md5(out).hexdigest() != "afa004a630fe072901b1d9628b960974":
raise Exception("MD5 checksum mismatch")
return dt
if __name__ == '__main__':
runner = pyperf.Runner()
runner.metadata['description'] = "Pyflate benchmark"
filename = os.path.join(#os.path.dirname(__file__),
"test", "original", "data", "interpreter.tar.bz2")
bench_pyflake(1,filename)
# runner.bench_time_func('pyflate', bench_pyflake, filename)
| 20,272 | 29.213115 | 98 | py |
scalene | scalene-master/test/optimized/bm_spectral_norm.py | """
MathWorld: "Hundred-Dollar, Hundred-Digit Challenge Problems", Challenge #3.
http://mathworld.wolfram.com/Hundred-DollarHundred-DigitChallengeProblems.html
The Computer Language Benchmarks Game
http://benchmarksgame.alioth.debian.org/u64q/spectralnorm-description.html#spectralnorm
Contributed by Sebastien Loisel
Fixed by Isaac Gouy
Sped up by Josh Goldfoot
Dirtily sped up by Simon Descarpentries
Concurrency by Jason Stitt
"""
from six.moves import xrange, zip as izip
DEFAULT_N = 130
def eval_A(i, j):
return 1.0 / ((i + j) * (i + j + 1) // 2 + i + 1)
def eval_times_u(func, u):
    return [func((i, u)) for i in xrange(len(u))]
def eval_AtA_times_u(u):
return eval_times_u(part_At_times_u, eval_times_u(part_A_times_u, u))
def part_A_times_u(i_u):
i, u = i_u
partial_sum = 0
for j, u_j in enumerate(u):
# EDB WAS:
# partial_sum += eval_A(i, j) * u_j
ij = i + j
partial_sum += (1.0 / ((ij) * (ij + 1) // 2 + i + 1)) * u_j
return partial_sum
def part_At_times_u(i_u):
i, u = i_u
partial_sum = 0
for j, u_j in enumerate(u):
# EDB WAS:
# partial_sum += eval_A(j, i) * u_j
ij = i + j
partial_sum += (1.0 / ((ij) * (ij + 1) // 2 + j + 1)) * u_j
return partial_sum
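# Hedged helper (ours, not part of the original benchmark): bench_spectral_norm
# below only accumulates vBv and vv; the spectral norm estimate itself is the
# square root of the Rayleigh quotient, as sketched here.  Defined but never
# called, so the benchmark timing is unaffected.
def spectral_norm_estimate(u, v):
    import math
    vBv = sum(ue * ve for ue, ve in izip(u, v))
    vv = sum(ve * ve for ve in v)
    return math.sqrt(vBv / vv)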
def bench_spectral_norm(loops):
range_it = xrange(loops)
# t0 = pyperf.perf_counter()
for _ in range_it:
u = [1] * DEFAULT_N
for dummy in xrange(10):
v = eval_AtA_times_u(u)
u = eval_AtA_times_u(v)
vBv = vv = 0
for ue, ve in izip(u, v):
vBv += ue * ve
vv += ve * ve
return # pyperf.perf_counter() - t0
if __name__ == "__main__":
bench_spectral_norm(10)
# runner = pyperf.Runner()
# runner.metadata['description'] = (
# 'MathWorld: "Hundred-Dollar, Hundred-Digit Challenge Problems", '
# 'Challenge #3.')
# runner.bench_time_func('spectral_norm', bench_spectral_norm)
 | 1997 | 23.666667 | 87 | py |
scalene | scalene-master/test/optimized/bm_raytrace.py | """
This file contains definitions for a simple raytracer.
Copyright Callum and Tony Garnock-Jones, 2008.
This file may be freely redistributed under the MIT license,
http://www.opensource.org/licenses/mit-license.php
From http://www.lshift.net/blog/2008/10/29/toy-raytracer-in-python
"""
import array
import math
import pyperf
from six.moves import xrange
DEFAULT_WIDTH = 100
DEFAULT_HEIGHT = 100
EPSILON = 0.00001
class Vector(object):
def __init__(self, initx, inity, initz):
self.x = initx
self.y = inity
self.z = initz
def __str__(self):
return '(%s,%s,%s)' % (self.x, self.y, self.z)
def __repr__(self):
return 'Vector(%s,%s,%s)' % (self.x, self.y, self.z)
def magnitude(self):
return math.sqrt(self.dot(self))
def __add__(self, other):
if other.isPoint():
return Point(self.x + other.x, self.y + other.y, self.z + other.z)
else:
return Vector(self.x + other.x, self.y + other.y, self.z + other.z)
def __sub__(self, other):
other.mustBeVector()
return Vector(self.x - other.x, self.y - other.y, self.z - other.z)
def scale(self, factor):
return Vector(factor * self.x, factor * self.y, factor * self.z)
def dot(self, other):
other.mustBeVector()
return (self.x * other.x) + (self.y * other.y) + (self.z * other.z)
def cross(self, other):
other.mustBeVector()
return Vector(self.y * other.z - self.z * other.y,
self.z * other.x - self.x * other.z,
self.x * other.y - self.y * other.x)
def normalized(self):
# return self.scale(1.0 / self.magnitude())
return self.scale(1.0 / (math.sqrt(self.dot(self))))
def negated(self):
return self.scale(-1)
def __eq__(self, other):
return (self.x == other.x) and (self.y == other.y) and (self.z == other.z)
def isVector(self):
return True
def isPoint(self):
return False
def mustBeVector(self):
return self
def mustBePoint(self):
        raise TypeError('Vectors are not points!')
def reflectThrough(self, normal):
d = normal.scale(self.dot(normal))
return self - d.scale(2)
Vector.ZERO = Vector(0, 0, 0)
Vector.RIGHT = Vector(1, 0, 0)
Vector.UP = Vector(0, 1, 0)
Vector.OUT = Vector(0, 0, 1)
assert Vector.RIGHT.reflectThrough(Vector.UP) == Vector.RIGHT
assert Vector(-1, -1, 0).reflectThrough(Vector.UP) == Vector(-1, 1, 0)
class Point(object):
def __init__(self, initx, inity, initz):
self.x = initx
self.y = inity
self.z = initz
def __str__(self):
return '(%s,%s,%s)' % (self.x, self.y, self.z)
def __repr__(self):
return 'Point(%s,%s,%s)' % (self.x, self.y, self.z)
def __add__(self, other):
other.mustBeVector()
return Point(self.x + other.x, self.y + other.y, self.z + other.z)
def __sub__(self, other):
if other.isPoint():
return Vector(self.x - other.x, self.y - other.y, self.z - other.z)
else:
return Point(self.x - other.x, self.y - other.y, self.z - other.z)
def isVector(self):
return False
def isPoint(self):
return True
def mustBeVector(self):
        raise TypeError('Points are not vectors!')
def mustBePoint(self):
return self
class Sphere(object):
def __init__(self, centre, radius):
centre.mustBePoint()
self.centre = centre
self.radius = radius
def __repr__(self):
return 'Sphere(%s,%s)' % (repr(self.centre), self.radius)
def intersectionTime(self, ray):
cp = self.centre - ray.point
v = cp.dot(ray.vector)
# EDB WAS
# discriminant = (self.radius * self.radius) - (cp.dot(cp) - v * v)
discriminant = (self.radius * self.radius) - ((cp.x * cp.x + cp.y * cp.y + cp.z * cp.z) - v * v)
if discriminant < 0:
return None
else:
return v - math.sqrt(discriminant)
def normalAt(self, p):
return (p - self.centre).normalized()
class Halfspace(object):
def __init__(self, point, normal):
self.point = point
self.normal = normal.normalized()
def __repr__(self):
return 'Halfspace(%s,%s)' % (repr(self.point), repr(self.normal))
def intersectionTime(self, ray):
v = ray.vector.dot(self.normal)
if v:
return 1 / -v
else:
return None
def normalAt(self, p):
return self.normal
class Ray(object):
def __init__(self, point, vector):
self.point = point
self.vector = vector.normalized()
def __repr__(self):
return 'Ray(%s,%s)' % (repr(self.point), repr(self.vector))
def pointAtTime(self, t):
return self.point + self.vector.scale(t)
Point.ZERO = Point(0, 0, 0)
class Canvas(object):
def __init__(self, width, height):
self.bytes = array.array('B', [0] * (width * height * 3))
for i in xrange(width * height):
self.bytes[i * 3 + 2] = 255
self.width = width
self.height = height
def plot(self, x, y, r, g, b):
i = ((self.height - y - 1) * self.width + x) * 3
self.bytes[i] = max(0, min(255, int(r * 255)))
self.bytes[i + 1] = max(0, min(255, int(g * 255)))
self.bytes[i + 2] = max(0, min(255, int(b * 255)))
def write_ppm(self, filename):
header = 'P6 %d %d 255\n' % (self.width, self.height)
with open(filename, "wb") as fp:
fp.write(header.encode('ascii'))
            fp.write(self.bytes.tobytes())  # array.tostring() was removed in Python 3.9
def firstIntersection(intersections):
result = None
for i in intersections:
candidateT = i[1]
if candidateT is not None and candidateT > -EPSILON:
if result is None or candidateT < result[1]:
result = i
return result
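# Hedged illustration (ours, not part of the benchmark): firstIntersection
# keeps the hit with the smallest non-negative time, skipping misses (None).
# The tuples below stand in for (object, time, surface) triples.
assert firstIntersection(
    [(None, None, None), ('far', 5.0, None), ('near', 2.0, None)])[0] == 'near'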
class Scene(object):
def __init__(self):
self.objects = []
self.lightPoints = []
self.position = Point(0, 1.8, 10)
self.lookingAt = Point.ZERO
self.fieldOfView = 45
self.recursionDepth = 0
def moveTo(self, p):
self.position = p
def lookAt(self, p):
self.lookingAt = p
def addObject(self, object, surface):
self.objects.append((object, surface))
def addLight(self, p):
self.lightPoints.append(p)
def render(self, canvas):
fovRadians = math.pi * (self.fieldOfView / 2.0) / 180.0
halfWidth = math.tan(fovRadians)
halfHeight = 0.75 * halfWidth
width = halfWidth * 2
height = halfHeight * 2
pixelWidth = width / (canvas.width - 1)
pixelHeight = height / (canvas.height - 1)
eye = Ray(self.position, self.lookingAt - self.position)
vpRight = eye.vector.cross(Vector.UP).normalized()
vpUp = vpRight.cross(eye.vector).normalized()
for y in xrange(canvas.height):
for x in xrange(canvas.width):
xcomp = vpRight.scale(x * pixelWidth - halfWidth)
ycomp = vpUp.scale(y * pixelHeight - halfHeight)
ray = Ray(eye.point, eye.vector + xcomp + ycomp)
colour = self.rayColour(ray)
canvas.plot(x, y, *colour)
def rayColour(self, ray):
if self.recursionDepth > 3:
return (0, 0, 0)
try:
self.recursionDepth = self.recursionDepth + 1
intersections = [(o, o.intersectionTime(ray), s)
for (o, s) in self.objects]
i = firstIntersection(intersections)
if i is None:
return (0, 0, 0) # the background colour
else:
(o, t, s) = i
p = ray.pointAtTime(t)
return s.colourAt(self, ray, p, o.normalAt(p))
finally:
self.recursionDepth = self.recursionDepth - 1
def _lightIsVisible(self, l, p):
for (o, s) in self.objects:
t = o.intersectionTime(Ray(p, l - p))
if t is not None and t > EPSILON:
return False
return True
def visibleLights(self, p):
result = []
for l in self.lightPoints:
if self._lightIsVisible(l, p):
result.append(l)
return result
def addColours(a, scale, b):
return (a[0] + scale * b[0],
a[1] + scale * b[1],
a[2] + scale * b[2])
class SimpleSurface(object):
def __init__(self, **kwargs):
self.baseColour = kwargs.get('baseColour', (1, 1, 1))
self.specularCoefficient = kwargs.get('specularCoefficient', 0.2)
self.lambertCoefficient = kwargs.get('lambertCoefficient', 0.6)
self.ambientCoefficient = 1.0 - self.specularCoefficient - self.lambertCoefficient
def baseColourAt(self, p):
return self.baseColour
def colourAt(self, scene, ray, p, normal):
b = self.baseColourAt(p)
c = (0, 0, 0)
if self.specularCoefficient > 0:
reflectedRay = Ray(p, ray.vector.reflectThrough(normal))
reflectedColour = scene.rayColour(reflectedRay)
c = addColours(c, self.specularCoefficient, reflectedColour)
if self.lambertCoefficient > 0:
lambertAmount = 0
for lightPoint in scene.visibleLights(p):
contribution = (lightPoint - p).normalized().dot(normal)
if contribution > 0:
lambertAmount = lambertAmount + contribution
lambertAmount = min(1, lambertAmount)
c = addColours(c, self.lambertCoefficient * lambertAmount, b)
if self.ambientCoefficient > 0:
c = addColours(c, self.ambientCoefficient, b)
return c
class CheckerboardSurface(SimpleSurface):
def __init__(self, **kwargs):
SimpleSurface.__init__(self, **kwargs)
self.otherColour = kwargs.get('otherColour', (0, 0, 0))
self.checkSize = kwargs.get('checkSize', 1)
def baseColourAt(self, p):
v = p - Point.ZERO
v.scale(1.0 / self.checkSize)
if ((int(abs(v.x) + 0.5)
+ int(abs(v.y) + 0.5)
+ int(abs(v.z) + 0.5)) % 2):
return self.otherColour
else:
return self.baseColour
def bench_raytrace(loops, width, height, filename):
range_it = xrange(loops)
t0 = pyperf.perf_counter()
for i in range_it:
canvas = Canvas(width, height)
s = Scene()
s.addLight(Point(30, 30, 10))
s.addLight(Point(-10, 100, 30))
s.lookAt(Point(0, 3, 0))
s.addObject(Sphere(Point(1, 3, -10), 2),
SimpleSurface(baseColour=(1, 1, 0)))
for y in xrange(6):
s.addObject(Sphere(Point(-3 - y * 0.4, 2.3, -5), 0.4),
SimpleSurface(baseColour=(y / 6.0, 1 - y / 6.0, 0.5)))
s.addObject(Halfspace(Point(0, 0, 0), Vector.UP),
CheckerboardSurface())
s.render(canvas)
dt = pyperf.perf_counter() - t0
if filename:
canvas.write_ppm(filename)
return dt
def add_cmdline_args(cmd, args):
cmd.append("--width=%s" % args.width)
cmd.append("--height=%s" % args.height)
if args.filename:
cmd.extend(("--filename", args.filename))
if __name__ == "__main__":
runner = pyperf.Runner(add_cmdline_args=add_cmdline_args)
cmd = runner.argparser
cmd.add_argument("--width",
type=int, default=DEFAULT_WIDTH,
help="Image width (default: %s)" % DEFAULT_WIDTH)
cmd.add_argument("--height",
type=int, default=DEFAULT_HEIGHT,
help="Image height (default: %s)" % DEFAULT_HEIGHT)
cmd.add_argument("--filename", metavar="FILENAME.PPM",
help="Output filename of the PPM picture")
args = runner.parse_args()
runner.metadata['description'] = "Simple raytracer"
runner.metadata['raytrace_width'] = args.width
runner.metadata['raytrace_height'] = args.height
bench_raytrace(5, args.width, args.height, args.filename)
# runner.bench_time_func('raytrace', bench_raytrace,
# args.width, args.height,
# args.filename)
 | 12330 | 28.713253 | 104 | py |
scalene | scalene-master/test/optimized/bm_scimark.py | from array import array
import math
import pyperf
from six.moves import xrange
class Array2D(object):
def __init__(self, w, h, data=None):
self.width = w
self.height = h
self.data = array('d', [0]) * (w * h)
if data is not None:
self.setup(data)
    def _idx(self, x, y):
        # EDB: return without error checking (fast path).
        return y * self.width + x
        # Original bounds-checked version, now unreachable:
        if 0 <= x < self.width and 0 <= y < self.height:
            return y * self.width + x
        raise IndexError
def __getitem__(self, x_y):
(x, y) = x_y
return self.data[self._idx(x, y)]
def __setitem__(self, x_y, val):
(x, y) = x_y
self.data[self._idx(x, y)] = val
def setup(self, data):
for y in xrange(self.height):
for x in xrange(self.width):
self[x, y] = data[y][x]
return self
def indexes(self):
for y in xrange(self.height):
for x in xrange(self.width):
yield x, y
def copy_data_from(self, other):
self.data[:] = other.data[:]
class Random(object):
MDIG = 32
ONE = 1
m1 = (ONE << (MDIG - 2)) + ((ONE << (MDIG - 2)) - ONE)
m2 = ONE << MDIG // 2
dm1 = 1.0 / float(m1)
def __init__(self, seed):
self.initialize(seed)
self.left = 0.0
self.right = 1.0
self.width = 1.0
self.haveRange = False
def initialize(self, seed):
self.seed = seed
seed = abs(seed)
jseed = min(seed, self.m1)
if (jseed % 2 == 0):
jseed -= 1
k0 = 9069 % self.m2
        k1 = 9069 // self.m2  # integer division: the generator state must stay integral
        j0 = jseed % self.m2
        j1 = jseed // self.m2
self.m = array('d', [0]) * 17
for iloop in xrange(17):
jseed = j0 * k0
            j1 = (jseed // self.m2 + j0 * k1 + j1 * k0) % (self.m2 // 2)
j0 = jseed % self.m2
self.m[iloop] = j0 + self.m2 * j1
self.i = 4
self.j = 16
def nextDouble(self):
I, J, m = self.i, self.j, self.m
k = m[I] - m[J]
if (k < 0):
k += self.m1
self.m[J] = k
if (I == 0):
I = 16
else:
I -= 1
self.i = I
if (J == 0):
J = 16
else:
J -= 1
self.j = J
if (self.haveRange):
return self.left + self.dm1 * float(k) * self.width
else:
return self.dm1 * float(k)
def RandomMatrix(self, a):
for x, y in a.indexes():
a[x, y] = self.nextDouble()
return a
def RandomVector(self, n):
return array('d', [self.nextDouble() for i in xrange(n)])
def copy_vector(vec):
# Copy a vector created by Random.RandomVector()
vec2 = array('d')
vec2[:] = vec[:]
return vec2
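# Hedged sanity sketch (ours, not part of the benchmark): the lagged-Fibonacci
# generator above is fully determined by its seed, so equal seeds yield equal
# streams.  Defined but never called, to leave the benchmark timing untouched.
def _check_random_determinism():
    r1, r2 = Random(7), Random(7)
    assert ([r1.nextDouble() for _ in range(5)]
            == [r2.nextDouble() for _ in range(5)])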
class ArrayList(Array2D):
def __init__(self, w, h, data=None):
self.width = w
self.height = h
self.data = [array('d', [0]) * w for y in xrange(h)]
if data is not None:
self.setup(data)
def __getitem__(self, idx):
if isinstance(idx, tuple):
return self.data[idx[1]][idx[0]]
else:
return self.data[idx]
def __setitem__(self, idx, val):
if isinstance(idx, tuple):
self.data[idx[1]][idx[0]] = val
else:
self.data[idx] = val
def copy_data_from(self, other):
for l1, l2 in zip(self.data, other.data):
l1[:] = l2
def SOR_execute(omega, G, cycles, Array):
for p in xrange(cycles):
for y in xrange(1, G.height - 1):
for x in xrange(1, G.width - 1):
G[x, y] = (omega * 0.25 * (G[x, y - 1] + G[x, y + 1] + G[x - 1, y]
+ G[x + 1, y])
+ (1.0 - omega) * G[x, y])
def bench_SOR(loops, n, cycles, Array):
range_it = xrange(loops)
t0 = pyperf.perf_counter()
for _ in range_it:
G = Array(n, n)
SOR_execute(1.25, G, cycles, Array)
return pyperf.perf_counter() - t0
def SparseCompRow_matmult(M, y, val, row, col, x, num_iterations):
range_it = xrange(num_iterations)
t0 = pyperf.perf_counter()
for _ in range_it:
for r in xrange(M):
sa = 0.0
for i in xrange(row[r], row[r + 1]):
sa += x[col[i]] * val[i]
y[r] = sa
return pyperf.perf_counter() - t0
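# Note (ours): bench_SparseMatMult below builds the matrix in compressed
# sparse row (CSR) form -- val holds the nonzero values, col[i] the column of
# val[i], and row[r]:row[r + 1] delimits row r's entries -- so the kernel
# above computes y = M x one row at a time.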
def bench_SparseMatMult(cycles, N, nz):
x = array('d', [0]) * N
y = array('d', [0]) * N
nr = nz // N
anz = nr * N
val = array('d', [0]) * anz
col = array('i', [0]) * nz
row = array('i', [0]) * (N + 1)
row[0] = 0
for r in xrange(N):
rowr = row[r]
step = r // nr
row[r + 1] = rowr + nr
if step < 1:
step = 1
for i in xrange(nr):
col[rowr + i] = i * step
return SparseCompRow_matmult(N, y, val, row, col, x, cycles)
def MonteCarlo(Num_samples):
rnd = Random(113)
under_curve = 0
for count in xrange(Num_samples):
x = rnd.nextDouble()
y = rnd.nextDouble()
if x * x + y * y <= 1.0:
under_curve += 1
return float(under_curve) / Num_samples * 4.0
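# Note (ours): this is the classic Monte Carlo estimate of pi -- the fraction
# of uniform points landing inside the quarter circle x*x + y*y <= 1, scaled
# by 4 -- so MonteCarlo(100 * 1000) should come out near 3.14.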
def bench_MonteCarlo(loops, Num_samples):
range_it = xrange(loops)
t0 = pyperf.perf_counter()
for _ in range_it:
MonteCarlo(Num_samples)
return pyperf.perf_counter() - t0
def LU_factor(A, pivot):
M, N = A.height, A.width
minMN = min(M, N)
for j in xrange(minMN):
jp = j
t = abs(A[j][j])
for i in xrange(j + 1, M):
ab = abs(A[i][j])
if ab > t:
jp = i
t = ab
pivot[j] = jp
if A[jp][j] == 0:
raise Exception("factorization failed because of zero pivot")
if jp != j:
A[j], A[jp] = A[jp], A[j]
if j < M - 1:
recp = 1.0 / A[j][j]
for k in xrange(j + 1, M):
A[k][j] *= recp
if j < minMN - 1:
for ii in xrange(j + 1, M):
for jj in xrange(j + 1, N):
A[ii][jj] -= A[ii][j] * A[j][jj]
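# Hedged sanity sketch (ours, not part of the benchmark): LU_factor performs
# an in-place LU decomposition with partial pivoting.  For [[4, 3], [6, 3]]
# the larger pivot 6 swaps the rows and the stored multiplier is 4/6.
# Defined but never called.
def _check_lu_factor():
    A = ArrayList(2, 2, [[4.0, 3.0], [6.0, 3.0]])
    pivot = array('i', [0]) * 2
    LU_factor(A, pivot)
    assert pivot[0] == 1                       # rows 0 and 1 were swapped
    assert abs(A[1][0] - 4.0 / 6.0) < 1e-12    # stored multiplier L[1][0]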
def LU(lu, A, pivot):
lu.copy_data_from(A)
LU_factor(lu, pivot)
def bench_LU(cycles, N):
rnd = Random(7)
A = rnd.RandomMatrix(ArrayList(N, N))
lu = ArrayList(N, N)
pivot = array('i', [0]) * N
range_it = xrange(cycles)
t0 = pyperf.perf_counter()
for _ in range_it:
LU(lu, A, pivot)
return pyperf.perf_counter() - t0
def int_log2(n):
k = 1
log = 0
while k < n:
k *= 2
log += 1
if n != 1 << log:
raise Exception("FFT: Data length is not a power of 2: %s" % n)
return log
def FFT_num_flops(N):
return (5.0 * N - 2) * int_log2(N) + 2 * (N + 1)
def FFT_transform_internal(N, data, direction):
n = N // 2
bit = 0
dual = 1
if n == 1:
return
logn = int_log2(n)
if N == 0:
return
FFT_bitreverse(N, data)
# apply fft recursion
# this loop executed int_log2(N) times
bit = 0
while bit < logn:
w_real = 1.0
w_imag = 0.0
theta = 2.0 * direction * math.pi / (2.0 * float(dual))
s = math.sin(theta)
t = math.sin(theta / 2.0)
s2 = 2.0 * t * t
for b in range(0, n, 2 * dual):
i = 2 * b
j = 2 * (b + dual)
wd_real = data[j]
wd_imag = data[j + 1]
data[j] = data[i] - wd_real
data[j + 1] = data[i + 1] - wd_imag
data[i] += wd_real
data[i + 1] += wd_imag
for a in xrange(1, dual):
tmp_real = w_real - s * w_imag - s2 * w_real
tmp_imag = w_imag + s * w_real - s2 * w_imag
w_real = tmp_real
w_imag = tmp_imag
for b in range(0, n, 2 * dual):
i = 2 * (b + a)
j = 2 * (b + a + dual)
z1_real = data[j]
z1_imag = data[j + 1]
wd_real = w_real * z1_real - w_imag * z1_imag
wd_imag = w_real * z1_imag + w_imag * z1_real
data[j] = data[i] - wd_real
data[j + 1] = data[i + 1] - wd_imag
data[i] += wd_real
data[i + 1] += wd_imag
bit += 1
dual *= 2
def FFT_bitreverse(N, data):
n = N // 2
nm1 = n - 1
j = 0
for i in range(nm1):
ii = i << 1
jj = j << 1
k = n >> 1
if i < j:
tmp_real = data[ii]
tmp_imag = data[ii + 1]
data[ii] = data[jj]
data[ii + 1] = data[jj + 1]
data[jj] = tmp_real
data[jj + 1] = tmp_imag
while k <= j:
j -= k
k >>= 1
j += k
def FFT_transform(N, data):
FFT_transform_internal(N, data, -1)
def FFT_inverse(N, data):
    n = N // 2
norm = 0.0
FFT_transform_internal(N, data, +1)
norm = 1 / float(n)
for i in xrange(N):
data[i] *= norm
def bench_FFT(loops, N, cycles):
twoN = 2 * N
init_vec = Random(7).RandomVector(twoN)
range_it = xrange(loops)
t0 = pyperf.perf_counter()
for _ in range_it:
x = copy_vector(init_vec)
for i in xrange(cycles):
FFT_transform(twoN, x)
FFT_inverse(twoN, x)
return pyperf.perf_counter() - t0
def add_cmdline_args(cmd, args):
if args.benchmark:
cmd.append(args.benchmark)
BENCHMARKS = {
# function name => arguments
'sor': (bench_SOR, 100, 10, Array2D),
'sparse_mat_mult': (bench_SparseMatMult, 1000, 50 * 1000),
'monte_carlo': (bench_MonteCarlo, 100 * 1000,),
'lu': (bench_LU, 100,),
'fft': (bench_FFT, 1024, 50),
}
if __name__ == "__main__":
runner = pyperf.Runner(add_cmdline_args=add_cmdline_args)
runner.argparser.add_argument("benchmark", nargs='?',
choices=sorted(BENCHMARKS))
args = runner.parse_args()
if args.benchmark:
benchmarks = (args.benchmark,)
else:
benchmarks = sorted(BENCHMARKS)
for bench in benchmarks:
name = 'scimark_%s' % bench
print(name)
args = BENCHMARKS[bench]
(args[0])(10, *args[1:])
# runner.bench_time_func(name, *args)
 | 10363 | 23.617577 | 82 | py |
scalene | scalene-master/test/optimized/bm_richards.py | """
based on a Java version:
Based on original version written in BCPL by Dr Martin Richards
in 1981 at Cambridge University Computer Laboratory, England
and a C++ version derived from a Smalltalk version written by
L Peter Deutsch.
Java version: Copyright (C) 1995 Sun Microsystems, Inc.
Translation from C++, Mario Wolczko
Outer loop added by Alex Jacoby
"""
from __future__ import print_function
import pyperf
from six.moves import xrange
# Task IDs
I_IDLE = 1
I_WORK = 2
I_HANDLERA = 3
I_HANDLERB = 4
I_DEVA = 5
I_DEVB = 6
# Packet types
K_DEV = 1000
K_WORK = 1001
# Packet
BUFSIZE = 4
BUFSIZE_RANGE = range(BUFSIZE)
class Packet(object):
def __init__(self, l, i, k):
self.link = l
self.ident = i
self.kind = k
self.datum = 0
self.data = [0] * BUFSIZE
def append_to(self, lst):
self.link = None
if lst is None:
return self
else:
p = lst
next = p.link
while next is not None:
p = next
next = p.link
p.link = self
return lst
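# Hedged sketch (ours, not part of the benchmark): append_to threads packets
# into a singly linked list and returns its head.  Defined but never called.
def _check_append_to():
    p1 = Packet(None, 1, K_WORK)
    p2 = Packet(None, 2, K_WORK)
    head = p2.append_to(p1.append_to(None))
    assert head is p1 and p1.link is p2 and p2.link is None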
# Task Records
class TaskRec(object):
pass
class DeviceTaskRec(TaskRec):
def __init__(self):
self.pending = None
class IdleTaskRec(TaskRec):
def __init__(self):
self.control = 1
self.count = 10000
class HandlerTaskRec(TaskRec):
def __init__(self):
self.work_in = None
self.device_in = None
def workInAdd(self, p):
self.work_in = p.append_to(self.work_in)
return self.work_in
def deviceInAdd(self, p):
self.device_in = p.append_to(self.device_in)
return self.device_in
class WorkerTaskRec(TaskRec):
def __init__(self):
self.destination = I_HANDLERA
self.count = 0
# Task
class TaskState(object):
def __init__(self):
self.packet_pending = True
self.task_waiting = False
self.task_holding = False
def packetPending(self):
self.packet_pending = True
self.task_waiting = False
self.task_holding = False
return self
def waiting(self):
self.packet_pending = False
self.task_waiting = True
self.task_holding = False
return self
def running(self):
self.packet_pending = False
self.task_waiting = False
self.task_holding = False
return self
def waitingWithPacket(self):
self.packet_pending = True
self.task_waiting = True
self.task_holding = False
return self
def isPacketPending(self):
return self.packet_pending
def isTaskWaiting(self):
return self.task_waiting
def isTaskHolding(self):
return self.task_holding
def isTaskHoldingOrWaiting(self):
return self.task_holding or (not self.packet_pending and self.task_waiting)
def isWaitingWithPacket(self):
return self.packet_pending and self.task_waiting and not self.task_holding
tracing = False
layout = 0
def trace(a):
global layout
layout -= 1
if layout <= 0:
print()
layout = 50
print(a, end='')
TASKTABSIZE = 10
class TaskWorkArea(object):
def __init__(self):
self.taskTab = [None] * TASKTABSIZE
self.taskList = None
self.holdCount = 0
self.qpktCount = 0
taskWorkArea = TaskWorkArea()
class Task(TaskState):
def __init__(self, i, p, w, initialState, r):
self.link = taskWorkArea.taskList
self.ident = i
self.priority = p
self.input = w
self.packet_pending = initialState.isPacketPending()
self.task_waiting = initialState.isTaskWaiting()
self.task_holding = initialState.isTaskHolding()
self.handle = r
taskWorkArea.taskList = self
taskWorkArea.taskTab[i] = self
def fn(self, pkt, r):
raise NotImplementedError
def addPacket(self, p, old):
if self.input is None:
self.input = p
self.packet_pending = True
if self.priority > old.priority:
return self
else:
p.append_to(self.input)
return old
def runTask(self):
if self.isWaitingWithPacket():
msg = self.input
self.input = msg.link
if self.input is None:
self.running()
else:
self.packetPending()
else:
msg = None
return self.fn(msg, self.handle)
def waitTask(self):
self.task_waiting = True
return self
def hold(self):
taskWorkArea.holdCount += 1
self.task_holding = True
return self.link
def release(self, i):
t = self.findtcb(i)
t.task_holding = False
if t.priority > self.priority:
return t
else:
return self
def qpkt(self, pkt):
t = self.findtcb(pkt.ident)
taskWorkArea.qpktCount += 1
pkt.link = None
pkt.ident = self.ident
return t.addPacket(pkt, self)
def findtcb(self, id):
t = taskWorkArea.taskTab[id]
if t is None:
raise Exception("Bad task id %d" % id)
return t
# DeviceTask
class DeviceTask(Task):
def __init__(self, i, p, w, s, r):
Task.__init__(self, i, p, w, s, r)
def fn(self, pkt, r):
d = r
assert isinstance(d, DeviceTaskRec)
if pkt is None:
pkt = d.pending
if pkt is None:
return self.waitTask()
else:
d.pending = None
return self.qpkt(pkt)
else:
d.pending = pkt
if tracing:
trace(pkt.datum)
return self.hold()
class HandlerTask(Task):
def __init__(self, i, p, w, s, r):
Task.__init__(self, i, p, w, s, r)
def fn(self, pkt, r):
h = r
assert isinstance(h, HandlerTaskRec)
if pkt is not None:
if pkt.kind == K_WORK:
h.workInAdd(pkt)
else:
h.deviceInAdd(pkt)
work = h.work_in
if work is None:
return self.waitTask()
count = work.datum
if count >= BUFSIZE:
h.work_in = work.link
return self.qpkt(work)
dev = h.device_in
if dev is None:
return self.waitTask()
h.device_in = dev.link
dev.datum = work.data[count]
work.datum = count + 1
return self.qpkt(dev)
# IdleTask
class IdleTask(Task):
def __init__(self, i, p, w, s, r):
Task.__init__(self, i, 0, None, s, r)
def fn(self, pkt, r):
i = r
assert isinstance(i, IdleTaskRec)
i.count -= 1
if i.count == 0:
return self.hold()
elif i.control & 1 == 0:
i.control //= 2
return self.release(I_DEVA)
else:
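            # Note (ours): '//' binds tighter than '^', so this computes
            # (control // 2) ^ 0xd008 -- the benchmark's LFSR-style step.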
i.control = i.control // 2 ^ 0xd008
return self.release(I_DEVB)
# WorkTask
A = ord('A')
class WorkTask(Task):
def __init__(self, i, p, w, s, r):
Task.__init__(self, i, p, w, s, r)
def fn(self, pkt, r):
w = r
assert isinstance(w, WorkerTaskRec)
if pkt is None:
return self.waitTask()
if w.destination == I_HANDLERA:
dest = I_HANDLERB
else:
dest = I_HANDLERA
w.destination = dest
pkt.ident = dest
pkt.datum = 0
for i in BUFSIZE_RANGE: # xrange(BUFSIZE)
w.count += 1
if w.count > 26:
w.count = 1
pkt.data[i] = A + w.count - 1
return self.qpkt(pkt)
def schedule():
t = taskWorkArea.taskList
while t is not None:
if tracing:
print("tcb =", t.ident)
# EDB WAS
# if t.isTaskHoldingOrWaiting():
if t.task_holding or (not t.packet_pending and t.task_waiting):
t = t.link
else:
if tracing:
trace(chr(ord("0") + t.ident))
t = t.runTask()
class Richards(object):
def run(self, iterations):
for i in xrange(iterations):
taskWorkArea.holdCount = 0
taskWorkArea.qpktCount = 0
IdleTask(I_IDLE, 1, 10000, TaskState().running(), IdleTaskRec())
wkq = Packet(None, 0, K_WORK)
wkq = Packet(wkq, 0, K_WORK)
WorkTask(I_WORK, 1000, wkq, TaskState(
).waitingWithPacket(), WorkerTaskRec())
wkq = Packet(None, I_DEVA, K_DEV)
wkq = Packet(wkq, I_DEVA, K_DEV)
wkq = Packet(wkq, I_DEVA, K_DEV)
HandlerTask(I_HANDLERA, 2000, wkq, TaskState(
).waitingWithPacket(), HandlerTaskRec())
wkq = Packet(None, I_DEVB, K_DEV)
wkq = Packet(wkq, I_DEVB, K_DEV)
wkq = Packet(wkq, I_DEVB, K_DEV)
HandlerTask(I_HANDLERB, 3000, wkq, TaskState(
).waitingWithPacket(), HandlerTaskRec())
wkq = None
DeviceTask(I_DEVA, 4000, wkq,
TaskState().waiting(), DeviceTaskRec())
DeviceTask(I_DEVB, 5000, wkq,
TaskState().waiting(), DeviceTaskRec())
schedule()
if taskWorkArea.holdCount == 9297 and taskWorkArea.qpktCount == 23246:
pass
else:
return False
return True
if __name__ == "__main__":
runner = pyperf.Runner()
runner.metadata['description'] = "The Richards benchmark"
richard = Richards()
richard.run(1)
# runner.bench_func('richards', richard.run, 1)
 | 9721 | 21.609302 | 83 | py |
scalene | scalene-master/test/optimized/bm_pyflate.py | #!/usr/bin/env python
"""
Copyright 2006--2007-01-21 Paul Sladen
http://www.paul.sladen.org/projects/compression/
You may use and distribute this code under any DFSG-compatible
license (eg. BSD, GNU GPLv2).
Stand-alone pure-Python DEFLATE (gzip) and bzip2 decoder/decompressor.
This is probably most useful for research purposes/index building; there
is certainly some room for improvement in the Huffman bit-matcher.
With the as-written implementation, there was a known bug in BWT
decoding to do with repeated strings. This has been worked around;
see 'bwt_reverse()'. Correct output is produced in all test cases
but ideally the problem would be found...
"""
import hashlib
import os
from collections import deque
import pyperf
import six
from six.moves import xrange
class BitfieldBase(object):
def __init__(self, x):
if isinstance(x, BitfieldBase):
self.f = x.f
self.bits = x.bits
self.bitfield = x.bitfield
            self.count = x.count
else:
self.f = x
self.bits = 0
self.bitfield = 0x0
self.count = 0
def _read(self, n):
s = self.f.read(n)
if not s:
raise "Length Error"
self.count += len(s)
return s
def needbits(self, n):
while self.bits < n:
self._more()
def _mask(self, n):
return (1 << n) - 1
def toskip(self):
return self.bits & 0x7
def align(self):
self.readbits(self.toskip())
def dropbits(self, n=8):
while n >= self.bits and n > 7:
n -= self.bits
self.bits = 0
            n -= len(self._read(n >> 3)) << 3
if n:
self.readbits(n)
# No return value
def dropbytes(self, n=1):
self.dropbits(n << 3)
def tell(self):
return self.count - ((self.bits + 7) >> 3), 7 - ((self.bits - 1) & 0x7)
def tellbits(self):
bytes, bits = self.tell()
return (bytes << 3) + bits
class Bitfield(BitfieldBase):
def _more(self):
c = self._read(1)
self.bitfield += ord(c) << self.bits
self.bits += 8
def snoopbits(self, n=8):
if n > self.bits:
self.needbits(n)
return self.bitfield & self._mask(n)
def readbits(self, n=8):
if n > self.bits:
self.needbits(n)
r = self.bitfield & self._mask(n)
self.bits -= n
self.bitfield >>= n
return r
class RBitfield(BitfieldBase):
def _more(self):
c = self._read(1)
self.bitfield <<= 8
self.bitfield += ord(c)
self.bits += 8
def snoopbits(self, n=8):
if n > self.bits:
self.needbits(n)
return (self.bitfield >> (self.bits - n)) & self._mask(n)
def readbits(self, n=8):
if n > self.bits:
self.needbits(n)
r = (self.bitfield >> (self.bits - n)) & self._mask(n)
self.bits -= n
self.bitfield &= ~(self._mask(n) << self.bits)
return r
def printbits(v, n):
o = ''
for i in range(n):
if v & 1:
o = '1' + o
else:
o = '0' + o
v >>= 1
return o
class HuffmanLength(object):
def __init__(self, code, bits=0):
self.code = code
self.bits = bits
self.symbol = None
self.reverse_symbol = None
def __repr__(self):
return repr((self.code, self.bits, self.symbol, self.reverse_symbol))
@staticmethod
def _sort_func(obj):
return (obj.bits, obj.code)
def reverse_bits(v, n):
a = 1 << 0
b = 1 << (n - 1)
z = 0
for i in range(n - 1, -1, -2):
z |= (v >> i) & a
z |= (v << i) & b
a <<= 1
b >>= 1
return z
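# Hedged sanity check (ours, not part of the original): reverse_bits mirrors
# the low n bits of v, e.g. 0b1101 over 4 bits becomes 0b1011.
assert reverse_bits(0b1101, 4) == 0b1011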
def reverse_bytes(v, n):
a = 0xff << 0
b = 0xff << (n - 8)
z = 0
for i in range(n - 8, -8, -16):
z |= (v >> i) & a
z |= (v << i) & b
a <<= 8
b >>= 8
return z
class HuffmanTable(object):
def __init__(self, bootstrap):
l = []
start, bits = bootstrap[0]
for finish, endbits in bootstrap[1:]:
if bits:
for code in range(start, finish):
l.append(HuffmanLength(code, bits))
start, bits = finish, endbits
if endbits == -1:
break
l.sort(key=HuffmanLength._sort_func)
self.table = l
def populate_huffman_symbols(self):
bits, symbol = -1, -1
for x in self.table:
symbol += 1
if x.bits != bits:
symbol <<= (x.bits - bits)
bits = x.bits
x.symbol = symbol
x.reverse_symbol = reverse_bits(symbol, bits)
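    # Note (ours): populate_huffman_symbols assigns canonical Huffman codes --
    # codes of equal bit length are consecutive integers, and the running code
    # is shifted left whenever the length grows.  E.g. lengths (2, 2, 3, 3)
    # give the codes 00, 01, 100, 101.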
    def tables_by_bits(self):
        d = {}
        for x in self.table:
            try:
                d[x.bits].append(x)
            except KeyError:
                d[x.bits] = [x]
        return d
def min_max_bits(self):
self.min_bits, self.max_bits = 16, -1
for x in self.table:
if x.bits < self.min_bits:
self.min_bits = x.bits
if x.bits > self.max_bits:
self.max_bits = x.bits
def _find_symbol(self, bits, symbol, table):
for h in table:
if h.bits == bits and h.reverse_symbol == symbol:
return h.code
return -1
def find_next_symbol(self, field, reversed=True):
cached_length = -1
cached = None
for x in self.table:
if cached_length != x.bits:
cached = field.snoopbits(x.bits)
cached_length = x.bits
if (reversed and x.reverse_symbol == cached) or (not reversed and x.symbol == cached):
field.readbits(x.bits)
return x.code
raise Exception("unfound symbol, even after end of table @%r"
% field.tell())
        # Unreachable fallback kept from the original implementation:
        for bits in range(self.min_bits, self.max_bits + 1):
            r = self._find_symbol(bits, field.snoopbits(bits), self.table)
            if 0 <= r:
                field.readbits(bits)
                return r
            elif bits == self.max_bits:
                raise Exception("unfound symbol, even after max_bits")
class OrderedHuffmanTable(HuffmanTable):
def __init__(self, lengths):
l = len(lengths)
z = list(zip(range(l), lengths)) + [(l, -1)]
HuffmanTable.__init__(self, z)
def code_length_orders(i):
return (16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3,
13, 2, 14, 1, 15)[i]
def distance_base(i):
return (1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193,
12289, 16385, 24577)[i]
def length_base(i):
return (3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35,
43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258)[i - 257]
def extra_distance_bits(n):
if 0 <= n <= 1:
return 0
elif 2 <= n <= 29:
return (n >> 1) - 1
else:
raise "illegal distance code"
def extra_length_bits(n):
if 257 <= n <= 260 or n == 285:
return 0
elif 261 <= n <= 284:
return ((n - 257) >> 2) - 1
else:
raise "illegal length code"
def move_to_front(l, c):
l.insert(0, l.pop(c))
# EDB WAS
# l[:] = l[c:c + 1] + l[0:c] + l[c + 1:]
def bwt_transform(L):
# Semi-inefficient way to get the character counts
if six.PY3:
F = bytes(sorted(L))
else:
F = b''.join(sorted(L))
base = []
for i in range(256):
base.append(F.find(six.int2byte(i)))
pointers = [-1] * len(L)
for i, symbol in enumerate(six.iterbytes(L)):
pointers[base[symbol]] = i
base[symbol] += 1
return pointers
def bwt_reverse(L, end):
out = deque([])
if len(L):
T = bwt_transform(L)
# STRAGENESS WARNING: There was a bug somewhere here in that
# if the output of the BWT resolves to a perfect copy of N
# identical strings (think exact multiples of 255 'X' here),
# then a loop is formed. When decoded, the output string would
# be cut off after the first loop, typically '\0\0\0\0\xfb'.
# The previous loop construct was:
#
# next = T[end]
# while next != end:
# out += L[next]
# next = T[next]
# out += L[next]
#
# For the moment, I've instead replaced it with a check to see
# if there has been enough output generated. I didn't figured
# out where the off-by-one-ism is yet---that actually produced
# the cyclic loop.
for i in xrange(len(L)):
end = T[end]
out.append(L[end])
if six.PY3:
return bytes(out)
else:
return b"".join(out)
def compute_used(b):
huffman_used_map = b.readbits(16)
map_mask = 1 << 15
used = deque([])
while map_mask > 0:
if huffman_used_map & map_mask:
huffman_used_bitmap = b.readbits(16)
bit_mask = 1 << 15
while bit_mask > 0:
                used += [bool(huffman_used_bitmap & bit_mask)]
bit_mask >>= 1
else:
used += [False] * 16
map_mask >>= 1
return used
def compute_selectors_list(b, huffman_groups):
selectors_used = b.readbits(15)
mtf = list(range(huffman_groups))
selectors_list = deque([])
for i in range(selectors_used):
# zero-terminated bit runs (0..62) of MTF'ed huffman table
c = 0
while b.readbits(1):
c += 1
if c >= huffman_groups:
raise "Bzip2 chosen selector greater than number of groups (max 6)"
if c >= 0:
mtf.insert(0, mtf.pop(c))
# EDB WAS
# move_to_front(mtf, c)
selectors_list.append(mtf[0])
return selectors_list
def compute_tables(b, huffman_groups, symbols_in_use):
groups_lengths = deque([])
for j in range(huffman_groups):
length = b.readbits(5)
lengths = []
for i in range(symbols_in_use):
if not 0 <= length <= 20:
raise "Bzip2 Huffman length code outside range 0..20"
while b.readbits(1):
length -= (b.readbits(1) * 2) - 1
lengths += [length]
groups_lengths += [lengths]
tables = deque([])
for g in groups_lengths:
codes = OrderedHuffmanTable(g)
codes.populate_huffman_symbols()
codes.min_max_bits()
tables.append(codes)
return tables
def decode_huffman_block(b, out):
randomised = b.readbits(1)
if randomised:
raise "Bzip2 randomised support not implemented"
pointer = b.readbits(24)
used = compute_used(b)
huffman_groups = b.readbits(3)
if not 2 <= huffman_groups <= 6:
raise Exception("Bzip2: Number of Huffman groups not in range 2..6")
selectors_list = compute_selectors_list(b, huffman_groups)
symbols_in_use = sum(used) + 2 # remember RUN[AB] RLE symbols
tables = compute_tables(b, huffman_groups, symbols_in_use)
favourites = [six.int2byte(i) for i, x in enumerate(used) if x]
selector_pointer = 0
decoded = 0
# Main Huffman loop
repeat = repeat_power = 0
buffer = deque([])
t = None
while True:
decoded -= 1
if decoded <= 0:
decoded = 50 # Huffman table re-evaluate/switch length
            if selector_pointer < len(selectors_list):
t = tables[selectors_list[selector_pointer]]
selector_pointer += 1
r = t.find_next_symbol(b, False)
if 0 <= r <= 1:
if repeat == 0:
repeat_power = 1
repeat += repeat_power << r
repeat_power <<= 1
continue
elif repeat > 0:
# Remember kids: If there is only one repeated
# real symbol, it is encoded with *zero* Huffman
# bits and not output... so buffer[-1] doesn't work.
buffer.append(favourites[0] * repeat)
repeat = 0
if r == symbols_in_use - 1:
break
else:
o = favourites[r - 1]
favourites.insert(0, favourites.pop(r - 1))
# EDB was
# move_to_front(favourites, r - 1)
buffer.append(o)
pass
nt = nearly_there = bwt_reverse(b"".join(buffer), pointer)
i = 0
# Pointless/irritating run-length encoding step
while i < len(nearly_there):
if i < len(nearly_there) - 4 and nt[i] == nt[i + 1] == nt[i + 2] == nt[i + 3]:
out.append(nearly_there[i:i + 1] * (ord(nearly_there[i + 4:i + 5]) + 4))
i += 5
else:
out.append(nearly_there[i:i + 1])
i += 1
# Sixteen bits of magic have been removed by the time we start decoding
def bzip2_main(input):
b = RBitfield(input)
method = b.readbits(8)
if method != ord('h'):
raise Exception(
"Unknown (not type 'h'uffman Bzip2) compression method")
blocksize = b.readbits(8)
if ord('1') <= blocksize <= ord('9'):
blocksize = blocksize - ord('0')
else:
raise Exception("Unknown (not size '0'-'9') Bzip2 blocksize")
out = deque([])
while True:
blocktype = b.readbits(48)
b.readbits(32) # crc
if blocktype == 0x314159265359: # (pi)
decode_huffman_block(b, out)
elif blocktype == 0x177245385090: # sqrt(pi)
b.align()
break
else:
raise Exception("Illegal Bzip2 blocktype")
return b''.join(out)
# Sixteen bits of magic have been removed by the time we start decoding
def gzip_main(field):
b = Bitfield(field)
method = b.readbits(8)
if method != 8:
raise Exception("Unknown (not type eight DEFLATE) compression method")
# Use flags, drop modification time, extra flags and OS creator type.
flags = b.readbits(8)
b.readbits(32) # mtime
b.readbits(8) # extra_flags
b.readbits(8) # os_type
if flags & 0x04: # structured GZ_FEXTRA miscellaneous data
xlen = b.readbits(16)
b.dropbytes(xlen)
while flags & 0x08: # original GZ_FNAME filename
if not b.readbits(8):
break
while flags & 0x10: # human readable GZ_FCOMMENT
if not b.readbits(8):
break
if flags & 0x02: # header-only GZ_FHCRC checksum
b.readbits(16)
    out = []  # a list, not a deque: the LZ77 copies below slice the output
while True:
lastbit = b.readbits(1)
blocktype = b.readbits(2)
if blocktype == 0:
b.align()
length = b.readbits(16)
if length & b.readbits(16):
raise Exception("stored block lengths do not match each other")
for i in range(length):
out.append(six.int2byte(b.readbits(8)))
elif blocktype == 1 or blocktype == 2: # Huffman
main_literals, main_distances = None, None
if blocktype == 1: # Static Huffman
static_huffman_bootstrap = [
(0, 8), (144, 9), (256, 7), (280, 8), (288, -1)]
static_huffman_lengths_bootstrap = [(0, 5), (32, -1)]
main_literals = HuffmanTable(static_huffman_bootstrap)
main_distances = HuffmanTable(static_huffman_lengths_bootstrap)
elif blocktype == 2: # Dynamic Huffman
literals = b.readbits(5) + 257
distances = b.readbits(5) + 1
code_lengths_length = b.readbits(4) + 4
l = [0] * 19
for i in range(code_lengths_length):
l[code_length_orders(i)] = b.readbits(3)
dynamic_codes = OrderedHuffmanTable(l)
dynamic_codes.populate_huffman_symbols()
dynamic_codes.min_max_bits()
# Decode the code_lengths for both tables at once,
# then split the list later
code_lengths = []
n = 0
while n < (literals + distances):
r = dynamic_codes.find_next_symbol(b)
if 0 <= r <= 15: # literal bitlength for this code
count = 1
what = r
elif r == 16: # repeat last code
count = 3 + b.readbits(2)
# Is this supposed to default to '0' if in the zeroth
# position?
what = code_lengths[-1]
elif r == 17: # repeat zero
count = 3 + b.readbits(3)
what = 0
elif r == 18: # repeat zero lots
count = 11 + b.readbits(7)
what = 0
else:
raise Exception(
"next code length is outside of the range 0 <= r <= 18")
code_lengths += [what] * count
n += count
main_literals = OrderedHuffmanTable(code_lengths[:literals])
main_distances = OrderedHuffmanTable(code_lengths[literals:])
# Common path for both Static and Dynamic Huffman decode now
main_literals.populate_huffman_symbols()
main_distances.populate_huffman_symbols()
main_literals.min_max_bits()
main_distances.min_max_bits()
literal_count = 0
while True:
r = main_literals.find_next_symbol(b)
if 0 <= r <= 255:
literal_count += 1
out.append(six.int2byte(r))
elif r == 256:
if literal_count > 0:
literal_count = 0
break
elif 257 <= r <= 285: # dictionary lookup
if literal_count > 0:
literal_count = 0
length_extra = b.readbits(extra_length_bits(r))
length = length_base(r) + length_extra
r1 = main_distances.find_next_symbol(b)
if 0 <= r1 <= 29:
distance = distance_base(
r1) + b.readbits(extra_distance_bits(r1))
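                    # Note (ours): an LZ77 back-reference may overlap its own
                    # output; when length > distance the copied region repeats
                    # itself, so the window is extended distance bytes at a
                    # time.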
while length > distance:
out += out[-distance:]
length -= distance
if length == distance:
out += out[-distance:]
else:
out += out[-distance:length - distance]
elif 30 <= r1 <= 31:
raise Exception("illegal unused distance symbol "
"in use @%r" % b.tell())
elif 286 <= r <= 287:
raise Exception("illegal unused literal/length symbol "
"in use @%r" % b.tell())
elif blocktype == 3:
raise Exception("illegal unused blocktype in use @%r" % b.tell())
if lastbit:
break
b.align()
b.readbits(32) # crc
b.readbits(32) # final_length
return "".join(out)
def bench_pyflake(loops, filename):
input_fp = open(filename, 'rb')
range_it = xrange(loops)
t0 = pyperf.perf_counter()
for _ in range_it:
input_fp.seek(0)
field = RBitfield(input_fp)
magic = field.readbits(16)
if magic == 0x1f8b: # GZip
out = gzip_main(field)
elif magic == 0x425a: # BZip2
out = bzip2_main(field)
else:
raise Exception("Unknown file magic %x, not a gzip/bzip2 file"
% hex(magic))
dt = pyperf.perf_counter() - t0
input_fp.close()
if hashlib.md5(out).hexdigest() != "afa004a630fe072901b1d9628b960974":
raise Exception("MD5 checksum mismatch")
return dt
if __name__ == '__main__':
runner = pyperf.Runner()
runner.metadata['description'] = "Pyflate benchmark"
    filename = os.path.join(  # os.path.dirname(__file__),
                            "test", "original", "data", "interpreter.tar.bz2")
    bench_pyflake(1, filename)
# runner.bench_time_func('pyflate', bench_pyflake, filename)
 | 20545 | 29.303835 | 98 | py |
scalene | scalene-master/scalene/scalene_version.py | """Current version of Scalene; reported by --version."""
scalene_version = "1.5.5"
| 84 | 20.25 | 56 | py |
scalene | scalene-master/scalene/__main__.py | import sys
import traceback
from scalene import scalene_profiler
def should_trace(s) -> bool:
if scalene_profiler.Scalene.is_done():
return False
return scalene_profiler.Scalene.should_trace(s)
def main():
try:
from scalene import scalene_profiler
scalene_profiler.Scalene.main()
except Exception as exc:
sys.stderr.write("ERROR: Calling scalene main function failed: %s\n" % exc)
traceback.print_exc()
sys.exit(1)
if __name__ == "__main__":
main()
| 523 | 20.833333 | 83 | py |
scalene | scalene-master/scalene/scalene_arguments.py | import argparse
class ScaleneArguments(argparse.Namespace):
"""Encapsulates all arguments and default values for Scalene."""
def __init__(self) -> None:
super().__init__()
self.cpu_only = False
self.cpu_percent_threshold = 1
# mean seconds between interrupts for CPU sampling.
self.cpu_sampling_rate = 0.01
# Size of allocation window (sample when footprint increases or decreases by this amount)
self.allocation_sampling_window = (
1549351 # sync with src/source/libscalene.cpp
)
self.html = False
self.json = False
self.column_width = (
132 # Note that Scalene works best with at least 132 columns.
)
self.malloc_threshold = 100
self.outfile = None
self.pid = 0
# if we profile all code or just target code and code in its child directories
self.profile_all = False
# how long between outputting stats during execution
self.profile_interval = float("inf")
# what function pathnames must contain to be output during profiling
self.profile_only = ""
# what function pathnames should never be output during profiling
self.profile_exclude = ""
# The root of the directory that has the files that should be profiled
self.program_path = ""
# reduced profile?
self.reduced_profile = False
# do we use virtual time or wallclock time (capturing system time and blocking)?
self.use_virtual_time = False
self.memory_leak_detector = True # experimental
self.web = True
self.port = 8088
self.cli = False
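# Hedged usage sketch (ours, not part of Scalene): since the class is an
# argparse.Namespace, defaults can be overridden attribute-style before the
# arguments are handed to the profiler, e.g.
#
#     args = ScaleneArguments()
#     args.cpu_sampling_rate = 0.05   # sample the CPU every 50 ms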
 | 1695 | 38.44186 | 97 | py |
scalene | scalene-master/scalene/replacement_signal_fns.py | import os
import signal
import sys
import traceback
from scalene.scalene_profiler import Scalene
@Scalene.shim
def replacement_signal_fns(scalene: Scalene) -> None:
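    # Note (ours): this shim monkey-patches signal.signal, signal.raise_signal,
    # and os.kill (plus signal.setitimer and signal.siginterrupt on POSIX) so
    # that client code touching Scalene's own timer signals is rerouted to a
    # spare signal (SIGUSR1 on POSIX, SIGFPE on Windows) instead of clobbering
    # the profiler's handlers.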
old_signal = signal.signal
if sys.version_info < (3, 8):
old_raise_signal = lambda s: os.kill(os.getpid(), s)
else:
old_raise_signal = signal.raise_signal
old_kill = os.kill
if sys.platform != "win32":
new_cpu_signal = signal.SIGUSR1
else:
new_cpu_signal = signal.SIGFPE
def replacement_signal(signum: int, handler): # type: ignore
all_signals = scalene.get_all_signals_set()
timer_signal, cpu_signal = scalene.get_timer_signals()
timer_signal_str = signal.strsignal(signum)
if signum == cpu_signal:
print(
f"WARNING: Scalene uses {timer_signal_str} to profile. If your code raises {timer_signal_str} from non-Python code, use SIGUSR1. "
"Code that raises signals from within Python code will be rerouted."
)
return old_signal(new_cpu_signal, handler)
if signum in all_signals:
print(
"Error: Scalene cannot profile your program because it (or one of its packages) "
"uses timers or signals that Scalene depends on. If you have encountered this warning, please file an issue using this URL: "
"https://github.com/plasma-umass/scalene/issues/new/choose"
)
exit(-1)
return old_signal(signum, handler)
def replacement_raise_signal(signum: int) -> None:
_, cpu_signal = scalene.get_timer_signals()
if signum == cpu_signal:
old_raise_signal(new_cpu_signal)
old_raise_signal(signum)
def replacement_kill(pid: int, signum: int) -> None:
_, cpu_signal = scalene.get_timer_signals()
if pid == os.getpid() or pid in scalene.child_pids:
if signum == cpu_signal:
return old_kill(pid, new_cpu_signal)
old_kill(pid, signum)
if sys.platform != "win32":
old_setitimer = signal.setitimer
old_siginterrupt = signal.siginterrupt
def replacement_siginterrupt(signum, flag): # type: ignore
all_signals = scalene.get_all_signals_set()
timer_signal, cpu_signal = scalene.get_timer_signals()
if signum == cpu_signal:
return old_siginterrupt(new_cpu_signal, flag)
if signum in all_signals:
print(
"Error: Scalene cannot profile your program because it (or one of its packages) "
"uses timers or signals that Scalene depends on. If you have encountered this warning, please file an issue using this URL: "
"https://github.com/plasma-umass/scalene/issues/new/choose"
)
return old_siginterrupt(signum, flag)
def replacement_setitimer(which, seconds, interval=0.0): # type: ignore
timer_signal, cpu_signal = scalene.get_timer_signals()
timer_signal_str = (
"SIGALRM" if timer_signal == signal.SIGALRM else "SIGVTALRM"
)
if which == timer_signal:
old = scalene.client_timer.get_itimer()
if seconds == 0:
scalene.client_timer.reset()
else:
scalene.client_timer.set_itimer(seconds, interval)
return old
return old_setitimer(which, seconds, interval)
signal.setitimer = replacement_setitimer
signal.siginterrupt = replacement_siginterrupt
signal.signal = replacement_signal
if sys.version_info >= (3, 8):
signal.raise_signal = replacement_raise_signal
os.kill = replacement_kill
 | 3805 | 38.645833 | 146 | py |
scalene | scalene-master/scalene/replacement_lock.py | import sys
import threading
import time
from typing import Any
from scalene.scalene_profiler import Scalene
@Scalene.shim
def replacement_lock(scalene: Scalene) -> None:
class ReplacementLock(object):
"""Replace lock with a version that periodically yields and updates sleeping status."""
def __init__(self) -> None:
# Cache the original lock (which we replace)
# print("INITIALIZING LOCK")
self.__lock: threading.Lock = scalene.get_original_lock()
def acquire(self, blocking: bool = True, timeout: float = -1) -> bool:
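            # Note (ours): rather than blocking indefinitely, the lock is
            # acquired in slices of roughly one thread-switch interval, with
            # the thread flagged as sleeping around each attempt so Scalene
            # can attribute the wait as blocked time rather than CPU time.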
tident = threading.get_ident()
if blocking == 0:
blocking = False
start_time = time.perf_counter()
if blocking:
if timeout < 0:
interval = sys.getswitchinterval()
else:
interval = min(timeout, sys.getswitchinterval())
else:
interval = -1
while True:
scalene.set_thread_sleeping(tident)
acquired_lock = self.__lock.acquire(blocking, interval)
scalene.reset_thread_sleeping(tident)
if acquired_lock:
return True
if not blocking:
return False
# If a timeout was specified, check to see if it's expired.
if timeout != -1:
end_time = time.perf_counter()
if end_time - start_time >= timeout:
return False
def release(self) -> None:
self.__lock.release()
def locked(self) -> bool:
return self.__lock.locked()
def _at_fork_reinit(self) -> None:
try:
self.__lock._at_fork_reinit() # type: ignore
except AttributeError:
pass
def __enter__(self) -> None:
self.acquire()
def __exit__(self, type: str, value: str, traceback: Any) -> None:
self.release()
threading.Lock = ReplacementLock # type: ignore
 | 2126 | 32.234375 | 95 | py |