repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
CCasGNN | CCasGNN-main/main.py |
from CCasGNN import CCasGNN_Trainer
from utils import Logger
from param_parser import parameter_parser
import sys
import time
def main():
    """Entry point: parse arguments, tee stdout into the result log, train."""
    t_start = time.time()
    args = parameter_parser()
    # Mirror everything printed from here on into the result log file.
    sys.stdout = Logger(args.result_log)
    trainer = CCasGNN_Trainer(args)
    trainer.fit()
    # trainer.test()
    elapsed_minutes = (time.time() - t_start) / 60
    print('consume ', elapsed_minutes, ' minutes')
if __name__ == '__main__':
main() | 418 | 21.052632 | 53 | py |
CCasGNN | CCasGNN-main/utils.py |
import sys
class Logger(object):
    """Tee-style stdout replacement that mirrors all output into a log file."""

    def __init__(self, fileN="Default.log"):
        # Keep a handle on the real stdout so console output still appears.
        self.terminal = sys.stdout
        self.log = open(fileN, "a")

    def write(self, message):
        """Write *message* to both the console and the log file."""
        for stream in (self.terminal, self.log):
            stream.write(message)
        # Flush eagerly so the log stays current even if the run crashes.
        self.log.flush()

    def flush(self):
        """No-op: the log file is already flushed on every write."""
        # self.log.flush()
        pass
CCasGNN | CCasGNN-main/layers.py | #encoding: utf-8
import torch
from torch_geometric.nn import GCNConv, GATConv
from math import sqrt
class Positional_GAT(torch.nn.Module):
    """Two-layer graph-attention network with positional embeddings.

    The location embedding is concatenated onto the node features before
    each attention layer, so positional information is injected twice.
    """

    def __init__(self, in_channels, out_channels, n_heads, location_embedding_dim, filters_1, filters_2, dropout):
        super(Positional_GAT, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.n_heads = n_heads
        self.filters_1 = filters_1
        self.filters_2 = filters_2
        self.dropout = dropout
        self.location_embedding_dim = location_embedding_dim
        self.setup_layers()

    def setup_layers(self):
        """Build both attention layers."""
        self.GAT_1 = GATConv(in_channels=self.in_channels,
                             out_channels=self.filters_1,
                             heads=self.n_heads,
                             dropout=0.1)
        # The second layer also sees the re-appended location embedding,
        # hence the widened input; concat=False averages the heads.
        self.GAT_2 = GATConv(in_channels=self.filters_1 * self.n_heads + self.location_embedding_dim,
                             out_channels=self.out_channels,
                             heads=self.n_heads,
                             dropout=0.1,
                             concat=False)

    def forward(self, edge_indices, features, location_embedding):
        """Run both attention layers and return the updated node features."""
        hidden = torch.cat((features, location_embedding), dim=-1)
        hidden = torch.nn.functional.relu(self.GAT_1(hidden, edge_indices))
        hidden = torch.nn.functional.dropout(hidden,
                                             p=self.dropout,
                                             training=self.training)
        hidden = torch.cat((hidden, location_embedding), dim=-1)
        return self.GAT_2(hidden, edge_indices)
class Positional_GCN(torch.nn.Module):
    """Two-layer GCN with positional embeddings appended before each convolution."""

    def __init__(self, in_channels, out_channels, location_embedding_dim, filters_1, filters_2, dropout):
        """
        GCN function
        :param in_channels: nodes' input feature dimensions
        :param out_channels: nodes embedding dimension
        :param location_embedding_dim: width of the positional embedding
        :param filters_1: hidden width of the first convolution
        :param filters_2: stored but unused here; kept for interface symmetry
        :param dropout: dropout probability applied between the convolutions
        """
        super(Positional_GCN, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.filters_1 = filters_1
        self.filters_2 = filters_2
        self.dropout = dropout
        self.location_embedding_dim = location_embedding_dim
        self.setup_layers()

    def setup_layers(self):
        """Build both graph convolutions."""
        self.convolution_1 = GCNConv(self.in_channels, self.filters_1)
        # The second layer also receives the re-appended location embedding.
        self.convolution_2 = GCNConv(self.filters_1 + self.location_embedding_dim, self.out_channels)

    def forward(self, edge_indices, features, location_embedding):
        """
        making convolution
        :param edge_indices: 2 * edge_number
        :param features: N * feature_size
        :return: N * out_channels node embeddings
        """
        hidden = torch.cat((features, location_embedding), dim=-1)
        hidden = torch.nn.functional.relu(self.convolution_1(hidden, edge_indices))
        hidden = torch.nn.functional.dropout(hidden,
                                             p=self.dropout,
                                             training=self.training)
        hidden = torch.cat((hidden, location_embedding), dim=-1)
        return self.convolution_2(hidden, edge_indices)
class MultiHeadGraphAttention(torch.nn.Module):
    """Multi-head self-attention over a batch of node-feature sequences.

    Unlike standard scaled dot-product attention, the scores are passed
    through a LeakyReLU instead of a softmax before weighting the values.
    """

    def __init__(self, num_heads, dim_in, dim_k, dim_v):
        super(MultiHeadGraphAttention, self).__init__()
        # dim_k and dim_v must split evenly across the heads.
        assert dim_k % num_heads == 0 and dim_v % num_heads == 0
        self.num_heads = num_heads
        self.dim_in = dim_in
        self.dim_k = dim_k
        self.dim_v = dim_v
        self.linear_q = torch.nn.Linear(dim_in, dim_k, bias=False)
        self.linear_k = torch.nn.Linear(dim_in, dim_k, bias=False)
        self.linear_v = torch.nn.Linear(dim_in, dim_v, bias=False)
        self.leaky_relu = torch.nn.LeakyReLU(negative_slope=0.2)
        # 1/sqrt(d_head): the usual dot-product scaling factor.
        self._nor_fact = 1 / sqrt(dim_k // num_heads)

    def forward(self, x):
        """x: (batch, n, dim_in) -> attention output of shape (batch, n, dim_v)."""
        batch, n, dim_in = x.shape
        assert dim_in == self.dim_in
        heads = self.num_heads
        d_k = self.dim_k // heads  # per-head key/query width
        d_v = self.dim_v // heads  # per-head value width
        # Project and fold into (batch, heads, n, d_head).
        queries = self.linear_q(x).reshape(batch, n, heads, d_k).transpose(1, 2)
        keys = self.linear_k(x).reshape(batch, n, heads, d_k).transpose(1, 2)
        values = self.linear_v(x).reshape(batch, n, heads, d_v).transpose(1, 2)
        scores = torch.matmul(queries, keys.transpose(2, 3)) * self._nor_fact  # batch, nh, n, n
        # label = torch.where(dist == 0, torch.tensor(1), torch.tensor(0))
        # dist.data.masked_fill_(label, -float("inf"))
        scores = self.leaky_relu(scores)
        # dist = torch.where(torch.isnan(dist), torch.full_like(dist,0), dist)
        weighted = torch.matmul(scores, values)  # batch, nh, n, dv
        # Merge the heads back into a single (batch, n, dim_v) tensor.
        return weighted.transpose(1, 2).reshape(batch, n, self.dim_v)
class dens_Net(torch.nn.Module):
def __init__(self,dens_hiddensize, dens_dropout, dens_inputsize, dens_outputsize):
super(dens_Net, self).__init__()
self.inputsize = dens_inputsize
self.dens_hiddensize = dens_hiddensize
self.dens_dropout = dens_dropout
self.outputsize = dens_outputsize
self.setup_layers()
def setup_layers(self):
self.dens_net = torch.nn.Sequential(
torch.nn.Linear(self.inputsize, self.dens_hiddensize),
torch.nn.Dropout(p=self.dens_dropout),
torch.nn.Linear(self.dens_hiddensize, self.dens_hiddensize),
torch.nn.Dropout(p=self.dens_dropout),
torch.nn.Linear(self.dens_hiddensize, self.outputsize)
)
def forward(self, x1, x2):
return torch.nn.functional.relu(self.dens_net(x1)), torch.nn.functional.relu(self.dens_net(x2))
# return torch.nn.functional.relu(self.dens_net(x1))
class fuse_gate(torch.nn.Module):
    """Learnable two-way weighting that fuses the GAT and GCN predictions."""

    def __init__(self, batch_size, in_dim):
        super(fuse_gate, self).__init__()
        self.indim = in_dim
        self.batch_size = batch_size
        self.setup_layers()

    def setup_layers(self):
        # Both weights start at 0.5 so the two branches begin equally trusted.
        self.omega = torch.nn.Parameter(torch.tensor([[0.5], [0.5]]))

    def forward(self, x):
        """x: (2, ...) stacked predictions -> (weighted sum, weight_1, weight_2)."""
        weights = self.omega.transpose(1, 0)
        fused = torch.matmul(weights, x)
        return fused, self.omega[0], self.omega[1]
| 6,309 | 41.635135 | 180 | py |
CCasGNN | CCasGNN-main/param_parser.py | #encoding:utf-8
import argparse
def parameter_parser():
    """Build and parse the command-line arguments for CCasGNN.

    Returns the populated :class:`argparse.Namespace`.
    """
    parser = argparse.ArgumentParser(description="Run CCasGNN.")
    parser.add_argument('--graph-file-path',
                        nargs='?',
                        default='./weibo/weibo_24hours_obs120.json',
                        help='Folder with graph pair jsons.')
    # (flag, type, default, help) for every plain typed option.
    typed_options = [
        ('--result-log', str, './log/weibo_24hours_obs120.log', ''),
        ('--number-of-features', int, 2, ''),
        ('--number-of-nodes', int, 100, ''),
        ('--epochs', int, 50, "Number of training epochs. Default is 100."),
        ('--check-point', int, 5, ""),
        ('--train-ratio', float, 0.7, ''),
        ('--valid-ratio', float, 0.1, ''),
        ('--test-ratio', float, 0.2, ''),
        ('--batch-size', int, 100, ''),
        ('--learning-rate', float, 0.005, ''),
        ('--user-embedding-dim', int, 8, 'the number of features'),
        ('--location-embedding-dim', int, 16, ''),
        ('--gcn-out-channel', int, 32, "gcn out nodes feature size"),
        ('--gat-n-heads', int, 2, ""),
        ('--gcn-filters-1', int, 32, ""),
        ('--gcn-filters-2', int, 32, ""),
        ('--gcn-dropout', float, 0.1, ""),
        ('--att-num-heads', int, 2, ""),
        ('--att-dim-k', int, 16, ""),
        ('--att-dim-v', int, 32, ""),
        ('--attn-dropout', float, 0.2, ""),
        ('--weight-decay', float, 0.001, "Adam weight decay. Default is 0.001."),
        ('--dens-hiddensize', int, 32, ""),
        ('--dens-dropout', float, 0.1, ""),
        ('--dens-outsize', int, 1, ""),
    ]
    for flag, arg_type, default, help_text in typed_options:
        parser.add_argument(flag, type=arg_type, default=default, help=help_text)
    return parser.parse_args()
CCasGNN | CCasGNN-main/CCasGNN.py | #encoding: utf-8
import torch
import json
import numpy as np
import copy
import time
import sys
import math
from layers import Positional_GCN, MultiHeadGraphAttention, dens_Net, Positional_GAT, fuse_gate
import scipy.stats as sci
class CCasGNN(torch.nn.Module):
    """Dual-branch cascade-prediction model.

    One branch runs a positional GAT over the directed cascade graph; the
    other runs a positional GCN over the undirected graph followed by
    multi-head attention.  Each branch produces a scalar prediction through
    a shared dense network, and a learned two-weight gate fuses them.
    """

    def __init__(self, args):
        super(CCasGNN, self).__init__()
        self.args = args
        self.number_of_features = self.args.number_of_features
        self.number_of_nodes = self.args.number_of_nodes
        self._setup_layers()

    def _setup_GCN_layers(self):
        # Input width = user-embedding features plus the appended positional embedding.
        self.GCN_layers = Positional_GCN(in_channels=self.args.user_embedding_dim + self.args.location_embedding_dim,
                                         out_channels=self.args.gcn_out_channel,
                                         location_embedding_dim=self.args.location_embedding_dim,
                                         filters_1=self.args.gcn_filters_1,
                                         filters_2=self.args.gcn_filters_2,
                                         dropout=self.args.gcn_dropout)  # self.args.user_embedding_dim

    def _setup_GAT_layers(self):
        # Mirrors the GCN branch sizing; both branches emit gcn_out_channel features.
        self.GAT_layers = Positional_GAT(in_channels=self.args.user_embedding_dim + self.args.location_embedding_dim,
                                         out_channels=self.args.gcn_out_channel,
                                         n_heads=self.args.gat_n_heads,
                                         location_embedding_dim=self.args.location_embedding_dim,
                                         filters_1=self.args.gcn_filters_1,
                                         filters_2=self.args.gcn_filters_2,
                                         dropout=self.args.gcn_dropout)  # self.number_of_features + self.args.location_embedding_dim

    def _setup_MultiHead_att_layers(self):
        # Self-attention applied on top of the GCN branch's node embeddings.
        self.MultiHead_att_layers = MultiHeadGraphAttention(num_heads=self.args.att_num_heads,
                                                            dim_in=self.args.gcn_out_channel,
                                                            dim_k=self.args.att_dim_k,
                                                            dim_v=self.args.att_dim_v)

    def _setup_dens_layers(self):
        # Shared MLP that maps each branch's graph-level vector to a scalar.
        self.dens_layers = dens_Net(dens_inputsize=self.args.gcn_out_channel,
                                    dens_hiddensize=self.args.dens_hiddensize,
                                    dens_dropout=self.args.dens_dropout,
                                    dens_outputsize=self.args.dens_outsize
                                    )  # self.args.attn_out_dim

    def _setup_fuse_layers(self):
        # Gate that combines the two scalar branch predictions.
        self.fuse_layers = fuse_gate(batch_size=1,
                                     in_dim=2)

    def _setup_layers(self):
        self._setup_GCN_layers()
        self._setup_MultiHead_att_layers()
        self._setup_dens_layers()
        self._setup_GAT_layers()
        self._setup_fuse_layers()

    def forward(self, data):
        """Predict cascade growth for one padded graph sample.

        ``data`` is the dict built by CCasGNN_Trainer.create_input_data:
        features/embeddings are padded to ``number_of_nodes`` rows and are
        truncated here to the graph's true node count.

        Returns (fused prediction, gate weight 1, gate weight 2,
        GAT-branch prediction, GCN-branch prediction).
        """
        true_nodes_num = data["true_nodes_num"]
        features = data['features'][:true_nodes_num]
        edges = data['edges']
        undirected_edges = data['undirected_edges']
        location_embedding = data['location_embedding'][:true_nodes_num]
        GAT_representation = torch.nn.functional.relu(self.GAT_layers(edges, features, location_embedding))
        # nodes_num * feature_num (inputs were already truncated; this slice is a no-op safeguard)
        GAT_representation = GAT_representation[:true_nodes_num]
        # Mean-pool node embeddings into a single graph-level vector.
        GAT_representation = torch.mean(GAT_representation, dim=0, keepdim=False)
        GCN_representation = torch.nn.functional.relu(self.GCN_layers(undirected_edges, features, location_embedding))
        GCN_representation = GCN_representation[:true_nodes_num]  # nodes_num * feature_num
        GCN_representation = GCN_representation.unsqueeze(dim=0)  # batch_size * nodes_num * feature_num
        #
        GCN_att_representation = self.MultiHead_att_layers(GCN_representation)  # batch_size * nodes_num * feature_num
        GCN_att_representation = torch.mean(GCN_att_representation, dim=1, keepdim=False)  # batch_size * feature_num
        GCN_squeeze_att_representation = GCN_att_representation.squeeze(dim=0)
        # Both graph-level vectors go through the same dense network.
        GAT_pred, GCN_pred = self.dens_layers(GAT_representation, GCN_squeeze_att_representation)
        model_predict = torch.cat((GAT_pred, GCN_pred), dim=0)
        prediction, omega1, omega2 = self.fuse_layers(model_predict)
        return prediction, omega1, omega2, GAT_pred, GCN_pred
class CCasGNN_Trainer(torch.nn.Module):
    """Training / evaluation harness for the CCasGNN model.

    Loads the cascade graphs from JSON, splits them into train/valid/test
    batches, and drives the optimization loop with periodic checkpointing.
    """

    def __init__(self, args):
        super(CCasGNN_Trainer, self).__init__()
        self.args = args
        self.setup_model()

    def setup_model(self):
        """Load the dataset and instantiate the model."""
        self.load_graph_data()
        self.model = CCasGNN(self.args)

    def load_graph_data(self):
        """Read the graph JSON file and split it into batched subsets."""
        self.number_of_nodes = self.args.number_of_nodes
        self.number_of_features = self.args.number_of_features
        # Context manager closes the file handle (the original leaked it).
        with open(self.args.graph_file_path, 'r') as f:
            self.graph_data = json.load(f)
        N = len(self.graph_data)  # the number of graphs
        train_start, valid_start, test_start = \
            0, int(N * self.args.train_ratio), int(N * (self.args.train_ratio + self.args.valid_ratio))
        train_graph_data = self.graph_data[0:valid_start]  # list type [dict,dict,...]
        valid_graph_data = self.graph_data[valid_start:test_start]
        test_graph_data = self.graph_data[test_start:N]
        self.train_batches, self.valid_batches, self.test_batches = [], [], []
        for i in range(0, len(train_graph_data), self.args.batch_size):
            self.train_batches.append(train_graph_data[i:i + self.args.batch_size])
        for j in range(0, len(valid_graph_data), self.args.batch_size):
            self.valid_batches.append(valid_graph_data[j:j + self.args.batch_size])
        for k in range(0, len(test_graph_data), self.args.batch_size):
            self.test_batches.append(test_graph_data[k:k + self.args.batch_size])

    def create_edges(self, data):
        """
        Create edge-index matrices for one graph.

        :param data: graph dict with 'nodes' and 'edges' entries
        :return: (edges, undirected_edges), each a 2 * edge_number LongTensor
        """
        self.nodes_map = [str(nodes_id) for nodes_id in data['nodes']]
        self.true_nodes_num = len(data['nodes'])
        edges = [[self.nodes_map.index(str(edge[0])), self.nodes_map.index(str(edge[1]))] for edge in data['edges']]
        # Append the reversed edges so the GCN branch sees an undirected graph.
        undirected_edges = edges + [[self.nodes_map.index(str(edge[1])), self.nodes_map.index(str(edge[0]))] for edge in data['edges']]
        return torch.t(torch.LongTensor(edges)), torch.t(torch.LongTensor(undirected_edges))

    def create_location_embedding(self, omega=0.001):
        """Sinusoidal (transformer-style) positional embedding per node index."""
        location_dim = self.args.location_embedding_dim
        location_emb = torch.zeros(self.number_of_nodes, location_dim)
        for i in range(self.number_of_nodes):
            for j in range(location_dim):
                if j % 2 == 0:
                    location_emb[i][j] = math.sin(i * math.pow(omega, j / location_dim))
                else:
                    location_emb[i][j] = math.cos(i * math.pow(omega, (j - 1) / location_dim))
        return location_emb

    def create_target(self, data):
        """Regression target: the cascade's final activated size."""
        return torch.tensor([data['activated_size']])

    def create_features(self, data):
        """Node-feature matrix, zero-padded up to `number_of_nodes` rows."""
        features = np.zeros((self.number_of_nodes, self.args.user_embedding_dim))
        # features = np.zeros((self.number_of_nodes, self.number_of_features))
        for nodes_id in data['nodes']:
            features[self.nodes_map.index(str(nodes_id))][:self.args.user_embedding_dim] = data['nodes_embedding'][str(nodes_id)]
            # features[self.nodes_map.index(str(nodes_id))][self.args.user_embedding_dim:] = data['location_embedding'][str(nodes_id)]
        features = torch.FloatTensor(features)
        return features

    def create_input_data(self, data):
        """
        :param data: one data in the train/valid/test graph data
        :return: to_pass_forward: Data dictionary, and the target tensor
        """
        to_pass_forward = dict()
        activated_size = self.create_target(data)
        edges, undirected_edges = self.create_edges(data)
        features = self.create_features(data)
        # BUGFIX: the original called self.create_user_embedding(data) here,
        # but no such method exists on this class (AttributeError at runtime)
        # and its result was never used — the call has been removed.
        location_embedding = self.create_location_embedding(omega=0.001)
        to_pass_forward["edges"] = edges
        to_pass_forward["undirected_edges"] = undirected_edges
        to_pass_forward["features"] = features
        to_pass_forward["true_nodes_num"] = self.true_nodes_num
        to_pass_forward['location_embedding'] = location_embedding
        return to_pass_forward, activated_size

    def create_forward_data(self, data_batches):
        """Convert raw graph batches into (model-input, target) batches."""
        data_x, data_y = [], []
        for data_batch in data_batches:
            data_x_tmp, data_y_tmp = [], []
            for each_data in data_batch:
                input_data, target = self.create_input_data(each_data)
                data_x_tmp.append(input_data)
                data_y_tmp.append(target)
            data_x.append(copy.deepcopy(data_x_tmp))
            data_y.append(copy.deepcopy(data_y_tmp))
        return data_x, data_y

    def fit(self):
        """Train the model, evaluating on valid/test sets every check_point epochs."""
        print('\nLoading data.\n')
        self.model.train()
        train_data_x, train_data_y = self.create_forward_data(self.train_batches)
        valid_data_x, valid_data_y = self.create_forward_data(self.valid_batches)
        test_data_x, test_data_y = self.create_forward_data(self.test_batches)
        optimizer = torch.optim.Adam(self.model.parameters(),
                                     lr=self.args.learning_rate,
                                     weight_decay=self.args.weight_decay)
        time_start = time.time()
        print('\nTraining started.\n')
        for epoch in range(self.args.epochs):
            losses = 0.
            average_loss = 0.
            for step, (train_x_batch, train_y_batch) in enumerate(zip(train_data_x, train_data_y)):
                optimizer.zero_grad()
                GAT_prediction_tensor = torch.tensor([])
                GCN_prediction_tensor = torch.tensor([])
                target_tensor = torch.tensor([])
                for k, (train_x, train_y) in enumerate(zip(train_x_batch, train_y_batch)):
                    prediction = self.model(train_x)
                    GAT_prediction_tensor = torch.cat((GAT_prediction_tensor, prediction[3].float()), 0)
                    GCN_prediction_tensor = torch.cat((GCN_prediction_tensor, prediction[4].float()), 0)
                    # Train against log2(size + 1), i.e. an MSLE-style target.
                    target_tensor = torch.cat((target_tensor, torch.log2(train_y.float() + 1)), 0)
                    omega1 = prediction[1].data.float()
                    omega2 = prediction[2].data.float()
                GAT_loss = torch.nn.functional.mse_loss(target_tensor, GAT_prediction_tensor)
                GCN_loss = torch.nn.functional.mse_loss(target_tensor, GCN_prediction_tensor)
                # Weight each branch's loss by the (detached) fuse-gate weights.
                loss = omega1 * GAT_loss + omega2 * GCN_loss
                loss.backward()
                optimizer.step()
                losses = losses + loss.item()
                average_loss = losses / (step + 1)
            print('CCasGNN train MSLE loss in ', epoch + 1, ' epoch = ', average_loss)
            time_now = time.time()
            print('the rest of running time about:', (((time_now - time_start) / (epoch + 1)) * (self.args.epochs - epoch)) / 60, ' minutes')
            print('\n')
            if (epoch + 1) % self.args.check_point == 0:
                print('epoch ', epoch + 1, ' evaluating.')
                self.evaluation(valid_data_x, valid_data_y)
                self.test(test_data_x, test_data_y)
                # BUGFIX: evaluation()/test() switch the model to eval mode and
                # the original never switched back, so every epoch after the
                # first checkpoint trained with dropout disabled.
                self.model.train()

    def evaluation(self, valid_x_batches, valid_y_batches):
        """Report mean MSLE over the validation batches (puts model in eval mode)."""
        self.model.eval()
        losses = 0.
        average_loss = 0.
        for step, (valid_x_batch, valid_y_batch) in enumerate(zip(valid_x_batches, valid_y_batches)):
            loss = 0.
            prediction_tensor = torch.tensor([])
            target_tensor = torch.tensor([])
            for (valid_x, valid_y) in zip(valid_x_batch, valid_y_batch):
                prediction = self.model(valid_x)
                prediction_tensor = torch.cat((prediction_tensor, prediction[0].float()), 0)
                target_tensor = torch.cat((target_tensor, torch.log2(valid_y.float() + 1)), 0)
            loss = torch.nn.functional.mse_loss(target_tensor, prediction_tensor)
            losses = losses + loss.item()
            average_loss = losses / (step + 1)
        print('#####CCasGNN valid MSLE loss in this epoch = ', average_loss)
        print('\n')

    def test(self, test_x_batches, test_y_batches):
        """Report mean MSLE and a Pearson statistic over the test batches."""
        print("\n\nScoring.\n")
        self.model.eval()
        losses = 0.
        average_loss = 0.
        all_test_tensor = torch.tensor([])
        all_true_tensor = torch.tensor([])
        for step, (test_x_batch, test_y_batch) in enumerate(zip(test_x_batches, test_y_batches)):
            loss = 0.
            prediction_tensor = torch.tensor([])
            target_tensor = torch.tensor([])
            for (test_x, test_y) in zip(test_x_batch, test_y_batch):
                prediction = self.model(test_x)
                prediction_tensor = torch.cat((prediction_tensor, prediction[0].float()), 0)
                all_test_tensor = torch.cat((all_test_tensor, prediction[0].float()), dim=0)
                target_tensor = torch.cat((target_tensor, torch.log2(test_y.float() + 1)), 0)
                all_true_tensor = torch.cat((all_true_tensor, torch.log2(test_y.float() + 1)), dim=0)
            loss = torch.nn.functional.mse_loss(target_tensor, prediction_tensor)
            losses = losses + loss.item()
            average_loss = losses / (step + 1)
        all_test_np = all_test_tensor.detach().numpy()
        all_true_np = all_true_tensor.detach().numpy()
        sub_np = all_test_np - all_true_np
        # NOTE(review): this correlates the prediction *error* with the true
        # values rather than prediction vs truth — confirm that is intended.
        print('correlation: ', sci.pearsonr(sub_np, all_true_np))
        print('#####CCasGNN test MSLE loss = ', average_loss)
        print('\n')
| 13,767 | 50.373134 | 136 | py |
pyEPR | pyEPR-master/setup.py | """
Python (py) Energy-Participation-Ratio (EPR) package
pyEPR is an open source, BSD-licensed library providing high-efficiency,
easy-to-use analysis functions and automation for the design of quantum
chips based on superconducting quantum circuits, both distributed and lumped.
pyEPR interfaces the classical distributed microwave analysis with that of
quantum structures and Hamiltonians. It is chiefly based on the energy participation
ratio approach; however, it has since v0.4 extended to cover a broad range of
design approaches. pyEPR straddles the analysis from Maxwell’s to Schrodinger’s
equations, and converts the solutions of distributed microwave (typically eigenmode
simulations) to a fully diagonalized spectrum of the energy levels, couplings,
and key parameters of a many-body quantum Hamiltonian.
Read the docs: https://pyepr-docs.readthedocs.io/en/latest/
Github page: https://github.com/zlatko-minev/pyEPR
"""
from pathlib import Path
from setuptools import setup, find_packages
# Directory containing this setup.py; used to resolve sibling files.
here = Path(__file__).parent.absolute()

# Get the long description from the README file
with open(here / "README.md", encoding="utf-8") as f:
    long_description = f.read()

# Runtime dependencies, one requirement specifier per line.
with open(here / "requirements.txt", encoding="utf-8") as f:
    requirements = f.read().splitlines()

# The module docstring's first line doubles as the short package description.
doclines = __doc__.split('\n')
setup(
    # Distribution name on PyPI (the importable package is `pyEPR`).
    name='pyEPR-quantum',
    version='0.9.0',
    description=doclines[0],
    long_description=long_description,
    long_description_content_type="text/markdown",
    author='Zlatko K. Minev',
    packages=find_packages(),
    author_email='zlatko.minev@aya.yale.edu',
    maintainer='Zlatko Minev, pyEPR team',
    license='BSD-3-Clause',
    url=r'https://github.com/zlatko-minev/pyEPR',
    classifiers=[
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: MacOS", "Operating System :: POSIX :: Linux",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Topic :: Scientific/Engineering", "Environment :: Console",
        "License :: OSI Approved :: Apache Software License"
    ],
    python_requires=">=3.5, <4",
    # install_requires=['numpy','pandas','pint','matplotlib','addict','sympy','IPython'],
    # Dependencies are read from requirements.txt above.
    install_requires=requirements)
| 2,596 | 40.887097 | 89 | py |
pyEPR | pyEPR-master/_tutorial_notebooks/1A Startup_example_hfss_files/startup_example_script.py | # -*- coding: utf-8 -*-
"""
Example startup script to perform full quantization of a two qubit, one cavity Josephson circuit.
The results are saved, printed, and nicely plotted.
------~~~~!!!!------~~~~
Please also see the Jupyter notebook tutorials!
------~~~~!!!!------~~~~
@author: Zlatko
"""
from pyEPR import ProjectInfo, DistributedAnalysis, QuantumAnalysis
# 1. Project and design. Open link to HFSS controls.
project_info = ProjectInfo('c:/sims',
project_name = 'two_qubit_one_cavity', # Project file name (string). "None" will get the current active one.
design_name = 'Alice_Bob' # Design name (string). "None" will get the current active one.
)
# 2a. Junctions. Specify junctions in HFSS model
project_info.junctions['jAlice'] = {'Lj_variable':'LJAlice', 'rect':'qubitAlice', 'line': 'alice_line', 'length':0.0001}
project_info.junctions['jBob'] = {'Lj_variable':'LJBob', 'rect':'qubitBob', 'line': 'bob_line', 'length':0.0001}
# 2b. Dissipative elements.
project_info.dissipative['dielectrics_bulk'] = ['si_substrate'] # supply names here, there are more options in project_info.dissipative.
project_info.dissipative['dielectric_surfaces'] = ['interface']
# 3. Run analysis
epr_hfss = DistributedAnalysis(project_info)
epr_hfss.do_EPR_analysis()
# 4. Hamiltonian analysis
epr = QuantumAnalysis(epr_hfss.data_filename)
epr.analyze_all_variations(cos_trunc = 8, fock_trunc = 7)
epr.plot_hamiltonian_results() | 1,547 | 39.736842 | 142 | py |
pyEPR | pyEPR-master/scripts/Alec/7ghz/7ghz_pyEPR.py | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 23 10:34:00 2017
@author: alec-eickbusch
"""
from pyEPR import *
if 1:
# Specify the HFSS project to be analyzed
project_info = ProjectInfo(r"C:\Users\awe4\Documents\Simulations\HFSS\11ghz\\")
project_info.project_name = '2017_08_Zlatko_Shyam_AutStab' # Name of the project file (string). "None" will get the current active one.
project_info.design_name = 'pyEPR_2_chips' # Name of the design file (string). "None" will get the current active one.
project_info.setup_name = None # Name of the setup(string). "None" will get the current active one.
## Describe the junctions in the HFSS design
project_info.junctions['jAlice'] = {'rect':'qubitAlice', 'line': 'alice_line', 'Lj_variable':'LJAlice', 'length':0.0001}
project_info.junctions['jBob'] = {'rect':'qubitBob', 'line': 'bob_line', 'Lj_variable':'LJBob', 'length':0.0001}
# Dissipative elements EPR
project_info.dissipative['dielectric_surfaces'] = None # supply names here, there are more options in project_info.dissipative.
# Run analysis
epr_hfss = DistributedAnalysis(project_info)
epr_hfss.do_EPR_analysis()
if 0: # Hamiltonian analysis
filename = epr_hfss.data_filename
#filename = r'C:\\Users\\rslqulab\\Desktop\\zkm\\2017_pyEPR_data\\\\/2017_08_Zlatko_Shyam_AutStab/2 pyEPR/2 pyEPR_20170825_170550.hdf5'
epr = QuantumAnalysis(filename)
epr._renorm_pj = False
#result = epr.analyze_variation('1', cos_trunc = 8, fock_trunc = 7)
epr.analyze_all_variations(cos_trunc = 8, fock_trunc = 7)
epr.plot_hamiltonian_results()
| 1,651 | 46.2 | 141 | py |
pyEPR | pyEPR-master/scripts/Alec/11ghz/EPR_test.py | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 23 10:34:00 2017
@author: alec-eickbusch
"""
from pyEPR import *
if 0:
# Specify the HFSS project to be analyzed
project_info = ProjectInfo(r"C:\Users\awe4\Documents\Backed\hfss_simulations\11ghz\\")
project_info.project_name = '11ghz_alec' # Name of the project file (string). "None" will get the current active one.
project_info.design_name = '11ghz_design1' # Name of the design file (string). "None" will get the current active one.
project_info.setup_name = None # Name of the setup(string). "None" will get the current active one.
project_info.junctions['bot_junc'] = {'rect':'bot_junction', 'line': 'bot_junc_line', 'Lj_variable':'bot_lj', 'length':0.0001}
project_info.junctions['top_junc'] = {'rect':'top_junction', 'line': 'top_junc_line', 'Lj_variable':'top_lj', 'length':0.0001}
project_info.dissipative['dielectric_surfaces'] = None # supply names here, there are more options in project_info.dissipative.
epr_hfss = DistributedAnalysis(project_info)
epr_hfss.do_EPR_analysis()
#%%
if 1:
epr = QuantumAnalysis(epr_hfss.data_filename) # Analysis results
#result = epr.analyze_variation('1', cos_trunc = 8, fock_trunc = 7)
epr.analyze_all_variations(cos_trunc = 10, fock_trunc = 7);
#%%
if 1:
PM = OrderedDict()
for n in range(3,10):
epr.analyze_all_variations(cos_trunc = 10, fock_trunc = n);
PM[n] = epr.results['0']['chi_ND']
{k:v[0][0] for k,v in PM.items()} | 1,540 | 39.552632 | 140 | py |
pyEPR | pyEPR-master/scripts/nick/import_pyEPR.py | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 22 11:21:01 2017
@author: Zlatko
"""
from pyEPR import *
if 0:
# Specify the HFSS project to be analyzed
project_info = ProjectInfo(r"X:\Simulation\\hfss\\KC\\")
project_info.project_name = '2013-12-03_9GHzCavity' # Name of the project file (string). "None" will get the current active one.
project_info.design_name = '9GHz_EM_center_SNAIL' # Name of the design file (string). "None" will get the current active one.
project_info.setup_name = None # Name of the setup(string). "None" will get the current active one.
## Describe the junctions in the HFSS design
project_info.junctions['snail'] = {'rect':'qubit', 'line': 'JunctionLine', 'Lj_variable':'LJ', 'length':0.0001}
# project_info.junctions['jBob'] = {'rect':'qubitBob', 'line': 'bob_line', 'Lj_variable':'LJBob', 'length':0.0001}
# Dissipative elements EPR
project_info.dissipative['dielectric_surfaces'] = None # supply names here, there are more options in project_info.dissipative.
# Run analysis
epr_hfss = DistributedAnalysis(project_info)
epr_hfss.do_EPR_analysis() #variations = ['1', '70']
if 1: # Hamiltonian analysis
# filename = epr_hfss.data_filename
filename = r'X:\Simulation\hfss\KC\pyEPR_results_2018\2013-12-03_9GHzCavity\9GHz_EM_center_SNAIL\9GHz_EM_center_SNAIL_20180726_170049.hdf5'
#filename = r'C:\\Users\\rslqulab\\Desktop\\zkm\\2017_pyEPR_data\\\\/2017_08_Zlatko_Shyam_AutStab/2 pyEPR/2 pyEPR_20170825_170550.hdf5'
epr = QuantumAnalysis(filename)
#result = epr.analyze_variation('1', cos_trunc = 8, fock_trunc = 7)
epr.analyze_all_variations(cos_trunc = None, fock_trunc = 4) # only quadratic part
epr.plot_hamiltonian_results()
if 1:
from pyEPR.toolbox_plotting import cmap_discrete
f0 = epr.results.get_frequencies_HFSS()
f1 = epr.results.get_frequencies_O1()
chi = epr.results.get_chi_O1()
mode_idx = list(f0.index)
nmodes = len(mode_idx)
cmap = cmap_discrete(nmodes)
| 2,095 | 44.565217 | 143 | py |
pyEPR | pyEPR-master/scripts/minev/_low_level/com_browse.py | # -*- coding: utf-8 -*-
"""
Debug purpose only
@author: Zlatko Minev
"""
import win32com.client
import win32com.client.combrowse
#win32com.client.combrowse.main()
# A tree heading for registered type libraries"
c = win32com.client.combrowse.HLIHeadingRegisterdTypeLibs()
for s in c.GetSubList():
#print(s)
name = s.GetText()
if ('ansys' in name.lower()) or ('hfss' in name.lower()):
print(name)
# HFSSAppDLL 1.0 Type Library
# C:\Program Files\AnsysEM\AnsysEM17.0\Win64\HfssDesktop.tlb | 526 | 22.954545 | 68 | py |
pyEPR | pyEPR-master/scripts/minev/hfss-scripts/2017_10 R3C1 resim.py | # Zlatko
from pyEPR import *
import matplotlib.pyplot as plt
if 1:
# Specify the HFSS project to be analyzed
project_info = ProjectInfo(r"C:\Users\rslqulab\Desktop\zkm\2017_pyEPR_data\\")
project_info.project_name = '2017-10 re-sim SM22-R3C1'
project_info.design_name = '3. sweep both'
project_info.setup_name = None
## Describe the junctions in the HFSS design
project_info.junctions['jBright'] = {'rect':'juncV', 'line': 'juncH_line', 'Lj_variable':'LJ1', 'length':0.0001}
project_info.junctions['jDark'] = {'rect':'juncH', 'line': 'juncV_line', 'Lj_variable':'LJ2', 'length':0.0001}
# Dissipative elements EPR
project_info.dissipative['dielectric_surfaces'] = None # supply names here, there are more options in project_info.dissipative.
# Run analysis
epr_hfss = DistributedAnalysis(project_info)
epr_hfss.do_EPR_analysis()
if 1: # Analysis result
filename = epr_hfss.data_filename
#filename = r'C:\Users\rslqulab\Desktop\zkm\2017_pyEPR_data\\/2017-10 re-sim SM22-R3C1/1. R3C1/1. R3C1_20171016_110756.hdf5'
epr = QuantumAnalysis(filename)
epr.plot_convergence_f_lin()
epr._renorm_pj = True
plt.close('all')
epr.analyze_all_variations(cos_trunc = 10, fock_trunc = 8)
epr.plot_hamiltonian_results()
print(epr.data_filename)
#%%
if 1:
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.reset_orig()
#epr.hfss_variables.loc['_LJ2']
kw_map = dict(vmin = -20, vmax = 20, linewidths=0.5, annot=True,\
cmap='seismic' ) # RdYlGn_r
target_f = pd.Series([4688, 5300, 9003], index=['D','B','C'])
target_alpha = pd.Series([148, 174], index=['D', 'B'])
target_chi = pd.Series([85, 5, 0.33], index=['DB', 'BC', 'DC'])
results = epr.results
f_ND = results.get_frequencies_ND().rename(\
index ={0:'D',1:'B',2:'C'})
f_error = f_ND.apply(lambda x: 100*(x.values-target_f)/x, axis = 'index')
fig, axs = plt.subplots(1, 3, figsize = (15,7.5))
sns.heatmap(f_error.transpose(), ax = axs[0], **kw_map)
chis = results.get_chi_ND()
chis = xarray_unravel_levels(chis, ['variation','m', 'n'])
alpha_ND = sort_df_col(chis.sel_points(m = [0,1], n=[0,1]).to_pandas())
alpha_ND.index = target_alpha.index
alpha_ND_err = alpha_ND.apply(lambda x: 100*(x.values-target_alpha)/x, axis = 'index')
sns.heatmap(alpha_ND_err.transpose(), ax = axs[1], **kw_map)
chi_ND = sort_df_col(chis.sel_points(m = [0,1,0], n=[1,2,2]).to_pandas())
chi_ND.index = target_chi.index
chi_ND_err = chi_ND.apply(lambda x: 100*(x.values-target_chi)/x, axis = 'index')
sns.heatmap(chi_ND_err.transpose(), ax = axs[2], **kw_map)
axs[0].set_title('Freq.')
axs[1].set_title('Anharmonicities')
axs[2].set_title('cross-Kerrs')
| 2,994 | 36.911392 | 140 | py |
pyEPR | pyEPR-master/scripts/minev/hfss-scripts/import_pyEPR.py | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 22 11:21:01 2017
@author: Zlatko
"""
from pyEPR import *
if 1:
# Specify the HFSS project to be analyzed
project_info = ProjectInfo(r"C:\\Users\\rslqulab\Desktop\\Lysander\participation_ratio_project\\Shyam's autonomous stabilization simulations\\")
project_info.project_name = '2017_08_Zlatko_Shyam_AutStab' # Name of the project file (string). "None" will get the current active one.
project_info.design_name = '2 pyEPR' # Name of the design file (string). "None" will get the current active one.
project_info.setup_name = None # Name of the setup(string). "None" will get the current active one.
## Describe the junctions in the HFSS design
project_info.junctions['jAlice'] = {'rect':'qubitAlice', 'line': 'alice_line', 'Lj_variable':'LJAlice', 'length':0.0001}
project_info.junctions['jBob'] = {'rect':'qubitBob', 'line': 'bob_line', 'Lj_variable':'LJBob', 'length':0.0001}
# Dissipative elements EPR
project_info.dissipative['dielectric_surfaces'] = None # supply names here, there are more options in project_info.dissipative.
# Run analysis
epr_hfss = DistributedAnalysis(project_info)
epr_hfss.do_EPR_analysis()
if 1: # Hamiltonian analysis
filename = epr_hfss.data_filename
#filename = r'C:\\Users\\rslqulab\\Desktop\\zkm\\2017_pyEPR_data\\\\/2017_08_Zlatko_Shyam_AutStab/2 pyEPR/2 pyEPR_20170825_170550.hdf5'
epr = QuantumAnalysis(filename)
#result = epr.analyze_variation('1', cos_trunc = 8, fock_trunc = 7)
epr.analyze_all_variations(cos_trunc = 8, fock_trunc = 7)
epr.plot_hamiltonian_results()
| 1,680 | 45.694444 | 148 | py |
pyEPR | pyEPR-master/scripts/my-name/example1.py | # -*- coding: utf-8 -*-
"""
My First pyEPR Script
"""
from pyEPR import *
# 1. Project and design. Open link to HFSS controls.
project_info = ProjectInfo(r'C:\zkm\my-first-pyEPR\\',
project_name = 'HelloWorld-pyEPR', # Project file name (string). "None" will get the current active one.
design_name = 'MyFirstTest' # Design name (string). "None" will get the current active one.
)
project_info.connect_to_project()
#
## 2a. Junctions. Specify junctions in HFSS model
#project_info.junctions['jAlice'] = {'Lj_variable':'LJAlice', 'rect':'qubitAlice', 'line': 'alice_line', 'length':0.0001}
#project_info.junctions['jBob'] = {'Lj_variable':'LJBob', 'rect':'qubitBob', 'line': 'bob_line', 'length':0.0001}
#
## 2b. Dissipative elements.
#project_info.dissipative['dielectrics_bulk'] = ['si_substrate'] # supply names here, there are more options in project_info.dissipative.
#project_info.dissipative['dielectric_surfaces'] = ['interface']
#
## 3. Run analysis
#epr_hfss = DistributedAnalysis(project_info)
#epr_hfss.do_EPR_analysis()
#
## 4. Hamiltonian analysis
#epr = QuantumAnalysis(epr_hfss.data_filename)
#epr.analyze_all_variations(cos_trunc = 8, fock_trunc = 7)
#epr.plot_hamiltonian_results()
#
| 1,258 | 34.971429 | 143 | py |
pyEPR | pyEPR-master/scripts/Kaicheng/import_pyEPR.py | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 22 11:21:01 2017
@author: Zlatko
"""
from pyEPR import *
if 0:
# Specify the HFSS project to be analyzed
project_info = ProjectInfo(r"X:\Simulation\\hfss\\KC\\")
project_info.project_name = '2013-12-03_9GHzCavity' # Name of the project file (string). "None" will get the current active one.
project_info.design_name = '9GHz_EM_center_SNAIL' # Name of the design file (string). "None" will get the current active one.
project_info.setup_name = None # Name of the setup(string). "None" will get the current active one.
## Describe the junctions in the HFSS design
project_info.junctions['snail'] = {'rect':'qubit', 'line': 'JunctionLine', 'Lj_variable':'LJ', 'length':0.0001}
# project_info.junctions['jBob'] = {'rect':'qubitBob', 'line': 'bob_line', 'Lj_variable':'LJBob', 'length':0.0001}
# Dissipative elements EPR
project_info.dissipative['dielectric_surfaces'] = None # supply names here, there are more options in project_info.dissipative.
# Run analysis
epr_hfss = DistributedAnalysis(project_info)
epr_hfss.do_EPR_analysis() #variations = ['1', '70']
if 1: # Hamiltonian analysis
# filename = epr_hfss.data_filename
filename = r'X:\Simulation\hfss\KC\pyEPR_results_2018\2013-12-03_9GHzCavity\9GHz_EM_center_SNAIL\9GHz_EM_center_SNAIL_20180726_170049.hdf5'
#filename = r'C:\\Users\\rslqulab\\Desktop\\zkm\\2017_pyEPR_data\\\\/2017_08_Zlatko_Shyam_AutStab/2 pyEPR/2 pyEPR_20170825_170550.hdf5'
epr = QuantumAnalysis(filename)
#result = epr.analyze_variation('1', cos_trunc = 8, fock_trunc = 7)
epr.analyze_all_variations(cos_trunc = None, fock_trunc = 4) # only quadratic part
epr.plot_hamiltonian_results()
if 1:
f0 = epr.results.get_frequencies_HFSS()
f1 = epr.results.get_frequencies_O1()
chi = epr.results.get_chi_O1()
mode_idx = list(f0.index)
nmodes = len(mode_idx)
cmap = cmap_discrete(nmodes)
| 2,038 | 44.311111 | 143 | py |
pyEPR | pyEPR-master/scripts/hanhee/run_vs_pass.py | # -*- coding: utf-8 -*-
"""
Example startup script to perform full quantization of a two qubit, one cavity Josephson circuit.
The results are saved, printed, and nicely plotted.
@author: Zlatko
"""
from pyEPR import ProjectInfo, DistributedAnalysis, QuantumAnalysis
# 1. Project and design. Open link to HFSS controls.
project_info = ProjectInfo('D:\LOGIQ-IBMQ\Cranes\HFSS simulation\\',
project_name = '2018-12-03 Zlatko pyEPR', # Project file name (string). "None" will get the current active one.
design_name = 'L-4 bus-EPR' # Design name (string). "None" will get the current active one.
)
# 2a. Junctions. Specify junctions in HFSS model
for i in range(1,3+1): # specify N number of junctions
i=str(i)
project_info.junctions['j'+i] = {'Lj_variable':'Lj'+i, 'rect':'Qubit'+i, 'line': 'Polyline'+i, 'length':30*10**-6}
# 2b. Dissipative elements.
#project_info.dissipative['dielectrics_bulk'] = ['subs_Q1'] # supply names here, there are more options in project_info.dissipative.
#project_info.dissipative['dielectric_surfaces'] = ['interface']
#%%
# 3. Run analysis
if 1:
passes = range(1,20,1)
epr_hfss = DistributedAnalysis(project_info)
# CLEAR DATA
# if not 'RES' in locals():
from collections import OrderedDict
RES = OrderedDict()
#%%%
setup_name = None
design = epr_hfss.design
setup_name = setup_name if not (setup_name is None) else design.get_setup_names()[0]
setup = design.get_setup(setup_name)
print(' HFSS setup name: %s' % setup_name)
#%%
from numpy import diag, sqrt, array
import pandas as pd
from pyEPR.toolbox import get_above_diagonal
def do_analysis(pass_, variation='0', do_ND=True):
    """Run the EPR field analysis for one adaptive pass and cache results in RES.

    Parameters
    ----------
    pass_ : hashable
        Key under which this pass's results are stored in the global `RES`
        OrderedDict (the script uses the HFSS pass number).
    variation : str
        HFSS variation to analyze (default '0').
    do_ND : bool
        Also run the (slow) numerical diagonalization (default True,
        matching the original always-on behavior).

    Returns
    -------
    dict
        The Hamiltonian parameters computed by ``zkm_get_Hparams``.

    Side effects: triggers ``epr_hfss.do_EPR_analysis`` (HFSS field solve
    post-processing) and populates ``RES[pass_]``.
    """
    epr_hfss.do_EPR_analysis(variations=[variation])
    RES[pass_] = OrderedDict()
    epr = QuantumAnalysis(epr_hfss.data_filename)
    RES[pass_]['epr'] = epr
    RES[pass_]['freq_hfss'] = epr.freqs_hfss[variation]
    RES[pass_]['Pmj_raw'] = epr.PM[variation]
    ## NORMED participations.
    # BUGFIX: was epr.get_Pmj('0'), which ignored the `variation` argument.
    dum = epr.get_Pmj(variation)
    RES[pass_]['Pmj_normed'] = dum['PJ']        # DataFrame
    RES[pass_]['_Pmj_norm'] = dum['Pm_norm']    # Series
    # PJ, SJ, Om, EJ, PHI_zpf matrices (arrays)
    RES[pass_]['mats'] = epr.get_matrices(variation, print_=False)
    RES[pass_]['Hres'] = Hres = zkm_get_Hparams(*RES[pass_]['mats'])
    RES[pass_]['alpha'] = Hres['alpha']  # easy access
    RES[pass_]['chi'] = Hres['chi']
    ### Numerical diagonalization
    if do_ND:
        print(' ND pass=%s variation=%s' % (pass_, variation))
        from pyEPR.core import pyEPR_ND
        f1_ND, CHI_ND = pyEPR_ND(epr.freqs_hfss[variation], epr.Ljs[variation],
                                 RES[pass_]['mats'][-1],  # PHI_zpf
                                 cos_trunc=10, fock_trunc=9)
        RES[pass_]['ND'] = {}
        RES[pass_]['ND']['f01'] = f1_ND
        RES[pass_]['ND']['CHI'] = CHI_ND
    return Hres
def zkm_get_Hparams(PJ, SJ, Om, EJ, PHI):
    '''
    Compute dressed-Hamiltonian parameters from the EPR matrices.
    All returned quantities are reported in MHz.

    PJ  -- (M x J) inductive energy-participation matrix
    SJ  -- (M x J) sign matrix (accepted for interface compatibility; unused)
    Om  -- (M x M) diagonal matrix of linear mode frequencies
    EJ  -- (J x J) diagonal matrix of junction energies
    PHI -- (M x J) reduced zero-point-fluctuation matrix

    Returns a dict with keys 'alpha_p4R2_p6R1', 'alpha_zpf', 'chi',
    'chi_zpf', 'chi2', 'omega_zx', 'alpha' and 'f01'.
    '''
    n_modes, n_juncs = PJ.shape
    modes = range(n_modes)
    juncs = range(n_juncs)
    res = {'alpha_p4R2_p6R1': [],  # {p=4,RWA=2} + {p=6,RWA1}
           'chi': {},              # {p=4,RWA=1}
           'omega_zx': {}}
    # Anharmonicity: {p=4, RWA2} plus the {p=6, RWA1} correction, in MHz.
    res['alpha_p4R2_p6R1'] = [
        1000 * sum(
            PJ[m, j]**2 * (Om[m, m]**2) / (8. * EJ[j, j])
            * (1. + PJ[m, j] * Om[m, m] / EJ[j, j] * (17. / 32. * PJ[m, j] - 0.25))
            for j in juncs)
        for m in modes]
    # Same anharmonicity written through the zpf matrix (equivalent form).
    res['alpha_zpf'] = [
        1000 * sum(
            0.5 * EJ[j, j] * PHI[m, j]**4
            + 2. * 306. / Om[m, m] * (EJ[j, j] / 24.)**2 * PHI[m, j]**8
            - 0.25 * EJ[j, j] * PHI[m, j]**6
            for j in juncs)
        for m in modes]
    ### Cross-Kerr terms; dictionary keys are 'lo,hi' mode-index pairs.
    res['chi_zpf'] = {}  # debug only: equivalent zpf expression
    res['chi2'] = {}     # higher-order correction
    for hi in modes:
        for lo in range(hi):
            pair = '%d,%d' % (lo, hi)
            res['chi'][pair] = 1000. * sum(
                EJ[j, j] * PHI[lo, j]**2 * PHI[hi, j]**2 for j in juncs)
            res['chi_zpf'][pair] = 1000. * sum(
                Om[lo, lo] * Om[hi, hi] * PJ[lo, j] * PJ[hi, j] / (4. * EJ[j, j])
                for j in juncs)  # fully equivalent to res['chi']
            res['chi2'][pair] = 1000. * sum(  # TODO: CHECK AND SIGN
                (EJ[j, j] / 24.)**2 * (
                    864. * PHI[lo, j]**6 * PHI[hi, j]**2 / (+Om[lo, lo] - Om[hi, hi])
                    - 864. * PHI[lo, j]**2 * PHI[hi, j]**6 / (-Om[lo, lo] + Om[hi, hi])
                    - 576. * PHI[lo, j]**6 * PHI[hi, j]**2 / (Om[lo, lo])
                    - 576. * PHI[lo, j]**2 * PHI[hi, j]**6 / (Om[hi, hi])
                    - 576. * PHI[lo, j]**4 * PHI[hi, j]**4 / (Om[lo, lo] + Om[hi, hi])
                    + 288. * PHI[lo, j]**2 * PHI[hi, j]**6 / (Om[lo, lo] - 3 * Om[hi, hi])
                    + 288. * PHI[lo, j]**6 * PHI[hi, j]**2 / (Om[hi, hi] - 3 * Om[lo, lo]))
                for j in juncs)
    ### Cross-resonance gate rate, analytical {p=4, RWA1}; keys are 'hi,lo'.
    for hi in modes:
        for lo in range(hi):
            res['omega_zx']['%d,%d' % (hi, lo)] = 1000. * abs(sum(
                EJ[j, j] * PHI[hi, j]**3 * PHI[lo, j] for j in juncs))
    res['alpha'] = res['alpha_p4R2_p6R1']  # MHz; convenience alias
    # Dressed 0-1 transition frequency (MHz): linear frequency minus the
    # self-Kerr (alpha) and all cross-Kerr shifts involving this mode.
    res['f01'] = 1000 * diag(Om) - res['alpha'] - \
        [sum(res['chi']['%d,%d' % (min(other, m), max(other, m))]
             for other in modes if other != m)
         for m in modes]
    return res
if 0: # update all passes
for pass_ in RES.keys():
do_analysis(pass_)
#res = do_analysis(pass_)
#pd.Series(res['alpha'])
#%%
def do_plot(RES):
    '''
    Live-plot the per-pass convergence of frequencies, anharmonicities,
    cross-Kerrs, cross-resonance rates and participation ratios stored
    in the RES dict (one entry per HFSS adaptive pass).

    Make sure
        %matplotlib qt
    TODO: in future just setup once, and then update lines only

    NOTE(review): this function reads the module-level globals `pass_`
    (loop variable of the calling script) and `diag` (from the script's
    `from numpy import diag`) — confirm this coupling is intended.
    '''
    # live plot https://stackoverflow.com/questions/11874767/how-do-i-plot-in-real-time-in-a-while-loop-using-matplotlib
    # see also pylive
    import matplotlib
    matplotlib.use('Qt5Agg')
    import matplotlib.pyplot as plt
    import pandas as pd
    import numpy as np
    from pyEPR.toolbox_plotting import legend_translucent
    from pyEPR.toolbox import combinekw, xarray_unravel_levels, floor_10
    plt.ion()
    # Reuse figure number 1 so repeated calls redraw in the same window.
    fig = plt.figure(1, figsize=(25,10))
    fig.clf()
    fig, axs = plt.subplots(2, 3, subplot_kw=dict(), num=1,sharex=True)
    kw=dict(marker='o')
    leg_kw = dict(fontsize=9, ncol=1)
    # Linear (HFSS) mode frequencies vs pass number
    ax = axs[0,0]
    df = pd.DataFrame({x:1000.*RES[x]['freq_hfss'] for x in RES}).transpose()
    df.plot(ax=ax,**kw)
    ax.set_title('Linear mode, HFSS frequency $\omega_m/2\pi$ and dressed (MHz)')
    legend_translucent(ax, leg_kw=combinekw(leg_kw,dict(title='Mode #'))) #ax.legend(title= 'Mode #')
    # Dressed frequency overlaid as dashed lines, same color cycle
    ax.set_prop_cycle(None)
    df = pd.DataFrame({x:RES[x]['Hres']['f01'] for x in RES}).transpose()
    df.plot(ax=ax,legend=False,**combinekw(kw,dict(marker=None,alpha=0.5,ls='--')))
    # Pmj Norm
    ax = axs[1,0]
    df = pd.DataFrame({x:RES[x]['_Pmj_norm'] for x in RES}).transpose()
    df.plot(ax=ax,**kw)
    ax.set_title('HFSS $p_{mj}$ norm')
    legend_translucent(ax, leg_kw=combinekw(leg_kw,dict(title='Mode #')))
    # Anharmonicities (first-order perturbation theory)
    ax = axs[0,1]
    df = pd.DataFrame({x:RES[x]['alpha'] for x in RES}).transpose()
    df.plot(ax=ax,**kw)
    ax.set_title(r'Anharmonicity $\alpha_{mj}/2\pi$ (MHz)')
    legend_translucent(ax, leg_kw=combinekw(leg_kw,dict(title='Mode #')))
    # NOTE(review): `pass_` is a global here, not a parameter — this checks
    # only the most recent pass for ND data; confirm intended.
    if RES[pass_]['ND'] is not None: # plot numerical solution
        ax.set_prop_cycle(None)
        df = pd.DataFrame({x:diag(RES[x]['ND']['CHI']) for x in RES}).transpose()
        df.plot(ax=ax,legend=False,**combinekw(kw,dict(marker=None,alpha=0.5,ls='--')))
    # Cross-Kerr couplings
    ax = axs[0,2]
    df = pd.DataFrame({x:RES[x]['chi'] for x in RES}).transpose()
    df.plot(ax=ax,**kw)
    ax.set_title(r'Cross-Kerr $\chi_{mm\prime}/2\pi$ (MHz)')
    legend_translucent(ax, leg_kw=combinekw(leg_kw,dict(title='Mode #')))
    # if RES[pass_]['ND'] is not None: # plot numerical solution
    # ax.set_prop_cycle(None)
    # df = pd.DataFrame({x:-get_above_diagonal(RES[x]['ND']['CHI']) for x in RES}).transpose()
    ## df = pd.DataFrame({x:RES[x]['Hres']['chi2'] for x in RES}).transpose()
    # df.plot(ax=ax,legend=False,**combinekw(kw,dict(marker=None,alpha=0.5,ls='--')))
    # Cross-resonance gate rates
    ax = axs[1,2]
    df = pd.DataFrame({x:RES[x]['Hres']['omega_zx'] for x in RES}).transpose()
    df.plot(ax=ax,**kw)
    ax.set_title(r'Cross-Resonance $\omega_{ZX}/2\pi$ (MHz)')
    legend_translucent(ax, leg_kw=combinekw(leg_kw,dict(title='C,T')))
    # Pmj normed plot: one line per (mode, junction) pair
    ax = axs[1,1]
    da = xarray_unravel_levels({x:RES[x]['Pmj_normed'] for x in RES},
                               names=['pass','mode', 'junction'])
    for mode in da.coords['mode']:
        for junc in da.coords['junction']:
            junc_name = str(junc.values)[2:]
            ys = da.sel(mode=mode, junction=junc)
            ys.plot.line(ax=ax, label='%2s,%4s'%(str(mode.values),junc_name),**kw)
    min_ = floor_10(min(abs(np.min(da.values)),abs(np.max(da.values)))) # just in case
    ax.set_ylim(min_,1.05)
    ax.set_yscale("log", nonposy='clip')
    legend_translucent(ax, leg_kw=combinekw(leg_kw,dict(title='$p_{mj}$')))
    ax.set_title('HFSS $p_{mj}$ normed')
    # Interactive button cycling through 5 preset y-axis ranges/scales
    # for the participation-ratio panel.
    from matplotlib.widgets import Button
    class Index(object):
        ind = 0  # current preset index, cycles 0..4
        def __init__(self, button, ax):
            self.ax = ax
            self.button = button # so it doesnt get erased
        def next(self, event):
            # Advance to the next y-range preset and redraw.
            i = self.ind = (self.ind+1) % 5
            ax = self.ax
            if i==0:
                ax.set_ylim(min_,1.05)
                ax.set_yscale("log", nonposy='clip')
            elif i==1:
                ax.set_ylim(min_,1.02)
                ax.set_yscale("linear", nonposy='clip')
            elif i==2:
                ax.set_ylim(0.8,1.02)
                ax.set_yscale("linear", nonposy='clip')
            elif i==3:
                ax.set_ylim(10**-3,10**-1)
                ax.set_yscale("log", nonposy='clip')
            elif i==4:
                ax.set_ylim(5*10**-5,2*10**-3)
                ax.set_yscale("log", nonposy='clip')
            self.button.label.set_text('Next %d'%(self.ind))
            fig.canvas.draw()
            fig.canvas.flush_events()
    # Place the button just above the participation panel.
    pos1 = ax.get_position()
    pos2 = [pos1.x0 + 0., pos1.y0 + pos1.height+0.002, 0.07, 0.04]
    axnext = plt.axes(pos2)
    bnext = Button(axnext, 'Next')
    callback = Index(bnext, ax)
    bnext.on_clicked(callback.next)
    for ax in np.ndarray.flatten(axs):
        ax.set_xlabel('Pass number')
        #ax.autoscale(tight=True)
    fig.tight_layout() #pad=0.4, w_pad=0.5, h_pad=1.0)
    fig.show()
    plt.pause(0.01)
    return df
#do_plot(RES)
#import threading
#t = threading.Thread(target=do_plot, args = (RES,))
#t.start()
#%%
import time
if 1:
for pass_ in passes:
print(' Running pass #%s'%(pass_), end='')
setup.passes = str(pass_)
try:
ret = setup.solve() # I tried to use a worker thread but this gets complicated with COM blocking interface
if ret in [0, '0']:
print('. Normal completion.')
time.sleep(0.5)
do_analysis(pass_)
do_plot(RES)
elif ret in ['-1',-1]:
print('. Simulation error.')
print(ret)
except KeyboardInterrupt:
print('\n\n Keyboard interruption...')
break
# ABORT: -2147352567, 'Exception occurred.', (0, None, None, None, 0, -2147024349), None)
do_plot(RES)
#%%
if 0:
epr_hfss.do_EPR_analysis()
# 4. Hamiltonian analysis
epr = QuantumAnalysis(epr_hfss.data_filename)
epr.analyze_all_variations(cos_trunc = 8, fock_trunc = 7)
epr.plot_hamiltonian_results() | 12,386 | 36.536364 | 153 | py |
pyEPR | pyEPR-master/tests/test_quantum_analysis.py | '''
Unit tests for quantum analysis. Takes in pre-made data with known results,
computes the results from the data and checks everything is correct.
'''
import unittest
import pickle
import numpy as np
import sys
sys.path.append('..') # noqa
import pyEPR as epr
# Files location
save_file = './data.npz'
correct_results = './correct_results.pkl'
class TestQuantumAnalysis(unittest.TestCase):
    '''
    Regression tests for the quantum analysis: load pre-computed EPR data
    with known-correct results and verify the analysis reproduces them.
    '''
    def setUp(self):
        # Load the saved EPR data and the pickled reference results.
        self.epra = epr.QuantumAnalysis(save_file)
        with open(correct_results, 'rb') as file:
            self.correct_res = pickle.load(file)
    def test_analyze_all_variations(self):
        '''
        Check that the calculated results match the known correct ones.
        '''
        results = self.epra.analyze_all_variations(
            cos_trunc=8, fock_trunc=15, print_result=False)['0']  # Variation 0
        # TODO: Remove start/finish diagonalization messages (back_box_numeric L:153)
        for key, value in results.items():
            if key == 'hfss_variables':  # Not numeric-only data; skip it.
                # BUGFIX: this was `return`, which aborted the whole test at
                # this key and silently skipped all remaining comparisons.
                continue
            value = np.array(value)
            corr_value = np.array(self.correct_res[key])
            self.assertTrue(np.allclose(value, corr_value),
                            msg='Mismatch for result key %r' % key)
            epr.logger.info(key+' '+'-'*(13 - len(key))+'-> OK!')
    def test_analyze_variation(self):
        pass
    def test_hamiltonian(self):
        pass  # TODO: Need to pass **kwargs to epr_num_diag for return_H option
    def test_properties(self):
        pass
| 1,495 | 28.92 | 85 | py |
pyEPR | pyEPR-master/tests/test_project_info.py | import unittest
import sys; sys.path.insert(0, '..') # noqa
import pyEPR as epr
class TestProjectInfo(unittest.TestCase):
    '''Test pyEPR.project_info.py'''
    def setUp(self):
        path_to_project = r'..\_example_files'
        try:
            self.pinfo = epr.ProjectInfo(project_path=path_to_project,
                                         project_name='pyEPR_tutorial1',
                                         design_name='1. single_transmon')
        except Exception:
            # BUGFIX: the original ran `assert ConnectionError(...)` inside a
            # bare `except:`. An exception *instance* is always truthy, so the
            # assert was a no-op, `self.pinfo` stayed unset, and every test
            # method then died with an unrelated AttributeError. Skip the
            # tests instead when HFSS is not reachable.
            self.skipTest('Failed to connect to HFSS. Open it manually.')
    def test_dissipative(self):
        '''Test change of _Dissipative from a class to a dict with deprecation warnings'''
        # Invalid reads must raise.
        self.assertRaises(Exception, self.pinfo.dissipative.__getattr__, 'mot_exist',
                          msg='Failed calling non-existing attr')
        self.assertRaises(Exception, self.pinfo.dissipative.__getitem__, 'not_exist',
                          msg='Failed calling non-existing item')
        # Invalid writes (wrong value type, unknown HFSS object, unknown key)
        # must raise as well.
        self.assertRaises(Exception, self.pinfo.dissipative.__setattr__, 'seams', 1,
                          msg='Failed setting invalid attr')
        self.assertRaises(Exception, self.pinfo.dissipative.__setitem__, 'seams', 1,
                          msg='Failed setting invalid item')
        self.assertRaises(Exception, self.pinfo.dissipative.__setitem__, 'seams', ['a'],
                          msg='Failed setting item to non-existing HFSS obj')
        self.assertRaises(Exception, self.pinfo.dissipative.__setattr__, 'seams', ['a'],
                          msg='Failed setting attr to non-existing HFSS obj')
        self.assertRaises(Exception, self.pinfo.dissipative.__setattr__, 'not_exist', 1,
                          msg='Failed setting invalid value and attr')
        self.assertRaises(Exception, self.pinfo.dissipative.__setitem__, 'not_exist', 1,
                          msg='Failed setting invalid value and key')
        # Valid access and assignments must not raise.
        self.pinfo.dissipative['seams']
        self.pinfo.dissipative['seams'] = []
        self.pinfo.dissipative['seams'] = ['substrate']
| 2,076 | 55.135135 | 90 | py |
pyEPR | pyEPR-master/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'pyEPR'
copyright = '2020, Zlatko Minev, Zaki Leghtas, and the pyEPR Team'
author = 'Zlatko Minev, Zaki Leghtas, and the pyEPR Team'
import sys
import os
sys.path.insert(0, os.path.abspath("../../pyEPR"))
print(sys.path)
# The full version, including alpha/beta/rc tags
import pyEPR
version = pyEPR.__version__
release = version
import sphinx_rtd_theme
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.intersphinx",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
'sphinx.ext.coverage',
'sphinx.ext.napoleon', # parse both NumPy and Google style docstrings
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
"sphinx.ext.mathjax",
"sphinx_rtd_theme",
#'sphinx_automodapi.automodapi',
"IPython.sphinxext.ipython_directive",
"IPython.sphinxext.ipython_console_highlighting",
"matplotlib.sphinxext.plot_directive",
#'numpydoc'
]
# https://github.com/readthedocs/readthedocs.org/issues/2569
master_doc = 'index'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["**.ipynb_checkpoints"]
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
numpydoc_show_class_members = True
napoleon_numpy_docstring = True
napoleon_use_admonition_for_notes = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme' #'default' # 'sphinx_rtd_theme' #'alabaster' "sphinxdoc" 'classic'
if 0:
import os
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if on_rtd:
html_theme = 'default'
else:
html_theme = 'nature'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
full_logo= True
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'canonical_url': '',
#'logo_only': False,
'display_version': True,
'prev_next_buttons_location': 'bottom',
'style_external_links': False,
#'style_nav_header_background': 'white',
# Toc options
'collapse_navigation': False,
'sticky_navigation': True,
'navigation_depth': 4,
'includehidden': True,
'titles_only': False
}
# Add any paths that contain custom themes here, relative to this directory.
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If false, no module index is generated.
html_use_modindex = True
html_show_sourcelink = True
# Sort members by type
#autodoc_member_order = 'groupwise'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
autosummary_generate = True
# -----------------------------------------------------------------------------
# Autodoc
# -----------------------------------------------------------------------------
#The supported options are
# 'members', 'member-order', 'undoc-members', 'private-members',
# 'special-members', 'inherited-members', 'show-inheritance', 'ignore-module-all',
# 'imported-members' and 'exclude-members'.
autodoc_default_options = {
'inherited-members': None,
#'member-order': 'bysource',
'member-order': 'alphabetical', #This value selects if automatically documented members are sorted alphabetical (value 'alphabetical'), by member type (value 'groupwise') or by source order (value 'bysource'). The default is alphabetical.
'undoc-members': True, # Members without docstrings will be left out, unless you give the undoc-members flag option:
'exclude-members': '__weakref__',
'show-inheritance' : True # , a list of base classes will be inserted just below the class signature (when used with automodule, this will be inserted for every class that is documented in the module).
}
# If true, figures, tables and code-blocks are automatically numbered if they
# have a caption.
numfig = True
# A dictionary mapping 'figure', 'table', 'code-block' and 'section' to
# strings that are used for format of figure numbers. As a special character,
# %s will be replaced to figure number.
numfig_format = {
'table': 'Table %s'
}
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'colorful'
# A boolean that decides whether module names are prepended to all object names
# (for object types where a “module” of some kind is defined), e.g. for
# py:function directives.
add_module_names = True
# A list of prefixes that are ignored for sorting the Python module index
# (e.g., if this is set to ['foo.'], then foo.bar is shown under B, not F).
# This can be handy if you document a project that consists of a single
# package. Works only for the HTML builder currently.
#modindex_common_prefix = ['pyEPR.'] | 7,220 | 33.716346 | 242 | py |
pyEPR | pyEPR-master/pyEPR/core.py | """
Main interface module to use pyEPR.
Contains code to connect to Ansys and to analyze HFSS files using the EPR method.
This module handles the microwave part of the analysis and connection to
Further contains code to be able to do autogenerated reports,
Copyright Zlatko Minev, Zaki Leghtas, and the pyEPR team
2015, 2016, 2017, 2018, 2019, 2020
"""
# pylint: disable=invalid-name, unused-import
from .project_info import ProjectInfo
from .core_quantum_analysis import QuantumAnalysis
from .core_distributed_analysis import DistributedAnalysis
# Backwards compatibility. To be depreciated.
Project_Info = ProjectInfo
pyEPR_HFSSAnalysis = DistributedAnalysis
pyEPR_Analysis = QuantumAnalysis
| 702 | 27.12 | 81 | py |
pyEPR | pyEPR-master/pyEPR/core_quantum_analysis.py | """
Main interface module to use pyEPR.
Contains code that works on the analysis after hfss, ansys, etc. These can now be closed.
Copyright Zlatko Minev, Zaki Leghtas, and the pyEPR team
2015, 2016, 2017, 2018, 2019, 2020
"""
# pylint: disable=invalid-name
# todo remove this pylint hack later
from __future__ import print_function # Python 2.7 and 3 compatibility
from typing import List
import pickle
import sys
import time
from collections import OrderedDict
from pathlib import Path
from .calcs.convert import Convert
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from IPython.display import Markdown, display
from numpy.linalg import inv
# pyEPR custom imports
from . import Dict, config, logger
from .ansys import ureg
from .calcs.back_box_numeric import epr_numerical_diagonalization
from .calcs.basic import CalcsBasic
from .calcs.constants import Planck, fluxQ
from .core_distributed_analysis import DistributedAnalysis
from .toolbox.plotting import cmap_discrete, legend_translucent
from .toolbox.pythonic import (DataFrame_col_diff, divide_diagonal_by_2,
print_color, print_matrix, sort_df_col,
sort_Series_idx, df_find_index, series_of_1D_dict_to_multi_df)
from .reports import (plot_convergence_max_df, plot_convergence_solved_elem)
class HamiltonianResultsContainer(OrderedDict):
    """
    Ordered dictionary of Hamiltonian analysis results, keyed by variation
    number (as a string), with save/load persistence to a numpy ``.npz`` file.

    The user should only use the QuantumAnalysis class interface.
    This class is largely for internal use.
    """
    # Suffix appended to the data-file stem to form the default save path.
    file_name_extra = ' HamiltonianResultsContainer.npz'
    def __init__(self, dict_file=None, data_dir=None):
        """
        Args:
            dict_file: One of
                1. ``None`` — create an empty results container;
                2. a ``str`` — name/path of a previously saved
                   HamiltonianResultsContainer file to load;
                3. an existing ``dict`` — its contents are injected into this
                   container (legacy upgrade path).
            data_dir: Directory in which the file is saved or loaded from.
                Defaults to a timestamped folder under ``config.root_dir/temp``.
        """
        super().__init__()
        self.sort_index = True  # sort DataFrame columns on retrieval
        if data_dir is None:
            data_dir = Path(config.root_dir) / 'temp' / \
                time.strftime('%Y-%m-%d %H-%M-%S', time.localtime())
        # data_dir doubles as "directory + file stem": the stem names the file.
        data_dir = Path(data_dir).resolve()
        file_name = data_dir.stem
        directory = data_dir.parents[0]
        if not directory.is_dir():
            directory.mkdir(parents=True, exist_ok=True)
        if dict_file is None:
            self.file_name = str(
                directory/(str(file_name)+self.file_name_extra))
            #logger.info(f'Filename hamiltonian params to {self.file_name }')
        elif isinstance(dict_file, str):
            # First try the name relative to data_dir; fall back to treating
            # dict_file as a full path.
            # NOTE(review): bare except and a hard-coded '\\' separator —
            # Windows-only assumption; confirm before porting.
            try:
                self.file_name = str(data_dir)+'\\' + dict_file
                self.load()
            except:
                self.file_name = dict_file
                self.load()
        elif isinstance(dict_file, dict):
            # Deprecated legacy path: absorb an existing plain dict.
            self._inject_dic(dict_file)
            self.file_name = str(data_dir)+self.file_name_extra
        else:
            raise ValueError(
                'type dict_file is of type {}'.format(type(dict_file)))
    # --- file persistence ---
    def save(self, filename: str = None):
        """
        Save the container to a numpy ``.npz`` file (default: self.file_name).
        Returns the filename used.
        """
        if filename is None:
            filename = self.file_name
        np.savez(filename, Res_Hamil=dict(self))
        return filename
    def load(self, filename=None):
        """
        Load container contents from a numpy ``.npz`` file
        (default: self.file_name). Returns the filename used.
        """
        if filename is None:
            filename = self.file_name
        # extract_dic is defined elsewhere in this module.
        self._inject_dic(extract_dic(file_name=filename)[0])
        return filename
    def _inject_dic(self, add_dic):
        # Merge add_dic into self, offsetting numeric keys by the number of
        # keys already present so existing variations are never overwritten.
        Init_number_of_keys = len(self.keys())
        for key, val in add_dic.items():
            # TODO remove all copies of same data
            # if key in self.keys():
            #raise ValueError('trying to overwrite an existing variation')
            self[str(int(key)+Init_number_of_keys)] = val
        return 1  # legacy success flag
    @staticmethod
    def _do_sort_index(z: pd.DataFrame):
        """Sort DataFrame columns; pass non-DataFrames through unchanged.

        Overwrite to sort by a custom function.

        Arguments:
            z {pd.DataFrame} -- Input

        Returns:
            Sorted DataFrame
        """
        if isinstance(z, pd.DataFrame):
            return z.sort_index(axis=1)
        else:
            return z
    def vs_variations(self,
                      quantity: str,
                      variations: list = None,
                      vs='variation',
                      to_dataframe=False):
        """
        Collect one stored quantity across variations.

        QUANTITIES:
            `f_0`   : HFSS Frequencies
            `f_1`   : Analytical first order PT on the p=4 term of the cosine
            `f_ND`  : Numerically diagonalized
            `chi_O1`: chi matrix from 1st order PT

        Arguments:
            quantity {str} -- Key of the stored quantity to collect.

        Keyword Arguments:
            variations {list of strings} -- Variations (default: {None} -- means all)
            vs {str} -- Swept against (default: {'variation'}); any other
                value is looked up as HFSS variable '_'+vs and its numeric
                magnitude becomes the key.
            to_dataframe {bool} -- convert or not the result to dataframe.
                Make sure to call only if it can be converted to a DataFrame
                or can be concatenated into a multi-index DataFrame.

        Returns:
            OrderedDict, or a pd.DataFrame if to_dataframe is True.
        """
        variations = variations or self.keys()
        res = OrderedDict()
        for key in variations:
            if vs == 'variation':
                res[key] = self[key][quantity]
            else:
                # convert the key to numeric if possible
                key_new = ureg.Quantity(
                    self[key]['hfss_variables']['_'+vs]).magnitude
                res[key_new] = self[key][quantity]
        # Convert to dataframe
        z = res
        if to_dataframe:  # only call if z can be converted to a dataframe
            z = sort_df_col(pd.DataFrame(z))
            if self.sort_index:
                z = self._do_sort_index(z)
            # z.index.name = 'eigenmode'
            z.columns.name = vs
        return z
    # Quick lookup functions (thin wrappers over vs_variations)
    def get_frequencies_HFSS(self, variations: list = None, vs='variation'):
        '''See help for `vs_variations`'''
        return self.vs_variations('f_0', variations=variations, vs=vs, to_dataframe=True)
    def get_frequencies_O1(self, variations: list = None, vs='variation'):
        '''See help for `vs_variations`'''
        return self.vs_variations('f_1', variations=variations, vs=vs, to_dataframe=True)
    def get_frequencies_ND(self, variations: list = None, vs='variation'):
        '''See help for `vs_variations`'''
        return self.vs_variations('f_ND', variations=variations, vs=vs, to_dataframe=True)
    def get_chi_O1(self, variations: list = None, vs='variation'):
        '''First-order PT chi matrices; see help for `vs_variations`.'''
        return self.vs_variations('chi_O1', variations=variations, vs=vs)
    def get_chi_ND(self, variations: list = None, vs='variation'):
        '''Numerically-diagonalized chi matrices; see help for `vs_variations`.'''
        return self.vs_variations('chi_ND', variations=variations, vs=vs)
class QuantumAnalysis(object):
'''
Defines an analysis object which loads and plots data from a h5 file
This data is obtained using DistributedAnalysis
'''
def __init__(self, data_filename,
             variations: list = None,
             do_print_info=True,
             Res_hamil_filename=None):
    """Load and unpack the results saved by DistributedAnalysis.

    Args:
        data_filename: path of the pickle file holding project_info and results.
        variations (list, optional): subset of variation labels to analyze;
            None means all variations found in the file.
        do_print_info (bool): print a summary of the varied parameters on load.
        Res_hamil_filename (optional): file for the HamiltonianResultsContainer.
    """
    self.data_filename = data_filename
    self.results = HamiltonianResultsContainer(dict_file=Res_hamil_filename,
                                               data_dir=data_filename)

    with open(str(data_filename), 'rb') as handle:
        # Contains everything: project_info and results
        self.data = Dict(pickle.load(handle))

    # Reverse from variations on outside to on inside
    results = DistributedAnalysis.results_variations_on_inside(
        self.data.results)

    # Convenience attributes unpacked from the results dict
    self.variations = variations or list(self.data.results.keys())
    self._hfss_variables = results['hfss_variables']
    self.freqs_hfss = results['freqs_hfss_GHz']
    self.Qs = results['Qs']
    self.Qm_coupling = results['Qm_coupling']
    self.Ljs = results['Ljs']  # DataFrame
    self.Cjs = results['Cjs']  # DataFrame
    self.OM = results['Om']  # dict of dataframes
    self.PM = results['Pm']  # participation matrices - raw, unnormed here
    # participation matrices for capacitive elements
    self.PM_cap = results['Pm_cap']
    self.SM = results['Sm']  # sign matrices
    self.I_peak = results['I_peak']
    self.V_peak = results['V_peak']
    self.modes = results['modes']
    self.sols = results['sols']
    self.ansys_energies = results.get('ansys_energies', {})

    self.mesh_stats = results['mesh']
    self.convergence = results['convergence']
    self.convergence_f_pass = results['convergence_f_pass']

    self.n_modes = len(self.modes[self.variations[0]])
    self._renorm_pj = config.epr.renorm_pj

    # Unique variation params -- make a get function
    dum = DataFrame_col_diff(self._hfss_variables)
    self.hfss_vars_diff_idx = dum if not (dum.any() == False) else []
    try:
        self.Num_hfss_vars_diff_idx = len(
            self.hfss_vars_diff_idx[self.hfss_vars_diff_idx == True])
    except Exception as e:
        # Narrowed from a bare `except:` + sys.exc_info(); report and fall back.
        logger.warning("<p>Error: %s</p>" % e)
        self.Num_hfss_vars_diff_idx = 0

    if do_print_info:
        self.print_info()
@property
def project_info(self):
    """Project information stored in the loaded data file."""
    return self.data.project_info
def print_info(self):
    """Print (display) the HFSS variables that differ between variations, if any."""
    print("\t Differences in variations:")
    if len(self.hfss_vars_diff_idx) > 0:
        display(self._hfss_variables[self.hfss_vars_diff_idx])
    print('\n')
def get_vs_variable(self, swp_var, attr: str):
"""
Convert the index of a dictionary that is stored here from
variation number to variable value.
Args:
swp_var (str) :name of sweep variable in ansys
attr: name of local attribute, eg.., 'ansys_energies'
"""
#from collections import OrderedDict
variable = self.get_variable_vs(swp_var)
return OrderedDict([(variable[variation], val)
for variation, val in getattr(self, attr).items()])
def get_variable_vs(self, swpvar, lv=None):
    """Map each variation number to the magnitude of a swept variable.

    Args:
        swpvar (str): name of the variable by which to organize.
        lv (list, optional): list of variations (example ['0', '1']);
            if None it takes all variations.

    Returns:
        OrderedDict: key is the variation number, item is the magnitude
        of swpvar for that variation.
    """
    ret = OrderedDict()
    if lv is None:
        for key, varz in self._hfss_variables.items():
            ret[key] = ureg.Quantity(varz['_'+swpvar]).magnitude
    else:
        try:
            for key in lv:
                ret[key] = ureg.Quantity(
                    self._hfss_variables[key]['_'+swpvar]).magnitude
        except KeyError:
            # Narrowed from a bare `except:`; only a missing variation or
            # variable name is expected here.
            print(' No such variation as ' + key)
    return ret
def get_variable_value(self, swpvar, lv=None):
var = self.get_variable_vs(swpvar, lv=lv)
return [var[key] for key in var.keys()]
def get_variations_of_variable_value(self, swpvar, value, lv=None):
"""A function to return all the variations in which one of the variables
has a specific value lv is list of variations (example ['0', '1']),
if None it takes all variations
swpvar is a string and the name of the variable we wish to filter
value is the value of swapvr in which we are interested
returns lv - a list of the variations for which swavr==value
"""
if lv is None:
lv = self.variations
ret = self.get_variable_vs(swpvar, lv=lv)
lv = np.array(list(ret.keys()))[np.array(list(ret.values())) == value]
#lv = lv_temp if not len(lv_temp) else lv
if not (len(lv)):
raise ValueError('No variations have the variable-' + swpvar +
'= {}'.format(value))
return list(lv)
def get_variation_of_multiple_variables_value(self, Var_dic, lv=None):
"""
SEE get_variations_of_variable_value
A function to return all the variations in which one of the variables has a specific value
lv is list of variations (example ['0', '1']), if None it takes all variations
Var_dic is a dic with the name of the variable as key and the value to filter as item
"""
if lv is None:
lv = self.variations
var_str = None
for key, var in Var_dic.items():
lv = self.get_variations_of_variable_value(key, var, lv)
if var_str is None:
var_str = key + '= {}'.format(var)
else:
var_str = var_str + ' & ' + key + '= {}'.format(var)
return lv, var_str
def get_convergences_max_tets(self):
''' Index([u'Pass Number', u'Solved Elements', u'Max Delta Freq. %' ]) '''
ret = OrderedDict()
for key, df in self.convergence.items():
ret[key] = df['Solved Elements'].iloc[-1]
return ret
def get_convergences_tets_vs_pass(self, as_dataframe=True):
''' Index([u'Pass Number', u'Solved Elements', u'Max Delta Freq. %' ]) '''
ret = OrderedDict()
for key, df in self.convergence.items():
s = df['Solved Elements']
s = s.reset_index().dropna().set_index('Pass Number')
#s.index = df['Pass Number']
ret[key] = s
if as_dataframe:
ret = pd.concat(ret)
ret = ret.unstack(0)['Solved Elements']
return ret
def get_convergences_max_delta_freq_vs_pass(self, as_dataframe=True):
''' Index([u'Pass Number', u'Solved Elements', u'Max Delta Freq. %' ]) '''
KEY = 'Max Delta Freq. %'
ret = OrderedDict()
for key, df in self.convergence.items():
s = df[KEY]
s = s.reset_index().dropna().set_index('Pass Number')
#s.index = df['Pass Number']
ret[key] = s
if as_dataframe:
ret = pd.concat(ret)
ret = ret.unstack(0)[KEY]
return ret
def get_mesh_tot(self):
ret = OrderedDict()
for key, m in self.mesh_stats.items():
ret[key] = m['Num Tets '].sum()
return ret
def get_Ejs(self, variation):
    ''' Josephson energies EJ in GHz for each junction of a variation.
    Computed as fluxQ**2 / Lj / h; fluxQ is presumably the reduced flux
    quantum -- see calcs.convert.

    Returns a pandas Series (same index as self.Ljs[variation]).
    '''
    Ljs = self.Ljs[variation]
    Ejs = fluxQ**2/Ljs/Planck*10**-9
    return Ejs
def get_Ecs(self, variation):
    ''' Charging energies EC in GHz, converted from the junction
    capacitances Cjs (in farads) of a variation.
    Returns as pandas series
    '''
    Cs = self.Cjs[variation]
    return Convert.Ec_from_Cs(Cs, units_in='F', units_out='GHz')
def analyze_all_variations(self,
variations: List[str] = None,
analyze_previous=False,
**kwargs):
'''
See analyze_variation for full documentation
Args:
variations: None returns all_variations otherwise this is a list with number as strings ['0', '1']
analyze_previous: set to true if you wish to overwrite previous analysis
**kwargs: Keyword arguments passed to :func:`~pyEPR.QuantumAnalysis.analyze_variation`.
'''
result = OrderedDict()
if variations is None:
variations = self.variations
for variation in variations:
if (not analyze_previous) and (variation in self.results.keys()):
result[variation] = self.results[variation]
else:
result[variation] = self.analyze_variation(variation, **kwargs)
self.results.save()
return result
def _get_ansys_total_energies(self, variation):
res = {}
for getkey in ['U_tot_cap', 'U_tot_ind', 'U_H', 'U_E', 'U_norm']:
res[getkey] = pd.Series({mode: self.ansys_energies[variation][mode][getkey]
for mode in self.ansys_energies[variation]})
df = pd.DataFrame(res)
df.index.name = 'modes'
return df
def _get_participation_normalized(self, variation, _renorm_pj=None, print_=False):
    '''
    Get normalized Pmj Matrix.

    Normalizes the inductive (Pm) and capacitive (Pm_cap) participation
    matrices using the global Ansys energies, depending on _renorm_pj:
    None -> use instance default; falsy -> no renormalization;
    True/1 -> renormalize everywhere; 2 -> only entries > 0.15.

    Return DataFrame object for PJ
    '''
    if _renorm_pj is None:
        _renorm_pj = self._renorm_pj

    # Columns are junctions; rows are modes
    Pm = self.PM[variation].copy()  # EPR matrix DataFrame
    # EPR matrix for capacitor DataFrame
    Pm_cap = self.PM_cap[variation].copy()

    if _renorm_pj:  # just non False
        # Renormalize
        # Should we still do this when Pm_glb_sum is very small
        #s = self.sols[variation]
        # sum of participation energies as calculated by global UH and UE
        # U_mode = s['U_E'] # peak mode energy; or U bar as i denote it sometimes
        # We need to add the capacitor here, and maybe take the mean of that

        energies = self._get_ansys_total_energies(variation)

        U_mode = (energies['U_tot_cap'] + energies['U_tot_ind'])/2.
        U_diff = abs(energies['U_tot_cap'] - energies['U_tot_ind'])/U_mode
        # Large cap/ind energy imbalance suggests a non-converged simulation.
        if np.any(U_diff > 0.15):
            logger.error(f"WARNING: U_tot_cap-U_tot_ind / mean = {np.max(np.abs(U_diff))*100:.1f}% is > 15%. \
\nIs the simulation converged? Proceed with caution")

        # global sums of participations
        Pm_glb_sum = abs((U_mode-energies['U_H'])/U_mode)
        Pm_cap_glb_sum = abs((U_mode-energies['U_E'])/U_mode)

        # norms
        Pm_norm = Pm_glb_sum/Pm.sum(axis=1)
        Pm_cap_norm = Pm_cap_glb_sum/Pm_cap.sum(axis=1)

        # this is not the correct scaling yet! WARNING. Factors of 2 laying around too
        # these numbers are a bit all over the place for now. very small
        if _renorm_pj == True or _renorm_pj == 1:
            idx = Pm > -1E6  # everywhere scale
            idx_cap = Pm_cap > -1E6
        elif _renorm_pj == 2:
            idx = Pm > 0.15  # Mask for where to scale
            idx_cap = Pm_cap > 0.15
        else:
            raise NotImplementedError(
                "Unknown _renorm_pj argument or config values!")

        if print_:
            # \nPm_cap_norm=\n{Pm_cap_norm}")
            print(f"Pm_norm=\n{Pm_norm}\n")
            print(f"Pm_norm idx =\n{idx}")

        # Row-wise rescale only the masked entries
        Pm[idx] = Pm[idx].mul(Pm_norm, axis=0)
        Pm_cap[idx_cap] = Pm_cap[idx_cap].mul(Pm_cap_norm, axis=0)
        #Pm = Pm.mul(Pm_norm, axis=0)
        #Pm_cap = Pm_cap.mul(Pm_cap_norm, axis=0)

    else:
        Pm_norm = 1
        Pm_cap_norm = 1
        idx = None
        idx_cap = None
        if print_:
            print('NO renorm!')

    # Negative participations are numerical noise (or a very low-Q mode);
    # warn and take the absolute value.
    if np.any(Pm < 0.0):
        print_color(" ! Warning: Some p_mj was found <= 0. This is probably a numerical error,'\
'or a super low-Q mode. We will take the abs value. Otherwise, rerun with more precision,'\
'inspect, and do due diligence.)")
        print(Pm, '\n')
        Pm = np.abs(Pm)

    return {'PJ': Pm, 'Pm_norm': Pm_norm, 'PJ_cap': Pm_cap,
            'Pm_cap_norm': Pm_cap_norm,
            'idx': idx,
            'idx_cap': idx_cap}
def get_epr_base_matrices(self, variation, _renorm_pj=None, print_=False):
    r'''
    Return the key matrices used in the EPR method for analytic calculations.

    All as matrices
        :PJ: Participation matrix, p_mj
        :SJ: Sign matrix, s_mj
        :Om: Omega_mm matrix (in GHz) (\hbar = 1) Not radians.
        :EJ: E_jj matrix of Josephson energies (in same units as hbar omega matrix)
        :PHI_zpf: ZPFs in units of \phi_0 reduced flux quantum
        :PJ_cap: capacitive participation matrix
        :n_zpf: charge ZPFs from the capacitive participations

    Return all as *np.array*
        PJ, SJ, Om, EJ, PHI_zpf, PJ_cap, n_zpf
    '''
    # TODO: supersede by Convert.ZPF_from_EPR

    res = self._get_participation_normalized(
        variation, _renorm_pj=_renorm_pj, print_=print_)

    PJ = np.array(res['PJ'])
    PJ_cap = np.array(res['PJ_cap'])

    # Sign bits
    SJ = np.array(self.SM[variation])  # DataFrame

    # Frequencies of HFSS linear modes.
    # Input in dataframe but of one line. Output nd array
    Om = np.diagflat(self.OM[variation].values)  # GHz

    # Junction energies
    EJ = np.diagflat(self.get_Ejs(variation).values)  # GHz
    Ec = np.diagflat(self.get_Ecs(variation).values)  # GHz

    for x in ("PJ", "SJ", "Om", "EJ"):
        logger.debug(f"{x}=")
        logger.debug(locals()[x])

    PHI_zpf = CalcsBasic.epr_to_zpf(PJ, SJ, Om, EJ)
    n_zpf = CalcsBasic.epr_cap_to_nzpf(PJ, SJ, Om, Ec)

    return PJ, SJ, Om, EJ, PHI_zpf, PJ_cap, n_zpf  # All as np.array
def analyze_variation(self,
                      variation: str,
                      cos_trunc: int = None,
                      fock_trunc: int = None,
                      print_result: bool = True,
                      junctions: List = None,
                      modes: List = None):
    # TODO avoid analyzing a previously analyzed variation
    '''
    Core analysis function to call!

    Args:
        variation: label of the variation to analyze.
        cos_trunc: cosine-expansion truncation for the numerical
            diagonalization; None skips the ND step.
        fock_trunc: Fock-space truncation for the ND; None skips it.
        print_result: print a report of the result.
        junctions: list or slice of junctions to include in the analysis.
            None defaults to analysing all junctions
        modes: list or slice of modes to include in the analysis.
            None defaults to analysing all modes

    Returns:
        dict: Dictionary containing at least the following:
            * f_0 [MHz]: Eigenmode frequencies computed by HFSS; i.e., linear freq returned in GHz
            * f_1 [MHz]: Dressed mode frequencies (by the non-linearity; e.g., Lamb shift, etc. ).
              Result based on 1st order perturbation theory on the 4th order expansion of the cosine.
            * f_ND [MHz]: Numerical diagonalization result of dressed mode frequencies.
              only available if `cos_trunc` and `fock_trunc` are set (non None).
            * chi_O1 [MHz]: Analytic expression for the chis based on a cos trunc to 4th order, and using 1st
              order perturbation theory. Diag is anharmonicity, off diag is full cross-Kerr.
            * chi_ND [MHz]: Numerically diagonalized chi matrix. Diag is anharmonicity, off diag is full
              cross-Kerr.
    '''

    # ensuring proper matrix dimensionality when slicing
    junctions = (junctions,) if type(junctions) is int else junctions

    if modes is None:
        modes = list(range(self.n_modes))

    # Temporarily narrow the object to the requested modes; restored below.
    tmp_n_modes = self.n_modes
    tmp_modes = self.modes[variation]
    self.n_modes = len(modes)
    self.modes[variation] = modes

    if (fock_trunc is None) or (cos_trunc is None):
        fock_trunc = cos_trunc = None

    if print_result:
        print('\n', '. '*40)
        print('Variation %s\n' % variation)
    else:
        print('%s, ' % variation, end='')

    # Get matrices
    PJ, SJ, Om, EJ, PHI_zpf, PJ_cap, n_zpf = self.get_epr_base_matrices(
        variation)
    freqs_hfss = self.freqs_hfss[variation].values[(modes)]
    Ljs = self.Ljs[variation].values

    # reduce matrices to only include certain modes/junctions
    if junctions is not None:
        Ljs = Ljs[junctions, ]
        PJ = PJ[:, junctions]
        SJ = SJ[:, junctions]
        EJ = EJ[:, junctions][junctions, :]
        PHI_zpf = PHI_zpf[:, junctions]
        PJ_cap = PJ_cap[:, junctions]
    if modes is not None:
        PJ = PJ[modes, :]
        SJ = SJ[modes, :]
        Om = Om[modes, :][:, modes]
        PHI_zpf = PHI_zpf[modes, :]
        # BUG FIX: this line used to read `PJ_cap = PJ_cap[:, junctions]`,
        # which re-sliced the junction axis a second time (out-of-range for
        # already-sliced columns) and, when junctions was None, silently
        # inserted a spurious axis via fancy indexing with None. The mode
        # rows must be selected here, mirroring PJ/SJ/PHI_zpf above.
        PJ_cap = PJ_cap[modes, :]

    # Analytic 4-th order
    CHI_O1 = 0.25 * Om @ PJ @ inv(EJ) @ PJ.T @ Om * 1000.  # MHz
    f1s = np.diag(Om) - 0.5*np.ndarray.flatten(np.array(CHI_O1.sum(1))) / \
        1000.  # 1st order PT expect freq to be dressed down by alpha
    CHI_O1 = divide_diagonal_by_2(CHI_O1)  # Make the diagonals alpha

    # Numerical diag
    if cos_trunc is not None:
        f1_ND, CHI_ND = epr_numerical_diagonalization(freqs_hfss,
                                                      Ljs,
                                                      PHI_zpf,
                                                      cos_trunc=cos_trunc,
                                                      fock_trunc=fock_trunc)
    else:
        f1_ND, CHI_ND = None, None

    result = OrderedDict()
    result['f_0'] = self.freqs_hfss[variation][modes] * 1E3  # MHz - obtained directly from HFSS
    result['f_1'] = pd.Series(f1s)*1E3  # MHz
    result['f_ND'] = pd.Series(f1_ND)*1E-6  # MHz
    result['chi_O1'] = pd.DataFrame(CHI_O1)
    result['chi_ND'] = pd.DataFrame(CHI_ND)  # why dataframe?
    result['ZPF'] = PHI_zpf
    result['Pm_normed'] = PJ
    try:
        result['Pm_raw'] = self.PM[variation][self.PM[variation].columns[0]][modes]  # TODO change the columns to junctions
    except Exception:  # narrowed from bare except; fall back to full matrix
        result['Pm_raw'] = self.PM[variation]
    _temp = self._get_participation_normalized(
        variation, _renorm_pj=self._renorm_pj, print_=print_result)
    result['_Pm_norm'] = _temp['Pm_norm'][modes]
    result['_Pm_cap_norm'] = _temp['Pm_cap_norm'][modes]

    # just propagate
    result['hfss_variables'] = self._hfss_variables[variation]
    result['Ljs'] = self.Ljs[variation]
    result['Cjs'] = self.Cjs[variation]
    try:
        result['Q_coupling'] = self.Qm_coupling[variation][self.Qm_coupling[variation].columns[junctions]][modes]  # TODO change the columns to junctions
    except Exception:
        result['Q_coupling'] = self.Qm_coupling[variation]
    try:
        result['Qs'] = self.Qs[variation][self.PM[variation].columns[junctions]][modes]  # TODO change the columns to junctions
    except Exception:
        result['Qs'] = self.Qs[variation][modes]
    result['sol'] = self.sols[variation]

    result['fock_trunc'] = fock_trunc
    result['cos_trunc'] = cos_trunc

    self.results[variation] = result
    self.results.save()

    if print_result:
        self.print_variation(variation)
        self.print_result(result)

    # Restore the full mode bookkeeping.
    # TODO is this smart; consider defining the modes of interest in the
    # initialisation of the quantum object instead.
    self.n_modes = tmp_n_modes
    self.modes[variation] = tmp_modes

    return result
def full_report_variations(self, var_list: list=None):
"""see full_variation_report"""
if var_list is None:
var_list = self.variations
for variation in var_list:
self.full_variation_report(variation)
def full_variation_report(self, variation):
"""
prints the results and parameters of a specific variation
Parameters
----------
variation : int or str
the variation to be printed .
Returns
-------
None.
"""
self.print_variation(variation)
self.print_result(variation)
def print_variation(self, variation):
"""
Utility reporting function
"""
if variation is int: variation = str(variation)
if len(self.hfss_vars_diff_idx) > 0:
print('\n*** Different parameters')
display(self._hfss_variables[self.hfss_vars_diff_idx][variation])
print('\n')
print('*** P (participation matrix, not normlz.)')
print(self.PM[variation])
print('\n*** S (sign-bit matrix)')
print(self.SM[variation])
def print_result(self, result):
    """
    Utility reporting function: print the normalized participations,
    chi matrices (O1 PT and ND), dressed frequencies, and Q_coupling.

    Args:
        result (dict, str or int): an analysis-result dict, or a
            variation label used to look one up in self.results.
    """
    if type(result) is str or type(result) is int: result = self.results[str(result)]
    # TODO: actually make into dataframe with mode labels and junction labels
    pritm = lambda x, frmt="{:9.2g}": print_matrix(x, frmt=frmt)

    print('*** P (participation matrix, normalized.)')
    pritm(result['Pm_normed'])

    print('\n*** Chi matrix O1 PT (MHz)\n Diag is anharmonicity, off diag is full cross-Kerr.')
    pritm(result['chi_O1'], "{:9.3g}")

    print('\n*** Chi matrix ND (MHz) ')
    pritm(result['chi_ND'], "{:9.3g}")

    print('\n*** Frequencies O1 PT (MHz)')
    print(result['f_1'])

    print('\n*** Frequencies ND (MHz)')
    print(result['f_ND'])

    print('\n*** Q_coupling')
    print(result['Q_coupling'])
def plotting_dic_x(self, Var_dic, var_name):
    """Build the x-axis plotting data for a sweep of var_name, with all
    other differing HFSS variables pinned by Var_dic.

    Returns:
        (list, dict): matching variations and a dict with keys
        'label' (the fixed-variable filter string), 'x_label', and 'x'.

    Raises:
        ValueError: if Var_dic plus var_name does not account for every
            HFSS variable that differs between variations.
    """
    dic = {}
    if (len(Var_dic.keys())+1) == self.Num_hfss_vars_diff_idx:
        lv, lv_str = self.get_variation_of_multiple_variables_value(
            Var_dic)
        dic['label'] = lv_str
        dic['x_label'] = var_name
        dic['x'] = self.get_variable_value(var_name, lv=lv)
    else:
        raise ValueError('more than one hfss variable changes each time')

    return lv, dic
# Does not seem used. What is Var_dic and var_name going to?
# def plotting_dic_data(self, Var_dic, var_name, data_name):
# lv, dic = self.plotting_dic_x()
# dic['y_label'] = data_name
def plot_results(self, result, Y_label, variable, X_label, variations: list = None):
    """Placeholder -- not implemented yet (no-op)."""
    # TODO?
    pass
def plot_hamiltonian_results(self,
                             swp_variable: str = 'variation',
                             variations: list = None,
                             fig=None,
                             x_label: str = None):
    """Plot results versus variation.

    Four panels: modal frequencies (HFSS/O1/ND), quality factors,
    anharmonicities, and cross-Kerr frequencies.

    Keyword Arguments:
        swp_variable {str} -- Variable against which we swept. If none, then just
            take the variation index (default: {None})
        variations {list} -- subset of variations to plot (default: {None})
        fig {[type]} -- existing figure with an `axs` attribute (default: {None})
        x_label {str} -- x-axis label; defaults to swp_variable.

    Returns:
        fig, axs
    """
    x_label = x_label or swp_variable

    # Create figure and axes
    if not fig:
        fig, axs = plt.subplots(2, 2, figsize=(10, 6))
    else:
        axs = fig.axs

    ############################################################################
    # Axis: Frequencies
    # sort_index(key=int-cast) orders variation labels numerically, not lexically
    f0 = self.results.get_frequencies_HFSS(
        variations=variations, vs=swp_variable).transpose().sort_index(key=lambda x: x.astype(int))
    f1 = self.results.get_frequencies_O1(
        variations=variations, vs=swp_variable).transpose().sort_index(key=lambda x: x.astype(int))
    f_ND = self.results.get_frequencies_ND(
        variations=variations, vs=swp_variable).transpose().sort_index(key=lambda x: x.astype(int))
    # changed by Asaf from f0 as not all modes are always analyzed
    mode_idx = list(f1.columns)
    n_modes = len(mode_idx)

    ax = axs[0, 0]
    ax.set_title('Modal frequencies (MHz)')

    # TODO: should move these kwargs to the config
    cmap = cmap_discrete(n_modes)
    kw = dict(ax=ax, color=cmap, legend=False, lw=0, ms=0)

    # Choose which freq should have the solid line drawn with it. ND if present, else f1
    if f_ND.empty:
        plt_me_line = f1
        markerf1 = 'o'
    else:
        plt_me_line = f_ND
        markerf1 = '.'

        # plot the ND as points if present
        f_ND.plot(**{**kw, **dict(marker='o', ms=4, zorder=30)})

    f0.plot(**{**kw, **dict(marker='x', ms=2, zorder=10)})
    f1.plot(**{**kw, **dict(marker=markerf1, ms=4, zorder=20)})
    plt_me_line.plot(**{**kw, **dict(lw=1, alpha=0.6, color='grey')})

    ############################################################################
    # Axis: Quality factors
    Qs = self.get_quality_factors(swp_variable=swp_variable)
    Qs = Qs if variations is None else Qs[variations]
    Qs = Qs.transpose().sort_index(key=lambda x: x.astype(int))

    ax = axs[1, 0]
    ax.set_title('Quality factors')
    Qs.plot(ax=ax, lw=0, marker=markerf1, ms=4,
            legend=True, zorder=20, color=cmap)
    Qs.plot(ax=ax, lw=1, alpha=0.2, color='grey', legend=False)
    df_Qs = np.isinf(Qs)
    # pylint: disable=E1101
    # Instance of 'ndarray' has no 'values' member (no-member)
    Qs_val = df_Qs.values
    Qs_inf = Qs_val.sum()
    # Log scale only when there is data and no infinite Q
    if not (len(Qs) == 0 or Qs_inf > 0):
        ax.set_yscale('log')

    ############################################################################
    # Axis: Alpha and chi
    axs[0][1].set_title('Anharmonicities (MHz)')
    axs[1][1].set_title('Cross-Kerr frequencies (MHz)')

    def plot_chi_alpha(chi, primary):
        """
        Internal function to plot chi and then also to plot alpha
        """
        idx = pd.IndexSlice
        kw1 = dict(lw=0, ms=4, marker='o' if primary else 'x')
        kw2 = dict(lw=1, alpha=0.2, color='grey', label='_nolegend_')
        # ------------------------
        # Plot anharmonicity
        ax = axs[0, 1]
        for i, mode in enumerate(mode_idx):  # mode index number, mode index
            alpha = chi.loc[idx[:, mode], mode].unstack(1)
            alpha.columns = [mode]
            alpha.plot(ax=ax, label=mode, color=cmap[i], **kw1)
            if primary:
                alpha.plot(ax=ax, **kw2)

        # ------------------------
        # Plot chi
        ax = axs[1, 1]
        for mode in mode_idx:  # mode index number, mode index
            # restart the color counter i; n= mode2
            for i, mode2 in enumerate(mode_idx):
                # only the upper triangle: each pair plotted once
                if int(mode2) > int(mode):
                    chi_element = chi.loc[idx[:, mode], mode2].unstack(1)
                    chi_element.plot(ax=ax, label=f"{mode},{mode2}", color=cmap[i], **kw1)
                    if primary:
                        chi_element.plot(ax=ax, **kw2)

    def do_legends():
        legend_translucent(axs[0][1], leg_kw=dict(fontsize=7, title='Mode'))
        legend_translucent(axs[1][1], leg_kw=dict(fontsize=7))

    chiO1 = self.get_chis(variations=variations,
                          swp_variable=swp_variable, numeric=False)
    chiND = self.get_chis(variations=variations,
                          swp_variable=swp_variable, numeric=True)

    # Use ND only when every analyzed variation ran the numerical diag.
    use_ND = not np.any(
        [r['fock_trunc'] == None for k, r in self.results.items()])
    if use_ND:
        plot_chi_alpha(chiND, True)
        do_legends()
        plot_chi_alpha(chiO1, False)
    else:
        plot_chi_alpha(chiO1, True)
        do_legends()

    for ax1 in axs:
        for ax in ax1:
            ax.set_xlabel(x_label)

    # Wrap up
    fig.tight_layout()

    return fig, axs
# Below are functions introduced in v0.8 and newer
def report_results(self, swp_variable='variation', numeric=True):
    """
    Report in table form the results in a markdown friendly way in Jupyter notebook
    using the pandas interface.

    Args:
        numeric (bool): forwarded to get_frequencies/get_chis; selects the
            numerically diagonalized results when True.
    """
    # NOTE(review): the markdown headers always read "Numerical
    # diagonalization" even when numeric=False -- confirm intent.
    with pd.option_context('display.precision', 2):
        display(Markdown(("#### Mode frequencies (MHz)")))
        display(Markdown(("###### Numerical diagonalization")))
        display(self.get_frequencies(
            swp_variable=swp_variable, numeric=numeric))

        display(Markdown(("#### Kerr Non-linear coefficient table (MHz)")))
        display(Markdown(("###### Numerical diagonalization")))
        display(self.get_chis(swp_variable=swp_variable, numeric=numeric))
def get_chis(self, swp_variable='variation', numeric=True, variations: list = None,
             m=None, n=None):
    """return as multiindex data table

    If you provide m and n as integers or mode labels, then the chi between these modes will
    be returned as a pandas Series.

    Args:
        numeric (bool): True selects the numerically diagonalized chi
            ('chi_ND'), False the first-order-PT chi ('chi_O1').
    """
    label = 'chi_ND' if numeric else 'chi_O1'
    df = pd.concat(self.results.vs_variations(
        label, vs=swp_variable, variations=variations),
        names=[swp_variable])

    if m is None and n is None:
        return df
    else:
        # pick the (m, n) chi element across the sweep
        s = df.loc[pd.IndexSlice[:, m], n].unstack(1)[m]
        return s
def get_frequencies(self, swp_variable='variation', numeric=True, variations: list = None):
"""return as multiindex data table
index: eigenmode label
columns: variation label
"""
label = 'f_ND' if numeric else 'f_1'
return self.results.vs_variations(label, vs=swp_variable, to_dataframe=True, variations=variations)
def get_quality_factors(self, swp_variable='variation', variations: list = None):
    """Quality factors ('Qs') as a table.

    index: eigenmode label
    columns: variation label (or swp_variable value)
    """
    return self.results.vs_variations('Qs', vs=swp_variable, to_dataframe=True, variations=variations)
def get_participations(self, swp_variable='variation',
                       variations: list = None,
                       inductive=True,
                       _normed=True):
    """
    Energy-participation ratios across the sweep.

    Args:
        inductive (bool): EPR for junction inductance when True, else for capacitors
        _normed (bool): use normalized participations; raw is only
            supported for inductive.

    Returns:
        ----------------
        Returns a multiindex dataframe:
            index 0: sweep variable
            index 1: mode number
            column: junction number

    Example use:
        ---------------
        Plot the participation ratio of all junctions for a given mode vs a sweep of Lj.

        .. code-block language:python

            df=epra.get_participations(swp_variable='Lj')
            df.loc[pd.IndexSlice[:,0],0].unstack(1).plot(marker='o')
    """
    if inductive:
        if _normed:
            getme = 'Pm_normed'
        else:
            getme = 'Pm_raw'
    else:
        if _normed:
            getme = 'Pm_cap'
        else:
            raise NotImplementedError(
                'not inductive and not _normed not implemented')

    participations = self.results.vs_variations(getme, vs=swp_variable)

    # Normalize each variation's matrix to a labeled DataFrame, then stack
    p2 = OrderedDict()
    for key, val in participations.items():
        df = pd.DataFrame(val)
        df.index.name = 'mode'
        df.columns.name = 'junc_idx'
        p2[key] = df
    participations = pd.concat(p2, names=[swp_variable])

    return participations
def _get_PM_as_DataFrame(self):
"""
Pm = epra._get_PM_as_DataFrame()
Pm.unstack(1).groupby(axis=1,level=1).plot()
"""
Pm = pd.concat(self.PM)
Pm.index.set_names(['variation', 'mode'], inplace=True)
Pm.columns.set_names(['junction'], inplace=True)
return Pm
def get_ansys_energies(self, swp_var='variation'):
"""
Return a multi-index dataframe of ansys energies vs swep_variable
Args:
swp_var (str) :
"""
if swp_var == 'variation':
energies = self.ansys_energies
else:
energies = self.get_vs_variable(swp_var, 'ansys_energies')
df = pd.concat({k: pd.DataFrame(v).transpose()
for k, v in energies.items()})
df.index.set_names([swp_var, 'mode'], inplace=True)
return df
def quick_plot_participation(self, mode, junction, swp_variable='variation', ax=None, kw=None):
    """Quick plot of the participation of one mode in one junction vs the sweep.

    Args:
        mode: mode label (row in the participation table).
        junction: junction index (column).
        ax: matplotlib axis to draw on (default: current axis).
        kw: extra plot arguments
    """
    df = self.get_participations(swp_variable=swp_variable)
    kw = kw or {}
    ax = ax or plt.gca()
    df.loc[pd.IndexSlice[:, mode], junction].unstack(
        1).plot(marker='o', ax=ax, **kw)
    ax.set_ylabel(f'p_({mode},{junction})')
def quick_plot_frequencies(self, mode, swp_variable='variation', ax=None, kw=None, numeric=False):
    """Quick plot of the frequency of one mode vs the sweep.

    Args:
        numeric (bool): plot f_ND (True) or the O1 PT f_1 (False).
        kw: extra plot arguments
    """
    kw = kw or {}
    ax = ax or plt.gca()

    s = self.get_frequencies(
        numeric=numeric, swp_variable=swp_variable).transpose()[mode]
    s.plot(marker='o', ax=ax, **kw)

    ax.set_ylabel(f'$\\omega_{mode}$ (MHz)')
def quick_plot_chi_alpha(self, mode1, mode2, swp_variable='variation', ax=None, kw=None, numeric=False):
    """Quick plot chi between mode 1 and mode 2.

    If you select mode1=mode2, then you will plot the alpha (anharmonicity).

    Args:
        kw: extra plot arguments
    """
    kw = kw or {}
    ax = ax or plt.gca()

    s = self.get_chis(swp_variable=swp_variable,
                      numeric=numeric).loc[pd.IndexSlice[:, mode1], mode2].unstack(1)
    s.plot(marker='o', ax=ax, **kw)

    if mode1 == mode2:
        ax.set_ylabel(f'$\\alpha$({mode1}) (MHz) [anharmonicity]')
    else:
        ax.set_ylabel(f'$\\chi$({mode1,mode2}) (MHz) [total split]')
def quick_plot_mode(self, mode, junction, mode1=None, swp_variable='variation', numeric=False, sharex=True):
    r"""Create a quick report to see mode parameters for only a single mode and a
    cross-kerr coupling to another mode.

    Plots the participation and cross participation.
    Plots the frequency.
    Plots the anharmonicity.

    The values are either for the numeric or the non-numeric results, set by `numeric`
    """
    fig, axs = plt.subplots(2, 2, figsize=(12*0.9, 7*0.9))
    self.quick_plot_frequencies(
        mode, swp_variable=swp_variable, numeric=numeric, ax=axs[0, 1])
    self.quick_plot_participation(
        mode, junction, swp_variable=swp_variable, ax=axs[0, 0])
    self.quick_plot_chi_alpha(mode, mode, numeric=numeric, swp_variable=swp_variable, ax=axs[1, 0],
                              kw=dict(sharex=sharex))
    if mode1:
        # Overlay the cross-Kerr to mode1 and its participation on a twin axis
        self.quick_plot_chi_alpha(
            mode, mode1, numeric=numeric, swp_variable=swp_variable, ax=axs[1, 1])
        twinax = axs[0, 0].twinx()
        self.quick_plot_participation(mode1, junction, swp_variable=swp_variable, ax=twinax,
                                      kw=dict(alpha=0.7, color='maroon', sharex=sharex))
    for ax in np.ndarray.flatten(axs):
        ax.grid(alpha=0.2)
    axs[0, 1].set_title('Frequency (MHz)')
    axs[0, 0].set_title('Self- and cross-EPR')
    axs[1, 0].set_title('Anharmonicity')
    axs[1, 1].set_title('Cross-Kerr')
    fig.suptitle(f'Mode {mode}', y=1.025)
    fig.tight_layout()
def quick_plot_convergence(self, ax = None):
    """
    Plot a report of the Ansys convergence vs pass number on a twin axis
    for the number of tets and the max delta frequency of the eigenmode.
    """
    ax = ax or plt.gca()
    ax_t = ax.twinx()

    convergence_tets = self.get_convergences_tets_vs_pass()
    convergence_freq = self.get_convergences_max_delta_freq_vs_pass()

    convergence_freq.name = 'Δf'

    plot_convergence_max_df(ax, convergence_freq)
    plot_convergence_solved_elem(ax_t, convergence_tets)
def extract_dic(name=None, file_name=None):
    """Load pickled dictionaries back out of an .npz archive.

    Args:
        name (str, optional): name of the dictionary as saved in the npz
            file; if None, all dictionaries in the archive are returned.
        file_name (str): path of the npz file.

    Returns:
        list: the requested dictionary as a singleton list, or all of them.
    """
    with np.load(file_name, allow_pickle=True) as archive:
        wanted = archive.keys() if name is None else [name]
        return [archive[key][()] for key in wanted]
| 44,973 | 36.384871 | 154 | py |
pyEPR | pyEPR-master/pyEPR/project_info.py | """
Main interface module to use pyEPR.
Contains code to connect to Ansys and to analyze HFSS files using the EPR method.
This module handles the microwave part of the analysis and connection to
Further contains code to be able to do autogenerated reports,
Copyright Zlatko Minev, Zaki Leghtas, and the pyEPR team
2015, 2016, 2017, 2018, 2019, 2020
"""
from __future__ import print_function # Python 2.7 and 3 compatibility
import sys
from pathlib import Path
import pandas as pd
from . import Dict, ansys, config, logger
from .toolbox.pythonic import get_instance_vars
# Recognized dissipative-element categories; used to validate the keys of
# the ProjectInfo._Dissipative container below.
diss_opt = [
    'dielectrics_bulk', 'dielectric_surfaces', 'resistive_surfaces', 'seams'
]
class ProjectInfo(object):
"""
Primary class to store interface information between ``pyEPR`` and ``Ansys``.
* **Ansys:** stores and provides easy access to the ansys interface classes :py:class:`pyEPR.ansys.HfssApp`,
:py:class:`pyEPR.ansys.HfssDesktop`, :py:class:`pyEPR.ansys.HfssProject`, :py:class:`pyEPR.ansys.HfssDesign`,
:py:class:`pyEPR.ansys.HfssSetup` (which, if present could nbe a subclass, such as a driven modal setup
:py:class:`pyEPR.ansys.HfssDMSetup`, eigenmode :py:class:`pyEPR.ansys.HfssEMSetup`, or Q3D :py:class:`pyEPR.ansys.AnsysQ3DSetup`),
the 3D modeler to design geometry :py:class:`pyEPR.ansys.HfssModeler`.
* **Junctions:** The class stores params about the design that the user puts will use, such as the names and
properties of the junctions, such as which rectangle and line is associated with which junction.
Note:
**Junction parameters.**
The junction parameters are stored in the ``self.junctions`` ordered dictionary
A Josephson tunnel junction has to have its parameters specified here for the analysis.
Each junction is given a name and is specified by a dictionary.
It has the following properties:
* ``Lj_variable`` (str):
Name of HFSS variable that specifies junction inductance Lj defined
on the boundary condition in HFSS.
WARNING: DO NOT USE Global names that start with $.
* ``rect`` (str):
String of Ansys name of the rectangle on which the lumped boundary condition is defined.
* ``line`` (str):
Name of HFSS polyline which spans the length of the rectangle.
Used to define the voltage across the junction.
Used to define the current orientation for each junction.
Used to define sign of ZPF.
* ``length`` (str):
Length in HFSS of the junction rectangle and line (specified in meters).
To create, you can use :code:`epr.parse_units('100um')`.
* ``Cj_variable`` (str, optional) [experimental]:
Name of HFSS variable that specifies junction inductance Cj defined
on the boundary condition in HFSS. DO NOT USE Global names that start with ``$``.
Warning:
To define junctions, do **NOT** use global names!
I.e., do not use names in ansys that start with ``$``.
Note:
**Junction parameters example .** To define junction parameters, see the following example
.. code-block:: python
:linenos:
# Create project infor class
pinfo = ProjectInfo()
# Now, let us add a junction called `j1`, with the following properties
pinfo.junctions['j1'] = {
'Lj_variable' : 'Lj_1', # name of Lj variable in Ansys
'rect' : 'jj_rect_1',
'line' : 'jj_line_1',
#'Cj' : 'Cj_1' # name of Cj variable in Ansys - optional
}
To extend to define 5 junctions in bulk, we could use the following script
.. code-block:: python
:linenos:
n_junctions = 5
for i in range(1, n_junctions + 1):
pinfo.junctions[f'j{i}'] = {'Lj_variable' : f'Lj_{i}',
'rect' : f'jj_rect_{i}',
'line' : f'jj_line_{i}'}
.. _Google Python Style Guide:
http://google.github.io/styleguide/pyguide.html
"""
class _Dissipative:
    """
    Deprecation shim: the old attribute-style ``_Dissipative`` object now
    behaves like a dictionary. Attribute access still works, but logs a
    deprecation warning pointing users at ``pinfo.dissipative['key']``.
    """

    def __init__(self):
        self['pinfo'] = None
        for opt in diss_opt:
            self[opt] = None

    def __setitem__(self, key, value):
        # --- check valid inputs ---
        if not (key in diss_opt or key == 'pinfo'):
            raise ValueError(f"No such parameter {key}")
        if key != 'pinfo' and (not isinstance(value, (list, dict)) or
                               not all(isinstance(x, str) for x in value)) \
                and (value is not None):
            raise ValueError(f'dissipative[\'{key}\'] must be a list of strings '
                             'containing names of models in the project or dictionary of strings of models containing '
                             'material loss properties!'
                             )
        # Only validate object names against the design when a value is
        # actually supplied. BUG FIX: previously `for x in value` raised
        # TypeError when assigning None after connecting to a design.
        if key != 'pinfo' and value is not None and hasattr(self['pinfo'], 'design'):
            for x in value:
                if x not in self['pinfo'].get_all_object_names():
                    raise ValueError(
                        f'\'{x}\' is not an object in the HFSS project')
        # Bypass our own __setattr__ (which would log a deprecation warning)
        super().__setattr__(key, value)

    def __getitem__(self, attr):
        if not (attr in diss_opt or attr == 'pinfo'):
            raise AttributeError(f'dissipative has no attribute "{attr}". '
                                 f'The possible attributes are:\n {str(diss_opt)}')
        return super().__getattribute__(attr)

    def __setattr__(self, attr, value):
        # Old attribute-style assignment: warn, then delegate to item access
        logger.warning(
            f"DEPRECATED!! use pinfo.dissipative['{attr}'] = {value} instead!"
        )
        self[attr] = value

    def __getattr__(self, attr):
        raise AttributeError(f'dissipative has no attribute "{attr}". '
                             f'The possible attributes are:\n {str(diss_opt)}')

    def __getattribute__(self, attr):
        if attr in diss_opt:
            logger.warning(
                f"DEPRECATED!! use pinfo.dissipative['{attr}'] instead!")
        return super().__getattribute__(attr)

    def __repr__(self):
        return str(self.data())

    def data(self):
        """Return dissipative as dictionary"""
        return {str(opt): self[opt] for opt in diss_opt}
def __init__(self,
             project_path: str = None,
             project_name: str = None,
             design_name: str = None,
             setup_name: str = None,
             dielectrics_bulk: list = None,
             dielectric_surfaces: list = None,
             resistive_surfaces: list = None,
             seams: list = None,
             do_connect: bool = True):
    """
    Keyword Arguments:

        project_path (str) : Directory path to the hfss project file.
            Should be the directory, not the file.
            Defaults to ``None``; i.e., assumes the project is open, and thus gets the project based
            on `project_name`.
        project_name (str) : Name of the project within the project_path.
            Defaults to ``None``, which will get the current active one.
        design_name (str) : Name of the design within the project.
            Defaults to ``None``, which will get the current active one.
        setup_name (str) : Name of the setup within the design.
            Defaults to ``None``, which will get the current active one.
        dielectrics_bulk (list(str)) : List of names of dielectric bulk objects.
            Defaults to ``None``.
        dielectric_surfaces (list(str)) : List of names of dielectric surfaces.
            Defaults to ``None``.
        resistive_surfaces (list(str)) : List of names of resistive surfaces.
            Defaults to ``None``.
        seams (list(str)) : List of names of seams.
            Defaults to ``None``.
        do_connect (bool) [additional]: Do create connection to Ansys or not? Defaults to ``True``.
    """
    # Path: format path correctly to system convention
    self.project_path = str(Path(project_path)) \
        if not (project_path is None) else None
    self.project_name = project_name
    self.design_name = design_name
    self.setup_name = setup_name

    # HFSS design: describe junction parameters
    # TODO: introduce modal labels
    self.junctions = Dict()  # See above for help
    self.ports = Dict()

    # Dissipative HFSS volumes and surfaces
    self.dissipative = self._Dissipative()
    # NOTE: the constructor parameter names must match the entries of
    # `diss_opt` exactly — `locals()[opt]` picks each argument up by name.
    for opt in diss_opt:
        self.dissipative[opt] = locals()[opt]
    self.options = config.ansys

    # Connected to HFSS variable — populated by connect()
    self.app = None
    self.desktop = None
    self.project = None
    self.design = None
    self.setup = None

    if do_connect:
        self.connect()
    # Give the dissipative shim a back-reference so it can validate names
    self.dissipative['pinfo'] = self

# Attributes excluded when serializing instance variables in save()
_Forbidden = [
    'app', 'design', 'desktop', 'project', 'dissipative', 'setup',
    '_Forbidden', 'junctions'
]
def save(self):
    '''
    Return all the data in a dictionary form that can be used to be saved
    '''
    # Assemble each piece as a pandas object so the caller can serialize
    # the whole bundle uniformly.
    out = {}
    out['pinfo'] = pd.Series(get_instance_vars(self, self._Forbidden))
    out['dissip'] = pd.Series(self.dissipative.data())
    out['options'] = pd.Series(get_instance_vars(self.options), dtype='object')
    out['junctions'] = pd.DataFrame(self.junctions)
    out['ports'] = pd.DataFrame(self.ports)
    return out
def connect_project(self):
    """Connect to the Ansys project and record its handles.

    Sets
        self.app
        self.desktop
        self.project
        self.project_name
        self.project_path
    """
    logger.info('Connecting to Ansys Desktop API...')

    # Opens (or attaches to) the Ansys desktop session and project
    self.app, self.desktop, self.project = ansys.load_ansys_project(
        self.project_name, self.project_path)

    if self.project:
        # TODO: should be property?
        # Refresh name/path from the live project handle
        self.project_name = self.project.name
        self.project_path = self.project.get_path()
def connect_design(self, design_name: str = None):
    """Connect to an Ansys design within the open project.

    Sets
        self.design
        self.design_name

    Args:
        design_name (str): Name of the design to open. When ``None``,
            falls back to ``self.design_name``, and if that is also
            ``None``, the project's active design is used.
    """
    if design_name is not None:
        self.design_name = design_name

    designs_in_project = self.project.get_designs()
    if not designs_in_project:
        # Empty project: nothing to connect to
        self.design = None
        logger.info(
            f'No active design found (or error getting active design).')
        return

    if self.design_name is None:
        # Look for the active design
        try:
            self.design = self.project.get_active_design()
            self.design_name = self.design.name
            logger.info(
                '\tOpened active design\n'
                f'\tDesign:    {self.design_name} [Solution type: {self.design.solution_type}]'
            )
        except Exception as e:
            # No active design
            self.design = None
            self.design_name = None
            logger.info(
                f'No active design found (or error getting active design). Note: {e}'
            )
    else:
        # A specific design name was requested — fetch it by name
        try:
            self.design = self.project.get_design(self.design_name)
            logger.info(
                '\tOpened active design\n'
                f'\tDesign:    {self.design_name} [Solution type: {self.design.solution_type}]'
            )
        except Exception as e:
            # Preserve the original traceback for debugging
            _traceback = sys.exc_info()[2]
            logger.error(f"Original error \N{loudly crying face}: {e}\n")
            raise (Exception(' Did you provide the correct design name?\
                Failed to pull up design. \N{loudly crying face}').
                with_traceback(_traceback))
def connect_setup(self):
    """Connect to the first available setup; if none exists, create a
    default setup matching the design's solution type (Eigenmode,
    DrivenModal, DrivenTerminal, or Q3D).

    Raises:
        Exception: If the setup could not be retrieved or created.
    """
    # Setup
    if self.design is not None:
        try:
            setup_names = self.design.get_setup_names()

            if len(setup_names) == 0:
                # No setup yet — create a default one for the solution type
                logger.warning('\tNo design setup detected.')
                setup = None
                if self.design.solution_type == 'Eigenmode':
                    logger.warning('\tCreating eigenmode default setup.')
                    setup = self.design.create_em_setup()
                elif self.design.solution_type == 'DrivenModal':
                    logger.warning('\tCreating driven modal default setup.')
                    setup = self.design.create_dm_setup()
                elif self.design.solution_type == 'DrivenTerminal':
                    logger.warning('\tCreating driven terminal default setup.')
                    setup = self.design.create_dt_setup()
                elif self.design.solution_type == 'Q3D':
                    logger.warning('\tCreating Q3D default setup.')
                    setup = self.design.create_q3d_setup()
                self.setup_name = setup.name
            else:
                # Default to the first existing setup
                self.setup_name = setup_names[0]

            # get the actual setup if there is one
            self.get_setup(self.setup_name)
        except Exception as e:
            # Preserve the original traceback for debugging
            _traceback = sys.exc_info()[2]
            logger.error(f"Original error \N{loudly crying face}: {e}\n")
            raise Exception(' Did you provide the correct setup name?\
                Failed to pull up setup. \N{loudly crying face}'
                            ).with_traceback(_traceback)
    else:
        # No design: clear setup handles
        self.setup = None
        self.setup_name = None
def connect(self):
    """
    Establish the full connection to the Ansys desktop: project first,
    then design and setup. Returns ``self`` for chaining.
    """
    self.connect_project()

    if self.project:
        self.connect_design()
        self.connect_setup()
    else:
        logger.info('\tConnection to Ansys NOT established. \n')

    # Finalize: refresh cached names from the live handles
    if self.project:
        self.project_name = self.project.name
    if self.design:
        self.design_name = self.design.name

    # Report the resulting connection state
    if self.project and self.design:
        logger.info(
            f'\tConnected to project \"{self.project_name}\" and design \"{self.design_name}\" \N{grinning face} \n'
        )

    if not self.project:
        logger.info(
            '\t Project not detected in Ansys. Is there a project in your desktop app? \N{thinking face} \n'
        )

    if not self.design:
        logger.info(
            f'\t Connected to project \"{self.project_name}\". No design detected'
        )

    return self
def get_setup(self, name: str):
    """
    Connects to a specific setup for the design.
    Sets self.setup and self.setup_name.

    Args:
        name (str): Name of the setup.
            Defaults to ``None``, in which case returns None.

    Returns:
        The setup handle, or ``None`` if ``name`` is ``None``.

    Raises:
        Exception: If the setup could not be retrieved from the design.
    """
    if name is None:
        return None

    self.setup = self.design.get_setup(name=name)
    if self.setup is None:
        # BUG FIX: previously execution continued after logging and then
        # crashed with a confusing AttributeError on `self.setup.name`.
        logger.error(f"Could not retrieve setup: {name}\n \
                Did you give the right name? Does it exist?")
        raise Exception(f"Could not retrieve setup: {name}. "
                        "Did you give the right name? Does it exist?")

    self.setup_name = self.setup.name
    logger.info(
        f'\tOpened setup `{self.setup_name}`  ({type(self.setup)})')
    return self.setup
def check_connected(self):
    """
    Checks if fully connected including setup.
    True only when every Ansys handle (app, desktop, project, design,
    setup) has been populated.
    """
    handles = (self.app, self.desktop, self.project, self.design, self.setup)
    return all(handle is not None for handle in handles)
def disconnect(self):
    '''
    Disconnect from existing Ansys Desktop API.

    Raises:
        Exception: If the connection was never fully established
            (see :meth:`check_connected`).
    '''
    # Explicit check instead of `assert`, which is silently stripped
    # when Python runs with the -O flag.
    if not self.check_connected():
        raise Exception(
            "It does not appear that you have connected to HFSS yet. "
            "Use the connect() method. \N{nauseated face}")
    # Release the COM handles in reverse order of acquisition
    self.project.release()
    self.desktop.release()
    self.app.release()
    ansys.release()
# UTILITY FUNCTIONS
def get_dm(self):
    '''
    Utility shortcut function to get the design and modeler.

    .. code-block:: python

        oDesign, oModeler = pinfo.get_dm()
    '''
    design = self.design
    return design, design.modeler
def get_all_variables_names(self):
    """Returns array of all project and local design names."""
    project_vars = self.project.get_variable_names()
    design_vars = self.design.get_variable_names()
    return project_vars + design_vars
def get_all_object_names(self):
    """Returns array of strings"""
    # Collect drawn objects from every top-level modeler group
    groups = ["Non Model", "Solids", "Unclassified", "Sheets", "Lines"]
    names = []
    for group in groups:
        names.extend(self.design.modeler.get_objects_in_group(group))
    return names
def validate_junction_info(self):
    """Validate that the user has put in the junction info correctly.
    Do not also forget to check the length of the rectangles/line of
    the junction if you change it.

    Raises:
        AssertionError: If a junction's ``Lj_variable`` is not a defined
            HFSS variable, or its ``rect``/``line`` is not a drawn object
            in the design.
    """
    all_variables_names = self.get_all_variables_names()
    all_object_names = self.get_all_object_names()
    for jjnm, jj in self.junctions.items():
        # The Lj variable must exist as a project or design variable
        assert jj['Lj_variable'] in all_variables_names,\
            """pyEPR ProjectInfo user error found \N{face with medical mask}:
            Seems like for junction `%s` you specified a design or project
            variable for `Lj_variable` that does not exist in HFSS by the name:
            `%s` """ % (jjnm, jj['Lj_variable'])
        # Both the junction rectangle and its line must be drawn objects
        for name in ['rect', 'line']:
            assert jj[name] in all_object_names, \
                """pyEPR ProjectInfo user error found \N{face with medical mask}:
                Seems like for junction `%s` you specified a %s that does not exist
                in HFSS by the name: `%s` """ % (jjnm, name, jj[name])
def __del__(self):
    """Log on garbage collection; intentionally does not force a disconnect."""
    logger.info('Disconnected from Ansys HFSS')
    # self.disconnect()
| 18,897 | 37.488798 | 137 | py |
pyEPR | pyEPR-master/pyEPR/core_distributed_analysis.py | """
Main distributed analysis module to use pyEPR.
Contains code to connect to Ansys and to analyze HFSS files using the EPR method.
This module handles the microwave part of the analysis and the connection to Ansys.
Further contains code to be able to do autogenerated reports.
Copyright Zlatko Minev, Zaki Leghtas, and the pyEPR team
2015, 2016, 2017, 2018, 2019, 2020
"""
# pylint: disable=invalid-name
# todo remove this pylint hack later
from __future__ import print_function # Python 2.7 and 3 compatibility
from typing import List
import pickle
import sys
import time
from collections import OrderedDict
from pathlib import Path
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from . import Dict, config, logger
from .ansys import CalcObject, ConstantVecCalcObject, set_property, ureg
from .calcs.constants import epsilon_0
from .project_info import ProjectInfo
from .reports import (plot_convergence_f_vspass, plot_convergence_max_df,
plot_convergence_maxdf_vs_sol,
plot_convergence_solved_elem)
from .toolbox.pythonic import print_NoNewLine
# class AnsysAnalysisBase():
#
# def __init__():
# """
# Instantiate in super after call.
# """
class DistributedAnalysis(object):
"""
DISTRIBUTED ANALYSIS of layout and microwave results.
Main computation class & interface with HFSS.
This class defines a DistributedAnalysis object which calculates and saves
Hamiltonian parameters from an HFSS simulation.
Further, it allows one to calculate dissipation, etc.
"""
def __init__(self, *args, **kwargs):
    '''
    Pass in the arguments for ProjectInfo. See help for `?ProjectInfo`.

    Parameters:
    -------------------
        project_info : ProjectInfo
            Supply the project info or the parameters to create pinfo

    Use notes:
    -------------------
        * If you change the setup or number of eigenmodes in HFSS, etc.
          call `update_ansys_info()`

    Example use:
    -------------------
    See the tutorials in the repository.

    .. code-block:: python
        :linenos:

        import pyEPR as epr
        pinfo = epr.ProjectInfo(project_path = path_to_project,
                                project_name = 'pyEPR_tutorial1',
                                design_name  = '1. single_transmon')
        eprd = epr.DistributedAnalysis(pinfo)

    To now quickly see the result of a sweep of a variable in ansys, you can use:

    .. code-block:: python
        :linenos:

        swp_var = 'Lj'
        display(eprd.get_ansys_variables())
        fs = eprd.quick_plot_frequencies(swp_var)
        display(fs)

    To perform distributed analysis

    .. code-block:: python
        :linenos:

        eprd.do_EPR_analysis(append_analysis=True);

    Key internal parameters:
    -------------------
        n_modes (int) : Number of eignemodes; e.g., 2
        variations (List[str]) : A list of string identifier of **solved** variation
            for the selected setup. Example: '['0', '1']
        _list_variations : An array of strings corresponding to **solved** variations.
            List of identifier strings for the SOLVED ansys variation for the selected setup.
            These do not include unsolved variables added after the solution!

            .. code-block:: python

                ("Height='0.06mm' Lj='13.5nH'", "Height='0.06mm' Lj='15.3nH'")
    '''
    # Get the project info
    project_info = None
    if (len(args) == 1) and (args[0].__class__.__name__ == 'ProjectInfo'):
        # isinstance(args[0], ProjectInfo): # fails on module repload with changes
        project_info = args[0]
    else:
        assert len(args) == 0, '''Since you did not pass a ProjectInfo object
            as a argument, we now assume you are trying to create a project
            info object here by passing its arguments. See ProjectInfo.
            It does not take any arguments, only kwargs. \N{face with medical mask}'''
        project_info = ProjectInfo(*args, **kwargs)

    # Input
    self.pinfo = project_info  # : project_info: a reference to a Project_Info class
    if self.pinfo.check_connected() is False:
        self.pinfo.connect()

    # hfss connect module — field and solution handles for the setup
    self.fields = None
    self.solutions = None
    if self.setup:
        self.fields = self.setup.get_fields()
        self.solutions = self.setup.get_solutions()

    # Stores results from sims
    self.results = Dict()  # of variations. Saved results

    # TODO: turn into base class shared with analysis!
    # Modes and variations - the following get updated in update_variation_information
    self.n_modes = int(1)  # : Number of eigenmodes
    self.modes = None

    #: List of variation indices, which are strings of ints, such as ['0', '1']
    self.variations = []
    self.variations_analyzed = []  # : List of analyzed variations. List of indices

    # String identifier of variables, such as "Cj='2fF' Lj='12.5nH'"
    self._nominal_variation = ''
    self._list_variations = ("",)  # tuple set of variables
    # container for eBBQ list of variables; basically the same as _list_variations
    self._hfss_variables = Dict()

    self._previously_analyzed = set()  # previously analyzed variations

    # Pull the solved variations / modes / variables from Ansys
    self.update_ansys_info()

    print('Design "%s" info:' % self.design.name)
    print('\t%-15s %d\n\t%-15s %d' % ('# eigenmodes', self.n_modes,
                                      '# variations', self.n_variations))

    # Setup data saving
    self.data_dir = None
    self.file_name = None
    self.setup_data()
@property
def setup(self):
    """Ansys setup class handle (delegates to the ProjectInfo). Could be None."""
    return self.pinfo.setup
@property
def design(self):
    """Ansys design class handle (delegates to the ProjectInfo)."""
    return self.pinfo.design
@property
def project(self):
    """Ansys project class handle (delegates to the ProjectInfo)."""
    return self.pinfo.project
# @property
# def desktop(self):
# """Ansys desktop class handle"""
# return self.pinfo.desktop
# @property
# def app(self):
# """Ansys App class handle"""
# return self.pinfo.app
# @property
# def junctions(self):
# """Project info junctions"""
# return self.pinfo.junctions
# @property
# def ports(self):
# return self.pinfo.ports
@property
def options(self):
    """Project info options (delegates to the ProjectInfo)."""
    return self.pinfo.options
def setup_data(self):
    '''
    Set up folder paths for saving data to.
    Sets the save filename (``self.data_filename``) with the current time.

    Saves to Path(config.root_dir) / self.project.name / self.design.name
    '''
    if len(self.design.name) > 50:
        logger.error('WARNING! DESIGN FILENAME MAY BE TOO LONG! ')

    self.data_dir = Path(config.root_dir) / \
        self.project.name / self.design.name
    self.data_filename = self.data_dir / (time.strftime(config.save_format,
                                                        time.localtime()) + '.npz')

    # `exist_ok=True` makes mkdir idempotent and race-free; the previous
    # `is_dir()` pre-check was redundant.
    self.data_dir.mkdir(parents=True, exist_ok=True)
def calc_p_junction_single(self, mode, variation, U_E=None, U_H=None):
    '''
    This function is used in the case of a single junction only.
    For multiple junctions, see :func:`~pyEPR.DistributedAnalysis.calc_p_junction`.

    Assumes no lumped capacitive elements.
    '''
    # Fall back to the field integrals when energies are not supplied
    if U_E is None:
        U_E = self.calc_energy_electric(variation)
    if U_H is None:
        U_H = self.calc_energy_magnetic(variation)

    participation = (U_E - U_H) / U_E

    result = OrderedDict()
    result['pj_' + str(mode)] = np.abs(participation)
    print(' p_j_' + str(mode) + ' = ' + str(participation))

    return result
# TODO: replace this method with the one below, here because some funcs use it still
def get_freqs_bare(self, variation: str):
    """
    Warning:
        Outdated. Do not use. To be deprecated.
        Prefer :meth:`get_freqs_bare_pd`.

    Args:
        variation (str): A string identifier of the variation,
            such as '0', '1', ...

    Returns:
        (OrderedDict, list): dict mapping 'freq_bare_m' -> frequency (Hz)
        and 'Q_m' -> quality factor (0 when no kappa data), plus the
        plain list of frequencies in Hz.
    """
    # str(self._get_lv(variation))
    freqs_bare_vals = []
    freqs_bare_dict = OrderedDict()
    # Ansys reports eigenfrequencies in GHz; the 1e9 factor converts to Hz
    freqs, kappa_over_2pis = self.solutions.eigenmodes(
        self.get_variation_string(variation))
    for m in range(self.n_modes):
        freqs_bare_dict['freq_bare_'+str(m)] = 1e9*freqs[m]
        freqs_bare_vals.append(1e9*freqs[m])
        if kappa_over_2pis is not None:
            freqs_bare_dict['Q_'+str(m)] = freqs[m]/kappa_over_2pis[m]
        else:
            freqs_bare_dict['Q_'+str(m)] = 0
    #self.freqs_bare = freqs_bare_dict
    #self.freqs_bare_vals = freqs_bare_vals
    return freqs_bare_dict, freqs_bare_vals
def get_freqs_bare_pd(self, variation: str, frame=True):
"""Return the freq and Qs of the solved modes for a variation.
I.e., the Ansys solved frequencies.
Args:
variation (str): A string identifier of the variation,
such as '0', '1', ...
frame {bool} -- if True returns dataframe, else tuple of series.
Returns:
If frame = True, then a multi-index Dataframe that looks something like this
.. code-block:: python
Freq. (GHz) Quality Factor
variation mode
0 0 5.436892 1020
1 7.030932 50200
1 0 5.490328 2010
1 7.032116 104500
If frame = False, then a tuple of two Series, such as
(Fs, Qs) -- Tuple of pandas.Series objects; the row index is the mode number
"""
variation_str = self.get_variation_string(variation)
freqs, kappa_over_2pis = self.solutions.eigenmodes(variation_str)
if kappa_over_2pis is None:
kappa_over_2pis = np.zeros(len(freqs))
freqs = pd.Series(freqs, index=range(len(freqs))) # GHz
Qs = freqs / pd.Series(kappa_over_2pis, index=range(len(freqs)))
if frame:
df = pd.DataFrame({'Freq. (GHz)': freqs, 'Quality Factor': Qs})
df.index.name = 'mode'
return df
else:
return freqs, Qs
def get_ansys_frequencies_all(self, vs='variation'):
    """
    Return all ansys frequencies and quality factors vs a variation

    Returns a multi-index pandas DataFrame
    """
    frames = dict()
    # When sweeping vs a variable, map each variation index to its value
    labeller = None if vs == 'variation' else self.get_variable_vs_variations(vs)

    for variation in self.variations:
        key = variation if vs == 'variation' else labeller[variation]
        frames[key] = self.get_freqs_bare_pd(variation=variation)

    # TODO: maybe sort column and index?  # todo: maybe generalize
    return pd.concat(frames, names=[vs])
def _get_lv(self, variation=None):
'''
List of variation variables in a format that is used when feeding back to ansys.
Args:
variation (str): A string identifier of the variation,
such as '0', '1', ...
Returns:
list of var names and var values.
Such as
.. code-block:: python
['Lj1:=','13nH', 'QubitGap:=','100um']
'''
if variation is None:
lv = self._nominal_variation # "Cj='2fF' Lj='12.5nH'"
lv = self._parse_listvariations(lv)
else:
lv = self._list_variations[ureg(variation)]
lv = self._parse_listvariations(lv)
return lv
# Functions that deal with variations exclusively
@property
def n_variations(self):
    """ Number of **solved** variations, corresponding to the
    selected Setup. """
    # One entry per solved variation string reported by Ansys
    return len(self._list_variations)
def set_variation(self, variation: str):
    """
    Set the ansys design to a solved variation.
    This will change all local variables!

    Warning: not tested with global variables.

    Args:
        variation (str): A string identifier of the variation,
            such as '0', '1', ...
    """
    variation_string = self.get_variation_string(variation)
    self.design.set_variables(variation_string)
def get_variations(self):
"""
An array of strings corresponding to **solved** variations corresponding to the
selected Setup.
Returns:
Returns a list of strings that give the variation labels for HFSS.
.. code-block:: python
OrderedDict([
('0', "Cj='2fF' Lj='12nH'"),
('1', "Cj='2fF' Lj='12.5nH'"),
('2', "Cj='2fF' Lj='13nH'"),
('3', "Cj='2fF' Lj='13.5nH'"),
('4', "Cj='2fF' Lj='14nH'")])
"""
return OrderedDict(zip(self.variations, self._list_variations))
def get_variation_string(self, variation=None):
"""
**Solved** variation string identifier.
Args:
variation (str): A string identifier of the variation,
such as '0', '1', ...
Returns:
Return the list variation string of parameters in ansys used to identify the variation.
.. code-block:: python
"$test='0.25mm' Cj='2fF' Lj='12.5nH'"
"""
if variation is None:
return self._nominal_variation
return self._list_variations[ureg(variation)]
def _parse_listvariations(self, lv):
"""
Turns
"Cj='2fF' Lj='13.5nH'"
into
['Cj:=', '2fF', 'Lj:=', '13.5nH']
"""
lv = str(lv)
lv = lv.replace("=", ":=,")
lv = lv.replace(' ', ',')
lv = lv.replace("'", "")
lv = lv.split(",")
return lv
def get_nominal_variation_index(self):
"""
Returns:
A string identifies, such as '0' or '1', that labels the
nominal variation index number.
This may not be in the solved list!s
"""
try:
return str(self._list_variations.index(self._nominal_variation))
except Exception:
print('WARNING: Unsure of the index, returning 0')
return '0'
def get_ansys_variations(self):
    """
    Will update ansys information and return the list of solved variation
    strings.

    Returns:
        For example:

        .. code-block:: python

            ("Cj='2fF' Lj='12nH'",
             "Cj='2fF' Lj='12.5nH'",
             "Cj='2fF' Lj='13nH'",
             "Cj='2fF' Lj='13.5nH'",
             "Cj='2fF' Lj='14nH'")
    """
    # Refresh from Ansys first so the list is current
    self.update_ansys_info()
    return self._list_variations
def update_ansys_info(self):
    '''
    Updates all information about the Ansys solved variations and variables:
    ``n_modes``, ``_list_variations``, ``_nominal_variation``, and the
    cached per-variation HFSS variables.
    '''
    # from oDesign
    self._nominal_variation = self.design.get_nominal_variation()

    if self.setup:
        # from oSetup -- only for the solved variations!
        self._list_variations = self.solutions.list_variations()

        self.variations = [str(i) for i in range(
            self.n_variations)]  # TODO: change to integer?

        # eigenmodes: mode count only meaningful for eigenmode solutions
        if self.design.solution_type == 'Eigenmode':
            self.n_modes = int(self.setup.n_modes)
        else:
            self.n_modes = 0

    # Refresh the cached variable series for every known variation
    self._update_ansys_variables()
def _update_ansys_variables(self, variations=None):
"""
Updates the list of ansys hfss variables for the set of sweeps.
"""
variations = variations or self.variations
for variation in variations:
self._hfss_variables[variation] = pd.Series(
self.get_variables(variation=variation))
return self._hfss_variables
def get_ansys_variables(self):
"""
Get ansys variables for all variations
Returns:
Return a dataframe of variables as index and columns as the variations
"""
vs = 'variation'
df = pd.DataFrame(self._hfss_variables, columns=self.variations)
df.columns.name = vs
df.index = [x[1:] if x.startswith('_') else x for x in df.index]
#df.index.name = 'variable'
return df
def get_variables(self, variation=None):
"""
Get ansys variables.
Args:
variation (str): A string identifier of the variation,
such as '0', '1', ...
"""
lv = self._get_lv(variation)
variables = OrderedDict()
for ii in range(int(len(lv)/2)):
variables['_'+lv[2*ii][:-2]] = lv[2*ii+1]
#self.variables = variables
return variables
def get_variable_vs_variations(self, variable: str, convert: bool = True):
"""
Get ansys variables
Return HFSS variable from :py:func:`self.get_ansys_variables()` as a
pandas series vs variations.
Args:
convert (bool) : Convert to a numeric quantity if possible using the
ureg
"""
# TODO: These should be common function to the analysis and here!
# BOth should be subclasses of a base class
s = self.get_ansys_variables().loc[variable, :] # : pd.Series
if convert:
s = s.apply(lambda x: ureg.Quantity(x).magnitude)
return s
def calc_energy_electric(self,
variation: str = None,
obj: str = 'AllObjects',
volume: str = 'Deprecated',
smooth: bool = False,
obj_dims: int = 3):
r'''
Calculates two times the peak electric energy, or 4 times the RMS,
:math:`4*\mathcal{E}_{\mathrm{elec}}`
(since we do not divide by 2 and use the peak phasors).
.. math::
\mathcal{E}_{\mathrm{elec}}=\frac{1}{4}\mathrm{Re}\int_{V}\mathrm{d}v\vec{E}_{\text{max}}^{*}\overleftrightarrow{\epsilon}\vec{E}_{\text{max}}
Args:
variation (str): A string identifier of the variation,
such as '0', '1', ...
obj (string | 'AllObjects'): Name of the object to integrate over
smooth (bool | False) : Smooth the electric field or not when performing calculation
obj_dims (int | 3) : 1 - line, 2 - surface, 3 - volume. Default volume
Example:
Example use to calculate the energy participation ratio (EPR) of a substrate
.. code-block:: python
:linenos:
ℰ_total = epr_hfss.calc_energy_electric(obj='AllObjects')
ℰ_substr = epr_hfss.calc_energy_electric(obj='Box1')
print(f'Energy in substrate = {100*ℰ_substr/ℰ_total:.1f}%')
'''
if volume != 'Deprecated':
logger.warning('The use of the "volume" argument is deprecated... use "obj" instead')
obj = volume
calcobject = CalcObject([], self.setup)
vecE = calcobject.getQty("E")
if smooth:
vecE = vecE.smooth()
A = vecE.times_eps()
B = vecE.conj()
A = A.dot(B)
A = A.real()
if obj_dims == 1:
A = A.integrate_line(name=obj)
elif obj_dims == 2:
A = A.integrate_surf(name=obj)
elif obj_dims == 3:
A = A.integrate_vol(name=obj)
else:
logger.warning('Invalid object dimensions %s, using default of 3 (volume)' % obj_dims)
A = A.integrate_vol(name=obj)
lv = self._get_lv(variation)
return A.evaluate(lv=lv)
def calc_energy_magnetic(self,
                         variation: str = None,
                         obj: str = 'AllObjects',
                         volume: str = 'Deprecated',
                         smooth: bool = False,
                         obj_dims: int = 3):
    '''
    Magnetic analog of :meth:`calc_energy_electric`: integrates
    Re(H·mu·H*) over the given object.

    Args:
        variation (str): A string identifier of the variation,
            such as '0', '1', ...
        obj (str): Name of the object to integrate over.
        volume (str): Deprecated alias for ``obj``.
        smooth (bool): Smooth the magnetic field before integrating.
        obj_dims (int): 1 - line, 2 - surface, 3 - volume (default).
    '''
    if volume != 'Deprecated':
        logger.warning('The use of the "volume" argument is deprecated... use "obj" instead')
        obj = volume

    calcobject = CalcObject([], self.setup)

    vecH = calcobject.getQty("H")
    if smooth:
        vecH = vecH.smooth()
    A = vecH.times_mu()
    B = vecH.conj()
    A = A.dot(B)
    A = A.real()

    if obj_dims == 1:
        A = A.integrate_line(name=obj)
    elif obj_dims == 2:
        A = A.integrate_surf(name=obj)
    elif obj_dims == 3:
        A = A.integrate_vol(name=obj)
    else:
        # FIX: Logger.warn is a deprecated alias of Logger.warning;
        # this also matches calc_energy_electric's style.
        logger.warning(f'Invalid object dimensions {obj_dims}, using default of 3 (volume)')
        A = A.integrate_vol(name=obj)

    lv = self._get_lv(variation)
    return A.evaluate(lv=lv)
def calc_p_electric_volume(self,
                           name_dielectric3D,
                           relative_to='AllObjects',
                           variation=None,
                           E_total=None
                           ):
    r'''
    Calculate the dielectric energy-participation ratio of a 3D object
    (one that has volume) relative to the dielectric energy of a list of
    objects.

    When all objects are specified, this does not include any energy that
    might be stored in any lumped elements or lumped capacitors.

    Returns:
        ℰ_object/ℰ_total, (ℰ_object, ℰ_total)
    '''
    if E_total is not None:
        energy_total = E_total
    else:
        logger.debug('Calculating ℰ_total')
        energy_total = self.calc_energy_electric(obj=relative_to, variation=variation)

    logger.debug('Calculating ℰ_object')
    energy_object = self.calc_energy_electric(obj=name_dielectric3D, variation=variation)

    return energy_object / energy_total, (energy_object, energy_total)
def calc_current(self, fields, line: str):
    '''
    Function to calculate Current based on line. Not in use.

    Args:
        fields: HFSS fields object (e.g. from ``self.setup.get_fields()``).
        line (str) : integration line between plates - name

    Returns:
        The tangential line integral of H evaluated at 90-degree phase.
    '''
    # Note: "Clac" is the (misspelled) name of the external Ansys call.
    self.design.Clear_Field_Clac_Stack()
    comp = fields.Vector_H
    exp = comp.integrate_line_tangent(line)
    I = exp.evaluate(phase=90)
    self.design.Clear_Field_Clac_Stack()
    return I
def calc_avg_current_J_surf_mag(self, variation: str, junc_rect: str, junc_line):
    ''' Peak current I_max for mode J in junction J
    The avg. is over the surface of the junction. I.e., spatial.

    Args:
        variation (str): A string identifier of the variation,
            such as '0', '1', ...
        junc_rect (str) : name of rectangle to integrate over
        junc_line (str) : name of junction line to integrate over

    Returns:
        Value of peak current
    '''
    lv = self._get_lv(variation)

    # Junction length and unit tangent vector along the junction line
    jl, uj = self.get_junc_len_dir(variation, junc_line)

    uj = ConstantVecCalcObject(uj, self.setup)
    calc = CalcObject([], self.setup)
    #calc = calc.getQty("Jsurf").mag().integrate_surf(name = junc_rect)
    # Project the surface current density onto the junction direction,
    # take the imaginary part, integrate over the junction rectangle
    calc = (((calc.getQty("Jsurf")).dot(uj)).imag()
            ).integrate_surf(name=junc_rect)
    # Divide by the junction length to get the spatial average
    I = calc.evaluate(lv=lv) / jl  # phase = 90
    # self.design.Clear_Field_Clac_Stack()
    return I
def calc_current_using_line_voltage(self, variation: str, junc_line_name: str,
                                    junc_L_Henries: float, Cj_Farads: float = None):
    '''
    Peak current I_max for prespecified mode calculating line voltage across junction.

    Make sure that you have set the correct variation in HFSS before running this

    Args:
        variation: variation number
        junc_line_name: name of the HFSS line spanning the junction
        junc_L_Henries: junction inductance in henries
        Cj_Farads: junction capacitance in Farads; ``None`` (default)
            means no junction capacitance is considered.

    Returns:
        (I_peak, V, freq): peak current, signed line voltage, frequency (Hz).

    TODO: Smooth?
    '''
    lv = self._get_lv(variation)
    v_calc_real = CalcObject([], self.setup).getQty(
        "E").real().integrate_line_tangent(name=junc_line_name)
    v_calc_imag = CalcObject([], self.setup).getQty(
        "E").imag().integrate_line_tangent(name=junc_line_name)
    # Signed magnitude of the complex line voltage
    V = np.sign(v_calc_real.evaluate(lv=lv)) * np.sqrt(v_calc_real.evaluate(lv=lv)**2 +
                                                       v_calc_imag.evaluate(lv=lv)**2)

    # Get frequency
    freq = CalcObject(
        [('EnterOutputVar', ('Freq', "Complex"))], self.setup).real().evaluate()
    omega = 2*np.pi*freq  # in SI radian Hz units

    Z = omega*junc_L_Henries
    # BUG FIX: `float(Cj_Farads)` raised TypeError when the default
    # Cj_Farads=None was used; guard against None explicitly.
    if Cj_Farads is not None and abs(float(Cj_Farads)) > 1E-29:  # zero
        #print('Non-zero Cj used in calc_current_using_line_voltage')
        # Note: Z is deliberately NOT modified here (see commented line);
        # only the inductive energy fraction is reported.
        #Z += 1./(omega*Cj_Farads)
        print(
            '\t\t'f'Energy fraction (Lj over Lj&Cj)= {100./(1.+omega**2 *Cj_Farads*junc_L_Henries):.2f}%')
        # f'Z_L= {omega*junc_L_Henries:.1f} Ohms Z_C= {1./(omega*Cj_Farads):.1f} Ohms')

    I_peak = V/Z  # I=V/(wL)

    return I_peak, V, freq
def calc_line_current(self, variation, junc_line_name):
    """Evaluate the imaginary part of H integrated tangentially along a line.

    Args:
        variation (str): A string identifier of the variation,
            such as '0', '1', ...
        junc_line_name (str): Name of the HFSS polyline to integrate along.

    Returns:
        Value of the line integral for this variation.
    """
    lv = self._get_lv(variation)
    calc = CalcObject([], self.setup)
    calc = calc.getQty("H").imag().integrate_line_tangent(
        name=junc_line_name)
    # self.design.Clear_Field_Clac_Stack()
    return calc.evaluate(lv=lv)
def get_junc_len_dir(self, variation: str, junc_line):
    '''
    Return the length and direction of a junction defined by a line

    Args:
        variation (str): simulation variation
        junc_line (str): polyline object

    Returns:
        jl (float) : junction length
        uj (list of 3 floats): x,y,z coordinates of the unit vector
            tangent to the junction line
    '''
    lv = self._get_lv(variation)
    # Evaluate each Cartesian component of the line tangent
    components = []
    for axis in ['X', 'Y', 'Z']:
        tangent = CalcObject([], self.setup).line_tangent_coor(junc_line, axis)
        components.append(tangent.evaluate(lv=lv))

    length = float(np.sqrt(components[0]**2 + components[1]**2 + components[2]**2))
    unit_vector = [float(c / length) for c in components]
    return length, unit_vector
    def get_Qseam(self, seam, mode, variation, U_H=None):
        r'''
        Calculate the contribution to Q of a seam, by integrating the current in
        the seam with finite conductance: set in the config file
        ref: http://arxiv.org/pdf/1509.01119.pdf

        Args:
            seam (str): name of the seam line object in HFSS
            mode (int): mode index (0-based); used for labels and to pick the frequency
            variation (str): variation identifier, e.g. '0'
            U_H (float, optional): total magnetic energy of the mode; computed
                from the fields if not given.

        Returns:
            pd.Series with a single entry 'Qseam_<seam>_<mode>'
        '''
        if U_H is None:
            U_H = self.calc_energy_magnetic(variation)
        _, freqs_bare_vals = self.get_freqs_bare(variation)
        # NOTE(review): assumes the currently loaded field solution corresponds
        # to `mode` — confirm the caller selected the mode beforehand.
        self.omega = 2*np.pi*freqs_bare_vals[mode]
        lv = self._get_lv(variation)
        Qseam = OrderedDict()
        print(f'Calculating Qseam_{seam} for mode {mode} ({mode}/{self.n_modes-1})')
        # overestimating the loss by taking norm2 of j, rather than jperp**2
        j_2_norm = self.fields.Vector_Jsurf.norm_2()
        int_j_2 = j_2_norm.integrate_line(seam)
        int_j_2_val = int_j_2.evaluate(lv=lv, phase=90)
        # Seam admittance per unit seam conductance; Q = gseam / yseam
        yseam = int_j_2_val/U_H/self.omega
        Qseam['Qseam_'+seam+'_' +
              str(mode)] = config.dissipation.gseam/yseam
        print('Qseam_' + seam + '_' + str(mode), '=', str(config.dissipation.gseam/yseam))
        return pd.Series(Qseam)
    def get_Qseam_sweep(self, seam, mode, variation, variable, values, unit, U_H=None, pltresult=True):
        """
        Q due to seam loss, swept over an Ansys design variable.

        Sets ``variable`` to each entry of ``values`` (with ``unit`` appended)
        in the design and re-evaluates the seam Q each time.
        values = ['5mm','6mm','7mm']
        ref: http://arxiv.org/pdf/1509.01119.pdf

        Args:
            seam (str): name of the seam line object in HFSS
            mode (int): mode index (0-based)
            variation (str): variation identifier, e.g. '0'
            variable (str): design variable name to sweep
            values (list): values assigned to the variable, one per sweep point
            unit (str): unit string appended to each value, e.g. 'mm'
            U_H (float, optional): total magnetic energy; computed if not given
            pltresult (bool): plot Q vs value on a log y-scale when True

        Returns:
            list of Q values, one per entry in ``values``
        """
        if U_H is None:
            U_H = self.calc_energy_magnetic(variation)
        # Select this mode's fields (Ansys counts modes from 1)
        self.solutions.set_mode(mode+1, 0)
        self.fields = self.setup.get_fields()
        freqs_bare_dict, freqs_bare_vals = self.get_freqs_bare(variation)
        self.omega = 2*np.pi*freqs_bare_vals[mode]
        # NOTE(review): leftover debug prints below — consider removing.
        print(variation)
        print(type(variation))
        print(ureg(variation))
        lv = self._get_lv(variation)
        Qseamsweep = []
        print('Calculating Qseam_' + seam + ' for mode ' + str(mode) +
              ' (' + str(mode) + '/' + str(self.n_modes-1) + ')')
        for value in values:
            # Mutates the Ansys design for each sweep point
            self.design.set_variable(variable, str(value)+unit)
            # overestimating the loss by taking norm2 of j, rather than jperp**2
            j_2_norm = self.fields.Vector_Jsurf.norm_2()
            int_j_2 = j_2_norm.integrate_line(seam)
            int_j_2_val = int_j_2.evaluate(lv=lv, phase=90)
            yseam = int_j_2_val/U_H/self.omega
            Qseamsweep.append(config.dissipation.gseam/yseam)
            # Qseamsweep['Qseam_sweep_'+seam+'_'+str(mode)] = gseam/yseam
            # Cprint 'Qseam_' + seam + '_' + str(mode) + str(' = ') + str(gseam/yseam)
        if pltresult:
            _, ax = plt.subplots()
            ax.plot(values, Qseamsweep)
            ax.set_yscale('log')
            ax.set_xlabel(variable+' ('+unit+')')
            ax.set_ylabel('Q'+'_'+seam)
        return Qseamsweep
def get_Qdielectric(self, dielectric, mode, variation, U_E=None):
if U_E is None:
U_E = self.calc_energy_electric(variation)
Qdielectric = OrderedDict()
print('Calculating Qdielectric_' + dielectric + ' for mode ' +
str(mode) + ' (' + str(mode) + '/' + str(self.n_modes-1) + ')')
U_dielectric = self.calc_energy_electric(variation, obj=dielectric)
p_dielectric = U_dielectric/U_E
# TODO: Update make p saved sep. and get Q for diff materials, indep. specify in pinfo
Qdielectric['Qdielectric_' + dielectric] = 1/(p_dielectric*config.dissipation.tan_delta_sapp)
print('p_dielectric'+'_'+dielectric+'_' + str(mode) + ' = ' + str(p_dielectric))
return pd.Series(Qdielectric)
    def get_Qsurface(self, mode, variation, name, U_E=None, material_properties=None):
        '''
        Calculate the contribution to Q of a dielectric layer of dirt on a given surface.
        Set the dirt thickness and loss tangent in the config file
        ref: http://arxiv.org/pdf/1509.01854.pdf

        Args:
            mode (int): mode index (0-based); used for the printout only
            variation (str): variation identifier, e.g. '0'
            name (str): Ansys surface/object name to integrate over
            U_E (float, optional): total electric energy; computed if not given
            material_properties (dict, optional): overrides for 'th' (layer
                thickness, meters), 'eps_r' (relative permittivity) and
                'tan_delta_surf' (loss tangent); missing keys fall back to
                the config-file defaults.

        Returns:
            pd.Series with a single entry 'Qsurf_<name>'
        '''
        if U_E is None:
            U_E = self.calc_energy_electric(variation)
        if material_properties is None:
            material_properties = {}
        # Thin dirt-layer properties; default to config values
        th = material_properties.get('th', config.dissipation.th)
        eps_r = material_properties.get('eps_r', config.dissipation.eps_r)
        tan_delta_surf = material_properties.get('tan_delta_surf', config.dissipation.tan_delta_surf)
        lv = self._get_lv(variation)
        Qsurf = OrderedDict()
        print(f'Calculating Qsurface {name} for mode ({mode}/{self.n_modes-1})')
        # Surface integral of |E|^2 = Re(E . conj(E)) over the named surface
        calcobject = CalcObject([], self.setup)
        vecE = calcobject.getQty("E")
        A = vecE
        B = vecE.conj()
        A = A.dot(B)
        A = A.real()
        A = A.integrate_surf(name=name)
        U_surf = A.evaluate(lv=lv)
        # Energy stored in a thin dielectric layer of thickness `th`
        U_surf *= th * epsilon_0 * eps_r
        p_surf = U_surf/U_E
        Qsurf[f'Qsurf_{name}'] = 1 / (p_surf * tan_delta_surf)
        print(f'p_surf_{name}_{mode} = {p_surf}')
        return pd.Series(Qsurf)
def get_Qsurface_all(self, mode, variation, U_E=None):
'''
Calculate the contribution to Q of a dielectric layer of dirt on all surfaces.
Set the dirt thickness and loss tangent in the config file
ref: http://arxiv.org/pdf/1509.01854.pdf
'''
return self.get_Qsurface(mode, variation, name='AllObjects', U_E=U_E)
def calc_Q_external(self, variation, freq_GHz, U_E = None):
'''
Calculate the coupling Q of mode m with each port p
Expected that you have specified the mode before calling this
Args:
variation (str): A string identifier of the variation,
such as '0', '1', ...
'''
if U_E is None:
U_E = self.calc_energy_electric(variation)
Qp = pd.Series({}, dtype='float64')
freq = freq_GHz * 1e9 # freq in Hz
for port_nm, port in self.pinfo.ports.items():
I_peak = self.calc_avg_current_J_surf_mag(variation, port['rect'],
port['line'])
U_dissip = 0.5 * port['R'] * I_peak**2 * 1 / freq
p = U_dissip / (U_E/2) # U_E is 2x the peak electrical energy
kappa = p * freq
Q = 2 * np.pi * freq / kappa
Qp['Q_' + port_nm] = Q
return Qp
def calc_p_junction(self, variation, U_H, U_E, Ljs, Cjs):
'''
For a single specific mode.
Expected that you have specified the mode before calling this, :func:`~pyEPR.DistributedAnalysis.set_mode`.
Expected to precalc U_H and U_E for mode, will return pandas pd.Series object:
* junc_rect = ['junc_rect1', 'junc_rect2'] name of junc rectangles to integrate H over
* junc_len = [0.0001] specify in SI units; i.e., meters
* LJs = [8e-09, 8e-09] SI units
* calc_sign = ['junc_line1', 'junc_line2']
WARNING: Cjs is experimental.
This function assumes there are no lumped capacitors in model.
Args:
variation (str): A string identifier of the variation,
such as '0', '1', ...
.. note::
U_E and U_H are the total peak energy. (NOT twice as in U_ and U_H other places)
.. warning::
Potential errors: If you dont have a line or rect by the right name you will prob
get an error of the type: com_error: (-2147352567, 'Exception occurred.',
(0, None, None, None, 0, -2147024365), None)
'''
# ------------------------------------------------------------
# Calculate all peak voltage and currents for all junctions in a given mode
method = self.pinfo.options.method_calc_P_mj
I_peak_ = {}
V_peak_ = {}
Sj = pd.Series({}, dtype='float64')
for j_name, j_props in self.pinfo.junctions.items():
logger.debug(f'Calculating participations for {(j_name, j_props)}')
Lj = Ljs[j_name]
Cj = Cjs[j_name]
line_name = j_props['line']
if method == 'J_surf_mag': # old method
_I_peak_1 = self.calc_avg_current_J_surf_mag(
variation, j_props['rect'], line_name)
# could also use this to back out the V_peak using the impedances as in the line
# below for now, keep both methods
_I_peak_2, _V_peak_2, _ = self.calc_current_using_line_voltage(
variation, line_name, Lj, Cj)
logger.debug(
f'Difference in I_Peak calculation ala the two methods: {(_I_peak_1,_I_peak_2)}')
V_peak = _V_peak_2 # make sure this is signed
I_peak = _I_peak_1
elif method == 'line_voltage': # new preferred method
I_peak, V_peak, _ = self.calc_current_using_line_voltage(
variation, line_name, Lj, Cj)
else:
raise NotImplementedError('Other calculation methods\
(self.pinfo.options.method_calc_P_mj) are possible but not implemented here. ')
# save results
I_peak_[j_name] = I_peak
V_peak_[j_name] = V_peak
Sj['s_' + j_name] = _Smj = 1 if V_peak > 0 else - 1
# REPORT preliminary
pmj_ind = 0.5*Ljs[j_name] * I_peak**2 / U_E
pmj_cap = 0.5*Cjs[j_name] * V_peak**2 / U_E
#print('\tpmj_ind=',pmj_ind, Ljs[j_name], U_E)
self.I_peak = I_peak
self.V_peak = V_peak
self.Ljs = Ljs
self.Cjs = Cjs
print(
f'\t{j_name:<15} {pmj_ind:>8.6g}{("(+)"if _Smj else "(-)"):>5s} {pmj_cap:>8.6g}')
#print('\tV_peak=', V_peak)
# ------------------------------------------------------------
# Calculate participation from the peak voltage and currents
#
# All junction capacitive and inductive lumped energies - all peak
U_J_inds = {j_name: 0.5*Ljs[j_name] * I_peak_[j_name]
** 2 for j_name in self.pinfo.junctions}
U_J_caps = {j_name: 0.5*Cjs[j_name] * V_peak_[j_name]
** 2 for j_name in self.pinfo.junctions}
U_tot_ind = U_H + sum(list(U_J_inds.values())) # total
U_tot_cap = U_E + sum(list(U_J_caps.values()))
# what to use for the norm? U_tot_cap or the mean of U_tot_ind and U_tot_cap?
# i.e., (U_tot_ind + U_tot_cap)/2
U_norm = U_tot_cap
U_diff = (U_tot_cap-U_tot_ind)/(U_tot_cap+U_tot_ind)
print("\t\t"f"(U_tot_cap-U_tot_ind)/mean={U_diff*100:.2f}%")
if abs(U_diff) > 0.15:
print('WARNING: This simulation must not have converged well!!!\
The difference in the total cap and ind energies is larger than 10%.\
Proceed with caution.')
Pj = pd.Series(OrderedDict([(j_name, Uj_ind/U_norm)
for j_name, Uj_ind in U_J_inds.items()]))
PCj = pd.Series(OrderedDict([(j_name, Uj_cap/U_norm)
for j_name, Uj_cap in U_J_caps.items()]))
# print('\t{:<15} {:>8.6g} {:>5s}'.format(
# j_name,
# Pj['p_' + j_name],
# '+' if Sj['s_' + j_name] > 0 else '-'))
return Pj, Sj, PCj, pd.Series(I_peak), pd.Series(V_peak), \
{'U_J_inds': U_J_inds,
'U_J_caps': U_J_caps,
'U_H': U_H,
'U_E': U_E,
'U_tot_ind': U_tot_ind,
'U_tot_cap': U_tot_cap,
'U_norm': U_norm,
'U_diff': U_diff}
def get_previously_analyzed(self):
"""
Return previously analyzed data.
Does not yet handle data that was previously saved in a filename.
"""
# TODO: maybe load from data_file
# Rerun previously analyze variations from load filename
return self._previously_analyzed
    def get_junctions_L_and_C(self, variation: str):
        """
        Returns a pandas Series with the index being the junction name as specified in the
        project_info.
        The values in the series are numeric and in SI base units, i.e., not nH but Henries,
        and not fF but Farads.

        Args:
            variation (str) : label such as '0' or 'all', in which case return
                pandas table for all variations
        """
        if variation == 'all':
            # for all variations and concat
            raise NotImplementedError()  # TODO
        else:
            Ljs = pd.Series({}, dtype='float64')
            Cjs = pd.Series({}, dtype='float64')
            for junc_name, val in self.pinfo.junctions.items():  # junction nickname
                _variables = self._hfss_variables[variation]
                # Parse an HFSS variable string (e.g. '10nH') into SI base units
                def _parse(name): return ureg.Quantity(
                    _variables['_'+val[name]]).to_base_units().magnitude
                Ljs[junc_name] = _parse('Lj_variable')
                # NOTE(review): Cj is hard-coded to 2 fF for every junction;
                # the commented-out code below suggests it was meant to be
                # parsed from 'Cj_variable' — confirm before relying on Cjs.
                Cjs[junc_name] = 2E-15  # _parse(
                # 'Cj_variable') if 'Cj_variable' in val else 0
        return Ljs, Cjs
def do_EPR_analysis(self,
variations: list = None,
modes=None,
append_analysis=True):
"""
Main analysis routine
Args:
variation (str): A string identifier of the variation,
such as '0', '1', ...
Optional Parameters:
------------------------
variations : list | None
Example list of variations is ['0', '1']
A variation is a combination of project/design variables in an optimetric sweep
modes : list | None
Modes to analyze
for example modes = [0, 2, 3]
append_analysis (bool) :
When we run the Ansys analysis, should we redo any variations that we have already done?
Ansys Notes:
------------------------
Assumptions:
Low dissipation (high-Q).
It is easier to assume no lumped capacitors to simply calculations, but we have
recently added Cj_variable as a new feature that is begin tested to handle capacitors.
See the paper.
Using the results:
------------------------
Load results with epr.QuantumAnalysis class
Example use:
----------------
.. code-block:: python
:linenos:
eprd = epr.DistributedAnalysis(pinfo)
eprd.do_EPR_analysis(append_analysis=False)
"""
if not modes is None:
assert max(modes) < self.n_modes, 'Non-existing mode selected. \n'\
f'The possible modes are between 0 and {self.n_modes-1}.'
if len(modes) != len(set(modes)):
logger.warn(f'Select each mode only once! Fixing...\n'\
'modes: {modes} --> {list(set(modes))}')
modes = list(set(modes))
# Track the total timing
self._run_time = time.strftime('%Y%m%d_%H%M%S', time.localtime())
# Update the latest hfss variation information
self.update_ansys_info()
variations = variations or self.variations
modes = modes or range(self.n_modes)
self.modes = modes
self.pinfo.save()
# Main loop - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# TODO: Move inside of loop to function calle self.analyze_variation
for ii, variation in enumerate(variations):
print(f'\nVariation {variation} [{ii+1}/{len(variations)}]')
# Previously analyzed and we should re analyze
if append_analysis and variation in self.get_previously_analyzed():
print_NoNewLine(' previously analyzed ...\n')
continue
# QUESTION! should we set the current variation, can this save time, set the variables
# If not, clear the results
self.results[variation] = Dict()
self.lv = self._get_lv(variation)
time.sleep(0.4)
if self.has_fields() == False:
logger.error(f" Error: HFSS does not have field solution for variation={ii}.\
Skipping this mode in the analysis")
continue
try:
# This should allow us to load the fields only once, and then do the calculations
# faster. The loading of the fields does not happen here, but a the first ClcEval call.
# This could fail if more variables are added after the simulation is completed.
self.set_variation(variation)
except Exception as e:
print('\tERROR: Could not set the variation string.'
'\nPossible causes: Did you add a variable after the simulation was already solved? '
'\nAttempting to proceed nonetheless, should be just slower ...')
# use nonframe because old style
freqs_bare_GHz, Qs_bare = self.get_freqs_bare_pd(
variation, frame=False)
# update to the latest
self._hfss_variables[variation] = pd.Series(
self.get_variables(variation=variation))
# Create Ljs and Cjs series for a variation
Ljs, Cjs = self.get_junctions_L_and_C(variation)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# This is crummy now. use dict
#result = dict()
Om = OrderedDict() # Matrix of angular frequency (of analyzed modes)
Pm = OrderedDict() # Participation P matrix
Sm = OrderedDict() # Sign S matrix
Qm_coupling = OrderedDict() # Quality factor matrix
SOL = OrderedDict() # Other results
Pm_cap = OrderedDict()
I_peak = OrderedDict()
V_peak = OrderedDict()
ansys_energies = OrderedDict()
for mode in modes: # integer of mode number [0,1,2,3,..]
# Load fields for mode
self.set_mode(mode)
# Get HFSS solved frequencies
_Om = pd.Series({}, dtype='float64')
temp_freq = freqs_bare_GHz[mode]
_Om['freq_GHz'] = temp_freq # freq
Om[mode] = _Om
print(
'\n'f' \033[1mMode {mode} at {"%.2f" % temp_freq} GHz [{mode+1}/{self.n_modes}]\033[0m')
# EPR Hamiltonian calculations
# Calculation global energies and report
# Magnetic
print(' Calculating ℰ_magnetic', end=',')
try:
self.U_H = self.calc_energy_magnetic(variation)
except Exception as e:
tb = sys.exc_info()[2]
print("\n\nError:\n", e)
raise(Exception(' Did you save the field solutions?\n\
Failed during calculation of the total magnetic energy.\
This is the first calculation step, and is indicative that there are \
no field solutions saved. ').with_traceback(tb))
# Electric
print('ℰ_electric')
self.U_E = self.calc_energy_electric(variation)
# the unnormed
sol = pd.Series({'U_H': self.U_H, 'U_E': self.U_E})
# Fraction - report the peak energy, properly normalized
# the 2 is from the calculation methods
print(f""" {'(ℰ_E-ℰ_H)/ℰ_E':>15s} {'ℰ_E':>9s} {'ℰ_H':>9s}
{100*(self.U_E - self.U_H)/self.U_E:>15.1f}% {self.U_E/2:>9.4g} {self.U_H/2:>9.4g}\n""")
# Calculate EPR for each of the junctions
print(
f' Calculating junction energy participation ration (EPR)\n\tmethod=`{self.pinfo.options.method_calc_P_mj}`. First estimates:')
print(
f"\t{'junction':<15s} EPR p_{mode}j sign s_{mode}j (p_capacitive)")
Pm[mode], Sm[mode], Pm_cap[mode], I_peak[mode], V_peak[mode], ansys_energies[mode] = self.calc_p_junction(
variation, self.U_H/2., self.U_E/2., Ljs, Cjs)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# EPR Dissipative calculations -- should be a function block below
# TODO: this should really be passed as argument to the functions rather than a
# property of the calss I would say
self.omega = 2*np.pi*freqs_bare_GHz[mode]
Qm_coupling[mode] = self.calc_Q_external(variation,
freqs_bare_GHz[mode],
self.U_E)
# get seam Q
if self.pinfo.dissipative['seams']:
for seam in self.pinfo.dissipative['seams']:
sol = pd.concat([sol, self.get_Qseam(seam, mode, variation, self.U_H)])
# get Q dielectric
if self.pinfo.dissipative['dielectrics_bulk']:
for dielectric in self.pinfo.dissipative['dielectrics_bulk']:
sol = pd.concat([sol, self.get_Qdielectric(dielectric, mode, variation, self.U_E)])
# get Q surface
if self.pinfo.dissipative['dielectric_surfaces']:
if self.pinfo.dissipative['dielectric_surfaces'] == 'all':
sol = pd.concat([sol, self.get_Qsurface_all(mode, variation, self.U_E)])
else:
for surface, properties in self.pinfo.dissipative['dielectric_surfaces'].items():
sol = pd.concat([sol, self.get_Qsurface(mode, variation, surface, self.U_E, properties)])
SOL[mode] = sol
# Save
self._update_results(variation, Om, Pm, Sm, Qm_coupling, SOL,
freqs_bare_GHz, Qs_bare, Ljs, Cjs,
Pm_cap, I_peak, V_peak,
ansys_energies,
self._hfss_variables[variation])
self.save()
self._previously_analyzed.add(variation)
print('\nANALYSIS DONE. Data saved to:\n\n' +
str(self.data_filename)+'\n\n')
return self.data_filename, variations
def _update_results(self, variation: str, Om, Pm, Sm, Qm_coupling, sols,
freqs_bare_GHz, Qs_bare, Ljs, Cjs, Pm_cap, I_peak, V_peak,
ansys_energies, _hfss_variables):
'''
Save variation
'''
# raw, not normalized - DataFrames
self.results[variation]['Pm'] = pd.DataFrame(Pm).transpose()
self.results[variation]['Pm_cap'] = pd.DataFrame(Pm_cap).transpose()
self.results[variation]['Sm'] = pd.DataFrame(Sm).transpose()
self.results[variation]['Om'] = pd.DataFrame(Om)
self.results[variation]['sols'] = pd.DataFrame(sols).transpose()
self.results[variation]['Qm_coupling'] = pd.DataFrame(
Qm_coupling).transpose()
self.results[variation]['Ljs'] = Ljs # pd.Series
self.results[variation]['Cjs'] = Cjs # pd.Series
self.results[variation]['Qs'] = Qs_bare
self.results[variation]['freqs_hfss_GHz'] = freqs_bare_GHz
self.results[variation]['hfss_variables'] = _hfss_variables
self.results[variation]['modes'] = self.modes
# mostly for debug info
self.results[variation]['I_peak'] = pd.Series(I_peak)
self.results[variation]['V_peak'] = pd.Series(V_peak)
self.results[variation]['ansys_energies'] = ansys_energies # dict
self.results[variation]['mesh'] = None
self.results[variation]['convergence'] = None
self.results[variation]['convergence_f_pass'] = None
if self.options.save_mesh_stats:
self.results[variation]['mesh'] = self.get_mesh_statistics(
variation) # dataframe
self.results[variation]['convergence'] = self.get_convergence(
variation)
self.results[variation]['convergence_f_pass'] = self.hfss_report_f_convergence(
variation, save_csv=False) # dataframe
@staticmethod
def results_variations_on_inside(results: dict):
"""
Switches the order on result of variations. Reverse dict.
"""
# TODO: THis need to be changed, wont work in the future with updating result etc.
# if i want to make a base class
keys = set()
variations = list(results.keys())
# Get all keys
for variation in variations:
result = results[variation]
keys.update(result.keys())
new_res = dict()
for key in keys:
new_res[key] = {variation: results[variation].get(key, None)
for variation in variations}
# Conver to pandas Dataframe if all are pd.Series
if all(isinstance(new_res[key][variation], pd.Series) for variation in variations):
# print(key) # Conver these to dataframe
# Variations will become columns
new_res[key] = pd.DataFrame(new_res[key])
new_res[key].columns.name = 'variation'
# sort_df_col : maybe sort
return new_res # dict of keys now
def save(self, project_info: dict = None):
"""Save results to self.data_filename
Keyword Arguments:
project_info {dict} -- [description] (default: {None})
"""
if project_info is None:
project_info = self.pinfo.save()
to_save = dict(
project_info=project_info,
results=self.results,
)
with open(str(self.data_filename), 'wb') as handle:
pickle.dump(to_save, handle) # , protocol=pickle.HIGHEST_PROTOCOL)
def load(self, filepath=None):
"""Utility function to load results file
Keyword Arguments:
filepath {[type]} -- [description] (default: {None})
"""
filepath = filepath or self.data_filename
with open(str(filepath), 'rb') as handle:
loaded = pickle.load(handle)
return loaded
def get_mesh_statistics(self, variation='0'):
'''
Args:
variation (str): A string identifier of the variation,
such as '0', '1', ...
Returns:
A pandas dataframe, such as
.. code-block:: text
:linenos:
Name Num Tets Min edge length Max edge length RMS edge length Min tet vol Max tet vol Mean tet vol Std Devn (vol)
0 Region 909451 0.000243 0.860488 0.037048 6.006260e-13 0.037352 0.000029 6.268190e-04
1 substrate 1490356 0.000270 0.893770 0.023639 1.160090e-12 0.031253 0.000007 2.309920e-04
'''
variation = self._list_variations[ureg(variation)]
return self.setup.get_mesh_stats(variation)
def get_convergence(self, variation='0'):
'''
Args:
variation (str): A string identifier of the variation,
such as '0', '1', ...
Returns:
A pandas DataFrame object
.. code-block:: text
:linenos:
Solved Elements Max Delta Freq. % Pass Number
1 128955 NaN
2 167607 11.745000
3 192746 3.208600
4 199244 1.524000
'''
variation = self._list_variations[ureg(variation)]
df, _ = self.setup.get_convergence(variation)
return df
def get_convergence_vs_pass(self, variation='0'):
'''
Makes a plot in HFSS that return a pandas dataframe
Args:
variation (str): A string identifier of the variation,
such as '0', '1', ...
Returns:
Returns a convergence vs pass number of the eignemode freqs.
.. code-block:: text
:linenos:
re(Mode(1)) [g] re(Mode(2)) [g] re(Mode(3)) [g]
Pass []
1 4.643101 4.944204 5.586289
2 5.114490 5.505828 6.242423
3 5.278594 5.604426 6.296777
'''
return self.hfss_report_f_convergence(variation)
def set_mode(self, mode_num, phase=0):
'''
Set source excitations should be used for fields post processing.
Counting modes from 0 onward
'''
assert self.setup, "ERROR: There is no 'setup' connected. \N{face with medical mask}"
if mode_num < 0:
logger.error('Too small a mode number')
self.solutions.set_mode(mode_num + 1, phase)
if self.has_fields() == False:
logger.warning(f" Error: HFSS does not have field solution for variation={mode_num}.\
Skipping this mode in the analysis \N{face with medical mask}")
self.fields = self.setup.get_fields()
def has_fields(self, variation: str = None):
'''
Determine if fields exist for a particular solution.
Just calls `self.solutions.has_fields(variation_string)`
Args:
variation (str): String of variation label, such as '0' or '1'. If None, gets the nominal variation
'''
if self.solutions:
#print('variation=', variation)
variation_string = self.get_variation_string(variation)
return self.solutions.has_fields(variation_string)
else:
return False
    def hfss_report_f_convergence(self, variation='0', save_csv=True):
        '''
        Create a report inside HFSS to plot the converge of freq and style it.
        Saves report to csv file.
        Returns a convergence vs pass number of the eignemode freqs.
        Returns a pandas dataframe:

        .. code-block:: text

            re(Mode(1)) [g] re(Mode(2)) [g] re(Mode(3)) [g]
            Pass []
            1 4.643101 4.944204 5.586289
            2 5.114490 5.505828 6.242423
            3 5.278594 5.604426 6.296777

        Args:
            variation (str): variation identifier, e.g. '0'
            save_csv (bool): when True, also export the HFSS report to
                'hfss_eig_f_convergence.csv' in the data directory and
                return its contents; when False, only the HFSS plot is made
                and None is returned.
        '''
        # TODO: Move to class for reporter ?
        if not self.setup:
            logger.error('NO SETUP PRESENT - hfss_report_f_convergence.')
            return None
        if not self.design.solution_type == 'Eigenmode':
            # Only eigenmode solutions have mode frequencies to report
            return None
        oDesign = self.design
        variation = self._get_lv(variation)
        report = oDesign._reporter
        # Create report (recreate it if a plot with this name already exists)
        ycomp = [f"re(Mode({i}))" for i in range(1, 1+self.n_modes)]
        params = ["Pass:=", ["All"]]+variation
        report_name = "Freq. vs. pass"
        if report_name in report.GetAllReportNames():
            report.DeleteReports([report_name])
        self.solutions.create_report(
            report_name, "Pass", ycomp, params, pass_name='AdaptivePass')
        # Properties of lines
        curves = [f"{report_name}:re(Mode({i})):Curve1" for i in range(
            1, 1+self.n_modes)]
        set_property(report, 'Attributes', curves, 'Line Width', 3)
        set_property(report, 'Scaling',
                     f"{report_name}:AxisY1", 'Auto Units', False)
        set_property(report, 'Scaling', f"{report_name}:AxisY1", 'Units', 'g')
        set_property(report, 'Legend',
                     f"{report_name}:Legend", 'Show Solution Name', False)
        if save_csv:  # Save
            try:
                path = Path(self.data_dir)/'hfss_eig_f_convergence.csv'
                report.ExportToFile(report_name, path)
                logger.info(f'Saved convergences to {path}')
                return pd.read_csv(path, index_col=0)
            except Exception as e:
                # NOTE(review): if the Path(...) line itself raised, `path` is
                # unbound here and formatting this message would raise NameError.
                logger.error(f"Error could not save and export hfss plot to {path}.\
                    Is the plot made in HFSS with the correct name.\
                    Check the HFSS error window. \t Error = {e}")
        return None
    def hfss_report_full_convergence(self, fig=None, _display=True):
        """Plot a full report of the convergences of an eigenmode analysis for
        a given variation. Makes a plot inside hfss too.

        The same figure is cleared and redrawn for each variation in turn,
        so the returned figure shows the last variation processed.

        Keyword Arguments:
            fig {matplotlib figure} -- Optional figure (default: {None})
            _display {bool} -- Force display or not. (default: {True})

        Returns:
            matplotlib figure with the three convergence panels.
        """
        if fig is None:
            fig = plt.figure(figsize=(11, 3.))

        for variation, variation_labels in self.get_variations().items():
            fig.clf()
            # Grid spec and axes; height_ratios=[4, 1], wspace=0.5
            gs = mpl.gridspec.GridSpec(1, 3, width_ratios=[1.2, 1.5, 1])
            axs = [fig.add_subplot(gs[i]) for i in range(3)]
            logger.info(f'Creating report for variation {variation}')
            convergence_t = self.get_convergence(variation=variation)
            convergence_f = self.hfss_report_f_convergence(variation=variation)
            axs[0].set_ylabel(variation_labels.replace(' ', '\n'))  # add variation labels to y-axis of first plot
            # Twin axis shares the middle panel for the solved-elements curve
            ax0t = axs[1].twinx()
            plot_convergence_f_vspass(axs[0], convergence_f)
            plot_convergence_max_df(axs[1], convergence_t.iloc[:, 1])
            plot_convergence_solved_elem(ax0t, convergence_t.iloc[:, 0])
            plot_convergence_maxdf_vs_sol(axs[2], convergence_t.iloc[:, 1],
                                          convergence_t.iloc[:, 0])
            fig.tight_layout(w_pad=0.1)  # pad=0.0, w_pad=0.1, h_pad=1.0)
            if _display:
                from IPython.display import display
                display(fig)
        return fig
def quick_plot_frequencies(self, swp_variable='variations', ax=None):
"""
Quick plot of frequencies from HFSS
"""
fs = self.get_ansys_frequencies_all(swp_variable)
ax = ax or plt.gca()
fs['Freq. (GHz)'].unstack(0).transpose().plot(marker='o', ax=ax)
ax.set_ylabel('Ansys frequencies (MHz)')
ax.grid(alpha=0.2)
return fs
| 62,094 | 36.610539 | 154 | py |
pyEPR | pyEPR-master/pyEPR/_config_default.py | """
--- DO NOT MODIFY THIS FILE ---
Default configuration file for pyEPR
This file is NOT meant for users to modify.
Rather, a user should update any config settings they want
in a dictionary called CONFIG in a file called config.py
@author: Zlatko Minev and the pyEPR team
@date: Created on Fri Oct 30 14:21:45 2015
"""
import collections.abc
from . import Dict
# If we are reloading the package, then config will already be defined, then do not overwrite it.
# True when this module is being re-imported and a `config` already exists in
# this namespace; get_config() uses this to skip re-applying the user overrides.
__config_defined__ = 'config' in locals()
config = Dict( # pylint: disable=invalid-name
# Folder to save result data to.
root_dir=r'C:\data-pyEPR',
save_format=r'%Y-%m-%d %H-%M-%S',
ansys=Dict(
# method_calc_P_mj sets the method used to calculate the participation ratio in eigenmode.
# Valid values:
# 'line_voltage' : Uses the line voltage integral
# 'J_surf_mag' : takes the avg. Jsurf over the rect. Make sure you have seeded
# lots of tets here. I recommend starting with 4 across smallest dimension.
# Multi-junction calculation of energy participation ratio matrix based on <I_J>.
# Current is integrated average of J_surf by default: (zkm 3/29/16)
# Will calculate the Pj matrix for the selected modes for the given junctions
# junc_rect array & length of junctions
method_calc_P_mj='line_voltage',
# To save or not the mesh statistics from an HFSS run
save_mesh_stats=True,
),
epr = Dict(
# Define the participation renormalization method
# False : no extra renormalization to enforce
# can be more problematic for large pj, when sim isn't well converged
# True or 1 : use enforcement of U_J_total to be U_mode-U_H
# can be more problematic for small pj, when sim isn't well converged
# 2 : use enforcement of U_J_total to be U_mode-U_H (i.e., 1)
# only when the total participation is above a certain threshold
# preferred method.
renorm_pj = 2,
),
# Loss properties of various materials and surfaces
dissipation=Dict(
##################################################
# Bulk dielectric
# refs: https://arxiv.org/abs/1308.1743
# http://arxiv.org/pdf/1509.01854.pdf
tan_delta_sapp=1e-6, # tan(delta) for bulk surface
epsi=10, # dielectric
##################################################
# Surface dielectric
# ref: http://arxiv.org/pdf/1509.01854.pdf
# Surface dielectric (dirt) thickness
# units: meters
th=3e-9,
# Surface dielectric (dirt) constant
# units: relative permittivity
eps_r=10,
# Surface dielectric (dirt) loss tangent
# units: unitless, since this is tan(delta)
tan_delta_surf=1e-3,
##################################################
# Thin-film surface loss
# units: Ohms
# ref: https://arxiv.org/abs/1308.1743
surface_Rs=250e-9,
##################################################
# Seam current loss
# units: per Ohm meter; i.e., seam conductance
# ref: http://arxiv.org/pdf/1509.01119.pdf
gseam=1.0e3,
),
plotting=Dict(
# Default color map for plotting. Better if made into a string name
# taken from matplotlib.cm
default_color_map='viridis', # pylint: disable=no-member
),
# Not to be used by the user. Just internal
internal=Dict(
# Are we using ipython
ipython=None,
# Error message for loading packages
error_msg_missing_import="""\N{face with head-bandage}
If you need a part of pyEPR that uses this package,
then please install it. Then add it to the system path (if needed).
See online setup instructions at
http://www.github.com/zlatko-minev/pyEPR""",
# Warn on missing import
warn_missing_import=False,
),
# Logging
log=Dict(
# '%(name)s - %(levelname)s - %(message)s\n ::%(pathname)s:%(lineno)d: %(funcName)s\n')
format='%(levelname)s %(asctime)s [%(funcName)s]: %(message)s',
datefmt='%I:%M%p', #'%I:%M%p %Ss'
level='INFO'
)
)
def is_using_ipython():
    """Return True when running under IPython, False otherwise.

    IPython injects the global ``__IPYTHON__`` into the interpreter, so a
    plain CPython session raises NameError when it is referenced.

    Returns:
        bool -- True if ran in IPython
    """
    try:
        __IPYTHON__  # pylint: disable=undefined-variable, pointless-statement
    except NameError:
        return False
    return True
def update_recursive(d: collections.abc.Mapping, u: collections.abc.Mapping):
    """Recursively merge mapping *u* into mapping *d*, in place.

    Nested mappings are merged key-by-key; any non-mapping value in *u*
    overwrites the corresponding value in *d*.

    Arguments:
        d {collections.abc.Mapping} -- dict to overwrite
        u {collections.abc.Mapping} -- dict used to update

    Returns:
        same as d; Updated d
    """
    for key, value in u.items():
        if isinstance(value, collections.abc.Mapping):
            d[key] = update_recursive(d.get(key, {}), value)
        else:
            d[key] = value
    return d
def get_config():
    """Returns the config pointer.

    If the config is not yet loaded, it will load the default config and then
    update it with the _config_user.config dictionary.
    Else, it will just return the pointer to the above-updated config, which the
    user could have modified. The modifications will be kept.

    Returns:
        Dict : the config dictionary
    """
    if __config_defined__:
        #print('Config is already defined.') # not sure we ever make it here
        return config
    else:
        # Config is only loaded for the first time, set it up.
        #print('First time load of config')
        # Update with user config
        from . import _config_user
        # NOTE(review): `_config` is unused; update_recursive mutates the
        # module-level `config` in place and also returns it.
        _config = update_recursive(config, _config_user.config)
        # Add to config any bootup params
        config.internal.ipython = is_using_ipython()
        return config
__all__ = ['get_config']
| 6,018 | 30.186528 | 98 | py |
pyEPR | pyEPR-master/pyEPR/__config_user_old.py | """
User configuration file.
The dictionary of options specified here overwrites the pyEPR default
config defined in _config_default.py
Do not edit `_config_default.py` directly. Rather, overwrite attributes here
@author: Your name goes here
"""
from . import Dict
config = Dict(
# Folder to save result data to.
# PLEASE CHANGE THIS
root_dir=r'C:\data-pyEPR',
# Loss properties of various materials and surfaces
dissipation=Dict(
##################################################
# Bulk dielectric
# refs: https://arxiv.org/abs/1308.1743
# http://arxiv.org/pdf/1509.01854.pdf
tan_delta_sapp=1e-6, # tan(delta) for bulk surface
epsi=10, # dielectric
##################################################
# Surface dielectric
# ref: http://arxiv.org/pdf/1509.01854.pdf
# Surface dielectric (dirt) thickness
# units: meters
th=3e-9,
# Surface dielectric (dirt) constant
# units: relative permittivity
eps_r=10,
# Surface dielectric (dirt) loss tangent
# units: unitless, since this is tan(delta)
tan_delta_surf=1e-3,
##################################################
# Thin-film surface loss
# units: Ohms
# ref: https://arxiv.org/abs/1308.1743
surface_Rs=250e-9,
##################################################
# Seam current loss
# units: per Ohm meter; i.e., seam conductance
# ref: http://arxiv.org/pdf/1509.01119.pdf
gseam=1.0e3,
),
ansys=Dict(
# method_calc_P_mj sets the method used to calculate the participation ratio in eigenmode.
# Valid values:
# 'line_voltage' : Uses the line voltage integral
# 'J_surf_mag' : takes the avg. Jsurf over the rect. Make sure you have seeded
# lots of tets here. I recommend starting with 4 across smallest dimension.
# Multi-junction calculation of energy participation ratio matrix based on <I_J>.
# Current is integrated average of J_surf by default: (zkm 3/29/16)
# Will calculate the Pj matrix for the selected modes for the given junctions
# junc_rect array & length of junctions
method_calc_P_mj='line_voltage',
),
plotting=Dict(
# Default color map for plotting. Better if made into a string name
# taken from matplotlib.cm
default_color_map='viridis', # pylint: disable=no-member
),
)
__all__ = ['config']
| 2,592 | 30.621951 | 98 | py |
pyEPR | pyEPR-master/pyEPR/reports.py | """
Module for reporting utility functions
@author: Zlatko K Minev
"""
import pandas as pd
from .toolbox.plotting import legend_translucent, plt
def _style_plot_convergence(ax, ylabel=None, xlabel='Pass number', ylabel_col='k', y_title=False):
ax.set_xlabel(xlabel)
if ylabel:
if y_title:
ax.set_title(ylabel)
else:
ax.set_ylabel(ylabel, color=ylabel_col)
ax.grid()
ax.autoscale(tight=False)
ax.set_axisbelow(True) # Don't allow the axis to be on top of your data
ax.minorticks_on()
ax.grid(which='minor', linestyle=':',
linewidth='0.5', color='black', alpha=0.2)
ax.grid(which='major', alpha=0.5)
_style_plot_conv_kw = dict(marker='o', ms=4)
def plot_convergence_max_df(ax, s, kw={}, color='r'):
'''For a single pass'''
s.plot(ax=ax, **{**dict(c='r'), **_style_plot_conv_kw, **kw})
ax.set_yscale("log")
_style_plot_convergence(ax)
fig = ax.figure
fig.text(0.45, 0.95, s.name, ha="center",
va="bottom", size="medium", color=color)
ax.tick_params(axis='y', labelcolor=color)
#ax.axhline(1.0, color='k', lw=1.5,alpha= 0.35)
#ax.axhline(0.1, color='k', lw=1.5,alpha= 0.35)
ax.grid(which='minor', linestyle=':',
linewidth='0.5', color=color, alpha=0.25)
ax.grid(which='major', color='#c4abab', alpha=0.5)
ax.spines['left'].set_color(color)
def plot_convergence_solved_elem(ax, s, kw={}, color='b'):
'''For a single pass'''
(s/1000).plot(ax=ax, **{**dict(c='b'), **_style_plot_conv_kw, **kw})
_style_plot_convergence(ax)
# ax.set_ylim([100,None])
# ax.set_yscale("log")
ax.minorticks_off()
ax.grid(False)
ax.tick_params(axis='y', labelcolor=color)
# ax.ticklabel_format(style='sci',scilimits=(0,0))
fig = ax.figure
fig.text(0.6, 0.95, 'Solved elements (1000s)', ha="center",
va="bottom", size="medium", color=color)
ax.spines['left'].set_color('r')
ax.spines['right'].set_color(color)
def plot_convergence_f_vspass(ax, s, kw={}):
'''For a single pass'''
if s is not None:
(s).plot(ax=ax, **{**_style_plot_conv_kw, **kw})
_style_plot_convergence(ax, 'Eigenmode f vs. pass [GHz]', y_title=True)
legend_translucent(ax, leg_kw=dict(fontsize=6))
def plot_convergence_maxdf_vs_sol(ax, s, s2, kw={}):
'''
ax, 'Max Δf %', 'Solved elements', kw for plot
'''
s = s.copy()
s.index = s2
(s).plot(ax=ax, **{**_style_plot_conv_kw, **kw})
_style_plot_convergence(ax, s.name, xlabel='Solved elements', y_title=True)
ax.set_yscale("log")
ax.set_xscale("log")
# quick and dirty use
def _plot_q3d_convergence_main(epr, RES):
fig = epr.hfss_report_full_convergence(_display=False)
ax = fig.axes[0]
ax2 = ax.twinx()
ax.cla()
ax2.cla()
RES['alpha'].plot(ax=ax, c='b')
(RES['fQ']*1000).plot(ax=ax2, c='red')
from matplotlib import pyplot as plt
_style_plot_convergence(
ax, 'Alpha (blue), Freq (red) [MHz]', y_title=True)
ax2.set_ylabel('Frequency (MHz)', color='r')
ax.set_ylabel('Alpha(MHz)', color='b')
ax2.spines['right'].set_color('r')
ax2.tick_params(axis='y', labelcolor='r')
ax.tick_params(axis='y', labelcolor='b')
# legend_translucent(ax)
# legend_translucent(ax2)
ax.set_xlabel('Pass')
fig.tight_layout()
return fig
def _plot_q3d_convergence_chi_f(RES):
df_chi = pd.DataFrame(RES['chi_in_MHz'].values.tolist())
df_chi.index.name = 'Pass'
df_g = pd.DataFrame(RES['gbus'].values.tolist())
df_g.index.name = 'Pass'
fig, axs = plt.subplots(1, 2, figsize=(9, 3.5))
df_chi.plot(lw=2, ax=axs[0])
df_g.plot(lw=2, ax=axs[1])
_style_plot_convergence(axs[0])
_style_plot_convergence(axs[1])
axs[0].set_title(r'$\chi$ convergence (MHz)')
axs[1].set_title(r'$g$ convergence (MHz)')
return fig
| 3,910 | 30.039683 | 98 | py |
pyEPR | pyEPR-master/pyEPR/__init__.py | # This file is part of pyEPR: Energy participation ratio (EPR) design of
# quantum circuits in python
#
# Copyright (c) 2015-2020 and later, Zlatko K. Minev and Zaki Leghtas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the pyEPR nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
"""
**pyEPR**
Automated Python module for the design and quantization of Josephson quantum circuits
Abstract: Superconducting circuits incorporating non-linear devices, such as Josephson
junctions and nanowires, are among the leading platforms for emerging quantum technologies.
Promising applications require designing and optimizing circuits with ever-increasing
complexity and controlling their dissipative and Hamiltonian parameters to several
significant digits. Therefore, there is a growing need for a systematic, simple, and robust
approach for precise circuit design, extensible to increased complexity.
The energy-participation ratio (EPR) approach presents such an approach to unify the design
of dissipation and Hamiltonians around a single concept — the energy participation, a number
between zero and one — in a single-step electromagnetic simulation. This markedly reduces
the required number of simulations and allows for robust extension to complex systems.
The approach is general purpose, derived ab initio, and valid for arbitrary non-linear
devices and circuit architectures. Experimental results on a variety of circuit quantum
electrodynamics (cQED) devices and architectures, 3D and flip-chip (2.5D), have been
demonstrated to exhibit ten percent to percent-level agreement for non-linear coupling
and modal Hamiltonian parameters over five-orders of magnitude and across a dozen samples.
Here, in this package, all routines of the EPR approach are fully automated.
An interface with ansys is provided.
Automated analysis of lumped and distributed circuits is provided.
@author: Zlatko Minev, Zaki Leghtas, ... and the pyEPR team
@site: https://github.com/zlatko-minev/pyEPR
@license: "BSD-3-Clause"
@version: 0.9.0
@maintainer: Zlatko K. Minev and Asaf Diringer
@email: zlatko.minev@aya.yale.edu
@url: https://github.com/zlatko-minev/pyEPR
@status: "Dev-Production"
"""
# pylint: disable= wrong-import-position, invalid-name
# Compatibility with python 2.7 and 3
#from __future__ import division, print_function, absolute_import
import logging
import warnings
from pathlib import Path
from addict import Dict
##############################################################################
# Python header
__author__ = "Zlatko Minev, Zaki Leghas, and the pyEPR team"
__copyright__ = "Copyright 2015-2020, pyEPR team"
__credits__ = [
"Zlatko Minev", "Zaki Leghtas,", "Phil Rheinhold", "Asaf Diringer",
"Will Livingston", "Steven Touzard"
]
__license__ = "BSD-3-Clause"
__version__ = "0.9.0"
__maintainer__ = "Zlatko K. Minev and Asaf Diringer"
__email__ = "zlatko.minev@aya.yale.edu"
__url__ = r'https://github.com/zlatko-minev/pyEPR'
__status__ = "Dev-Production"
##############################################################################
# Config setup
from ._config_default import get_config
config = get_config()
##############################################################################
# Set up logging -- only on first loading of module, not on reloading.
# Module-wide singleton logger; handlers are attached only on first import so
# that module reloads do not duplicate log output.
logger = logging.getLogger('pyEPR')  # singleton
if not logger.handlers:  # idiomatic emptiness test (was `not len(...)`)
    from .toolbox._logging import set_up_logger
    set_up_logger(logger)
    del set_up_logger
##############################################################################
#
# Check that required packages are available. If not raise log warning.
try:
import pandas as pd
warnings.filterwarnings('ignore',
category=pd.io.pytables.PerformanceWarning)
del pd
except (ImportError, ModuleNotFoundError):
if config.internal.warn_missing_import:
logger.warning("IMPORT WARNING: `pandas` python package not found. %s",
config.internal.error_msg_missing_import)
# Check for a few usually troublesome packages
if config.internal.warn_missing_import:
# Check for qutip
try:
import qutip
del qutip
except (ImportError, ModuleNotFoundError):
logger.warning(
"""IMPORT WARNING: `qutip` package not found.
Numerical diagonalization will not work. Please install, e.g.:
$ conda install -c conda-forge qutip
%s""", config.internal.error_msg_missing_import)
try:
import pythoncom
del pythoncom
except (ImportError, ModuleNotFoundError):
logger.warning(
"""IMPORT WARNING:
Python package 'pythoncom' could not be loaded
It is used in communicating with HFSS on PCs. If you wish to do this, please set it up.
For Linux, check the HFSS python linux files for the com module used. It is equivalent,
and can be used just as well.
%s""", config.internal.error_msg_missing_import)
try:
from win32com.client import Dispatch, CDispatch
del Dispatch
del CDispatch
except (ImportError, ModuleNotFoundError):
logger.warning(
"""IMPORT WARNING: Could not load from 'win32com.client'.
The communication to hfss won't work. If you want to use it, you need to set it up.
%s""", config.internal.error_msg_missing_import)
try:
import pint # units
del pint
except (ImportError, ModuleNotFoundError):
logger.error(
"""IMPORT ERROR:
Python package 'pint' could not be loaded. It is used in communicating with HFSS. Try:
$ conda install -c conda-forge pint \n%s""",
config.internal.error_msg_missing_import)
# remove unused
del Path, warnings, logging
##############################################################################
# pyEPR convenience variable and function imports
from . import toolbox
from . import calcs
from . import ansys
from . import core
from .ansys import parse_units, parse_units_user, parse_entry
from .core import ProjectInfo, DistributedAnalysis, QuantumAnalysis,\
Project_Info, pyEPR_HFSSAnalysis, pyEPR_Analysis # names to be deprecated
__all__ = [
'logger',
'config',
'toolbox',
'calcs',
'ansys',
'core',
'ProjectInfo',
'DistributedAnalysis',
'QuantumAnalysis',
'Project_Info',
'pyEPR_HFSSAnalysis',
'pyEPR_Analysis', # names to be deprecated
'parse_units',
'parse_units_user',
'parse_entry'
]
# TODO: Add "about" method. Add to tutorial
| 8,129 | 39.049261 | 95 | py |
pyEPR | pyEPR-master/pyEPR/ansys.py | '''
pyEPR.ansys
2014-present
Purpose:
Handles Ansys interaction and control from version 2014 onward.
Tested most extensively with V2016 and V2019R3.
@authors:
Originally contributed by Phil Reinhold.
Developed further by Zlatko Minev, Zaki Leghtas, and the pyEPR team.
For the base version of hfss.py, see https://github.com/PhilReinhold/pyHFSS
'''
# Python 2.7 and 3 compatibility
from __future__ import (division, print_function)
from typing import List
import atexit
import os
import re
import signal
import tempfile
import time
import types
from collections.abc import Iterable
from copy import copy
from numbers import Number
from pathlib import Path
import numpy as np
import pandas as pd
from sympy.parsing import sympy_parser
import io
from . import logger
# Handle a few usually troublesome to import packages, which the use may not have
# installed yet
try:
import pythoncom
except (ImportError, ModuleNotFoundError):
pass #raise NameError ("pythoncom module not installed. Please install.")
try:
# TODO: Replace `win32com` with Linux compatible package.
# See Ansys python files in IronPython internal.
from win32com.client import Dispatch, CDispatch
except (ImportError, ModuleNotFoundError):
pass #raise NameError ("win32com module not installed. Please install.")
try:
from pint import UnitRegistry
ureg = UnitRegistry()
Q = ureg.Quantity
except (ImportError, ModuleNotFoundError):
pass # raise NameError ("Pint module not installed. Please install.")
##############################################################################
###
BASIS_ORDER = {
"Zero Order": 0,
"First Order": 1,
"Second Order": 2,
"Mixed Order": -1
}
# UNITS
# LENGTH_UNIT --- HFSS UNITS
# #Assumed default input units for ansys hfss
LENGTH_UNIT = 'meter'
# LENGTH_UNIT_ASSUMED --- USER UNITS
# if a user inputs a blank number with no units in `parse_fix`,
# we can assume the following using
LENGTH_UNIT_ASSUMED = 'mm'
def simplify_arith_expr(expr):
try:
out = repr(sympy_parser.parse_expr(str(expr)))
return out
except:
print("Couldn't parse", expr)
raise
def increment_name(base, existing):
if not base in existing:
return base
n = 1
def make_name():
return base + str(n)
while make_name() in existing:
n += 1
return make_name()
def extract_value_unit(expr, units):
"""
:type expr: str
:type units: str
:return: float
"""
try:
return Q(expr).to(units).magnitude
except Exception:
try:
return float(expr)
except Exception:
return expr
def extract_value_dim(expr):
"""
type expr: str
"""
return str(Q(expr).dimensionality)
def parse_entry(entry, convert_to_unit=LENGTH_UNIT):
'''
Should take a list of tuple of list... of int, float or str...
For iterables, returns lists
'''
if not isinstance(entry, list) and not isinstance(entry, tuple):
return extract_value_unit(entry, convert_to_unit)
else:
entries = entry
_entry = []
for entry in entries:
_entry.append(parse_entry(entry, convert_to_unit=convert_to_unit))
return _entry
def fix_units(x, unit_assumed=None):
'''
Convert all numbers to string and append the assumed units if needed.
For an iterable, returns a list
'''
unit_assumed = LENGTH_UNIT_ASSUMED if unit_assumed is None else unit_assumed
if isinstance(x, str):
# Check if there are already units defined, assume of form 2.46mm or 2.0 or 4.
if x[-1].isdigit() or x[-1] == '.': # number
return x + unit_assumed
else: # units are already applied
return x
elif isinstance(x, Number):
return fix_units(str(x) + unit_assumed, unit_assumed=unit_assumed)
elif isinstance(x, Iterable): # hasattr(x, '__iter__'):
return [fix_units(y, unit_assumed=unit_assumed) for y in x]
else:
return x
def parse_units(x):
'''
Convert number, string, and lists/arrays/tuples to numbers scaled
in HFSS units.
Converts to LENGTH_UNIT = meters [HFSS UNITS]
Assumes input units LENGTH_UNIT_ASSUMED = mm [USER UNITS]
[USER UNITS] ----> [HFSS UNITS]
'''
return parse_entry(fix_units(x))
def unparse_units(x):
'''
Undo effect of parse_unit.
Converts to LENGTH_UNIT_ASSUMED = mm [USER UNITS]
Assumes input units LENGTH_UNIT = meters [HFSS UNITS]
[HFSS UNITS] ----> [USER UNITS]
'''
return parse_entry(fix_units(x, unit_assumed=LENGTH_UNIT),
LENGTH_UNIT_ASSUMED)
def parse_units_user(x):
'''
Convert from user assumed units to user assumed units
[USER UNITS] ----> [USER UNITS]
'''
return parse_entry(fix_units(x, LENGTH_UNIT_ASSUMED), LENGTH_UNIT_ASSUMED)
class VariableString(str):
def __add__(self, other):
return var("(%s) + (%s)" % (self, other))
def __radd__(self, other):
return var("(%s) + (%s)" % (other, self))
def __sub__(self, other):
return var("(%s) - (%s)" % (self, other))
def __rsub__(self, other):
return var("(%s) - (%s)" % (other, self))
def __mul__(self, other):
return var("(%s) * (%s)" % (self, other))
def __rmul__(self, other):
return var("(%s) * (%s)" % (other, self))
def __div__(self, other):
return var("(%s) / (%s)" % (self, other))
def __rdiv__(self, other):
return var("(%s) / (%s)" % (other, self))
def __truediv__(self, other):
return var("(%s) / (%s)" % (self, other))
def __rtruediv__(self, other):
return var("(%s) / (%s)" % (other, self))
def __pow__(self, other):
return var("(%s) ^ (%s)" % (self, other))
def __rpow__(self, other):
return var("(%s) ^ (%s)" % (other, self))
def __neg__(self):
return var("-(%s)" % self)
def __abs__(self):
return var("abs(%s)" % self)
def var(x):
if isinstance(x, str):
return VariableString(simplify_arith_expr(x))
return x
_release_fns = []
def _add_release_fn(fn):
global _release_fns
_release_fns.append(fn)
atexit.register(fn)
signal.signal(signal.SIGTERM, fn)
signal.signal(signal.SIGABRT, fn)
def release():
'''
Release COM connection to Ansys.
'''
global _release_fns
for fn in _release_fns:
fn()
time.sleep(0.1)
# Note that _GetInterfaceCount is a member
refcount = pythoncom._GetInterfaceCount() # pylint: disable=no-member
if refcount > 0:
print("Warning! %d COM references still alive" % (refcount))
print("Ansys will likely refuse to shut down")
class COMWrapper(object):
def __init__(self):
_add_release_fn(self.release)
def release(self):
for k, v in self.__dict__.items():
if isinstance(v, CDispatch):
setattr(self, k, None)
class HfssPropertyObject(COMWrapper):
prop_holder = None
prop_tab = None
prop_server = None
def make_str_prop(name, prop_tab=None, prop_server=None):
return make_prop(name, prop_tab=prop_tab, prop_server=prop_server)
def make_int_prop(name, prop_tab=None, prop_server=None):
return make_prop(name,
prop_tab=prop_tab,
prop_server=prop_server,
prop_args=["MustBeInt:=", True])
def make_float_prop(name, prop_tab=None, prop_server=None):
return make_prop(name,
prop_tab=prop_tab,
prop_server=prop_server,
prop_args=["MustBeInt:=", False])
def make_prop(name, prop_tab=None, prop_server=None, prop_args=None):
def set_prop(self,
value,
prop_tab=prop_tab,
prop_server=prop_server,
prop_args=prop_args):
prop_tab = self.prop_tab if prop_tab is None else prop_tab
prop_server = self.prop_server if prop_server is None else prop_server
if isinstance(prop_tab, types.FunctionType):
prop_tab = prop_tab(self)
if isinstance(prop_server, types.FunctionType):
prop_server = prop_server(self)
if prop_args is None:
prop_args = []
self.prop_holder.ChangeProperty([
"NAME:AllTabs",
[
"NAME:" + prop_tab, ["NAME:PropServers", prop_server],
[
"NAME:ChangedProps",
["NAME:" + name, "Value:=", value] + prop_args
]
]
])
def get_prop(self, prop_tab=prop_tab, prop_server=prop_server):
prop_tab = self.prop_tab if prop_tab is None else prop_tab
prop_server = self.prop_server if prop_server is None else prop_server
if isinstance(prop_tab, types.FunctionType):
prop_tab = prop_tab(self)
if isinstance(prop_server, types.FunctionType):
prop_server = prop_server(self)
return self.prop_holder.GetPropertyValue(prop_tab, prop_server, name)
return property(get_prop, set_prop)
def set_property(prop_holder,
prop_tab,
prop_server,
name,
value,
prop_args=None):
'''
More general non obj oriented, functional version
prop_args = [] by default
'''
if not isinstance(prop_server, list):
prop_server = [prop_server]
return prop_holder.ChangeProperty([
"NAME:AllTabs",
[
"NAME:" + prop_tab, ["NAME:PropServers", *prop_server],
[
"NAME:ChangedProps",
["NAME:" + name, "Value:=", value] + (prop_args or [])
]
]
])
class HfssApp(COMWrapper):
def __init__(self, ProgID='AnsoftHfss.HfssScriptInterface'):
'''
Connect to IDispatch-based COM object.
Parameter is the ProgID or CLSID of the COM object.
This is found in the regkey.
Version changes for Ansys HFSS for the main object
v2016 - 'Ansoft.ElectronicsDesktop'
v2017 and subsequent - 'AnsoftHfss.HfssScriptInterface'
'''
super(HfssApp, self).__init__()
self._app = Dispatch(ProgID)
def get_app_desktop(self):
return HfssDesktop(self, self._app.GetAppDesktop())
# in v2016, there is also getApp - which can be called with HFSS
class HfssDesktop(COMWrapper):
def __init__(self, app, desktop):
"""
:type app: HfssApp
:type desktop: Dispatch
"""
super(HfssDesktop, self).__init__()
self.parent = app
self._desktop = desktop
# ansys version, needed to check for command changes,
# since some commands have changed over the years
self.version = self.get_version()
def close_all_windows(self):
self._desktop.CloseAllWindows()
def project_count(self):
count = len(self._desktop.GetProjects())
return count
def get_active_project(self):
return HfssProject(self, self._desktop.GetActiveProject())
def get_projects(self):
return [HfssProject(self, p) for p in self._desktop.GetProjects()]
def get_project_names(self):
return self._desktop.GetProjectList()
def get_messages(self, project_name="", design_name="", level=0):
"""Use: Collects the messages from a specified project and design.
Syntax: GetMessages <ProjectName>, <DesignName>, <SeverityName>
Return Value: A simple array of strings.
Parameters:
<ProjectName>
Type:<string>
Name of the project for which to collect messages.
An incorrect project name results in no messages (design is ignored)
An empty project name results in all messages (design is ignored)
<DesignName>
Type: <string>
Name of the design in the named project for which to collect messages
An incorrect design name results in no messages for the named project
An empty design name results in all messages for the named project
<SeverityName>
Type: <integer>
Severity is 0-3, and is tied in to info/warning/error/fatal types as follows:
0 is info and above
1 is warning and above
2 is error and fatal
3 is fatal only (rarely used)
"""
return self._desktop.GetMessages(project_name, design_name, level)
def get_version(self):
return self._desktop.GetVersion()
def new_project(self):
return HfssProject(self, self._desktop.NewProject())
def open_project(self, path):
''' returns error if already open '''
return HfssProject(self, self._desktop.OpenProject(path))
def set_active_project(self, name):
self._desktop.SetActiveProject(name)
@property
def project_directory(self):
return self._desktop.GetProjectDirectory()
@project_directory.setter
def project_directory(self, path):
self._desktop.SetProjectDirectory(path)
@property
def library_directory(self):
return self._desktop.GetLibraryDirectory()
@library_directory.setter
def library_directory(self, path):
self._desktop.SetLibraryDirectory(path)
@property
def temp_directory(self):
return self._desktop.GetTempDirectory()
@temp_directory.setter
def temp_directory(self, path):
self._desktop.SetTempDirectory(path)
class HfssProject(COMWrapper):
def __init__(self, desktop, project):
"""
:type desktop: HfssDesktop
:type project: Dispatch
"""
super(HfssProject, self).__init__()
self.parent = desktop
self._project = project
#self.name = project.GetName()
self._ansys_version = self.parent.version
def close(self):
self._project.Close()
def make_active(self):
self.parent.set_active_project(self.name)
def get_designs(self):
return [HfssDesign(self, d) for d in self._project.GetDesigns()]
def get_design_names(self):
return [d.GetName() for d in self._project.GetDesigns()]
def save(self, path=None):
if path is None:
self._project.Save()
else:
self._project.SaveAs(path, True)
def simulate_all(self):
self._project.SimulateAll()
def import_dataset(self, path):
self._project.ImportDataset(path)
def rename_design(self, design, rename):
if design in self.get_designs():
design.rename_design(design.name, rename)
else:
raise ValueError('%s design does not exist' % design.name)
def duplicate_design(self, target, source):
src_design = self.get_design(source)
return src_design.duplicate(name=target)
def get_variable_names(self):
return [VariableString(s) for s in self._project.GetVariables()]
def get_variables(self):
""" Returns the project variables only, which start with $. These are global variables. """
return {
VariableString(s): self.get_variable_value(s)
for s in self._project.GetVariables()
}
def get_variable_value(self, name):
return self._project.GetVariableValue(name)
def create_variable(self, name, value):
self._project.ChangeProperty([
"NAME:AllTabs",
[
"NAME:ProjectVariableTab",
["NAME:PropServers", "ProjectVariables"],
[
"Name:NewProps",
[
"NAME:" + name, "PropType:=", "VariableProp",
"UserDef:=", True, "Value:=", value
]
]
]
])
def set_variable(self, name, value):
if name not in self._project.GetVariables():
self.create_variable(name, value)
else:
self._project.SetVariableValue(name, value)
return VariableString(name)
def get_path(self):
if self._project:
return self._project.GetPath()
else:
raise Exception('''Error: HFSS Project does not have a path.
Either there is no HFSS project open, or it is not saved.''')
def new_design(self, design_name, solution_type, design_type="HFSS"):
design_name_int = increment_name(
design_name, [d.GetName() for d in self._project.GetDesigns()])
return HfssDesign(
self,
self._project.InsertDesign(design_type, design_name_int,
solution_type, ""))
def get_design(self, name):
return HfssDesign(self, self._project.GetDesign(name))
def get_active_design(self):
d = self._project.GetActiveDesign()
if d is None:
raise EnvironmentError("No Design Active")
return HfssDesign(self, d)
def new_dm_design(self, name: str):
"""Create a new driven model design
Args:
name (str): Name of driven modal design
"""
return self.new_design(name, "DrivenModal")
def new_em_design(self, name: str):
"""Create a new eigenmode design
Args:
name (str): Name of eigenmode design
"""
return self.new_design(name, "Eigenmode")
def new_q3d_design(self, name: str):
"""Create a new Q3D design.
Args:
name (str): Name of Q3D design
"""
return self.new_design(name, "Q3D", "Q3D Extractor")
@property # v2016
def name(self):
return self._project.GetName()
class HfssDesign(COMWrapper):
def __init__(self, project, design):
super(HfssDesign, self).__init__()
self.parent = project
self._design = design
self.name = design.GetName()
self._ansys_version = self.parent._ansys_version
try:
# This function does not exist if the design is not HFSS
self.solution_type = design.GetSolutionType()
except Exception as e:
logger.debug(
f'Exception occurred at design.GetSolutionType() {e}. Assuming Q3D design'
)
self.solution_type = 'Q3D'
if design is None:
return
self._setup_module = design.GetModule("AnalysisSetup")
self._solutions = design.GetModule("Solutions")
self._fields_calc = design.GetModule("FieldsReporter")
self._output = design.GetModule("OutputVariable")
self._boundaries = design.GetModule("BoundarySetup")
self._reporter = design.GetModule("ReportSetup")
self._modeler = design.SetActiveEditor("3D Modeler")
self._optimetrics = design.GetModule("Optimetrics")
self._mesh = design.GetModule("MeshSetup")
self.modeler = HfssModeler(self, self._modeler, self._boundaries,
self._mesh)
self.optimetrics = Optimetrics(self)
def add_message(self, message: str, severity: int = 0):
"""
Add a message to HFSS log with severity and context to message window.
Keyword Args:
severity (int) : 0 = Informational, 1 = Warning, 2 = Error, 3 = Fatal..
"""
project = self.parent
desktop = project.parent
oDesktop = desktop._desktop
oDesktop.AddMessage(project.name, self.name, severity, message)
def save_screenshot(self, path: str = None, show: bool = True):
if not path:
path = Path().absolute() / 'ansys.png' # TODO find better
self._modeler.ExportModelImageToFile(
str(path),
0,
0, # can be 0 For the default, use 0, 0. For higher resolution, set desired <width> and <height>, for example for 8k export as: 7680, 4320.
[
"NAME:SaveImageParams", "ShowAxis:=", "True", "ShowGrid:=",
"True", "ShowRuler:=", "True", "ShowRegion:=", "Default",
"Selections:=", "", "Orientation:=", ""
])
if show:
from IPython.display import display, Image
display(Image(str(path)))
return path
def rename_design(self, name):
old_name = self._design.GetName()
self._design.RenameDesignInstance(old_name, name)
def copy_to_project(self, project):
project.make_active()
project._project.CopyDesign(self.name)
project._project.Paste()
return project.get_active_design()
def duplicate(self, name=None):
dup = self.copy_to_project(self.parent)
if name is not None:
dup.rename_design(name)
return dup
def get_setup_names(self):
return self._setup_module.GetSetups()
def get_setup(self, name=None):
"""
:rtype: HfssSetup
"""
setups = self.get_setup_names()
if not setups:
raise EnvironmentError(" *** No Setups Present ***")
if name is None:
name = setups[0]
elif name not in setups:
raise EnvironmentError("Setup {} not found: {}".format(
name, setups))
if self.solution_type == "Eigenmode":
return HfssEMSetup(self, name)
elif self.solution_type == "DrivenModal":
return HfssDMSetup(self, name)
elif self.solution_type == "DrivenTerminal":
return HfssDTSetup(self, name)
elif self.solution_type == "Q3D":
return AnsysQ3DSetup(self, name)
def create_q3d_setup(self,
freq_ghz=5.,
name="Setup",
save_fields=False,
enabled=True,
max_passes=15,
min_passes=2,
min_converged_passes=2,
percent_error=0.5,
percent_refinement=30,
auto_increase_solution_order=True,
solution_order="High",
solver_type='Iterative'):
name = increment_name(name, self.get_setup_names())
self._setup_module.InsertSetup("Matrix", [
f"NAME:{name}", "AdaptiveFreq:=", f"{freq_ghz}GHz", "SaveFields:=",
save_fields, "Enabled:=", enabled,
[
"NAME:Cap", "MaxPass:=", max_passes, "MinPass:=", min_passes,
"MinConvPass:=", min_converged_passes, "PerError:=",
percent_error, "PerRefine:=", percent_refinement,
"AutoIncreaseSolutionOrder:=", auto_increase_solution_order,
"SolutionOrder:=", solution_order, "Solver Type:=", solver_type
]
])
return AnsysQ3DSetup(self, name)
def create_dm_setup(self,
freq_ghz=1,
name="Setup",
max_delta_s=0.1,
max_passes=10,
min_passes=1,
min_converged=1,
pct_refinement=30,
basis_order=-1):
name = increment_name(name, self.get_setup_names())
self._setup_module.InsertSetup("HfssDriven", [
"NAME:" + name, "Frequency:=",
str(freq_ghz) + "GHz", "MaxDeltaS:=", max_delta_s,
"MaximumPasses:=", max_passes, "MinimumPasses:=", min_passes,
"MinimumConvergedPasses:=", min_converged, "PercentRefinement:=",
pct_refinement, "IsEnabled:=", True, "BasisOrder:=", basis_order
])
return HfssDMSetup(self, name)
def create_dt_setup(self,
freq_ghz=1,
name="Setup",
max_delta_s=0.1,
max_passes=10,
min_passes=1,
min_converged=1,
pct_refinement=30,
basis_order=-1):
name = increment_name(name, self.get_setup_names())
self._setup_module.InsertSetup("HfssDriven", [
"NAME:" + name, "Frequency:=",
str(freq_ghz) + "GHz", "MaxDeltaS:=", max_delta_s,
"MaximumPasses:=", max_passes, "MinimumPasses:=", min_passes,
"MinimumConvergedPasses:=", min_converged, "PercentRefinement:=",
pct_refinement, "IsEnabled:=", True, "BasisOrder:=", basis_order
])
return HfssDTSetup(self, name)
def create_em_setup(self,
name="Setup",
min_freq_ghz=1,
n_modes=1,
max_delta_f=0.1,
max_passes=10,
min_passes=1,
min_converged=1,
pct_refinement=30,
basis_order=-1):
name = increment_name(name, self.get_setup_names())
self._setup_module.InsertSetup("HfssEigen", [
"NAME:" + name, "MinimumFrequency:=",
str(min_freq_ghz) + "GHz", "NumModes:=", n_modes, "MaxDeltaFreq:=",
max_delta_f, "ConvergeOnRealFreq:=", True, "MaximumPasses:=",
max_passes, "MinimumPasses:=", min_passes,
"MinimumConvergedPasses:=", min_converged, "PercentRefinement:=",
pct_refinement, "IsEnabled:=", True, "BasisOrder:=", basis_order
])
return HfssEMSetup(self, name)
def delete_setup(self, name):
if name in self.get_setup_names():
self._setup_module.DeleteSetups(name)
def delete_full_variation(self,
DesignVariationKey="All",
del_linked_data=False):
"""
DeleteFullVariation
Use: Use to selectively make deletions or delete all solution data.
Command: HFSS>Results>Clean Up Solutions...
Syntax: DeleteFullVariation Array(<parameters>), boolean
Parameters: All | <DataSpecifierArray>
If, All, all data of existing variations is deleted.
Array(<DesignVariationKey>, )
<DesignVariationKey>
Type: <string>
Design variation string.
<Boolean>
Type: boolean
Whether to also delete linked data.
"""
self._design.DeleteFullVariation("All", False)
def get_nominal_variation(self):
"""
Use: Gets the nominal variation string
Return Value: Returns a string representing the nominal variation
Returns string such as "Height='0.06mm' Lj='13.5nH'"
"""
return self._design.GetNominalVariation()
def create_variable(self, name, value, postprocessing=False):
if postprocessing == True:
variableprop = "PostProcessingVariableProp"
else:
variableprop = "VariableProp"
self._design.ChangeProperty([
"NAME:AllTabs",
[
"NAME:LocalVariableTab",
["NAME:PropServers", "LocalVariables"],
[
"Name:NewProps",
[
"NAME:" + name, "PropType:=", variableprop,
"UserDef:=", True, "Value:=", value
]
]
]
])
def _variation_string_to_variable_list(self,
variation_string: str,
for_prop_server=True):
"""Example:
Takes
"Cj='2fF' Lj='13.5nH'"
for for_prop_server=True into
[['NAME:Cj', 'Value:=', '2fF'], ['NAME:Lj', 'Value:=', '13.5nH']]
or for for_prop_server=False into
[['Cj', '2fF'], ['Lj', '13.5nH']]
"""
s = variation_string
s = s.split(' ')
s = [s1.strip().strip("''").split("='") for s1 in s]
if for_prop_server:
local, project = [], []
for arr in s:
to_add = [f'NAME:{arr[0]}', "Value:=", arr[1]]
if arr[0][0] == '$':
project += [to_add] # global variable
else:
local += [to_add] # local variable
return local, project
else:
return s
def set_variables(self, variation_string: str):
"""
Set all variables to match a solved variation string.
Args:
variation_string (str) : Variation string such as
"Cj='2fF' Lj='13.5nH'"
"""
assert isinstance(variation_string, str)
content = ["NAME:ChangedProps"]
local, project = self._variation_string_to_variable_list(
variation_string)
#print('\nlocal=', local, '\nproject=', project)
if len(project) > 0:
self._design.ChangeProperty([
"NAME:AllTabs",
[
"NAME:ProjectVariableTab",
["NAME:PropServers", "ProjectVariables"], content + project
]
])
if len(local) > 0:
self._design.ChangeProperty([
"NAME:AllTabs",
[
"NAME:LocalVariableTab",
["NAME:PropServers", "LocalVariables"], content + local
]
])
def set_variable(self, name: str, value: str, postprocessing=False):
"""Warning: THis is case sensitive,
Arguments:
name {str} -- Name of variable to set, such as 'Lj_1'.
This is not the same as as 'LJ_1'.
You must use the same casing.
value {str} -- Value, such as '10nH'
Keyword Arguments:
postprocessing {bool} -- Postprocessing variable only or not.
(default: {False})
Returns:
VariableString
"""
# TODO: check if variable does not exist and quit if it doesn't?
if name not in self.get_variable_names():
self.create_variable(name, value, postprocessing=postprocessing)
else:
self._design.SetVariableValue(name, value)
return VariableString(name)
def get_variable_value(self, name):
""" Can only access the design variables, i.e., the local ones
Cannot access the project (global) variables, which start with $. """
return self._design.GetVariableValue(name)
def get_variable_names(self):
""" Returns the local design variables.
Does not return the project (global) variables, which start with $. """
return [
VariableString(s) for s in self._design.GetVariables() +
self._design.GetPostProcessingVariables()
]
def get_variables(self):
""" Returns dictionary of local design variables and their values.
Does not return the project (global) variables and their values,
whose names start with $. """
local_variables = self._design.GetVariables(
) + self._design.GetPostProcessingVariables()
return {lv: self.get_variable_value(lv) for lv in local_variables}
def copy_design_variables(self, source_design):
''' does not check that variables are all present '''
# don't care about values
source_variables = source_design.get_variables()
for name, value in source_variables.items():
self.set_variable(name, value)
def get_excitations(self):
self._boundaries.GetExcitations()
def _evaluate_variable_expression(self, expr, units):
"""
:type expr: str
:type units: str
:return: float
"""
try:
sexp = sympy_parser.parse_expr(expr)
except SyntaxError:
return Q(expr).to(units).magnitude
sub_exprs = {
fs: self.get_variable_value(fs.name)
for fs in sexp.free_symbols
}
return float(
sexp.subs({
fs: self._evaluate_variable_expression(e, units)
for fs, e in sub_exprs.items()
}))
def eval_expr(self, expr, units="mm"):
return str(self._evaluate_variable_expression(expr, units)) + units
def Clear_Field_Clac_Stack(self):
self._fields_calc.CalcStack("Clear")
def clean_up_solutions(self):
self._design.DeleteFullVariation('All', True) # Delete existing solutions
class HfssSetup(HfssPropertyObject):
    """Wrapper for an HFSS analysis (solution) setup within a design.

    Gives property access to the setup (passes, modes, refinement, ...),
    manages frequency sweeps, runs/solves the setup, and exports
    convergence, mesh-statistics and profile data as pandas DataFrames.

    Fixes vs. original:
    - ``get_convergence`` guarded ``text2[3]`` with ``len(text) >= 3``
      (length of the raw string), which is nearly always true and allowed
      an IndexError when fewer than four '==...==' sections were present;
      the guard now checks the split list ``text2``.
    - ``get_convergence`` no longer uses a mutable default argument.
    - Removed a commented-out duplicate of ``add_fields_convergence_expr``
      / ``commit_convergence_exprs``.
    """
    prop_tab = "HfssTab"
    passes = make_int_prop("Passes")  # see EditSetup
    n_modes = make_int_prop("Modes")
    pct_refinement = make_float_prop("Percent Refinement")
    delta_f = make_float_prop("Delta F")
    min_freq = make_float_prop("Min Freq")
    basis_order = make_str_prop("Basis Order")

    def __init__(self, design, setup: str):
        """
        :type design: HfssDesign
        :type setup: Dispatch
        :COM Scripting Help: "Analysis Setup Module Script Commands"
        Get properties:
            setup.parent._design.GetProperties("HfssTab",'AnalysisSetup:Setup1')
        """
        super(HfssSetup, self).__init__()
        self.parent = design
        self.prop_holder = design._design
        self._setup_module = design._setup_module
        self._reporter = design._reporter
        self._solutions = design._solutions
        self.name = setup
        self.solution_name = setup + " : LastAdaptive"
        #self.solution_name_pass = setup + " : AdaptivePass"
        self.prop_server = "AnalysisSetup:" + setup
        self.expression_cache_items = []
        self._ansys_version = self.parent._ansys_version

    def analyze(self, name=None):
        '''
        Use: Solves a single solution setup and all of its frequency sweeps.
        Command: Right-click a solution setup in the project tree, and then
            click Analyze on the shortcut menu.
        Syntax: Analyze(<SetupName>)
        Parameters: <setupName>
        Return Value: None
        -----------------------------------------------------
        Will block the until the analysis is completely done.
        Will raise a com_error if analysis is aborted in HFSS.
        '''
        if name is None:
            name = self.name
        logger.info(f'Analyzing setup {name}')
        return self.parent._design.Analyze(name)

    def solve(self, name=None):
        '''
        Use: Performs a blocking simulation.
            The next script command will not be executed
            until the simulation is complete.
        Command: HFSS>Analyze
        Syntax: Solve <SetupNameArray>
        Return Value: Type: <int>
            -1: simulation error
            0: normal completion
        Parameters: <SetupNameArray>: Array(<SetupName>, <SetupName>, ...)
            <SetupName>
            Type: <string>
            Name of the solution setup to solve.
        Example:
            return_status = oDesign.Solve Array("Setup1", "Setup2")
        -----------------------------------------------------
        HFSS abort: still returns 0, since termination by user.
        '''
        if name is None:
            name = self.name
        return self.parent._design.Solve(name)

    def insert_sweep(self,
                     start_ghz,
                     stop_ghz,
                     count=None,
                     step_ghz=None,
                     name="Sweep",
                     type="Fast",
                     save_fields=False):
        """Insert a frequency sweep into this setup and return its wrapper.

        Provide exactly one of *count* (linear count) or *step_ghz*
        (linear step). The COM argument layout differs between Ansys
        versions; both forms are handled below.

        :rtype: HfssFrequencySweep
        """
        if not type in ['Fast', 'Interpolating', 'Discrete']:
            logger.error(
                "insert_sweep: Error type was not in ['Fast', 'Interpolating', 'Discrete']"
            )
        name = increment_name(name, self.get_sweep_names())
        params = [
            "NAME:" + name,
            "IsEnabled:=", True,
            "Type:=", type,
            "SaveFields:=", save_fields,
            "SaveRadFields:=", False,
            # "GenerateFieldsForAllFreqs:="
            "ExtrapToDC:=", False,
        ]
        # not sure when exactly this changed between 2016 and 2019
        if self._ansys_version >= '2019':
            if count:
                params.extend([
                    "RangeType:=", 'LinearCount', "RangeStart:=",
                    f"{start_ghz:f}GHz", "RangeEnd:=", f"{stop_ghz:f}GHz",
                    "RangeCount:=", count
                ])
            if step_ghz:
                params.extend([
                    "RangeType:=", 'LinearStep', "RangeStart:=",
                    f"{start_ghz:f}GHz", "RangeEnd:=", f"{stop_ghz:f}GHz",
                    "RangeStep:=", step_ghz
                ])
            if (count and step_ghz) or ((not count) and (not step_ghz)):
                logger.error(
                    'ERROR: you should provide either step_ghz or count \
                    when inserting an HFSS driven model freq sweep. \
                    YOu either provided both or neither! See insert_sweep.')
        else:
            params.extend([
                "StartValue:=",
                "%fGHz" % start_ghz, "StopValue:=",
                "%fGHz" % stop_ghz
            ])
            if step_ghz is not None:
                params.extend([
                    "SetupType:=", "LinearSetup", "StepSize:=",
                    "%fGHz" % step_ghz
                ])
            else:
                params.extend(["SetupType:=", "LinearCount", "Count:=", count])
        self._setup_module.InsertFrequencySweep(self.name, params)
        return HfssFrequencySweep(self, name)

    def delete_sweep(self, name):
        """Delete the named frequency sweep from this setup."""
        self._setup_module.DeleteSweep(self.name, name)

    def get_sweep_names(self):
        """Return the names of all sweeps in this setup."""
        return self._setup_module.GetSweeps(self.name)

    def get_sweep(self, name=None):
        """Return a wrapper for the named sweep (first sweep if name is None).

        Raises EnvironmentError if no sweeps exist or the name is unknown.
        """
        sweeps = self.get_sweep_names()
        if not sweeps:
            raise EnvironmentError("No Sweeps Present")
        if name is None:
            name = sweeps[0]
        elif name not in sweeps:
            raise EnvironmentError("Sweep {} not found in {}".format(
                name, sweeps))
        return HfssFrequencySweep(self, name)

    def add_fields_convergence_expr(self, expr, pct_delta, phase=0):
        """note: because of hfss idiocy, you must call "commit_convergence_exprs"
        after adding all exprs"""
        assert isinstance(expr, NamedCalcObject)
        self.expression_cache_items.append([
            "NAME:CacheItem", "Title:=", expr.name + "_conv", "Expression:=",
            expr.name, "Intrinsics:=", "Phase='{}deg'".format(phase),
            "IsConvergence:=", True, "UseRelativeConvergence:=", 1,
            "MaxConvergenceDelta:=", pct_delta, "MaxConvergeValue:=", "0.05",
            "ReportType:=", "Fields", ["NAME:ExpressionContext"]
        ])

    def commit_convergence_exprs(self):
        """note: this will eliminate any convergence expressions not added through this interface"""
        args = [
            "NAME:" + self.name,
            ["NAME:ExpressionCache", self.expression_cache_items]
        ]
        self._setup_module.EditSetup(self.name, args)

    def get_convergence(self, variation="", pre_fn_args=None, overwrite=True):
        '''
        Returns convergence as a dataframe, plus the raw exported text.

        Variation should be in the form
            variation = "scale_factor='1.2001'" ...
        '''
        if pre_fn_args is None:
            pre_fn_args = []
        # TODO: (Daniel) I think this data should be store in a more comfortable datatype (dictionary maybe?)
        # Write file
        temp = tempfile.NamedTemporaryFile()
        temp.close()
        temp = temp.name + '.conv'
        self.parent._design.ExportConvergence(self.name, variation,
                                              *pre_fn_args, temp, overwrite)
        # Read File
        temp = Path(temp)
        if not temp.is_file():
            logger.error(
                f'''ERROR! Error in trying to read temporary convergence file.
             `get_convergence` did not seem to have the file written {str(temp)}.
             Perhaps there was no convergence? Check to see if there is a CONV available for this current variation. If the nominal design is not solved, it will not have a CONV., but will show up as a variation
             Check for error messages in HFSS.
             Retuning None''')
            return None, ''
        text = temp.read_text()
        # Parse file: the report is split into sections by '=' separators;
        # the convergence table is the fourth section (index 3).
        text2 = text.split(r'==================')
        if len(text2) >= 4:  # fixed: was `len(text) >= 3` (raw-string length)
            df = pd.read_csv(io.StringIO(text2[3].strip()),
                             sep='|',
                             skipinitialspace=True,
                             index_col=0).drop('Unnamed: 3', axis=1)
        else:
            logger.error(f'ERROR IN reading in {temp}:\n{text}')
            df = None
        return df, text

    def get_mesh_stats(self, variation=""):
        ''' Return mesh statistics as a dataframe (or None on parse failure).

        variation should be in the form
            variation = "scale_factor='1.2001'" ...
        '''
        temp = tempfile.NamedTemporaryFile()
        temp.close()
        # seems broken in 2016 because of extra text added to the top of the file
        self.parent._design.ExportMeshStats(self.name, variation,
                                            temp.name + '.mesh', True)
        try:
            df = pd.read_csv(temp.name + '.mesh',
                             delimiter='|',
                             skipinitialspace=True,
                             skiprows=7,
                             skipfooter=1,
                             skip_blank_lines=True,
                             engine='python')
            df = df.drop('Unnamed: 9', axis=1)
        except Exception as e:
            print("ERROR in MESH reading operation.")
            print(e)
            print(
                'ERROR! Error in trying to read temporary MESH file ' +
                temp.name +
                '\n. Check to see if there is a mesh available for this current variation.\
                 If the nominal design is not solved, it will not have a mesh., \
                 but will show up as a variation.')
            df = None
        return df

    def get_profile(self, variation=""):
        """Export and return the solution profile as a dataframe."""
        fn = tempfile.mktemp()
        self.parent._design.ExportProfile(self.name, variation, fn, False)
        df = pd.read_csv(fn,
                         delimiter='\t',
                         skipinitialspace=True,
                         skiprows=6,
                         skipfooter=1,
                         skip_blank_lines=True,
                         engine='python')
        # just broken down by new lines
        return df

    def get_fields(self):
        """Return a fields-calculator wrapper bound to this setup."""
        return HfssFieldsCalc(self)
class HfssDMSetup(HfssSetup):
    """
    Driven modal setup
    """
    solution_freq = make_float_prop("Solution Freq")
    delta_s = make_float_prop("Delta S")
    solver_type = make_str_prop("Solver Type")

    def setup_link(self, linked_setup):
        '''
        Link this setup's mesh to another setup's solved mesh.

        type: linked_setup <HfssSetup>
        '''
        args = [
            "NAME:" + self.name,
            [
                "NAME:MeshLink",
                "Project:=", "This Project*",
                "Design:=", linked_setup.parent.name,
                "Soln:=", linked_setup.solution_name,
                self._map_variables_by_name(),
                "ForceSourceToSolve:=", True,
                "PathRelativeTo:=", "TargetProject",
            ],
        ]
        self._setup_module.EditSetup(self.name, args)

    def _map_variables_by_name(self):
        ''' Build a "Params" array mapping every project and design variable
        to itself. Does not check that variables are all present; values
        are irrelevant here. '''
        all_names = (self.parent.parent.get_variable_names() +
                     self.parent.get_variable_names())
        args = ["NAME:Params"]
        for var_name in all_names:
            args.extend([str(var_name) + ":=", str(var_name)])
        return args

    def get_solutions(self):
        """Return the driven-modal solutions wrapper for this setup."""
        return HfssDMDesignSolutions(self, self.parent._solutions)
class HfssDTSetup(HfssDMSetup):
    """Driven terminal setup; identical to driven modal except for the
    solutions wrapper it returns."""

    def get_solutions(self):
        """Return the driven-terminal solutions wrapper for this setup."""
        return HfssDTDesignSolutions(self, self.parent._solutions)
class HfssEMSetup(HfssSetup):
    """
    Eigenmode setup
    """
    min_freq = make_float_prop("Min Freq")
    n_modes = make_int_prop("Modes")
    delta_f = make_float_prop("Delta F")

    def get_solutions(self):
        """Return the eigenmode solutions wrapper for this setup."""
        return HfssEMDesignSolutions(self, self.parent._solutions)
class AnsysQ3DSetup(HfssSetup):
    """
    Q3D setup: wraps a Q3D "Matrix" solution setup.

    Exposes convergence properties, the adaptive frequency, and export /
    parsing of the capacitance (and conductance) matrices.
    """
    prop_tab = "CG"
    max_pass = make_int_prop("Max. Number of Passes")
    min_pass = make_int_prop("Min. Number of Passes")
    pct_error = make_int_prop("Percent Error")
    frequency = make_str_prop("Adaptive Freq", 'General')  # e.g., '5GHz'
    n_modes = 0  # for compatibility with eigenmode

    def get_frequency_Hz(self):
        # `frequency` is a unit string such as '5GHz'; convert with pint
        # and truncate to an integer number of Hz.
        return int(ureg(self.frequency).to('Hz').magnitude)

    def get_solutions(self):
        # Q3D-specific solutions wrapper.
        return HfssQ3DDesignSolutions(self, self.parent._solutions)

    def get_convergence(self, variation=""):
        '''
        Returns df
                   # Triangle   Delta %
            Pass
            1          164       NaN

        Delegates to the base-class exporter with the 'CG' report prefix.
        '''
        return super().get_convergence(variation, pre_fn_args=['CG'])

    def get_matrix(
            self,
            variation='',
            pass_number=0,
            frequency=None,
            MatrixType='Maxwell',
            solution_kind='LastAdaptive',  # AdaptivePass
            ACPlusDCResistance=False,
            soln_type="C"):
        '''
        Export and load the Q3D matrix for a variation.

        Arguments:
        -----------
            variation: an empty string returns nominal variation.
                       Otherwise need the list
            frequency: in Hz
            soln_type = "C", "AC RL" and "DC RL"
            solution_kind = 'LastAdaptive' # AdaptivePass

        Internals:
        -----------
            Uses self.solution_name = Setup1 : LastAdaptive

        Returns:
        ---------------------
            df_cmat, user_units, (df_cond, units_cond), design_variation
        '''
        if frequency is None:
            # Default to the setup's adaptive frequency.
            frequency = self.get_frequency_Hz()
        temp = tempfile.NamedTemporaryFile()
        temp.close()
        path = temp.name + '.txt'
        # COM signature, for reference:
        # <FileName>, <SolnType>, <DesignVariationKey>, <Solution>, <Matrix>, <ResUnit>,
        # <IndUnit>, <CapUnit>, <CondUnit>, <Frequency>, <MatrixType>, <PassNumber>,
        # <ACPlusDCResistance>
        logger.info(f'Exporting matrix data to ({path}, {soln_type}, {variation}, '
                    f'{self.name}:{solution_kind}, '
                    '"Original", "ohm", "nH", "fF", '
                    f'"mSie", {frequency}, {MatrixType}, '
                    f'{pass_number}, {ACPlusDCResistance}')
        self.parent._design.ExportMatrixData(path, soln_type, variation,
                                             f'{self.name}:{solution_kind}',
                                             "Original", "ohm", "nH", "fF",
                                             "mSie", frequency, MatrixType,
                                             pass_number, ACPlusDCResistance)
        df_cmat, user_units, (df_cond, units_cond), design_variation = \
            self.load_q3d_matrix(path)
        return df_cmat, user_units, (df_cond, units_cond), design_variation

    @staticmethod
    def _readin_Q3D_matrix(path: str):
        """
        Read in the txt file created from q3d export
        and output the capacitance matrix

        When exporting pick "save as type: data table"

        See Zlatko

        RETURNS: Dataframe

        Example file:
        ```
        DesignVariation:$BBoxL='650um' $boxH='750um' $boxL='2mm' $QubitGap='30um' \
            $QubitH='90um' \$QubitL='450um' Lj_1='13nH'
        Setup1:LastAdaptive
        Problem Type:C
        C Units:farad, G Units:mSie
        Reduce Matrix:Original
        Frequency: 5.5E+09 Hz

        Capacitance Matrix
            ground_plane    Q1_bus_Q0_connector_pad ...
            ground_plane    2.8829E-13  -3.254E-14  ...
            ...

        Conductance Matrix
            ground_plane    Q1_bus_Q0_connector_pad ...
            ground_plane    0   0 ...
            ...
        ```
        """
        text = Path(path).read_text()
        # The capacitance table sits between the two section headers.
        s1 = text.split('Capacitance Matrix')
        assert len(s1) == 2, "Could not split text to `Capacitance Matrix`"
        s2 = s1[1].split('Conductance Matrix')
        df_cmat = pd.read_csv(io.StringIO(s2[0].strip()),
                              delim_whitespace=True,
                              skipinitialspace=True,
                              index_col=0)
        units = re.findall(r'C Units:(.*?),', text)[0]
        if len(s2) > 1:
            # A conductance table follows the capacitance table.
            df_cond = pd.read_csv(io.StringIO(s2[1].strip()),
                                  delim_whitespace=True,
                                  skipinitialspace=True,
                                  index_col=0)
            units_cond = re.findall(r'G Units:(.*?)\n', text)[0]
        else:
            df_cond = None
        var = re.findall(r'DesignVariation:(.*?)\n',
                         text)  # this changed circa v2020
        if len(var) < 1:  # didnt find
            var = re.findall(r'Design Variation:(.*?)\n', text)
            if len(var) < 1:  # didnt find
                # May not be present if there are no design variations to begin
                # with and no variables in the design.
                pass  #logger.error(f'Failed to parse Q3D matrix Design Variation:\nFile:{path}\nText:{text}')
                var = ['']
        design_variation = var[0]
        return df_cmat, units, design_variation, df_cond, units_cond

    @staticmethod
    def load_q3d_matrix(path, user_units='fF'):
        """Load Q3D capacitance file exported as Maxwell matrix.
        Exports also conductance conductance.
        Units are read in automatically and converted to user units.

        Arguments:
            path {[str or Path]} -- [path to file text with matrix]

        Returns:
            df_cmat, user_units, (df_cond, units_cond), design_variation

            dataframes: df_cmat, df_cond
        """
        df_cmat, Cunits, design_variation, df_cond, units_cond = AnsysQ3DSetup._readin_Q3D_matrix(
            path)
        # Unit convert: scale the capacitance matrix from file units to
        # the requested user units via pint.
        q = ureg.parse_expression(Cunits).to(user_units)
        df_cmat = df_cmat * q.magnitude  # scale to user units
        #print("Imported capacitance matrix with UNITS: [%s] now converted to USER UNITS:[%s] from file:\n\t%s"%(Cunits, user_units, path))
        return df_cmat, user_units, (df_cond, units_cond), design_variation
class HfssDesignSolutions(COMWrapper):
    """Base wrapper for a design's solutions COM module."""

    def __init__(self, setup, solutions):
        '''
        :type setup: HfssSetup
        '''
        super(HfssDesignSolutions, self).__init__()
        self.parent = setup
        self._solutions = solutions
        self._ansys_version = self.parent._ansys_version

    def get_valid_solution_list(self):
        '''
        Gets all available solution names that exist in a design.
        Return example:
            ('Setup1 : AdaptivePass', 'Setup1 : LastAdaptive')
        '''
        return self._solutions.GetValidISolutionList()

    def list_variations(self, setup_name: str = None):
        """
        Get a list of solved variations.

        Args:
            setup_name(str) : Example name ("Setup1 : LastAdaptive")
                Defaults to None (this setup's solution name).

        Returns:
            An array of strings corresponding to solved variations, e.g.

            .. code-block:: python

                ("Cj='2fF' Lj='12nH'",
                 "Cj='2fF' Lj='12.5nH'")
        """
        target = setup_name
        if target is None:
            target = str(self.parent.solution_name)
        return self._solutions.ListVariations(target)
class HfssEMDesignSolutions(HfssDesignSolutions):
    # Eigenmode-specific solutions wrapper: eigenmode export, field-source
    # selection (set_mode), field existence checks, and report creation.

    def eigenmodes(self, lv=""):
        '''
        Returns the eigenmode data of freq and kappa/2p
        '''
        fn = tempfile.mktemp()
        #print(self.parent.solution_name, lv, fn)
        self._solutions.ExportEigenmodes(self.parent.solution_name, lv, fn)
        data = np.genfromtxt(fn, dtype='str')
        # Update to Py 3:
        #       np.loadtxt and np.genfromtxt operate in byte mode, which is the default string type in Python 2.
        #       But Python 3 uses unicode, and marks bytestrings with this b.
        #       getting around the very annoying fact that
        if np.size(np.shape(data)) == 1:
            # in Python a 1D array does not have shape (N,1)
            data = np.array([data])
        else:  # but rather (N,) ....
            pass
        if np.size(data[0, :]) == 6:  # checking if values for Q were saved
            # eigvalue=(omega-i*kappa/2)/2pi
            kappa_over_2pis = [2 * float(ii) for ii in data[:, 3]]
            # so kappa/2pi = 2*Im(eigvalue)
        else:
            kappa_over_2pis = None
        # print(data[:,1])
        freqs = [float(ii) for ii in data[:, 1]]
        return freqs, kappa_over_2pis

    # NOTE(review): the string below is a stray class-body expression (a
    # no-op), apparently notes on exporting eigenmodes vs pass number.
    # Kept verbatim.
    """
    Export eigenmodes vs pass number
    Did not figure out how to set pass number in a hurry.

        import tempfile
        self = epr_hfss.solutions
        '''
        HFSS: Exports a tab delimited table of Eigenmodes in HFSS. Not in HFSS-IE.
        <setupName> <solutionName> <DesignVariationKey>
        <filename>
        Return Value: None
        Parameters:
        <SolutionName>
        Type: <string>
        Name of the solutions within the solution setup.
        <DesignVariationKey>
        Type: <string>
        Design variation string.
        '''
        setup = self.parent
        fn = tempfile.mktemp()
        variation_list=''
        soln_name = f'{setup.name} : AdaptivePas'
        available_solns = self._solutions.GetValidISolutionList()
        if not(soln_name in available_solns):
            logger.error(f'ERROR Tried to export freq vs pass number, but solution `{soln_name}` was not in available `{available_solns}`. Returning []')
            #return []
        self._solutions.ExportEigenmodes(soln_name, ['Pass:=5'], fn) # ['Pass:=5'] fails can do with ''
    """

    def set_mode(self, n, phase=0, FieldType='EigenStoredEnergy'):
        '''
        Indicates which source excitations should be used for fields post processing.
        HFSS>Fields>Edit Sources

        Mode count starts at 1

        Amplitude is set to 1

        No error is thrown if a number exceeding number of modes is set

        FieldType -- EigenStoredEnergy or EigenPeakElecticField
        '''
        n_modes = int(self.parent.n_modes)
        # Validate the 1-based mode index before touching the COM API.
        if n < 1:
            err = f'ERROR: You tried to set a mode < 1. {n}/{n_modes}'
            logger.error(err)
            raise Exception(err)
        if n > n_modes:
            err = f'ERROR: You tried to set a mode > number of modes {n}/{n_modes}'
            logger.error(err)
            raise Exception(err)
        if self._ansys_version >= '2019':
            # THIS WORKS FOR v2019R2
            self._solutions.EditSources(
                [["FieldType:=", "EigenPeakElectricField"],
                 [
                     "Name:=", "Modes", "Magnitudes:=",
                     ["1" if i + 1 == n else "0" for i in range(n_modes)],
                     "Phases:=",
                     [
                         str(phase) if i + 1 == n else "0"
                         for i in range(n_modes)
                     ]
                 ]])
        else:
            # The syntax has changed for AEDT 18.2.
            # see https://ansyshelp.ansys.com/account/secured?returnurl=/Views/Secured/Electronics/v195//Subsystems/HFSS/Subsystems/HFSS%20Scripting/HFSS%20Scripting.htm
            self._solutions.EditSources(
                "EigenStoredEnergy", ["NAME:SourceNames", "EigenMode"],
                ["NAME:Modes", n_modes], ["NAME:Magnitudes"] +
                [1 if i + 1 == n else 0
                 for i in range(n_modes)], ["NAME:Phases"] +
                [phase if i + 1 == n else 0 for i in range(n_modes)],
                ["NAME:Terminated"], ["NAME:Impedances"])

    def has_fields(self, variation_string=None):
        '''
        Determine if fields exist for a particular solution.

        variation_string : str | None
            This must the string that describes the variation in hFSS, not 0 or 1, but
            the string of variables, such as
                "Cj='2fF' Lj='12.75nH'"
            If None, gets the nominal variation
        '''
        if variation_string is None:
            variation_string = self.parent.parent.get_nominal_variation()
        # HasFields returns a COM truthy value; normalize to bool.
        return bool(
            self._solutions.HasFields(self.parent.solution_name,
                                      variation_string))

    def create_report(self,
                      plot_name,
                      xcomp,
                      ycomp,
                      params,
                      pass_name='LastAdaptive'):
        '''
        Create a rectangular-plot report of eigenmode parameters.

        pass_name: AdaptivePass, LastAdaptive

        Example
        -------
        Example plot for a single variation all pass converge of mode freq

        .. code-block:: python

            ycomp = [f"re(Mode({i}))" for i in range(1,1+epr_hfss.n_modes)]
            params = ["Pass:=", ["All"]]+variation
            setup.create_report("Freq. vs. pass", "Pass", ycomp, params, pass_name='AdaptivePass')
        '''
        assert isinstance(ycomp, list)
        assert isinstance(params, list)
        setup = self.parent
        reporter = setup._reporter
        return reporter.CreateReport(
            plot_name, "Eigenmode Parameters", "Rectangular Plot",
            f"{setup.name} : {pass_name}", [], params,
            ["X Component:=", xcomp, "Y Component:=", ycomp], [])
class HfssDMDesignSolutions(HfssDesignSolutions):
    """Driven-modal design solutions; no specialization beyond the base class."""
class HfssDTDesignSolutions(HfssDesignSolutions):
    """Driven-terminal design solutions; no specialization beyond the base class."""
class HfssQ3DDesignSolutions(HfssDesignSolutions):
    """Q3D design solutions; no specialization beyond the base class."""
class HfssFrequencySweep(COMWrapper):
    """Wrapper for a frequency sweep within an HFSS setup.

    Fix vs. original: in ``get_network_data`` the inner loop variable was
    named ``list``, shadowing the builtin; renamed to ``pairs``.
    """
    prop_tab = "HfssTab"
    start_freq = make_float_prop("Start")
    stop_freq = make_float_prop("Stop")
    step_size = make_float_prop("Step Size")
    count = make_float_prop("Count")
    sweep_type = make_str_prop("Type")

    def __init__(self, setup, name):
        """
        :type setup: HfssSetup
        :type name: str
        """
        super(HfssFrequencySweep, self).__init__()
        self.parent = setup
        self.name = name
        self.solution_name = self.parent.name + " : " + name
        self.prop_holder = self.parent.prop_holder
        self.prop_server = self.parent.prop_server + ":" + name
        self._ansys_version = self.parent._ansys_version

    def analyze_sweep(self):
        """Run (blocking) the analysis for this sweep only."""
        self.parent.analyze(self.solution_name)

    def get_network_data(self, formats):
        """Export and return network data for the requested formats.

        Args:
            formats: e.g. "S21,Y11" or ["S21", "Y11"]; each entry is a
                letter in {S, Y, Z} followed by two single-digit port
                indices.

        Returns:
            (freq, ret): the frequency axis and a list of complex arrays,
            one per requested format (same order as *formats*).
        """
        if isinstance(formats, str):
            formats = formats.split(",")
        formats = [f.upper() for f in formats]
        # Group requested (i, j) port pairs by data type S/Y/Z.
        fmts_lists = {'S': [], 'Y': [], 'Z': []}
        for f in formats:
            fmts_lists[f[0]].append((int(f[1]), int(f[2])))
        ret = [None] * len(formats)
        freq = None
        for data_type, pairs in fmts_lists.items():
            if pairs:
                fn = tempfile.mktemp()
                self.parent._solutions.ExportNetworkData(
                    [], self.parent.name + " : " + self.name, 2, fn, ["all"],
                    False, 0, data_type, -1, 1, 15)
                with open(fn) as f:
                    f.readline()
                    colnames = f.readline().split()
                array = np.loadtxt(fn, skiprows=2)
                # WARNING for python 3 probably need to use genfromtxt
                if freq is None:
                    freq = array[:, 0]
                # TODO: If Ansys version is 2019, use 'Real' and 'Imag'
                # in place of 'Re' and 'Im
                for i, j in pairs:
                    real_idx = colnames.index("%s[%d,%d]_Re" %
                                              (data_type, i, j))
                    imag_idx = colnames.index("%s[%d,%d]_Im" %
                                              (data_type, i, j))
                    c_arr = array[:, real_idx] + 1j * array[:, imag_idx]
                    ret[formats.index("%s%d%d" % (data_type, i, j))] = c_arr
        return freq, ret

    def create_report(self, name, expr):
        """Create a rectangular plot of *expr* vs frequency for this sweep.

        Returns an HfssReport wrapper for the (possibly renamed) report.
        """
        existing = self.parent._reporter.GetAllReportNames()
        name = increment_name(name, existing)
        var_names = self.parent.parent.get_variable_names()
        var_args = sum([["%s:=" % v_name, ["Nominal"]]
                        for v_name in var_names], [])
        self.parent._reporter.CreateReport(
            name, "Modal Solution Data", "Rectangular Plot",
            self.solution_name, ["Domain:=", "Sweep"],
            ["Freq:=", ["All"]] + var_args,
            ["X Component:=", "Freq", "Y Component:=", [expr]], [])
        return HfssReport(self.parent.parent, name)

    def get_report_arrays(self, expr):
        """Create a temporary report for *expr* and return its data arrays."""
        r = self.create_report("Temp", expr)
        return r.get_arrays()
class HfssReport(COMWrapper):
    """Wrapper for a named report within an HFSS design."""

    def __init__(self, design, name):
        """
        :type design: HfssDesign
        :type name: str
        """
        super(HfssReport, self).__init__()
        self.parent_design = design
        self.name = name

    def export_to_file(self, filename):
        """Export this report's data to *filename* (resolved to an absolute path)."""
        path = os.path.abspath(filename)
        self.parent_design._reporter.ExportToFile(self.name, path)

    def get_arrays(self):
        """Export the report to a temporary CSV and load it as transposed arrays."""
        csv_path = tempfile.mktemp(suffix=".csv")
        self.export_to_file(csv_path)
        # warning for python 3 probably need to use genfromtxt
        return np.loadtxt(csv_path, skiprows=1, delimiter=',').transpose()
class Optimetrics(COMWrapper):
"""
Optimetrics script commands executed by the "Optimetrics" module.
Example use:
.. code-block:: python
opti = Optimetrics(pinfo.design)
names = opti.get_setup_names()
print('Names of optimetrics: ', names)
opti.solve_setup(names[0])
Note that running optimetrics requires the license for Optimetrics by Ansys.
"""
    def __init__(self, design):
        """Bind to the Optimetrics COM module of *design*.

        Args:
            design: parent design wrapper exposing `_optimetrics`.
        """
        super(Optimetrics, self).__init__()
        self.design = design  # parent
        self._optimetrics = self.design._optimetrics  # <COMObject GetModule>
        # Cached by get_setup_names(); None until first queried.
        self.setup_names = None
def get_setup_names(self):
"""
Return list of Optimetrics setup names
"""
self.setup_names = list(self._optimetrics.GetSetupNames())
return self.setup_names.copy()
def solve_setup(self, setup_name: str):
"""
Solves the specified Optimetrics setup.
Corresponds to: Right-click the setup in the project tree, and then click
Analyze on the shortcut menu.
setup_name (str) : name of setup, should be in get_setup_names
Blocks execution until ready to use.
Note that this requires the license for Optimetrics by Ansys.
"""
return self._optimetrics.SolveSetup(setup_name)
def create_setup(self,
variable,
swp_params,
name="ParametricSetup1",
swp_type='linear_step',
setup_name=None,
save_fields=True,
copy_mesh=True,
solve_with_copied_mesh_only=True,
setup_type='parametric'):
"""
Inserts a new parametric setup of one variable. Either with sweep
definition or from file.
*Synchronized* sweeps (more than one variable changing at once)
can be implemented by giving a list of variables to ``variable``
and corresponding lists to ``swp_params`` and ``swp_type``.
The lengths of the sweep types should match (excluding single value).
Corresponds to ui access:
Right-click the Optimetrics folder in the project tree, and then click
Add> Parametric on the shortcut menu.
Ansys provides six sweep definitions types specified using the swp_type
variable.
Sweep type definitions:
- 'single_value'
Specify a single value for the sweep definition.
- 'linear_step'
Specify a linear range of values with a constant step size.
- 'linear_count'
Specify a linear range of values and the number, or count of points
within this range.
- 'decade_count'
Specify a logarithmic (base 10) series of values, and the number of
values to calculate in each decade.
- 'octave_count'
Specify a logarithmic (base 2) series of values, and the number of
values to calculate in each octave.
- 'exponential_count'
Specify an exponential (base e) series of values, and the number of
values to calculate.
For swp_type='single_value' swp_params is the single value.
For swp_type='linear_step' swp_params is start, stop, step:
swp_params = ("12.8nH", "13.6nH", "0.2nH")
All other types swp_params is start, stop, count:
swp_params = ("12.8nH", "13.6nH", 4)
The definition of count varies amongst the available types.
For Decade count and Octave count, the Count value specifies the number
of points to calculate in every decade or octave. For Exponential count,
the Count value is the total number of points. The total number of
points includes the start and stop values.
For parametric from file, setup_type='parametric_file', pass in a file
name and path to swp_params like "C:\\test.csv" or "C:\\test.txt" for
example.
Example csv formatting:
*,Lj_qubit
1,12.2nH
2,9.7nH
3,10.2nH
See Ansys documentation for additional formatting instructions.
"""
setup_name = setup_name or self.design.get_setup_names()[0]
print(
f"Inserting optimetrics setup `{name}` for simulation setup: `{setup_name}`"
)
if setup_type == 'parametric':
type_map = {
'linear_count': 'LINC',
'decade_count': 'DEC',
'octave_count': 'OCT',
'exponential_count': 'ESTP',
}
valid_swp_types = {'single_value', 'linear_step'} | set(type_map.keys())
if isinstance(variable, Iterable) and not isinstance(variable, str):
# synchronized sweep, check that data is in correct format
assert len(swp_params) == len(swp_type) == len(variable), \
'Incorrect swp_params or swp_type format for synchronised sweep.'
synchronize = True
else:
# convert all to lists as we can reuse same code for synchronized
swp_type = [swp_type]
swp_params = [swp_params]
variable = [variable]
synchronize = False
if any(e not in valid_swp_types for e in swp_type):
raise NotImplementedError()
else:
swp_str = list()
for i, e in enumerate(swp_type):
if e == 'single_value':
# Single takes string of single variable no swp_type_name
swp_str.append(f"{swp_params[i]}")
else:
# correct number of inputs
assert len(swp_params[i]) == 3, "Incorrect number of sweep parameters."
# Not checking for compatible unit types
if e == 'linear_step':
swp_type_name = "LIN"
else:
# counts needs to be an integer number
assert isinstance(swp_params[i][2], int), "Count must be integer."
swp_type_name = type_map[e]
# prepare the string to pass to Ansys
swp_str.append(f"{swp_type_name} {swp_params[i][0]} {swp_params[i][1]} {swp_params[i][2]}")
self._optimetrics.InsertSetup("OptiParametric", [
f"NAME:{name}", "IsEnabled:=", True,
[
"NAME:ProdOptiSetupDataV2",
"SaveFields:=",
save_fields,
"CopyMesh:=",
copy_mesh,
"SolveWithCopiedMeshOnly:=",
solve_with_copied_mesh_only,
], ["NAME:StartingPoint"], "Sim. Setups:=", [setup_name],
[
"NAME:Sweeps",
*[[
"NAME:SweepDefinition", "Variable:=", var_name, "Data:=",
swp, "OffsetF1:=", False, "Synchronize:=", int(synchronize)
] for var_name, swp in zip(variable, swp_str)]
], ["NAME:Sweep Operations"], ["NAME:Goals"]
])
elif setup_type == 'parametric_file':
# Uses the file name as the swp_params
filename = swp_params
self._optimetrics.ImportSetup("OptiParametric",
[
f"NAME:{name}",
filename,
])
self._optimetrics.EditSetup(f"{name}",
[
f"NAME:{name}",
[
"NAME:ProdOptiSetupDataV2",
"SaveFields:=" , save_fields,
"CopyMesh:=" , copy_mesh,
"SolveWithCopiedMeshOnly:=", solve_with_copied_mesh_only,
],
])
else:
raise NotImplementedError()
class HfssModeler(COMWrapper):
    """Wraps a design's 3-D modeler (oEditor), boundary module and mesh
    module, exposing draw / boolean / boundary-assignment helpers."""

    def __init__(self, design, modeler, boundaries, mesh):
        """
        :type design: HfssDesign
        """
        super(HfssModeler, self).__init__()
        self.parent = design
        self._modeler = modeler
        self._boundaries = boundaries
        self._mesh = mesh  # Mesh module

    def set_units(self, units, rescale=True):
        """Set the model units (e.g. 'mm'); rescale converts existing geometry."""
        self._modeler.SetModelUnits(
            ["NAME:Units Parameter", "Units:=", units, "Rescale:=", rescale])

    def get_units(self):
        """Get the model units.
        Return Value: A string contains current model units. """
        return str(self._modeler.GetModelUnits())

    def get_all_properties(self, obj_name, PropTab='Geometry3DAttributeTab'):
        '''
        Get all properties for modeler PropTab, PropServer
        '''
        PropServer = obj_name
        properties = {}
        for key in self._modeler.GetProperties(PropTab, PropServer):
            properties[key] = self._modeler.GetPropertyValue(
                PropTab, PropServer, key)
        return properties

    def _attributes_array(
            self,
            name=None,
            nonmodel=False,
            wireframe=False,
            color=None,
            transparency=0.9,
            material=None,  # str
            solve_inside=None,  # bool
            coordinate_system="Global"):
        """Build the "NAME:Attributes" array shared by all draw commands."""
        arr = ["NAME:Attributes", "PartCoordinateSystem:=", coordinate_system]
        if name is not None:
            arr.extend(["Name:=", name])
        if nonmodel or wireframe:
            flags = 'NonModel' if nonmodel else ''  # can be done smarter
            if wireframe:
                flags += '#' if len(flags) > 0 else ''
                flags += 'Wireframe'
            arr.extend(["Flags:=", flags])
        if color is not None:
            arr.extend(["Color:=", "(%d %d %d)" % color])
        if transparency is not None:
            arr.extend(["Transparency:=", transparency])
        if material is not None:
            arr.extend(["MaterialName:=", material])
        if solve_inside is not None:
            arr.extend(["SolveInside:=", solve_inside])
        return arr

    def _selections_array(self, *names):
        """Build the "NAME:Selections" array from object names."""
        return ["NAME:Selections", "Selections:=", ",".join(names)]

    def mesh_length(self,
                    name_mesh,
                    objects: list,
                    MaxLength='0.1mm',
                    **kwargs):
        '''
        "RefineInside:="        , False,
        "Enabled:="             , True,
        "RestrictElem:="        , False,
        "NumMaxElem:="          , "1000",
        "RestrictLength:="      , True,
        "MaxLength:="           , "0.1mm"

        Example use:
            modeler.assign_mesh_length('mesh2', ["Q1_mesh"], MaxLength=0.1)
        '''
        assert isinstance(objects, list)
        arr = [
            f"NAME:{name_mesh}", "Objects:=", objects, 'MaxLength:=', MaxLength
        ]
        ops = [
            'RefineInside', 'Enabled', 'RestrictElem', 'NumMaxElem',
            'RestrictLength'
        ]
        for key, val in kwargs.items():
            if key in ops:
                arr += [key + ':=', str(val)]
            else:
                # Bug fix: f-prefix was missing, so the offending key
                # name was never interpolated into the error message.
                logger.error(f'KEY `{key}` NOT IN ops!')
        self._mesh.AssignLengthOp(arr)

    def mesh_reassign(self, name_mesh, objects: list):
        """Re-point an existing length-based mesh operation at new objects."""
        assert isinstance(objects, list)
        self._mesh.ReassignOp(name_mesh, ["Objects:=", objects])

    def mesh_get_names(self, kind="Length Based"):
        ''' "Length Based", "Skin Depth Based", ...'''
        return list(self._mesh.GetOperationNames(kind))

    def mesh_get_all_props(self, mesh_name):
        """Return a dict of all properties of the named mesh operation."""
        # TODO: make mesh its own class with properties
        prop_tab = 'MeshSetupTab'
        prop_server = f'MeshSetup:{mesh_name}'
        prop_names = self.parent._design.GetProperties('MeshSetupTab',
                                                       prop_server)
        dic = {}
        for name in prop_names:
            dic[name] = self._modeler.GetPropertyValue(prop_tab, prop_server,
                                                       name)
        return dic

    def draw_box_corner(self, pos, size, **kwargs):
        """Draw a 3-D box with one corner at *pos* and extents *size*."""
        name = self._modeler.CreateBox([
            "NAME:BoxParameters", "XPosition:=",
            str(pos[0]), "YPosition:=",
            str(pos[1]), "ZPosition:=",
            str(pos[2]), "XSize:=",
            str(size[0]), "YSize:=",
            str(size[1]), "ZSize:=",
            str(size[2])
        ], self._attributes_array(**kwargs))
        return Box(name, self, pos, size)

    def draw_box_center(self, pos, size, **kwargs):
        """
        Creates a 3-D box centered at pos [x0, y0, z0], with width
        size [xwidth, ywidth, zwidth] along each respective direction.

        Args:
            pos (list): Coordinates of center of box, [x0, y0, z0]
            size (list): Width of box along each direction, [xwidth, ywidth, zwidth]
        """
        corner_pos = [var(p) - var(s) / 2 for p, s in zip(pos, size)]
        return self.draw_box_corner(corner_pos, size, **kwargs)

    def draw_polyline(self, points, closed=True, **kwargs):
        """
        Draws a closed or open polyline.
        If closed = True, then will make into a sheet.
        points : need to be in the correct units

        For optional arguments, see _attributes_array; these include:
        ```
            nonmodel=False,
            wireframe=False,
            color=None,
            transparency=0.9,
            material=None, # str
            solve_inside=None, # bool
            coordinate_system="Global"
        ```
        """
        pointsStr = ["NAME:PolylinePoints"]
        indexsStr = ["NAME:PolylineSegments"]
        for ii, point in enumerate(points):
            pointsStr.append([
                "NAME:PLPoint", "X:=",
                str(point[0]), "Y:=",
                str(point[1]), "Z:=",
                str(point[2])
            ])
            indexsStr.append([
                "NAME:PLSegment", "SegmentType:=", "Line", "StartIndex:=", ii,
                "NoOfPoints:=", 2
            ])
        if closed:
            # Repeat the first point to close the loop.
            pointsStr.append([
                "NAME:PLPoint", "X:=",
                str(points[0][0]), "Y:=",
                str(points[0][1]), "Z:=",
                str(points[0][2])
            ])
            params_closed = [
                "IsPolylineCovered:=", True, "IsPolylineClosed:=", True
            ]
        else:
            # Drop the dangling last segment for an open polyline.
            indexsStr = indexsStr[:-1]
            params_closed = [
                "IsPolylineCovered:=", True, "IsPolylineClosed:=", False
            ]
        name = self._modeler.CreatePolyline(
            ["NAME:PolylineParameters", *params_closed, pointsStr, indexsStr],
            self._attributes_array(**kwargs))
        if closed:
            return Polyline(name, self, points)
        else:
            return OpenPolyline(name, self, points)

    def draw_rect_corner(self, pos, x_size=0, y_size=0, z_size=0, **kwargs):
        """Draw a rectangle with one corner at *pos*; exactly one of the
        sizes must be 0 and selects the normal axis."""
        size = [x_size, y_size, z_size]
        assert 0 in size
        axis = "XYZ"[size.index(0)]
        w_idx, h_idx = {'X': (1, 2), 'Y': (2, 0), 'Z': (0, 1)}[axis]
        name = self._modeler.CreateRectangle([
            "NAME:RectangleParameters", "XStart:=",
            str(pos[0]), "YStart:=",
            str(pos[1]), "ZStart:=",
            str(pos[2]), "Width:=",
            str(size[w_idx]), "Height:=",
            str(size[h_idx]), "WhichAxis:=", axis
        ], self._attributes_array(**kwargs))
        return Rect(name, self, pos, size)

    def draw_rect_center(self, pos, x_size=0, y_size=0, z_size=0, **kwargs):
        """
        Creates a rectangle centered at pos [x0, y0, z0].
        It is assumed that the rectangle lies parallel to the xy, yz, or xz plane.
        User inputs 2 of 3 of the following: x_size, y_size, and z_size
        depending on how the rectangle is oriented.

        Args:
            pos (list): Coordinates of rectangle center, [x0, y0, z0]
            x_size (int, optional): Width along the x direction. Defaults to 0.
            y_size (int, optional): Width along the y direction. Defaults to 0.
            z_size (int, optional): Width along the z direction]. Defaults to 0.
        """
        corner_pos = [
            var(p) - var(s) / 2. for p, s in zip(pos, [x_size, y_size, z_size])
        ]
        return self.draw_rect_corner(corner_pos, x_size, y_size, z_size,
                                     **kwargs)

    def draw_cylinder(self, pos, radius, height, axis, **kwargs):
        """Draw a cylinder whose base center is at *pos*, extending *height*
        along *axis* ('X'|'Y'|'Z'). Returns the raw object name."""
        assert axis in "XYZ"
        return self._modeler.CreateCylinder([
            "NAME:CylinderParameters", "XCenter:=", pos[0], "YCenter:=",
            pos[1], "ZCenter:=", pos[2], "Radius:=", radius, "Height:=",
            height, "WhichAxis:=", axis, "NumSides:=", 0
        ], self._attributes_array(**kwargs))

    def draw_cylinder_center(self, pos, radius, height, axis, **kwargs):
        """Draw a cylinder centered (along *axis*) at *pos*."""
        axis_idx = ["X", "Y", "Z"].index(axis)
        edge_pos = copy(pos)
        edge_pos[axis_idx] = var(pos[axis_idx]) - var(height) / 2
        return self.draw_cylinder(edge_pos, radius, height, axis, **kwargs)

    def draw_wirebond(self,
                      pos,
                      ori,
                      width,
                      height='0.1mm',
                      z=0,
                      wire_diameter="0.02mm",
                      NumSides=6,
                      **kwargs):
        '''
        Args:
            pos: 2D position vector (specify center point)
            ori: should be normed
            z: z position

        # TODO create Wirebond class
        position is the origin of one point
        ori is the orientation vector, which gets normalized
        '''
        p = np.array(pos)
        o = np.array(ori)
        pad1 = p - o * width / 2.  # start pad sits half a width behind center
        name = self._modeler.CreateBondwire([
            "NAME:BondwireParameters", "WireType:=", "Low", "WireDiameter:=",
            wire_diameter, "NumSides:=", NumSides, "XPadPos:=", pad1[0],
            "YPadPos:=", pad1[1], "ZPadPos:=", z, "XDir:=", ori[0], "YDir:=",
            ori[1], "ZDir:=", 0, "Distance:=", width, "h1:=", height, "h2:=",
            "0mm", "alpha:=", "80deg", "beta:=", "80deg", "WhichAxis:=", "Z"
        ], self._attributes_array(**kwargs))
        return name

    def draw_region(self,
                    Padding,
                    PaddingType="Percentage Offset",
                    name='Region',
                    material="\"vacuum\""):
        """
        PaddingType : 'Absolute Offset', "Percentage Offset"
        """
        # TODO: Add option to modify these
        RegionAttributes = [
            "NAME:Attributes", "Name:=", name, "Flags:=", "Wireframe#",
            "Color:=", "(255 0 0)", "Transparency:=", 1,
            "PartCoordinateSystem:=", "Global", "UDMId:=", "",
            "IsAlwaysHiden:=", False, "MaterialValue:=", material,
            "SolveInside:=", True
        ]
        self._modeler.CreateRegion([
            "NAME:RegionParameters", "+XPaddingType:=", PaddingType,
            "+XPadding:=", Padding[0][0], "-XPaddingType:=", PaddingType,
            "-XPadding:=", Padding[0][1], "+YPaddingType:=", PaddingType,
            "+YPadding:=", Padding[1][0], "-YPaddingType:=", PaddingType,
            "-YPadding:=", Padding[1][1], "+ZPaddingType:=", PaddingType,
            "+ZPadding:=", Padding[2][0], "-ZPaddingType:=", PaddingType,
            "-ZPadding:=", Padding[2][1]
        ], RegionAttributes)

    def unite(self, names, keep_originals=False):
        """Boolean-unite the named objects; the first name survives."""
        self._modeler.Unite(
            self._selections_array(*names),
            ["NAME:UniteParameters", "KeepOriginals:=", keep_originals])
        return names[0]

    def intersect(self, names, keep_originals=False):
        """Boolean-intersect the named objects; the first name survives."""
        self._modeler.Intersect(
            self._selections_array(*names),
            ["NAME:IntersectParameters", "KeepOriginals:=", keep_originals])
        return names[0]

    def translate(self, name, vector):
        """Move object *name* by *vector* (3 components)."""
        self._modeler.Move(self._selections_array(name), [
            "NAME:TranslateParameters", "TranslateVectorX:=", vector[0],
            "TranslateVectorY:=", vector[1], "TranslateVectorZ:=", vector[2]
        ])

    def get_boundary_assignment(self, boundary_name: str):
        """Return the object names assigned to the given boundary/excitation."""
        # Gets a list of face IDs associated with the given boundary or excitation assignment.
        objects = self._boundaries.GetBoundaryAssignment(boundary_name)
        # Gets an object name corresponding to the input face id. Returns the name of the corresponding object name.
        objects = [self._modeler.GetObjectNameByFaceID(k) for k in objects]
        return objects

    def append_PerfE_assignment(self, boundary_name: str, object_names: list):
        '''
        This will create a new boundary if need, and will
        otherwise append given names to an existing boundary
        '''
        # enforce
        boundary_name = str(boundary_name)
        if isinstance(object_names, str):
            object_names = [object_names]
        object_names = list(object_names)  # enforce list

        # do actual work
        if boundary_name not in self._boundaries.GetBoundaries(
        ):  # GetBoundariesOfType("Perfect E")
            # need to make a new boundary
            self.assign_perfect_E(object_names, name=boundary_name)
        else:
            # need to append
            objects = list(self.get_boundary_assignment(boundary_name))
            self._boundaries.ReassignBoundary([
                "NAME:" + boundary_name, "Objects:=",
                list(set(objects + object_names))
            ])

    def append_mesh(self, mesh_name: str, object_names: list, old_objs: list,
                    **kwargs):
        '''
        This will create a new mesh op if needed, and will
        otherwise append given names to an existing mesh op.
        old_objs = circ._mesh_assign
        '''
        mesh_name = str(mesh_name)
        if isinstance(object_names, str):
            object_names = [object_names]
        object_names = list(object_names)  # enforce list

        if mesh_name not in self.mesh_get_names(
        ):  # need to make a new mesh op
            objs = object_names
            self.mesh_length(mesh_name, object_names, **kwargs)
        else:  # need to append
            objs = list(set(old_objs + object_names))
            self.mesh_reassign(mesh_name, objs)

        return objs

    def assign_perfect_E(self, obj: List[str], name: str = 'PerfE'):
        '''
        Assign a boundary condition to a list of objects.

        Arg:
            objs (List[str]): Takes a name of an object or a list of object names.
            name(str): If `name` is not specified `PerfE` is appended to object name for the name.
        '''
        if not isinstance(obj, list):
            obj = [obj]
        if name == 'PerfE':
            name = str(obj) + '_' + name
        name = increment_name(name, self._boundaries.GetBoundaries())
        self._boundaries.AssignPerfectE(
            ["NAME:" + name, "Objects:=", obj, "InfGroundPlane:=", False])

    def _make_lumped_rlc(self, r, l, c, start, end, obj_arr, name="LumpRLC"):
        """Assign a lumped RLC boundary along the line *start*->*end*."""
        name = increment_name(name, self._boundaries.GetBoundaries())
        params = ["NAME:" + name]
        params += obj_arr
        params.append([
            "NAME:CurrentLine",
            # for some reason here it seems to switch to use the model units, rather than meters
            "Start:=",
            fix_units(start, unit_assumed=LENGTH_UNIT),
            "End:=",
            fix_units(end, unit_assumed=LENGTH_UNIT)
        ])
        params += [
            "UseResist:=", r != 0, "Resistance:=", r, "UseInduct:=", l != 0,
            "Inductance:=", l, "UseCap:=", c != 0, "Capacitance:=", c
        ]
        self._boundaries.AssignLumpedRLC(params)

    def _make_lumped_port(self,
                          start,
                          end,
                          obj_arr,
                          z0="50ohm",
                          name="LumpPort"):
        """Assign a lumped port with integration line *start*->*end* and
        full resistance *z0*."""
        start = fix_units(start, unit_assumed=LENGTH_UNIT)
        end = fix_units(end, unit_assumed=LENGTH_UNIT)

        name = increment_name(name, self._boundaries.GetBoundaries())
        params = ["NAME:" + name]
        params += obj_arr
        params += [
            "RenormalizeAllTerminals:=", True, "DoDeembed:=", False,
            [
                "NAME:Modes",
                [
                    "NAME:Mode1", "ModeNum:=", 1, "UseIntLine:=", True,
                    ["NAME:IntLine", "Start:=", start, "End:=",
                     end], "CharImp:=", "Zpi", "AlignmentGroup:=", 0,
                    "RenormImp:=", "50ohm"
                ]
            ], "ShowReporterFilter:=", False, "ReporterFilter:=", [True],
            "FullResistance:=", z0, "FullReactance:=", "0ohm"
        ]
        self._boundaries.AssignLumpedPort(params)

    def get_face_ids(self, obj):
        return self._modeler.GetFaceIDs(obj)

    def get_object_name_by_face_id(self, ID: str):
        ''' Gets an object name corresponding to the input face id. '''
        return self._modeler.GetObjectNameByFaceID(ID)

    def get_vertex_ids(self, obj):
        """
        Get the vertex IDs of given an object name
        oVertexIDs = oEditor.GetVertexIDsFromObject("Box1")
        """
        return self._modeler.GetVertexIDsFromObject(obj)

    def eval_expr(self, expr, units="mm"):
        """Evaluate an expression string in the design's variable context."""
        if not isinstance(expr, str):
            return expr
        return self.parent.eval_expr(expr, units)

    def get_objects_in_group(self, group):
        """
        Use: Returns the objects for the specified group.
        Return Value: The objects in the group.
        Parameters: <groupName> Type: <string>
        One of <materialName>, <assignmentName>, "Non Model",
        "Solids", "Unclassified", "Sheets", "Lines"
        """
        if self._modeler:
            return list(self._modeler.GetObjectsInGroup(group))
        else:
            return list()

    def set_working_coordinate_system(self, cs_name="Global"):
        """
        Use: Sets the working coordinate system.
        Command: Modeler>Coordinate System>Set Working CS
        """
        self._modeler.SetWCS([
            "NAME:SetWCS Parameter",
            "Working Coordinate System:=",
            cs_name,
            "RegionDepCSOk:=",
            False  # this one is prob not needed, but comes with the record tool
        ])

    def create_relative_coorinate_system_both(self,
                                              cs_name,
                                              origin=["0um", "0um", "0um"],
                                              XAxisVec=["1um", "0um", "0um"],
                                              YAxisVec=["0um", "1um", "0um"]):
        """
        Use: Creates a relative coordinate system. Only the Name attribute of
        the <AttributesArray> parameter is supported.
        Command: Modeler>Coordinate System>Create>Relative CS->Offset
                 Modeler>Coordinate System>Create>Relative CS->Rotated
                 Modeler>Coordinate System>Create>Relative CS->Both

        Current coordinate system is set right after this.

        cs_name : name of coord. sys
            If the name already exists, then a new coordinate system with _1
            is created.

        origin, XAxisVec, YAxisVec: 3-vectors
            You can also pass in params such as origin = [0,1,0] rather than
            ["0um","1um","0um"], but these will be interpreted in default
            units, so it is safer to be explicit. Explicit over implicit.
        """
        self._modeler.CreateRelativeCS([
            "NAME:RelativeCSParameters", "Mode:=", "Axis/Position",
            "OriginX:=", origin[0], "OriginY:=", origin[1], "OriginZ:=",
            origin[2], "XAxisXvec:=", XAxisVec[0], "XAxisYvec:=", XAxisVec[1],
            "XAxisZvec:=", XAxisVec[2], "YAxisXvec:=", YAxisVec[0],
            # Bug fix: the z-component previously passed YAxisVec[1].
            "YAxisYvec:=", YAxisVec[1], "YAxisZvec:=", YAxisVec[2]
        ], ["NAME:Attributes", "Name:=", cs_name])

    # Correctly-spelled alias kept alongside the historical (typo'd) name.
    create_relative_coordinate_system_both = create_relative_coorinate_system_both

    def subtract(self, blank_name, tool_names, keep_originals=False):
        """Boolean-subtract *tool_names* from *blank_name*."""
        selection_array = [
            "NAME:Selections", "Blank Parts:=", blank_name, "Tool Parts:=",
            ",".join(tool_names)
        ]
        self._modeler.Subtract(
            selection_array,
            ["NAME:UniteParameters", "KeepOriginals:=", keep_originals])
        return blank_name

    def _fillet(self, radius, vertex_index, obj):
        """Fillet one vertex (or a list of vertex indices) of *obj*."""
        vertices = self._modeler.GetVertexIDsFromObject(obj)
        if isinstance(vertex_index, list):
            to_fillet = [int(vertices[v]) for v in vertex_index]
        else:
            to_fillet = [int(vertices[vertex_index])]
        self._modeler.Fillet(["NAME:Selections", "Selections:=", obj], [
            "NAME:Parameters",
            [
                "NAME:FilletParameters", "Edges:=", [], "Vertices:=",
                to_fillet, "Radius:=", radius, "Setback:=", "0mm"
            ]
        ])

    def _fillet_edges(self, radius, edge_index, obj):
        """Fillet one edge (or a list of edge indices) of *obj*."""
        edges = self._modeler.GetEdgeIDsFromObject(obj)
        if isinstance(edge_index, list):
            to_fillet = [int(edges[e]) for e in edge_index]
        else:
            to_fillet = [int(edges[edge_index])]

        self._modeler.Fillet(["NAME:Selections", "Selections:=", obj], [
            "NAME:Parameters",
            [
                "NAME:FilletParameters", "Edges:=", to_fillet, "Vertices:=",
                [], "Radius:=", radius, "Setback:=", "0mm"
            ]
        ])

    def _fillets(self, radius, vertices, obj):
        """Fillet the given vertex IDs (already resolved) of *obj*."""
        self._modeler.Fillet(["NAME:Selections", "Selections:=", obj], [
            "NAME:Parameters",
            [
                "NAME:FilletParameters", "Edges:=", [], "Vertices:=", vertices,
                "Radius:=", radius, "Setback:=", "0mm"
            ]
        ])

    def _sweep_along_path(self, to_sweep, path_obj):
        """
        Adds thickness to path_obj by extending to a new dimension.
        to_sweep acts as a putty knife that determines the thickness.

        Args:
            to_sweep (polyline): Small polyline running perpendicular to path_obj
                whose length is the desired resulting thickness
            path_obj (polyline): Original polyline; want to broaden this
        """
        self.rename_obj(path_obj, str(path_obj) + '_path')
        new_name = self.rename_obj(to_sweep, path_obj)
        names = [path_obj, str(path_obj) + '_path']
        self._modeler.SweepAlongPath(self._selections_array(*names), [
            "NAME:PathSweepParameters", "DraftAngle:=", "0deg", "DraftType:=",
            "Round", "CheckFaceFaceIntersection:=", False, "TwistAngle:=",
            "0deg"
        ])
        return Polyline(new_name, self)

    def sweep_along_vector(self, names, vector):
        """Sweep the named sheets along *vector* to create solids."""
        self._modeler.SweepAlongVector(self._selections_array(*names), [
            "NAME:VectorSweepParameters", "DraftAngle:=", "0deg",
            "DraftType:=", "Round", "CheckFaceFaceIntersection:=", False,
            "SweepVectorX:=", vector[0], "SweepVectorY:=", vector[1],
            "SweepVectorZ:=", vector[2]
        ])

    def rename_obj(self, obj, name):
        """Rename *obj* to *name* and return the new name."""
        self._modeler.ChangeProperty([
            "NAME:AllTabs",
            [
                "NAME:Geometry3DAttributeTab", ["NAME:PropServers",
                                                str(obj)],
                ["NAME:ChangedProps", ["NAME:Name", "Value:=",
                                       str(name)]]
            ]
        ])
        return name
class ModelEntity(str, HfssPropertyObject):
    # Subclasses str: the entity *is* its HFSS object name, augmented with
    # property accessors bound to the modeler's property tabs.
    prop_tab = "Geometry3DCmdTab"
    model_command = None  # set by subclasses, e.g. "CreateBox"
    transparency = make_float_prop("Transparent",
                                   prop_tab="Geometry3DAttributeTab",
                                   prop_server=lambda self: self)
    material = make_str_prop("Material",
                             prop_tab="Geometry3DAttributeTab",
                             prop_server=lambda self: self)
    wireframe = make_float_prop("Display Wireframe",
                                prop_tab="Geometry3DAttributeTab",
                                prop_server=lambda self: self)
    coordinate_system = make_str_prop("Coordinate System")

    def __new__(self, val, *args, **kwargs):
        # str is immutable, so the name must be set at allocation time;
        # extra args are consumed by __init__.
        return str.__new__(self, val)

    def __init__(self, val, modeler):
        """
        :type val: str
        :type modeler: HfssModeler
        """
        super(ModelEntity,
              self).__init__()  # val) #Comment out keyword to match arguments
        self.modeler = modeler
        # e.g. "Box1:CreateBox:1" — the command history node for this entity
        self.prop_server = self + ":" + self.model_command + ":1"
class Box(ModelEntity):
    """A 3-D box entity, tracking its corner, size, center and face IDs."""
    model_command = "CreateBox"
    position = make_float_prop("Position")
    x_size = make_float_prop("XSize")
    y_size = make_float_prop("YSize")
    z_size = make_float_prop("ZSize")

    def __init__(self, name, modeler, corner, size):
        """
        :type name: str
        :type modeler: HfssModeler
        :type corner: [(VariableString, VariableString, VariableString)]
        :param size: [(VariableString, VariableString, VariableString)]
        """
        super(Box, self).__init__(name, modeler)
        self.modeler = modeler
        self.prop_holder = modeler._modeler
        self.corner = corner
        self.size = size
        self.center = [lo + extent / 2 for lo, extent in zip(corner, size)]
        # Face ordering as returned by HFSS for a box primitive.
        face_ids = modeler.get_face_ids(self)
        self.z_back_face = face_ids[0]
        self.z_front_face = face_ids[1]
        self.y_back_face = face_ids[2]
        self.y_front_face = face_ids[4]
        self.x_back_face = face_ids[3]
        self.x_front_face = face_ids[5]
class Rect(ModelEntity):
    """A rectangle lying parallel to one of the principal planes."""
    model_command = "CreateRectangle"

    # TODO: Add a rotated rectangle object.
    # Will need to first create rect, then apply rotate operation.

    def __init__(self, name, modeler, corner, size):
        super(Rect, self).__init__(name, modeler)
        self.prop_holder = modeler._modeler
        self.corner = corner
        self.size = size
        # A zero entry in `size` marks the normal axis; keep that coordinate.
        self.center = [c + s / 2 if s else c for c, s in zip(corner, size)]

    def make_center_line(self, axis):
        '''
        Returns `start` and `end` list of 3 coordinates
        '''
        idx = ["x", "y", "z"].index(axis.lower())
        half_extent = self.size[idx] / 2
        start = list(self.center)
        start[idx] -= half_extent
        start = [self.modeler.eval_expr(coord) for coord in start]
        end = list(self.center)
        end[idx] += half_extent
        end = [self.modeler.eval_expr(coord) for coord in end]
        return start, end

    def make_rlc_boundary(self, axis, r=0, l=0, c=0, name="LumpRLC"):
        """Assign a lumped RLC across this rectangle along *axis*."""
        start, end = self.make_center_line(axis)
        self.modeler._make_lumped_rlc(r, l, c, start, end,
                                      ["Objects:=", [self]], name=name)

    def make_lumped_port(self, axis, z0="50ohm", name="LumpPort"):
        """Assign a lumped port across this rectangle along *axis*."""
        start, end = self.make_center_line(axis)
        self.modeler._make_lumped_port(start, end, ["Objects:=", [self]],
                                       z0=z0, name=name)
class Polyline(ModelEntity):
    '''
    Assume closed polyline, which creates a polygon.
    '''
    model_command = "CreatePolyline"

    def __init__(self, name, modeler, points=None):
        super(Polyline, self).__init__(name, modeler)
        self.prop_holder = modeler._modeler
        if points is not None:
            self.points = points
            self.n_points = len(points)
        else:
            pass
            # TODO: points = collection of points
            # axis = find_orth_axis()

    # TODO: find the plane of the polyline for now, assume Z
    # def find_orth_axis():
    #     X, Y, Z = (True, True, True)
    #     for point in points:
    #         X =

    def unite(self, list_other):
        """Unite this polygon with the polygons in *list_other* and return
        the resulting Polyline."""
        # Bug fix: `self` is a str subclass, so `self + list_other` raised
        # TypeError; HfssModeler.unite expects a list of object names.
        union = self.modeler.unite([self] + list_other)
        return Polyline(union, self.modeler)

    def make_center_line(self, axis):  # Expects to act on a rectangle...
        """Return (start, end) coordinates of a line through the polygon's
        centroid along *axis* ('x' | 'y' | 'z')."""
        # first : find center and size
        center = [0, 0, 0]
        for point in self.points:
            center = [
                center[0] + point[0] / self.n_points,
                center[1] + point[1] / self.n_points,
                center[2] + point[2] / self.n_points
            ]
        size = [
            2 * (center[0] - self.points[0][0]),
            2 * (center[1] - self.points[0][1]),
            # Bug fix: z extent previously used center[1] (copy-paste error).
            2 * (center[2] - self.points[0][2])
        ]
        axis_idx = ["x", "y", "z"].index(axis.lower())
        start = [c for c in center]
        start[axis_idx] -= size[axis_idx] / 2
        # NOTE(review): the visible HfssModeler defines eval_expr, not
        # eval_var_str — confirm eval_var_str exists on the modeler in use.
        start = [
            self.modeler.eval_var_str(s, unit=LENGTH_UNIT) for s in start
        ]  # TODO
        end = [c for c in center]
        end[axis_idx] += size[axis_idx] / 2
        end = [self.modeler.eval_var_str(s, unit=LENGTH_UNIT) for s in end]
        return start, end

    def make_rlc_boundary(self, axis, r=0, l=0, c=0, name="LumpRLC"):
        """Assign a lumped RLC across this polygon along *axis*."""
        name = str(self) + '_' + name
        start, end = self.make_center_line(axis)
        self.modeler._make_lumped_rlc(r, l, c, start, end,
                                      ["Objects:=", [self]], name=name)

    def fillet(self, radius, vertex_index):
        self.modeler._fillet(radius, vertex_index, self)

    def vertices(self):
        return self.modeler.get_vertex_ids(self)

    def rename(self, new_name):
        '''
        Warning: increment_name only works if the sheet has not been
        subtracted or used as a tool elsewhere. These names are not checked;
        they require modifying get_objects_in_group.
        '''
        new_name = increment_name(
            new_name, self.modeler.get_objects_in_group(
                "Sheets"))  # this is for a closed polyline
        # check to get the actual new name in case there was a subtracted object with that name
        face_ids = self.modeler.get_face_ids(str(self))
        self.modeler.rename_obj(self, new_name)  # now rename
        if len(face_ids) > 0:
            new_name = self.modeler.get_object_name_by_face_id(face_ids[0])
        return Polyline(str(new_name), self.modeler)
class OpenPolyline(ModelEntity):  # Assume closed polyline
    # An *open* (non-covered) polyline entity; `show_direction` toggles the
    # direction arrow shown in the modeler view.
    model_command = "CreatePolyline"
    show_direction = make_prop('Show Direction',
                               prop_tab="Geometry3DAttributeTab",
                               prop_server=lambda self: self)

    def __init__(self, name, modeler, points=None):
        super(OpenPolyline, self).__init__(name, modeler)
        self.prop_holder = modeler._modeler
        if points is not None:
            self.points = points
            self.n_points = len(points)
        else:
            pass
        # axis = find_orth_axis()

    # TODO: find the plane of the polyline for now, assume Z
    # def find_orth_axis():
    #     X, Y, Z = (True, True, True)
    #     for point in points:
    #         X =

    def vertices(self):
        # Vertex IDs as reported by the modeler for this object.
        return self.modeler.get_vertex_ids(self)

    def fillet(self, radius, vertex_index):
        # Fillet a single vertex (or list of vertex indices).
        self.modeler._fillet(radius, vertex_index, self)

    def fillets(self, radius, do_not_fillet=[]):
        '''
        do_not_fillet : Index list of vertices not to fillet
        '''
        # NOTE: the mutable default is safe here — do_not_fillet is only read.
        raw_list_vertices = self.modeler.get_vertex_ids(self)
        list_vertices = []
        for vertex in raw_list_vertices[1:-1]:  # ignore the start and finish
            list_vertices.append(int(vertex))
        # The `- 1` suggests do_not_fillet indices are 1-based relative to the
        # interior vertices — TODO confirm against callers.
        list_vertices = list(
            map(
                int,
                np.delete(list_vertices,
                          np.array(do_not_fillet, dtype=int) - 1)))
        #print(list_vertices, type(list_vertices[0]))
        if len(list_vertices) != 0:
            self.modeler._fillets(radius, list_vertices, self)
        else:
            pass

    def sweep_along_path(self, to_sweep):
        # Broaden this line using *to_sweep* as the cross-section.
        return self.modeler._sweep_along_path(to_sweep, self)

    def rename(self, new_name):
        '''
        Warning: increment_name only works if the line has not been
        subtracted or used as a tool elsewhere.
        These names are not checked - they require modifying get_objects_in_group
        '''
        new_name = increment_name(new_name,
                                  self.modeler.get_objects_in_group("Lines"))
        # , self.points)
        return OpenPolyline(self.modeler.rename_obj(self, new_name),
                            self.modeler)

    def copy(self, new_name):
        # NOTE(review): the HfssModeler visible in this file does not define
        # `copy` — confirm this method is exercised / that `copy` exists on
        # the modeler object in use.
        new_obj = OpenPolyline(self.modeler.copy(self), self.modeler)
        return new_obj.rename(new_name)
class HfssFieldsCalc(COMWrapper):
    """Convenience wrapper around the HFSS fields calculator, pre-binding the
    standard named field quantities as CalcObject attributes."""

    _STANDARD_QUANTITIES = (
        "Mag_E", "Mag_H", "Mag_Jsurf", "Mag_Jvol",
        "Vector_E", "Vector_H", "Vector_Jsurf", "Vector_Jvol",
        "ComplexMag_E", "ComplexMag_H", "ComplexMag_Jsurf",
        "ComplexMag_Jvol", "P_J",
    )

    def __init__(self, setup):
        """
        :type setup: HfssSetup
        """
        self.setup = setup
        super(HfssFieldsCalc, self).__init__()
        self.parent = setup
        for quantity in self._STANDARD_QUANTITIES:
            setattr(self, quantity, NamedCalcObject(quantity, setup))
        # dictionary to hold additional named expressions
        self.named_expression = {}

    def clear_named_expressions(self):
        """Remove every named expression from the fields calculator."""
        self.parent.parent._fields_calc.ClearAllNamedExpr()

    def declare_named_expression(self, name):
        """Register *name* — an expression already created in the fields
        calculator — so it can be used through this wrapper."""
        self.named_expression[name] = NamedCalcObject(name, self.setup)

    def use_named_expression(self, name):
        """Look up a previously declared named expression."""
        return self.named_expression[name]
class CalcObject(COMWrapper):
def __init__(self, stack, setup):
    """
    :type stack: [(str, str)]
    :type setup: HfssSetup
    """
    super(CalcObject, self).__init__()
    # `stack` is a list of (command, argument) pairs replayed into the HFSS
    # fields-calculator stack when the expression is evaluated/written.
    self.stack = stack
    self.setup = setup
    self.calc_module = setup.parent._fields_calc
def _bin_op(self, other, op):
    # Lift bare numbers into calculator constants before combining.
    if isinstance(other, (int, float)):
        other = ConstantCalcObject(other, self.setup)
    combined = self.stack + other.stack
    combined.append(("CalcOp", op))
    return CalcObject(combined, self.setup)
def _unary_op(self, op):
    # Copy the stack so the original expression is left untouched.
    new_stack = list(self.stack)
    new_stack.append(("CalcOp", op))
    return CalcObject(new_stack, self.setup)
def __add__(self, other):
    # Push both operand stacks followed by the "+" calculator op.
    return self._bin_op(other, "+")
def __radd__(self, other):
    # Addition commutes, so delegate to __add__.
    return self + other
def __sub__(self, other):
    return self._bin_op(other, "-")
def __rsub__(self, other):
    # other - self  ==  (-self) + other
    return (-self) + other
def __mul__(self, other):
    return self._bin_op(other, "*")
def __rmul__(self, other):
    # Multiplication commutes, so delegate to __mul__.
    return self * other
def __div__(self, other):
return self._bin_op(other, "/")
def __rdiv__(self, other):
other = ConstantCalcObject(other, self.setup)
return other / self
def __pow__(self, other):
return self._bin_op(other, "Pow")
def dot(self, other):
return self._bin_op(other, "Dot")
def __neg__(self):
return self._unary_op("Neg")
def __abs__(self):
return self._unary_op("Abs")
def __mag__(self):
return self._unary_op("Mag")
def mag(self):
return self._unary_op("Mag")
def smooth(self):
return self._unary_op("Smooth")
def conj(self):
return self._unary_op("Conj") # make this right
def scalar_x(self):
return self._unary_op("ScalarX")
def scalar_y(self):
return self._unary_op("ScalarY")
def scalar_z(self):
return self._unary_op("ScalarZ")
def norm_2(self):
return (self.__mag__()).__pow__(2)
# return self._unary_op("ScalarX")**2+self._unary_op("ScalarY")**2+self._unary_op("ScalarZ")**2
def real(self):
return self._unary_op("Real")
def imag(self):
return self._unary_op("Imag")
def complexmag(self):
return self._unary_op("CmplxMag")
def _integrate(self, name, type):
stack = self.stack + [(type, name), ("CalcOp", "Integrate")]
return CalcObject(stack, self.setup)
def _maximum(self, name, type):
stack = self.stack + [(type, name), ("CalcOp", "Maximum")]
return CalcObject(stack, self.setup)
def getQty(self, name):
stack = self.stack + [("EnterQty", name)]
return CalcObject(stack, self.setup)
def integrate_line(self, name):
return self._integrate(name, "EnterLine")
def normal2surface(self, name):
''' return the part normal to surface.
Complex Vector. '''
stack = self.stack + [("EnterSurf", name),
("CalcOp", "Normal")]
stack.append(("CalcOp", "Dot"))
stack.append(("EnterSurf", name))
stack.append(("CalcOp", "Normal"))
stack.append(("CalcOp", "*"))
return CalcObject(stack, self.setup)
def tangent2surface(self, name):
''' return the part tangent to surface.
Complex Vector. '''
stack = self.stack + [("EnterSurf", name),
("CalcOp", "Normal")]
stack.append(("CalcOp", "Dot"))
stack.append(("EnterSurf", name))
stack.append(("CalcOp", "Normal"))
stack.append(("CalcOp", "*"))
stack = self.stack + stack
stack.append(("CalcOp", "-"))
return CalcObject(stack, self.setup)
def integrate_line_tangent(self, name):
''' integrate line tangent to vector expression \n
name = of line to integrate over '''
self.stack = self.stack + [("EnterLine", name), ("CalcOp", "Tangent"),
("CalcOp", "Dot")]
return self.integrate_line(name)
def line_tangent_coor(self, name, coordinate):
''' integrate line tangent to vector expression \n
name = of line to integrate over '''
if coordinate not in ['X', 'Y', 'Z']:
raise ValueError
self.stack = self.stack + [("EnterLine", name), ("CalcOp", "Tangent"),
("CalcOp", "Scalar" + coordinate)]
return self.integrate_line(name)
def integrate_surf(self, name="AllObjects"):
return self._integrate(name, "EnterSurf")
def integrate_vol(self, name="AllObjects"):
return self._integrate(name, "EnterVol")
def maximum_vol(self, name='AllObjects'):
return self._maximum(name, 'EnterVol')
def times_eps(self):
stack = self.stack + [("ClcMaterial", ("Permittivity (epsi)", "mult"))]
return CalcObject(stack, self.setup)
def times_mu(self):
stack = self.stack + [("ClcMaterial", ("Permeability (mu)", "mult"))]
return CalcObject(stack, self.setup)
def write_stack(self):
for fn, arg in self.stack:
if np.size(arg) > 1 and fn not in ['EnterVector']:
getattr(self.calc_module, fn)(*arg)
else:
getattr(self.calc_module, fn)(arg)
def save_as(self, name):
"""if the object already exists, try clearing your
named expressions first with fields.clear_named_expressions"""
self.write_stack()
self.calc_module.AddNamedExpr(name)
return NamedCalcObject(name, self.setup)
def evaluate(self, phase=0, lv=None, print_debug=False): # , n_mode=1):
self.write_stack()
if print_debug:
print('---------------------')
print('writing to stack: OK')
print('-----------------')
#self.calc_module.set_mode(n_mode, 0)
setup_name = self.setup.solution_name
if lv is not None:
args = lv
else:
args = []
args.append("Phase:=")
args.append(str(int(phase)) + "deg")
if isinstance(self.setup, HfssDMSetup):
args.extend(["Freq:=", self.setup.solution_freq])
self.calc_module.ClcEval(setup_name, args)
return float(self.calc_module.GetTopEntryValue(setup_name, args)[0])
class NamedCalcObject(CalcObject):
    """Calculator expression that starts from a previously saved named expression."""

    def __init__(self, name, setup):
        self.name = name
        initial_stack = [("CopyNamedExprToStack", name)]
        super(NamedCalcObject, self).__init__(initial_stack, setup)
class ConstantCalcObject(CalcObject):
    """Calculator expression holding a single scalar constant."""

    def __init__(self, num, setup):
        scalar_stack = [("EnterScalar", num)]
        super(ConstantCalcObject, self).__init__(scalar_stack, setup)
class ConstantVecCalcObject(CalcObject):
    """Calculator expression holding a constant vector."""

    def __init__(self, vec, setup):
        vector_stack = [("EnterVector", vec)]
        super(ConstantVecCalcObject, self).__init__(vector_stack, setup)
def get_active_project():
    ''' If you see the error:
    "The requested operation requires elevation."
    then you need to run your python as an admin.

    Returns the active project of the currently open Ansys desktop.
    '''
    import ctypes
    import os
    try:
        # POSIX: root runs with uid 0.
        is_admin = os.getuid() == 0
    except AttributeError:
        # Windows has no os.getuid; ask the shell API instead.
        is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
    if not is_admin:
        print('\033[93m WARNING: you are not running as an admin! \
 You need to run as an admin. You will probably get an error next.\
 \033[0m')

    app = HfssApp()
    desktop = app.get_app_desktop()
    return desktop.get_active_project()
def get_active_design():
    """Return the active design of the currently active Ansys project."""
    active_project = get_active_project()
    return active_project.get_active_design()
def get_report_arrays(name: str):
    """Fetch the data arrays of the report called ``name`` from the active design."""
    design = get_active_design()
    report = HfssReport(design, name)
    return report.get_arrays()
def load_ansys_project(proj_name: str,
                       project_path: str = None,
                       extension: str = '.aedt'):
    '''
    Utility function to load an Ansys project.

    Args:
        proj_name : None --> get active. (make sure 2 run as admin)
        project_path : directory that contains the project file; if None, the
            already-open/active project is used.
        extension : `aedt` is for 2016 version and newer

    Returns:
        tuple: (app, desktop, project). ``project`` is None when no project
        could be found.
    '''
    if project_path:
        # convert slashes correctly for system
        project_path = Path(project_path)

        # Checks
        assert project_path.is_dir(), \
            "ERROR! project_path is not a valid directory \N{loudly crying face}. Check the path, and especially \\ characters."

        # BUGFIX: was `project_path /= project_path / Path(...)`, which joined
        # the directory onto itself before appending the filename. Join the
        # filename onto the directory exactly once.
        project_path = project_path / Path(proj_name + extension)

        if (project_path).is_file():
            logger.info('\tFile path to HFSS project found.')
        else:
            raise Exception(
                "ERROR! Valid directory, but invalid project filename. \N{loudly crying face} Not found! Please check your filename.\n%s\n" % project_path)

        # BUGFIX: Ansys lock files are siblings named `<project>.aedt.lock`;
        # the old check `(project_path / '.lock')` treated the project file
        # as a directory and never matched.
        if Path(str(project_path) + '.lock').is_file():
            logger.warning(
                '\t\tFile is locked. \N{fearful face} If connection fails, delete the .lock file.'
            )

    app = HfssApp()
    logger.info("\tOpened Ansys App")
    desktop = app.get_app_desktop()
    logger.info(f"\tOpened Ansys Desktop v{desktop.get_version()}")
    # logger.debug(f"\tOpen projects: {desktop.get_project_names()}")

    if proj_name is not None:
        if proj_name in desktop.get_project_names():
            desktop.set_active_project(proj_name)
            project = desktop.get_active_project()
        else:
            # NOTE(review): if project_path was not supplied and the project
            # is not already open, this passes the string 'None' to Ansys --
            # preexisting behavior, kept.
            project = desktop.open_project(str(project_path))
    else:
        projects_in_app = desktop.get_projects()
        if projects_in_app:
            project = desktop.get_active_project()
        else:
            project = None

    if project:
        logger.info(
            f"\tOpened Ansys Project\n\tFolder: {project.get_path()}\n\tProject: {project.name}"
        )
    else:
        logger.info("\tAnsys Project was not found.\n\t Project is None.")

    return app, desktop, project
| 117,733 | 34.78541 | 223 | py |
pyEPR | pyEPR-master/pyEPR/_config_user.py | """
User configuration file.
The dictionary of options specified here overwrites the pyEPR default
config defined in _config_default.py
Do not edit `_config_default.py` directly. Rather, overwrite attributes here.
GIT: Do not track changes here.
Keep your changes only locally. Use the shell command
$ git update-index --skip-worktree _config_user.py
in the pyEPR/pyEPR folder to stop tracking this file.
To add changes back, use can use `git update-index --no-skip-worktree <file>`
@author: Your name goes here
"""
from . import Dict
# User-level overrides for the pyEPR default configuration (merged over
# _config_default.py). Only physical constants / options, no logic.
config = Dict(  # pylint: disable=invalid-name
    # Folder to save result data to.
    # PLEASE CHANGE THIS
    root_dir=r'C:\data-pyEPR',  # Not all machines have a D drive so substituting D with C here
    # Loss properties of various materials and surfaces
    dissipation=Dict(
        ##################################################
        # Bulk dielectric
        # refs: https://arxiv.org/abs/1308.1743
        # http://arxiv.org/pdf/1509.01854.pdf
        tan_delta_sapp=1e-6,  # tan(delta) for bulk surface (dimensionless)
        epsi=10,  # bulk relative dielectric constant
        ##################################################
        # Surface dielectric
        # ref: http://arxiv.org/pdf/1509.01854.pdf
        # Surface dielectric (dirt) thickness
        # units: meters
        th=3e-9,
        # Surface dielectric (dirt) constant
        # units: relative permittivity
        eps_r=10,
        # Surface dielectric (dirt) loss tangent
        # units: unitless, since this is tan(delta)
        tan_delta_surf=1e-3,
        ##################################################
        # Surface object specific dielectric properties.
        # These will override ones above when applicable
        dielectric_surfaces=Dict(
            trace=Dict(  # metal trace surface
                tan_delta_surf=0.001,
                th=5e-9,
                eps_r=10
            ),
            gap=Dict(  # gap between trace and ground plane
                tan_delta_surf=0.001,
                th=2e-9,
                eps_r=10
            )
        ),
        ##################################################
        # Thin-film surface loss
        # units: Ohms
        # ref: https://arxiv.org/abs/1308.1743
        surface_Rs=250e-9,
        ##################################################
        # Seam current loss
        # units: per Ohm meter; i.e., seam conductance
        # ref: http://arxiv.org/pdf/1509.01119.pdf
        gseam=1.0e3,
    ),
    ansys=Dict(
        # method_calc_P_mj sets the method used to calculate the participation ratio in eigenmode.
        # Valid values:
        # 'line_voltage' : Uses the line voltage integral
        # 'J_surf_mag' : takes the avg. Jsurf over the rect. Make sure you have seeded
        # lots of tets here. I recommend starting with 4 across smallest dimension.
        # Multi-junction calculation of energy participation ratio matrix based on <I_J>.
        # Current is integrated average of J_surf by default: (zkm 3/29/16)
        # Will calculate the Pj matrix for the selected modes for the given junctions
        # junc_rect array & length of junctions
        method_calc_P_mj='line_voltage',
    ),
    plotting=Dict(
        # Default color map for plotting. Better if made into a string name
        # taken from matplotlib.cm
        default_color_map='viridis',  # pylint: disable=no-member
    ),
)
# Public API of this module.
__all__ = ['config']
| 3,435 | 32.038462 | 98 | py |
pyEPR | pyEPR-master/pyEPR/calcs/quantum.py | """
Implementation of basic quantum operation in numpy,
to effortlessly remove the need in the `qutip` package.
"""
import numpy as np
def create(n: int):
    """Returns matrix representation of an n-dimensional creation operator.

    The only nonzero entries are the first subdiagonal: a^dag[k, k-1] = sqrt(k).
    """
    mat = np.zeros((n, n))
    for k in range(1, n):
        mat[k, k - 1] = np.sqrt(k)
    return mat
def destroy(n: int):
    """Returns matrix representation of an n-dimensional annihilation operator.

    The only nonzero entries are the first superdiagonal: a[k-1, k] = sqrt(k).
    """
    mat = np.zeros((n, n))
    for k in range(1, n):
        mat[k - 1, k] = np.sqrt(k)
    return mat
def number(n: int):
    """Returns matrix representation of an n-dimensional number operator.

    Diagonal matrix with entries 0, 1, ..., n-1 (float dtype).
    """
    mat = np.zeros((n, n))
    for k in range(n):
        mat[k, k] = k
    return mat
def basis(n: int, N: int):  # Numpy does provide a method that does this but it's very slow
    """Returns the n-th, N-dimensional basis vector as an (N, 1) column."""
    column = np.zeros((N, 1))
    column[n] = 1.0
    return column
| 954 | 27.939394 | 90 | py |
pyEPR | pyEPR-master/pyEPR/calcs/constants.py | """
pyEPR constants and convenience definitions.
@author: Zlatko Minev
"""
# pylint: disable=invalid-name
from scipy.constants import Planck, elementary_charge, epsilon_0, pi # pylint: disable=unused-import
# Pi (dimensionless)
π = pi
# Reduced Planck constant, ħ = h / 2π  (units: J·s)
ħ = hbar = Planck/(2*pi)
# Reduced flux quantum, ϕ0 = ħ / 2e ≈ 3.29105976e-16  (units: Wb)
ϕ0 = fluxQ = ħ / (2*elementary_charge)
# Magnitude of the electric charge carried by a single electron (units: C)
e_el = elementary_charge
| 457 | 20.809524 | 101 | py |
pyEPR | pyEPR-master/pyEPR/calcs/back_box_numeric.py | '''
Numerical diagonalization of quantum Hamiltonian and parameter
extraction.
@author: Phil Reinhold, Zlatko Minev, Lysander Christakis
Original code on black_box_hamiltonian and make_dispersive functions by Phil Reinhold
Revisions and updates by Zlatko Minev & Lysander Christakis
'''
# pylint: disable=invalid-name
from __future__ import print_function
from functools import reduce
import numpy as np
from .constants import Planck as h
from .constants import fluxQ, hbar
from .hamiltonian import MatrixOps
try:
import qutip
from qutip import basis, tensor
except (ImportError, ModuleNotFoundError):
pass
__all__ = [ 'epr_numerical_diagonalization',
'make_dispersive',
'black_box_hamiltonian',
'black_box_hamiltonian_nq']
dot = MatrixOps.dot
cos_approx = MatrixOps.cos_approx
# ==============================================================================
# ANALYSIS FUNCTIONS
# ==============================================================================
def epr_numerical_diagonalization(freqs, Ljs, ϕzpf,
                                  cos_trunc=8,
                                  fock_trunc=9,
                                  use_1st_order=False,
                                  return_H=False,
                                  non_linear_potential=None):
    '''
    Numerical diagonalization for pyEPR.

    :param freqs: (GHz, not radians) linearized normal-mode frequencies, length M
    :param Ljs: (Henries) linearized junction inductances, length J
    :param ϕzpf: reduced zero-point fluctuation of the junction fluxes for each
        mode across each junction, shape MxJ

    :return: Hamiltonian mode frequencies and dispersive shifts (MHz). The sign
        of the shifts is flipped so that a downward shift is positive.
    '''
    freqs = np.array(freqs)
    Ljs = np.array(Ljs)
    ϕzpf = np.array(ϕzpf)

    # Guard against wrong input units.
    assert all(freqs < 1E6), "Please input the frequencies in GHz. \N{nauseated face}"
    assert all(Ljs < 1E-3), "Please input the inductances in Henries. \N{nauseated face}"

    # Build the full cosine-potential Hamiltonian (frequencies to Hz,
    # reduced ZPF scaled back up by the reduced flux quantum).
    Hs = black_box_hamiltonian(freqs * 1E9, Ljs.astype(float), fluxQ * ϕzpf,
                               cos_trunc, fock_trunc, individual=use_1st_order,
                               non_linear_potential=non_linear_potential)

    f_ND, χ_ND, _, _ = make_dispersive(Hs, fock_trunc, ϕzpf, freqs,
                                       use_1st_order=use_1st_order)

    # Convert to MHz and flip sign so that a down shift is positive.
    χ_ND = -1 * χ_ND * 1E-6

    if return_H:
        return f_ND, χ_ND, Hs
    return f_ND, χ_ND
def black_box_hamiltonian(fs, ljs, fzpfs, cos_trunc=5, fock_trunc=8, individual=False,
                          non_linear_potential = None):
    r"""
    :param fs: Linearized model, H_lin, normal mode frequencies in Hz, length N
    :param ljs: junction linearized inductances in Henries, length M
    :param fzpfs: Zero-point fluctuation of the junction fluxes for each mode across each junction,
                 shape MxJ
    :param cos_trunc: order at which the cosine Taylor series is truncated
    :param fock_trunc: Fock-space truncation per mode
    :param individual: if True, return (linear_part, nonlinear_part) separately
    :param non_linear_potential: optional callable replacing the truncated cosine
    :return: Hamiltonian in units of Hz (i.e H / h)
    All in SI units. The ZPF fed in are the generalized, not reduced, flux.

    Description:
     Takes the linear mode frequencies, :math:`\omega_m`, and the zero-point fluctuations, ZPFs, and
     builds the Hamiltonian matrix of :math:`H_{full}`, assuming cos potential.
    """
    n_modes = len(fs)
    njuncs = len(ljs)
    fs, ljs, fzpfs = map(np.array, (fs, ljs, fzpfs))
    # Junction energies: Ej = phi0^2 / Lj (Joules), then to Hz via Planck.
    ejs = fluxQ**2 / ljs
    fjs = ejs / h
    fzpfs = np.transpose(fzpfs)  # Take from MxJ to JxM
    # Sanity checks on inputs before building large tensor-product operators.
    assert np.isnan(fzpfs).any(
    ) == False, "Phi ZPF has NAN, this is NOT allowed! Fix me. \n%s" % fzpfs
    assert np.isnan(ljs).any(
    ) == False, "Ljs has NAN, this is NOT allowed! Fix me."
    assert np.isnan(
        fs).any() == False, "freqs has NAN, this is NOT allowed! Fix me."
    assert fzpfs.shape == (njuncs, n_modes), "incorrect shape for zpf array, {} not {}".format(
        fzpfs.shape, (njuncs, n_modes))
    assert fs.shape == (n_modes,), "incorrect number of mode frequencies"
    assert ejs.shape == (njuncs,), "incorrect number of qubit frequencies"

    def tensor_out(op, loc):
        "Make operator <op> tensored with identities at locations other than <loc>"
        op_list = [qutip.qeye(fock_trunc) for i in range(n_modes)]
        op_list[loc] = op
        return reduce(qutip.tensor, op_list)

    # Single-mode ladder operators, lifted to the full tensor-product space.
    a = qutip.destroy(fock_trunc)
    ad = a.dag()
    n = qutip.num(fock_trunc)
    mode_fields = [tensor_out(a + ad, i) for i in range(n_modes)]
    mode_ns = [tensor_out(n, i) for i in range(n_modes)]

    def cos(x):
        return cos_approx(x, cos_trunc=cos_trunc)

    if non_linear_potential is None:
        non_linear_potential = cos

    # H = sum_m f_m n_m  -  sum_j Ej_j * cos(phi_j) (truncated, in Hz units).
    linear_part = dot(fs, mode_ns)
    cos_interiors = [dot(fzpf_row/fluxQ, mode_fields) for fzpf_row in fzpfs]
    nonlinear_part = dot(-fjs, map(non_linear_potential, cos_interiors))
    if individual:
        return linear_part, nonlinear_part
    else:
        return linear_part + nonlinear_part

# Backwards-compatibility alias for the historical function name.
bbq_hmt = black_box_hamiltonian
def make_dispersive(H, fock_trunc, fzpfs=None, f0s=None, chi_prime=False,
                    use_1st_order=False):
    r"""
    Input: Hamiltonian Matrix.
    Optional: phi_zpfs and normal mode frequencies, f0s.
    use_1st_order : deprecated
    Output:
        Return dressed mode frequencies, chis, chi prime, phi_zpf flux (not reduced), and linear frequencies
    Description:
        Takes the Hamiltonian matrix `H` from bbq_hmt. It them finds the eigenvalues/eigenvectors and assigns quantum numbers to them --- i.e., mode excitations, such as, for instance, for three mode, :math:`|0,0,0\rangle` or :math:`|0,0,1\rangle`, which correspond to no excitations in any of the modes or one excitation in the 3rd mode, resp. The assignment is performed based on the maximum overlap between the eigenvectors of H_full and H_lin. If this crude explanation is confusing, let me know, I will write a more detailed one |:slightly_smiling_face:|
        Based on the assignment of the excitations, the function returns the dressed mode frequencies :math:`\omega_m^\prime`, and the cross-Kerr matrix (including anharmonicities) extracted from the numerical diagonalization, as well as from 1st order perturbation theory.
        Note, the diagonal of the CHI matrix is directly the anharmonicity term.
    """
    if hasattr(H, '__len__'):  # is it an array / list?
        [H_lin, H_nl] = H
        H = H_lin + H_nl
    else:  # make sure its a quanutm object
        assert type(
            H) == qutip.qobj.Qobj, "Please pass in either a list of Qobjs or Qobj for the Hamiltonian"

    print("Starting the diagonalization")
    evals, evecs = H.eigenstates()
    print("Finished the diagonalization")
    # Reference all energies to the ground state.
    evals -= evals[0]

    # Number of modes: the total dimension is fock_trunc**N.
    N = int(np.log(H.shape[0]) / np.log(fock_trunc))  # number of modes
    assert H.shape[0] == fock_trunc ** N

    def fock_state_on(d):
        ''' d={mode number: # of photons} '''
        return qutip.tensor(*[qutip.basis(fock_trunc, d.get(i, 0)) for i in range(N)])  # give me the value d[i] or 0 if d[i] does not exist

    if use_1st_order:
        # Deprecated path: identify eigenstates via 1st-order perturbation
        # theory on the nonlinear part instead of raw overlap.
        num_modes = N
        print("Using 1st O")

        def multi_index_2_vector(d, num_modes, fock_trunc):
            return tensor([basis(fock_trunc, d.get(i, 0)) for i in range(num_modes)])
            '''this function creates a vector representation a given fock state given the data for excitations per
            mode of the form d={mode number: # of photons}'''

        def find_multi_indices(fock_trunc):
            # NOTE(review): hard-coded to exactly three modes (i, j, k).
            multi_indices = [{ind: item for ind, item in enumerate([i, j, k])} for i in range(fock_trunc)
                             for j in range(fock_trunc)
                             for k in range(fock_trunc)]
            return multi_indices
            '''this function generates all possible multi-indices for three modes for a given fock_trunc'''

        def get_expect_number(left, middle, right):
            return (left.dag()*middle*right).data.toarray()[0, 0]
            '''this function calculates the expectation value of an operator called "middle" '''

        def get_basis0(fock_trunc, num_modes):
            multi_indices = find_multi_indices(fock_trunc)
            basis0 = [multi_index_2_vector(
                multi_indices[i], num_modes, fock_trunc) for i in range(len(multi_indices))]
            evalues0 = [get_expect_number(v0, H_lin, v0).real for v0 in basis0]
            return multi_indices, basis0, evalues0
            '''this function creates a basis of fock states and their corresponding eigenvalues'''

        def closest_state_to(vector0):

            def PT_on_vector(original_vector, original_basis, pertub, energy0, evalue):
                # NOTE(review): the `pertub` argument is shadowed -- the body
                # uses the enclosing H_nl directly (historical behavior).
                new_vector = 0 * original_vector
                for i in range(len(original_basis)):
                    if (energy0[i]-evalue) > 1e-3:
                        new_vector += ((original_basis[i].dag()*H_nl*original_vector).data.toarray()[
                            0, 0])*original_basis[i]/(evalue-energy0[i])
                    else:
                        pass
                return (new_vector + original_vector)/(new_vector + original_vector).norm()
                '''this function calculates the normalized vector with the first order correction term
                from the non-linear hamiltonian '''

            [multi_indices, basis0, evalues0] = get_basis0(
                fock_trunc, num_modes)
            evalue0 = get_expect_number(vector0, H_lin, vector0)
            vector1 = PT_on_vector(vector0, basis0, H_nl, evalues0, evalue0)

            index = np.argmax([(vector1.dag() * evec).norm()
                               for evec in evecs])
            return evals[index], evecs[index]

    else:
        def closest_state_to(s):
            def distance(s2):
                return (s.dag() * s2[1]).norm()
            return max(zip(evals, evecs), key=distance)

    # Dressed single-excitation frequencies: eigenstate closest to |0..1..0>.
    f1s = [closest_state_to(fock_state_on({i: 1}))[0] for i in range(N)]
    chis = [[0]*N for _ in range(N)]
    chips = [[0]*N for _ in range(N)]
    for i in range(N):
        for j in range(i, N):
            d = {k: 0 for k in range(N)}  # put 0 photons in each mode (k)
            d[i] += 1
            d[j] += 1
            # load ith mode and jth mode with 1 photon
            fs = fock_state_on(d)
            ev, evec = closest_state_to(fs)
            # Cross-Kerr: E(1_i,1_j) - E(1_i) - E(1_j); diagonal (i==j) is
            # the anharmonicity.
            chi = (ev - (f1s[i] + f1s[j]))
            chis[i][j] = chi
            chis[j][i] = chi

            if chi_prime:
                d[j] += 1
                fs = fock_state_on(d)
                ev, evec = closest_state_to(fs)
                chip = (ev - (f1s[i] + 2*f1s[j]) - 2 * chis[i][j])
                chips[i][j] = chip
                chips[j][i] = chip

    if chi_prime:
        return np.array(f1s), np.array(chis), np.array(chips), np.array(fzpfs), np.array(f0s)
    else:
        return np.array(f1s), np.array(chis), np.array(fzpfs), np.array(f0s)
def black_box_hamiltonian_nq(freqs, zmat, ljs, cos_trunc=6, fock_trunc=8, show_fit=False):
    """
    N-Qubit version of bbq, based on the full Z-matrix
    Currently reproduces 1-qubit data, untested on n-qubit data
    Assume: Solve the model without loss in HFSS.

    :param freqs: frequency sweep points
    :param zmat: impedance matrix sampled at those points, shape (nf, nj, nj)
    :param ljs: junction inductances (Henries), length nj
    :param show_fit: plot the local polynomial fits around each admittance zero
    """
    nf = len(freqs)
    nj = len(ljs)
    assert zmat.shape == (nf, nj, nj)

    # Im[Y] of the first port; its negative-to-positive zero crossings locate
    # the resonant modes.
    imY = (1/zmat[:, 0, 0]).imag
    # zeros where the sign changes from negative to positive
    (zeros,) = np.where((imY[:-1] <= 0) & (imY[1:] > 0))
    nz = len(zeros)

    imYs = np.array([1 / zmat[:, i, i] for i in range(nj)]).imag
    f0s = np.zeros(nz)
    slopes = np.zeros((nj, nz))
    import matplotlib.pyplot as plt
    # Fit a second order polynomial in the region around the zero
    # Extract the exact location of the zero and the associated slope
    # If you need better than second order fit, you're not sampling finely enough
    for i, z in enumerate(zeros):
        f0_guess = (freqs[z+1] + freqs[z]) / 2
        zero_polys = np.polyfit(
            freqs[z-1:z+3] - f0_guess, imYs[:, z-1:z+3].transpose(), 2)
        zero_polys = zero_polys.transpose()
        # Exact zero: smallest-magnitude root of the fit, shifted back.
        f0s[i] = f0 = min(np.roots(zero_polys[0]),
                          key=lambda r: abs(r)) + f0_guess
        for j, p in enumerate(zero_polys):
            # Slope of Im[Y] at the zero, per port.
            slopes[j, i] = np.polyval(np.polyder(p), f0 - f0_guess)
        if show_fit:
            plt.plot(freqs[z-1:z+3] - f0_guess, imYs[:, z-1:z +
                                                     3].transpose(), lw=1, ls='--', marker='o', label=str(f0))
            p = np.poly1d(zero_polys[0, :])
            p2 = np.poly1d(zero_polys[1, :])
            plt.plot(freqs[z-1:z+3] - f0_guess, p(freqs[z-1:z+3] - f0_guess))
            plt.plot(freqs[z-1:z+3] - f0_guess, p2(freqs[z-1:z+3] - f0_guess))
            plt.legend(loc=0)

    # Effective impedances from the admittance slopes at each resonance.
    zeffs = 2 / (slopes * f0s[np.newaxis, :])
    # Take signs with respect to first port
    zsigns = np.sign(zmat[zeros, 0, :])
    fzpfs = zsigns.transpose() * np.sqrt(hbar * abs(zeffs) / 2)

    H = black_box_hamiltonian(f0s, ljs, fzpfs, cos_trunc, fock_trunc)

    return make_dispersive(H, fock_trunc, fzpfs, f0s)

# NOTE(review): redundant self-assignment, a no-op kept for fidelity;
# safe to delete.
black_box_hamiltonian_nq = black_box_hamiltonian_nq
| 13,172 | 41.908795 | 566 | py |
pyEPR | pyEPR-master/pyEPR/calcs/hamiltonian.py | """
Hamiltonian and Matrix Operations.
Hamiltonian operations heavily draw on qutip package.
This package must be installed for them to work.
"""
try:
import qutip
from qutip import Qobj # basis, tensor,
except (ImportError, ModuleNotFoundError):
Qobj=None
pass
from ..toolbox.pythonic import fact
class MatrixOps(object):
    """Small collection of operator/matrix helpers (qutip-based where needed)."""

    @staticmethod
    def cos(op_cos_arg: Qobj):
        """
        Make cosine operator matrix from argument op_cos_arg

        op_cos_arg (qutip.Qobj) : argument of the cosine
        """
        exp_plus = (1j * op_cos_arg).expm()
        exp_minus = (-1j * op_cos_arg).expm()
        return 0.5 * (exp_plus + exp_minus)

    @staticmethod
    def cos_approx(x, cos_trunc=5):
        """
        Create a Taylor series matrix approximation of the cosine, up to some order.

        The constant and quadratic terms are omitted (series starts at x^4).
        """
        terms = ((-1) ** i * x ** (2 * i) / float(fact(2 * i))
                 for i in range(2, cos_trunc + 1))
        return sum(terms)

    @staticmethod
    def dot(ais, bis):
        """
        Dot product
        """
        return sum(a * b for a, b in zip(ais, bis))
class HamOps(object):
    """Helpers to match dressed eigenstates against bare Fock states (qutip-based)."""

    @staticmethod
    def fock_state_on(d: dict, fock_trunc: int, N_modes: int):
        ''' d={mode number: # of photons} In the bare eigen basis
        '''
        # Any mode missing from d is taken to have zero photons.
        kets = []
        for mode in range(N_modes):
            kets.append(qutip.basis(fock_trunc, d.get(mode, 0)))
        return qutip.tensor(*kets)

    @staticmethod
    def closest_state_to(s: Qobj, energyMHz, evecs):
        """
        Returns the (energy, eigenvector) pair whose vector best overlaps ``s``.
        """
        def overlap(pair):
            return (s.dag() * pair[1]).norm()
        return max(zip(energyMHz, evecs), key=overlap)

    @staticmethod
    def closest_state_to_idx(s: Qobj, evecs):
        """
        Returns the (index, eigenvector) pair whose vector best overlaps ``s``.
        """
        def overlap(pair):
            return (s.dag() * pair[1]).norm()
        return max(zip(range(len(evecs)), evecs), key=overlap)

    @staticmethod
    def identify_Fock_levels(fock_trunc: int, evecs,
                             N_modes=2,
                             Fock_max=4):
        """
        Return quantum numbers in terms of the undiagonalized eigenbasis.
        """
        # to do: need to turn Fock_max into arb algo on each mode
        labels = {}
        for d1 in range(Fock_max):
            for d2 in range(Fock_max):
                occupation = {0: d1, 1: d2}
                bare = HamOps.fock_state_on(occupation, fock_trunc, N_modes)
                idx, _ = HamOps.closest_state_to_idx(bare, evecs)
                labels[idx] = occupation
        return labels
| 2,628 | 27.576087 | 90 | py |
pyEPR | pyEPR-master/pyEPR/calcs/convert.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 19 18:14:08 2019
Unit and variable conversions.
@author: Zlatko Minev
"""
from __future__ import (absolute_import, # Python 2.7 and 3 compatibility
division, print_function)
import numpy as np
import pandas as pd
from numpy import sqrt
from .basic import CalcsBasic
from .constants import (Planck, e_el, elementary_charge, fluxQ, hbar, pi, ħ, π,
ϕ0)
class Convert():
'''
Static container class for conversions of units and variables.
TEST CONVERSION:
.. code-block:: python
from pyEPR.toolbox.conversions import Convert
Lj_nH, Cs_fF = 11, 60
Convert.transmon_print_all_params(Lj_nH, Cs_fF);
'''
# Known SI prefixed
_prefix = {'y': -24, # yocto
'z': -21, # zepto
'a': -18, # atto
'f': -15, # femto
'p': -12, # pico
'n': -9, # nano
'u': -6, # micro
'm': -3, # mili
'c': -2, # centi
'd': -1, # deci
' ': 0,
'k': 3, # kilo
'M': 6, # mega
'G': 9, # giga
'T': 12, # tera
'P': 15, # peta
'E': 18, # exa
'Z': 21, # zetta
'Y': 24, # yotta
}
# Known SI units
_SI_units = ['H', # Henries
'F', # Farads
'Hz', # Hertz
'Ohm', # Ohms
'Ω', # Ohms
'Wb' # Webers
'J', # Joules
'A' # Amps
]
@staticmethod
def toSI(number, from_units: str):
r"""
Convert from SI unit prefix to regular SI units
If the from_units is ' ' or not in the prefix list,
then the unit is assumed to be
"""
if from_units in Convert._SI_units:
from_units = ' '
# else: we assume that the first letter is a prefix
return number*(10**Convert._prefix.get(from_units[0]))
@staticmethod
def fromSI(number, from_units: str):
r"""Convert a number with SI units, such as fF to F.
Arguments:
number {[numeric]} -- number
from_units {str} -- string
Returns:
numeric number, with units expanded
"""
if from_units in Convert._SI_units:
from_units = ' '
# else: we assume that the first letter is a prefix
return number*(10**(-Convert._prefix.get(from_units[0])))
@staticmethod
def _convert_num(out_func, in_num, in_units, out_units):
in_num = 1.0*in_num # to float
# convert units of input number
in_num = Convert.toSI(in_num, in_units)
out_num = out_func(in_num) # Assume func processes all in SI units
out_num = Convert.fromSI(out_num, out_units)
return out_num
@staticmethod
def Ej_from_Lj(Lj, units_in='nH', units_out='MHz'):
r'''
Josephson Junction energy from Josephson inductance.
Returns in MHz
:math:`E_j = \phi_0^2 / L_J`
'''
return Convert._convert_num(
# Plank to go from Joules to Hz
lambda _Lj: Planck**-1 * (ϕ0**2)/_Lj,
Lj, units_in, units_out)
@staticmethod
def Lj_from_Ej(Ej, units_in='MHz', units_out='nH'):
r'''
Josephson Junction ind from Josephson energy in MHZ.
Returns in units of nano Henries by default
:math:`E_j = \phi_0^2 / L_J`
'''
return Convert._convert_num(
lambda _x: (ϕ0**2.)/(_x*Planck), # Plank to go from Joules to Hz
Ej, units_in, units_out)
@staticmethod
def Ic_from_Lj(Lj, units_in='nH', units_out='nA'):
r'''
Josephson Junction crit. curr from Josephson inductance.
:math:`E_j = \phi_0^2 / L_J = \phi_0 I_C`
'''
return Convert._convert_num(
lambda _x: ϕ0/_x, # Plank to go from Joules to Hz
Lj, units_in, units_out)
@staticmethod
def Lj_from_Ic(Lj, units_in='nA', units_out='nH'):
r'''
Josephson Junction crit. curr from Josephson inductance.
:math:`E_j = \phi_0^2 / L_J = \phi_0 I_C`
'''
return Convert._convert_num(
lambda _x: ϕ0/_x, # Plank to go from Joules to Hz
Lj, units_in, units_out)
@staticmethod
def Ec_from_Cs(Cs, units_in='fF', units_out='MHz'):
r'''
Charging energy :math:`4E_c n^2`, where :math:`n=Q/2e`
Returns in MHz
:math:`E_{C}=\frac{e^{2}}{2C}J`
'''
return Convert._convert_num(
# Plank to go from Joules to Hz
lambda _x: Planck**-1 * (e_el**2.)/(2.*_x),
Cs, units_in, units_out)
@staticmethod
def Cs_from_Ec(Ec, units_in='MHz', units_out='fF'):
r'''
Charging energy :math:`4E_c n^2`, where :math:`n=Q/2e`
Returns in SI units, in Farads.
:math:`E_{C}=\frac{e^{2}}{2C}J`
'''
return Convert._convert_num(
# Plank to go from Joules to Hz
lambda _x: (e_el**2.)/(2.*_x*Planck),
Ec, units_in, units_out)
@staticmethod
def ZPF_from_LC(L, C):
r'''
Input units assumed to be identical
Returns Phi ZPF in and Q_ZPF in NOT reduced units, but SI
'''
Z = sqrt(L/C)
return (sqrt(hbar*Z/2.), sqrt(hbar/(2.*Z))) # Phi , Q
@staticmethod
def ZPF_from_EPR(hfss_freqs, hfss_epr_, hfss_signs, hfss_Ljs,
Lj_units_in='H', to_df=False):
r"""
Parameters:
Can be either Pandas or numpy arrays.
hfss_freqs : HFSS Freqs. (standard units: GHz, but these will cancel with Ejs) (list/Series)
hfss_epr : EPR ratio matrix, dim = M x J (2D array/DataFrame)
hfss_signs : Sign matrix, dim = M x J (2D array/DataFrame)
hfss_Ljs : Assumed in Henries (see Lj_units_in). (list/Series)
Lj_units_in : Default 'H' for Henries. Can change here.
Returns:
M x J matrix of reduced ZPF; i.e., scaled by reduced flux quantum.
type: np.array
and a tuple of matrices.
Example use:
ϕzpf, (Ωm, Ej, Pmj, Smj) = Convert.ZPF_from_EPR(hfss_freqs, hfss_epr, hfss_signs, hfss_Ljs, to_df=True)
"""
hfss_freqs, hfss_epr, hfss_signs, hfss_Ljs = map(
np.array, (hfss_freqs, hfss_epr_, hfss_signs, hfss_Ljs))
Ωd = np.diagflat(hfss_freqs)
Ej = Convert.Ej_from_Lj(
hfss_Ljs, units_in=Lj_units_in, units_out='GHz')
Ej = np.diagflat(Ej)
ϕzpfs = CalcsBasic.epr_to_zpf(hfss_epr, hfss_signs, Ωd, Ej)
if to_df:
ϕzpfs = pd.DataFrame(
ϕzpfs, columns='ϕ'+hfss_epr_.columns.values, index=hfss_epr_.index)
return ϕzpfs, (Ωd, Ej, hfss_epr, hfss_signs)
@staticmethod
def Omega_from_LC(L, C):
r'''
Calculate the resonant *angular* frequency
'''
return sqrt(1./(L*C))
| 7,260 | 29.897872 | 115 | py |
pyEPR | pyEPR-master/pyEPR/calcs/__init__.py | """
Main calculation module
"""
from . import constants
from .basic import CalcsBasic
from .convert import Convert
from .transmon import CalcsTransmon
| 151 | 18 | 35 | py |
pyEPR | pyEPR-master/pyEPR/calcs/transmon.py | """
Transmon calculations
"""
import math
import numpy as np
from numpy import pi, sqrt, exp
from numpy.linalg import inv
from .constants import e_el, fluxQ
from .convert import Convert
from ..toolbox.pythonic import divide_diagonal_by_2
class CalcsTransmon():
    """
    Common calculations and parameter reporting used for transmon qubits.
    """

    @staticmethod
    def dispersiveH_params_PT_O1(Pmj, Ωm, Ej):
        """
        First order PT on the 4th power of the JJ cosine.

        This function applied to an unfrustrated Josephson junction.

        Pmj : Matrix MxJ
        Ωm  : GHz Matrix MxM
        Ej  : GHz Matrix JxJ

        returns f_O1, χ_O1
        χ_O1 has diagonal divided by 2 so as to give true anharmonicity.

        Example use:
        ..codeblock python
            # PT_01: Calculate 1st order PT results
            f_O1, χ_O1 = Calc_basic.dispersiveH_params_PT_O1(Pmj, Ωm, Ej)
        """
        Pmj, Ωm, Ej = map(np.array, (Pmj, Ωm, Ej))

        # Shape checks: Ωm (MxM), Ej (JxJ), Pmj (MxJ).
        assert Ωm.shape[0] == Ωm.shape[1]
        assert Ej.shape[0] == Ej.shape[1]
        assert Ωm.shape[1] == Pmj.shape[0]
        assert Pmj.shape[1] == Ej.shape[0]

        f_0 = np.diag(Ωm)
        # χ matrix from 1st-order PT; GHz converted to MHz.
        χ_O1 = 0.25 * Ωm @ Pmj @ inv(Ej) @ Pmj.T @ Ωm * 1000.  # GHz to MHz
        # Dressed frequencies use the full χ row sums (diagonal not yet halved).
        f_O1 = f_0 - 0.5*np.ndarray.flatten(np.array(χ_O1.sum(1))) / \
            1000.  # 1st order PT expect freq to be dressed down by alpha
        # Make the diagonals alpha
        χ_O1 = divide_diagonal_by_2(χ_O1)

        return f_O1, χ_O1

    @staticmethod
    def transmon_get_all_params(Ej_MHz, Ec_MHz):
        """
        Linear harmonic oscillator approximation of transmon.
        Convenience func

        Returns a dict of derived circuit parameters (SI and convenience units).
        """
        Ej, Ec = Ej_MHz, Ec_MHz
        Lj_H, Cs_F = Convert.Lj_from_Ej(
            Ej, 'MHz', 'H'), Convert.Cs_from_Ec(Ec, 'MHz', 'F')  # SI units
        Phi_ZPF, Q_ZPF = Convert.ZPF_from_LC(Lj_H, Cs_F)
        Omega_MHz = sqrt(1./(Lj_H*Cs_F)) * 1E-6  # MHz
        f_MHz = Omega_MHz / (2*pi)*1E-3
        Z_Ohms = sqrt(Lj_H/Cs_F)
        # Reduced (dimensionless) zero-point fluctuations.
        phi_ZPF = Phi_ZPF/fluxQ
        n_ZPF = Q_ZPF / (2*e_el)
        return {'Ej_MHz': Ej_MHz, 'Ec_MHz': Ec_MHz,
                'Lj_H': Lj_H, 'Cs_F': Cs_F,
                'Lj_nH': Lj_H*1E9, 'Cs_fF': Cs_F*1E15,
                'Phi_ZPF': Phi_ZPF, 'Q_ZPF': Q_ZPF,
                'phi_ZPF': phi_ZPF, 'n_ZPF': n_ZPF,
                'Omega_MHz': Omega_MHz,
                'f_MHz': f_MHz,
                'Z_Ohms': Z_Ohms,
                }

    @staticmethod
    def transmon_print_all_params(Lj_nH, Cs_fF):
        """
        Linear harmonic oscillator approximation of transmon.
        Convenience func

        Displays a LaTeX table of parameters (IPython) and returns the raw
        LaTeX source string.
        """
        # Parameters - duplicates with transmon_get_all_params
        Ej, Ec = Convert.Ej_from_Lj(Lj_nH, 'nH', 'MHz'), Convert.Ec_from_Cs(
            Cs_fF, 'fF', 'MHz')  # MHz
        Lj_H, Cs_F = Convert.Lj_from_Ej(Ej, 'MHz', 'H'), Convert.Cs_from_Ec(
            Ec, 'MHz', 'F')  # SI units
        Phi_ZPF, Q_ZPF = Convert.ZPF_from_LC(Lj_H, Cs_F)
        Omega_MHz = sqrt(1./(Lj_H*Cs_F)) * 1E-6  # MHz
        # Print
        text = r"""
        \begin{align}
            L_J               &=%.1f \mathrm{\ nH}       &  C_\Sigma &=%.1f \mathrm{\ fF}   \\
            E_J               &=%.2f \mathrm{\ GHz}      &  E_C      &=%.0f \mathrm{\ MHz}  \\
            \omega_0          &=2\pi\times %.2f \mathrm{\ GHz}  & Z_0 &= %.0f \mathrm{\ \Omega}  \\
            \phi_\mathrm{ZPF} &= %.2f \ \ \phi_0         & n_\mathrm{ZPF} &=%.2f \ \ (2e)    \\
        \end{align}
        """ % (Lj_H*1E9, Cs_F*1E15, Ej/1E3, Ec,
               Omega_MHz / (2*pi)*1E-3, sqrt(Lj_H/Cs_F),
               Phi_ZPF/fluxQ, Q_ZPF / (2*e_el))

        from IPython.display import display, Math
        display(Math(text))

        return text

    @staticmethod
    def charge_dispersion_approx(m, Ec, Ej):
        """
        Use Eq. (2.5) of Koch's paper.

        Asymptotic (large Ej/Ec) charge dispersion of transmon level ``m``;
        see Koch et al. on the transmon qubit.
        """
        return sqrt(2./pi) * Ec * (-1.)**(m) * 2.**(4.*m+5.) * exp(-sqrt(8*Ej/Ec)) * (Ej/(2*Ec))**(m/2.+3./4.)\
            / math.factorial(m)
| 4,055 | 31.448 | 111 | py |
pyEPR | pyEPR-master/pyEPR/calcs/basic.py | """
Basic calculations that apply in general .
"""
import numpy as np
from numpy import sqrt
from .. import logger
class CalcsBasic():
    """Basic, model-independent energy-participation-ratio calculations."""

    @staticmethod
    def epr_to_zpf(Pmj, SJ, Ω, EJ):
        r'''
        Convert energy-participation ratios to reduced zero-point fluctuations.

        Arguments, All as matrices (numpy arrays):
            :Pnj: MxJ energy-participation-ratio matrix, p_mj
            :SJ: MxJ sign matrix, s_mj
            :Ω: MxM diagonal matrix of frequencies (GHz, not radians, diagonal)
            :EJ: JxJ diagonal matrix matrix of Josephson energies (in same units as Om)

        RETURNS:
            reduced zpf (in units of :math:`\phi_0`)
        '''
        (Pmj, SJ, Ω, EJ) = map(np.array, (Pmj, SJ, Ω, EJ))

        # Negative participations are unphysical and indicate that the
        # eigenmode simulation has not converged.
        if (Pmj < 0).any():
            print('BAD!')
            logger.error(f"""The simulation is not converged!!! \N{nauseated face}
            Some of the energy participations are less than zero.
            This happens when some participations are tiny 10^-8 or less
            or when not enough passes have been taken.  The Pmj matrix is
            {Pmj}""")

        # Technically, there the equation is hbar omega / 2J, but here we assume
        # that the hbar is absorbed in the units of omega, and omega and Ej have the same units.
        # PHI=np.zeros((3,3))
        # for m in range(3):
        #     for j in range(3):
        #         PHI[m,j] = SJ[m,j]*sqrt(PJ[m,j]*Om[m,m]/(2.*EJ[j,j]))
        return SJ * sqrt(0.5 * Ω @ Pmj @ np.linalg.inv(EJ))

    @staticmethod
    def epr_cap_to_nzpf(Pmj_cap, SJ, Ω, Ec):
        """
        Experimental. To be tested
        Charge-analog of epr_to_zpf using capacitive participations.
        NOTE(review): the local name EJ actually receives Ec here; the /(4*4)
        factor presumably converts to charge ZPF units — confirm before relying on it.
        """
        (Pmj, SJ, Ω, EJ) = map(np.array, (Pmj_cap, SJ, Ω, Ec))
        return SJ * sqrt(Ω @ Pmj @ np.linalg.inv(Ec) /(4*4))
| 1,691 | 33.530612 | 96 | py |
pyEPR | pyEPR-master/pyEPR/toolbox/plotting.py | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 25 19:30:12 2017
Plotting snippets and useful functions
@author: Zlatko K. Minev
"""
from __future__ import absolute_import, division, print_function
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.axes import Axes
from matplotlib.colors import rgb2hex
from .. import config
default_colormap = lambda: getattr(mpl.cm, config.plotting.default_color_map)
# ==============================================================================
# Plotting - MPL basics
# ==============================================================================
def mpl_dpi(dpi=200):
    '''
    Set the matplotlib resolution (dots per inch) for both on-screen
    figures and saved figures.
    '''
    for rc_key in ('figure.dpi', 'savefig.dpi'):
        mpl.rcParams[rc_key] = dpi
def plt_cla(ax: Axes):
    '''
    Remove every plotted artist (lines, collections, patches, images, texts)
    and the legend from the given axis.

    ax : matplotlib axis (falls back to plt.gca() when None)
    '''
    ax = plt.gca() if ax is None else ax
    artists = ax.lines + ax.collections + ax.patches + ax.images + ax.texts
    for artist in artists:
        artist.remove()
    if ax.legend_:
        ax.legend_.remove()
def legend_translucent(ax: Axes, values=None, loc=0, alpha=0.5, leg_kw=None):
    '''
    Add a fancybox legend with a translucent frame to *ax*.

    values : extra positional args forwarded to ax.legend, e.g.
             values = [ ["%.2f" %k for k in RES] ]
    loc    : legend location code
    alpha  : frame transparency
    leg_kw : extra keyword args for ax.legend, e.g.
             leg_kw = dict(fancybox=True, fontsize=9, framealpha=0.5, ncol=1)

    Returns the legend, or None when the axis has nothing to label.

    Fix: mutable default arguments ([] and {}) replaced with None sentinels
    so call sites can never share/mutate the defaults.
    '''
    values = [] if values is None else values
    leg_kw = {} if leg_kw is None else leg_kw
    if ax.get_legend_handles_labels() == ([], []):
        return None
    leg = ax.legend(*values, loc=loc, fancybox=True, **leg_kw)
    leg.get_frame().set_alpha(alpha)
    return leg
#################################################################################
# Color cycles
def get_last_color(ax: Axes):
    '''
    Return the color of the most recently plotted line on *ax*.

    use:
        datai.plot(label=name, marker='o')
        data.plot(label=name, marker='o', c=get_last_color(plt.gca()))
    '''
    last_line = ax.lines[-1]
    return last_line.get_color()
def get_next_color(ax: Axes):
    '''
    Return the next color in the axis' property cycle.

    To reset color cycle
        ax.set_prop_cycle(None)
    USE
        from cycler import cycler
        ax.set_prop_cycle(cycler('color', COLORS1) )  # COLORS1 = ['#e41a1c','#377eb8','#4daf4a','#984ea3','#ff7f00']
    get_color_cycle(3) ['c', 'm', 'y', 'k'];  # from cycler import cycler
    See also get_color_cycle

    NOTE(review): relies on the private Matplotlib attribute
    `ax._get_lines.prop_cycler`, which newer Matplotlib releases have
    changed/removed — verify against the pinned matplotlib version.
    '''
    return next(ax._get_lines.prop_cycler)['color']
def get_color_cycle(n, colormap=None, start=0., stop=1., format='hex'):
    '''
    Return n colors sampled evenly from *colormap* between start and stop.

    format : only 'hex' is supported (hex color strings).
    See also get_next_color.

    Fix: previously an unsupported `format` fell through to `return colors`
    with `colors` unbound, raising a confusing NameError; now raises a
    descriptive ValueError instead.
    '''
    colormap = colormap or default_colormap()
    pts = np.linspace(start, stop, n)
    if format == 'hex':
        return [rgb2hex(colormap(pt)) for pt in pts]
    raise ValueError(f"Unsupported color format: {format!r} (only 'hex' is supported)")
def cmap_discrete(n, cmap_kw=None):
    ''' Discrete colormap: n+1 evenly spaced colors from a colormap.

    cmap_kw = dict(colormap = plt.cm.gist_earth, start = 0.05, stop = .95)

    cmap_kw
    -----------------------
    helix = True, Allows us to instead call helix from here

    Fix: the default `cmap_kw={}` was a shared mutable default AND the dict
    was mutated via `.pop('helix')`, silently modifying the caller's dict.
    We now work on a copy.
    '''
    opts = dict(cmap_kw or {})  # copy so the caller's dict is never mutated
    if opts.pop('helix', False):
        return cmap_discrete_CubeHelix(n, helix_kw=opts)
    cmap_KW = dict(colormap=default_colormap(),
                   start=0.05, stop=.95)
    cmap_KW.update(opts)
    return get_color_cycle(n+1, **cmap_KW)
def cmap_discrete_CubeHelix(n, helix_kw={}):
    '''
    Discrete CubeHelix colormap with n colors.

    https://github.com/jiffyclub/palettable/blob/master/demo/Cubehelix%20Demo.ipynb
    cube.show_discrete_image()

    Requires palettable (imported lazily so the module works without it).
    NOTE(review): `helix_kw={}` is a shared mutable default; it is only read
    here, but callers should not mutate it.
    '''
    from palettable import cubehelix  # pylint: disable=import-error
    helix_KW = dict(start_hue=240., end_hue=-300., min_sat=1., max_sat=2.5,
                    min_light=0.3, max_light=0.8, gamma=.9)
    helix_KW.update(helix_kw)
    cube = cubehelix.Cubehelix.make(n=n, **helix_KW)
    return cube.mpl_colors
def xarr_heatmap(fg, title=None, kwheat=None, fmt=('%.3f', '%.2f'), fig=None):
    '''
    Render a 2-D xarray DataArray as an annotated seaborn heatmap.

    fg     : 2-D xarray DataArray
    fmt    : (row-index format, column-index format) for tick labels
    kwheat : extra keyword args for sns.heatmap

    Needs seaborn and xarray.

    Fixes: `fig == None` replaced with the correct identity test `fig is None`;
    mutable default `kwheat={}` replaced with a None sentinel.
    '''
    kwheat = {} if kwheat is None else kwheat
    fig = plt.figure() if fig is None else fig
    df = fg.to_pandas()
    # format indices
    df.index = [float(fmt[0] % x) for x in df.index]
    df.columns = [float(fmt[1] % x) for x in df.columns]
    import seaborn as sns
    ax = sns.heatmap(df, annot=True, **kwheat)
    ax.invert_yaxis()
    ax.set_title(title)
    ax.set_xlabel(fg.dims[1])
    ax.set_ylabel(fg.dims[0])
__all__ = ['legend_translucent', 'cmap_discrete',
'get_color_cycle', 'xarr_heatmap']
"""
Jupyter widgets:
--------------------------
Not seeing widgets: https://github.com/tqdm/tqdm/issues/451
conda update tqdm
# This might already work, will require a lot of updates, if not then do:
conda install nodejs
jupyter labextension install @jupyter-widgets/jupyterlab-manager
jupyter nbextension enable --py widgetsnbextension
"""
| 4,711 | 26.717647 | 117 | py |
pyEPR | pyEPR-master/pyEPR/toolbox/_logging.py | import logging
from .. import config
def set_up_logger(logger):
    """Attach a stream handler configured from `config.log` to *logger*.

    Stores the handler/formatter on the logger (c_handler / c_format) so
    callers can tweak them later, and disables propagation because Jupyter
    already installs a stream handler on the root logger (avoids duplicate
    console output).
    """
    handler = logging.StreamHandler()
    # Jupyter notebooks already has a stream handler on the default log,
    # Do not propagate upstream to the root logger.
    # https://stackoverflow.com/questions/31403679/python-logging-module-duplicated-console-output-ipython-notebook-qtconsole
    logger.propagate = False
    formatter = logging.Formatter(config.log.format, config.log.datefmt)
    handler.setFormatter(formatter)
    logger.c_handler = handler
    logger.c_format = formatter
    logger.addHandler(handler)
    logger.setLevel(getattr(logging, config.log.level))
| 655 | 31.8 | 125 | py |
pyEPR | pyEPR-master/pyEPR/toolbox/pythonic.py | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 04 09:32:46 2017
@author: Zlatko K. Minev, pyEPR ream
"""
from __future__ import division, print_function, absolute_import # Python 2.7 and 3 compatibility
import platform # Which OS we run
import numpy as np
import pandas as pd
import warnings
import matplotlib.pyplot as plt
# Constants
from collections import OrderedDict
from ..calcs.constants import Planck, elementary_charge, epsilon_0, pi, π, ħ, ϕ0, e_el
from .. import Dict
# ==============================================================================
# Utility functions
# ==============================================================================
def combinekw(kw1, kw2):
    '''Return a new dict: a copy of kw1 with kw2's entries overriding it.
    Neither input is modified.'''
    merged = kw1.copy()
    merged.update(kw2)
    return merged
def isint(value):
    """Return True if *value* can be converted with int(), else False.

    Fix: also catches TypeError, so non-numeric objects such as None or
    lists return False instead of raising.
    """
    try:
        int(value)
        return True
    except (ValueError, TypeError):
        return False
def isfloat(value):
    """Return True if *value* can be converted with float(), else False.

    Fix: also catches TypeError, so non-numeric objects such as None or
    lists return False instead of raising.
    """
    try:
        float(value)
        return True
    except (ValueError, TypeError):
        return False
def floor_10(x):
    '''Round x down to the nearest lower power of 10 (e.g. 57 -> 10).'''
    exponent = np.floor(np.log10(x))
    return 10.**exponent
def fact(n):
    '''Factorial n!, computed iteratively.

    Returns 1 for any n <= 1 (including negatives), matching the original
    recursive behavior.
    Fix: iteration instead of recursion avoids RecursionError for large n.
    '''
    if n <= 1:
        return 1
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result
def nck(n, k):
    '''Binomial coefficient "n choose k".

    Fix: uses math.comb for exact integer arithmetic; the previous
    fact(n)/(fact(k)*fact(n-k)) formulation lost precision in float
    division for large n. Returns an exact int (== the old float value
    for all valid inputs).
    '''
    from math import comb
    return comb(n, k)
def get_above_diagonal(M):
    ''' Return the strictly-upper-triangular entries of M as a 1-D array.
    Assumes square matrix.
    '''
    rows, cols = np.triu_indices(M.shape[0], k=1)
    return M[rows, cols]
def df_find_index(s: pd.Series, find, degree=2, ax=False):
    """
    Given a Pandas Series such as of freq with index Lj,
    find the Lj that would give the right frequency.

    Inside the value range: inverts the series (values become the index)
    and linearly interpolates, returning (index_value, inverted_series).
    Outside the range: fits a degree-`degree` polynomial to the inverted
    relation and extrapolates, returning (index_value, np.poly1d).

    NOTE(review): uses np.NaN, an alias removed in NumPy 2.0 — confirm the
    pinned numpy version or migrate to np.nan.
    """
    max_ = max(s.index.values)
    min_ = min(s.index.values)
    if find <= max_ and find >= min_:
        # interpolate on the inverted series (value -> index)
        z = pd.Series(list(s.index.values)+[np.NaN], index=list(s) + [find])
        z = z.sort_index()
        z = z.interpolate()
        return z[find], z
    else:
        print('extrapolating')
        z = pd.Series(list(s.index.values), index=list(s))
        p = df_extrapolate(z, degree=degree, ax=False)
        value = p(find)
        return value, p
def df_interpolate_value(s: pd.Series, find, ax=False, method='index'):
    """
    Given a Pandas Series such as of freq with index Lj,
    find the freq that would correspond to Lj given a value not in the index.

    Returns (interpolated_value, augmented_sorted_series).

    Fix: np.NaN was removed in NumPy 2.0; use the canonical np.nan.
    (`ax` is accepted for signature compatibility but unused, as before.)
    """
    z = pd.Series(list(s) + [np.nan], index=list(s.index.values)+[find])
    z = z.sort_index()
    z = z.interpolate(method=method)
    return z[find], z
def df_extrapolate(s, degree=2, ax=False, rng_scale=2.):
    """
    Fit a degree-`degree` polynomial to a pandas Series (index -> values)
    and return the fitted np.poly1d.

    If `ax` is truthy the fit is plotted over the index range extended by
    `rng_scale` on each side; ax=True plots on plt.gca().
    """
    z = np.polyfit(s.index.values, s.values, degree)
    p = np.poly1d(z)
    if ax:
        if ax is True:
            ax = plt.gca()
        max_ = max(s.index.values)
        min_ = min(s.index.values)
        rng = max_ - min_
        xp = np.linspace(min_-rng_scale*rng, max_+rng_scale*rng, 100)
        ys = p(xp)
        ax.plot(xp, ys)
        s.plot(marker='o', ax=ax)
    return p
def df_regress_value(s: pd.Series, index, degree=2, ax=False, method='index',
                     rng_scale=2.):
    """
    For a pandas Series: return the value at `index`, interpolating when
    `index` lies inside the index range and extrapolating with a
    degree-`degree` polynomial fit otherwise.

    Delegates to df_interpolate_value / df_extrapolate.
    """
    max_ = max(s.index.values)
    min_ = min(s.index.values)
    if index > max_ or index < min_:
        # print('extrapolate')
        p = df_extrapolate(s, degree=degree, ax=ax, rng_scale=rng_scale)
        value = p(index)
    else:
        value = df_interpolate_value(s, index, method=method)[0]
    return value
def series_of_1D_dict_to_multi_df(Uj_ind: pd.Series):
    """Expand a Series whose values are flat dicts into a DataFrame:
    one row per Series entry (index names preserved), one column per dict key."""
    frame = pd.DataFrame(dict(Uj_ind.items())).transpose()
    frame.index.set_names(Uj_ind.index.names, inplace=True)
    return frame
def sort_df_col(df):
    '''Return *df* with its columns sorted by column label (lexicographic).

    Fix: everything after the original first `return` (a numeric-aware sort
    that the author had flagged as "Buggy code") was unreachable dead code
    and has been removed; the live behavior is unchanged.
    '''
    return df.sort_index(axis=1)
def sort_Series_idx(sr):
    ''' Reindex a Series whose labels are numeric strings so they sort in
    numerical (not lexicographic) order; returned unchanged when the labels
    are not all int-like or all float-like. '''
    labels = sr.index
    if np.all(labels.map(isint)):
        ordered = labels.astype(int).sort_values().astype(str)
        return sr[ordered]
    if np.all(labels.map(isfloat)):
        ordered = labels.astype(float).sort_values().astype(str)
        return sr[ordered]
    return sr
def get_instance_vars(obj, Forbidden=[]):
    """
    Return a dict of obj's public, non-callable attributes, skipping any
    names listed in Forbidden and any addict.Dict attributes.

    NOTE(review): `Forbidden=[]` is a shared mutable default; it is only
    read here, but callers should not mutate it.
    """
    VARS = {}
    for v in dir(obj):
        if not (v.startswith('__') or v.startswith('_')):
            if not callable(getattr(obj, v)):
                # Added for using addict.Dict which is not callable.
                if not isinstance(getattr(obj, v), Dict):
                    if not (v in Forbidden):
                        VARS[v] = getattr(obj, v)
    return VARS
def deprecated(func):
    """Decorator which marks a function as deprecated.

    Emits a DeprecationWarning each time the wrapped function is called,
    then forwards to the original. See StackExchange.

    Fix: uses functools.wraps instead of manually copying __name__/__doc__/
    __dict__; wraps additionally preserves __module__, __qualname__ and sets
    __wrapped__, which the manual copy missed.
    """
    import functools

    @functools.wraps(func)
    def newFunc(*args, **kwargs):
        warnings.simplefilter('always', DeprecationWarning)  # turn off filter
        warnings.warn("Call to deprecated function {}.".format(
            func.__name__), category=DeprecationWarning, stacklevel=2)
        warnings.simplefilter('default', DeprecationWarning)  # reset filter
        return func(*args, **kwargs)
    return newFunc
def info_str_platform():
    # Build a human-readable report of the OS and Python interpreter
    # (useful for diagnostics / bug reports).
    return '''
 System platform information:
    system : %s
    node : %s
    release : %s
    machine : %s
    processor: %s
    summary : %s
    version : %s
 Python platform information:
    version : %s (implem: %s)
    compiler : %s
    ''' % (
        platform.system(),
        platform.node(),
        platform.release(),
        platform.machine(),
        platform.processor(),
        platform.platform(),
        platform.version(),
        platform.python_version(), platform.python_implementation(),
        platform.python_compiler())
# ==============================================================================
# Matrix
# ==============================================================================
def print_matrix(M, frmt="{:7.2f}", append_row=""):
    """Pretty-print a matrix, one space-prefixed row per line, each entry
    rendered with `frmt`; `append_row` is appended to every row.

    Fix: np.mat (the matrix class) is deprecated and removed in NumPy 2.0;
    np.atleast_2d(np.asarray(...)) gives the same row-wise iteration,
    including promoting a 1-D input to a single row.
    """
    M = np.atleast_2d(np.asarray(M))
    for row in M:
        print(' ', end='')
        for chi in row:
            print(frmt.format(chi), end='')
        print(append_row+"\n", end='')
def divide_diagonal_by_2(CHI0, div_fact=2.):
    '''Return a copy of CHI0 whose diagonal entries are divided by div_fact
    (off-diagonal entries untouched; the input array is not modified).'''
    result = CHI0.copy()
    result[np.diag_indices_from(result)] /= div_fact
    return result
def print_NoNewLine(text):
    '''Print *text* without appending a trailing newline.'''
    print(text, end='')
def print_color(text, style=0, fg=24, bg=43, newline=True):
    '''Print *text* wrapped in an ANSI escape sequence.
    For newer, see pc (or Print_colors)
    style 0..8; fg 30..38; bg 40..48

    Fix: the local variable `format` shadowed the builtin of the same name;
    renamed to `ansi_spec`.
    '''
    ansi_spec = ';'.join([str(style), str(fg), str(bg)])
    s = '\x1b[%sm %s \x1b[0m' % (ansi_spec, text)
    if newline:
        print(s)
    else:
        print(s, end='')
class Print_colors:
    '''Colors class:reset all colors with colors.reset; two
    sub classes fg for foreground
    and bg for background; use as colors.subclass.colorname.
    i.e. colors.fg.red or colors.bg.green also, the generic bold, disable,
    underline, reverse, strike through,
    and invisible work with the main class i.e. colors.bold

    https://www.geeksforgeeks.org/print-colors-python-terminal/

    Example use:

    ..codeblock python

        print(colors.bg.green, "adgd", colors.fg.red, "dsgdsg")
        print(colors.bg.lightgrey, "dsgsd", colors.fg.red, "sdgsd")
    '''
    # Text-attribute escape codes
    reset = '\033[0m'
    bold = '\033[01m'
    disable = '\033[02m'
    underline = '\033[04m'
    reverse = '\033[07m'
    strikethrough = '\033[09m'
    invisible = '\033[08m'

    class fg:
        # Foreground (text) colors
        black = '\033[30m'
        red = '\033[31m'
        green = '\033[32m'
        orange = '\033[33m'
        blue = '\033[34m'
        purple = '\033[35m'
        cyan = '\033[36m'
        lightgrey = '\033[37m'
        darkgrey = '\033[90m'
        lightred = '\033[91m'
        lightgreen = '\033[92m'
        yellow = '\033[93m'
        lightblue = '\033[94m'
        pink = '\033[95m'
        lightcyan = '\033[96m'

    class bg:
        # Background colors
        black = '\033[40m'
        red = '\033[41m'
        green = '\033[42m'
        orange = '\033[43m'
        blue = '\033[44m'
        purple = '\033[45m'
        cyan = '\033[46m'
        lightgrey = '\033[47m'
pc = Print_colors
# ==============================================================================
# %% Dataframe
# ==============================================================================
def DataFrame_col_diff(PS, indx=0):
    ''' Check whether the columns of a dataframe are equal; returns a
    boolean Series, True on rows where adjacent columns differ.

    USE:
        PS[DataFrame_col_diff(PS)]
    '''
    pairwise_eq = [PS.iloc[:, col] == PS.iloc[:, col + 1]
                   for col in range(PS.shape[1] - 1)]
    if len(pairwise_eq) == 1:
        return np.logical_not(pairwise_eq[0])
    return np.logical_not(np.logical_and.reduce(pairwise_eq))  # pylint: disable=no-member
def DataFrame_display_side_by_side(*args, do_display=True):
    '''
    Render several DataFrames side by side as inline HTML (Jupyter) and
    return the combined HTML string.

    from pyEPR.toolbox.pythonic import display_dfs
    https://stackoverflow.com/questions/38783027/jupyter-notebook-display-two-pandas-tables-side-by-side

    NOTE(review): IPython is imported unconditionally, so even with
    do_display=False this requires IPython to be installed.
    '''
    from IPython.display import display_html
    html_str = ''
    for df in args:
        html_str += df.to_html()
    # display:inline makes the tables flow next to each other
    text = html_str.replace('table', 'table style="display:inline"')
    if do_display:
        display_html(text, raw=True)
    return text
display_dfs = DataFrame_display_side_by_side
def xarray_unravel_levels(arr, names, my_convert=lambda x: x):
    ''' Takes in nested dict of dict of dataframes and recursively stacks
    them into a single xarray DataArray.

    names : names of lists; you dont have to include the last two dataframe
            columns & rows, but you can to override them
    my_convert : applied to any leaf that is not a DataFrame/dict/DataArray

    requires xarray
    '''
    import xarray  # pylint: disable=import-error
    if type(arr) == pd.DataFrame:
        return xarray.DataArray(arr, dims=None if len(names) == 0 else names)
    elif type(arr) in [OrderedDict, dict]:
        # Concatenate children along a new index named after the first level.
        return xarray.concat([xarray_unravel_levels(item, names[1:]) for k, item in arr.items()], pd.Index(arr.keys(), name=names[0]))
    elif type(arr) == xarray.DataArray:
        return arr
    else:
        return my_convert(arr)
def robust_percentile(calc_data, ROBUST_PERCENTILE=2.):
    '''
    Analysis helper: return (vmin, vmax), the ROBUST_PERCENTILE-th and
    (100 - ROBUST_PERCENTILE)-th percentiles of the data — an
    outlier-resistant value range.
    '''
    lower = np.percentile(calc_data, ROBUST_PERCENTILE)
    upper = np.percentile(calc_data, 100 - ROBUST_PERCENTILE)
    return lower, upper
__all__ = ['fact', 'nck', 'combinekw',
'divide_diagonal_by_2', 'df_find_index',
'sort_df_col', 'sort_Series_idx',
'print_matrix', 'print_NoNewLine',
'DataFrame_col_diff', 'xarray_unravel_levels', 'robust_percentile']
| 11,156 | 27.681234 | 134 | py |
pyEPR | pyEPR-master/pyEPR/toolbox/__init__.py | 0 | 0 | 0 | py | |
LogConcComp | LogConcComp-main/src/LCD.py | import numpy as np
from numpy import linalg as la
from scipy.spatial import ConvexHull
import scipy.stats as st
from gurobipy import *
import copy
import time
from functools import reduce
from numbers import Number
from multiprocessing import Pool as pyPool
import multiprocessing
import io
import contextlib
import numba as nb
import warnings
warnings.filterwarnings('ignore')
import QMC
from utils import *
import json
from tqdm.notebook import tnrange
from tqdm import tqdm
import pandas as pd
import matplotlib.pyplot as plt
from gurobipy import *
def randb(n, d, rng=None, random_state=None):
    """Draw n points uniformly from the unit d-ball.

    Gaussian directions are normalized to the sphere, then scaled by
    U^(1/d) radii. `rng` takes precedence over `random_state`.
    """
    if rng is None:
        rng = np.random.RandomState(random_state)
    points = rng.randn(n, d)
    row_norms = np.apply_along_axis(la.norm, 1, points)
    points = points / row_norms[:, None]
    radii = rng.rand(n, 1) ** (1 / d)
    return points * radii
@nb.njit(cache=True)
def ab_to_phi(X,a,b):
    # Evaluate the piecewise-linear convex function phi(x) = max_k (a_k @ x + b_k)
    # at every row of X. `np_apply_along_axis` is the numba-friendly helper
    # from utils (np.max along axis 1 is not supported inside njit).
    tmp = X@a.T+b
    return np_apply_along_axis(np.max, 1, tmp)
def convex_phi_from_eqns(equations, X):
    """Evaluate, at each row of X, the piecewise-linear convex function
    encoded by Qhull facet equations [normal | offset] rows."""
    last_normal = equations[:, -2]
    a = -equations[:, :-2] / last_normal.reshape(-1, 1)
    b = -equations[:, -1] / last_normal
    return ab_to_phi(X, a, b)
def ab_to_nu(X, a, b):
    """For each row x of X, find the affine piece k maximizing a_k @ x + b_k
    and return the row [-a_k, -b_k] (stacked into an (n, d+1) array)."""
    scores = X @ a.T + b
    winners = scores.argmax(axis=1)
    stacked = np.hstack([a[winners], b[winners].reshape(-1, 1)])
    return -stacked
@nb.njit(parallel=True,cache=True)
def feas_from_cvh(equations, X,threshold=1e10):
    # Membership test: for each row x of X, return True iff x lies inside the
    # convex hull described by Qhull facet `equations` (all facet values <= 0).
    # Work is chunked when nsimplex*N*d exceeds `threshold` to bound memory.
    # `np_apply_along_axis` and `split` come from utils (numba-compatible helpers).
    N,d = X.shape
    nsimplex = equations.shape[0]
    _X = np.hstack((X,np.ones((N,1))))  # homogeneous coordinates
    feas_list = np.full(N,False)
    if nsimplex*N*d <= threshold:
        feas_list = np_apply_along_axis(np.max,0,equations@_X.T) <= 0
        return list(feas_list)
    else:
        B = min(math.ceil(nsimplex*N*d/threshold),N)
        split_indices = split(N,B)
        start, end = 0, 0
        for ell in range(B):
            start = end
            end = split_indices[ell]
            x = _X[start:end]
            feas_list[start:end] = np_apply_along_axis(np.max,0,equations@x.T) <= 0
        return list(feas_list)
def func_piecewise_max_subg(f, g, X, w, a, b, thres=1e10, whole=False):
    """Evaluate f(max_k a_k@x + b_k) at every row of X and accumulate the
    weighted subgradients w.r.t. (a, b).

    f, g : scalar transform and its derivative, applied elementwise
    X    : (n, d) points;  w : (n,) weights
    a, b : (K, d) slopes and (K,) intercepts of the affine pieces

    Returns (values, subg_a, subg_b) where subg_a[k] = sum over points whose
    max is attained at piece k of w_i * g(phi_i) * x_i (and likewise subg_b).

    Large problems (n*K*(d+1) > thres) are processed in chunks; `split` and
    `math` are provided by the module-level wildcard imports.

    BUGFIX: the chunked branch recursed into the nonexistent name
    `func_piecewise_max_subg_weights`, raising NameError; it now recurses
    into this function with whole=True.
    """
    n, d = X.shape
    K, _ = a.shape
    if whole or n*K*(d+1) <= thres:
        tmp = X@a.T+b
        values = tmp.max(axis=1)
        max_idx = tmp.argmax(axis=1)
        subg_a = np.zeros_like(a)
        subg_b = np.zeros_like(b)
        # Scatter-add each point's contribution onto its maximizing piece.
        np.add.at(subg_a, max_idx, X*w.reshape(-1, 1)*g(values).reshape(-1, 1))
        np.add.at(subg_b, max_idx, w*g(values))
        values = f(values)
        return values, subg_a, subg_b
    else:
        B = min(math.ceil(n*K*(d+1)/thres), n)
        values = np.zeros(n)
        subg_a = np.zeros_like(a)
        subg_b = np.zeros_like(b)
        split_indices = split(n, B)
        start, end = 0, 0
        for ell in range(B):
            start = end
            end = split_indices[ell]
            values[start:end], subg_a_tmp, subg_b_tmp = func_piecewise_max_subg(
                f, g, X[start:end], w[start:end], a, b, thres=thres, whole=True)
            subg_a += subg_a_tmp
            subg_b += subg_b_tmp
        return values, subg_a, subg_b
# JIT-compiled scalar transforms used as (f, g) pairs in func_piecewise_max_subg:
# identity/grad_identity for the linear data term, expneg/grad_expneg for exp(-x).
identity = nb.njit(lambda x: x)
grad_identity = nb.njit(lambda x: np.ones_like(x))
expneg = nb.njit(lambda x: np.exp(-x))
grad_expneg = nb.njit(lambda x: -np.exp(-x))
@nb.njit(cache=True)
def calcJd(phi_d,sort=True,eps= 1e-3,factorial = np.array([1,1,2,6,24,120,720,5040,40320,362880,3628800,39916800,479001600])):
    # Divided-difference evaluation of the exponential integral J_d over a
    # simplex with vertex values phi_d. `sort=True` asserts the input is
    # already sorted (NOTE(review): the flag name is inverted — not sort
    # triggers sorting); recursive calls operate on slices of sorted input,
    # which remain sorted. When the values are nearly equal (< eps spread)
    # the divided difference is ill-conditioned, so a 3rd-order Taylor
    # expansion around the mean is used instead (`factorial` is a
    # precomputed table; as a default argument it is shared, read-only).
    if len(phi_d) ==1:
        return np.exp(phi_d[0])
    if not sort:
        phi_d = np.sort(phi_d)
    d = len(phi_d) - 1
    if phi_d[-1]-phi_d[0] < eps:
        phi_bar = np.mean(phi_d)
        phi_demean = phi_d - phi_bar
        sumPow2 = np.power(phi_demean,2).sum()
        sumPow3 = np.power(phi_demean,3).sum()
        return np.exp(phi_bar)*(1/factorial[d]+sumPow2/(2*factorial[d+2])+sumPow3/(3*factorial[d+3]))
    else:
        return (calcJd(phi_d[1:])-calcJd(phi_d[:-1]))/(phi_d[-1]-phi_d[0])
def calcExactIntegral(X, phi, grad=False, hypers=False, njobs=0, verbose=0):
    """Exactly integrate exp(-phi_hat) over conv(X), where phi_hat is the
    lower convex envelope interpolating (X, phi).

    grad   : also return the subgradient of the log-likelihood w.r.t. phi
    hypers : also return the number of active (lower) simplices
    njobs  : >0 parallelizes the per-simplex work over a pool
    Exactly one of grad/hypers may be requested.

    BUGFIX: the `if hypers:` returns were dead code — they sat after an
    if/else pair in which both branches returned, so hypers=True silently
    behaved like hypers=False. The hypers checks are now reachable. The
    gradient path with njobs>0 also reused the pool after its `with` block
    had closed it; the pool work now stays inside the context.

    NOTE(review): `Pool` is expected from the wildcard imports (presumably a
    pathos-style pool, since lambdas are mapped) — confirm against utils.
    """
    if type(phi) == list:
        phi = np.array(phi)
    n = X.shape[0]
    assert grad + hypers <= 1
    Xphi = np.concatenate([X, phi.reshape(-1, 1)], axis=1)
    if la.norm(phi-phi[0]) <= 1e-8:
        # phi is constant: the envelope is flat, integral = vol(conv(X)) * exp(-phi0).
        cvh = ConvexHull(X)
        Delta = cvh.volume
        if grad:
            return Delta*np.exp(-phi[0]), np.ones(n)*Delta/n
        if hypers:
            return Delta*np.exp(-phi[0]), len(np.unique(cvh.simplices, axis=0))
        return Delta*np.exp(-phi[0])
    try:
        hull = ConvexHull(Xphi)
    except Exception:
        # Degenerate input: joggle to recover a full-dimensional hull.
        hull = ConvexHull(Xphi, qhull_options='QJ')
    Xphi[:, -1] = 1
    # Lower facets (negative last-coordinate normal) form the convex envelope.
    active_simplices = hull.simplices[hull.equations[:, -2] < 0]
    if verbose:
        print(active_simplices.shape)
    if njobs > 0:
        with Pool(njobs) as p:
            detlist = np.array(list(p.map(
                lambda simplex: (np.abs(la.det(Xphi[simplex]))), active_simplices)))
            Jdlist = np.array(list(p.map(
                lambda simplex: calcJd(-phi[simplex], sort=False), active_simplices)))
            if grad:
                subgrad = np.ones(n)/n
                for i, simplex in enumerate(active_simplices):
                    tmp = -phi[simplex]
                    deti = detlist[i]
                    Jdylist = np.array(list(p.map(
                        lambda idx: calcJd(np.append(-tmp, -phi[idx]), sort=False), simplex)))
                    subgrad[simplex] -= deti * Jdylist
                return (detlist*Jdlist).sum(), subgrad
    else:
        detlist = np.array(list(map(
            lambda simplex: (np.abs(la.det(Xphi[simplex]))), active_simplices)))
        Jdlist = np.array(list(map(
            lambda simplex: calcJd(-phi[simplex], sort=False), active_simplices)))
        if grad:
            subgrad = np.ones(n)/n
            for i, simplex in enumerate(active_simplices):
                tmp = -phi[simplex]
                deti = detlist[i]
                Jdylist = np.array(list(map(
                    lambda idx: calcJd(np.append(-tmp, -phi[idx]), sort=False), simplex)))
                subgrad[simplex] -= deti * Jdylist
            return (detlist*Jdlist).sum(), subgrad
    if hypers:
        return (detlist*Jdlist).sum(), len(np.unique(active_simplices, axis=0))
    return (detlist*Jdlist).sum()
class Iterate:
    """One optimization iterate: either hyperplane parameters (a, b) of a
    piecewise-linear convex function, or the vector phi of its values at
    the sample points. Inputs are deep-copied so later mutation by the
    caller cannot change the iterate."""

    def __init__(self, a=None, b=None, phi=None):
        # Either the pair (a, b) or phi must be supplied.
        assert (a is not None and b is not None) or (phi is not None)
        self.a = copy.deepcopy(a)
        self.b = copy.deepcopy(b)
        self.phi = copy.deepcopy(phi)

    def calcExactIntegral(self, sample):
        """Exact integral of exp(-phi) over conv(sample.X); phi is derived
        from (a, b) first when not already set."""
        if self.phi is None:
            # BUGFIX: this called the undefined name `_ab_to_phi`
            # (NameError at runtime); the module-level helper is `ab_to_phi`.
            self.phi = ab_to_phi(sample.X, self.a, self.b)
        self.exact_integral = sample.calcExactIntegral(self.phi, njobs=24)
        return self.exact_integral
class Sample:
    """A sample of n points in R^d drawn from a known density (or supplied
    directly via X), plus helpers for building feasible integration grids
    over its convex hull.

    Fixes:
    - calcExactIntegral delegated with positional args, so `njobs` landed
      in the module function's `hypers` slot and `verbose` in `njobs`;
      keyword arguments are now used.
    - np.product (removed in NumPy 2.0) replaced with np.prod.
    """

    # Default per-dimension grid resolutions, keyed by dimension d.
    N0dict = {2: 100, 3: 50, 4: 24, 5: 12, 6: 8}
    N0dict_sparse = {2: 50, 3: 24, 4: 12, 5: 8, 6: 6}

    def __init__(self, X=None, n=100, d=2, dist="normal", random_state=42, **kwargs):
        self.random = np.random.RandomState(random_state)
        if X is not None:
            self.X = X
            self.n, self.d = X.shape
        else:
            self.X, self.true_phi = self.generate_X(n, d, dist, **kwargs)
            self.n = n
            self.d = d
        self.dist = dist
        self.sample_str = '%d_%d_%s_seed%d' % (self.n, self.d, self.dist, random_state)

    def generate_X(self, n, d, dist="normal", **kwargs):
        """Draw n iid points in R^d from `dist` (coordinates independent,
        except dirichlet) and return (X, true_phi) with true_phi = -log density."""
        if dist == "normal":
            X = self.random.randn(n, d)
            true_phi = -np.sum(st.norm.logpdf(X), axis=1)
        elif dist == "uniform":
            X = self.random.rand(n, d)
            true_phi = np.zeros(n)
        elif dist == "laplace":
            loc = kwargs.get('loc', 0.0)
            scale = kwargs.get('scale', 1.0)
            X = self.random.laplace(loc, scale, size=(n, d))
            true_phi = -np.sum(st.laplace.logpdf(X, loc=loc, scale=scale), axis=1)
        elif dist == "beta":
            a = kwargs.get('a', 2.)
            b = kwargs.get('b', 2.)
            X = self.random.beta(a, b, size=(n, d))
            true_phi = -np.sum(st.beta.logpdf(X, a, b), axis=1)
        elif dist == "dirichlet":
            alpha = kwargs.get('alpha', np.ones(d+1)*2)
            X = self.random.dirichlet(alpha, n)
            true_phi = -np.array([st.dirichlet.logpdf(x, alpha) for x in X])
            # Drop the redundant last coordinate (simplex constraint).
            X = X[:, :-1]
        else:
            raise ValueError("Unknown dist: %s" % dist)
        return X, true_phi

    def generate_feas_grid_mat(self, method="uniform", N=None, N0=None,
                               threshold=1e10, randomized=False, m=1, first=0, rng=None):
        """Build self.grid_mat: (quasi-)Monte-Carlo points inside conv(self.X).

        method : QMC sequence type ("uniform" builds an N0^d lattice)
        N / N0 : target number of feasible points / per-dim resolution
        m, first : for uniform grids, keep every m-th point starting at `first`
        """
        rng = rng if rng is not None else self.random
        self.qmc_seq = QMC.SeqGenerator(method=method, randomized=randomized, seed=1, rng=rng)
        self.grid_method = method
        # Lazily cache hull, bounding box and hull-fill fraction phat.
        if not hasattr(self, "cvh"):
            self.cvh = ConvexHull(self.X)
        if not hasattr(self, "min_d"):
            self.min_d = self.X.min(axis=0)
            self.max_d = self.X.max(axis=0)
        if not hasattr(self, "phat"):
            self.phat = self.cvh.volume / np.prod(self.max_d - self.min_d)
        assert (N is not None or N0 is not None)
        N_total = 0
        if method == "uniform" or N is None:
            if N0 is not None:
                self.N0 = N0
            else:
                self.N0 = self.N0dict.get(self.d, 6)
            N_total = self.N0**self.d
        else:
            # Oversample by 1/phat (plus 10%) so ~N points land in the hull.
            N_total = int(N/self.phat*1.1)
            self.N = N
        grid_mat = self.qmc_seq.rand(N_total, self.d, lb=self.min_d, ub=self.max_d)
        feas_list = feas_from_cvh(self.cvh.equations, grid_mat, threshold)
        assert(len(feas_list) == N_total)
        if N is None:
            self.N = sum(feas_list)
        else:
            self.N = min(N, sum(feas_list))
        self.grid_mat = grid_mat[np.array(feas_list)][:self.N]
        self.Delta = self.cvh.volume
        if method == "uniform" and m != 1:
            self.grid_mat = self.grid_mat[first::m, :]
            self.N = self.grid_mat.shape[0]

    def generate_new_random_feas_grid_mat(self, N1=None, threshold=1e10, rng=None):
        """Return (without storing) up to N1 fresh uniform points inside conv(self.X)."""
        if not hasattr(self, "grid_method") or self.grid_method != "random":
            self.grid_method = "random"
        if not hasattr(self, "cvh"):
            self.cvh = ConvexHull(self.X)
            self.Delta = self.cvh.volume
        if not hasattr(self, "min_d"):
            self.min_d = self.X.min(axis=0)
            self.max_d = self.X.max(axis=0)
        if not hasattr(self, "phat"):
            self.phat = self.cvh.volume / np.prod(self.max_d - self.min_d)
        if rng is None:
            rng = self.random
        N_total = int(N1/self.phat*1.1)
        grid_mat = self.min_d + rng.rand(N_total, self.d)*(self.max_d - self.min_d)
        feas_list = feas_from_cvh(self.cvh.equations, grid_mat, threshold)
        assert(len(feas_list) == N_total)
        N1 = min(N1, sum(feas_list))
        return grid_mat[np.array(feas_list)][:N1]

    def generate_X1(self):
        """Cache self.X1 = [X | 1], the homogeneous-coordinate sample matrix."""
        self.X1 = np.hstack((self.X, np.ones((self.n, 1))))

    def calcExactIntegral(self, phi, grad=False, njobs=0, verbose=0):
        """Exact integral of exp(-envelope of (X, phi)) over conv(X).

        BUGFIX: args are now passed by keyword; previously `njobs` was
        forwarded into the module function's `hypers` parameter.
        """
        return calcExactIntegral(self.X, phi, grad=grad, njobs=njobs, verbose=verbose)
def solve_LPs_gurobi(phi,X1,X_list):
    # For each grid point x in X_list, evaluate the lower convex envelope of
    # (X, phi) at x by solving the LP
    #     min_alpha  phi @ alpha   s.t.  X1.T @ alpha = (x, 1),  alpha >= 0,
    # then accumulate exp(-value) and its gradient w.r.t. phi (which is
    # -exp(-value) * alpha*). One Gurobi model is built and only the RHS is
    # updated between solves. Returns (integral_sum, grad_sum).
    N1, d = X_list.shape
    n = X1.shape[0]
    x = np.zeros(d+1)
    x[-1] = 1  # homogeneous coordinate: convex-combination constraint
    model = Model()
    model.Params.OutputFlag=0
    model.Params.presolve = 1
    alpha = model.addVars(n,lb=0,name="alpha")
    conv_comb = model.addConstrs(( quicksum(alpha[i]*X1[i,k] for i in range(n)) == x[k] for k in range(d+1)))
    model.setObjective(quicksum(phi[i]*alpha[i] for i in range(n)),GRB.MINIMIZE)
    model.update()
    integral = 0
    grad_int = np.zeros(n)
    for ell in range(N1):
        x = X_list[ell,:]
        for k in range(d):
            conv_comb[k].rhs = x[k]
        model.optimize()
        integral += np.exp(-model.ObjVal)
        grad_int -= np.exp(-model.ObjVal)*np.array(model.X)
    return integral, grad_int
def solve_QPs_gurobi(phi,X1,X_list,u):
    # Regularized variant of solve_LPs_gurobi: evaluates the smoothed envelope
    #     min_alpha  phi @ alpha + (u/2) * ||alpha - 1/n||^2
    #     s.t.  X1.T @ alpha = (x, 1),  alpha >= 0
    # at each grid point x, accumulating exp(-value) and its phi-gradient.
    # BarHomogeneous helps the barrier solver on near-infeasible instances.
    N1, d = X_list.shape
    n = X1.shape[0]
    x = np.zeros(d+1)
    x[-1] = 1  # homogeneous coordinate: convex-combination constraint
    model = Model()
    model.Params.OutputFlag=0
    model.Params.BarHomogeneous = 1
    model.Params.presolve = 1
    alpha = model.addVars(n,lb=0,name="alpha")
    conv_comb = model.addConstrs(( quicksum(alpha[i]*X1[i,k] for i in range(n)) == x[k] for k in range(d+1)))
    model.setObjective(quicksum(phi[i]*alpha[i]+u/2*(alpha[i]-1/n)*(alpha[i]-1/n) for i in range(n)),GRB.MINIMIZE)
    model.update()
    integral = 0
    grad_int = np.zeros(n)
    for ell in range(N1):
        x = X_list[ell,:]
        for k in range(d):
            conv_comb[k].rhs = x[k]
        model.optimize()
        integral += np.exp(-model.ObjVal)
        grad_int -= np.exp(-model.ObjVal)*np.array(model.X)
    return integral, grad_int
def get_all_from_phi_LP(iterate, X1, Delta, w, grid_mat, normalize=True, njobs=24):
    """Evaluate the discretized objective, its integral and subgradient w.r.t.
    phi at `iterate`, using per-grid-point LP evaluations of the envelope.

    Mutates and returns `iterate` (sets phi, integral, grad_phi, obj).
    Mirrors get_all_from_phi_QP.

    BUGFIX: `iterate.integral` was unconditionally set to 1, so with
    normalize=False the stored integral and objective were wrong; it is now
    `1 if normalize else integral`, consistent with get_all_from_phi_QP.
    """
    N = grid_mat.shape[0]
    phi = iterate.phi
    if njobs == 1:
        integral, grad_int = solve_LPs_gurobi(phi, X1, grid_mat)
    else:
        # Fan the grid out over a process pool; sum partial results.
        with pyPool(njobs) as pool:
            res = pool.starmap(solve_LPs_gurobi,
                               zip([phi]*njobs, [X1]*njobs, np.array_split(grid_mat, njobs)))
            integral = np.sum([x[0] for x in res])
            grad_int = np.sum(np.array([x[1] for x in res]), axis=0)
    # Monte-Carlo scaling: hull volume Delta over N grid points.
    integral *= (Delta/N)
    grad_int *= (Delta/N)
    iterate.phi += (np.log(integral) if normalize else 0)
    iterate.integral = 1 if normalize else integral
    iterate.grad_phi = grad_int / (integral if normalize else 1) + w
    iterate.obj = phi@w + iterate.integral
    return iterate
def get_all_from_phi_QP(iterate,X1,Delta,w,grid_mat,u,normalize=True,njobs=24):
    # QP-regularized twin of get_all_from_phi_LP: evaluates the discretized
    # objective/integral/subgradient at `iterate` via solve_QPs_gurobi with
    # regularization weight u. Mutates and returns `iterate`.
    N = grid_mat.shape[0]
    phi = iterate.phi
    if njobs == 1:
        res = solve_QPs_gurobi(phi,X1,grid_mat,u)
        integral = res[0]
        grad_int = res[1]
    else:
        # Fan the grid out over a process pool; sum the partial results.
        with pyPool(njobs) as pool:
            res = pool.starmap(solve_QPs_gurobi, zip([phi]*njobs,[X1]*njobs, np.array_split(grid_mat,njobs),[u]*njobs))
            integral = np.sum([x[0] for x in res])
            grad_int = np.sum(np.array([x[1] for x in res]),axis=0)
    # Monte-Carlo scaling: hull volume Delta over N grid points.
    integral *= (Delta/N)
    grad_int *= (Delta/N)
    iterate.phi += (np.log(integral) if normalize else 0)
    iterate.integral = 1 if normalize else integral
    iterate.grad_phi = grad_int / (integral if normalize else 1) + w
    iterate.obj = phi@w + iterate.integral
    return iterate
def get_all_from_ab(iterate,X,grid_mat,Delta,w,normalize=True,thres=1e10):
    # Evaluate objective, integral and (a, b)-subgradients for a piecewise-
    # linear iterate: the data term uses identity transforms at the sample
    # points X, the integral term uses exp(-.) at the grid points, scaled by
    # Delta/N (Monte-Carlo over the hull). When normalize=True, b and phi are
    # shifted by log(integral) so the density integrates to 1.
    # Mutates and returns `iterate`.
    n,d = X.shape
    N,d = grid_mat.shape
    a = iterate.a
    b = iterate.b
    phi, grad_sum_a, grad_sum_b = func_piecewise_max_subg(identity,grad_identity,X,w,a,b)
    likelihood, grad_int_a, grad_int_b = func_piecewise_max_subg(expneg,grad_expneg,grid_mat,np.ones(N),a,b)
    integral = np.sum(likelihood) * Delta / N
    grad_int_a *= (Delta/N)
    grad_int_b *= (Delta/N)
    iterate.b += (np.log(integral) if normalize else 0)
    iterate.phi = phi + (np.log(integral) if normalize else 0)
    iterate.integral = 1 if normalize else integral
    iterate.obj = iterate.phi@w + iterate.integral
    iterate.grad_a = grad_sum_a + grad_int_a / (integral if normalize else 1)
    iterate.grad_b = grad_sum_b + grad_int_b / (integral if normalize else 1)
    return iterate
class NCLCD:
def __init__(self, sample:Sample, w = None, K = None, K0 = 10, maxIters = 100,
initStepSize = 1, stepSizeMode = "sqrt-decay",
tol = 1e-5, normalize = True, random_state = 42, plot=True):
self.sample = sample
self.w = w if w is not None else np.ones(self.sample.n)/self.sample.n
assert K is not None or K0 is not None
self.K = K if K is not None else sample.d * K0
self.iterates = []
self.maxIters = maxIters
self.initStepSize = initStepSize
self.stepSizeMode = "sqrt-decay"
self.normalize = normalize
self.tol = tol
self.random = np.random.RandomState(random_state)
self.plot = plot
    # NOTE(review): the numba decorator on a bound method with Python objects
    # (tqdm, matplotlib, Iterate) can only run in object mode at best —
    # confirm it is intentional and actually beneficial.
    @nb.jit(nopython=False)
    def subgradient(self, a = None, b = None):
        """Projected subgradient descent on (a, b) with 1/sqrt(t) step decay.

        Starts from the given (a, b) or a random Gaussian initialization.
        Tracks the best discretized objective seen; stops on maxIters, on an
        objective change below tol, or after 20 consecutive increases.
        Populates self.iterates, final_phi, min_disc_obj, runtime, etc.
        """
        self.algo_name = "nonconvex_subgradient"
        if a is None:
            a = self.random.randn(self.K, self.sample.d)
            b = self.random.randn(self.K)
        self.algo_str = "nonconvex_subgradient_%d_%d_%.0e_%d_uniform"%(self.K, self.maxIters,self.tol, self.sample.N0)
        start_t = time.time()
        print("algorithm started...")
        last = Iterate(a=a,b=b)
        get_all_from_ab(last,self.sample.X,self.sample.grid_mat,self.sample.Delta,self.w,self.normalize)
        last.time = time.time()-start_t
        min_obj = last.obj
        arg_min = 0
        increase_cnt = 0
        self.iterates.append(last)
        for t in tnrange(1,self.maxIters+1):
            # Normalized subgradient step with 1/sqrt(t) decay.
            a = last.a - self.initStepSize * last.grad_a / la.norm(last.grad_a) / np.sqrt(t)
            b = last.b - self.initStepSize * last.grad_b / la.norm(last.grad_b) / np.sqrt(t)
            cur = Iterate(a=a,b=b)
            get_all_from_ab(cur,self.sample.X,self.sample.grid_mat,self.sample.Delta,self.w,self.normalize)
            cur.time = time.time()-start_t
            self.iterates.append(cur)
            if np.abs(min_obj - cur.obj) <= self.tol:
                print(min_obj,cur.obj)
                break
            if cur.obj > min_obj+self.tol:
                # Bail out after 20 consecutive non-improving steps.
                increase_cnt += 1
                if increase_cnt == 20:
                    break
            else:
                increase_cnt = 0
            if cur.obj < min_obj:
                arg_min = t
                min_obj = cur.obj
            last = cur
        self.disc_arg_min = arg_min
        self.min_disc_obj = min_obj
        self.runtime = time.time() - start_t
        self.final_phi = self.iterates[self.disc_arg_min].phi
        self.final_time = self.iterates[self.disc_arg_min].time
        print("algorithm finished!\n")
        print("running time: ", "\033[1m",time_to_string(self.runtime),"\033[0;0m","\n" )
        self.disc_obj_times = [iterate.time for iterate in self.iterates]
        self.disc_objs = [iterate.obj for iterate in self.iterates]
        print("min discretized obj achieved: ", "\033[1m",self.min_disc_obj,"\033[0;0m")
        if self.plot:
            plt.figure(figsize=(16,9))
            plt.scatter(self.iterates[self.disc_arg_min].time, self.min_disc_obj, c="red",zorder = 1)
            plt.plot(self.disc_obj_times,self.disc_objs,color="orange",zorder=0)
            plt.xlabel("time")
            plt.ylabel("obj")
            plt.show()
def calcExactFinalObjective(self, phi = None, verbose=0):
    """Evaluate the exact objective ``phi @ w + log(integral) + 1``.

    When *phi* is omitted, the stored ``self.final_phi`` is used; the
    integral is computed exactly by the sample object.
    """
    target = self.final_phi if phi is None else phi
    integral = self.sample.calcExactIntegral(target, verbose=verbose)
    return target @ self.w + np.log(integral) + 1
def to_dict(self, contents = "all"):
    """Serialize run metadata and/or iterate history to plain-Python dicts.

    Parameters
    ----------
    contents : {"all", "info", "hist"}
        "info" returns scalar run attributes (arrays converted via tolist),
        "hist" returns per-iterate phi/a/b and gradient histories,
        "all" returns both merged; any other value returns None.
    """
    d = dict()
    d2 = dict()
    if contents in ("all","info"):
        for key in self.__dict__:
            # Skip heavyweight / non-serializable members.
            if key not in ['sample','iterates','random','solver_class','solver','ws_duals','solver_method','lbmodel','phi','v','rad','rad_constr','ub_constrs','lb_constrs','int_pos','status','stepModel']:
                if key not in ['final_phi','w']:
                    d[key] = getattr(self,key)
                else:
                    d[key] = getattr(self,key).tolist()
        d['n'] = self.sample.n
        d['d'] = self.sample.d
    if contents in ("all","hist"):
        # BUG FIX: removed the unused local `tmp_iterate = self.iterates[self.disc_arg_min]`,
        # which could raise AttributeError when history is requested before a run.
        d2['phi_hist'] = [it.phi.tolist() for it in self.iterates]
        d2['a_hist'] = [it.a.tolist() for it in self.iterates]
        d2['b_hist'] = [it.b.tolist() for it in self.iterates]
        d2['grad_a_hist'] = [it.grad_a.tolist() for it in self.iterates]
        d2['grad_b_hist'] = [it.grad_b.tolist() for it in self.iterates]
    if contents =="info":
        return d
    elif contents =="hist":
        return d2
    elif contents=="all":
        d.update(d2)
        return d
def to_json(self, path):
    """Persist this run under *path*: run info and iterate history as JSON
    (under ``info/`` and ``hist/``), and the final phi as ``soln/*.npy``.

    *path* is expected to end with a path separator; file names are derived
    from the sample and algorithm identifier strings.
    """
    # Delegate to to_dict so the JSON payloads stay in sync with the in-memory
    # serialization instead of duplicating the attribute-filtering logic.
    d = self.to_dict(contents="info")
    d2 = self.to_dict(contents="hist")
    with open(path+"info/%s_%s_info.json"%(self.sample.sample_str,self.algo_str),'w') as f:
        json.dump(d,f)
    with open(path+"hist/%s_%s_hist.json"%(self.sample.sample_str,self.algo_str),'w') as f:
        json.dump(d2,f)
    with open(path+"soln/%s_%s_soln.npy"%(self.sample.sample_str,self.algo_str),'wb') as f:
        np.save(f, self.final_phi)
class ConvexLCD:
    """Solvers for the convex formulation of log-concave density estimation
    over a sample, offering subgradient and smoothing-based methods."""

    def __init__(self, sample:Sample, w = None, maxIters = 128, maxtime = 14400, random_state = 42, njobs = -1, verbose = 0, normalize = False, evaluation=True, plot=True, **kwargs):
        """Store the sample and solver configuration.

        Parameters
        ----------
        sample : Sample
            Data object; ``generate_X1()`` is invoked immediately.
        w : ndarray or None
            Sample weights; defaults to uniform 1/n.
        njobs : int
            <0: use min(cpu_count, 24) workers; >0: that many workers;
            0: disable parallelism (single worker).
        """
        self.sample = sample
        self.w = w if w is not None else np.ones(self.sample.n)/self.sample.n
        self.iterates = []
        self.maxIters = maxIters
        self.maxtime = maxtime
        self.random = np.random.RandomState(random_state)
        self.random_state = random_state
        self.sample.generate_X1()
        self.verbose= verbose
        self.normalize = normalize
        self.evaluation = evaluation
        self.plot = plot
        if njobs == 0:
            self.parallel = False
            # BUG FIX: self.njobs was previously left unset on this branch,
            # so later calls passing njobs=self.njobs raised AttributeError.
            self.njobs = 1
        else:
            self.parallel = True
            self.njobs = njobs if njobs > 0 else min(multiprocessing.cpu_count(),24)
def compute_phi_init(self, init_method, **init_kwargs):
    """Build an initial phi vector and set ``self.init_suffix``.

    init_method : {"uniform", "kde", "nonconvex", "given"}
        "uniform"   -- constant log(Delta);
        "kde"       -- negative log of a Gaussian KDE evaluated at the sample;
        "nonconvex" -- run the NCLCD subgradient solver and take its best phi;
        "given"     -- use ``init_kwargs["phi_start"]`` / ``["init_suffix"]``.

    NOTE(review): an unrecognized init_method leaves `phi` unbound and raises
    NameError at the return -- confirm whether an explicit error is wanted.
    """
    if init_method == "uniform":
        phi = np.log(self.sample.Delta)*np.ones(self.sample.n)
        self.init_suffix = "_uniform_init"
    elif init_method == "kde":
        phi = -np.log(st.gaussian_kde(self.sample.X.T).evaluate(self.sample.X.T))
        self.init_suffix = "_kde_init"
    elif init_method == "nonconvex":
        N0 = init_kwargs.get("N0", self.sample.N0dict.get(self.sample.d,6))
        K = init_kwargs.get("K", 100)
        tol = init_kwargs.get("tol", 1e-8)
        maxIters = init_kwargs.get("maxIters", 1000)
        self.sample.generate_feas_grid_mat(method ="uniform",N0=N0)
        # NOTE(review): tol is used as an exponent here (0.1**tol); with the
        # default tol=1e-8 this is ~1.0, which looks unintended -- confirm
        # whether tol should be an integer exponent.
        nclcd = NCLCD(self.sample,K=K,maxIters = maxIters,tol=0.1**tol)
        nclcd.subgradient()
        phi = nclcd.iterates[nclcd.disc_arg_min].phi
        self.init_suffix = "_nc_init_%d_%d_%.0e_%d"%(K, maxIters, tol, self.sample.N0)
    elif init_method =="given":
        phi = init_kwargs.get("phi_start")
        self.init_suffix = init_kwargs.get("init_suffix")
    # self.phi_start = phi
    return phi
def subgradient_approx(self,init_method=None,init_kwargs = dict(),initStepSize=5,stepSizeMode= "sqrt-decay-length",Nlist=None,N0list=None,thres_list= None,grid_method="uniform"):
    """Subgradient descent on the discretized objective with a stage-wise
    refined integration grid (fixed within each stage).

    Either `Nlist` (grid sizes) or `N0list` (per-dimension resolutions, from
    which Nlist is derived) selects the stages; `thres_list` gives the
    iteration thresholds at which the grid is refined (defaults to powers of
    two). If ``self.evaluation`` is set, every iterate is afterwards scored
    with the exact objective and the best one becomes ``final_phi``.
    """
    self.algo_name = "subgradient (approx)"
    if N0list is None:
        using_N = True
    else:
        Nlist = [int((N0list[i]**self.sample.d)*self.sample.phat) for i in range(len(N0list))]
        using_N = False
    if thres_list is None:
        # Default: refine at powers of two so each stage doubles in length.
        maxStages = int(np.ceil(np.log2(self.maxIters)))
        thres_list = [2**i for i in range(maxStages-len(Nlist)+1,maxStages+1)]
        thres_list[-1] = min(thres_list[-1],self.maxIters)
    self.N0list = N0list
    self.Nlist = Nlist
    # NOTE(review): with grid_method=="uniform" and N0list=None this
    # max(N0list) raises TypeError -- confirm intended argument combinations.
    if grid_method == "uniform":
        self.algo_str = "subgradient_Riemann_%d_%s_%d_%d_%d"%(initStepSize,stepSizeMode, self.maxIters, len(N0list),max(N0list))
    elif not using_N:
        self.algo_str = "subgradient_rndapprox_%d_%s_%d_%d_%d"%(initStepSize,stepSizeMode, self.maxIters, len(N0list),max(N0list))
    else:
        self.algo_str = "subgradient_rndapprox_%d_%s_%d_%d_%d"%(initStepSize,stepSizeMode, self.maxIters, len(Nlist),max(Nlist))
    self.algo_str += '_Copy%d'%self.random_state
    phi = self.compute_phi_init(init_method,**init_kwargs)
    self.algo_str += self.init_suffix
    self.thres_list = thres_list
    self.initStepSize = initStepSize
    self.stepSizeMode = stepSizeMode
    totalStages = len(Nlist)
    cur_stage = 0
    cur_N = Nlist[cur_stage]
    cur_thres = thres_list[cur_stage]
    if using_N:
        self.sample.generate_feas_grid_mat(method=grid_method, N = Nlist[cur_stage],rng=self.random)
    else:
        self.sample.generate_feas_grid_mat(method=grid_method,N0=N0list[cur_stage],rng=self.random)
        # The sample decides the realized grid size; record it back.
        cur_N = self.sample.N
        Nlist[cur_stage] = cur_N
    print("algorithm started...")
    start_t = time.time()
    for t in tnrange(self.maxIters+1):
        if t > cur_thres:
            # Advance to the next (finer) grid stage.
            cur_stage += 1
            cur_N = Nlist[cur_stage]
            cur_thres = thres_list[cur_stage]
            print("Iteration",t,":")
            if using_N:
                print("N changed to %d"%cur_N)
                self.sample.generate_feas_grid_mat(method=grid_method, N = Nlist[cur_stage],rng=self.random)
            else:
                print("N0 changed to %d"%N0list[cur_stage])
                self.sample.generate_feas_grid_mat(method=grid_method,N0=N0list[cur_stage],rng=self.random)
                cur_N = self.sample.N
                Nlist[cur_stage] = cur_N
                print("N changed to %d"%cur_N)
        cur = Iterate(phi = phi)
        f = io.StringIO()
        # Suppress the LP solver's stdout chatter.
        with contextlib.redirect_stdout(f):
            get_all_from_phi_LP(cur, self.sample.X1, self.sample.Delta, self.w, self.sample.grid_mat, normalize = self.normalize, njobs= self.njobs )
        cur.time = time.time()-start_t
        if self.stepSizeMode == "sqrt-decay-length":
            phi = cur.phi - self.initStepSize*cur.grad_phi / la.norm(cur.grad_phi) / np.sqrt(t+1)
        elif self.stepSizeMode == "sqrt-decay-size":
            phi = cur.phi - self.initStepSize*cur.grad_phi / np.sqrt(t+1)
        elif self.stepSizeMode == "constant-length":
            phi = cur.phi - self.initStepSize*cur.grad_phi / la.norm(cur.grad_phi)
        elif self.stepSizeMode == "constant-size":
            phi = cur.phi - self.initStepSize*cur.grad_phi
        self.iterates.append(cur)
        if cur.time > self.maxtime:
            print("algorithm terminated due to time limit\n")
            break
    self.Nlist = Nlist
    self.runtime = time.time() - start_t
    print("algorithm finished!\n")
    print("running time: ", "\033[1m",time_to_string(self.runtime),"\033[0;0m","\n" )
    self.disc_obj_times = [iterate.time for iterate in self.iterates]
    self.disc_objs = [iterate.obj for iterate in self.iterates]
    self.disc_normalized_objs = [np.mean(iterate.phi)+np.log(iterate.integral)+1 for iterate in self.iterates]
    if self.evaluation:
        print("function evaluation started...")
        self.real_objs = []
        self.real_integrals = []
        self.obj_times = []
        self.arg_min = 0
        self.eval_iters=1
        self.min_obj = float('inf')
        for i in tnrange(len(self.iterates)):
            if i % self.eval_iters == 0:
                phi = self.iterates[i].phi
                obj, integral = self.calcExactFinalObjective(phi,verbose=0)
                self.real_objs.append(obj)
                self.real_integrals.append(integral)
                self.obj_times.append(self.iterates[i].time)
                if obj < self.min_obj:
                    self.min_obj = obj
                    self.arg_min = i
        print("function evaluation finished!\n\n")
        self.final_phi = self.iterates[self.arg_min].phi
        self.final_time = self.iterates[self.arg_min].time
        print("min obj achieved: ", "\033[1m",self.min_obj,"\033[0;0m")
        if self.plot:
            plt.figure(figsize=(16,9))
            plt.scatter(self.final_time, self.min_obj, c="red",zorder = 1)
            plt.plot(self.obj_times,self.real_objs,color ="orange",zorder=0)
            plt.xlabel("time")
            plt.ylabel("obj")
            plt.show()
def subgradient_stoch(self,init_method=None,init_kwargs = dict(),initStepSize=5,stepSizeMode= "sqrt-decay-length",Nlist= [5000,10000,20000,40000,80000],thres_list= None):
    """Stochastic subgradient descent: a *fresh* random integration grid of
    the current stage size is drawn on every iteration.

    `Nlist` gives the per-stage grid sizes and `thres_list` the iteration
    thresholds at which the stage advances (defaults to powers of two).
    Afterwards, if ``self.evaluation`` is set, each iterate's exact objective
    is computed and the best one becomes ``final_phi``.

    NOTE(review): Nlist has a mutable default; it is not mutated here, but
    passing and storing it directly means callers share the list.
    """
    self.algo_name = "subgradient (stochastic)"
    self.algo_str = "subgradient_stochastic_%d_%s_%d_%d_%d"%(initStepSize,stepSizeMode, self.maxIters, len(Nlist),max(Nlist))
    self.algo_str += '_Copy%d'%self.random_state
    self.Nlist = Nlist
    phi = self.compute_phi_init(init_method,**init_kwargs)
    self.algo_str += self.init_suffix
    if thres_list is None:
        maxStages = int(np.ceil(np.log2(self.maxIters)))
        thres_list = [2**i for i in range(maxStages-len(Nlist)+1,maxStages+1)]
        thres_list[-1] = min(thres_list[-1],self.maxIters)
    self.thres_list = thres_list
    self.initStepSize = initStepSize
    self.stepSizeMode = stepSizeMode
    totalStages = len(Nlist)
    cur_stage = 0
    cur_N = Nlist[cur_stage]
    cur_thres = thres_list[cur_stage]
    print("algorithm started...")
    start_t = time.time()
    for t in tnrange(self.maxIters+1):
        if t > cur_thres:
            cur_stage += 1
            cur_N = Nlist[cur_stage]
            cur_thres = thres_list[cur_stage]
            print("Iteration",t,":")
            print("N changed to %d"%cur_N)
        # Resample the grid every iteration (the stochastic element).
        grid_mat = self.sample.generate_new_random_feas_grid_mat(cur_N, rng = self.random)
        cur = Iterate(phi = phi)
        f = io.StringIO()
        # Suppress the LP solver's stdout chatter.
        with contextlib.redirect_stdout(f):
            get_all_from_phi_LP(cur, self.sample.X1, self.sample.Delta, self.w, grid_mat, normalize = self.normalize, njobs= self.njobs )
        cur.time = time.time()-start_t
        if self.stepSizeMode == "sqrt-decay-length":
            phi = cur.phi - self.initStepSize*cur.grad_phi / la.norm(cur.grad_phi) / np.sqrt(t+1)
        elif self.stepSizeMode == "sqrt-decay-size":
            phi = cur.phi - self.initStepSize*cur.grad_phi / np.sqrt(t+1)
        elif self.stepSizeMode == "constant-length":
            phi = cur.phi - self.initStepSize*cur.grad_phi / la.norm(cur.grad_phi)
        elif self.stepSizeMode == "constant-size":
            phi = cur.phi - self.initStepSize*cur.grad_phi
        self.iterates.append(cur)
        if cur.time > self.maxtime:
            print("algorithm terminated due to time limit\n")
            break
    self.Nlist = Nlist
    self.runtime = time.time() - start_t
    print("algorithm finished!\n")
    print("running time: ", "\033[1m",time_to_string(self.runtime),"\033[0;0m","\n" )
    self.disc_obj_times = [iterate.time for iterate in self.iterates]
    self.disc_objs = [iterate.obj for iterate in self.iterates]
    self.disc_normalized_objs = [np.mean(iterate.phi)+np.log(iterate.integral)+1 for iterate in self.iterates]
    if self.evaluation:
        print("function evaluation started...")
        self.real_objs = []
        self.real_integrals = []
        self.obj_times = []
        self.arg_min = 0
        self.eval_iters=1
        self.min_obj = float('inf')
        for i in tnrange(len(self.iterates)):
            if i % self.eval_iters == 0:
                phi = self.iterates[i].phi
                obj, integral = self.calcExactFinalObjective(phi,verbose=0)
                self.real_objs.append(obj)
                self.real_integrals.append(integral)
                self.obj_times.append(self.iterates[i].time)
                if obj < self.min_obj:
                    self.min_obj = obj
                    self.arg_min = i
        print("function evaluation finished!\n\n")
        self.final_phi = self.iterates[self.arg_min].phi
        self.final_time = self.iterates[self.arg_min].time
        print("min obj achieved: ", "\033[1m",self.min_obj,"\033[0;0m")
        if self.plot:
            plt.figure(figsize=(16,9))
            plt.scatter(self.final_time, self.min_obj, c="red",zorder = 1)
            plt.plot(self.obj_times,self.real_objs,color ="orange",zorder=0)
            plt.xlabel("time")
            plt.ylabel("obj")
            plt.show()
def randomized_smoothing_stoch(self,init_method=None,init_kwargs = dict(),D=2,sigma=1,beta=0.25,Nlist= [5000,10000,20000,40000,80000],thres_list= None,eta_mode = "constant"):
    """Accelerated randomized-smoothing method with a fresh random grid each
    iteration: the objective is queried at a uniformly perturbed point
    (radius u * theta) inside a Nesterov-style three-sequence update
    (phi_x / phi_y / phi_z) with dual averaging of the gradients in ``s``.

    eta_mode "duchi" grows the regularization eta by sqrt(t+1) each step;
    "constant" keeps eta = sigma*M/D fixed.
    """
    self.algo_name = "randomized_smoothing (stochastic)"
    self.algo_str = "randomized_smoothing_stochastic_%d_%.0e_%.2f_%d_%d_%d"%(D,sigma,beta, self.maxIters, len(Nlist),max(Nlist))
    self.algo_str += '_Copy%d'%self.random_state
    phi = self.compute_phi_init(init_method,**init_kwargs)
    self.algo_str += self.init_suffix
    self.beta = beta
    # Smoothing radius scales with n**beta.
    u = D*(self.sample.n**(beta))/2
    if thres_list is None:
        maxStages = int(np.ceil(np.log2(self.maxIters)))
        thres_list = [2**i for i in range(maxStages-len(Nlist)+1,maxStages+1)]
        thres_list[-1] = min(thres_list[-1],self.maxIters)
    self.thres_list = thres_list
    self.Nlist = Nlist
    stage_iters = np.r_[thres_list[0], np.diff(thres_list)]
    M = np.sqrt(np.sum([stage_iters[i]/Nlist[i] for i in range(len(Nlist))]))
    eta = sigma*M/D
    self.u = u
    self.D = D
    self.M= M
    self.eta= eta
    self.sigma = sigma
    self.eta_mode = eta_mode
    if self.eta_mode == "duchi":
        self.algo_str += "_duchi"
    totalStages = len(Nlist)
    cur_stage = 0
    cur_N = Nlist[cur_stage]
    cur_thres = thres_list[cur_stage]
    phi_x = phi
    phi_y = phi
    phi_z = phi
    s = np.zeros(self.sample.n)
    theta_old= 1
    theta = 1
    print("algorithm started...")
    start_t = time.time()
    for t in tnrange(self.maxIters+1):
        if t > cur_thres:
            cur_stage += 1
            cur_N = Nlist[cur_stage]
            cur_thres = thres_list[cur_stage]
            print("Iteration",t,":")
            print("N changed to %d"%cur_N)
        # Fresh random grid every iteration (the stochastic element).
        grid_mat = self.sample.generate_new_random_feas_grid_mat(cur_N, rng = self.random)
        ut = u*theta_old
        phi_y = (1-theta_old)*phi_x+theta_old*phi_z
        # Query at phi_y plus a uniform perturbation of radius ut.
        cur = Iterate(phi = phi_y+ut*randb(1,self.sample.n, rng = self.random).reshape(-1))
        f = io.StringIO()
        with contextlib.redirect_stdout(f):
            get_all_from_phi_LP(cur, self.sample.X1, self.sample.Delta, self.w, grid_mat, normalize = self.normalize, njobs= self.njobs )
        cur.time = time.time()-start_t
        # Dual averaging of the (rescaled) gradients.
        s += cur.grad_phi/theta_old
        theta = 2/(1+np.sqrt(1+4/theta_old**2))
        cur.phi_x = phi_x
        cur.phi_y = phi_y
        cur.phi_z = phi_z
        if self.eta_mode == "duchi":
            eta = self.eta * np.sqrt(t+1)
        # phi (the initial point) is the prox center of the dual average.
        phi_z = phi - s*theta/(sigma*np.sqrt(self.sample.n)/u+eta)
        phi_x = (1-theta_old)*phi_x+theta_old*phi_z
        theta_old = theta
        self.iterates.append(cur)
        if cur.time > self.maxtime:
            print("algorithm terminated due to time limit\n")
            break
    self.runtime = time.time() - start_t
    print("algorithm finished!\n")
    print("running time: ", "\033[1m",time_to_string(self.runtime),"\033[0;0m","\n" )
    self.disc_obj_times = [iterate.time for iterate in self.iterates]
    self.disc_objs = [iterate.obj for iterate in self.iterates]
    self.disc_normalized_objs = [np.mean(iterate.phi)+np.log(iterate.integral)+1 for iterate in self.iterates]
    if self.evaluation:
        print("function evaluation started...")
        self.real_objs = []
        self.real_integrals = []
        self.obj_times = []
        self.arg_min = 0
        self.eval_iters=1
        self.min_obj = float('inf')
        for i in tnrange(len(self.iterates)):
            if i % self.eval_iters == 0:
                phi = self.iterates[i].phi_x
                obj, integral = self.calcExactFinalObjective(phi,verbose=0)
                self.real_objs.append(obj)
                self.real_integrals.append(integral)
                self.obj_times.append(self.iterates[i].time)
                if obj < self.min_obj:
                    self.min_obj = obj
                    self.arg_min = i
        print("function evaluation finished!\n\n")
        # NOTE(review): evaluation scores iterates[i].phi_x but final_phi
        # stores iterates[arg_min].phi (the perturbed query point) -- confirm
        # whether .phi_x was intended here.
        self.final_phi = self.iterates[self.arg_min].phi
        self.final_time = self.iterates[self.arg_min].time
        print("min obj achieved: ", "\033[1m",self.min_obj,"\033[0;0m")
        if self.plot:
            plt.figure(figsize=(16,9))
            plt.scatter(self.final_time, self.min_obj, c="red",zorder = 1)
            plt.plot(self.obj_times,self.real_objs,color ="orange",zorder=0)
            plt.xlabel("time")
            plt.ylabel("obj")
            plt.show()
def randomized_smoothing_approx(self,init_method=None,init_kwargs = dict(),D=2,sigma=1,beta=0.25,Nlist=None,N0list=None,thres_list= None,grid_method="uniform",eta_mode = "constant"):
    """Accelerated randomized-smoothing method on a stage-wise refined grid
    (fixed within each stage, unlike the stochastic variant).

    Either `Nlist` (grid sizes) or `N0list` (per-dimension resolutions)
    selects the stages; `thres_list` gives the refinement thresholds. Uses
    the same phi_x/phi_y/phi_z three-sequence update with dual averaging as
    ``randomized_smoothing_stoch``.
    """
    self.algo_name = "randomized_smoothing (approx)"
    # Smoothing radius scales with n**beta.
    u = D*(self.sample.n**(beta))/2
    self.beta = beta
    if N0list is None:
        using_N = True
    else:
        Nlist = [int((N0list[i]**self.sample.d)*self.sample.phat) for i in range(len(N0list))]
        using_N = False
    if thres_list is None:
        maxStages = int(np.ceil(np.log2(self.maxIters)))
        thres_list = [2**i for i in range(maxStages-len(Nlist)+1,maxStages+1)]
        thres_list[-1] = min(thres_list[-1],self.maxIters)
    self.thres_list = thres_list
    self.N0list = N0list
    self.Nlist = Nlist
    if grid_method == "uniform":
        self.algo_str = "randomized_smoothing_Riemann_%d_%.0e_%.2f_%d_%d_%d"%(D,sigma,beta, self.maxIters, len(N0list),max(N0list))
    elif not using_N:
        self.algo_str = "randomized_smoothing_rndapprox_%d_%.0e_%.2f_%d_%d_%d"%(D,sigma,beta, self.maxIters, len(N0list),max(N0list))
    else:
        self.algo_str = "randomized_smoothing_rndapprox_%d_%.0e_%.2f_%d_%d_%d"%(D,sigma,beta, self.maxIters, len(Nlist),max(Nlist))
    self.algo_str += '_Copy%d'%self.random_state
    phi = self.compute_phi_init(init_method,**init_kwargs)
    self.algo_str += self.init_suffix
    stage_iters = np.r_[thres_list[0], np.diff(thres_list)]
    M = np.sqrt(np.sum([stage_iters[i]/Nlist[i] for i in range(len(Nlist))]))
    eta = sigma*M/D
    self.u = u
    self.D = D
    self.eta= eta
    self.M= M
    self.sigma = sigma
    self.eta_mode = eta_mode
    if self.eta_mode == "duchi":
        self.algo_str += "_duchi"
    totalStages = len(Nlist)
    cur_stage = 0
    cur_N = Nlist[cur_stage]
    cur_thres = thres_list[cur_stage]
    if using_N:
        self.sample.generate_feas_grid_mat(method=grid_method, N = Nlist[cur_stage],rng=self.random)
    else:
        self.sample.generate_feas_grid_mat(method=grid_method,N0=N0list[cur_stage],rng=self.random)
        # The sample decides the realized grid size; record it back.
        cur_N = self.sample.N
        Nlist[cur_stage] = cur_N
    phi_x = phi
    phi_y = phi
    phi_z = phi
    s = np.zeros(self.sample.n)
    theta_old= 1
    theta = 1
    print("algorithm started...")
    start_t = time.time()
    for t in tnrange(self.maxIters+1):
        if t > cur_thres:
            cur_stage += 1
            cur_N = Nlist[cur_stage]
            cur_thres = thres_list[cur_stage]
            print("Iteration",t,":")
            if using_N:
                print("N changed to %d"%cur_N)
                self.sample.generate_feas_grid_mat(method=grid_method, N = Nlist[cur_stage],rng=self.random)
            else:
                print("N0 changed to %d"%N0list[cur_stage])
                self.sample.generate_feas_grid_mat(method=grid_method,N0=N0list[cur_stage],rng=self.random)
                cur_N = self.sample.N
                Nlist[cur_stage] = cur_N
                print("N changed to %d"%cur_N)
        ut = u*theta_old
        phi_y = (1-theta_old)*phi_x+theta_old*phi_z
        # Query at phi_y plus a uniform perturbation of radius ut.
        cur = Iterate(phi = phi_y+ut*randb(1,self.sample.n,rng = self.random).reshape(-1))
        f = io.StringIO()
        with contextlib.redirect_stdout(f):
            get_all_from_phi_LP(cur, self.sample.X1, self.sample.Delta, self.w, self.sample.grid_mat, normalize = self.normalize, njobs= self.njobs )
        cur.time = time.time()-start_t
        s += cur.grad_phi/theta_old
        theta = 2/(1+np.sqrt(1+4/theta_old**2))
        cur.phi_x = phi_x
        cur.phi_y = phi_y
        cur.phi_z = phi_z
        if self.eta_mode == "duchi":
            eta = self.eta * np.sqrt(t+1)
        # phi (the initial point) is the prox center of the dual average.
        phi_z = phi - s*theta/(sigma*np.sqrt(self.sample.n)/u+eta)
        phi_x = (1-theta_old)*phi_x+theta_old*phi_z
        theta_old = theta
        self.iterates.append(cur)
        if cur.time > self.maxtime:
            print("algorithm terminated due to time limit\n")
            break
    self.Nlist = Nlist
    self.runtime = time.time() - start_t
    print("algorithm finished!\n")
    print("running time: ", "\033[1m",time_to_string(self.runtime),"\033[0;0m","\n" )
    self.disc_obj_times = [iterate.time for iterate in self.iterates]
    self.disc_objs = [iterate.obj for iterate in self.iterates]
    self.disc_normalized_objs = [np.mean(iterate.phi)+np.log(iterate.integral)+1 for iterate in self.iterates]
    if self.evaluation:
        print("function evaluation started...")
        self.real_objs = []
        self.real_integrals = []
        self.obj_times = []
        self.arg_min = 0
        self.eval_iters=1
        self.min_obj = float('inf')
        for i in tnrange(len(self.iterates)):
            if i % self.eval_iters == 0:
                phi = self.iterates[i].phi_x
                obj, integral = self.calcExactFinalObjective(phi,verbose=0)
                self.real_objs.append(obj)
                self.real_integrals.append(integral)
                self.obj_times.append(self.iterates[i].time)
                if obj < self.min_obj:
                    self.min_obj = obj
                    self.arg_min = i
        print("function evaluation finished!\n\n")
        # NOTE(review): evaluation scores iterates[i].phi_x but final_phi
        # stores iterates[arg_min].phi (the perturbed query point) -- confirm
        # whether .phi_x was intended here.
        self.final_phi = self.iterates[self.arg_min].phi
        self.final_time = self.iterates[self.arg_min].time
        print("min obj achieved: ", "\033[1m",self.min_obj,"\033[0;0m")
        if self.plot:
            plt.figure(figsize=(16,9))
            plt.scatter(self.final_time, self.min_obj, c="red",zorder = 1)
            plt.plot(self.obj_times,self.real_objs,color ="orange",zorder=0)
            plt.xlabel("time")
            plt.ylabel("obj")
            plt.show()
def nesterov_smoothing_stoch(self,init_method=None,init_kwargs = dict(),D=2,sigma=1,C1=1,Nlist= [5000,10000,20000,40000,80000],thres_list= None,eta_mode="constant"):
    """Accelerated Nesterov-smoothing method with a fresh random grid each
    iteration: the smoothed objective/gradient are obtained from the QP
    oracle (``get_all_from_phi_QP`` with smoothing parameter ut) inside the
    same phi_x/phi_y/phi_z three-sequence dual-averaging scheme used by the
    randomized-smoothing variants.
    """
    self.algo_name = "nesterov_smoothing (stochastic)"
    self.algo_str = "nesterov_smoothing_stochastic_%d_%.0e_%.2f_%d_%d_%d"%(D,sigma,C1, self.maxIters, len(Nlist),max(Nlist))
    self.algo_str += '_Copy%d'%self.random_state
    phi = self.compute_phi_init(init_method,**init_kwargs)
    self.algo_str += self.init_suffix
    self.C1 = C1
    u = D/2*C1
    if thres_list is None:
        maxStages = int(np.ceil(np.log2(self.maxIters)))
        thres_list = [2**i for i in range(maxStages-len(Nlist)+1,maxStages+1)]
        thres_list[-1] = min(thres_list[-1],self.maxIters)
    self.thres_list = thres_list
    self.Nlist = Nlist
    stage_iters = np.r_[thres_list[0], np.diff(thres_list)]
    M = np.sqrt(np.sum([stage_iters[i]/Nlist[i] for i in range(len(Nlist))]))
    eta = sigma*M/D
    self.u = u
    self.D = D
    self.M= M
    self.eta= eta
    self.sigma = sigma
    self.eta_mode = eta_mode
    if self.eta_mode == "duchi":
        self.algo_str += "_duchi"
    totalStages = len(Nlist)
    cur_stage = 0
    cur_N = Nlist[cur_stage]
    cur_thres = thres_list[cur_stage]
    phi_x = phi
    phi_y = phi
    phi_z = phi
    s = np.zeros(self.sample.n)
    theta_old= 1
    theta = 1
    print("algorithm started...")
    start_t = time.time()
    for t in tnrange(self.maxIters+1):
        if t > cur_thres:
            cur_stage += 1
            cur_N = Nlist[cur_stage]
            cur_thres = thres_list[cur_stage]
            print("Iteration",t,":")
            print("N changed to %d"%cur_N)
        # Fresh random grid every iteration (the stochastic element).
        grid_mat = self.sample.generate_new_random_feas_grid_mat(cur_N, rng = self.random)
        ut = u*theta_old
        phi_y = (1-theta_old)*phi_x+theta_old*phi_z
        # Unlike randomized smoothing, the query point is phi_y itself; the
        # smoothing happens inside the QP oracle via ut.
        cur = Iterate(phi = phi_y)
        f = io.StringIO()
        with contextlib.redirect_stdout(f):
            get_all_from_phi_QP(cur, self.sample.X1, self.sample.Delta, self.w, grid_mat, ut, normalize = self.normalize, njobs= self.njobs )
        cur.time = time.time()-start_t
        s += cur.grad_phi/theta_old
        theta = 2/(1+np.sqrt(1+4/theta_old**2))
        cur.phi_x = phi_x
        cur.phi_y = phi_y
        cur.phi_z = phi_z
        if self.eta_mode == "duchi":
            eta = self.eta * np.sqrt(t+1)
        # phi (the initial point) is the prox center of the dual average.
        phi_z = phi - s*theta/(sigma/u+eta)
        phi_x = (1-theta_old)*phi_x+theta_old*phi_z
        theta_old = theta
        self.iterates.append(cur)
        if cur.time > self.maxtime:
            print("algorithm terminated due to time limit\n")
            break
    self.runtime = time.time() - start_t
    print("algorithm finished!\n")
    print("running time: ", "\033[1m",time_to_string(self.runtime),"\033[0;0m","\n" )
    self.disc_obj_times = [iterate.time for iterate in self.iterates]
    self.disc_objs = [iterate.obj for iterate in self.iterates]
    self.disc_normalized_objs = [np.mean(iterate.phi)+np.log(iterate.integral)+1 for iterate in self.iterates]
    if self.evaluation:
        print("function evaluation started...")
        self.real_objs = []
        self.real_integrals = []
        self.obj_times = []
        self.arg_min = 0
        self.eval_iters=1
        self.min_obj = float('inf')
        for i in tnrange(len(self.iterates)):
            if i % self.eval_iters == 0:
                phi = self.iterates[i].phi_x
                obj, integral = self.calcExactFinalObjective(phi,verbose=0)
                self.real_objs.append(obj)
                self.real_integrals.append(integral)
                self.obj_times.append(self.iterates[i].time)
                if obj < self.min_obj:
                    self.min_obj = obj
                    self.arg_min = i
        print("function evaluation finished!\n\n")
        # NOTE(review): evaluation scores iterates[i].phi_x but final_phi
        # stores iterates[arg_min].phi -- confirm whether .phi_x was intended.
        self.final_phi = self.iterates[self.arg_min].phi
        self.final_time = self.iterates[self.arg_min].time
        print("min obj achieved: ", "\033[1m",self.min_obj,"\033[0;0m")
        if self.plot:
            plt.figure(figsize=(16,9))
            plt.scatter(self.final_time, self.min_obj, c="red",zorder = 1)
            plt.plot(self.obj_times,self.real_objs,color ="orange",zorder=0)
            plt.xlabel("time")
            plt.ylabel("obj")
            plt.show()
def nesterov_smoothing_approx(self,init_method=None,init_kwargs = dict(),D=2,sigma=1,C1=1,Nlist=None,N0list=None,thres_list= None,grid_method="uniform",eta_mode = "constant"):
    """Accelerated Nesterov-smoothing method on a stage-wise refined grid
    (fixed within each stage). Uses the QP oracle with smoothing parameter
    ut inside the phi_x/phi_y/phi_z three-sequence dual-averaging scheme.

    Either `Nlist` (grid sizes) or `N0list` (per-dimension resolutions)
    selects the stages; `thres_list` gives the refinement thresholds.
    """
    self.algo_name = "nesterov_smoothing (approx)"
    u = D/2*C1
    self.C1 = C1
    if N0list is None:
        using_N = True
    else:
        Nlist = [int((N0list[i]**self.sample.d)*self.sample.phat) for i in range(len(N0list))]
        using_N = False
    if thres_list is None:
        maxStages = int(np.ceil(np.log2(self.maxIters)))
        thres_list = [2**i for i in range(maxStages-len(Nlist)+1,maxStages+1)]
        thres_list[-1] = min(thres_list[-1],self.maxIters)
    self.thres_list = thres_list
    self.N0list = N0list
    self.Nlist = Nlist
    if grid_method == "uniform":
        self.algo_str = "nesterov_smoothing_Riemann_%d_%.0e_%.2f_%d_%d_%d"%(D,sigma,C1, self.maxIters, len(N0list),max(N0list))
    elif not using_N:
        self.algo_str = "nesterov_smoothing_rndapprox_%d_%.0e_%.2f_%d_%d_%d"%(D,sigma,C1, self.maxIters, len(N0list),max(N0list))
    else:
        self.algo_str = "nesterov_smoothing_rndapprox_%d_%.0e_%.2f_%d_%d_%d"%(D,sigma,C1, self.maxIters, len(Nlist),max(Nlist))
    self.algo_str += '_Copy%d'%self.random_state
    phi = self.compute_phi_init(init_method,**init_kwargs)
    self.algo_str += self.init_suffix
    stage_iters = np.r_[thres_list[0], np.diff(thres_list)]
    M = np.sqrt(np.sum([stage_iters[i]/Nlist[i] for i in range(len(Nlist))]))
    eta = sigma*M/D
    self.u = u
    self.D = D
    self.eta= eta
    self.M= M
    self.sigma = sigma
    self.eta_mode = eta_mode
    if self.eta_mode == "duchi":
        self.algo_str += "_duchi"
    totalStages = len(Nlist)
    cur_stage = 0
    cur_N = Nlist[cur_stage]
    cur_thres = thres_list[cur_stage]
    if using_N:
        self.sample.generate_feas_grid_mat(method=grid_method, N = Nlist[cur_stage],rng=self.random)
    else:
        self.sample.generate_feas_grid_mat(method=grid_method,N0=N0list[cur_stage],rng=self.random)
        # The sample decides the realized grid size; record it back.
        cur_N = self.sample.N
        Nlist[cur_stage] = cur_N
    phi_x = phi
    phi_y = phi
    phi_z = phi
    s = np.zeros(self.sample.n)
    theta_old= 1
    theta = 1
    print("algorithm started...")
    start_t = time.time()
    for t in tnrange(self.maxIters+1):
        if t > cur_thres:
            cur_stage += 1
            cur_N = Nlist[cur_stage]
            cur_thres = thres_list[cur_stage]
            print("Iteration",t,":")
            if using_N:
                print("N changed to %d"%cur_N)
                self.sample.generate_feas_grid_mat(method=grid_method, N = Nlist[cur_stage],rng=self.random)
            else:
                print("N0 changed to %d"%N0list[cur_stage])
                self.sample.generate_feas_grid_mat(method=grid_method,N0=N0list[cur_stage],rng=self.random)
                cur_N = self.sample.N
                Nlist[cur_stage] = cur_N
                print("N changed to %d"%cur_N)
        ut = u*theta_old
        phi_y = (1-theta_old)*phi_x+theta_old*phi_z
        cur = Iterate(phi = phi_y)
        f = io.StringIO()
        with contextlib.redirect_stdout(f):
            get_all_from_phi_QP(cur, self.sample.X1, self.sample.Delta, self.w, self.sample.grid_mat, ut, normalize = self.normalize, njobs= self.njobs )
        cur.time = time.time()-start_t
        s += cur.grad_phi/theta_old
        theta = 2/(1+np.sqrt(1+4/theta_old**2))
        cur.phi_x = phi_x
        cur.phi_y = phi_y
        cur.phi_z = phi_z
        if self.eta_mode == "duchi":
            eta = self.eta * np.sqrt(t+1)
        # phi (the initial point) is the prox center of the dual average.
        phi_z = phi - s*theta/(sigma/u+eta)
        phi_x = (1-theta_old)*phi_x+theta_old*phi_z
        theta_old = theta
        self.iterates.append(cur)
        if cur.time > self.maxtime:
            print("algorithm terminated due to time limit\n")
            break
    self.Nlist = Nlist
    self.runtime = time.time() - start_t
    print("algorithm finished!\n")
    print("running time: ", "\033[1m",time_to_string(self.runtime),"\033[0;0m","\n" )
    self.disc_obj_times = [iterate.time for iterate in self.iterates]
    self.disc_objs = [iterate.obj for iterate in self.iterates]
    self.disc_normalized_objs = [np.mean(iterate.phi)+np.log(iterate.integral)+1 for iterate in self.iterates]
    if self.evaluation:
        print("function evaluation started...")
        self.real_objs = []
        self.real_integrals = []
        self.obj_times = []
        self.arg_min = 0
        self.eval_iters=1
        self.min_obj = float('inf')
        for i in tnrange(len(self.iterates)):
            if i % self.eval_iters == 0:
                phi = self.iterates[i].phi_x
                obj, integral = self.calcExactFinalObjective(phi,verbose=0)
                self.real_objs.append(obj)
                self.real_integrals.append(integral)
                self.obj_times.append(self.iterates[i].time)
                if obj < self.min_obj:
                    self.min_obj = obj
                    self.arg_min = i
        print("function evaluation finished!\n\n")
        # NOTE(review): evaluation scores iterates[i].phi_x but final_phi
        # stores iterates[arg_min].phi -- confirm whether .phi_x was intended.
        self.final_phi = self.iterates[self.arg_min].phi
        self.final_time = self.iterates[self.arg_min].time
        print("min obj achieved: ", "\033[1m",self.min_obj,"\033[0;0m")
        if self.plot:
            plt.figure(figsize=(16,9))
            plt.scatter(self.final_time, self.min_obj, c="red",zorder = 1)
            plt.plot(self.obj_times,self.real_objs,color ="orange",zorder=0)
            plt.xlabel("time")
            plt.ylabel("obj")
            plt.show()
def calcExactFinalObjective(self, phi, verbose=0):
    """Evaluate the exact objective for *phi*.

    Returns a ``(objective, integral)`` pair where the integral comes from
    ``self.sample.calcExactIntegral`` and the objective is
    ``phi @ w + log(integral) + 1``.
    """
    integral = self.sample.calcExactIntegral(phi, verbose=verbose)
    log_partition = np.log(integral)
    return phi @ self.w + log_partition + 1, integral
def to_dict(self, contents = "all"):
    """Serialize run metadata and/or iterate history to plain-Python dicts.

    contents : {"all", "info", "hist"}
        "info" returns scalar run attributes (final_phi / w via tolist),
        "hist" returns per-iterate phi and gradient histories,
        "all" returns both merged; any other value returns None.
    """
    skip = ('sample', 'iterates', 'random', 'solver_class', 'solver',
            'ws_duals', 'solver_method', 'lbmodel', 'phi', 'v', 'rad',
            'rad_constr', 'ub_constrs', 'lb_constrs', 'int_pos', 'status',
            'stepModel')
    as_list = ("final_phi", "w")
    info = {}
    hist = {}
    if contents in ("all", "info"):
        for key, value in self.__dict__.items():
            if key in skip:
                continue
            info[key] = value.tolist() if key in as_list else value
        info['n'] = self.sample.n
        info['d'] = self.sample.d
    if contents in ("all", "hist"):
        hist['phi_hist'] = [it.phi.tolist() for it in self.iterates]
        hist['grad_phi_hist'] = [it.grad_phi.tolist() for it in self.iterates]
    if contents == "info":
        return info
    if contents == "hist":
        return hist
    if contents == "all":
        info.update(hist)
        return info
def to_json(self, path):
    """Persist this run under *path*: run info and iterate history as JSON
    (under ``info/`` and ``hist/``), and the final phi as ``soln/*.npy``.

    *path* is expected to end with a path separator; file names are derived
    from the sample and algorithm identifier strings.
    """
    # Delegate to to_dict so the JSON payloads stay in sync with the in-memory
    # serialization instead of duplicating the attribute-filtering logic.
    d = self.to_dict(contents="info")
    d2 = self.to_dict(contents="hist")
    with open(path+"info/%s_%s_info.json"%(self.sample.sample_str,self.algo_str),'w') as f:
        json.dump(d,f)
    with open(path+"hist/%s_%s_hist.json"%(self.sample.sample_str,self.algo_str),'w') as f:
        json.dump(d2,f)
    with open(path+"soln/%s_%s_soln.npy"%(self.sample.sample_str,self.algo_str),'wb') as f:
        np.save(f, self.final_phi)
| 57,405 | 42.000749 | 208 | py |
LogConcComp | LogConcComp-main/src/QMC.py | import numpy as np
import numpy.linalg as la
from numbers import Number
from scipy import stats
def sequ(n, d, lb=0, ub=1, method=None, randomized=True, random_state=1, rng=None, return_likelihood=False):
    """Generate ``n`` points in ``[lb, ub]^d``.

    method : None | "uniform" | "random"
        None      -- all-zero base sequence (useful only with randomized=True);
        "uniform" -- midpoint grid with n**(1/d) cells per axis (n must be a
                     perfect d-th power);
        "random"  -- i.i.d. uniform draws from ``rng``.
    randomized : bool
        For non-"random" methods, apply a random shift modulo 1
        (Cranley-Patterson rotation).
    return_likelihood : bool
        Also return the uniform density 1/volume for each point.
    """
    if rng is None:
        rng = np.random.RandomState(seed=random_state)
    if method is None:
        seq = np.zeros((n, d))
    elif method == "uniform":
        # BUG FIX: int(np.power(n, 1/d)) truncates values like 9.999...,
        # e.g. n=1000, d=3 gave n0=9 and a spurious assertion error; round
        # to the nearest integer before checking.
        n0 = int(round(n ** (1.0 / d)))
        assert n == n0 ** d, 'this method does not apply to n = %d' % n
        tmp_list = np.linspace(0, 1, n0 + 1)
        # Cell midpoints along one axis.
        tmp_list = (tmp_list[:-1] + tmp_list[1:]) / 2
        dim_grid_list = [list(tmp_list) for i in range(d)]
        seq = np.stack(np.meshgrid(*dim_grid_list)).T.reshape(n, d)
    elif method == "random":
        seq = rng.rand(n, d)
    if randomized and method != "random":
        # Random shift wrapped back into [0, 1)^d.
        seq += rng.rand(n, d)
        seq = np.mod(seq, 1.0)
    seq = lb + seq * (ub - lb)
    if not return_likelihood:
        return seq
    if isinstance(ub - lb, Number):
        return seq, np.ones(n) / np.power(ub - lb, d)
    return seq, np.ones(n) / np.prod(ub - lb)
def generate_multivariate_normal(Z, mu, Sigma):
    """Map standard-normal draws ``Z`` (n x d) through the affine transform
    defined by ``mu`` and ``Sigma``; return the points and their density.

    The density is computed from the quadratic form of the source draws and
    rescaled by sqrt(det(Sigma)).
    """
    chol = la.cholesky(Sigma)
    points = Z @ chol.T + mu
    quad = np.sum(Z * Z, axis=1)
    dim = len(mu)
    likelihood = np.exp(-quad / 2) / np.power(2 * np.pi, dim / 2) / np.sqrt(la.det(Sigma))
    return points, likelihood
def seqn(n, d, mu=0, Sigma=1, method=None, randomized=True, random_state=1, rng=None, return_likelihood=False):
    """Generate ``n`` points distributed according to ``mu`` / ``Sigma`` in
    ``d`` dimensions, either from plain Gaussian draws (method="random") or
    by pushing a uniform sequence from ``sequ`` through the normal inverse
    CDF. Scalar ``mu`` / ``Sigma`` are broadcast to a vector / diagonal
    matrix. Optionally also returns the density of each point.
    """
    if method == "random":
        generator = rng if rng is not None else np.random.RandomState(seed=random_state)
        base = generator.randn(n, d)
    else:
        # Inverse-CDF transform of the (quasi-)uniform base sequence.
        base = stats.norm.ppf(sequ(n, d, 0, 1, method, randomized, random_state, rng, False))
    mean = mu * np.ones(d) if isinstance(mu, Number) else mu
    cov = np.diag(np.ones(d) * Sigma) if isinstance(Sigma, Number) else Sigma
    points, likelihood = generate_multivariate_normal(base, mean, cov)
    return (points, likelihood) if return_likelihood else points
class SeqGenerator:
    """Stateful wrapper around ``sequ`` / ``seqn`` that advances its seed by
    the number of points drawn, so consecutive calls yield fresh sequences."""

    def __init__(self, method=None, randomized=True, seed=1, rng=None):
        """Store the sequence configuration.

        rng : RandomState or None
            When None, a RandomState seeded with *seed* is created.
        """
        self.method = method
        self.randomized = randomized
        self.seed = seed
        if rng is None:
            # BUG FIX: this previously referenced the undefined name
            # `random_state` (the parameter is `seed`), raising NameError
            # whenever rng was not supplied.
            self.rng = np.random.RandomState(seed=seed)
        else:
            self.rng = rng
        self.cur_state = seed

    def rand(self, n, d, lb=0, ub=1, return_likelihood=False):
        """Draw n points in [lb, ub]^d and advance the internal state by n."""
        res = sequ(n, d, lb=lb, ub=ub, method=self.method,
                   randomized=self.randomized, random_state=self.cur_state,
                   rng=self.rng, return_likelihood=return_likelihood)
        self.cur_state += n
        return res

    def randn(self, n, d, mu=0, Sigma=1, return_likelihood=False):
        """Draw n normal points and advance the internal state by n."""
        res = seqn(n, d, mu=mu, Sigma=Sigma, method=self.method,
                   randomized=self.randomized, random_state=self.cur_state,
                   rng=self.rng, return_likelihood=return_likelihood)
        self.cur_state += n
        return res
| 2,883 | 36.454545 | 169 | py |
LogConcComp | LogConcComp-main/src/utils.py | import os
import numpy as np
import numba as nb
def create_folder(storage_path):
    """Ensure the experiment directory layout exists under storage_path.

    Creates storage_path itself plus the sub-folders info/, hist/, soln/ and
    figs/ (with figs/crop/ and figs/raw/).  storage_path is assumed to end
    with a path separator, since sub-paths are built by string concatenation.
    """
    if not os.path.isdir(storage_path):
        os.makedirs(storage_path, exist_ok=True)
    existing = os.listdir(storage_path)
    for sub in ("info", "hist", "soln", "figs"):
        if sub not in existing:
            os.makedirs(storage_path + sub + "/", exist_ok=True)
    figs_existing = os.listdir(storage_path + "figs/")
    for leaf in ("crop", "raw"):
        if leaf not in figs_existing:
            os.makedirs(storage_path + "figs/" + leaf + "/", exist_ok=True)
def time_to_string(runtime):
    """Format a duration given in seconds as 'HH:MM:SS.ss'."""
    whole_minutes, seconds = divmod(runtime, 60)
    hours, minutes = divmod(int(whole_minutes), 60)
    return "%.2d:%.2d:%05.2f"%(hours,minutes,seconds)
def multivariate_laplace(n,d,rng=None, random_state=None):
    """Draw n samples of a d-dimensional elliptical Laplace distribution.

    Uses the normal variance-mixture representation: standard normals scaled
    per-row by the square root of an independent Exp(1) variate.
    """
    if rng is None:
        rng = np.random.RandomState(random_state)
    normals = rng.randn(n, d)
    mixing = rng.exponential(size=(n, 1))
    return normals * np.sqrt(mixing)
@nb.njit(cache=True)
def np_apply_along_axis(func1d, axis, arr):
    # Numba-compatible stand-in for np.apply_along_axis (unsupported in
    # nopython mode): apply the scalar-returning func1d along `axis` of a
    # 2-D array.  axis=0 reduces each column, axis=1 reduces each row.
    assert arr.ndim == 2
    assert axis in [0, 1]
    if axis == 0:
        # one scalar per column
        result = np.empty(arr.shape[1])
        for i in range(len(result)):
            result[i] = func1d(arr[:, i])
    else:
        # one scalar per row
        result = np.empty(arr.shape[0])
        for i in range(len(result)):
            result[i] = func1d(arr[i, :])
    return result
@nb.njit(cache=True)
def np_apply_along_axis_kd(funckd, axis, arr, k = -1):
    # Vector-valued variant of np_apply_along_axis: funckd maps each slice to
    # a length-k vector.  k defaults (-1) to the length of the reduced axis,
    # i.e. funckd is then assumed to preserve the slice length.
    assert arr.ndim == 2
    assert axis in [0, 1]
    if axis == 0:
        # result column i = funckd applied to column i
        k = k if k > 0 else arr.shape[0]
        result = np.empty((k,arr.shape[1]))
        for i in range(arr.shape[1]):
            result[:, i] = funckd(arr[:, i])
    else:
        # result row i = funckd applied to row i
        k = k if k > 0 else arr.shape[1]
        result = np.empty((arr.shape[0],k))
        for i in range(arr.shape[0]):
            result[i, :] = funckd(arr[i, :])
    return result
@nb.njit(cache=True)
def split(n, B):
    # Cumulative end-indices partitioning n items into B near-equal batches;
    # the first n % B batches receive one extra item.  Returns B indices,
    # the last of which equals n.
    base = n // B
    extra = n % B
    bounds = []
    end = 0
    for b in range(B):
        end = end + base + (b < extra)
        bounds.append(end)
    return bounds
| 2,223 | 27.512821 | 78 | py |
FUNIT | FUNIT-master/test_k_shot.py | """
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import os
import numpy as np
from PIL import Image
import torch
import torch.backends.cudnn as cudnn
from torchvision import transforms
from utils import get_config
from trainer import Trainer
import argparse
# Inference script: translate one content image into the class defined by a
# folder of style images, using a pretrained FUNIT generator.
# Pin execution to GPU index 1.
os.environ['CUDA_VISIBLE_DEVICES']='1'
parser = argparse.ArgumentParser()
parser.add_argument('--config',
                    type=str,
                    default='configs/funit_animals.yaml')
parser.add_argument('--ckpt',
                    type=str,
                    default='pretrained/animal119_gen_00200000.pt')
parser.add_argument('--class_image_folder',
                    type=str,
                    default='images/n02138411')
parser.add_argument('--input',
                    type=str,
                    default='images/input_content.jpg')
parser.add_argument('--output',
                    type=str,
                    default='images/output.jpg')
opts = parser.parse_args()
cudnn.benchmark = True
opts.vis = True
# Single-image, single-GPU evaluation setup.
config = get_config(opts.config)
config['batch_size'] = 1
config['gpus'] = 1
trainer = Trainer(config)
trainer.cuda()
trainer.load_ckpt(opts.ckpt)
trainer.eval()
# Resize to 128x128, then normalize each channel to [-1, 1].
transform_list = [transforms.ToTensor(),
                  transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
transform_list = [transforms.Resize((128, 128))] + transform_list
transform = transforms.Compose(transform_list)
# Average the style (class) codes of every image in the class folder.
print('Compute average class codes for images in %s' % opts.class_image_folder)
images = os.listdir(opts.class_image_folder)
for i, f in enumerate(images):
    fn = os.path.join(opts.class_image_folder, f)
    img = Image.open(fn).convert('RGB')
    img_tensor = transform(img).unsqueeze(0).cuda()
    with torch.no_grad():
        class_code = trainer.model.compute_k_style(img_tensor, 1)
        if i == 0:
            new_class_code = class_code
        else:
            new_class_code += class_code
final_class_code = new_class_code / len(images)
# Translate the content image with the averaged class code.
image = Image.open(opts.input)
image = image.convert('RGB')
content_img = transform(image).unsqueeze(0)
print('Compute translation for %s' % opts.input)
with torch.no_grad():
    output_image = trainer.model.translate_simple(content_img, final_class_code)
    image = output_image.detach().cpu().squeeze().numpy()
    # Undo the [-1, 1] normalization and save as a HWC uint8 JPEG.
    image = np.transpose(image, (1, 2, 0))
    image = ((image + 1) * 0.5 * 255.0)
    output_img = Image.fromarray(np.uint8(image))
    output_img.save(opts.output, 'JPEG', quality=99)
print('Save output to %s' % opts.output)
| 2,618 | 31.7375 | 80 | py |
FUNIT | FUNIT-master/utils.py | """
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import os
import yaml
import time
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
import torchvision.utils as vutils
from data import ImageLabelFilelist
def update_average(model_tgt, model_src, beta=0.999):
    """Update model_tgt in place as an exponential moving average of
    model_src: each target parameter becomes beta*target + (1-beta)*source.
    """
    with torch.no_grad():
        src_lookup = dict(model_src.named_parameters())
        for name, tgt_param in model_tgt.named_parameters():
            src_param = src_lookup[name]
            assert src_param is not tgt_param
            tgt_param.copy_(beta * tgt_param + (1. - beta) * src_param)
def loader_from_list(
        root,
        file_list,
        batch_size,
        new_size=None,
        height=128,
        width=128,
        crop=True,
        num_workers=4,
        shuffle=True,
        center_crop=False,
        return_paths=False,
        drop_last=True):
    """Build a DataLoader over an ImageLabelFilelist dataset.

    Transform order: optional random horizontal flip (only when center_crop
    is False, i.e. training mode), optional resize to new_size, optional
    (center or random) crop to height x width, tensor conversion, then
    per-channel normalization to [-1, 1].
    """
    ops = []
    if not center_crop:
        ops.append(transforms.RandomHorizontalFlip())
    if new_size is not None:
        ops.append(transforms.Resize(new_size))
    if crop:
        if center_crop:
            ops.append(transforms.CenterCrop((height, width)))
        else:
            ops.append(transforms.RandomCrop((height, width)))
    ops.append(transforms.ToTensor())
    ops.append(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
    dataset = ImageLabelFilelist(root,
                                 file_list,
                                 transforms.Compose(ops),
                                 return_paths=return_paths)
    return DataLoader(dataset,
                      batch_size,
                      shuffle=shuffle,
                      drop_last=drop_last,
                      num_workers=num_workers)
def get_evaluation_loaders(conf, shuffle_content=False):
    """Return (content_loader, class_loader) for evaluation.

    Content images come from the training split; class (style) images come
    from the test split and are batched k_shot per content image.  Both
    loaders center-crop, keep sample paths, and keep the last partial batch.
    """
    common = dict(new_size=conf['new_size'],
                  height=conf['crop_image_height'],
                  width=conf['crop_image_width'],
                  crop=True,
                  center_crop=True,
                  return_paths=True,
                  drop_last=False)
    content_loader = loader_from_list(
        root=conf['data_folder_train'],
        file_list=conf['data_list_train'],
        batch_size=conf['batch_size'],
        num_workers=conf['num_workers'],
        shuffle=shuffle_content,
        **common)
    class_loader = loader_from_list(
        root=conf['data_folder_test'],
        file_list=conf['data_list_test'],
        batch_size=conf['batch_size'] * conf['k_shot'],
        num_workers=1,
        shuffle=False,
        **common)
    return content_loader, class_loader
def get_train_loaders(conf):
    """Return (train_content, train_class, test_content, test_class) loaders.

    All four loaders share the same resize/crop geometry; train loaders use
    conf['num_workers'] workers while test loaders are single-worker.
    """
    def build(split, workers):
        # One loader over the given split with the shared augmentation setup.
        return loader_from_list(
            root=conf['data_folder_%s' % split],
            file_list=conf['data_list_%s' % split],
            batch_size=conf['batch_size'],
            new_size=conf['new_size'],
            height=conf['crop_image_height'],
            width=conf['crop_image_width'],
            crop=True,
            num_workers=workers)
    train_content_loader = build('train', conf['num_workers'])
    train_class_loader = build('train', conf['num_workers'])
    test_content_loader = build('test', 1)
    test_class_loader = build('test', 1)
    return (train_content_loader, train_class_loader, test_content_loader,
            test_class_loader)
def get_config(config):
    """Parse the YAML configuration file at `config` into a dict."""
    with open(config, 'r') as stream:
        parsed = yaml.load(stream, Loader=yaml.FullLoader)
    return parsed
def make_result_folders(output_directory):
    """Create (if needed) the image and checkpoint directories.

    Returns (checkpoint_directory, image_directory), both located under
    output_directory.
    """
    image_directory = os.path.join(output_directory, 'images')
    if not os.path.exists(image_directory):
        print("Creating directory: {}".format(image_directory))
        # exist_ok guards against a race with a concurrent process creating
        # the directory between the exists() check and makedirs().
        os.makedirs(image_directory, exist_ok=True)
    checkpoint_directory = os.path.join(output_directory, 'checkpoints')
    if not os.path.exists(checkpoint_directory):
        print("Creating directory: {}".format(checkpoint_directory))
        os.makedirs(checkpoint_directory, exist_ok=True)
    return checkpoint_directory, image_directory
def __write_images(im_outs, dis_img_n, file_name):
    """Tile the first dis_img_n images of each output batch into one grid.

    Each entry of im_outs becomes one row; single-channel batches are
    expanded to three channels before concatenation.
    """
    rgb_outs = [batch.expand(-1, 3, -1, -1) for batch in im_outs]
    tiled = torch.cat([batch[:dis_img_n] for batch in rgb_outs], 0)
    grid = vutils.make_grid(tiled.data,
                            nrow=dis_img_n, padding=0, normalize=True)
    vutils.save_image(grid, file_name, nrow=1)
def write_1images(image_outputs, image_directory, postfix):
    """Save one grid image named gen_<postfix>.jpg for the given outputs."""
    n_display = image_outputs[0].size(0)
    out_path = '%s/gen_%s.jpg' % (image_directory, postfix)
    __write_images(image_outputs, n_display, out_path)
def _write_row(html_file, it, fn, all_size):
html_file.write("<h3>iteration [%d] (%s)</h3>" % (it, fn.split('/')[-1]))
html_file.write("""
<p><a href="%s">
<img src="%s" style="width:%dpx">
</a><br>
<p>
""" % (fn, fn, all_size))
return
def write_html(filename, it, img_save_it, img_dir, all_size=1536):
    """(Re)write an auto-refreshing HTML page indexing the training images.

    Lists the 'current' image first, then every saved iteration from `it`
    down to `img_save_it`.
    """
    # `with` guarantees the handle is closed even if a write fails; the
    # original opened/closed manually and could leak the file on error.
    with open(filename, "w") as html_file:
        html_file.write('''
    <!DOCTYPE html>
    <html>
    <head>
      <title>Experiment name = %s</title>
      <meta http-equiv="refresh" content="30">
    </head>
    <body>
    ''' % os.path.basename(filename))
        html_file.write("<h3>current</h3>")
        _write_row(html_file, it, '%s/gen_train_current.jpg' % img_dir, all_size)
        for j in range(it, img_save_it - 1, -1):
            _write_row(html_file, j, '%s/gen_train_%08d.jpg' % (img_dir, j),
                       all_size)
        html_file.write("</body></html>")
def write_loss(iterations, trainer, train_writer):
    """Log every scalar statistic exposed on `trainer` to the writer.

    Any non-callable, non-dunder attribute whose name mentions loss, grad,
    nwd or accuracy is written under its own tag at step iterations + 1.
    """
    keywords = ('loss', 'grad', 'nwd', 'accuracy')
    for attr in dir(trainer):
        if attr.startswith("__"):
            continue
        value = getattr(trainer, attr)
        if callable(value):
            continue
        if any(key in attr for key in keywords):
            train_writer.add_scalar(attr, value, iterations + 1)
class Timer:
    """Context manager that prints its message with the elapsed seconds.

    The message must contain one %f-style placeholder for the duration.
    """
    def __init__(self, msg):
        self.msg = msg
        self.start_time = None
    def __enter__(self):
        self.start_time = time.time()
    def __exit__(self, exc_type, exc_value, exc_tb):
        elapsed = time.time() - self.start_time
        print(self.msg % elapsed)
| 7,743 | 32.37931 | 77 | py |
FUNIT | FUNIT-master/data.py | """
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import os.path
from PIL import Image
import torch.utils.data as data
def default_loader(path):
    """Load the image at `path`, forcing a 3-channel RGB representation."""
    img = Image.open(path)
    return img.convert('RGB')
def default_filelist_reader(filelist):
    """Read one image path per line from `filelist`; whitespace is stripped."""
    with open(filelist, 'r') as rf:
        return [line.strip() for line in rf.readlines()]
class ImageLabelFilelist(data.Dataset):
    """Dataset over a list file of '<class_dir>/<image>' relative paths.

    The class label of each image is derived from the first path component;
    labels are integer indices into the sorted list of class names.
    """
    def __init__(self,
                 root,
                 filelist,
                 transform=None,
                 filelist_reader=default_filelist_reader,
                 loader=default_loader,
                 return_paths=False):
        self.root = root
        self.im_list = filelist_reader(os.path.join(filelist))
        self.transform = transform
        self.loader = loader
        # Sorted class names -> stable label indices across runs.
        class_names = {path.split('/')[0] for path in self.im_list}
        self.classes = sorted(class_names)
        self.class_to_idx = {name: idx for idx, name in enumerate(self.classes)}
        self.imgs = [(p, self.class_to_idx[p.split('/')[0]])
                     for p in self.im_list]
        self.return_paths = return_paths
        print('Data loader')
        print("\tRoot: %s" % root)
        print("\tList: %s" % filelist)
        print("\tNumber of classes: %d" % (len(self.classes)))
    def __getitem__(self, index):
        im_path, label = self.imgs[index]
        full_path = os.path.join(self.root, im_path)
        image = self.loader(full_path)
        if self.transform is not None:
            image = self.transform(image)
        if self.return_paths:
            return image, label, full_path
        else:
            return image, label
    def __len__(self):
        return len(self.imgs)
| 1,913 | 29.870968 | 76 | py |
FUNIT | FUNIT-master/networks.py | """
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import numpy as np
import torch
from torch import nn
from torch import autograd
from blocks import LinearBlock, Conv2dBlock, ResBlocks, ActFirstResBlock
def assign_adain_params(adain_params, model):
    """Distribute a flat (batch, n) parameter tensor onto every AdaIN layer.

    Each AdaptiveInstanceNorm2d consumes 2 * num_features values: first the
    biases (means), then the weights (stds).  Consumed values are sliced off
    the front of adain_params before moving to the next layer.
    """
    remaining = adain_params
    for module in model.modules():
        if module.__class__.__name__ != "AdaptiveInstanceNorm2d":
            continue
        nf = module.num_features
        module.bias = remaining[:, :nf].contiguous().view(-1)
        module.weight = remaining[:, nf:2 * nf].contiguous().view(-1)
        if remaining.size(1) > 2 * nf:
            remaining = remaining[:, 2 * nf:]
def get_num_adain_params(model):
    """Count how many AdaIN parameters `model` consumes (2 per feature)."""
    return sum(2 * m.num_features for m in model.modules()
               if m.__class__.__name__ == "AdaptiveInstanceNorm2d")
class GPPatchMcResDis(nn.Module):
    """Multi-class patch discriminator with hinge losses and a squared
    gradient penalty helper (hence "GP"): a shared conv trunk (cnn_f)
    followed by a 1x1 conv head (cnn_c) producing one response map per class.
    """
    def __init__(self, hp):
        super(GPPatchMcResDis, self).__init__()
        assert hp['n_res_blks'] % 2 == 0, 'n_res_blk must be multiples of 2'
        self.n_layers = hp['n_res_blks'] // 2
        nf = hp['nf']
        # Stem conv, then (n_layers - 1) stages of two residual blocks plus
        # reflection-padded average-pool downsampling; the channel width
        # doubles per stage, capped at 1024.
        cnn_f = [Conv2dBlock(3, nf, 7, 1, 3,
                             pad_type='reflect',
                             norm='none',
                             activation='none')]
        for i in range(self.n_layers - 1):
            nf_out = np.min([nf * 2, 1024])
            cnn_f += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none')]
            cnn_f += [ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none')]
            cnn_f += [nn.ReflectionPad2d(1)]
            cnn_f += [nn.AvgPool2d(kernel_size=3, stride=2)]
            nf = np.min([nf * 2, 1024])
        nf_out = np.min([nf * 2, 1024])
        cnn_f += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none')]
        cnn_f += [ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none')]
        # Classification head: one 1x1 output channel per class.
        cnn_c = [Conv2dBlock(nf_out, hp['num_classes'], 1, 1,
                             norm='none',
                             activation='lrelu',
                             activation_first=True)]
        self.cnn_f = nn.Sequential(*cnn_f)
        self.cnn_c = nn.Sequential(*cnn_c)
    def forward(self, x, y):
        """Return (per-sample response map for its own class label y, trunk
        features).  x: image batch, y: integer class labels."""
        assert(x.size(0) == y.size(0))
        feat = self.cnn_f(x)
        out = self.cnn_c(feat)
        # Select, for each sample in the batch, the map of its class label.
        index = torch.LongTensor(range(out.size(0))).cuda()
        out = out[index, y, :, :]
        return out, feat
    def calc_dis_fake_loss(self, input_fake, input_label):
        """Hinge loss pushing fake responses below -1.  Also returns the
        fraction of patch responses classified as fake (< 0) and the raw
        response map."""
        resp_fake, gan_feat = self.forward(input_fake, input_label)
        total_count = torch.tensor(np.prod(resp_fake.size()),
                                   dtype=torch.float).cuda()
        fake_loss = torch.nn.ReLU()(1.0 + resp_fake).mean()
        correct_count = (resp_fake < 0).sum()
        fake_accuracy = correct_count.type_as(fake_loss) / total_count
        return fake_loss, fake_accuracy, resp_fake
    def calc_dis_real_loss(self, input_real, input_label):
        """Hinge loss pushing real responses above +1; accuracy counts
        responses >= 0.  Returns (loss, accuracy, response map)."""
        resp_real, gan_feat = self.forward(input_real, input_label)
        total_count = torch.tensor(np.prod(resp_real.size()),
                                   dtype=torch.float).cuda()
        real_loss = torch.nn.ReLU()(1.0 - resp_real).mean()
        correct_count = (resp_real >= 0).sum()
        real_accuracy = correct_count.type_as(real_loss) / total_count
        return real_loss, real_accuracy, resp_real
    def calc_gen_loss(self, input_fake, input_fake_label):
        """Generator-side adversarial loss (negative mean response) plus the
        trunk features, which callers use for feature matching."""
        resp_fake, gan_feat = self.forward(input_fake, input_fake_label)
        total_count = torch.tensor(np.prod(resp_fake.size()),
                                   dtype=torch.float).cuda()
        loss = -resp_fake.mean()
        correct_count = (resp_fake >= 0).sum()
        accuracy = correct_count.type_as(loss) / total_count
        return loss, accuracy, gan_feat
    def calc_grad2(self, d_out, x_in):
        """Squared gradient penalty of d_out w.r.t. x_in, averaged over the
        batch (applied to real class images by FUNITModel's dis update)."""
        batch_size = x_in.size(0)
        grad_dout = autograd.grad(outputs=d_out.mean(),
                                  inputs=x_in,
                                  create_graph=True,
                                  retain_graph=True,
                                  only_inputs=True)[0]
        grad_dout2 = grad_dout.pow(2)
        assert (grad_dout2.size() == x_in.size())
        reg = grad_dout2.sum()/batch_size
        return reg
class FewShotGen(nn.Module):
    """Few-shot image translation generator.

    Combines a content encoder (spatial code from the input image), a class
    model encoder (style code averaged over a set of class images), an MLP
    producing AdaIN parameters from the style code, and an AdaIN decoder.
    """
    def __init__(self, hp):
        super(FewShotGen, self).__init__()
        nf = hp['nf']
        nf_mlp = hp['nf_mlp']
        down_class = hp['n_downs_class']
        down_content = hp['n_downs_content']
        n_mlp_blks = hp['n_mlp_blks']
        n_res_blks = hp['n_res_blks']
        latent_dim = hp['latent_dim']
        self.enc_class_model = ClassModelEncoder(down_class,
                                                 3,
                                                 nf,
                                                 latent_dim,
                                                 norm='none',
                                                 activ='relu',
                                                 pad_type='reflect')
        self.enc_content = ContentEncoder(down_content,
                                          n_res_blks,
                                          3,
                                          nf,
                                          'in',
                                          activ='relu',
                                          pad_type='reflect')
        self.dec = Decoder(down_content,
                           n_res_blks,
                           self.enc_content.output_dim,
                           3,
                           res_norm='adain',
                           activ='relu',
                           pad_type='reflect')
        # The MLP emits exactly as many values as the decoder's AdaIN layers
        # consume.
        self.mlp = MLP(latent_dim,
                       get_num_adain_params(self.dec),
                       nf_mlp,
                       n_mlp_blks,
                       norm='none',
                       activ='relu')
    def forward(self, one_image, model_set):
        """Translate one_image into the class represented by model_set."""
        # reconstruct an image
        content, model_codes = self.encode(one_image, model_set)
        model_code = torch.mean(model_codes, dim=0).unsqueeze(0)
        images_trans = self.decode(content, model_code)
        return images_trans
    def encode(self, one_image, model_set):
        """Return (content code of one_image, mean class code of model_set)."""
        # extract content code from the input image
        content = self.enc_content(one_image)
        # extract model code from the images in the model set
        class_codes = self.enc_class_model(model_set)
        class_code = torch.mean(class_codes, dim=0).unsqueeze(0)
        return content, class_code
    def decode(self, content, model_code):
        """Render an image from a content code and a class (style) code."""
        # decode content and style codes to an image
        adain_params = self.mlp(model_code)
        assign_adain_params(adain_params, self.dec)
        images = self.dec(content)
        return images
class ClassModelEncoder(nn.Module):
    """Convolutional encoder mapping class images to a latent style code.

    Stem conv, two stride-2 convs that double the channel count, then
    (downs - 2) further stride-2 convs at constant width, global average
    pooling and a 1x1 projection to latent_dim.
    """
    def __init__(self, downs, ind_im, dim, latent_dim, norm, activ, pad_type):
        super(ClassModelEncoder, self).__init__()
        layers = [Conv2dBlock(ind_im, dim, 7, 1, 3,
                              norm=norm,
                              activation=activ,
                              pad_type=pad_type)]
        width = dim
        for _ in range(2):
            layers.append(Conv2dBlock(width, 2 * width, 4, 2, 1,
                                      norm=norm,
                                      activation=activ,
                                      pad_type=pad_type))
            width *= 2
        for _ in range(downs - 2):
            layers.append(Conv2dBlock(width, width, 4, 2, 1,
                                      norm=norm,
                                      activation=activ,
                                      pad_type=pad_type))
        layers.append(nn.AdaptiveAvgPool2d(1))
        layers.append(nn.Conv2d(width, latent_dim, 1, 1, 0))
        self.model = nn.Sequential(*layers)
        self.output_dim = width
    def forward(self, x):
        return self.model(x)
class ContentEncoder(nn.Module):
    """Encoder producing a spatial content code.

    Stem conv, `downs` stride-2 downsampling convs (each doubling the
    channel width), then n_res residual blocks at the final width.
    """
    def __init__(self, downs, n_res, input_dim, dim, norm, activ, pad_type):
        super(ContentEncoder, self).__init__()
        layers = [Conv2dBlock(input_dim, dim, 7, 1, 3,
                              norm=norm,
                              activation=activ,
                              pad_type=pad_type)]
        width = dim
        for _ in range(downs):
            layers.append(Conv2dBlock(width, 2 * width, 4, 2, 1,
                                      norm=norm,
                                      activation=activ,
                                      pad_type=pad_type))
            width *= 2
        layers.append(ResBlocks(n_res, width,
                                norm=norm,
                                activation=activ,
                                pad_type=pad_type))
        self.model = nn.Sequential(*layers)
        self.output_dim = width
    def forward(self, x):
        return self.model(x)
class Decoder(nn.Module):
    """Decoder: n_res residual blocks (res_norm, e.g. 'adain') followed by
    `ups` upsample+conv stages (each halving the width), ending in a tanh
    conv that produces an out_dim-channel image in [-1, 1].
    """
    def __init__(self, ups, n_res, dim, out_dim, res_norm, activ, pad_type):
        super(Decoder, self).__init__()
        layers = [ResBlocks(n_res, dim, res_norm,
                            activ, pad_type=pad_type)]
        width = dim
        for _ in range(ups):
            layers.append(nn.Upsample(scale_factor=2))
            layers.append(Conv2dBlock(width, width // 2, 5, 1, 2,
                                      norm='in',
                                      activation=activ,
                                      pad_type=pad_type))
            width //= 2
        layers.append(Conv2dBlock(width, out_dim, 7, 1, 3,
                                  norm='none',
                                  activation='tanh',
                                  pad_type=pad_type))
        self.model = nn.Sequential(*layers)
    def forward(self, x):
        return self.model(x)
class MLP(nn.Module):
    """Fully connected net mapping the style code to AdaIN parameters.

    n_blk LinearBlocks in total: input->dim, (n_blk - 2) hidden dim->dim
    blocks, and a final un-normalized, un-activated dim->out_dim projection.
    The input is flattened to (batch, -1) before the first layer.
    """
    def __init__(self, in_dim, out_dim, dim, n_blk, norm, activ):
        super(MLP, self).__init__()
        layers = [LinearBlock(in_dim, dim, norm=norm, activation=activ)]
        layers += [LinearBlock(dim, dim, norm=norm, activation=activ)
                   for _ in range(n_blk - 2)]
        layers.append(LinearBlock(dim, out_dim,
                                  norm='none', activation='none'))
        self.model = nn.Sequential(*layers)
    def forward(self, x):
        return self.model(x.view(x.size(0), -1))
| 10,860 | 39.830827 | 78 | py |
FUNIT | FUNIT-master/funit_model.py | """
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import copy
import torch
import torch.nn as nn
from networks import FewShotGen, GPPatchMcResDis
def recon_criterion(predict, target):
    """Mean absolute error (L1) between prediction and target."""
    return (predict - target).abs().mean()
class FUNITModel(nn.Module):
    """FUNIT generator/discriminator pair with the combined training losses.

    forward() performs either a generator update ('gen_update': adversarial
    + image reconstruction + feature-matching losses, backward() included)
    or a discriminator update ('dis_update': real/fake hinge losses plus a
    gradient penalty on the real images).  gen_test is an EMA copy of the
    generator used at evaluation time.
    """
    def __init__(self, hp):
        super(FUNITModel, self).__init__()
        self.gen = FewShotGen(hp['gen'])
        self.dis = GPPatchMcResDis(hp['dis'])
        self.gen_test = copy.deepcopy(self.gen)
    def forward(self, co_data, cl_data, hp, mode):
        """co_data/cl_data: (images, labels) content and class batches.
        Returns the loss terms of the selected update; gradients are already
        accumulated via backward() inside this call."""
        xa = co_data[0].cuda()
        la = co_data[1].cuda()
        xb = cl_data[0].cuda()
        lb = cl_data[1].cuda()
        if mode == 'gen_update':
            # INSERT here the object detection
            # then, for each objects, keep the bounding box and encode it,
            # then merge (in some way, maybe replace the features), in the encoding
            # of the whole image, the encoding of each objects using its bounding box
            # information
            c_xa = self.gen.enc_content(xa)
            s_xa = self.gen.enc_class_model(xa)
            s_xb = self.gen.enc_class_model(xb)
            xt = self.gen.decode(c_xa, s_xb)  # translation
            xr = self.gen.decode(c_xa, s_xa)  # reconstruction
            l_adv_t, gacc_t, xt_gan_feat = self.dis.calc_gen_loss(xt, lb)
            l_adv_r, gacc_r, xr_gan_feat = self.dis.calc_gen_loss(xr, la)
            _, xb_gan_feat = self.dis(xb, lb)
            _, xa_gan_feat = self.dis(xa, la)
            # Feature-matching losses on spatially averaged trunk features:
            # reconstruction vs. content image, translation vs. class image.
            l_c_rec = recon_criterion(xr_gan_feat.mean(3).mean(2),
                                      xa_gan_feat.mean(3).mean(2))
            l_m_rec = recon_criterion(xt_gan_feat.mean(3).mean(2),
                                      xb_gan_feat.mean(3).mean(2))
            l_x_rec = recon_criterion(xr, xa)
            l_adv = 0.5 * (l_adv_t + l_adv_r)
            acc = 0.5 * (gacc_t + gacc_r)
            l_total = (hp['gan_w'] * l_adv + hp['r_w'] * l_x_rec + hp[
                'fm_w'] * (l_c_rec + l_m_rec))
            l_total.backward()
            return l_total, l_adv, l_x_rec, l_c_rec, l_m_rec, acc
        elif mode == 'dis_update':
            # Real images need gradients for the gradient penalty below.
            xb.requires_grad_()
            l_real_pre, acc_r, resp_r = self.dis.calc_dis_real_loss(xb, lb)
            l_real = hp['gan_w'] * l_real_pre
            l_real.backward(retain_graph=True)
            l_reg_pre = self.dis.calc_grad2(resp_r, xb)
            l_reg = 10 * l_reg_pre
            l_reg.backward()
            # Generate translations without building a generator graph.
            with torch.no_grad():
                c_xa = self.gen.enc_content(xa)
                s_xb = self.gen.enc_class_model(xb)
                xt = self.gen.decode(c_xa, s_xb)
            l_fake_p, acc_f, resp_f = self.dis.calc_dis_fake_loss(xt.detach(),
                                                                  lb)
            l_fake = hp['gan_w'] * l_fake_p
            l_fake.backward()
            l_total = l_fake + l_real + l_reg
            acc = 0.5 * (acc_f + acc_r)
            return l_total, l_fake_p, l_real_pre, l_reg_pre, acc
        else:
            assert 0, 'Not support operation'
    def test(self, co_data, cl_data):
        """Translate/reconstruct a batch with both the current generator and
        the EMA generator; returns the six image tensors for visualization."""
        self.eval()
        self.gen.eval()
        self.gen_test.eval()
        xa = co_data[0].cuda()
        xb = cl_data[0].cuda()
        c_xa_current = self.gen.enc_content(xa)
        s_xa_current = self.gen.enc_class_model(xa)
        s_xb_current = self.gen.enc_class_model(xb)
        xt_current = self.gen.decode(c_xa_current, s_xb_current)
        xr_current = self.gen.decode(c_xa_current, s_xa_current)
        c_xa = self.gen_test.enc_content(xa)
        s_xa = self.gen_test.enc_class_model(xa)
        s_xb = self.gen_test.enc_class_model(xb)
        xt = self.gen_test.decode(c_xa, s_xb)
        xr = self.gen_test.decode(c_xa, s_xa)
        self.train()
        return xa, xr_current, xt_current, xb, xr, xt
    def translate_k_shot(self, co_data, cl_data, k):
        """Translate with the EMA generator, averaging the class codes of the
        k style images in cl_data via 1-D average pooling."""
        self.eval()
        xa = co_data[0].cuda()
        xb = cl_data[0].cuda()
        c_xa_current = self.gen_test.enc_content(xa)
        if k == 1:
            c_xa_current = self.gen_test.enc_content(xa)
            s_xb_current = self.gen_test.enc_class_model(xb)
            xt_current = self.gen_test.decode(c_xa_current, s_xb_current)
        else:
            # Pool the k class codes into one before decoding.
            s_xb_current_before = self.gen_test.enc_class_model(xb)
            s_xb_current_after = s_xb_current_before.squeeze(-1).permute(1,
                                                                         2,
                                                                         0)
            s_xb_current_pool = torch.nn.functional.avg_pool1d(
                s_xb_current_after, k)
            s_xb_current = s_xb_current_pool.permute(2, 0, 1).unsqueeze(-1)
            xt_current = self.gen_test.decode(c_xa_current, s_xb_current)
        return xt_current
    def compute_k_style(self, style_batch, k):
        """Return the average class code of a batch of k style images."""
        self.eval()
        style_batch = style_batch.cuda()
        s_xb_before = self.gen_test.enc_class_model(style_batch)
        s_xb_after = s_xb_before.squeeze(-1).permute(1, 2, 0)
        s_xb_pool = torch.nn.functional.avg_pool1d(s_xb_after, k)
        s_xb = s_xb_pool.permute(2, 0, 1).unsqueeze(-1)
        return s_xb
    def translate_simple(self, content_image, class_code):
        """Translate a content image given a precomputed class code."""
        self.eval()
        xa = content_image.cuda()
        s_xb_current = class_code.cuda()
        c_xa_current = self.gen_test.enc_content(xa)
        xt_current = self.gen_test.decode(c_xa_current, s_xb_current)
        return xt_current
| 5,659 | 41.238806 | 85 | py |
FUNIT | FUNIT-master/train.py | """
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import torch
import os
import sys
import argparse
import shutil
from tensorboardX import SummaryWriter
from utils import get_config, get_train_loaders, make_result_folders
from utils import write_loss, write_html, write_1images, Timer
from trainer import Trainer
import torch.backends.cudnn as cudnn
# Training driver: alternates discriminator/generator updates, periodically
# logs losses, writes sample image grids + an HTML index, and snapshots
# checkpoints until max_iter is reached.
# Enable auto-tuner to find the best algorithm to use for your hardware.
cudnn.benchmark = True
# Pin training to GPU index 1 (must be set before CUDA is initialized).
os.environ['CUDA_VISIBLE_DEVICES']='1'
# Command-line options.
parser = argparse.ArgumentParser()
parser.add_argument('--config',
                    type=str,
                    default='configs/funit_animals.yaml',
                    help='configuration file for training and testing')
parser.add_argument('--output_path',
                    type=str,
                    default='.',
                    help="outputs path")
parser.add_argument('--multigpus',
                    action="store_true")
parser.add_argument('--batch_size',
                    type=int,
                    default=0)
parser.add_argument('--test_batch_size',
                    type=int,
                    default=4)
parser.add_argument("--resume",
                    action="store_true")
opts = parser.parse_args()
# Load experiment setting
config = get_config(opts.config)
max_iter = config['max_iter']
# Override the batch size if specified.
if opts.batch_size != 0:
    config['batch_size'] = opts.batch_size
trainer = Trainer(config)
trainer.cuda()
if opts.multigpus:
    # Wrap the model for data-parallel execution across all visible GPUs.
    ngpus = torch.cuda.device_count()
    config['gpus'] = ngpus
    print("Number of GPUs: %d" % ngpus)
    trainer.model = torch.nn.DataParallel(
        trainer.model, device_ids=range(ngpus))
else:
    config['gpus'] = 1
loaders = get_train_loaders(config)
train_content_loader = loaders[0]
train_class_loader = loaders[1]
test_content_loader = loaders[2]
test_class_loader = loaders[3]
# Setup logger and output folders
model_name = os.path.splitext(os.path.basename(opts.config))[0]
train_writer = SummaryWriter(
    os.path.join(opts.output_path + "/logs", model_name))
output_directory = os.path.join(opts.output_path + "/outputs", model_name)
checkpoint_directory, image_directory = make_result_folders(output_directory)
shutil.copy(opts.config, os.path.join(output_directory, 'config.yaml'))
# Resume from the latest checkpoint if requested, else start at iteration 0.
iterations = trainer.resume(checkpoint_directory,
                            hp=config,
                            multigpus=opts.multigpus) if opts.resume else 0
while True:
    for it, (co_data, cl_data) in enumerate(
            zip(train_content_loader, train_class_loader)):
        with Timer("Elapsed time in update: %f"):
            # One discriminator step followed by one generator step.
            d_acc = trainer.dis_update(co_data, cl_data, config)
            g_acc = trainer.gen_update(co_data, cl_data, config,
                                       opts.multigpus)
            torch.cuda.synchronize()
            print('D acc: %.4f\t G acc: %.4f' % (d_acc, g_acc))
        if (iterations + 1) % config['log_iter'] == 0:
            print("Iteration: %08d/%08d" % (iterations + 1, max_iter))
            write_loss(iterations, trainer, train_writer)
        # Dump sample translations, either as a numbered save or as the
        # rolling 'current' preview.
        if ((iterations + 1) % config['image_save_iter'] == 0 or (
                iterations + 1) % config['image_display_iter'] == 0):
            if (iterations + 1) % config['image_save_iter'] == 0:
                key_str = '%08d' % (iterations + 1)
                write_html(output_directory + "/index.html", iterations + 1,
                           config['image_save_iter'], 'images')
            else:
                key_str = 'current'
            with torch.no_grad():
                for t, (val_co_data, val_cl_data) in enumerate(
                        zip(train_content_loader, train_class_loader)):
                    if t >= opts.test_batch_size:
                        break
                    val_image_outputs = trainer.test(val_co_data, val_cl_data,
                                                     opts.multigpus)
                    write_1images(val_image_outputs, image_directory,
                                  'train_%s_%02d' % (key_str, t))
                for t, (test_co_data, test_cl_data) in enumerate(
                        zip(test_content_loader, test_class_loader)):
                    if t >= opts.test_batch_size:
                        break
                    test_image_outputs = trainer.test(test_co_data,
                                                      test_cl_data,
                                                      opts.multigpus)
                    write_1images(test_image_outputs, image_directory,
                                  'test_%s_%02d' % (key_str, t))
        if (iterations + 1) % config['snapshot_save_iter'] == 0:
            trainer.save(checkpoint_directory, iterations, opts.multigpus)
            print('Saved model at iteration %d' % (iterations + 1))
        iterations += 1
        if iterations >= max_iter:
            print("Finish Training")
            sys.exit(0)
| 5,178 | 37.93985 | 78 | py |
FUNIT | FUNIT-master/trainer.py | """
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import copy
import os
import math
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.optim import lr_scheduler
from funit_model import FUNITModel
def update_average(model_tgt, model_src, beta=0.999):
    """EMA update, in place: target <- beta * target + (1 - beta) * source."""
    with torch.no_grad():
        sources = {name: p for name, p in model_src.named_parameters()}
        for name, target in model_tgt.named_parameters():
            source = sources[name]
            assert source is not target
            target.copy_(beta * target + (1. - beta) * source)
class Trainer(nn.Module):
    """Owns the FUNITModel together with its RMSprop optimizers, LR
    schedulers, weight initialization and checkpoint save/resume logic."""
    def __init__(self, cfg):
        super(Trainer, self).__init__()
        self.model = FUNITModel(cfg)
        lr_gen = cfg['lr_gen']
        lr_dis = cfg['lr_dis']
        dis_params = list(self.model.dis.parameters())
        gen_params = list(self.model.gen.parameters())
        # Bug fix: the learning rates were swapped -- the discriminator
        # optimizer received lr_gen and the generator optimizer lr_dis.
        # Latent only because the shipped configs use equal rates.
        self.dis_opt = torch.optim.RMSprop(
            [p for p in dis_params if p.requires_grad],
            lr=lr_dis, weight_decay=cfg['weight_decay'])
        self.gen_opt = torch.optim.RMSprop(
            [p for p in gen_params if p.requires_grad],
            lr=lr_gen, weight_decay=cfg['weight_decay'])
        self.dis_scheduler = get_scheduler(self.dis_opt, cfg)
        self.gen_scheduler = get_scheduler(self.gen_opt, cfg)
        self.apply(weights_init(cfg['init']))
        # gen_test holds the EMA copy of the generator used at test time.
        self.model.gen_test = copy.deepcopy(self.model.gen)
    def gen_update(self, co_data, cl_data, hp, multigpus):
        """One generator step; stores scalar losses on self and returns the
        generator's adversarial accuracy."""
        self.gen_opt.zero_grad()
        al, ad, xr, cr, sr, ac = self.model(co_data, cl_data, hp, 'gen_update')
        self.loss_gen_total = torch.mean(al)
        self.loss_gen_recon_x = torch.mean(xr)
        self.loss_gen_recon_c = torch.mean(cr)
        self.loss_gen_recon_s = torch.mean(sr)
        self.loss_gen_adv = torch.mean(ad)
        self.accuracy_gen_adv = torch.mean(ac)
        self.gen_opt.step()
        this_model = self.model.module if multigpus else self.model
        # Track the EMA generator after every optimizer step.
        update_average(this_model.gen_test, this_model.gen)
        return self.accuracy_gen_adv.item()
    def dis_update(self, co_data, cl_data, hp):
        """One discriminator step; stores scalar losses on self and returns
        the discriminator's accuracy."""
        self.dis_opt.zero_grad()
        al, lfa, lre, reg, acc = self.model(co_data, cl_data, hp, 'dis_update')
        self.loss_dis_total = torch.mean(al)
        self.loss_dis_fake_adv = torch.mean(lfa)
        self.loss_dis_real_adv = torch.mean(lre)
        self.loss_dis_reg = torch.mean(reg)
        self.accuracy_dis_adv = torch.mean(acc)
        self.dis_opt.step()
        return self.accuracy_dis_adv.item()
    def test(self, co_data, cl_data, multigpus):
        """Translate/reconstruct a batch with current and EMA generators."""
        this_model = self.model.module if multigpus else self.model
        return this_model.test(co_data, cl_data)
    def resume(self, checkpoint_dir, hp, multigpus):
        """Reload the latest gen/dis/optimizer state; returns the iteration."""
        this_model = self.model.module if multigpus else self.model
        last_model_name = get_model_list(checkpoint_dir, "gen")
        state_dict = torch.load(last_model_name)
        this_model.gen.load_state_dict(state_dict['gen'])
        this_model.gen_test.load_state_dict(state_dict['gen_test'])
        # Checkpoints are named gen_%08d.pt, so the iteration count is the
        # eight digits right before the '.pt' suffix.
        iterations = int(last_model_name[-11:-3])
        last_model_name = get_model_list(checkpoint_dir, "dis")
        state_dict = torch.load(last_model_name)
        this_model.dis.load_state_dict(state_dict['dis'])
        state_dict = torch.load(os.path.join(checkpoint_dir, 'optimizer.pt'))
        self.dis_opt.load_state_dict(state_dict['dis'])
        self.gen_opt.load_state_dict(state_dict['gen'])
        self.dis_scheduler = get_scheduler(self.dis_opt, hp, iterations)
        self.gen_scheduler = get_scheduler(self.gen_opt, hp, iterations)
        print('Resume from iteration %d' % iterations)
        return iterations
    def save(self, snapshot_dir, iterations, multigpus):
        """Write generator (current + EMA), discriminator and optimizer
        snapshots under snapshot_dir."""
        this_model = self.model.module if multigpus else self.model
        # Save generators, discriminators, and optimizers
        gen_name = os.path.join(snapshot_dir, 'gen_%08d.pt' % (iterations + 1))
        dis_name = os.path.join(snapshot_dir, 'dis_%08d.pt' % (iterations + 1))
        opt_name = os.path.join(snapshot_dir, 'optimizer.pt')
        torch.save({'gen': this_model.gen.state_dict(),
                    'gen_test': this_model.gen_test.state_dict()}, gen_name)
        torch.save({'dis': this_model.dis.state_dict()}, dis_name)
        torch.save({'gen': self.gen_opt.state_dict(),
                    'dis': self.dis_opt.state_dict()}, opt_name)
    def load_ckpt(self, ckpt_name):
        """Load generator weights (current + EMA) from a checkpoint file."""
        state_dict = torch.load(ckpt_name)
        self.model.gen.load_state_dict(state_dict['gen'])
        self.model.gen_test.load_state_dict(state_dict['gen_test'])
    def translate(self, co_data, cl_data):
        # NOTE(review): FUNITModel defines no `translate` method in this
        # snapshot; this call only works if the wrapped model provides one.
        return self.model.translate(co_data, cl_data)
    def translate_k_shot(self, co_data, cl_data, k, mode=None):
        """k-shot translation via the EMA generator.

        Bug fix: FUNITModel.translate_k_shot accepts (co_data, cl_data, k)
        only; forwarding the extra `mode` argument raised a TypeError.  The
        parameter is kept (now optional) and ignored for compatibility.
        """
        return self.model.translate_k_shot(co_data, cl_data, k)
    def forward(self, *inputs):
        """Unused; training goes through gen_update / dis_update instead."""
        print('Forward function not implemented.')
        pass
def get_model_list(dirname, key):
    """Return the path of the latest checkpoint in `dirname` whose filename
    contains `key` and ".pt", or None if there is none.

    "Latest" relies on lexicographic sorting of the zero-padded iteration
    suffix in the filenames (e.g. gen_00010000.pt).
    """
    if not os.path.exists(dirname):
        return None
    gen_models = [os.path.join(dirname, f) for f in os.listdir(dirname) if
                  os.path.isfile(os.path.join(dirname, f)) and
                  key in f and ".pt" in f]
    # BUG FIX: the list comprehension never yields None, so the old
    # `if gen_models is None` check could not fire and an empty/keyless
    # directory crashed with IndexError on gen_models[-1].
    if not gen_models:
        return None
    gen_models.sort()
    return gen_models[-1]
def get_scheduler(optimizer, hp, it=-1):
    """Build a learning-rate scheduler from hyperparameters.

    Args:
        optimizer: the optimizer to schedule.
        hp: hyperparameter dict; 'lr_policy' may be absent, 'constant',
            or 'step' (the latter also needs 'step_size' and 'gamma').
        it: last finished iteration (-1 starts fresh), used when resuming.

    Returns:
        A StepLR scheduler, or None for a constant learning rate.

    Raises:
        NotImplementedError: for an unknown 'lr_policy'.
    """
    if 'lr_policy' not in hp or hp['lr_policy'] == 'constant':
        return None  # constant learning rate: no scheduler needed
    if hp['lr_policy'] == 'step':
        return lr_scheduler.StepLR(optimizer, step_size=hp['step_size'],
                                   gamma=hp['gamma'], last_epoch=it)
    # BUG FIX: the exception was previously *returned* instead of raised,
    # and the '%s' placeholder was never interpolated into the message.
    raise NotImplementedError('%s not implemented' % hp['lr_policy'])
def weights_init(init_type='gaussian'):
    """Return an initializer suitable for `module.apply(...)`.

    The returned function initializes the weights of Conv*/Linear layers
    according to `init_type` and zeroes their biases; all other modules
    are left untouched.
    """
    def init_fun(m):
        cls_name = m.__class__.__name__
        targets_weights = (cls_name.find('Conv') == 0
                           or cls_name.find('Linear') == 0)
        if not (targets_weights and hasattr(m, 'weight')):
            return
        if init_type == 'gaussian':
            init.normal_(m.weight.data, 0.0, 0.02)
        elif init_type == 'xavier':
            init.xavier_normal_(m.weight.data, gain=math.sqrt(2))
        elif init_type == 'kaiming':
            init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
        elif init_type == 'orthogonal':
            init.orthogonal_(m.weight.data, gain=math.sqrt(2))
        elif init_type == 'default':
            # keep the layer's built-in initialization
            pass
        else:
            assert 0, "Unsupported initialization: {}".format(init_type)
        if hasattr(m, 'bias') and m.bias is not None:
            init.constant_(m.bias.data, 0.0)
    return init_fun
| 6,871 | 39.662722 | 79 | py |
FUNIT | FUNIT-master/blocks.py | """
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import torch
import torch.nn.functional as F
from torch import nn
class ResBlocks(nn.Module):
    """A sequential stack of `num_blocks` identical residual blocks."""

    def __init__(self, num_blocks, dim, norm, activation, pad_type):
        super(ResBlocks, self).__init__()
        blocks = [ResBlock(dim,
                           norm=norm,
                           activation=activation,
                           pad_type=pad_type)
                  for _ in range(num_blocks)]
        self.model = nn.Sequential(*blocks)

    def forward(self, x):
        return self.model(x)
class ResBlock(nn.Module):
    """Residual block: two 3x3 conv blocks plus an identity skip connection.

    The second conv has no activation so the addition happens pre-activation.
    """

    def __init__(self, dim, norm='in', activation='relu', pad_type='zero'):
        super(ResBlock, self).__init__()
        self.model = nn.Sequential(
            Conv2dBlock(dim, dim, 3, 1, 1,
                        norm=norm,
                        activation=activation,
                        pad_type=pad_type),
            Conv2dBlock(dim, dim, 3, 1, 1,
                        norm=norm,
                        activation='none',
                        pad_type=pad_type),
        )

    def forward(self, x):
        return x + self.model(x)
class ActFirstResBlock(nn.Module):
    """Pre-activation residual block with an optional learned 1x1 shortcut.

    When input and output channel counts differ, the skip path is a 1x1
    convolution; otherwise it is the identity.
    """

    def __init__(self, fin, fout, fhid=None,
                 activation='lrelu', norm='none'):
        super().__init__()
        self.learned_shortcut = (fin != fout)
        self.fin = fin
        self.fout = fout
        # Hidden width defaults to the narrower of input/output.
        self.fhid = min(fin, fout) if fhid is None else fhid
        self.conv_0 = Conv2dBlock(self.fin, self.fhid, 3, 1,
                                  padding=1, pad_type='reflect', norm=norm,
                                  activation=activation, activation_first=True)
        self.conv_1 = Conv2dBlock(self.fhid, self.fout, 3, 1,
                                  padding=1, pad_type='reflect', norm=norm,
                                  activation=activation, activation_first=True)
        if self.learned_shortcut:
            self.conv_s = Conv2dBlock(self.fin, self.fout, 1, 1,
                                      activation='none', use_bias=False)

    def forward(self, x):
        shortcut = self.conv_s(x) if self.learned_shortcut else x
        return shortcut + self.conv_1(self.conv_0(x))
class LinearBlock(nn.Module):
    """Fully-connected layer followed by optional normalization and activation."""

    def __init__(self, in_dim, out_dim, norm='none', activation='relu'):
        super(LinearBlock, self).__init__()
        self.fc = nn.Linear(in_dim, out_dim, bias=True)
        # normalization layer (or None)
        if norm == 'bn':
            self.norm = nn.BatchNorm1d(out_dim)
        elif norm == 'in':
            self.norm = nn.InstanceNorm1d(out_dim)
        elif norm == 'none':
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)
        # activation layer (or None)
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=False)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=False)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'none':
            self.activation = None
        else:
            assert 0, "Unsupported activation: {}".format(activation)

    def forward(self, x):
        y = self.fc(x)
        if self.norm is not None:
            y = self.norm(y)
        if self.activation is not None:
            y = self.activation(y)
        return y
class Conv2dBlock(nn.Module):
    """Padding -> Conv2d with optional normalization and activation.

    With `activation_first=True` the order is activation -> pad -> conv -> norm
    (pre-activation); otherwise pad -> conv -> norm -> activation.
    """

    def __init__(self, in_dim, out_dim, ks, st, padding=0,
                 norm='none', activation='relu', pad_type='zero',
                 use_bias=True, activation_first=False):
        super(Conv2dBlock, self).__init__()
        self.use_bias = use_bias
        self.activation_first = activation_first
        # padding layer
        if pad_type == 'reflect':
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == 'replicate':
            self.pad = nn.ReplicationPad2d(padding)
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d(padding)
        else:
            assert 0, "Unsupported padding type: {}".format(pad_type)
        # normalization layer (or None)
        if norm == 'bn':
            self.norm = nn.BatchNorm2d(out_dim)
        elif norm == 'in':
            self.norm = nn.InstanceNorm2d(out_dim)
        elif norm == 'adain':
            self.norm = AdaptiveInstanceNorm2d(out_dim)
        elif norm == 'none':
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)
        # activation layer (or None)
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=False)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=False)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'none':
            self.activation = None
        else:
            assert 0, "Unsupported activation: {}".format(activation)
        self.conv = nn.Conv2d(in_dim, out_dim, ks, st, bias=self.use_bias)

    def forward(self, x):
        if self.activation_first and self.activation is not None:
            x = self.activation(x)
        x = self.conv(self.pad(x))
        if self.norm is not None:
            x = self.norm(x)
        if not self.activation_first and self.activation is not None:
            x = self.activation(x)
        return x
class AdaptiveInstanceNorm2d(nn.Module):
    """Instance normalization whose affine parameters are assigned externally.

    `weight` and `bias` start as None and must be assigned before forward()
    is called — presumably per batch from a style/class code elsewhere in the
    model (confirm in the caller). Since batch_norm is applied to the
    b*c-channel reshaped tensor below, the assigned weight/bias are expected
    to have b*c elements — TODO confirm against the assigning code.
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.1):
        super(AdaptiveInstanceNorm2d, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        # Affine parameters are injected from outside; None means "not set yet".
        self.weight = None
        self.bias = None
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))

    def forward(self, x):
        assert self.weight is not None and \
               self.bias is not None, "Please assign AdaIN weight first"
        b, c = x.size(0), x.size(1)
        # Tile the running stats so every (sample, channel) pair has its own slot.
        running_mean = self.running_mean.repeat(b)
        running_var = self.running_var.repeat(b)
        # Fold the batch dimension into channels and reuse batch_norm: with
        # training=True each of the b*c "channels" is normalized over its own
        # spatial extent, which is exactly instance normalization.
        x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:])
        out = F.batch_norm(
            x_reshaped, running_mean, running_var, self.weight, self.bias,
            True, self.momentum, self.eps)
        return out.view(b, c, *x.size()[2:])

    def __repr__(self):
        return self.__class__.__name__ + '(' + str(self.num_features) + ')'
| 6,986 | 34.647959 | 79 | py |
FUNIT | FUNIT-master/tools/extract_animalfaces.py | """
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license
(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import os
import argparse
from PIL import Image
# Crop animal-face regions out of ImageNet images using a coordinate file.
parser = argparse.ArgumentParser()
parser.add_argument('imagenet_folder', type=str)
parser.add_argument('--output_folder', type=str)
parser.add_argument('--coor_file', type=str)
opts = parser.parse_args()

IMAGENET_TRAIN = opts.imagenet_folder
OUT_PUT_FOLDER = opts.output_folder
# BUG FIX: argparse stores '--coor_file' as attribute 'coor_file';
# 'opts.coor_File' raised AttributeError at runtime.
COOR_FILE = opts.coor_file

# Each line: "<relative_image_path> <x> <y> <w> <h>"
with open(COOR_FILE, 'rt') as f:
    lines = f.readlines()

for l in lines:
    ls = l.strip().split(' ')
    img_name = os.path.join(IMAGENET_TRAIN, ls[0])
    img = Image.open(img_name)
    img = img.convert('RGB')
    x = int(ls[1])
    y = int(ls[2])
    w = int(ls[3])
    h = int(ls[4])
    out_name = os.path.join(OUT_PUT_FOLDER,
                            '%s_%d_%d_%d_%d.jpg' % (ls[0], x, y, w, h))
    # NOTE(review): PIL's crop box is (left, upper, right, lower). The names
    # w/h suggest width/height but they are used as right/lower here, so the
    # coordinate file presumably stores box corners — confirm before changing.
    crop = img.crop((x, y, w, h))
    os.makedirs(os.path.dirname(out_name), exist_ok=True)
    print(out_name)
    crop.save(out_name)
| 1,077 | 27.368421 | 71 | py |
FUNIT | FUNIT-master/datasets/intermediate_and_scripts/split_and_filter.py | #split and filter the maximum amount of images according to the most restricting datasets in INIT dataset
from math import floor
# Files listing the source image paths, one per weather class.
TRAIN_FILENAME_PATHS = ["cloudy_list.txt", "night_list.txt", "sunny_list.txt"]
TEST_FILENAME_PATHS = ["rainy_list.txt"]
# Output files that will hold the subsampled train/test image paths.
TRAIN_FILENAME = "roads_list_train.txt"
TEST_FILENAME = "roads_list_test.txt"
# Per-class sample budget; this corresponds to taking roughly 5% of the INIT dataset.
NUMBER_OF_IMAGES_PER_CLASS = 600
#return all the paths in the corresponding files
def retrieve_paths(filename_paths):
    """Read each listing file and return one list of lines per file."""
    all_paths = []
    for listing in filename_paths:
        with open(listing) as handle:
            all_paths.append(handle.readlines())
    return all_paths
train_paths_list = retrieve_paths(TRAIN_FILENAME_PATHS)
test_paths_list = retrieve_paths(TEST_FILENAME_PATHS)

# Find the most restricting (smallest) class in each split.
min_train_paths = len(min(train_paths_list, key=lambda p: len(p)))
# BUG FIX: this previously measured train_paths_list again, so the test
# split was never actually validated against NUMBER_OF_IMAGES_PER_CLASS.
min_test_paths = len(min(test_paths_list, key=lambda p: len(p)))

if min_train_paths < NUMBER_OF_IMAGES_PER_CLASS or min_test_paths < NUMBER_OF_IMAGES_PER_CLASS:
    raise Exception("There is not enough images for at least one class < NUMBER_OF_IMAGES_PER_CLASS={}".format(NUMBER_OF_IMAGES_PER_CLASS))
def filter_and_save_datasets(filename, paths_list, number_of_images=NUMBER_OF_IMAGES_PER_CLASS):
    """Evenly subsample each class's path list and write the result to `filename`.

    For each class, roughly `number_of_images` lines are kept by striding
    through the list with a fixed step.
    """
    with open(filename, 'w') as out_file:
        for class_paths in paths_list:
            step = floor(len(class_paths) / number_of_images)
            for idx in range(0, len(class_paths), step):
                out_file.write(class_paths[idx])
# Write the subsampled train/test splits to disk.
filter_and_save_datasets(TRAIN_FILENAME, train_paths_list)
filter_and_save_datasets(TEST_FILENAME, test_paths_list)
| 1,791 | 40.674419 | 139 | py |
SimSiam-91.9-top1-acc-on-CIFAR10 | SimSiam-91.9-top1-acc-on-CIFAR10-main/main.py | import argparse
import time
import math
from os import path, makedirs
import torch
from torch import optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.backends import cudnn
from torchvision import datasets
from torchvision import transforms
from simsiam.loader import TwoCropsTransform
from simsiam.model_factory import SimSiam
from simsiam.criterion import SimSiamLoss
from simsiam.validation import KNNValidation
# Command-line arguments for SimSiam pre-training on CIFAR-10.
parser = argparse.ArgumentParser('arguments for training')

parser.add_argument('--data_root', type=str, help='path to dataset directory')
parser.add_argument('--exp_dir', type=str, help='path to experiment directory')
parser.add_argument('--trial', type=str, default='1', help='trial id')
parser.add_argument('--img_dim', default=32, type=int)

parser.add_argument('--arch', default='resnet18', help='model name is used for training')

parser.add_argument('--feat_dim', default=2048, type=int, help='feature dimension')
parser.add_argument('--num_proj_layers', type=int, default=2, help='number of projection layer')
parser.add_argument('--batch_size', type=int, default=512, help='batch_size')
parser.add_argument('--num_workers', type=int, default=8, help='num of workers to use')
parser.add_argument('--epochs', type=int, default=800, help='number of training epochs')
parser.add_argument('--gpu', default=None, type=int, help='GPU id to use.')
parser.add_argument('--loss_version', default='simplified', type=str,
                    choices=['simplified', 'original'],
                    help='do the same thing but simplified version is much faster. ()')
parser.add_argument('--print_freq', default=10, type=int, help='print frequency')
parser.add_argument('--eval_freq', default=5, type=int, help='evaluate model frequency')
parser.add_argument('--save_freq', default=50, type=int, help='save model frequency')
parser.add_argument('--resume', default=None, type=str, help='path to latest checkpoint')

parser.add_argument('--learning_rate', type=float, default=0.05, help='learning rate')
parser.add_argument('--weight_decay', type=float, default=5e-4, help='weight decay')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')

# Parsed once at import time; the whole module reads this global namespace.
args = parser.parse_args()
def main():
    """Pre-train SimSiam on CIFAR-10 with KNN validation and checkpointing."""
    if not path.exists(args.exp_dir):
        makedirs(args.exp_dir)

    trial_dir = path.join(args.exp_dir, args.trial)
    logger = SummaryWriter(trial_dir)
    print(vars(args))

    # SimCLR-style augmentations; each image is returned as two random views.
    train_transforms = transforms.Compose([
        transforms.RandomResizedCrop(args.img_dim, scale=(0.2, 1.)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomApply([
            transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)  # not strengthened
        ], p=0.8),
        transforms.RandomGrayscale(p=0.2),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
    ])

    train_set = datasets.CIFAR10(root=args.data_root,
                                 train=True,
                                 download=True,
                                 transform=TwoCropsTransform(train_transforms))

    train_loader = DataLoader(dataset=train_set,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.num_workers,
                              pin_memory=True,
                              drop_last=True)

    model = SimSiam(args)
    optimizer = optim.SGD(model.parameters(),
                          lr=args.learning_rate,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    criterion = SimSiamLoss(args.loss_version)

    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
        criterion = criterion.cuda(args.gpu)
        cudnn.benchmark = True

    start_epoch = 1
    if args.resume is not None:
        if path.isfile(args.resume):
            start_epoch, model, optimizer = load_checkpoint(model, optimizer, args.resume)
            print("Loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, start_epoch))
        else:
            print("No checkpoint found at '{}'".format(args.resume))

    # routine
    best_acc = 0.0
    validation = KNNValidation(args, model.encoder)
    for epoch in range(start_epoch, args.epochs+1):

        adjust_learning_rate(optimizer, epoch, args)
        print("Training...")

        # train for one epoch
        train_loss = train(train_loader, model, criterion, optimizer, epoch, args)
        logger.add_scalar('Loss/train', train_loss, epoch)

        if epoch % args.eval_freq == 0:
            print("Validating...")
            val_top1_acc = validation.eval()
            print('Top1: {}'.format(val_top1_acc))

            # save the best model
            if val_top1_acc > best_acc:
                best_acc = val_top1_acc

                save_checkpoint(epoch, model, optimizer, best_acc,
                                path.join(trial_dir, '{}_best.pth'.format(args.trial)),
                                'Saving the best model!')

            logger.add_scalar('Acc/val_top1', val_top1_acc, epoch)

        # save the model
        # NOTE(review): val_top1_acc is only bound after the first eval; if
        # save_freq ever fires before eval_freq this raises NameError (the
        # defaults 50/5 happen to avoid it) — confirm before changing defaults.
        if epoch % args.save_freq == 0:
            save_checkpoint(epoch, model, optimizer, val_top1_acc,
                            path.join(trial_dir, 'ckpt_epoch_{}_{}.pth'.format(epoch, args.trial)),
                            'Saving...')

    print('Best accuracy:', best_acc)

    # save model
    save_checkpoint(epoch, model, optimizer, val_top1_acc,
                    path.join(trial_dir, '{}_last.pth'.format(args.trial)),
                    'Saving the model at the last epoch.')
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Run one SimSiam pre-training epoch and return the average loss."""
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    progress = ProgressMeter(len(train_loader),
                             [batch_time, losses],
                             prefix="Epoch: [{}]".format(epoch))

    model.train()

    tic = time.time()
    for step, (images, _) in enumerate(train_loader):
        # `images` holds the two augmented views of the same batch.
        if args.gpu is not None:
            images = [view.cuda(args.gpu, non_blocking=True) for view in images]

        outs = model(im_aug1=images[0], im_aug2=images[1])
        loss = criterion(outs['z1'], outs['z2'], outs['p1'], outs['p2'])

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        losses.update(loss.item(), images[0].size(0))
        batch_time.update(time.time() - tic)
        tic = time.time()

        if step % args.print_freq == 0:
            progress.display(step)

    return losses.avg
def adjust_learning_rate(optimizer, epoch, args):
    """Cosine-anneal the learning rate from args.learning_rate down to 0."""
    scale = 0.5 * (1. + math.cos(math.pi * epoch / args.epochs))
    new_lr = args.learning_rate * scale
    for group in optimizer.param_groups:
        group['lr'] = new_lr
class AverageMeter(object):
    """Tracks the latest value and the running average of a metric."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record value `val` observed `n` times."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)
class ProgressMeter(object):
    """Formats and prints a batch index together with a set of meters."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(meter) for meter in self.meters)
        print('\t'.join(parts))

    def _get_batch_fmtstr(self, num_batches):
        # Pad the current batch index to the width of the total count.
        width = len(str(num_batches // 1))
        fmt = '{:' + str(width) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def save_checkpoint(epoch, model, optimizer, acc, filename, msg):
    """Serialize the full training state to `filename` and print `msg`.

    Reads the module-level `args` for the architecture name.
    """
    torch.save({
        'epoch': epoch,
        'arch': args.arch,
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'top1_acc': acc,
    }, filename)
    print(msg)
def load_checkpoint(model, optimizer, filename):
    """Restore model/optimizer state; returns (start_epoch, model, optimizer).

    NOTE(review): map_location is hard-coded to 'cuda:0', so this fails on
    CPU-only machines — confirm whether that restriction is intended.
    """
    ckpt = torch.load(filename, map_location='cuda:0')
    model.load_state_dict(ckpt['state_dict'])
    optimizer.load_state_dict(ckpt['optimizer'])
    return ckpt['epoch'], model, optimizer
if __name__ == '__main__':
    # Script entry point: run SimSiam pre-training.
    main()
| 9,087 | 33.687023 | 99 | py |
SimSiam-91.9-top1-acc-on-CIFAR10 | SimSiam-91.9-top1-acc-on-CIFAR10-main/main_lincls.py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import builtins
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
from torch.utils.data import DataLoader
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from simsiam.resnet_cifar import ResNet18, ResNet34, ResNet50, ResNet101, ResNet152
from PIL import Image
# All lowercase, public, callable names exposed by torchvision.models —
# used only to populate the --arch choices/help text.
model_names = sorted(name for name in models.__dict__
                     if name.islower() and not name.startswith("__")
                     and callable(models.__dict__[name]))
# Command-line arguments for linear-classification evaluation of a frozen,
# pre-trained SimSiam backbone (linear probing protocol).
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
                    help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',
                    choices=model_names,
                    help='model architecture: ' +
                         ' | '.join(model_names) +
                         ' (default: resnet50)')
parser.add_argument('--num_cls', default=10, type=int, metavar='N',
                    help='number of classes in dataset (output dimention of models)')
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
                    help='number of data loading workers (default: 32)')
parser.add_argument('--epochs', default=100, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch_size', default=256, type=int,
                    metavar='N',
                    help='mini-batch size (default: 256), this is the total '
                         'batch size of all GPUs on the current node when '
                         'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning_rate', default=30., type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--schedule', default=[60, 80], nargs='*', type=int,
                    help='learning rate schedule (when to drop lr by a ratio)')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--wd', '--weight_decay', default=0., type=float,
                    metavar='W', help='weight decay (default: 0.)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=500, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--world-size', default=-1, type=int,
                    help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
                    help='node rank for distributed training')
parser.add_argument('--dist_url', default='tcp://224.66.41.62:23456', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
                    help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
                    help='Use multi-processing distributed training to launch '
                         'N processes per node, which has N GPUs. This is the '
                         'fastest way to use PyTorch for either single node or '
                         'multi node data parallel training')

parser.add_argument('--pretrained', default='', type=str, help='path to pretrained checkpoint')
def get_backbone(backbone_name, num_cls=10):
    """Instantiate the requested CIFAR ResNet backbone.

    Args:
        backbone_name: one of 'resnet18', 'resnet34', 'resnet50',
            'resnet101', 'resnet152'.
        num_cls: output dimension of the final fc layer.

    Returns:
        The constructed ResNet module.

    Raises:
        KeyError: if `backbone_name` is not a supported architecture.
    """
    # Map to constructors so only the requested network is built; the
    # original eagerly instantiated all five models just to index one.
    constructors = {'resnet18': ResNet18,
                    'resnet34': ResNet34,
                    'resnet50': ResNet50,
                    'resnet101': ResNet101,
                    'resnet152': ResNet152}
    return constructors[backbone_name](low_dim=num_cls)


# Best top-1 validation accuracy seen so far (updated in main_worker).
best_acc1 = 0
def main():
    """Parse args and launch linear-probing training (optionally distributed)."""
    args = parser.parse_args()
    print(vars(args))

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])

    args.distributed = args.world_size > 1 or args.multiprocessing_distributed

    ngpus_per_node = torch.cuda.device_count()
    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes: the
        # main_worker process function
        mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
    else:
        # Simply call main_worker function
        main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
    """Per-process worker: build the frozen backbone, train the linear head.

    Loads the pre-trained SimSiam backbone, freezes everything but the final
    fc layer, and runs the standard linear-classification protocol on
    CIFAR-10, checkpointing the best top-1 accuracy.
    """
    global best_acc1
    args.gpu = gpu

    # suppress printing if not master
    if args.multiprocessing_distributed and args.gpu != 0:
        def print_pass(*args):
            pass
        builtins.print = print_pass

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size, rank=args.rank)
    # create model
    print("=> creating model '{}'".format(args.arch))
    # model = models.__dict__[args.arch]()
    model = get_backbone(args.arch, args.num_cls)

    # freeze all layers but the last fc
    for name, param in model.named_parameters():
        if name not in ['fc.weight', 'fc.bias']:
            param.requires_grad = False
    # init the fc layer
    model.fc.weight.data.normal_(mean=0.0, std=0.01)
    model.fc.bias.data.zero_()

    # load from pre-trained, before DistributedDataParallel constructor
    if args.pretrained:
        if os.path.isfile(args.pretrained):
            print("=> loading checkpoint '{}'".format(args.pretrained))
            checkpoint = torch.load(args.pretrained, map_location="cpu")

            state_dict = checkpoint['state_dict']
            new_state_dict = dict()
            # Keep only backbone weights (drop projector/predictor and fc),
            # stripping the 'backbone.' prefix used during pre-training.
            for old_key, value in state_dict.items():
                if old_key.startswith('backbone') and 'fc' not in old_key:
                    new_key = old_key.replace('backbone.', '')
                    new_state_dict[new_key] = value

            args.start_epoch = 0
            msg = model.load_state_dict(new_state_dict, strict=False)
            # Only the freshly initialized fc layer may be missing.
            assert set(msg.missing_keys) == {"fc.weight", "fc.bias"}

            print("=> loaded pre-trained model '{}'".format(args.pretrained))
        else:
            print("=> no checkpoint found at '{}'".format(args.pretrained))

    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)

    # optimize only the linear classifier
    parameters = list(filter(lambda p: p.requires_grad, model.parameters()))
    assert len(parameters) == 2  # fc.weight, fc.bias
    optimizer = torch.optim.SGD(parameters, args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                # Map model to be loaded to specified single gpu.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    transform_train = transforms.Compose([
        transforms.RandomResizedCrop(32, scale=(0.8, 1.0),
                                     ratio=(3.0 / 4.0, 4.0 / 3.0),
                                     interpolation=Image.BICUBIC),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
    ])

    transform_test = transforms.Compose([
        transforms.Resize(int(32 * (8 / 7)), interpolation=Image.BICUBIC),
        transforms.CenterCrop(32),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
    ])

    trainset = datasets.CIFAR10(args.data, train=True, transform=transform_train)
    valset = datasets.CIFAR10(args.data, train=False, transform=transform_test)

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(trainset)
    else:
        train_sampler = None

    train_loader = DataLoader(trainset,
                              batch_size=args.batch_size,
                              shuffle=(train_sampler is None),
                              num_workers=args.workers,
                              sampler=train_sampler,
                              pin_memory=True,
                              drop_last=True)

    val_loader = DataLoader(valset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.workers,
                            pin_memory=True,
                            drop_last=True)

    if args.evaluate:
        validate(val_loader, model, criterion, args)
        return

    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args)

        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, args)

        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        if not args.multiprocessing_distributed or (args.multiprocessing_distributed
                                                    and args.rank % ngpus_per_node == 0):
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer': optimizer.state_dict(),
            }, is_best)
            if epoch == args.start_epoch:
                # Verify the frozen backbone really did not change.
                sanity_check(model.state_dict(), args.pretrained)

    print('Best acc:', best_acc1)
def train(train_loader, model, criterion, optimizer, epoch, args):
    """Train the linear classifier for one epoch on frozen features."""
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(train_loader),
                             [batch_time, data_time, losses, top1, top5],
                             prefix="Epoch: [{}]".format(epoch))

    # Keep the model in eval mode: under the linear-probing protocol the
    # frozen backbone must not change, and BatchNorm in train mode would
    # still update its running mean/std even without gradients.
    model.eval()

    last = time.time()
    for step, (images, target) in enumerate(train_loader):
        data_time.update(time.time() - last)

        if args.gpu is not None:
            images = images.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)

        output = model(images)
        loss = criterion(output, target)

        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - last)
        last = time.time()

        if step % args.print_freq == 0:
            progress.display(step)
def validate(val_loader, model, criterion, args):
    """Evaluate on the validation set and return the average top-1 accuracy."""
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(val_loader),
                             [batch_time, losses, top1, top5],
                             prefix='Test: ')

    model.eval()

    with torch.no_grad():
        last = time.time()
        for step, (images, target) in enumerate(val_loader):
            if args.gpu is not None:
                images = images.cuda(args.gpu, non_blocking=True)
                target = target.cuda(args.gpu, non_blocking=True)

            output = model(images)
            loss = criterion(output, target)

            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))

            batch_time.update(time.time() - last)
            last = time.time()

            if step % args.print_freq == 0:
                progress.display(step)

        # TODO: this should also be done with the ProgressMeter
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
              .format(top1=top1, top5=top5))

    return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Write `state` to `filename`; also copy it to model_best.pth.tar if best."""
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
def sanity_check(state_dict, pretrained_weights):
    """
    Linear classifier should not change any weights other than the linear layer.
    This sanity check asserts nothing wrong happens (e.g., BN stats updated).
    """
    print("=> loading '{}' for sanity check".format(pretrained_weights))
    checkpoint = torch.load(pretrained_weights, map_location="cpu")
    state_dict_pre = checkpoint['state_dict']
    for k in list(state_dict.keys()):
        # only ignore fc layer
        if 'fc.weight' in k or 'fc.bias' in k:
            continue
        # map the classifier's key into the pretraining checkpoint's
        # 'backbone.' namespace, dropping any DataParallel 'module.' prefix
        k_pre = 'backbone.' + k[len('module.'):] \
            if k.startswith('module.') else 'backbone.' + k
        # every non-fc tensor must be bitwise identical to the pretrained one
        assert ((state_dict[k].cpu() == state_dict_pre[k_pre]).all()), \
            '{} is changed in linear classifier training.'.format(k)
    print("=> sanity check passed.")
class AverageMeter(object):
    """Computes and stores the average and current value.

    Tracks the last observed value (``val``), the running sum/count, and the
    running average (``avg``). ``fmt`` is the format spec used by ``__str__``.
    """
    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        # reset() is the single source of truth for the counters; the previous
        # version duplicated all four assignments here before calling it.
        self.reset()
    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
    def __str__(self):
        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
    """Pretty-prints a '[batch/total]' header followed by each meter, tab-separated."""
    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix
    def display(self, batch):
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(meter) for meter in self.meters)
        print('\t'.join(parts))
    def _get_batch_fmtstr(self, num_batches):
        # width of the counter field matches the total batch count
        width = len(str(num_batches // 1))
        fmt = '{:' + str(width) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
    """Step-decay the learning rate: multiply args.lr by 0.1 for every milestone already reached."""
    decayed = args.lr
    for boundary in args.schedule:
        if epoch >= boundary:
            decayed *= 0.1
    # write the decayed rate into every parameter group
    for group in optimizer.param_groups:
        group['lr'] = decayed
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Returns a list of 1-element tensors (percentages), one per entry in *topk*.
    """
    with torch.no_grad():
        largest_k = max(topk)
        batch_size = target.size(0)
        # indices of the top-k classes per sample, transposed to (k, batch)
        _, top_idx = output.topk(largest_k, 1, True, True)
        top_idx = top_idx.t()
        hits = top_idx.eq(target.view(1, -1).expand_as(top_idx))
        # cumulative correct counts over the first k rows, scaled to percent
        return [
            hits[:k].reshape(-1).float().sum(0, keepdim=True).mul_(100.0 / batch_size)
            for k in topk
        ]
if __name__ == '__main__':
    # Entry point: run training only when executed as a script, not on import.
    main()
| 20,587 | 38.066414 | 95 | py |
SimSiam-91.9-top1-acc-on-CIFAR10 | SimSiam-91.9-top1-acc-on-CIFAR10-main/simsiam/model_factory.py | from torch import nn
from .resnet_cifar import ResNet18, ResNet34, ResNet50, ResNet101, ResNet152
class projection_MLP(nn.Module):
    """SimSiam projection head: a 2- or 3-layer MLP whose hidden width equals out_dim.

    The final BatchNorm carries no affine parameters (SimSiam paper, page 5,
    paragraph 2). With num_layers == 2 the middle layer is skipped.
    """
    def __init__(self, in_dim, out_dim, num_layers=2):
        super().__init__()
        hidden_dim = out_dim
        self.num_layers = num_layers
        self.layer1 = nn.Sequential(
            nn.Linear(in_dim, hidden_dim),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(inplace=True),
        )
        self.layer2 = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(inplace=True),
        )
        self.layer3 = nn.Sequential(
            nn.Linear(hidden_dim, out_dim),
            nn.BatchNorm1d(out_dim, affine=False),  # Page:5, Paragraph:2
        )
    def forward(self, x):
        if self.num_layers == 2:
            return self.layer3(self.layer1(x))
        if self.num_layers == 3:
            return self.layer3(self.layer2(self.layer1(x)))
        # any other depth: pass the input through untouched (original behavior)
        return x
class prediction_MLP(nn.Module):
    """SimSiam prediction head h: a bottleneck MLP (hidden = in_dim / 4, output = in_dim)."""
    def __init__(self, in_dim=2048):
        super().__init__()
        out_dim = in_dim
        hidden_dim = int(out_dim / 4)
        self.layer1 = nn.Sequential(
            nn.Linear(in_dim, hidden_dim),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(inplace=True),
        )
        self.layer2 = nn.Linear(hidden_dim, out_dim)
    def forward(self, x):
        return self.layer2(self.layer1(x))
class SimSiam(nn.Module):
    """SimSiam model: shared encoder (backbone + projection MLP) plus a prediction MLP.

    forward() returns the projections z1/z2 and predictions p1/p2 for two
    augmented views of the same image batch.
    """
    def __init__(self, args):
        super(SimSiam, self).__init__()
        self.backbone = SimSiam.get_backbone(args.arch)
        out_dim = self.backbone.fc.weight.shape[1]
        self.backbone.fc = nn.Identity()  # strip the classification head
        self.projector = projection_MLP(out_dim, args.feat_dim,
                                        args.num_proj_layers)
        self.encoder = nn.Sequential(
            self.backbone,
            self.projector
        )
        self.predictor = prediction_MLP(args.feat_dim)
    @staticmethod
    def get_backbone(backbone_name):
        """Instantiate only the requested backbone.

        The previous version eagerly constructed all five ResNets (including
        ResNet-152) just to index one of them, wasting time/memory and
        consuming RNG state for networks that were immediately discarded.
        """
        backbones = {'resnet18': ResNet18,
                     'resnet34': ResNet34,
                     'resnet50': ResNet50,
                     'resnet101': ResNet101,
                     'resnet152': ResNet152}
        return backbones[backbone_name]()
    def forward(self, im_aug1, im_aug2):
        # encode both views with the shared encoder
        z1 = self.encoder(im_aug1)
        z2 = self.encoder(im_aug2)
        # predict each view's projection from the other branch's input
        p1 = self.predictor(z1)
        p2 = self.predictor(z2)
        return {'z1': z1, 'z2': z2, 'p1': p1, 'p2': p2}
| 2,575 | 24.76 | 76 | py |
SimSiam-91.9-top1-acc-on-CIFAR10 | SimSiam-91.9-top1-acc-on-CIFAR10-main/simsiam/validation.py | # https://github.com/zhirongw/lemniscate.pytorch/blob/master/test.py
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
from torch import nn
class KNNValidation(object):
    """K-nearest-neighbour evaluation of encoder features on CIFAR-10.

    Builds a (normalized) feature bank from the train split, then classifies
    each validation image by its nearest neighbour(s) in that bank; used to
    monitor representation quality during self-supervised training.
    NOTE(review): assumes CUDA is available in `_topk_retrieval` (the `.cuda()`
    calls are unconditional) — confirm before running on CPU-only machines.
    """
    def __init__(self, args, model, K=1):
        self.model = model
        self.device = torch.device('cuda' if next(model.parameters()).is_cuda else 'cpu')
        self.args = args
        self.K = K  # number of neighbours consulted per query
        base_transforms = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
        train_dataset = datasets.CIFAR10(root=args.data_root,
                                         train=True,
                                         download=True,
                                         transform=base_transforms)
        self.train_dataloader = DataLoader(train_dataset,
                                           batch_size=args.batch_size,
                                           shuffle=False,
                                           num_workers=args.num_workers,
                                           pin_memory=True,
                                           drop_last=True)
        val_dataset = datasets.CIFAR10(root=args.data_root,
                                       train=False,
                                       download=True,
                                       transform=base_transforms)
        self.val_dataloader = DataLoader(val_dataset,
                                         batch_size=args.batch_size,
                                         shuffle=False,
                                         num_workers=args.num_workers,
                                         pin_memory=True,
                                         drop_last=True)
    def _topk_retrieval(self):
        """Extract features from validation split and search on train split features."""
        n_data = self.train_dataloader.dataset.data.shape[0]
        feat_dim = self.args.feat_dim
        self.model.eval()
        if str(self.device) == 'cuda':
            torch.cuda.empty_cache()
        # feature bank laid out as (feat_dim, n_data) so queries can use one matmul
        train_features = torch.zeros([feat_dim, n_data], device=self.device)
        with torch.no_grad():
            for batch_idx, (inputs, _) in enumerate(self.train_dataloader):
                inputs = inputs.to(self.device)
                batch_size = inputs.size(0)
                # forward
                features = self.model(inputs)
                features = nn.functional.normalize(features)
                train_features[:, batch_idx * batch_size:batch_idx * batch_size + batch_size] = features.data.t()
            train_labels = torch.LongTensor(self.train_dataloader.dataset.targets).cuda()
        total = 0
        correct = 0
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(self.val_dataloader):
                targets = targets.cuda(non_blocking=True)
                batch_size = inputs.size(0)
                features = self.model(inputs.to(self.device))
                # cosine-style similarity of each query to every bank entry
                dist = torch.mm(features, train_features)
                yd, yi = dist.topk(self.K, dim=1, largest=True, sorted=True)
                candidates = train_labels.view(1, -1).expand(batch_size, -1)
                retrieval = torch.gather(candidates, 1, yi)
                # prediction is the label of the single nearest neighbour
                retrieval = retrieval.narrow(1, 0, 1).clone().view(-1)
                total += targets.size(0)
                correct += retrieval.eq(targets.data).sum().item()
        top1 = correct / total
        return top1
    def eval(self):
        """Run the retrieval evaluation and return top-1 accuracy."""
        return self._topk_retrieval()
| 3,662 | 38.815217 | 113 | py |
SimSiam-91.9-top1-acc-on-CIFAR10 | SimSiam-91.9-top1-acc-on-CIFAR10-main/simsiam/resnet_cifar.py | '''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
# from lib.normalize import Normalize
from torch.autograd import Variable
class BasicBlock(nn.Module):
    """Two 3x3 convolution ResNet block with an identity (or 1x1-projected) shortcut."""
    expansion = 1
    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # a 1x1 projection is only needed when the spatial size or channel count changes
        needs_projection = stride != 1 or in_planes != self.expansion * planes
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()
    def forward(self, x):
        residual = self.shortcut(x)
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        return F.relu(y + residual)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck ResNet block; output channels = expansion * planes."""
    expansion = 4
    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        width_out = self.expansion * planes
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, width_out, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width_out)
        # project the shortcut only when the output shape differs from the input
        if stride != 1 or in_planes != width_out:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, width_out, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(width_out),
            )
        else:
            self.shortcut = nn.Sequential()
    def forward(self, x):
        y = F.relu(self.bn1(self.conv1(x)))
        y = F.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        return F.relu(y + self.shortcut(x))
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 stem (no max-pool), four stages, 4x4 avg-pool, linear head."""
    def __init__(self, block, num_blocks, low_dim=128):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.fc = nn.Linear(512 * block.expansion, low_dim)
        # self.l2norm = Normalize(2)
    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack num_blocks blocks; only the first block may downsample (stride)."""
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = F.avg_pool2d(h, 4)
        h = h.view(h.size(0), -1)
        # out = self.l2norm(out)
        return self.fc(h)
def ResNet18(low_dim=128):
    """ResNet-18: BasicBlock with a 2-2-2-2 stage layout."""
    return ResNet(BasicBlock, [2,2,2,2], low_dim)
def ResNet34(low_dim=128):
    """ResNet-34: BasicBlock with a 3-4-6-3 stage layout."""
    return ResNet(BasicBlock, [3,4,6,3], low_dim)
def ResNet50(low_dim=128):
    """ResNet-50: Bottleneck with a 3-4-6-3 stage layout."""
    return ResNet(Bottleneck, [3,4,6,3], low_dim)
def ResNet101(low_dim=128):
    """ResNet-101: Bottleneck with a 3-4-23-3 stage layout."""
    return ResNet(Bottleneck, [3,4,23,3], low_dim)
def ResNet152(low_dim=128):
    """ResNet-152: Bottleneck with a 3-8-36-3 stage layout."""
    return ResNet(Bottleneck, [3,8,36,3], low_dim)
def test():
    # smoke test: one forward pass on a single CIFAR-sized input
    net = ResNet18()
    y = net(Variable(torch.randn(1,3,32,32)))
    print(y.size())
# test()
| 4,245 | 32.433071 | 102 | py |
SimSiam-91.9-top1-acc-on-CIFAR10 | SimSiam-91.9-top1-acc-on-CIFAR10-main/simsiam/criterion.py | from torch import nn
class SimSiamLoss(nn.Module):
    """Symmetrized negative cosine similarity loss of SimSiam.

    version:
        'original'   - explicit L2-normalization followed by a dot product.
        'simplified' - cosine_similarity shortcut; numerically equivalent.
    """
    def __init__(self, version='simplified'):
        super().__init__()
        # Fail fast: previously an unknown version made asymmetric_loss silently
        # return None, causing a cryptic TypeError far from the misconfiguration.
        if version not in ('original', 'simplified'):
            raise ValueError(
                "unknown SimSiam loss version: {!r} "
                "(expected 'original' or 'simplified')".format(version))
        self.ver = version
    def asymmetric_loss(self, p, z):
        """D(p, z): negative cosine similarity with stop-gradient applied to z."""
        z = z.detach()  # stop gradient: z is treated as a constant target
        if self.ver == 'original':
            p = nn.functional.normalize(p, dim=1)
            z = nn.functional.normalize(z, dim=1)
            return -(p * z).sum(dim=1).mean()
        # 'simplified'
        return - nn.functional.cosine_similarity(p, z, dim=-1).mean()
    def forward(self, z1, z2, p1, p2):
        """Symmetrized loss: 0.5 * D(p1, z2) + 0.5 * D(p2, z1)."""
        loss1 = self.asymmetric_loss(p1, z2)
        loss2 = self.asymmetric_loss(p2, z1)
        return 0.5 * loss1 + 0.5 * loss2
| 751 | 24.066667 | 73 | py |
SimSiam-91.9-top1-acc-on-CIFAR10 | SimSiam-91.9-top1-acc-on-CIFAR10-main/simsiam/loader.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from PIL import ImageFilter
import random
class TwoCropsTransform:
    """Take two random crops of one image as the query and key.

    The base transform is applied twice, independently, producing [q, k].
    """
    def __init__(self, base_transform):
        self.base_transform = base_transform
    def __call__(self, x):
        return [self.base_transform(x) for _ in range(2)]
class GaussianBlur(object):
    """Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709

    The blur radius is drawn uniformly from [sigma[0], sigma[1]] on every call.
    """
    def __init__(self, sigma=(.1, 2.)):
        # Tuple default instead of the previous mutable list default: a shared
        # mutable default is a classic Python pitfall; callers may still pass a
        # list, since only indexing is used.
        self.sigma = sigma
    def __call__(self, x):
        # sample a fresh radius so repeated calls give different blur strengths
        sigma = random.uniform(self.sigma[0], self.sigma[1])
        x = x.filter(ImageFilter.GaussianBlur(radius=sigma))
        return x
| 758 | 26.107143 | 79 | py |
mIOHMM | mIOHMM-main/src/utils.py | import numpy as np
import pickle
import torch
def save_pickle(data, filename):
    """Pickle *data* to *filename* using the highest available protocol."""
    with open(filename, "wb") as out_file:
        pickle.dump(data, out_file, pickle.HIGHEST_PROTOCOL)
def load_pickle(filepath):
    """Unpickle and return the object stored at *filepath*."""
    with open(filepath, "rb") as handle:
        return pickle.load(handle)
def normalize(A, axis=None):
    """Normalize tensor A so that entries along *axis* sum to 1.

    Slices whose sum is zero are divided by 1 instead, avoiding NaNs from a
    0/0 division (those slices come back unchanged, i.e. all zeros).
    """
    Z = torch.sum(A, axis=axis, keepdims=True)
    # Pure-torch guard against zero sums. The previous np.where() indexing of a
    # torch tensor breaks for CUDA tensors and mixed numpy/torch needlessly.
    Z = torch.where(Z == 0, torch.ones_like(Z), Z)
    return A / Z
def normalize_exp(log_P, axis=None):
    """Softmax-style normalization: exponentiate log_P (shifted by its max along
    *axis* for numerical stability) and normalize along the same axis."""
    shift, _ = torch.max(log_P, keepdims=True, axis=axis)
    return normalize(torch.exp(log_P - shift), axis=axis)
| 586 | 19.964286 | 53 | py |
mIOHMM | mIOHMM-main/src/piomhmm.py | from scipy.special import gamma as gamma_fn
from sklearn.cluster import KMeans
from src.utils import normalize_exp
import math
import numpy as np
import pickle
import torch
torch.set_default_dtype(torch.float64)
class mHMM:
    def __init__(
        self,
        data,
        ins=None,
        K=2,
        k=5,
        TM=None,
        OM=None,
        full_cov=False,
        io=True,
        state_io=False,
        personalized_io=False,
        personalized=False,
        eps=0,
        min_var=1e-6,
        device="cpu",
        priorV=False,
        priorMu=False,
        sample_num=1,
        alpha=10.0,
        beta=5.0,
        UT=False,
        var_fill=0.5,
        VI_diag=False,
        lr=0.001,
    ):
        """
        personalized input-output hidden markov model. This class of models considers patient observations that are
        modeled by several possible factors, which are turned on or off using flags. The most complete version of the
        model is x_i,t | z_i,t = k, d_i,t ~ N(mu_k + R_i + (V_k + M_i)*D_i,t, sigma_k) where x_i,t is the observed data,
        z_i,t is the latent state, d_i,t is the observed drug information, R_i is a personalized state effect, V_k is
        a state-based drug effect, M_i is a personalized drug effect, and sigma_k is the covariance.
        :param data: an n x t x d matrix of clinical observations
        :param ins: an n x t matrix of input/drug information (note that this is assumed to be univariate)
        :param K: number of HMM mixture components (total states = K * k)
        :param k: number of latent states
        :param TM: the time mask, indicates trailing zeros in the observation array
        :param OM: the observation mask, indicates missing observations within the time series (i.e. missed visit)
        :param full_cov: flag indicating if a full covariance matrix should be used, alternatively a diagonal covariance is used
        :param io: flag indicating if the model is an input-output HMM; drugs should not be none if io=True
        :param state_io: flag indicating if input-output effects should be a function of state, if io=True and
        state_io=False, V_k = V for all k. This flag should not be True if io=False
        :param personalized_io: flag indicating if input-output effects should be a function of patient (i.e. M_i is
        'turned on'). This flag should not be True if io=False
        :param personalized: flag indicating if personalized state effects should be applied (i.e. R_i is 'turned on').
        :param eps: prevent division by zero
        :param min_var: set a minimum allowable variance
        :param device: either cpu or cuda
        :param priorV: indicates if priors should be used for the state- and personalized-drug effects
        :param priorMu: indicates if prios should be used for the state-means
        :param sample_num: number of saples used in MC sampling; only 1 sample is currently supported
        :param alpha: parameter of the inverse gamma distribution used as prior for V_k and M_i
        :param beta: parameter of the inverse gamma distribution used as prior for V_k and M_i
        :param UT: parameter to enforce an upper triangular structure for the transition matrix
        :param var_fill: parameter to specify initial guess for variance
        :param VI_diag: flag to indicate whether or not the variational distributions should have a diagonal covariance structure
        :param lr: learning rate of the Adam optimizer over the variational parameters
        """
        # number of HMM mixtures
        self.K = K
        # number of latent states per HMM
        self.k_per_hmm = k
        # number of latent states
        self.k = k * K
        # flag to indicate whether or not to use a full covariance matrix (alternative is diagonal)
        self.full_cov = full_cov
        # flag to indicate whether or not the model is input-output
        self.io = io
        # flag to indicate if io effects should be a function of state
        self.state_io = state_io
        # flag to indicate if io effects should be personalized
        self.perso_io = personalized_io
        # flag to indicate if personalized (non-io) effects should be included
        self.perso = personalized
        # flag to indicate whether or not to use GPU
        self.device = device
        self.tensor_constructor = (
            torch.cuda.DoubleTensor
            if device == "cuda" and torch.cuda.is_available()
            else torch.Tensor
        )
        # flag to indicate whether or not to have a prior on V
        self.priorV = priorV
        # flag to indicate whether or not to have a prior on mu
        self.priorMu = priorMu
        # store the parameters of the IG prior
        self.alpha = torch.tensor(
            [alpha], dtype=torch.float64, requires_grad=False, device=self.device
        ).float()
        self.beta = torch.tensor(
            [beta], dtype=torch.float64, requires_grad=False, device=self.device
        ).float()
        # flag to indicate upper triangular structure for transition matrix
        self.ut = UT
        # flag to indicate whether to use diagonal covariance for variational distribution
        self.VI_diag = VI_diag
        # store the data used in analysis
        self.data = data.to(device=self.device)  # n x t x d
        self.n, self.t, self.d = self.data.shape
        # store likelihood (this is the objective if no personalized effects are used)
        self.ll = []
        if self.perso_io and self.perso:
            # case with both personalized state and medication effects:
            # q(M|X) has mean mu_hat / Cholesky factor L_hat, q(R|X) has nu_hat / N_hat
            self.elbo = []  # objective
            self.mu_hat = torch.zeros(
                self.n, self.d, requires_grad=True, device=self.device
            )
            # vector for optimization of covariance which is mapped into lower triangular cholesky factor
            if self.VI_diag:
                self.tril_vec = torch.tensor(
                    0.01 * np.random.randn(self.n * self.d),
                    requires_grad=True,
                    device=self.device,
                    dtype=torch.float64,
                )
                self.L_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
                self.L_hat[
                    torch.stack([torch.eye(self.d) for _ in range(self.n)]) == 1
                ] = self.tril_vec
            else:
                self.tril_vec = torch.tensor(
                    0.01 * np.random.randn(self.n * int(0.5 * self.d * (self.d + 1))),
                    requires_grad=True,
                    device=self.device,
                    dtype=torch.float64,
                )
                self.L_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
                self.L_hat[
                    torch.tril(torch.ones(self.n, self.d, self.d)) == 1
                ] = self.tril_vec
            self.nu_hat = torch.zeros(
                self.n, self.d, requires_grad=True, device=self.device
            )
            # vector for optimization of covariance which is mapped into lower triangular cholesky factor
            if self.VI_diag:
                self.tril = torch.tensor(
                    0.01 * np.random.randn(self.n * self.d),
                    requires_grad=True,
                    device=self.device,
                    dtype=torch.float64,
                )
                self.N_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
                self.N_hat[
                    torch.stack([torch.eye(self.d) for _ in range(self.n)]) == 1
                ] = self.tril
            else:
                self.tril = torch.tensor(
                    0.01 * np.random.randn(self.n * int(0.5 * self.d * (self.d + 1))),
                    requires_grad=True,
                    device=self.device,
                    dtype=torch.float64,
                )
                self.N_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
                self.N_hat[
                    torch.tril(torch.ones(self.n, self.d, self.d)) == 1
                ] = self.tril
            self.optimizer = torch.optim.Adam(
                [self.mu_hat, self.tril_vec, self.nu_hat, self.tril], lr=lr
            )
        elif self.perso_io:
            # case with personalized medication effects
            self.elbo = []  # objective
            self.mu_hat = torch.zeros(
                self.n, self.d, requires_grad=True, device=self.device
            )
            # vector for optimization of covariance which is mapped into lower triangular cholesky factor
            if self.VI_diag:
                self.tril_vec = torch.tensor(
                    0.01 * np.random.randn(self.n * self.d),
                    requires_grad=True,
                    device=self.device,
                    dtype=torch.float64,
                )
                self.L_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
                self.L_hat[
                    torch.stack([torch.eye(self.d) for _ in range(self.n)]) == 1
                ] = self.tril_vec
            else:
                self.tril_vec = torch.tensor(
                    0.1 * np.random.randn(self.n * int(0.5 * self.d * (self.d + 1))),
                    requires_grad=True,
                    device=self.device,
                    dtype=torch.float64,
                )
                self.L_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
                self.L_hat[
                    torch.tril(torch.ones(self.n, self.d, self.d)) == 1
                ] = self.tril_vec
            self.optimizer = torch.optim.Adam([self.mu_hat, self.tril_vec], lr=lr)
        elif self.perso:
            # case with personalized state effects
            self.elbo = []  # objective
            self.nu_hat = torch.zeros(
                self.n, self.d, requires_grad=True, device=self.device
            )
            # vector for optimization of covariance which is mapped into lower triangular cholesky factor
            if self.VI_diag:
                self.tril = torch.tensor(
                    0.01 * np.random.randn(self.n * self.d),
                    requires_grad=True,
                    device=self.device,
                    dtype=torch.float64,
                )
                self.N_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
                self.N_hat[
                    torch.stack([torch.eye(self.d) for _ in range(self.n)]) == 1
                ] = self.tril
            else:
                self.tril = torch.tensor(
                    0.1 * np.random.randn(self.n * int(0.5 * self.d * (self.d + 1))),
                    requires_grad=True,
                    device=self.device,
                    dtype=torch.float64,
                )
                self.N_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
                self.N_hat[
                    torch.tril(torch.ones(self.n, self.d, self.d)) == 1
                ] = self.tril
            self.optimizer = torch.optim.Adam([self.nu_hat, self.tril], lr=lr, eps=1e-4)
        # store the inputs used in analysis
        if self.io:
            self.ins = ins.to(self.device)  # n x t x 1
        # store the time mask
        if TM is None:
            self.tm = torch.ones(
                self.n, self.t, requires_grad=False, device=self.device
            )
        else:
            self.tm = TM.to(self.device)  # n x t
        # store the observation mask
        if OM is None:
            self.om = torch.ones(
                self.n, self.t, requires_grad=False, device=self.device
            )
        else:
            self.om = OM.to(self.device)  # n x t
        # observation mask broadcast to transition-pair shape (k x k x n x t-1)
        self.om_is = torch.ones(
            (self.k, self.k, self.n, self.t - 1),
            requires_grad=False,
            device=device,
            dtype=torch.bool,
        )
        for i in range(self.k):
            self.om_is[i] = self.om[:, 1:].unsqueeze(0).repeat(self.k, 1, 1)
        self.eps = eps
        self.min_var = min_var
        self.ini_var = var_fill
        self.sample_num = sample_num
    def initialize_model(self, km_init=True):
        """
        Initializes the parameters of the PIOHMM model; internal method
        km_init: flag to indicate if kmeans should be used to initialize the state means
        :return: dict of initial parameters {mu, var, pi, A} plus, depending on the
            model flags, V and the prior noise terms (mnoise/vnoise/nnoise/munoise)
        """
        # All implementations of the model have the parameter set {mu, var, pi, A}
        if km_init:
            # initialize the means using kmeans
            kmeans = KMeans(n_clusters=self.k, init="random").fit(
                torch.reshape(self.data[:, :, :].cpu(), [self.n * self.t, self.d])
            )
            mu = torch.tensor(
                kmeans.cluster_centers_, requires_grad=False, device=self.device
            ).float()
        else:
            # choose k initial points from data to initialize means
            idxs = torch.from_numpy(np.random.choice(self.n, self.k, replace=False))
            mu = self.data[idxs, 0, :]
        if self.full_cov:
            # create k random symmetric positive definite d x d matrices
            R = 0.1 * torch.rand(self.k, self.d, self.d, requires_grad=False)
            var = torch.stack(
                [
                    0.5 * (R[i, :, :].squeeze() + torch.t(R[i, :, :].squeeze()))
                    + self.d * torch.eye(self.d)
                    for i in range(self.k)
                ]
            ).to(self.device)
        else:
            var = self.tensor_constructor(self.k, self.d, device=self.device).fill_(
                self.ini_var
            )
        # uniform prior
        pi = torch.empty(self.k, requires_grad=False, device=self.device).fill_(
            1.0 / self.k
        )
        # transition matrix
        if self.ut:
            # create UT matrices for each HMM (probability mass spread over the
            # states at or after the current one, so states cannot be revisited)
            A = []
            for _ in range(self.K):
                A.append(
                    torch.triu(
                        torch.stack(
                            [
                                1.0
                                / (self.k_per_hmm - i)
                                * torch.ones(
                                    self.k_per_hmm,
                                    requires_grad=False,
                                    device=self.device,
                                )
                                for i in range(self.k_per_hmm)
                            ]
                        )
                    )
                )
            # create the corresponding mask
            new_triangular = torch.block_diag(
                *[
                    torch.triu(
                        torch.ones(
                            self.k_per_hmm,
                            self.k_per_hmm,
                            requires_grad=False,
                            device=self.device,
                        )
                    )
                    for _ in range(self.K)
                ]
            )
        else:
            # create non-UT matrices for each HMM (uniform over states)
            A = []
            for _ in range(self.K):
                A.append(
                    torch.stack(
                        [
                            1.0
                            / self.k_per_hmm
                            * torch.ones(
                                self.k_per_hmm, requires_grad=False, device=self.device
                            )
                            for _ in range(self.k_per_hmm)
                        ]
                    )
                )
            # create the corresponding mask
            new_triangular = torch.block_diag(
                *[
                    torch.ones(
                        self.k_per_hmm,
                        self.k_per_hmm,
                        requires_grad=False,
                        device=self.device,
                    )
                    for _ in range(self.K)
                ]
            )
        self.new_triangular = new_triangular
        # obtain a block diagonal transition matrix (no transitions across mixtures)
        A = torch.block_diag(*A)
        # for calculation stability
        A += self.eps
        params = {"mu": mu, "var": var, "pi": pi, "A": A}
        # input transformation matrix
        if self.io:
            if self.state_io:
                V = torch.zeros(self.k, self.d, requires_grad=False, device=self.device)
            else:
                V = torch.zeros(self.d, requires_grad=False, device=self.device)
            params["V"] = V
        # variational parameters
        if self.perso_io:
            # transformation matrix prior noise
            # initialize using the mean of the IG distribution
            mnoise = torch.tensor([0.5], device=self.device)
            params["mnoise"] = mnoise
        if self.priorV:
            vnoise = torch.tensor([1.0], device=self.device)
            params["vnoise"] = vnoise
        if self.perso:
            nnoise = torch.tensor([0.5], device=self.device)
            params["nnoise"] = nnoise
        if self.priorMu:
            munoise = torch.tensor([1.0], device=self.device)
            params["munoise"] = munoise
        return params
    def batch_mahalanobis(self, L, x, check=True):
        """
        Computes the squared Mahalanobis distance :math:`\mathbf{x}^\top\mathbf{M}^{-1}\mathbf{x}`
        for a factored :math:`\mathbf{M} = \mathbf{L}\mathbf{L}^\top`. internal method
        Accepts batches for both L and x.
        :param L: batch of lower-triangular Cholesky factors, (..., d, d)
        :param check: selects the residual layout — True for the 4-d
            per-state/per-timepoint layout, False for one residual vector per factor
        """
        # invert each (transposed) Cholesky factor individually, then restore L's batch shape
        flat_L = L.unsqueeze(0).reshape((-1,) + L.shape[-2:])
        L_inv = (
            torch.stack([torch.inverse(Li.t()) for Li in flat_L])
            .view(L.shape)
            .to(self.device)
        )
        batch_val = L_inv.shape[0]
        if check:
            # x is (batch, n, t, d): accumulate ||L^{-1} r||^2 over the feature dimension
            return (
                (
                    torch.stack(
                        [
                            x[i, :, :].unsqueeze(-1) * L_inv[i, :, :]
                            for i in range(batch_val)
                        ]
                    )
                )
                .sum(-2)
                .pow(2.0)
                .sum(-1)
            )
        else:
            # x is (batch, d): one residual vector per Cholesky factor
            return (
                (
                    torch.stack(
                        [
                            x[i, :].unsqueeze(-1) * L_inv[i, :, :]
                            for i in range(batch_val)
                        ]
                    )
                )
                .sum(-2)
                .pow(2.0)
                .sum(-1)
            )
def batch_diag(self, bmat):
"""
Returns the diagonals of a batch of square matrices; internal method
"""
return bmat.reshape(bmat.shape[:-2] + (-1,))[..., :: bmat.size(-1) + 1]
def log_gaussian(self, params, m_sample=None, n_sample=None):
"""
Returns the density of the model data given the current parameters; internal method
:param params: set of model parameters
:param m_sample: current sample of m_i, only applicable for perso_io=True
:param n_sample: current sample of r_i, only applicable for perso=True
:return: log likelihood at each time point for each possible cluster component, k x n x t
"""
# unpack params
mu = params["mu"]
var = params["var"]
log_norm_constant = self.d * torch.log(
2 * torch.tensor(math.pi, device=self.device)
)
if self.full_cov:
try:
# This try statement helps catch issues related to singular covariance, which can be an issue that is difficult to trace
L = torch.linalg.cholesky(var)
except:
print(var)
print(mu)
print(params["A"])
print(params["V"])
with open("miohmm_var.pkl", "wb") as handle:
pickle.dump(var, handle)
r = self.data[None, :, :, :] - mu[:, None, None, :]
if self.io:
V = params["V"]
if self.state_io:
r = r - V[:, None, None, :] * self.ins[None, :, :, None]
else:
r = r - V * self.ins[:, :, None]
if self.perso_io:
r = r - m_sample[None, :, None, :] * self.ins[None, :, :, None]
if self.perso:
r = r - n_sample[None, :, None, :]
md = self.batch_mahalanobis(L, r)
log_det = 2 * self.batch_diag(L).abs().log().sum(-1).to(self.device)
log_p = -0.5 * (md + log_norm_constant + log_det[:, None, None])
else:
r = self.data[None, :, :, :] - mu[:, None, None, :]
if self.io:
V = params["V"]
if self.state_io:
r = r - V[:, None, None, :] * self.ins[None, :, :, None]
else:
r = r - V * self.ins[:, :, None]
if self.perso_io:
r = r - m_sample[None, :, None, :] * self.ins[:, :, None]
if self.perso:
r = r - n_sample[None, :, None, :]
r = r ** 2
log_p = -0.5 * (var.log()[:, None, None, :] + r / var[:, None, None, :])
log_p = log_p + log_norm_constant
log_p = log_p.sum(-1)
return log_p
    def log_gaussian_prior(self, rv, mu, L):
        """
        Returns the log probability of random variable rv with mean mu and Cholesky
        factor L; does not support full covariance structure; internal method
        :param rv: random variable values, d
        :param mu: prior mean, d
        :param L: cholesky matrix for covariance of RV
        :return: log probability
        """
        d = np.shape(rv)[0]
        log_norm_constant = (
            -0.5 * d * torch.log(2 * torch.tensor(math.pi, device=self.device))
        )
        r = rv - mu
        # squared Mahalanobis distance with the 2-d residual layout
        md = self.batch_mahalanobis(L, r, check=False)
        # log|L| = sum of log of the absolute diagonal entries
        log_det = self.batch_diag(L).abs().log().sum(-1).to(self.device)
        log_p = -0.5 * md + log_norm_constant - log_det
        return log_p
def log_ig(self, noise):
"""
Returns the probability of the inverse gamma prior; internal method
:return:
"""
log_ig = (
self.alpha * torch.log(self.beta)
- torch.log(gamma_fn(self.alpha.cpu())).to(self.device)
- (self.alpha + 1.0) * torch.log(noise)
- self.beta / noise
)
return log_ig
def get_likelihoods(self, params, log=True, m_sample=None, n_sample=None):
"""
:param log: flag to indicate if likelihood should be returned in log domain; internal method
:return likelihoods: (k x n x t)
"""
log_likelihoods = self.log_gaussian(params, m_sample, n_sample)
if not log:
log_likelihoods.exp_()
# multiply the liklihoods by the observation mask
return (log_likelihoods * self.om[None, :, :]).to(self.device)
    def get_exp_data(self, mu, var, V=None, m_sample=None, n_sample=None):
        """
        Function to calculate the expectation of the conditional log-likelihood with respect to the variational
        approximation q(M|X); internal method
        :param mu: state means, k x d
        :param var: state covariances (k x d x d if full_cov, otherwise k x d diagonal)
        :param V: state drug effects; required when self.io is True.
            NOTE(review): this path indexes V as k x d, i.e. it assumes the
            state_io layout — confirm for state_io=False configurations.
        :param m_sample: sample of the personalized drug effects (used when perso_io)
        :param n_sample: sample of the personalized state offsets (used when perso)
        :return: expectation of the conditional log-likelihood wrt the variational approximation
        """
        if self.full_cov:
            L = torch.linalg.cholesky(var)
        else:
            # build per-state Cholesky factors from the diagonal variances
            L = torch.zeros(self.k, self.d, self.d)
            for i in range(self.k):
                L[i, :, :] = torch.diag(torch.sqrt(var[i, :]))
        # residuals after removing the state mean and any drug/personalized effects
        r = self.data[None, :, :, :] - mu[:, None, None, :]
        if self.io:
            r = r - V[:, None, None, :] * self.ins[None, :, :, None]
        if self.perso_io:
            r = r - m_sample[None, :, None, :] * self.ins[None, :, :, None]
        if self.perso:
            r = r - n_sample[None, :, None, :]
        const = self.d * torch.log(
            2 * torch.tensor(math.pi, device=self.device)
        )  # scalar
        logdet = 2 * self.batch_diag(L).abs().log().sum(-1).to(self.device)  # k
        md1 = self.batch_mahalanobis(L, r)  # k x n x t
        out = -0.5 * (const + logdet[:, None, None] + md1)
        return out
def get_exp_M(self, mnoise):
"""
Function to calculate the expectation of the prior with respect to the variational approximation q(M|X); internal method
:return: expectation of the prior on M wrt the variational approximation
"""
out = (
-self.d
* self.n
/ 2
* torch.log(2 * torch.tensor(math.pi, device=self.device) * mnoise)
- (
1 / mnoise / 2 * torch.einsum("kij, kij -> k", [self.L_hat, self.L_hat])
).sum()
- (
1 / mnoise / 2 * torch.einsum("ij,ij->i", [self.mu_hat, self.mu_hat])
).sum()
)
return out
def get_exp_V(self, V, vnoise):
"""
Function to calculate the expectation of the prior with respect to the variational approximation q(M|X); internal method
:return: expectation of the prior on M wrt the variational approximation
"""
out = (
-self.d
* self.k
/ 2
* torch.log(2 * torch.tensor(math.pi, device=self.device) * vnoise)
- (1 / vnoise / 2 * torch.einsum("ij,ij->i", [V, V])).sum()
)
return out
def get_exp_Mtilde(self, nnoise):
"""
Function to calculate the expectation of the prior with respect to the variational approximation q(M|X); internal method
:return: expectation of the prior on M wrt the variational approximation
"""
out = (
-self.d
* self.n
/ 2
* torch.log(2 * torch.tensor(math.pi, device=self.device) * nnoise)
- (
1 / nnoise / 2 * torch.einsum("kij, kij -> k", [self.N_hat, self.N_hat])
).sum()
- (
1 / nnoise / 2 * torch.einsum("ij,ij->i", [self.nu_hat, self.nu_hat])
).sum()
)
return out
    def exp_log_joint(self, params, e_out, samples):
        """
        Function to calculate the expectation of the joint likelihood with respect to the variational approximation; internal method
        :param params: model parameters {pi, A, mu, var, ...}
        :param e_out: E-step posteriors; gamma (state marginals, log domain) and
            xi (pairwise transition posteriors, log domain)
        :param samples: MC samples of the personalized effects, keys m_sample / n_sample
        :return: log joint likelihood
        """
        # unpack parameters
        pi = params["pi"]
        A = params["A"]
        logA = A.log()
        # zero out -inf entries from forbidden (zero-probability) transitions so
        # they drop out of the expected-transition sum instead of producing NaNs
        logA[torch.isinf(logA)] = 0
        mu = params["mu"]
        var = params["var"]
        m_sample = samples["m_sample"]
        n_sample = samples["n_sample"]
        gamma = e_out["gamma"]
        xi = e_out["xi"]
        # expected initial-state + transition + emission terms
        if self.io:
            V = params["V"]
            lj = (
                (gamma[:, :, 0].exp() * pi[:, None].log()).sum()
                + (xi.exp() * logA[:, :, None, None] * self.om[None, None, :, 1:]).sum()
                + (
                    self.get_exp_data(
                        mu, var, V=V, m_sample=m_sample, n_sample=n_sample
                    )
                    * gamma.exp()
                    * self.om[None, :, :]
                ).sum()
            )
        else:
            lj = (
                (gamma[:, :, 0].exp() * pi[:, None].log()).sum()
                + (xi.exp() * logA[:, :, None, None] * self.om[None, None, :, 1:]).sum()
                + (
                    self.get_exp_data(mu, var, m_sample=m_sample, n_sample=n_sample)
                    * gamma.exp()
                    * self.om[None, :, :]
                ).sum()
            )
        # prior contributions, toggled by the model flags
        if self.perso:
            nnoise = params["nnoise"]
            lj = lj + self.get_exp_Mtilde(nnoise)
        if self.perso_io:
            mnoise = params["mnoise"]
            lj = lj + self.get_exp_M(mnoise)
        if self.priorV:
            vnoise = params["vnoise"]
            mnoise = params["mnoise"]
            V = params["V"]
            lj = (
                lj
                + self.log_ig(vnoise)
                + self.log_ig(mnoise)
                + self.get_exp_V(V, vnoise)
            )
        if self.priorMu:
            munoise = params["munoise"]
            nnoise = params["nnoise"]
            lj = (
                lj
                + self.log_ig(munoise)
                + self.log_ig(nnoise)
                + self.get_exp_V(mu, munoise)
            )
        return lj
    def entropy(self, e_out):
        """
        Function to calculate the entropy; internal method
        :param e_out: E-step outputs (log gamma [k x n x t], log xi [k x k x n x t-1])
        :return: entropy of the posterior over the hidden states, plus (when
            personalized effects are enabled) the differential entropy of the
            Gaussian variational approximations
        """
        gamma = e_out["gamma"]
        xi = e_out["xi"]
        # q(z_1) entropy term, masked by the observation mask at t=0
        gamma_sum = gamma[:, :, 0].exp() * gamma[:, :, 0] * self.om[None, :, 0]
        if self.ut:
            # upper-triangular (left-to-right) model: only count admissible transitions
            xi_sum = 0
            for i in range(self.n):
                for j in range(1, self.t):
                    xi_sum = (
                        xi_sum
                        + (
                            torch.triu(xi[:, :, i, j - 1]).exp()
                            * torch.triu(xi[:, :, i, j - 1])
                            * self.om[None, None, i, j]
                        ).sum()
                    )
            xi_sum = (
                xi_sum
                - (xi.exp() * gamma[:, None, :, :-1] * self.om[None, None, :, 1:]).sum()
            )
        else:
            xi_sum = (
                xi.exp() * (xi - gamma[:, None, :, :-1]) * self.om[None, None, :, 1:]
            ).sum()
        et = -gamma_sum.sum() - xi_sum
        if self.perso_io:
            # differential entropy of Gaussian q(M|X): 0.5*(logdet + d*log(2*pi) + d)
            logdet = 2 * self.batch_diag(self.L_hat).abs().log().sum(-1)
            # NOTE(review): torch.tensor(math.pi) is created on the default (CPU)
            # device; if self.device is cuda this add mixes devices -- confirm
            diffe = (
                0.5
                * (logdet + self.d * np.log(2 * torch.tensor(math.pi)) + self.d).sum()
            )
            et = et + diffe
        if self.perso:
            # same differential-entropy term for q(M_tilde|X)
            logdet = 2 * self.batch_diag(self.N_hat).abs().log().sum(-1)
            diffe = (
                0.5
                * (logdet + self.d * np.log(2 * torch.tensor(math.pi)) + self.d).sum()
            )
            et = et + diffe
        return et
def variational_obj(self, params, e_out, samples):
"""
Function to calculate the elbo using the expectation of the joint likelihood and the entropy; internal method
:return:
"""
obj1 = -self.exp_log_joint(params, e_out, samples)
obj2 = -self.entropy(e_out)
self.elbo.append((obj1 + obj2).item())
return obj1 + obj2
    def baseline_variational_obj(self, params, e_out, samples):
        """
        Function to calculate the elbo when only one time point has been observed; internal method
        :param params: current model parameters
        :param e_out: dict holding gamma (the state responsibilities)
        :param samples: samples of the personalized effects
        :return: negative ELBO (scalar); the value is also appended to self.elbo
        """
        # unpack parameters
        pi = params["pi"]
        A = params["A"]
        logA = A.log()
        # zero out -inf entries (structurally forbidden transitions)
        logA[torch.isinf(logA)] = 0
        mu = params["mu"]
        var = params["var"]
        m_sample = samples["m_sample"]
        n_sample = samples["n_sample"]
        gamma = e_out["gamma"]
        # expected joint: no transition term since only one time point is used
        if self.io:
            V = params["V"]
            lj = (gamma[:, :, 0].exp() * pi[:, None].log()).sum() + (
                self.get_exp_data(mu, var, V=V, m_sample=m_sample, n_sample=n_sample)
                * gamma.exp()
                * self.om[None, :, :]
            ).sum()
        else:
            lj = (gamma[:, :, 0].exp() * pi[:, None].log()).sum() + (
                self.get_exp_data(mu, var, m_sample=m_sample, n_sample=n_sample)
                * gamma.exp()
                * self.om[None, :, :]
            ).sum()
        # add the (expected) log priors for whichever effects are enabled
        if self.perso:
            nnoise = params["nnoise"]
            lj = lj + self.get_exp_Mtilde(nnoise)
        if self.perso_io:
            mnoise = params["mnoise"]
            lj = lj + self.get_exp_M(mnoise)
        if self.priorV:
            vnoise = params["vnoise"]
            mnoise = params["mnoise"]
            V = params["V"]
            lj = (
                lj
                + self.log_ig(vnoise)
                + self.log_ig(mnoise)
                + self.get_exp_V(V, vnoise)
            )
        if self.priorMu:
            munoise = params["munoise"]
            nnoise = params["nnoise"]
            lj = (
                lj
                + self.log_ig(munoise)
                + self.log_ig(nnoise)
                + self.get_exp_V(mu, munoise)
            )
        # entropy: categorical term for q(z_1) plus Gaussian differential entropies
        gamma_sum = gamma[:, :, 0].exp() * gamma[:, :, 0] * self.om[None, :, 0]
        et = -gamma_sum.sum()
        if self.perso_io:
            logdet = 2 * self.batch_diag(self.L_hat).abs().log().sum(-1)
            # NOTE(review): torch.tensor(math.pi) is created on the CPU; confirm
            # this is compatible when self.device is cuda
            diffe = (
                0.5
                * (logdet + self.d * np.log(2 * torch.tensor(math.pi)) + self.d).sum()
            )
            et = et + diffe
        if self.perso:
            logdet = 2 * self.batch_diag(self.N_hat).abs().log().sum(-1)
            diffe = (
                0.5
                * (logdet + self.d * np.log(2 * torch.tensor(math.pi)) + self.d).sum()
            )
            et = et + diffe
        self.elbo.append((-lj + -et).item())
        return -lj - et
def forward(self, likelihood, params):
"""
Calculate the forward pass of the EM algorithm for the HMM (Baum-Welch); internal method
:param likelihood: log-likelihood of the data for the current parameters
:param params: current model parameters
:return: k x n x t log alpha's and the n x t scaling factors
Note this implementation uses the rescaled 'alpha-hats'
"""
# unpack params
pi = params["pi"]
A = params["A"]
logA = A.log()
alpha = torch.zeros(self.k, self.n, self.t, device=self.device)
scaling_factor = torch.zeros(self.n, self.t, device=self.device)
a = pi[:, None].log() + likelihood[:, :, 0]
scaling_factor[:, 0] = torch.logsumexp(a, dim=0)
alpha[:, :, 0] = a - scaling_factor[:, 0]
for i in range(1, self.t):
asample = alpha[
:, :, i - 1
] # this is the previous time point alpha, we need this for the recursion
# we'll use the log-sum-exp trick for stable calculation
a = likelihood[:, :, i] + torch.logsumexp(
asample[:, None, :] + logA[:, :, None], dim=0
)
scaling_factor[:, i] = torch.logsumexp(a, dim=0)
alpha[:, :, i] = a - scaling_factor[:, i]
# multiply the final results with the time mask to reset missing values to zero
alpha = alpha * self.tm[None, :, :]
scaling_factor = scaling_factor * self.tm
return (
alpha,
scaling_factor,
) # note that this is log alpha and log scaling factor
def backward(self, likelihood, params, scaling_factor):
"""
Calaculate the backward pass of the EM algorithm for the HMM (Baum-Welch); internal method
:param likelihood: log-likelihood of the data for the current parameters
:param params: current model parameters
:param scaling_factor: scaling factors calculated during the forward pass; required for numerical stability
:return: k x n x t log beta's
Note this implementation uses the rescaled 'beta-hats'
"""
# unpack params
logA = params["A"].log()
beta = torch.zeros(self.k, self.n, self.t, device=self.device)
for i in range(self.t - 2, -1, -1):
bsample = beta[
:, :, i + 1
] # this is the next time point beta, we need this for the recusion
# we'll use the log-sum-exp trick for stable calculation
b = torch.logsumexp(
bsample[None, :, :] + logA[:, :, None] + likelihood[None, :, :, i + 1],
dim=1,
)
tmi = self.tm[:, i + 1]
beta[:, :, i] = (b - scaling_factor[:, i + 1]) * tmi[None, :]
return beta # note that this is log beta
    def e_step(self, params, fixSample=False):
        """
        'expectation step' for the EM algorithm (Baum-Welch); internal method
        :param params: current model parameters
        :param fixSample: if True, use the variational means instead of drawing
            reparameterized samples from the variational approximations
        :return: updates gamma, xi and the log-likelihood
        """
        # default setting is to assume no personalized effects
        m_sample = None
        n_sample = None
        if self.perso_io:
            # sample from the variational approximation of M_i
            if fixSample:
                m_sample = self.mu_hat
            else:
                # reparameterization trick: m = L @ eps + mu
                e_sample = torch.randn(self.n, self.d, device=self.device)
                m_sample = (
                    torch.einsum("ijk,ik->ij", [self.L_hat, e_sample]) + self.mu_hat
                )
        if self.perso:
            if fixSample:
                n_sample = self.nu_hat
            else:
                e_sample = torch.randn(self.n, self.d, device=self.device)
                n_sample = (
                    torch.einsum("ijk,ik->ij", [self.N_hat, e_sample]) + self.nu_hat
                )
        likelihood = self.get_likelihoods(params, m_sample=m_sample, n_sample=n_sample)
        alpha, scaling_factor = self.forward(likelihood, params)
        # NB: the exponentiated alpha sum over the first dimension should be one
        beta = self.backward(likelihood, params, scaling_factor)
        # the expontiated beta sum over the first dimension should be numerically well-behaved
        gamma = alpha + beta  # note this is log gamma
        logA = params["A"].log()
        xi = (
            alpha[:, None, :, :-1]
            + beta[None, :, :, 1:]
            + likelihood[None, :, :, 1:]
            + logA[:, :, None, None]
            - scaling_factor[None, None, :, 1:]
        )  # note this is log xi
        # marginal log-probability of the data is the sum of log scaling factors
        pX = scaling_factor.sum()
        e_out = {"xi": xi, "gamma": gamma, "pX": pX}
        samples = {"m_sample": m_sample, "n_sample": n_sample}
        if self.perso_io or self.perso:
            # one gradient step on the variational parameters (negative ELBO)
            self.optimizer.zero_grad()
            self.variational_obj(params, e_out, samples).backward(retain_graph=True)
            self.optimizer.step()
            if self.perso_io:
                # update the variational parameters mu_hat and L_hat using gradient descent
                if self.VI_diag:
                    self.L_hat[
                        torch.stack([torch.eye(self.d) for _ in range(self.n)]) == 1
                    ] = self.tril_vec
                else:
                    self.L_hat[
                        torch.tril(torch.ones(self.n, self.d, self.d)) == 1
                    ] = self.tril_vec
            if self.perso:
                # scatter the optimized (diagonal or lower-triangular) entries
                # back into the Cholesky factor N_hat
                if self.VI_diag:
                    self.N_hat[
                        torch.stack([torch.eye(self.d) for _ in range(self.n)]) == 1
                    ] = self.tril
                else:
                    self.N_hat[
                        torch.tril(torch.ones(self.n, self.d, self.d)) == 1
                    ] = self.tril
        return e_out, params, samples
    def m_step(self, e_out, params, samples):
        """
        fixed point equation for updating 'theta'
        :param e_out: E-step outputs (log gamma [k x n x t], log xi)
        :param params: current model parameters
        :param samples: samples of the personalized effects drawn in the E-step
        :return: updates mu_k, sigma_k, V, A, pi
        """
        # all updates below are closed-form, so no autograd is needed
        with torch.no_grad():
            # un-pack parameters
            gamma = e_out["gamma"]
            xi = e_out["xi"]
            var = params["var"]
            if self.io:
                V = params["V"]
            if self.priorV:
                vnoise = params["vnoise"]
            if self.perso_io:
                m_sample = samples["m_sample"]
            if self.perso:
                n_sample = samples["n_sample"]
            if self.priorMu:
                munoise = params["munoise"]
            # compute `N_k` the proxy "number of points" assigned to each distribution.
            # gamma is k x n x t
            N_k1 = ((gamma[:, :, 0].exp()) * self.om[None, :, 0]).sum(1)
            N_k = ((gamma.exp()) * self.om[None, :, :]).sum(-1).sum(-1)
            # get the means by taking the weighted combination of points
            # first residualize the data against the input / personalized effects
            r = self.data
            if self.io:
                if self.state_io:
                    r = r - V[:, None, None, :] * self.ins[None, :, :, None]
                else:
                    r = r - V * self.ins[:, :, None]
                if self.perso_io:
                    r = r - m_sample[None, :, None, :] * self.ins[None, :, :, None]
                if self.perso:
                    r = r - n_sample[None, :, None, :]
            else:
                if self.perso:
                    r = r - n_sample[:, None, :]
            if self.priorMu:
                # MAP update for mu: ridge-like shrinkage controlled by var / munoise
                if self.full_cov:
                    if self.state_io:
                        num = torch.einsum(
                            "ijk,ijkl->il",
                            [
                                gamma.exp() * self.om[None, :, :],
                                r * self.om[:, :, None],
                            ],
                        )
                    else:
                        num = torch.einsum(
                            "ijk,jkl->il",
                            [
                                gamma.exp() * self.om[None, :, :],
                                r * self.om[:, :, None],
                            ],
                        )
                    denom = (
                        torch.einsum(
                            "i, ijk->ijk",
                            [
                                torch.sum(gamma.exp() * self.om[None, :, :], (1, 2)),
                                torch.stack(
                                    [
                                        torch.eye(self.d, device=self.device)
                                        for _ in range(self.k)
                                    ]
                                ),
                            ],
                        )
                        + var / munoise
                    )
                    # NOTE(review): torch.lu / torch.lu_solve are deprecated in
                    # newer PyTorch in favor of torch.linalg.lu_factor / lu_solve
                    MU_LU = torch.lu(denom)
                    mu = torch.lu_solve(num, *MU_LU)
                else:
                    if self.state_io:
                        mu = torch.einsum(
                            "ijk,ijkl->il",
                            [
                                gamma.exp() * self.om[None, :, :],
                                r * self.om[:, :, None],
                            ],
                        ) / (torch.sum(gamma.exp(), (1, 2)) + var / munoise)
                    else:
                        mu = torch.einsum(
                            "ijk,jkl->il",
                            [
                                gamma.exp() * self.om[None, :, :],
                                r * self.om[:, :, None],
                            ],
                        ) / (torch.sum(gamma.exp(), (1, 2))[:, None] + var / munoise)
            else:
                # maximum-likelihood update for mu (responsibility-weighted mean)
                if self.state_io:
                    mu = torch.einsum(
                        "ijk,ijkl->il",
                        [gamma.exp() * self.om[None, :, :], r * self.om[:, :, None]],
                    )
                    mu = mu / (N_k[:, None] + self.eps)
                else:
                    mu = torch.einsum(
                        "ijk,jkl->il",
                        [gamma.exp() * self.om[None, :, :], r * self.om[:, :, None]],
                    )
                    mu = mu / (N_k[:, None] + self.eps)
            # update the matrix which tansforms the drug information
            if self.io:
                # residualize against the state means and personalized effects
                r = self.data - mu[:, None, None, :]
                if self.perso_io:
                    r = r - m_sample[None, :, None, :] * self.ins[None, :, :, None]
                if self.perso:
                    r = r - n_sample[None, :, None, :]
                if self.priorV:
                    num = torch.einsum(
                        "ijk,ijkl->il",
                        [
                            gamma.exp() * self.om[None, :, :],
                            r * self.ins[None, :, :, None] * self.om[None, :, :, None],
                        ],
                    )
                    denom = (
                        torch.einsum(
                            "i,ijk->ijk",
                            [
                                torch.sum(
                                    gamma.exp()
                                    * self.ins[None, :, :] ** 2
                                    * self.om[None, :, :],
                                    (1, 2),
                                ),
                                torch.stack(
                                    [
                                        torch.eye(self.d, device=self.device)
                                        for _ in range(self.k)
                                    ]
                                ),
                            ],
                        )
                        + var / vnoise
                    )
                    V_LU = torch.lu(denom)
                    V = torch.lu_solve(num[:, :, None], *V_LU).squeeze()
                else:
                    if self.state_io:
                        V = torch.einsum(
                            "ijk,ijkl->il",
                            [
                                gamma.exp() * self.om[None, :, :],
                                r
                                * self.ins[None, :, :, None]
                                * self.om[None, :, :, None],
                            ],
                        )
                        denom = torch.sum(
                            gamma.exp()
                            * self.ins[None, :, :] ** 2
                            * self.om[None, :, :],
                            (1, 2),
                        )
                        V = V / denom[:, None]
                    else:
                        V = torch.einsum(
                            "ijk,ijkl->l",
                            [
                                gamma.exp() * self.om[None, :, :],
                                r * self.ins[:, :, None] * self.om[None, :, :, None],
                            ],
                        )
                        V = V / torch.sum(
                            ((gamma.exp()) * self.tm[None, :, :])
                            * ((self.ins[None, :, :] * self.om[None, :, :]) ** 2)
                        )
            # compute the diagonal covar. matrix, by taking a weighted combination of
            # the each point's square distance from the mean
            r = self.data - mu[:, None, None]
            if self.io:
                if self.state_io:
                    r = r - V[:, None, None, :] * self.ins[None, :, :, None]
                else:
                    r = r - V[None, None, None, :] * self.ins[None, :, :, None]
                if self.perso_io:
                    r = r - m_sample[None, :, None, :] * self.ins[None, :, :, None]
            if self.perso:
                r = r - n_sample[None, :, None, :]
            r = r * self.om[None, :, :, None]
            if self.full_cov:
                if self.perso_io:
                    var = (
                        (gamma[:, :, :, None, None].exp())
                        * self.om[None, :, :, None, None]
                    ) * (torch.einsum("ijkl,ijkm->ijklm", [r, r]))
                else:
                    var = (
                        (gamma[:, :, :, None, None].exp())
                        * self.om[None, :, :, None, None]
                    ) * torch.einsum("ijkl,ijkm->ijklm", [r, r])
                var = var.sum(1).sum(1) / (N_k[:, None, None] + self.eps)
                # add variance ridge to prevent non psd covariance matrices
                var = torch.stack(
                    [
                        var[i, :, :] + 1e-4 * torch.eye(self.d).to(self.device)
                        for i in range(self.k)
                    ]
                )
            else:
                var = torch.einsum(
                    "ijk,ijkl->il", [(gamma.exp()) * self.om[None, :, :], r ** 2]
                )
                var = var / (N_k[:, None] + self.eps)
                # floor the per-dimension variances to keep them strictly positive
                var = torch.clamp(var, min=self.min_var)
            if self.perso_io:
                # compute the prior mnoise
                if self.priorV:
                    mnoise = (
                        1
                        / (2 * self.alpha + 2 + self.n * self.d)
                        * (
                            2 * self.beta
                            + (torch.einsum("ij,ij->i", [m_sample, m_sample])).sum()
                        )
                    )
                else:
                    mnoise = (
                        1
                        / self.n
                        / self.d
                        * (torch.einsum("ij,ij->i", [m_sample, m_sample])).sum()
                    )
            if self.priorV:
                vnoise = (
                    1
                    / (2 * self.alpha + 2 + self.d * self.k)
                    * (2 * self.beta + (torch.einsum("ij, ij ->i", [V, V])).sum())
                )
                # CHECK DERIVATION HERE
            if self.perso:
                if self.priorMu:
                    nnoise = (
                        1
                        / (2 * self.alpha + 2 + self.n * self.d)
                        * (
                            2 * self.beta
                            + (torch.einsum("ij,ij->i", [n_sample, n_sample])).sum()
                        )
                    )
                else:
                    nnoise = (
                        1
                        / self.n
                        / self.d
                        * (torch.einsum("ij,ij->i", [n_sample, n_sample])).sum()
                    )
            if self.priorMu:
                munoise = (
                    1
                    / (2 * self.alpha + 2 + self.d * self.k)
                    * (2 * self.beta + (torch.einsum("ij, ij-> i", [mu, mu])).sum())
                )
            # recompute the mixing probabilities
            pi = N_k1 / N_k1.sum() + self.eps
            # recompute the transition matrix
            # # xi is k x k x n x t - 1
            logA = torch.logsumexp(
                xi.masked_fill(self.om_is == False, -1e18).reshape(
                    self.k, self.k, self.n * (self.t - 1)
                ),
                dim=-1,
            )
            A = normalize_exp(logA, axis=1) + self.eps * self.new_triangular
        params = {
            "mu": mu.to(self.device),
            "var": var.to(self.device),
            "pi": pi.to(self.device),
            "A": A.to(self.device),
        }
        if self.io:
            params["V"] = V.to(self.device)
        if self.perso_io:
            params["mnoise"] = mnoise.to(self.device)
        if self.perso:
            params["nnoise"] = nnoise.to(self.device)
        if self.priorV:
            params["vnoise"] = vnoise.to(self.device)
        if self.priorMu:
            params["munoise"] = munoise.to(self.device)
        return params
    def learn_model(
        self,
        num_iter=1000,
        use_cc=False,
        cc=1e-6,
        intermediate_save=True,
        load_model=False,
        model_name=None,
    ):
        """
        function to learn the parameters of the PIOHMM
        :param num_iter: number of steps for the learning procedure
        :param use_cc: flag to indicate if a convergence criteria should be used
        :param cc: tolerance for the convergence criteria
        :param intermediate_save: flag to indicate if parameters should be saved during training
        :param load_model: flag to indicate if parameters should be loaded from a saved model
        :param model_name: file name of model to be loaded
        :return: depends on the type of model and will include parameters, variational parameters, liklihood, elbo
        """
        if load_model:
            # warm-start theta from a saved checkpoint
            load_params = torch.load(model_name)
            A = load_params["A"]
            mu = load_params["mu"]
            var = load_params["var"]
            pi = load_params["pi"]
            V = load_params["V"]
            params = {"mu": mu, "var": var, "pi": pi, "A": A, "V": V}
            # variational parameters
            if self.perso_io:
                # transformation matrix prior noise
                # initialize using the mean of the IG distribution
                mnoise = torch.tensor([1.5], device=self.device)
                params["mnoise"] = mnoise
            if self.perso:
                nnoise = torch.tensor([0.5], device=self.device)
                params["nnoise"] = nnoise
            if self.priorMu:
                munoise = torch.tensor([1.0], device=self.device)
                params["munoise"] = munoise
        else:
            params = self.initialize_model()
        prev_cost = float("inf")
        # EM loop: E-step (responsibilities + variational gradient step) then
        # closed-form M-step
        for _ in range(num_iter):
            if _ % 500 == 0:
                print("Iteration ", _, flush=True)
            if intermediate_save:
                if _ % 500 == 0:
                    print("Iteration ", _)
                    if self.device[:4] == "cuda":
                        print(torch.cuda.get_device_name(0))
                        print("Memory Usage:")
                        print(
                            "Allocated:",
                            round(torch.cuda.memory_allocated(0) / 1024 ** 3, 1),
                            "GB",
                        )
                        # NOTE(review): torch.cuda.memory_cached is deprecated in
                        # favor of torch.cuda.memory_reserved in newer PyTorch
                        print(
                            "Cached: ",
                            round(torch.cuda.memory_cached(0) / 1024 ** 3, 1),
                            "GB",
                        )
                    torch.save(
                        {
                            "params": params,
                            "elbo": self.elbo,
                            "entropy": self.ent,
                            "exp_ll": self.ell,
                            "log_prob": self.ll,
                            "mi": self.mu_hat,
                            "Li": self.L_hat,
                        },
                        "../results/PD_HMM_Model_iter"
                        + str(_)
                        + "_k"
                        + str(self.k)
                        + ".pkl",
                    )
            # e-step, calculate the 'responsibilities'
            e_out, params, samples = self.e_step(params)
            # compute the cost and check for convergence
            obj = e_out["pX"].item()
            self.ll.append(obj)
            if use_cc:
                diff = prev_cost - obj
                if np.abs(diff) < cc:
                    print("Breaking ", _)
                    break
                prev_cost = obj
            # m-step, update the parameters
            params = self.m_step(e_out, params, samples)
        # re-apply the structural transition mask after training
        params["A"] = params["A"] * self.new_triangular
        if self.perso_io and self.perso:
            return (
                params,
                e_out,
                self.ll,
                self.elbo,
                self.mu_hat,
                self.L_hat,
                self.nu_hat,
                self.N_hat,
            )
        elif self.perso:
            return params, e_out, self.ll, self.elbo, self.nu_hat, self.N_hat
        elif self.perso_io:
            return params, e_out, self.ll, self.elbo, self.mu_hat, self.L_hat
        else:
            return params, e_out, self.ll
def est_test_pX(self, params):
"""
calaculate the marginal probability of the observed data
"""
likelihood = self.get_likelihoods(params, fixSample=True)
alpha, scaling_factor = self.forward(likelihood, params)
return scaling_factor.sum()
    def learn_baseline_vi_params(self, params, num_iter=1000, intermediate_save=False):
        """
        function to learn personalized parameters using only baseline data and fixed estimates of 'theta'
        :param params: fixed estimates of 'theta'
        :param num_iter: number of steps for the learning procedure
        :param intermediate_save: flag to indicate if parameters should be saved during training
        :return inferred parameters
        """
        for _ in range(num_iter):
            # default setting is to assume no personalized effects
            m_sample = None
            n_sample = None
            if self.perso_io:
                # reparameterized sample from q(M|X): m = L @ eps + mu
                e_sample = torch.randn(self.n, self.d, device=self.device)
                m_sample = (
                    torch.einsum("ijk,ik->ij", [self.L_hat, e_sample]) + self.mu_hat
                )
            if self.perso:
                e_sample = torch.randn(self.n, self.d, device=self.device)
                n_sample = (
                    torch.einsum("ijk,ik->ij", [self.N_hat, e_sample]) + self.nu_hat
                )
            likelihood = self.get_likelihoods(
                params, m_sample=m_sample, n_sample=n_sample
            )
            pi = params["pi"]
            # with a single time point the responsibilities come directly from pi
            gamma = pi[:, None, None].log() + likelihood
            e_out = {"gamma": gamma}
            samples = {"m_sample": m_sample, "n_sample": n_sample}
            if self.perso_io or self.perso:
                # gradient step on the variational parameters only (theta fixed)
                self.optimizer.zero_grad()
                self.baseline_variational_obj(params, e_out, samples).backward(
                    retain_graph=True
                )
                self.optimizer.step()
                if self.perso_io:
                    # update the variational parameters mu_hat and L_hat using gradient descent
                    if self.VI_diag:
                        self.L_hat[
                            torch.stack([torch.eye(self.d) for _ in range(self.n)]) == 1
                        ] = self.tril_vec
                    else:
                        self.L_hat[
                            torch.tril(torch.ones(self.n, self.d, self.d)) == 1
                        ] = self.tril_vec
                if self.perso:
                    # scatter the optimized entries back into the Cholesky factor
                    if self.VI_diag:
                        self.N_hat[
                            torch.stack([torch.eye(self.d) for _ in range(self.n)]) == 1
                        ] = self.tril
                    else:
                        self.N_hat[
                            torch.tril(torch.ones(self.n, self.d, self.d)) == 1
                        ] = self.tril
        if self.perso_io and self.perso:
            return (
                params,
                e_out,
                self.ll,
                self.elbo,
                self.mu_hat,
                self.L_hat,
                self.nu_hat,
                self.N_hat,
            )
        elif self.perso:
            return params, e_out, self.ll, self.elbo, self.nu_hat, self.N_hat
        elif self.perso_io:
            return params, e_out, self.ll, self.elbo, self.mu_hat, self.L_hat
        else:
            return params, e_out, self.ll
    def learn_vi_params(self, params, num_iter=1000, intermediate_save=False):
        """
        function to learn personalized parameters using fixed estimates of 'theta'
        :param params: fixed estimates of 'theta'
        :param num_iter: number of steps for the learning procedure
        :param intermediate_save: flag to indicate if parameters should be saved during training
        :return inferred parameters
        """
        # theta is held fixed; only the E-step runs, which internally performs
        # the gradient update of the variational parameters
        for _ in range(num_iter):
            if _ % 50 == 0:
                print("Iteration ", _, flush=True)
            if intermediate_save:
                if _ % 500 == 0:
                    print("Iteration ", _)
                    if self.device[:4] == "cuda":
                        print(torch.cuda.get_device_name(0))
                        print("Memory Usage:")
                        print(
                            "Allocated:",
                            round(torch.cuda.memory_allocated(0) / 1024 ** 3, 1),
                            "GB",
                        )
                        # NOTE(review): torch.cuda.memory_cached is deprecated in
                        # favor of torch.cuda.memory_reserved in newer PyTorch
                        print(
                            "Cached: ",
                            round(torch.cuda.memory_cached(0) / 1024 ** 3, 1),
                            "GB",
                        )
                    torch.save(
                        {
                            "params": params,
                            "elbo": self.elbo,
                            "entropy": self.ent,
                            "exp_ll": self.ell,
                            "log_prob": self.ll,
                            "mi": self.mu_hat,
                            "Li": self.L_hat,
                        },
                        "../results/PD_HMM_Model_iter"
                        + str(_)
                        + "_k"
                        + str(self.k)
                        + ".pkl",
                    )
            # e-step, calculate the 'responsibilities'
            e_out, params, samples = self.e_step(params)
        if self.perso_io and self.perso:
            return (
                params,
                e_out,
                self.ll,
                self.elbo,
                self.mu_hat,
                self.L_hat,
                self.nu_hat,
                self.N_hat,
            )
        elif self.perso:
            return params, e_out, self.ll, self.elbo, self.nu_hat, self.N_hat
        elif self.perso_io:
            return params, e_out, self.ll, self.elbo, self.mu_hat, self.L_hat
        else:
            return params, e_out, self.ll
    def calc_pX(
        self,
        params,
        num_samples=1,
        importance_sampling=False,
        mu_hat=None,
        nu_hat=None,
        L_hat=None,
        N_hat=None,
        fixSample=False,
    ):
        """
        Function to calculate the test log likelihood
        :param params: fixed estimate for 'theta'
        :param num_samples: number of samples for importance sampling
        :param importance_sampling: flag to indicate if importance sampling should be use
        :param mu_hat: mean of the variational distribution for personalized medication effects
        :param nu_hat: mean of the variational distrbution for personalized state effects
        :param L_hat: cholesky factor for covariance matrix of the variational distribution for personalized medication effects
        :param N_hat: cholesky factor for covariance matric of the variational distribution for personalized state effects
        :param fixSample: flag to indicate if only the variational mean should be used
        :return: test log likelihood
        """
        if importance_sampling:
            # sample the personalized effects from q and reweight each draw by
            # prior / proposal to form the importance-sampling estimate of p(X)
            px = torch.zeros(num_samples, self.n)
            for i in range(num_samples):
                if self.perso_io:
                    e_sample = torch.randn(self.n, self.d, device=self.device)
                    m_sample = torch.einsum("ijk,ik->ij", [L_hat, e_sample]) + mu_hat
                    L_prior = torch.stack(
                        [
                            (
                                params["mnoise"].sqrt()
                                * torch.eye(self.d).to(self.device)
                            )
                            for _ in range(self.n)
                        ]
                    ).to(self.device)
                    sample_weight_m = self.log_gaussian_prior(
                        m_sample, torch.zeros(m_sample.shape).to(self.device), L_prior
                    ) - self.log_gaussian_prior(m_sample, mu_hat, L_hat)
                else:
                    m_sample = None
                    sample_weight_m = 0
                if self.perso:
                    e_sample = torch.randn(self.n, self.d, device=self.device)
                    n_sample = torch.einsum("ijk,ik->ij", [N_hat, e_sample]) + nu_hat
                    N_prior = torch.stack(
                        [
                            (params["nnoise"].sqrt() * torch.eye(self.d)).to(
                                self.device
                            )
                            for _ in range(self.n)
                        ]
                    ).to(self.device)
                    sample_weight_n = self.log_gaussian_prior(
                        n_sample, torch.zeros(n_sample.shape).to(self.device), N_prior
                    ) - self.log_gaussian_prior(n_sample, nu_hat, N_hat)
                else:
                    n_sample = None
                    sample_weight_n = 0
                likelihood = self.get_likelihoods(
                    params, m_sample=m_sample, n_sample=n_sample
                )
                alpha, scaling_factor = self.forward(likelihood, params)
                # print((scaling_factor*sample_weight[:, None]).sum())
                px[i, :] = scaling_factor.sum(-1) + sample_weight_m + sample_weight_n
            out = torch.logsumexp(px, 0) - np.log(num_samples)
        elif fixSample:
            # evaluate at the variational means only -- no sampling
            likelihood = self.get_likelihoods(params, m_sample=mu_hat, n_sample=nu_hat)
            alpha, scaling_factor = self.forward(likelihood, params)
            out = scaling_factor
        else:
            # Monte Carlo estimate: draw the personalized effects from the prior
            px = torch.zeros(num_samples, self.n)
            for i in range(num_samples):
                if self.perso_io:
                    mnoise = params["mnoise"]
                    # sample from the sampled from the MLE params
                    m_sample = mnoise.sqrt() * torch.randn(
                        self.n, self.d, device=self.device
                    )
                else:
                    m_sample = None
                if self.perso:
                    nnoise = params["nnoise"]
                    n_sample = nnoise.sqrt() * torch.randn(
                        self.n, self.d, device=self.device
                    )
                else:
                    n_sample = None
                likelihood = self.get_likelihoods(
                    params, m_sample=m_sample, n_sample=n_sample
                )
                alpha, scaling_factor = self.forward(likelihood, params)
                px[i, :] = scaling_factor.sum(-1)
            out = torch.logsumexp(px, 0) - np.log(num_samples)
        return out
def predict_sequence(self, params, m_sample=None, n_sample=None):
"""
function to apply viterbi algorithm
:param params: fixed estimates of the model parameters
:param m_sample: value to use for the personalized medication effects
:param n_sample: value to use for the personalized state effects
:return mps: most probable sequence n x t
"""
likelihood = self.get_likelihoods(params, m_sample=m_sample, n_sample=n_sample)
mps = self.viterbi(likelihood, params)
return mps
def viterbi(self, likelihood, params):
"""
apply the viterbi algorithm to find the most probable sequence per patient; internal method
omega is the maximimum joint probability of the previous data and latent states ; the last value is the joint
distribution of the most probable path
:return:
"""
omega = torch.zeros(self.k, self.n, self.t).to(self.device)
psi = torch.zeros(self.k, self.n, self.t).to(self.device)
mps = torch.zeros(self.n, self.t).to(self.device)
logA = params["A"].log()
pi = params["pi"]
omega[:, :, 0] = pi[:, None].log() + likelihood[:, :, 0]
for i in range(1, self.t):
inner_max, psi[:, :, i] = torch.max(
logA[:, :, None] + omega[:, None, :, i - 1], dim=0
)
omega[:, :, i] = likelihood[:, :, i] + inner_max
mps[:, -1] = torch.argmax(omega[:, :, -1], dim=0)
val, _ = torch.max(omega[:, :, -1], dim=0)
for i in range(self.t - 2, -1, -1):
psi_sample = psi[:, :, i + 1]
mps[:, i] = torch.gather(psi_sample, 0, mps[:, i + 1].long().unsqueeze(0))
return mps
def change_data(
self, data, ins=None, OM=None, TM=None, reset_VI=True, params=[], lr=0.001
):
"""
Replace model dataset
:param data: new dataset to use; additional parameters are corresponding dataset features / descriptors
:return: none, updates to model params only
"""
self.data = data.to(self.device)
self.n = data.shape[0]
self.t = data.shape[1]
# store the inputs used in analysis
if self.io:
self.ins = ins.to(self.device) # n x t x 1
# store the time mask
if TM is None:
self.tm = torch.ones(
self.n, self.t, requires_grad=False, device=self.device
)
else:
self.tm = TM.to(self.device) # n x t
# store the observation mask
if OM is None:
self.om = torch.ones(
self.n, self.t, requires_grad=False, device=self.device
)
else:
self.om = OM.to(self.device) # n x t
if reset_VI:
if self.perso_io and self.perso:
self.elbo = []
mnoise = params["mnoise"]
mi_numpy = np.sqrt(mnoise.cpu().numpy()) * np.random.randn(
self.n, self.d
)
if self.device[:4] == "cuda":
self.mu_hat = (
torch.from_numpy(mi_numpy)
.float()
.cuda()
.to(self.device)
.requires_grad_()
)
else:
self.mu_hat = torch.from_numpy(mi_numpy).float().requires_grad_()
if self.VI_diag:
self.tril_vec = torch.tensor(
0.01 * np.random.randn(self.n * self.d),
requires_grad=True,
device=self.device,
dtype=torch.float64,
)
self.L_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
self.L_hat[
torch.stack([torch.eye(self.d) for _ in range(self.n)]) == 1
] = self.tril_vec
else:
self.tril_vec = torch.tensor(
0.01
* np.random.randn(self.n * int(0.5 * self.d * (self.d + 1))),
requires_grad=True,
device=self.device,
dtype=torch.float64,
)
self.L_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
self.L_hat[
torch.tril(torch.ones(self.n, self.d, self.d)) == 1
] = self.tril_vec
nnoise = params["nnoise"]
ni_numpy = np.sqrt(nnoise.cpu().numpy()) * np.random.randn(
self.n, self.d
)
if self.device[:4] == "cuda":
self.nu_hat = (
torch.from_numpy(ni_numpy)
.float()
.cuda()
.to(self.device)
.requires_grad_()
)
else:
self.nu_hat = torch.from_numpy(ni_numpy).float().requires_grad_()
if self.VI_diag:
self.tril = torch.tensor(
0.01 * np.random.randn(self.n * self.d),
device=self.device,
requires_grad=True,
dtype=torch.float64,
)
self.N_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
self.N_hat[
torch.stack([torch.eye(self.d) for _ in range(self.n)]) == 1
] = self.tril
else:
self.tril = torch.tensor(
0.01
* np.random.randn(self.n * int(0.5 * self.d * (self.d + 1))),
requires_grad=True,
device=self.device,
dtype=torch.float64,
)
self.N_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
self.N_hat[
torch.tril(torch.ones(self.n, self.d, self.d)) == 1
] = self.tril
self.optimizer = torch.optim.Adam(
[self.mu_hat, self.tril_vec, self.nu_hat, self.tril], lr=lr
)
elif self.perso_io:
self.elbo = []
mnoise = params["mnoise"]
mi_numpy = np.sqrt(mnoise.cpu().numpy()) * np.random.randn(
self.n, self.d
)
if self.device[:4] == "cuda":
self.mu_hat = (
torch.from_numpy(mi_numpy)
.float()
.cuda()
.to(self.device)
.requires_grad_()
)
else:
self.mu_hat = torch.from_numpy(mi_numpy).float().requires_grad_()
if self.VI_diag:
self.tril_vec = torch.tensor(
0.01 * np.random.randn(self.n * self.d),
requires_grad=True,
device=self.device,
dtype=torch.float64,
)
self.L_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
self.L_hat[
torch.stack([torch.eye(self.d) for _ in range(self.n)]) == 1
] = self.tril_vec
else:
self.tril_vec = torch.tensor(
0.01
* np.random.randn(self.n * int(0.5 * self.d * (self.d + 1))),
requires_grad=True,
device=self.device,
dtype=torch.float64,
)
self.L_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
self.L_hat[
torch.tril(torch.ones(self.n, self.d, self.d)) == 1
] = self.tril_vec
self.optimizer = torch.optim.Adam([self.mu_hat, self.tril_vec], lr=lr)
elif self.perso:
self.elbo = []
nnoise = params["nnoise"]
ni_numpy = np.sqrt(nnoise.cpu().numpy()) * np.random.randn(
self.n, self.d
)
if self.device[:4] == "cuda":
self.nu_hat = (
torch.from_numpy(ni_numpy)
.float()
.cuda()
.to(self.device)
.requires_grad_()
)
else:
self.nu_hat = torch.from_numpy(ni_numpy).float().requires_grad_()
if self.VI_diag:
self.tril = torch.tensor(
0.01 * np.random.randn(self.n * self.d),
device=self.device,
requires_grad=True,
dtype=torch.float64,
)
self.N_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
self.N_hat[
torch.stack([torch.eye(self.d) for _ in range(self.n)]) == 1
] = self.tril
else:
self.tril = torch.tensor(
0.01
* np.random.randn(self.n * int(0.5 * self.d * (self.d + 1))),
requires_grad=True,
device=self.device,
dtype=torch.float64,
)
self.N_hat = torch.zeros(self.n, self.d, self.d, device=self.device)
self.N_hat[
torch.tril(torch.ones(self.n, self.d, self.d)) == 1
] = self.tril
self.optimizer = torch.optim.Adam(
[self.nu_hat, self.tril], lr=lr, eps=1e-4
)
def forward_pred(self, params, m_sample=None, n_sample=None):
"""
function to forecast one-step-ahead
:return:
osapd: one-step-ahead predictive density
bs: belief state
lpe: log probability evidence
"""
pi = params["pi"]
A = params["A"]
likelihood = self.get_likelihoods(params, m_sample=m_sample, n_sample=n_sample)
alpha, scaling_factor = self.forward(likelihood, params)
osapd = torch.zeros(self.k, self.n, self.t + 1).to(
self.device
) # one-step-ahead predictive density
# there is no data yet at t=0, use pi
osapd[:, :, 0] = pi[:, None].log()
osapd[:, :, 1:] = torch.logsumexp(
A[:, :, None, None].log() + alpha[:, None, :, :], dim=0
)
bs = likelihood + osapd # belief state
lpe = torch.logsumexp(bs, dim=0) # log probability evidence
bs = (bs - lpe[None, :, :]).exp()
osapd = osapd.exp() # return values in probability space
return osapd, bs, lpe
    def forward_sample(self, prob, ns=100):
        """
        Draw observation samples from the one-step-ahead predictive density.
        prob: k x n x t 'one-step-ahead predictive density' p(z_it=j | x_i1, ... x_it-1)
        :param ns: number of samples per (patient, time) pair
        return: samples from the one-step-ahed predictive density, ns x n x t-1 x d
        """
        # NOTE(review): this method reads self.mu, self.V and self.var -- in the
        # rest of this file those live in the params dict rather than on the
        # model object; confirm they are assigned as attributes before calling
        vals = torch.zeros(ns, self.n, self.t - 1, self.d)
        for i in range(self.t - 1):
            for j in range(self.n):
                # categorical over states from the predictive density
                m = torch.distributions.categorical.Categorical(prob[:, j, i])
                for k in range(ns):
                    draw = m.sample()
                    # emission for the drawn state: state mean plus input and
                    # personalized effects; covariance adds the variational
                    # uncertainty of the personalized effect scaled by the input
                    mvn = torch.distributions.multivariate_normal.MultivariateNormal(
                        self.mu[draw, :]
                        + self.V[draw, :] * self.ins[j, i + 1]
                        + self.mu_hat[j, :] * self.ins[j, i + 1],
                        covariance_matrix=self.var[draw, :, :]
                        + torch.mm(self.L_hat[j, :, :], self.L_hat[j, :, :].t())
                        * self.ins[j, i + 1],
                    )
                    vals[k, j, i, :] = mvn.sample()
        return vals
def load_model(self, filename, cpu=True):
"""
function to specifically add the variational model parameters because they are properties of the model; note that
variable names have been assumed here and the save file needs to be formatted accordingly
:param filename: name of filename containing model
:return: none
"""
if cpu:
trained_model = torch.load(filename, map_location=torch.device("cpu"))
else:
trained_model = torch.load(filename)
### IMPORTANT ####
# Note that this is currently not setup to continue training. tril_vec needs to be populated and have
# requires_grad = True to be able to continue training; this function is only to load in a model. Additional
# functionality is required to continue training
if self.perso_io:
self.mu_hat = trained_model["Mi"].to(self.device)
self.L_hat = trained_model["Li"].to(self.device)
if self.perso:
self.nu_hat = trained_model["ni"].to(self.device)
self.N_hat = trained_model["Ni"].to(self.device)
    def baseline_risk(self, params, ns=500, type="sample", m_sample=None):
        """
        Determine probabilities of state assignment at 1- and 2-years from baseline; sample the observed data using
        those probabilities.
        :param params: dict of model parameters ("pi", "A", "mu", "var", "mnoise", and "V" when self.io)
        :param ns: number of Monte Carlo draws (only used when type == "sample")
        :param type: "sample" draws ns observation samples per individual; "mean" returns
                     probability-weighted means; anything else returns most-probable-state
                     point predictions
        :param m_sample: optional personalized input-effect sample; when None and self.perso_io,
                         it is drawn from mnoise
        :return: p_z1year, p_z2year, sample_1year, sample_2year, p_z1, p_z6month
        """
        # unpack params
        pi = params["pi"]
        A = params["A"]
        mu = params["mu"]
        var = params["var"]
        if self.io:
            V = params["V"]
        # NOTE(review): the "mean" and fallback branches below use V unconditionally,
        # so they appear to assume self.io is True -- confirm before other use.
        mnoise = params["mnoise"]
        # Monte Carlo branch: draw ns observation samples per individual.
        if type == "sample":
            sample_1year = torch.zeros(ns, self.n, self.d)
            sample_2year = torch.zeros(ns, self.n, self.d)
            for k in range(ns):
                if self.perso_io:
                    if m_sample is None:
                        m_sample = mnoise.sqrt() * torch.randn(size=(self.n, self.d))
                likelihood = self.get_likelihoods(
                    params, log=False, m_sample=m_sample
                ) # k x n x t
                # NB: one of the pi elements is zero so we don't work directly in the log space
                p_z1 = (
                    pi[:, None]
                    * likelihood[:, :, 0].squeeze()
                    / (pi[:, None] * likelihood[:, :, 0].squeeze()).sum(0)
                )
                p_z6month = (
                    A[:, :, None] * (A[:, :, None] * p_z1[:, None, :]).sum(0)
                ).sum(0)
                # Four applications of A per year (and self.ins columns 4 and 8
                # as the 1- and 2-year inputs) suggest a quarterly time grid --
                # TODO confirm against the visit schedule.
                p_z1year = p_z1
                for i in range(4):
                    p_z1year = (A[:, :, None] * p_z1year[:, None, :]).sum(0)
                p_z2year = p_z1year
                for i in range(4):
                    p_z2year = (A[:, :, None] * p_z2year[:, None, :]).sum(0)
                for j in range(self.n):
                    # check for underflow issues
                    if np.isnan(p_z1year[:, j].detach().numpy()).all():
                        # fall back to putting all mass on the last state
                        p_z1year[:, j] = torch.zeros(self.k)
                        p_z1year[-1, j] = 1
                        p_z2year[:, j] = p_z1year[:, j]
                    m1 = torch.distributions.categorical.Categorical(p_z1year[:, j])
                    m2 = torch.distributions.categorical.Categorical(p_z2year[:, j])
                    draw = m1.sample()
                    if self.io:
                        mvn = torch.distributions.multivariate_normal.MultivariateNormal(
                            mu[draw, :]
                            + (V[draw, :] + m_sample[j, :]) * self.ins[j, 4],
                            covariance_matrix=var[draw, :, :],
                        )
                    else:
                        mvn = torch.distributions.multivariate_normal.MultivariateNormal(
                            mu[draw, :], covariance_matrix=var[draw, :, :]
                        )
                    sample_1year[k, j, :] = mvn.sample()
                    draw = m2.sample()
                    if self.io:
                        mvn = torch.distributions.multivariate_normal.MultivariateNormal(
                            mu[draw, :]
                            + (V[draw, :] + m_sample[j, :]) * self.ins[j, 8],
                            covariance_matrix=var[draw, :, :],
                        )
                    else:
                        mvn = torch.distributions.multivariate_normal.MultivariateNormal(
                            mu[draw, :], covariance_matrix=var[draw, :, :]
                        )
                    sample_2year[k, j, :] = mvn.sample()
        # Expectation branch: probability-weighted means instead of draws.
        elif type == "mean":
            sample_1year = torch.zeros(self.n, self.d)
            sample_2year = torch.zeros(self.n, self.d)
            if self.perso_io:
                m_sample = mnoise.sqrt() * torch.randn(size=(self.n, self.d))
            likelihood = self.get_likelihoods(
                params, log=False, m_sample=m_sample
            ) # k x n x t
            # NB: one of the pi elements is zero so we don't work directly in the log space
            p_z1 = (
                pi[:, None]
                * likelihood[:, :, 0].squeeze()
                / (pi[:, None] * likelihood[:, :, 0].squeeze()).sum(0)
            )
            # print('Check sum:', p_z1.sum(0))
            p_z6month = (A[:, :, None] * (A[:, :, None] * p_z1[:, None, :]).sum(0)).sum(
                0
            )
            p_z1year = p_z1
            for i in range(4):
                p_z1year = (A[:, :, None] * p_z1year[:, None, :]).sum(0)
            p_z2year = p_z1year
            for i in range(4):
                p_z2year = (A[:, :, None] * p_z2year[:, None, :]).sum(0)
            meds1 = self.ins[:, 4]
            meds2 = self.ins[:, 8]
            sample_1year = (
                torch.einsum("ij, ik->jk", [p_z1year, mu])
                + torch.einsum("ij, ik->jk", [p_z1year, V]) * meds1[:, None]
            )
            sample_2year = (
                torch.einsum("ij, ik->jk", [p_z2year, mu])
                + torch.einsum("ij, ik->jk", [p_z2year, V]) * meds2[:, None]
            )
        # Fallback branch: most-probable-state point predictions.
        else:
            sample_1year = torch.zeros(self.n, self.d)
            sample_2year = torch.zeros(self.n, self.d)
            if self.perso_io:
                m_sample = mnoise.sqrt() * torch.randn(size=(self.n, self.d))
            likelihood = self.get_likelihoods(
                params, log=False, m_sample=m_sample
            ) # k x n x t
            # NB: one of the pi elements is zero so we don't work directly in the log space
            p_z1 = (
                pi[:, None]
                * likelihood[:, :, 0].squeeze()
                / (pi[:, None] * likelihood[:, :, 0].squeeze()).sum(0)
            )
            p_z6month = (A[:, :, None] * (A[:, :, None] * p_z1[:, None, :]).sum(0)).sum(
                0
            )
            p_z1year = p_z1
            for i in range(4):
                p_z1year = (A[:, :, None] * p_z1year[:, None, :]).sum(0)
            p_z2year = p_z1year
            for i in range(4):
                p_z2year = (A[:, :, None] * p_z2year[:, None, :]).sum(0)
            idx1 = torch.argmax(p_z1year, dim=0)
            idx2 = torch.argmax(p_z2year, dim=0)
            for j in range(self.n):
                # check for underflow issues
                # NOTE(review): unlike the "sample" branch this calls np.isnan
                # directly on a tensor (no .detach().numpy()) -- confirm it is
                # never reached with a grad-requiring tensor.
                if np.isnan(p_z1year[:, j]).all():
                    p_z1year[:, j] = torch.zeros(self.k)
                    p_z1year[-1, j] = 1
                    p_z2year[:, j] = p_z1year[:, j]
                sample_1year[j, :] = mu[idx1[j]] + V[idx1[j]] * self.ins[j, 4]
                sample_2year[j, :] = mu[idx2[j]] + V[idx2[j]] * self.ins[j, 8]
        return p_z1year, p_z2year, sample_1year, sample_2year, p_z1, p_z6month
if __name__ == "__main__":
    # Synthetic demo of the personalized mixture HMM.
    # Note that this is also contained in the jupyter notebook for more integrated visuals
    n = 200 # number of samples
    d = 1 # dimensionality of observations
    t = 30 # number of time steps
    k = 2 # number of states
    K = 2 # number of HMM mixtures
    A_1 = torch.tensor([[0.8, 0.2], [0.2, 0.8]]) # transition matrix
    A_2 = torch.tensor([[0.2, 0.8], [0.8, 0.2]]) # transition matrix
    A = torch.block_diag(*[A_1, A_2])
    pi = torch.ones(k * K) / (k * K) # initial state distribution
    mu = torch.tensor([0.0, 2.0, 0.0, 2.0]) # state means
    var = torch.tensor([0.1, 0.1, 0.1, 0.1]) # state covariance
    b = 1 # limit of the uniform distribution to specify personalized state effects
    # Simulate state sequences Z and observations X from the generative model.
    # (The initial and transition draws differ, but the emission sampling is
    # identical, so the duplicated code of the original branches is merged.)
    X = torch.zeros((n, t, d))
    Z = torch.zeros((n, t), dtype=torch.long)
    for i in range(n):
        for j in range(t):
            if j == 0:
                Z[i, j] = torch.multinomial(pi, num_samples=1).byte()
            else:
                Z[i, j] = torch.multinomial(A[Z[i, j - 1], :], num_samples=1)
            m_dist = torch.distributions.normal.Normal(
                mu.index_select(0, Z[i, j]), var.index_select(0, Z[i, j])
            )
            X[i, j, :] = m_dist.sample()
    # Corrupt the observations with correlated (squared-exponential) noise and
    # a per-sample random offset.
    X_hat = torch.zeros(n, t, d)
    l = 1.0 # lengthscale for the SE kernel
    s = 0.1 # sigma^2 for the SE kernel
    # build covariance matrix
    var_x = torch.zeros(t, t)
    # torch.range is deprecated; arange(0.0, t + 1) produces the same values
    t_vec = torch.arange(0.0, t + 1)
    for j in range(t):
        for jj in range(t):
            r = (t_vec[j] - t_vec[jj]) ** 2
            # NOTE(review): experiments/synthetic.py uses s * exp(-r / (2 * l**2));
            # confirm which kernel amplitude/lengthscale convention is intended.
            var_x[j, jj] = 1 / s * torch.exp(-r / (2 * l))
    L = torch.linalg.cholesky(var_x) # torch.cholesky is deprecated
    b_stor = torch.zeros(n)
    for i in range(n):
        e = torch.randn(t)
        b_stor[i] = 2 * b * torch.rand(1) - b
        X_hat[i, :, :] = (
            torch.einsum("ik,k->i", [L, e])[None, :, None]
            + X[i, :, :]
            + b_stor[i] * torch.ones(1, t, 1)
        )
    # fit a personalized hmm
    piohmm = mHMM(
        X_hat,
        k=k,
        K=K,
        full_cov=False,
        priorV=False,
        io=False,
        personalized=True,
        personalized_io=False,
        state_io=False,
        UT=False,
        device="cpu",
        eps=1e-18,
        priorMu=True,
        var_fill=0.5,
    )
    piohmm_params, _, _, elbo, b_hat, _ = piohmm.learn_model(
        num_iter=10000, intermediate_save=False
    )
    # most probable state sequence and per-sample predictions
    piohmm_mps, _, _ = piohmm.predict_sequence(piohmm_params, n_sample=b_hat)
    piohmm_xhat = np.zeros((n, t))
    piohmm_xvar = np.zeros((n, t))
    for i in range(n):
        for j in range(t):
            idx = np.where(piohmm_mps[i, j].numpy() == np.arange(k))[0][0]
            piohmm_xhat[i, j] = (
                piohmm_params["mu"][idx].numpy() + b_hat[i].detach().numpy()
            )
            piohmm_xvar[i, j] = 2 * np.sqrt(piohmm_params["var"][idx].numpy())
    # BUG FIX: torch.save takes (obj, file); the original call had the
    # arguments swapped and would fail trying to use the dict as a path.
    torch.save(
        {"params": piohmm_params, "mps": piohmm_mps, "piohmm_xhat": piohmm_xhat},
        "model_results.pkl",
    )
| 89,160 | 37.867044 | 136 | py |
mIOHMM | mIOHMM-main/src/__init__.py | 0 | 0 | 0 | py | |
mIOHMM | mIOHMM-main/experiments/synthetic.py | from src.piomhmm import mHMM
from src.utils import save_pickle
import matplotlib.pyplot as plt
import numpy as np
import torch
def pred(model_name, model, params, b_hat):
    """
    Build per-sample, per-time predictions from a fitted model.

    Parameters
    ----------
    model_name : one of "HMM", "mHMM", "PHMM", "mPHMM"; personalized models
                 additionally add the per-sample effect b_hat to the mean.
    model      : fitted mHMM instance.
    params     : parameter dict returned by learn_model (uses "mu" and "var").
    b_hat      : personalized effects, or None for non-personalized models.

    Returns
    -------
    dict with "xhat" (n x t predicted means) and "xvar" (n x t, 2 sigma bands).

    Note: relies on the module-level globals n and t.
    """
    # BUG FIX: predict_sequence returns a tuple whose first element is the
    # most probable state sequence (cf. its use in piomhmm's __main__); the
    # original kept the whole tuple, which breaks the [i, j] indexing below.
    model_mps, _, _ = model.predict_sequence(params, n_sample=b_hat)
    xhat = np.zeros((n, t))
    xvar = np.zeros((n, t))
    for i in range(n):
        for j in range(t):
            # index of the most probable state at (i, j)
            idx = np.where(model_mps[i, j].cpu().numpy() == np.arange(model.k))[0][0]
            if model_name in ["HMM", "mHMM"]:
                xhat[i, j] = params["mu"][idx].cpu().numpy()
            else:
                xhat[i, j] = (
                    params["mu"][idx].cpu().numpy() + b_hat[i].cpu().detach().numpy()
                )
            xvar[i, j] = 2 * np.sqrt(params["var"][idx].cpu().numpy())
    return {"xhat": xhat, "xvar": xvar}
torch.manual_seed(0)
torch.set_default_dtype(torch.float64)
torch.set_printoptions(precision=2)
device = "cpu"
# DATA GENERATION
n = 200 # number of samples
d = 1 # dimensionality of observations
t = 30 # number of time steps
k = 2 # number of states
K = 2 # number of HMM mixtures
# set parameters
A_1 = torch.tensor([[0.8, 0.2], [0.2, 0.8]]) # transition matrix
A_2 = torch.tensor([[0.2, 0.8], [0.8, 0.2]]) # transition matrix
A = torch.block_diag(*[A_1, A_2])
pi = torch.ones(k * K) / (k * K) # initial state distribution
mu = torch.tensor([0.0, 2.0, 0.0, 2.0]) # state means
var = torch.tensor([0.1, 0.1, 0.1, 0.1]) # state covariance
b = 1.0 # specify the range of a uniform distribution over personalized state effects, e.g. r_i ~ Unif[-b, b]
# Simulate the model: the initial and transition draws differ, but the
# emission sampling was duplicated across both branches, so it is merged here.
X = torch.zeros((n, t, d))
Z = torch.zeros((n, t), dtype=torch.long)
for i in range(n):
    for j in range(t):
        if j == 0:
            Z[i, j] = torch.multinomial(pi, num_samples=1).byte()
        else:
            Z[i, j] = torch.multinomial(A[Z[i, j - 1], :], num_samples=1)
        m_dist = torch.distributions.normal.Normal(
            mu.index_select(0, Z[i, j]), var.index_select(0, Z[i, j])
        )
        X[i, j, :] = m_dist.sample()
# add noise
X_hat = torch.zeros(n, t, d)
l = 1.0 # lengthscale for the SE kernel
s = 0.1 # sigma^2 for the SE kernel
# build covariance matrix
var_x = torch.zeros(t, t)
# torch.range is deprecated; arange(0.0, t + 1) yields the same values
t_vec = torch.arange(0.0, t + 1)
for j in range(t):
    for jj in range(t):
        r = (t_vec[j] - t_vec[jj]) ** 2
        var_x[j, jj] = s * torch.exp(-r / (2 * l ** 2))
L = torch.linalg.cholesky(var_x) # torch.cholesky is deprecated
# correlated noise plus a per-sample uniform offset in [-b, b]
b_stor = torch.zeros(n)
for i in range(n):
    e = torch.randn(t)
    b_stor[i] = 2 * b * torch.rand(1) - b
    X_hat[i, :, :] = (
        torch.einsum("ik,k->i", [L, e])[None, :, None]
        + X[i, :, :]
        + b_stor[i] * torch.ones(1, t, 1)
    )
# plot a number of samples generated: clean x_i against corrupted x_hat_i
fig, axs = plt.subplots(3, 5, dpi=200)
fig.set_size_inches(12, 5)
for i, ax in enumerate(axs.flatten()):
    ax.plot(X[i, :].numpy(), label="$x_i$")
    ax.plot(X_hat[i, :].numpy(), label="$\hat{x}_i$")
    ax.set_title("Sample " + str(i))
fig.tight_layout()
# single shared legend, anchored below the grid (ax is the last axis)
ax.legend(
    loc="lower center", bbox_to_anchor=(-2.2, -0.95), fancybox=True, shadow=True, ncol=5
)
# FITTING MODELS
# K=1: a single (non-mixture) HMM
print("fitting standard HMM...")
hmm = mHMM(
    X_hat,
    k=k,
    K=1,
    full_cov=False,
    priorV=False,
    io=False,
    personalized=False,
    personalized_io=False,
    state_io=False,
    device=device,
    eps=1e-18,
)
hmm_params, _, ll_hmm = hmm.learn_model(num_iter=10000, intermediate_save=False)
# K mixtures of HMMs, still no personalization
print("fitting standard mHMM...")
mhmm = mHMM(
    X_hat,
    k=k,
    K=K,
    full_cov=False,
    priorV=False,
    io=False,
    personalized=False,
    personalized_io=False,
    state_io=False,
    device=device,
    eps=1e-18,
)
mhmm_params, _, ll_mhmm = mhmm.learn_model(num_iter=10000, intermediate_save=False)
# personalized variants additionally return the ELBO and per-sample effects b_hat
print("fitting personalized HMM...")
phmm = mHMM(
    X_hat,
    k=k,
    K=1,
    full_cov=False,
    priorV=False,
    io=False,
    personalized=True,
    personalized_io=False,
    state_io=False,
    device=device,
    eps=1e-18,
)
phmm_params, _, _, elbo_phmm, b_hat_phmm, _ = phmm.learn_model(
    num_iter=10000, intermediate_save=False
)
print("fitting personalized mHMM...")
mphmm = mHMM(
    X_hat,
    k=2,
    K=2,
    full_cov=False,
    priorV=False,
    io=False,
    personalized=True,
    personalized_io=False,
    state_io=False,
    device=device,
    eps=1e-18,
)
mphmm_params, _, _, elbo_mphmm, b_hat_mphmm, _ = mphmm.learn_model(
    num_iter=10000, intermediate_save=False
)
# RESULTS
# save outputs
outputs = {
    "HMM": {"model": hmm, "params": hmm_params, "b_hat": None},
    "mHMM": {"model": mhmm, "params": mhmm_params, "b_hat": None},
    "PHMM": {"model": phmm, "params": phmm_params, "b_hat": b_hat_phmm},
    "mPHMM": {"model": mphmm, "params": mphmm_params, "b_hat": b_hat_mphmm},
}
# per-model predicted means / 2-sigma bands for every sample and time step
preds = {}
for model_name in outputs:
    model = outputs[model_name]["model"]
    params = outputs[model_name]["params"]
    b_hat = outputs[model_name]["b_hat"]
    preds[model_name] = pred(model_name, model, params, b_hat)
save_pickle({"outputs": outputs, "preds": preds}, "outputs/synthetic_all.pkl")
# figure
# rows compare model pairs: row 0 HMM vs PHMM, row 1 mHMM vs mPHMM,
# row 2 PHMM vs mPHMM; columns are the first four samples
fig, axs = plt.subplots(3, 4, dpi=200)
fig.set_size_inches(12, 8)
for j in range(3):
    for i in range(4):
        ax = axs[j][i]
        ax.plot(X_hat[i, :].numpy(), "k:", label="$\hat{x}_i$")
        if j == 0:
            xhat = preds["HMM"]["xhat"]
            xvar = preds["HMM"]["xvar"]
            ax.plot(xhat[i, :], label="HMM $\mu_k \pm 2\sigma_{k,i}$")
            ax.fill_between(
                np.arange(t),
                xhat[i, :] - xvar[i, :],
                xhat[i, :] + xvar[i, :],
                alpha=0.5,
            )
            xhat = preds["PHMM"]["xhat"]
            xvar = preds["PHMM"]["xvar"]
            ax.plot(xhat[i, :], label="PHMM $(\mu_k + r^{(i)}) \pm 2\sigma_{k,i}$")
            ax.fill_between(
                np.arange(t),
                xhat[i, :] - xvar[i, :],
                xhat[i, :] + xvar[i, :],
                alpha=0.5,
            )
        elif j == 1:
            xhat = preds["mHMM"]["xhat"]
            xvar = preds["mHMM"]["xvar"]
            ax.plot(xhat[i, :], label="mHMM $\mu_k \pm 2\sigma_{k,i}$")
            ax.fill_between(
                np.arange(t),
                xhat[i, :] - xvar[i, :],
                xhat[i, :] + xvar[i, :],
                alpha=0.5,
            )
            xhat = preds["mPHMM"]["xhat"]
            xvar = preds["mPHMM"]["xvar"]
            ax.plot(xhat[i, :], label="mPHMM $(\mu_k + r_i) \pm 2\sigma_{k,i}$")
            ax.fill_between(
                np.arange(t),
                xhat[i, :] - xvar[i, :],
                xhat[i, :] + xvar[i, :],
                alpha=0.5,
            )
        else:
            xhat = preds["PHMM"]["xhat"]
            xvar = preds["PHMM"]["xvar"]
            ax.plot(xhat[i, :], label="PHMM $(\mu_k + r_i) \pm 2\sigma_{k,i}$")
            ax.fill_between(
                np.arange(t),
                xhat[i, :] - xvar[i, :],
                xhat[i, :] + xvar[i, :],
                alpha=0.5,
            )
            xhat = preds["mPHMM"]["xhat"]
            xvar = preds["mPHMM"]["xvar"]
            ax.plot(xhat[i, :], label="mPHMM $(\mu_k + r_i) \pm 2\sigma_{k,i}$")
            ax.fill_between(
                np.arange(t),
                xhat[i, :] - xvar[i, :],
                xhat[i, :] + xvar[i, :],
                alpha=0.5,
            )
        ax.set_xlabel("Time")
        ax.set_xticks([])
        ax.set_xticklabels([])
        ax.set_title("Sample " + str(i))
        ax.set_ylim([-4, 4])
# single shared legend anchored relative to the last axis
ax.legend(
    loc="lower left",
    bbox_to_anchor=(-2.7, -0.42),
    fancybox=True,
    shadow=True,
    ncol=4,
)
fig.tight_layout()
fig.subplots_adjust(hspace=0.7)
fig.savefig(
    "outputs/synthetic_x_hats.png",
    dpi=400,
    facecolor="w",
    edgecolor="w",
    orientation="portrait",
    bbox_inches="tight",
    pad_inches=0,
    metadata={"Creator": None, "Producer": None, "CreationDate": None},
)
| 8,235 | 28 | 110 | py |
mIOHMM | mIOHMM-main/experiments/real.py | from src.piomhmm import mHMM
import numpy as np
import random
import torch
from src.utils import save_pickle, load_pickle
# fix all RNG seeds (torch, random, numpy) so the experiment is reproducible
RANDOM_SEED = 0
torch.manual_seed(RANDOM_SEED)
random.seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)
# use double precision everywhere and compact tensor printing
torch.set_default_dtype(torch.float64)
torch.set_printoptions(precision=2)
def preprocess(x, d):
    """
    Convert raw observation/medication arrays into masked torch tensors.

    Parameters
    ----------
    x : (N, T, D) float array of observations; missing entries are NaN.
    d : (N, T) float array of medication (LEDD) values; may contain NaN.

    Returns
    -------
    X  : (N', T, D) float tensor with NaNs replaced by 0.
    D  : (N', T) float tensor of medication values rescaled by the maximum.
    TM : (N', T) float time mask, 1 up to the last observed time point.
    OM : (N', T) bool observation mask, True where a measurement exists.
    """
    # Drop samples with fewer than two measurements: a single point is not a
    # time series, and a sample with ZERO measurements would crash the
    # time-mask computation below (the original `== 1` filter missed those).
    remove_idx = np.where(np.sum(~np.isnan(x[:, :, 0]), axis=1) <= 1)
    x = np.delete(x, remove_idx, 0)
    d = np.delete(d, remove_idx, 0)
    # clip implausibly large LEDD values to 620, zero-fill missing, rescale
    d[d > 5000] = 620
    d[np.isnan(d)] = 0
    d = d / np.max(d)
    # time mask: 1 up to (and including) the last observation of each sample
    N, T, D = x.shape
    time_mask = np.ones((N, T))
    for i in range(N):
        ind = np.where(~np.isnan(x[i, :, 0]))[0][-1] + 1
        time_mask[i, ind:] = 0
    # observation mask: which individual time points were actually measured
    missing_mask = (~np.isnan(x[:, :, 0])).astype(float)
    x[np.isnan(x)] = 0
    # convert everything to tensors
    X = torch.Tensor(x).float()
    D = torch.Tensor(d).float()
    TM = torch.Tensor(time_mask).float()
    OM = torch.Tensor(missing_mask).bool()
    return X, D, TM, OM
# data: preprocessed train/test observations and medication inputs
data = load_pickle("processed/data_for_PIOHMM.pkl")
X_train, D_train, TM_train, OM_train = preprocess(data["x_train"], data["train_med"])
X_test, D_test, TM_test, OM_test = preprocess(data["x_test"], data["test_med"])
# experiment setting
device = "cpu"
k = 8 # number of hidden states
num_iter_train = 10000
num_iter_test = 5000
# sweep over the number of HMM mixture components K
for K in [1, 2, 3, 4, 5]:
    # mIOHMM
    print("fitting mIOHMM...", flush=True)
    model = mHMM(
        X_train,
        ins=D_train,
        k=k,
        K=K,
        TM=TM_train,
        OM=OM_train,
        full_cov=False,
        io=True,
        personalized=False,
        personalized_io=False,
        state_io=True,
        UT=True,
        device=device,
        eps=1e-18,
    )
    params_hat, e_out, ll = model.learn_model(
        num_iter=num_iter_train, intermediate_save=False
    )
    training_pX = model.calc_pX(params_hat)
    mIOHMM_model = {
        "params": params_hat,
        "e_out": e_out,
        "ll": ll,
        "training_pX": training_pX,
    }
    # evaluate on held-out data: refit only the variational parameters
    print("learning vi params ...\n", flush=True)
    model.change_data(
        X_test, ins=D_test, TM=TM_test, OM=OM_test, reset_VI=True, params=params_hat
    )
    params_hat, e_out_test, ll_test = model.learn_vi_params(
        params_hat, num_iter=num_iter_test
    )
    test_pX = model.calc_pX(params_hat)
    mIOHMM_model["params_test"] = params_hat
    mIOHMM_model["e_out_test"] = e_out_test
    mIOHMM_model["ll_test"] = ll_test
    mIOHMM_model["test_pX"] = test_pX
    # persist one result file per K
    save_pickle(
        mIOHMM_model, "models/mIOHMM_" + str(K) + ".pkl",
    )
| 2,760 | 26.61 | 85 | py |
FADO | FADO-master/documentation.py | # Copyright 2019-2023, FADO Contributors (cf. AUTHORS.md)
#
# This file is part of FADO.
#
# FADO is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FADO is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FADO. If not, see <https://www.gnu.org/licenses/>.
import inspect
def printDocumentation(obj=None):
    """
    Prints the documentation strings of an object (class or function).
    For classes print the documentation for all "public" and documented methods.
    """
    # No argument: document this very function as a usage example.
    if obj is None:
        printDocumentation(printDocumentation)
        return
    #end
    def _printHeader(title):
        # blank line, title, underline
        print("")
        print(title)
        print("-"*len(title))
    #end
    # Documentation for a function
    if str(obj).startswith("<function"):
        # undocumented functions are skipped entirely
        if not obj.__doc__:
            return
        signature = str(inspect.signature(obj))
        # strip the 'self' argument from method signatures
        for pattern in ("(self, ", "(self,", "(self"):
            signature = signature.replace(pattern, "(")
        #end
        _printHeader(str(obj).split()[1] + signature)
        print(inspect.getdoc(obj))
        print("\n"+"*"*80)
        return
    #end
    # Documentation for classes: print the class docs, then recurse into
    # all public attributes (methods).
    className = str(obj).split(".")[-1].split("'")[0]
    _printHeader(className + str(inspect.signature(obj)))
    print(inspect.getdoc(obj))
    print("\n"+"*"*80+"\n")
    for attribute in dir(obj):
        if attribute.startswith("_"):
            continue
        printDocumentation(getattr(obj, attribute))
    #end
#end
| 1,937 | 30.258065 | 80 | py |
FADO | FADO-master/variable.py | # Copyright 2019-2023, FADO Contributors (cf. AUTHORS.md)
#
# This file is part of FADO.
#
# FADO is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FADO is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FADO. If not, see <https://www.gnu.org/licenses/>.
import copy
import numpy as np
class InputVariable:
    """
    Class to define design variables.

    Parameters
    ----------
    x0     : The initial value (scalar or array).
    parser : Specifies how the variable is written to file.
    size   : >= 1 defines a vector variable whose x0, lb, and ub values are broadcast.
             == 0 means auto, i.e. size determined from x0, scale/lb/ub must be either
             compatible or scalar.
    scale  : An optimizer will see x/lb/ub * scale.
    lb/ub  : The lower and upper bounds for the variable.

    See also
    --------
    Parameter, a variable-like object that is not exposed to optimizers.
    """
    def __init__(self, x0, parser, size=0, scale=1.0, lb=-1E20, ub=1E20):
        self._parser = parser
        # a scalar x0 with auto size implies a size-1 vector
        if size == 0 and isinstance(x0,float): size=1
        if size >= 1:
            # explicit size: all inputs must be scalars and are broadcast
            try:
                assert(isinstance(x0,float))
                assert(isinstance(lb,float))
                assert(isinstance(ub,float))
                assert(isinstance(scale,float))
            except:
                raise ValueError("If size is specified, x0, scale, lb, and ub must be scalars.")
            #end
            self._x0 = np.ones((size,))*x0
            self._lb = np.ones((size,))*lb
            self._ub = np.ones((size,))*ub
            self._scale = np.ones((size,))*scale
        else:
            # auto size: take it from x0; lb/ub/scale must be scalars
            # (broadcast) or arrays of matching size
            try:
                size = x0.size
                assert(size>=1)
                self._x0 = x0
                if not isinstance(lb,float):
                    assert(lb.size == size)
                    self._lb = lb
                else:
                    self._lb = np.ones((size,))*lb
                #end
                if not isinstance(ub,float):
                    assert(ub.size == size)
                    self._ub = ub
                else:
                    self._ub = np.ones((size,))*ub
                #end
                if not isinstance(scale,float):
                    assert(scale.size == size)
                    self._scale = scale
                else:
                    self._scale = np.ones((size,))*scale
                #end
            except:
                raise ValueError("Incompatible sizes of x0, scale, lb, and ub.")
            #end
        #end
        self._size = size
        # current value starts at a deep copy of x0 so x0 stays untouched
        self._x = copy.deepcopy(self._x0)
    #end
    def getSize(self):
        """Return the number of scalar components of the variable."""
        return self._size
    def getInitial(self):
        """Return the initial value."""
        return self._x0
    def getCurrent(self):
        """Return the current value."""
        return self._x
    def getLowerBound(self):
        """Return the lower bound(s)."""
        return self._lb
    def getUpperBound(self):
        """Return the upper bound(s)."""
        return self._ub
    def getScale(self):
        """Return the scale(s) applied for the optimizer."""
        return self._scale
    def get(self,name):
        """Dispatch to one of the getters by field name, raises KeyError for unknown names."""
        if name == "Initial":
            return self.getInitial()
        elif name == "Current":
            return self.getCurrent()
        elif name == "LowerBound":
            return self.getLowerBound()
        elif name == "UpperBound":
            return self.getUpperBound()
        elif name == "Scale":
            return self.getScale()
        else:
            raise KeyError("Variable does not have field: `"+name+"`")
        #end
    #end
    def setCurrent(self,x):
        """Set the current value in place (broadcast assignment into the array)."""
        self._x[()] = x
    def writeToFile(self,file):
        """Write the current value to file via the parser."""
        self._parser.write(file,self._x)
class Parameter:
    """
    Class for optimization parameters, usually some value that is not an
    optimization variable but needs to be ramped over the course of an
    optimization, e.g. a penalty factor.

    Parameters
    ----------
    values   : An indexable structure (e.g. range, list).
    parser   : How the values are written to file.
    start    : Initial index into values.
    function : Can be used to further convert the current value.
    """
    def __init__(self,values,parser,start=0,function=None):
        self._values = values
        self._parser = parser
        self._function = function
        self._upper = len(values)-1
        # make sure the starting position is a valid index
        self._index = self._clampIndex(start)

    def _clampIndex(self,i):
        """Restrict an index to the valid range of values."""
        return max(0,min(self._upper,i))

    def increment(self):
        """Move to the next value, return True if the last value was reached."""
        self._index = self._clampIndex(self._index+1)
        return self.isAtTop()

    def decrement(self):
        """Move to the previous value, return True if the first value was reached."""
        self._index = self._clampIndex(self._index-1)
        return self.isAtBottom()

    def writeToFile(self,file):
        """Write the current (optionally converted) value to file via the parser."""
        value = self._values[self._index]
        if self._function is not None:
            value = self._function(value)
        self._parser.write(file,value)

    def isAtTop(self):
        """Return True if the current value is the last."""
        return self._index == self._upper

    def isAtBottom(self):
        """Return True if the current value is the first."""
        return self._index == 0
#end
| 5,558 | 30.948276 | 104 | py |
FADO | FADO-master/function.py | # Copyright 2019-2023, FADO Contributors (cf. AUTHORS.md)
#
# This file is part of FADO.
#
# FADO is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FADO is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FADO. If not, see <https://www.gnu.org/licenses/>.
import numpy as np
import abc
class FunctionBase(abc.ABC):
    """Abstract base class defining the essential interface of Function objects."""
    def __init__(self,name):
        self._name = name
        # variables the function depends on
        self._variables = []

    def getName(self,maxLen=0):
        """Return the function name, truncated to maxLen characters when maxLen > 0."""
        if maxLen == 0:
            return self._name
        return self._name[:maxLen]

    def getVariables(self):
        """Return the list of input variables."""
        return self._variables

    @abc.abstractmethod
    def getValue(self):
        return NotImplemented

    @abc.abstractmethod
    def getGradient(self,mask):
        return NotImplemented

    def getParameters(self):
        """Functions have no parameters by default."""
        return []

    def resetValueEvalChain(self):
        """No evaluation chain to reset by default."""
        pass

    def resetGradientEvalChain(self):
        """No evaluation chain to reset by default."""
        pass

    def getValueEvalChain(self):
        return []

    def getGradientEvalChain(self):
        return []
#end
class Function(FunctionBase):
    """
    Defines a mathematical function R^n -> R as a series of evaluation steps.
    Functions are associated with optimization drivers to define optimization problems,
    they are not designed (nor intended) to be passed directly to optimization methods.

    Parameters
    ----------
    name      : String to identify the function.
    outFile   : Where to read the result from.
    outParser : Object used to read the outFile.

    See also
    --------
    ExternalRun, currently the only way to define the evaluation steps.
    Variable, the class used to define optimization variables.
    """
    def __init__(self,name="",outFile="",outParser=None):
        FunctionBase.__init__(self,name)
        # where and how the output value is obtained
        self.setOutput(outFile,outParser)
        # evaluation pipelines for value and gradient
        self._funEval = []
        self._gradEval = []
        # where and how their gradients are obtained
        self._gradFiles = []
        self._gradParse = []
        # default value when evaluation fails
        self._defaultValue = None
    def addInputVariable(self,variable,gradFile,gradParser):
        """
        Attach a variable object to the function.

        Parameters
        ----------
        variable   : The variable object.
        gradFile   : Where to get the gradient of the function w.r.t. the variable.
        gradParser : The object used to read the gradFile.
        """
        self._variables.append(variable)
        self._gradFiles.append(gradFile)
        self._gradParse.append(gradParser)
    def getParameters(self):
        """Collect the parameters of all value and gradient evaluation steps."""
        parameters = []
        for evl in self._funEval:
            parameters += evl.getParameters()
        for evl in self._gradEval:
            parameters += evl.getParameters()
        return parameters
    def setOutput(self,file,parser):
        """Set where (file) and how (parser) the function value is read."""
        self._outFile = file
        self._outParser = parser
    def addValueEvalStep(self,evaluation):
        """Add a required step to compute the function value."""
        self._funEval.append(evaluation)
    def addGradientEvalStep(self,evaluation):
        """Add a required step to compute the function gradient."""
        self._gradEval.append(evaluation)
    # check if any evaluation is in error state
    def _checkError(self,evals):
        for evl in evals:
            if evl.isError(): raise RuntimeError("Evaluations failed.")
        #end
    #end
    def getValue(self):
        """
        Get the function value, i.e. apply the parser to the output file.
        Run the evaluation steps if they have not been executed yet.
        Note that this method does not have parameters, the current value of the variables
        is set via the Variable objects.
        """
        # check if we can retrieve the value; lazily run the pipeline if
        # any step has not been executed yet
        self._checkError(self._funEval)
        for evl in self._funEval:
            if not evl.isRun():
                self._sequentialEval(self._funEval)
                break
        #end
        return self._outParser.read(self._outFile)
    def getGradient(self,mask=None):
        """
        Get the gradient (as a dense vector) of the function, i.e. applies each variable's
        parser. If no mask (dictionary) is provided simple concatenation is performed,
        otherwise each variable's gradient is copied starting at an offset. Note that if a
        mask is provided the size of the resulting vector is the sum of the sizes of the
        variables used as keys for the dictionary.

        Example
        -------
        addVariable(z,...) # z = [1, 1] and df/dz = [2, 2]
        getGradient({x : 0, z : 3}) -> [0, 0, 0, 2, 2]
        """
        # check if we can retrieve the gradient; lazily run the pipeline if
        # any step has not been executed yet
        self._checkError(self._gradEval)
        for evl in self._gradEval:
            if not evl.isRun():
                self._sequentialEval(self._gradEval)
                break
        #end
        # determine size of gradient vector
        size = 0
        if mask is None: src = self._variables
        else: src = mask.keys()
        for var in src:
            size += var.getSize()
        # populate gradient vector
        gradient = np.ndarray((size,))
        idx = 0
        for var,file,parser in zip(self._variables,self._gradFiles,self._gradParse):
            grad = parser.read(file)
            if var.getSize() == 1:
                # Convert the value to a scalar if it is not yet.
                try: grad = sum(grad)
                except: pass
            #end
            if mask is not None: idx = mask[var]
            # scalar gradients are not iterable, hence the try/except
            try:
                for val in grad:
                    gradient[idx] = val
                    idx += 1
            except:
                gradient[idx] = grad
                idx += 1
            #end
        #end
        return gradient
    #end
    def _sequentialEval(self,evals):
        # run all evaluation steps in order
        for evl in evals:
            evl.initialize()
            evl.run()
        #end
    #end
    def resetValueEvalChain(self):
        """Clear the lazy flags of the value evaluation pipeline."""
        self._resetEvals(self._funEval)
    def resetGradientEvalChain(self):
        """Clear the lazy flags of the gradient evaluation pipeline."""
        self._resetEvals(self._gradEval)
    def _resetEvals(self,evals):
        for evl in evals:
            evl.finalize()
        #end
    #end
    def getValueEvalChain(self):
        return self._funEval
    def getGradientEvalChain(self):
        return self._gradEval
    def hasDefaultValue(self):
        """Return True if a fallback value has been set."""
        return self._defaultValue is not None
    def setDefaultValue(self,value):
        """Give a default value to the function, to be used in case the evaluation fails."""
        self._defaultValue = value
    def getDefaultValue(self):
        return self._defaultValue
#end
class NonDiscreteness(FunctionBase):
    """
    Continuous measure of non-discreteness (usually to use as a constraint).
    The function is zero when the variables are at either bound (lower or upper)
    and maximum when they are at the mid-point.
    """
    def __init__(self,name=""):
        FunctionBase.__init__(self,name)
    def addInputVariable(self,variable):
        """Attach a variable over which non-discreteness is measured."""
        self._variables.append(variable)
    def _totalSize(self):
        """Total number of scalar design variables."""
        total = 0
        for var in self._variables:
            total += var.getSize()
        return total
    def getValue(self):
        """Average the (ub-x)(x-lb) measure over all scalar variables."""
        # NOTE(review): the normalization uses (ub+lb)**2, so the value only
        # reaches 1 at the mid-point when lb == 0 (e.g. [0,1] densities).
        # Confirm whether (ub-lb)**2 was intended for general bounds.
        accum = 0.0
        for var in self._variables:
            x = var.getCurrent()
            lb = var.getLowerBound()
            ub = var.getUpperBound()
            accum += ((ub-x)*(x-lb)/(ub+lb)**2).sum()
        return 4*accum/self._totalSize()
    def getGradient(self,mask=None):
        """Dense gradient, same masking semantics as Function.getGradient."""
        N = self._totalSize()
        # determine size of gradient vector
        if mask is None:
            size = N
        else:
            size = 0
            for var in mask.keys():
                size += var.getSize()
        # populate gradient vector
        gradient = np.ndarray((size,))
        idx = 0
        for var in self._variables:
            x = var.getCurrent()
            lb = var.getLowerBound()
            ub = var.getUpperBound()
            grad = (4.0/N)*(ub+lb-2*x)/(ub+lb)**2
            if mask is not None: idx = mask[var]
            # scalar gradients are not iterable, hence the try/except
            try:
                for val in grad:
                    gradient[idx] = val
                    idx += 1
            except:
                gradient[idx] = grad
                idx += 1
            #end
        #end
        return gradient
    #end
#end
| 8,997 | 28.405229 | 92 | py |
FADO | FADO-master/__init__.py | from variable import *
from function import *
from evaluation import *
from documentation import *
from tools import LabelReplacer
from tools import ArrayLabelReplacer
from tools import PreStringHandler
from tools import TableReader
from tools import LabeledTableReader
from tools import TableWriter
from tools import BoundConstraints
from tools import GradientScale
from drivers import ExteriorPenaltyDriver
from drivers import ScipyDriver
# Import IpOpt driver if possible.
try: from drivers import IpoptDriver
except: pass
from optimizers import goldenSection
from optimizers import quadraticInterp
from optimizers import fletcherReeves
| 640 | 29.52381 | 41 | py |
FADO | FADO-master/evaluation.py | # Copyright 2019-2023, FADO Contributors (cf. AUTHORS.md)
#
# This file is part of FADO.
#
# FADO is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FADO is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FADO. If not, see <https://www.gnu.org/licenses/>.
import os
import shutil
import subprocess as sp
class ExternalRun:
    """
    Defines the execution of an external code (managed via Popen).
    A lazy execution model is used, once run, a new process will not be started
    until the "lazy" flags are explicitly cleared via "finalize()".

    Parameters
    ----------
    dir         : The subdirectory within which the command will be run.
    command     : The shell command used to create the external process.
    useSymLinks : If set to True, symbolic links are used for "data" files instead of copies.
    """
    def __init__(self,dir,command,useSymLinks=False):
        # immutable input files and their names inside the working directory
        self._dataFiles = []
        self._dataFilesDestination = []
        # mutable input files onto which parameters/variables are written
        self._confFiles = []
        # output files whose presence indicates success
        self._expectedFiles = []
        self._workDir = dir
        self._command = command
        self._symLinks = useSymLinks
        # retry bookkeeping, a failed run is restarted up to _maxTries times
        self._maxTries = 1
        self._numTries = 0
        self._process = None
        self._variables = set()
        self._parameters = []
        self._stdout = None
        self._stderr = None
        # finalize() initializes the lazy-state flags (_isIni, _isRun, ...)
        self.finalize()

    # Convert "file" to an absolute path (must exist) and append it to "flist".
    def _addAbsoluteFile(self,file,flist):
        file = os.path.abspath(file)
        if not os.path.isfile(file):
            raise ValueError("File '"+file+"' not found.")
        flist.append(file)

    def addData(self,file,location="auto",destination=None):
        """
        Adds a "data" file to the run, an immutable dependency of the process.

        Parameters
        ----------
        file        : Path to the file.
        location    : Type of path, "relative" (to the parent of "dir"), "absolute" (the path
                      is immediately converted to an absolute path, the file must exist),
                      or "auto" (tries "absolute" first, falls back to "relative").
        destination : Filename to be set at the destination. Discards any additional file path.
                      The default destination is the regular filename (i.e. "file").
        """
        if destination is None: destination = file
        self._dataFilesDestination.append(os.path.basename(destination))

        if location == "relative":
            self._dataFiles.append(file)
        else:
            try:
                self._addAbsoluteFile(file,self._dataFiles)
            except:
                if location == "absolute": raise
                # in "auto" mode, if absolute fails consider relative
                else: self._dataFiles.append(file)
            #end
        #end
    #end

    def addConfig(self,file):
        """Add a "configuration" file to the run, a mutable dependency onto which
        Parameters and Variables are written. The path ("file") is converted
        to absolute immediately."""
        self._addAbsoluteFile(file,self._confFiles)

    def addParameter(self,param):
        """Add a parameter to the run. Parameters are written to the configuration
        files before variables."""
        self._parameters.append(param)

    def addExpected(self,file):
        """Add an expected (output) file of the run, the presence of all expected
        files in the working subdirectory indicates that the run succeeded."""
        self._expectedFiles.append(os.path.join(self._workDir,file))

    def setMaxTries(self,num):
        """Sets the maximum number of times a run is re-tried should it fail."""
        self._maxTries = num

    def getParameters(self):
        """Return the list of parameters associated with the run."""
        return self._parameters

    def updateVariables(self,variables):
        """
        Update the set of variables associated with the run. This method is intended
        to be part of the preprocessing done by driver classes. Unlike addParameter,
        users do not need to call it explicitly.
        """
        self._variables.update(variables)

    def initialize(self):
        """
        Initialize the run, create the subdirectory, copy/symlink the data and
        configuration files, and write the parameters and variables to the latter.
        Creates the process object, starting it in detached mode.
        """
        # lazy, a no-op if already initialized
        if self._isIni: return

        try:
            os.mkdir(self._workDir)

            # deploy data files (copy or symlink) under their destination names
            for file, destination in zip(self._dataFiles, self._dataFilesDestination):
                target = os.path.join(self._workDir,destination)
                (shutil.copy,os.symlink)[self._symLinks](os.path.abspath(file),target)

            # copy each config file and write parameters first, then variables
            for file in self._confFiles:
                target = os.path.join(self._workDir,os.path.basename(file))
                shutil.copy(file,target)
                for par in self._parameters:
                    par.writeToFile(target)
                for var in self._variables:
                    var.writeToFile(target)

            self._createProcess()

            self._isIni = True
            self._isRun = False
            self._isError = False
            self._numTries = 0
        except:
            self._isError = True
            raise
        #end
    #end

    # Open fresh stdout/stderr files and launch the shell command (non-blocking).
    def _createProcess(self):
        self._stdout = open(os.path.join(self._workDir,"stdout.txt"),"w")
        self._stderr = open(os.path.join(self._workDir,"stderr.txt"),"w")

        self._process = sp.Popen(self._command,cwd=self._workDir,
                                 shell=True,stdout=self._stdout,stderr=self._stderr)
    #end

    def run(self,timeout=None):
        """Start the process and wait for it to finish."""
        return self._exec(True,timeout)

    def poll(self):
        """Polls the state of the process, does not wait for it to finish."""
        return self._exec(False,None)

    # Common implementation of "run" and "poll".
    # Returns the process return code; recurses to retry failed runs until
    # _maxTries is exhausted, at which point a RuntimeError is raised.
    def _exec(self,wait,timeout):
        if not self._isIni:
            self._isError = True
            raise RuntimeError("Run was not initialized.")
        if self._numTries == self._maxTries:
            self._isError = True
            raise RuntimeError("Run failed.")
        # lazy, return cached return code if the run already finished
        if self._isRun:
            return self._retcode

        if wait:
            self._process.wait(timeout)
            status = True
        else:
            # poll() returns None while the process is still running
            status = self._process.poll() is not None
        #end

        if status:
            self._numTries += 1
            self._retcode = self._process.returncode
            self._isRun = True

            # success is determined by the presence of the expected files
            if not self._success():
                if self._numTries < self._maxTries:
                    # restart the process and try again
                    self.finalize()
                    self._createProcess()
                    self._isIni = True
                #end
                return self._exec(wait,timeout)
            #end
            self._numTries = 0
        #end

        return self._retcode
    #end

    def isIni(self):
        """Return True if the run was initialized."""
        return self._isIni

    def isRun(self):
        """Return True if the run has finished."""
        return self._isRun

    def isError(self):
        """Return True if the run has failed."""
        return self._isError

    def finalize(self):
        """Reset "lazy" flags, close the stdout and stderr of the process."""
        # best effort, the streams may not exist yet (first call from __init__)
        try:
            self._stdout.close()
            self._stderr.close()
        except:
            pass
        self._isIni = False
        self._isRun = False
        self._isError = False
        # sentinel return code meaning "not run"
        self._retcode = -100
    #end

    # check whether expected files were created
    def _success(self):
        for file in self._expectedFiles:
            if not os.path.isfile(file): return False
        return True
    #end
#end
| 8,188 | 33.552743 | 95 | py |
FADO | FADO-master/tools/__init__.py | from tools.file_parser import *
from tools.variable_transformation import *
| 76 | 24.666667 | 43 | py |
FADO | FADO-master/tools/file_parser.py | # Copyright 2019-2023, FADO Contributors (cf. AUTHORS.md)
#
# This file is part of FADO.
#
# FADO is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FADO is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FADO. If not, see <https://www.gnu.org/licenses/>.
import numpy as np
class LabelReplacer:
    """
    Replaces all occurrences of a text label (passed to __init__) by value.
    Values may be numeric or strings, only the first index of arrays is written.

    See also
    --------
    ArrayLabelReplacer, to write entire arrays.
    """
    def __init__(self,label):
        self._label = label

    def write(self,file,value):
        """Substitute every occurrence of the label in "file" by str(value)."""
        # arrays are collapsed to their first entry
        if isinstance(value,np.ndarray): value = value[0]
        text = str(value)

        with open(file) as f:
            updated = [line.replace(self._label,text) for line in f]
        #end
        with open(file,"w") as f:
            f.writelines(updated)
        #end
    #end
#end
class ArrayLabelReplacer:
    """
    Replaces all occurrences of a text label (passed to __init__) by an iterable value.
    The different entries of value are joined by the delimiter passed to __init__.

    See also
    --------
    LabelReplacer, to write scalar numeric values or text.
    """
    def __init__(self,label,delim=","):
        self._label = label
        self._delim = delim

    def write(self,file,value):
        """Substitute every occurrence of the label in "file" by the joined entries of "value"."""
        with open(file) as f:
            lines = f.readlines()

        # Join entries with the delimiter directly. The previous implementation
        # appended a trailing delimiter and removed it with str.strip(delim),
        # which also stripped legitimate trailing characters of the last entry
        # whenever they happened to belong to the delimiter string.
        valueStr = self._delim.join(str(v) for v in value)

        newLines = []
        for line in lines:
            newLines.append(line.replace(self._label,valueStr))
        #end
        with open(file,"w") as f:
            f.writelines(newLines)
        #end
    #end
#end
class PreStringHandler:
    """
    Read or write "delim"-separated values in front of a label (prefix-string),
    which must start the line. Both label and delimiter are passed to __init__.
    When reading the class can only handle the first occurrence of the label,
    when writing every occurrence will be handled.

    Example
    -------
    X= 1, 2, 3
    PreStringHandler("X=") -> [1, 2, 3]
    """
    def __init__(self,label,delim=","):
        self._label = label
        self._delim = delim

    def read(self,file):
        """Return the value(s) following the label on the first matching line,
        a float for a single entry, an ndarray otherwise.

        Raises
        ------
        RuntimeError : If no line starts with the label.
        """
        with open(file) as f:
            lines = f.readlines()

        data = []
        for line in lines:
            if line.startswith(self._label):
                # Slice off the label instead of str.lstrip(label): lstrip treats
                # its argument as a set of characters, which would also strip
                # leading data characters that appear in the label (e.g. digits
                # of a label like "X1=").
                data = line[len(self._label):].strip().split(self._delim)
                break
            #end
        #end
        if not data:
            raise RuntimeError(self._label + " not found.")

        size = len(data)
        if size==1: return float(data[0])

        value = np.ndarray((size,))
        for i in range(size):
            value[i] = float(data[i])
        return value
    #end

    def write(self,file,value):
        """Overwrite the value(s) on every line that starts with the label."""
        with open(file) as f:
            lines = f.readlines()

        # make scalars iterable
        if isinstance(value,float) or isinstance(value,int):
            value = [value]

        newLine = ""
        for i, line in enumerate(lines):
            if line.startswith(self._label):
                # the replacement line is built only once and then reused for
                # every occurrence of the label
                if not newLine:
                    newLine += self._label
                    for val in value:
                        newLine += str(val)+self._delim
                    newLine = newLine[0:-len(self._delim)]+"\n"
                #end
                lines[i] = newLine
            #end
        #end
        with open(file,"w") as f:
            f.writelines(lines)
    #end
#end
class TableReader:
    """
    Reads data (up to 2D arrays) from a table-like file, e.g. CSV.

    Parameters
    ----------
    row   : Row of the table defined by start and end (use None for all rows, -1 for last row).
    col   : Column (same behavior as row).
    start : Row and column (tuple) of the file defining the top left corner of the table.
    end   : Tuple defining the bottom right corner of the table (use None to capture everything).
    delim : The delimiter used to separate columns.

    Example
    -------
    col1  col2  col3
    0     1     2
    3     4     5
    >>> TableReader(1,1,(1,1),(None,None)) -> 5
    >>> TableReader(0,None,(1,0),(2,None)) -> [0, 1, 2]
    """
    def __init__(self,row=0,col=0,start=(0,0),end=(None,None),delim=""):
        self._row = row
        self._col = col
        self._end = end
        self._start = start
        self._delim = delim

    def read(self,file):
        """Parse "file" and return the selected row/column/element of the table.

        Raises
        ------
        RuntimeError : If the rows do not all have the same number of columns.
        """
        with open(file) as f:
            rows = f.readlines()[self._start[0]:self._end[0]]

        table = None
        expectedCols = 0
        for i, rawLine in enumerate(rows):
            # normalize all delimiter characters to whitespace before splitting
            for char in self._delim:
                rawLine = rawLine.replace(char," ")
            cells = rawLine.strip().split()[self._start[1]:self._end[1]]

            if expectedCols == 0:
                # the first non-degenerate row fixes the column count
                expectedCols = len(cells)
                table = np.ndarray((len(rows),expectedCols))
            elif expectedCols != len(cells):
                raise RuntimeError("Data is not in table format.")
            #end
            for j, cell in enumerate(cells):
                table[i,j] = float(cell)
        #end

        # None selects all rows/columns, otherwise index directly
        if self._row is None:
            return table if self._col is None else table[:,self._col]
        if self._col is None:
            return table[self._row,:]
        return table[self._row,self._col]
    #end
#end
class LabeledTableReader(TableReader):
    """
    Reads elements from a column of a table-like file identified by "label".
    The entire file must be in table format, and the label appear on the first row.

    Parameters
    ----------
    label : Title of the column (usually a string).
    delim : Delimiter character separating the columns.
    rang  : Row range, by default return the last value in the column.

    See also
    --------
    TableReader, PreStringHandler
    """
    def __init__(self,label,delim=",",rang=(-1,None)):
        self._label = label
        self._range = rang
        # the base class is configured to skip the header row and keep all columns,
        # the target column is located when read() is called
        super().__init__(None,None,(1,0),(None,None),delim)
    #end

    def read(self,file):
        """Return the entries of the labeled column within the configured row range."""
        # locate the column by matching the label against the header row
        with open(file) as f:
            columnTitles = [title.strip() for title in f.readline().split(self._delim)]
        self._col = columnTitles.index(self._label)

        values = super().read(file)[self._range[0]:self._range[1]]
        # a single entry is returned as a scalar rather than a 1-element array
        return values[0] if values.size == 1 else values
    #end
#end
class TableWriter:
    """
    Writes data (up to 2D arrays) to table-like files.

    Parameters
    ----------
    delim      : Set of characters used to separate the columns of the input data.
    start      : Row column tuple defining the top left corner of the target area in the file.
    end        : Bottom right corner of the target area.
    delimChars : List of all characters used to separate the columns of the target file.

    See also
    --------
    TableReader (start/end work the same way).
    """
    def __init__(self,delim=" ",start=(0,0),end=(None,None),delimChars=""):
        self._end = end
        self._start = start
        self._delim = delim
        self._delimChars = delimChars

    def write(self,file,values):
        """Write the rows of "values" (1D or 2D array) into the target area of "file".

        Raises
        ------
        RuntimeError : If the shape of "values" does not match the target area.
        """
        # load file
        with open(file) as f:
            lines = f.readlines()

        # check if the values are remotely compatible with the file
        if len(lines) < values.shape[0]: return # "soft fail"

        # keep top, bottom, left, and right the same
        newLines = lines[0:self._start[0]]
        footerLines = []
        if self._end[0] is not None: footerLines = lines[self._end[0]:]

        # skip header and footer rows
        lines = lines[self._start[0]:self._end[0]]
        # ignore a trailing blank line
        if not lines[-1].strip(): lines = lines[0:-1]

        if len(lines) != values.shape[0]:
            raise RuntimeError("Data and file have different number of rows.")
        # integer division, the result is a column count used in comparisons
        # (plain "/" yields a float in Python 3)
        numCol = values.size//values.shape[0]

        # process lines
        for (line,row) in zip(lines,values):
            # normalize the file's delimiters to whitespace before splitting
            for char in self._delimChars:
                line = line.replace(char," ")
            tmp = line.strip().split()
            if numCol != len(tmp[self._start[1]:self._end[1]]):
                raise RuntimeError("Data and file have different number of columns.")
            #end
            # reconstruct left and right parts
            newLine = ""
            for string in tmp[0:self._start[1]]:
                newLine += string+self._delim
            # handle case where row is not iterable
            if values.ndim==1: row=[row]
            for val in row:
                newLine += str(val)+self._delim
            if self._end[1] is not None:
                for string in tmp[self._end[1]:]:
                    newLine += string+self._delim
            newLines.append(newLine.strip()+"\n")
        #end

        # write file
        with open(file,"w") as f:
            f.writelines(newLines)
            f.writelines(footerLines)
    #end
#end
| 9,742 | 28.346386 | 99 | py |
FADO | FADO-master/tools/variable_transformation.py | # Copyright 2019-2023, FADO Contributors (cf. AUTHORS.md)
#
# This file is part of FADO.
#
# FADO is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FADO is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FADO. If not, see <https://www.gnu.org/licenses/>.
import numpy as np
class BoundConstraints:
    """
    Creates a parameterization that respects bound constraints on variables.
    Wraps function and gradient callables and clamps (via raised cosine transform)
    the inputs in calls to those objects to the range [lb, ub].
    """
    def __init__(self,fun,grad,lb,ub):
        # wrapped callables
        self._fun = fun
        self._grad = grad
        # lower bound and extent of the feasible interval
        self._lb = lb
        self._range = ub-lb
        # state set by __call__ and consumed by grad (chain-rule factor)
        self._partials = None
        self._y = None
    #end

    def __call__(self,x):
        """Map x onto [lb, ub] via the sine-squared transform."""
        halfPi = 0.5*np.pi
        # y=0 : x=lb, y=pi/2 : x=ub
        self._y = halfPi*(x-self._lb)/self._range
        # derivative of the mapping w.r.t. x, used by grad()
        self._partials = halfPi*np.sin(2.0*self._y)
        return self._lb+self._range*np.sin(self._y)**2.0
    #end

    def inverse(self,x):
        """Inverse of the mapping, returns the unclamped point for a point in [lb, ub]."""
        return np.arcsin(np.sqrt((x-self._lb)/self._range))*2.0/np.pi*self._range+self._lb
    #end

    def fun(self,x):
        """Evaluate the wrapped function at the mapped (clamped) point."""
        return self._fun(self(x))
    #end

    def grad(self,x):
        """Evaluate the wrapped gradient at the mapped point and apply the chain rule."""
        innerGrad = self._grad(self(x))
        return innerGrad*self._partials
    #end
#end
class GradientScale:
    """
    Applies an inconsistent scaling to a gradient (i.e. without
    scaling also the variables and/or the function).
    """
    def __init__(self,grad,scale):
        # wrapped gradient callable and the constant factor applied to it
        self._grad = grad
        self._scale = scale

    def grad(self,x):
        """Evaluate the wrapped gradient at x and scale the result."""
        unscaled = self._grad(x)
        return unscaled*self._scale
    #end
#end
| 2,065 | 28.514286 | 90 | py |
FADO | FADO-master/drivers/base_driver.py | # Copyright 2019-2023, FADO Contributors (cf. AUTHORS.md)
#
# This file is part of FADO.
#
# FADO is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FADO is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FADO. If not, see <https://www.gnu.org/licenses/>.
import os
import shutil
import numpy as np
class DriverBase:
    """Base class for optimization drivers, implements the basic setup interface."""

    # "structs" to store objective and constraint information
    class _Objective:
        def __init__(self,type,function,scale,weight):
            if scale <= 0.0 or weight <= 0.0:
                raise ValueError("Scale and weight must be positive.")
            if type == "min":
                self.scale = scale*weight
            elif type == "max":
                # maximization is handled as minimization of the negated function
                self.scale = -1.0*scale*weight
            else:
                raise ValueError("Type must be 'min' or 'max'.")
            self.function = function
    #end

    class _Constraint:
        def __init__(self,function,scale,bound=-1E20):
            self.scale = scale
            self.bound = bound
            self.function = function
    #end

    class _Monitor:
        def __init__(self,function):
            self.function = function
    #end

    def __init__(self):
        self._variables = []
        self._varScales = None
        self._parameters = []

        # lazy evaluation flags, and current value of the variables
        self._funReady = False
        self._jacReady = False
        self._nVar = 0
        self._x = None

        # functions by role
        self._objectives = []
        self._constraintsEQ = []
        self._constraintsGT = []
        self._monitors = []

        # function values
        self._ofval = None
        self._eqval = None
        self._gtval = None
        self._monval = None

        # map the start index of each variable in the design vector
        self._variableStartMask = None

        # working directory management settings
        self._userDir = ""
        self._workDir = "__WORKDIR__"
        self._dirPrefix = "DSN_"
        self._keepDesigns = True
        self._failureMode = "HARD"
        # log and history file objects (set via setLogger/setHistorian)
        self._logObj = None
        self._logColWidth = 13
        self._hisObj = None
        self._hisDelim = ", "

        # optional user hooks run around function/gradient evaluations
        self._userPreProcessFun = None
        self._userPreProcessGrad = None
        self._userPostProcessFun = None
        self._userPostProcessGrad = None
    #end

    def addObjective(self,type,function,scale=1.0,weight=1.0):
        """
        Add an objective function to the optimization problem.

        Parameters
        ----------
        type     : "min" or "max" for minimization or maximization.
        function : A function object.
        scale    : Scale applied to the function, optimizer will see function*scale.
        weight   : Weight given to the objective, only relevant for multiple objectives.
        """
        self._objectives.append(self._Objective(type,function,scale,weight))

    def addEquality(self,function,target=0.0,scale=1.0):
        """
        Add an equality constraint, function = target, the optimizer will see (function-target)*scale.
        """
        if scale <= 0.0: raise ValueError("Scale must be positive.")
        self._constraintsEQ.append(self._Constraint(function,scale,target))

    def addLowerBound(self,function,bound=0.0,scale=1.0):
        """Add a lower bound inequality constraint."""
        if scale <= 0.0: raise ValueError("Scale must be positive.")
        self._constraintsGT.append(self._Constraint(function,scale,bound))

    def addUpperBound(self,function,bound=0.0,scale=1.0):
        """Add an upper bound inequality constraint."""
        if scale <= 0.0: raise ValueError("Scale must be positive.")
        # the negative scale converts "greater than" into "less than"
        self._constraintsGT.append(self._Constraint(function,-1*scale,bound))

    def addUpLowBound(self,function,lower=-1.0,upper=1.0):
        """Add a range constraint, this is converted into lower/upper bounds."""
        if lower >= upper: raise ValueError("Upper bound must be greater than lower bound.")
        # normalize by the width of the range
        scale = 1.0/(upper-lower)
        self._constraintsGT.append(self._Constraint(function,scale,lower))
        self._constraintsGT.append(self._Constraint(function,-1*scale,upper))

    def addMonitor(self,function):
        """Add a function to monitor its value, does not participate in the optimization."""
        self._monitors.append(self._Monitor(function))

    def setWorkingDirectory(self,dir):
        """Set the name of the working directory where each iteration runs, it should not exist."""
        self._workDir = dir

    def getNumVariables(self):
        """Returns the size of the design vector."""
        N=0
        for var in self._variables: N+=var.getSize()
        return N

    def setLogger(self,obj,width=13):
        """Attach a log file object to the driver."""
        self._logObj = obj
        self._logColWidth = width

    def setHistorian(self,obj,delim=", "):
        """Attach a history file object to the driver, function values printed every iteration."""
        self._hisObj = obj
        self._hisDelim = delim

    # methods to retrieve information in a format that the optimizer understands
    def _getConcatenatedVector(self,name):
        # concatenate the "name" property of all variables into a single vector
        x = np.ndarray((self.getNumVariables(),))
        idx = 0
        for var in self._variables:
            for val in var.get(name):
                x[idx] = val
                idx += 1
            #end
        #end
        return x
    #end

    def getInitial(self):
        """Returns the initial design vector."""
        return self._getConcatenatedVector("Initial")*self._varScales

    def getLowerBound(self):
        """Returns the lower bounds of the variables."""
        return self._getConcatenatedVector("LowerBound")*self._varScales

    def getUpperBound(self):
        """Returns the upper bounds of the variables."""
        return self._getConcatenatedVector("UpperBound")*self._varScales

    # update design variables with the design vector from the optimizer
    def _setCurrent(self,x):
        startIdx = 0
        for var in self._variables:
            endIdx = startIdx+var.getSize()
            # undo the scaling before handing values back to the variable
            var.setCurrent(x[startIdx:endIdx]/var.getScale())
            startIdx = endIdx
        #end
    #end

    # Collect the variables and parameters of a list of functions into
    # the driver's ordered, duplicate-free lists.
    def _getVarsAndParsFromFun(self,functions):
        for obj in functions:
            for var in obj.function.getVariables():
                if var not in self._variables: self._variables.append(var)
            for par in obj.function.getParameters():
                if par not in self._parameters: self._parameters.append(par)
            # inform evaluations about which variables they depend on
            for evl in obj.function.getValueEvalChain():
                evl.updateVariables(obj.function.getVariables())
            for evl in obj.function.getGradientEvalChain():
                evl.updateVariables(obj.function.getVariables())
        #end
    #end

    # build variable and parameter vectors from function data
    def _preprocessVariables(self):
        # build ordered non duplicated lists of variables and parameters
        self._variables = []
        self._parameters = []

        self._getVarsAndParsFromFun(self._objectives)
        self._getVarsAndParsFromFun(self._constraintsEQ)
        self._getVarsAndParsFromFun(self._constraintsGT)
        self._getVarsAndParsFromFun(self._monitors)

        # map the start index of each variable in the design vector
        idx = [0]
        for var in self._variables[0:-1]:
            idx.append(idx[-1]+var.getSize())
        self._variableStartMask = dict(zip(self._variables,idx))

        self._varScales = self._getConcatenatedVector("Scale")

        # initialize current values such that evaluations are triggered on first call
        self._nVar = self.getNumVariables()
        self._x = np.ones([self._nVar,])*1e20

        # store the absolute current path
        self._userDir = os.path.abspath(os.curdir)
    #end

    def setStorageMode(self,keepDesigns=False,dirPrefix="DSN_"):
        """
        Set whether to keep or discard (default) old optimization iterations.

        Parameters
        ----------
        keepDesigns : True to keep all designs.
        dirPrefix   : Prefix used to name folders with old designs.
        """
        self._keepDesigns = keepDesigns
        self._dirPrefix = dirPrefix

    def setFailureMode(self,mode):
        """
        Set the failure behavior, for "HARD" (default) an exception is throw if function evaluations fail,
        for "SOFT" the driver catches exceptions and uses default function values (if they have them).
        The "SOFT" mode is useful if the optimizer does not handle exceptions.
        """
        assert mode == "HARD" or mode == "SOFT", "Mode must be either \"HARD\" (exceptions) or \"SOFT\" (default function values)."
        self._failureMode = mode

    def setUserPreProcessFun(self,callableOrString):
        """Set a preprocessing action executed before evaluating function values."""
        self._userPreProcessFun = callableOrString

    def setUserPreProcessGrad(self,callableOrString):
        """Set a preprocessing action executed before evaluating function gradients."""
        self._userPreProcessGrad = callableOrString

    def setUserPostProcessFun(self,callableOrString):
        """Set a postprocessing action executed after evaluating function values."""
        self._userPostProcessFun = callableOrString

    def setUserPostProcessGrad(self,callableOrString):
        """Set a postprocessing action executed after evaluating function gradients."""
        self._userPostProcessGrad = callableOrString

    # Clear the lazy-evaluation state of all value evaluations.
    def _resetAllValueEvaluations(self):
        for obj in self._objectives:
            obj.function.resetValueEvalChain()
        for obj in self._constraintsEQ:
            obj.function.resetValueEvalChain()
        for obj in self._constraintsGT:
            obj.function.resetValueEvalChain()
        for obj in self._monitors:
            obj.function.resetValueEvalChain()
    #end

    # Clear the lazy-evaluation state of all gradient evaluations.
    def _resetAllGradientEvaluations(self):
        for obj in self._objectives:
            obj.function.resetGradientEvalChain()
        for obj in self._constraintsEQ:
            obj.function.resetGradientEvalChain()
        for obj in self._constraintsGT:
            obj.function.resetGradientEvalChain()
        for obj in self._monitors:
            obj.function.resetGradientEvalChain()
    #end

    # Writes a line to the history file.
    # NOTE(review): self._funEval is expected to be defined by derived classes — confirm.
    def _writeHisLine(self):
        if self._hisObj is None: return
        hisLine = str(self._funEval)+self._hisDelim
        for val in self._ofval:
            hisLine += str(val)+self._hisDelim
        for val in self._eqval:
            hisLine += str(val)+self._hisDelim
        for val in self._gtval:
            hisLine += str(val)+self._hisDelim
        for val in self._monval:
            hisLine += str(val)+self._hisDelim
        hisLine = hisLine.strip(self._hisDelim)+"\n"
        self._hisObj.write(hisLine)
    #end

    # Detect a change in the design vector, reset directories and evaluation state.
    # Returns True if the design changed (i.e. new evaluations are required).
    def _handleVariableChange(self, x):
        assert x.size == self._nVar, "Wrong size of design vector."

        newValues = (abs(self._x-x) > np.finfo(float).eps).any()

        if not newValues: return False

        # otherwise...

        # update the values of the variables
        self._setCurrent(x)
        self._x[()] = x

        # trigger evaluations
        self._funReady = False
        self._jacReady = False
        self._resetAllValueEvaluations()
        self._resetAllGradientEvaluations()

        # manage working directories (archive or discard the previous design)
        os.chdir(self._userDir)
        if os.path.isdir(self._workDir):
            if self._keepDesigns:
                dirName = self._dirPrefix+str(self._funEval).rjust(3,"0")
                if os.path.isdir(dirName): shutil.rmtree(dirName)
                os.rename(self._workDir,dirName)
            else:
                shutil.rmtree(self._workDir)
            #end
        #end
        os.mkdir(self._workDir)

        return True
    #end
#end
| 12,580 | 35.466667 | 131 | py |
FADO | FADO-master/drivers/ipopt_driver.py | # Copyright 2019-2023, FADO Contributors (cf. AUTHORS.md)
#
# This file is part of FADO.
#
# FADO is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FADO is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FADO. If not, see <https://www.gnu.org/licenses/>.
import os
import time
import numpy as np
import ipyopt as opt
from drivers.constrained_optim_driver import ConstrainedOptimizationDriver
class IpoptDriver(ConstrainedOptimizationDriver):
    """
    Driver to use with the Ipopt optimizer via IPyOpt.
    """
    def __init__(self):
        ConstrainedOptimizationDriver.__init__(self)

        # sparse indices of the constraint gradient, for now assumed to be dense
        self._sparseIndices = None

        # the optimization problem
        self._nlp = None
    #end

    def getNLP(self):
        """
        Prepares and returns the optimization problem for Ipopt (an instance of ipyopt.Problem).
        For convenience also does other preprocessing, must be called after all functions are set.
        Do not destroy the driver after obtaining the problem.
        """
        ConstrainedOptimizationDriver.preprocess(self)

        # equality constraints have 0/0 bounds, inequalities 0/+inf
        conLowerBound = np.zeros([self._nCon,])
        conUpperBound = np.zeros([self._nCon,])
        i = len(self._constraintsEQ)
        conUpperBound[i:(i+len(self._constraintsGT))] = 1e20

        # assume row major storage for gradient sparsity
        rg = range(self._nVar * self._nCon)
        self._sparseIndices = (np.array([i // self._nVar for i in rg], dtype=int),
                               np.array([i % self._nVar for i in rg], dtype=int))

        # create the optimization problem
        self._nlp = opt.Problem(self._nVar, self.getLowerBound(), self.getUpperBound(),
                                self._nCon, conLowerBound, conUpperBound, self._sparseIndices, 0,
                                self._eval_f, self._eval_grad_f, self._eval_g, self._eval_jac_g)
        return self._nlp
    #end

    # Method passed to Ipopt to get the objective value,
    # evaluates all functions if necessary.
    def _eval_f(self, x):
        self._evaluateFunctions(x)
        return self._ofval.sum()
    #end

    # Method passed to Ipopt to get the objective gradient, evaluates gradients and
    # functions if necessary, otherwise it simply combines and scales the results.
    def _eval_grad_f(self, x, out):
        assert out.size >= self._nVar, "Wrong size of gradient vector (\"out\")."

        self._jacTime -= time.time()
        try:
            self._evaluateGradients(x)

            os.chdir(self._workDir)

            # weighted sum of the objective gradients, scaled back to optimizer space
            out[()] = 0.0
            for obj in self._objectives:
                out += obj.function.getGradient(self._variableStartMask) * obj.scale
            out /= self._varScales

            # keep reference to result to use as fallback on next iteration if needed
            self._old_grad_f = out
        except:
            if self._failureMode == "HARD": raise
            if self._old_grad_f is None: out[()] = 0.0
            else: out[()] = self._old_grad_f
        #end

        if not self._parallelEval:
            self._runAction(self._userPostProcessGrad)

        self._jacTime += time.time()
        os.chdir(self._userDir)

        return out
    #end

    # Method passed to Ipopt to expose the constraint vector, see also "_eval_f"
    def _eval_g(self, x, out):
        assert out.size >= self._nCon, "Wrong size of constraint vector (\"out\")."

        self._evaluateFunctions(x)

        # equality constraints first, then inequalities
        i = 0
        out[i:(i+len(self._constraintsEQ))] = self._eqval
        i += len(self._constraintsEQ)
        out[i:(i+len(self._constraintsGT))] = self._gtval

        return out
    #end

    # Method passed to Ipopt to expose the constraint Jacobian, see also "_eval_grad_f".
    def _eval_jac_g(self, x, out):
        assert out.size >= self._nCon*self._nVar, "Wrong size of constraint Jacobian vector (\"out\")."

        self._jacTime -= time.time()
        try:
            self._evaluateGradients(x)

            os.chdir(self._workDir)

            # the Jacobian is stored row major (one constraint gradient after another)
            i = 0
            mask = self._variableStartMask

            for con in self._constraintsEQ:
                out[i:(i+self._nVar)] = con.function.getGradient(mask) * con.scale / self._varScales
                i += self._nVar
            #end

            for (con,f) in zip(self._constraintsGT, self._gtval):
                # in "as needed" mode, inactive constraints get a zero gradient
                if f < 0.0 or not self._asNeeded:
                    out[i:(i+self._nVar)] = con.function.getGradient(mask) * con.scale / self._varScales
                else:
                    out[i:(i+self._nVar)] = 0.0
                #end
                i += self._nVar
            #end

            # keep reference to result to use as fallback on next iteration if needed
            self._old_jac_g = out
        except:
            if self._failureMode == "HARD": raise
            if self._old_jac_g is None: out[()] = 0.0
            else: out[()] = self._old_jac_g
        #end

        if not self._parallelEval:
            self._runAction(self._userPostProcessGrad)

        self._jacTime += time.time()
        os.chdir(self._userDir)

        return out
    #end
#end
| 5,641 | 33.402439 | 104 | py |
FADO | FADO-master/drivers/constrained_optim_driver.py | # Copyright 2019-2023, FADO Contributors (cf. AUTHORS.md)
#
# This file is part of FADO.
#
# FADO is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FADO is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FADO. If not, see <https://www.gnu.org/licenses/>.
import os
import time
import numpy as np
from drivers.parallel_eval_driver import ParallelEvalDriver
class ConstrainedOptimizationDriver(ParallelEvalDriver):
    """
    Intermediate class to define common methods between the other constrained optimization drivers.
    """
    def __init__(self):
        ParallelEvalDriver.__init__(self)

        # counters, flags, sizes...
        self._nCon = 0

        # list of constraints and variable bounds
        self._constraints = []
        self._bounds = []

        # Current and old values of the gradients, as fallback in case of evaluation failure
        self._grad_f = None
        self._old_grad_f = None
        self._jac_g = None
        self._old_jac_g = None
    #end

    def update(self):
        """Update the problem parameters (triggers new evaluations)."""
        for par in self._parameters: par.increment()

        # invalidate the cached design vector so all evaluations re-run
        self._x[()] = 1e20
        self._funReady = False
        self._jacReady = False
        self._resetAllValueEvaluations()
        self._resetAllGradientEvaluations()

        if self._hisObj is not None:
            self._hisObj.write("Parameter update.\n")
    #end

    def setConstraintGradientEvalMode(self, onlyWhenActive=False):
        """
        Set the evaluation mode for constraint gradients.
        If onlyWhenActive==True the driver will not evaluate the gradients of
        inactive constraints, this may be acceptable for some optimizers or if
        the gradients are known to be zero in the inactive region.
        """
        self._asNeeded = onlyWhenActive
    #end

    # Basic preparation of the optimization problem
    def preprocess(self):
        self._preprocessVariables()

        # allocate storage for the function values, one entry per function
        self._ofval = np.zeros((len(self._objectives),))
        self._eqval = np.zeros((len(self._constraintsEQ),))
        self._gtval = np.zeros((len(self._constraintsGT),))
        self._monval = np.zeros((len(self._monitors),))

        # write the header for the history file
        if self._hisObj is not None:
            header = "ITER"+self._hisDelim
            for obj in self._objectives:
                header += obj.function.getName()+self._hisDelim
            for obj in self._constraintsEQ:
                header += obj.function.getName()+self._hisDelim
            for obj in self._constraintsGT:
                header += obj.function.getName()+self._hisDelim
            for obj in self._monitors:
                header += obj.function.getName()+self._hisDelim
            header = header.strip(self._hisDelim)+"\n"
            self._hisObj.write(header)
        #end

        # store number of constraints
        self._nCon = len(self._constraintsEQ) + len(self._constraintsGT)
    #end
#end
| 3,450 | 34.214286 | 99 | py |
FADO | FADO-master/drivers/exterior_penalty.py | # Copyright 2019-2023, FADO Contributors (cf. AUTHORS.md)
#
# This file is part of FADO.
#
# FADO is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FADO is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FADO. If not, see <https://www.gnu.org/licenses/>.
import os
import time
import copy
import numpy as np
from drivers.parallel_eval_driver import ParallelEvalDriver
class ExteriorPenaltyDriver(ParallelEvalDriver):
    """
    Exterior Penalty method wrapper, exposes a penalized function and its gradient
    to an optimizer via methods fun(x) and grad(x).
    Implements the logic to ramp up/down the penalty factors for each constraint.

    Parameters
    ----------
    tol : Constraint violation tolerance.
    freq : Frequency for auto updating the penalty factors, 0 disables auto update.
    rini : Initial penalty factor.
    rmax : Maximum penalty factor.
    factorUp : Multiplicative increase rate for penalties of constraints out of tolerance.
    factorDown : Multiplicative decrease rate for penalties of inactive constraints.
    """
    def __init__(self, tol, freq=40, rini=8, rmax=1024, factorUp=4, factorDown=0.5):
        ParallelEvalDriver.__init__(self, True)

        # parameters of the method
        self._tol = tol
        self._freq = freq
        self._rini = rini
        self._rmax = rmax
        self._cup = factorUp
        self._cdown = factorDown

        # constraint penalties (one factor per constraint, sized lazily)
        self._eqpen = None
        self._gtpen = None

        # gradient vector (old value kept as fallback for failed evaluations)
        self._grad = None
        self._old_grad = None

        # timers, counters, flags
        self._isInit = False
        self._isFeasible = False
        self._logRowFormat = ""
    #end

    def preprocessVariables(self):
        """Setup method that must be called after all functions are added to the driver."""
        self._preprocessVariables()

    def preprocess(self):
        """Alias for preprocessVariables."""
        self._preprocessVariables()

    # method for lazy initialization, sizes the value/penalty/gradient arrays
    # and writes the log file header on the first call.
    def _initialize(self):
        if self._isInit: return

        self._ofval = np.zeros((len(self._objectives),))
        self._eqval = np.zeros((len(self._constraintsEQ),))
        self._gtval = np.zeros((len(self._constraintsGT),))
        self._monval = np.zeros((len(self._monitors),))

        self._eqpen = np.ones((len(self._constraintsEQ),))*self._rini
        self._gtpen = np.ones((len(self._constraintsGT),))*self._rini

        self._grad = np.zeros((self.getNumVariables(),))
        self._old_grad = copy.deepcopy(self._grad)

        # write the header for the log file and set the format
        if self._logObj is not None:
            w = self._logColWidth
            headerData = ["FUN EVAL","FUN TIME","GRAD EVAL","GRAD TIME","FEASIBLE"]
            # placeholders "W" (width) and "P" (precision) are replaced below
            self._logRowFormat = "{:>W}"+"{:>W.3e}{:>W}"*2
            for obj in self._objectives:
                headerData.append(obj.function.getName(w-1))
                self._logRowFormat += "{:>W.Pg}"
            for obj in self._constraintsEQ:
                headerData.append(obj.function.getName(w-1))
                headerData.append("PEN COEFF")
                self._logRowFormat += "{:>W.Pg}"*2
            for obj in self._constraintsGT:
                headerData.append(obj.function.getName(w-1))
                headerData.append("PEN COEFF")
                self._logRowFormat += "{:>W.Pg}"*2
            for obj in self._monitors:
                headerData.append(obj.function.getName(w-1))
                self._logRowFormat += "{:>W.Pg}"
            # right-align, set width in format and a precision that fits it
            self._logRowFormat = self._logRowFormat.replace("W",str(w))+"\n"
            self._logRowFormat = self._logRowFormat.replace("P",str(min(8,w-7)))
            header = ""
            for data in headerData:
                header += data.rjust(w)
            self._logObj.write(header+"\n")
        #end

        self._isInit = True
    #end

    # write one row of the log file (counters, feasibility, values, penalties)
    def _writeLogLine(self):
        if self._logObj is None: return
        data = [self._funEval, self._funTime, self._jacEval, self._jacTime]
        data.append(("NO","YES")[self._isFeasible])
        for f in self._ofval:
            data.append(f)
        for (g,r) in zip(self._eqval,self._eqpen):
            data.append(g)
            data.append(r)
        for (g,r) in zip(self._gtval,self._gtpen):
            data.append(g)
            data.append(r)
        for f in self._monval:
            data.append(f)
        self._logObj.write(self._logRowFormat.format(*data))
    #end

    def fun(self,x):
        """Evaluate the penalized function at "x"."""
        self._initialize()
        self._evaluateFunctions(x)

        # combine results: objectives + quadratic penalties, ">" constraints
        # only contribute when violated (value below zero).
        f = self._ofval.sum()
        f += (self._eqpen*self._eqval**2).sum()
        for (g,r) in zip(self._gtval,self._gtpen): f += r*min(0.0,g)*g

        return f
    #end

    def grad(self,x):
        """Evaluate the gradient of the penalized function at "x"."""
        try:
            self._evaluateGradients(x)
            return self._grad
        # BUGFIX: was a bare "except:", which also swallowed KeyboardInterrupt
        # and SystemExit; only evaluation errors should trigger the fallback.
        except Exception:
            if self._failureMode == "HARD": raise
            return self._old_grad
        #end
    #end

    # this method decorates the parent method by combining the gradient
    def _evaluateGradients(self,x):
        self._initialize()

        # if nothing is evaluated return without doing more work
        if not ParallelEvalDriver._evaluateGradients(self,x): return

        # evaluate all required gradients (skip those where the constraint is not active)
        self._jacTime -= time.time()
        os.chdir(self._workDir)

        self._grad[()] = 0.0
        for obj in self._objectives:
            self._grad += obj.function.getGradient(self._variableStartMask)*obj.scale
        for (obj,f,r) in zip(self._constraintsEQ,self._eqval,self._eqpen):
            self._grad += 2.0*r*f*obj.function.getGradient(self._variableStartMask)*obj.scale
        for (obj,f,r) in zip(self._constraintsGT,self._gtval,self._gtpen):
            if f < 0.0:
                self._grad += 2.0*r*f*obj.function.getGradient(self._variableStartMask)*obj.scale
        self._grad /= self._varScales

        if not self._parallelEval:
            self._runAction(self._userPostProcessGrad)

        self._jacTime += time.time()
        os.chdir(self._userDir)

        # update penalties and params (evaluating the gradient concludes an outer iteration)
        if self._freq > 0:
            if self._jacEval % self._freq == 0: self.update()

        # make copy to use as fallback
        self._old_grad[()] = self._grad
    #end

    def update(self,paramsIfFeasible=False):
        """
        If a constraint is active and above tolerance increase the penalties, otherwise decrease them
        (minimum and maximum are constrained).
        Increment all Parameters associated with the Functions of the problem (via the evaluation steps).
        If paramsIfFeasible=True the Parameter update only takes place if the current design is feasible.
        """
        self._isFeasible = True

        # equality (always active)
        for i in range(self._eqpen.size):
            if abs(self._eqval[i]) > self._tol:
                self._eqpen[i] = min(self._eqpen[i]*self._cup,self._rmax)
                self._isFeasible = False

        # lower bound
        for i in range(self._gtpen.size):
            if self._gtval[i] < -self._tol:
                self._gtpen[i] = min(self._gtpen[i]*self._cup,self._rmax)
                self._isFeasible = False
            elif self._gtval[i] > 0.0:
                self._gtpen[i] = max(self._gtpen[i]*self._cdown,self._rini)

        # update the values of the parameters
        if not paramsIfFeasible or self._isFeasible:
            for par in self._parameters:
                par.increment()

        # trigger new evaluations
        self._x[()] = 1e20
        self._funReady = False
        self._jacReady = False
        self._resetAllValueEvaluations()
        self._resetAllGradientEvaluations()

        # log update
        self._writeLogLine()
    #end

    def feasibleDesign(self):
        """Return True if all constraints meet the tolerance."""
        return self._isFeasible
    #end
#end
| 9,411 | 35.48062 | 105 | py |
FADO | FADO-master/drivers/scipy_driver.py | # Copyright 2019-2023, FADO Contributors (cf. AUTHORS.md)
#
# This file is part of FADO.
#
# FADO is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FADO is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FADO. If not, see <https://www.gnu.org/licenses/>.
import os
import time
import numpy as np
from drivers.constrained_optim_driver import ConstrainedOptimizationDriver
class ScipyDriver(ConstrainedOptimizationDriver):
    """
    Driver to use with the SciPy optimizers, especially the constrained ones.
    """
    def __init__(self):
        ConstrainedOptimizationDriver.__init__(self)
    #end

    def preprocess(self):
        """
        Prepares the optimization problem, including preprocessing variables,
        and setting up the lists of constraints and variable bounds that SciPy
        needs. Must be called after all functions are added to the driver.
        """
        ConstrainedOptimizationDriver.preprocess(self)

        # small callable that binds a constraint index to a common callback.
        class _fun:
            def __init__(self,fun,idx):
                self._f = fun
                self._i = idx
            def __call__(self,x):
                return self._f(x,self._i)
        #end

        # setup the constraint list, the callbacks are the same for all
        # constraints, an index argument (i) is used to distinguish them.
        self._constraints = []
        for i in range(self._nCon):
            self._constraints.append({'type' : ('ineq','eq')[i<len(self._constraintsEQ)],
                                      'fun' : _fun(self._eval_g,i),
                                      'jac' : _fun(self._eval_jac_g,i)})
        #end

        # variable bounds, one (lower, upper) row per variable
        self._bounds = np.array((self.getLowerBound(),self.getUpperBound()),float).transpose()

        # size the gradient and constraint jacobian
        self._grad_f = np.zeros((self._nVar,))
        self._old_grad_f = np.zeros((self._nVar,))
        self._jac_g = np.zeros((self._nVar,self._nCon))
        self._old_jac_g = np.zeros((self._nVar,self._nCon))
    #end

    def getConstraints(self):
        """Returns the constraint list that can be passed to SciPy."""
        return self._constraints

    def getBounds(self):
        """Return the variable bounds in a format compatible with SciPy."""
        return self._bounds

    def fun(self, x):
        """Method passed to SciPy to get the objective function value."""
        # Evaluates all functions if necessary.
        self._evaluateFunctions(x)
        return self._ofval.sum()
    #end

    def grad(self, x):
        """Method passed to SciPy to get the objective function gradient."""
        # Evaluates gradients and functions if necessary, otherwise it
        # simply combines and scales the results.
        self._jacTime -= time.time()
        try:
            self._evaluateGradients(x)

            os.chdir(self._workDir)

            self._grad_f[()] = 0.0
            for obj in self._objectives:
                self._grad_f += obj.function.getGradient(self._variableStartMask) * obj.scale
            self._grad_f /= self._varScales

            # keep copy of result to use as fallback on next iteration if needed
            self._old_grad_f[()] = self._grad_f
        # BUGFIX: was a bare "except:", which also swallowed KeyboardInterrupt
        # and SystemExit; only evaluation errors should trigger the fallback.
        except Exception:
            if self._failureMode == "HARD": raise
            self._grad_f[()] = self._old_grad_f
        #end

        if not self._parallelEval:
            self._runAction(self._userPostProcessGrad)

        self._jacTime += time.time()
        os.chdir(self._userDir)

        return self._grad_f
    #end

    # Method passed to SciPy to expose the constraint vector.
    def _eval_g(self, x, idx):
        self._evaluateFunctions(x)

        if idx < len(self._constraintsEQ):
            out = self._eqval[idx]
        else:
            out = self._gtval[idx-len(self._constraintsEQ)]
        #end

        return out
    #end

    # Method passed to SciPy to expose the constraint Jacobian.
    def _eval_jac_g(self, x, idx):
        self._jacTime -= time.time()
        try:
            self._evaluateGradients(x)

            os.chdir(self._workDir)

            mask = self._variableStartMask

            if idx < len(self._constraintsEQ):
                con = self._constraintsEQ[idx]
                f = -1.0 # for purposes of lazy evaluation equality is always active
            else:
                con = self._constraintsGT[idx-len(self._constraintsEQ)]
                f = self._gtval[idx-len(self._constraintsEQ)]
            #end

            if f < 0.0 or not self._asNeeded:
                self._jac_g[:,idx] = con.function.getGradient(mask) * con.scale / self._varScales
            else:
                self._jac_g[:,idx] = 0.0
            #end

            # keep reference to result to use as fallback on next iteration if needed
            self._old_jac_g[:,idx] = self._jac_g[:,idx]
        # BUGFIX: was a bare "except:" (see note in grad()).
        except Exception:
            if self._failureMode == "HARD": raise
            self._jac_g[:,idx] = self._old_jac_g[:,idx]
        #end

        if not self._parallelEval:
            self._runAction(self._userPostProcessGrad)

        self._jacTime += time.time()
        os.chdir(self._userDir)

        return self._jac_g[:,idx]
    #end
#end
| 5,647 | 32.820359 | 97 | py |
FADO | FADO-master/drivers/__init__.py | from drivers.exterior_penalty import *
from drivers.scipy_driver import *
# Import IpOpt driver if possible.
# The IpOpt driver depends on the optional "ipyopt" package; skip it cleanly
# when that dependency is missing. BUGFIX: the bare "except:" also hid
# KeyboardInterrupt/SystemExit and genuine bugs inside ipopt_driver.
try: from drivers.ipopt_driver import *
except ImportError: pass
| 162 | 26.166667 | 39 | py |
FADO | FADO-master/drivers/parallel_eval_driver.py | # Copyright 2019-2023, FADO Contributors (cf. AUTHORS.md)
#
# This file is part of FADO.
#
# FADO is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FADO is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FADO. If not, see <https://www.gnu.org/licenses/>.
import os
import time
import subprocess as sp
from drivers.base_driver import DriverBase
class ParallelEvalDriver(DriverBase):
    """
    Intermediate class that adds parallel evaluation capabilities to the base driver.
    In parallel mode, the evaluation steps of the functions are started asynchronously
    as soon as all their dependencies are met.

    Parameters
    ----------
    asNeeded: If True, the gradients of constraints are only evaluated if they are
              active, this is possible for the exterior penalty driver.
    """
    def __init__(self, asNeeded = False):
        DriverBase.__init__(self)

        # timers, counters, etc.
        self._funTime = 0
        self._jacTime = 0
        self._funEval = 0
        self._jacEval = 0

        # variables for parallelization of evaluations
        self._asNeeded = asNeeded
        self._parallelEval = False
        self._funEvalGraph = None
        self._jacEvalGraph = None
        self._waitTime = 10.0
    #end

    def setEvaluationMode(self,parallel=True,waitTime=10.0):
        """
        Set parallel or sequential (default) evaluation modes. In parallel mode the
        driver will check if it can start new evaluations every "waitTime" seconds.
        Builds the evaluation graphs (dependencies) for parallel execution.
        """
        self._parallelEval = parallel
        if not parallel: return # no need to build graphs
        self._waitTime = waitTime

        # get all unique evaluation steps
        valEvals = set()
        jacEvals = set()
        def _addEvals(flist,vlist,jlist):
            for obj in flist:
                vlist.update(obj.function.getValueEvalChain())
                jlist.update(obj.function.getGradientEvalChain())
            #end
        #end
        _addEvals(self._objectives   ,valEvals,jacEvals)
        _addEvals(self._constraintsEQ,valEvals,jacEvals)
        _addEvals(self._constraintsGT,valEvals,jacEvals)
        _addEvals(self._monitors     ,valEvals,jacEvals)

        # for each unique evaluation list its direct dependencies
        self._funEvalGraph = dict(zip(valEvals,[set() for i in range(len(valEvals))]))
        self._jacEvalGraph = dict(zip(jacEvals,[set() for i in range(len(jacEvals))]))

        def _addDependencies(flist,funGraph,jacGraph):
            for obj in flist:
                evals = obj.function.getValueEvalChain()
                for i in range(1,len(evals)):
                    funGraph[evals[i]].add(evals[i-1])

                evals = obj.function.getGradientEvalChain()
                for i in range(1,len(evals)):
                    jacGraph[evals[i]].add(evals[i-1])
            #end
        #end
        _addDependencies(self._objectives   ,self._funEvalGraph,self._jacEvalGraph)
        _addDependencies(self._constraintsEQ,self._funEvalGraph,self._jacEvalGraph)
        _addDependencies(self._constraintsGT,self._funEvalGraph,self._jacEvalGraph)
        _addDependencies(self._monitors     ,self._funEvalGraph,self._jacEvalGraph)
    #end

    # run the active evaluations of a dependency graph
    def _evalInParallel(self,dependGraph,active):
        # to avoid exiting with dangling evaluations we need to catch
        # all exceptions and throw when the infinite loop finishes
        error = False
        completed = lambda evl: evl.isRun() or evl.isError()
        while True:
            allRun = True

            for evl,depList in dependGraph.items():
                if not active[evl]: continue

                # ensure all dependencies are active
                for dep in depList:
                    active[dep] = True

                # either running or finished, move on
                if evl.isIni() or completed(evl):
                    try:
                        evl.poll() # (starts or updates internal state)
                        allRun &= completed(evl)
                    # BUGFIX: was a bare "except:", which would also latch
                    # KeyboardInterrupt/SystemExit into a generic failure.
                    except Exception:
                        error = True
                    #end
                    continue
                #end

                allRun &= completed(evl)

                # if dependencies are met start evaluation, error is considered
                # as "met" otherwise the outer loop would never exit
                for dep in depList:
                    if not completed(dep): break
                else:
                    try:
                        evl.initialize()
                        evl.poll()
                    # BUGFIX: was a bare "except:" (see note above).
                    except Exception:
                        error = True
                    #end
                #end
            #end
            if allRun: break
            time.sleep(self._waitTime)
        #end
        if error: raise RuntimeError("Evaluations failed.")
    #end

    # run evaluations extracting maximum parallelism
    def _evalFunInParallel(self):
        self._funTime -= time.time()

        # all function evaluations are active by definition
        active = dict(zip(self._funEvalGraph.keys(), [True]*len(self._funEvalGraph)))
        self._evalInParallel(self._funEvalGraph, active)

        self._funTime += time.time()
    #end

    # same for gradients but having in mind which functions are active
    def _evalJacInParallel(self):
        self._jacTime -= time.time()

        # determine what evaluations are active based on functions
        active = dict(zip(self._jacEvalGraph.keys(), [False]*len(self._jacEvalGraph)))

        for obj in self._objectives:
            for evl in obj.function.getGradientEvalChain():
                active[evl] = True

        for obj in self._constraintsEQ:
            for evl in obj.function.getGradientEvalChain():
                active[evl] = True

        for (obj,f) in zip(self._constraintsGT,self._gtval):
            if f < 0.0 or not self._asNeeded:
                for evl in obj.function.getGradientEvalChain():
                    active[evl] = True

        # gradients are not needed for monitor functions

        self._evalInParallel(self._jacEvalGraph, active)

        self._jacTime += time.time()
    #end

    # runs a pre/post processing user action (shell command string or callable)
    def _runAction(self, action):
        if action is None: return
        os.chdir(self._userDir)
        if isinstance(action,str):
            sp.call(action,shell=True)
        else:
            action()
        #end
    #end

    # Evaluate all functions (objectives and constraints), immediately
    # retrieves and stores the results after shifting and scaling.
    def _evaluateFunctions(self, x):
        self._handleVariableChange(x)

        # lazy evaluation
        if self._funReady: return False

        self._runAction(self._userPreProcessFun)

        os.chdir(self._workDir)
        if self._parallelEval:
            try:
                self._evalFunInParallel()
            # BUGFIX: was a bare "except:", narrowed so interrupts propagate.
            except Exception:
                if self._failureMode == "HARD": raise
        #end

        self._funEval += 1
        self._funTime -= time.time()

        def fetchValues(dst, src):
            for i, obj in enumerate(src):
                try:
                    dst[i] = obj.function.getValue()
                # BUGFIX: was a bare "except:"; the default-value fallback
                # should only apply to evaluation errors.
                except Exception:
                    if obj.function.hasDefaultValue() and self._failureMode == "SOFT":
                        dst[i] = obj.function.getDefaultValue()
                    else:
                        raise
                #end
            #end
        #end

        fetchValues(self._ofval, self._objectives)
        fetchValues(self._eqval, self._constraintsEQ)
        fetchValues(self._gtval, self._constraintsGT)
        fetchValues(self._monval, self._monitors)

        self._funTime += time.time()

        # monitor convergence (raw function values)
        self._writeHisLine()

        # shift constraints and scale as required
        for i, obj in enumerate(self._objectives):
            self._ofval[i] *= obj.scale

        for i, obj in enumerate(self._constraintsEQ):
            self._eqval[i] = (self._eqval[i] - obj.bound) * obj.scale

        for i, obj in enumerate(self._constraintsGT):
            self._gtval[i] = (self._gtval[i] - obj.bound) * obj.scale

        self._runAction(self._userPostProcessFun)

        os.chdir(self._userDir)

        self._funReady = True

        return True
    #end

    # Evaluates all gradients in parallel execution mode, otherwise
    # it only runs the user preprocessing and the execution takes place
    # when the results are read in "function.getGradient".
    def _evaluateGradients(self, x):
        # we assume that evaluating the gradients requires the functions
        self._evaluateFunctions(x)

        # lazy evaluation
        if self._jacReady: return False

        self._runAction(self._userPreProcessGrad)

        os.chdir(self._workDir)

        # evaluate everything, either in parallel or sequentially,
        # in the latter case the evaluations occur when retrieving the values
        if self._parallelEval:
            self._evalJacInParallel()
            self._runAction(self._userPostProcessGrad)
        #end

        os.chdir(self._userDir)

        self._jacReady = True

        self._jacEval += 1

        return True
    #end
#end
| 9,839 | 33.647887 | 86 | py |
FADO | FADO-master/examples/example4_SU2/example.py | # Shape optimization of fluid structure interaction
from FADO import *
import subprocess

# extract the meshes/config templates shipped with the example
subprocess.call("unzip -o data.zip",shell=True)

# Design variables ----------------------------------------------------- #

# 17 FFD control point displacements, written into the "__FFD_PTS__" label,
# scaled by 1/0.03 and bounded to +/- 0.03
x0 = np.zeros((17,))
ffd = InputVariable(x0,ArrayLabelReplacer("__FFD_PTS__"),0,(1/0.03),-0.03,0.03)

# Parameters ----------------------------------------------------------- #

# switch from direct to adjoint mode and adapt settings
# (uncomments the "%__DIRECT__"/"%__ADJOINT__" lines of the config template)
enable_direct = Parameter([""], LabelReplacer("%__DIRECT__"))
enable_adjoint = Parameter([""], LabelReplacer("%__ADJOINT__"))

# switch input mesh to perform deformation
mesh_in = Parameter(["MESH_FILENAME= mesh_ffd.su2"],\
          LabelReplacer("MESH_FILENAME= mesh_def.su2"))

# switch from compliance to lift or drag
func_drag = Parameter(["OBJECTIVE_FUNCTION= DRAG"],\
            LabelReplacer("OBJECTIVE_FUNCTION= TOPOL_COMPLIANCE"))
func_lift = Parameter(["OBJECTIVE_FUNCTION= LIFT"],\
            LabelReplacer("OBJECTIVE_FUNCTION= TOPOL_COMPLIANCE"))
func_mom = Parameter(["OBJECTIVE_FUNCTION= MOMENT_Z"],\
           LabelReplacer("OBJECTIVE_FUNCTION= TOPOL_COMPLIANCE"))

# Evaluations ---------------------------------------------------------- #

# external SU2 commands for deformation, geometry, primal, and adjoint runs
def_command = "SU2_DEF config.cfg"
geo_command = "SU2_GEO config_geo.cfg"
dir_command = "SU2_CFD config.cfg"
adj_command = "mpirun -n 4 SU2_CFD_AD config.cfg && mpirun -n 4 SU2_DOT_AD config.cfg"

max_tries = 1

# mesh deformation
deform = ExternalRun("DEFORM",def_command,True)
deform.setMaxTries(max_tries)
deform.addConfig("config.cfg")
deform.addConfig("configFlow.cfg")
deform.addConfig("configFEA.cfg")
deform.addData("mesh_ffd.su2")
deform.addExpected("mesh_def.su2")
deform.addParameter(enable_direct)
deform.addParameter(mesh_in)

# geometric properties
geometry = ExternalRun("GEOMETRY",geo_command,True)
geometry.setMaxTries(max_tries)
geometry.addConfig("config_geo.cfg")
geometry.addConfig("configFlow.cfg")
geometry.addConfig("configFEA.cfg")
geometry.addData("DEFORM/mesh_def.su2")
geometry.addExpected("of_func.csv")

# direct run
direct = ExternalRun("DIRECT",dir_command,True)
direct.setMaxTries(max_tries)
direct.addConfig("config.cfg")
direct.addConfig("configFlow.cfg")
direct.addConfig("configFEA.cfg")
direct.addData("DEFORM/mesh_def.su2")
direct.addExpected("solution_0.dat")
direct.addParameter(enable_direct)
def makeAdjRun(name, func=None):
    """Build the adjoint ExternalRun for one function, optionally adding the
    Parameter that selects that function in the config template."""
    run = ExternalRun(name, adj_command, True)
    run.setMaxTries(max_tries)
    for cfg in ("config.cfg", "configFlow.cfg", "configFEA.cfg"):
        run.addConfig(cfg)
    for dataFile in ("DEFORM/mesh_def.su2",
                     "DIRECT/solution_0.dat",
                     "DIRECT/solution_1.dat"):
        run.addData(dataFile)
    run.addExpected("of_grad.dat")
    run.addParameter(enable_adjoint)
    if func is not None:
        run.addParameter(func)
    return run
#end
# adjoints of lift
lift_adj = makeAdjRun("LIFT_ADJ",func_lift)

# adjoints of drag
drag_adj = makeAdjRun("DRAG_ADJ",func_drag)

# adjoints of moment
mom_adj = makeAdjRun("MOM_ADJ",func_mom)

# adjoints of compliance
comp_adj = makeAdjRun("COMP_ADJ")

# Functions ------------------------------------------------------------ #

# each Function reads its value from an SU2 history file and its gradient
# from the matching adjoint run; default values are used on soft failures

lift = Function("lift","DIRECT/history_0.csv",LabeledTableReader('"CL"'))
lift.addInputVariable(ffd,"LIFT_ADJ/of_grad.dat",TableReader(None,0,(1,0)))
lift.addValueEvalStep(deform)
lift.addValueEvalStep(direct)
lift.addGradientEvalStep(lift_adj)
lift.setDefaultValue(0.0)

drag = Function("drag","DIRECT/history_0.csv",LabeledTableReader('"CD"'))
drag.addInputVariable(ffd,"DRAG_ADJ/of_grad.dat",TableReader(None,0,(1,0)))
drag.addValueEvalStep(deform)
drag.addValueEvalStep(direct)
drag.addGradientEvalStep(drag_adj)
drag.setDefaultValue(1.0)

mom = Function("mom","DIRECT/history_0.csv",LabeledTableReader('"CMz"'))
mom.addInputVariable(ffd,"MOM_ADJ/of_grad.dat",TableReader(None,0,(1,0)))
mom.addValueEvalStep(deform)
mom.addValueEvalStep(direct)
mom.addGradientEvalStep(mom_adj)
mom.setDefaultValue(-1.0)

comp = Function("comp","DIRECT/history_1.csv",LabeledTableReader('"TopComp"'))
comp.addInputVariable(ffd,"COMP_ADJ/of_grad.dat",TableReader(None,0,(1,0)))
comp.addValueEvalStep(deform)
comp.addValueEvalStep(direct)
comp.addGradientEvalStep(comp_adj)
comp.setDefaultValue(100.0)

min_thick = Function("t_min","GEOMETRY/of_func.csv",LabeledTableReader('"AIRFOIL_THICKNESS"'))
min_thick.addInputVariable(ffd,"GEOMETRY/of_grad.csv",LabeledTableReader('"AIRFOIL_THICKNESS"',',',(0,None)))
min_thick.addValueEvalStep(deform)
min_thick.addValueEvalStep(geometry)
min_thick.setDefaultValue(0.0)

# Driver --------------------------------------------------------------- #

# minimize drag subject to lift/moment/compliance/thickness constraints
# (second argument of the bounds is the constraint scale)
driver = ScipyDriver()
driver.addObjective("min", drag, 125.0)
driver.addLowerBound(lift, 0.4, 2.5)
driver.addLowerBound(mom, -0.01, 100)
driver.addUpperBound(comp, 5.0, 0.2)
driver.addLowerBound(min_thick, 0.03, 30)

driver.setWorkingDirectory("OPTIM")
driver.setEvaluationMode(False,2.0)
driver.setStorageMode(True,"DSN_")
driver.setFailureMode("SOFT")

# line-buffered history file so progress is visible while running
his = open("optim.his","w",1)
driver.setHistorian(his)

# Optimization, SciPy -------------------------------------------------- #

import scipy.optimize

driver.preprocess()
x = driver.getInitial()

options = {'disp': True, 'ftol': 1e-7, 'maxiter': 100}

optimum = scipy.optimize.minimize(driver.fun, x, method="SLSQP", jac=driver.grad,\
          constraints=driver.getConstraints(), bounds=driver.getBounds(), options=options)

his.close()
| 5,417 | 31.25 | 109 | py |
FADO | FADO-master/examples/ipopt/topology_su2.py | # Plain topology optimization example
from FADO import *
import subprocess
import ipyopt

# reuse the mesh/config data from example 1
subprocess.call("unzip -o ../example1_SU2/data.zip",shell=True)

# Design variables
# NOTE(review): 1600 values written as a space-separated table, initial value
# 0.5, bounds [0,1] -- presumably one pseudo-density per element, confirm
# against the mesh in data.zip.
rho = InputVariable(0.5,TableWriter(" ",(1,-1)),1600,1.0,0.0,1.0)

# Parameters
# template substitutions that specialize settings.cfg for each run
adjointOutput = Parameter(["= NONE"], LabelReplacer("= RESTART"))
fType_objective = Parameter(["TOPOL_COMPLIANCE"], LabelReplacer("__FUNCTION__"))
fType_constraint = Parameter(["VOLUME_FRACTION"], LabelReplacer("__FUNCTION__"))
linIters_constraint = Parameter(["LINEAR_SOLVER_ITER= 1"],
                      LabelReplacer("LINEAR_SOLVER_ITER= 1000"))
# continuation values for the projection sharpness, stepped by driver.update()
beta = Parameter([0.01, 1, 4, 16, 64, 200],LabelReplacer("__BETA__"))

# Evaluations
# primal run
direct = ExternalRun("DIRECT","SU2_CFD -t 1 settings.cfg",True)
direct.addConfig("settings.cfg")
direct.addConfig("element_properties.dat")
direct.addData("mesh.su2")
direct.addParameter(fType_objective)
direct.addParameter(beta)

# adjoint of the objective (compliance)
objective = ExternalRun("OBJECTIVE","SU2_CFD_AD settings.cfg",True)
objective.addConfig("settings.cfg")
objective.addConfig("element_properties.dat")
objective.addData("mesh.su2")
objective.addData("DIRECT/solution.dat")
objective.addParameter(fType_objective)
objective.addParameter(adjointOutput)
objective.addParameter(beta)

# adjoint of the constraint (volume fraction)
constraint = ExternalRun("CONSTRAINT","SU2_CFD_AD settings.cfg",True)
constraint.addConfig("settings.cfg")
constraint.addConfig("element_properties.dat")
constraint.addData("mesh.su2")
constraint.addData("DIRECT/solution.dat")
constraint.addParameter(fType_constraint)
constraint.addParameter(linIters_constraint)
constraint.addParameter(adjointOutput)
constraint.addParameter(beta)

# Functions
fun1 = Function("compliance","DIRECT/history.csv",LabeledTableReader('"TopComp"'))
fun1.addInputVariable(rho,"OBJECTIVE/grad.dat",TableReader(None,0))
fun1.addValueEvalStep(direct)
fun1.addGradientEvalStep(objective)

fun2 = Function("solid_fraction","DIRECT/history.csv",LabeledTableReader('"VolFrac"'))
fun2.addInputVariable(rho,"CONSTRAINT/grad.dat",TableReader(None,0))
fun2.addValueEvalStep(direct)
fun2.addGradientEvalStep(constraint)

# Driver
# minimize compliance subject to a 50% volume fraction upper bound
driver = IpoptDriver()
driver.addObjective("min",fun1,800.0)
driver.addUpperBound(fun2,0.5,2.0)
driver.setEvaluationMode(True,0.05)
driver.setStorageMode(False)

nlp = driver.getNLP()
ipyopt.set_loglevel(ipyopt.LOGGING_DEBUG)

# Optimization
x = driver.getInitial()

# initial multipliers, we need to keep these for warm starts
conMult = np.zeros(1)
lbMult = np.zeros(x.size)
ubMult = np.zeros(x.size)

# continuation loop: solve with increasing beta, warm-starting each stage
# from the previous multipliers; the last stage gets a larger iteration budget
stop = False
while not stop:
    stop = beta.isAtTop()

    nlp.set(warm_start_init_point = ("yes","no")[beta.isAtBottom()], # use warm start ["no"]
            nlp_scaling_method = "none",    # we are already doing some scaling
            accept_every_trial_step = "no", # can be used to force single ls per iteration
            limited_memory_max_history = 15,# the "L" in L-BFGS
            max_iter = (25,200)[stop],
            tol = 1e-6,             # this and max_iter are the main stopping criteria
            acceptable_iter = 10,
            acceptable_tol = 1e-3,
            acceptable_obj_change_tol=1e-6, # Cauchy-type convergence over "acceptable_iter"
            mu_min = 1e-8,          # for very low values (e-10) the problem "flip-flops"
            recalc_y_feas_tol = 0.1)        # helps converging the dual problem with L-BFGS

    x, obj, status = nlp.solve(x, mult_g = conMult, mult_x_L = lbMult, mult_x_U = ubMult)
    print("\n\n")

    driver.update()
#end
| 3,508 | 34.09 | 97 | py |
FADO | FADO-master/examples/ipopt/rosenbrock.py | # The basic Rosenbrock example using Ipopt via IPyOpt
from FADO import *
import ipyopt
from shutil import copy
# fetch the solver scripts and input files from the plain rosenbrock example
copy("../rosenbrock/direct.py",".")
copy("../rosenbrock/adjoint.py",".")
copy("../rosenbrock/config_tmpl.txt",".")
copy("../rosenbrock/data1.txt",".")
copy("../rosenbrock/data2.txt",".")
# Design variables of the problem
# this defines initial value and how they are written to an arbitrary file
# (here the "__X__"/"__Y__" labels of the template config get replaced)
var1 = InputVariable(0.0,LabelReplacer("__X__"))
var2 = InputVariable(0.0,LabelReplacer("__Y__"))
# Parameters
# these parameters tailor the template config to each function
parData1 = Parameter(["data1.txt"],LabelReplacer("__DATA_FILE__"))
parData2 = Parameter(["data2.txt"],LabelReplacer("__DATA_FILE__"))
parFunc1 = Parameter(["rosenbrock"],LabelReplacer("__FUNCTION__"))
parFunc2 = Parameter(["constraint"],LabelReplacer("__FUNCTION__"))
# Evaluations
# "runs" that are needed to compute functions and their gradients
# RUN1/RUN2 evaluate the primal with data1/data2, JAC1..3 the gradients
evalFun1 = ExternalRun("RUN1","python ../../direct.py config_tmpl.txt")
evalFun1.addConfig("config_tmpl.txt")
evalFun1.addData("data1.txt")
evalFun1.addParameter(parData1)
evalJac1 = ExternalRun("JAC1","python ../../adjoint.py config_tmpl.txt")
evalJac1.addConfig("config_tmpl.txt")
evalJac1.addData("data1.txt")
evalJac1.addData("RUN1/results.txt") # simulate we need data from the direct run
evalJac1.addParameter(parData1)
evalJac1.addParameter(parFunc1)
evalFun2 = ExternalRun("RUN2","python ../../direct.py config_tmpl.txt")
evalFun2.addConfig("config_tmpl.txt")
evalFun2.addData("data2.txt")
evalFun2.addParameter(parData2)
evalJac2 = ExternalRun("JAC2","python ../../adjoint.py config_tmpl.txt")
evalJac2.addConfig("config_tmpl.txt")
evalJac2.addData("data2.txt")
evalJac2.addData("RUN2/results.txt") # simulate we need data from the direct run
evalJac2.addParameter(parData2)
evalJac2.addParameter(parFunc1)
evalJac3 = ExternalRun("JAC3","python ../../adjoint.py config_tmpl.txt")
evalJac3.addConfig("config_tmpl.txt")
evalJac3.addData("data2.txt")
evalJac3.addData("RUN2/results.txt") # simulate we need data from the direct run
evalJac3.addParameter(parData2)
evalJac3.addParameter(parFunc2)
# Functions
# now variables, parameters, and evaluations are combined
# (note that fun2 and fun3 share the same primal evaluation RUN2)
fun1 = Function("Rosenbrock1","RUN1/results.txt",TableReader(0,0))
fun1.addInputVariable(var1,"JAC1/gradient.txt",TableReader(0,0))
fun1.addInputVariable(var2,"JAC1/gradient.txt",TableReader(1,0))
fun1.addValueEvalStep(evalFun1)
fun1.addGradientEvalStep(evalJac1)
fun2 = Function("Rosenbrock2","RUN2/results.txt",TableReader(0,0))
fun2.addInputVariable(var1,"JAC2/gradient.txt",TableReader(0,0))
fun2.addInputVariable(var2,"JAC2/gradient.txt",TableReader(1,0))
fun2.addValueEvalStep(evalFun2)
fun2.addGradientEvalStep(evalJac2)
fun3 = Function("Constraint2","RUN2/results.txt",TableReader(1,0))
fun3.addInputVariable(var1,"JAC3/gradient.txt",TableReader(0,0))
fun3.addInputVariable(var2,"JAC3/gradient.txt",TableReader(1,0))
fun3.addValueEvalStep(evalFun2)
fun3.addGradientEvalStep(evalJac3)
# Driver
# the optimization is defined by the objectives and constraints
# (two minimization objectives and one upper-bound constraint)
driver = IpoptDriver()
driver.addObjective("min",fun1,0.5)
driver.addObjective("min",fun2,0.5)
driver.addUpperBound(fun3,2.0)
driver.setStorageMode(False)
driver.setEvaluationMode(True,1.0)
nlp = driver.getNLP()
ipyopt.set_loglevel(ipyopt.LOGGING_DEBUG)
# Optimization
# hand the NLP object to Ipopt, starting from the variables' initial values
x0 = driver.getInitial()
results = nlp.solve(x0)
# report the results
print(results)
| 3,438 | 34.091837 | 80 | py |
FADO | FADO-master/examples/example1_SU2/example.py | # Plain topology optimization example
from FADO import *
import scipy.optimize
import subprocess
# extract the mesh and config templates shipped with the example
subprocess.call("unzip -o data.zip",shell=True)
# Design variables
# 1600 values written out as a table, initial value 0.5, bounds [0,1]
rho = InputVariable(0.5,TableWriter(" ",(1,-1)),1600,1.0,0.0,1.0)
# Parameters
# disable restart-file output for the adjoint runs
output_adjoint = Parameter(["OUTPUT_FILES= NONE"],
                 LabelReplacer("OUTPUT_FILES= RESTART"))
# the same config template is specialized for each function of interest
fType_objective = Parameter(["REFERENCE_NODE"],LabelReplacer("__FUNCTION__"))
fType_constraint = Parameter(["VOLUME_FRACTION"],LabelReplacer("__FUNCTION__"))
# NOTE(review): defined but never attached to any run in this example;
# presumably intended for adjointRun2 (cf. the Ipopt topology example) -- confirm
linIters_constraint = Parameter(["LINEAR_SOLVER_ITER= 1"],
                      LabelReplacer("LINEAR_SOLVER_ITER= 1000"))
# continuation ramp for the __BETA__ setting of the config
beta = Parameter([0.01, 1, 4, 16, 64, 200],LabelReplacer("__BETA__"))
# Evaluations
# primal run, produces history.csv and solution.dat used below
directRun = ExternalRun("DIRECT","SU2_CFD settings_tmpl.cfg",True)
directRun.addConfig("settings_tmpl.cfg")
directRun.addConfig("element_properties.dat")
directRun.addData("mesh.su2")
directRun.addParameter(fType_objective)
directRun.addParameter(beta)
# adjoint of the objective, restarts from the primal solution
adjointRun1 = ExternalRun("ADJOINT1","SU2_CFD_AD settings_tmpl.cfg",True)
adjointRun1.addConfig("settings_tmpl.cfg")
adjointRun1.addConfig("element_properties.dat")
adjointRun1.addData("mesh.su2")
adjointRun1.addData("DIRECT/solution.dat")
adjointRun1.addParameter(output_adjoint)
adjointRun1.addParameter(fType_objective)
adjointRun1.addParameter(beta)
# adjoint of the constraint
adjointRun2 = ExternalRun("ADJOINT2","SU2_CFD_AD settings_tmpl.cfg",True)
adjointRun2.addConfig("settings_tmpl.cfg")
adjointRun2.addConfig("element_properties.dat")
adjointRun2.addData("mesh.su2")
adjointRun2.addData("DIRECT/solution.dat")
adjointRun2.addParameter(output_adjoint)
adjointRun2.addParameter(fType_constraint)
adjointRun2.addParameter(beta)
# Functions
# values come from the primal history file, gradients from the adjoint runs
fun1 = Function("reference_node","DIRECT/history.csv",LabeledTableReader('"RefNode"'))
fun1.addInputVariable(rho,"ADJOINT1/grad.dat",TableReader(None,0))
fun1.addValueEvalStep(directRun)
fun1.addGradientEvalStep(adjointRun1)
fun2 = Function("solid_fraction","DIRECT/history.csv",LabeledTableReader('"VolFrac"'))
fun2.addInputVariable(rho,"ADJOINT2/grad.dat",TableReader(None,0))
fun2.addValueEvalStep(directRun)
fun2.addGradientEvalStep(adjointRun2)
# Driver
# exterior penalty driver (second argument presumably an auto-update period -- confirm)
update_iters = 25
driver = ExteriorPenaltyDriver(0.005,update_iters)
driver.addObjective("min",fun1,80.0)
driver.addUpperBound(fun2,0.5,2.0)
driver.preprocess()
driver.setEvaluationMode(False)
driver.setStorageMode(False)
# Optimization
# run L-BFGS-B in bursts of "update_iters" until the beta ramp tops out
x = driver.getInitial()
lb = driver.getLowerBound()
ub = driver.getUpperBound()
bounds = np.array((lb,ub),float).transpose()
max_iters = 200
options={'disp': True, 'maxcor': 10, 'ftol': 1e-7, 'gtol': 1e-12, 'maxiter': update_iters}
while not beta.isAtTop():
    optimum = scipy.optimize.minimize(driver.fun, x, method="L-BFGS-B",\
          jac=driver.grad, bounds=bounds, options=options)
    x = optimum.x
    max_iters -= optimum.nit
#end
# final run with the remaining iteration budget
options['maxiter'] = max_iters
optimum = scipy.optimize.minimize(driver.fun, x, method="L-BFGS-B",\
        jac=driver.grad, bounds=bounds, options=options)
| 2,994 | 33.825581 | 90 | py |
FADO | FADO-master/examples/example3_SU2/example.py | # Shape and Topology optimization with 2 load cases
from FADO import *
import scipy.optimize
import os
import subprocess
# extract the mesh and config templates shipped with the example
subprocess.call("unzip -o data.zip",shell=True)
# Design variables
# 9 FFD shape variables in [-15,15] plus 4800 density values in [0,1]
ffd = InputVariable(0.0,PreStringHandler("DV_VALUE= "),9,1.0,-15.0,15.0)
rho = InputVariable(0.5,TableWriter(" ",(1,-1)),4800,1.0,0.0,1.0)
# Parameters
# switch from displacement to volume fraction
func_constraint = Parameter(["OBJECTIVE_FUNCTION= VOLUME_FRACTION"],\
                  LabelReplacer("OBJECTIVE_FUNCTION= REFERENCE_NODE"))
# computing the volume constraint gradients does not require an accurate solution
iter_constraint = Parameter(["LINEAR_SOLVER_ITER= 1"], LabelReplacer("LINEAR_SOLVER_ITER= 1000"))
# two load cases
load_horizontal = Parameter(["1.0, 0.0"],LabelReplacer("__LOAD_DIR__"))
load_vertical = Parameter(["0.0, -1.0"],LabelReplacer("__LOAD_DIR__"))
# this is a meta-parameter, for SU2_GEO the FFD variables are the FD step sizes
vars_geometry = Parameter(["DV_VALUE=0.001"+",0.001"*(ffd.getSize()-1)],\
                LabelReplacer("DV_VALUE= 0"+",0"*(ffd.getSize()-1)))
# filter kernels and radius (two-stage continuation, cf. the beta ramp)
filter_type = Parameter(["CONICAL","ERODE,DILATE"],LabelReplacer("__FILTER__"))
filter_radius = Parameter([3.5,1.5],LabelReplacer("__RADIUS__"))
# the ramp for the filter kernel parameter
beta = Parameter([0.01, 64],LabelReplacer("__BETA__"))
# Evaluations
# mesh deformation: applies the FFD variables, all other runs reuse its mesh
deform = ExternalRun("DEFORM","SU2_DEF settings.cfg",True)
deform.addConfig("settings.cfg")
deform.addData("mesh_ffd.su2")
# these parameters are not relevant but are needed to make a valid config
deform.addParameter(load_vertical)
deform.addParameter(filter_type)
deform.addParameter(filter_radius)
deform.addParameter(beta)
# geometric properties (total area and its FFD sensitivities)
geometry = ExternalRun("GEOMETRY","SU2_GEO settings.cfg",True)
geometry.addConfig("settings.cfg")
geometry.addData("DEFORM/mesh_def.su2",destination="mesh_ffd.su2")
geometry.addParameter(vars_geometry)
# these parameters are not relevant but are needed to make a valid config
geometry.addParameter(load_vertical)
geometry.addParameter(filter_type)
geometry.addParameter(filter_radius)
geometry.addParameter(beta)
# horizontal load case
hload_dir = ExternalRun("HLOAD","SU2_CFD -t 2 settings.cfg",True)
hload_dir.addConfig("settings.cfg")
hload_dir.addConfig("element_properties.dat")
hload_dir.addData("DEFORM/mesh_def.su2",destination="mesh_ffd.su2")
hload_dir.addParameter(load_horizontal)
hload_dir.addParameter(filter_type)
hload_dir.addParameter(filter_radius)
hload_dir.addParameter(beta)
# vertical load case
vload_dir = ExternalRun("VLOAD","SU2_CFD -t 2 settings.cfg",True)
vload_dir.addConfig("settings.cfg")
vload_dir.addConfig("element_properties.dat")
vload_dir.addData("DEFORM/mesh_def.su2",destination="mesh_ffd.su2")
vload_dir.addParameter(load_vertical)
vload_dir.addParameter(filter_type)
vload_dir.addParameter(filter_radius)
vload_dir.addParameter(beta)
# adjoint + projection command; "FUN" is replaced per function below
adj_command = "mpirun -n 2 --bind-to none SU2_CFD_AD settings.cfg &&\
               mv restart_adj_FUN.dat solution_adj_FUN.dat &&\
               SU2_DOT_AD settings.cfg"
# volume fraction and its derivatives
volume = ExternalRun("VOLUME",adj_command.replace("FUN","volfrac"),True)
volume.addConfig("settings.cfg")
volume.addConfig("element_properties.dat")
volume.addData("DEFORM/mesh_def.su2",destination="mesh_ffd.su2")
volume.addData("direct.dat") # dummy solution file
volume.addParameter(func_constraint)
volume.addParameter(iter_constraint)
volume.addParameter(filter_type)
volume.addParameter(filter_radius)
volume.addParameter(beta)
# this one is not relevant
volume.addParameter(load_vertical)
# adjoints of the horizontal load
hload_adj = ExternalRun("HLOAD_ADJ",adj_command.replace("FUN","refnode"),True)
hload_adj.addConfig("settings.cfg")
hload_adj.addConfig("element_properties.dat")
hload_adj.addData("DEFORM/mesh_def.su2",destination="mesh_ffd.su2")
hload_adj.addData("HLOAD/direct.dat")
hload_adj.addParameter(load_horizontal)
hload_adj.addParameter(filter_type)
hload_adj.addParameter(filter_radius)
hload_adj.addParameter(beta)
# adjoints of the vertical load
vload_adj = ExternalRun("VLOAD_ADJ",adj_command.replace("FUN","refnode"),True)
vload_adj.addConfig("settings.cfg")
vload_adj.addConfig("element_properties.dat")
vload_adj.addData("DEFORM/mesh_def.su2",destination="mesh_ffd.su2")
vload_adj.addData("VLOAD/direct.dat")
vload_adj.addParameter(load_vertical)
vload_adj.addParameter(filter_type)
vload_adj.addParameter(filter_radius)
vload_adj.addParameter(beta)
# Functions
# each function combines both sets of variables (FFD and densities)
vert_disp = Function("vert_disp","VLOAD/history.csv",LabeledTableReader('"RefNode"'))
vert_disp.addInputVariable(ffd,"VLOAD_ADJ/of_grad.dat",TableReader(None,0,(1,0)))
vert_disp.addInputVariable(rho,"VLOAD_ADJ/grad.dat",TableReader(None,0))
vert_disp.addValueEvalStep(deform)
vert_disp.addValueEvalStep(vload_dir)
vert_disp.addGradientEvalStep(vload_adj)
horiz_disp = Function("horiz_disp","HLOAD/history.csv",LabeledTableReader('"RefNode"'))
horiz_disp.addInputVariable(ffd,"HLOAD_ADJ/of_grad.dat",TableReader(None,0,(1,0)))
horiz_disp.addInputVariable(rho,"HLOAD_ADJ/grad.dat",TableReader(None,0))
horiz_disp.addValueEvalStep(deform)
horiz_disp.addValueEvalStep(hload_dir)
horiz_disp.addGradientEvalStep(hload_adj)
vol_frac = Function("vol_frac","VLOAD/history.csv",LabeledTableReader('"VolFrac"'))
vol_frac.addInputVariable(ffd,"VOLUME/of_grad.dat",TableReader(None,0,(1,0)))
vol_frac.addInputVariable(rho,"VOLUME/grad.dat",TableReader(None,0))
vol_frac.addValueEvalStep(deform)
vol_frac.addValueEvalStep(vload_dir)
vol_frac.addGradientEvalStep(volume)
vol_total = Function("vol_total","GEOMETRY/of_func.csv",LabeledTableReader('"AIRFOIL_AREA"'))
vol_total.addInputVariable(ffd,"GEOMETRY/of_grad.csv",TableReader(None,1,(1,0),(None,None),","))
vol_total.addValueEvalStep(deform)
vol_total.addValueEvalStep(geometry)
# Driver
# two objectives and two upper bounds, logging to log.txt/history.txt
driver = ExteriorPenaltyDriver(0.01,0,8.0,256.0,np.sqrt(2))
driver.addObjective("min",vert_disp, 1.0,0.5)
driver.addObjective("min",horiz_disp,3.5,0.5)
driver.addUpperBound(vol_frac,0.5,2.0)
driver.addUpperBound(vol_total,4800,2e-4)
driver.setWorkingDirectory("currentDesign")
driver.preprocessVariables()
driver.setEvaluationMode(True,0.1)
driver.setStorageMode(True)
log = open("log.txt","w",1)
his = open("history.txt","w",1)
driver.setLogger(log)
driver.setHistorian(his)
# Optimization
x = driver.getInitial()
lb = driver.getLowerBound()
ub = driver.getUpperBound()
bounds = np.array((lb,ub),float).transpose()
update_iters = 18
max_iters = 1000
fin_tol = 1e-7
options={'disp': True, 'maxcor': 10, 'ftol': fin_tol*10, 'gtol': 1e-12, 'maxls': 5, 'maxiter': update_iters}
# respect constraints with grey settings
# (short L-BFGS-B bursts followed by a driver/penalty update each time)
while not driver.feasibleDesign() and max_iters > 0:
    optimum = scipy.optimize.minimize(driver.fun, x, method="L-BFGS-B",\
          jac=driver.grad, bounds=bounds, options=options)
    x = optimum.x
    max_iters -= optimum.nit
    driver.update(True)
#end
# converge to solid-void settings
# final run with tighter tolerance and the remaining iteration budget
options['ftol'] = fin_tol
options['maxiter'] = max_iters
optimum = scipy.optimize.minimize(driver.fun, x, method="L-BFGS-B",\
        jac=driver.grad, bounds=bounds, options=options)
driver.update()
log.close()
his.close()
| 7,200 | 37.303191 | 108 | py |
FADO | FADO-master/examples/rosenbrock/example.py | # This example mimics the way an external code might work.
#
# For generality:
# 3 functions that result from 2 evaluation of the primal function
#
# Files:
# - direct.py is the primal "solver", it takes a data file and a config.
# Two data files form the two evaluations, it computes 2 functions.
# - adjoint.py computes the gradient of the requested function.
#
# The variables (x,y) go in the config, the constants go in the data.
# You "shouldn't" do this... but if nothing else works...
#import sys
#sys.path.append("../")
#sys.path.append("../../")
from FADO import *
# Design variables of the problem
# this defines initial value and how they are written to an arbitrary file
# (the "__X__"/"__Y__" labels of the template config get replaced)
var1 = InputVariable(0.0,LabelReplacer("__X__"))
var2 = InputVariable(0.0,LabelReplacer("__Y__"))
# Parameters
# these parameters tailor the template config to each function
parData1 = Parameter(["data1.txt"],LabelReplacer("__DATA_FILE__"))
parData2 = Parameter(["data2.txt"],LabelReplacer("__DATA_FILE__"))
parFunc1 = Parameter(["rosenbrock"],LabelReplacer("__FUNCTION__"))
parFunc2 = Parameter(["constraint"],LabelReplacer("__FUNCTION__"))
# Evaluations
# "runs" that are needed to compute functions and their gradients
# RUN1/RUN2 evaluate the primal with data1/data2, JAC1..3 the gradients
evalFun1 = ExternalRun("RUN1","python ../../direct.py config_tmpl.txt")
evalFun1.addConfig("config_tmpl.txt")
evalFun1.addData("data1.txt")
evalFun1.addParameter(parData1)
evalJac1 = ExternalRun("JAC1","python ../../adjoint.py config_tmpl.txt")
evalJac1.addConfig("config_tmpl.txt")
evalJac1.addData("data1.txt")
evalJac1.addData("RUN1/results.txt") # simulate we need data from the direct run
evalJac1.addParameter(parData1)
evalJac1.addParameter(parFunc1)
evalFun2 = ExternalRun("RUN2","python ../../direct.py config_tmpl.txt")
evalFun2.addConfig("config_tmpl.txt")
evalFun2.addData("data2.txt")
evalFun2.addParameter(parData2)
evalJac2 = ExternalRun("JAC2","python ../../adjoint.py config_tmpl.txt")
evalJac2.addConfig("config_tmpl.txt")
evalJac2.addData("data2.txt")
evalJac2.addData("RUN2/results.txt") # simulate we need data from the direct run
evalJac2.addParameter(parData2)
evalJac2.addParameter(parFunc1)
evalJac3 = ExternalRun("JAC3","python ../../adjoint.py config_tmpl.txt")
evalJac3.addConfig("config_tmpl.txt")
evalJac3.addData("data2.txt")
evalJac3.addData("RUN2/results.txt") # simulate we need data from the direct run
evalJac3.addParameter(parData2)
evalJac3.addParameter(parFunc2)
# Functions
# now variables, parameters, and evaluations are combined
# (note that fun2 and fun3 share the same primal evaluation RUN2)
fun1 = Function("Rosenbrock1","RUN1/results.txt",TableReader(0,0))
fun1.addInputVariable(var1,"JAC1/gradient.txt",TableReader(0,0))
fun1.addInputVariable(var2,"JAC1/gradient.txt",TableReader(1,0))
fun1.addValueEvalStep(evalFun1)
fun1.addGradientEvalStep(evalJac1)
fun2 = Function("Rosenbrock2","RUN2/results.txt",TableReader(0,0))
fun2.addInputVariable(var1,"JAC2/gradient.txt",TableReader(0,0))
fun2.addInputVariable(var2,"JAC2/gradient.txt",TableReader(1,0))
fun2.addValueEvalStep(evalFun2)
fun2.addGradientEvalStep(evalJac2)
fun3 = Function("Constraint2","RUN2/results.txt",TableReader(1,0))
fun3.addInputVariable(var1,"JAC3/gradient.txt",TableReader(0,0))
fun3.addInputVariable(var2,"JAC3/gradient.txt",TableReader(1,0))
fun3.addValueEvalStep(evalFun2)
fun3.addGradientEvalStep(evalJac3)
# Driver
# the optimization is defined by the objectives and constraints
# (exterior penalty formulation of the two objectives and the bound)
driver = ExteriorPenaltyDriver(0.005,0)
driver.addObjective("min",fun1,0.5)
driver.addObjective("min",fun2,0.5)
driver.addUpperBound(fun3,2.0)
driver.preprocessVariables()
driver.setStorageMode(False)
class exampleAction:
    """Callable hook that prints a fixed message every time it is invoked.

    Instances are registered with the driver (setUserPostProcessGrad) to run
    custom code at certain points of the optimization.
    """
    def __init__(self, message):
        self._msg = message
    def __call__(self):
        print(self._msg)
#end
# register a callback that runs after every gradient evaluation
driver.setUserPostProcessGrad(exampleAction("after gradient"))
# Optimization
# now the "fun" and "grad" methods of the driver can be passed to an optimizer
x = driver.getInitial()
options={'disp': True, 'maxcor': 10, 'ftol': 1e-6, 'gtol': 1e-5, 'maxiter': 200, 'maxls': 20}
optimum = fletcherReeves(driver.fun,x,driver.grad,options)
| 4,047 | 35.468468 | 93 | py |
FADO | FADO-master/examples/rosenbrock/adjoint.py | import sys
import time
#time.sleep(3) # uncomment to simulate an expensive computation
# Gradient ("adjoint") solver of the mock external code.
# Usage: python adjoint.py <config file>
# The config lists, one per line: the data file holding the (a,b) constants,
# the values of the variables x and y, and which function to differentiate
# ("rosenbrock..." for the objective, anything else for the linear constraint).
config = sys.argv[1]
# "with" guarantees the files are closed even if parsing fails
with open(config, "r") as fid:
    lines = fid.readlines()
data = lines[0][0:-1] # strip the trailing newline from the data file name
x = float(lines[1])
y = float(lines[2])
mode = lines[3]
# constants of the Rosenbrock function
with open(data, "r") as fid:
    lines = fid.readlines()
a = float(lines[0])
b = float(lines[1])
if mode.startswith("rosenbrock"):
    # gradient of Rosenbrock's function (a-x)^2 + b*(y-x^2)^2
    dfdx = 2*(x-a)+4*b*x*(x**2-y)
    dfdy = 2*b*(y-x**2)
else:
    # gradient of the simple linear constraint x+y
    dfdx = 1
    dfdy = 1
#end
# one gradient component per line
with open("gradient.txt", "w") as fid:
    fid.writelines([str(dfdx)+"\n", str(dfdy)+"\n"])
| 613 | 13.97561 | 47 | py |
FADO | FADO-master/examples/rosenbrock/direct.py | import sys
import time
#time.sleep(3) # uncomment to simulate an expensive computation
# Primal "solver" of the mock external code.
# Usage: python direct.py <config file>
# The config lists, one per line: the data file holding the (a,b) constants
# and the values of the two variables x and y.
config = sys.argv[1]
# "with" guarantees the files are closed even if parsing fails
with open(config, "r") as fid:
    lines = fid.readlines()
data = lines[0][0:-1] # strip the trailing newline from the data file name
x = float(lines[1])
y = float(lines[2])
with open(data, "r") as fid:
    lines = fid.readlines()
a = float(lines[0])
b = float(lines[1])
# Rosenbrock's function
f1 = (a-x)**2+b*(y-x**2)**2
# A simple linear constraint
f2 = x+y
# one function value per line
with open("results.txt", "w") as fid:
    fid.writelines([str(f1)+"\n", str(f2)+"\n"])
| 462 | 13.46875 | 43 | py |
FADO | FADO-master/examples/example2_SU2/example.py | # Plain topology optimization example using the Fletcher-Reeves method
from FADO import *
import subprocess
# reuse the mesh/config archive of example 1
subprocess.call("unzip -o ../example1_SU2/data.zip",shell=True)
# Design variables
# 1600 values written out as a table, initial value 0.5, bounds [0,1]
rho = InputVariable(0.5,TableWriter(" ",(1,-1)),1600,1.0,0.0,1.0)
# Parameters
# the same config template is specialized for each function of interest
fType_objective = Parameter(["REFERENCE_NODE"],LabelReplacer("__FUNCTION__"))
fType_constraint = Parameter(["VOLUME_FRACTION"],LabelReplacer("__FUNCTION__"))
# continuation ramp for the __BETA__ setting of the config
beta = Parameter([0.01, 1, 4, 16, 64, 200],LabelReplacer("__BETA__"))
# Evaluations
# primal run, produces history.csv and solution.dat used below
directRun = ExternalRun("DIRECT","SU2_CFD settings_tmpl.cfg")
directRun.addConfig("settings_tmpl.cfg")
directRun.addConfig("element_properties.dat")
directRun.addData("mesh.su2")
directRun.addParameter(fType_objective)
directRun.addParameter(beta)
# adjoint of the objective, restarts from the primal solution
adjointRun1 = ExternalRun("ADJOINT1","SU2_CFD_AD settings_tmpl.cfg")
adjointRun1.addConfig("settings_tmpl.cfg")
adjointRun1.addConfig("element_properties.dat")
adjointRun1.addData("mesh.su2")
adjointRun1.addData("DIRECT/solution.dat")
adjointRun1.addParameter(fType_objective)
adjointRun1.addParameter(beta)
# adjoint of the constraint
adjointRun2 = ExternalRun("ADJOINT2","SU2_CFD_AD settings_tmpl.cfg")
adjointRun2.addConfig("settings_tmpl.cfg")
adjointRun2.addConfig("element_properties.dat")
adjointRun2.addData("mesh.su2")
adjointRun2.addData("DIRECT/solution.dat")
adjointRun2.addParameter(fType_constraint)
adjointRun2.addParameter(beta)
# Functions
# values come from the primal history file, gradients from the adjoint runs
fun1 = Function("reference_node","DIRECT/history.csv",LabeledTableReader('"RefNode"'))
fun1.addInputVariable(rho,"ADJOINT1/grad.dat",TableReader(None,0))
fun1.addValueEvalStep(directRun)
fun1.addGradientEvalStep(adjointRun1)
fun2 = Function("solid_fraction","DIRECT/history.csv",LabeledTableReader('"VolFrac"'))
fun2.addInputVariable(rho,"ADJOINT2/grad.dat",TableReader(None,0))
fun2.addValueEvalStep(directRun)
fun2.addGradientEvalStep(adjointRun2)
# Driver
update_iters = 25
driver = ExteriorPenaltyDriver(0.005,0)
driver.addObjective("min",fun1,80.0)
driver.addUpperBound(fun2,0.5,2.0)
driver.preprocessVariables()
driver.setEvaluationMode(True,0.1)
driver.setStorageMode(False)
# Optimization
# bound constraints are handled via a variable transformation so that the
# unconstrained Fletcher-Reeves method can be used
x = driver.getInitial()
lb = driver.getLowerBound()
ub = driver.getUpperBound()
transform = BoundConstraints(driver.fun,driver.grad,lb,ub)
max_iters = 200
options={'disp': True, 'maxcor': 10, 'ftol': 1e-7, 'gtol': 1e-12, 'maxiter': update_iters, 'maxls': 20}
# short optimization bursts with a driver update after each, until the
# beta ramp reaches its last value
while not beta.isAtTop():
    x0 = transform.inverse(x)
    optimum = fletcherReeves(transform.fun,x0,transform.grad,options,quadraticInterp)
    x = transform(optimum["x"])
    max_iters -= optimum["nit"]
    driver.update(False)
#end
# final run with the remaining iteration budget
options['maxiter'] = max_iters
x0 = transform.inverse(x)
optimum = fletcherReeves(transform.fun,x0,transform.grad,options,quadraticInterp)
x = transform(optimum["x"])
| 2,755 | 32.609756 | 103 | py |
FADO | FADO-master/optimizers/fletcher_reeves.py | # Copyright 2019-2023, FADO Contributors (cf. AUTHORS.md)
#
# This file is part of FADO.
#
# FADO is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FADO is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FADO. If not, see <https://www.gnu.org/licenses/>.
from optimizers.line_searches import goldenSection
import numpy as np
def fletcherReeves(fun,x,grad,options,lineSearch=goldenSection):
    """
    Fletcher-Reeves method. The interface and options are similar to SciPy's L-BFGS-B.

    Parameters
    ----------
    fun     : Callable function, should take a numpy array and return a float.
    x       : The starting point of the optimization.
              NOTE: this array is updated in place, i.e. the caller's array
              is mutated (the same object is returned in the result).
    grad    : Callable gradient method, takes and returns a numpy array.
    options : Dictionary of options:
                "ftol" function-based tolerance [no default];
                "gtol" norm of gradient-based tolerance [no default];
                "maxiter" maximum number of iterations [no default];
                "disp" True to print messages [False];
                "maxcor" restart period of the method [x.size+1];
                "maxls" maximum number of line searches per iteration [20];
                "tolls" stopping criteria for line searches [1e-3].
    lineSearch : The line search method used.

    Returns
    -------
    Dictionary with keys "x", "fun", "jac", "nit", "nfev", "njev", "success".

    See also
    --------
    goldenSection and quadraticInterpolation line search methods.
    """
    # unpack options
    ftol = options["ftol"]
    gtol = options["gtol"]
    maxiter = options["maxiter"]
    verbose = False
    if "disp" in options.keys(): verbose = options["disp"]
    restart = x.size+1
    if "maxcor" in options.keys(): restart = options["maxcor"]
    maxls = 20
    if "maxls" in options.keys(): maxls = options["maxls"]
    tolls = 0.001
    if "tolls" in options.keys(): tolls = options["tolls"]
    if verbose:
        headerLine = ""
        for data in ["ITER","FUN EVAL","LS EVAL","STEP","FUN EPS","GRAD EPS","FUN VAL"]:
            headerLine += data.rjust(13)
        logFormat = "{:>13}"*3+"{:>13.6g}"*4
        print("\n"+"*"*33+" Fletcher-Reeves Method "+"*"*34+"\n")
        print("Number of variables: "+str(x.size)+"    Restart period: "+str(restart)+"\n")
        print(headerLine)
    #end
    # initialize (lbd = -1 flags the first iteration, see the step guess below)
    feval = 1
    jeval = 1
    lbd = -1
    f = fun(x)
    G = grad(x)
    success = False
    # log
    logData = [0, 1, 0, 0.0, 0.0, 0.0, f]
    if verbose: print(logFormat.format(*logData))
    # start
    for i in range(maxiter):
        # periodic restart (steepest descent every "restart" iterations)
        if i%restart==0 : S=-G
        if verbose and i%10==0 and i>0: print(headerLine)
        # line search along direction S, starting from the current x
        lsfun = lambda step: fun(x+step*S)
        # initial step guess: scale the previous step by the relative
        # magnitude of the new search direction
        if lbd<=0: lbd = 1.0
        else: lbd *= max(abs(S))/max(abs(S_old))
        f_old = f
        (lbd,f,nls) = lineSearch(lsfun,maxls,f,lbd,tolls)
        feval += nls
        # detect bad direction and restart
        if f>f_old or lbd==0:
            # first fallback: steepest descent direction
            if i%restart!=0: # otherwise we already have S=-G
                if verbose: print("Bad search direction, taking steepest descent.")
                f = f_old
                S = -G
                (lbd,f,nls2) = lineSearch(lsfun,maxls,f,1.0,tolls)
                nls += nls2
                feval += nls2
            #end
            # second fallback: a random direction scaled to the current one
            if f>f_old or lbd==0:
                if verbose: print("Could not improve along steepest descent direction.")
                f = f_old
                S = 2*(1-np.random.rand(S.size))*max(abs(S))
                (lbd,f,nls2) = lineSearch(lsfun,maxls,f,1.0,tolls)
                nls += nls2
                feval += nls2
            #end
            # give up if even the random direction failed
            if f>f_old or lbd==0:
                if verbose: print("Could not improve along random direction.")
                f = f_old
                break
            #end
        #end
        # accept the step (in place) and build the conjugate direction
        x += lbd*S
        G_old = G
        S_old = S
        G = grad(x)
        jeval += 1
        # Fletcher-Reeves update: beta = G.G / G_old.G_old
        S = -G+G.dot(G)/G_old.dot(G_old)*S_old
        # log
        logData = [i+1, feval, nls, lbd, f_old-f, max(abs(G)), f]
        if verbose: print(logFormat.format(*logData))
        # convergence criteria (function decrease or gradient inf-norm)
        if f_old-f < ftol or max(abs(G)) < gtol:
            success = True
            break
    #end
    result = {"x" : x, "fun" : f, "jac" : G, "nit" : i+1,
              "nfev" : feval, "njev" : jeval, "success" : success}
    return result
#end
| 4,882 | 33.146853 | 91 | py |
FADO | FADO-master/optimizers/line_searches.py | # Copyright 2019-2023, FADO Contributors (cf. AUTHORS.md)
#
# This file is part of FADO.
#
# FADO is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FADO is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with FADO. If not, see <https://www.gnu.org/licenses/>.
def goldenSection(fun, maxiter, f0=None, lbd0=1, tol=1e-3):
    """1D minimization using the Golden Section method.

    The minimum of "fun" is first bracketed by repeatedly doubling the
    initial step "lbd0" (the function is assumed to decrease away from 0,
    i.e. we have a descent direction). The bracket is then shrunk with
    golden-section steps until the gap between the interior points falls
    below "tol" relative to the initial gap, or "maxiter" function
    evaluations are used. "f0" optionally provides fun(0.0) to save one
    evaluation. Returns (optimum step, function value, evaluation count).
    """
    neval = 0
    if f0 is None:
        f0 = fun(0.0)
        neval += 1
    # bracketing phase: double the step while the function decreases
    b, fb = 0.0, f0
    c, fc = lbd0, fun(lbd0)
    neval += 1
    while fc < fb:
        b, fb = c, fc
        lbd0 *= 2.0
        c = b+lbd0
        fc = fun(c)
        neval += 1
    #end
    # place the two interior test points of the interval [0, c]
    span = 0.381966*c # 2/(3+sqrt(5))*c
    a, d = 0.0, c
    b, c = span, d-span
    fb, fc = fun(b), fun(c)
    neval += 2
    # golden-section iterations
    while neval < maxiter:
        if fb < fc:
            # minimum is in [a, c], reuse b as the new right interior point
            d = c
            c = b
            b = a+(d-c)
            fc = fb
            fb = fun(b)
        else:
            # minimum is in [b, d], reuse c as the new left interior point
            a = b
            b = c
            c = d-(b-a)
            fb = fc
            fc = fun(c)
        #end
        neval += 1
        # stop when the interior gap is small relative to the initial one
        if abs(c-b)/span < tol:
            break
    #end
    # return the better of the two interior points
    if fb < fc:
        return (b, fb, neval)
    return (c, fc, neval)
#end
def quadraticInterp(fun, maxiter, f0=None, lbd0=1, tol=1e-3):
    """1D minimization using the Quadratic Interpolation method.

    Brackets the minimum of "fun" by doubling the initial step "lbd0"
    (assuming a descent direction away from 0), then repeatedly fits a
    parabola through three points, evaluates "fun" at the parabola's vertex
    and keeps the three best points. Stops when the parabolic prediction
    matches the actual value within "tol" (relative) or after "maxiter"
    function evaluations. "f0" optionally provides fun(0.0).
    Returns (optimum step, function value, evaluation count).
    """
    neval = 0
    xs = [0.0, 0.0, 0.0]
    if f0 is None:
        f0 = fun(0.0)
        neval += 1
    ys = [f0, 0.0, 0.0]
    # bracket the minimum assuming a descent direction
    f1 = fun(lbd0)
    neval += 1
    if f1 > f0:
        # already bracketed by [0, lbd0], probe the midpoint
        xs[2], ys[2] = lbd0, f1
        xs[1] = lbd0/2
        ys[1] = fun(xs[1])
        neval += 1
    while f1 <= f0:
        # keep doubling the step until the function increases
        f2 = fun(2*lbd0)
        neval += 1
        if f2 > f1:
            xs[1], ys[1] = lbd0, f1
            xs[2], ys[2] = 2*lbd0, f2
            break
        f1 = f2
        lbd0 *= 2
    #end
    # main loop: fit a parabola, test its vertex, replace the worst point
    y_ref = max(max(ys),-min(ys),tol)
    while True:
        # coefficients of the interpolating parabola c0 + c1*t + c2*t^2
        det = (xs[0]-xs[1])*(xs[1]-xs[2])*(xs[2]-xs[0])
        c0 = ys[0]*xs[1]*xs[2]*(xs[2]-xs[1]) + ys[1]*xs[2]*xs[0]*(xs[0]-xs[2]) + ys[2]*xs[0]*xs[1]*(xs[1]-xs[0])
        c0 /= det
        c1 = ys[0]*(xs[1]**2-xs[2]**2) + ys[1]*(xs[2]**2-xs[0]**2) + ys[2]*(xs[0]**2-xs[1]**2)
        c1 /= det
        c2 = -(ys[0]*(xs[1]-xs[2]) + ys[1]*(xs[2]-xs[0]) + ys[2]*(xs[0]-xs[1]))/det
        # vertex of the parabola, predicted and actual values there
        xNew = -0.5*c1/c2
        fPred = c0+c1*xNew+c2*xNew**2
        fNew = fun(xNew)
        neval += 1
        if fNew > max(ys):
            print("The quadratic approximation is not convex.")
            # fall back to the best point seen so far
            fNew, xNew = min(zip(ys, xs))
            break
        #end
        # converged when prediction and actual value agree (or budget spent)
        if abs(fNew-fPred)/y_ref < tol or neval >= maxiter:
            break
        # keep the three best points for the next fit
        xs.append(xNew)
        ys.append(fNew)
        best = sorted(zip(ys, xs))[:-1]
        xs = [p[1] for p in best]
        ys = [p[0] for p in best]
    #end
    return (xNew, fNew, neval)
#end
| 4,086 | 24.704403 | 96 | py |
FADO | FADO-master/optimizers/__init__.py | from optimizers.fletcher_reeves import *
from optimizers.line_searches import *
| 80 | 26 | 40 | py |
MostAccurableMNIST_keras | MostAccurableMNIST_keras-master/DeepCNN.py | import numpy as np
# Deep CNN for MNIST digit classification, trained with Keras.
# (numpy is imported as np at the top of the file)
import os
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D, ZeroPadding2D
from keras.layers.pooling import MaxPooling2D
from keras.utils import np_utils
from keras.datasets import mnist
from keras.optimizers import Adagrad
np.random.seed(777)  # for reproducibility
# load MNIST and shape it as (samples, 28, 28, 1) float images in [0, 1]
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(X_train.shape[0], 28,28,1)
X_test = X_test.reshape(X_test.shape[0], 28,28,1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# one-hot encode the 10 digit classes
Y_train = np_utils.to_categorical(y_train, 10)
Y_test = np_utils.to_categorical(y_test, 10)
# VGG-style stack: padded convolutions with widening channel counts,
# interleaved max-pooling and dropout, followed by dense classifier layers
cnn = Sequential()
cnn.add(ZeroPadding2D((2, 2), input_shape=(28, 28, 1)))
cnn.add(Conv2D(64, (5, 5), kernel_initializer='he_normal'))
cnn.add(Activation('relu'))
cnn.add(ZeroPadding2D((2, 2)))
cnn.add(Conv2D(128, (5, 5), kernel_initializer='he_normal'))
cnn.add(Activation('relu'))
cnn.add(MaxPooling2D(strides=(2, 2)))
cnn.add(ZeroPadding2D((2, 2)))
cnn.add(Conv2D(256, (5, 5), kernel_initializer='he_normal'))
cnn.add(Activation('relu'))
cnn.add(ZeroPadding2D((1, 1)))
cnn.add(Conv2D(256, (3, 3), kernel_initializer='he_normal'))
cnn.add(Activation('relu'))
cnn.add(MaxPooling2D(strides=(2, 2)))
cnn.add(Dropout(0.2))
cnn.add(ZeroPadding2D((1, 1)))
cnn.add(Conv2D(512, (3, 3), kernel_initializer='he_normal'))
cnn.add(Activation('relu'))
cnn.add(Dropout(0.2))
cnn.add(ZeroPadding2D((1, 1)))
cnn.add(Conv2D(512, (3, 3), kernel_initializer='he_normal'))
cnn.add(Activation('relu'))
cnn.add(MaxPooling2D(strides=(2, 2)))
cnn.add(ZeroPadding2D((1, 1)))
cnn.add(Conv2D(1024, (3, 3), kernel_initializer='he_normal'))
cnn.add(Activation('relu'))
cnn.add(Dropout(0.2))
cnn.add(ZeroPadding2D((1, 1)))
cnn.add(Conv2D(1024, (3, 3), kernel_initializer='he_normal'))
cnn.add(Activation('relu'))
cnn.add(MaxPooling2D(strides=(2, 2)))
cnn.add(Flatten())
cnn.add(Dropout(0.5))
cnn.add(Dense(2048, activation="relu", kernel_initializer='he_normal'))
cnn.add(Dense(128, activation="relu", kernel_initializer='he_normal'))
cnn.add(Dense(10, activation="softmax"))
cnn.summary()
opt = Adagrad(lr=0.001, epsilon=1e-8, decay=0.)
cnn.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
cnn.fit(X_train, Y_train, batch_size=64, shuffle=True, epochs=50, validation_split=0.1)
score = cnn.evaluate(X_test, Y_test)
print(cnn.metrics_names)
print(score)
# make sure the output directory exists before saving; the original open()
# would fail here (after the long training run) if ./saved was missing
os.makedirs("./saved", exist_ok=True)
with open("./saved/MNIST_DeepCNN_model.json", 'w') as f:
    f.write(cnn.to_json())
cnn.save_weights('./saved/MNIST_DeepCNN_weight.h5') | 2,673 | 32.012346 | 87 | py |
EHR-features | EHR-features-master/boxplots_removing.py | import numpy as np
import matplotlib.pyplot as plt
import csv

# Paired box plots (accuracy rate vs. miss rate) for the feature-REMOVAL
# experiment: one case per feature removed from the full set ("All").
# NOTE(review): `np` (numpy) is imported above this excerpt.
file_name = './results/performance_removing.csv'
case_array = []
accuracy_rate = []
miss_rate = []
with open(file_name, 'r') as csvFile:
    reader = csv.reader(csvFile)
    # Skip the CSV header row; columns used: 0 = case id, 8 = accuracy
    # rate, 9 = miss rate.
    next(reader)
    for row in reader:
        case_array.append(int(row[0]))
        accuracy_rate.append(float(row[8]))
        miss_rate.append(float(row[9]))
# Redundant: the `with` block already closed the file (no-op here).
csvFile.close()
case_array = np.array(case_array)
accuracy_rate = np.array(accuracy_rate)
miss_rate = np.array(miss_rate)
# Index masks for the 11 removal cases (1 = all features, 2..11 = one
# feature removed each).
find_1 = np.where(case_array == 1)
find_2 = np.where(case_array == 2)
find_3 = np.where(case_array == 3)
find_4 = np.where(case_array == 4)
find_5 = np.where(case_array == 5)
find_6 = np.where(case_array == 6)
find_7 = np.where(case_array == 7)
find_8 = np.where(case_array == 8)
find_9 = np.where(case_array == 9)
find_10 = np.where(case_array == 10)
find_11 = np.where(case_array == 11)
# Stack per-case runs column-wise; assumes every case has the same number
# of runs in the CSV.
box1 = np.column_stack((accuracy_rate[find_1], accuracy_rate[find_2], accuracy_rate[find_3], accuracy_rate[find_4], accuracy_rate[find_5], accuracy_rate[find_6], accuracy_rate[find_7], accuracy_rate[find_8], accuracy_rate[find_9],accuracy_rate[find_10],accuracy_rate[find_11]))
box2 = np.column_stack((miss_rate[find_1], miss_rate[find_2], miss_rate[find_3], miss_rate[find_4], miss_rate[find_5], miss_rate[find_6], miss_rate[find_7], miss_rate[find_8], miss_rate[find_9], miss_rate[find_10], miss_rate[find_11]))
labelList =['All', 'All-A','All-HD', 'All-AG', 'All-HT','All-M', 'All-W', 'All-SS', 'All-G', 'All-BMI', 'All-RT']
fig, ax = plt.subplots(1, figsize=(9, 5))
# Accuracy boxes in red on the left y-axis.
bp1 = ax.boxplot(box1, positions=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], widths=0.2)
print(bp1.keys())
plt.setp(bp1['boxes'], color='red')
plt.setp(bp1['fliers'], color='red')
plt.setp(bp1['caps'], color='red')
plt.setp(bp1['medians'], color='red')
plt.setp(bp1['whiskers'], color='red')
# Miss-rate boxes in blue on a twin y-axis, offset +0.3 so each pair sits
# side by side.
ax2 = ax.twinx()
bp2 = ax2.boxplot(box2, positions=[1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3, 8.3, 9.3, 10.3, 11.3], widths=0.2)
plt.setp(bp2['boxes'], color='blue')
plt.setp(bp2['fliers'], color='blue')
plt.setp(bp2['caps'], color='blue')
plt.setp(bp2['medians'], color='blue')
plt.setp(bp2['whiskers'], color='blue')
# Centre the tick labels between each red/blue pair.
ax.set_xticklabels(labelList)
ax.set_xticks([1.15, 2.15, 3.15, 4.15, 5.15, 6.15, 7.15, 8.15, 9.15, 10.15, 11.15])
ax.set_ylabel('Accuracy rate', fontsize=13, color='r')
ax.set_xlabel('Features combination', fontsize=14)
ax2.set_ylabel('Miss rate', fontsize=13, color='b')
ax.grid(True)
plt.savefig('./results/feature-removal.pdf', format='pdf')
plt.show() | 2,543 | 35.342857 | 277 | py |
EHR-features | EHR-features-master/boxplots_adding.py | import numpy as np
import matplotlib.pyplot as plt
import csv

# Paired box plots (accuracy rate vs. miss rate) for the feature-ADDITION
# experiment: case 1 is the A+HD+AG baseline, cases 2..8 add one feature
# each. `np` (numpy) is imported above this excerpt.
file_name = './results/performance_adding.csv'

# Read case id (col 0), accuracy rate (col 8) and miss rate (col 9),
# skipping the header row.
case_array = []
accuracy_rate = []
miss_rate = []
with open(file_name, 'r') as csvFile:
    reader = csv.reader(csvFile)
    next(reader)
    for row in reader:
        case_array.append(int(row[0]))
        accuracy_rate.append(float(row[8]))
        miss_rate.append(float(row[9]))

case_array = np.array(case_array)
accuracy_rate = np.array(accuracy_rate)
miss_rate = np.array(miss_rate)

# Group each metric by case id; every case is assumed to have the same
# number of runs so the groups stack column-wise.
acc_columns = [accuracy_rate[np.where(case_array == c)] for c in range(1, 9)]
miss_columns = [miss_rate[np.where(case_array == c)] for c in range(1, 9)]
box1 = np.column_stack(acc_columns)
box2 = np.column_stack(miss_columns)

labelList = ['A+HD+AG', '+HT', '+M', '+W', '+SS', '+G', '+BMI', '+RT']
fig, ax = plt.subplots(1, figsize=(8, 5))

# Accuracy boxes in red on the left y-axis.
bp1 = ax.boxplot(box1, positions=[1, 2, 3, 4, 5, 6, 7, 8], widths=0.2)
print(bp1.keys())
for artist in ('boxes', 'fliers', 'caps', 'medians', 'whiskers'):
    plt.setp(bp1[artist], color='red')

# Miss-rate boxes in blue on a twin y-axis, offset +0.3 so each pair sits
# side by side.
ax2 = ax.twinx()
bp2 = ax2.boxplot(box2, positions=[1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3, 8.3], widths=0.2)
for artist in ('boxes', 'fliers', 'caps', 'medians', 'whiskers'):
    plt.setp(bp2[artist], color='blue')

# Centre the tick labels between each red/blue pair.
ax.set_xticklabels(labelList)
ax.set_xticks([1.15, 2.15, 3.15, 4.15, 5.15, 6.15, 7.15, 8.15])
ax.set_ylabel('Accuracy rate', fontsize=13, color='r')
ax.set_xlabel('Features combination', fontsize=14)
ax2.set_ylabel('Miss rate', fontsize=13, color='b')
ax.grid(True)
plt.savefig('./results/feature-addition.pdf', format='pdf')
plt.show()
plt.show() | 2,213 | 32.044776 | 208 | py |
EDGY | EDGY-master/DDF.py | import hydra
import hydra.utils as utils
import json
from pathlib import Path
import torch
import numpy as np
import librosa
from tqdm import tqdm
import pyloudnorm
from preprocess import preemphasis
from model import Encoder, Decoder
@hydra.main(config_path="Training/VQ-VAE/Configuration_files/DDF.yaml")
def DDF(cfg):
    """Dynamic Data Filter: re-synthesise or embed speech recordings at one
    of three privacy levels.

    Driven entirely by the hydra config ``cfg``:
    - ``privacy_preference`` "Low"      -> pass-through (raw wav / raw mel);
      "Moderate" -> VQ codes + speaker embedding; "High" -> VQ codes only.
    - ``output_type`` "Recording" (wav) or "Embedding" (text matrices).
    Loads a trained VQ-VAE encoder/decoder checkpoint and processes every
    (wav_path, speaker_id, out_filename) triple in ``cfg.filter_list``.
    """
    filter_list_path = Path(utils.to_absolute_path(cfg.filter_list))
    with open(filter_list_path) as file:
        filter_list = json.load(file)
    in_dir = Path(utils.to_absolute_path(cfg.in_dir))
    out_dir = Path(utils.to_absolute_path(cfg.out_dir))
    out_dir.mkdir(exist_ok=True, parents=True)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    encoder = Encoder(**cfg.model.encoder)
    decoder = Decoder(**cfg.model.decoder)
    encoder.to(device)
    decoder.to(device)
    # Restore trained weights; map_location keeps tensors on CPU at load time.
    print("Load checkpoint from: {}:".format(cfg.checkpoint))
    checkpoint_path = utils.to_absolute_path(cfg.checkpoint)
    checkpoint = torch.load(checkpoint_path, map_location=lambda storage, loc: storage)
    encoder.load_state_dict(checkpoint["encoder"])
    decoder.load_state_dict(checkpoint["decoder"])
    encoder.eval()
    decoder.eval()
    # Loudness meter used to match synthesised output to the source loudness.
    meter = pyloudnorm.Meter(cfg.preprocessing.sr)
    #---------------------------------------
    if cfg.privacy_preference == "Low":
        for wav_path, speaker_id, out_filename in tqdm(filter_list):
            wav_path = in_dir / wav_path
            # librosa.load (it will return audio time series, and its sampling rate)
            wav, _ = librosa.load(wav_path.with_suffix(".wav"), sr=cfg.preprocessing.sr)
            # NOTE(review): ref_loudness is unused in this branch.
            ref_loudness = meter.integrated_loudness(wav)
            # Peak-normalise just below full scale.
            wav = wav / np.abs(wav).max() * 0.999
            path = out_dir / out_filename
            # to return raw recording in mel-spectrogram without any filtering
            if cfg.output_type == "Embedding":
                mel = librosa.feature.melspectrogram(
                    preemphasis(wav, cfg.preprocessing.preemph),
                    sr=cfg.preprocessing.sr,
                    n_fft=cfg.preprocessing.n_fft,
                    n_mels=cfg.preprocessing.n_mels,
                    hop_length=cfg.preprocessing.hop_length,
                    win_length=cfg.preprocessing.win_length,
                    fmin=cfg.preprocessing.fmin,
                    power=1)
                # Log-compress and rescale to roughly [0, 1].
                logmel = librosa.amplitude_to_db(mel, top_db=cfg.preprocessing.top_db)
                logmel = logmel / cfg.preprocessing.top_db + 1
                # NOTE(review): .numpy() on a CUDA tensor raises; this only
                # works when device is CPU (elsewhere .cpu().numpy() is used)
                # -- confirm intended device.
                mel = torch.FloatTensor(logmel).squeeze().to(device).numpy()
                np.savetxt(path.with_suffix(".mel.txt"), mel)
            # to return raw recording in waveform without any filtering
            if cfg.output_type == "Recording":
                librosa.output.write_wav(path.with_suffix(".wav"), wav.astype(np.float32), sr=cfg.preprocessing.sr)
    #---------------------------------------
    if cfg.privacy_preference == "Moderate":
        # Speaker list gives the index used by the decoder's speaker embedding.
        dataset_path = Path(utils.to_absolute_path("Training/Datasets")) / cfg.dataset.path
        with open(dataset_path / "speakers.json") as file:
            speakers = sorted(json.load(file))
        for wav_path, speaker_id, out_filename in tqdm(filter_list):
            wav_path = in_dir / wav_path
            wav, _ = librosa.load(
                wav_path.with_suffix(".wav"),
                sr=cfg.preprocessing.sr)
            # Remember source loudness so the re-synthesis can be matched to it.
            ref_loudness = meter.integrated_loudness(wav)
            wav = wav / np.abs(wav).max() * 0.999
            mel = librosa.feature.melspectrogram(
                preemphasis(wav, cfg.preprocessing.preemph),
                sr=cfg.preprocessing.sr,
                n_fft=cfg.preprocessing.n_fft,
                n_mels=cfg.preprocessing.n_mels,
                hop_length=cfg.preprocessing.hop_length,
                win_length=cfg.preprocessing.win_length,
                fmin=cfg.preprocessing.fmin,
                power=1)
            logmel = librosa.amplitude_to_db(mel, top_db=cfg.preprocessing.top_db)
            logmel = logmel / cfg.preprocessing.top_db + 1
            mel = torch.FloatTensor(logmel).unsqueeze(0).to(device)
            speaker = torch.LongTensor([speakers.index(speaker_id)]).to(device)
            path = out_dir / out_filename
            # Re-synthesise the voice through the VQ bottleneck (keeps the
            # original speaker identity).
            if cfg.output_type == "Recording":
                with torch.no_grad():
                    vq, _ = encoder.encode(mel)
                    output = decoder.generate(vq, speaker)
                # Loudness-match the synthesised audio to the source.
                output_loudness = meter.integrated_loudness(output)
                output = pyloudnorm.normalize.loudness(output, output_loudness, ref_loudness)
                librosa.output.write_wav(path.with_suffix(".wav"), output.astype(np.float32), sr=cfg.preprocessing.sr)
            # Export VQ codes plus the speaker embedding as text matrices.
            if cfg.output_type == "Embedding":
                with torch.no_grad():
                    vq, _ = encoder.encode(mel)
                    speaker = decoder.speaker(speaker)
                # NOTE(review): .to(device).numpy() raises on CUDA tensors
                # (the High branch uses .cpu().numpy()) -- confirm.
                vq = vq.squeeze().to(device).numpy()
                speaker = speaker.squeeze().to(device).numpy()
                np.savetxt(path.with_suffix(".vq.txt"), vq)
                np.savetxt(path.with_suffix(".speaker.txt"), speaker)
    #---------------------------------------
    if cfg.privacy_preference == "High":
        dataset_path = Path(utils.to_absolute_path("Training/Datasets")) / cfg.dataset.path
        with open(dataset_path / "speakers.json") as file:
            speakers = sorted(json.load(file))
        for wav_path, speaker_id, out_filename in tqdm(filter_list):
            wav_path = in_dir / wav_path
            wav, _ = librosa.load(
                wav_path.with_suffix(".wav"),sr=cfg.preprocessing.sr)
            ref_loudness = meter.integrated_loudness(wav)
            wav = wav / np.abs(wav).max() * 0.999
            mel = librosa.feature.melspectrogram(
                preemphasis(wav, cfg.preprocessing.preemph),
                sr=cfg.preprocessing.sr,
                n_fft=cfg.preprocessing.n_fft,
                n_mels=cfg.preprocessing.n_mels,
                hop_length=cfg.preprocessing.hop_length,
                win_length=cfg.preprocessing.win_length,
                fmin=cfg.preprocessing.fmin,
                power=1)
            logmel = librosa.amplitude_to_db(mel, top_db=cfg.preprocessing.top_db)
            logmel = logmel / cfg.preprocessing.top_db + 1
            mel = torch.FloatTensor(logmel).unsqueeze(0).to(device)
            speaker = torch.LongTensor([speakers.index(speaker_id)]).to(device)
            path = out_dir / out_filename
            if cfg.output_type == "Recording":
                with torch.no_grad():
                    vq, _ = encoder.encode(mel)
                    output = decoder.generate(vq, speaker)
                output_loudness = meter.integrated_loudness(output)
                output = pyloudnorm.normalize.loudness(output, output_loudness, ref_loudness)
                librosa.output.write_wav(path.with_suffix(".wav"), output.astype(np.float32), sr=cfg.preprocessing.sr)
            # Highest privacy: export only the VQ codes (no speaker vector).
            if cfg.output_type == "Embedding":
                with torch.no_grad():
                    vq, _ = encoder.encode(mel)
                vq = vq.squeeze().cpu().numpy()
                np.savetxt(path.with_suffix(".vq.txt"), vq)

if __name__ == "__main__":
    DDF()
| 6,913 | 43.320513 | 111 | py |
EDGY | EDGY-master/Training/VQ-VAE/dataset.py | import numpy as np
import torch
from torch.utils.data import Dataset
import json
from random import randint
from pathlib import Path
class SpeechDataset(Dataset):
    """Random-crop dataset over preprocessed (audio, mel) utterance pairs.

    Each item is a (mu-law audio LongTensor, mel FloatTensor, speaker index)
    triple, cropped at a random mel-frame position.  Utterances shorter than
    the crop window are filtered out at construction time.
    """

    def __init__(self, root, hop_length, sr, sample_frames):
        self.root = Path(root)
        self.hop_length = hop_length
        self.sample_frames = sample_frames
        # Sorted speaker list -> stable integer speaker ids.
        with open(self.root / "speakers.json") as file:
            self.speakers = sorted(json.load(file))
        # Minimum utterance length (seconds) that still allows a crop of
        # sample_frames plus one frame of context on each side.
        min_duration = (sample_frames + 2) * hop_length / sr
        with open(self.root / "train.json") as file:
            entries = json.load(file)
        self.metadata = []
        for _, _, duration, out_path in entries:
            if duration > min_duration:
                self.metadata.append(Path(out_path))

    def __len__(self):
        return len(self.metadata)

    def __getitem__(self, index):
        path = self.root.parent / self.metadata[index]
        audio = np.load(path.with_suffix(".wav.npy"))
        mel = np.load(path.with_suffix(".mel.npy"))
        # Random crop position; the mel crop carries one extra frame of
        # context on each side, the audio crop the matching sample range
        # plus one trailing sample for the autoregressive target.
        pos = randint(1, mel.shape[-1] - self.sample_frames - 2)
        mel_crop = mel[:, pos - 1:pos + self.sample_frames + 1]
        audio_crop = audio[pos * self.hop_length:(pos + self.sample_frames) * self.hop_length + 1]
        # Speaker id is derived from the utterance's parent directory name.
        speaker = self.speakers.index(path.parts[-2])
        return torch.LongTensor(audio_crop), torch.FloatTensor(mel_crop), speaker
| 1,381 | 31.139535 | 93 | py |
EDGY | EDGY-master/Training/VQ-VAE/models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
from tqdm import tqdm
import numpy as np
from preprocess import mulaw_decode
def get_gru_cell(gru):
    """Build an ``nn.GRUCell`` sharing the layer-0 parameters of *gru*.

    The cell's weight/bias tensors are pointed at the GRU's own storage, so
    the single-step cell computes exactly what one step of the (first layer
    of the) GRU would.
    """
    cell = nn.GRUCell(gru.input_size, gru.hidden_size)
    # Rebind each cell parameter to the corresponding GRU layer-0 tensor.
    for cell_attr, gru_attr in (("weight_hh", "weight_hh_l0"),
                                ("weight_ih", "weight_ih_l0"),
                                ("bias_hh", "bias_hh_l0"),
                                ("bias_ih", "bias_ih_l0")):
        getattr(cell, cell_attr).data = getattr(gru, gru_attr).data
    return cell
class Encoder(nn.Module):
    """VQ-VAE encoder: 1-D conv stack followed by vector quantisation.

    Maps an input spectrogram (B, in_channels, T) to quantised latents at
    half the input frame rate (one stride-2 convolution in the stack).
    """

    def __init__(self, in_channels, channels, n_embeddings, embedding_dim, jitter=0):
        super().__init__()
        layers = [
            nn.Conv1d(in_channels, channels, 3, 1, 0, bias=False),
            nn.BatchNorm1d(channels),
            nn.ReLU(True),
            nn.Conv1d(channels, channels, 3, 1, 1, bias=False),
            nn.BatchNorm1d(channels),
            nn.ReLU(True),
            # Stride-2 convolution halves the temporal resolution.
            nn.Conv1d(channels, channels, 4, 2, 1, bias=False),
            nn.BatchNorm1d(channels),
            nn.ReLU(True),
            nn.Conv1d(channels, channels, 3, 1, 1, bias=False),
            nn.BatchNorm1d(channels),
            nn.ReLU(True),
            nn.Conv1d(channels, channels, 3, 1, 1, bias=False),
            nn.BatchNorm1d(channels),
            nn.ReLU(True),
            # 1x1 projection into the codebook's embedding space.
            nn.Conv1d(channels, embedding_dim, 1),
        ]
        self.encoder = nn.Sequential(*layers)
        self.codebook = VQEmbeddingEMA(n_embeddings, embedding_dim)
        self.jitter = Jitter(jitter)

    def forward(self, mels):
        """Encode, quantise and (during training) jitter.

        Returns (quantised latents, VQ commitment loss, codebook perplexity).
        """
        pre_quant = self.encoder(mels)
        quantized, loss, perplexity = self.codebook(pre_quant.transpose(1, 2))
        quantized = self.jitter(quantized)
        return quantized, loss, perplexity

    def encode(self, mel):
        """Deterministic inference path (no jitter, no codebook update).

        Returns (quantised latents, codebook indices).
        """
        pre_quant = self.encoder(mel)
        quantized, indices = self.codebook.encode(pre_quant.transpose(1, 2))
        return quantized, indices
class Jitter(nn.Module):
    """Time-jitter regularisation for latent sequences.

    During training each time step is, with probability ``p``, replaced by
    one of its immediate neighbours (p/2 left, p/2 right).  Acts as the
    identity in eval mode or when ``p == 0``.
    """

    def __init__(self, p):
        super().__init__()
        self.p = p
        # Distribution over offsets (-1, 0, +1) for every time step.
        self.register_buffer("prob", torch.Tensor([p / 2, 1 - p, p / 2]))

    def forward(self, x):
        # x: (batch, time, channels)
        if not self.training or self.p == 0:
            return x
        batch, steps, channels = x.size()
        # Sample an offset in {-1, 0, +1} per (batch, step) position.
        offsets = Categorical(self.prob).sample(torch.Size([batch, steps])) - 1
        # Boundary steps must not reach outside the sequence.
        offsets[:, 0].clamp_(0, 1)
        offsets[:, -1].clamp_(-1, 0)
        source = offsets + torch.arange(steps, device=x.device)
        return torch.gather(x, 1, source.unsqueeze(-1).expand(-1, -1, channels))
class VQEmbeddingEMA(nn.Module):
    """VQ-VAE codebook with exponential-moving-average (EMA) updates.

    The codebook lives in buffers, not Parameters: it is not trained by
    back-propagation but updated in ``forward`` (training mode only) from
    running statistics of the encoder outputs assigned to each code.
    """
    def __init__(self, n_embeddings, embedding_dim, commitment_cost=0.25, decay=0.999, epsilon=1e-5):
        super(VQEmbeddingEMA, self).__init__()
        self.commitment_cost = commitment_cost  # weight of the commitment loss
        self.decay = decay  # EMA decay for counts and code sums
        self.epsilon = epsilon  # Laplace smoothing so counts never hit zero
        init_bound = 1 / 512
        embedding = torch.Tensor(n_embeddings, embedding_dim)
        embedding.uniform_(-init_bound, init_bound)
        # Buffers: updated manually below, invisible to the optimizer.
        self.register_buffer("embedding", embedding)
        self.register_buffer("ema_count", torch.zeros(n_embeddings))
        self.register_buffer("ema_weight", self.embedding.clone())

    def encode(self, x):
        """Quantise ``x`` (..., D) to its nearest codes (no codebook update).

        Returns (quantized, indices); ``quantized`` keeps the shape of ``x``
        and ``indices`` is flat over all positions.
        """
        M, D = self.embedding.size()
        x_flat = x.detach().reshape(-1, D)
        # Squared Euclidean distances via ||e||^2 + ||x||^2 - 2*x.e^T (addmm).
        distances = torch.addmm(torch.sum(self.embedding ** 2, dim=1) +
                                torch.sum(x_flat ** 2, dim=1, keepdim=True),
                                x_flat, self.embedding.t(),
                                alpha=-2.0, beta=1.0)
        indices = torch.argmin(distances.float(), dim=-1)
        quantized = F.embedding(indices, self.embedding)
        quantized = quantized.view_as(x)
        return quantized, indices

    def forward(self, x):
        """Quantise ``x``, update the codebook via EMA (training only).

        Returns (quantized, commitment loss, perplexity).
        """
        M, D = self.embedding.size()
        x_flat = x.detach().reshape(-1, D)
        # Same distance computation as in ``encode``.
        distances = torch.addmm(torch.sum(self.embedding ** 2, dim=1) +
                                torch.sum(x_flat ** 2, dim=1, keepdim=True),
                                x_flat, self.embedding.t(),
                                alpha=-2.0, beta=1.0)
        indices = torch.argmin(distances.float(), dim=-1)
        encodings = F.one_hot(indices, M).float()
        quantized = F.embedding(indices, self.embedding)
        quantized = quantized.view_as(x)
        if self.training:
            # EMA of per-code assignment counts ...
            self.ema_count = self.decay * self.ema_count + (1 - self.decay) * torch.sum(encodings, dim=0)
            n = torch.sum(self.ema_count)
            # ... Laplace-smoothed so rarely-used codes don't divide by ~0.
            self.ema_count = (self.ema_count + self.epsilon) / (n + M * self.epsilon) * n
            # EMA of the summed encoder outputs assigned to each code.
            dw = torch.matmul(encodings.t(), x_flat)
            self.ema_weight = self.decay * self.ema_weight + (1 - self.decay) * dw
            # New codebook = running mean of assigned encoder outputs.
            self.embedding = self.ema_weight / self.ema_count.unsqueeze(-1)
        # Commitment loss pulls the encoder towards the (detached) codes.
        e_latent_loss = F.mse_loss(x, quantized.detach())
        loss = self.commitment_cost * e_latent_loss
        # Straight-through estimator: gradients bypass the quantisation.
        quantized = x + (quantized - x).detach()
        # Perplexity = exp(entropy of code usage); equals M when uniform.
        avg_probs = torch.mean(encodings, dim=0)
        perplexity = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-10)))
        return quantized, loss, perplexity
class Decoder(nn.Module):
    """Autoregressive WaveRNN-style vocoder conditioned on VQ codes + speaker.

    ``forward`` runs teacher-forced training over mu-law audio samples;
    ``generate`` decodes sample-by-sample at inference time.
    """
    def __init__(self, in_channels, n_speakers, speaker_embedding_dim,
                 conditioning_channels, mu_embedding_dim, rnn_channels,
                 fc_channels, bits, hop_length):
        super().__init__()
        self.rnn_channels = rnn_channels
        self.quantization_channels = 2**bits  # number of mu-law levels
        self.hop_length = hop_length  # audio samples per conditioning frame
        self.speaker_embedding = nn.Embedding(n_speakers, speaker_embedding_dim)
        # Bidirectional conditioning RNN over (VQ code, speaker) frames.
        self.rnn1 = nn.GRU(in_channels + speaker_embedding_dim, conditioning_channels,
                           num_layers=2, batch_first=True, bidirectional=True)
        self.mu_embedding = nn.Embedding(self.quantization_channels, mu_embedding_dim)
        # Autoregressive sample-rate RNN.
        self.rnn2 = nn.GRU(mu_embedding_dim + 2*conditioning_channels, rnn_channels, batch_first=True)
        self.fc1 = nn.Linear(rnn_channels, fc_channels)
        self.fc2 = nn.Linear(fc_channels, self.quantization_channels)

    def forward(self, x, z, speakers):
        """Teacher-forced pass.

        x: (B, T) previous mu-law samples; z: (B, T', D) VQ codes;
        speakers: (B,) speaker indices.
        Returns per-sample logits of shape (B, T, quantization_channels).
        """
        # Upsample codes x2 in time, then append the speaker embedding to
        # every frame before the conditioning RNN.
        z = F.interpolate(z.transpose(1, 2), scale_factor=2)
        z = z.transpose(1, 2)
        speakers = self.speaker_embedding(speakers)
        speakers = speakers.unsqueeze(1).expand(-1, z.size(1), -1)
        z = torch.cat((z, speakers), dim=-1)
        z, _ = self.rnn1(z)
        # Upsample the conditioning to the audio sample rate.
        z = F.interpolate(z.transpose(1, 2), scale_factor=self.hop_length)
        z = z.transpose(1, 2)
        x = self.mu_embedding(x)
        x, _ = self.rnn2(torch.cat((x, z), dim=2))
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

    def generate(self, z, speaker):
        """Autoregressively synthesise a waveform from VQ codes.

        NOTE(review): ``x.float().item()`` below assumes batch size 1; a
        larger batch would raise -- confirm callers.
        Returns a float64 numpy waveform after mu-law decoding.
        """
        output = []
        # Single-step cell sharing rnn2's weights for the unrolled loop.
        cell = get_gru_cell(self.rnn2)
        # Same conditioning pipeline as in ``forward``.
        z = F.interpolate(z.transpose(1, 2), scale_factor=2)
        z = z.transpose(1, 2)
        speaker = self.speaker_embedding(speaker)
        speaker = speaker.unsqueeze(1).expand(-1, z.size(1), -1)
        z = torch.cat((z, speaker), dim=-1)
        z, _ = self.rnn1(z)
        z = F.interpolate(z.transpose(1, 2), scale_factor=self.hop_length)
        z = z.transpose(1, 2)
        batch_size, sample_size, _ = z.size()
        h = torch.zeros(batch_size, self.rnn_channels, device=z.device)
        # Seed generation from the mid-scale mu-law code.
        x = torch.zeros(batch_size, device=z.device).fill_(self.quantization_channels // 2).long()
        for m in tqdm(torch.unbind(z, dim=1), leave=False):
            x = self.mu_embedding(x)
            h = cell(torch.cat((x, m), dim=1), h)
            x = F.relu(self.fc1(h))
            logits = self.fc2(x)
            # Sample the next mu-law level from the predicted distribution,
            # then rescale to [-1, 1].
            dist = Categorical(logits=logits)
            x = dist.sample()
            output.append(2 * x.float().item() / (self.quantization_channels - 1.) - 1.)
        output = np.asarray(output, dtype=np.float64)
        output = mulaw_decode(output, self.quantization_channels)
        return output

    def speaker(self, speaker):
        """Return the embedding vector(s) for the given speaker indices."""
        speaker = self.speaker_embedding(speaker)
        return speaker
| 7,998 | 35.861751 | 105 | py |
EDGY | EDGY-master/Training/VQ-VAE/train_VQ.py | import hydra
from hydra import utils
from itertools import chain
from pathlib import Path
from tqdm import tqdm
import apex.amp as amp
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from dataset import SpeechDataset
from models import Encoder, Decoder
def save_checkpoint(encoder, decoder, optimizer, amp, scheduler, step, checkpoint_dir):
    """Serialise the full training state to <checkpoint_dir>/model.ckpt-<step>.pt.

    Captures encoder/decoder weights plus optimizer, AMP and LR-scheduler
    state together with ``step`` so training can resume exactly.
    """
    components = {
        "encoder": encoder,
        "decoder": decoder,
        "optimizer": optimizer,
        "amp": amp,
        "scheduler": scheduler,
    }
    checkpoint_state = {name: obj.state_dict() for name, obj in components.items()}
    checkpoint_state["step"] = step
    checkpoint_dir.mkdir(exist_ok=True, parents=True)
    checkpoint_path = checkpoint_dir / "model.ckpt-{}.pt".format(step)
    torch.save(checkpoint_state, checkpoint_path)
    print("Saved checkpoint: {}".format(checkpoint_path.stem))
@hydra.main(config_path="Training/VQ-VAE/Configuration_files/training.yaml")
def train_model(cfg):
    """Train the VQ-VAE encoder/decoder pair.

    Hydra supplies ``cfg``.  Uses Adam over both modules, apex mixed
    precision (opt level O1), a MultiStepLR schedule, TensorBoard logging
    and periodic checkpointing; supports resuming from ``cfg.resume``.
    """
    tensorboard_path = Path(utils.to_absolute_path("tensorboard")) / cfg.checkpoint_dir
    checkpoint_dir = Path(utils.to_absolute_path(cfg.checkpoint_dir))
    writer = SummaryWriter(tensorboard_path)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    encoder = Encoder(**cfg.model.encoder)
    decoder = Decoder(**cfg.model.decoder)
    encoder.to(device)
    decoder.to(device)
    # One optimizer over the parameters of both modules.
    optimizer = optim.Adam(
        chain(encoder.parameters(), decoder.parameters()),
        lr=cfg.training.optimizer.lr)
    [encoder, decoder], optimizer = amp.initialize([encoder, decoder], optimizer, opt_level="O1")
    scheduler = optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=cfg.training.scheduler.milestones,
        gamma=cfg.training.scheduler.gamma)
    if cfg.resume:
        # Restore model/optimizer/amp/scheduler state and the global step.
        print("Resume checkpoint from: {}:".format(cfg.resume))
        resume_path = utils.to_absolute_path(cfg.resume)
        checkpoint = torch.load(resume_path, map_location=lambda storage, loc: storage)
        encoder.load_state_dict(checkpoint["encoder"])
        decoder.load_state_dict(checkpoint["decoder"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        amp.load_state_dict(checkpoint["amp"])
        scheduler.load_state_dict(checkpoint["scheduler"])
        global_step = checkpoint["step"]
    else:
        global_step = 0
    root_path = Path(utils.to_absolute_path("datasets")) / cfg.dataset.path
    dataset = SpeechDataset(
        root=root_path,
        hop_length=cfg.preprocessing.hop_length,
        sr=cfg.preprocessing.sr,
        sample_frames=cfg.training.sample_frames)
    dataloader = DataLoader(
        dataset,
        batch_size=cfg.training.batch_size,
        shuffle=True,
        num_workers=cfg.training.n_workers,
        pin_memory=True,
        drop_last=True)
    # Translate the step budget into whole epochs; skip completed epochs
    # when resuming.
    n_epochs = cfg.training.n_steps // len(dataloader) + 1
    start_epoch = global_step // len(dataloader) + 1
    for epoch in range(start_epoch, n_epochs + 1):
        average_recon_loss = average_vq_loss = average_perplexity = 0
        for i, (audio, mels, speakers) in enumerate(tqdm(dataloader), 1):
            audio, mels, speakers = audio.to(device), mels.to(device), speakers.to(device)
            optimizer.zero_grad()
            z, vq_loss, perplexity = encoder(mels)
            # Teacher forcing: predict sample t+1 from samples <= t.
            output = decoder(audio[:, :-1], z, speakers)
            recon_loss = F.cross_entropy(output.transpose(1, 2), audio[:, 1:])
            loss = recon_loss + vq_loss
            # AMP loss scaling, then clip the unscaled master gradients.
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
            torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), 1)
            optimizer.step()
            # NOTE(review): stepped per batch, so scheduler milestones are
            # in units of optimisation steps, not epochs -- confirm config.
            scheduler.step()
            # Streaming means over the current epoch.
            average_recon_loss += (recon_loss.item() - average_recon_loss) / i
            average_vq_loss += (vq_loss.item() - average_vq_loss) / i
            average_perplexity += (perplexity.item() - average_perplexity) / i
            global_step += 1
            if global_step % cfg.training.checkpoint_interval == 0:
                save_checkpoint(
                    encoder, decoder, optimizer, amp,
                    scheduler, global_step, checkpoint_dir)
        writer.add_scalar("recon_loss/train", average_recon_loss, global_step)
        writer.add_scalar("vq_loss/train", average_vq_loss, global_step)
        writer.add_scalar("average_perplexity", average_perplexity, global_step)
        print("epoch:{}, recon loss:{:.2E}, vq loss:{:.2E}, perpexlity:{:.3f}"
              .format(epoch, average_recon_loss, average_vq_loss, average_perplexity))

if __name__ == "__main__":
    train_model()
| 4,714 | 37.647541 | 97 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.