Search is not available for this dataset
repo stringlengths 2 152 ⌀ | file stringlengths 15 239 | code stringlengths 0 58.4M | file_length int64 0 58.4M | avg_line_length float64 0 1.81M | max_line_length int64 0 12.7M | extension_type stringclasses 364 values |
|---|---|---|---|---|---|---|
null | qimera-main/pytorchcv/models/wrn.py | """
WRN for ImageNet-1K, implemented in PyTorch.
Original paper: 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
"""
__all__ = ['WRN', 'wrn50_2']
import os
import torch.nn as nn
import torch.nn.init as init
class WRNConv(nn.Module):
    """
    WRN specific convolution block: a biased convolution, optionally followed
    by an in-place ReLU.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    activate : bool
        Whether to apply ReLU after the convolution.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 activate):
        super(WRNConv, self).__init__()
        self.activate = activate
        # Bias is kept because this block carries no batch normalization.
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            bias=True)
        if activate:
            self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        y = self.conv(x)
        return self.activ(y) if self.activate else y
def wrn_conv1x1(in_channels,
                out_channels,
                stride,
                activate):
    """
    Build a pointwise (1x1, zero padding) WRN convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    activate : bool
        Whether to apply ReLU after the convolution.
    """
    return WRNConv(
        in_channels,
        out_channels,
        kernel_size=1,
        stride=stride,
        padding=0,
        activate=activate)
def wrn_conv3x3(in_channels,
                out_channels,
                stride,
                activate):
    """
    Build a 3x3 (padding 1) WRN convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    activate : bool
        Whether to apply ReLU after the convolution.
    """
    return WRNConv(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=stride,
        padding=1,
        activate=activate)
class WRNBottleneck(nn.Module):
    """
    WRN bottleneck: 1x1 reduce -> 3x3 (strided) -> 1x1 expand, used as the
    residual branch of a WRN unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    width_factor : float
        Wide scale factor for width of layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 width_factor):
        super(WRNBottleneck, self).__init__()
        # Widen the bottleneck (ResNet would use out_channels // 4).
        squeezed = int(round(out_channels // 4 * width_factor))
        self.conv1 = wrn_conv1x1(
            in_channels=in_channels,
            out_channels=squeezed,
            stride=1,
            activate=True)
        self.conv2 = wrn_conv3x3(
            in_channels=squeezed,
            out_channels=squeezed,
            stride=stride,
            activate=True)
        # Last conv is linear; the unit applies ReLU after the addition.
        self.conv3 = wrn_conv1x1(
            in_channels=squeezed,
            out_channels=out_channels,
            stride=1,
            activate=False)

    def forward(self, x):
        for conv in (self.conv1, self.conv2, self.conv3):
            x = conv(x)
        return x
class WRNUnit(nn.Module):
    """
    WRN residual unit: bottleneck body plus identity shortcut (a projection
    1x1 conv when the shape changes).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    width_factor : float
        Wide scale factor for width of layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 width_factor):
        super(WRNUnit, self).__init__()
        # A projection is needed whenever channels or spatial size change.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        self.body = WRNBottleneck(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            width_factor=width_factor)
        if self.resize_identity:
            self.identity_conv = wrn_conv1x1(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                activate=False)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = self.identity_conv(x) if self.resize_identity else x
        out = self.body(x) + identity
        return self.activ(out)
class WRNInitBlock(nn.Module):
    """
    WRN stem: 7x7/2 convolution with ReLU, followed by 3x3/2 max pooling
    (overall 4x spatial reduction).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels):
        super(WRNInitBlock, self).__init__()
        self.conv = WRNConv(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=7,
            stride=2,
            padding=3,
            activate=True)
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        return self.pool(self.conv(x))
class WRN(nn.Module):
    """
    WRN model from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    width_factor : float
        Wide scale factor for width of layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 width_factor,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(WRN, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        # NOTE: submodule names ("init_block", "stage{i}", "unit{j}", ...) are
        # part of the state_dict keys used by pretrained checkpoints.
        self.features = nn.Sequential()
        self.features.add_module("init_block", WRNInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except stage 1.
                stride = 2 if (j == 0) and (i != 0) else 1
                stage.add_module("unit{}".format(j + 1), WRNUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    width_factor=width_factor))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        # 7x7 average pool collapses the final feature map (for 224x224 input).
        self.features.add_module('final_pool', nn.AvgPool2d(
            kernel_size=7,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()

    def _init_params(self):
        # He-uniform initialization for all convolutions; zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        # Flatten (N, C, 1, 1) to (N, C) before the classifier.
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_wrn(blocks,
            width_factor,
            model_name=None,
            pretrained=False,
            root=os.path.join("~", ".torch", "models"),
            **kwargs):
    """
    Construct a WRN model for a given depth and width factor.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    width_factor : float
        Wide scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Per-stage unit counts for the supported ResNet-style depths.
    depth_to_layers = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    if blocks not in depth_to_layers:
        raise ValueError("Unsupported WRN with number of blocks: {}".format(blocks))
    layers = depth_to_layers[blocks]
    channels_per_layers = [256, 512, 1024, 2048]
    channels = [[c] * n for (c, n) in zip(channels_per_layers, layers)]
    net = WRN(
        channels=channels,
        init_block_channels=64,
        width_factor=width_factor,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def wrn50_2(**kwargs):
    """
    Construct WRN-50-2 ('Wide Residual Networks,'
    https://arxiv.org/abs/1605.07146).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn(
        blocks=50,
        width_factor=2.0,
        model_name="wrn50_2",
        **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke test: build each model, check the parameter count, and run one
    forward/backward pass on a dummy ImageNet-sized input."""
    import torch
    pretrained = False
    models = [
        wrn50_2,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        # Known parameter count for WRN-50-2 (1000-class ImageNet head).
        assert (model != wrn50_2 or weight_count == 68849128)
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        # Backward pass verifies the graph is differentiable end to end.
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))


if __name__ == "__main__":
    _test()
| 11,401 | 26.474699 | 115 | py |
null | qimera-main/pytorchcv/models/wrn1bit_cifar.py | """
WRN-1bit for CIFAR/SVHN, implemented in PyTorch.
Original paper: 'Training wide residual networks for deployment using a single bit for each weight,'
https://arxiv.org/abs/1802.08530.
"""
__all__ = ['CIFARWRN1bit', 'wrn20_10_1bit_cifar10', 'wrn20_10_1bit_cifar100', 'wrn20_10_1bit_svhn',
'wrn20_10_32bit_cifar10', 'wrn20_10_32bit_cifar100', 'wrn20_10_32bit_svhn']
import os
import math
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
class Binarize(torch.autograd.Function):
    """
    Fake 1-bit quantizer: forward emits the scaled sign of the input, backward
    is a straight-through estimator (gradient passes unchanged).
    """
    @staticmethod
    def forward(ctx, x):
        # Scale by sqrt(2 / fan_in) so binarized weights keep He-style variance.
        fan_in = x.shape[1] * x.shape[2] * x.shape[3]
        return math.sqrt(2.0 / fan_in) * x.sign()

    @staticmethod
    def backward(ctx, dy):
        # Straight-through: the sign op is treated as identity for gradients.
        return dy
class Conv2d1bit(nn.Conv2d):
    """
    nn.Conv2d variant that can binarize its weight (and bias) on the fly via
    the straight-through `Binarize` op.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    binarized : bool, default False
        Whether to use binarization.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding=1,
                 dilation=1,
                 groups=1,
                 bias=False,
                 binarized=False):
        super(Conv2d1bit, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias)
        self.binarized = binarized

    def forward(self, input):
        # Quantize lazily so full-precision master weights keep training.
        w = self.weight
        b = self.bias
        if self.binarized:
            w = Binarize.apply(w)
            if b is not None:
                b = Binarize.apply(b)
        return F.conv2d(
            input=input,
            weight=w,
            bias=b,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            groups=self.groups)
def conv1x1_1bit(in_channels,
                 out_channels,
                 stride=1,
                 groups=1,
                 bias=False,
                 binarized=False):
    """
    Build a pointwise (1x1) optionally-binarized convolution layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    binarized : bool, default False
        Whether to use binarization.
    """
    return Conv2d1bit(
        in_channels,
        out_channels,
        kernel_size=1,
        stride=stride,
        groups=groups,
        bias=bias,
        binarized=binarized)
def conv3x3_1bit(in_channels,
                 out_channels,
                 stride=1,
                 padding=1,
                 dilation=1,
                 groups=1,
                 bias=False,
                 binarized=False):
    """
    Build a 3x3 optionally-binarized convolution layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    binarized : bool, default False
        Whether to use binarization.
    """
    return Conv2d1bit(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias,
        binarized=binarized)
class ConvBlock1bit(nn.Module):
    """
    Convolution + BatchNorm (+ optional ReLU) block with optional weight
    binarization.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    bn_affine : bool, default True
        Whether the BatchNorm layer learns affine parameters.
    activate : bool, default True
        Whether to apply ReLU at the end of the block.
    binarized : bool, default False
        Whether to use binarization.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 groups=1,
                 bias=False,
                 bn_affine=True,
                 activate=True,
                 binarized=False):
        super(ConvBlock1bit, self).__init__()
        self.activate = activate
        self.conv = Conv2d1bit(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias,
            binarized=binarized)
        self.bn = nn.BatchNorm2d(num_features=out_channels, affine=bn_affine)
        if activate:
            self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        y = self.bn(self.conv(x))
        return self.activ(y) if self.activate else y
def conv1x1_block_1bit(in_channels,
                       out_channels,
                       stride=1,
                       padding=0,
                       groups=1,
                       bias=False,
                       bn_affine=True,
                       activate=True,
                       binarized=False):
    """
    Build a pointwise (1x1) conv + BN (+ ReLU) block with optional
    binarization.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 0
        Padding value for convolution layer.
    groups : int, default 1
        Number of groups.
    bias : bool, default False
        Whether the layer uses a bias vector.
    bn_affine : bool, default True
        Whether the BatchNorm layer learns affine parameters.
    activate : bool, default True
        Whether to apply ReLU at the end of the block.
    binarized : bool, default False
        Whether to use binarization.
    """
    return ConvBlock1bit(
        in_channels,
        out_channels,
        kernel_size=1,
        stride=stride,
        padding=padding,
        groups=groups,
        bias=bias,
        bn_affine=bn_affine,
        activate=activate,
        binarized=binarized)
class PreConvBlock1bit(nn.Module):
    """
    Pre-activation convolution block (BN -> ReLU -> conv) with optional
    binarization. Can additionally return the pre-activation tensor.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    bn_affine : bool, default True
        Whether the BatchNorm layer learns affine parameters.
    return_preact : bool, default False
        Whether to also return the pre-activation (used by PreResNet).
    activate : bool, default True
        Whether to apply ReLU before the convolution.
    binarized : bool, default False
        Whether to use binarization.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 bias=False,
                 bn_affine=True,
                 return_preact=False,
                 activate=True,
                 binarized=False):
        super(PreConvBlock1bit, self).__init__()
        self.return_preact = return_preact
        self.activate = activate
        self.bn = nn.BatchNorm2d(num_features=in_channels, affine=bn_affine)
        if activate:
            self.activ = nn.ReLU(inplace=True)
        self.conv = Conv2d1bit(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding=padding,
            dilation=dilation,
            bias=bias,
            binarized=binarized)

    def forward(self, x):
        x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        if self.return_preact:
            # Hand back the activated tensor so a unit can branch a shortcut
            # off it before the convolution.
            return self.conv(x), x
        return self.conv(x)
def pre_conv3x3_block_1bit(in_channels,
                           out_channels,
                           stride=1,
                           padding=1,
                           dilation=1,
                           bn_affine=True,
                           return_preact=False,
                           activate=True,
                           binarized=False):
    """
    Build a 3x3 pre-activation (BN -> ReLU -> conv) block with optional
    binarization.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    bn_affine : bool, default True
        Whether the BatchNorm layer learns affine parameters.
    return_preact : bool, default False
        Whether to also return the pre-activation.
    activate : bool, default True
        Whether to apply ReLU before the convolution.
    binarized : bool, default False
        Whether to use binarization.
    """
    return PreConvBlock1bit(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bn_affine=bn_affine,
        return_preact=return_preact,
        activate=activate,
        binarized=binarized)
class PreResBlock1bit(nn.Module):
    """
    Residual branch of a binarized PreResNet unit: two stacked 3x3
    pre-activation convolutions (BN without affine parameters).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    binarized : bool, default False
        Whether to use binarization.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 binarized=False):
        super(PreResBlock1bit, self).__init__()
        self.conv1 = pre_conv3x3_block_1bit(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            bn_affine=False,
            return_preact=False,
            binarized=binarized)
        self.conv2 = pre_conv3x3_block_1bit(
            in_channels=out_channels,
            out_channels=out_channels,
            bn_affine=False,
            binarized=binarized)

    def forward(self, x):
        return self.conv2(self.conv1(x))
class PreResUnit1bit(nn.Module):
    """
    Binarized PreResNet unit. Instead of a projection convolution, a strided
    shortcut is built parameter-free: average pooling plus zero-padding of the
    channel dimension (doubles the channels).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    binarized : bool, default False
        Whether to use binarization.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 binarized=False):
        super(PreResUnit1bit, self).__init__()
        self.resize_identity = (stride != 1)
        self.body = PreResBlock1bit(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            binarized=binarized)
        if self.resize_identity:
            self.identity_pool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        out = self.body(x)
        if self.resize_identity:
            # Parameter-free shortcut: downsample, then pad channels with zeros.
            shortcut = self.identity_pool(x)
            shortcut = torch.cat((shortcut, torch.zeros_like(shortcut)), dim=1)
        else:
            shortcut = x
        return out + shortcut
class PreResActivation(nn.Module):
    """
    Trailing BN -> ReLU pair of a pre-activation ResNet (no convolution);
    used as the final feature block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_affine : bool, default True
        Whether the BatchNorm layer learns affine parameters.
    """
    def __init__(self,
                 in_channels,
                 bn_affine=True):
        super(PreResActivation, self).__init__()
        self.bn = nn.BatchNorm2d(num_features=in_channels, affine=bn_affine)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.activ(self.bn(x))
class CIFARWRN1bit(nn.Module):
    """
    WRN-1bit model for CIFAR from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    binarized : bool, default True
        Whether to use binarization.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    num_classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 binarized=True,
                 in_channels=3,
                 in_size=(32, 32),
                 num_classes=10):
        super(CIFARWRN1bit, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        # NOTE: submodule names ("init_block", "stage{i}", ...) form the
        # state_dict keys expected by pretrained checkpoints.
        self.features = nn.Sequential()
        self.features.add_module("init_block", conv3x3_1bit(
            in_channels=in_channels,
            out_channels=init_block_channels,
            binarized=binarized))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except stage 1.
                stride = 2 if (j == 0) and (i != 0) else 1
                stage.add_module("unit{}".format(j + 1), PreResUnit1bit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    binarized=binarized))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        # Final BN+ReLU; BatchNorm affine parameters are disabled in this model.
        self.features.add_module("post_activ", PreResActivation(
            in_channels=in_channels,
            bn_affine=False))
        # Classifier: a 1x1 (binarizable) conv to num_classes maps, then 8x8
        # global average pooling — no Linear layer.
        self.output = nn.Sequential()
        self.output.add_module("final_conv", conv1x1_block_1bit(
            in_channels=in_channels,
            out_channels=num_classes,
            activate=False,
            binarized=binarized))
        self.output.add_module("final_pool", nn.AvgPool2d(
            kernel_size=8,
            stride=1))
        self._init_params()

    def _init_params(self):
        # He-uniform initialization for all convolutions; zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = self.output(x)
        # Flatten the (N, num_classes, 1, 1) map to (N, num_classes) logits.
        x = x.view(x.size(0), -1)
        return x
def get_wrn1bit_cifar(num_classes,
                      blocks,
                      width_factor,
                      binarized=True,
                      model_name=None,
                      pretrained=False,
                      root=os.path.join("~", ".torch", "models"),
                      **kwargs):
    """
    Construct a WRN-1bit model for CIFAR with the given depth and width.

    Parameters:
    ----------
    num_classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    width_factor : int
        Wide scale factor for width of layers.
    binarized : bool, default True
        Whether to use binarization.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Depth must be of the form 6n + 2: three stages of n two-conv units.
    assert ((blocks - 2) % 6 == 0)
    units_per_stage = (blocks - 2) // 6
    base_channels = [16, 32, 64]
    channels = [[c * width_factor] * units_per_stage for c in base_channels]
    net = CIFARWRN1bit(
        channels=channels,
        init_block_channels=16 * width_factor,
        binarized=binarized,
        num_classes=num_classes,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def wrn20_10_1bit_cifar10(num_classes=10, **kwargs):
    """
    Construct a binarized WRN-20-10 for CIFAR-10 ('Wide Residual Networks,'
    https://arxiv.org/abs/1605.07146).

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn1bit_cifar(
        num_classes=num_classes,
        blocks=20,
        width_factor=10,
        binarized=True,
        model_name="wrn20_10_1bit_cifar10",
        **kwargs)
def wrn20_10_1bit_cifar100(num_classes=100, **kwargs):
    """
    Construct a binarized WRN-20-10 for CIFAR-100 ('Wide Residual Networks,'
    https://arxiv.org/abs/1605.07146).

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn1bit_cifar(
        num_classes=num_classes,
        blocks=20,
        width_factor=10,
        binarized=True,
        model_name="wrn20_10_1bit_cifar100",
        **kwargs)
def wrn20_10_1bit_svhn(num_classes=10, **kwargs):
    """
    Construct a binarized WRN-20-10 for SVHN ('Wide Residual Networks,'
    https://arxiv.org/abs/1605.07146).

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn1bit_cifar(
        num_classes=num_classes,
        blocks=20,
        width_factor=10,
        binarized=True,
        model_name="wrn20_10_1bit_svhn",
        **kwargs)
def wrn20_10_32bit_cifar10(num_classes=10, **kwargs):
    """
    Construct a full-precision (32-bit) WRN-20-10 for CIFAR-10 ('Wide Residual
    Networks,' https://arxiv.org/abs/1605.07146).

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn1bit_cifar(
        num_classes=num_classes,
        blocks=20,
        width_factor=10,
        binarized=False,
        model_name="wrn20_10_32bit_cifar10",
        **kwargs)
def wrn20_10_32bit_cifar100(num_classes=100, **kwargs):
    """
    Construct a full-precision (32-bit) WRN-20-10 for CIFAR-100 ('Wide Residual
    Networks,' https://arxiv.org/abs/1605.07146).

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn1bit_cifar(
        num_classes=num_classes,
        blocks=20,
        width_factor=10,
        binarized=False,
        model_name="wrn20_10_32bit_cifar100",
        **kwargs)
def wrn20_10_32bit_svhn(num_classes=10, **kwargs):
    """
    Construct a full-precision (32-bit) WRN-20-10 for SVHN ('Wide Residual
    Networks,' https://arxiv.org/abs/1605.07146).

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn1bit_cifar(
        num_classes=num_classes,
        blocks=20,
        width_factor=10,
        binarized=False,
        model_name="wrn20_10_32bit_svhn",
        **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke test: build every model variant, check its parameter count, and
    run one forward/backward pass on a dummy CIFAR-sized input."""
    import torch
    pretrained = False
    models = [
        (wrn20_10_1bit_cifar10, 10),
        (wrn20_10_1bit_cifar100, 100),
        (wrn20_10_1bit_svhn, 10),
        (wrn20_10_32bit_cifar10, 10),
        (wrn20_10_32bit_cifar100, 100),
        (wrn20_10_32bit_svhn, 10),
    ]
    for model, num_classes in models:
        net = model(pretrained=pretrained)
        # net.train()
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        # Binarized and 32-bit variants share the same master-weight counts;
        # only the CIFAR-100 head (100-class final conv) differs.
        assert (model != wrn20_10_1bit_cifar10 or weight_count == 26737140)
        assert (model != wrn20_10_1bit_cifar100 or weight_count == 26794920)
        assert (model != wrn20_10_1bit_svhn or weight_count == 26737140)
        assert (model != wrn20_10_32bit_cifar10 or weight_count == 26737140)
        assert (model != wrn20_10_32bit_cifar100 or weight_count == 26794920)
        assert (model != wrn20_10_32bit_svhn or weight_count == 26737140)
        x = torch.randn(1, 3, 32, 32)
        y = net(x)
        # Backward pass verifies the graph is differentiable end to end.
        y.sum().backward()
        assert (tuple(y.size()) == (1, num_classes))


if __name__ == "__main__":
    _test()
| 24,899 | 30.558935 | 115 | py |
null | qimera-main/pytorchcv/models/wrn_cifar.py | """
WRN for CIFAR/SVHN, implemented in PyTorch.
Original paper: 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
"""
__all__ = ['CIFARWRN', 'wrn16_10_cifar10', 'wrn16_10_cifar100', 'wrn16_10_svhn', 'wrn28_10_cifar10',
'wrn28_10_cifar100', 'wrn28_10_svhn', 'wrn40_8_cifar10', 'wrn40_8_cifar100', 'wrn40_8_svhn']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv3x3
from .preresnet import PreResUnit, PreResActivation
class CIFARWRN(nn.Module):
    """
    WRN model for CIFAR from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    num_classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 in_channels=3,
                 in_size=(32, 32),
                 num_classes=10):
        super(CIFARWRN, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        # NOTE: submodule names ("init_block", "stage{i}", ...) form the
        # state_dict keys expected by pretrained checkpoints.
        self.features = nn.Sequential()
        self.features.add_module("init_block", conv3x3(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except stage 1.
                stride = 2 if (j == 0) and (i != 0) else 1
                # Basic (non-bottleneck) pre-activation units, per the WRN paper.
                stage.add_module("unit{}".format(j + 1), PreResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=stride,
                    bottleneck=False,
                    conv1_stride=False))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        # Final BN+ReLU of the pre-activation architecture, then 8x8 global
        # average pooling (for 32x32 input).
        self.features.add_module("post_activ", PreResActivation(in_channels=in_channels))
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=8,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()

    def _init_params(self):
        # He-uniform initialization for all convolutions; zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        x = self.features(x)
        # Flatten (N, C, 1, 1) to (N, C) before the classifier.
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_wrn_cifar(num_classes,
                  blocks,
                  width_factor,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".torch", "models"),
                  **kwargs):
    """
    Construct a WRN model for CIFAR with the given depth and width.

    Parameters:
    ----------
    num_classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    width_factor : int
        Wide scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Depth must be of the form 6n + 4: three stages of n two-conv units plus
    # the stem and the classifier.
    assert ((blocks - 4) % 6 == 0)
    units_per_stage = (blocks - 4) // 6
    base_channels = [16, 32, 64]
    channels = [[c * width_factor] * units_per_stage for c in base_channels]
    net = CIFARWRN(
        channels=channels,
        init_block_channels=16,
        num_classes=num_classes,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def wrn16_10_cifar10(num_classes=10, **kwargs):
    """
    Construct WRN-16-10 for CIFAR-10 ('Wide Residual Networks,'
    https://arxiv.org/abs/1605.07146).

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn_cifar(
        num_classes=num_classes,
        blocks=16,
        width_factor=10,
        model_name="wrn16_10_cifar10",
        **kwargs)
def wrn16_10_cifar100(num_classes=100, **kwargs):
    """
    16-layer Wide Residual Network (widening factor 10) for CIFAR-100.
    Reference: 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn_cifar(
        num_classes=num_classes,
        blocks=16,
        width_factor=10,
        model_name="wrn16_10_cifar100",
        **kwargs)
def wrn16_10_svhn(num_classes=10, **kwargs):
    """
    16-layer Wide Residual Network (widening factor 10) for SVHN.
    Reference: 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn_cifar(
        num_classes=num_classes,
        blocks=16,
        width_factor=10,
        model_name="wrn16_10_svhn",
        **kwargs)
def wrn28_10_cifar10(num_classes=10, **kwargs):
    """
    28-layer Wide Residual Network (widening factor 10) for CIFAR-10.
    Reference: 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn_cifar(
        num_classes=num_classes,
        blocks=28,
        width_factor=10,
        model_name="wrn28_10_cifar10",
        **kwargs)
def wrn28_10_cifar100(num_classes=100, **kwargs):
    """
    28-layer Wide Residual Network (widening factor 10) for CIFAR-100.
    Reference: 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn_cifar(
        num_classes=num_classes,
        blocks=28,
        width_factor=10,
        model_name="wrn28_10_cifar100",
        **kwargs)
def wrn28_10_svhn(num_classes=10, **kwargs):
    """
    28-layer Wide Residual Network (widening factor 10) for SVHN.
    Reference: 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn_cifar(
        num_classes=num_classes,
        blocks=28,
        width_factor=10,
        model_name="wrn28_10_svhn",
        **kwargs)
def wrn40_8_cifar10(num_classes=10, **kwargs):
    """
    40-layer Wide Residual Network (widening factor 8) for CIFAR-10.
    Reference: 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn_cifar(
        num_classes=num_classes,
        blocks=40,
        width_factor=8,
        model_name="wrn40_8_cifar10",
        **kwargs)
def wrn40_8_cifar100(num_classes=100, **kwargs):
    """
    40-layer Wide Residual Network (widening factor 8) for CIFAR-100.
    Reference: 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn_cifar(
        num_classes=num_classes,
        blocks=40,
        width_factor=8,
        model_name="wrn40_8_cifar100",
        **kwargs)
def wrn40_8_svhn(num_classes=10, **kwargs):
    """
    40-layer Wide Residual Network (widening factor 8) for SVHN.
    Reference: 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_wrn_cifar(
        num_classes=num_classes,
        blocks=40,
        width_factor=8,
        model_name="wrn40_8_svhn",
        **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """
    Smoke-test every CIFAR/SVHN WRN variant: check the trainable-parameter
    count, run a forward/backward pass, and verify the output shape.
    """
    import torch
    pretrained = False
    # (constructor, number of classes) pairs to exercise.
    configs = [
        (wrn16_10_cifar10, 10),
        (wrn16_10_cifar100, 100),
        (wrn16_10_svhn, 10),
        (wrn28_10_cifar10, 10),
        (wrn28_10_cifar100, 100),
        (wrn28_10_svhn, 10),
        (wrn40_8_cifar10, 10),
        (wrn40_8_cifar100, 100),
        (wrn40_8_svhn, 10),
    ]
    # Known parameter counts for each variant.
    expected_sizes = {
        wrn16_10_cifar10: 17116634,
        wrn16_10_cifar100: 17174324,
        wrn16_10_svhn: 17116634,
        wrn28_10_cifar10: 36479194,
        wrn28_10_cifar100: 36536884,
        wrn28_10_svhn: 36479194,
        wrn40_8_cifar10: 35748314,
        wrn40_8_cifar100: 35794484,
        wrn40_8_svhn: 35748314,
    }
    for model, num_classes in configs:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_sizes[model])
        x = torch.randn(1, 3, 32, 32)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, num_classes))
# Run the smoke test when this module is executed directly.
if __name__ == "__main__":
    _test()
| 11,329 | 33.126506 | 119 | py |
null | qimera-main/pytorchcv/models/xception.py | """
Xception for ImageNet-1K, implemented in PyTorch.
Original paper: 'Xception: Deep Learning with Depthwise Separable Convolutions,' https://arxiv.org/abs/1610.02357.
"""
__all__ = ['Xception', 'xception']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1_block, conv3x3_block
class DwsConv(nn.Module):
    """
    Depthwise separable convolution: a per-channel (depthwise) convolution
    followed by a 1x1 pointwise convolution that mixes channels.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 0
        Padding value for convolution layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0):
        super(DwsConv, self).__init__()
        # Depthwise stage: groups == in_channels gives one filter per channel.
        self.dw_conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=in_channels,
            bias=False)
        # Pointwise stage: 1x1 convolution mixing channel information.
        self.pw_conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=1,
            bias=False)

    def forward(self, x):
        return self.pw_conv(self.dw_conv(x))
class DwsConvBlock(nn.Module):
    """
    Depthwise separable convolution block with optional ReLU pre-activation
    and trailing batch normalization.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    activate : bool
        Whether activate the convolution block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 activate):
        super(DwsConvBlock, self).__init__()
        self.activate = activate
        if self.activate:
            # Non-inplace ReLU: the same input tensor may feed a residual branch.
            self.activ = nn.ReLU(inplace=False)
        self.conv = DwsConv(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding)
        self.bn = nn.BatchNorm2d(num_features=out_channels)

    def forward(self, x):
        if self.activate:
            x = self.activ(x)
        return self.bn(self.conv(x))
def dws_conv3x3_block(in_channels, out_channels, activate):
    """
    3x3 depthwise separable convolution block (stride 1, padding 1).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    activate : bool
        Whether activate the convolution block.
    """
    return DwsConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=1,
        padding=1,
        activate=activate)
class XceptionUnit(nn.Module):
    """
    Xception unit: a stack of `reps` depthwise-separable 3x3 blocks with a
    residual connection (projected by a 1x1 conv when shape changes).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int
        Strides of the downsample pooling.
    reps : int
        Number of repetitions.
    start_with_relu : bool, default True
        Whether start with ReLU activation.
    grow_first : bool, default True
        Whether the first (rather than the last) block widens the channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride,
                 reps,
                 start_with_relu=True,
                 grow_first=True):
        super(XceptionUnit, self).__init__()
        # The identity branch needs a 1x1 projection when either channel
        # count or spatial size changes across this unit.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        if self.resize_identity:
            self.identity_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                stride=stride,
                activation=None)
        self.body = nn.Sequential()
        for i in range(reps):
            # Exactly one block switches from in_channels to out_channels:
            # the first one if grow_first, otherwise the last one. All other
            # blocks keep the channel count unchanged.
            if (grow_first and (i == 0)) or ((not grow_first) and (i == reps - 1)):
                in_channels_i = in_channels
                out_channels_i = out_channels
            else:
                if grow_first:
                    in_channels_i = out_channels
                    out_channels_i = out_channels
                else:
                    in_channels_i = in_channels
                    out_channels_i = in_channels
            # Only the first block may skip the leading ReLU (stem boundary).
            activate = start_with_relu if (i == 0) else True
            self.body.add_module("block{}".format(i + 1), dws_conv3x3_block(
                in_channels=in_channels_i,
                out_channels=out_channels_i,
                activate=activate))
        # Spatial downsampling is done with max-pooling, not strided convs.
        if stride != 1:
            self.body.add_module("pool", nn.MaxPool2d(
                kernel_size=3,
                stride=stride,
                padding=1))
    def forward(self, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        x = x + identity
        return x
class XceptionInitBlock(nn.Module):
    """
    Xception stem: two unpadded 3x3 convolutions, the first with stride 2,
    producing a 64-channel feature map.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    """
    def __init__(self, in_channels):
        super(XceptionInitBlock, self).__init__()
        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=32,
            stride=2,
            padding=0)
        self.conv2 = conv3x3_block(
            in_channels=32,
            out_channels=64,
            stride=1,
            padding=0)

    def forward(self, x):
        return self.conv2(self.conv1(x))
class XceptionFinalBlock(nn.Module):
    """
    Xception head: two depthwise-separable 3x3 blocks (1024->1536->2048),
    a ReLU, and 10x10 average pooling down to a 1x1 feature map.
    """
    def __init__(self):
        super(XceptionFinalBlock, self).__init__()
        self.conv1 = dws_conv3x3_block(
            in_channels=1024,
            out_channels=1536,
            activate=False)
        self.conv2 = dws_conv3x3_block(
            in_channels=1536,
            out_channels=2048,
            activate=True)
        self.activ = nn.ReLU(inplace=True)
        # With the canonical 299x299 input the feature map here is 10x10.
        self.pool = nn.AvgPool2d(
            kernel_size=10,
            stride=1)

    def forward(self, x):
        x = self.conv2(self.conv1(x))
        return self.pool(self.activ(x))
class Xception(nn.Module):
    """
    Xception model from 'Xception: Deep Learning with Depthwise Separable Convolutions,'
    https://arxiv.org/abs/1610.02357.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (299, 299)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 in_channels=3,
                 in_size=(299, 299),
                 num_classes=1000):
        super(Xception, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module("init_block", XceptionInitBlock(
            in_channels=in_channels))
        # The stem always emits 64 channels.
        in_channels = 64
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            for j, out_channels in enumerate(channels_per_stage):
                # First unit in each stage downsamples (stride 2, 2 reps);
                # the very first unit of the net skips its leading ReLU, and
                # only the net's very last unit grows channels at the end.
                stage.add_module("unit{}".format(j + 1), XceptionUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=(2 if (j == 0) else 1),
                    reps=(2 if (j == 0) else 3),
                    start_with_relu=((i != 0) or (j != 0)),
                    grow_first=((i != len(channels) - 1) or (j != len(channels_per_stage) - 1))))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        self.features.add_module("final_block", XceptionFinalBlock())
        # Final block always emits 2048 channels.
        self.output = nn.Linear(
            in_features=2048,
            out_features=num_classes)
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for every conv weight; zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.features(x)
        # Flatten the 1x1x2048 feature map for the classifier.
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_xception(model_name=None,
                 pretrained=False,
                 root=os.path.join("~", ".torch", "models"),
                 **kwargs):
    """
    Create Xception model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Entry flow (128, 256), nine middle-flow units at 728, exit flow (1024).
    channels = [[128], [256], [728] * 9, [1024]]
    net = Xception(channels=channels, **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def xception(**kwargs):
    """
    The canonical Xception classifier from 'Xception: Deep Learning with
    Depthwise Separable Convolutions,' https://arxiv.org/abs/1610.02357.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_xception(model_name="xception", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """
    Smoke-test the Xception model: parameter count, a forward/backward
    pass, and the output shape.
    """
    import torch
    pretrained = False
    for model in [xception]:
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != xception or weight_count == 22855952)
        x = torch.randn(1, 3, 299, 299)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
# Run the smoke test when this module is executed directly.
if __name__ == "__main__":
    _test()
| 11,572 | 27.717122 | 118 | py |
null | qimera-main/pytorchcv/models/xdensenet.py | """
X-DenseNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
https://arxiv.org/abs/1711.08757.
"""
__all__ = ['XDenseNet', 'xdensenet121_2', 'xdensenet161_2', 'xdensenet169_2', 'xdensenet201_2', 'pre_xconv3x3_block',
'XDenseUnit']
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from .preresnet import PreResInitBlock, PreResActivation
from .densenet import TransitionBlock
class XConv2d(nn.Conv2d):
    """
    Expander ("X") convolution: a standard convolution whose weight is
    elementwise-masked so that each output channel connects to a random
    fixed subset of the input channels (1 / expand_ratio of them).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    groups : int, default 1
        Number of groups.
    expand_ratio : int, default 2
        Ratio of expansion.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 groups=1,
                 expand_ratio=2,
                 **kwargs):
        super(XConv2d, self).__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            groups=groups,
            **kwargs)
        self.expand_ratio = expand_ratio
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size, kernel_size)
        grouped_in_channels = in_channels // groups
        # Fixed (non-trainable) 0/1 connectivity mask, same shape as weight.
        self.mask = torch.nn.Parameter(
            data=torch.Tensor(out_channels, grouped_in_channels, *kernel_size),
            requires_grad=False)
        self.init_parameters()

    def init_parameters(self):
        """Randomly wire each output channel to 1/expand_ratio of the inputs."""
        out_ch, in_ch = self.mask.shape[0], self.mask.shape[1]
        keep = max(in_ch // self.expand_ratio, 1)
        self.mask[:] = 0
        for out_idx in range(out_ch):
            chosen = torch.randperm(in_ch, device=self.mask.device)[:keep]
            self.mask[out_idx, chosen, :, :] = 1

    def forward(self, input):
        # Re-apply the sparsity mask on every call so pruned weights stay zero
        # even after optimizer updates.
        masked_weight = self.weight.mul(self.mask)
        return F.conv2d(
            input=input,
            weight=masked_weight,
            bias=self.bias,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            groups=self.groups)
class PreXConvBlock(nn.Module):
    """
    Expander convolution preceded by batch normalization and (optional) ReLU,
    i.e. the pre-activation ordering bn -> relu -> xconv.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    bias : bool, default False
        Whether the layer uses a bias vector.
    return_preact : bool, default False
        Whether return pre-activation. It's used by PreResNet.
    activate : bool, default True
        Whether activate the convolution block.
    expand_ratio : int, default 2
        Ratio of expansion.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 dilation=1,
                 bias=False,
                 return_preact=False,
                 activate=True,
                 expand_ratio=2):
        super(PreXConvBlock, self).__init__()
        self.return_preact = return_preact
        self.activate = activate
        self.bn = nn.BatchNorm2d(num_features=in_channels)
        if self.activate:
            self.activ = nn.ReLU(inplace=True)
        self.conv = XConv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias,
            expand_ratio=expand_ratio)

    def forward(self, x):
        x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        # Value after bn/relu but before the convolution.
        pre_activ = x
        x = self.conv(x)
        if self.return_preact:
            return x, pre_activ
        return x
def pre_xconv1x1_block(in_channels,
                       out_channels,
                       stride=1,
                       bias=False,
                       return_preact=False,
                       activate=True,
                       expand_ratio=2):
    """
    1x1 pre-activated expander convolution block (no padding).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    bias : bool, default False
        Whether the layer uses a bias vector.
    return_preact : bool, default False
        Whether return pre-activation.
    activate : bool, default True
        Whether activate the convolution block.
    expand_ratio : int, default 2
        Ratio of expansion.
    """
    return PreXConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        stride=stride,
        padding=0,
        bias=bias,
        return_preact=return_preact,
        activate=activate,
        expand_ratio=expand_ratio)
def pre_xconv3x3_block(in_channels,
                       out_channels,
                       stride=1,
                       padding=1,
                       dilation=1,
                       return_preact=False,
                       activate=True,
                       expand_ratio=2):
    """
    3x3 pre-activated expander convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    return_preact : bool, default False
        Whether return pre-activation.
    activate : bool, default True
        Whether activate the convolution block.
    expand_ratio : int, default 2
        Ratio of expansion.
    """
    return PreXConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        return_preact=return_preact,
        activate=activate,
        expand_ratio=expand_ratio)
class XDenseUnit(nn.Module):
    """
    Bottleneck X-DenseNet unit: a 1x1 expander conv into a 3x3 expander conv,
    whose output is concatenated onto the input (dense connectivity).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
    expand_ratio : int
        Ratio of expansion.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 dropout_rate,
                 expand_ratio):
        super(XDenseUnit, self).__init__()
        self.use_dropout = (dropout_rate != 0.0)
        bn_size = 4
        # Channels this unit contributes to the dense stack.
        inc_channels = out_channels - in_channels
        # Bottleneck width: 4x the growth.
        mid_channels = inc_channels * bn_size
        self.conv1 = pre_xconv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            expand_ratio=expand_ratio)
        self.conv2 = pre_xconv3x3_block(
            in_channels=mid_channels,
            out_channels=inc_channels,
            expand_ratio=expand_ratio)
        if self.use_dropout:
            self.dropout = nn.Dropout(p=dropout_rate)

    def forward(self, x):
        new_features = self.conv2(self.conv1(x))
        if self.use_dropout:
            new_features = self.dropout(new_features)
        return torch.cat((x, new_features), dim=1)
class XDenseNet(nn.Module):
    """
    X-DenseNet model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
    https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    dropout_rate : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
    expand_ratio : int, default 2
        Ratio of expansion.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    num_classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 dropout_rate=0.0,
                 expand_ratio=2,
                 in_channels=3,
                 in_size=(224, 224),
                 num_classes=1000):
        super(XDenseNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        self.features = nn.Sequential()
        self.features.add_module("init_block", PreResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            # Each stage after the first starts with a transition that
            # halves both channel count and spatial resolution.
            if i != 0:
                stage.add_module("trans{}".format(i + 1), TransitionBlock(
                    in_channels=in_channels,
                    out_channels=(in_channels // 2)))
                in_channels = in_channels // 2
            for j, out_channels in enumerate(channels_per_stage):
                stage.add_module("unit{}".format(j + 1), XDenseUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    dropout_rate=dropout_rate,
                    expand_ratio=expand_ratio))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        # Pre-activation nets need a final bn+relu before pooling.
        self.features.add_module("post_activ", PreResActivation(in_channels=in_channels))
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=7,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for every conv weight; zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.features(x)
        # Flatten the pooled feature map for the classifier.
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_xdensenet(blocks,
                  expand_ratio=2,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".torch", "models"),
                  **kwargs):
    """
    Create X-DenseNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    expand_ratio : int, default 2
        Ratio of expansion.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    # Per-depth configuration: stem width, growth rate, units per stage.
    if blocks == 121:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 24, 16]
    elif blocks == 161:
        init_block_channels = 96
        growth_rate = 48
        layers = [6, 12, 36, 24]
    elif blocks == 169:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 32, 32]
    elif blocks == 201:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 48, 32]
    else:
        raise ValueError("Unsupported X-DenseNet version with number of layers {}".format(blocks))
    # Running channel bookkeeping: each stage halves the incoming width
    # (transition block) and every unit then adds `growth_rate` channels.
    channels = []
    stage_in = init_block_channels * 2
    for layer_count in layers:
        stage = []
        width = stage_in // 2
        for _ in range(layer_count):
            width += growth_rate
            stage.append(width)
        channels.append(stage)
        stage_in = stage[-1]
    net = XDenseNet(
        channels=channels,
        init_block_channels=init_block_channels,
        expand_ratio=expand_ratio,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def xdensenet121_2(**kwargs):
    """
    121-layer X-DenseNet (expand ratio 2) from 'Deep Expander Networks:
    Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_xdensenet(blocks=121, model_name="xdensenet121_2", **kwargs)
def xdensenet161_2(**kwargs):
    """
    161-layer X-DenseNet (expand ratio 2) from 'Deep Expander Networks:
    Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_xdensenet(blocks=161, model_name="xdensenet161_2", **kwargs)
def xdensenet169_2(**kwargs):
    """
    169-layer X-DenseNet (expand ratio 2) from 'Deep Expander Networks:
    Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_xdensenet(blocks=169, model_name="xdensenet169_2", **kwargs)
def xdensenet201_2(**kwargs):
    """
    201-layer X-DenseNet (expand ratio 2) from 'Deep Expander Networks:
    Efficient Deep Networks from Graph Theory,' https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_xdensenet(blocks=201, model_name="xdensenet201_2", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """
    Smoke-test every X-DenseNet variant: check the trainable-parameter
    count, run a forward/backward pass, and verify the output shape.
    """
    import torch
    pretrained = False
    # Known parameter counts for each variant.
    expected_sizes = {
        xdensenet121_2: 7978856,
        xdensenet161_2: 28681000,
        xdensenet169_2: 14149480,
        xdensenet201_2: 20013928,
    }
    for model in (xdensenet121_2, xdensenet161_2, xdensenet169_2, xdensenet201_2):
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_sizes[model])
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
# Run the smoke test when this module is executed directly.
if __name__ == "__main__":
    _test()
| 16,251 | 30.015267 | 117 | py |
null | qimera-main/pytorchcv/models/xdensenet_cifar.py | """
X-DenseNet for CIFAR/SVHN, implemented in PyTorch.
Original paper: 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
https://arxiv.org/abs/1711.08757.
"""
__all__ = ['CIFARXDenseNet', 'xdensenet40_2_k24_bc_cifar10', 'xdensenet40_2_k24_bc_cifar100',
'xdensenet40_2_k24_bc_svhn', 'xdensenet40_2_k36_bc_cifar10', 'xdensenet40_2_k36_bc_cifar100',
'xdensenet40_2_k36_bc_svhn']
import os
import torch
import torch.nn as nn
import torch.nn.init as init
from .common import conv3x3
from .preresnet import PreResActivation
from .densenet import TransitionBlock
from .xdensenet import pre_xconv3x3_block, XDenseUnit
class XDenseSimpleUnit(nn.Module):
    """
    Non-bottleneck X-DenseNet unit for CIFAR: a single 3x3 expander conv
    whose output is concatenated onto the input (dense connectivity).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
    expand_ratio : int
        Ratio of expansion.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 dropout_rate,
                 expand_ratio):
        super(XDenseSimpleUnit, self).__init__()
        self.use_dropout = (dropout_rate != 0.0)
        # Channels this unit contributes to the dense stack.
        inc_channels = out_channels - in_channels
        self.conv = pre_xconv3x3_block(
            in_channels=in_channels,
            out_channels=inc_channels,
            expand_ratio=expand_ratio)
        if self.use_dropout:
            self.dropout = nn.Dropout(p=dropout_rate)

    def forward(self, x):
        new_features = self.conv(x)
        if self.use_dropout:
            new_features = self.dropout(new_features)
        return torch.cat((x, new_features), dim=1)
class CIFARXDenseNet(nn.Module):
    """
    X-DenseNet model for CIFAR from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
    https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    dropout_rate : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
    expand_ratio : int, default 2
        Ratio of expansion.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    num_classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 dropout_rate=0.0,
                 expand_ratio=2,
                 in_channels=3,
                 in_size=(32, 32),
                 num_classes=10):
        super(CIFARXDenseNet, self).__init__()
        self.in_size = in_size
        self.num_classes = num_classes
        # Bottleneck nets use the 1x1+3x3 unit, plain nets a single 3x3 unit.
        unit_class = XDenseUnit if bottleneck else XDenseSimpleUnit
        self.features = nn.Sequential()
        # CIFAR stem: a single 3x3 convolution (no pooling).
        self.features.add_module("init_block", conv3x3(
            in_channels=in_channels,
            out_channels=init_block_channels))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = nn.Sequential()
            # Each stage after the first starts with a transition that
            # halves both channel count and spatial resolution.
            if i != 0:
                stage.add_module("trans{}".format(i + 1), TransitionBlock(
                    in_channels=in_channels,
                    out_channels=(in_channels // 2)))
                in_channels = in_channels // 2
            for j, out_channels in enumerate(channels_per_stage):
                stage.add_module("unit{}".format(j + 1), unit_class(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    dropout_rate=dropout_rate,
                    expand_ratio=expand_ratio))
                in_channels = out_channels
            self.features.add_module("stage{}".format(i + 1), stage)
        # Pre-activation nets need a final bn+relu before pooling.
        self.features.add_module("post_activ", PreResActivation(in_channels=in_channels))
        self.features.add_module("final_pool", nn.AvgPool2d(
            kernel_size=8,
            stride=1))
        self.output = nn.Linear(
            in_features=in_channels,
            out_features=num_classes)
        self._init_params()
    def _init_params(self):
        # Kaiming-uniform init for every conv weight; zero biases.
        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_uniform_(module.weight)
                if module.bias is not None:
                    init.constant_(module.bias, 0)
    def forward(self, x):
        x = self.features(x)
        # Flatten the pooled feature map for the classifier.
        x = x.view(x.size(0), -1)
        x = self.output(x)
        return x
def get_xdensenet_cifar(num_classes,
                        blocks,
                        growth_rate,
                        bottleneck,
                        expand_ratio=2,
                        model_name=None,
                        pretrained=False,
                        root=os.path.join("~", ".torch", "models"),
                        **kwargs):
    """
    Create X-DenseNet model for CIFAR with specific parameters.

    Parameters:
    ----------
    num_classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    growth_rate : int
        Growth rate.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    expand_ratio : int, default 2
        Ratio of expansion.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    assert (num_classes in [10, 100])
    # Bottleneck units hold two convs each, simple units one; three stages.
    if bottleneck:
        assert ((blocks - 4) % 6 == 0)
        layers = [(blocks - 4) // 6] * 3
    else:
        assert ((blocks - 4) % 3 == 0)
        layers = [(blocks - 4) // 3] * 3
    init_block_channels = 2 * growth_rate
    # Running channel bookkeeping: each stage halves the incoming width
    # (transition block) and every unit then adds `growth_rate` channels.
    channels = []
    stage_in = init_block_channels * 2
    for layer_count in layers:
        stage = []
        width = stage_in // 2
        for _ in range(layer_count):
            width += growth_rate
            stage.append(width)
        channels.append(stage)
        stage_in = stage[-1]
    net = CIFARXDenseNet(
        channels=channels,
        init_block_channels=init_block_channels,
        num_classes=num_classes,
        bottleneck=bottleneck,
        expand_ratio=expand_ratio,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def xdensenet40_2_k24_bc_cifar10(num_classes=10, **kwargs):
    """Build X-DenseNet-BC-40-2 (k=24) for CIFAR-10.

    From 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
    https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_xdensenet_cifar(
        num_classes=num_classes,
        blocks=40,
        growth_rate=24,
        bottleneck=True,
        model_name="xdensenet40_2_k24_bc_cifar10",
        **kwargs)
def xdensenet40_2_k24_bc_cifar100(num_classes=100, **kwargs):
    """Build X-DenseNet-BC-40-2 (k=24) for CIFAR-100.

    From 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
    https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_xdensenet_cifar(
        num_classes=num_classes,
        blocks=40,
        growth_rate=24,
        bottleneck=True,
        model_name="xdensenet40_2_k24_bc_cifar100",
        **kwargs)
def xdensenet40_2_k24_bc_svhn(num_classes=10, **kwargs):
    """Build X-DenseNet-BC-40-2 (k=24) for SVHN.

    From 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
    https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_xdensenet_cifar(
        num_classes=num_classes,
        blocks=40,
        growth_rate=24,
        bottleneck=True,
        model_name="xdensenet40_2_k24_bc_svhn",
        **kwargs)
def xdensenet40_2_k36_bc_cifar10(num_classes=10, **kwargs):
    """Build X-DenseNet-BC-40-2 (k=36) for CIFAR-10.

    From 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
    https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_xdensenet_cifar(
        num_classes=num_classes,
        blocks=40,
        growth_rate=36,
        bottleneck=True,
        model_name="xdensenet40_2_k36_bc_cifar10",
        **kwargs)
def xdensenet40_2_k36_bc_cifar100(num_classes=100, **kwargs):
    """Build X-DenseNet-BC-40-2 (k=36) for CIFAR-100.

    From 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
    https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_xdensenet_cifar(
        num_classes=num_classes,
        blocks=40,
        growth_rate=36,
        bottleneck=True,
        model_name="xdensenet40_2_k36_bc_cifar100",
        **kwargs)
def xdensenet40_2_k36_bc_svhn(num_classes=10, **kwargs):
    """Build X-DenseNet-BC-40-2 (k=36) for SVHN.

    From 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
    https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_xdensenet_cifar(
        num_classes=num_classes,
        blocks=40,
        growth_rate=36,
        bottleneck=True,
        model_name="xdensenet40_2_k36_bc_svhn",
        **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test every model factory: parameter count and output shape."""
    import torch

    pretrained = False
    # model factory -> (num_classes, expected trainable parameter count)
    expected = {
        xdensenet40_2_k24_bc_cifar10: (10, 690346),
        xdensenet40_2_k24_bc_cifar100: (100, 714196),
        xdensenet40_2_k24_bc_svhn: (10, 690346),
        xdensenet40_2_k36_bc_cifar10: (10, 1542682),
        xdensenet40_2_k36_bc_cifar100: (100, 1578412),
        xdensenet40_2_k36_bc_svhn: (10, 1542682),
    }
    for model, (num_classes, param_count) in expected.items():
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == param_count
        x = torch.randn(1, 3, 32, 32)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, num_classes))
# Run the smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 12,852 | 33.831978 | 115 | py |
null | qimera-main/pytorchcv/models/zfnet.py | """
ZFNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Visualizing and Understanding Convolutional Networks,' https://arxiv.org/abs/1311.2901.
"""
__all__ = ['zfnet', 'zfnetb']
import os
from .alexnet import AlexNet
def get_zfnet(version="a",
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".torch", "models"),
              **kwargs):
    """
    Create ZFNet model with specific parameters.

    Parameters:
    ----------
    version : str, default 'a'
        Version of ZFNet ('a' or 'b').
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.

    Raises:
    ------
    ValueError
        If ``version`` is not 'a' or 'b', or ``pretrained`` is requested
        without a ``model_name``.
    """
    # The two variants differ only in the channel widths of the last stage;
    # every other hyper-parameter is shared, so define them once instead of
    # duplicating the whole configuration per branch.
    if version == "a":
        channels = [[96], [256], [384, 384, 256]]
    elif version == "b":
        channels = [[96], [256], [512, 1024, 512]]
    else:
        raise ValueError("Unsupported ZFNet version {}".format(version))
    kernel_sizes = [[7], [5], [3, 3, 3]]
    strides = [[2], [2], [1, 1, 1]]
    paddings = [[1], [0], [1, 1, 1]]
    use_lrn = True

    net = AlexNet(
        channels=channels,
        kernel_sizes=kernel_sizes,
        strides=strides,
        paddings=paddings,
        use_lrn=use_lrn,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_model
        download_model(
            net=net,
            model_name=model_name,
            local_model_store_dir_path=root)

    return net
def zfnet(**kwargs):
    """Build the original ZFNet.

    From 'Visualizing and Understanding Convolutional Networks,'
    https://arxiv.org/abs/1311.2901.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_zfnet(model_name="zfnet", **kwargs)
def zfnetb(**kwargs):
    """Build the wider ZFNet-b variant.

    From 'Visualizing and Understanding Convolutional Networks,'
    https://arxiv.org/abs/1311.2901.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_zfnet(version="b", model_name="zfnetb", **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
    """Smoke-test both ZFNet factories: parameter count and output shape."""
    import torch

    pretrained = False
    # model factory -> expected trainable parameter count
    expected = {
        zfnet: 62357608,
        zfnetb: 107627624,
    }
    for model, param_count in expected.items():
        net = model(pretrained=pretrained)
        net.eval()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == param_count
        x = torch.randn(1, 3, 224, 224)
        y = net(x)
        y.sum().backward()
        assert (tuple(y.size()) == (1, 1000))
# Run the smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 3,659 | 26.727273 | 115 | py |
null | qimera-main/quantization_utils/quant_modules.py | # *
# @file Different utility functions
# Copyright (c) Yaohui Cai, Zhewei Yao, Zhen Dong, Amir Gholami
# All rights reserved.
# This file is part of ZeroQ repository.
#
# ZeroQ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ZeroQ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ZeroQ repository. If not, see <http://www.gnu.org/licenses/>.
# *
import torch
import time
import math
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Module, Parameter
from .quant_utils import *
import sys
class QuantAct(Module):
    """
    Quantize activations with an asymmetric scheme over a tracked range.

    While ``running_stat`` is True, the observed activation range
    (x_min, x_max) is updated from each batch with an exponential moving
    average; ``fix()`` freezes the range for evaluation/deployment.
    """
    def __init__(self,
                 activation_bit,
                 full_precision_flag=False,
                 running_stat=True,
                 beta=0.9):
        """
        activation_bit: bit-setting for activation
        full_precision_flag: full precision or not
        running_stat: determines whether the activation range is updated or froze
        beta: EMA decay factor for the running range statistics
        """
        super(QuantAct, self).__init__()
        self.activation_bit = activation_bit
        self.full_precision_flag = full_precision_flag
        self.running_stat = running_stat
        # Buffers (not Parameters): saved in state_dict and moved by
        # .to()/.cuda(), but excluded from gradient updates.
        self.register_buffer('x_min', torch.zeros(1))
        self.register_buffer('x_max', torch.zeros(1))
        self.register_buffer('beta', torch.Tensor([beta]))
        self.register_buffer('beta_t', torch.ones(1))
        self.act_function = AsymmetricQuantFunction.apply

    def __repr__(self):
        return "{0}(activation_bit={1}, full_precision_flag={2}, running_stat={3}, Act_min: {4:.2f}, Act_max: {5:.2f})".format(
            self.__class__.__name__, self.activation_bit,
            self.full_precision_flag, self.running_stat, self.x_min.item(),
            self.x_max.item())

    def fix(self):
        """
        Freeze the activation range (stop updating the running statistics).
        """
        self.running_stat = False

    def unfix(self):
        """
        Re-enable updating of the running activation range statistics.
        """
        self.running_stat = True

    def forward(self, x):
        """
        Quantize the given activation tensor ``x`` (or pass it through
        unchanged when ``full_precision_flag`` is set).
        """
        if self.running_stat:
            x_min = x.data.min()
            x_max = x.data.max()
            # in-place operation used on multi-gpus
            # self.x_min += -self.x_min + min(self.x_min, x_min)
            # self.x_max += -self.x_max + max(self.x_max, x_max)
            # beta_t accumulates beta^t for bias correction of the EMA.
            # NOTE(review): the bias-corrected value is stored back into
            # x_min/x_max and then decayed again on the next step, which
            # deviates from the standard (Adam-style) correction where the
            # raw EMA is stored and corrected only on read — confirm this
            # is intended.
            self.beta_t = self.beta_t * self.beta
            self.x_min = (self.x_min * self.beta + x_min * (1 - self.beta))/(1 - self.beta_t)
            self.x_max = (self.x_max * self.beta + x_max * (1 - self.beta)) / (1 - self.beta_t)
        if not self.full_precision_flag:
            quant_act = self.act_function(x, self.activation_bit, self.x_min,
                                          self.x_max)
            return quant_act
        else:
            return x
class Quant_Linear(Module):
    """
    Linear layer whose weight matrix is quantized per output row with the
    asymmetric quantizer.
    """
    def __init__(self, weight_bit, full_precision_flag=False):
        """
        weight_bit: bit width used for the weights
        full_precision_flag: if True, bypass quantization entirely
        """
        super(Quant_Linear, self).__init__()
        self.full_precision_flag = full_precision_flag
        self.weight_bit = weight_bit
        self.weight_function = AsymmetricQuantFunction.apply

    def __repr__(self):
        base = super(Quant_Linear, self).__repr__()
        return "(" + base + " weight_bit={}, full_precision_flag={})".format(
            self.weight_bit, self.full_precision_flag)

    def set_param(self, linear):
        """Copy dimensions and (cloned) parameters from an ``nn.Linear``."""
        self.in_features = linear.in_features
        self.out_features = linear.out_features
        self.weight = Parameter(linear.weight.data.clone())
        bias = linear.bias
        self.bias = Parameter(bias.data.clone()) if bias is not None else None

    def forward(self, x):
        """Apply the (optionally quantized) affine transform to ``x``."""
        if self.full_precision_flag:
            return F.linear(x, weight=self.weight, bias=self.bias)
        # Per-output-row quantization range, computed on detached data.
        detached = self.weight.data.detach()
        w = self.weight_function(self.weight, self.weight_bit,
                                 detached.min(dim=1).values,
                                 detached.max(dim=1).values)
        return F.linear(x, weight=w, bias=self.bias)
class Quant_Conv2d(Module):
    """
    Conv2d layer whose weights are quantized per output channel with the
    asymmetric quantizer.
    """
    def __init__(self, weight_bit, full_precision_flag=False):
        """
        weight_bit: bit width used for the weights
        full_precision_flag: if True, bypass quantization entirely
        """
        super(Quant_Conv2d, self).__init__()
        self.full_precision_flag = full_precision_flag
        self.weight_bit = weight_bit
        self.weight_function = AsymmetricQuantFunction.apply

    def __repr__(self):
        base = super(Quant_Conv2d, self).__repr__()
        return "(" + base + " weight_bit={}, full_precision_flag={})".format(
            self.weight_bit, self.full_precision_flag)

    def set_param(self, conv):
        """Copy configuration and (cloned) parameters from an ``nn.Conv2d``."""
        for attr in ("in_channels", "out_channels", "kernel_size", "stride",
                     "padding", "dilation", "groups"):
            setattr(self, attr, getattr(conv, attr))
        self.weight = Parameter(conv.weight.data.clone())
        bias = conv.bias
        self.bias = Parameter(bias.data.clone()) if bias is not None else None

    def forward(self, x):
        """Convolve ``x`` with the (optionally quantized) weights."""
        if self.full_precision_flag:
            w = self.weight
        else:
            # Per-output-channel range over the flattened kernel.
            flat = self.weight.data.contiguous().view(self.out_channels, -1)
            w = self.weight_function(self.weight, self.weight_bit,
                                     flat.min(dim=1).values,
                                     flat.max(dim=1).values)
        return F.conv2d(x, w, self.bias, self.stride, self.padding,
                        self.dilation, self.groups)
| 5,819 | 29.15544 | 121 | py |
null | qimera-main/quantization_utils/quant_utils.py | #*
# @file Different utility functions
# Copyright (c) Yaohui Cai, Zhewei Yao, Zhen Dong, Amir Gholami
# All rights reserved.
# This file is part of ZeroQ repository.
#
# ZeroQ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ZeroQ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ZeroQ repository. If not, see <http://www.gnu.org/licenses/>.
#*
import math
import numpy as np
from torch.autograd import Function, Variable
import torch
def clamp(input, min, max, inplace=False):
    """Clamp ``input`` to the closed interval [min, max].

    When ``inplace`` is True the tensor is modified in place and returned;
    otherwise a clamped copy is returned.
    """
    if not inplace:
        return torch.clamp(input, min, max)
    input.clamp_(min, max)
    return input
def linear_quantize(input, scale, zero_point, inplace=False):
    """Map a floating-point tensor to integer levels.

    input: single-precision tensor to be quantized
    scale: scaling factor for quantization
    zero_point: shift for quantization
    inplace: mutate ``input`` instead of allocating a new tensor
    """
    # Reshape per-channel scale/zero_point so they broadcast against
    # conv weights (O,C,H,W) or linear weights (O,I).
    if input.dim() == 4:
        scale = scale.view(-1, 1, 1, 1)
        zero_point = zero_point.view(-1, 1, 1, 1)
    elif input.dim() == 2:
        scale = scale.view(-1, 1)
        zero_point = zero_point.view(-1, 1)
    if inplace:
        input.mul_(scale).sub_(zero_point).round_()
        return input
    return torch.round(scale * input - zero_point)
def linear_dequantize(input, scale, zero_point, inplace=False):
    """Map integer quantization levels back to fixed-point float values.

    input: integer-valued tensor to be mapped back
    scale: scaling factor used during quantization
    zero_point: shift used during quantization
    inplace: mutate ``input`` instead of allocating a new tensor
    """
    # Reshape per-channel scale/zero_point so they broadcast against
    # conv weights (O,C,H,W) or linear weights (O,I).
    if input.dim() == 4:
        scale = scale.view(-1, 1, 1, 1)
        zero_point = zero_point.view(-1, 1, 1, 1)
    elif input.dim() == 2:
        scale = scale.view(-1, 1)
        zero_point = zero_point.view(-1, 1)
    if inplace:
        input.add_(zero_point).div_(scale)
        return input
    return (input + zero_point) / scale
def asymmetric_linear_quantization_params(num_bits,
                                          saturation_min,
                                          saturation_max,
                                          integral_zero_point=True,
                                          signed=True):
    """Compute the scale and zero point for an asymmetric quantization range.

    saturation_min: lower bound of the quantization range
    saturation_max: upper bound of the quantization range
    integral_zero_point: round the zero point to an integer value
    signed: shift the zero point into the signed integer range
    """
    num_levels = 2**num_bits - 1
    # Clamp guards against a degenerate (zero-width) range.
    scale = num_levels / torch.clamp(saturation_max - saturation_min, min=1e-8)
    zero_point = scale * saturation_min
    if integral_zero_point:
        if isinstance(zero_point, torch.Tensor):
            zero_point = zero_point.round()
        else:
            zero_point = float(round(zero_point))
    if signed:
        zero_point = zero_point + 2**(num_bits - 1)
    return scale, zero_point
class AsymmetricQuantFunction(Function):
    """
    Quantize the given floating-point values with the given range and
    bit-setting, using a straight-through estimator for the backward pass.
    """
    @staticmethod
    def forward(ctx, x, k, x_min=None, x_max=None):
        """
        x: single-precision value to be quantized
        k: bit-setting for x
        x_min: lower bound for quantization range
        x_max: upper bound for quantization range
        """
        scale, zero_point = asymmetric_linear_quantization_params(
            k, x_min, x_max)
        new_quant_x = linear_quantize(x, scale, zero_point, inplace=False)
        # Clamp to the signed k-bit integer range [-2^(k-1), 2^(k-1)-1].
        n = 2**(k - 1)
        new_quant_x = torch.clamp(new_quant_x, -n, n - 1)
        quant_x = linear_dequantize(new_quant_x,
                                    scale,
                                    zero_point,
                                    inplace=False)
        # torch.autograd.Variable is a deprecated no-op wrapper since
        # PyTorch 0.4 (Variable/Tensor merge); return the tensor directly.
        return quant_x

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through estimator: pass the gradient through unchanged;
        # None for the non-tensor inputs (k, x_min, x_max).
        return grad_output, None, None, None
| 5,077 | 35.271429 | 100 | py |
null | qimera-main/utils/__init__.py | from utils.lr_policy import *
from utils.compute import *
from utils.log_print import *
from utils.model_transform import *
# from utils.ifeige import *
null | qimera-main/utils/compute.py | import numpy as np
import math
import torch
__all__ = ["compute_tencrop", "compute_singlecrop", "AverageMeter"]
def compute_tencrop(outputs, labels):
    """Top-1/top-5 error for ten-crop evaluation.

    ``outputs`` stacks 10 crop predictions per image along the batch
    dimension; scores are summed over the 10 crops before ranking.

    Returns (top1_error, 0, top5_error); the middle element is a placeholder
    for a loss value this helper does not compute.
    """
    output_size = outputs.size()
    # '//' (integer division) is required here: '/' produces a float in
    # Python 3 and Tensor.view() rejects non-integer sizes.
    outputs = outputs.view(output_size[0] // 10, 10, output_size[1])
    outputs = outputs.sum(1).squeeze(1)
    # compute top1
    _, pred = outputs.topk(1, 1, True, True)
    pred = pred.t()
    top1_count = pred.eq(labels.data.view(
        1, -1).expand_as(pred)).reshape(-1).float().sum(0)
    top1_error = 100.0 - 100.0 * top1_count / labels.size(0)
    top1_error = float(top1_error.cpu().numpy())
    # compute top5
    _, pred = outputs.topk(5, 1, True, True)
    pred = pred.t()
    top5_count = pred.eq(labels.data.view(
        1, -1).expand_as(pred)).reshape(-1).float().sum(0)
    top5_error = 100.0 - 100.0 * top5_count / labels.size(0)
    top5_error = float(top5_error.cpu().numpy())
    return top1_error, 0, top5_error
def compute_singlecrop(outputs, labels, loss, top5_flag=False, mean_flag=False):
    """Turn network outputs into top-1/top-5 error rates and scalar losses.

    ``outputs``/``loss`` may be a single tensor/loss or a list of them (one
    per head); the return mirrors that structure.  ``mean_flag`` is accepted
    for interface compatibility but unused here.
    """
    with torch.no_grad():
        if isinstance(outputs, list):
            top1_loss, top1_error, top5_error = [], [], []
            for idx, out in enumerate(outputs):
                acc1, acc5 = accuracy(out, labels, topk=(1, 5))
                top1_error.append(100 - acc1)
                top5_error.append(100 - acc5)
                top1_loss.append(loss[idx].item())
        else:
            acc1, acc5 = accuracy(outputs, labels, topk=(1, 5))
            top1_error = 100 - acc1
            top5_error = 100 - acc5
            top1_loss = loss.item()

        if top5_flag:
            return top1_error, top1_loss, top5_error
        return top1_error, top1_loss
def accuracy(output, target, topk=(1,)):
    """Compute top-k accuracy (in percent) for each k in ``topk``."""
    with torch.no_grad():
        k_max = max(topk)
        n_samples = target.size(0)

        _, top_idx = output.topk(k_max, 1, True, True)
        top_idx = top_idx.t()
        # hits[k_row, sample] is True where prediction rank k_row matches.
        hits = top_idx.eq(target.view(1, -1).expand_as(top_idx))

        return [
            hits[:k].reshape(-1).float().sum(0, keepdim=True)
            .mul_(100.0 / n_samples).item()
            for k in topk
        ]
class AverageMeter(object):
    """Track a running (weighted) average of a scalar, e.g. loss or error.

    Attributes:
        val: most recent value passed to ``update``.
        sum: weighted sum of all values seen since the last ``reset``.
        count: total weight seen since the last ``reset``.
        avg: running weighted average, ``sum / count``.

    Note: the original source line for ``avg`` carried trailing extraction
    garbage that made the file unparseable; this rewrite restores it.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` with weight ``n`` (typically the batch size)."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
null | qimera-main/utils/log_print.py | from termcolor import colored
import numpy as np
import datetime
__all__ = ["compute_remain_time", "print_result", "print_weight", "print_grad"]
# Module-level EMA state shared across calls (one estimate per phase).
single_train_time = 0
single_test_time = 0
single_train_iters = 0
single_test_iters = 0


def compute_remain_time(epoch, nEpochs, count, iters, data_time, iter_time, mode="Train"):
    """Estimate total and remaining wall-clock time for the run.

    Keeps an exponential moving average of the per-iteration time for the
    train and test phases (module-level state) and extrapolates over the
    remaining iterations.  Returns (formatted string, total_time, left_time)
    with times in seconds.
    """
    global single_train_time, single_test_time
    global single_train_iters, single_test_iters

    step_time = data_time + iter_time
    if mode == "Train":
        # EMA of the per-iteration train time.
        single_train_time = 0.95 * single_train_time + 0.05 * step_time
        single_train_iters = iters
        train_left_iter = (single_train_iters - count
                           + (nEpochs - epoch - 1) * single_train_iters)
        test_left_iter = (nEpochs - epoch) * single_test_iters
    else:
        # EMA of the per-iteration test time.
        single_test_time = 0.95 * single_test_time + 0.05 * step_time
        single_test_iters = iters
        train_left_iter = (nEpochs - epoch - 1) * single_train_iters
        test_left_iter = (single_test_iters - count
                          + (nEpochs - epoch - 1) * single_test_iters)

    left_time = (single_train_time * train_left_iter
                 + single_test_time * test_left_iter)
    total_time = (single_train_time * single_train_iters
                  + single_test_time * single_test_iters) * nEpochs
    time_str = "TTime: {}, RTime: {}".format(datetime.timedelta(seconds=total_time),
                                             datetime.timedelta(seconds=left_time))
    return time_str, total_time, left_time
def print_result(epoch, nEpochs, count, iters, lr, data_time, iter_time, error, loss, top5error=None,
                 mode="Train", logger=None):
    """Log one progress line (errors, losses, timing estimate) via ``logger``.

    ``error``/``loss``/``top5error`` may be scalars or per-head sequences.
    Returns (total_time, left_time) from ``compute_remain_time``.
    """
    log_str = ">>> {}: [{:0>3d}|{:0>3d}], Iter: [{:0>3d}|{:0>3d}], LR: {:.6f}, DataTime: {:.4f}, IterTime: {:.4f}, ".format(
        mode, epoch + 1, nEpochs, count, iters, lr, data_time, iter_time)

    if isinstance(error, (list, np.ndarray)):
        for i, err in enumerate(error):
            log_str += "Error_{:d}: {:.4f}, Loss_{:d}: {:.4f}, ".format(i, err, i, loss[i])
    else:
        log_str += "Error: {:.4f}, Loss: {:.4f}, ".format(error, loss)

    if top5error is not None:
        if isinstance(top5error, (list, np.ndarray)):
            for i, err5 in enumerate(top5error):
                log_str += " Top5_Error_{:d}: {:.4f}, ".format(i, err5)
        else:
            log_str += " Top5_Error: {:.4f}, ".format(top5error)

    time_str, total_time, left_time = compute_remain_time(epoch, nEpochs, count, iters, data_time, iter_time, mode)
    logger.info(log_str + time_str)
    return total_time, left_time
def print_weight(layers, logger):
    # Debug helper: dump the weights (and mask, for linear layers) of a
    # quantized layer to the logger.
    # NOTE(review): `MD` is not imported anywhere in this module, so calling
    # this function raises NameError.  It appears to be leftover debugging
    # code from a version that imported a models module as `MD` — confirm
    # the intended import before using.
    if isinstance(layers, MD.qConv2d):
        logger.info(layers.weight)
    elif isinstance(layers, MD.qLinear):
        logger.info(layers.weight)
        logger.info(layers.weight_mask)
    logger.info("------------------------------------")
def print_grad(m, logger):
    # Debug helper: dump the weight data of a quantized linear layer.
    # NOTE(review): like print_weight above, this references `MD`, which is
    # never imported in this module, so calling it raises NameError —
    # confirm the intended import before using.
    if isinstance(m, MD.qLinear):
        logger.info(m.weight.data)
| 3,280 | 38.53012 | 124 | py |
null | qimera-main/utils/lr_policy.py | """
class LRPolicy
"""
import math
__all__ = ["LRPolicy"]
class LRPolicy:
    """
    Learning-rate schedule.

    Supported policies: multi_step, step, linear, exp, inv, const.
    Call ``set_params`` once with the policy-specific parameters, then
    ``get_lr(epoch)`` to obtain the rate for each (0-based) epoch.
    """
    def __init__(self, lr, n_epochs, lr_policy="multi_step"):
        self.lr_policy = lr_policy
        self.params_dict = {}
        self.n_epochs = n_epochs
        self.base_lr = lr
        self.lr = lr

    def set_params(self, params_dict=None):
        """
        Store and pre-compute the parameters of the chosen lr policy.
        """
        if self.lr_policy == "multi_step":
            """
            params: decay_rate, step
            """
            self.params_dict['decay_rate'] = params_dict['decay_rate']
            self.params_dict['step'] = sorted(params_dict['step'])
            # Steps given as fractions (all <= 1) are interpreted relative
            # to the total epoch count and converted to absolute epochs.
            if max(self.params_dict['step']) <= 1:
                new_step_list = []
                for ratio in self.params_dict['step']:
                    new_step_list.append(int(self.n_epochs * ratio))
                self.params_dict['step'] = new_step_list
        elif self.lr_policy == "step":
            """
            params: end_lr, step
            step: lr = base_lr*gamma^(floor(iter/step))
            """
            self.params_dict['end_lr'] = params_dict['end_lr']
            self.params_dict['step'] = params_dict['step']
            max_iter = math.floor((self.n_epochs - 1.0) /
                                  self.params_dict['step'])
            # end_lr == -1 means "use an explicit decay_rate" instead of
            # deriving gamma from the target final learning rate.
            if self.params_dict['end_lr'] == -1:
                self.params_dict['gamma'] = params_dict['decay_rate']
            else:
                self.params_dict['gamma'] = math.pow(
                    self.params_dict['end_lr'] / self.base_lr, 1. / max_iter)
        elif self.lr_policy == "linear":
            """
            params: end_lr, step
            """
            self.params_dict['end_lr'] = params_dict['end_lr']
            self.params_dict['step'] = params_dict['step']
        elif self.lr_policy == "exp":
            """
            params: end_lr
            exp: lr = base_lr*gamma^iter
            """
            self.params_dict['end_lr'] = params_dict['end_lr']
            # gamma chosen so that lr reaches end_lr at the final epoch.
            self.params_dict['gamma'] = math.pow(
                self.params_dict['end_lr'] / self.base_lr, 1. / (self.n_epochs - 1))
        elif self.lr_policy == "inv":
            """
            params: end_lr
            inv: lr = base_lr*(1+gamma*iter)^(-power)
            """
            self.params_dict['end_lr'] = params_dict['end_lr']
            self.params_dict['power'] = params_dict['power']
            # gamma chosen so that lr reaches end_lr at the final epoch.
            self.params_dict['gamma'] = (math.pow(
                self.base_lr / self.params_dict['end_lr'],
                1. / self.params_dict['power']) - 1.) / (self.n_epochs - 1.)
        elif self.lr_policy == "const":
            """
            no params
            const: lr = base_lr
            """
            self.params_dict = None
        else:
            assert False, "invalid lr_policy" + self.lr_policy

    def get_lr(self, epoch):
        """
        Return (and remember in ``self.lr``) the learning rate for ``epoch``.
        """
        if self.lr_policy == "multi_step":
            # Decay once for every milestone already passed.
            gamma = 0
            for step in self.params_dict['step']:
                if epoch + 1.0 > step:
                    gamma += 1
            lr = self.base_lr * math.pow(self.params_dict['decay_rate'], gamma)
        elif self.lr_policy == "step":
            lr = self.base_lr * \
                math.pow(self.params_dict['gamma'], math.floor(
                    epoch * 1.0 / self.params_dict['step']))
        elif self.lr_policy == "linear":
            # Piecewise-linear interpolation from base_lr to end_lr in
            # ceil(n_epochs/step) equal stages.
            k = (self.params_dict['end_lr'] - self.base_lr) / \
                math.ceil(self.n_epochs / self.params_dict['step'])
            lr = k * math.ceil((epoch + 1) /
                               self.params_dict['step']) + self.base_lr
        elif self.lr_policy == "inv":
            lr = self.base_lr * \
                math.pow(
                    1 + self.params_dict['gamma'] * epoch, -self.params_dict['power'])
        elif self.lr_policy == "exp":
            # power = math.floor((epoch + 1) / self.params_dict['step'])
            # lr = self.base_lr * math.pow(self.params_dict['gamma'], power)
            lr = self.base_lr * math.pow(self.params_dict['gamma'], epoch)
        elif self.lr_policy == "const":
            lr = self.base_lr
        else:
            assert False, "invalid lr_policy: " + self.lr_policy
        self.lr = lr
        return lr
| 4,374 | 32.396947 | 86 | py |
null | qimera-main/utils/model_transform.py | import torch.nn as nn
import torch
import numpy as np
__all__ = ["data_parallel", "model2list",
"list2sequential", "model2state_dict"]
def data_parallel(model, ngpus, gpu0=0):
    """
    Move a model (or a list of models) to GPU, wrapping in DataParallel
    when more than one GPU is requested.
    :params model: target model or list of models
    :params ngpus: number of gpus to use
    :params gpu0: id of the master gpu
    :return: model, type is Module or Sequential or DataParallel
    """
    if ngpus == 0:
        assert False, "only support gpu mode"
    gpu_list = list(range(gpu0, gpu0 + ngpus))
    assert torch.cuda.device_count() >= gpu0 + ngpus, "Invalid Number of GPUs"

    def _to_device(m):
        # Multi-GPU: wrap once; a model already wrapped in DataParallel
        # is passed through unchanged.
        if ngpus >= 2:
            if isinstance(m, nn.DataParallel):
                return m
            return torch.nn.DataParallel(m, gpu_list).cuda()
        return m.cuda()

    if isinstance(model, list):
        for i in range(len(model)):
            model[i] = _to_device(model[i])
    else:
        model = _to_device(model)
    return model
def model2list(model):
    """
    Convert a DataParallel or Sequential model into a plain list of modules;
    anything else is returned unchanged.
    """
    if isinstance(model, nn.DataParallel):
        return list(model.module)
    if isinstance(model, nn.Sequential):
        return list(model)
    return model
def list2sequential(model):
    """Wrap a list of modules in ``nn.Sequential``; pass anything else through."""
    return nn.Sequential(*model) if isinstance(model, list) else model
def model2state_dict(file_path):
    """Extract the state_dict from a saved checkpoint and store it alongside.

    The checkpoint is expected to be a dict whose 'model' entry holds a full
    nn.Module; when that entry is None the file is reported and skipped.
    """
    checkpoint = torch.load(file_path)
    if checkpoint['model'] is None:
        print((type(checkpoint)))
        print(checkpoint)
        print("skip")
        return
    state_dict = checkpoint['model'].state_dict()
    torch.save(state_dict, file_path.replace('.pth', 'state_dict.pth'))
| 1,928 | 27.791045 | 79 | py |
null | qimera-main/utils/opt_static.py | """
TODO: add doc for module
"""
import torch
__all__ = ["NetOption"]
"""
You can run your script with CUDA_VISIBLE_DEVICES=5,6 python your_script.py
or set the environment variable in the script by os.environ['CUDA_VISIBLE_DEVICES'] = '5,6'
to map GPU 5, 6 to device_ids 0, 1, respectively.
"""
class NetOption(object):
    """Container for all experiment hyper-parameters with their defaults.

    Call ``paramscheck()`` after overriding fields to derive ``save_path``
    and ``nClasses`` from the chosen dataset/network.
    """

    def __init__(self):
        # ------------ General options ----------------------------------------
        self.save_path = ""  # log path
        self.dataPath = "/home/dataset/"  # path for loading data set
        self.dataset = "cifar10"  # options: imagenet | cifar10 | cifar100 | imagenet100 | mnist
        self.manualSeed = 1  # manually set RNG seed
        self.nGPU = 1  # number of GPUs to use by default
        self.GPU = 0  # default gpu to use, options: range(nGPU)

        # ------------- Data options -------------------------------------------
        self.nThreads = 4  # number of data loader threads

        # ------------- Training options ---------------------------------------
        self.testOnly = False  # run on validation set only
        self.tenCrop = False  # Ten-crop testing

        # ---------- Optimization options --------------------------------------
        self.nEpochs = 200  # number of total epochs to train
        self.batchSize = 128  # mini-batch size
        self.momentum = 0.9  # momentum
        self.weightDecay = 1e-4  # weight decay 1e-4
        self.opt_type = "SGD"

        self.lr = 0.1  # initial learning rate
        self.lrPolicy = "multi_step"  # options: multi_step | linear | exp | fixed
        self.power = 1  # power for learning rate policy (inv)
        self.step = [0.6, 0.8]  # step for linear or exp learning rate policy
        self.endlr = 0.001  # final learning rate, oly for "linear lrpolicy"
        self.decayRate = 0.1  # lr decay rate

        # ---------- Model options ---------------------------------------------
        self.netType = "PreResNet"  # options: ResNet | PreResNet | GreedyNet | NIN | LeNet5
        self.experimentID = "refator-test-01"
        self.depth = 20  # resnet depth: (n-2)%6==0
        self.nClasses = 10  # number of classes in the dataset
        self.wideFactor = 1  # wide factor for wide-resnet

        # ---------- Resume or Retrain options ---------------------------------------------
        self.retrain = None  # path to model to retrain with, load model state_dict only
        self.resume = None  # path to directory containing checkpoint, load state_dicts of model and optimizer, as well as training epoch

        # ---------- Visualization options -------------------------------------
        self.drawNetwork = True
        self.drawInterval = 30

        self.torch_version = torch.__version__
        torch_version_split = self.torch_version.split("_")
        self.torch_version = torch_version_split[0]
        # check parameters
        # self.paramscheck()

    def paramscheck(self):
        """Derive save_path and nClasses from the configured options."""
        if self.torch_version != "0.2.0":
            self.drawNetwork = False
            print("|===>DrawNetwork is supported by PyTorch with version: 0.2.0. The used version is ", self.torch_version)

        if self.netType in ["PreResNet", "ResNet"]:
            self.save_path = "log_%s%d_%s_bs%d_lr%0.3f_%s/" % (
                self.netType, self.depth, self.dataset,
                self.batchSize, self.lr, self.experimentID)
        else:
            self.save_path = "log_%s_%s_bs%d_lr%0.3f_%s/" % (
                self.netType, self.dataset,
                self.batchSize, self.lr, self.experimentID)

        if self.dataset in ["cifar10", "mnist"]:
            self.nClasses = 10
        elif self.dataset == "cifar100":
            self.nClasses = 100
        # Fixed: the original `self.dataset == "imagenet" or "thi_imgnet"`
        # was always truthy (a non-empty string literal), which made the
        # imagenet100 branch below unreachable and mis-set nClasses to 1000.
        elif self.dataset in ("imagenet", "thi_imgnet"):
            self.nClasses = 1000
        elif self.dataset == "imagenet100":
            self.nClasses = 100

        if self.depth >= 100:
            self.drawNetwork = False
            print("|===>draw network with depth over 100 layers, skip this step")
| 4,045 | 42.978261 | 137 | py |
null | qimera-main/utils/warmup.py | from torchlearning.mio import MIO
train_dataset = MIO("/home/datasets/imagenet_mio/train/")
test_dataset = MIO("/home/datasets/imagenet_mio/val/")
for i in range(train_dataset.size):
print(i)
train_dataset.fetchone(i)
for i in range(test_dataset.size):
print(i)
test_dataset.fetchone(i) | 304 | 26.727273 | 57 | py |
sacremoses | sacremoses-master/.appveyor.yml | image: Visual Studio 2019
platform: x64
configuration: Release
environment:
global:
# SDK v7.0 MSVC Express 2008's SetEnv.cmd script will fail if the
# /E:ON and /V:ON options are not enabled in the batch script intepreter
# See: http://stackoverflow.com/a/13751649/163740
# C.f. https://github.com/ogrisel/python-appveyor-demo/blob/master/appveyor.yml
CMD_IN_ENV: "cmd /E:ON /V:ON /C .\\appveyor\\run_with_env.cmd"
matrix:
# For Python versions available on Appveyor, see
# https://www.appveyor.com/docs/lang/python
- PYTHON: "C:\\Python36-x64"
- PYTHON: "C:\\Python38-x64"
- PYTHON: "C:\\Python39-x64"
- PYTHON: "C:\\Python310-x64"
install:
# We need wheel installed to build wheels
- "%PYTHON%\\python.exe -m pip install wheel"
- "%PYTHON%\\python.exe -m pip install -r requirements.txt"
build: off
test_script:
# Put your test command here.
# If you don't need to build C extensions on 64-bit Python 3.4,
# you can remove "build.cmd" from the front of the command, as it's
# only needed to support those cases.
# Note that you must use the environment variable %PYTHON% to refer to
# the interpreter you're using - Appveyor does not do anything special
# to put the Python version you want to use on PATH.
- "%PYTHON%\\python.exe -m unittest discover sacremoses/test/ -v"
| 1,352 | 34.605263 | 83 | yml |
sacremoses | sacremoses-master/.travis.yml | language: python
os: linux
dist: focal
addons:
apt:
packages:
- expect-dev
python: # https://docs.travis-ci.com/user/languages/python/
- "3.6"
- "3.8"
- "3.9"
- "pypy3"
- "3.10.1"
cache: pip
install:
- pip install joblib # For parallelization.
- pip install click # For CLI.
- pip install tqdm # For progressbar.
- pip install regex # For regex (simpler access to perluniprops)
script:
- unbuffer python -m unittest discover sacremoses/test/ -v
| 492 | 16.607143 | 68 | yml |
sacremoses | sacremoses-master/CONTRIBUTORS.md | Contributors
====
|Contributor | Pull Request / Issue |
|:-|:-|
| [Bo Li](https://github.com/askender) | #5, #6 |
| [Patrick Düggelin](https://github.com/Patdue) | #9 |
| [Myle Ott](https://github.com/myleott) | #36 |
| [David Harrison](https://github.com/DavidHarrison) | #41, #47 |
| [yannvgn](https://github.com/yannvgn) | #56, #57 |
| [BLKSerene](https://github.com/BLKSerene) | #66 |
| [Shijie Wu](https://github.com/shijie-wu) | #67 |
| [Matt Post](https://github.com/mjpost) | #69 |
| [brandonherzog](https://github.com/brandonherzog) | #72 |
| [Thamme Gowda](https://github.com/thammegowda) | #103 |
| [Yu-Yang Huang](https://github.com/yuyang-huang) | #108 |
| 699 | 40.176471 | 66 | md |
sacremoses | sacremoses-master/README.md | # Sacremoses
[](https://travis-ci.org/alvations/sacremoses)
[](https://ci.appveyor.com/project/alvations/sacremoses)
[](https://pepy.tech/project/sacremoses)
# License
MIT License.
# Install
```
pip install -U sacremoses
```
NOTE: Sacremoses only supports Python 3 now (`sacremoses>=0.0.41`). If you're using Python 2, the last possible version is `sacremoses==0.0.40`.
# Usage (Python)
## Tokenizer and Detokenizer
```python
>>> from sacremoses import MosesTokenizer, MosesDetokenizer
>>> mt = MosesTokenizer(lang='en')
>>> text = 'This, is a sentence with weird\xbb symbols\u2026 appearing everywhere\xbf'
>>> expected_tokenized = 'This , is a sentence with weird \xbb symbols \u2026 appearing everywhere \xbf'
>>> tokenized_text = mt.tokenize(text, return_str=True)
>>> tokenized_text == expected_tokenized
True
>>> mt, md = MosesTokenizer(lang='en'), MosesDetokenizer(lang='en')
>>> sent = "This ain't funny. It's actually hillarious, yet double Ls. | [] < > [ ] & You're gonna shake it off? Don't?"
>>> expected_tokens = ['This', 'ain', ''t', 'funny', '.', 'It', ''s', 'actually', 'hillarious', ',', 'yet', 'double', 'Ls', '.', '|', '[', ']', '<', '>', '[', ']', '&', 'You', ''re', 'gonna', 'shake', 'it', 'off', '?', 'Don', ''t', '?']
>>> expected_detokens = "This ain't funny. It's actually hillarious, yet double Ls. | [] < > [] & You're gonna shake it off? Don't?"
>>> mt.tokenize(sent) == expected_tokens
True
>>> md.detokenize(tokens) == expected_detokens
True
```
## Truecaser
```python
>>> from sacremoses import MosesTruecaser, MosesTokenizer
# Train a new truecaser from a 'big.txt' file.
>>> mtr = MosesTruecaser()
>>> mtok = MosesTokenizer(lang='en')
# Save the truecase model to 'big.truecasemodel' using `save_to`
>> tokenized_docs = [mtok.tokenize(line) for line in open('big.txt')]
>>> mtr.train(tokenized_docs, save_to='big.truecasemodel')
# Save the truecase model to 'big.truecasemodel' after training
# (just in case you forgot to use `save_to`)
>>> mtr = MosesTruecaser()
>>> mtr.train('big.txt')
>>> mtr.save_model('big.truecasemodel')
# Truecase a string after training a model.
>>> mtr = MosesTruecaser()
>>> mtr.train('big.txt')
>>> mtr.truecase("THE ADVENTURES OF SHERLOCK HOLMES")
['the', 'adventures', 'of', 'Sherlock', 'Holmes']
# Loads a model and truecase a string using trained model.
>>> mtr = MosesTruecaser('big.truecasemodel')
>>> mtr.truecase("THE ADVENTURES OF SHERLOCK HOLMES")
['the', 'adventures', 'of', 'Sherlock', 'Holmes']
>>> mtr.truecase("THE ADVENTURES OF SHERLOCK HOLMES", return_str=True)
'the ADVENTURES OF SHERLOCK HOLMES'
>>> mtr.truecase("THE ADVENTURES OF SHERLOCK HOLMES", return_str=True, use_known=True)
'the adventures of Sherlock Holmes'
```
## Normalizer
```python
>>> from sacremoses import MosesPunctNormalizer
>>> mpn = MosesPunctNormalizer()
>>> mpn.normalize('THIS EBOOK IS OTHERWISE PROVIDED TO YOU "AS-IS."')
'THIS EBOOK IS OTHERWISE PROVIDED TO YOU "AS-IS."'
```
# Usage (CLI)
Since version `0.0.42`, the pipeline feature for CLI is introduced, thus there
are global options that should be set first before calling the commands:
- language
- processes
- encoding
- quiet
```shell
$ pip install -U sacremoses>=0.0.42
$ sacremoses --help
Usage: sacremoses [OPTIONS] COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]...
Options:
-l, --language TEXT Use language specific rules when tokenizing
-j, --processes INTEGER No. of processes.
-e, --encoding TEXT Specify encoding of file.
-q, --quiet Disable progress bar.
--version Show the version and exit.
-h, --help Show this message and exit.
Commands:
detokenize
detruecase
normalize
tokenize
train-truecase
truecase
```
## Pipeline
Example to chain the following commands:
- `normalize` with `-c` option to remove control characters.
- `tokenize` with `-a` option for aggressive dash split rules.
- `truecase` with `-a` option to indicate that model is for ASR
- if `big.truemodel` exists, load the model with `-m` option,
- otherwise train a model and save it with `-m` option to `big.truemodel` file.
- save the output to console to the `big.txt.norm.tok.true` file.
```shell
cat big.txt | sacremoses -l en -j 4 \
normalize -c tokenize -a truecase -a -m big.truemodel \
> big.txt.norm.tok.true
```
## Tokenizer
```shell
$ sacremoses tokenize --help
Usage: sacremoses tokenize [OPTIONS]
Options:
-a, --aggressive-dash-splits Triggers dash split rules.
-x, --xml-escape Escape special characters for XML.
-p, --protected-patterns TEXT Specify file with patters to be protected in
tokenisation.
-c, --custom-nb-prefixes TEXT Specify a custom non-breaking prefixes file,
add prefixes to the default ones from the
specified language.
-h, --help Show this message and exit.
$ sacremoses -l en -j 4 tokenize < big.txt > big.txt.tok
100%|██████████████████████████████████| 128457/128457 [00:05<00:00, 24363.39it/s
$ wget https://raw.githubusercontent.com/moses-smt/mosesdecoder/master/scripts/tokenizer/basic-protected-patterns
$ sacremoses -l en -j 4 tokenize -p basic-protected-patterns < big.txt > big.txt.tok
100%|██████████████████████████████████| 128457/128457 [00:05<00:00, 22183.94it/s
```
## Detokenizer
```shell
$ sacremoses detokenize --help
Usage: sacremoses detokenize [OPTIONS]
Options:
-x, --xml-unescape Unescape special characters for XML.
-h, --help Show this message and exit.
$ sacremoses -l en -j 4 detokenize < big.txt.tok > big.txt.tok.detok
100%|██████████████████████████████████| 128457/128457 [00:16<00:00, 7931.26it/s]
```
## Truecase
```shell
$ sacremoses truecase --help
Usage: sacremoses truecase [OPTIONS]
Options:
-m, --modelfile TEXT Filename to save/load the modelfile.
[required]
-a, --is-asr A flag to indicate that model is for ASR.
-p, --possibly-use-first-token Use the first token as part of truecase
training.
-h, --help Show this message and exit.
$ sacremoses -j 4 truecase -m big.model < big.txt.tok > big.txt.tok.true
100%|██████████████████████████████████| 128457/128457 [00:09<00:00, 14257.27it/s]
```
## Detruecase
```shell
$ sacremoses detruecase --help
Usage: sacremoses detruecase [OPTIONS]
Options:
-j, --processes INTEGER No. of processes.
-a, --is-headline Whether the file are headlines.
-e, --encoding TEXT Specify encoding of file.
-h, --help Show this message and exit.
$ sacremoses -j 4 detruecase < big.txt.tok.true > big.txt.tok.true.detrue
100%|█████████████████████████████████| 128457/128457 [00:04<00:00, 26945.16it/s]
```
## Normalize
```shell
$ sacremoses normalize --help
Usage: sacremoses normalize [OPTIONS]
Options:
-q, --normalize-quote-commas Normalize quotations and commas.
-d, --normalize-numbers Normalize number.
-p, --replace-unicode-puncts Replace unicode punctuations BEFORE
normalization.
-c, --remove-control-chars Remove control characters AFTER normalization.
-h, --help Show this message and exit.
$ sacremoses -j 4 normalize < big.txt > big.txt.norm
100%|██████████████████████████████████| 128457/128457 [00:09<00:00, 13096.23it/s]
```
| 7,757 | 33.026316 | 287 | md |
sacremoses | sacremoses-master/SECURITY.md | 1 | 0 | 0 | md | |
sacremoses | sacremoses-master/setup.py | from setuptools import setup
console_scripts = """
[console_scripts]
sacremoses=sacremoses.cli:cli
"""
setup(
name = 'sacremoses',
packages = ['sacremoses'],
version = '0.0.53',
description = 'SacreMoses',
long_description = 'MosesTokenizer in Python',
author = '',
package_data={'sacremoses': ['data/perluniprops/*.txt', 'data/nonbreaking_prefixes/nonbreaking_prefix.*']},
url = 'https://github.com/alvations/sacremoses',
keywords = [],
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires = ['regex', 'click', 'joblib', 'tqdm'],
entry_points=console_scripts,
)
| 705 | 26.153846 | 109 | py |
sacremoses | sacremoses-master/sacremoses/__init__.py | from sacremoses.corpus import *
from sacremoses.tokenize import *
from sacremoses.truecase import *
from sacremoses.normalize import *
# from sacremoses.subwords import *
__version__ = "0.0.41"
| 196 | 20.888889 | 35 | py |
sacremoses | sacremoses-master/sacremoses/__main__.py | #!/usr/bin/env python
if __name__ == "__main__":
from sacremoses.cli import cli
cli()
| 96 | 12.857143 | 34 | py |
sacremoses | sacremoses-master/sacremoses/chinese.py | # -*- coding: utf-8 -*-
from functools import partial
# gbk <-> big5 mappings from Mafan + Jianfan
# https://github.com/hermanschaaf/mafan
# https://code.google.com/archive/p/python-jianfan/
simplified_chinese = (
gbk
) = "\u9515\u7691\u853c\u788d\u7231\u55f3\u5ad2\u7477\u66a7\u972d\u8c19\u94f5\u9e4c\u80ae\u8884\u5965\u5aaa\u9a9c\u9ccc\u575d\u7f62\u94af\u6446\u8d25\u5457\u9881\u529e\u7eca\u94a3\u5e2e\u7ed1\u9551\u8c24\u5265\u9971\u5b9d\u62a5\u9c8d\u9e28\u9f85\u8f88\u8d1d\u94a1\u72c8\u5907\u60eb\u9e4e\u8d32\u951b\u7ef7\u7b14\u6bd5\u6bd9\u5e01\u95ed\u835c\u54d4\u6ed7\u94cb\u7b5a\u8df8\u8fb9\u7f16\u8d2c\u53d8\u8fa9\u8fab\u82c4\u7f0f\u7b3e\u6807\u9aa0\u98d1\u98d9\u9556\u9573\u9cd4\u9cd6\u522b\u762a\u6fd2\u6ee8\u5bbe\u6448\u50a7\u7f24\u69df\u6ba1\u8191\u9554\u9acc\u9b13\u997c\u7980\u62e8\u94b5\u94c2\u9a73\u997d\u94b9\u9e41\u8865\u94b8\u8d22\u53c2\u8695\u6b8b\u60ed\u60e8\u707f\u9a96\u9eea\u82cd\u8231\u4ed3\u6ca7\u5395\u4fa7\u518c\u6d4b\u607b\u5c42\u8be7\u9538\u4faa\u9497\u6400\u63ba\u8749\u998b\u8c17\u7f20\u94f2\u4ea7\u9610\u98a4\u5181\u8c04\u8c36\u8487\u5fcf\u5a75\u9aa3\u89c7\u7985\u9561\u573a\u5c1d\u957f\u507f\u80a0\u5382\u7545\u4f25\u82cc\u6005\u960a\u9cb3\u949e\u8f66\u5f7b\u7817\u5c18\u9648\u886c\u4f27\u8c0c\u6987\u789c\u9f80\u6491\u79f0\u60e9\u8bda\u9a8b\u67a8\u67fd\u94d6\u94db\u75f4\u8fdf\u9a70\u803b\u9f7f\u70bd\u996c\u9e31\u51b2\u51b2\u866b\u5ba0\u94f3\u7574\u8e0c\u7b79\u7ef8\u4fe6\u5e31\u96e0\u6a71\u53a8\u9504\u96cf\u7840\u50a8\u89e6\u5904\u520d\u7ecc\u8e70\u4f20\u948f\u75ae\u95ef\u521b\u6006\u9524\u7f0d\u7eaf\u9e51\u7ef0\u8f8d\u9f8a\u8f9e\u8bcd\u8d50\u9e5a\u806a\u8471\u56f1\u4ece\u4e1b\u82c1\u9aa2\u679e\u51d1\u8f8f\u8e7f\u7a9c\u64ba\u9519\u9509\u9e7e\u8fbe\u54d2\u9791\u5e26\u8d37\u9a80\u7ed0\u62c5\u5355\u90f8\u63b8\u80c6\u60ee\u8bde\u5f39\u6b9a\u8d55\u7605\u7baa\u5f53\u6321\u515a\u8361\u6863\u8c20\u7800\u88c6\u6363\u5c9b\u7977\u5bfc\u76d7\u7118\u706f\u9093\u956b\u654c\u6da4\u9012\u7f14\u7c74\u8bcb\u8c1b\u7ee8\u89cc\u955d\u98a0\u70b9\u57ab\u7535\u5dc5\u94bf\u766b\u9493\u8c03\u94eb\u9cb7\u8c0d\u53e0\u9cbd\u9489\u9876\u952d\u8ba2\u94e4\u4e22\u94e5\u4e1c\u52a8\u680b\u51bb\u5cbd\u9e2b\u7aa6\u728a\u72ec\u8bfb\u8d4c\u9540\u6e0e\u691f\u724d\u7b03\u9ee9\u953b\u65ad\u7f0e\u7c16\u5151\u9
61f\u5bf9\u603c\u9566\u5428\u987f\u949d\u7096\u8db8\u593a\u5815\u94ce\u9e45\u989d\u8bb9\u6076\u997f\u8c14\u57a9\u960f\u8f6d\u9507\u9537\u9e57\u989a\u989b\u9cc4\u8bf6\u513f\u5c14\u9975\u8d30\u8fe9\u94d2\u9e38\u9c95\u53d1\u7f5a\u9600\u73d0\u77fe\u9492\u70e6\u8d29\u996d\u8bbf\u7eba\u94ab\u9c82\u98de\u8bfd\u5e9f\u8d39\u7eef\u9544\u9cb1\u7eb7\u575f\u594b\u6124\u7caa\u507e\u4e30\u67ab\u950b\u98ce\u75af\u51af\u7f1d\u8bbd\u51e4\u6ca3\u80a4\u8f90\u629a\u8f85\u8d4b\u590d\u8d1f\u8ba3\u5987\u7f1a\u51eb\u9a78\u7ec2\u7ecb\u8d59\u9eb8\u9c8b\u9cc6\u9486\u8be5\u9499\u76d6\u8d45\u6746\u8d76\u79c6\u8d63\u5c34\u64c0\u7ec0\u5188\u521a\u94a2\u7eb2\u5c97\u6206\u9550\u777e\u8bf0\u7f1f\u9506\u6401\u9e3d\u9601\u94ec\u4e2a\u7ea5\u9549\u988d\u7ed9\u4e98\u8d53\u7ee0\u9ca0\u9f9a\u5bab\u5de9\u8d21\u94a9\u6c9f\u82df\u6784\u8d2d\u591f\u8bdf\u7f11\u89cf\u86ca\u987e\u8bc2\u6bc2\u94b4\u9522\u9e2a\u9e44\u9e58\u5250\u6302\u9e39\u63b4\u5173\u89c2\u9986\u60ef\u8d2f\u8bd6\u63bc\u9e73\u9ccf\u5e7f\u72b7\u89c4\u5f52\u9f9f\u95fa\u8f68\u8be1\u8d35\u523d\u5326\u523f\u59ab\u6867\u9c91\u9cdc\u8f8a\u6eda\u886e\u7ef2\u9ca7\u9505\u56fd\u8fc7\u57da\u5459\u5e3c\u6901\u8748\u94ea\u9a87\u97e9\u6c49\u961a\u7ed7\u9889\u53f7\u704f\u98a2\u9602\u9e64\u8d3a\u8bc3\u9616\u86ce\u6a2a\u8f70\u9e3f\u7ea2\u9ec9\u8ba7\u836d\u95f3\u9c8e\u58f6\u62a4\u6caa\u6237\u6d52\u9e55\u54d7\u534e\u753b\u5212\u8bdd\u9a85\u6866\u94e7\u6000\u574f\u6b22\u73af\u8fd8\u7f13\u6362\u5524\u75ea\u7115\u6da3\u5942\u7f33\u953e\u9ca9\u9ec4\u8c0e\u9cc7\u6325\u8f89\u6bc1\u8d3f\u79fd\u4f1a\u70e9\u6c47\u8bb3\u8bf2\u7ed8\u8bd9\u835f\u54d5\u6d4d\u7f0b\u73f2\u6656\u8364\u6d51\u8be8\u9984\u960d\u83b7\u8d27\u7978\u94ac\u956c\u51fb\u673a\u79ef\u9965\u8ff9\u8ba5\u9e21\u7ee9\u7f09\u6781\u8f91\u7ea7\u6324\u51e0\u84df\u5242\u6d4e\u8ba1\u8bb0\u9645\u7ee7\u7eaa\u8ba6\u8bd8\u8360\u53fd\u54dc\u9aa5\u7391\u89ca\u9f51\u77f6\u7f81\u867f\u8dfb\u9701\u9c9a\u9cab\u5939\u835a\u988a\u8d3e\u94be\u4ef7\u9a7e\u90cf\u6d43\u94d7\u9553\u86f2\u6b7c\u76d1\u575a\u7b3a\u95f4\u8270\u7f04\u8327\u68c
0\u78b1\u7877\u62e3\u6361\u7b80\u4fed\u51cf\u8350\u69db\u9274\u8df5\u8d31\u89c1\u952e\u8230\u5251\u996f\u6e10\u6e85\u6da7\u8c0f\u7f23\u620b\u622c\u7751\u9e63\u7b15\u9ca3\u97af\u5c06\u6d46\u848b\u6868\u5956\u8bb2\u9171\u7edb\u7f30\u80f6\u6d47\u9a84\u5a07\u6405\u94f0\u77eb\u4fa5\u811a\u997a\u7f34\u7ede\u8f7f\u8f83\u6322\u5ce4\u9e6a\u9c9b\u9636\u8282\u6d01\u7ed3\u8beb\u5c4a\u7596\u988c\u9c92\u7d27\u9526\u4ec5\u8c28\u8fdb\u664b\u70ec\u5c3d\u52b2\u8346\u830e\u537a\u8369\u9991\u7f19\u8d46\u89d0\u9cb8\u60ca\u7ecf\u9888\u9759\u955c\u5f84\u75c9\u7ade\u51c0\u522d\u6cfe\u8ff3\u5f2a\u80eb\u9753\u7ea0\u53a9\u65e7\u9604\u9e20\u9e6b\u9a79\u4e3e\u636e\u952f\u60e7\u5267\u8bb5\u5c66\u6989\u98d3\u949c\u9514\u7aad\u9f83\u9e43\u7ee2\u9529\u954c\u96bd\u89c9\u51b3\u7edd\u8c32\u73cf\u94a7\u519b\u9a8f\u76b2\u5f00\u51ef\u5240\u57b2\u5ffe\u607a\u94e0\u9534\u9f9b\u95f6\u94aa\u94d0\u9897\u58f3\u8bfe\u9a92\u7f02\u8f72\u94b6\u951e\u9894\u57a6\u6073\u9f88\u94ff\u62a0\u5e93\u88e4\u55be\u5757\u4fa9\u90d0\u54d9\u810d\u5bbd\u72ef\u9acb\u77ff\u65f7\u51b5\u8bd3\u8bf3\u909d\u5739\u7ea9\u8d36\u4e8f\u5cbf\u7aa5\u9988\u6e83\u532e\u8489\u6126\u8069\u7bd1\u9603\u951f\u9cb2\u6269\u9614\u86f4\u8721\u814a\u83b1\u6765\u8d56\u5d03\u5f95\u6d9e\u6fd1\u8d49\u7750\u94fc\u765e\u7c41\u84dd\u680f\u62e6\u7bee\u9611\u5170\u6f9c\u8c30\u63fd\u89c8\u61d2\u7f06\u70c2\u6ee5\u5c9a\u6984\u6593\u9567\u8934\u7405\u9606\u9512\u635e\u52b3\u6d9d\u5520\u5d02\u94d1\u94f9\u75e8\u4e50\u9cd3\u956d\u5792\u7c7b\u6cea\u8bd4\u7f27\u7bf1\u72f8\u79bb\u9ca4\u793c\u4e3d\u5389\u52b1\u783e\u5386\u6ca5\u96b6\u4fea\u90e6\u575c\u82c8\u8385\u84e0\u5456\u9026\u9a8a\u7f21\u67a5\u680e\u8f79\u783a\u9502\u9e42\u75a0\u7c9d\u8dde\u96f3\u9ca1\u9ce2\u4fe9\u8054\u83b2\u8fde\u9570\u601c\u6d9f\u5e18\u655b\u8138\u94fe\u604b\u70bc\u7ec3\u8539\u5941\u6f4b\u740f\u6b93\u88e2\u88e3\u9ca2\u7cae\u51c9\u4e24\u8f86\u8c05\u9b49\u7597\u8fbd\u9563\u7f2d\u948c\u9e69\u730e\u4e34\u90bb\u9cde\u51db\u8d41\u853a\u5eea\u6aa9\u8f9a\u8e8f\u9f84\u94c3\u7075\u5cad\u9886\u7eeb\u68c2\u86cf\
u9cae\u998f\u5218\u6d4f\u9a9d\u7efa\u954f\u9e68\u9f99\u804b\u5499\u7b3c\u5784\u62e2\u9647\u830f\u6cf7\u73d1\u680a\u80e7\u783b\u697c\u5a04\u6402\u7bd3\u507b\u848c\u55bd\u5d5d\u9542\u7618\u8027\u877c\u9ac5\u82a6\u5362\u9885\u5e90\u7089\u63b3\u5364\u864f\u9c81\u8d42\u7984\u5f55\u9646\u5786\u64b8\u565c\u95fe\u6cf8\u6e0c\u680c\u6a79\u8f73\u8f82\u8f98\u6c07\u80ea\u9e2c\u9e6d\u823b\u9c88\u5ce6\u631b\u5b6a\u6ee6\u4e71\u8114\u5a08\u683e\u9e3e\u92ae\u62a1\u8f6e\u4f26\u4ed1\u6ca6\u7eb6\u8bba\u56f5\u841d\u7f57\u903b\u9523\u7ba9\u9aa1\u9a86\u7edc\u8366\u7321\u6cfa\u6924\u8136\u9559\u9a74\u5415\u94dd\u4fa3\u5c61\u7f15\u8651\u6ee4\u7eff\u6988\u891b\u950a\u5452\u5988\u739b\u7801\u8682\u9a6c\u9a82\u5417\u551b\u5b37\u6769\u4e70\u9ea6\u5356\u8fc8\u8109\u52a2\u7792\u9992\u86ee\u6ee1\u8c29\u7f26\u9558\u98a1\u9cd7\u732b\u951a\u94c6\u8d38\u9ebd\u6ca1\u9541\u95e8\u95f7\u4eec\u626a\u7116\u61d1\u9494\u9530\u68a6\u772f\u8c1c\u5f25\u89c5\u5e42\u8288\u8c27\u7315\u7962\u7ef5\u7f05\u6e11\u817c\u9efe\u5e99\u7f08\u7f2a\u706d\u60af\u95fd\u95f5\u7f17\u9e23\u94ed\u8c2c\u8c1f\u84e6\u998d\u6b81\u9546\u8c0b\u4ea9\u94bc\u5450\u94a0\u7eb3\u96be\u6320\u8111\u607c\u95f9\u94d9\u8bb7\u9981\u5185\u62df\u817b\u94cc\u9cb5\u64b5\u8f87\u9cb6\u917f\u9e1f\u8311\u8885\u8042\u556e\u954a\u954d\u9667\u8616\u55eb\u989f\u8e51\u67e0\u72de\u5b81\u62e7\u6cde\u82ce\u549b\u804d\u94ae\u7ebd\u8113\u6d53\u519c\u4fac\u54dd\u9a7d\u9495\u8bfa\u50a9\u759f\u6b27\u9e25\u6bb4\u5455\u6ca4\u8bb4\u6004\u74ef\u76d8\u8e52\u5e9e\u629b\u75b1\u8d54\u8f94\u55b7\u9e4f\u7eb0\u7f74\u94cd\u9a97\u8c1d\u9a88\u98d8\u7f25\u9891\u8d2b\u5ad4\u82f9\u51ed\u8bc4\u6cfc\u9887\u948b\u6251\u94fa\u6734\u8c31\u9564\u9568\u6816\u8110\u9f50\u9a91\u5c82\u542f\u6c14\u5f03\u8bab\u8572\u9a90\u7eee\u6864\u789b\u9880\u9883\u9ccd\u7275\u948e\u94c5\u8fc1\u7b7e\u8c26\u94b1\u94b3\u6f5c\u6d45\u8c34\u5811\u4f65\u8368\u60ad\u9a9e\u7f31\u6920\u94a4\u67aa\u545b\u5899\u8537\u5f3a\u62a2\u5af1\u6a2f\u6217\u709d\u9516\u9535\u956a\u7f9f\u8dc4\u9539\u6865\u4e54\u4fa8\u7fd8\u7a8d\u8bee\u8
c2f\u835e\u7f32\u7857\u8df7\u7a83\u60ec\u9532\u7ba7\u94a6\u4eb2\u5bdd\u9513\u8f7b\u6c22\u503e\u9877\u8bf7\u5e86\u63ff\u9cad\u743c\u7a77\u8315\u86f1\u5def\u8d47\u866e\u9cc5\u8d8b\u533a\u8eaf\u9a71\u9f8b\u8bce\u5c96\u9612\u89d1\u9e32\u98a7\u6743\u529d\u8be0\u7efb\u8f81\u94e8\u5374\u9e4a\u786e\u9615\u9619\u60ab\u8ba9\u9976\u6270\u7ed5\u835b\u5a06\u6861\u70ed\u97e7\u8ba4\u7eab\u996a\u8f6b\u8363\u7ed2\u5d58\u877e\u7f1b\u94f7\u98a6\u8f6f\u9510\u86ac\u95f0\u6da6\u6d12\u8428\u98d2\u9cc3\u8d5b\u4f1e\u6bf5\u7cc1\u4e27\u9a9a\u626b\u7f2b\u6da9\u556c\u94ef\u7a51\u6740\u5239\u7eb1\u94e9\u9ca8\u7b5b\u6652\u917e\u5220\u95ea\u9655\u8d61\u7f2e\u8baa\u59d7\u9a9f\u9490\u9cdd\u5892\u4f24\u8d4f\u57a7\u6b87\u89de\u70e7\u7ecd\u8d4a\u6444\u6151\u8bbe\u538d\u6ee0\u7572\u7ec5\u5ba1\u5a76\u80be\u6e17\u8bdc\u8c02\u6e16\u58f0\u7ef3\u80dc\u5e08\u72ee\u6e7f\u8bd7\u65f6\u8680\u5b9e\u8bc6\u9a76\u52bf\u9002\u91ca\u9970\u89c6\u8bd5\u8c25\u57d8\u83b3\u5f11\u8f7c\u8d33\u94c8\u9ca5\u5bff\u517d\u7ef6\u67a2\u8f93\u4e66\u8d4e\u5c5e\u672f\u6811\u7ad6\u6570\u6445\u7ebe\u5e05\u95e9\u53cc\u8c01\u7a0e\u987a\u8bf4\u7855\u70c1\u94c4\u4e1d\u9972\u53ae\u9a77\u7f0c\u9536\u9e36\u8038\u6002\u9882\u8bbc\u8bf5\u64de\u85ae\u998a\u98d5\u953c\u82cf\u8bc9\u8083\u8c21\u7a23\u867d\u968f\u7ee5\u5c81\u8c07\u5b59\u635f\u7b0b\u836a\u72f2\u7f29\u7410\u9501\u5522\u7743\u736d\u631e\u95fc\u94ca\u9cce\u53f0\u6001\u949b\u9c90\u644a\u8d2a\u762b\u6ee9\u575b\u8c2d\u8c08\u53f9\u6619\u94bd\u952c\u9878\u6c64\u70eb\u50a5\u9967\u94f4\u9557\u6d9b\u7ee6\u8ba8\u97ec\u94fd\u817e\u8a8a\u9511\u9898\u4f53\u5c49\u7f07\u9e48\u9617\u6761\u7c9c\u9f86\u9ca6\u8d34\u94c1\u5385\u542c\u70c3\u94dc\u7edf\u6078\u5934\u94ad\u79c3\u56fe\u948d\u56e2\u629f\u9893\u8715\u9968\u8131\u9e35\u9a6e\u9a7c\u692d\u7ba8\u9f0d\u889c\u5a32\u817d\u5f2f\u6e7e\u987d\u4e07\u7ea8\u7efe\u7f51\u8f8b\u97e6\u8fdd\u56f4\u4e3a\u6f4d\u7ef4\u82c7\u4f1f\u4f2a\u7eac\u8c13\u536b\u8bff\u5e0f\u95f1\u6ca9\u6da0\u73ae\u97ea\u709c\u9c94\u6e29\u95fb\u7eb9\u7a33\u95ee\u960c\u74ee\u631d\u8717\u6da1\u7a9
d\u5367\u83b4\u9f8c\u545c\u94a8\u4e4c\u8bec\u65e0\u829c\u5434\u575e\u96fe\u52a1\u8bef\u90ac\u5e91\u6003\u59a9\u9a9b\u9e49\u9e5c\u9521\u727a\u88ad\u4e60\u94e3\u620f\u7ec6\u9969\u960b\u73ba\u89cb\u867e\u8f96\u5ce1\u4fa0\u72ed\u53a6\u5413\u7856\u9c9c\u7ea4\u8d24\u8854\u95f2\u663e\u9669\u73b0\u732e\u53bf\u9985\u7fa1\u5baa\u7ebf\u82cb\u83b6\u85d3\u5c98\u7303\u5a34\u9e47\u75eb\u869d\u7c7c\u8df9\u53a2\u9576\u4e61\u8be6\u54cd\u9879\u8297\u9977\u9aa7\u7f03\u98e8\u8427\u56a3\u9500\u6653\u5578\u54d3\u6f47\u9a81\u7ee1\u67ad\u7bab\u534f\u631f\u643a\u80c1\u8c10\u5199\u6cfb\u8c22\u4eb5\u64b7\u7ec1\u7f2c\u950c\u8845\u5174\u9649\u8365\u51f6\u6c79\u9508\u7ee3\u9990\u9e3a\u865a\u5618\u987b\u8bb8\u53d9\u7eea\u7eed\u8be9\u987c\u8f69\u60ac\u9009\u7663\u7eda\u8c16\u94c9\u955f\u5b66\u8c11\u6cf6\u9cd5\u52cb\u8be2\u5bfb\u9a6f\u8bad\u8baf\u900a\u57d9\u6d54\u9c9f\u538b\u9e26\u9e2d\u54d1\u4e9a\u8bb6\u57ad\u5a05\u6860\u6c29\u9609\u70df\u76d0\u4e25\u5ca9\u989c\u960e\u8273\u538c\u781a\u5f66\u8c1a\u9a8c\u53a3\u8d5d\u4fe8\u5156\u8c33\u6079\u95eb\u917d\u9b47\u990d\u9f39\u9e2f\u6768\u626c\u75a1\u9633\u75d2\u517b\u6837\u7080\u7476\u6447\u5c27\u9065\u7a91\u8c23\u836f\u8f7a\u9e5e\u9cd0\u7237\u9875\u4e1a\u53f6\u9765\u8c12\u90ba\u6654\u70e8\u533b\u94f1\u9890\u9057\u4eea\u8681\u827a\u4ebf\u5fc6\u4e49\u8be3\u8bae\u8c0a\u8bd1\u5f02\u7ece\u8bd2\u5453\u5cc4\u9974\u603f\u9a7f\u7f22\u8f76\u8d3b\u9487\u9552\u9571\u7617\u8223\u836b\u9634\u94f6\u996e\u9690\u94df\u763e\u6a31\u5a74\u9e70\u5e94\u7f28\u83b9\u8424\u8425\u8367\u8747\u8d62\u9896\u8314\u83ba\u8426\u84e5\u6484\u5624\u6ee2\u6f46\u748e\u9e66\u763f\u988f\u7f42\u54df\u62e5\u4f63\u75c8\u8e0a\u548f\u955b\u4f18\u5fe7\u90ae\u94c0\u72b9\u8bf1\u83b8\u94d5\u9c7f\u8206\u9c7c\u6e14\u5a31\u4e0e\u5c7f\u8bed\u72f1\u8a89\u9884\u9a6d\u4f1b\u4fe3\u8c00\u8c15\u84e3\u5d5b\u996b\u9608\u59aa\u7ea1\u89ce\u6b24\u94b0\u9e46\u9e6c\u9f89\u9e33\u6e0a\u8f95\u56ed\u5458\u5706\u7f18\u8fdc\u6a7c\u9e22\u9f0b\u7ea6\u8dc3\u94a5\u7ca4\u60a6\u9605\u94ba\u90e7\u5300\u9668\u8fd0\u8574\u915d\u6655\
u97f5\u90d3\u82b8\u607d\u6120\u7ead\u97eb\u6b92\u6c32\u6742\u707e\u8f7d\u6512\u6682\u8d5e\u74d2\u8db1\u933e\u8d43\u810f\u9a75\u51ff\u67a3\u8d23\u62e9\u5219\u6cfd\u8d5c\u5567\u5e3b\u7ba6\u8d3c\u8c2e\u8d60\u7efc\u7f2f\u8f67\u94e1\u95f8\u6805\u8bc8\u658b\u503a\u6be1\u76cf\u65a9\u8f97\u5d2d\u6808\u6218\u7efd\u8c35\u5f20\u6da8\u5e10\u8d26\u80c0\u8d75\u8bcf\u948a\u86f0\u8f99\u9517\u8fd9\u8c2a\u8f84\u9e67\u8d1e\u9488\u4fa6\u8bca\u9547\u9635\u6d48\u7f1c\u6862\u8f78\u8d48\u796f\u9e29\u6323\u7741\u72f0\u4e89\u5e27\u75c7\u90d1\u8bc1\u8be4\u5ce5\u94b2\u94ee\u7b5d\u7ec7\u804c\u6267\u7eb8\u631a\u63b7\u5e1c\u8d28\u6ede\u9a98\u6809\u6800\u8f75\u8f7e\u8d3d\u9e37\u86f3\u7d77\u8e2c\u8e2f\u89ef\u949f\u7ec8\u79cd\u80bf\u4f17\u953a\u8bcc\u8f74\u76b1\u663c\u9aa4\u7ea3\u7ec9\u732a\u8bf8\u8bdb\u70db\u77a9\u5631\u8d2e\u94f8\u9a7b\u4f2b\u69e0\u94e2\u4e13\u7816\u8f6c\u8d5a\u556d\u9994\u989e\u6869\u5e84\u88c5\u5986\u58ee\u72b6\u9525\u8d58\u5760\u7f00\u9a93\u7f12\u8c06\u51c6\u7740\u6d4a\u8bfc\u956f\u5179\u8d44\u6e0d\u8c18\u7f01\u8f8e\u8d40\u7726\u9531\u9f87\u9cbb\u8e2a\u603b\u7eb5\u506c\u90b9\u8bf9\u9a7a\u9cb0\u8bc5\u7ec4\u955e\u94bb\u7f35\u8e9c\u9cdf\u7ff1\u5e76\u535c\u6c89\u4e11\u6dc0\u8fed\u6597\u8303\u5e72\u768b\u7845\u67dc\u540e\u4f19\u79f8\u6770\u8bc0\u5938\u91cc\u51cc\u4e48\u9709\u637b\u51c4\u6266\u5723\u5c38\u62ac\u6d82\u6d3c\u5582\u6c61\u9528\u54b8\u874e\u5f5d\u6d8c\u6e38\u5401\u5fa1\u613f\u5cb3\u4e91\u7076\u624e\u672d\u7b51\u4e8e\u5fd7\u6ce8\u51cb\u8ba0\u8c2b\u90c4\u52d0\u51fc\u5742\u5785\u57b4\u57ef\u57dd\u82d8\u836c\u836e\u839c\u83bc\u83f0\u85c1\u63f8\u5412\u5423\u5494\u549d\u54b4\u5658\u567c\u56af\u5e5e\u5c99\u5d74\u5f77\u5fbc\u72b8\u72cd\u9980\u9987\u9993\u9995\u6123\u61b7\u61d4\u4e2c\u6e86\u6edf\u6eb7\u6f24\u6f74\u6fb9\u752f\u7e9f\u7ed4\u7ef1\u73c9\u67a7\u684a\u6849\u69d4\u6a65\u8f71\u8f77\u8d4d\u80b7\u80e8\u98da\u7173\u7145\u7198\u610d\u6dfc\u781c\u78d9\u770d\u949a\u94b7\u94d8\u94de\u9503\u950d\u950e\u950f\u9518\u951d\u952a\u952b\u953f\u9545\u954e\u9562\u9565\u9569\u9572\u7a06\u9
e4b\u9e5b\u9e71\u75ac\u75b4\u75d6\u766f\u88e5\u8941\u8022\u98a5\u87a8\u9eb4\u9c85\u9c86\u9c87\u9c9e\u9cb4\u9cba\u9cbc\u9cca\u9ccb\u9cd8\u9cd9\u9792\u97b4\u9f44"
traditional_chinese = (
big5
) = "\u9312\u769a\u85f9\u7919\u611b\u566f\u5b21\u74a6\u66d6\u9744\u8af3\u92a8\u9d6a\u9aaf\u8956\u5967\u5abc\u9a41\u9c32\u58e9\u7f77\u9200\u64fa\u6557\u5504\u9812\u8fa6\u7d46\u9211\u5e6b\u7d81\u938a\u8b17\u525d\u98fd\u5bf6\u5831\u9b91\u9d07\u9f59\u8f29\u8c9d\u92c7\u72fd\u5099\u618a\u9d6f\u8cc1\u931b\u7e43\u7b46\u7562\u6583\u5e63\u9589\u84fd\u55f6\u6f77\u924d\u7bf3\u8e55\u908a\u7de8\u8cb6\u8b8a\u8faf\u8fae\u8290\u7df6\u7c69\u6a19\u9a43\u98ae\u98c6\u93e2\u9463\u9c3e\u9c49\u5225\u765f\u7015\u6ff1\u8cd3\u64ef\u5110\u7e7d\u6ab3\u6baf\u81cf\u944c\u9ad5\u9b22\u9905\u7a1f\u64a5\u7f3d\u9251\u99c1\u9911\u9238\u9d53\u88dc\u923d\u8ca1\u53c3\u8836\u6b98\u615a\u6158\u71e6\u9a42\u9ef2\u84bc\u8259\u5009\u6ec4\u5ec1\u5074\u518a\u6e2c\u60fb\u5c64\u8a6b\u9364\u5115\u91f5\u6519\u647b\u87ec\u995e\u8b92\u7e8f\u93df\u7522\u95e1\u986b\u56c5\u8ac2\u8b96\u8546\u61fa\u5b0b\u9a4f\u8998\u79aa\u9414\u5834\u5617\u9577\u511f\u8178\u5ee0\u66a2\u5000\u8407\u60b5\u95b6\u9be7\u9214\u8eca\u5fb9\u7868\u5875\u9673\u896f\u5096\u8af6\u6aec\u78e3\u9f54\u6490\u7a31\u61f2\u8aa0\u9a01\u68d6\u6a89\u92ee\u943a\u7661\u9072\u99b3\u6065\u9f52\u71be\u98ed\u9d1f\u6c96\u885d\u87f2\u5bf5\u9283\u7587\u8e8a\u7c4c\u7da2\u5114\u5e6c\u8b8e\u6ae5\u5eda\u92e4\u96db\u790e\u5132\u89f8\u8655\u82bb\u7d40\u8e95\u50b3\u91e7\u7621\u95d6\u5275\u6134\u9318\u7d9e\u7d14\u9d89\u7dbd\u8f1f\u9f6a\u8fad\u8a5e\u8cdc\u9dbf\u8070\u8525\u56ea\u5f9e\u53e2\u84ef\u9a44\u6a05\u6e4a\u8f33\u8ea5\u7ac4\u651b\u932f\u92bc\u9e7a\u9054\u5660\u97c3\u5e36\u8cb8\u99d8\u7d3f\u64d4\u55ae\u9132\u64a3\u81bd\u619a\u8a95\u5f48\u6bab\u8ce7\u7649\u7c1e\u7576\u64cb\u9ee8\u8569\u6a94\u8b9c\u78ad\u8960\u6417\u5cf6\u79b1\u5c0e\u76dc\u71fe\u71c8\u9127\u9419\u6575\u6ecc\u905e\u7de0\u7cf4\u8a46\u8ae6\u7d88\u89bf\u93d1\u985b\u9ede\u588a\u96fb\u5dd4\u923f\u7672\u91e3\u8abf\u929a\u9bdb\u8adc\u758a\u9c08\u91d8\u9802\u9320\u8a02\u92cc\u4e1f\u92a9\u6771\u52d5\u68df\u51cd\u5d20\u9d87\u7ac7\u72a2\u7368\u8b80\u8ced\u934d\u7006\u6add\u7258\u7be4\u9ef7\u935b\u65b7\u7dde\u7c6a\u514c\u9
68a\u5c0d\u61df\u9413\u5678\u9813\u920d\u71c9\u8e89\u596a\u58ae\u9438\u9d5d\u984d\u8a1b\u60e1\u9913\u8ae4\u580a\u95bc\u8edb\u92e8\u9354\u9d9a\u984e\u9853\u9c77\u8a92\u5152\u723e\u990c\u8cb3\u9087\u927a\u9d2f\u9b9e\u767c\u7f70\u95a5\u743a\u792c\u91e9\u7169\u8ca9\u98ef\u8a2a\u7d21\u9201\u9b74\u98db\u8ab9\u5ee2\u8cbb\u7dcb\u9428\u9be1\u7d1b\u58b3\u596e\u61a4\u7cde\u50e8\u8c50\u6953\u92d2\u98a8\u760b\u99ae\u7e2b\u8af7\u9cf3\u7043\u819a\u8f3b\u64ab\u8f14\u8ce6\u5fa9\u8ca0\u8a03\u5a66\u7e1b\u9ce7\u99d9\u7d31\u7d3c\u8cfb\u9ea9\u9b92\u9c12\u91d3\u8a72\u9223\u84cb\u8cc5\u687f\u8d95\u7a08\u8d1b\u5c37\u641f\u7d3a\u5ca1\u525b\u92fc\u7db1\u5d17\u6207\u93ac\u776a\u8aa5\u7e1e\u92ef\u64f1\u9d3f\u95a3\u927b\u500b\u7d07\u9398\u6f41\u7d66\u4e99\u8ce1\u7d86\u9bc1\u9f94\u5bae\u978f\u8ca2\u9264\u6e9d\u830d\u69cb\u8cfc\u5920\u8a6c\u7df1\u89af\u8831\u9867\u8a41\u8f42\u9237\u932e\u9d23\u9d60\u9dbb\u526e\u639b\u9d30\u6451\u95dc\u89c0\u9928\u6163\u8cab\u8a7f\u645c\u9e1b\u9c25\u5ee3\u7377\u898f\u6b78\u9f9c\u95a8\u8ecc\u8a6d\u8cb4\u528a\u532d\u528c\u5aaf\u6a9c\u9bad\u9c56\u8f25\u6efe\u889e\u7dc4\u9bc0\u934b\u570b\u904e\u581d\u54bc\u5e57\u69e8\u87c8\u927f\u99ed\u97d3\u6f22\u95de\u7d4e\u9821\u865f\u705d\u9865\u95a1\u9db4\u8cc0\u8a36\u95d4\u8823\u6a6b\u8f5f\u9d3b\u7d05\u9ecc\u8a0c\u8452\u958e\u9c5f\u58fa\u8b77\u6eec\u6236\u6ef8\u9d98\u5629\u83ef\u756b\u5283\u8a71\u9a4a\u6a3a\u93f5\u61f7\u58de\u6b61\u74b0\u9084\u7de9\u63db\u559a\u7613\u7165\u6e19\u5950\u7e6f\u9370\u9bc7\u9ec3\u8b0a\u9c09\u63ee\u8f1d\u6bc0\u8cc4\u7a62\u6703\u71f4\u532f\u8af1\u8aa8\u7e6a\u8a7c\u8588\u5666\u6fae\u7e62\u743f\u6689\u8477\u6e3e\u8ae2\u991b\u95bd\u7372\u8ca8\u798d\u9225\u944a\u64ca\u6a5f\u7a4d\u9951\u8de1\u8b4f\u96de\u7e3e\u7ddd\u6975\u8f2f\u7d1a\u64e0\u5e7e\u858a\u5291\u6fdf\u8a08\u8a18\u969b\u7e7c\u7d00\u8a10\u8a70\u85ba\u5630\u568c\u9a65\u74a3\u89ac\u9f4f\u78ef\u7f88\u8806\u8e8b\u973d\u9c6d\u9bfd\u593e\u83a2\u9830\u8cc8\u9240\u50f9\u99d5\u90df\u6d79\u92cf\u93b5\u87ef\u6bb2\u76e3\u5805\u7b8b\u9593\u8271\u7dd8\u7e6d\u6aa
2\u583f\u9e7c\u63c0\u64bf\u7c21\u5109\u6e1b\u85a6\u6abb\u9452\u8e10\u8ce4\u898b\u9375\u8266\u528d\u991e\u6f38\u6ffa\u6f97\u8aeb\u7e11\u6214\u6229\u77bc\u9dbc\u7b67\u9c39\u97c9\u5c07\u6f3f\u8523\u69f3\u734e\u8b1b\u91ac\u7d73\u97c1\u81a0\u6f86\u9a55\u5b0c\u652a\u9278\u77ef\u50e5\u8173\u9903\u7e73\u7d5e\u8f4e\u8f03\u649f\u5da0\u9de6\u9bab\u968e\u7bc0\u6f54\u7d50\u8aa1\u5c46\u7664\u981c\u9b9a\u7dca\u9326\u50c5\u8b39\u9032\u6649\u71fc\u76e1\u52c1\u834a\u8396\u5df9\u85ce\u9949\u7e09\u8d10\u89b2\u9be8\u9a5a\u7d93\u9838\u975c\u93e1\u5f91\u75d9\u7af6\u51c8\u5244\u6d87\u9015\u5f33\u811b\u975a\u7cfe\u5ec4\u820a\u9b2e\u9ce9\u9df2\u99d2\u8209\u64da\u92f8\u61fc\u5287\u8a4e\u5c68\u6af8\u98b6\u9245\u92e6\u7ab6\u9f5f\u9d51\u7d79\u9308\u942b\u96cb\u89ba\u6c7a\u7d55\u8b4e\u73a8\u921e\u8ecd\u99ff\u76b8\u958b\u51f1\u5274\u584f\u613e\u6137\u93a7\u9347\u9f95\u958c\u9227\u92ac\u9846\u6bbc\u8ab2\u9a0d\u7dd9\u8efb\u9233\u9301\u9837\u58be\u61c7\u9f66\u93d7\u6473\u5eab\u8932\u56b3\u584a\u5108\u9136\u5672\u81be\u5bec\u736a\u9ad6\u7926\u66e0\u6cc1\u8a86\u8a91\u913a\u58d9\u7e8a\u8cba\u8667\u5dcb\u7aba\u994b\u6f70\u5331\u8562\u6192\u8075\u7c23\u95ab\u9315\u9be4\u64f4\u95ca\u8810\u881f\u81d8\u840a\u4f86\u8cf4\u5d0d\u5fa0\u6df6\u7028\u8cda\u775e\u9338\u7669\u7c5f\u85cd\u6b04\u6514\u7c43\u95cc\u862d\u703e\u8b95\u652c\u89bd\u61f6\u7e9c\u721b\u6feb\u5d50\u6b16\u6595\u946d\u8964\u746f\u95ac\u92c3\u6488\u52de\u6f87\u562e\u5d97\u92a0\u9412\u7646\u6a02\u9c33\u9433\u58d8\u985e\u6dda\u8a84\u7e32\u7c6c\u8c8d\u96e2\u9bc9\u79ae\u9e97\u53b2\u52f5\u792b\u6b77\u701d\u96b8\u5137\u9148\u58e2\u85f6\u849e\u863a\u56a6\u9090\u9a6a\u7e2d\u6aea\u6adf\u8f62\u792a\u92f0\u9e1d\u7658\u7cf2\u8e92\u9742\u9c7a\u9c67\u5006\u806f\u84ee\u9023\u942e\u6190\u6f23\u7c3e\u6582\u81c9\u93c8\u6200\u7149\u7df4\u861e\u5969\u7032\u7489\u6bae\u8933\u895d\u9c31\u7ce7\u6dbc\u5169\u8f1b\u8ad2\u9b4e\u7642\u907c\u9410\u7e5a\u91d5\u9def\u7375\u81e8\u9130\u9c57\u51dc\u8cc3\u85fa\u5ee9\u6a81\u8f54\u8eaa\u9f61\u9234\u9748\u5dba\u9818\u7dbe\u6b1e\u87f6\
u9bea\u993e\u5289\u700f\u9a2e\u7db9\u93a6\u9dda\u9f8d\u807e\u56a8\u7c60\u58df\u650f\u96b4\u8622\u7027\u74cf\u6af3\u6727\u7931\u6a13\u5a41\u645f\u7c0d\u50c2\u851e\u560d\u5d81\u93e4\u763a\u802c\u87bb\u9acf\u8606\u76e7\u9871\u5eec\u7210\u64c4\u9e75\u865c\u9b6f\u8cc2\u797f\u9304\u9678\u58da\u64fc\u5695\u95ad\u7018\u6de5\u6ae8\u6ad3\u8f64\u8f05\u8f46\u6c0c\u81da\u9e15\u9dfa\u826b\u9c78\u5dd2\u6523\u5b7f\u7064\u4e82\u81e0\u5b4c\u6b12\u9e1e\u947e\u6384\u8f2a\u502b\u4f96\u6dea\u7db8\u8ad6\u5707\u863f\u7f85\u908f\u947c\u7c6e\u9a3e\u99f1\u7d61\u7296\u7380\u6ffc\u6b0f\u8161\u93cd\u9a62\u5442\u92c1\u4fb6\u5c62\u7e37\u616e\u6ffe\u7da0\u6ada\u8938\u92dd\u5638\u5abd\u746a\u78bc\u879e\u99ac\u7f75\u55ce\u561c\u5b24\u69aa\u8cb7\u9ea5\u8ce3\u9081\u8108\u52f1\u779e\u9945\u883b\u6eff\u8b3e\u7e35\u93dd\u9859\u9c3b\u8c93\u9328\u925a\u8cbf\u9ebc\u6c92\u9382\u9580\u60b6\u5011\u636b\u71dc\u61e3\u9346\u9333\u5922\u7787\u8b0e\u5f4c\u8993\u51aa\u7f8b\u8b10\u737c\u79b0\u7dbf\u7dec\u6fa0\u9766\u9efd\u5edf\u7df2\u7e46\u6ec5\u61ab\u95a9\u9594\u7de1\u9cf4\u9298\u8b2c\u8b28\u9a40\u9943\u6b7f\u93cc\u8b00\u755d\u926c\u5436\u9209\u7d0d\u96e3\u6493\u8166\u60f1\u9b27\u9403\u8a25\u9912\u5167\u64ec\u81a9\u922e\u9be2\u6506\u8f26\u9bf0\u91c0\u9ce5\u8526\u88ca\u8076\u5699\u9477\u93b3\u9689\u8617\u56c1\u9862\u8ea1\u6ab8\u7370\u5be7\u64f0\u6fd8\u82e7\u5680\u8079\u9215\u7d10\u81bf\u6fc3\u8fb2\u5102\u5665\u99d1\u91f9\u8afe\u513a\u7627\u6b50\u9dd7\u6bc6\u5614\u6f1a\u8b33\u616a\u750c\u76e4\u8e63\u9f90\u62cb\u76b0\u8ce0\u8f61\u5674\u9d6c\u7d15\u7f86\u9239\u9a19\u8ade\u99e2\u98c4\u7e39\u983b\u8ca7\u5b2a\u860b\u6191\u8a55\u6f51\u9817\u91d9\u64b2\u92ea\u6a38\u8b5c\u93f7\u9420\u68f2\u81cd\u9f4a\u9a0e\u8c48\u555f\u6c23\u68c4\u8a16\u8604\u9a0f\u7dba\u69bf\u78e7\u980e\u980f\u9c2d\u727d\u91ec\u925b\u9077\u7c3d\u8b19\u9322\u9257\u6f5b\u6dfa\u8b74\u5879\u50c9\u8541\u6173\u9a2b\u7e7e\u69e7\u9210\u69cd\u55c6\u58bb\u8594\u5f37\u6436\u5b19\u6aa3\u6227\u7197\u9306\u93d8\u93f9\u7fa5\u8e4c\u936c\u6a4b\u55ac\u50d1\u7ff9\u7ac5\u8a9a\u8
b59\u854e\u7e70\u78fd\u8e7a\u7aca\u611c\u9365\u7bcb\u6b3d\u89aa\u5be2\u92df\u8f15\u6c2b\u50be\u9803\u8acb\u6176\u64b3\u9bd6\u74ca\u7aae\u7162\u86fa\u5df0\u8cd5\u87e3\u9c0d\u8da8\u5340\u8ec0\u9a45\u9f72\u8a58\u5d87\u95c3\u89b7\u9d1d\u9874\u6b0a\u52f8\u8a6e\u7da3\u8f07\u9293\u537b\u9d72\u78ba\u95cb\u95d5\u6128\u8b93\u9952\u64fe\u7e5e\u8558\u5b08\u6a48\u71b1\u97cc\u8a8d\u7d09\u98ea\u8ed4\u69ae\u7d68\u5db8\u8811\u7e1f\u92a3\u9870\u8edf\u92b3\u8706\u958f\u6f64\u7051\u85a9\u98af\u9c13\u8cfd\u5098\u6bff\u7cdd\u55aa\u9a37\u6383\u7e45\u6f80\u55c7\u92ab\u7a61\u6bba\u524e\u7d17\u93a9\u9bca\u7be9\u66ec\u91c3\u522a\u9583\u965c\u8d0d\u7e55\u8a15\u59cd\u9a38\u91e4\u9c54\u5891\u50b7\u8cde\u5770\u6ba4\u89f4\u71d2\u7d39\u8cd2\u651d\u61fe\u8a2d\u5399\u7044\u756c\u7d33\u5be9\u5b38\u814e\u6ef2\u8a75\u8ad7\u700b\u8072\u7e69\u52dd\u5e2b\u7345\u6fd5\u8a69\u6642\u8755\u5be6\u8b58\u99db\u52e2\u9069\u91cb\u98fe\u8996\u8a66\u8b1a\u5852\u8494\u5f12\u8efe\u8cb0\u9230\u9c23\u58fd\u7378\u7dac\u6a1e\u8f38\u66f8\u8d16\u5c6c\u8853\u6a39\u8c4e\u6578\u6504\u7d13\u5e25\u9582\u96d9\u8ab0\u7a05\u9806\u8aaa\u78a9\u720d\u9460\u7d72\u98fc\u5edd\u99df\u7de6\u9376\u9de5\u8073\u616b\u980c\u8a1f\u8aa6\u64fb\u85ea\u993f\u98bc\u93aa\u8607\u8a34\u8085\u8b16\u7a4c\u96d6\u96a8\u7d8f\u6b72\u8ab6\u5b6b\u640d\u7b4d\u84c0\u733b\u7e2e\u7463\u9396\u55e9\u8127\u737a\u64bb\u95e5\u9248\u9c28\u81fa\u614b\u9226\u9b90\u6524\u8caa\u7671\u7058\u58c7\u8b5a\u8ac7\u5606\u66c7\u926d\u931f\u9807\u6e6f\u71d9\u513b\u9933\u940b\u93dc\u6fe4\u7d73\u8a0e\u97dc\u92f1\u9a30\u8b04\u92bb\u984c\u9ad4\u5c5c\u7df9\u9d5c\u95d0\u689d\u7cf6\u9f60\u9c37\u8cbc\u9435\u5ef3\u807d\u70f4\u9285\u7d71\u615f\u982d\u9204\u79bf\u5716\u91f7\u5718\u6476\u9839\u86fb\u98e9\u812b\u9d15\u99b1\u99dd\u6a62\u7c5c\u9f09\u896a\u5aa7\u8183\u5f4e\u7063\u9811\u842c\u7d08\u7db0\u7db2\u8f1e\u97cb\u9055\u570d\u70ba\u6ff0\u7dad\u8466\u5049\u507d\u7def\u8b02\u885b\u8ac9\u5e43\u95c8\u6e88\u6f7f\u744b\u97d9\u7152\u9baa\u6eab\u805e\u7d0b\u7a69\u554f\u95bf\u7515\u64be\u8778\u6e26\u7aa
9\u81e5\u8435\u9f77\u55da\u93a2\u70cf\u8aa3\u7121\u856a\u5433\u5862\u9727\u52d9\u8aa4\u9114\u5ee1\u61ae\u5af5\u9a16\u9d61\u9da9\u932b\u72a7\u8972\u7fd2\u9291\u6232\u7d30\u993c\u9b29\u74bd\u89a1\u8766\u8f44\u5cfd\u4fe0\u72f9\u5ec8\u5687\u7864\u9bae\u7e96\u8ce2\u929c\u9591\u986f\u96aa\u73fe\u737b\u7e23\u9921\u7fa8\u61b2\u7dda\u83a7\u859f\u861a\u5cf4\u736b\u5afb\u9df4\u7647\u8814\u79c8\u8e9a\u5ec2\u9472\u9109\u8a73\u97ff\u9805\u858c\u9909\u9a64\u7dd7\u9957\u856d\u56c2\u92b7\u66c9\u562f\u5635\u701f\u9a4d\u7d83\u689f\u7c2b\u5354\u633e\u651c\u8105\u8ae7\u5beb\u7009\u8b1d\u893b\u64f7\u7d32\u7e88\u92c5\u91c1\u8208\u9658\u6ece\u5147\u6d36\u92b9\u7e61\u9948\u9d42\u865b\u5653\u9808\u8a31\u6558\u7dd2\u7e8c\u8a61\u980a\u8ed2\u61f8\u9078\u766c\u7d62\u8afc\u9249\u93c7\u5b78\u8b14\u6fa9\u9c48\u52db\u8a62\u5c0b\u99b4\u8a13\u8a0a\u905c\u5864\u6f6f\u9c58\u58d3\u9d09\u9d28\u555e\u4e9e\u8a1d\u57e1\u5a6d\u690f\u6c2c\u95b9\u7159\u9e7d\u56b4\u5dd6\u984f\u95bb\u8277\u53ad\u786f\u5f65\u8afa\u9a57\u53b4\u8d17\u513c\u5157\u8b9e\u61e8\u9586\u91c5\u9b58\u995c\u9f34\u9d26\u694a\u63da\u760d\u967d\u7662\u990a\u6a23\u716c\u7464\u6416\u582f\u9059\u7aaf\u8b20\u85e5\u8efa\u9dc2\u9c29\u723a\u9801\u696d\u8449\u9768\u8b01\u9134\u66c4\u71c1\u91ab\u92a5\u9824\u907a\u5100\u87fb\u85dd\u5104\u61b6\u7fa9\u8a63\u8b70\u8abc\u8b6f\u7570\u7e79\u8a52\u56c8\u5da7\u98f4\u61cc\u9a5b\u7e0a\u8efc\u8cbd\u91d4\u93b0\u943f\u761e\u8264\u852d\u9670\u9280\u98f2\u96b1\u92a6\u766e\u6afb\u5b30\u9df9\u61c9\u7e93\u7469\u87a2\u71df\u7192\u8805\u8d0f\u7a4e\u584b\u9daf\u7e08\u93a3\u6516\u56b6\u7005\u7020\u74d4\u9e1a\u766d\u9826\u7f4c\u55b2\u64c1\u50ad\u7670\u8e34\u8a60\u93de\u512a\u6182\u90f5\u923e\u7336\u8a98\u8555\u92aa\u9b77\u8f3f\u9b5a\u6f01\u5a1b\u8207\u5dbc\u8a9e\u7344\u8b7d\u9810\u99ad\u50b4\u4fc1\u8adb\u8aed\u8577\u5d33\u98eb\u95be\u5ad7\u7d06\u89a6\u6b5f\u923a\u9d52\u9df8\u9f6c\u9d1b\u6df5\u8f45\u5712\u54e1\u5713\u7de3\u9060\u6ade\u9cf6\u9eff\u7d04\u8e8d\u9470\u7cb5\u6085\u95b1\u925e\u9116\u52fb\u9695\u904b\u860a\u919e\u6688\
u97fb\u9106\u8553\u60f2\u614d\u7d1c\u97de\u6b9e\u6c33\u96dc\u707d\u8f09\u6522\u66ab\u8d0a\u74da\u8db2\u93e8\u8d13\u81df\u99d4\u947f\u68d7\u8cac\u64c7\u5247\u6fa4\u8cfe\u5616\u5e58\u7c00\u8cca\u8b56\u8d08\u7d9c\u7e52\u8ecb\u9358\u9598\u67f5\u8a50\u9f4b\u50b5\u6c08\u76de\u65ac\u8f3e\u5d84\u68e7\u6230\u7dbb\u8b6b\u5f35\u6f32\u5e33\u8cec\u8139\u8d99\u8a54\u91d7\u87c4\u8f4d\u937a\u9019\u8b2b\u8f12\u9dd3\u8c9e\u91dd\u5075\u8a3a\u93ae\u9663\u6e5e\u7e1d\u6968\u8eeb\u8cd1\u798e\u9d06\u6399\u775c\u7319\u722d\u5e40\u7665\u912d\u8b49\u8acd\u5d22\u9266\u931a\u7b8f\u7e54\u8077\u57f7\u7d19\u646f\u64f2\u5e5f\u8cea\u6eef\u9a2d\u6adb\u6894\u8ef9\u8f0a\u8d04\u9dd9\u8784\u7e36\u8e93\u8e91\u89f6\u9418\u7d42\u7a2e\u816b\u773e\u937e\u8b05\u8ef8\u76ba\u665d\u9a5f\u7d02\u7e10\u8c6c\u8af8\u8a85\u71ed\u77da\u56d1\u8caf\u9444\u99d0\u4f47\u6ae7\u9296\u5c08\u78da\u8f49\u8cfa\u56c0\u994c\u9873\u6a01\u838a\u88dd\u599d\u58ef\u72c0\u9310\u8d05\u589c\u7db4\u9a05\u7e0b\u8ac4\u6e96\u8457\u6fc1\u8ad1\u9432\u8332\u8cc7\u6f2c\u8aee\u7dc7\u8f1c\u8cb2\u7725\u9319\u9f5c\u9bd4\u8e64\u7e3d\u7e31\u50af\u9112\u8acf\u9a36\u9beb\u8a5b\u7d44\u93c3\u9246\u7e98\u8ea6\u9c52\u7ffa\u4e26\u8514\u6c88\u919c\u6fb1\u53e0\u9b25\u7bc4\u5e79\u81ef\u77fd\u6ac3\u5f8c\u5925\u7a2d\u5091\u8a23\u8a87\u88cf\u6de9\u9ebc\u9ef4\u649a\u6dd2\u6261\u8056\u5c4d\u64e1\u5857\u7aaa\u9935\u6c59\u9341\u9e79\u880d\u5f5c\u6e67\u904a\u7c72\u79a6\u9858\u5dbd\u96f2\u7ac8\u7d2e\u5284\u7bc9\u65bc\u8a8c\u8a3b\u96d5\u8a01\u8b7e\u90e4\u731b\u6c39\u962a\u58df\u5816\u57b5\u588a\u6abe\u8552\u8464\u84e7\u8493\u83c7\u69c1\u6463\u54a4\u551a\u54e2\u565d\u5645\u6485\u5288\u8b14\u8946\u5db4\u810a\u4eff\u50e5\u7341\u9e85\u9918\u9937\u994a\u9962\u695e\u6035\u61cd\u723f\u6f35\u7069\u6df7\u6feb\u7026\u6de1\u5be7\u7cf8\u7d5d\u7dd4\u7449\u6898\u68ec\u6848\u6a70\u6aeb\u8ef2\u8ee4\u8ceb\u8181\u8156\u98c8\u7cca\u7146\u6e9c\u6e63\u6e3a\u78b8\u6efe\u7798\u9208\u9255\u92e3\u92b1\u92e5\u92f6\u9426\u9427\u9369\u9340\u9343\u9307\u9384\u9387\u93bf\u941d\u9465\u9479\u9454\u7a6d\u9
d93\u9da5\u9e0c\u7667\u5c59\u7602\u81d2\u8947\u7e48\u802e\u986c\u87ce\u9eaf\u9b81\u9b83\u9b8e\u9bd7\u9bdd\u9bf4\u9c5d\u9bff\u9c20\u9c35\u9c45\u97bd\u97dd\u9f47"
def convert_chinese(text, from_charset, to_charset):
    """Map each character of *text* from one charset table to the other.

    Characters found in *from_charset* are replaced by the character at
    the same index in *to_charset*; all other characters pass through
    unchanged.

    :param text: input string to convert.
    :param from_charset: string of source characters, index-aligned with
        *to_charset*.
    :param to_charset: string of replacement characters.
    :return: the converted string.
    """
    # Build the mapping once so each character lookup is O(1) instead of an
    # O(len(from_charset)) `str.find` per character (the original loop was
    # O(len(text) * len(from_charset))).  `setdefault` keeps only the FIRST
    # occurrence of a duplicated source character, matching `str.find`.
    mapping = {}
    for src, dst in zip(from_charset, to_charset):
        mapping.setdefault(src, dst)
    return "".join(mapping.get(ch, ch) for ch in text)
# Pre-bound converters: `simplify` maps characters from the Big5 table to
# the index-aligned GBK table; `tradify` is the inverse direction.
# NOTE(review): relies on the `big5`/`gbk` charset strings and
# `functools.partial` being defined/imported earlier in this file — confirm.
simplify = partial(convert_chinese, from_charset=big5, to_charset=gbk)
tradify = partial(convert_chinese, from_charset=gbk, to_charset=big5)
| 28,937 | 1,032.5 | 14,160 | py |
sacremoses | sacremoses-master/sacremoses/cli.py | # -*- coding: utf-8 -*-
import os
from copy import deepcopy
from functools import partial
from functools import update_wrapper
import click
from sacremoses.tokenize import MosesTokenizer, MosesDetokenizer
from sacremoses.truecase import MosesTruecaser, MosesDetruecaser
from sacremoses.normalize import MosesPunctNormalizer
from sacremoses.util import parallelize_preprocess
# Hack to enable Python2.7 to use encoding.
import sys
import warnings
# Python 2 compatibility shim: rebind the builtin `open` to `io.open` so the
# `encoding=` keyword used later in this module works, and nag about upgrading.
if sys.version_info[0] < 3:
    import io

    open = io.open
    warnings.warn(
        str(
            "You should really be using Python3!!! "
            "Tick tock, tick tock, https://pythonclock.org/"
        )
    )

# Shared click context settings: accept both -h and --help everywhere.
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
@click.group(chain=True, context_settings=CONTEXT_SETTINGS)
@click.option(
    "--language", "-l", default="en", help="Use language specific rules when tokenizing"
)
@click.option("--processes", "-j", default=1, help="No. of processes.")
@click.option("--encoding", "-e", default="utf8", help="Specify encoding of file.")
@click.option(
    "--quiet", "-q", is_flag=True, default=False, help="Disable progress bar."
)
@click.version_option()
def cli(language, encoding, processes, quiet):
    """Root command group of the sacremoses CLI.

    `chain=True` lets several subcommands be piped in one invocation; the
    options declared here are shared and reach each subcommand via the
    result-callback pipeline defined in this module.  The body is empty on
    purpose — all work happens in the chained subcommands.
    """
    pass
# TODO: Get rid of this when it's possible.
# https://github.com/alvations/sacremoses/issues/130
# Click 8 renamed `Group.resultcallback` to `result_callback`; pick whichever
# attribute the installed click version provides.
result_callback = cli.resultcallback if int(click.__version__.split('.')[0]) < 8 else cli.result_callback


@result_callback()
def process_pipeline(processors, encoding, **kwargs):
    """Drive the chained subcommands: feed stdin through every processor in
    order and echo the final result to stdout.

    :param processors: partially-applied stream processors, one per
        subcommand given on the command line (built by the `processor`
        decorator below).
    :param encoding: encoding used to read stdin.
    """
    with click.get_text_stream("stdin", encoding=encoding) as fin:
        iterator = fin  # Initialize fin as the first iterator.
        for proc in processors:
            # Each stage is materialized with list() before being handed on
            # to the next processor.
            iterator = proc(list(iterator), **kwargs)
        if iterator:
            for item in iterator:
                click.echo(item)
def processor(f, **kwargs):
    """Decorator that turns *f* into a factory of stream processors.

    Calling the wrapped function with keyword arguments returns a
    callable which, given a stream, applies *f* to that stream with the
    captured keyword arguments.
    """

    def factory(**opts):
        def run(stream, **opts):
            return f(stream, **opts)

        return partial(run, **opts)

    return update_wrapper(factory, f, **kwargs)
def parallel_or_not(iterator, func, processes, quiet):
    """Yield ``func(item)`` for every item, serially or in parallel.

    With ``processes == 1`` the mapping is done inline; otherwise the work
    is farmed out to ``parallelize_preprocess`` (progress bar shown unless
    *quiet*).
    """
    if processes == 1:
        yield from map(func, iterator)
    else:
        yield from parallelize_preprocess(
            func, iterator, processes, progress_bar=(not quiet)
        )
########################################################################
# Tokenize
########################################################################
@cli.command("tokenize")
@click.option(
    "--aggressive-dash-splits",
    "-a",
    default=False,
    is_flag=True,
    help="Triggers dash split rules.",
)
@click.option(
    "--xml-escape",
    "-x",
    default=True,
    is_flag=True,
    help="Escape special characters for XML.",
)
@click.option(
    "--protected-patterns",
    "-p",
    help="Specify file with patters to be protected in tokenisation. Special values: :basic: :web:",
)
@click.option(
    "--custom-nb-prefixes",
    "-c",
    help="Specify a custom non-breaking prefixes file, add prefixes to the default ones from the specified language.",
)
@processor
def tokenize_file(
    iterator,
    language,
    processes,
    quiet,
    xml_escape,
    aggressive_dash_splits,
    protected_patterns,
    custom_nb_prefixes,
):
    """`tokenize` subcommand: run MosesTokenizer over every input line.

    *iterator*, *language*, *processes* and *quiet* are supplied by the
    chained CLI pipeline; the remaining parameters come from the options
    declared above.
    """
    moses = MosesTokenizer(
        lang=language, custom_nonbreaking_prefixes_file=custom_nb_prefixes
    )
    # The sentinel values :basic: and :web: select the tokenizer's built-in
    # protected-pattern sets; any other value is read as a file containing
    # one regex pattern per line.
    if protected_patterns:
        if protected_patterns == ":basic:":
            protected_patterns = moses.BASIC_PROTECTED_PATTERNS
        elif protected_patterns == ":web:":
            protected_patterns = moses.WEB_PROTECTED_PATTERNS
        else:
            with open(protected_patterns, encoding="utf8") as fin:
                protected_patterns = [pattern.strip() for pattern in fin.readlines()]
    moses_tokenize = partial(
        moses.tokenize,
        return_str=True,
        aggressive_dash_splits=aggressive_dash_splits,
        escape=xml_escape,
        protected_patterns=protected_patterns,
    )
    return parallel_or_not(iterator, moses_tokenize, processes, quiet)
########################################################################
# Detokenize
########################################################################
@cli.command("detokenize")
@click.option(
    "--xml-unescape",
    "-x",
    default=True,
    is_flag=True,
    help="Unescape special characters for XML.",
)
@processor
def detokenize_file(
    iterator,
    language,
    processes,
    quiet,
    xml_unescape,
):
    """`detokenize` subcommand: join tokenized lines back into plain text.

    Each input line is whitespace-split (``str.split``) before being
    handed to MosesDetokenizer.
    """
    moses = MosesDetokenizer(lang=language)
    moses_detokenize = partial(moses.detokenize, return_str=True, unescape=xml_unescape)
    return parallel_or_not(
        list(map(str.split, iterator)), moses_detokenize, processes, quiet
    )
########################################################################
# Normalize
########################################################################
@cli.command("normalize")
@click.option(
    "--normalize-quote-commas",
    "-q",
    default=True,
    is_flag=True,
    help="Normalize quotations and commas.",
)
@click.option(
    "--normalize-numbers", "-d", default=True, is_flag=True, help="Normalize number."
)
@click.option(
    "--replace-unicode-puncts",
    "-p",
    default=False,
    is_flag=True,
    help="Replace unicode punctuations BEFORE normalization.",
)
@click.option(
    "--remove-control-chars",
    "-c",
    default=False,
    is_flag=True,
    help="Remove control characters AFTER normalization.",
)
@processor
def normalize_file(
    iterator,
    language,
    processes,
    quiet,
    normalize_quote_commas,
    normalize_numbers,
    replace_unicode_puncts,
    remove_control_chars,
):
    """`normalize` subcommand: apply Moses punctuation normalization to
    every input line, with the pre/post unicode-punct and control-char
    steps toggled by the flags above.
    """
    moses = MosesPunctNormalizer(
        language,
        norm_quote_commas=normalize_quote_commas,
        norm_numbers=normalize_numbers,
        pre_replace_unicode_punct=replace_unicode_puncts,
        post_remove_control_chars=remove_control_chars,
    )
    moses_normalize = partial(moses.normalize)
    return parallel_or_not(iterator, moses_normalize, processes, quiet)
########################################################################
# Train Truecase
########################################################################
@cli.command("train-truecase")
@click.option(
    "--modelfile", "-m", required=True, help="Filename to save the modelfile."
)
@click.option(
    "--is-asr",
    "-a",
    default=False,
    is_flag=True,
    help="A flag to indicate that model is for ASR.",
)
@click.option(
    "--possibly-use-first-token",
    "-p",
    default=False,
    is_flag=True,
    help="Use the first token as part of truecasing.",
)
@processor
def train_truecaser(
    iterator, language, processes, quiet, modelfile, is_asr, possibly_use_first_token
):
    """`train-truecase` subcommand: train a truecasing model on the input
    stream and save it to *modelfile*.  Emits nothing to stdout.
    """
    moses = MosesTruecaser(is_asr=is_asr)
    # iterator_copy = deepcopy(iterator)
    # NOTE(review): the return value of `train` is unused here — presumably
    # `save_model` persists the trainer's internal state; confirm.
    model = moses.train(
        iterator,
        possibly_use_first_token=possibly_use_first_token,
        processes=processes,
        progress_bar=(not quiet),
    )
    moses.save_model(modelfile)
########################################################################
# Truecase
########################################################################
@cli.command("truecase")
@click.option(
    "--modelfile", "-m", required=True, help="Filename to save/load the modelfile."
)
@click.option(
    "--is-asr",
    "-a",
    default=False,
    is_flag=True,
    help="A flag to indicate that model is for ASR.",
)
@click.option(
    "--possibly-use-first-token",
    "-p",
    default=False,
    is_flag=True,
    help="Use the first token as part of truecase training.",
)
@processor
def truecase_file(
    iterator, language, processes, quiet, modelfile, is_asr, possibly_use_first_token
):
    """`truecase` subcommand: truecase every input line.

    If *modelfile* does not exist yet, a model is first trained on a copy
    of the input stream and saved there, then used for truecasing.
    """
    # If model file doesn't exists, train a model.
    if not os.path.isfile(modelfile):
        # Deep-copy so the original stream is still available for the
        # truecasing pass below.
        iterator_copy = deepcopy(iterator)
        truecaser = MosesTruecaser(is_asr=is_asr)
        model = truecaser.train(
            iterator_copy,
            possibly_use_first_token=possibly_use_first_token,
            processes=processes,
            progress_bar=(not quiet),
        )
        truecaser.save_model(modelfile)
    # Truecase the file.
    moses = MosesTruecaser(load_from=modelfile, is_asr=is_asr)
    moses_truecase = partial(moses.truecase, return_str=True)
    return parallel_or_not(iterator, moses_truecase, processes, quiet)
########################################################################
# Detruecase
########################################################################
@cli.command("detruecase")
@click.option(
    "--is-headline",
    "-a",
    default=False,
    is_flag=True,
    help="Whether the file are headlines.",
)
@processor
def detruecase_file(iterator, language, processes, quiet, is_headline):
    """`detruecase` subcommand: restore surface capitalization on every
    input line via MosesDetruecaser.
    """
    moses = MosesDetruecaser()
    moses_detruecase = partial(
        moses.detruecase, return_str=True, is_headline=is_headline
    )
    return parallel_or_not(iterator, moses_detruecase, processes, quiet)
| 9,156 | 26.334328 | 118 | py |
sacremoses | sacremoses-master/sacremoses/corpus.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import pkgutil
class Perluniprops:
    """Reader for the Perl Unicode Properties character lists.

    The category files (see http://perldoc.perl.org/perluniprops.html)
    ship with the package under ``data/perluniprops/``; they were
    extracted using the Unicode::Tussle module from
    http://search.cpan.org/~bdfoy/Unicode-Tussle-1.11/lib/Unicode/Tussle.pm
    """

    def __init__(self):
        # Absolute path of the bundled category files.
        self.datadir = (
            os.path.dirname(os.path.abspath(__file__)) + "/data/perluniprops/"
        )
        # Category names mirroring the Perl Unicode Properties.
        self.available_categories = [
            "Close_Punctuation",
            "Currency_Symbol",
            "IsAlnum",
            "IsAlpha",
            "IsLower",
            "IsN",
            "IsSc",
            "IsSo",
            "IsUpper",
            "Line_Separator",
            "Number",
            "Open_Punctuation",
            "Punctuation",
            "Separator",
            "Symbol",
            "Lowercase_Letter",
            "Titlecase_Letter",
            "Uppercase_Letter",
            "IsPf",
            "IsPi",
            "CJKSymbols",
            "CJK",
        ]

    def chars(self, category=None):
        """Yield every character belonging to *category*.

        Useful when porting Perl tokenizers to Python.

        :param category: name of a Perl Unicode Property category,
            e.g. ``"Open_Punctuation"``.
        :return: generator over the characters of that category.
        """
        resource = os.path.join("data", "perluniprops", category + ".txt")
        payload = pkgutil.get_data("sacremoses", resource)
        yield from payload.decode("utf-8")
class NonbreakingPrefixes:
    """Reader for the Moses nonbreaking-prefix word lists.

    The textfiles come from the Moses machine-translation toolkit and are
    used by the Python port of the Moses word tokenizer.
    """

    def __init__(self):
        # Absolute path of the bundled prefix files.
        self.datadir = (
            os.path.dirname(os.path.abspath(__file__)) + "/data/nonbreaking_prefixes/"
        )
        # Map of full language names to their language IDs.
        self.available_langs = {
            "assamese": "as",
            "bengali": "bn",
            "catalan": "ca",
            "czech": "cs",
            "german": "de",
            "greek": "el",
            "english": "en",
            "spanish": "es",
            "estonian": "et",
            "finnish": "fi",
            "french": "fr",
            "irish": "ga",
            "gujarati": "gu",
            "hindi": "hi",
            "hungarian": "hu",
            "icelandic": "is",
            "italian": "it",
            "kannada": "kn",
            "lithuanian": "lt",
            "latvian": "lv",
            "malayalam": "ml",
            "manipuri": "mni",
            "marathi": "mr",
            "dutch": "nl",
            "oriya": "or",
            "punjabi": "pa",
            "polish": "pl",
            "portuguese": "pt",
            "romanian": "ro",
            "russian": "ru",
            "slovak": "sk",
            "slovenian": "sl",
            "swedish": "sv",
            "tamil": "ta",
            "telugu": "te",
            "tetum": "tdt",
            "cantonese": "yue",
            "chinese": "zh",
        }
        # Also, add the lang IDs as the keys.
        self.available_langs.update({v: v for v in self.available_langs.values()})

    def words(self, lang=None, ignore_lines_startswith="#"):
        """Yield the nonbreaking prefixes for the specified language(s).

        :param lang: language name or ID; ``None`` yields the prefixes of
            every available language, and any unknown value silently
            falls back to the English prefixes.
        :param ignore_lines_startswith: comment marker; lines starting
            with it are skipped.
        :return: a generator of prefix strings.
        """
        # If *lang* in list of languages available, allocate apt fileid.
        if lang in self.available_langs:
            filenames = ["nonbreaking_prefix." + self.available_langs[lang]]
        # Use non-breaking prefixes for all languages when lang is None.
        # (Fixed: identity comparison `is None` instead of `== None`.)
        elif lang is None:
            filenames = [
                "nonbreaking_prefix." + v for v in set(self.available_langs.values())
            ]
        else:
            # Unknown language: fall back to the English prefixes.
            filenames = ["nonbreaking_prefix.en"]
        for filename in filenames:
            relative_path = os.path.join("data", "nonbreaking_prefixes", filename)
            binary_data = pkgutil.get_data("sacremoses", relative_path)
            for line in binary_data.decode("utf-8").splitlines():
                line = line.strip()
                if line and not line.startswith(ignore_lines_startswith):
                    yield line
# Public API of this module.
__all__ = ["Perluniprops", "NonbreakingPrefixes"]
| 5,393 | 33.356688 | 119 | py |
sacremoses | sacremoses-master/sacremoses/indic.py | #
# Created by: Thamme Gowda ; June 2020
#
# https://en.wikipedia.org/wiki/Virama
# Combining "killer" marks: each suppresses the inherent vowel of the
# preceding consonant in its script (annotated per entry).
VIRAMAS = [
    "\u094D",  # Devanagari ◌्
    "\u09CD",  # Bengali ◌্
    "\u0A4D",  # Gurmukhi ◌੍
    "\u0ACD",  # Gujarati ◌્
    "\u0B4D",  # Oriya ◌୍
    "\u0BCD",  # Tamil ◌்
    "\u0C4D",  # Telugu ◌్
    "\u0CCD",  # Kannada ◌್
    "\u0D3B",  # Malayalam Sign Vertical Bar ◌഻
    "\u0D3C",  # Malayalam Sign Circular ◌഻
    "\u0D4D",  # Malayalam ◌്
    "\u0EBA",  # Lao Sign Pali ◌຺
    "\u1039",  # Myanmar ◌္
    "\u1714",  # Tagalog ◌᜔
    "\u1BAB",  # Sundanese ◌᮫
    "\uA8C4",  # Saurashtra ◌꣄
    "\uA8F3",  # Devanagari Sign Candrabindu ꣳ
    "\uA8F4",  # Devanagari Sign Double Candrabindu ꣴ
    "\uA953",  # Rejang ꥓
    "\uAAF6",  # Meetei Mayek ◌꫶
    "\U00010A3F",  # Kharoshthi ◌𐨿
    "\U00011046",  # Brahmi ◌𑁆
    "\U000110B9",  # Kaithi ◌𑂹
    "\U00011133",  # Chakma ◌𑄳
    "\U000111C0",  # Sharada 𑇀
    "\U00011235",  # Khojki 𑈵
    "\U000112EA",  # Khudawadi ◌𑋪
    "\U0001134D",  # Grantha 𑍍
    "\U00011442",  # Newa ◌𑑂
    "\U000114C2",  # Tirhuta ◌𑓂
    "\U000115BF",  # Siddham ◌𑖿
    "\U0001163F",  # Modi ◌𑘿
    "\U000116B6",  # Takri 𑚶
    "\U00011839",  # Dogra ◌𑠹
    "\U000119E0",  # Nandinagari ◌𑧠
    "\U00011A34",  # Zanabazar Square ◌𑨴
    "\U00011C3F",  # Bhaiksuki ◌𑰿
    "\U00011D45",  # Masaram Gondi ◌𑵅
    "\U00011D97",  # Gunjala Gondi ◌𑶗
    "\u0DCA",  # Sinhala hal kirīma ්
]
# https://en.wikipedia.org/wiki/Nuqta
# Combining dot (and related) marks used to write additional consonants.
NUKTAS = [
    "\u093C",  # Devanagari ◌़
    "\u09BC",  # Bengali ◌়
    "\u0A3C",  # Gurmukhi ◌਼
    "\u0ABC",  # Gujarati ◌઼
    "\u0AFD",  # Gujarati Sign Three-Dot Above ◌૽
    "\u0AFE",  # Gujarati Sign Circle Above ◌૾
    "\u0AFF",  # Gujarati Sign Two-Circle Above ◌૿
    "\u0B3C",  # Oriya ◌଼
    "\u0CBC",  # Kannada ◌಼
    "\u1C37",  # Lepcha ◌᰷
    "\U000110BA",  # Kaithi ◌𑂺
    "\U00011173",  # Mahajani ◌𑅳
    "\U000111CA",  # Sharada ◌𑇊
    "\U00011236",  # Khojki ◌𑈶
    "\U000112E9",  # Khudawadi ◌𑋩
    "\U0001133C",  # Grantha ◌𑌼
    "\U00011446",  # Newa ◌𑑆
    "\U000114C3",  # Tirhuta ◌𑓃
    "\U000115C0",  # Siddham ◌𑗀
    "\U000116B7",  # Takri ◌𑚷
    "\U0001183A",  # Dogra ◌𑠺
    "\U00011D42",  # Masaram Gondi ◌𑵂
    "\U0001E94A",  # Adlam ◌𞥊
]
| 2,248 | 29.391892 | 53 | py |
sacremoses | sacremoses-master/sacremoses/normalize.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import regex
from itertools import chain
class MosesPunctNormalizer:
    """
    This is a Python port of the Moses punctuation normalizer from
    https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/normalize-punctuation.perl

    Each table below is a list of ``(regex, replacement)`` pairs applied in
    order; the "lines NN - NN" notes refer to the original Perl script.
    """

    EXTRA_WHITESPACE = [  # lines 21 - 30
        (r"\r", r""),
        (r"\(", r" ("),
        (r"\)", r") "),
        (r" +", r" "),
        (r"\) ([.!:?;,])", r")\g<1>"),
        (r"\( ", r"("),
        (r" \)", r")"),
        (r"(\d) %", r"\g<1>%"),
        (r" :", r":"),
        (r" ;", r";"),
    ]

    NORMALIZE_UNICODE_IF_NOT_PENN = [(r"`", r"'"), (r"''", r' " ')]  # lines 33 - 34

    NORMALIZE_UNICODE = [  # lines 37 - 50
        ("„", r'"'),
        ("“", r'"'),
        ("”", r'"'),
        ("–", r"-"),
        ("—", r" - "),
        (r" +", r" "),
        ("´", r"'"),
        ("([a-zA-Z])‘([a-zA-Z])", r"\g<1>'\g<2>"),
        ("([a-zA-Z])’([a-zA-Z])", r"\g<1>'\g<2>"),
        ("‘", r"'"),
        ("‚", r"'"),
        ("’", r"'"),
        (r"''", r'"'),
        ("´´", r'"'),
        ("…", r"..."),
    ]

    FRENCH_QUOTES = [  # lines 52 - 57
        ("\u00A0«\u00A0", r'"'),
        ("«\u00A0", r'"'),
        ("«", r'"'),
        ("\u00A0»\u00A0", r'"'),
        ("\u00A0»", r'"'),
        ("»", r'"'),
    ]

    HANDLE_PSEUDO_SPACES = [  # lines 59 - 67
        ("\u00A0%", r"%"),
        ("nº\u00A0", "nº "),
        ("\u00A0:", r":"),
        ("\u00A0ºC", " ºC"),
        ("\u00A0cm", r" cm"),
        ("\u00A0\\?", "?"),
        ("\u00A0\\!", "!"),
        ("\u00A0;", r";"),
        (",\u00A0", r", "),
        (r" +", r" "),
    ]

    EN_QUOTATION_FOLLOWED_BY_COMMA = [(r'"([,.]+)', r'\g<1>"')]

    DE_ES_FR_QUOTATION_FOLLOWED_BY_COMMA = [
        (r',"', r'",'),
        (r'(\.+)"(\s*[^<])', r'"\g<1>\g<2>'),  # don't fix period at end of sentence
    ]

    # Decimal separator in numbers: comma for de/es/cz/cs/fr, period otherwise.
    DE_ES_CZ_CS_FR = [
        ("(\\d)\u00A0(\\d)", r"\g<1>,\g<2>"),
    ]

    OTHER = [
        ("(\\d)\u00A0(\\d)", r"\g<1>.\g<2>"),
    ]

    # Regex substitutions from replace-unicode-punctuation.perl
    # https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl
    REPLACE_UNICODE_PUNCTUATION = [
        (",", ","),
        (r"。\s*", ". "),
        ("、", ","),
        ("”", '"'),
        ("“", '"'),
        ("∶", ":"),
        (":", ":"),
        ("?", "?"),
        ("《", '"'),
        ("》", '"'),
        (")", ")"),
        ("!", "!"),
        ("(", "("),
        (";", ";"),
        ("」", '"'),
        ("「", '"'),
        ("0", "0"),
        ("1", "1"),
        ("2", "2"),
        ("3", "3"),
        ("4", "4"),
        ("5", "5"),
        ("6", "6"),
        ("7", "7"),
        ("8", "8"),
        ("9", "9"),
        (r".\s*", ". "),
        ("~", "~"),
        ("’", "'"),
        ("…", "..."),
        ("━", "-"),
        ("〈", "<"),
        ("〉", ">"),
        ("【", "["),
        ("】", "]"),
        ("%", "%"),
    ]

    def __init__(
        self,
        lang="en",
        penn=True,
        norm_quote_commas=True,
        norm_numbers=True,
        pre_replace_unicode_punct=False,
        post_remove_control_chars=False,
    ):
        """
        :param lang: The two-letter language code.
        :type lang: str
        :param penn: Normalize Penn Treebank style quotations.
        :type penn: bool
        :param norm_quote_commas: Normalize quotations and commas
        :type norm_quote_commas: bool
        :param norm_numbers: Normalize numbers
        :type norm_numbers: bool
        :param pre_replace_unicode_punct: Replace unicode punctuation
            BEFORE normalization.
        :type pre_replace_unicode_punct: bool
        :param post_remove_control_chars: Strip control characters AFTER
            normalization.
        :type post_remove_control_chars: bool
        """
        # Assemble the ordered substitution pipeline from the class tables.
        self.substitutions = [
            self.EXTRA_WHITESPACE,
            self.NORMALIZE_UNICODE,
            self.FRENCH_QUOTES,
            self.HANDLE_PSEUDO_SPACES,
        ]

        if penn:  # Adds the penn substitutions after extra_whitespace regexes.
            self.substitutions.insert(1, self.NORMALIZE_UNICODE_IF_NOT_PENN)

        if norm_quote_commas:
            if lang == "en":
                self.substitutions.append(self.EN_QUOTATION_FOLLOWED_BY_COMMA)
            elif lang in ["de", "es", "fr"]:
                self.substitutions.append(self.DE_ES_FR_QUOTATION_FOLLOWED_BY_COMMA)

        if norm_numbers:
            if lang in ["de", "es", "cz", "cs", "fr"]:
                self.substitutions.append(self.DE_ES_CZ_CS_FR)
            else:
                self.substitutions.append(self.OTHER)

        # Flatten the list of tables into one ordered list of pairs.
        self.substitutions = list(chain(*self.substitutions))

        self.pre_replace_unicode_punct = pre_replace_unicode_punct
        self.post_remove_control_chars = post_remove_control_chars

    def normalize(self, text):
        """
        Returns a string with normalized punctuation.
        """
        # Optionally, replace unicode puncts BEFORE normalization.
        if self.pre_replace_unicode_punct:
            text = self.replace_unicode_punct(text)

        # Actual normalization.
        for regexp, substitution in self.substitutions:
            # print(regexp, substitution)
            text = re.sub(regexp, substitution, str(text))
            # print(text)

        # Optionally, remove control chars AFTER normalization.
        if self.post_remove_control_chars:
            text = self.remove_control_chars(text)

        return text.strip()

    def replace_unicode_punct(self, text):
        """Apply the REPLACE_UNICODE_PUNCTUATION table to *text*."""
        for regexp, substitution in self.REPLACE_UNICODE_PUNCTUATION:
            text = re.sub(regexp, substitution, str(text))
        return text

    def remove_control_chars(self, text):
        """Strip every character in the Unicode `Other` (\\p{C}) category."""
        return regex.sub(r"\p{C}", "", text)
| 5,577 | 26.89 | 110 | py |
sacremoses | sacremoses-master/sacremoses/sent_tokenize.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
from sacremoses.corpus import Perluniprops
from sacremoses.corpus import NonbreakingPrefixes
# Module-level singletons shared by the (unfinished) tokenizer draft below.
perluniprops = Perluniprops()
nonbreaking_prefixes = NonbreakingPrefixes()
class MosesSentTokenizer(object):
    """
    This is a Python port of the Moses Tokenizer from
    https://github.com/moses-smt/mosesdecoder/blob/master/scripts/ems/support/split-sentences.perl
    """

    # NOTE(review): this `raise` sits directly in the class body, so it fires
    # when the `class` statement itself executes (i.e. at import time) — the
    # class is a deliberate stub; the port below is kept as a dead raw string.
    raise NotImplementedError
    r"""
    # Perl Unicode Properties character sets.
    IsPi = str("".join(perluniprops.chars("IsPi")))
    IsUpper = str("".join(perluniprops.chars("IsUpper")))
    IsPf = str("".join(perluniprops.chars("IsPf")))
    Punctuation = str("".join(perluniprops.chars("Punctuation")))
    CJK = str("".join(perluniprops.chars("CJK")))
    CJKSymbols = str("".join(perluniprops.chars("CJKSymbols")))
    IsAlnum = str("".join(perluniprops.chars("IsAlnum")))

    # Remove ASCII junk.
    DEDUPLICATE_SPACE = r"\s+", r" "

    # Non-period end of sentence markers (?!) followed by sentence starters.
    NONPERIOD_UPPER = r"([?!]) +([\'\"\(\[\¿\¡\p{startpunct}]*[\p{upper}])".format(startpunct=IsPi, upper=IsUpper), r"\1\n\2"

    # Multi-dots followed by sentence starters.
    MULTDOT_UPPER = r"(\.[\.]+) +([\'\"\(\[\¿\¡\p{startpunct}]*[\p{upper}])".format(startpunct=IsPi, upper=IsUpper), r"\1\n\2"

    # Add breaks for sentences that end with some sort of punctuation
    # inside a quote or parenthetical and are followed by a possible
    # sentence starter punctuation and upper case.
    QUOTES_UPPER = r"([?!\.][\ ]*[\'\"\)\]\p{endpunct}]+) +([\'\"\(\[\¿\¡\p{startpunct}]*[\ ]*[\p{upper}])".format(endpunct=IsPf, startpunct=IsPi, upper=IsUpper), r"\1\n\2"

    # Add breaks for sentences that end with some sort of punctuation,
    # and are followed by a sentence starter punctuation and upper case.
    ENDPUNCT_UPPER = r"([?!\.]) +([\'\"\(\[\¿\¡\p{startpunct}]+[\ ]*[\p{upper}])".format(startpunct=IsPi, upper=IsUpper), r"\1\n\2"

    IS_EOS = r"([\p{alphanum}\.\-]*)([\'\"\)\]\%\p{endpunct}]*)(\.+)$".format(alphanum=IsAlnum, endpunct=IsPf)

    def __init__(self, lang="en", custom_nonbreaking_prefixes_file=None):
        # Load custom nonbreaking prefixes file.
        if custom_nonbreaking_prefixes_file:
            self.NONBREAKING_PREFIXES = []
            with open(custom_nonbreaking_prefixes_file, 'r') as fin:
                for line in fin:
                    line = line.strip()
                    if line and not line.startswith("#"):
                        if line not in self.NONBREAKING_PREFIXES:
                            self.NONBREAKING_PREFIXES.append(line)

        detokenized_text = ""
        tokens = text.split()
        # Iterate through every token till the last 2nd token.
        for i, token in enumerate(iter(tokens[:-1])):
            if re.search(IS_EOS, token):
                pass
    """
| 2,911 | 40.6 | 172 | py |
sacremoses | sacremoses-master/sacremoses/subwords.py | # -*- coding: utf-8 -*-
from __future__ import print_function
import copy
from collections import Counter, defaultdict
from functools import reduce
from sacremoses.util import pairwise
class SubwordTokenizer(object):
"""
This is a Python port of the Subword NMT from
https://github.com/rsennrich/subword-nmt
"""
    def __init__(self, filename):
        """Initialise BPE training state from a corpus file.

        :param filename: path to the training corpus read by
            :meth:`get_vocabulary`.
        """
        # Word vocabulary: list of ((char, ..., last_char + U+E000), freq).
        self.vocab = self.get_vocabulary(filename)
        # Adjacent-symbol pair frequencies and a pair -> word-position index.
        self.stats, self.indices = self.get_pair_statistics()
        # Deep copy of the initial statistics — presumably kept so pruned
        # stats can be refreshed later; confirm against update logic.
        self.big_stats = copy.deepcopy(self.stats)
def get_vocabulary(self, filename, is_dict=False):
vocab = Counter()
with open(filename) as fin:
if is_dict:
for line in fin:
word, count = line.strip().split(" ")
vocab[word] += int(count)
else:
vocab.update(fin.read().split())
# Converts the string keys to tuples of characters,
# adds "\uE000" to the last character.
vocab = Counter(
{tuple(k[:-1]) + (k[-1] + "\uE000",): v for (k, v) in vocab.items()}
)
return vocab.most_common()
def get_pair_statistics(self):
"""Count frequency of all symbol pairs, and create index"""
# Data structure of pair frequencies
stats = Counter()
# Index from pairs to words
indices = defaultdict(lambda: Counter())
for i, (word, freq) in enumerate(self.vocab):
for prev, curr in pairwise(word):
stats[prev, curr] += freq
indices[prev, curr][i] += 1
return stats, indices
def modify_token(self, token, pair):
"""
From https://stackoverflow.com/a/40367074/610569
>>> modify_token(('s', 'h', 'e', 'r', 'l', 'o', 'c', 'k'), ('h', 'e'))
('S', 'he', 'r', 'l', 'o', 'c', 'k')
"""
first, second = pair
pair_str = "".join(pair).replace("\\", "\\\\")
f = (
lambda acc, e: acc[:-1] + (pair_str,)
if acc[-1] == first and e == second
else acc + (e,)
)
return reduce(f, token[1:], (token[0],))
def replace_pair(self, pair):
"""Replace all occurrences of a symbol pair ('A', 'B') with a new symbol 'AB'"""
changes = []
for j, freq in self.indices[pair].items():
if freq < 1:
continue
word, freq = self.vocab[j]
new_word = self.modify_token(word, pair)
self.vocab[j] = (new_word, freq)
changes.append((j, new_word, word, freq))
return changes
def update_pair_statistics(self, pair, changed):
"""
Minimally update the indices and frequency of symbol pairs
if we merge a pair of symbols, only pairs that overlap with occurrences
of this pair are affected, and need to be updated.
"""
self.stats[pair] = 0
self.indices[pair] = Counter()
first, second = pair
new_pair = first + second
for j, word, old_word, freq in changed:
# Find all instances of pair in the old_word, and update frequency/indices around it
i = 0
# Keep moving down the old_word string until we cannot find
# the first char in the new_pair.
while True:
try:
# Find the next occurence of the first character in the new_pair.
i = old_word.index(first, i)
except ValueError:
break
# Checks that old_word[i:i+1] is the same as new_pair.
# (i) `i < len(old_word)-1` checks that the index i is not the last character.
# (ii) `old_word[i+1]` checks that the char after the index is the second char in the new_pair.
if i < len(old_word) - 1 and old_word[i + 1] == second:
# `if i` checks that i is non-zero.
# We can skip the first char since there's no previous bigram.
if i:
# Find the previous bigram and reduce its count.
prev = old_word[i - 1 : i + 1]
self.stats[prev] -= freq
self.indices[prev][j] -= 1
# `if < len(old_word)-2` checks that the new_pair is not at the end of the old_word.
if i < len(old_word) - 2:
# The multiple if conditions that follows checks that the bigram after i and i+1
# is not the same as new_pair to avoid double-counting consecutive pairs.
# (i) `old_word[i+2] != first` checks that two chars after i, it isn't the same as
# the first char in the new_pair.
# (ii) `old_word[i+3] != second` checks that three chars after i, it isn't the same
# as the second char in the new_pair.
# (iii) `i >= len(old_word)-3` checks that the i index is one of the last 4 chars in old_word.
# @rico: Is the `i >= len(old_word)-3` check to avoid IndexError?
if (
old_word[i + 2] != first
or i >= len(old_word) - 3
or old_word[i + 3] != second
):
# Find the next bigram and reduce its count.
# `nex` is the next bigram after new_pair.
nex = old_word[i + 1 : i + 3]
self.stats[nex] -= freq
self.indices[nex][j] -= 1
# Now we move the ith index to two chars to the right when
# old_word[i:i+1] is the same as new_pair.
i += 2
else: # Otherwise, we move one char to the right.
i += 1
# Find all instances of pair in the new *word*, and update frequency/indices around it
# Reset the index to the start of the string.
i = 0
# Similarly, we keep moving down the new *word* string until we cannot find
# the first char in the new_pair.
while True:
try:
i = word.index(new_pair, i)
except ValueError:
break
# We are sure that the new_pair is in the new *word* so there's no need to
# do an outer check as what was done in the old_word.
if (
i
): # `if i` checks that i is non-zero, skip the first char since there's no previous bigram.
prev = word[i - 1 : i + 1]
# This time, we add the frequency back to the statistics and indices.
self.stats[prev] += freq
self.indices[prev][j] += 1
# The multiple if conditions that follows checks that the bigram after i and i+1
# is not the same as new_pair to avoid double-counting consecutive pairs.
# `i < len(word)-1` checks if i is not the last character.
# `word[i+1]` checks that the next char is not the new_pair.
if i < len(word) - 1 and word[i + 1] != new_pair:
# `nex` is the next bigram after new_pair.
nex = word[i : i + 2]
# We add the frequency back to the statistics and indices.
self.stats[nex] += freq
self.indices[nex][j] += 1
# We move one char down the new *word*
i += 1
def learn(self, num_symbols, min_freq=2, jump=1, is_dict=None):
# threshold is inspired by Zipfian assumption, but should only affect speed
threshold = max(self.stats.values()) / 10
for i in range(num_symbols):
most_freq_tokens = self.stats.most_common(jump)
for token, count in most_freq_tokens:
changes = self.replace_pair(token)
self.update_pair_statistics(token, changes)
self.stats[token] = 0
if self.stats[token] < min_freq:
return
| 8,383 | 44.814208 | 118 | py |
sacremoses | sacremoses-master/sacremoses/tokenize.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
from sacremoses.corpus import Perluniprops
from sacremoses.corpus import NonbreakingPrefixes
from sacremoses.util import is_cjk
from sacremoses.indic import VIRAMAS, NUKTAS
# Corpus-backed singletons shared by the classes below: Perl Unicode
# property character sets and the per-language nonbreaking-prefix lists.
perluniprops = Perluniprops()
nonbreaking_prefixes = NonbreakingPrefixes()
class MosesTokenizer(object):
    """
    This is a Python port of the Moses Tokenizer from
    https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/tokenizer.perl
    """

    # Perl Unicode Properties character sets.
    IsN = str("".join(perluniprops.chars("IsN")))
    IsAlnum = str(
        "".join(perluniprops.chars("IsAlnum")) + "".join(VIRAMAS) + "".join(NUKTAS)
    )
    IsSc = str("".join(perluniprops.chars("IsSc")))
    IsSo = str("".join(perluniprops.chars("IsSo")))
    IsAlpha = str(
        "".join(perluniprops.chars("IsAlpha")) + "".join(VIRAMAS) + "".join(NUKTAS)
    )
    IsLower = str("".join(perluniprops.chars("IsLower")))

    # Remove ASCII junk.
    DEDUPLICATE_SPACE = r"\s+", r" "
    ASCII_JUNK = r"[\000-\037]", r""

    # Neurotic Perl heading space, multi-space and trailing space chomp.
    # These regexes are kept for reference purposes and shouldn't be used!!
    MID_STRIP = r" +", r" "  # Use DEDUPLICATE_SPACE instead.
    LEFT_STRIP = r"^ ", r""  # Uses text.lstrip() instead.
    RIGHT_STRIP = r" $", r""  # Uses text.rstrip() instead.

    # Pad all "other" special characters not in IsAlnum.
    PAD_NOT_ISALNUM = r"([^{}\s\.'\`\,\-])".format(IsAlnum), r" \1 "

    # Splits all hyphens (regardless of circumstances), e.g.
    # 'foo-bar' -> 'foo @-@ bar'
    AGGRESSIVE_HYPHEN_SPLIT = (
        r"([{alphanum}])\-(?=[{alphanum}])".format(alphanum=IsAlnum),
        r"\1 @-@ ",
    )

    # Make multi-dots stay together.
    # NOTE(review): these three constants are unused (replace_multidots()
    # re-implements them) and their non-raw replacements make "\1" the
    # control character \x01 — kept as-is for interface compatibility.
    REPLACE_DOT_WITH_LITERALSTRING_1 = r"\.([\.]+)", " DOTMULTI\1"
    REPLACE_DOT_WITH_LITERALSTRING_2 = r"DOTMULTI\.([^\.])", "DOTDOTMULTI \1"
    REPLACE_DOT_WITH_LITERALSTRING_3 = r"DOTMULTI\.", "DOTDOTMULTI"

    # Separate out "," except if within numbers (5,300)
    # e.g.  A,B,C,D,E > A , B,C , D,E
    # First application uses up B so rule can't see B,C
    # two-step version here may create extra spaces but these are removed later
    # will also space digit,letter or letter,digit forms (redundant with next section)
    COMMA_SEPARATE_1 = r"([^{}])[,]".format(IsN), r"\1 , "
    COMMA_SEPARATE_2 = r"[,]([^{}])".format(IsN), r" , \1"
    COMMA_SEPARATE_3 = r"([{}])[,]$".format(IsN), r"\1 , "

    # Attempt to get correct directional quotes.
    DIRECTIONAL_QUOTE_1 = r"^``", r"`` "
    DIRECTIONAL_QUOTE_2 = r'^"', r"`` "
    DIRECTIONAL_QUOTE_3 = r"^`([^`])", r"` \1"
    DIRECTIONAL_QUOTE_4 = r"^'", r"`  "
    DIRECTIONAL_QUOTE_5 = r'([ ([{<])"', r"\1 `` "
    DIRECTIONAL_QUOTE_6 = r"([ ([{<])``", r"\1 `` "
    DIRECTIONAL_QUOTE_7 = r"([ ([{<])`([^`])", r"\1 ` \2"
    DIRECTIONAL_QUOTE_8 = r"([ ([{<])'", r"\1 ` "

    # Replace ... with _ELLIPSIS_
    REPLACE_ELLIPSIS = r"\.\.\.", r" _ELLIPSIS_ "
    # Restore _ELLIPSIS_ with ...
    RESTORE_ELLIPSIS = r"_ELLIPSIS_", r"\.\.\."

    # Pad , with tailing space except if within numbers, e.g. 5,300
    COMMA_1 = r"([^{numbers}])[,]([^{numbers}])".format(numbers=IsN), r"\1 , \2"
    COMMA_2 = r"([{numbers}])[,]([^{numbers}])".format(numbers=IsN), r"\1 , \2"
    COMMA_3 = r"([^{numbers}])[,]([{numbers}])".format(numbers=IsN), r"\1 , \2"

    # Pad unicode symbols with spaces.
    SYMBOLS = r"([;:@#\$%&{}{}])".format(IsSc, IsSo), r" \1 "

    # Separate out intra-token slashes.  PTB tokenization doesn't do this, so
    # the tokens should be merged prior to parsing with a PTB-trained parser.
    # e.g. "and/or" -> "and @/@ or"
    # BUGFIX: the replacement previously used Perl-style "$1"/"$2"
    # backreferences, which Python's re module inserts as literal text.
    INTRATOKEN_SLASHES = (
        r"([{alphanum}])\/([{alphanum}])".format(alphanum=IsAlnum),
        r"\1 @/@ \2",
    )

    # Splits final period at end of string.
    FINAL_PERIOD = r"""([^.])([.])([\]\)}>"']*) ?$""", r"\1 \2\3"
    # Pad all question marks and exclamation marks with spaces.
    PAD_QUESTION_EXCLAMATION_MARK = r"([?!])", r" \1 "

    # Handles parentheses, brackets and converts them to PTB symbols.
    PAD_PARENTHESIS = r"([\]\[\(\){}<>])", r" \1 "
    CONVERT_PARENTHESIS_1 = r"\(", "-LRB-"
    CONVERT_PARENTHESIS_2 = r"\)", "-RRB-"
    CONVERT_PARENTHESIS_3 = r"\[", "-LSB-"
    CONVERT_PARENTHESIS_4 = r"\]", "-RSB-"
    CONVERT_PARENTHESIS_5 = r"\{", "-LCB-"
    CONVERT_PARENTHESIS_6 = r"\}", "-RCB-"

    # Pads double dashes with spaces.
    PAD_DOUBLE_DASHES = r"--", " -- "

    # Adds spaces to start and end of string to simplify further regexps.
    PAD_START_OF_STR = r"^", " "
    PAD_END_OF_STR = r"$", " "

    # Converts double quotes to two single quotes and pad with spaces.
    CONVERT_DOUBLE_TO_SINGLE_QUOTES = r'"', " '' "
    # Handles single quote in possessives or close-single-quote.
    HANDLES_SINGLE_QUOTES = r"([^'])' ", r"\1 ' "
    # Pad apostrophe in possessive or close-single-quote.
    APOSTROPHE = r"([^'])'", r"\1 ' "

    # Prepend space on contraction apostrophe.
    CONTRACTION_1 = r"'([sSmMdD]) ", r" '\1 "
    CONTRACTION_2 = r"'ll ", r" 'll "
    CONTRACTION_3 = r"'re ", r" 're "
    CONTRACTION_4 = r"'ve ", r" 've "
    CONTRACTION_5 = r"n't ", r" n't "
    CONTRACTION_6 = r"'LL ", r" 'LL "
    CONTRACTION_7 = r"'RE ", r" 'RE "
    CONTRACTION_8 = r"'VE ", r" 'VE "
    CONTRACTION_9 = r"N'T ", r" N'T "

    # Informal Contractions.
    CONTRACTION_10 = r" ([Cc])annot ", r" \1an not "
    CONTRACTION_11 = r" ([Dd])'ye ", r" \1' ye "
    CONTRACTION_12 = r" ([Gg])imme ", r" \1im me "
    CONTRACTION_13 = r" ([Gg])onna ", r" \1on na "
    CONTRACTION_14 = r" ([Gg])otta ", r" \1ot ta "
    CONTRACTION_15 = r" ([Ll])emme ", r" \1em me "
    CONTRACTION_16 = r" ([Mm])ore'n ", r" \1ore 'n "
    CONTRACTION_17 = r" '([Tt])is ", r" '\1 is "
    CONTRACTION_18 = r" '([Tt])was ", r" '\1 was "
    CONTRACTION_19 = r" ([Ww])anna ", r" \1an na "

    # Clean out extra spaces
    CLEAN_EXTRA_SPACE_1 = r"  *", r" "
    CLEAN_EXTRA_SPACE_2 = r"^ *", r""
    CLEAN_EXTRA_SPACE_3 = r" *$", r""

    # Neurotic Perl regexes to escape special characters.
    # BUGFIX: the replacement strings are XML character entities; an
    # HTML-decoded copy of this file turned them back into the raw symbols,
    # which made escaping a no-op and broke the file's syntax.
    ESCAPE_AMPERSAND = r"&", r"&amp;"
    ESCAPE_PIPE = r"\|", r"&#124;"
    ESCAPE_LEFT_ANGLE_BRACKET = r"<", r"&lt;"
    ESCAPE_RIGHT_ANGLE_BRACKET = r">", r"&gt;"
    ESCAPE_SINGLE_QUOTE = r"\'", r"&apos;"
    ESCAPE_DOUBLE_QUOTE = r"\"", r"&quot;"
    ESCAPE_LEFT_SQUARE_BRACKET = r"\[", r"&#91;"
    ESCAPE_RIGHT_SQUARE_BRACKET = r"]", r"&#93;"

    EN_SPECIFIC_1 = r"([^{alpha}])[']([^{alpha}])".format(alpha=IsAlpha), r"\1 ' \2"
    EN_SPECIFIC_2 = (
        r"([^{alpha}{isn}])[']([{alpha}])".format(alpha=IsAlpha, isn=IsN),
        r"\1 ' \2",
    )
    EN_SPECIFIC_3 = r"([{alpha}])[']([^{alpha}])".format(alpha=IsAlpha), r"\1 ' \2"
    EN_SPECIFIC_4 = r"([{alpha}])[']([{alpha}])".format(alpha=IsAlpha), r"\1 '\2"
    EN_SPECIFIC_5 = r"([{isn}])[']([s])".format(isn=IsN), r"\1 '\2"

    ENGLISH_SPECIFIC_APOSTROPHE = [
        EN_SPECIFIC_1,
        EN_SPECIFIC_2,
        EN_SPECIFIC_3,
        EN_SPECIFIC_4,
        EN_SPECIFIC_5,
    ]

    FR_IT_SPECIFIC_1 = r"([^{alpha}])[']([^{alpha}])".format(alpha=IsAlpha), r"\1 ' \2"
    FR_IT_SPECIFIC_2 = r"([^{alpha}])[']([{alpha}])".format(alpha=IsAlpha), r"\1 ' \2"
    FR_IT_SPECIFIC_3 = r"([{alpha}])[']([^{alpha}])".format(alpha=IsAlpha), r"\1 ' \2"
    FR_IT_SPECIFIC_4 = r"([{alpha}])[']([{alpha}])".format(alpha=IsAlpha), r"\1' \2"

    FR_IT_SPECIFIC_APOSTROPHE = [
        FR_IT_SPECIFIC_1,
        FR_IT_SPECIFIC_2,
        FR_IT_SPECIFIC_3,
        FR_IT_SPECIFIC_4,
    ]

    NON_SPECIFIC_APOSTROPHE = r"\'", " ' "

    TRAILING_DOT_APOSTROPHE = r"\.' ?$", " . ' "

    BASIC_PROTECTED_PATTERN_1 = r"<\/?\S+\/?>"
    BASIC_PROTECTED_PATTERN_2 = r'<\S+( [a-zA-Z0-9]+\="?[^"]")+ ?\/?>'
    BASIC_PROTECTED_PATTERN_3 = r"<\S+( [a-zA-Z0-9]+\='?[^']')+ ?\/?>"
    BASIC_PROTECTED_PATTERN_4 = r"[\w\-\_\.]+\@([\w\-\_]+\.)+[a-zA-Z]{2,}"
    BASIC_PROTECTED_PATTERN_5 = r"(http[s]?|ftp):\/\/[^:\/\s]+(\/\w+)*\/[\w\-\.]+"

    MOSES_PENN_REGEXES_1 = [
        DEDUPLICATE_SPACE,
        ASCII_JUNK,
        DIRECTIONAL_QUOTE_1,
        DIRECTIONAL_QUOTE_2,
        DIRECTIONAL_QUOTE_3,
        DIRECTIONAL_QUOTE_4,
        DIRECTIONAL_QUOTE_5,
        DIRECTIONAL_QUOTE_6,
        DIRECTIONAL_QUOTE_7,
        DIRECTIONAL_QUOTE_8,
        REPLACE_ELLIPSIS,
        COMMA_1,
        COMMA_2,
        COMMA_3,
        SYMBOLS,
        INTRATOKEN_SLASHES,
        FINAL_PERIOD,
        PAD_QUESTION_EXCLAMATION_MARK,
        PAD_PARENTHESIS,
        CONVERT_PARENTHESIS_1,
        CONVERT_PARENTHESIS_2,
        CONVERT_PARENTHESIS_3,
        CONVERT_PARENTHESIS_4,
        CONVERT_PARENTHESIS_5,
        CONVERT_PARENTHESIS_6,
        PAD_DOUBLE_DASHES,
        PAD_START_OF_STR,
        PAD_END_OF_STR,
        CONVERT_DOUBLE_TO_SINGLE_QUOTES,
        HANDLES_SINGLE_QUOTES,
        APOSTROPHE,
        CONTRACTION_1,
        CONTRACTION_2,
        CONTRACTION_3,
        CONTRACTION_4,
        CONTRACTION_5,
        CONTRACTION_6,
        CONTRACTION_7,
        CONTRACTION_8,
        CONTRACTION_9,
        CONTRACTION_10,
        CONTRACTION_11,
        CONTRACTION_12,
        CONTRACTION_13,
        CONTRACTION_14,
        CONTRACTION_15,
        CONTRACTION_16,
        CONTRACTION_17,
        CONTRACTION_18,
        CONTRACTION_19,
    ]

    MOSES_PENN_REGEXES_2 = [
        RESTORE_ELLIPSIS,
        CLEAN_EXTRA_SPACE_1,
        CLEAN_EXTRA_SPACE_2,
        CLEAN_EXTRA_SPACE_3,
        ESCAPE_AMPERSAND,
        ESCAPE_PIPE,
        ESCAPE_LEFT_ANGLE_BRACKET,
        ESCAPE_RIGHT_ANGLE_BRACKET,
        ESCAPE_SINGLE_QUOTE,
        ESCAPE_DOUBLE_QUOTE,
    ]

    MOSES_ESCAPE_XML_REGEXES = [
        ESCAPE_AMPERSAND,
        ESCAPE_PIPE,
        ESCAPE_LEFT_ANGLE_BRACKET,
        ESCAPE_RIGHT_ANGLE_BRACKET,
        ESCAPE_SINGLE_QUOTE,
        ESCAPE_DOUBLE_QUOTE,
        ESCAPE_LEFT_SQUARE_BRACKET,
        ESCAPE_RIGHT_SQUARE_BRACKET,
    ]

    BASIC_PROTECTED_PATTERNS = [
        BASIC_PROTECTED_PATTERN_1,
        BASIC_PROTECTED_PATTERN_2,
        BASIC_PROTECTED_PATTERN_3,
        BASIC_PROTECTED_PATTERN_4,
        BASIC_PROTECTED_PATTERN_5,
    ]

    WEB_PROTECTED_PATTERNS = [
        r"((https?|ftp|rsync)://|www\.)[^ ]*",  # URLs
        r"[\w\-\_\.]+\@([\w\-\_]+\.)+[a-zA-Z]{2,}",  # Emails user@host.domain
        r"@[a-zA-Z0-9_]+",  # @handler such as twitter/github ID
        r"#[a-zA-Z0-9_]+",  # @hashtag
        # TODO: emojis especially the multi codepoints
    ]
def __init__(self, lang="en", custom_nonbreaking_prefixes_file=None):
    """
    :param lang: ISO language code used to select nonbreaking prefixes
        and language-specific tokenization rules.
    :param custom_nonbreaking_prefixes_file: Optional path to a file of
        nonbreaking prefixes that *replaces* the built-in list; lines that
        are empty or start with "#" are ignored.
    """
    # Initialize the object.
    super(MosesTokenizer, self).__init__()
    self.lang = lang
    # Initialize the language specific nonbreaking prefixes.
    self.NONBREAKING_PREFIXES = [
        _nbp.strip() for _nbp in nonbreaking_prefixes.words(lang)
    ]
    # Load custom nonbreaking prefixes file.
    if custom_nonbreaking_prefixes_file:
        self.NONBREAKING_PREFIXES = []
        with open(custom_nonbreaking_prefixes_file, "r") as fin:
            for line in fin:
                line = line.strip()
                if line and not line.startswith("#"):
                    if line not in self.NONBREAKING_PREFIXES:
                        self.NONBREAKING_PREFIXES.append(line)

    # Prefixes flagged #NUMERIC_ONLY# only block a split when followed by
    # a digit; keep the prefix text without the flag.
    self.NUMERIC_ONLY_PREFIXES = [
        w.rpartition(" ")[0]
        for w in self.NONBREAKING_PREFIXES
        if self.has_numeric_only(w)
    ]
    # Add CJK characters to alpha and alnum.
    if self.lang in ["zh", "ja", "ko", "cjk"]:
        cjk_chars = ""
        if self.lang in ["ko", "cjk"]:
            cjk_chars += str("".join(perluniprops.chars("Hangul")))
        if self.lang in ["zh", "cjk"]:
            cjk_chars += str("".join(perluniprops.chars("Han")))
        if self.lang in ["ja", "cjk"]:
            cjk_chars += str("".join(perluniprops.chars("Hiragana")))
            cjk_chars += str("".join(perluniprops.chars("Katakana")))
            cjk_chars += str("".join(perluniprops.chars("Han")))
        self.IsAlpha += cjk_chars
        self.IsAlnum += cjk_chars
        # Overwrite the alnum regexes so they include the CJK ranges.
        self.PAD_NOT_ISALNUM = r"([^{}\s\.'\`\,\-])".format(self.IsAlnum), r" \1 "
        self.AGGRESSIVE_HYPHEN_SPLIT = (
            r"([{alphanum}])\-(?=[{alphanum}])".format(alphanum=self.IsAlnum),
            r"\1 @-@ ",
        )
        # BUGFIX: the replacement previously used Perl-style "$1"/"$2"
        # backreferences, which Python's re module inserts as literal text.
        self.INTRATOKEN_SLASHES = (
            r"([{alphanum}])\/([{alphanum}])".format(alphanum=self.IsAlnum),
            r"\1 @/@ \2",
        )
def replace_multidots(self, text):
    """Encode runs of dots as DOT(DOT)*MULTI placeholder tokens."""
    # Mark the start of every multi-dot run with a DOTMULTI placeholder.
    text = re.sub(r"\.([\.]+)", r" DOTMULTI\1", text)
    # Fold the remaining dots of each run into nested DOTDOTMULTI markers,
    # repeating until no "DOTMULTI." sequence is left.
    while True:
        folded = re.sub(r"DOTMULTI\.([^\.])", r"DOTDOTMULTI \1", text)
        folded = re.sub(r"DOTMULTI\.", "DOTDOTMULTI", folded)
        if folded == text:
            return text
        text = folded
def restore_multidots(self, text):
    """Invert replace_multidots(): expand DOT*MULTI placeholders into dots."""
    # Unfold one nesting level per pass until no DOTDOTMULTI markers remain.
    while "DOTDOTMULTI" in text:
        text = text.replace("DOTDOTMULTI", "DOTMULTI.")
    # The innermost marker stands for a single dot.
    return text.replace("DOTMULTI", ".")
def islower(self, text):
    """Return True if every character of *text* belongs to the IsLower set."""
    return set(text) <= set(self.IsLower)

def isanyalpha(self, text):
    """Return True if any character of *text* belongs to the IsAlpha set."""
    return bool(set(text) & set(self.IsAlpha))

def has_numeric_only(self, text):
    """Return True if *text* carries the #NUMERIC_ONLY# prefix flag."""
    return re.search(r"[\s]+(\#NUMERIC_ONLY\#)", text) is not None
def handles_nonbreaking_prefixes(self, text):
    """
    Split a trailing period off each token unless the token is a known
    nonbreaking prefix (e.g. an abbreviation such as "Mr." or "No." before
    a number), in which case the period stays attached.
    """
    # Splits the text into tokens to check for nonbreaking prefixes.
    tokens = text.split()
    num_tokens = len(tokens)
    for i, token in enumerate(tokens):
        # Checks if token ends with a fullstop.
        token_ends_with_period = re.search(r"^(\S+)\.$", token)
        if token_ends_with_period:
            prefix = token_ends_with_period.group(1)
            # Checks for 3 conditions if
            # i.   the prefix contains a fullstop and
            #      any char in the prefix is within the IsAlpha charset
            # ii.  the prefix is in the list of nonbreaking prefixes and
            #      does not contain #NUMERIC_ONLY#
            # iii. the token is not the last token and that the
            #      next token contains all lowercase.
            if (
                ("." in prefix and self.isanyalpha(prefix))
                or (
                    prefix in self.NONBREAKING_PREFIXES
                    and prefix not in self.NUMERIC_ONLY_PREFIXES
                )
                or (
                    i != num_tokens - 1
                    and tokens[i + 1]
                    and self.islower(tokens[i + 1][0])
                )
            ):
                pass  # No change to the token.
            # Checks if the prefix is in NUMERIC_ONLY_PREFIXES
            # and ensures that the next word is a digit.
            elif (
                prefix in self.NUMERIC_ONLY_PREFIXES
                and (i + 1) < num_tokens
                and re.search(r"^[0-9]+", tokens[i + 1])
            ):
                pass  # No change to the token.
            else:  # Otherwise, adds a space after the tokens before a dot.
                tokens[i] = prefix + " ."
    return " ".join(tokens)  # Stitch the tokens back.
def escape_xml(self, text):
    """Apply the Moses XML-escaping substitutions to *text* in order."""
    escaped = text
    for pattern, replacement in self.MOSES_ESCAPE_XML_REGEXES:
        escaped = re.sub(pattern, replacement, escaped)
    return escaped
def penn_tokenize(self, text, return_str=False):
    """
    This is a Python port of the Penn treebank tokenizer adapted by the Moses
    machine translation community.

    :param text: A single sentence string.
    :param return_str: Return a string when True, a token list otherwise.
    """
    # Coerce input to a (unicode) string.
    text = str(text)
    # First chain of regex substitutions (quotes, commas, symbols, ...).
    for pattern, replacement in self.MOSES_PENN_REGEXES_1:
        text = re.sub(pattern, replacement, text)
    # Handle nonbreaking prefixes before the final cleanup pass.
    text = self.handles_nonbreaking_prefixes(text)
    # Restore ellipsis, clean extra spaces, escape XML symbols.
    for pattern, replacement in self.MOSES_PENN_REGEXES_2:
        text = re.sub(pattern, replacement, text)
    if return_str:
        return text
    return text.split()
def tokenize(
    self,
    text,
    aggressive_dash_splits=False,
    return_str=False,
    escape=True,
    protected_patterns=None,
):
    """
    Python port of the Moses tokenizer.

    :param tokens: A single string, i.e. sentence text.
    :type tokens: str
    :param aggressive_dash_splits: Option to trigger dash split rules .
    :type aggressive_dash_splits: bool
    :param return_str: Return a string when True, a token list otherwise.
    :type return_str: bool
    :param escape: When True, escape XML special characters in the output.
    :type escape: bool
    :param protected_patterns: Optional list of regex patterns whose matches
        are shielded from tokenization and restored afterwards.
    :type protected_patterns: list(str) or None
    """
    # Converts input string into unicode.
    text = str(text)
    # De-duplicate spaces and clean ASCII junk
    for regexp, substitution in [self.DEDUPLICATE_SPACE, self.ASCII_JUNK]:
        text = re.sub(regexp, substitution, text)

    if protected_patterns:
        # Find the tokens that needs to be protected.
        protected_tokens = [
            match.group()
            for protected_pattern in protected_patterns
            for match in re.finditer(protected_pattern, text, re.IGNORECASE)
        ]
        # Apply the protected_patterns, replacing each protected span with a
        # numbered placeholder so the regex pipeline cannot alter it.
        # NOTE(review): str.replace substitutes *every* occurrence of the
        # matched text, not just the matched span — confirm this is intended.
        for i, token in enumerate(protected_tokens):
            substituition = "THISISPROTECTED" + str(i).zfill(3)
            text = text.replace(token, substituition)
    # Strips heading and trailing spaces.
    text = text.strip()

    # FIXME!!!
    """
    # For Finnish and Swedish, seperate out all "other" special characters.
    if self.lang in ["fi", "sv"]:
        # In Finnish and Swedish, the colon can be used inside words
        # as an apostrophe-like character:
        # USA:n, 20:een, EU:ssa, USA:s, S:t
        regexp, substitution = self.FI_SV_COLON_APOSTROPHE
        text = re.sub(regexp, substitution, text)
        # If a colon is not immediately followed by lower-case characters,
        # separate it out anyway.
        regexp, substitution = self.FI_SV_COLON_NO_LOWER_FOLLOW
        text = re.sub(regexp, substitution, text)
    else:
    """
    # Separate special characters outside of IsAlnum character set.
    regexp, substitution = self.PAD_NOT_ISALNUM
    text = re.sub(regexp, substitution, text)
    # Aggressively splits dashes
    if aggressive_dash_splits:
        regexp, substitution = self.AGGRESSIVE_HYPHEN_SPLIT
        text = re.sub(regexp, substitution, text)
    # Replaces multidots with "DOTDOTMULTI" literal strings.
    text = self.replace_multidots(text)
    # Separate out "," except if within numbers e.g. 5,300
    for regexp, substitution in [
        self.COMMA_SEPARATE_1,
        self.COMMA_SEPARATE_2,
        self.COMMA_SEPARATE_3,
    ]:
        text = re.sub(regexp, substitution, text)

    # (Language-specific) apostrophe tokenization.
    if self.lang == "en":
        for regexp, substitution in self.ENGLISH_SPECIFIC_APOSTROPHE:
            text = re.sub(regexp, substitution, text)
    elif self.lang in ["fr", "it"]:
        for regexp, substitution in self.FR_IT_SPECIFIC_APOSTROPHE:
            text = re.sub(regexp, substitution, text)
    # FIXME!!!
    ##elif self.lang == "so":
    ##    for regexp, substitution in self.SO_SPECIFIC_APOSTROPHE:
    ##        text = re.sub(regexp, substitution, text)
    else:
        regexp, substitution = self.NON_SPECIFIC_APOSTROPHE
        text = re.sub(regexp, substitution, text)

    # Handles nonbreaking prefixes.
    text = self.handles_nonbreaking_prefixes(text)
    # Cleans up extraneous spaces.
    regexp, substitution = self.DEDUPLICATE_SPACE
    text = re.sub(regexp, substitution, text).strip()
    # Split trailing ".'".
    regexp, substituition = self.TRAILING_DOT_APOSTROPHE
    text = re.sub(regexp, substituition, text)

    # Restore the protected tokens by swapping the placeholders back.
    if protected_patterns:
        for i, token in enumerate(protected_tokens):
            substituition = "THISISPROTECTED" + str(i).zfill(3)
            text = text.replace(substituition, token)
    # Restore multidots.
    text = self.restore_multidots(text)
    if escape:
        # Escape XML symbols.
        text = self.escape_xml(text)

    return text if return_str else text.split()
class MosesDetokenizer(object):
    """
    This is a Python port of the Moses Detokenizer from
    https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/detokenizer.perl
    """

    # Perl Unicode Properties character sets (IsSc = currency symbols).
    IsAlnum = str("".join(perluniprops.chars("IsAlnum")))
    IsAlpha = str("".join(perluniprops.chars("IsAlpha")))
    IsSc = str("".join(perluniprops.chars("IsSc")))

    AGGRESSIVE_HYPHEN_SPLIT = r" \@\-\@ ", r"-"

    # Merge multiple spaces.
    ONE_SPACE = re.compile(r" {2,}"), " "

    # Unescape special characters.
    # BUGFIX: the patterns must be XML character entities; an HTML-decoded
    # copy of this file turned them into the raw symbols, which made the
    # substitutions no-ops and broke the file's syntax.
    UNESCAPE_FACTOR_SEPARATOR = r"&#124;", r"|"
    UNESCAPE_LEFT_ANGLE_BRACKET = r"&lt;", r"<"
    UNESCAPE_RIGHT_ANGLE_BRACKET = r"&gt;", r">"
    UNESCAPE_DOUBLE_QUOTE = r"&quot;", r'"'
    UNESCAPE_SINGLE_QUOTE = r"&apos;", r"'"
    UNESCAPE_SYNTAX_NONTERMINAL_LEFT = r"&#91;", r"["
    UNESCAPE_SYNTAX_NONTERMINAL_RIGHT = r"&#93;", r"]"
    UNESCAPE_AMPERSAND = r"&amp;", r"&"
    # The legacy regexes are used to support outputs from older Moses versions.
    UNESCAPE_FACTOR_SEPARATOR_LEGACY = r"&bar;", r"|"
    UNESCAPE_SYNTAX_NONTERMINAL_LEFT_LEGACY = r"&bra;", r"["
    UNESCAPE_SYNTAX_NONTERMINAL_RIGHT_LEGACY = r"&ket;", r"]"

    MOSES_UNESCAPE_XML_REGEXES = [
        UNESCAPE_FACTOR_SEPARATOR_LEGACY,
        UNESCAPE_FACTOR_SEPARATOR,
        UNESCAPE_LEFT_ANGLE_BRACKET,
        UNESCAPE_RIGHT_ANGLE_BRACKET,
        UNESCAPE_SYNTAX_NONTERMINAL_LEFT_LEGACY,
        UNESCAPE_SYNTAX_NONTERMINAL_RIGHT_LEGACY,
        UNESCAPE_DOUBLE_QUOTE,
        UNESCAPE_SINGLE_QUOTE,
        UNESCAPE_SYNTAX_NONTERMINAL_LEFT,
        UNESCAPE_SYNTAX_NONTERMINAL_RIGHT,
        UNESCAPE_AMPERSAND,
    ]

    # Finnish case-suffix morphemes: a colon before one of these is attached
    # without an intervening space (e.g. "EU:n", "USA:ssa").
    FINNISH_MORPHSET_1 = [
        "N",
        "n",
        "A",
        "a",
        "\xc4",
        "\xe4",
        "ssa",
        "Ssa",
        "ss\xe4",
        "Ss\xe4",
        "sta",
        "st\xe4",
        "Sta",
        "St\xe4",
        "hun",
        "Hun",
        "hyn",
        "Hyn",
        "han",
        "Han",
        "h\xe4n",
        "H\xe4n",
        "h\xf6n",
        "H\xf6n",
        "un",
        "Un",
        "yn",
        "Yn",
        "an",
        "An",
        "\xe4n",
        "\xc4n",
        "\xf6n",
        "\xd6n",
        "seen",
        "Seen",
        "lla",
        "Lla",
        "ll\xe4",
        "Ll\xe4",
        "lta",
        "Lta",
        "lt\xe4",
        "Lt\xe4",
        "lle",
        "Lle",
        "ksi",
        "Ksi",
        "kse",
        "Kse",
        "tta",
        "Tta",
        "ine",
        "Ine",
    ]

    FINNISH_MORPHSET_2 = ["ni", "si", "mme", "nne", "nsa"]

    FINNISH_MORPHSET_3 = [
        "ko",
        "k\xf6",
        "han",
        "h\xe4n",
        "pa",
        "p\xe4",
        "kaan",
        "k\xe4\xe4n",
        "kin",
    ]

    FINNISH_REGEX = r"^({})({})?({})$".format(
        str("|".join(FINNISH_MORPHSET_1)),
        str("|".join(FINNISH_MORPHSET_2)),
        str("|".join(FINNISH_MORPHSET_3)),
    )

    def __init__(self, lang="en"):
        """
        :param lang: ISO language code used to select language-specific
            detokenization rules.
        """
        super(MosesDetokenizer, self).__init__()
        self.lang = lang
def unescape_xml(self, text):
    """Convert Moses XML character entities in *text* back to raw symbols."""
    unescaped = text
    for pattern, replacement in self.MOSES_UNESCAPE_XML_REGEXES:
        unescaped = re.sub(pattern, replacement, unescaped)
    return unescaped
def tokenize(self, tokens, return_str=True, unescape=True):
    """
    Python port of the Moses detokenizer.

    :param tokens: A list of strings, i.e. tokenized text.
    :type tokens: list(str)
    :param return_str: Return a single string when True (default),
        otherwise a list of tokens.
    :param unescape: When True, convert XML entities back to raw symbols.
    :return: str
    """
    # Convert the list of tokens into a string and pad it with spaces.
    text = r" {} ".format(" ".join(tokens))
    # Converts input string into unicode.
    text = str(text)
    # Detokenize the agressive hyphen split.
    regexp, substitution = self.AGGRESSIVE_HYPHEN_SPLIT
    text = re.sub(regexp, substitution, text)
    if unescape:
        # Unescape the XML symbols.
        text = self.unescape_xml(text)
    # Keep track of no. of quotation marks.
    quote_counts = {"'": 0, '"': 0, "``": 0, "`": 0, "''": 0}

    # The *prepend_space* variable is used to control the "effects" of
    # detokenization as the function loops through the list of tokens and
    # changes the *prepend_space* accordingly as it sequentially checks
    # through the language specific and language independent conditions.
    prepend_space = " "
    detokenized_text = ""
    tokens = text.split()
    # BUGFIX: the original code called next() on the *list* of tokens to
    # skip the dash in the Czech "-li" branch, which raises TypeError; a
    # skip flag keeps the enumerate() index aligned with `tokens`.
    skip_next = False
    # Iterate through every token and apply language specific detokenization rule(s).
    for i, token in enumerate(tokens):
        if skip_next:
            skip_next = False
            continue
        # Check if the first char is CJK.
        if is_cjk(token[0]) and self.lang != "ko":
            # Perform left shift if this is a second consecutive CJK word.
            if i > 0 and is_cjk(tokens[i - 1][-1]):
                detokenized_text += token
            # But do nothing special if this is a CJK word that doesn't follow a CJK word
            else:
                detokenized_text += prepend_space + token
            prepend_space = " "
        # If it's a currency symbol.
        elif re.search(r"^[" + self.IsSc + r"\(\[\{\¿\¡]+$", token):
            # Perform right shift on currency and other random punctuation items
            detokenized_text += prepend_space + token
            prepend_space = ""

        elif re.search(r"^[\,\.\?\!\:\;\\\%\}\]\)]+$", token):
            # In French, these punctuations are prefixed with a non-breakable space.
            if self.lang == "fr" and re.search(r"^[\?\!\:\;\\\%]$", token):
                detokenized_text += " "
            # Perform left shift on punctuation items.
            detokenized_text += token
            prepend_space = " "

        elif (
            self.lang == "en"
            and i > 0
            and re.search(r"^['][{}]".format(self.IsAlpha), token)
        ):
            # and re.search('[{}]$'.format(self.IsAlnum), tokens[i-1])):
            # For English, left-shift the contraction.
            detokenized_text += token
            prepend_space = " "

        elif (
            self.lang == "cs"
            and i > 1
            # BUGFIX: index relative to the current position; the original
            # tested tokens[-2]/tokens[-1] (the end of the whole sentence).
            and re.search(
                r"^[0-9]+$", tokens[i - 2]
            )  # If the previous previous token is a number.
            and re.search(r"^[.,]$", tokens[i - 1])  # If previous token is a dot.
            and re.search(r"^[0-9]+$", token)
        ):  # If the current token is a number.
            # In Czech, left-shift floats that are decimal numbers.
            detokenized_text += token
            prepend_space = " "

        elif (
            self.lang in ["fr", "it", "ga"]
            and i <= len(tokens) - 2
            and re.search(r"[{}][']$".format(self.IsAlpha), token)
            and re.search(r"^[{}]".format(self.IsAlpha), tokens[i + 1])
        ):  # If the next token is alpha.
            # For French and Italian, right-shift the contraction.
            detokenized_text += prepend_space + token
            prepend_space = ""

        elif (
            self.lang == "cs"
            and i <= len(tokens) - 3
            and re.search(r"[{}][']$".format(self.IsAlpha), token)
            and re.search(r"^[-–]$", tokens[i + 1])
            and re.search(r"^li$|^mail.*", tokens[i + 2], re.IGNORECASE)
        ):  # In Perl, ($words[$i+2] =~ /^li$|^mail.*/i)
            # In Czech, right-shift "-li" and a few Czech dashed words (e.g. e-mail)
            detokenized_text += prepend_space + token + tokens[i + 1]
            skip_next = True  # Advance over the dash
            prepend_space = ""

        # Combine punctuation smartly.
        elif re.search(r"""^[\'\"„“`]+$""", token):
            normalized_quo = token
            if re.search(r"^[„“”]+$", token):
                normalized_quo = '"'
            quote_counts[normalized_quo] = quote_counts.get(normalized_quo, 0)

            if self.lang == "cs" and token == "„":
                quote_counts[normalized_quo] = 0
            if self.lang == "cs" and token == "“":
                quote_counts[normalized_quo] = 1
            if quote_counts[normalized_quo] % 2 == 0:
                if (
                    self.lang == "en"
                    and token == "'"
                    and i > 0
                    and re.search(r"[s]$", tokens[i - 1])
                ):
                    # Left shift on single quote for possessives ending
                    # in "s", e.g. "The Jones' house"
                    detokenized_text += token
                    prepend_space = " "
                else:
                    # Right shift.
                    detokenized_text += prepend_space + token
                    prepend_space = ""
                    quote_counts[normalized_quo] += 1
            else:
                # Left shift.
                detokenized_text += token
                prepend_space = " "
                quote_counts[normalized_quo] += 1

        elif (
            self.lang == "fi"
            and i > 0  # BUGFIX: guard tokens[i - 1] against wrapping to tokens[-1].
            and re.search(r":$", tokens[i - 1])
            and re.search(self.FINNISH_REGEX, token)
        ):
            # Finnish : without intervening space if followed by case suffix
            # EU:N EU:n EU:ssa EU:sta EU:hun EU:iin ...
            detokenized_text += prepend_space + token
            prepend_space = " "

        else:
            detokenized_text += prepend_space + token
            prepend_space = " "

    # Merge multiple spaces.
    regexp, substitution = self.ONE_SPACE
    detokenized_text = re.sub(regexp, substitution, detokenized_text)
    # Removes heading and trailing spaces.
    detokenized_text = detokenized_text.strip()

    return detokenized_text if return_str else detokenized_text.split()
def detokenize(self, tokens, return_str=True, unescape=True):
    """Duck-typing the abstract *tokenize()*."""
    return self.tokenize(tokens, return_str=return_str, unescape=unescape)
__all__ = ["MosesTokenizer", "MosesDetokenizer"]
| 31,128 | 36.595411 | 93 | py |
sacremoses | sacremoses-master/sacremoses/truecase.py | # -*- coding: utf-8 -*-
from __future__ import print_function
import re
from collections import defaultdict, Counter
from functools import partial
from itertools import chain
from sacremoses.corpus import Perluniprops
from sacremoses.util import parallelize_preprocess, grouper
# Hack to enable Python2.7 to use encoding.
import sys
if sys.version_info[0] < 3:
import io
import warnings
open = io.open
warnings.warn(
str(
"You should really be using Python3!!! "
"Tick tock, tick tock, https://pythonclock.org/"
)
)
perluniprops = Perluniprops()
class MosesTruecaser(object):
    """
    This is a Python port of the Moses Truecaser from
    https://github.com/moses-smt/mosesdecoder/blob/master/scripts/recaser/train-truecaser.perl
    https://github.com/moses-smt/mosesdecoder/blob/master/scripts/recaser/truecase.perl
    """

    # Perl Unicode Properties character sets.
    Lowercase_Letter = str("".join(perluniprops.chars("Lowercase_Letter")))
    Uppercase_Letter = str("".join(perluniprops.chars("Uppercase_Letter")))
    # NOTE(review): this is built from the "Uppercase_Letter" set, not
    # "Titlecase_Letter" — presumably a copy-paste slip; confirm upstream.
    Titlecase_Letter = str("".join(perluniprops.chars("Uppercase_Letter")))
def __init__(self, load_from=None, is_asr=None, encoding="utf8"):
    """
    :param load_from: Optional path to a pre-trained truecase model, loaded
        via ``_load_model()``.
    :type load_from: str
    :param is_asr: A flag to indicate that model is for ASR. ASR input has
        no case, make sure it is lowercase, and make sure known are cased
        eg. 'i' to be uppercased even if i is known.
    :type is_asr: bool
    :param encoding: Text encoding used when reading/writing model files.
    :type encoding: str
    """
    # Initialize the object.
    super(MosesTruecaser, self).__init__()
    # Matches any token containing at least one cased letter.
    self.SKIP_LETTERS_REGEX = re.compile(
        "[{}{}{}]".format(
            self.Lowercase_Letter, self.Uppercase_Letter, self.Titlecase_Letter
        )
    )

    self.XML_SPLIT_REGX = re.compile("(<.*(?<=>))(.*)((?=</)[^>]*>)")

    self.SENT_END = {".", ":", "?", "!"}
    # BUGFIX: the escaped forms must stay XML character entities
    # ("&apos;", "&quot;", "&#91;", "&#93;"); an HTML-decoded copy of this
    # set broke the file's syntax and dropped the entity tokens.
    self.DELAYED_SENT_START = {
        "(",
        "[",
        '"',
        "'",
        "&apos;",
        "&quot;",
        "&#91;",
        "&#93;",
    }

    self.encoding = encoding

    self.is_asr = is_asr
    if load_from:
        self.model = self._load_model(load_from)
def learn_truecase_weights(self, tokens, possibly_use_first_token=False):
    """
    This function checks through each tokens in a sentence and returns the
    appropriate weight of each surface token form.

    :param tokens: A tokenized sentence (list of token strings).
    :param possibly_use_first_token: When True, sentence-initial tokens may
        also contribute evidence (full weight if lowercase, 0.1 if it is
        the capitalized second token of the line).
    :returns: List of (lowercased_token, surface_token, weight) triples.
    """
    # Keep track of first tokens in the sentence(s) of the line.
    is_first_word = True
    truecase_weights = []
    for i, token in enumerate(tokens):
        # Skip XML tags.
        if re.search(r"(<\S[^>]*>)", token):
            continue
        # Skip if sentence start symbols.
        elif token in self.DELAYED_SENT_START:
            continue

        # Resets the `is_first_word` after seeing sent end symbols.
        if not is_first_word and token in self.SENT_END:
            is_first_word = True
            continue

        # Skips tokens with nothing to case.
        if not self.SKIP_LETTERS_REGEX.search(token):
            is_first_word = False
            continue

        # If it's not the first word,
        # then set the current word weight to 1.
        current_word_weight = 0
        if not is_first_word:
            current_word_weight = 1

        # Otherwise check whether user wants to optionally
        # use the first word.
        elif possibly_use_first_token:
            # Gated special handling of first word of sentence.
            # Check if first characer of token is lowercase.
            if token[0].islower():
                current_word_weight = 1
            elif i == 1:
                current_word_weight = 0.1

        is_first_word = False

        if current_word_weight > 0:
            truecase_weights.append((token.lower(), token, current_word_weight))
    return truecase_weights
def _train(
self,
document_iterator,
save_to=None,
possibly_use_first_token=False,
processes=1,
progress_bar=False,
):
"""
:param document_iterator: The input document, each outer list is a sentence,
the inner list is the list of tokens for each sentence.
:type document_iterator: iter(list(str))
:param possibly_use_first_token: When True, on the basis that the first
word of a sentence is always capitalized; if this option is provided then:
a) if a sentence-initial token is *not* capitalized, then it is counted, and
b) if a capitalized sentence-initial token is the only token of the segment,
then it is counted, but with only 10% of the weight of a normal token.
:type possibly_use_first_token: bool
:returns: A dictionary of the best, known objects as values from `_casing_to_model()`
:rtype: {'best': dict, 'known': Counter}
"""
casing = defaultdict(Counter)
train_truecaser = partial(
self.learn_truecase_weights,
possibly_use_first_token=possibly_use_first_token,
)
token_weights = chain(
*parallelize_preprocess(
train_truecaser, document_iterator, processes, progress_bar=progress_bar
)
)
# Collect the token_weights from every sentence.
for lowercase_token, surface_token, weight in token_weights:
casing[lowercase_token][surface_token] += weight
# Save to file if specified.
if save_to:
self._save_model_from_casing(casing, save_to)
return self._casing_to_model(casing)
def train(
self,
documents,
save_to=None,
possibly_use_first_token=False,
processes=1,
progress_bar=False,
):
"""
Default duck-type of _train(), accepts list(list(str)) as input documents.
"""
self.model = None # Clear the model first.
self.model = self._train(
documents,
save_to,
possibly_use_first_token,
processes,
progress_bar=progress_bar,
)
return self.model
def train_from_file(
self,
filename,
save_to=None,
possibly_use_first_token=False,
processes=1,
progress_bar=False,
):
"""
Duck-type of _train(), accepts a filename to read as a `iter(list(str))`
object.
"""
with open(filename, encoding=self.encoding) as fin:
# document_iterator = map(str.split, fin.readlines())
document_iterator = (
line.split() for line in fin.readlines()
) # Lets try a generator comprehension for Python2...
self.model = None # Clear the model first.
self.model = self._train(
document_iterator,
save_to,
possibly_use_first_token,
processes,
progress_bar=progress_bar,
)
return self.model
def train_from_file_object(
self,
file_object,
save_to=None,
possibly_use_first_token=False,
processes=1,
progress_bar=False,
):
"""
Duck-type of _train(), accepts a file object to read as a `iter(list(str))`
object.
"""
# document_iterator = map(str.split, file_object.readlines())
document_iterator = (
line.split() for line in file_object.readlines()
) # Lets try a generator comprehension for Python2...
self.model = None # Clear the model first.
self.model = self._train(
document_iterator,
save_to,
possibly_use_first_token,
processes,
progress_bar=progress_bar,
)
return self.model
    def truecase(self, text, return_str=False, use_known=False):
        """
        Truecase a single sentence / line of text.
        :param text: A single string, i.e. sentence text.
        :type text: str
        :param return_str: If True, return a joined string instead of a token list.
        :type return_str: bool
        :param use_known: Use the known case if a word is a known word but not the first word.
        :type use_known: bool
        :returns: The truecased tokens, or one joined string if `return_str`.
        :rtype: list(str) or str
        """
        check_model_message = str(
            "\nUse Truecaser.train() to train a model.\n"
            "Or use Truecaser('modefile') to load a model."
        )
        assert hasattr(self, "model"), check_model_message
        # Keep track of first tokens in the sentence(s) of the line.
        is_first_word = True
        truecased_tokens = []
        tokens = self.split_xml(text)
        # best_cases = best_cases if best_cases else self.model['best']
        # known_cases = known_cases if known_cases else self.model['known']
        for i, token in enumerate(tokens):
            # Append XML tags unchanged and continue.
            if re.search(r"(<\S[^>]*>)", token):
                truecased_tokens.append(token)
                continue
            # Factor delimiters (pipes) should normally arrive escaped; to
            # keep the truecaser resilient, pass through any token that is,
            # or starts with, a raw pipe unchanged.
            if token == "|" or token.startswith("|"):
                truecased_tokens.append(token)
                continue
            # Reads the word token and factors separately.
            token, other_factors = re.search(r"^([^\|]+)(.*)", token).groups()
            # Lowercase the ASR tokens (ASR input carries no case).
            if self.is_asr:
                token = token.lower()
            # The actual case replacement happens here.
            # "Most frequent" case of the word.
            best_case = self.model["best"].get(token.lower(), None)
            # Other known cases of the word.
            known_case = self.model["known"].get(token, None)
            # If it's the start of sentence.
            if is_first_word and best_case:  # Truecase sentence start.
                token = best_case
            elif known_case:  # Don't change known tokens.
                token = known_case if use_known else token
            elif (
                best_case
            ):  # Truecase otherwise unknown tokens? Heh? From https://github.com/moses-smt/mosesdecoder/blob/master/scripts/recaser/truecase.perl#L66
                token = best_case
            # Else, it's an unknown word, don't change the word.
            # Concat the truecased `word` with the `other_factors`
            token = token + other_factors
            # Adds the truecased word.
            truecased_tokens.append(token)
            # Resets sentence start if this token is an ending punctuation.
            is_first_word = token in self.SENT_END
            if token in self.DELAYED_SENT_START:
                is_first_word = False
        # return ' '.join(tokens)
        return " ".join(truecased_tokens) if return_str else truecased_tokens
def truecase_file(self, filename, return_str=True):
with open(filename, encoding=self.encoding) as fin:
for line in fin:
truecased_tokens = self.truecase(line.strip())
# Yield the truecased line.
yield " ".join(truecased_tokens) if return_str else truecased_tokens
    @staticmethod
    def split_xml(line):
        """
        Python port of split_xml function in Moses' truecaser:
        https://github.com/moses-smt/mosesdecoder/blob/master/scripts/recaser/truecaser.perl
        Splits a tokenized line into tokens while keeping whole XML tags as
        single tokens.
        :param line: Input string, should be tokenized, separated by space.
        :type line: str
        :returns: The list of tokens.
        :rtype: list(str)
        """
        line = line.strip()
        tokens = []
        # Repeatedly peel one token (or tag) off the front of `line`.
        while line:
            # Assumes that xml tag is always separated by space.
            has_xml = re.search(r"^\s*(<\S[^>]*>)(.*)$", line)
            # non-XML test.
            is_non_xml = re.search(r"^\s*([^\s<>]+)(.*)$", line)
            # '<' or '>' occurs in word, but it's not an XML tag
            xml_cognates = re.search(r"^\s*(\S+)(.*)$", line)
            if has_xml:
                potential_xml, line_next = has_xml.groups()
                # exception for factor that is an XML tag: glue the tag onto
                # the previous token when that token ends with a factor pipe.
                if (
                    re.search(r"^\S", line)
                    and len(tokens) > 0
                    and re.search(r"\|$", tokens[-1])
                ):
                    tokens[-1] += potential_xml
                    # If it's a token with factors, join with the previous token.
                    is_factor = re.search(r"^(\|+)(.*)$", line_next)
                    if is_factor:
                        tokens[-1] += is_factor.group(1)
                        line_next = is_factor.group(2)
                else:
                    tokens.append(
                        potential_xml + " "
                    )  # Token hack, unique to sacremoses.
                line = line_next
            elif is_non_xml:
                tokens.append(is_non_xml.group(1))  # Token hack, unique to sacremoses.
                line = is_non_xml.group(2)
            elif xml_cognates:
                tokens.append(
                    xml_cognates.group(1)
                )  # Token hack, unique to sacremoses.
                line = xml_cognates.group(2)
            else:
                raise Exception("ERROR: huh? {}".format(line))
        tokens[-1] = tokens[-1].strip()  # Token hack, unique to sacremoses.
        return tokens
def _casing_to_model(self, casing):
"""
:returns: A tuple of the (best, known) objects.
:rtype: tuple(dict, Counter)
"""
best = {}
known = Counter()
for token_lower in casing:
tokens = casing[token_lower].most_common()
# Set the most frequent case as the "best" case.
best[token_lower] = tokens[0][0]
# If it's asr, throw away everything
if not self.is_asr:
for token, count in tokens[1:]:
# Note: This is rather odd that the counts are thrown away...
# from https://github.com/moses-smt/mosesdecoder/blob/master/scripts/recaser/truecase.perl#L34
known[token] += 1
model = {"best": best, "known": known, "casing": casing}
return model
    def save_model(self, filename):
        """
        Serialize the raw counts of the currently trained model to *filename*
        in the Moses train-truecaser output format.
        :param filename: Path of the output model file.
        :type filename: str
        """
        self._save_model_from_casing(self.model["casing"], filename)
def _save_model_from_casing(self, casing, filename):
"""
Outputs the truecaser model file in the same output format as
https://github.com/moses-smt/mosesdecoder/blob/master/scripts/recaser/train-truecaser.perl
:param casing: The dictionary of tokens counter from `train()`.
:type casing: default(Counter)
"""
with open(filename, "w", encoding=self.encoding) as fout:
for token in casing:
total_token_count = sum(casing[token].values())
tokens_counts = []
for i, (token, count) in enumerate(casing[token].most_common()):
if i == 0:
out_token = "{} ({}/{})".format(token, count, total_token_count)
else:
out_token = "{} ({})".format(token, count, total_token_count)
tokens_counts.append(out_token)
print(" ".join(tokens_counts), end="\n", file=fout)
def _load_model(self, filename):
"""
Loads pre-trained truecasing file.
:returns: A dictionary of the best, known objects as values from `_casing_to_model()`
:rtype: {'best': dict, 'known': Counter}
"""
casing = defaultdict(Counter)
with open(filename, encoding=self.encoding) as fin:
for line in fin:
line = line.strip().split()
for token, count in grouper(line, 2):
count = count.split("/")[0].strip("()")
casing[token.lower()][token] = int(count)
# Returns the best and known object from `_casing_to_model()`
return self._casing_to_model(casing)
class MosesDetruecaser(object):
    """
    Restores natural capitalization to text produced by a system trained on
    truecased data, following Moses' detruecase.perl.
    """

    def __init__(self):
        # Initialize the object.
        super(MosesDetruecaser, self).__init__()
        # Punctuation that ends a sentence: the next word is capitalized.
        self.SENT_END = {".", ":", "?", "!"}
        # Symbols that may follow a sentence end without consuming the
        # "first word of sentence" slot.  Includes the Moses XML-escaped
        # forms of the quote/bracket characters (the raw-character
        # duplicates in the previous version were these entities,
        # corrupted by HTML-entity decoding).
        self.DELAYED_SENT_START = {
            "(",
            "[",
            '"',
            "'",
            "&apos;",
            "&quot;",
            "&#91;",
            "&#93;",
        }
        # Some predefined tokens that will always be in lowercase, even in
        # headline (title-case) mode.
        self.ALWAYS_LOWER = {
            "a", "after", "against", "al-.+", "and", "any", "as", "at",
            "be", "because", "between", "by", "during", "el-.+", "for",
            "from", "his", "in", "is", "its", "last", "not", "of", "off",
            "on", "than", "the", "their", "this", "to", "was", "were",
            "which", "will", "with",
        }

    def detruecase(self, text, is_headline=False, return_str=False):
        """
        Detruecase the translated files from a model that learnt from truecased
        tokens.
        :param text: A single string, i.e. sentence text.
        :type text: str
        :param is_headline: If True, additionally title-case every word that
            is not a known always-lowercase function word.
        :type is_headline: bool
        :param return_str: If True, return a joined string instead of tokens.
        :type return_str: bool
        :rtype: list(str) or str
        """
        # `cased_tokens` keeps track of detruecased tokens.
        cased_tokens = []
        sentence_start = True
        for token in text.split():
            # Capitalize only the first character of a sentence-initial token.
            token = token[:1].upper() + token[1:] if sentence_start else token
            cased_tokens.append(token)
            # Track whether the *next* token starts a sentence.
            if token in self.SENT_END:
                sentence_start = True
            elif token not in self.DELAYED_SENT_START:
                sentence_start = False
        # Check if it's a headline, if so then use title case.
        if is_headline:
            cased_tokens = [
                token if token in self.ALWAYS_LOWER else token[:1].upper() + token[1:]
                for token in cased_tokens
            ]
        return " ".join(cased_tokens) if return_str else cased_tokens
# Public API of this module.
__all__ = ["MosesTruecaser", "MosesDetruecaser"]
| 18,635 | 34.701149 | 150 | py |
sacremoses | sacremoses-master/sacremoses/util.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from itertools import tee, zip_longest
from xml.sax.saxutils import escape, unescape
from joblib import Parallel, delayed
from tqdm import tqdm
class CJKChars(object):
    """
    An object that enumerates the code points of the CJK characters as listed on
    http://en.wikipedia.org/wiki/Basic_Multilingual_Plane#Basic_Multilingual_Plane
    Each constant is an inclusive (start, end) pair of integer code points;
    `ranges` collects them all for membership testing (see `is_cjk`).
    """

    # Hangul Jamo (1100–11FF)
    Hangul_Jamo = (4352, 4607)  # (ord("\u1100"), ord("\u11ff"))

    # CJK Radicals Supplement (2E80–2EFF)
    # Kangxi Radicals (2F00–2FDF)
    # Ideographic Description Characters (2FF0–2FFF)
    # CJK Symbols and Punctuation (3000–303F)
    # Hiragana (3040–309F)
    # Katakana (30A0–30FF)
    # Bopomofo (3100–312F)
    # Hangul Compatibility Jamo (3130–318F)
    # Kanbun (3190–319F)
    # Bopomofo Extended (31A0–31BF)
    # CJK Strokes (31C0–31EF)
    # Katakana Phonetic Extensions (31F0–31FF)
    # Enclosed CJK Letters and Months (3200–32FF)
    # CJK Compatibility (3300–33FF)
    # CJK Unified Ideographs Extension A (3400–4DBF)
    # Yijing Hexagram Symbols (4DC0–4DFF)
    # CJK Unified Ideographs (4E00–9FFF)
    # Yi Syllables (A000–A48F)
    # Yi Radicals (A490–A4CF)
    CJK_Radicals = (11904, 42191)  # (ord("\u2e80"), ord("\ua4cf"))

    # Phags-pa (A840–A87F)
    Phags_Pa = (43072, 43135)  # (ord("\ua840"), ord("\ua87f"))

    # Hangul Syllables (AC00–D7AF)
    Hangul_Syllables = (44032, 55215)  # (ord("\uAC00"), ord("\uD7AF"))

    # CJK Compatibility Ideographs (F900–FAFF)
    CJK_Compatibility_Ideographs = (63744, 64255)  # (ord("\uF900"), ord("\uFAFF"))

    # CJK Compatibility Forms (FE30–FE4F)
    CJK_Compatibility_Forms = (65072, 65103)  # (ord("\uFE30"), ord("\uFE4F"))

    # Range U+FF65–FFDC encodes halfwidth forms, of Katakana and Hangul characters
    Katakana_Hangul_Halfwidth = (65381, 65500)  # (ord("\uFF65"), ord("\uFFDC"))

    # Ideographic Symbols and Punctuation (16FE0–16FFF)
    Ideographic_Symbols_And_Punctuation = (
        94176,
        94207,
    )  # (ord("\U00016FE0"), ord("\U00016FFF"))

    # Tangut (17000-187FF)
    # Tangut Components (18800-18AFF)
    Tangut = (94208, 101119)  # (ord("\U00017000"), ord("\U00018AFF"))

    # Kana Supplement (1B000-1B0FF)
    # Kana Extended-A (1B100-1B12F)
    Kana_Supplement = (110592, 110895)  # (ord("\U0001B000"), ord("\U0001B12F"))

    # Nushu (1B170-1B2FF)
    Nushu = (110960, 111359)  # (ord("\U0001B170"), ord("\U0001B2FF"))

    # Supplementary Ideographic Plane (20000–2FFFF)
    Supplementary_Ideographic_Plane = (
        131072,
        196607,
    )  # (ord("\U00020000"), ord("\U0002FFFF"))

    # NOTE: Ideographic_Symbols_And_Punctuation is defined above but is not
    # included in this list (its span lies just below the Tangut range).
    ranges = [
        Hangul_Jamo,
        CJK_Radicals,
        Phags_Pa,
        Hangul_Syllables,
        CJK_Compatibility_Ideographs,
        CJK_Compatibility_Forms,
        Katakana_Hangul_Halfwidth,
        Tangut,
        Kana_Supplement,
        Nushu,
        Supplementary_Ideographic_Plane,
    ]
def is_cjk(character):
    """
    This checks for CJK character.

    The ranges are inlined (rather than read from ``CJKChars.ranges``) so the
    function is self-contained; keep them in sync with :class:`CJKChars`.

    >>> CJKChars().ranges
    [(4352, 4607), (11904, 42191), (43072, 43135), (44032, 55215), (63744, 64255), (65072, 65103), (65381, 65500), (94208, 101119), (110592, 110895), (110960, 111359), (131072, 196607)]
    >>> is_cjk('\u33fe')
    True
    >>> is_cjk('\uFE5F')
    False

    :param character: The character that needs to be checked.
    :type character: char
    :return: bool
    """
    code_point = ord(character)
    # A generator lets any() short-circuit on the first matching range;
    # the original list comprehension always evaluated all eleven ranges.
    return any(
        start <= code_point <= end
        for start, end in (
            (4352, 4607),
            (11904, 42191),
            (43072, 43135),
            (44032, 55215),
            (63744, 64255),
            (65072, 65103),
            (65381, 65500),
            (94208, 101119),
            (110592, 110895),
            (110960, 111359),
            (131072, 196607),
        )
    )
def xml_escape(text):
    """
    This function transforms the input text into an "escaped" version suitable
    for well-formed XML formatting.

    Note that the default xml.sax.saxutils.escape() function don't escape
    some characters that Moses does so we have to manually add them to the
    entities dictionary.  (The previous entities mapped each character to
    itself — a no-op caused by HTML-entity decoding of the source; the Moses
    entity targets are restored here.)

    >>> input_str = ''')|&<>'"]['''
    >>> expected_output =  ''')|&amp;&lt;&gt;'"]['''
    >>> escape(input_str) == expected_output
    True
    >>> xml_escape(input_str)
    ')&#124;&amp;&lt;&gt;&apos;&quot;&#93;&#91;'

    :param text: The text that needs to be escaped.
    :type text: str
    :rtype: str
    """
    return escape(
        text,
        entities={
            r"'": r"&apos;",
            r'"': r"&quot;",
            r"|": r"&#124;",
            r"[": r"&#91;",
            r"]": r"&#93;",
        },
    )
def xml_unescape(text):
    """
    This function transforms the "escaped" version suitable
    for well-formed XML formatting into humanly-readable string.

    Note that the default xml.sax.saxutils.unescape() function don't unescape
    some characters that Moses does so we have to manually add them to the
    entities dictionary.  (The previous entities mapped each character to
    itself — a no-op caused by HTML-entity decoding of the source; the Moses
    entity sources are restored here.)

    >>> from xml.sax.saxutils import unescape
    >>> s = ')&#124;&amp;&lt;&gt;&apos;&quot;&#93;&#91;'
    >>> xml_unescape(s)
    ')|&<>\\'"]['

    :param text: The text that needs to be unescaped.
    :type text: str
    :rtype: str
    """
    return unescape(
        text,
        entities={
            r"&apos;": r"'",
            r"&quot;": r'"',
            r"&#124;": r"|",
            r"&#91;": r"[",
            r"&#93;": r"]",
        },
    )
def pairwise(iterable):
    """
    Iterate over successive overlapping pairs:
    s -> (s0,s1), (s1,s2), (s2, s3), ...
    Recipe from https://docs.python.org/3/library/itertools.html#recipes
    """
    current, ahead = tee(iterable)
    # Advance one copy by a single element so the two iterators run in
    # lockstep, offset by one; `next(..., None)` is a no-op on empty input.
    next(ahead, None)
    return zip(current, ahead)
def grouper(iterable, n, fillvalue=None):
    """Collect data into fixed-length chunks or blocks
    from https://stackoverflow.com/a/16789869/610569

    grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    """
    # n references to the *same* iterator: zip_longest pulls n consecutive
    # items per output tuple, padding the final short chunk with `fillvalue`.
    shared_iterator = iter(iterable)
    return zip_longest(*((shared_iterator,) * n), fillvalue=fillvalue)
def parallelize_preprocess(func, iterator, processes, progress_bar=False):
    """
    Apply *func* to every item of *iterator*, optionally with a tqdm progress
    bar and/or joblib-based multiprocessing when `processes > 1`.
    """
    source = tqdm(iterator) if progress_bar else iterator
    if processes <= 1:
        # Single process: a plain lazy map, no joblib overhead.
        return map(func, source)
    return Parallel(n_jobs=processes)(delayed(func)(item) for item in source)
| 6,482 | 29.294393 | 189 | py |
sacremoses | sacremoses-master/sacremoses/data/nonbreaking_prefixes/nonbreaking_prefix.cs | Bc
BcA
Ing
Ing.arch
MUDr
MVDr
MgA
Mgr
JUDr
PhDr
RNDr
PharmDr
ThLic
ThDr
Ph.D
Th.D
prof
doc
CSc
DrSc
dr. h. c
PaedDr
Dr
PhMr
DiS
abt
ad
a.i
aj
angl
anon
apod
atd
atp
aut
bd
biogr
b.m
b.p
b.r
cca
cit
cizojaz
c.k
col
čes
čín
čj
ed
facs
fasc
fol
fot
franc
h.c
hist
hl
hrsg
ibid
il
ind
inv.č
jap
jhdt
jv
koed
kol
korej
kl
krit
lat
lit
m.a
maď
mj
mp
násl
např
nepubl
něm
no
nr
n.s
okr
odd
odp
obr
opr
orig
phil
pl
pokrač
pol
port
pozn
př.kr
př.n.l
přel
přeprac
příl
pseud
pt
red
repr
resp
revid
rkp
roč
roz
rozš
samost
sect
sest
seš
sign
sl
srv
stol
sv
šk
šk.ro
špan
tab
t.č
tis
tj
tř
tzv
univ
uspoř
vol
vl.jm
vs
vyd
vyobr
zal
zejm
zkr
zprac
zvl
n.p
např
než
MUDr
abl
absol
adj
adv
ak
ak. sl
akt
alch
amer
anat
angl
anglosas
arab
arch
archit
arg
astr
astrol
att
bás
belg
bibl
biol
boh
bot
bulh
círk
csl
č
čas
čes
dat
děj
dep
dět
dial
dór
dopr
dosl
ekon
epic
etnonym
eufem
f
fam
fem
fil
film
form
fot
fr
fut
fyz
gen
geogr
geol
geom
germ
gram
hebr
herald
hist
hl
hovor
hud
hut
chcsl
chem
ie
imp
impf
ind
indoevr
inf
instr
interj
ión
iron
it
kanad
katalán
klas
kniž
komp
konj
konkr
kř
kuch
lat
lék
les
lid
lit
liturg
lok
log
m
mat
meteor
metr
mod
ms
mysl
n
náb
námoř
neklas
něm
nesklon
nom
ob
obch
obyč
ojed
opt
part
pas
pejor
pers
pf
pl
plpf
práv
prep
předl
přivl
r
rcsl
refl
reg
rkp
ř
řec
s
samohl
sg
sl
souhl
spec
srov
stfr
střv
stsl
subj
subst
superl
sv
sz
táz
tech
telev
teol
trans
typogr
var
vedl
verb
vl. jm
voj
vok
vůb
vulg
výtv
vztaž
zahr
zájm
zast
zejm
zeměd
zkr
zř
mj
dl
atp
sport
Mgr
horn
MVDr
JUDr
RSDr
Bc
PhDr
ThDr
Ing
aj
apod
PharmDr
pomn
ev
slang
nprap
odp
dop
pol
st
stol
p. n. l
před n. l
n. l
př. Kr
po Kr
př. n. l
odd
RNDr
tzv
atd
tzn
resp
tj
p
br
č. j
čj
č. p
čp
a. s
s. r. o
spol. s r. o
p. o
s. p
v. o. s
k. s
o. p. s
o. s
v. r
v z
ml
vč
kr
mld
hod
popř
ap
event
rus
slov
rum
švýc
P. T
zvl
hor
dol
S.O.S | 1,823 | 3.676923 | 12 | cs |
sacremoses | sacremoses-master/sacremoses/data/nonbreaking_prefixes/nonbreaking_prefix.pl | adw
afr
akad
al
Al
am
amer
arch
art
Art
artyst
astr
austr
bałt
bdb
bł
bm
br
bryg
bryt
centr
ces
chem
chiń
chir
c.k
c.o
cyg
cyw
cyt
czes
czw
cd
Cd
czyt
ćw
ćwicz
daw
dcn
dekl
demokr
det
diec
dł
dn
dot
dol
dop
dost
dosł
h.c
ds
dst
duszp
dypl
egz
ekol
ekon
elektr
em
ew
fab
farm
fot
fr
gat
gastr
geogr
geol
gimn
głęb
gm
godz
górn
gosp
gr
gram
hist
hiszp
hr
Hr
hot
id
in
im
iron
jn
kard
kat
katol
k.k
kk
kol
kl
k.p.a
kpc
k.p.c
kpt
kr
k.r
krak
k.r.o
kryt
kult
laic
łac
niem
woj
nb
np
Nb
Np
pol
pow
m.in
pt
ps
Pt
Ps
cdn
jw
ryc
rys
Ryc
Rys
tj
tzw
Tzw
tzn
zob
ang
ub
ul
pw
pn
pl
al
k
n
nr #NUMERIC_ONLY#
Nr #NUMERIC_ONLY#
ww
wł
ur
zm
żyd
żarg
żyw
wył
bp
bp
wyst
tow
Tow
o
sp
Sp
st
spółdz
Spółdz
społ
spółgł
stoł
stow
Stoł
Stow
zn
zew
zewn
zdr
zazw
zast
zaw
zał
zal
zam
zak
zakł
zagr
zach
adw
Adw
lek
Lek
med
mec
Mec
doc
Doc
dyw
dyr
Dyw
Dyr
inż
Inż
mgr
Mgr
dh
dr
Dh
Dr
p
P
red
Red
prof
prok
Prof
Prok
hab
płk
Płk
nadkom
Nadkom
podkom
Podkom
ks
Ks
gen
Gen
por
Por
reż
Reż
przyp
Przyp
śp
św
śW
Śp
Św
ŚW
szer
Szer
pkt #NUMERIC_ONLY#
str #NUMERIC_ONLY#
tab #NUMERIC_ONLY#
Tab #NUMERIC_ONLY#
tel
ust #NUMERIC_ONLY#
par #NUMERIC_ONLY#
poz
pok
oo
oO
Oo
OO
r #NUMERIC_ONLY#
l #NUMERIC_ONLY#
s #NUMERIC_ONLY#
najśw
Najśw
A
B
C
D
E
F
G
H
I
J
K
L
M
N
O
P
Q
R
S
T
U
V
W
X
Y
Z
Ś
Ć
Ż
Ź
Dz
| 1,265 | 3.457746 | 18 | pl |
sacremoses | sacremoses-master/sacremoses/test/test_corpus.py | # -*- coding: utf-8 -*-
"""
Tests for corpus.py
"""
import sys
import doctest
import unittest
from sacremoses import corpus
class CorpusTest(unittest.TestCase):
    """Sanity checks for the bundled Perluniprops and nonbreaking-prefix data.

    The Python 2 fallback branches (`if sys.version_info[0] >= 3`) were
    removed: the condition is always true on supported interpreters, so the
    `else` paths were dead code.
    """

    def test_perluniprops_chars_sanity_check(self):
        # Every category must yield at least one character, each a `str`.
        perluniprops = corpus.Perluniprops()
        for category in perluniprops.available_categories:
            with self.subTest(category=category):
                count = 0
                for char in perluniprops.chars(category=category):
                    self.assertIsInstance(char, str)
                    count += 1
                self.assertGreater(count, 0)

    def test_perluniprops_chars_manual(self):
        # Spot-check the first few characters of two known categories.
        perluniprops = corpus.Perluniprops()
        self.assertListEqual(
            list(perluniprops.chars("Open_Punctuation"))[:5],
            ["(", "[", "{", "\u0f3a", "\u0f3c"],
        )
        self.assertListEqual(
            list(perluniprops.chars("Currency_Symbol"))[:5],
            ["$", "\xa2", "\xa3", "\xa4", "\xa5"],
        )

    def test_nonbreaking_prefixes_sanity_check(self):
        # Every language must yield at least one prefix word, each a `str`.
        nonbreaking_prefixes = corpus.NonbreakingPrefixes()
        for language in nonbreaking_prefixes.available_langs.values():
            with self.subTest(language=language):
                count = 0
                for word in nonbreaking_prefixes.words(lang=language):
                    self.assertIsInstance(word, str)
                    count += 1
                self.assertGreater(count, 0)

    def test_nonbreaking_prefixes_manual(self):
        # Spot-check the first few prefixes of two known languages.
        nonbreaking_prefixes = corpus.NonbreakingPrefixes()
        self.assertListEqual(
            list(nonbreaking_prefixes.words("en"))[:10],
            ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J"],
        )
        self.assertListEqual(
            list(nonbreaking_prefixes.words("ta"))[:5],
            ["\u0bb0", "\u0bc2", "\u0ba4\u0bbf\u0bb0\u0bc1", "\u0b8f", "\u0baa\u0bc0"],
        )
def load_tests(loader, tests, ignore):
    # unittest `load_tests` protocol hook: attach the doctests embedded in
    # `sacremoses.corpus` so they run as part of this test module.
    tests.addTests(doctest.DocTestSuite(corpus))
    return tests
| 2,698 | 33.164557 | 87 | py |
sacremoses | sacremoses-master/sacremoses/test/test_no_redos_has_numeric_only.py | import time
import unittest
from collections import defaultdict
from sacremoses.corpus import NonbreakingPrefixes
from sacremoses.tokenize import MosesTokenizer
class HasNumericOnlyPatched(unittest.TestCase):
    """Regression tests for `MosesTokenizer.has_numeric_only` after its
    ReDoS-prone regex was replaced."""

    def test_expected_num_only_prefixes(self):
        """Testing if the functionality of the NUMERIC_ONLY_PREFIXES parsing is the same without redos-able regex."""
        # Golden output: for each language, the nonbreaking prefixes that
        # carry the #NUMERIC_ONLY# directive, as (prefix, full_line) pairs.
        expected_prefixes = {'as': [], 'bn': [], 'ca': [], 'cs': [], 'de': [], 'el': [],
                             'en': [('No', 'No #NUMERIC_ONLY#'), ('Art', 'Art #NUMERIC_ONLY#'),
                                    ('pp', 'pp #NUMERIC_ONLY#')],
                             'es': [], 'et': [], 'fi': [], 'fr': [],
                             'ga': [('lch', 'lch #NUMERIC_ONLY#'), ('lgh', 'lgh #NUMERIC_ONLY#'),
                                    ('uimh', 'uimh #NUMERIC_ONLY#')],
                             'gu': [], 'hi': [],
                             'hu': [('jan', 'jan #NUMERIC_ONLY#'), ('Jan', 'Jan #NUMERIC_ONLY#'),
                                    ('Feb', 'Feb #NUMERIC_ONLY#'), ('feb', 'feb #NUMERIC_ONLY#'),
                                    ('márc', 'márc #NUMERIC_ONLY#'), ('Márc', 'Márc #NUMERIC_ONLY#'),
                                    ('ápr', 'ápr #NUMERIC_ONLY#'), ('Ápr', 'Ápr #NUMERIC_ONLY#'),
                                    ('máj', 'máj #NUMERIC_ONLY#'), ('Máj', 'Máj #NUMERIC_ONLY#'),
                                    ('jún', 'jún #NUMERIC_ONLY#'), ('Jún', 'Jún #NUMERIC_ONLY#'),
                                    ('Júl', 'Júl #NUMERIC_ONLY#'), ('júl', 'júl #NUMERIC_ONLY#'),
                                    ('aug', 'aug #NUMERIC_ONLY#'), ('Aug', 'Aug #NUMERIC_ONLY#'),
                                    ('Szept', 'Szept #NUMERIC_ONLY#'), ('szept', 'szept #NUMERIC_ONLY#'),
                                    ('okt', 'okt #NUMERIC_ONLY#'), ('Okt', 'Okt #NUMERIC_ONLY#'),
                                    ('nov', 'nov #NUMERIC_ONLY#'), ('Nov', 'Nov #NUMERIC_ONLY#'),
                                    ('dec', 'dec #NUMERIC_ONLY#'), ('Dec', 'Dec #NUMERIC_ONLY#'),
                                    ('tel', 'tel #NUMERIC_ONLY#'), ('Tel', 'Tel #NUMERIC_ONLY#'),
                                    ('Fax', 'Fax #NUMERIC_ONLY#'), ('fax', 'fax #NUMERIC_ONLY#')],
                             'is': [('no', 'no #NUMERIC_ONLY#'), ('No', 'No #NUMERIC_ONLY#'),
                                    ('nr', 'nr #NUMERIC_ONLY#'), ('Nr', 'Nr #NUMERIC_ONLY#'),
                                    ('nR', 'nR #NUMERIC_ONLY#'), ('NR', 'NR #NUMERIC_ONLY#')],
                             'it': [('No', 'No #NUMERIC_ONLY#'), ('Art', 'Art #NUMERIC_ONLY#'),
                                    ('pp', 'pp #NUMERIC_ONLY#')], 'kn': [],
                             'lt': [('No', 'No #NUMERIC_ONLY#')],
                             'lv': [('Nr', 'Nr #NUMERIC_ONLY#')],
                             'ml': [], 'mni': [], 'mr': [],
                             'nl': [('Nr', 'Nr #NUMERIC_ONLY#'), ('nr', 'nr #NUMERIC_ONLY#')],
                             'or': [], 'pa': [],
                             'pl': [('nr', 'nr #NUMERIC_ONLY#'), ('Nr', 'Nr #NUMERIC_ONLY#'),
                                    ('pkt', 'pkt #NUMERIC_ONLY#'), ('str', 'str #NUMERIC_ONLY#'),
                                    ('tab', 'tab #NUMERIC_ONLY#'), ('Tab', 'Tab #NUMERIC_ONLY#'),
                                    ('ust', 'ust #NUMERIC_ONLY#'), ('par', 'par #NUMERIC_ONLY#'),
                                    ('r', 'r #NUMERIC_ONLY#'), ('l', 'l #NUMERIC_ONLY#'),
                                    ('s', 's #NUMERIC_ONLY#')],
                             'pt': [('No', 'No #NUMERIC_ONLY#'), ('Art', 'Art #NUMERIC_ONLY#'),
                                    ('p', 'p #NUMERIC_ONLY#'), ('pp', 'pp #NUMERIC_ONLY#')],
                             'ro': [], 'ru': [], 'sk': [],
                             'sl': [('št', 'št #NUMERIC_ONLY#'), ('Št', 'Št #NUMERIC_ONLY#')],
                             'sv': [], 'ta': [], 'te': [],
                             'tdt': [('No', 'No #NUMERIC_ONLY#'), ('Art', 'Art #NUMERIC_ONLY#'),
                                     ('p', 'p #NUMERIC_ONLY#'), ('pp', 'pp #NUMERIC_ONLY#')],
                             'yue': [('No', 'No #NUMERIC_ONLY#'), ('Nr', 'Nr #NUMERIC_ONLY#')],
                             'zh': [('No', 'No #NUMERIC_ONLY#'), ('Nr', 'Nr #NUMERIC_ONLY#')]}
        nonbreaking_prefixes = NonbreakingPrefixes()
        moses = MosesTokenizer()
        lang2numonlyprefix = defaultdict(list)
        # Rebuild the mapping with the current implementation and compare.
        for lang in nonbreaking_prefixes.available_langs.values():
            lang2numonlyprefix[lang] = [(w.rpartition(" ")[0], w)
                                        for w in nonbreaking_prefixes.words(lang) if moses.has_numeric_only(w)]
        assert lang2numonlyprefix == expected_prefixes

    def test_stress_has_numeric_only_prefixes(self):
        """Stress testing to prevent redos."""
        moses = MosesTokenizer()
        for i in range(1, 10):
            start_time = time.perf_counter()
            # Long whitespace payloads previously triggered catastrophic
            # backtracking.  NOTE(review): this literal looks garbled by
            # extraction (likely lost an HTML-entity substring) — confirm
            # against the upstream test file.
            payload = " " + " " * (i*500) + ""
            moses.has_numeric_only(payload)
            stop_time = time.perf_counter() - start_time
            # Generous wall-clock bound: a ReDoS regression would blow far
            # past this; a healthy implementation finishes in microseconds.
            assert stop_time < 20
| 5,222 | 64.2875 | 117 | py |
sacremoses | sacremoses-master/sacremoses/test/test_normalizer.py | # -*- coding: utf-8 -*-
"""
Tests for MosesTokenizer
"""
import unittest
from sacremoses.normalize import MosesPunctNormalizer
class TestNormalizer(unittest.TestCase):
    """Tests for MosesPunctNormalizer.

    Bare `assert` statements were replaced with `self.assertEqual`: bare
    asserts are stripped under `python -O` (tests would pass vacuously) and
    give no diff on failure.
    """

    def test_moses_normalize_documents(self):
        moses = MosesPunctNormalizer()
        # Examples from normalizing big.txt
        inputs = [
            "The United States in 1805 (color map) _Facing_ 193",
            "=Formation of the Constitution.=--(1) The plans before the convention,",
            "directions--(1) The infective element must be eliminated. When the ulcer",
            "College of Surgeons, Edinburgh.)]",
        ]
        expected = [
            "The United States in 1805 (color map) _Facing_ 193",
            "=Formation of the Constitution.=-- (1) The plans before the convention,",
            "directions-- (1) The infective element must be eliminated. When the ulcer",
            "College of Surgeons, Edinburgh.) ]",
        ]
        for text, expect in zip(inputs, expected):
            self.assertEqual(moses.normalize(text), expect)

    def test_moses_normalize_quote_comma(self):
        moses_norm_quote = MosesPunctNormalizer("en", norm_quote_commas=True)
        moses_no_norm_quote = MosesPunctNormalizer("en", norm_quote_commas=False)
        text = 'THIS EBOOK IS OTHERWISE PROVIDED TO YOU "AS-IS".'

        expected_norm_quote = 'THIS EBOOK IS OTHERWISE PROVIDED TO YOU "AS-IS."'
        self.assertEqual(moses_norm_quote.normalize(text), expected_norm_quote)

        expected_no_norm_quote = 'THIS EBOOK IS OTHERWISE PROVIDED TO YOU "AS-IS".'
        self.assertEqual(moses_no_norm_quote.normalize(text), expected_no_norm_quote)

    def test_moses_normalize_numbers(self):
        # See https://stackoverflow.com/a/55233871/610569
        moses_norm_num = MosesPunctNormalizer("en", norm_numbers=True)
        moses_no_norm_num = MosesPunctNormalizer("en", norm_numbers=False)

        # U+00A0 (no-break space) used as a digit separator.
        text = "12{}123".format("\u00A0")
        expected = "12.123"
        self.assertEqual(moses_norm_num.normalize(text), expected)

        text = expected = "12 123"
        self.assertEqual(moses_no_norm_num.normalize(text), expected)

    def test_moses_noralize_single_apostrophe(self):
        # (Method-name typo kept so external test selections keep working.)
        moses_norm_num = MosesPunctNormalizer("en")
        text = "yesterday ’s reception"
        expected = "yesterday 's reception"
        self.assertEqual(moses_norm_num.normalize(text), expected)

    def test_replace_unicode_punct(self):
        moses_norm_unicode = MosesPunctNormalizer()
        text = "0《123》 456% 【789】"
        expected = '0"123" 456% [789]'
        self.assertEqual(moses_norm_unicode.replace_unicode_punct(text), expected)

    def test_normalization_pipeline(self):
        moses_norm_unicode = MosesPunctNormalizer(
            pre_replace_unicode_punct=True, post_remove_control_chars=True
        )
        text = "0《123》 456% '' 【789】"
        expected = '0"123" 456% " [789]'
        self.assertEqual(moses_norm_unicode.normalize(text), expected)
| 2,948 | 39.39726 | 88 | py |
sacremoses | sacremoses-master/sacremoses/test/test_tokenizer.py | # -*- coding: utf-8 -*-
"""
Tests for MosesTokenizer
"""
import unittest
from sacremoses.tokenize import MosesTokenizer, MosesDetokenizer
class TestTokenzier(unittest.TestCase):
def test_moses_tokenize(self):
moses = MosesTokenizer()
# Tokenize a sentence.
text = (
"This, is a sentence with weird\xbb symbols\u2026 appearing everywhere\xbf"
)
expected_tokens = "This , is a sentence with weird \xbb symbols \u2026 appearing everywhere \xbf"
tokenized_text = moses.tokenize(text, return_str=True)
assert tokenized_text == expected_tokens
# The nonbreaking prefixes should tokenize the final fullstop.
assert moses.tokenize("abc def.") == ["abc", "def", "."]
# The nonbreaking prefixes should deal the situation when numeric only prefix is the last token.
# In below example, "pp" is the last element, and there is no digit after it.
assert moses.tokenize("2016, pp.") == ["2016", ",", "pp", "."]
# Test escape_xml
text = "This ain't funny. It's actually hillarious, yet double Ls. | [] < > [ ] & You're gonna shake it off? Don't?"
expected_tokens_with_xmlescape = [
"This",
"ain",
"'t",
"funny",
".",
"It",
"'s",
"actually",
"hillarious",
",",
"yet",
"double",
"Ls",
".",
"|",
"[",
"]",
"<",
">",
"[",
"]",
"&",
"You",
"'re",
"gonna",
"shake",
"it",
"off",
"?",
"Don",
"'t",
"?",
]
expected_tokens_wo_xmlescape = [
"This",
"ain",
"'t",
"funny",
".",
"It",
"'s",
"actually",
"hillarious",
",",
"yet",
"double",
"Ls",
".",
"|",
"[",
"]",
"<",
">",
"[",
"]",
"&",
"You",
"'re",
"gonna",
"shake",
"it",
"off",
"?",
"Don",
"'t",
"?",
]
assert moses.tokenize(text, escape=True) == expected_tokens_with_xmlescape
assert moses.tokenize(text, escape=False) == expected_tokens_wo_xmlescape
# Test to check https://github.com/alvations/sacremoses/issues/19
text = "this 'is' the thing"
expected_tokens = ["this", "'", "is", "'", "the", "thing"]
assert moses.tokenize(text, escape=True) == expected_tokens
def test_aggressive_split(self):
moses = MosesTokenizer()
expected_tokens_wo_aggressive_dash_split = ["foo-bar"]
expected_tokens_with_aggressive_dash_split = ["foo", "@-@", "bar"]
assert moses.tokenize("foo-bar") == expected_tokens_wo_aggressive_dash_split
assert (
moses.tokenize("foo-bar", aggressive_dash_splits=True)
== expected_tokens_with_aggressive_dash_split
)
def test_opening_brackets(self):
moses = MosesTokenizer()
text = "By the mid 1990s a version of the game became a Latvian television series (with a parliamentary setting, and played by Latvian celebrities)."
# echo By the mid 1990s a version of the game became a Latvian television series (with a parliamentary setting, and played by Latvian celebrities). | perl mosesdecoder\scripts\tokenizer\tokenizer.perl en
expected_tokens = "By the mid 1990s a version of the game became a Latvian television series ( with a parliamentary setting , and played by Latvian celebrities ) .".split()
assert moses.tokenize(text) == expected_tokens
def test_dot_splitting(self):
moses = MosesTokenizer()
text = "The meeting will take place at 11:00 a.m. Tuesday."
expected_tokens = (
"The meeting will take place at 11 : 00 a.m. Tuesday .".split()
)
self.assertEqual(moses.tokenize(text), expected_tokens)
def test_trailing_dot_apostrophe(self):
moses = MosesTokenizer()
text = "'Hello.'"
expected_tokens = "'Hello . '".split()
self.assertEqual(moses.tokenize(text), expected_tokens)
# FIXME: Implement https://github.com/moses-smt/mosesdecoder/pull/204
@unittest.skip("This is not implemented yet.")
def test_final_dot_unconditionally(self):
# Make sure that it works for examples on
# https://github.com/moses-smt/mosesdecoder/pull/204
text = "'So am I."
expected_tokens = "'So am I .".split()
self.assertEqual(moses.tokenize(text), expected_tokens)
moses = MosesTokenizer(lang="fr")
text = "Des gens admirent une œuvre d'art."
expected_tokens = "Des gens admirent une œuvre d' art .".split()
self.assertEqual(moses.tokenize(text), expected_tokens)
moses = MosesTokenizer(lang="de")
text = "...schwer wie ein iPhone 5."
expected_tokens = "... schwer wie ein iPhone 5 .".split()
self.assertEqual(moses.tokenize(text), expected_tokens)
moses = MosesTokenizer(lang="cz")
text = "Dvě děti, které běží bez bot."
expected_tokens = "Dvě děti , které běží bez bot .".split()
self.assertEqual(moses.tokenize(text), expected_tokens)
# TODO: Make sure that non-breaking words remain non breaking.
def test_protect_patterns(self):
    """Regexes passed via `protected_patterns` shield matching spans
    (here, URLs) from being tokenized apart."""
    moses = MosesTokenizer()
    text = "this is a webpage https://stackoverflow.com/questions/6181381/how-to-print-variables-in-perl that kicks ass"
    expected_tokens = [
        "this",
        "is",
        "a",
        "webpage",
        "https://stackoverflow.com/questions/6181381/how-to-print-variables-in-perl",
        "that",
        "kicks",
        "ass",
    ]
    # Built-in URL/email protection patterns.
    assert (
        moses.tokenize(text, protected_patterns=moses.BASIC_PROTECTED_PATTERNS)
        == expected_tokens
    )
    # Testing against pattern from https://github.com/alvations/sacremoses/issues/35
    # (a single implicitly-concatenated regex string, not five patterns).
    noe_patterns = [
        r"(?:http|ftp)s?://"  # http:// or https://
        r"(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?))"
        r"(?::\d+)?"  # optional port
        r"(?:/\w+)*"
        r"(?:(?:\.[a-z]+)|/?)"
    ]
    assert moses.tokenize(text, protected_patterns=noe_patterns) == expected_tokens
def test_final_comma_split_after_number(self):
    """A comma following a version-like number ('4.1.1,') is split off
    while the dotted number itself stays one token; German quotes become
    their own tokens."""
    moses = MosesTokenizer()
    text = "Sie sollten vor dem Upgrade eine Sicherung dieser Daten erstellen (wie unter Abschnitt 4.1.1, „Sichern aller Daten und Konfigurationsinformationen“ beschrieben). "
    expected_tokens = [
        "Sie",
        "sollten",
        "vor",
        "dem",
        "Upgrade",
        "eine",
        "Sicherung",
        "dieser",
        "Daten",
        "erstellen",
        "(",
        "wie",
        "unter",
        "Abschnitt",
        "4.1.1",
        ",",
        "„",
        "Sichern",
        "aller",
        "Daten",
        "und",
        "Konfigurationsinformationen",
        "“",
        "beschrieben",
        ")",
        ".",
    ]
    self.assertEqual(moses.tokenize(text), expected_tokens)
class TestDetokenizer(unittest.TestCase):
    """Round-trip (tokenize -> detokenize) and language-specific tests for
    MosesDetokenizer."""

    def test_moses_detokenize(self):
        """Detokenization restores normal spacing around punctuation,
        contractions, and special symbols."""
        mt = MosesTokenizer()
        md = MosesDetokenizer()
        text = (
            "This, is a sentence with weird\xbb symbols\u2026 appearing everywhere\xbf"
        )
        expected_tokens = mt.tokenize(text)
        expected_detokens = "This, is a sentence with weird \xbb symbols \u2026 appearing everywhere \xbf"
        assert md.detokenize(expected_tokens) == expected_detokens
        text = "This ain't funny. It's actually hillarious, yet double Ls. | [] < > [ ] & You're gonna shake it off? Don't?"
        expected_tokens = [
            "This",
            "ain",
            "'t",
            "funny",
            ".",
            "It",
            "'s",
            "actually",
            "hillarious",
            ",",
            "yet",
            "double",
            "Ls",
            ".",
            "|",
            "[",
            "]",
            "<",
            ">",
            "[",
            "]",
            "&",
            "You",
            "'re",
            "gonna",
            "shake",
            "it",
            "off",
            "?",
            "Don",
            "'t",
            "?",
        ]
        # Note: the round trip here is not exact -- '[ ]' collapses to '[]'.
        expected_detokens = "This ain't funny. It's actually hillarious, yet double Ls. | [] < > [] & You're gonna shake it off? Don't?"
        assert mt.tokenize(text) == expected_tokens
        assert md.detokenize(expected_tokens) == expected_detokens

    def test_detokenize_with_aggressive_split(self):
        """Hyphenated words split aggressively must detokenize back."""
        mt = MosesTokenizer()
        md = MosesDetokenizer()
        text = "foo-bar"
        assert md.detokenize(mt.tokenize(text, aggressive_dash_splits=True)) == text

    def test_opening_brackets(self):
        """Parenthesized clauses survive an exact round trip."""
        tokenizer = MosesTokenizer()
        detokenizer = MosesDetokenizer()
        text = "By the mid 1990s a version of the game became a Latvian television series (with a parliamentary setting, and played by Latvian celebrities)."
        assert detokenizer.detokenize(tokenizer.tokenize(text)) == text

    def test_french_apostrophes(self):
        """French elision apostrophes (l', d') round-trip exactly."""
        tokenizer = MosesTokenizer(lang="fr")
        detokenizer = MosesDetokenizer(lang="fr")
        text = "L'amitié nous a fait forts d'esprit"
        assert detokenizer.detokenize(tokenizer.tokenize(text)) == text

    def test_chinese_tokenization(self):
        """Pre-segmented Chinese text is split only on spaces."""
        tokenizer = MosesTokenizer(lang="zh")
        text = "记者 应谦 美国"
        assert tokenizer.tokenize(text) == ["记者", "应谦", "美国"]

    def test_korean_tokenization(self):
        """Korean splits on spaces and final punctuation, and round-trips."""
        tokenizer = MosesTokenizer(lang="ko")
        detokenizer = MosesDetokenizer(lang="ko")
        text = "세계 에서 가장 강력한."
        assert tokenizer.tokenize(text) == ["세계", "에서", "가장", "강력한", "."]
        assert detokenizer.detokenize(tokenizer.tokenize(text)) == text

    def test_japanese_tokenization(self):
        """Unsegmented Japanese text is passed through as a single token."""
        tokenizer = MosesTokenizer(lang="ja")
        text = "電話でんわの邪魔じゃまをしないでください"
        assert tokenizer.tokenize(text) == [text]

    def test_mixed_cjk_tokenization(self):
        """With a non-CJK language setting, CJK characters are split
        character-by-character but still detokenize back exactly."""
        tokenizer = MosesTokenizer()
        detokenizer = MosesDetokenizer()
        text = "Japan is 日本 in Japanese."
        assert tokenizer.tokenize(text) == [
            "Japan",
            "is",
            "日",
            "本",
            "in",
            "Japanese",
            ".",
        ]
        assert detokenizer.detokenize(tokenizer.tokenize(text)) == text
| 11,247 | 32.47619 | 211 | py |
sacremoses | sacremoses-master/sacremoses/test/test_truecaser.py | # -*- coding: utf-8 -*-
"""
Tests for MosesTokenizer
"""
import os
import unittest
import urllib.request
from sacremoses.truecase import MosesTruecaser, MosesDetruecaser
def get_content(url):
    """Download *url* and return the raw response body as ``bytes``."""
    response = urllib.request.urlopen(url)
    try:
        # Reads the entire http.client.HTTPResponse body.
        return response.read()
    finally:
        response.close()
class TestTruecaser(unittest.TestCase):
    """Trains MosesTruecaser on Norvig's big.txt corpus and checks the
    truecased output for a couple of fixed inputs.

    setUp downloads big.txt over the network on first run and caches it in
    the working directory, so these tests require connectivity (or a cached
    file) and are comparatively slow.
    """

    def test_moses_truecase_documents(self):
        """Train from an in-memory list of pre-split documents."""
        moses = MosesTruecaser()
        # Train the model from documents.
        docs = [line.split() for line in self.big_txt.split("\n")]
        moses.train(docs)
        # Test all self.input_output test cases.
        for _input, _output in self.input_output.items():
            assert moses.truecase(_input) == _output

    def test_moses_truecase_file(self):
        """Train directly from the big.txt file cached on disk by setUp."""
        moses = MosesTruecaser()
        # Train the model from file.
        moses.train_from_file("big.txt")
        # Test all self.input_output test cases.
        for _input, _output in self.input_output.items():
            assert moses.truecase(_input) == _output

    def setUp(self):
        """Fetch (or reuse a cached) big.txt and build the input/expected
        truecasing fixture pairs used by both tests."""
        # Check if the Norvig's big.txt file exists.
        if os.path.isfile("big.txt"):
            with open("big.txt") as fin:
                self.big_txt = fin.read()
        else:  # Otherwise, download the big.txt.
            try:  # Download from the original norvig.com
                self.big_txt = get_content("https://norvig.com/big.txt").decode("utf8")
            except:  # Otherwise get it from the github gist mirror.
                # NOTE(review): the bare except also hides non-network errors;
                # only the mirror download is cached to disk, the norvig.com
                # path is not.
                big_text_url = str(
                    "https://gist.githubusercontent.com/alvations/"
                    "6e878bab0eda2624167aa7ec13fc3e94/raw/"
                    "4fb3bac1da1ba7a172ff1936e96bee3bc8892931/"
                    "big.txt"
                )
                self.big_txt = get_content(big_text_url).decode("utf8")
                with open("big.txt", "w") as fout:
                    fout.write(self.big_txt)
        # Test case where inputs are all caps.
        caps_input = "THE ADVENTURES OF SHERLOCK HOLMES"
        expected_caps_output = ["the", "ADVENTURES", "OF", "SHERLOCK", "HOLMES"]
        # Test normal input to truecase.
        normal_input = str(
            "You can also find out about how to make a donation "
            "to Project Gutenberg, and how to get involved."
        )
        expecte_normal_output = [
            "you",
            "can",
            "also",
            "find",
            "out",
            "about",
            "how",
            "to",
            "make",
            "a",
            "donation",
            "to",
            "Project",
            "Gutenberg,",
            "and",
            "how",
            "to",
            "get",
            "involved.",
        ]
        # Keep a key-value pairs of in/outputs.
        self.input_output = {
            caps_input: expected_caps_output,
            normal_input: expecte_normal_output,
        }
class TestDetruecaser(unittest.TestCase):
    """Checks MosesDetruecaser for plain, headline, and all-caps input."""

    def test_moses_detruecase_str(self):
        """Plain detruecasing capitalizes only the first word."""
        moses = MosesDetruecaser()
        text = "the adventures of Sherlock Holmes"
        expected = ["The", "adventures", "of", "Sherlock", "Holmes"]
        expected_str = "The adventures of Sherlock Holmes"
        assert moses.detruecase(text) == expected
        assert moses.detruecase(text, return_str=True) == expected_str

    def test_moses_detruecase_headline(self):
        """Headline mode title-cases content words, leaving 'of' lowercase."""
        moses = MosesDetruecaser()
        text = "the adventures of Sherlock Holmes"
        expected = ["The", "Adventures", "of", "Sherlock", "Holmes"]
        expected_str = "The Adventures of Sherlock Holmes"
        assert moses.detruecase(text, is_headline=True) == expected
        assert moses.detruecase(text, is_headline=True, return_str=True) == expected_str

    def test_moses_detruecase_allcaps(self):
        """Acronyms in all caps (MLB) are preserved as-is."""
        moses = MosesDetruecaser()
        text = "MLB Baseball standings"
        expected = ["MLB", "Baseball", "standings"]
        expected_str = "MLB Baseball standings"
        assert moses.detruecase(text) == expected
        assert moses.detruecase(text, return_str=True) == expected_str
| 4,123 | 33.949153 | 88 | py |
null | DecSPS-main/README.md | # DecSPS
Official code for "Dynamics of SGD with Stochastic Polyak Stepsizes: Truly Adaptive Variants and Convergence to Exact Solution"
https://arxiv.org/pdf/2205.04583.pdf
| 175 | 34.2 | 127 | md |
null | DecSPS-main/Real Datasets/methods.py | import numpy as np
from numpy import linalg as la
import random
import math
def SGD(cost, grad, hess, K, gamma, x0, batch_size, n):
    """Plain mini-batch SGD with a constant stepsize.

    cost(x, idx) / grad(x, idx) evaluate objective and gradient on the
    sample indices ``idx``; ``hess`` is accepted for interface parity but
    unused.  Returns a legend string, the full-objective trace of length
    K, and the K-1 stepsizes used.
    """
    # Pre-draw one mini-batch (sampled without replacement) per iteration.
    batch = [random.sample(range(n), batch_size) for _ in range(K)]
    # Iterate history, objective trace, and stepsize record.
    x = [x0] * K
    f = np.zeros((K,))
    gammas = np.zeros((K - 1,))
    f[0] = cost(x0, range(n))
    for k in range(K - 1):
        gammas[k] = gamma
        # SGD step on batch k, then record the objective on all n samples.
        x[k + 1] = x[k] - gammas[k] * grad(x[k], batch[k])
        f[k + 1] = cost(x[k + 1], range(n))
    name = 'SGD, step='+"{:.2f}".format(gamma)
    return name, f, gammas
def SGD_decr(cost, grad, hess, nexp, K_record_times, compute_hess, gamma_init, decr, th, x0, batch_size, n):
    """SGD with a stepsize that decays after a warm-up of ``th`` steps.

    For k >= th the stepsize is gamma_init/sqrt(k-th+1) when decr=='sqrt',
    else gamma_init/(k-th+1).  Runs ``nexp`` independent repetitions and
    records the objective and stepsize at the iterations listed in
    ``K_record_times`` (assumed sorted, last entry = total iterations).
    Returns (legend string, f, stepsize record), each record of shape
    (len(K_record_times), nexp).  ``hess``/``compute_hess`` are unused.
    """
    K = K_record_times[-1]
    n_rec = len(K_record_times)
    f = np.zeros((n_rec, nexp))
    gammas_rec = np.zeros((n_rec, nexp))
    for e in range(nexp):
        # One fresh mini-batch per iteration for this repetition.
        batch = [random.sample(range(n), batch_size) for _ in range(K + 1)]
        i_record = 0
        x = [x0] * (K + 2)
        gammas = np.zeros((K + 2,))
        for k in range(K + 1):
            # Constant during warm-up, then decaying.
            if k < th:
                gammas[k] = gamma_init
            elif decr == 'sqrt':
                gammas[k] = gamma_init / math.sqrt(k - th + 1)
            else:
                gammas[k] = gamma_init / (k - th + 1)
            if k == K_record_times[i_record]:
                gammas_rec[i_record, e] = gammas[k]
                f[i_record, e] = cost(x[k], range(n))
                i_record += 1
            x[k + 1] = x[k] - gammas[k] * grad(x[k], batch[k])
    name = r'SGD, $\gamma_k='+"{:.2f}".format(gamma_init)+'/\sqrt{k+1}$'
    return name, f, gammas_rec
# def SPS_max(cost,grad,hess,nexp, K_record_times, compute_hess, c, gamma_max, x0, batch_size, n):
# #number of iterations
# K = K_record_times[-1]
# #init
# f = np.zeros((len(K_record_times),nexp))
# mus = np.zeros((len(K_record_times),nexp))
# Ls = np.zeros((len(K_record_times),nexp))
# gammas_rec = np.zeros((len(K_record_times),nexp))
# ## optimization
# for e in range(nexp):
# #batches
# batch = []
# for i in range(K+1): batch.append(random.sample(range(n),batch_size))
# #iterations
# i_record = 0
# x = [x0 for i in range(K+2)]
# gammas = np.zeros((K+2,))
# for k in range(K+1):
# sps_grad = cost(x[k],batch[k])/la.norm(grad(x[k],batch[k]))**2
# gammas[k] = min([sps_grad/c,gamma_max])
# #record
# if k==K_record_times[i_record]:
# gammas_rec[i_record,e] = gammas[k]
# f[i_record,e] = cost(x[k],range(n))
# if compute_hess:
# mus[i_record,e],Ls[i_record,e] = hess(x[k])
# i_record = i_record+1
# # update
# x[k+1] = x[k] - gammas[k]*grad(x[k],batch[k])
# ## name
# name = r'SPS$_max$, $c='+"{:.2f}".format(c+', \gamma_{\max}='+"{:.2f}".format(gamma_max)+'$'
# return name, f, gammas_rec, mus, Ls
def SPS_decr(cost, grad, hess, nexp, K_record_times, compute_hess, c_init, decr, gamma_max, x0, batch_size, n):
    """DecSPS: stochastic Polyak stepsize with a growing c_k schedule.

    gamma_k = min(f_B(x_k)/||g_B(x_k)||^2, c_{k-1} gamma_{k-1}) / c_k with
    c_k = c_init*sqrt(k+1) (decr=='sqrt') or c_init*(k+1); gamma_0 is
    capped at gamma_max.  Records objective and stepsize at the iterations
    in K_record_times over ``nexp`` repetitions; ``hess``/``compute_hess``
    are unused.  Returns (legend string, f, stepsize record).
    """
    K = K_record_times[-1]
    n_rec = len(K_record_times)
    f = np.zeros((n_rec, nexp))
    gammas_rec = np.zeros((n_rec, nexp))
    for e in range(nexp):
        batch = [random.sample(range(n), batch_size) for _ in range(K + 1)]
        i_record = 0
        x = [x0] * (K + 2)
        gammas = np.zeros((K + 2,))
        c = np.zeros((K + 2,))
        for k in range(K + 1):
            # Polyak ratio on the current mini-batch.
            # NOTE(review): 1e-4 is added elementwise to the gradient BEFORE
            # the norm (guards a zero gradient); confirm this is intended
            # rather than adding to the squared norm itself.
            sps_grad = cost(x[k], batch[k]) / la.norm(grad(x[k], batch[k]) + 1e-4) ** 2
            if k == 0:
                c[0] = c_init
                gammas[0] = min([sps_grad, c[0] * gamma_max]) / c[0]
            else:
                c[k] = c_init * np.sqrt(k + 1) if decr == 'sqrt' else c_init * (k + 1)
                gammas[k] = min([sps_grad, c[k - 1] * gammas[k - 1]]) / c[k]
            if k == K_record_times[i_record]:
                gammas_rec[i_record, e] = gammas[k]
                f[i_record, e] = cost(x[k], range(n))
                i_record += 1
            x[k + 1] = x[k] - gammas[k] * grad(x[k], batch[k])
    name = r'DecSPS, $c_0='+"{:.2f}".format(c_init)+', \gamma_{b}='+"{:.0f}".format(gamma_max)+'$'
    return name, f, gammas_rec
def AdaNorm(cost, grad, hess, nexp, K_record_times, compute_hess, eta, b0, decr, th, x0, batch_size, n):
    """AdaGrad-Norm: stepsize 1/sqrt(b0^2 + sum of squared gradient norms).

    The accumulator is kept implicitly through the recurrence
    1/gamma_k^2 = 1/gamma_{k-1}^2 + ||g_k||^2 with gamma_0 = 1/b0; the
    update scales it by ``eta``.  Records the (un-scaled) stepsize and
    objective at K_record_times and returns (name, f, eta * record).
    ``hess``/``compute_hess``/``decr``/``th`` are unused.
    """
    K = K_record_times[-1]
    n_rec = len(K_record_times)
    f = np.zeros((n_rec, nexp))
    gammas_rec = np.zeros((n_rec, nexp))
    for e in range(nexp):
        batch = [random.sample(range(n), batch_size) for _ in range(K + 1)]
        i_record = 0
        x = [x0] * (K + 2)
        gammas = np.zeros((K + 2,))
        for k in range(K + 1):
            if k == 0:
                gammas[0] = 1 / b0
            else:
                gammas[k] = 1 / np.sqrt(1 / gammas[k - 1] ** 2 + la.norm(grad(x[k], batch[k])) ** 2)
            if k == K_record_times[i_record]:
                gammas_rec[i_record, e] = gammas[k]
                f[i_record, e] = cost(x[k], range(n))
                i_record += 1
            x[k + 1] = x[k] - eta * gammas[k] * grad(x[k], batch[k])
    name = 'AdaNorm, $b_0='+"{:.2f}".format(b0)+', \eta='+"{:.1f}".format(eta)+'$'
    return name, f, eta * gammas_rec
def Adam(cost, grad, hess, nexp, K_record_times, compute_hess, eta, beta2, decr, th, x0, batch_size, n):
    """Adam-style diagonal stepsizes without momentum or bias correction.

    Keeps the EMA of squared gradients v_k = beta2*v_{k-1} + (1-beta2)*g_k^2
    and updates x with eta * v_k^{-1/2} * g_k (no epsilon: a zero gradient
    coordinate would divide by zero).  Records mean(v_k^{-1/2}) and the
    objective at K_record_times; returns (name, f, eta * record).
    ``hess``/``compute_hess``/``decr``/``th`` are unused.
    """
    K = K_record_times[-1]
    n_rec = len(K_record_times)
    f = np.zeros((n_rec, nexp))
    gammas_rec = np.zeros((n_rec, nexp))
    for e in range(nexp):
        batch = [random.sample(range(n), batch_size) for _ in range(K + 1)]
        i_record = 0
        x = [x0] * (K + 2)
        v = [0 * x0] * (K + 2)
        for k in range(K + 1):
            g_k = grad(x[k], batch[k])
            if k == 0:
                v[k] = np.power(g_k, 2)
            else:
                v[k] = beta2 * v[k - 1] + (1 - beta2) * np.power(g_k, 2)
            if k == K_record_times[i_record]:
                gammas_rec[i_record, e] = np.mean(np.power(v[k], -0.5))
                f[i_record, e] = cost(x[k], range(n))
                i_record += 1
            x[k + 1] = x[k] - eta * np.multiply(np.power(v[k], -0.5), g_k)
    name = r'Adam, $\beta$='+"{:.2f}".format(beta2)+', $\eta$='+"{:.5f}".format(eta)
    return name, f, eta * gammas_rec
def AMSgrad(cost, grad, hess, nexp, K_record_times, compute_hess, eta, beta2, decr, th, x0, batch_size, n):
    """AMSgrad with an extra 1/sqrt(k-th+1) decay on the update.

    v_k is the element-wise running maximum of the Adam EMA of squared
    gradients, which makes the effective stepsize non-increasing.  The
    update is x_{k+1} = x_k - eta * v_k^{-1/2} * g_k / sqrt(k-th+1)
    (no epsilon: a zero gradient coordinate would divide by zero; requires
    th <= 0 so that k-th+1 >= 1 at k=0).  Records mean(v_k^{-1/2})/sqrt(k-th+1)
    and the objective at K_record_times over ``nexp`` repetitions and
    returns (name, f, eta * record).  ``hess``/``compute_hess``/``decr``
    are unused.
    """
    K = K_record_times[-1]
    n_rec = len(K_record_times)
    f = np.zeros((n_rec, nexp))
    gammas_rec = np.zeros((n_rec, nexp))
    for e in range(nexp):
        batch = [random.sample(range(n), batch_size) for _ in range(K + 1)]
        i_record = 0
        x = [x0] * (K + 2)
        v = [0 * x0] * (K + 2)
        for k in range(K + 1):
            g_k = grad(x[k], batch[k])
            if k == 0:
                v[k] = np.power(g_k, 2)
            else:
                # AMSgrad: never let the second-moment estimate decrease.
                v_k = beta2 * v[k - 1] + (1 - beta2) * np.power(g_k, 2)
                v[k] = np.maximum(v_k, v[k - 1])
            if k == K_record_times[i_record]:
                # FIX: record WITHOUT eta -- the function returns
                # eta*gammas_rec, so the previous eta* here applied eta
                # twice, inconsistent with Adam/AdaNorm in this module.
                gammas_rec[i_record, e] = np.mean(np.power(v[k], -0.5)) / math.sqrt(k - th + 1)
                f[i_record, e] = cost(x[k], range(n))
                i_record += 1
            x[k + 1] = x[k] - eta * np.multiply(np.power(v[k], -0.5), g_k) / math.sqrt(k - th + 1)
    name = r'AMSgrad, $\beta$='+"{:.2f}".format(beta2)+', $\eta$='+"{:.4f}".format(eta)
    return name, f, eta * gammas_rec
| 9,036 | 30.378472 | 108 | py |
null | DecSPS-main/Toy Dataset/methods.py | import numpy as np
from numpy import linalg as la
import random
import math
def SGD(cost, grad, hess, K, gamma, x0, batch_size, n):
    """Plain mini-batch SGD with a constant stepsize (toy-dataset copy).

    Returns a legend string, the full-objective trace of length K, and
    the K-1 stepsizes used.  ``hess`` is accepted but unused.
    """
    # Pre-draw one mini-batch (sampled without replacement) per iteration.
    batch = [random.sample(range(n), batch_size) for _ in range(K)]
    x = [x0] * K
    f = np.zeros((K,))
    gammas = np.zeros((K - 1,))
    f[0] = cost(x0, range(n))
    for k in range(K - 1):
        gammas[k] = gamma
        # SGD step on batch k, then record the objective on all n samples.
        x[k + 1] = x[k] - gammas[k] * grad(x[k], batch[k])
        f[k + 1] = cost(x[k + 1], range(n))
    name = 'SGD, step='+"{:.2f}".format(gamma)
    return name, f, gammas
def SGD_decr(cost, grad, hess, nexp, K_record_times, compute_hess, gamma_init, decr, th, x0, batch_size, n):
    """SGD with a stepsize decaying after a warm-up of ``th`` steps
    (toy-dataset copy).

    For k >= th the stepsize is gamma_init/sqrt(k-th+1) when decr=='sqrt',
    else gamma_init/(k-th+1).  Records objective and stepsize at the
    iterations in K_record_times over ``nexp`` repetitions; returns
    (legend string, f, stepsize record).  ``hess``/``compute_hess`` unused.
    """
    K = K_record_times[-1]
    n_rec = len(K_record_times)
    f = np.zeros((n_rec, nexp))
    gammas_rec = np.zeros((n_rec, nexp))
    for e in range(nexp):
        batch = [random.sample(range(n), batch_size) for _ in range(K + 1)]
        i_record = 0
        x = [x0] * (K + 2)
        gammas = np.zeros((K + 2,))
        for k in range(K + 1):
            if k < th:
                gammas[k] = gamma_init
            elif decr == 'sqrt':
                gammas[k] = gamma_init / math.sqrt(k - th + 1)
            else:
                gammas[k] = gamma_init / (k - th + 1)
            if k == K_record_times[i_record]:
                gammas_rec[i_record, e] = gammas[k]
                f[i_record, e] = cost(x[k], range(n))
                i_record += 1
            x[k + 1] = x[k] - gammas[k] * grad(x[k], batch[k])
    name = r'SGD, $\gamma_k='+"{:.2f}".format(gamma_init)+'/\sqrt{k+1}$'
    return name, f, gammas_rec
# def SPS_max(cost,grad,hess,nexp, K_record_times, compute_hess, c, gamma_max, x0, batch_size, n):
# #number of iterations
# K = K_record_times[-1]
# #init
# f = np.zeros((len(K_record_times),nexp))
# mus = np.zeros((len(K_record_times),nexp))
# Ls = np.zeros((len(K_record_times),nexp))
# gammas_rec = np.zeros((len(K_record_times),nexp))
# ## optimization
# for e in range(nexp):
# #batches
# batch = []
# for i in range(K+1): batch.append(random.sample(range(n),batch_size))
# #iterations
# i_record = 0
# x = [x0 for i in range(K+2)]
# gammas = np.zeros((K+2,))
# for k in range(K+1):
# sps_grad = cost(x[k],batch[k])/la.norm(grad(x[k],batch[k]))**2
# gammas[k] = min([sps_grad/c,gamma_max])
# #record
# if k==K_record_times[i_record]:
# gammas_rec[i_record,e] = gammas[k]
# f[i_record,e] = cost(x[k],range(n))
# if compute_hess:
# mus[i_record,e],Ls[i_record,e] = hess(x[k])
# i_record = i_record+1
# # update
# x[k+1] = x[k] - gammas[k]*grad(x[k],batch[k])
# ## name
# name = r'SPS$_max$, $c='+"{:.2f}".format(c+', \gamma_{\max}='+"{:.2f}".format(gamma_max)+'$'
# return name, f, gammas_rec, mus, Ls
def SPS_decr(cost, grad, hess, nexp, K_record_times, compute_hess, c_init, decr, gamma_max, x0, batch_size, n):
    """DecSPS: stochastic Polyak stepsize with a growing c_k schedule
    (toy-dataset copy, no gradient-norm regularization).

    gamma_k = min(f_B(x_k)/||g_B(x_k)||^2, c_{k-1} gamma_{k-1}) / c_k with
    c_k = c_init*sqrt(k+1) (decr=='sqrt') or c_init*(k+1); gamma_0 is
    capped at gamma_max.  Returns (legend string, f, stepsize record).
    ``hess``/``compute_hess`` are unused.
    """
    K = K_record_times[-1]
    n_rec = len(K_record_times)
    f = np.zeros((n_rec, nexp))
    gammas_rec = np.zeros((n_rec, nexp))
    for e in range(nexp):
        batch = [random.sample(range(n), batch_size) for _ in range(K + 1)]
        i_record = 0
        x = [x0] * (K + 2)
        gammas = np.zeros((K + 2,))
        c = np.zeros((K + 2,))
        for k in range(K + 1):
            # Polyak ratio on the current mini-batch (divides by the exact
            # squared gradient norm -- zero gradients are not guarded here).
            sps_grad = cost(x[k], batch[k]) / la.norm(grad(x[k], batch[k])) ** 2
            if k == 0:
                c[0] = c_init
                gammas[0] = min([sps_grad, c[0] * gamma_max]) / c[0]
            else:
                c[k] = c_init * np.sqrt(k + 1) if decr == 'sqrt' else c_init * (k + 1)
                gammas[k] = min([sps_grad, c[k - 1] * gammas[k - 1]]) / c[k]
            if k == K_record_times[i_record]:
                gammas_rec[i_record, e] = gammas[k]
                f[i_record, e] = cost(x[k], range(n))
                i_record += 1
            x[k + 1] = x[k] - gammas[k] * grad(x[k], batch[k])
    name = r'DecSPS, $c_0='+"{:.2f}".format(c_init)+', \gamma_{b}='+"{:.0f}".format(gamma_max)+'$'
    return name, f, gammas_rec
def AdaNorm(cost, grad, hess, nexp, K_record_times, compute_hess, eta, b0, decr, th, x0, batch_size, n):
    """AdaGrad-Norm (toy-dataset copy): stepsize
    1/sqrt(b0^2 + accumulated squared gradient norms).

    The accumulator is kept through the recurrence
    1/gamma_k^2 = 1/gamma_{k-1}^2 + ||g_k||^2 with gamma_0 = 1/b0; the
    update scales it by ``eta``.  Returns (name, f, eta * record).
    ``hess``/``compute_hess``/``decr``/``th`` are unused.
    """
    K = K_record_times[-1]
    n_rec = len(K_record_times)
    f = np.zeros((n_rec, nexp))
    gammas_rec = np.zeros((n_rec, nexp))
    for e in range(nexp):
        batch = [random.sample(range(n), batch_size) for _ in range(K + 1)]
        i_record = 0
        x = [x0] * (K + 2)
        gammas = np.zeros((K + 2,))
        for k in range(K + 1):
            if k == 0:
                gammas[0] = 1 / b0
            else:
                gammas[k] = 1 / np.sqrt(1 / gammas[k - 1] ** 2 + la.norm(grad(x[k], batch[k])) ** 2)
            if k == K_record_times[i_record]:
                gammas_rec[i_record, e] = gammas[k]
                f[i_record, e] = cost(x[k], range(n))
                i_record += 1
            x[k + 1] = x[k] - eta * gammas[k] * grad(x[k], batch[k])
    name = 'AdaNorm, $b_0='+"{:.2f}".format(b0)+', \eta='+"{:.1f}".format(eta)+'$'
    return name, f, eta * gammas_rec
| 6,231 | 29.4 | 108 | py |
AMG | AMG-master/HYPRE.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* Header file for HYPRE library
*
*****************************************************************************/
#ifndef HYPRE_HEADER
#define HYPRE_HEADER

/* Enable the assumed-partition code paths throughout the library. */
#define HYPRE_NO_GLOBAL_PARTITION 1

/*--------------------------------------------------------------------------
 * Type definitions
 *--------------------------------------------------------------------------*/

/* HYPRE_Int is the integer type used throughout the library; compiling
   with -DHYPRE_BIGINT widens it to 64 bits for very large problems.
   HYPRE_MPI_INT is the matching MPI datatype for communication. */
#ifdef HYPRE_BIGINT
typedef long long int HYPRE_Int;
#define HYPRE_MPI_INT MPI_LONG_LONG
#else
typedef int HYPRE_Int;
#define HYPRE_MPI_INT MPI_INT
#endif

/*--------------------------------------------------------------------------
 * Constants
 *--------------------------------------------------------------------------*/

/* Sentinel: object whose storage format has not been selected yet. */
#define HYPRE_UNITIALIZED -999

/* Solver identifiers. */
#define HYPRE_PETSC_MAT_PARILUT_SOLVER 222
#define HYPRE_PARILUT                  333

/* System-interface (conceptual storage) identifiers. */
#define HYPRE_STRUCT  1111
#define HYPRE_SSTRUCT 3333
#define HYPRE_PARCSR  5555

#define HYPRE_ISIS    9911
#define HYPRE_PETSC   9933

#define HYPRE_PFMG    10
#define HYPRE_SMG     11
#define HYPRE_Jacobi  17

#endif
| 2,050 | 30.553846 | 81 | h |
AMG | AMG-master/IJ_mv/HYPRE_IJMatrix.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* HYPRE_IJMatrix interface
*
*****************************************************************************/
#include "./_hypre_IJ_mv.h"
#include "../HYPRE.h"
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixCreate
*--------------------------------------------------------------------------*/
/* Create an empty IJ matrix object for the local row range [ilower,iupper]
 * and column range [jlower,jupper] (inclusive, global indices).  An empty
 * local range is expressed as iupper == ilower-1.  The collective exchange
 * below derives the global sizes and (without HYPRE_NO_GLOBAL_PARTITION)
 * the full row/column partitionings, verifying that per-process ranges
 * tile the global index space without gaps or overlaps. */
HYPRE_Int
HYPRE_IJMatrixCreate( MPI_Comm comm,
                      HYPRE_Int ilower,
                      HYPRE_Int iupper,
                      HYPRE_Int jlower,
                      HYPRE_Int jupper,
                      HYPRE_IJMatrix *matrix )
{
   HYPRE_Int *row_partitioning;
   HYPRE_Int *col_partitioning;
   HYPRE_Int *info;
   HYPRE_Int num_procs;
   HYPRE_Int myid;

   hypre_IJMatrix *ijmatrix;

#ifdef HYPRE_NO_GLOBAL_PARTITION
   HYPRE_Int row0, col0, rowN, colN;
#else
   HYPRE_Int *recv_buf;
   HYPRE_Int i, i4;
   HYPRE_Int square;
#endif

   ijmatrix = hypre_CTAlloc(hypre_IJMatrix, 1);

   /* Initialize the handle: no underlying object/translator yet, storage
      format still unchosen, not assembled, printing off. */
   hypre_IJMatrixComm(ijmatrix) = comm;
   hypre_IJMatrixObject(ijmatrix) = NULL;
   hypre_IJMatrixTranslator(ijmatrix) = NULL;
   hypre_IJMatrixAssumedPart(ijmatrix) = NULL;
   hypre_IJMatrixObjectType(ijmatrix) = HYPRE_UNITIALIZED;
   hypre_IJMatrixAssembleFlag(ijmatrix) = 0;
   hypre_IJMatrixPrintLevel(ijmatrix) = 0;

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm, &myid);

   /* Validate the local ranges: indices must be non-negative and a range
      may be empty (upper == lower-1) but not inverted further. */
   if (ilower > iupper+1 || ilower < 0)
   {
      hypre_error_in_arg(2);
      hypre_TFree(ijmatrix);
      return hypre_error_flag;
   }

   if (iupper < -1)
   {
      hypre_error_in_arg(3);
      hypre_TFree(ijmatrix);
      return hypre_error_flag;
   }

   if (jlower > jupper+1 || jlower < 0)
   {
      hypre_error_in_arg(4);
      hypre_TFree(ijmatrix);
      return hypre_error_flag;
   }

   if (jupper < -1)
   {
      hypre_error_in_arg(5);
      hypre_TFree(ijmatrix);
      return hypre_error_flag;
   }

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* Assumed partition: each rank stores only its own 2-entry range. */
   info = hypre_CTAlloc(HYPRE_Int,2);

   row_partitioning = hypre_CTAlloc(HYPRE_Int, 2);
   col_partitioning = hypre_CTAlloc(HYPRE_Int, 2);

   row_partitioning[0] = ilower;
   row_partitioning[1] = iupper+1;
   col_partitioning[0] = jlower;
   col_partitioning[1] = jupper+1;

   /* now we need the global number of rows and columns as well
      as the global first row and column index */

   /* proc 0 has the first row and col */
   if (myid==0)
   {
      info[0] = ilower;
      info[1] = jlower;
   }
   hypre_MPI_Bcast(info, 2, HYPRE_MPI_INT, 0, comm);
   row0 = info[0];
   col0 = info[1];

   /* proc (num_procs-1) has the last row and col */
   if (myid == (num_procs-1))
   {
      info[0] = iupper;
      info[1] = jupper;
   }
   hypre_MPI_Bcast(info, 2, HYPRE_MPI_INT, num_procs-1, comm);

   rowN = info[0];
   colN = info[1];

   hypre_IJMatrixGlobalFirstRow(ijmatrix) = row0;
   hypre_IJMatrixGlobalFirstCol(ijmatrix) = col0;
   hypre_IJMatrixGlobalNumRows(ijmatrix) = rowN - row0 + 1;
   hypre_IJMatrixGlobalNumCols(ijmatrix) = colN - col0 + 1;

   hypre_TFree(info);

#else
   /* Global partition: gather every rank's (ilower,iupper,jlower,jupper)
      and build full O(P) partition arrays. */
   info = hypre_CTAlloc(HYPRE_Int,4);
   recv_buf = hypre_CTAlloc(HYPRE_Int,4*num_procs);
   row_partitioning = hypre_CTAlloc(HYPRE_Int, num_procs+1);

   info[0] = ilower;
   info[1] = iupper;
   info[2] = jlower;
   info[3] = jupper;

   /* Generate row- and column-partitioning through information exchange
      across all processors, check whether the matrix is square, and
      if the partitionings match. i.e. no overlaps or gaps,
      if there are overlaps or gaps in the row partitioning or column
      partitioning , ierr will be set to -9 or -10, respectively */

   hypre_MPI_Allgather(info,4,HYPRE_MPI_INT,recv_buf,4,HYPRE_MPI_INT,comm);

   row_partitioning[0] = recv_buf[0];
   square = 1;
   for (i=0; i < num_procs-1; i++)
   {
      i4 = 4*i;
      /* Rank i+1's first row must follow rank i's last row exactly. */
      if ( recv_buf[i4+1] != (recv_buf[i4+4]-1) )
      {
         hypre_error(HYPRE_ERROR_GENERIC);
         hypre_TFree(ijmatrix);
         hypre_TFree(info);
         hypre_TFree(recv_buf);
         hypre_TFree(row_partitioning);
         return hypre_error_flag;
      }
      else
         row_partitioning[i+1] = recv_buf[i4+4];

      if ((square && (recv_buf[i4] != recv_buf[i4+2])) ||
          (recv_buf[i4+1] != recv_buf[i4+3]) )
      {
         square = 0;
      }
   }

   i4 = (num_procs-1)*4;
   row_partitioning[num_procs] = recv_buf[i4+1]+1;

   if ((recv_buf[i4] != recv_buf[i4+2]) || (recv_buf[i4+1] != recv_buf[i4+3]))
      square = 0;

   /* A square matrix shares one partition array for rows and columns. */
   if (square)
      col_partitioning = row_partitioning;
   else
   {
      col_partitioning = hypre_CTAlloc(HYPRE_Int,num_procs+1);
      col_partitioning[0] = recv_buf[2];
      for (i=0; i < num_procs-1; i++)
      {
         i4 = 4*i;
         if (recv_buf[i4+3] != recv_buf[i4+6]-1)
         {
            hypre_error(HYPRE_ERROR_GENERIC);
            hypre_TFree(ijmatrix);
            hypre_TFree(info);
            hypre_TFree(recv_buf);
            hypre_TFree(row_partitioning);
            hypre_TFree(col_partitioning);
            return hypre_error_flag;
         }
         else
            col_partitioning[i+1] = recv_buf[i4+6];
      }
      col_partitioning[num_procs] = recv_buf[num_procs*4-1]+1;
   }

   hypre_IJMatrixGlobalFirstRow(ijmatrix) = row_partitioning[0];
   hypre_IJMatrixGlobalFirstCol(ijmatrix) = col_partitioning[0];
   hypre_IJMatrixGlobalNumRows(ijmatrix) = row_partitioning[num_procs] -
      row_partitioning[0];
   hypre_IJMatrixGlobalNumCols(ijmatrix) = col_partitioning[num_procs] -
      col_partitioning[0];

   hypre_TFree(info);
   hypre_TFree(recv_buf);

#endif

   hypre_IJMatrixRowPartitioning(ijmatrix) = row_partitioning;
   hypre_IJMatrixColPartitioning(ijmatrix) = col_partitioning;

   *matrix = (HYPRE_IJMatrix) ijmatrix;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixDestroy
*--------------------------------------------------------------------------*/
/* Destroy an IJ matrix: free the partitioning arrays (which alias each
 * other for square matrices), the assumed-partition data, the underlying
 * ParCSR object (when one was created), and finally the handle itself. */
HYPRE_Int
HYPRE_IJMatrixDestroy( HYPRE_IJMatrix matrix )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Row and column partitionings may be the same array (square matrix);
      free the shared array only once. */
   if (hypre_IJMatrixRowPartitioning(ijmatrix) ==
       hypre_IJMatrixColPartitioning(ijmatrix))
   {
      hypre_TFree(hypre_IJMatrixRowPartitioning(ijmatrix));
   }
   else
   {
      hypre_TFree(hypre_IJMatrixRowPartitioning(ijmatrix));
      hypre_TFree(hypre_IJMatrixColPartitioning(ijmatrix));
   }

   /* BUG FIX: the condition was written `if hypre_IJMatrixAssumedPart(...)`
      without parentheses, which is not valid C. */
   if (hypre_IJMatrixAssumedPart(ijmatrix))
   {
      hypre_AssumedPartitionDestroy((hypre_IJAssumedPart*)hypre_IJMatrixAssumedPart(ijmatrix));
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
   {
      hypre_IJMatrixDestroyParCSR( ijmatrix );
   }
   else if ( hypre_IJMatrixObjectType(ijmatrix) != -1 )
   {
      /* Unknown object type: flag the error and leak-guard by returning
         before freeing a handle in an inconsistent state. */
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_TFree(ijmatrix);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixInitialize
*--------------------------------------------------------------------------*/
/* Prepare an IJ matrix to receive coefficients.  Only the ParCSR storage
 * format is supported; any other object type is an error on argument 1. */
HYPRE_Int
HYPRE_IJMatrixInitialize( HYPRE_IJMatrix matrix )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   /* NULL handle is a caller error. */
   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixInitializeParCSR(ijmatrix);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixSetPrintLevel
*--------------------------------------------------------------------------*/
/* Enable diagnostic printing for this IJ matrix. */
HYPRE_Int
HYPRE_IJMatrixSetPrintLevel( HYPRE_IJMatrix matrix,
                             HYPRE_Int print_level )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* NOTE(review): the `print_level` argument is ignored -- any call simply
      stores 1 (printing on).  Confirm whether the stored value should be
      `print_level` instead. */
   hypre_IJMatrixPrintLevel(ijmatrix) = 1;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixSetValues
*--------------------------------------------------------------------------*/
/* Overwrite coefficients: for each of the `nrows` rows `rows[i]`, set the
 * `ncols[i]` entries at columns taken consecutively from `cols` to the
 * corresponding `values`.  The argument checks below are ordered so that
 * hypre_error_in_arg reports the first offending parameter position. */
HYPRE_Int
HYPRE_IJMatrixSetValues( HYPRE_IJMatrix matrix,
                         HYPRE_Int nrows,
                         HYPRE_Int *ncols,
                         const HYPRE_Int *rows,
                         const HYPRE_Int *cols,
                         const HYPRE_Complex *values )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   /* Empty update: nothing to do. */
   if (nrows == 0)
      return hypre_error_flag;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   if (!ncols)
   {
      hypre_error_in_arg(3);
      return hypre_error_flag;
   }

   if (!rows)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }

   if (!cols)
   {
      hypre_error_in_arg(5);
      return hypre_error_flag;
   }

   if (!values)
   {
      hypre_error_in_arg(6);
      return hypre_error_flag;
   }

   /* Dispatch to the (optionally OpenMP-threaded) ParCSR implementation;
      any other object type is an error. */
   if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
   {
      if (hypre_IJMatrixOMPFlag(ijmatrix))
         return( hypre_IJMatrixSetValuesOMPParCSR( ijmatrix, nrows, ncols,
                                                   rows, cols, values ) );
      else
         return( hypre_IJMatrixSetValuesParCSR( ijmatrix, nrows, ncols,
                                                rows, cols, values ) );
   }
   else
   {
      hypre_error_in_arg(1);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixAddToValues
*--------------------------------------------------------------------------*/
/* Accumulate coefficients: identical calling convention to
 * HYPRE_IJMatrixSetValues, but `values` are ADDED to any existing entries
 * rather than overwriting them.  Argument checks mirror SetValues so that
 * hypre_error_in_arg reports the same parameter positions. */
HYPRE_Int
HYPRE_IJMatrixAddToValues( HYPRE_IJMatrix matrix,
                           HYPRE_Int nrows,
                           HYPRE_Int *ncols,
                           const HYPRE_Int *rows,
                           const HYPRE_Int *cols,
                           const HYPRE_Complex *values )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   /* Empty update: nothing to do. */
   if (nrows == 0)
      return hypre_error_flag;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   if (!ncols)
   {
      hypre_error_in_arg(3);
      return hypre_error_flag;
   }

   if (!rows)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }

   if (!cols)
   {
      hypre_error_in_arg(5);
      return hypre_error_flag;
   }

   if (!values)
   {
      hypre_error_in_arg(6);
      return hypre_error_flag;
   }

   /* Dispatch to the (optionally OpenMP-threaded) ParCSR implementation. */
   if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
   {
      if (hypre_IJMatrixOMPFlag(ijmatrix))
         return( hypre_IJMatrixAddToValuesOMPParCSR( ijmatrix, nrows, ncols,
                                                     rows, cols, values ) );
      else
         return( hypre_IJMatrixAddToValuesParCSR( ijmatrix, nrows, ncols,
                                                  rows, cols, values ) );
   }
   else
   {
      hypre_error_in_arg(1);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixAssemble
 *--------------------------------------------------------------------------*/

/* Finalize matrix construction; must be called before the matrix is used. */
HYPRE_Int
HYPRE_IJMatrixAssemble( HYPRE_IJMatrix matrix )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR)
   {
      /* Only the ParCSR storage type is implemented. */
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   return( hypre_IJMatrixAssembleParCSR( ijmatrix ) );
}
/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixGetRowCounts
 *--------------------------------------------------------------------------*/

/* For each of the nrows row indices in rows[], write the number of
 * nonzeros of that row into the caller-allocated ncols[] array. */
HYPRE_Int
HYPRE_IJMatrixGetRowCounts( HYPRE_IJMatrix matrix,
                            HYPRE_Int nrows,
                            HYPRE_Int *rows,
                            HYPRE_Int *ncols )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   /* Empty query: nothing to report. */
   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; }
   if (nrows < 0) { hypre_error_in_arg(2); return hypre_error_flag; }
   if (!rows)     { hypre_error_in_arg(3); return hypre_error_flag; }
   if (!ncols)    { hypre_error_in_arg(4); return hypre_error_flag; }

   if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR)
   {
      hypre_IJMatrixGetRowCountsParCSR( ijmatrix, nrows, rows, ncols );
   }
   else
   {
      /* Only the ParCSR storage type is implemented. */
      hypre_error_in_arg(1);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixGetValues
 *--------------------------------------------------------------------------*/

/* Read coefficients for nrows rows or partial rows into values[].
 * Argument layout matches HYPRE_IJMatrixSetValues. */
HYPRE_Int
HYPRE_IJMatrixGetValues( HYPRE_IJMatrix matrix,
                         HYPRE_Int nrows,
                         HYPRE_Int *ncols,
                         HYPRE_Int *rows,
                         HYPRE_Int *cols,
                         HYPRE_Complex *values )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   /* Empty query: nothing to read. */
   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   if (!ijmatrix) { hypre_error_in_arg(1); return hypre_error_flag; }
   if (nrows < 0) { hypre_error_in_arg(2); return hypre_error_flag; }
   if (!ncols)    { hypre_error_in_arg(3); return hypre_error_flag; }
   if (!rows)     { hypre_error_in_arg(4); return hypre_error_flag; }
   if (!cols)     { hypre_error_in_arg(5); return hypre_error_flag; }
   if (!values)   { hypre_error_in_arg(6); return hypre_error_flag; }

   if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR)
   {
      hypre_IJMatrixGetValuesParCSR( ijmatrix, nrows, ncols,
                                     rows, cols, values );
   }
   else
   {
      /* Only the ParCSR storage type is implemented. */
      hypre_error_in_arg(1);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixSetObjectType
 *--------------------------------------------------------------------------*/

/* Record the storage type (e.g. HYPRE_PARCSR) to use for this matrix. */
HYPRE_Int
HYPRE_IJMatrixSetObjectType( HYPRE_IJMatrix matrix,
                             HYPRE_Int type )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixObjectType(ijmatrix) = type;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixGetObjectType
 *--------------------------------------------------------------------------*/

/* Return (through *type) the storage type previously set on the matrix. */
HYPRE_Int
HYPRE_IJMatrixGetObjectType( HYPRE_IJMatrix matrix,
                             HYPRE_Int *type )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   *type = hypre_IJMatrixObjectType(ijmatrix);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixGetLocalRange
 *--------------------------------------------------------------------------*/

/* Report the inclusive row range [*ilower, *iupper] owned by this rank
 * and the column range [*jlower, *jupper] of its column partitioning. */
HYPRE_Int
HYPRE_IJMatrixGetLocalRange( HYPRE_IJMatrix matrix,
                             HYPRE_Int *ilower,
                             HYPRE_Int *iupper,
                             HYPRE_Int *jlower,
                             HYPRE_Int *jupper )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;
   MPI_Comm        comm;
   HYPRE_Int      *rows;
   HYPRE_Int      *cols;
   HYPRE_Int       rank;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   comm = hypre_IJMatrixComm(ijmatrix);
   rows = hypre_IJMatrixRowPartitioning(ijmatrix);
   cols = hypre_IJMatrixColPartitioning(ijmatrix);
   hypre_MPI_Comm_rank(comm, &rank);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* Local two-entry partitioning: {first, last+1} for this rank only. */
   *ilower = rows[0];
   *iupper = rows[1] - 1;
   *jlower = cols[0];
   *jupper = cols[1] - 1;
#else
   /* Global partitioning array indexed by rank. */
   *ilower = rows[rank];
   *iupper = rows[rank+1] - 1;
   *jlower = cols[rank];
   *jupper = cols[rank+1] - 1;
#endif

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixGetObject
 *--------------------------------------------------------------------------*/

/**
   Returns a pointer to an underlying ijmatrix type used to implement IJMatrix.
   Assumes that the implementation has an underlying matrix, so it would not
   work with a direct implementation of IJMatrix.

   @return integer error code
   @param IJMatrix [IN]
   The ijmatrix to be pointed to.
*/
HYPRE_Int
HYPRE_IJMatrixGetObject( HYPRE_IJMatrix matrix,
                         void **object )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Hand back the raw underlying object (e.g. a hypre_ParCSRMatrix*). */
   *object = hypre_IJMatrixObject( ijmatrix );

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixSetRowSizes
 *--------------------------------------------------------------------------*/

/* Optional hint: expected number of nonzeros per local row, used to
 * pre-size internal storage during construction. */
HYPRE_Int
HYPRE_IJMatrixSetRowSizes( HYPRE_IJMatrix matrix,
                           const HYPRE_Int *sizes )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR)
   {
      /* Only the ParCSR storage type is implemented. */
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   return( hypre_IJMatrixSetRowSizesParCSR( ijmatrix , sizes ) );
}
/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixSetDiagOffdSizes
 *--------------------------------------------------------------------------*/

/* Optional hint: per-row nonzero counts split into the diagonal block
 * (diag_sizes) and the off-diagonal block (offdiag_sizes). */
HYPRE_Int
HYPRE_IJMatrixSetDiagOffdSizes( HYPRE_IJMatrix matrix,
                                const HYPRE_Int *diag_sizes,
                                const HYPRE_Int *offdiag_sizes )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR)
   {
      hypre_IJMatrixSetDiagOffdSizesParCSR( ijmatrix, diag_sizes, offdiag_sizes );
   }
   else
   {
      /* Only the ParCSR storage type is implemented. */
      hypre_error_in_arg(1);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixSetMaxOffProcElmts
 *--------------------------------------------------------------------------*/

/* Optional hint: maximum number of entries expected to be set/added
 * from other processes, used to pre-size off-process buffers. */
HYPRE_Int
HYPRE_IJMatrixSetMaxOffProcElmts( HYPRE_IJMatrix matrix,
                                  HYPRE_Int max_off_proc_elmts)
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR)
   {
      /* Only the ParCSR storage type is implemented. */
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   return( hypre_IJMatrixSetMaxOffProcElmtsParCSR(ijmatrix,
                                                  max_off_proc_elmts) );
}
/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixRead
 *--------------------------------------------------------------------------*/

/* Build an IJ matrix from per-rank files "<filename>.<myid>" written in
 * the HYPRE_IJMatrixPrint format: a header line "ilower iupper jlower
 * jupper" followed by "row col value" triples.
 *
 * Bug fix: on a malformed input line the function previously returned
 * without closing the file, leaking the FILE handle; fclose() is now
 * called on that path as well. */
HYPRE_Int
HYPRE_IJMatrixRead( const char *filename,
                    MPI_Comm comm,
                    HYPRE_Int type,
                    HYPRE_IJMatrix *matrix_ptr )
{
   HYPRE_IJMatrix matrix;
   HYPRE_Int ilower, iupper, jlower, jupper;
   HYPRE_Int ncols, I, J;
   HYPRE_Complex value;
   HYPRE_Int myid, ret;
   char new_filename[255];
   FILE *file;

   hypre_MPI_Comm_rank(comm, &myid);

   hypre_sprintf(new_filename,"%s.%05d", filename, myid);

   if ((file = fopen(new_filename, "r")) == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_fscanf(file, "%d %d %d %d", &ilower, &iupper, &jlower, &jupper);
   HYPRE_IJMatrixCreate(comm, ilower, iupper, jlower, jupper, &matrix);

   HYPRE_IJMatrixSetObjectType(matrix, type);
   HYPRE_IJMatrixInitialize(matrix);

   /* It is important to ensure that whitespace follows the index value to help
    * catch mistakes in the input file. See comments in IJVectorRead(). */
   ncols = 1;
   while ( (ret = hypre_fscanf(file, "%d %d%*[ \t]%le", &I, &J, &value)) != EOF )
   {
      if (ret != 3)
      {
         hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Error in IJ matrix input file.");
         fclose(file); /* was leaked before this fix */
         return hypre_error_flag;
      }

      /* Rows outside this rank's range must go through AddToValues,
       * which supports off-process entries; local rows use SetValues. */
      if (I < ilower || I > iupper)
         HYPRE_IJMatrixAddToValues(matrix, 1, &ncols, &I, &J, &value);
      else
         HYPRE_IJMatrixSetValues(matrix, 1, &ncols, &I, &J, &value);
   }

   HYPRE_IJMatrixAssemble(matrix);

   fclose(file);

   *matrix_ptr = matrix;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixPrint
 *--------------------------------------------------------------------------*/

/* Write this rank's portion of the matrix to "<filename>.<myid>" in the
 * format read back by HYPRE_IJMatrixRead: a header line with the local
 * row/column ranges, then one "row col value" line per nonzero.
 * Only the ParCSR object type is supported. */
HYPRE_Int
HYPRE_IJMatrixPrint( HYPRE_IJMatrix matrix,
                     const char *filename )
{
   MPI_Comm comm;
   HYPRE_Int *row_partitioning;
   HYPRE_Int *col_partitioning;
   HYPRE_Int ilower, iupper, jlower, jupper;
   HYPRE_Int i, j, ii;
   HYPRE_Int ncols, *cols;
   HYPRE_Complex *values;
   HYPRE_Int myid;
   char new_filename[255];
   FILE *file;
   void *object;

   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Reject non-ParCSR matrices up front. */
   if ( (hypre_IJMatrixObjectType(matrix) != HYPRE_PARCSR) )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   comm = hypre_IJMatrixComm(matrix);
   hypre_MPI_Comm_rank(comm, &myid);

   /* Each rank writes its own file, suffixed with its rank id. */
   hypre_sprintf(new_filename,"%s.%05d", filename, myid);

   if ((file = fopen(new_filename, "w")) == NULL)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* Two-entry local partitioning: {first, last+1}. */
   ilower = row_partitioning[0];
   iupper = row_partitioning[1] - 1;
   jlower = col_partitioning[0];
   jupper = col_partitioning[1] - 1;
#else
   /* Global partitioning array, indexed by rank. */
   ilower = row_partitioning[myid];
   iupper = row_partitioning[myid+1] - 1;
   jlower = col_partitioning[myid];
   jupper = col_partitioning[myid+1] - 1;
#endif
   /* Header line: the local ranges, as expected by HYPRE_IJMatrixRead. */
   hypre_fprintf(file, "%d %d %d %d\n", ilower, iupper, jlower, jupper);

   HYPRE_IJMatrixGetObject(matrix, &object);

   for (i = ilower; i <= iupper; i++)
   {
      if ( hypre_IJMatrixObjectType(matrix) == HYPRE_PARCSR )
      {
         /* Convert the global row index i to the 0-based local index
          * expected by HYPRE_ParCSRMatrixGetRow. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
         ii = i - hypre_IJMatrixGlobalFirstRow(matrix);
#else
         ii = i - row_partitioning[0];
#endif
         HYPRE_ParCSRMatrixGetRow((HYPRE_ParCSRMatrix) object,
                                  ii, &ncols, &cols, &values);
         /* Temporarily shift the returned local column indices to
          * global numbering for printing.  This mutates the matrix's
          * internal arrays, so it is undone below before RestoreRow. */
         for (j = 0; j < ncols; j++)
         {
#ifdef HYPRE_NO_GLOBAL_PARTITION
            cols[j] += hypre_IJMatrixGlobalFirstCol(matrix);
#else
            cols[j] += col_partitioning[0];
#endif
         }
      }

      for (j = 0; j < ncols; j++)
      {
         hypre_fprintf(file, "%d %d %.14e\n", i, cols[j], values[j]);
      }

      if ( hypre_IJMatrixObjectType(matrix) == HYPRE_PARCSR )
      {
         /* Undo the global shift so the internal arrays are restored
          * exactly as GetRow handed them out. */
         for (j = 0; j < ncols; j++)
         {
#ifdef HYPRE_NO_GLOBAL_PARTITION
            cols[j] -= hypre_IJMatrixGlobalFirstCol(matrix);
#else
            cols[j] -= col_partitioning[0];
#endif
         }
         HYPRE_ParCSRMatrixRestoreRow((HYPRE_ParCSRMatrix) object,
                                      ii, &ncols, &cols, &values);
      }
   }

   fclose(file);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * HYPRE_IJMatrixSetOMPFlag
 *--------------------------------------------------------------------------*/

/* Enable (nonzero) or disable (zero) the threaded Set/AddToValues paths. */
HYPRE_Int
HYPRE_IJMatrixSetOMPFlag( HYPRE_IJMatrix matrix,
                          HYPRE_Int omp_flag )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixOMPFlag(ijmatrix) = omp_flag;

   return hypre_error_flag;
}
| 25,929 | 25.086519 | 92 | c |
AMG | AMG-master/IJ_mv/HYPRE_IJVector.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* HYPRE_IJVector interface
*
*****************************************************************************/
#include "./_hypre_IJ_mv.h"
#include "../HYPRE.h"
/*--------------------------------------------------------------------------
 * HYPRE_IJVectorCreate
 *--------------------------------------------------------------------------*/

/* Create an IJ vector whose local range on this rank is [jlower, jupper].
 * Builds the (local or global, depending on HYPRE_NO_GLOBAL_PARTITION)
 * partitioning and records the global first row and global size.
 *
 * Bug fix: the jupper < -1 error path previously returned without
 * freeing the freshly allocated vec, leaking it (the jlower error path
 * already freed it); the leak is fixed below. */
HYPRE_Int
HYPRE_IJVectorCreate( MPI_Comm comm,
                      HYPRE_Int jlower,
                      HYPRE_Int jupper,
                      HYPRE_IJVector *vector )
{
   hypre_IJVector *vec;
   HYPRE_Int num_procs, my_id, *partitioning;

#ifdef HYPRE_NO_GLOBAL_PARTITION
   HYPRE_Int  row0, rowN;
#else
  HYPRE_Int *recv_buf;
  HYPRE_Int *info;
  HYPRE_Int i, i2;
#endif

   vec = hypre_CTAlloc(hypre_IJVector, 1);

   if (!vec)
   {
      hypre_error(HYPRE_ERROR_MEMORY);
      return hypre_error_flag;
   }

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* jlower..jupper may be an empty range (jlower == jupper+1), but
    * never inverted further and never negative. */
   if (jlower > jupper+1 || jlower < 0)
   {
      hypre_error_in_arg(2);
      hypre_TFree(vec);
      return hypre_error_flag;
   }

   if (jupper < -1)
   {
      hypre_error_in_arg(3);
      hypre_TFree(vec); /* bug fix: vec was leaked on this path */
      return hypre_error_flag;
   }

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* Each rank stores only its own {first, last+1} pair. */
   partitioning = hypre_CTAlloc(HYPRE_Int, 2);
   partitioning[0] = jlower;
   partitioning[1] = jupper+1;

   /* now we need the global number of rows as well
      as the global first row index */

   /* proc 0 has the first row */
   if (my_id==0)
   {
      row0 = jlower;
   }
   hypre_MPI_Bcast(&row0, 1, HYPRE_MPI_INT, 0, comm);
   /* proc (num_procs-1) has the last row */
   if (my_id == (num_procs-1))
   {
      rowN = jupper;
   }
   hypre_MPI_Bcast(&rowN, 1, HYPRE_MPI_INT, num_procs-1, comm);

   hypre_IJVectorGlobalFirstRow(vec) = row0;
   hypre_IJVectorGlobalNumRows(vec) = rowN - row0 + 1;
#else
   /* Gather every rank's (jlower, jupper) and check that consecutive
    * ranks' ranges tile the global index space without gaps. */
   info = hypre_CTAlloc(HYPRE_Int,2);
   recv_buf = hypre_CTAlloc(HYPRE_Int, 2*num_procs);
   partitioning = hypre_CTAlloc(HYPRE_Int, num_procs+1);

   info[0] = jlower;
   info[1] = jupper;

   hypre_MPI_Allgather(info, 2, HYPRE_MPI_INT, recv_buf, 2, HYPRE_MPI_INT, comm);

   partitioning[0] = recv_buf[0];
   for (i=0; i < num_procs-1; i++)
   {
      i2 = i+i;
      if (recv_buf[i2+1] != (recv_buf[i2+2]-1))
      {
         /* Rank i's upper bound must be exactly one less than rank
          * i+1's lower bound; otherwise the partitioning is broken. */
         hypre_error(HYPRE_ERROR_GENERIC);
         hypre_TFree(info);
         hypre_TFree(recv_buf);
         hypre_TFree(partitioning);
         hypre_TFree(vec);
         return hypre_error_flag;
      }
      else
         partitioning[i+1] = recv_buf[i2+2];
   }

   i2 = (num_procs-1)*2;
   partitioning[num_procs] = recv_buf[i2+1]+1;

   hypre_TFree(info);
   hypre_TFree(recv_buf);

   hypre_IJVectorGlobalFirstRow(vec) = partitioning[0];
   hypre_IJVectorGlobalNumRows(vec)= partitioning[num_procs]-partitioning[0];
#endif

   /* Initialize remaining fields; the object itself is created later
    * by HYPRE_IJVectorInitialize. */
   hypre_IJVectorComm(vec)         = comm;
   hypre_IJVectorPartitioning(vec) = partitioning;
   hypre_IJVectorObjectType(vec)   = HYPRE_UNITIALIZED;
   hypre_IJVectorObject(vec)       = NULL;
   hypre_IJVectorTranslator(vec)   = NULL;
   hypre_IJVectorAssumedPart(vec)  = NULL;
   hypre_IJVectorPrintLevel(vec)   = 0;

   *vector = (HYPRE_IJVector) vec;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * HYPRE_IJVectorDestroy
 *--------------------------------------------------------------------------*/

/* Free an IJ vector and everything it owns: the partitioning array, the
 * assumed-partition structure, the underlying object (ParCSR only), and
 * the auxiliary translator used during assembly. */
HYPRE_Int
HYPRE_IJVectorDestroy( HYPRE_IJVector vector )
{
   hypre_IJVector *vec = (hypre_IJVector *) vector;

   if (!vec)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJVectorPartitioning(vec))
      hypre_TFree(hypre_IJVectorPartitioning(vec));

   if (hypre_IJVectorAssumedPart(vec))
      hypre_AssumedPartitionDestroy((hypre_IJAssumedPart*)hypre_IJVectorAssumedPart(vec));

   if ( hypre_IJVectorObjectType(vec) == HYPRE_PARCSR )
   {
      hypre_IJVectorDestroyPar(vec) ;
      if (hypre_IJVectorTranslator(vec))
      {
         hypre_AuxParVectorDestroy((hypre_AuxParVector *)
                                   (hypre_IJVectorTranslator(vec)));
      }
   }
   else if ( hypre_IJVectorObjectType(vec) != -1 )
   {
      /* Initialized with an unsupported object type: report and bail
       * out WITHOUT freeing vec (same behavior as before). */
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_TFree(vec);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * HYPRE_IJVectorInitialize
 *--------------------------------------------------------------------------*/

/* Prepare the vector for setting values.  Lazily creates the underlying
 * ParVector object on first call, then (re)initializes it. */
HYPRE_Int
HYPRE_IJVectorInitialize( HYPRE_IJVector vector )
{
   hypre_IJVector *vec = (hypre_IJVector *) vector;

   if (!vec)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJVectorObjectType(vec) != HYPRE_PARCSR)
   {
      /* Only the ParCSR object type is implemented. */
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Create the underlying object once; repeat calls only re-init. */
   if (!hypre_IJVectorObject(vec))
      hypre_IJVectorCreatePar(vec, hypre_IJVectorPartitioning(vec));

   hypre_IJVectorInitializePar(vec);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * HYPRE_IJVectorSetPrintLevel
 *--------------------------------------------------------------------------*/

/* Set the diagnostic print level of an IJ vector.
 *
 * Bug fix: the previous code assigned the constant 1, silently ignoring
 * the caller's print_level argument (same defect as the matrix variant).
 * Store the requested level so callers can also disable printing. */
HYPRE_Int
HYPRE_IJVectorSetPrintLevel( HYPRE_IJVector vector,
                             HYPRE_Int print_level )
{
   hypre_IJVector *ijvector = (hypre_IJVector *) vector;

   if (!ijvector)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJVectorPrintLevel(ijvector) = print_level;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * HYPRE_IJVectorSetValues
 *--------------------------------------------------------------------------*/

/* Overwrite nvalues entries of the vector at the given indices.
 * Bad arguments are reported by their 1-based position. */
HYPRE_Int
HYPRE_IJVectorSetValues( HYPRE_IJVector vector,
                         HYPRE_Int nvalues,
                         const HYPRE_Int *indices,
                         const HYPRE_Complex *values )
{
   hypre_IJVector *vec = (hypre_IJVector *) vector;

   /* Empty update: successful no-op. */
   if (nvalues == 0)
   {
      return hypre_error_flag;
   }

   if (!vec)         { hypre_error_in_arg(1); return hypre_error_flag; }
   if (nvalues < 0)  { hypre_error_in_arg(2); return hypre_error_flag; }
   if (!values)      { hypre_error_in_arg(4); return hypre_error_flag; }

   if (hypre_IJVectorObjectType(vec) != HYPRE_PARCSR)
   {
      /* Only the ParCSR object type is implemented. */
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   return( hypre_IJVectorSetValuesPar(vec, nvalues, indices, values) );
}
/*--------------------------------------------------------------------------
 * HYPRE_IJVectorAddToValues
 *--------------------------------------------------------------------------*/

/* Accumulate nvalues entries into the vector at the given indices.
 * Validation mirrors HYPRE_IJVectorSetValues. */
HYPRE_Int
HYPRE_IJVectorAddToValues( HYPRE_IJVector vector,
                           HYPRE_Int nvalues,
                           const HYPRE_Int *indices,
                           const HYPRE_Complex *values )
{
   hypre_IJVector *vec = (hypre_IJVector *) vector;

   /* Nothing to add. */
   if (nvalues == 0)
   {
      return hypre_error_flag;
   }

   if (!vec)         { hypre_error_in_arg(1); return hypre_error_flag; }
   if (nvalues < 0)  { hypre_error_in_arg(2); return hypre_error_flag; }
   if (!values)      { hypre_error_in_arg(4); return hypre_error_flag; }

   if (hypre_IJVectorObjectType(vec) != HYPRE_PARCSR)
   {
      /* Only the ParCSR object type is implemented. */
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   return( hypre_IJVectorAddToValuesPar(vec, nvalues, indices, values) );
}
/*--------------------------------------------------------------------------
 * HYPRE_IJVectorAssemble
 *--------------------------------------------------------------------------*/

/* Finalize vector construction; must be called before the vector is used. */
HYPRE_Int
HYPRE_IJVectorAssemble( HYPRE_IJVector vector )
{
   hypre_IJVector *vec = (hypre_IJVector *) vector;

   if (!vec)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJVectorObjectType(vec) != HYPRE_PARCSR)
   {
      /* Only the ParCSR object type is implemented. */
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   return( hypre_IJVectorAssemblePar(vec) );
}
/*--------------------------------------------------------------------------
 * HYPRE_IJVectorGetValues
 *--------------------------------------------------------------------------*/

/* Read nvalues entries of the vector at the given indices into values[]. */
HYPRE_Int
HYPRE_IJVectorGetValues( HYPRE_IJVector vector,
                         HYPRE_Int nvalues,
                         const HYPRE_Int *indices,
                         HYPRE_Complex *values )
{
   hypre_IJVector *vec = (hypre_IJVector *) vector;

   /* Empty query: nothing to read. */
   if (nvalues == 0)
   {
      return hypre_error_flag;
   }

   if (!vec)         { hypre_error_in_arg(1); return hypre_error_flag; }
   if (nvalues < 0)  { hypre_error_in_arg(2); return hypre_error_flag; }
   if (!values)      { hypre_error_in_arg(4); return hypre_error_flag; }

   if (hypre_IJVectorObjectType(vec) != HYPRE_PARCSR)
   {
      /* Only the ParCSR object type is implemented. */
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   return( hypre_IJVectorGetValuesPar(vec, nvalues, indices, values) );
}
/*--------------------------------------------------------------------------
 * HYPRE_IJVectorSetMaxOffProcElmts
 *--------------------------------------------------------------------------*/

/* Optional hint: maximum number of entries expected to be set/added
 * from other processes, used to pre-size off-process buffers. */
HYPRE_Int
HYPRE_IJVectorSetMaxOffProcElmts( HYPRE_IJVector vector,
                                  HYPRE_Int max_off_proc_elmts )
{
   hypre_IJVector *vec = (hypre_IJVector *) vector;

   if (!vec)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJVectorObjectType(vec) != HYPRE_PARCSR)
   {
      /* Only the ParCSR object type is implemented. */
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   return( hypre_IJVectorSetMaxOffProcElmtsPar(vec, max_off_proc_elmts));
}
/*--------------------------------------------------------------------------
 * HYPRE_IJVectorSetObjectType
 *--------------------------------------------------------------------------*/

/* Record the storage type (e.g. HYPRE_PARCSR) to use for this vector. */
HYPRE_Int
HYPRE_IJVectorSetObjectType( HYPRE_IJVector vector,
                             HYPRE_Int type )
{
   hypre_IJVector *vec = (hypre_IJVector *) vector;

   if (!vec)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJVectorObjectType(vec) = type;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * HYPRE_IJVectorGetObjectType
 *--------------------------------------------------------------------------*/

/* Return (through *type) the storage type previously set on the vector. */
HYPRE_Int
HYPRE_IJVectorGetObjectType( HYPRE_IJVector vector,
                             HYPRE_Int *type )
{
   hypre_IJVector *vec = (hypre_IJVector *) vector;

   if (!vec)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   *type = hypre_IJVectorObjectType(vec);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * HYPRE_IJVectorGetLocalRange
 *--------------------------------------------------------------------------*/

/* Report the inclusive index range [*jlower, *jupper] owned by this rank. */
HYPRE_Int
HYPRE_IJVectorGetLocalRange( HYPRE_IJVector vector,
                             HYPRE_Int *jlower,
                             HYPRE_Int *jupper )
{
   hypre_IJVector *vec = (hypre_IJVector *) vector;
   MPI_Comm        comm;
   HYPRE_Int      *part;
   HYPRE_Int       rank;

   if (!vec)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   comm = hypre_IJVectorComm(vec);
   part = hypre_IJVectorPartitioning(vec);
   hypre_MPI_Comm_rank(comm, &rank);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* Local two-entry partitioning: {first, last+1} for this rank only. */
   *jlower = part[0];
   *jupper = part[1] - 1;
#else
   /* Global partitioning array indexed by rank. */
   *jlower = part[rank];
   *jupper = part[rank+1] - 1;
#endif

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * HYPRE_IJVectorGetObject
 *--------------------------------------------------------------------------*/

/* Hand back the raw underlying object (e.g. a hypre_ParVector*). */
HYPRE_Int
HYPRE_IJVectorGetObject( HYPRE_IJVector vector,
                         void **object )
{
   hypre_IJVector *vec = (hypre_IJVector *) vector;

   if (!vec)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   *object = hypre_IJVectorObject(vec);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * HYPRE_IJVectorRead
 *--------------------------------------------------------------------------*/

/* Build an IJ vector from per-rank files "<filename>.<myid>" written in
 * the HYPRE_IJVectorPrint format: a header line "jlower jupper" followed
 * by "index value" pairs.
 *
 * Bug fix: on a malformed input line the function previously returned
 * without closing the file, leaking the FILE handle; fclose() is now
 * called on that path as well. */
HYPRE_Int
HYPRE_IJVectorRead( const char *filename,
                    MPI_Comm comm,
                    HYPRE_Int type,
                    HYPRE_IJVector *vector_ptr )
{
   HYPRE_IJVector vector;
   HYPRE_Int jlower, jupper, j;
   HYPRE_Complex value;
   HYPRE_Int myid, ret;
   char new_filename[255];
   FILE *file;

   hypre_MPI_Comm_rank(comm, &myid);

   hypre_sprintf(new_filename,"%s.%05d", filename, myid);

   if ((file = fopen(new_filename, "r")) == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_fscanf(file, "%d %d", &jlower, &jupper);
   HYPRE_IJVectorCreate(comm, jlower, jupper, &vector);

   HYPRE_IJVectorSetObjectType(vector, type);
   HYPRE_IJVectorInitialize(vector);

   /* It is important to ensure that whitespace follows the index value to help
    * catch mistakes in the input file. This is done with %*[ \t]. Using a
    * space here causes an input line with a single decimal value on it to be
    * read as if it were an integer followed by a decimal value. */
   while ( (ret = hypre_fscanf(file, "%d%*[ \t]%le", &j, &value)) != EOF )
   {
      if (ret != 2)
      {
         hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Error in IJ vector input file.");
         fclose(file); /* was leaked before this fix */
         return hypre_error_flag;
      }

      /* Off-range entries must be routed through AddToValues, which
       * supports off-process contributions; local ones use SetValues. */
      if (j < jlower || j > jupper)
         HYPRE_IJVectorAddToValues(vector, 1, &j, &value);
      else
         HYPRE_IJVectorSetValues(vector, 1, &j, &value);
   }

   HYPRE_IJVectorAssemble(vector);

   fclose(file);

   *vector_ptr = vector;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * HYPRE_IJVectorPrint
 *--------------------------------------------------------------------------*/

/* Write this rank's portion of the vector to "<filename>.<myid>" in the
 * format read back by HYPRE_IJVectorRead: a header line with the local
 * range, then one "index value" line per entry. */
HYPRE_Int
HYPRE_IJVectorPrint( HYPRE_IJVector vector,
                     const char *filename )
{
   MPI_Comm       comm;
   HYPRE_Int     *part;
   HYPRE_Int      jlower, jupper, idx;
   HYPRE_Complex  entry;
   HYPRE_Int      myid;
   char           new_filename[255];
   FILE          *file;

   if (!vector)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   comm = hypre_IJVectorComm(vector);
   hypre_MPI_Comm_rank(comm, &myid);

   /* Each rank writes its own file, suffixed with its rank id. */
   hypre_sprintf(new_filename,"%s.%05d", filename, myid);

   file = fopen(new_filename, "w");
   if (file == NULL)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   part = hypre_IJVectorPartitioning(vector);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* Local two-entry partitioning: {first, last+1}. */
   jlower = part[0];
   jupper = part[1] - 1;
#else
   /* Global partitioning array indexed by rank. */
   jlower = part[myid];
   jupper = part[myid+1] - 1;
#endif
   hypre_fprintf(file, "%d %d\n", jlower, jupper);

   /* One entry per line, fetched individually through the public API. */
   for (idx = jlower; idx <= jupper; idx++)
   {
      HYPRE_IJVectorGetValues(vector, 1, &idx, &entry);
      hypre_fprintf(file, "%d %.14e\n", idx, entry);
   }

   fclose(file);

   return hypre_error_flag;
}
| 16,347 | 24.464174 | 88 | c |
AMG | AMG-master/IJ_mv/HYPRE_IJ_mv.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#ifndef HYPRE_IJ_MV_HEADER
#define HYPRE_IJ_MV_HEADER
#include "HYPRE_utilities.h"
#ifdef __cplusplus
extern "C" {
#endif
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/**
* @name IJ System Interface
*
* This interface represents a linear-algebraic conceptual view of a
* linear system. The 'I' and 'J' in the name are meant to be
* mnemonic for the traditional matrix notation A(I,J).
*
* @memo A linear-algebraic conceptual interface
**/
/*@{*/
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/**
* @name IJ Matrices
**/
/*@{*/
struct hypre_IJMatrix_struct;
/**
* The matrix object.
**/
typedef struct hypre_IJMatrix_struct *HYPRE_IJMatrix;
/**
* Create a matrix object. Each process owns some unique consecutive
* range of rows, indicated by the global row indices {\tt ilower} and
* {\tt iupper}. The row data is required to be such that the value
* of {\tt ilower} on any process $p$ be exactly one more than the
* value of {\tt iupper} on process $p-1$. Note that the first row of
* the global matrix may start with any integer value. In particular,
* one may use zero- or one-based indexing.
*
* For square matrices, {\tt jlower} and {\tt jupper} typically should
* match {\tt ilower} and {\tt iupper}, respectively. For rectangular
* matrices, {\tt jlower} and {\tt jupper} should define a
* partitioning of the columns. This partitioning must be used for
* any vector $v$ that will be used in matrix-vector products with the
* rectangular matrix. The matrix data structure may use {\tt jlower}
* and {\tt jupper} to store the diagonal blocks (rectangular in
* general) of the matrix separately from the rest of the matrix.
*
* Collective.
**/
HYPRE_Int HYPRE_IJMatrixCreate(MPI_Comm comm,
HYPRE_Int ilower,
HYPRE_Int iupper,
HYPRE_Int jlower,
HYPRE_Int jupper,
HYPRE_IJMatrix *matrix);
/**
* Destroy a matrix object. An object should be explicitly destroyed
* using this destructor when the user's code no longer needs direct
* access to it. Once destroyed, the object must not be referenced
* again. Note that the object may not be deallocated at the
* completion of this call, since there may be internal package
* references to the object. The object will then be destroyed when
* all internal reference counts go to zero.
**/
HYPRE_Int HYPRE_IJMatrixDestroy(HYPRE_IJMatrix matrix);
/**
* Prepare a matrix object for setting coefficient values. This
* routine will also re-initialize an already assembled matrix,
* allowing users to modify coefficient values.
**/
HYPRE_Int HYPRE_IJMatrixInitialize(HYPRE_IJMatrix matrix);
/**
* Sets values for {\tt nrows} rows or partial rows of the matrix.
* The arrays {\tt ncols}
* and {\tt rows} are of dimension {\tt nrows} and contain the number
* of columns in each row and the row indices, respectively. The
* array {\tt cols} contains the column indices for each of the {\tt
* rows}, and is ordered by rows. The data in the {\tt values} array
* corresponds directly to the column entries in {\tt cols}. Erases
* any previous values at the specified locations and replaces them
* with new ones, or, if there was no value there before, inserts a
* new one if set locally. Note that it is not possible to set values
* on other processors. If one tries to set a value from proc i on proc j,
* proc i will erase all previous occurrences of this value in its stack
* (including values generated with AddToValues), and treat it like
* a zero value. The actual value needs to be set on proc j.
*
* Note that a threaded version (threaded over the number of rows)
* will be called if
* HYPRE_IJMatrixSetOMPFlag is set to a value != 0.
* This requires that rows[i] != rows[j] for i!= j
* and is only efficient if a large number of rows is set in one call
* to HYPRE_IJMatrixSetValues.
*
* Not collective.
*
**/
HYPRE_Int HYPRE_IJMatrixSetValues(HYPRE_IJMatrix matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
const HYPRE_Int *rows,
const HYPRE_Int *cols,
const HYPRE_Complex *values);
/**
* Adds to values for {\tt nrows} rows or partial rows of the matrix.
* Usage details are analogous to \Ref{HYPRE_IJMatrixSetValues}.
* Adds to any previous values at the specified locations, or, if
* there was no value there before, inserts a new one.
* AddToValues can be used to add to values on other processors.
*
* Note that a threaded version (threaded over the number of rows)
* will be called if
* HYPRE_IJMatrixSetOMPFlag is set to a value != 0.
* This requires that rows[i] != rows[j] for i!= j
* and is only efficient if a large number of rows is added in one call
* to HYPRE_IJMatrixAddToValues.
*
* Not collective.
*
**/
HYPRE_Int HYPRE_IJMatrixAddToValues(HYPRE_IJMatrix matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
const HYPRE_Int *rows,
const HYPRE_Int *cols,
const HYPRE_Complex *values);
/**
* Finalize the construction of the matrix before using.
**/
HYPRE_Int HYPRE_IJMatrixAssemble(HYPRE_IJMatrix matrix);
/**
 * Gets the number of nonzero elements for {\tt nrows} rows specified in {\tt rows}
* and returns them in {\tt ncols}, which needs to be allocated by the
* user.
**/
HYPRE_Int HYPRE_IJMatrixGetRowCounts(HYPRE_IJMatrix matrix,
HYPRE_Int nrows,
HYPRE_Int *rows,
HYPRE_Int *ncols);
/**
* Gets values for {\tt nrows} rows or partial rows of the matrix.
* Usage details are
* analogous to \Ref{HYPRE_IJMatrixSetValues}.
**/
HYPRE_Int HYPRE_IJMatrixGetValues(HYPRE_IJMatrix matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
HYPRE_Int *rows,
HYPRE_Int *cols,
HYPRE_Complex *values);
/**
* Set the storage type of the matrix object to be constructed.
* Currently, {\tt type} can only be {\tt HYPRE\_PARCSR}.
*
* Not collective, but must be the same on all processes.
*
* @see HYPRE_IJMatrixGetObject
**/
HYPRE_Int HYPRE_IJMatrixSetObjectType(HYPRE_IJMatrix matrix,
HYPRE_Int type);
/**
* Get the storage type of the constructed matrix object.
**/
HYPRE_Int HYPRE_IJMatrixGetObjectType(HYPRE_IJMatrix matrix,
HYPRE_Int *type);
/**
* Gets range of rows owned by this processor and range
* of column partitioning for this processor.
**/
HYPRE_Int HYPRE_IJMatrixGetLocalRange(HYPRE_IJMatrix matrix,
HYPRE_Int *ilower,
HYPRE_Int *iupper,
HYPRE_Int *jlower,
HYPRE_Int *jupper);
/**
* Get a reference to the constructed matrix object.
*
* @see HYPRE_IJMatrixSetObjectType
**/
HYPRE_Int HYPRE_IJMatrixGetObject(HYPRE_IJMatrix matrix,
void **object);
/**
* (Optional) Set the max number of nonzeros to expect in each row.
* The array {\tt sizes} contains estimated sizes for each row on this
* process. This call can significantly improve the efficiency of
* matrix construction, and should always be utilized if possible.
*
* Not collective.
**/
HYPRE_Int HYPRE_IJMatrixSetRowSizes(HYPRE_IJMatrix matrix,
const HYPRE_Int *sizes);
/**
* (Optional) Sets the exact number of nonzeros in each row of
* the diagonal and off-diagonal blocks. The diagonal block is the
* submatrix whose column numbers correspond to rows owned by this
* process, and the off-diagonal block is everything else. The arrays
* {\tt diag\_sizes} and {\tt offdiag\_sizes} contain estimated sizes
* for each row of the diagonal and off-diagonal blocks, respectively.
* This routine can significantly improve the efficiency of matrix
* construction, and should always be utilized if possible.
*
* Not collective.
**/
HYPRE_Int HYPRE_IJMatrixSetDiagOffdSizes(HYPRE_IJMatrix matrix,
const HYPRE_Int *diag_sizes,
const HYPRE_Int *offdiag_sizes);
/**
* (Optional) Sets the maximum number of elements that are expected to be set
 * (or added) on other processors from this processor.
* This routine can significantly improve the efficiency of matrix
* construction, and should always be utilized if possible.
*
* Not collective.
**/
HYPRE_Int HYPRE_IJMatrixSetMaxOffProcElmts(HYPRE_IJMatrix matrix,
HYPRE_Int max_off_proc_elmts);
/**
* (Optional) Sets the print level, if the user wants to print
* error messages. The default is 0, i.e. no error messages are printed.
*
**/
HYPRE_Int HYPRE_IJMatrixSetPrintLevel(HYPRE_IJMatrix matrix,
HYPRE_Int print_level);
/**
* (Optional) if set, will use a threaded version of
* HYPRE_IJMatrixSetValues and HYPRE_IJMatrixAddToValues.
* This is only useful if a large number of rows is set or added to
* at once.
*
* NOTE that the values in the rows array of HYPRE_IJMatrixSetValues
* or HYPRE_IJMatrixAddToValues must be different from each other !!!
*
* This option is VERY inefficient if only a small number of rows
* is set or added at once and/or
* if reallocation of storage is required and/or
* if values are added to off processor values.
*
**/
HYPRE_Int HYPRE_IJMatrixSetOMPFlag(HYPRE_IJMatrix matrix,
HYPRE_Int omp_flag);
/**
* Read the matrix from file. This is mainly for debugging purposes.
**/
HYPRE_Int HYPRE_IJMatrixRead(const char *filename,
MPI_Comm comm,
HYPRE_Int type,
HYPRE_IJMatrix *matrix);
/**
* Print the matrix to file. This is mainly for debugging purposes.
**/
HYPRE_Int HYPRE_IJMatrixPrint(HYPRE_IJMatrix matrix,
const char *filename);
/*@}*/
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/**
* @name IJ Vectors
**/
/*@{*/
struct hypre_IJVector_struct;
/**
* The vector object.
**/
typedef struct hypre_IJVector_struct *HYPRE_IJVector;
/**
* Create a vector object. Each process owns some unique consecutive
* range of vector unknowns, indicated by the global indices {\tt
* jlower} and {\tt jupper}. The data is required to be such that the
* value of {\tt jlower} on any process $p$ be exactly one more than
* the value of {\tt jupper} on process $p-1$. Note that the first
* index of the global vector may start with any integer value. In
* particular, one may use zero- or one-based indexing.
*
* Collective.
**/
HYPRE_Int HYPRE_IJVectorCreate(MPI_Comm comm,
HYPRE_Int jlower,
HYPRE_Int jupper,
HYPRE_IJVector *vector);
/**
* Destroy a vector object. An object should be explicitly destroyed
* using this destructor when the user's code no longer needs direct
* access to it. Once destroyed, the object must not be referenced
* again. Note that the object may not be deallocated at the
* completion of this call, since there may be internal package
* references to the object. The object will then be destroyed when
* all internal reference counts go to zero.
**/
HYPRE_Int HYPRE_IJVectorDestroy(HYPRE_IJVector vector);
/**
* Prepare a vector object for setting coefficient values. This
* routine will also re-initialize an already assembled vector,
* allowing users to modify coefficient values.
**/
HYPRE_Int HYPRE_IJVectorInitialize(HYPRE_IJVector vector);
/**
* (Optional) Sets the maximum number of elements that are expected to be set
 * (or added) on other processors from this processor.
 * This routine can significantly improve the efficiency of vector
 * construction, and should always be utilized if possible.
*
* Not collective.
**/
HYPRE_Int HYPRE_IJVectorSetMaxOffProcElmts(HYPRE_IJVector vector,
HYPRE_Int max_off_proc_elmts);
/**
* Sets values in vector. The arrays {\tt values} and {\tt indices}
* are of dimension {\tt nvalues} and contain the vector values to be
* set and the corresponding global vector indices, respectively.
* Erases any previous values at the specified locations and replaces
* them with new ones. Note that it is not possible to set values
* on other processors. If one tries to set a value from proc i on proc j,
* proc i will erase all previous occurrences of this value in its stack
* (including values generated with AddToValues), and treat it like
* a zero value. The actual value needs to be set on proc j.
*
* Not collective.
**/
HYPRE_Int HYPRE_IJVectorSetValues(HYPRE_IJVector vector,
HYPRE_Int nvalues,
const HYPRE_Int *indices,
const HYPRE_Complex *values);
/**
* Adds to values in vector. Usage details are analogous to
* \Ref{HYPRE_IJVectorSetValues}.
* Adds to any previous values at the specified locations, or, if
* there was no value there before, inserts a new one.
* AddToValues can be used to add to values on other processors.
*
* Not collective.
**/
HYPRE_Int HYPRE_IJVectorAddToValues(HYPRE_IJVector vector,
HYPRE_Int nvalues,
const HYPRE_Int *indices,
const HYPRE_Complex *values);
/**
* Finalize the construction of the vector before using.
**/
HYPRE_Int HYPRE_IJVectorAssemble(HYPRE_IJVector vector);
/**
* Gets values in vector. Usage details are analogous to
* \Ref{HYPRE_IJVectorSetValues}.
*
* Not collective.
**/
HYPRE_Int HYPRE_IJVectorGetValues(HYPRE_IJVector vector,
HYPRE_Int nvalues,
const HYPRE_Int *indices,
HYPRE_Complex *values);
/**
* Set the storage type of the vector object to be constructed.
* Currently, {\tt type} can only be {\tt HYPRE\_PARCSR}.
*
* Not collective, but must be the same on all processes.
*
* @see HYPRE_IJVectorGetObject
**/
HYPRE_Int HYPRE_IJVectorSetObjectType(HYPRE_IJVector vector,
HYPRE_Int type);
/**
* Get the storage type of the constructed vector object.
**/
HYPRE_Int HYPRE_IJVectorGetObjectType(HYPRE_IJVector vector,
HYPRE_Int *type);
/**
* Returns range of the part of the vector owned by this processor.
**/
HYPRE_Int HYPRE_IJVectorGetLocalRange(HYPRE_IJVector vector,
HYPRE_Int *jlower,
HYPRE_Int *jupper);
/**
* Get a reference to the constructed vector object.
*
* @see HYPRE_IJVectorSetObjectType
**/
HYPRE_Int HYPRE_IJVectorGetObject(HYPRE_IJVector vector,
void **object);
/**
* (Optional) Sets the print level, if the user wants to print
* error messages. The default is 0, i.e. no error messages are printed.
*
**/
HYPRE_Int HYPRE_IJVectorSetPrintLevel(HYPRE_IJVector vector,
HYPRE_Int print_level);
/**
* Read the vector from file. This is mainly for debugging purposes.
**/
HYPRE_Int HYPRE_IJVectorRead(const char *filename,
MPI_Comm comm,
HYPRE_Int type,
HYPRE_IJVector *vector);
/**
* Print the vector to file. This is mainly for debugging purposes.
**/
HYPRE_Int HYPRE_IJVectorPrint(HYPRE_IJVector vector,
const char *filename);
/*@}*/
/*@}*/
#ifdef __cplusplus
}
#endif
#endif
| 17,922 | 37.297009 | 81 | h |
AMG | AMG-master/IJ_mv/IJMatrix.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* hypre_IJMatrix interface
*
*****************************************************************************/
#include "./_hypre_IJ_mv.h"
#include "../HYPRE.h"
/*--------------------------------------------------------------------------
* hypre_IJMatrixGetRowPartitioning
*--------------------------------------------------------------------------*/
/**
Returns a pointer to the row partitioning
@return integer error code
@param IJMatrix [IN]
The ijmatrix to be pointed to.
*/
HYPRE_Int
hypre_IJMatrixGetRowPartitioning( HYPRE_IJMatrix matrix ,
                                  HYPRE_Int **row_partitioning )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;
   HYPRE_Int      *partitioning;

   /* A NULL handle is a caller error: report it and bail out. */
   if (!ijmatrix)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Variable ijmatrix is NULL -- hypre_IJMatrixGetRowPartitioning\n");
      return hypre_error_flag;
   }

   /* Hand back the stored row partitioning; it must already exist. */
   partitioning = hypre_IJMatrixRowPartitioning(ijmatrix);
   if (!partitioning)
   {
      hypre_error(HYPRE_ERROR_GENERIC);
      return hypre_error_flag;
   }

   *row_partitioning = partitioning;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_IJMatrixGetColPartitioning
*--------------------------------------------------------------------------*/
/**
Returns a pointer to the column partitioning
@return integer error code
@param IJMatrix [IN]
The ijmatrix to be pointed to.
*/
HYPRE_Int
hypre_IJMatrixGetColPartitioning( HYPRE_IJMatrix matrix ,
                                  HYPRE_Int **col_partitioning )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;
   HYPRE_Int      *partitioning;

   /* A NULL handle is a caller error: report it and bail out. */
   if (!ijmatrix)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Variable ijmatrix is NULL -- hypre_IJMatrixGetColPartitioning\n");
      return hypre_error_flag;
   }

   /* Hand back the stored column partitioning; it must already exist. */
   partitioning = hypre_IJMatrixColPartitioning(ijmatrix);
   if (!partitioning)
   {
      hypre_error(HYPRE_ERROR_GENERIC);
      return hypre_error_flag;
   }

   *col_partitioning = partitioning;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_IJMatrixSetObject
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_IJMatrixSetObject( HYPRE_IJMatrix matrix,
                         void *object )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   /* Attach the object only when none is present yet. */
   if (hypre_IJMatrixObject(ijmatrix) == NULL)
   {
      hypre_IJMatrixObject(ijmatrix) = object;
      return hypre_error_flag;
   }

   /* An object is already attached; overwriting it here could orphan
      the old one, so flag a generic error instead of replacing it. */
   hypre_error(HYPRE_ERROR_GENERIC);
   return hypre_error_flag;
}
| 3,668 | 29.322314 | 111 | c |
AMG | AMG-master/IJ_mv/IJMatrix_parcsr.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* IJMatrix_ParCSR interface
*
*****************************************************************************/
#include "_hypre_IJ_mv.h"
#include "_hypre_parcsr_mv.h"
#include "../HYPRE.h"
/******************************************************************************
*
* hypre_IJMatrixCreateParCSR
*
*****************************************************************************/
/* Create the underlying ParCSR matrix object from the IJ matrix's row and
   column partitionings and attach it to the IJ matrix.  The partitionings
   are copied (shifted to zero-based indexing when the global first row or
   column is nonzero) so the ParCSR object owns its own arrays. */
HYPRE_Int
hypre_IJMatrixCreateParCSR(hypre_IJMatrix *matrix)
{
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_Int *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   hypre_ParCSRMatrix *par_matrix;
   HYPRE_Int *row_starts;
   HYPRE_Int *col_starts;
   HYPRE_Int num_procs;
   HYPRE_Int i;
   hypre_MPI_Comm_size(comm,&num_procs);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* Assumed-partition mode: each process stores only its own two-entry
      range [first_local_row, last_local_row+1). */
   row_starts = hypre_CTAlloc(HYPRE_Int,2);
   /* Shift to zero-based indexing when the global first row is nonzero. */
   if (hypre_IJMatrixGlobalFirstRow(matrix))
      for (i=0; i < 2; i++)
         row_starts[i] = row_partitioning[i]- hypre_IJMatrixGlobalFirstRow(matrix);
   else
      for (i=0; i < 2; i++)
         row_starts[i] = row_partitioning[i];
   if (row_partitioning != col_partitioning)
   {
      /* Rectangular case: build a separate, similarly-shifted column copy. */
      col_starts = hypre_CTAlloc(HYPRE_Int,2);
      if (hypre_IJMatrixGlobalFirstCol(matrix))
         for (i=0; i < 2; i++)
            col_starts[i] = col_partitioning[i]-hypre_IJMatrixGlobalFirstCol(matrix);
      else
         for (i=0; i < 2; i++)
            col_starts[i] = col_partitioning[i];
   }
   else
      /* Same pointer means square partitioning: share one array. */
      col_starts = row_starts;
   par_matrix = hypre_ParCSRMatrixCreate(comm, hypre_IJMatrixGlobalNumRows(matrix),
                                         hypre_IJMatrixGlobalNumCols(matrix),
                                         row_starts, col_starts, 0, 0, 0);
#else
   /* Global-partition mode: every process stores the full num_procs+1
      partitioning, shifted so entry 0 becomes zero. */
   row_starts = hypre_CTAlloc(HYPRE_Int,num_procs+1);
   if (row_partitioning[0])
      for (i=0; i < num_procs+1; i++)
         row_starts[i] = row_partitioning[i]-row_partitioning[0];
   else
      for (i=0; i < num_procs+1; i++)
         row_starts[i] = row_partitioning[i];
   if (row_partitioning != col_partitioning)
   {
      col_starts = hypre_CTAlloc(HYPRE_Int,num_procs+1);
      if (col_partitioning[0])
         for (i=0; i < num_procs+1; i++)
            col_starts[i] = col_partitioning[i]-col_partitioning[0];
      else
         for (i=0; i < num_procs+1; i++)
            col_starts[i] = col_partitioning[i];
   }
   else
      col_starts = row_starts;
   /* Global sizes are read off the last (shifted) partitioning entries. */
   par_matrix = hypre_ParCSRMatrixCreate(comm,row_starts[num_procs],
                                         col_starts[num_procs],
                                         row_starts, col_starts, 0, 0, 0);
#endif
   hypre_IJMatrixObject(matrix) = par_matrix;
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetRowSizesParCSR
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetRowSizesParCSR(hypre_IJMatrix *matrix,
                                const HYPRE_Int *sizes)
{
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_Int *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_Int *row_space = NULL;
   HYPRE_Int num_local_rows, num_local_cols;
   HYPRE_Int row, my_id;

   hypre_MPI_Comm_rank(comm, &my_id);

   /* Local extents come from the partitionings; the layout of those
      arrays differs between the two partitioning modes. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   num_local_rows = row_partitioning[1] - row_partitioning[0];
   num_local_cols = col_partitioning[1] - col_partitioning[0];
#else
   num_local_rows = row_partitioning[my_id+1] - row_partitioning[my_id];
   num_local_cols = col_partitioning[my_id+1] - col_partitioning[my_id];
#endif

   /* Reuse the auxiliary matrix's row-space array when one exists;
      otherwise allocate a fresh one. */
   aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
   if (aux_matrix)
   {
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
   }
   if (!row_space)
   {
      row_space = hypre_CTAlloc(HYPRE_Int, num_local_rows);
   }

   /* Copy the caller's estimated per-row sizes. */
   for (row = 0; row < num_local_rows; row++)
   {
      row_space[row] = sizes[row];
   }

   /* Create the auxiliary matrix on first use and attach it. */
   if (!aux_matrix)
   {
      hypre_AuxParCSRMatrixCreate(&aux_matrix, num_local_rows,
                                  num_local_cols, row_space);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   hypre_AuxParCSRMatrixRowSpace(aux_matrix) = row_space;

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetDiagOffdSizesParCSR
* sets diag_i inside the diag part of the ParCSRMatrix
* and offd_i inside the offd part,
* requires exact row sizes for diag and offd
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetDiagOffdSizesParCSR(hypre_IJMatrix *matrix,
                                     const HYPRE_Int *diag_sizes,
                                     const HYPRE_Int *offdiag_sizes)
{
   HYPRE_Int local_num_rows;
   HYPRE_Int i;
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *)hypre_IJMatrixObject(matrix);
   hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *)hypre_IJMatrixTranslator(matrix);
   hypre_CSRMatrix *diag;
   hypre_CSRMatrix *offd;
   HYPRE_Int *diag_i;
   HYPRE_Int *offd_i;
   /* Make sure the underlying ParCSR object exists before touching it. */
   if (!par_matrix)
   {
      hypre_IJMatrixCreateParCSR(matrix);
      par_matrix = (hypre_ParCSRMatrix *)hypre_IJMatrixObject(matrix);
   }
   /* Build the diag row-pointer array as a running prefix sum of the
      caller-supplied exact row sizes.  NOTE(review): this assumes diag_i[0]
      is 0 when the array pre-exists (hypre_CTAlloc zero-initializes the
      fresh one) -- confirm for the reuse path. */
   diag = hypre_ParCSRMatrixDiag(par_matrix);
   diag_i = hypre_CSRMatrixI(diag);
   local_num_rows = hypre_CSRMatrixNumRows(diag);
   if (!diag_i)
      diag_i = hypre_CTAlloc(HYPRE_Int, local_num_rows+1);
   for (i = 0; i < local_num_rows; i++)
      diag_i[i+1] = diag_i[i] + diag_sizes[i];
   hypre_CSRMatrixI(diag) = diag_i;
   /* Last prefix-sum entry is the total diag nonzero count. */
   hypre_CSRMatrixNumNonzeros(diag) = diag_i[local_num_rows];
   /* Same construction for the off-diagonal block. */
   offd = hypre_ParCSRMatrixOffd(par_matrix);
   offd_i = hypre_CSRMatrixI(offd);
   if (!offd_i)
      offd_i = hypre_CTAlloc(HYPRE_Int, local_num_rows+1);
   for (i = 0; i < local_num_rows; i++)
      offd_i[i+1] = offd_i[i] + offdiag_sizes[i];
   hypre_CSRMatrixI(offd) = offd_i;
   hypre_CSRMatrixNumNonzeros(offd) = offd_i[local_num_rows];
   /* Create the auxiliary matrix if needed; with exact sizes known no
      auxiliary row storage is required, hence NeedAux = 0. */
   if (!aux_matrix)
   {
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows,
                                  hypre_CSRMatrixNumCols(diag), NULL);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetMaxOffProcElmtsParCSR
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetMaxOffProcElmtsParCSR(hypre_IJMatrix *matrix,
                                       HYPRE_Int max_off_proc_elmts)
{
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_Int *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_Int nrows_local, ncols_local, my_id;

   hypre_MPI_Comm_rank(comm, &my_id);

   /* Lazily create the auxiliary matrix on first use. */
   aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
   if (!aux_matrix)
   {
#ifdef HYPRE_NO_GLOBAL_PARTITION
      nrows_local = row_partitioning[1] - row_partitioning[0];
      ncols_local = col_partitioning[1] - col_partitioning[0];
#else
      nrows_local = row_partitioning[my_id+1] - row_partitioning[my_id];
      ncols_local = col_partitioning[my_id+1] - col_partitioning[my_id];
#endif
      hypre_AuxParCSRMatrixCreate(&aux_matrix, nrows_local,
                                  ncols_local, NULL);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }

   /* Record the caller's estimate of off-processor entries. */
   hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixInitializeParCSR
*
* initializes AuxParCSRMatrix and ParCSRMatrix as necessary
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixInitializeParCSR(hypre_IJMatrix *matrix)
{
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
   hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
   HYPRE_Int local_num_rows;
   /* First-time initialization path: matrix has not been assembled yet. */
   if (hypre_IJMatrixAssembleFlag(matrix) == 0)
   {
      /* Create the ParCSR and auxiliary objects if they do not exist. */
      if (!par_matrix)
      {
         hypre_IJMatrixCreateParCSR(matrix);
         par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
      }
      local_num_rows =
         hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(par_matrix));
      if (!aux_matrix)
      {
         hypre_AuxParCSRMatrixCreate(
            &aux_matrix, local_num_rows,
            hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(par_matrix)), NULL);
         hypre_IJMatrixTranslator(matrix) = aux_matrix;
      }
      hypre_ParCSRMatrixInitialize(par_matrix);
      hypre_AuxParCSRMatrixInitialize(aux_matrix);
      /* When exact row sizes were given (no auxiliary storage needed),
         point the per-row insertion indices at the start of each row. */
      if (! hypre_AuxParCSRMatrixNeedAux(aux_matrix))
      {
         HYPRE_Int i, *indx_diag, *indx_offd, *diag_i, *offd_i;
         diag_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(par_matrix));
         offd_i = hypre_CSRMatrixI(hypre_ParCSRMatrixOffd(par_matrix));
         indx_diag = hypre_AuxParCSRMatrixIndxDiag(aux_matrix);
         indx_offd = hypre_AuxParCSRMatrixIndxOffd(aux_matrix);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i=0; i < local_num_rows; i++)
         {
            indx_diag[i] = diag_i[i];
            indx_offd[i] = offd_i[i];
         }
      }
   }
   else /* AB 4/06 - the assemble routine destroys the aux matrix - so we need
           to recreate if initialize is called again*/
   {
      if (!aux_matrix)
      {
         local_num_rows =
            hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(par_matrix));
         hypre_AuxParCSRMatrixCreate(
            &aux_matrix, local_num_rows,
            hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(par_matrix)), NULL);
         /* Structure is already fixed after assembly, so no aux storage. */
         hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
         hypre_IJMatrixTranslator(matrix) = aux_matrix;
      }
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixGetRowCountsParCSR
*
* gets the number of columns for rows specified by the user
*
*****************************************************************************/
HYPRE_Int hypre_IJMatrixGetRowCountsParCSR( hypre_IJMatrix *matrix,
                                            HYPRE_Int       nrows,
                                            HYPRE_Int      *rows,
                                            HYPRE_Int      *ncols)
{
   HYPRE_Int row_index;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
   HYPRE_Int *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
   HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
   HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
   HYPRE_Int i, my_id, pstart;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
   hypre_MPI_Comm_rank(comm,&my_id);
   /* The partitioning array is indexed differently in the two modes:
      entry 0 locally vs. entry my_id in the global-partition layout. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   pstart = 0;
#else
   pstart = my_id;
#endif
   /* Rows are independent, so the loop is safe to parallelize;
      row_index must be private since it is reused per iteration. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i, row_index) HYPRE_SMP_SCHEDULE
#endif
   for (i=0; i < nrows; i++)
   {
      row_index = rows[i];
      if (row_index >= row_partitioning[pstart] &&
          row_index < row_partitioning[pstart+1])
      {
         /* compute local row number */
         row_index -= row_partitioning[pstart];
         /* Row count = diag-block entries + offd-block entries. */
         ncols[i] = diag_i[row_index+1]-diag_i[row_index]+offd_i[row_index+1]
            -offd_i[row_index];
      }
      else
      {
         /* Row is not owned by this process: report zero columns. */
         ncols[i] = 0;
         if (print_level)
            hypre_printf ("Warning! Row %d is not on Proc. %d!\n",
                          row_index, my_id);
      }
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixGetValuesParCSR
*
* gets values of an IJMatrix
*
*****************************************************************************/
/* Fetch matrix entries from an assembled IJ matrix.  Two modes:
   - nrows < 0: "whole rows" mode; |nrows| rows are returned in full.  The
     column indices are written to cols and ncols[] is corrected (with a
     warning) if a caller estimate was too small.
   - nrows >= 0: explicit (row, col) lookup; entries not present in the
     stored pattern are returned as 0.0. */
HYPRE_Int
hypre_IJMatrixGetValuesParCSR( hypre_IJMatrix *matrix,
                               HYPRE_Int       nrows,
                               HYPRE_Int      *ncols,
                               HYPRE_Int      *rows,
                               HYPRE_Int      *cols,
                               HYPRE_Complex  *values)
{
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
   HYPRE_Int assemble_flag = hypre_IJMatrixAssembleFlag(matrix);
   hypre_CSRMatrix *diag;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   hypre_CSRMatrix *offd;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_Complex *offd_data;
   HYPRE_Int *col_map_offd;
   HYPRE_Int *col_starts = hypre_ParCSRMatrixColStarts(par_matrix);
   HYPRE_Int *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
#ifndef HYPRE_NO_GLOBAL_PARTITION
   HYPRE_Int *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
#endif
   HYPRE_Int i, j, n, ii, indx, col_indx, pstart, first;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int col_0, col_n, row, row_local, row_size;
   HYPRE_Int warning = 0;
   HYPRE_Int *counter;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   /* Values can only be read back after assembly. */
   if (assemble_flag == 0)
   {
      hypre_error_in_arg(1);
      if (print_level)
         hypre_printf("Error! Matrix not assembled yet! HYPRE_IJMatrixGetValues\n");
   }
   /* col_0..col_n is the inclusive range of locally-owned columns;
      first is the global index offset of the column numbering. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   col_0 = col_starts[0];
   col_n = col_starts[1]-1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);
   pstart = 0;
#else
   col_0 = col_starts[my_id];
   col_n = col_starts[my_id+1]-1;
   first = col_partitioning[0];
   pstart = my_id;
#endif
   diag = hypre_ParCSRMatrixDiag(par_matrix);
   diag_i = hypre_CSRMatrixI(diag);
   diag_j = hypre_CSRMatrixJ(diag);
   diag_data = hypre_CSRMatrixData(diag);
   offd = hypre_ParCSRMatrixOffd(par_matrix);
   offd_i = hypre_CSRMatrixI(offd);
   /* offd_j/offd_data/col_map_offd stay unset on a single process; the
      offd row pointers are then empty, so the loops below never read them. */
   if (num_procs > 1)
   {
      offd_j = hypre_CSRMatrixJ(offd);
      offd_data = hypre_CSRMatrixData(offd);
      col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
   }
   if (nrows < 0)
   {
      /* Whole-row mode: negative nrows encodes "return complete rows". */
      nrows = -nrows;
      /* counter[i] is the running output offset for row i (prefix sums of
         the caller's ncols estimates). */
      counter = hypre_CTAlloc(HYPRE_Int,nrows+1);
      counter[0] = 0;
      for (i=0; i < nrows; i++)
         counter[i+1] = counter[i]+ncols[i];
      indx = 0;
      for (i=0; i < nrows; i++)
      {
         row = rows[i];
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = row - row_partitioning[pstart];
            row_size = diag_i[row_local+1]-diag_i[row_local]+
               offd_i[row_local+1]-offd_i[row_local];
            /* Make sure the caller's output buffers can hold this row. */
            if (counter[i]+row_size > counter[nrows])
            {
               hypre_error_in_arg(1);
               if (print_level)
                  hypre_printf ("Error! Not enough memory! HYPRE_IJMatrixGetValues\n");
            }
            if (ncols[i] < row_size)
               warning = 1;
            /* Diag entries: local column + col_0 recovers the global index. */
            for (j = diag_i[row_local]; j < diag_i[row_local+1]; j++)
            {
               cols[indx] = diag_j[j] + col_0;
               values[indx++] = diag_data[j];
            }
            /* Offd entries: global column comes from the column map. */
            for (j = offd_i[row_local]; j < offd_i[row_local+1]; j++)
            {
               cols[indx] = col_map_offd[offd_j[j]];
               values[indx++] = offd_data[j];
            }
            counter[i+1] = indx;
         }
         else
            if (print_level)
               hypre_printf ("Warning! Row %d is not on Proc. %d!\n", row, my_id);
      }
      /* Rewrite ncols with the true per-row counts if any estimate was off. */
      if (warning)
      {
         for (i=0; i < nrows; i++)
            ncols[i] = counter[i+1] - counter[i];
         if (print_level)
            hypre_printf ("Warning! ncols has been changed!\n");
      }
      hypre_TFree(counter);
   }
   else
   {
      /* Explicit (row, col) lookup mode. */
      indx = 0;
      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols[ii];
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = row - row_partitioning[pstart];
            /* compute local row number */
            for (i=0; i < n; i++)
            {
               /* Shift the requested global column by the global offset;
                  entries not found in the pattern stay 0.0. */
               col_indx = cols[indx] - first;
               values[indx] = 0.0;
               if (col_indx < col_0 || col_indx > col_n)
               /* search in offd */
               {
                  for (j=offd_i[row_local]; j < offd_i[row_local+1]; j++)
                  {
                     if (col_map_offd[offd_j[j]] == col_indx)
                     {
                        values[indx] = offd_data[j];
                        break;
                     }
                  }
               }
               else /* search in diag */
               {
                  /* Convert to a diag-local column index before scanning. */
                  col_indx = col_indx - col_0;
                  for (j=diag_i[row_local]; j < diag_i[row_local+1]; j++)
                  {
                     if (diag_j[j] == col_indx)
                     {
                        values[indx] = diag_data[j];
                        break;
                     }
                  }
               }
               indx++;
            }
         }
         else
            if (print_level)
               hypre_printf ("Warning! Row %d is not on Proc. %d!\n", row, my_id);
      }
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixSetValuesParCSR
*
* sets values in an IJMatrix before assembly,
*
*****************************************************************************/
/* Sets (overwrites) coefficients in an IJMatrix before or after assembly.
 *
 * matrix  - the IJMatrix (ParCSR storage) being modified
 * nrows   - number of rows touched by this call (must be >= 0)
 * ncols   - ncols[ii] = number of entries supplied for row rows[ii]
 * rows    - global row indices
 * cols    - global column indices, concatenated row after row
 * values  - coefficients, parallel to cols
 *
 * If the matrix is already assembled, only existing nonzero positions may be
 * set; attempting to set a position outside the sparsity pattern is an error.
 * Off-processor rows are not set here: any previously queued off-proc AddTo
 * entries for the same (row, col) are cancelled instead (marked with -1).
 * Returns hypre_error_flag. */
HYPRE_Int
hypre_IJMatrixSetValuesParCSR( hypre_IJMatrix *matrix,
                               HYPRE_Int nrows,
                               HYPRE_Int *ncols,
                               const HYPRE_Int *rows,
                               const HYPRE_Int *cols,
                               const HYPRE_Complex *values )
{
   hypre_ParCSRMatrix *par_matrix;
   hypre_CSRMatrix *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_Int *row_partitioning;
   HYPRE_Int *col_partitioning;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int num_procs, my_id;
   HYPRE_Int row_local, row;
   HYPRE_Int row_len;
   HYPRE_Int col_0, col_n;          /* first and last global column owned locally */
   HYPRE_Int i, ii, j, k, n, not_found;
   HYPRE_Int col_indx, cancel_indx, cnt1;
   HYPRE_Int **aux_j;
   HYPRE_Int *local_j;
   HYPRE_Int *tmp_j;
   HYPRE_Complex **aux_data;
   HYPRE_Complex *local_data;
   HYPRE_Complex *tmp_data;
   HYPRE_Int diag_space, offd_space;
   HYPRE_Int *row_length, *row_space;
   HYPRE_Int need_aux;
   HYPRE_Int tmp_indx, indx;        /* indx walks cols[]/values[] across all rows */
   HYPRE_Int space, size, old_size;
   HYPRE_Int cnt, cnt_diag, cnt_offd;
   HYPRE_Int pos_diag, pos_offd;
   HYPRE_Int len_diag, len_offd;
   HYPRE_Int offd_indx, diag_indx;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_Complex *offd_data;
   HYPRE_Int first, pstart;
   /*HYPRE_Int current_num_elmts;*/
   /*HYPRE_Int max_off_proc_elmts;*/
   HYPRE_Int off_proc_i_indx;
   HYPRE_Int *off_proc_i;
   HYPRE_Int *off_proc_j;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
   /*HYPRE_Complex *off_proc_data;*/

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* assumed-partition build: the partitioning arrays hold only this
      processor's 2-entry [start, end) range */
   col_0 = col_partitioning[0];
   col_n = col_partitioning[1]-1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);
   pstart = 0;
#else
   /* global-partition build: full per-processor ranges are available */
   col_0 = col_partitioning[my_id];
   col_n = col_partitioning[my_id+1]-1;
   first = col_partitioning[0];
   pstart = my_id;
#endif

   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      if (print_level)
         hypre_printf("Error! nrows negative! HYPRE_IJMatrixSetValues\n");
   }

   if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/
   {
      HYPRE_Int *col_map_offd;
      HYPRE_Int num_cols_offd;
      HYPRE_Int j_offd;

      indx = 0;
      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols[ii];
         /* processor owns the row */
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = row - row_partitioning[pstart];
            /* compute local row number */
            diag = hypre_ParCSRMatrixDiag(par_matrix);
            diag_i = hypre_CSRMatrixI(diag);
            diag_j = hypre_CSRMatrixJ(diag);
            diag_data = hypre_CSRMatrixData(diag);
            offd = hypre_ParCSRMatrixOffd(par_matrix);
            offd_i = hypre_CSRMatrixI(offd);
            num_cols_offd = hypre_CSRMatrixNumCols(offd);
            if (num_cols_offd)
            {
               col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
               offd_j = hypre_CSRMatrixJ(offd);
               offd_data = hypre_CSRMatrixData(offd);
            }
            /* total nonzeros available in this assembled row (diag + offd);
               cannot accept more entries than that */
            size = diag_i[row_local+1] - diag_i[row_local]
                   + offd_i[row_local+1] - offd_i[row_local];
            if (n > size)
            {
               hypre_error(HYPRE_ERROR_GENERIC);
               if (print_level) hypre_printf (" row %d too long! \n", row);
               return hypre_error_flag;
            }
            pos_diag = diag_i[row_local];
            pos_offd = offd_i[row_local];
            len_diag = diag_i[row_local+1];
            len_offd = offd_i[row_local+1];
            not_found = 1;
            for (i=0; i < n; i++)
            {
               if (cols[indx] < col_0 || cols[indx] > col_n)
                  /* insert into offd */
               {
                  /* map the global column to its local offd index */
                  j_offd = hypre_BinarySearch(col_map_offd,cols[indx]-first,
                                              num_cols_offd);
                  if (j_offd == -1)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                        hypre_printf (" Error, element %d %d does not exist\n",
                                      row, cols[indx]);
                     return hypre_error_flag;
                  }
                  for (j=pos_offd; j < len_offd; j++)
                  {
                     if (offd_j[j] == j_offd)
                     {
                        offd_data[j] = values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                        hypre_printf (" Error, element %d %d does not exist\n",
                                      row, cols[indx]);
                     return hypre_error_flag;
                  }
                  not_found = 1;
               }
               /* diagonal element */
               else if (cols[indx] == row)
               {
                  /* the diagonal entry, when present, is stored first in the row */
                  if (diag_j[pos_diag] != row_local)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                        hypre_printf (" Error, element %d %d does not exist\n",
                                      row, cols[indx]);
                     /* return -1;*/
                     return hypre_error_flag;
                  }
                  diag_data[pos_diag] = values[indx];
               }
               else /* insert into diag */
               {
                  for (j=pos_diag; j < len_diag; j++)
                  {
                     if (diag_j[j] == (cols[indx]-col_0))
                     {
                        diag_data[j] = values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                        hypre_printf (" Error, element %d %d does not exist\n",
                                      row, cols[indx]);
                     /* return -1; */
                     return hypre_error_flag;
                  }
               }
               indx++;
            }
         }
         /* processor does not own the row */
         else /*search for previous occurrences and cancel them */
         {
            aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
            if (aux_matrix)
            {
               /*current_num_elmts
                 = hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix);*/
               off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
               off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
               off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
               col_indx = 0;
               cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
               /* off_proc_i stores (row, length) pairs; scan every queued
                  chunk for this row and blank out matching columns */
               for (i=0; i < off_proc_i_indx; i=i+2)
               {
                  row_len = off_proc_i[i+1];
                  if (off_proc_i[i] == row)
                  {
                     /* NOTE(review): cols[j] indexes from the start of the
                        input arrays rather than from this row's offset
                        (indx) -- verify intent for multi-row calls */
                     for (j=0; j < n; j++)
                     {
                        cnt1 = col_indx;
                        for (k=0; k < row_len; k++)
                        {
                           if (off_proc_j[cnt1] == cols[j])
                           {
                              off_proc_j[cnt1++] = -1;  /* cancelled marker */
                              cancel_indx++;
                              /* if no repetition allowed */
                              /* off_proc_j[col_indx] = -1;
                                 col_indx -= k;
                                 break; */
                           }
                           else
                           {
                              cnt1++;
                           }
                        }
                     }
                     col_indx += row_len;
                  }
                  else
                  {
                     col_indx += row_len;
                  }
               }
               hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;
            }
         }
      }
   }
   else  /* not assembled: stage entries in the auxiliary matrix */
   {
      aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
      row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
      need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
      indx = 0;
      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols[ii];
         /* processor owns the row */
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = row - row_partitioning[pstart];
            /* compute local row number */
            if (need_aux)
            {
               /* entries go into per-row aux arrays (aux_j/aux_data);
                  overflow beyond the current row allocation is buffered
                  in tmp_j/tmp_data and spliced in after a realloc */
               aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
               aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
               local_j = aux_j[row_local];
               local_data = aux_data[row_local];
               space = row_space[row_local];
               old_size = row_length[row_local];
               size = space - old_size;   /* free slots left in this row */
               if (size < n)
               {
                  size = n - size;
                  tmp_j = hypre_CTAlloc(HYPRE_Int,size);
                  tmp_data = hypre_CTAlloc(HYPRE_Complex,size);
               }
               else
               {
                  tmp_j = NULL;  /* tmp_data only used when tmp_j != NULL */
               }
               tmp_indx = 0;
               not_found = 1;
               size = old_size;
               for (i=0; i < n; i++)
               {
                  /* overwrite an existing entry if the column is present */
                  for (j=0; j < old_size; j++)
                  {
                     if (local_j[j] == cols[indx])
                     {
                        local_data[j] = values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     if (size < space)
                     {
                        local_j[size] = cols[indx];
                        local_data[size++] = values[indx];
                     }
                     else
                     {
                        tmp_j[tmp_indx] = cols[indx];
                        tmp_data[tmp_indx++] = values[indx];
                     }
                  }
                  not_found = 1;
                  indx++;
               }
               row_length[row_local] = size+tmp_indx;
               if (tmp_indx)
               {
                  /* grow the row and append the overflow buffer */
                  aux_j[row_local] = hypre_TReAlloc(aux_j[row_local],HYPRE_Int,
                                                    size+tmp_indx);
                  aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
                                                       HYPRE_Complex,size+tmp_indx);
                  row_space[row_local] = size+tmp_indx;
                  local_j = aux_j[row_local];
                  local_data = aux_data[row_local];
               }
               cnt = size;
               for (i=0; i < tmp_indx; i++)
               {
                  local_j[cnt] = tmp_j[i];
                  local_data[cnt++] = tmp_data[i];
               }
               if (tmp_j)
               {
                  hypre_TFree(tmp_j);
                  hypre_TFree(tmp_data);
               }
            }
            else /* insert immediately into data in ParCSRMatrix structure */
            {
               offd_indx =hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
               diag_indx =hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
               diag = hypre_ParCSRMatrixDiag(par_matrix);
               diag_i = hypre_CSRMatrixI(diag);
               diag_j = hypre_CSRMatrixJ(diag);
               diag_data = hypre_CSRMatrixData(diag);
               offd = hypre_ParCSRMatrixOffd(par_matrix);
               offd_i = hypre_CSRMatrixI(offd);
               if (num_procs > 1)
               {
                  offd_j = hypre_CSRMatrixJ(offd);
                  offd_data = hypre_CSRMatrixData(offd);
               }
               cnt_diag = diag_indx;
               cnt_offd = offd_indx;
               diag_space = diag_i[row_local+1];
               offd_space = offd_i[row_local+1];
               not_found = 1;
               for (i=0; i < n; i++)
               {
                  if (cols[indx] < col_0 || cols[indx] > col_n)
                     /* insert into offd */
                  {
                     /* offd_j still holds global column ids at this stage
                        (translated to local ids at assembly) */
                     for (j=offd_i[row_local]; j < offd_indx; j++)
                     {
                        if (offd_j[j] == cols[indx])
                        {
                           offd_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_offd < offd_space)
                        {
                           offd_j[cnt_offd] = cols[indx];
                           offd_data[cnt_offd++] = values[indx];
                        }
                        else
                        {
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                              hypre_printf("Error in row %d ! Too many elements!\n",
                                           row);
                           /* return 1; */
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  else /* insert into diag */
                  {
                     for (j=diag_i[row_local]; j < diag_indx; j++)
                     {
                        if (diag_j[j] == cols[indx])
                        {
                           diag_data[j] = values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_diag < diag_space)
                        {
                           diag_j[cnt_diag] = cols[indx];
                           diag_data[cnt_diag++] = values[indx];
                        }
                        else
                        {
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                              hypre_printf("Error in row %d ! Too many elements !\n",
                                           row);
                           /* return 1; */
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  indx++;
               }
               hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
               hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
            }
         }
         /* processor does not own the row */
         else
         {
            /* skip this row's input entries, then cancel any previously
               queued off-proc AddTo entries for it */
            indx += n;
            aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
            if (aux_matrix)
            {
               /*current_num_elmts
                 = hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix);*/
               off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
               off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
               off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
               col_indx = 0;
               cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
               for (i=0; i < off_proc_i_indx; i=i+2)
               {
                  row_len = off_proc_i[i+1];
                  if (off_proc_i[i] == row)
                  {
                     /* NOTE(review): cols[j] indexes from the start of the
                        input arrays rather than from this row's offset
                        (indx - n) -- verify intent for multi-row calls */
                     for (j=0; j < n; j++)
                     {
                        cnt1 = col_indx;
                        for (k=0; k < row_len; k++)
                        {
                           if (off_proc_j[cnt1] == cols[j])
                           {
                              off_proc_j[cnt1++] = -1;  /* cancelled marker */
                              cancel_indx++;
                              /* if no repetition allowed */
                              /* off_proc_j[col_indx] = -1;
                                 col_indx -= k;
                                 break; */
                           }
                           else
                           {
                              cnt1++;
                           }
                        }
                     }
                     col_indx += row_len;
                  }
                  else
                  {
                     col_indx += row_len;
                  }
               }
               hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;
            }
         }
      }
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAddToValuesParCSR
*
* adds row values to an IJMatrix
*
*****************************************************************************/
/* Adds row values to an IJMatrix (accumulating semantics: existing entries
 * get values[] added to them, new entries are inserted).
 *
 * Arguments mirror hypre_IJMatrixSetValuesParCSR: nrows rows, with ncols[ii]
 * entries per row concatenated in cols[]/values[].
 *
 * Unlike SetValues, entries for rows owned by other processors are queued in
 * the auxiliary matrix's off_proc_i/off_proc_j/off_proc_data arrays (growing
 * them as needed) and communicated later during assembly.
 * Returns hypre_error_flag. */
HYPRE_Int
hypre_IJMatrixAddToValuesParCSR( hypre_IJMatrix *matrix,
                                 HYPRE_Int nrows,
                                 HYPRE_Int *ncols,
                                 const HYPRE_Int *rows,
                                 const HYPRE_Int *cols,
                                 const HYPRE_Complex *values )
{
   hypre_ParCSRMatrix *par_matrix;
   hypre_CSRMatrix *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_Int *row_partitioning;
   HYPRE_Int *col_partitioning;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int num_procs, my_id;
   HYPRE_Int row_local, row;
   HYPRE_Int col_0, col_n;          /* first and last global column owned locally */
   HYPRE_Int i, ii, j, n, not_found;
   HYPRE_Int **aux_j;
   HYPRE_Int *local_j;
   HYPRE_Int *tmp_j;
   HYPRE_Complex **aux_data;
   HYPRE_Complex *local_data;
   HYPRE_Complex *tmp_data;
   HYPRE_Int diag_space, offd_space;
   HYPRE_Int *row_length, *row_space;
   HYPRE_Int need_aux;
   HYPRE_Int tmp_indx, indx;        /* indx walks cols[]/values[] across all rows */
   HYPRE_Int space, size, old_size;
   HYPRE_Int cnt, cnt_diag, cnt_offd;
   HYPRE_Int pos_diag, pos_offd;
   HYPRE_Int len_diag, len_offd;
   HYPRE_Int offd_indx, diag_indx;
   HYPRE_Int first, pstart;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_Complex *offd_data;
   HYPRE_Int current_num_elmts;
   HYPRE_Int max_off_proc_elmts;
   HYPRE_Int off_proc_i_indx;
   HYPRE_Int *off_proc_i;
   HYPRE_Int *off_proc_j;
   HYPRE_Complex *off_proc_data;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* assumed-partition build: partitioning arrays hold only the local range */
   col_0 = col_partitioning[0];
   col_n = col_partitioning[1]-1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);
   pstart = 0;
#else
   col_0 = col_partitioning[my_id];
   col_n = col_partitioning[my_id+1]-1;
   first = col_partitioning[0];
   pstart = my_id;
#endif

   if (hypre_IJMatrixAssembleFlag(matrix))
   {
      HYPRE_Int num_cols_offd;
      HYPRE_Int *col_map_offd;
      HYPRE_Int j_offd;

      indx = 0;
      /* AB - 4/06 - need to get this object*/
      aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols[ii];
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = row - row_partitioning[pstart];
            /* compute local row number */
            diag = hypre_ParCSRMatrixDiag(par_matrix);
            diag_i = hypre_CSRMatrixI(diag);
            diag_j = hypre_CSRMatrixJ(diag);
            diag_data = hypre_CSRMatrixData(diag);
            offd = hypre_ParCSRMatrixOffd(par_matrix);
            offd_i = hypre_CSRMatrixI(offd);
            num_cols_offd = hypre_CSRMatrixNumCols(offd);
            if (num_cols_offd)
            {
               col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
               offd_j = hypre_CSRMatrixJ(offd);
               offd_data = hypre_CSRMatrixData(offd);
            }
            /* total nonzeros available in this assembled row (diag + offd) */
            size = diag_i[row_local+1] - diag_i[row_local]
                   + offd_i[row_local+1] - offd_i[row_local];
            if (n > size)
            {
               hypre_error(HYPRE_ERROR_GENERIC);
               if (print_level) hypre_printf (" row %d too long! \n", row);
               /* return -1; */
               return hypre_error_flag;
            }
            pos_diag = diag_i[row_local];
            pos_offd = offd_i[row_local];
            len_diag = diag_i[row_local+1];
            len_offd = offd_i[row_local+1];
            not_found = 1;
            for (i=0; i < n; i++)
            {
               if (cols[indx] < col_0 || cols[indx] > col_n)
                  /* insert into offd */
               {
                  /* map the global column to its local offd index */
                  j_offd = hypre_BinarySearch(col_map_offd,cols[indx]-first,
                                              num_cols_offd);
                  if (j_offd == -1)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                        hypre_printf (" Error, element %d %d does not exist\n",
                                      row, cols[indx]);
                     return hypre_error_flag;
                     /* return -1; */
                  }
                  for (j=pos_offd; j < len_offd; j++)
                  {
                     if (offd_j[j] == j_offd)
                     {
                        offd_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                        hypre_printf (" Error, element %d %d does not exist\n",
                                      row, cols[indx]);
                     /* return -1;*/
                     return hypre_error_flag;
                  }
                  not_found = 1;
               }
               /* diagonal element */
               else if (cols[indx] == row)
               {
                  /* the diagonal entry, when present, is stored first in the row */
                  if (diag_j[pos_diag] != row_local)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                        hypre_printf (" Error, element %d %d does not exist\n",
                                      row, cols[indx]);
                     /* return -1; */
                     return hypre_error_flag;
                  }
                  diag_data[pos_diag] += values[indx];
               }
               else /* insert into diag */
               {
                  for (j=pos_diag; j < len_diag; j++)
                  {
                     if (diag_j[j] == (cols[indx]-col_0))
                     {
                        diag_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                        hypre_printf (" Error, element %d %d does not exist\n",
                                      row, cols[indx]);
                     /* return -1;*/
                     return hypre_error_flag;
                  }
               }
               indx++;
            }
         }
         /* not my row */
         else
         {
            /* queue the entries for later communication; create the
               auxiliary matrix on demand if it does not exist yet */
            if (!aux_matrix)
            {
               size = row_partitioning[pstart+1]-row_partitioning[pstart];
               hypre_AuxParCSRMatrixCreate(&aux_matrix, size, size, NULL);
               hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
               hypre_IJMatrixTranslator(matrix) = aux_matrix;
            }
            current_num_elmts
               = hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix);
            max_off_proc_elmts
               = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
            off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
            off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
            off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
            off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            if (!max_off_proc_elmts)
            {
               /* first off-proc entry: allocate the queue
                  (off_proc_i holds (row, count) pairs, hence 2x) */
               max_off_proc_elmts = hypre_max(n,1000);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) =
                  max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix)
                  = hypre_CTAlloc(HYPRE_Int,2*max_off_proc_elmts);
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
                  = hypre_CTAlloc(HYPRE_Int,max_off_proc_elmts);
               hypre_AuxParCSRMatrixOffProcData(aux_matrix)
                  = hypre_CTAlloc(HYPRE_Complex,max_off_proc_elmts);
               off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
               off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
               off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            }
            else if (current_num_elmts + n > max_off_proc_elmts)
            {
               /* grow the queue */
               max_off_proc_elmts += 3*n;
               off_proc_i = hypre_TReAlloc(off_proc_i,HYPRE_Int,2*max_off_proc_elmts);
               off_proc_j = hypre_TReAlloc(off_proc_j,HYPRE_Int,max_off_proc_elmts);
               off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,
                                              max_off_proc_elmts);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix)
                  = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
            }
            /* AB - 4/6 - the row should be negative to indicate an add */
            /* UMY - 12/28/09 - now positive since we eliminated the feature of
               setting on other processors */
            /* off_proc_i[off_proc_i_indx++] = row; */
            off_proc_i[off_proc_i_indx++] = row;
            off_proc_i[off_proc_i_indx++] = n;
            for (i=0; i < n; i++)
            {
               off_proc_j[current_num_elmts] = cols[indx];
               off_proc_data[current_num_elmts++] = values[indx++];
            }
            hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
            hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix)
               = current_num_elmts;
         }
      }
   }
   /* not assembled */
   else
   {
      aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
      row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
      need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
      indx = 0;
      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols[ii];
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = row - row_partitioning[pstart];
            /* compute local row number */
            if (need_aux)
            {
               /* entries go into per-row aux arrays; overflow beyond the
                  current row allocation is buffered in tmp_j/tmp_data and
                  spliced in after a realloc */
               aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
               aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
               local_j = aux_j[row_local];
               local_data = aux_data[row_local];
               space = row_space[row_local];
               old_size = row_length[row_local];
               size = space - old_size;   /* free slots left in this row */
               if (size < n)
               {
                  size = n - size;
                  tmp_j = hypre_CTAlloc(HYPRE_Int,size);
                  tmp_data = hypre_CTAlloc(HYPRE_Complex,size);
               }
               else
               {
                  tmp_j = NULL;  /* tmp_data only used when tmp_j != NULL */
               }
               tmp_indx = 0;
               not_found = 1;
               size = old_size;
               for (i=0; i < n; i++)
               {
                  /* accumulate into an existing entry if present */
                  for (j=0; j < old_size; j++)
                  {
                     if (local_j[j] == cols[indx])
                     {
                        local_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     if (size < space)
                     {
                        local_j[size] = cols[indx];
                        local_data[size++] = values[indx];
                     }
                     else
                     {
                        tmp_j[tmp_indx] = cols[indx];
                        tmp_data[tmp_indx++] = values[indx];
                     }
                  }
                  not_found = 1;
                  indx++;
               }
               row_length[row_local] = size+tmp_indx;
               if (tmp_indx)
               {
                  /* grow the row and append the overflow buffer */
                  aux_j[row_local] = hypre_TReAlloc(aux_j[row_local],HYPRE_Int,
                                                    size+tmp_indx);
                  aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
                                                       HYPRE_Complex,size+tmp_indx);
                  row_space[row_local] = size+tmp_indx;
                  local_j = aux_j[row_local];
                  local_data = aux_data[row_local];
               }
               cnt = size;
               for (i=0; i < tmp_indx; i++)
               {
                  local_j[cnt] = tmp_j[i];
                  local_data[cnt++] = tmp_data[i];
               }
               if (tmp_j)
               {
                  hypre_TFree(tmp_j);
                  hypre_TFree(tmp_data);
               }
            }
            else /* insert immediately into data in ParCSRMatrix structure */
            {
               offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
               diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
               diag = hypre_ParCSRMatrixDiag(par_matrix);
               diag_i = hypre_CSRMatrixI(diag);
               diag_j = hypre_CSRMatrixJ(diag);
               diag_data = hypre_CSRMatrixData(diag);
               offd = hypre_ParCSRMatrixOffd(par_matrix);
               offd_i = hypre_CSRMatrixI(offd);
               if (num_procs > 1)
               {
                  offd_j = hypre_CSRMatrixJ(offd);
                  offd_data = hypre_CSRMatrixData(offd);
               }
               cnt_diag = diag_indx;
               cnt_offd = offd_indx;
               diag_space = diag_i[row_local+1];
               offd_space = offd_i[row_local+1];
               not_found = 1;
               for (i=0; i < n; i++)
               {
                  if (cols[indx] < col_0 || cols[indx] > col_n)
                     /* insert into offd */
                  {
                     /* offd_j still holds global column ids at this stage */
                     for (j=offd_i[row_local]; j < offd_indx; j++)
                     {
                        if (offd_j[j] == cols[indx])
                        {
                           offd_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_offd < offd_space)
                        {
                           offd_j[cnt_offd] = cols[indx];
                           offd_data[cnt_offd++] = values[indx];
                        }
                        else
                        {
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                              hypre_printf("Error in row %d ! Too many elements!\n",
                                           row);
                           /* return 1;*/
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  else /* insert into diag */
                  {
                     for (j=diag_i[row_local]; j < diag_indx; j++)
                     {
                        if (diag_j[j] == cols[indx])
                        {
                           diag_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_diag < diag_space)
                        {
                           diag_j[cnt_diag] = cols[indx];
                           diag_data[cnt_diag++] = values[indx];
                        }
                        else
                        {
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                              hypre_printf("Error in row %d ! Too many elements !\n",
                                           row);
                           /* return 1; */
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  indx++;
               }
               hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
               hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
            }
         }
         /* not my row */
         else
         {
            /* queue the entries for communication at assembly time */
            current_num_elmts
               = hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix);
            max_off_proc_elmts
               = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
            off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
            off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
            off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
            off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            if (!max_off_proc_elmts)
            {
               /* first off-proc entry: allocate the queue
                  (off_proc_i holds (row, count) pairs, hence 2x) */
               max_off_proc_elmts = hypre_max(n,1000);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) =
                  max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix)
                  = hypre_CTAlloc(HYPRE_Int,2*max_off_proc_elmts);
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
                  = hypre_CTAlloc(HYPRE_Int,max_off_proc_elmts);
               hypre_AuxParCSRMatrixOffProcData(aux_matrix)
                  = hypre_CTAlloc(HYPRE_Complex,max_off_proc_elmts);
               off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
               off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
               off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            }
            else if (current_num_elmts + n > max_off_proc_elmts)
            {
               /* grow the queue */
               max_off_proc_elmts += 3*n;
               off_proc_i = hypre_TReAlloc(off_proc_i,HYPRE_Int,2*max_off_proc_elmts);
               off_proc_j = hypre_TReAlloc(off_proc_j,HYPRE_Int,max_off_proc_elmts);
               off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,
                                              max_off_proc_elmts);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix)
                  = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
            }
            off_proc_i[off_proc_i_indx++] = row;
            off_proc_i[off_proc_i_indx++] = n;
            for (i=0; i < n; i++)
            {
               off_proc_j[current_num_elmts] = cols[indx];
               off_proc_data[current_num_elmts++] = values[indx++];
            }
            hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
            hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix)
               = current_num_elmts;
         }
      }
   }
   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixDestroyParCSR
*
* frees an IJMatrix
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixDestroyParCSR(hypre_IJMatrix *matrix)
{
   /* Free both pieces of ParCSR storage held by the IJMatrix: the
      assembled ParCSR matrix object and the auxiliary (translator)
      matrix used for staging entries. */
   hypre_ParCSRMatrix    *par_matrix =
      (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix);
   hypre_AuxParCSRMatrix *aux_matrix =
      (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);

   hypre_ParCSRMatrixDestroy(par_matrix);
   hypre_AuxParCSRMatrixDestroy(aux_matrix);

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJMatrixAssembleOffProcValsParCSR
*
* This is for handling set and get values calls to off-proc. entries -
* it is called from matrix assemble. There is an alternate version for
* when the assumed partition is being used.
*
*****************************************************************************/
#ifndef HYPRE_NO_GLOBAL_PARTITION
/* Global-partition version: exchanges the locally queued off-processor
 * (row, col, value) AddTo entries with their owning processors and applies
 * the received entries via hypre_IJMatrixAddToValuesParCSR.
 *
 * off_proc_i holds (row, count) pairs (off_proc_i_indx entries total, i.e.
 * off_proc_i_indx/2 queued row chunks); off_proc_j/off_proc_data hold the
 * concatenated column indices and values. Phases:
 *   1. count data and chunks per destination processor
 *   2. allgather (proc, chunks, count) triples so receivers learn their load
 *   3. pack per-destination integer and value buffers
 *   4. two nonblocking exchange waves: integers (row/len/cols), then values
 *   5. add each received row chunk into the local matrix
 * Returns hypre_error_flag. */
HYPRE_Int
hypre_IJMatrixAssembleOffProcValsParCSR( hypre_IJMatrix *matrix,
                                         HYPRE_Int off_proc_i_indx,
                                         HYPRE_Int max_off_proc_elmts,
                                         HYPRE_Int current_num_elmts,
                                         HYPRE_Int *off_proc_i,
                                         HYPRE_Int *off_proc_j,
                                         HYPRE_Complex *off_proc_data )
{
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   hypre_MPI_Request *requests = NULL;
   hypre_MPI_Status *status = NULL;
   HYPRE_Int i, ii, j, j2, jj, n, row;
   HYPRE_Int iii, iid, indx, ip;
   HYPRE_Int proc_id, num_procs, my_id;
   HYPRE_Int num_sends, num_sends3;
   HYPRE_Int num_recvs;
   HYPRE_Int num_requests;
   HYPRE_Int vec_start, vec_len;
   HYPRE_Int *send_procs;
   HYPRE_Int *chunks;               /* number of queued row chunks per dest proc */
   HYPRE_Int *send_i;
   HYPRE_Int *send_map_starts;
   HYPRE_Int *dbl_send_map_starts;  /* starts into the value (Complex) buffer */
   HYPRE_Int *recv_procs;
   HYPRE_Int *recv_chunks;
   HYPRE_Int *recv_i;
   HYPRE_Int *recv_vec_starts;
   HYPRE_Int *dbl_recv_vec_starts;
   HYPRE_Int *info;                 /* number of queued elements per dest proc */
   HYPRE_Int *int_buffer;
   HYPRE_Int *proc_id_mem;          /* destination proc of each queued chunk */
   HYPRE_Int *partitioning;
   HYPRE_Int *displs;
   HYPRE_Int *recv_buf;
   HYPRE_Complex *send_data;
   HYPRE_Complex *recv_data;

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   partitioning = hypre_IJMatrixRowPartitioning(matrix);

   /* phase 1: count elements (info) and chunks per destination processor */
   info = hypre_CTAlloc(HYPRE_Int,num_procs);
   chunks = hypre_CTAlloc(HYPRE_Int,num_procs);
   proc_id_mem = hypre_CTAlloc(HYPRE_Int,off_proc_i_indx/2);
   j=0;
   for (i=0; i < off_proc_i_indx; i++)
   {
      row = off_proc_i[i++];
      if (row < 0) row = -row-1;   /* legacy encoding: negative row meant "add" */
      n = off_proc_i[i];
      proc_id = hypre_FindProc(partitioning,row,num_procs);
      proc_id_mem[j++] = proc_id;
      info[proc_id] += n;
      chunks[proc_id]++;
   }

   /* determine send_procs and amount of data to be sent */
   num_sends = 0;
   for (i=0; i < num_procs; i++)
   {
      if (info[i])
      {
         num_sends++;
      }
   }
   send_procs = hypre_CTAlloc(HYPRE_Int,num_sends);
   send_map_starts = hypre_CTAlloc(HYPRE_Int,num_sends+1);
   dbl_send_map_starts = hypre_CTAlloc(HYPRE_Int,num_sends+1);
   num_sends3 = 3*num_sends;
   int_buffer = hypre_CTAlloc(HYPRE_Int,3*num_sends);
   j = 0;
   j2 = 0;
   send_map_starts[0] = 0;
   dbl_send_map_starts[0] = 0;
   for (i=0; i < num_procs; i++)
   {
      if (info[i])
      {
         send_procs[j++] = i;
         /* each chunk contributes 2 header ints (row, len) plus its columns */
         send_map_starts[j] = send_map_starts[j-1]+2*chunks[i]+info[i];
         dbl_send_map_starts[j] = dbl_send_map_starts[j-1]+info[i];
         int_buffer[j2++] = i;          /* destination proc */
         int_buffer[j2++] = chunks[i];  /* number of chunks */
         int_buffer[j2++] = info[i];    /* number of elements */
      }
   }
   hypre_TFree(chunks);

   /* phase 2: allgather the (dest, chunks, count) triples so every
      processor can discover who will be sending to it */
   hypre_MPI_Allgather(&num_sends3,1,HYPRE_MPI_INT,info,1,HYPRE_MPI_INT,comm);
   displs = hypre_CTAlloc(HYPRE_Int, num_procs+1);
   displs[0] = 0;
   for (i=1; i < num_procs+1; i++)
      displs[i] = displs[i-1]+info[i-1];
   recv_buf = hypre_CTAlloc(HYPRE_Int, displs[num_procs]);
   hypre_MPI_Allgatherv(int_buffer,num_sends3,HYPRE_MPI_INT,recv_buf,info,displs,
                        HYPRE_MPI_INT,comm);
   hypre_TFree(int_buffer);
   hypre_TFree(info);

   /* determine recv procs and amount of data to be received */
   num_recvs = 0;
   for (j=0; j < displs[num_procs]; j+=3)
   {
      if (recv_buf[j] == my_id)
         num_recvs++;
   }
   recv_procs = hypre_CTAlloc(HYPRE_Int,num_recvs);
   recv_chunks = hypre_CTAlloc(HYPRE_Int,num_recvs);
   recv_vec_starts = hypre_CTAlloc(HYPRE_Int,num_recvs+1);
   dbl_recv_vec_starts = hypre_CTAlloc(HYPRE_Int,num_recvs+1);
   j2 = 0;
   recv_vec_starts[0] = 0;
   dbl_recv_vec_starts[0] = 0;
   for (i=0; i < num_procs; i++)
   {
      for (j=displs[i]; j < displs[i+1]; j+=3)
      {
         if (recv_buf[j] == my_id)
         {
            recv_procs[j2] = i;
            recv_chunks[j2++] = recv_buf[j+1];
            recv_vec_starts[j2] = recv_vec_starts[j2-1]+2*recv_buf[j+1]
                                  +recv_buf[j+2];
            dbl_recv_vec_starts[j2] = dbl_recv_vec_starts[j2-1]+recv_buf[j+2];
         }
         if (j2 == num_recvs) break;
      }
   }
   hypre_TFree(recv_buf);
   hypre_TFree(displs);

   /* phase 3: set up data to be sent to send procs */
   /* send_i contains for each send proc : row no., no. of elmts and column
      indices, send_data contains corresponding values */
   send_i = hypre_CTAlloc(HYPRE_Int,send_map_starts[num_sends]);
   send_data = hypre_CTAlloc(HYPRE_Complex,dbl_send_map_starts[num_sends]);
   recv_i = hypre_CTAlloc(HYPRE_Int,recv_vec_starts[num_recvs]);
   recv_data = hypre_CTAlloc(HYPRE_Complex,dbl_recv_vec_starts[num_recvs]);
   j=0;
   jj=0;
   for (i=0; i < off_proc_i_indx; i++)
   {
      row = off_proc_i[i++];
      n = off_proc_i[i];
      proc_id = proc_id_mem[i/2];  /* i is odd here, so i/2 = chunk index */
      indx = hypre_BinarySearch(send_procs,proc_id,num_sends);
      iii = send_map_starts[indx];
      iid = dbl_send_map_starts[indx];
      send_i[iii++] = row;
      send_i[iii++] = n;
      for (ii = 0; ii < n; ii++)
      {
         send_i[iii++] = off_proc_j[jj];
         send_data[iid++] = off_proc_data[jj++];
      }
      /* advance the fill cursors; restored to start offsets below */
      send_map_starts[indx] = iii;
      dbl_send_map_starts[indx] = iid;
   }
   hypre_TFree(proc_id_mem);
   /* shift the (now advanced) cursors back into start offsets */
   for (i=num_sends; i > 0; i--)
   {
      send_map_starts[i] = send_map_starts[i-1];
      dbl_send_map_starts[i] = dbl_send_map_starts[i-1];
   }
   send_map_starts[0] = 0;
   dbl_send_map_starts[0] = 0;

   /* phase 4a: exchange the integer buffers (row, len, columns) */
   num_requests = num_recvs+num_sends;
   requests = hypre_CTAlloc(hypre_MPI_Request, num_requests);
   status = hypre_CTAlloc(hypre_MPI_Status, num_requests);
   j=0;
   for (i=0; i < num_recvs; i++)
   {
      vec_start = recv_vec_starts[i];
      vec_len = recv_vec_starts[i+1] - vec_start;
      ip = recv_procs[i];
      hypre_MPI_Irecv(&recv_i[vec_start], vec_len, HYPRE_MPI_INT, ip, 0, comm,
                      &requests[j++]);
   }
   for (i=0; i < num_sends; i++)
   {
      vec_start = send_map_starts[i];
      vec_len = send_map_starts[i+1] - vec_start;
      ip = send_procs[i];
      hypre_MPI_Isend(&send_i[vec_start], vec_len, HYPRE_MPI_INT, ip, 0, comm,
                      &requests[j++]);
   }
   if (num_requests)
   {
      hypre_MPI_Waitall(num_requests, requests, status);
   }

   /* phase 4b: exchange the value buffers */
   j=0;
   for (i=0; i < num_recvs; i++)
   {
      vec_start = dbl_recv_vec_starts[i];
      vec_len = dbl_recv_vec_starts[i+1] - vec_start;
      ip = recv_procs[i];
      hypre_MPI_Irecv(&recv_data[vec_start], vec_len, HYPRE_MPI_COMPLEX,
                      ip, 0, comm, &requests[j++]);
   }
   for (i=0; i < num_sends; i++)
   {
      vec_start = dbl_send_map_starts[i];
      vec_len = dbl_send_map_starts[i+1] - vec_start;
      ip = send_procs[i];
      hypre_MPI_Isend(&send_data[vec_start], vec_len, HYPRE_MPI_COMPLEX,
                      ip, 0, comm, &requests[j++]);
   }
   if (num_requests)
   {
      hypre_MPI_Waitall(num_requests, requests, status);
   }
   hypre_TFree(requests);
   hypre_TFree(status);
   hypre_TFree(send_i);
   hypre_TFree(send_data);
   hypre_TFree(send_procs);
   hypre_TFree(send_map_starts);
   hypre_TFree(dbl_send_map_starts);
   hypre_TFree(recv_procs);
   hypre_TFree(recv_vec_starts);
   hypre_TFree(dbl_recv_vec_starts);

   /* phase 5: add the received chunks into the local matrix; each chunk
      in recv_i is laid out as [row, len, col_0 ... col_{len-1}] */
   j = 0;
   j2 = 0;
   for (i=0; i < num_recvs; i++)
   {
      for (ii=0; ii < recv_chunks[i]; ii++)
      {
         row = recv_i[j];
         hypre_IJMatrixAddToValuesParCSR(matrix,1,&recv_i[j+1],&row,
                                         &recv_i[j+2],&recv_data[j2]);
         j2 += recv_i[j+1];
         j += recv_i[j+1]+2;
      }
   }
   hypre_TFree(recv_chunks);
   hypre_TFree(recv_i);
   hypre_TFree(recv_data);

   return hypre_error_flag;
}
#else
/* assumed partition version */
HYPRE_Int
hypre_IJMatrixAssembleOffProcValsParCSR( hypre_IJMatrix *matrix,
HYPRE_Int off_proc_i_indx,
HYPRE_Int max_off_proc_elmts,
HYPRE_Int current_num_elmts,
HYPRE_Int *off_proc_i,
HYPRE_Int *off_proc_j,
HYPRE_Complex *off_proc_data )
{
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_Int i, j, k, in_i;
HYPRE_Int myid;
HYPRE_Int proc_id, last_proc, prev_id, tmp_id;
HYPRE_Int max_response_size;
HYPRE_Int global_num_cols;
HYPRE_Int global_first_col;
HYPRE_Int global_first_row;
HYPRE_Int ex_num_contacts = 0, num_rows = 0;
HYPRE_Int range_start, range_end;
HYPRE_Int num_elements;
HYPRE_Int storage;
HYPRE_Int indx;
HYPRE_Int row, num_ranges;
HYPRE_Int num_recvs;
HYPRE_Int counter, upper_bound;
HYPRE_Int num_real_procs;
HYPRE_Int /*current_proc,*/ original_proc_indx;
HYPRE_Int *row_list=NULL, *row_list_num_elements=NULL;
HYPRE_Int *a_proc_id=NULL, *orig_order=NULL;
HYPRE_Int *real_proc_id = NULL, *us_real_proc_id = NULL;
HYPRE_Int *ex_contact_procs = NULL, *ex_contact_vec_starts = NULL, *ex_contact_buf = NULL;
HYPRE_Int *recv_starts=NULL;
HYPRE_Int *response_buf = NULL, *response_buf_starts=NULL;
HYPRE_Int *num_rows_per_proc = NULL, *num_elements_total = NULL;
HYPRE_Int *argsort_contact_procs = NULL;
HYPRE_Int obj_size_bytes, int_size, complex_size;
HYPRE_Int tmp_int;
HYPRE_Int *col_ptr;
HYPRE_Int *int_data = NULL;
HYPRE_Int int_data_size = 0, complex_data_size = 0;
void *void_contact_buf = NULL;
void *index_ptr;
void *recv_data_ptr;
HYPRE_Complex tmp_complex;
HYPRE_Complex *col_data_ptr;
HYPRE_Complex *complex_data = NULL;
hypre_DataExchangeResponse response_obj1, response_obj2;
hypre_ProcListElements send_proc_obj;
hypre_IJAssumedPart *apart;
hypre_MPI_Comm_rank(comm, &myid);
global_num_cols = hypre_IJMatrixGlobalNumCols(matrix);
global_first_col = hypre_IJMatrixGlobalFirstCol(matrix);
global_first_row = hypre_IJMatrixGlobalFirstRow(matrix);
num_rows = off_proc_i_indx/2;
/* verify that we have created the assumed partition */
if (hypre_IJMatrixAssumedPart(matrix) == NULL)
{
hypre_IJMatrixCreateAssumedPartition(matrix);
}
apart = (hypre_IJAssumedPart*) hypre_IJMatrixAssumedPart(matrix);
/*if (hypre_ParCSRMatrixAssumedPartition(par_matrix) == NULL)
{
hypre_ParCSRMatrixCreateAssumedPartition(par_matrix);
}
apart = hypre_ParCSRMatrixAssumedPartition(par_matrix);*/
row_list = hypre_CTAlloc(HYPRE_Int, num_rows);
row_list_num_elements = hypre_CTAlloc(HYPRE_Int, num_rows);
a_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows);
orig_order = hypre_CTAlloc(HYPRE_Int, num_rows);
real_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows);
/* get the assumed processor id for each row */
if (num_rows > 0 )
{
for (i=0; i < num_rows; i++)
{
row = off_proc_i[i*2];
if (row < 0) row = -row-1;
row_list[i] = row;
row_list_num_elements[i] = off_proc_i[i*2+1];
hypre_GetAssumedPartitionProcFromRow(comm, row, global_first_row,
global_num_cols, &proc_id);
a_proc_id[i] = proc_id;
orig_order[i] = i;
}
/* now we need to find the actual order of each row - sort on row -
this will result in proc ids sorted also...*/
hypre_qsort3i(row_list, a_proc_id, orig_order, 0, num_rows -1);
/* calculate the number of contacts */
ex_num_contacts = 1;
last_proc = a_proc_id[0];
for (i=1; i < num_rows; i++)
{
if (a_proc_id[i] > last_proc)
{
ex_num_contacts++;
last_proc = a_proc_id[i];
}
}
}
/* now we will go through a create a contact list - need to contact assumed
processors and find out who the actual row owner is - we will contact with
a range (2 numbers) */
ex_contact_procs = hypre_CTAlloc(HYPRE_Int, ex_num_contacts);
ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, ex_num_contacts+1);
ex_contact_buf = hypre_CTAlloc(HYPRE_Int, ex_num_contacts*2);
counter = 0;
range_end = -1;
for (i=0; i< num_rows; i++)
{
if (row_list[i] > range_end)
{
/* assumed proc */
proc_id = a_proc_id[i];
/* end of prev. range */
if (counter > 0) ex_contact_buf[counter*2 - 1] = row_list[i-1];
/*start new range*/
ex_contact_procs[counter] = proc_id;
ex_contact_vec_starts[counter] = counter*2;
ex_contact_buf[counter*2] = row_list[i];
counter++;
hypre_GetAssumedPartitionRowRange(comm, proc_id, global_first_col, global_num_cols,
&range_start, &range_end);
}
}
/*finish the starts*/
ex_contact_vec_starts[counter] = counter*2;
/*finish the last range*/
if (counter > 0)
ex_contact_buf[counter*2 - 1] = row_list[num_rows - 1];
/*don't allocate space for responses */
/* create response object - can use same fill response as used in the commpkg
routine */
response_obj1.fill_response = hypre_RangeFillResponseIJDetermineRecvProcs;
response_obj1.data1 = apart; /* this is necessary so we can fill responses*/
response_obj1.data2 = NULL;
max_response_size = 6; /* 6 means we can fit 3 ranges*/
hypre_DataExchangeList(ex_num_contacts, ex_contact_procs,
ex_contact_buf, ex_contact_vec_starts, sizeof(HYPRE_Int),
sizeof(HYPRE_Int), &response_obj1, max_response_size, 1,
comm, (void**) &response_buf, &response_buf_starts);
/* now response_buf contains a proc_id followed by a range upper bound */
hypre_TFree(ex_contact_procs);
hypre_TFree(ex_contact_buf);
hypre_TFree(ex_contact_vec_starts);
hypre_TFree(a_proc_id);
/*how many ranges were returned?*/
num_ranges = response_buf_starts[ex_num_contacts];
num_ranges = num_ranges/2;
prev_id = -1;
j = 0;
counter = 0;
num_real_procs = 0;
/* loop through ranges - create a list of actual processor ids*/
for (i=0; i<num_ranges; i++)
{
upper_bound = response_buf[i*2+1];
counter = 0;
tmp_id = response_buf[i*2];
/* loop through row_list entries - counting how many are in the range */
while (j < num_rows && row_list[j] <= upper_bound)
{
real_proc_id[j] = tmp_id;
j++;
counter++;
}
if (counter > 0 && tmp_id != prev_id)
{
num_real_procs++;
}
prev_id = tmp_id;
}
/* now we have the list of real procesors ids (real_proc_id) - and the number
of distinct ones - so now we can set up data to be sent - we have
HYPRE_Int data and HYPRE_Complex data. that we will need to pack
together */
/* first find out how many rows and elements we need to send per proc - so we
can do storage */
ex_contact_procs = hypre_CTAlloc(HYPRE_Int, num_real_procs);
num_rows_per_proc = hypre_CTAlloc(HYPRE_Int, num_real_procs);
num_elements_total = hypre_CTAlloc(HYPRE_Int, num_real_procs);
counter = 0;
if (num_real_procs > 0 )
{
ex_contact_procs[0] = real_proc_id[0];
num_rows_per_proc[0] = 1;
num_elements_total[0] = row_list_num_elements[orig_order[0]];
/* loop through real procs - these are sorted (row_list is sorted also)*/
for (i=1; i < num_rows; i++)
{
if (real_proc_id[i] == ex_contact_procs[counter]) /* same processor */
{
num_rows_per_proc[counter] += 1; /*another row */
num_elements_total[counter] += row_list_num_elements[orig_order[i]];
}
else /* new processor */
{
counter++;
ex_contact_procs[counter] = real_proc_id[i];
num_rows_per_proc[counter] = 1;
num_elements_total[counter] = row_list_num_elements[orig_order[i]];
}
}
}
/* to pack together, we need to use the largest obj. size of
(HYPRE_Int) and (HYPRE_Complex) - if these are much different, then we are
wasting some storage, but I do not think that it will be a
large amount since this function should not be used on really
large amounts of data anyway*/
int_size = sizeof(HYPRE_Int);
complex_size = sizeof(HYPRE_Complex);
obj_size_bytes = hypre_max(int_size, complex_size);
/* set up data to be sent to send procs */
/* for each proc, ex_contact_buf contains #rows, row #,
no. elements, col indicies, col data, row #, no. elements, col
indicies, col data, etc. */
/* first calculate total storage and make vec_starts arrays */
storage = 0;
ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, num_real_procs + 1);
ex_contact_vec_starts[0] = -1;
for (i=0; i < num_real_procs; i++)
{
storage += 1 + 2 * num_rows_per_proc[i] + 2* num_elements_total[i];
ex_contact_vec_starts[i+1] = -storage-1; /* need negative for next loop */
}
hypre_TFree(num_elements_total);
/*void_contact_buf = hypre_MAlloc(storage*obj_size_bytes);*/
void_contact_buf = hypre_CAlloc(storage, obj_size_bytes);
index_ptr = void_contact_buf; /* step through with this index */
/* for each proc: #rows, row #, no. elements,
col indicies, col data, row #, no. elements, col indicies, col data, etc. */
/* un-sort real_proc_id - we want to access data arrays in order, so
cheaper to do this*/
us_real_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows);
for (i=0; i < num_rows; i++)
{
us_real_proc_id[orig_order[i]] = real_proc_id[i];
}
hypre_TFree(real_proc_id);
counter = 0; /* index into data arrays */
prev_id = -1;
for (i=0; i < num_rows; i++)
{
proc_id = us_real_proc_id[i];
/* can't use row list[i] - you loose the negative signs that differentiate
add/set values */
row = off_proc_i[i*2];
num_elements = row_list_num_elements[i];
/* find position of this processor */
indx = hypre_BinarySearch(ex_contact_procs, proc_id, num_real_procs);
in_i = ex_contact_vec_starts[indx];
index_ptr = (void *) ((char *) void_contact_buf + in_i*obj_size_bytes);
/* first time for this processor - add the number of rows to the buffer */
if (in_i < 0)
{
in_i = -in_i - 1;
/* re-calc. index_ptr since in_i was negative */
index_ptr = (void *) ((char *) void_contact_buf + in_i*obj_size_bytes);
tmp_int = num_rows_per_proc[indx];
memcpy( index_ptr, &tmp_int, int_size);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
}
/* add row # */
memcpy( index_ptr, &row, int_size);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
/* add number of elements */
memcpy( index_ptr, &num_elements, int_size);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
/* now add col indices */
for (j=0; j< num_elements; j++)
{
tmp_int = off_proc_j[counter+j]; /* col number */
memcpy( index_ptr, &tmp_int, int_size);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i ++;
}
/* now add data */
for (j=0; j< num_elements; j++)
{
tmp_complex = off_proc_data[counter++]; /* value */
memcpy( index_ptr, &tmp_complex, complex_size);
index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
in_i++;
}
/* increment the indexes to keep track of where we are - we
* adjust below to be actual starts*/
ex_contact_vec_starts[indx] = in_i;
}
/* some clean up */
hypre_TFree(response_buf);
hypre_TFree(response_buf_starts);
hypre_TFree(us_real_proc_id);
hypre_TFree(orig_order);
hypre_TFree(row_list);
hypre_TFree(row_list_num_elements);
hypre_TFree(num_rows_per_proc);
for (i=num_real_procs; i > 0; i--)
{
ex_contact_vec_starts[i] = ex_contact_vec_starts[i-1];
}
ex_contact_vec_starts[0] = 0;
/* now send the data */
/***********************************/
/* first get the interger info in send_proc_obj */
/* the response we expect is just a confirmation*/
response_buf = NULL;
response_buf_starts = NULL;
/*build the response object*/
/* use the send_proc_obj for the info kept from contacts */
/*estimate inital storage allocation */
send_proc_obj.length = 0;
send_proc_obj.storage_length = num_real_procs + 5;
send_proc_obj.id =
hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1);
send_proc_obj.vec_starts =
hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1);
send_proc_obj.vec_starts[0] = 0;
send_proc_obj.element_storage_length = storage + 20;
send_proc_obj.v_elements =
hypre_MAlloc(obj_size_bytes*send_proc_obj.element_storage_length);
response_obj2.fill_response = hypre_FillResponseIJOffProcVals;
response_obj2.data1 = NULL;
response_obj2.data2 = &send_proc_obj;
max_response_size = 0;
hypre_DataExchangeList(num_real_procs, ex_contact_procs,
void_contact_buf, ex_contact_vec_starts, obj_size_bytes,
0, &response_obj2, max_response_size, 2,
comm, (void **) &response_buf, &response_buf_starts);
hypre_TFree(response_buf);
hypre_TFree(response_buf_starts);
hypre_TFree(ex_contact_procs);
hypre_TFree(void_contact_buf);
hypre_TFree(ex_contact_vec_starts);
/* Now we can unpack the send_proc_objects and call set
and add to values functions. We unpack messages in a
deterministic order, using processor rank */
num_recvs = send_proc_obj.length;
argsort_contact_procs = hypre_CTAlloc(HYPRE_Int, num_recvs);
for(i=0; i < num_recvs; i++)
{
argsort_contact_procs[i] = i;
}
/* This sort's the id array, but the original indices are stored in
* argsort_contact_procs */
hypre_qsort2i( send_proc_obj.id, argsort_contact_procs, 0, num_recvs-1 );
/* alias */
recv_data_ptr = send_proc_obj.v_elements;
recv_starts = send_proc_obj.vec_starts;
for (i=0; i < num_recvs; i++)
{
/* Find the current processor in order, and reset recv_data_ptr to that processor's message */
original_proc_indx = argsort_contact_procs[i];
/*current_proc = send_proc_obj.id[i];*/
indx = recv_starts[original_proc_indx];
recv_data_ptr = (void *) ((char *) send_proc_obj.v_elements + indx*obj_size_bytes);
/* get the number of rows for this recv */
memcpy( &num_rows, recv_data_ptr, int_size);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
for (j=0; j < num_rows; j++) /* for each row: unpack info */
{
/* row # */
memcpy( &row, recv_data_ptr, int_size);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
/* num elements for this row */
memcpy( &num_elements, recv_data_ptr, int_size);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
indx++;
/* col indices */
if (int_size == obj_size_bytes)
{
col_ptr = (HYPRE_Int *) recv_data_ptr;
recv_data_ptr =
(void *) ((char *)recv_data_ptr + num_elements*obj_size_bytes);
}
else /* copy data */
{
if (int_data_size < num_elements)
{
int_data = hypre_TReAlloc(int_data, HYPRE_Int, num_elements + 10);
}
for (k=0; k< num_elements; k++)
{
memcpy( &int_data[k], recv_data_ptr, int_size);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
}
col_ptr = int_data;
}
/* col data */
if (complex_size == obj_size_bytes)
{
col_data_ptr = (HYPRE_Complex *) recv_data_ptr;
recv_data_ptr =
(void *) ((char *)recv_data_ptr + num_elements*obj_size_bytes);
}
else /* copy data */
{
if (complex_data_size < num_elements)
{
complex_data =
hypre_TReAlloc(complex_data, HYPRE_Complex, num_elements + 10);
}
for (k=0; k< num_elements; k++)
{
memcpy( &complex_data[k], recv_data_ptr, complex_size);
recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
}
col_data_ptr = complex_data;
}
hypre_IJMatrixAddToValuesParCSR(matrix,1,&num_elements,&row,
col_ptr,col_data_ptr);
indx += (num_elements*2);
}
}
hypre_TFree(send_proc_obj.v_elements);
hypre_TFree(send_proc_obj.vec_starts);
hypre_TFree(send_proc_obj.id);
hypre_TFree(argsort_contact_procs);
if (int_data) hypre_TFree(int_data);
if (complex_data) hypre_TFree(complex_data);
return hypre_error_flag;
}
#endif
/*--------------------------------------------------------------------
* hypre_FillResponseIJOffProcVals
* Fill response function for the previous function (2nd data exchange)
*--------------------------------------------------------------------*/
/* Fill-response callback for the second data exchange in
 * hypre_IJMatrixAssembleOffProcValsParCSR: appends the incoming packed
 * contact buffer to the send_proc_obj accumulator (growing its storage
 * as needed) and returns an empty confirmation message. */
HYPRE_Int
hypre_FillResponseIJOffProcVals(void *p_recv_contact_buf,
                                HYPRE_Int contact_size,
                                HYPRE_Int contact_proc,
                                void *ro,
                                MPI_Comm comm,
                                void **p_send_response_buf,
                                HYPRE_Int *response_message_size )
{
   HYPRE_Int rank;
   HYPRE_Int slot;          /* which contact this is (index into id/vec_starts) */
   HYPRE_Int elmt_start;    /* element offset where this contact's data begins */
   HYPRE_Int new_capacity;
   HYPRE_Int obj_size;      /* width of one padded buffer slot */
   void *dest;
   hypre_DataExchangeResponse *response_obj = (hypre_DataExchangeResponse*) ro;
   hypre_ProcListElements *send_proc_obj = (hypre_ProcListElements*) response_obj->data2;

   /* slots are padded to the larger of the two element types */
   obj_size = hypre_max(sizeof(HYPRE_Int), sizeof(HYPRE_Complex));
   hypre_MPI_Comm_rank(comm, &rank);

   /* grow the per-contact bookkeeping arrays when they are full */
   if (send_proc_obj->length == send_proc_obj->storage_length)
   {
      send_proc_obj->storage_length += 20; /* room for 20 more contacts */
      send_proc_obj->vec_starts = hypre_TReAlloc(send_proc_obj->vec_starts, HYPRE_Int,
                                                 send_proc_obj->storage_length + 1);
      if (send_proc_obj->id != NULL)
      {
         send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id, HYPRE_Int,
                                            send_proc_obj->storage_length + 1);
      }
   }

   slot = send_proc_obj->length;
   elmt_start = send_proc_obj->vec_starts[slot]; /* current number of elements */
   if (send_proc_obj->id != NULL)
   {
      send_proc_obj->id[slot] = contact_proc;
   }

   /* grow the element buffer if this contact does not fit */
   if (send_proc_obj->element_storage_length < elmt_start + contact_size)
   {
      new_capacity = elmt_start + hypre_max(contact_size, 100);
      send_proc_obj->v_elements = hypre_ReAlloc((char*)send_proc_obj->v_elements,
                                                new_capacity*obj_size);
      send_proc_obj->element_storage_length = new_capacity;
   }

   /* append the received buffer and record where the next contact starts */
   dest = (void *) ((char *) send_proc_obj->v_elements + elmt_start*obj_size);
   memcpy(dest, p_recv_contact_buf, obj_size*contact_size);
   send_proc_obj->vec_starts[slot+1] = elmt_start + contact_size;
   send_proc_obj->length++;

   /* output - no message to return (confirmation) */
   *response_message_size = 0;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------*/
/* Binary search over a sorted partition array: returns the index p such
 * that list[p] <= value < list[p+1], or -1 when value lies outside
 * [list[0], list[list_length]).  list must have list_length+1 entries. */
HYPRE_Int hypre_FindProc(HYPRE_Int *list, HYPRE_Int value, HYPRE_Int list_length)
{
   HYPRE_Int lo = 0;
   HYPRE_Int hi = list_length;
   HYPRE_Int mid;

   /* guard clause: value outside the covered range */
   if (value < list[lo] || value >= list[hi])
   {
      return -1;
   }

   /* invariant: list[lo] <= value < list[hi] */
   while (lo + 1 < hi)
   {
      mid = (lo + hi) / 2;
      if (value < list[mid])
      {
         hi = mid;
      }
      else
      {
         lo = mid;
      }
   }
   return lo;
}
/******************************************************************************
*
* hypre_IJMatrixAssembleParCSR
*
* assembles IJMatrix from AuxParCSRMatrix auxiliary structure
*****************************************************************************/
/* Assemble the IJ matrix: flush cancelled/off-processor entries, move
 * data from the auxiliary structure into the ParCSR diag/offd CSR parts,
 * place each diagonal entry first in its row, build col_map_offd, and
 * destroy the auxiliary matrix.  Returns hypre_error_flag. */
HYPRE_Int
hypre_IJMatrixAssembleParCSR(hypre_IJMatrix *matrix)
{
MPI_Comm comm = hypre_IJMatrixComm(matrix);
hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix);
hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
HYPRE_Int *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
HYPRE_Int *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
HYPRE_Int *diag_j;
HYPRE_Int *offd_j;
HYPRE_Complex *diag_data;
HYPRE_Complex *offd_data;
HYPRE_Int i, j, j0;
HYPRE_Int num_cols_offd;
HYPRE_Int *diag_pos;
HYPRE_Int *col_map_offd;
HYPRE_Int *row_length;
HYPRE_Int **aux_j;
HYPRE_Complex **aux_data;
HYPRE_Int my_id, num_procs;
HYPRE_Int num_rows;
HYPRE_Int i_diag, i_offd;
HYPRE_Int col_0, col_n;
HYPRE_Int nnz_offd;
HYPRE_Int *aux_offd_j;
HYPRE_Complex temp;
#ifdef HYPRE_NO_GLOBAL_PARTITION
HYPRE_Int base = hypre_IJMatrixGlobalFirstCol(matrix);
#else
HYPRE_Int base = col_partitioning[0];
#endif
HYPRE_Int off_proc_i_indx;
HYPRE_Int max_off_proc_elmts;
HYPRE_Int current_num_elmts;
HYPRE_Int *off_proc_i;
HYPRE_Int *off_proc_j;
HYPRE_Complex *off_proc_data;
HYPRE_Int offd_proc_elmts;
HYPRE_Int new_off_proc_i_indx;
HYPRE_Int cancel_indx;
HYPRE_Int col_indx;
HYPRE_Int current_indx;
HYPRE_Int current_i;
HYPRE_Int row_len;
HYPRE_Int max_num_threads;
HYPRE_Int aux_flag, aux_flag_global;
max_num_threads = hypre_NumThreads();
/* first find out if anyone has an aux_matrix, and create one if you don't
 * have one, but other procs do */
aux_flag = 0;
aux_flag_global = 0;
if(aux_matrix)
{ aux_flag = 1; }
hypre_MPI_Allreduce(&aux_flag, &aux_flag_global, 1, HYPRE_MPI_INT, hypre_MPI_SUM, comm);
if(aux_flag_global && (!aux_flag))
{
hypre_MPI_Comm_rank(comm, &my_id);
num_rows = row_partitioning[my_id+1] - row_partitioning[my_id];
hypre_AuxParCSRMatrixCreate(&aux_matrix, num_rows, num_rows, NULL);
hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
hypre_IJMatrixTranslator(matrix) = aux_matrix;
}
if (aux_matrix)
{
/* first delete all cancelled elements */
/* cancelled entries were marked with col index -1 by SetValues on
 * not-owned rows; compact them out of the off-proc arrays here */
cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
if (cancel_indx)
{
current_num_elmts=hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix);
off_proc_i=hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j=hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data=hypre_AuxParCSRMatrixOffProcData(aux_matrix);
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
col_indx = 0;      /* read position in off_proc_j/data */
current_i = 0;     /* write position in off_proc_i (pairs) */
current_indx = 0;  /* write position in off_proc_j/data */
new_off_proc_i_indx = off_proc_i_indx;
for (i=0; i < off_proc_i_indx; i= i+2)
{
row_len = off_proc_i[i+1];
for (j=0; j < off_proc_i[i+1]; j++)
{
if (off_proc_j[col_indx] == -1)
{
col_indx++;
row_len--;
current_num_elmts--;
}
else
{
off_proc_j[current_indx] = off_proc_j[col_indx];
off_proc_data[current_indx++] = off_proc_data[col_indx++];
}
}
/* keep the row only if it still has entries after compaction */
if (row_len)
{
off_proc_i[current_i] = off_proc_i[i];
off_proc_i[current_i+1] = row_len;
current_i += 2;
}
else
{
new_off_proc_i_indx -= 2;
}
}
hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = new_off_proc_i_indx;
hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix) = current_num_elmts;
}
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
/* collective: every proc must call AssembleOffProcVals if ANY proc
 * holds off-processor entries */
hypre_MPI_Allreduce(&off_proc_i_indx, &offd_proc_elmts, 1, HYPRE_MPI_INT,
hypre_MPI_SUM, comm);
if (offd_proc_elmts)
{
max_off_proc_elmts=hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
current_num_elmts=hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix);
off_proc_i=hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j=hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
off_proc_data=hypre_AuxParCSRMatrixOffProcData(aux_matrix);
hypre_IJMatrixAssembleOffProcValsParCSR(
matrix,off_proc_i_indx, max_off_proc_elmts, current_num_elmts,
off_proc_i, off_proc_j, off_proc_data);
}
}
if (hypre_IJMatrixAssembleFlag(matrix) == 0)
{
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
num_rows = row_partitioning[1] - row_partitioning[0];
col_0 = col_partitioning[0];
col_n = col_partitioning[1]-1;
#else
num_rows = row_partitioning[my_id+1] - row_partitioning[my_id];
col_0 = col_partitioning[my_id];
col_n = col_partitioning[my_id+1]-1;
#endif
/* move data into ParCSRMatrix if not there already */
if (hypre_AuxParCSRMatrixNeedAux(aux_matrix))
{
HYPRE_Int *diag_array, *offd_array;
diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads);
offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads);
aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
/* diag_pos[i] = position of the diagonal entry in row i's aux
 * arrays (-1 if none); used to emit the diagonal first */
diag_pos = hypre_CTAlloc(HYPRE_Int, num_rows);
i_diag = 0;
i_offd = 0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i, j, i_diag, i_offd)
#endif
{
HYPRE_Int *local_j;
HYPRE_Complex *local_data;
HYPRE_Int rest, size, ns, ne;
HYPRE_Int num_threads, my_thread_num;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
/* split rows [ns, ne) evenly over threads, giving the first
 * `rest` threads one extra row */
size = num_rows/num_threads;
rest = num_rows - size*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(size + 1);
ne = (my_thread_num+1)*(size + 1);
}
else
{
ns = my_thread_num*size + rest;
ne = (my_thread_num+1)*size + rest;
}
/* pass 1: count this thread's diag/offd nonzeros */
i_diag = 0;
i_offd = 0;
for (i=ns; i < ne; i++)
{
local_j = aux_j[i];
local_data = aux_data[i];
diag_pos[i] = -1;
for (j=0; j < row_length[i]; j++)
{
if (local_j[j] < col_0 || local_j[j] > col_n)
i_offd++;
else
{
i_diag++;
if (local_j[j]-col_0 == i) diag_pos[i] = j;
}
}
}
diag_array[my_thread_num] = i_diag;
offd_array[my_thread_num] = i_offd;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* thread 0: prefix-sum the per-thread counts and allocate the
 * CSR arrays (between barriers, so all counts are in and no
 * thread reads the arrays before they exist) */
if (my_thread_num == 0)
{
i_diag = 0;
i_offd = 0;
for (i = 0; i < num_threads; i++)
{
i_diag += diag_array[i];
i_offd += offd_array[i];
diag_array[i] = i_diag;
offd_array[i] = i_offd;
}
diag_i[num_rows] = i_diag;
offd_i[num_rows] = i_offd;
if (hypre_CSRMatrixJ(diag))
hypre_TFree(hypre_CSRMatrixJ(diag));
if (hypre_CSRMatrixData(diag))
hypre_TFree(hypre_CSRMatrixData(diag));
if (hypre_CSRMatrixJ(offd))
hypre_TFree(hypre_CSRMatrixJ(offd));
if (hypre_CSRMatrixData(offd))
hypre_TFree(hypre_CSRMatrixData(offd));
diag_j = hypre_CTAlloc(HYPRE_Int,i_diag);
diag_data = hypre_CTAlloc(HYPRE_Complex,i_diag);
if (i_offd > 0)
{
offd_j = hypre_CTAlloc(HYPRE_Int,i_offd);
offd_data = hypre_CTAlloc(HYPRE_Complex,i_offd);
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* pass 2: each thread starts filling at the end of the previous
 * thread's range (exclusive prefix sum) */
if (my_thread_num)
{
i_diag = diag_array[my_thread_num-1];
i_offd = offd_array[my_thread_num-1];
}
else
{
i_diag = 0;
i_offd = 0;
}
for (i=ns; i < ne; i++)
{
diag_i[i] = i_diag;
offd_i[i] = i_offd;
local_j = aux_j[i];
local_data = aux_data[i];
/* emit the diagonal entry first, per ParCSR convention */
if (diag_pos[i] > -1)
{
diag_j[i_diag] = local_j[diag_pos[i]] - col_0;
diag_data[i_diag++] = local_data[diag_pos[i]];
}
for (j=0; j < row_length[i]; j++)
{
if (local_j[j] < col_0 || local_j[j] > col_n)
{
offd_j[i_offd] = local_j[j];
offd_data[i_offd++] = local_data[j];
}
else if (j != diag_pos[i])
{
diag_j[i_diag] = local_j[j] - col_0;
diag_data[i_diag++] = local_data[j];
}
}
}
} /* end parallel region */
hypre_TFree(diag_array);
hypre_TFree(offd_array);
hypre_CSRMatrixJ(diag) = diag_j;
hypre_CSRMatrixData(diag) = diag_data;
hypre_CSRMatrixNumNonzeros(diag) = diag_i[num_rows];
if (offd_i[num_rows] > 0)
{
hypre_CSRMatrixJ(offd) = offd_j;
hypre_CSRMatrixData(offd) = offd_data;
}
hypre_CSRMatrixNumNonzeros(offd) = offd_i[num_rows];
hypre_TFree(diag_pos);
}
else
{
/* move diagonal element into first space */
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private (i,j,j0,temp)
#endif
for (i=0; i < num_rows; i++)
{
j0 = diag_i[i];
for (j=j0; j < diag_i[i+1]; j++)
{
/* convert global to local column index; swap the diagonal
 * entry (if found) into the first slot of the row */
diag_j[j] -= col_0;
if (diag_j[j] == i)
{
temp = diag_data[j0];
diag_data[j0] = diag_data[j];
diag_data[j] = temp;
diag_j[j] = diag_j[j0];
diag_j[j0] = i;
}
}
}
offd_j = hypre_CSRMatrixJ(offd);
}
/* generate the nonzero rows inside offd and diag by calling */
hypre_CSRMatrixSetRownnz(diag);
hypre_CSRMatrixSetRownnz(offd);
/* generate col_map_offd */
nnz_offd = offd_i[num_rows];
if (nnz_offd)
{
/* sort a copy of offd_j and keep unique values -> col_map_offd */
aux_offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd);
for (i=0; i < nnz_offd; i++)
aux_offd_j[i] = offd_j[i];
hypre_qsort0(aux_offd_j,0,nnz_offd-1);
num_cols_offd = 1;
for (i=0; i < nnz_offd-1; i++)
{
if (aux_offd_j[i+1] > aux_offd_j[i])
aux_offd_j[num_cols_offd++] = aux_offd_j[i+1];
}
col_map_offd = hypre_CTAlloc(HYPRE_Int,num_cols_offd);
for (i=0; i < num_cols_offd; i++)
col_map_offd[i] = aux_offd_j[i];
/* replace global offd column indices by their position in col_map_offd */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i)
#endif
for (i=0; i < nnz_offd; i++)
offd_j[i]=hypre_BinarySearch(col_map_offd,offd_j[i],num_cols_offd);
if (base)
{
for (i=0; i < num_cols_offd; i++)
col_map_offd[i] -= base;
}
hypre_ParCSRMatrixColMapOffd(par_matrix) = col_map_offd;
hypre_CSRMatrixNumCols(offd) = num_cols_offd;
hypre_TFree(aux_offd_j);
}
hypre_IJMatrixAssembleFlag(matrix) = 1;
}
/* the auxiliary structure is no longer needed after assembly */
hypre_AuxParCSRMatrixDestroy(aux_matrix);
hypre_IJMatrixTranslator(matrix) = NULL;
return hypre_error_flag;
}
/******************************************************************************
*
* IJMatrix_ParCSR interface
*
*****************************************************************************/
#include "_hypre_IJ_mv.h"
#include "../HYPRE.h"
/******************************************************************************
*
* hypre_IJMatrixSetValuesOMPParCSR
*
* sets values in an IJMatrix before assembly,
* use of this routine requires that the values in rows are different from each
* other, i.e rows[i] != rows[j] for i != j
* to ensure accurate threading
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixSetValuesOMPParCSR( hypre_IJMatrix *matrix,
HYPRE_Int nrows,
HYPRE_Int *ncols,
const HYPRE_Int *rows,
const HYPRE_Int *cols,
const HYPRE_Complex *values )
{
hypre_ParCSRMatrix *par_matrix;
hypre_CSRMatrix *diag, *offd;
hypre_AuxParCSRMatrix *aux_matrix;
HYPRE_Int *row_partitioning;
HYPRE_Int *col_partitioning;
MPI_Comm comm = hypre_IJMatrixComm(matrix);
HYPRE_Int num_procs, my_id;
HYPRE_Int col_0, col_n;
HYPRE_Int cancel_indx;
HYPRE_Int **aux_j;
HYPRE_Complex **aux_data;
HYPRE_Int *row_length, *row_space;
HYPRE_Int need_aux;
HYPRE_Int *diag_i;
HYPRE_Int *diag_j;
HYPRE_Complex *diag_data;
HYPRE_Int *offd_i;
HYPRE_Int *offd_j;
HYPRE_Complex *offd_data;
HYPRE_Int first, pstart;
/*HYPRE_Int current_num_elmts;*/
/*HYPRE_Int max_off_proc_elmts;*/
HYPRE_Int off_proc_i_indx;
HYPRE_Int *off_proc_i;
HYPRE_Int *off_proc_j;
HYPRE_Int *value_start, *offproc_cnt;
HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
HYPRE_Int max_num_threads;
HYPRE_Int error_flag = 0;
HYPRE_Int i1;
/*HYPRE_Complex *off_proc_data;*/
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
max_num_threads = hypre_NumThreads();
par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
col_partitioning = hypre_IJMatrixColPartitioning(matrix);
value_start = hypre_CTAlloc(HYPRE_Int, max_num_threads+1);
offproc_cnt = hypre_CTAlloc(HYPRE_Int, max_num_threads);
#ifdef HYPRE_NO_GLOBAL_PARTITION
col_0 = col_partitioning[0];
col_n = col_partitioning[1]-1;
first = hypre_IJMatrixGlobalFirstCol(matrix);
pstart = 0;
#else
col_0 = col_partitioning[my_id];
col_n = col_partitioning[my_id+1]-1;
first = col_partitioning[0];
pstart = my_id;
#endif
if (nrows < 0)
{
hypre_error_in_arg(2);
if (print_level)
hypre_printf("Error! nrows negative! HYPRE_IJMatrixSetValues\n");
return hypre_error_flag;
}
if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/
{
HYPRE_Int *col_map_offd;
HYPRE_Int num_cols_offd;
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
num_cols_offd = hypre_CSRMatrixNumCols(offd);
if (num_cols_offd)
{
col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
offd_j = hypre_CSRMatrixJ(offd);
offd_data = hypre_CSRMatrixData(offd);
}
aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
if (aux_matrix)
{
/*current_num_elmts
= hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix);*/
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int j_offd;
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int len, rest, ns, ne;
HYPRE_Int pos_diag, pos_offd;
HYPRE_Int len_diag, len_offd;
HYPRE_Int row_len;
HYPRE_Int row_local;
HYPRE_Int i, j, k, ii, n, row;
HYPRE_Int not_found, size, indx, cnt1, col_indx;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
len = nrows/num_threads;
rest = nrows - len*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(len+1);
ne = (my_thread_num+1)*(len+1);
}
else
{
ns = my_thread_num*len+rest;
ne = (my_thread_num+1)*len+rest;
}
value_start[my_thread_num] = 0;
for (ii=ns; ii < ne; ii++)
value_start[my_thread_num] += ncols[ii];
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
for (i=0; i < max_num_threads; i++)
value_start[i+1] += value_start[i];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
indx = 0;
if (my_thread_num)
indx = value_start[my_thread_num-1];
for (ii=ns; ii < ne; ii++)
{
row = rows[ii];
n = ncols[ii];
/* processor owns the row */
if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
{
row_local = row - row_partitioning[pstart];
/* compute local row number */
size = diag_i[row_local+1] - diag_i[row_local]
+ offd_i[row_local+1] - offd_i[row_local];
if (n > size)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level) hypre_printf (" row %d too long! \n", row);
break;
/*return hypre_error_flag; */
}
pos_diag = diag_i[row_local];
pos_offd = offd_i[row_local];
len_diag = diag_i[row_local+1];
len_offd = offd_i[row_local+1];
not_found = 1;
for (i=0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
j_offd = hypre_BinarySearch(col_map_offd,cols[indx]-first,
num_cols_offd);
if (j_offd == -1)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
hypre_printf (" Error, element %d %d does not exist\n",
row, cols[indx]);
break;
/*return hypre_error_flag; */
}
for (j=pos_offd; j < len_offd; j++)
{
if (offd_j[j] == j_offd)
{
offd_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
hypre_printf (" Error, element %d %d does not exist\n",
row, cols[indx]);
break;
/*return hypre_error_flag;*/
}
not_found = 1;
}
/* diagonal element */
else if (cols[indx] == row)
{
if (diag_j[pos_diag] != row_local)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
hypre_printf (" Error, element %d %d does not exist\n",
row, cols[indx]);
break;
/*return hypre_error_flag; */
}
diag_data[pos_diag] = values[indx];
}
else /* insert into diag */
{
for (j=pos_diag; j < len_diag; j++)
{
if (diag_j[j] == (cols[indx]-col_0))
{
diag_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
hypre_printf (" Error, element %d %d does not exist\n",
row, cols[indx]);
break;
/*return hypre_error_flag;*/
}
}
indx++;
}
}
/* processor does not own the row */
else /*search for previous occurrences and cancel them */
{
if (aux_matrix)
{
col_indx = 0;
for (i=0; i < off_proc_i_indx; i=i+2)
{
row_len = off_proc_i[i+1];
if (off_proc_i[i] == row)
{
for (j=0; j < n; j++)
{
cnt1 = col_indx;
for (k=0; k < row_len; k++)
{
if (off_proc_j[cnt1] == cols[j])
{
off_proc_j[cnt1++] = -1;
offproc_cnt[my_thread_num]++;
/*cancel_indx++;*/
/* if no repetition allowed */
/* off_proc_j[col_indx] = -1;
col_indx -= k;
break; */
}
else
{
cnt1++;
}
}
}
col_indx += row_len;
}
else
{
col_indx += row_len;
}
}
/*hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;*/
}
}
}
} /*end parallel region */
}
else /* matrix not assembled */
{
aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
if (aux_matrix)
{
/*current_num_elmts
= hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix);*/
off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
}
row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
if (need_aux)
{
aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
}
else
{
diag = hypre_ParCSRMatrixDiag(par_matrix);
diag_i = hypre_CSRMatrixI(diag);
diag_j = hypre_CSRMatrixJ(diag);
diag_data = hypre_CSRMatrixData(diag);
offd = hypre_ParCSRMatrixOffd(par_matrix);
offd_i = hypre_CSRMatrixI(offd);
if (num_procs > 1)
{
offd_j = hypre_CSRMatrixJ(offd);
offd_data = hypre_CSRMatrixData(offd);
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int num_threads, my_thread_num;
HYPRE_Int len, rest, ns, ne;
HYPRE_Int *tmp_j = NULL;
HYPRE_Int *local_j = NULL;
HYPRE_Complex *tmp_data = NULL;
HYPRE_Complex *local_data = NULL;
HYPRE_Int tmp_indx;
HYPRE_Int row_len;
HYPRE_Int row_local;
HYPRE_Int i, j, k, ii, n, row;
HYPRE_Int not_found, size, indx, cnt1, col_indx;
HYPRE_Int old_size, space, cnt;
num_threads = hypre_NumActiveThreads();
my_thread_num = hypre_GetThreadNum();
len = nrows/num_threads;
rest = nrows - len*num_threads;
if (my_thread_num < rest)
{
ns = my_thread_num*(len+1);
ne = (my_thread_num+1)*(len+1);
}
else
{
ns = my_thread_num*len+rest;
ne = (my_thread_num+1)*len+rest;
}
value_start[my_thread_num] = 0;
for (ii=ns; ii < ne; ii++)
value_start[my_thread_num] += ncols[ii];
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (my_thread_num == 0)
{
for (i=0; i < max_num_threads; i++)
value_start[i+1] += value_start[i];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
indx = 0;
if (my_thread_num)
indx = value_start[my_thread_num-1];
for (ii=ns; ii < ne; ii++)
{
row = rows[ii];
n = ncols[ii];
/* processor owns the row */
if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
{
row_local = row - row_partitioning[pstart];
/* compute local row number */
if (need_aux)
{
local_j = aux_j[row_local];
local_data = aux_data[row_local];
space = row_space[row_local];
old_size = row_length[row_local];
size = space - old_size;
if (size < n)
{
size = n - size;
tmp_j = hypre_CTAlloc(HYPRE_Int,size);
tmp_data = hypre_CTAlloc(HYPRE_Complex,size);
}
tmp_indx = 0;
not_found = 1;
size = old_size;
for (i=0; i < n; i++)
{
for (j=0; j < old_size; j++)
{
if (local_j[j] == cols[indx])
{
local_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (size < space)
{
local_j[size] = cols[indx];
local_data[size++] = values[indx];
}
else
{
tmp_j[tmp_indx] = cols[indx];
tmp_data[tmp_indx++] = values[indx];
}
}
not_found = 1;
indx++;
}
row_length[row_local] = size+tmp_indx;
if (tmp_indx)
{
aux_j[row_local] = hypre_TReAlloc(aux_j[row_local],HYPRE_Int,
size+tmp_indx);
aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
HYPRE_Complex,size+tmp_indx);
row_space[row_local] = size+tmp_indx;
local_j = aux_j[row_local];
local_data = aux_data[row_local];
}
cnt = size;
for (i=0; i < tmp_indx; i++)
{
local_j[cnt] = tmp_j[i];
local_data[cnt++] = tmp_data[i];
}
if (tmp_j)
{
hypre_TFree(tmp_j);
hypre_TFree(tmp_data);
}
}
else /* insert immediately into data in ParCSRMatrix structure */
{
HYPRE_Int offd_indx, diag_indx;
HYPRE_Int offd_space, diag_space;
HYPRE_Int cnt_diag, cnt_offd;
offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
cnt_diag = diag_indx;
cnt_offd = offd_indx;
diag_space = diag_i[row_local+1];
offd_space = offd_i[row_local+1];
not_found = 1;
for (i=0; i < n; i++)
{
if (cols[indx] < col_0 || cols[indx] > col_n)
/* insert into offd */
{
for (j=offd_i[row_local]; j < offd_indx; j++)
{
if (offd_j[j] == cols[indx])
{
offd_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_offd < offd_space)
{
offd_j[cnt_offd] = cols[indx];
offd_data[cnt_offd++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
hypre_printf("Error in row %d ! Too many elements!\n",
row);
break;
/*return hypre_error_flag;*/
}
}
not_found = 1;
}
else /* insert into diag */
{
for (j=diag_i[row_local]; j < diag_indx; j++)
{
if (diag_j[j] == cols[indx])
{
diag_data[j] = values[indx];
not_found = 0;
break;
}
}
if (not_found)
{
if (cnt_diag < diag_space)
{
diag_j[cnt_diag] = cols[indx];
diag_data[cnt_diag++] = values[indx];
}
else
{
hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
error_flag++;
if (print_level)
hypre_printf("Error in row %d ! Too many elements !\n",
row);
break;
/*return hypre_error_flag;*/
}
}
not_found = 1;
}
indx++;
}
hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
}
}
/* processor does not own the row */
else
{
indx += n;
if (aux_matrix)
{
col_indx = 0;
for (i=0; i < off_proc_i_indx; i=i+2)
{
row_len = off_proc_i[i+1];
if (off_proc_i[i] == row)
{
for (j=0; j < n; j++)
{
cnt1 = col_indx;
for (k=0; k < row_len; k++)
{
if (off_proc_j[cnt1] == cols[j])
{
off_proc_j[cnt1++] = -1;
/*cancel_indx++;*/
offproc_cnt[my_thread_num]++;
/* if no repetition allowed */
/* off_proc_j[col_indx] = -1;
col_indx -= k;
break; */
}
else
{
cnt1++;
}
}
}
col_indx += row_len;
}
else
{
col_indx += row_len;
}
}
/*hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;*/
}
}
}
} /* end parallel region */
}
if (error_flag) return hypre_error_flag;
if (aux_matrix)
{
for (i1=0; i1 < max_num_threads; i1++)
cancel_indx += offproc_cnt[i1];
hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;
}
hypre_TFree(value_start);
hypre_TFree(offproc_cnt);
return hypre_error_flag;
}
/******************************************************************************
*
 * hypre_IJMatrixAddToValuesOMPParCSR
*
* adds row values to an IJMatrix
*
*****************************************************************************/
HYPRE_Int
hypre_IJMatrixAddToValuesOMPParCSR( hypre_IJMatrix       *matrix,
                                    HYPRE_Int             nrows,
                                    HYPRE_Int            *ncols,
                                    const HYPRE_Int      *rows,
                                    const HYPRE_Int      *cols,
                                    const HYPRE_Complex  *values )
{
   /* Thread-parallel (OpenMP) version of AddToValues: for each of nrows rows,
    * adds (+=) ncols[i] values into the ParCSR matrix.  Rows owned by this
    * process are updated in place (diag/offd parts if assembled, aux or CSR
    * structures if not); rows owned by other processes are recorded per
    * thread and appended to the auxiliary off-proc arrays serially at the
    * end.  Returns hypre_error_flag.
    *
    * FIX vs. original: on the error path (error_flag set inside a parallel
    * region) the work arrays value_start and offproc_cnt (and its per-thread
    * entries) were leaked; they are now freed before the early return. */
   hypre_ParCSRMatrix *par_matrix;
   hypre_CSRMatrix *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_Int *row_partitioning;
   HYPRE_Int *col_partitioning;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int num_procs, my_id;
   HYPRE_Int col_0, col_n;             /* global column range owned locally */
   HYPRE_Int **aux_j;
   HYPRE_Complex **aux_data;
   HYPRE_Int *row_length, *row_space;
   HYPRE_Int need_aux;
   HYPRE_Int first, pstart;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_Complex *offd_data;
   HYPRE_Int current_num_elmts;
   HYPRE_Int max_off_proc_elmts;
   HYPRE_Int off_proc_i_indx;
   HYPRE_Int *off_proc_i;
   HYPRE_Int *off_proc_j;
   HYPRE_Complex *off_proc_data;
   HYPRE_Int *value_start, **offproc_cnt;   /* per-thread work arrays */
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
   HYPRE_Int max_num_threads;
   HYPRE_Int error_flag = 0;
   HYPRE_Int i1;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   max_num_threads = hypre_NumThreads();
   par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject( matrix );
   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);

   /* value_start[t] accumulates the offset of thread t's first entry in
    * cols/values; offproc_cnt[t] is a thread-local growable list of
    * (row index, value offset) pairs for rows this process does not own. */
   value_start = hypre_CTAlloc(HYPRE_Int, max_num_threads+1);
   offproc_cnt = hypre_CTAlloc(HYPRE_Int *, max_num_threads);

   for (i1=0; i1 < max_num_threads; i1++)
      offproc_cnt[i1] = NULL;

#ifdef HYPRE_NO_GLOBAL_PARTITION
   col_0 = col_partitioning[0];
   col_n = col_partitioning[1]-1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);
   pstart = 0;
#else
   col_0 = col_partitioning[my_id];
   col_n = col_partitioning[my_id+1]-1;
   first = col_partitioning[0];
   pstart = my_id;
#endif

   if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled */
   {
      HYPRE_Int num_cols_offd;
      HYPRE_Int *col_map_offd;

      diag = hypre_ParCSRMatrixDiag(par_matrix);
      diag_i = hypre_CSRMatrixI(diag);
      diag_j = hypre_CSRMatrixJ(diag);
      diag_data = hypre_CSRMatrixData(diag);
      offd = hypre_ParCSRMatrixOffd(par_matrix);
      offd_i = hypre_CSRMatrixI(offd);
      num_cols_offd = hypre_CSRMatrixNumCols(offd);
      if (num_cols_offd)
      {
         col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
         offd_j = hypre_CSRMatrixJ(offd);
         offd_data = hypre_CSRMatrixData(offd);
      }
      aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
      if (aux_matrix)
      {
         current_num_elmts
            = hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix);
         off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
         off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
      {
         HYPRE_Int j_offd;
         HYPRE_Int num_threads, my_thread_num;
         HYPRE_Int len, rest, ns, ne;
         HYPRE_Int pos_diag, pos_offd;
         HYPRE_Int len_diag, len_offd;
         HYPRE_Int row_local;
         HYPRE_Int i, j, ii, n, row;
         HYPRE_Int not_found, size, indx;
         HYPRE_Int *my_offproc_cnt = NULL;

         num_threads = hypre_NumActiveThreads();
         my_thread_num = hypre_GetThreadNum();

         /* Partition the nrows rows evenly over the threads:
          * thread my_thread_num handles rows [ns, ne). */
         len = nrows/num_threads;
         rest = nrows - len*num_threads;
         if (my_thread_num < rest)
         {
            ns = my_thread_num*(len+1);
            ne = (my_thread_num+1)*(len+1);
         }
         else
         {
            ns = my_thread_num*len+rest;
            ne = (my_thread_num+1)*len+rest;
         }

         /* Prefix-sum of ncols over threads to find each thread's starting
          * offset into cols[]/values[]. */
         value_start[my_thread_num] = 0;
         for (ii=ns; ii < ne; ii++)
            value_start[my_thread_num] += ncols[ii];

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
         if (my_thread_num == 0)
         {
            for (i=0; i < max_num_threads; i++)
               value_start[i+1] += value_start[i];
         }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
         indx = 0;
         if (my_thread_num) indx = value_start[my_thread_num-1];

         for (ii=ns; ii < ne; ii++)
         {
            row = rows[ii];
            n = ncols[ii];
            if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
            {
               /* locally owned row: add into the assembled diag/offd parts */
               row_local = row - row_partitioning[pstart];
               /* compute local row number */
               size = diag_i[row_local+1] - diag_i[row_local]
                  + offd_i[row_local+1] - offd_i[row_local];

               if (n > size)
               {
                  hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                  error_flag++;
                  if (print_level) hypre_printf (" row %d too long! \n", row);
                  break;
               }

               pos_diag = diag_i[row_local];
               pos_offd = offd_i[row_local];
               len_diag = diag_i[row_local+1];
               len_offd = offd_i[row_local+1];
               not_found = 1;

               for (i=0; i < n; i++)
               {
                  if (cols[indx] < col_0 || cols[indx] > col_n)
                     /* insert into offd */
                  {
                     j_offd = hypre_BinarySearch(col_map_offd,cols[indx]-first,
                                                 num_cols_offd);
                     if (j_offd == -1)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                           hypre_printf (" Error, element %d %d does not exist\n",
                                         row, cols[indx]);
                        break;
                     }
                     for (j=pos_offd; j < len_offd; j++)
                     {
                        if (offd_j[j] == j_offd)
                        {
                           offd_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                           hypre_printf (" Error, element %d %d does not exist\n",
                                         row, cols[indx]);
                        break;
                     }
                     not_found = 1;
                  }
                  /* diagonal element (stored first in its diag row) */
                  else if (cols[indx] == row)
                  {
                     if (diag_j[pos_diag] != row_local)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                           hypre_printf (" Error, element %d %d does not exist\n",
                                         row, cols[indx]);
                        break;
                     }
                     diag_data[pos_diag] += values[indx];
                  }
                  else /* insert into diag */
                  {
                     for (j=pos_diag; j < len_diag; j++)
                     {
                        if (diag_j[j] == (cols[indx]-col_0))
                        {
                           diag_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                           hypre_printf (" Error, element %d %d does not exist\n",
                                         row, cols[indx]);
                        break;
                     }
                  }
                  indx++;
               }
            }
            /* not my row */
            /* need to find solution for threaded version!!!! */
            /* could save row number and process later .... */
            else
            {
               /* Record (ii, indx) in this thread's growable list;
                * slot 0 holds the capacity, slot 1 the fill count. */
               if (!my_offproc_cnt)
               {
                  my_offproc_cnt = hypre_CTAlloc(HYPRE_Int, 200);
                  offproc_cnt[my_thread_num] = my_offproc_cnt;
                  my_offproc_cnt[0] = 200;
                  my_offproc_cnt[1] = 2;
               }
               i = my_offproc_cnt[1];
               if (i+2 < my_offproc_cnt[0])
               {
                  my_offproc_cnt[i] = ii;
                  my_offproc_cnt[i+1] = indx;
                  my_offproc_cnt[1] += 2;
               }
               else
               {
                  size = my_offproc_cnt[0];
                  my_offproc_cnt = hypre_TReAlloc(my_offproc_cnt,HYPRE_Int,size+200);
                  offproc_cnt[my_thread_num] = my_offproc_cnt;
                  my_offproc_cnt[0] += 200;
                  my_offproc_cnt[i] = ii;
                  my_offproc_cnt[i+1] = indx;
                  my_offproc_cnt[1] += 2;
               }
               indx +=n;
            }
         }
      } /* end parallel region */
   }
   /* not assembled */
   else
   {
      aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
      if (aux_matrix)
      {
         current_num_elmts
            = hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix);
         off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
         off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
      }
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
      row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
      need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
      if (need_aux)
      {
         /* per-row auxiliary storage (global col indices + values) */
         aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
         aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
      }
      else
      {
         /* insert directly into the preallocated CSR structures */
         diag = hypre_ParCSRMatrixDiag(par_matrix);
         diag_i = hypre_CSRMatrixI(diag);
         diag_j = hypre_CSRMatrixJ(diag);
         diag_data = hypre_CSRMatrixData(diag);
         offd = hypre_ParCSRMatrixOffd(par_matrix);
         offd_i = hypre_CSRMatrixI(offd);
         if (num_procs > 1)
         {
            offd_j = hypre_CSRMatrixJ(offd);
            offd_data = hypre_CSRMatrixData(offd);
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
      {
         HYPRE_Int num_threads, my_thread_num;
         HYPRE_Int len, rest, ns, ne;
         HYPRE_Int *tmp_j = NULL;
         HYPRE_Int *local_j = NULL;
         HYPRE_Complex *tmp_data = NULL;
         HYPRE_Complex *local_data = NULL;
         HYPRE_Int tmp_indx;
         HYPRE_Int row_local;
         HYPRE_Int i, j, ii, n, row;
         HYPRE_Int not_found, size, indx;
         HYPRE_Int old_size, space, cnt;
         HYPRE_Int *my_offproc_cnt = NULL;

         num_threads = hypre_NumActiveThreads();
         my_thread_num = hypre_GetThreadNum();

         /* same row partitioning and value-offset prefix sum as above */
         len = nrows/num_threads;
         rest = nrows - len*num_threads;
         if (my_thread_num < rest)
         {
            ns = my_thread_num*(len+1);
            ne = (my_thread_num+1)*(len+1);
         }
         else
         {
            ns = my_thread_num*len+rest;
            ne = (my_thread_num+1)*len+rest;
         }

         value_start[my_thread_num] = 0;
         for (ii=ns; ii < ne; ii++)
            value_start[my_thread_num] += ncols[ii];

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
         if (my_thread_num == 0)
         {
            for (i=0; i < max_num_threads; i++)
               value_start[i+1] += value_start[i];
         }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
         indx = 0;
         if (my_thread_num) indx = value_start[my_thread_num-1];

         for (ii=ns; ii < ne; ii++)
         {
            row = rows[ii];
            n = ncols[ii];
            if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
            {
               row_local = row - row_partitioning[pstart];
               /* compute local row number */
               if (need_aux)
               {
                  local_j = aux_j[row_local];
                  local_data = aux_data[row_local];
                  space = row_space[row_local];
                  old_size = row_length[row_local];
                  size = space - old_size;
                  if (size < n)
                  {
                     /* overflow buffers for entries beyond current space */
                     size = n - size;
                     tmp_j = hypre_CTAlloc(HYPRE_Int,size);
                     tmp_data = hypre_CTAlloc(HYPRE_Complex,size);
                  }
                  tmp_indx = 0;
                  not_found = 1;
                  size = old_size;
                  for (i=0; i < n; i++)
                  {
                     /* if the column already exists, accumulate into it */
                     for (j=0; j < old_size; j++)
                     {
                        if (local_j[j] == cols[indx])
                        {
                           local_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (size < space)
                        {
                           local_j[size] = cols[indx];
                           local_data[size++] = values[indx];
                        }
                        else
                        {
                           tmp_j[tmp_indx] = cols[indx];
                           tmp_data[tmp_indx++] = values[indx];
                        }
                     }
                     not_found = 1;
                     indx++;
                  }

                  row_length[row_local] = size+tmp_indx;

                  if (tmp_indx)
                  {
                     /* grow the row and append the overflow entries */
                     aux_j[row_local] = hypre_TReAlloc(aux_j[row_local],HYPRE_Int,
                                                       size+tmp_indx);
                     aux_data[row_local] = hypre_TReAlloc(aux_data[row_local],
                                                          HYPRE_Complex,size+tmp_indx);
                     row_space[row_local] = size+tmp_indx;
                     local_j = aux_j[row_local];
                     local_data = aux_data[row_local];
                  }

                  cnt = size;

                  for (i=0; i < tmp_indx; i++)
                  {
                     local_j[cnt] = tmp_j[i];
                     local_data[cnt++] = tmp_data[i];
                  }

                  if (tmp_j)
                  {
                     hypre_TFree(tmp_j);
                     hypre_TFree(tmp_data);
                  }
               }
               else /* insert immediately into data in ParCSRMatrix structure */
               {
                  HYPRE_Int offd_indx, diag_indx;
                  HYPRE_Int offd_space, diag_space;
                  HYPRE_Int cnt_diag, cnt_offd;

                  offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
                  diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
                  cnt_diag = diag_indx;
                  cnt_offd = offd_indx;
                  diag_space = diag_i[row_local+1];
                  offd_space = offd_i[row_local+1];
                  not_found = 1;

                  for (i=0; i < n; i++)
                  {
                     if (cols[indx] < col_0 || cols[indx] > col_n)
                        /* insert into offd */
                     {
                        for (j=offd_i[row_local]; j < offd_indx; j++)
                        {
                           if (offd_j[j] == cols[indx])
                           {
                              offd_data[j] += values[indx];
                              not_found = 0;
                              break;
                           }
                        }
                        if (not_found)
                        {
                           if (cnt_offd < offd_space)
                           {
                              offd_j[cnt_offd] = cols[indx];
                              offd_data[cnt_offd++] = values[indx];
                           }
                           else
                           {
                              hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                              error_flag++;
                              if (print_level)
                                 hypre_printf("Error in row %d ! Too many elements!\n",
                                              row);
                              break;
                           }
                        }
                        not_found = 1;
                     }
                     else /* insert into diag */
                     {
                        for (j=diag_i[row_local]; j < diag_indx; j++)
                        {
                           if (diag_j[j] == cols[indx])
                           {
                              diag_data[j] += values[indx];
                              not_found = 0;
                              break;
                           }
                        }
                        if (not_found)
                        {
                           if (cnt_diag < diag_space)
                           {
                              diag_j[cnt_diag] = cols[indx];
                              diag_data[cnt_diag++] = values[indx];
                           }
                           else
                           {
                              hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                              error_flag++;
                              if (print_level)
                                 hypre_printf("Error in row %d ! Too many elements !\n",
                                              row);
                              break;
                           }
                        }
                        not_found = 1;
                     }
                     indx++;
                  }

                  hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
                  hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
               }
            }
            /* not my row */
            else
            {
               if (!my_offproc_cnt)
               {
                  my_offproc_cnt = hypre_CTAlloc(HYPRE_Int, 200);
                  offproc_cnt[my_thread_num] = my_offproc_cnt;
                  my_offproc_cnt[0] = 200;
                  my_offproc_cnt[1] = 2;
               }
               i = my_offproc_cnt[1];
               if (i+2 < my_offproc_cnt[0])
               {
                  my_offproc_cnt[i] = ii;
                  my_offproc_cnt[i+1] = indx;
                  my_offproc_cnt[1] += 2;
               }
               else
               {
                  size = my_offproc_cnt[0];
                  my_offproc_cnt = hypre_TReAlloc(my_offproc_cnt,HYPRE_Int,size+200);
                  offproc_cnt[my_thread_num] = my_offproc_cnt;
                  my_offproc_cnt[0] += 200;
                  my_offproc_cnt[i] = ii;
                  my_offproc_cnt[i+1] = indx;
                  my_offproc_cnt[1] += 2;
               }
               indx +=n;
            }
         }
      } /*end parallel region */
   }

   if (error_flag)
   {
      /* free the per-thread work arrays before the early error return
       * (the original code leaked value_start and offproc_cnt here) */
      hypre_TFree(value_start);
      for (i1 = 0; i1 < max_num_threads; i1++)
      {
         if (offproc_cnt[i1]) hypre_TFree(offproc_cnt[i1]);
      }
      hypre_TFree(offproc_cnt);
      return hypre_error_flag;
   }
   hypre_TFree(value_start);

   if (!aux_matrix)
   {
      HYPRE_Int size = row_partitioning[pstart+1]-row_partitioning[pstart];
      hypre_AuxParCSRMatrixCreate(&aux_matrix, size, size, NULL);
      hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }

   /* Serial post-pass: append the rows recorded by each thread as
    * off-processor to the auxiliary (row, n) / col / value arrays,
    * growing them as needed.  These are communicated at assembly time. */
   for (i1 = 0; i1 < max_num_threads; i1++)
   {
      if (offproc_cnt[i1])
      {
         HYPRE_Int *my_offproc_cnt = offproc_cnt[i1];
         HYPRE_Int i, i2, ii, row, n, indx;
         for (i2 = 2; i2 < my_offproc_cnt[1]; i2+=2)
         {
            ii = my_offproc_cnt[i2];
            row = rows[ii];
            n = ncols[ii];
            indx = my_offproc_cnt[i2+1];
            current_num_elmts
               = hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix);
            max_off_proc_elmts
               = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
            off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
            off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
            off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
            off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            if (!max_off_proc_elmts)
            {
               /* first off-proc entry: allocate the arrays */
               max_off_proc_elmts = hypre_max(n,1000);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) =
                  max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix)
                  = hypre_CTAlloc(HYPRE_Int,2*max_off_proc_elmts);
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix)
                  = hypre_CTAlloc(HYPRE_Int,max_off_proc_elmts);
               hypre_AuxParCSRMatrixOffProcData(aux_matrix)
                  = hypre_CTAlloc(HYPRE_Complex,max_off_proc_elmts);
               off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
               off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
               off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            }
            else if (current_num_elmts + n > max_off_proc_elmts)
            {
               /* grow the arrays to make room for this row */
               max_off_proc_elmts += 3*n;
               off_proc_i = hypre_TReAlloc(off_proc_i,HYPRE_Int,2*max_off_proc_elmts);
               off_proc_j = hypre_TReAlloc(off_proc_j,HYPRE_Int,max_off_proc_elmts);
               off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,
                                              max_off_proc_elmts);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix)
                  = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
            }
            /* off_proc_i holds (row, n) pairs; cols/values follow in
             * off_proc_j / off_proc_data */
            off_proc_i[off_proc_i_indx++] = row;
            off_proc_i[off_proc_i_indx++] = n;
            for (i=0; i < n; i++)
            {
               off_proc_j[current_num_elmts] = cols[indx];
               off_proc_data[current_num_elmts++] = values[indx++];
            }
            hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
            hypre_AuxParCSRMatrixCurrentNumElmts(aux_matrix)
               = current_num_elmts;
         }
         hypre_TFree (offproc_cnt[i1]);
      }
   }
   hypre_TFree(offproc_cnt);

   return hypre_error_flag;
}
| 140,326 | 32.546976 | 100 | c |
AMG | AMG-master/IJ_mv/IJVector.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* hypre_IJVector interface
*
*****************************************************************************/
#include "./_hypre_IJ_mv.h"
#include "../HYPRE.h"
/*--------------------------------------------------------------------------
* hypre_IJVectorDistribute
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_IJVectorDistribute( HYPRE_IJVector vector, const HYPRE_Int *vec_starts )
{
   /* Redistribute an IJVector across processors according to vec_starts.
    * Dispatches on the underlying object type; only HYPRE_PARCSR is
    * supported.  Aborts the program on a NULL vector or unknown type. */
   hypre_IJVector *ij_vec = (hypre_IJVector *) vector;

   if (!ij_vec)
   {
      hypre_printf("Vector variable is NULL -- hypre_IJVectorDistribute\n");
      exit(1);
   }

   if ( hypre_IJVectorObjectType(ij_vec) != HYPRE_PARCSR )
   {
      hypre_printf("Unrecognized object type -- hypre_IJVectorDistribute\n");
      exit(1);
   }

   return( hypre_IJVectorDistributePar(ij_vec, vec_starts) );
}
/*--------------------------------------------------------------------------
* hypre_IJVectorZeroValues
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_IJVectorZeroValues( HYPRE_IJVector vector )
{
   /* Zero all local components of an IJVector.  Dispatches on the
    * underlying object type; only HYPRE_PARCSR is supported.  Aborts the
    * program on a NULL vector or unknown type. */
   hypre_IJVector *ij_vec = (hypre_IJVector *) vector;

   if (!ij_vec)
   {
      hypre_printf("Vector variable is NULL -- hypre_IJVectorZeroValues\n");
      exit(1);
   }

   if ( hypre_IJVectorObjectType(ij_vec) != HYPRE_PARCSR )
   {
      hypre_printf("Unrecognized object type -- hypre_IJVectorZeroValues\n");
      exit(1);
   }

   return( hypre_IJVectorZeroValuesPar(ij_vec) );
}
| 2,778 | 27.947917 | 81 | c |
AMG | AMG-master/IJ_mv/IJVector_parcsr.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* IJVector_Par interface
*
*****************************************************************************/
#include "_hypre_IJ_mv.h"
#include "../HYPRE.h"
/******************************************************************************
*
* hypre_IJVectorCreatePar
*
* creates ParVector if necessary, and leaves a pointer to it as the
* hypre_IJVector object
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorCreatePar(hypre_IJVector *vector,
                        HYPRE_Int *IJpartitioning)
{
   /* Create the underlying hypre_ParVector for this IJVector and store it
    * as the IJVector's object.  The given IJ partitioning (which may start
    * at a nonzero global index jmin) is shifted so that the ParVector's
    * partitioning is zero-based. */
   MPI_Comm comm = hypre_IJVectorComm(vector);

   HYPRE_Int num_procs, jmin, global_n, *partitioning, j;
   hypre_MPI_Comm_size(comm, &num_procs);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* assumed-partition build: IJpartitioning holds only this proc's
    * [start, end) range (2 entries) */
   jmin = hypre_IJVectorGlobalFirstRow(vector);
   global_n = hypre_IJVectorGlobalNumRows(vector);
   partitioning = hypre_CTAlloc(HYPRE_Int, 2);

   /* Shift to zero-based partitioning for ParVector object */
   for (j = 0; j < 2; j++)
      partitioning[j] = IJpartitioning[j] - jmin;
#else
   /* global-partition build: IJpartitioning has num_procs+1 entries */
   jmin = IJpartitioning[0];
   global_n = IJpartitioning[num_procs] - jmin;

   partitioning = hypre_CTAlloc(HYPRE_Int, num_procs+1);

   /* Shift to zero-based partitioning for ParVector object */
   for (j = 0; j < num_procs+1; j++)
      partitioning[j] = IJpartitioning[j] - jmin;
#endif

   hypre_IJVectorObject(vector) =
      hypre_ParVectorCreate(comm, global_n, (HYPRE_Int *) partitioning);

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorDestroyPar
*
* frees ParVector local storage of an IJVectorPar
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorDestroyPar(hypre_IJVector *vector)
{
   /* Free the ParVector held as this IJVector's underlying object. */
   hypre_ParVector *par_vec = (hypre_ParVector *) hypre_IJVectorObject(vector);

   return hypre_ParVectorDestroy(par_vec);
}
/******************************************************************************
*
* hypre_IJVectorInitializePar
*
* initializes ParVector of IJVectorPar
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorInitializePar(hypre_IJVector *vector)
{
   /* Initialize the underlying ParVector (sizing its local part from the
    * partitioning) and ensure the auxiliary vector used for off-processor
    * set/add bookkeeping exists and is initialized. */
   hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
   hypre_AuxParVector *aux_vector = (hypre_AuxParVector*) hypre_IJVectorTranslator(vector);
   HYPRE_Int *partitioning = hypre_ParVectorPartitioning(par_vector);
   hypre_Vector *local_vector = hypre_ParVectorLocalVector(par_vector);
   HYPRE_Int my_id;
   HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);

   MPI_Comm comm = hypre_IJVectorComm(vector);

   hypre_MPI_Comm_rank(comm,&my_id);

   /* cannot size the local vector without a partitioning */
   if (!partitioning)
   {
      if (print_level)
      {
         hypre_printf("No ParVector partitioning for initialization -- ");
         hypre_printf("hypre_IJVectorInitializePar\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* assumed partition: 2-entry local range */
   hypre_VectorSize(local_vector) = partitioning[1] - partitioning[0];
#else
   /* global partition: index by process rank */
   hypre_VectorSize(local_vector) = partitioning[my_id+1] - partitioning[my_id];
#endif

   hypre_ParVectorInitialize(par_vector);

   /* create the auxiliary vector on demand */
   if (!aux_vector)
   {
      hypre_AuxParVectorCreate(&aux_vector);
      hypre_IJVectorTranslator(vector) = aux_vector;
   }
   hypre_AuxParVectorInitialize(aux_vector);

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorSetMaxOffProcElmtsPar
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorSetMaxOffProcElmtsPar(hypre_IJVector *vector,
                                    HYPRE_Int max_off_proc_elmts)
{
   /* Record the expected number of off-processor elements in the
    * auxiliary vector, creating the auxiliary vector on demand. */
   hypre_AuxParVector *aux = (hypre_AuxParVector *) hypre_IJVectorTranslator(vector);

   if (aux == NULL)
   {
      hypre_AuxParVectorCreate(&aux);
      hypre_IJVectorTranslator(vector) = aux;
   }

   hypre_AuxParVectorMaxOffProcElmts(aux) = max_off_proc_elmts;

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorDistributePar
*
* takes an IJVector generated for one processor and distributes it
* across many processors according to vec_starts,
* if vec_starts is NULL, it distributes them evenly?
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorDistributePar(hypre_IJVector *vector,
                            const HYPRE_Int *vec_starts)
{
   /* Replace the IJVector's ParVector by one distributed according to
    * vec_starts, built from the old vector's local data; the old ParVector
    * is destroyed. */
   HYPRE_Int        print_level = hypre_IJVectorPrintLevel(vector);
   hypre_ParVector *src_vector  = (hypre_ParVector*) hypre_IJVectorObject(vector);
   hypre_ParVector *new_vector;

   if (src_vector == NULL)
   {
      if (print_level)
      {
         hypre_printf("old_vector == NULL -- ");
         hypre_printf("hypre_IJVectorDistributePar\n");
         hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   new_vector = hypre_VectorToParVector(hypre_ParVectorComm(src_vector),
                                        hypre_ParVectorLocalVector(src_vector),
                                        (HYPRE_Int *)vec_starts);

   if (new_vector == NULL)
   {
      if (print_level)
      {
         hypre_printf("par_vector == NULL -- ");
         hypre_printf("hypre_IJVectorDistributePar\n");
         hypre_printf("**** Vector storage is unallocated ****\n");
      }
      hypre_error_in_arg(1);
      /* deliberately fall through, as in the original: the old vector is
       * still destroyed and the (NULL) result installed */
   }

   hypre_ParVectorDestroy(src_vector);
   hypre_IJVectorObject(vector) = new_vector;

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorZeroValuesPar
*
* zeroes all local components of an IJVectorPar
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorZeroValuesPar(hypre_IJVector *vector)
{
   /* Set every local component of the underlying ParVector to zero.
    * Validates the ParVector, its partitioning and its local vector, and
    * reports (via print_level / hypre_error_in_arg) rather than aborting. */
   HYPRE_Int my_id;
   HYPRE_Int i, vec_start, vec_stop;
   HYPRE_Complex *data;

   hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
   MPI_Comm comm = hypre_IJVectorComm(vector);
   HYPRE_Int *partitioning;
   hypre_Vector *local_vector;
   HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);

   hypre_MPI_Comm_rank(comm, &my_id);

   /* If par_vector == NULL or partitioning == NULL or local_vector == NULL
      let user know of catastrophe and exit */

   if (!par_vector)
   {
      if (print_level)
      {
         hypre_printf("par_vector == NULL -- ");
         hypre_printf("hypre_IJVectorZeroValuesPar\n");
         hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   partitioning = hypre_ParVectorPartitioning(par_vector);
   local_vector = hypre_ParVectorLocalVector(par_vector);
   if (!partitioning)
   {
      if (print_level)
      {
         hypre_printf("partitioning == NULL -- ");
         hypre_printf("hypre_IJVectorZeroValuesPar\n");
         hypre_printf("**** Vector partitioning is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   if (!local_vector)
   {
      if (print_level)
      {
         hypre_printf("local_vector == NULL -- ");
         hypre_printf("hypre_IJVectorZeroValuesPar\n");
         hypre_printf("**** Vector local data is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* assumed partition: 2-entry local range */
   vec_start = partitioning[0];
   vec_stop  = partitioning[1];
#else
   /* global partition: index by process rank */
   vec_start = partitioning[my_id];
   vec_stop  = partitioning[my_id+1];
#endif

   if (vec_start > vec_stop)
   {
      if (print_level)
      {
         hypre_printf("vec_start > vec_stop -- ");
         hypre_printf("hypre_IJVectorZeroValuesPar\n");
         hypre_printf("**** This vector partitioning should not occur ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* zero the vec_stop - vec_start local entries in parallel */
   data = hypre_VectorData( local_vector );
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < vec_stop - vec_start; i++)
      data[i] = 0.;

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorSetValuesPar
*
* sets a potentially noncontiguous set of components of an IJVectorPar
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorSetValuesPar(hypre_IJVector *vector,
                           HYPRE_Int num_values,
                           const HYPRE_Int *indices,
                           const HYPRE_Complex *values)
{
   /* Overwrite a (potentially noncontiguous) set of components.
      Locally owned entries are written directly.  With explicit
      indices, an off-processor index does not set anything; instead
      it cancels any matching entry already queued in the auxiliary
      off-processor stash.  With indices == NULL, num_values entries
      are copied into a contiguous block starting at the first local
      component (anything beyond the local range is ignored). */
   HYPRE_Int myid;
   HYPRE_Int glob_i, pos, vec_start, vec_stop;
   HYPRE_Complex *data;
   HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);
   HYPRE_Int *IJpartitioning = hypre_IJVectorPartitioning(vector);
   hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
   hypre_AuxParVector *aux_vector = (hypre_AuxParVector*) hypre_IJVectorTranslator(vector);
   MPI_Comm comm = hypre_IJVectorComm(vector);
   hypre_Vector *local_vector;

   /* Nothing requested: skip all checking */
   if (num_values < 1) return 0;

   hypre_MPI_Comm_rank(comm, &myid);

   /* Validate the object before touching any of its members */
   if (!par_vector)
   {
      if (print_level)
      {
         hypre_printf("par_vector == NULL -- ");
         hypre_printf("hypre_IJVectorSetValuesPar\n");
         hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   local_vector = hypre_ParVectorLocalVector(par_vector);

   if (!IJpartitioning)
   {
      if (print_level)
      {
         hypre_printf("IJpartitioning == NULL -- ");
         hypre_printf("hypre_IJVectorSetValuesPar\n");
         hypre_printf("**** IJVector partitioning is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (!local_vector)
   {
      if (print_level)
      {
         hypre_printf("local_vector == NULL -- ");
         hypre_printf("hypre_IJVectorSetValuesPar\n");
         hypre_printf("**** Vector local data is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Local ownership range; vec_stop is inclusive in this routine */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   vec_start = IJpartitioning[0];
   vec_stop  = IJpartitioning[1]-1;
#else
   vec_start = IJpartitioning[myid];
   vec_stop  = IJpartitioning[myid+1]-1;
#endif

   if (vec_start > vec_stop)
   {
      if (print_level)
      {
         hypre_printf("vec_start > vec_stop -- ");
         hypre_printf("hypre_IJVectorSetValuesPar\n");
         hypre_printf("**** This vector partitioning should not occur ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   data = hypre_VectorData(local_vector);

   if (indices)
   {
      HYPRE_Int current_num_elmts
                = hypre_AuxParVectorCurrentNumElmts(aux_vector);
      HYPRE_Int *off_proc_i = hypre_AuxParVectorOffProcI(aux_vector);
      HYPRE_Int cancel_indx = hypre_AuxParVectorCancelIndx(aux_vector);
      HYPRE_Int k;

      for (pos = 0; pos < num_values; pos++)
      {
         glob_i = indices[pos];
         if (glob_i >= vec_start && glob_i <= vec_stop)
         {
            /* Owned locally: write straight into the vector data */
            data[glob_i - vec_start] = values[pos];
         }
         else
         {
            /* Off-processor index: cancel any queued stash entries
               that carry the same global index */
            for (k = 0; k < current_num_elmts; k++)
            {
               if (glob_i == off_proc_i[k])
               {
                  off_proc_i[k] = -1;
                  cancel_indx++;
               }
            }
            hypre_AuxParVectorCancelIndx(aux_vector) = cancel_indx;
         }
      }
   }
   else
   {
      /* Block mode: clamp to the local range, warning once if clipped.
         NOTE: off-processor values are silently ignored in this mode. */
      if (num_values > vec_stop - vec_start + 1)
      {
         if (print_level)
         {
            hypre_printf("Warning! Indices beyond local range not identified!\n ");
            hypre_printf("Off processor values have been ignored!\n");
         }
         num_values = vec_stop - vec_start +1;
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(pos) HYPRE_SMP_SCHEDULE
#endif
      for (pos = 0; pos < num_values; pos++)
      {
         data[pos] = values[pos];
      }
   }

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorAddToValuesPar
*
* adds to a potentially noncontiguous set of IJVectorPar components
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorAddToValuesPar(hypre_IJVector *vector,
                             HYPRE_Int num_values,
                             const HYPRE_Int *indices,
                             const HYPRE_Complex *values)
{
   /* Add values into a (potentially noncontiguous) set of components.
      Locally owned entries are accumulated directly; entries with
      off-processor indices are appended to the auxiliary off-processor
      stash and applied later, at assembly time.  With indices == NULL,
      num_values entries are added into a contiguous block starting at
      the first local component (anything beyond the local range is
      ignored, with a warning). */
   HYPRE_Int my_id;
   HYPRE_Int i, j, vec_start, vec_stop;
   HYPRE_Complex *data;
   HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);
   HYPRE_Int *IJpartitioning = hypre_IJVectorPartitioning(vector);
   hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
   hypre_AuxParVector *aux_vector = (hypre_AuxParVector*) hypre_IJVectorTranslator(vector);
   MPI_Comm comm = hypre_IJVectorComm(vector);
   hypre_Vector *local_vector;

   /* If no components are to be retrieved, perform no checking and return */
   if (num_values < 1) return 0;

   hypre_MPI_Comm_rank(comm, &my_id);

   /* If par_vector == NULL or partitioning == NULL or local_vector == NULL
      let user know of catastrophe and exit */
   if (!par_vector)
   {
      if (print_level)
      {
         hypre_printf("par_vector == NULL -- ");
         hypre_printf("hypre_IJVectorAddToValuesPar\n");
         hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   local_vector = hypre_ParVectorLocalVector(par_vector);

   if (!IJpartitioning)
   {
      if (print_level)
      {
         hypre_printf("IJpartitioning == NULL -- ");
         hypre_printf("hypre_IJVectorAddToValuesPar\n");
         hypre_printf("**** IJVector partitioning is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (!local_vector)
   {
      if (print_level)
      {
         hypre_printf("local_vector == NULL -- ");
         hypre_printf("hypre_IJVectorAddToValuesPar\n");
         hypre_printf("**** Vector local data is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Local ownership range; vec_stop is inclusive in this routine */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   vec_start = IJpartitioning[0];
   vec_stop = IJpartitioning[1]-1;
#else
   vec_start = IJpartitioning[my_id];
   vec_stop = IJpartitioning[my_id+1]-1;
#endif

   if (vec_start > vec_stop)
   {
      if (print_level)
      {
         hypre_printf("vec_start > vec_stop -- ");
         hypre_printf("hypre_IJVectorAddToValuesPar\n");
         hypre_printf("**** This vector partitioning should not occur ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   data = hypre_VectorData(local_vector);

   if (indices)
   {
      HYPRE_Int current_num_elmts
                = hypre_AuxParVectorCurrentNumElmts(aux_vector);
      HYPRE_Int max_off_proc_elmts
                = hypre_AuxParVectorMaxOffProcElmts(aux_vector);
      HYPRE_Int *off_proc_i = hypre_AuxParVectorOffProcI(aux_vector);
      HYPRE_Complex *off_proc_data = hypre_AuxParVectorOffProcData(aux_vector);

      for (j = 0; j < num_values; j++)
      {
         i = indices[j];
         if (i < vec_start || i > vec_stop)
         {
            /* if elements outside processor boundaries, store in off processor
               stash */
            if (!max_off_proc_elmts)
            {
               /* first off-proc entry: allocate the stash */
               max_off_proc_elmts = 100;
               hypre_AuxParVectorMaxOffProcElmts(aux_vector) =
                  max_off_proc_elmts;
               hypre_AuxParVectorOffProcI(aux_vector)
                  = hypre_CTAlloc(HYPRE_Int,max_off_proc_elmts);
               hypre_AuxParVectorOffProcData(aux_vector)
                  = hypre_CTAlloc(HYPRE_Complex,max_off_proc_elmts);
               off_proc_i = hypre_AuxParVectorOffProcI(aux_vector);
               off_proc_data = hypre_AuxParVectorOffProcData(aux_vector);
            }
            else if (current_num_elmts + 1 > max_off_proc_elmts)
            {
               /* stash full: grow it and store the new pointers back
                  in the aux vector */
               max_off_proc_elmts += 10;
               off_proc_i = hypre_TReAlloc(off_proc_i,HYPRE_Int,max_off_proc_elmts);
               off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,
                                              max_off_proc_elmts);
               hypre_AuxParVectorMaxOffProcElmts(aux_vector)
                  = max_off_proc_elmts;
               hypre_AuxParVectorOffProcI(aux_vector) = off_proc_i;
               hypre_AuxParVectorOffProcData(aux_vector) = off_proc_data;
            }
            /* append the (index, value) pair to the stash */
            off_proc_i[current_num_elmts] = i;
            off_proc_data[current_num_elmts++] = values[j];
            hypre_AuxParVectorCurrentNumElmts(aux_vector)=current_num_elmts;
         }
         else /* local values are added to the vector */
         {
            i -= vec_start;
            data[i] += values[j];
         }
      }
   }
   else
   {
      /* block mode: clamp to the local range, warning once if clipped */
      if (num_values > vec_stop - vec_start + 1)
      {
         if (print_level)
         {
            hypre_printf("Warning! Indices beyond local range not identified!\n ");
            hypre_printf("Off processor values have been ignored!\n");
         }
         num_values = vec_stop - vec_start +1;
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < num_values; j++)
         data[j] += values[j];
   }

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorAssemblePar
*
 * currently tests existence of ParVector object and its partitioning
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorAssemblePar(hypre_IJVector *vector)
{
   /* Finalize the vector: sanity-check the ParVector object and its
      partitionings, then flush the off-processor stash (if any)
      accumulated by AddTo calls.  Entries cancelled by later SetValues
      calls (marked with index -1) are compacted away before the
      remaining stash is exchanged and accumulated by
      hypre_IJVectorAssembleOffProcValsPar.  Collective call. */
   HYPRE_Int *IJpartitioning = hypre_IJVectorPartitioning(vector);
   hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
   hypre_AuxParVector *aux_vector = (hypre_AuxParVector*) hypre_IJVectorTranslator(vector);
   HYPRE_Int *partitioning;
   MPI_Comm comm = hypre_IJVectorComm(vector);
   HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);

   if (!par_vector)
   {
      if (print_level)
      {
         hypre_printf("par_vector == NULL -- ");
         hypre_printf("hypre_IJVectorAssemblePar\n");
         hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      /* Return early: continuing would dereference the NULL par_vector
         in hypre_ParVectorPartitioning below.  This also matches the
         error handling of the other hypre_IJVector*Par routines. */
      return hypre_error_flag;
   }
   partitioning = hypre_ParVectorPartitioning(par_vector);

   /* The next two checks only diagnose; neither pointer is
      dereferenced below, so execution continues as before */
   if (!IJpartitioning)
   {
      if (print_level)
      {
         hypre_printf("IJpartitioning == NULL -- ");
         hypre_printf("hypre_IJVectorAssemblePar\n");
         hypre_printf("**** IJVector partitioning is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
   }
   if (!partitioning)
   {
      if (print_level)
      {
         hypre_printf("partitioning == NULL -- ");
         hypre_printf("hypre_IJVectorAssemblePar\n");
         hypre_printf("**** ParVector partitioning is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
   }

   if (aux_vector)
   {
      HYPRE_Int off_proc_elmts, current_num_elmts;
      HYPRE_Int max_off_proc_elmts;
      HYPRE_Int *off_proc_i;
      HYPRE_Complex *off_proc_data;
      HYPRE_Int cancel_indx = hypre_AuxParVectorCancelIndx(aux_vector);
      HYPRE_Int current_i, ii;
      current_num_elmts = hypre_AuxParVectorCurrentNumElmts(aux_vector);

      /* Compact away stash entries cancelled by SetValues (index -1) */
      if (cancel_indx)
      {
         off_proc_i=hypre_AuxParVectorOffProcI(aux_vector);
         off_proc_data=hypre_AuxParVectorOffProcData(aux_vector);
         current_i = 0;
         for (ii=0; ii < current_num_elmts; ii++)
         {
            if (off_proc_i[ii] != -1)
            {
               off_proc_i[current_i] = off_proc_i[ii];
               off_proc_data[current_i++] = off_proc_data[ii];
            }
         }
         hypre_AuxParVectorCurrentNumElmts(aux_vector) = current_i;
         current_num_elmts = current_i;
      }

      /* Collective sum of stash sizes: does ANY rank hold off-proc
         data?  (Fixed: the first argument had been corrupted by an
         HTML-entity mangling of "&current_num_elmts".) */
      hypre_MPI_Allreduce(&current_num_elmts, &off_proc_elmts, 1, HYPRE_MPI_INT,
                          hypre_MPI_SUM, comm);

      if (off_proc_elmts)
      {
         max_off_proc_elmts=hypre_AuxParVectorMaxOffProcElmts(aux_vector);
         off_proc_i=hypre_AuxParVectorOffProcI(aux_vector);
         off_proc_data=hypre_AuxParVectorOffProcData(aux_vector);
         hypre_IJVectorAssembleOffProcValsPar(vector, max_off_proc_elmts,
                                              current_num_elmts, off_proc_i, off_proc_data);
         /* Stash has been applied; release it and reset the counters */
         hypre_TFree(hypre_AuxParVectorOffProcI(aux_vector));
         hypre_TFree(hypre_AuxParVectorOffProcData(aux_vector));
         hypre_AuxParVectorMaxOffProcElmts(aux_vector) = 0;
         hypre_AuxParVectorCurrentNumElmts(aux_vector) = 0;
      }
   }

   return hypre_error_flag;
}
/******************************************************************************
*
* hypre_IJVectorGetValuesPar
*
* get a potentially noncontiguous set of IJVectorPar components
*
*****************************************************************************/
HYPRE_Int
hypre_IJVectorGetValuesPar(hypre_IJVector *vector,
                           HYPRE_Int num_values,
                           const HYPRE_Int *indices,
                           HYPRE_Complex *values)
{
   /* Copy a (potentially noncontiguous) set of locally owned
      components into values[].  All requested indices must be owned
      by this process; otherwise argument 3 is flagged and nothing is
      copied.  With indices == NULL the first num_values local entries
      are returned. */
   HYPRE_Int myid;
   HYPRE_Int k, pos, vec_start, vec_stop;
   HYPRE_Complex *data;
   HYPRE_Int bad_count = 0;
   HYPRE_Int *IJpartitioning = hypre_IJVectorPartitioning(vector);
   hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
   MPI_Comm comm = hypre_IJVectorComm(vector);
   hypre_Vector *local_vector;
   HYPRE_Int print_level = hypre_IJVectorPrintLevel(vector);

   /* Nothing requested: skip all checking */
   if (num_values < 1) return 0;

   hypre_MPI_Comm_rank(comm, &myid);

   /* Validate the object before touching any of its members */
   if (!par_vector)
   {
      if (print_level)
      {
         hypre_printf("par_vector == NULL -- ");
         hypre_printf("hypre_IJVectorGetValuesPar\n");
         hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   local_vector = hypre_ParVectorLocalVector(par_vector);

   if (!IJpartitioning)
   {
      if (print_level)
      {
         hypre_printf("IJpartitioning == NULL -- ");
         hypre_printf("hypre_IJVectorGetValuesPar\n");
         hypre_printf("**** IJVector partitioning is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (!local_vector)
   {
      if (print_level)
      {
         hypre_printf("local_vector == NULL -- ");
         hypre_printf("hypre_IJVectorGetValuesPar\n");
         hypre_printf("**** Vector local data is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Local ownership range; vec_stop is exclusive in this routine */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   vec_start = IJpartitioning[0];
   vec_stop  = IJpartitioning[1];
#else
   vec_start = IJpartitioning[myid];
   vec_stop  = IJpartitioning[myid+1];
#endif

   if (vec_start > vec_stop)
   {
      if (print_level)
      {
         hypre_printf("vec_start > vec_stop -- ");
         hypre_printf("hypre_IJVectorGetValuesPar\n");
         hypre_printf("**** This vector partitioning should not occur ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* All explicit indices must lie in the local range; reject the
      whole request otherwise */
   if (indices)
   {
      for (k = 0; k < num_values; k++)
      {
         if (indices[k] < vec_start || indices[k] >= vec_stop)
         {
            bad_count++;
         }
      }
   }

   if (bad_count)
   {
      if (print_level)
      {
         hypre_printf("indices beyond local range -- ");
         hypre_printf("hypre_IJVectorGetValuesPar\n");
         hypre_printf("**** Indices specified are unusable ****\n");
      }
      hypre_error_in_arg(3);
      return hypre_error_flag;
   }

   data = hypre_VectorData(local_vector);

   if (indices)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(pos) HYPRE_SMP_SCHEDULE
#endif
      for (pos = 0; pos < num_values; pos++)
      {
         values[pos] = data[indices[pos] - vec_start];
      }
   }
   else
   {
      /* Block mode: the request must fit inside the local range */
      if (num_values > (vec_stop-vec_start))
      {
         hypre_error_in_arg(2);
         return hypre_error_flag;
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(pos) HYPRE_SMP_SCHEDULE
#endif
      for (pos = 0; pos < num_values; pos++)
      {
         values[pos] = data[pos];
      }
   }

   return hypre_error_flag;
}
/******************************************************************************
* hypre_IJVectorAssembleOffProcValsPar
*
* This is for handling set and get values calls to off-proc. entries - it is
* called from assemble. There is an alternate version for when the assumed
* partition is being used.
*****************************************************************************/
#ifndef HYPRE_NO_GLOBAL_PARTITION
HYPRE_Int
hypre_IJVectorAssembleOffProcValsPar( hypre_IJVector *vector,
                                      HYPRE_Int max_off_proc_elmts,
                                      HYPRE_Int current_num_elmts,
                                      HYPRE_Int *off_proc_i,
                                      HYPRE_Complex *off_proc_data)
{
   /* Global-partition version: ship the locally queued off-processor
      (index, value) pairs to their owner ranks and add them into each
      owner's local vector data.  Owners are located by searching the
      full partitioning array.  Collective over the vector's
      communicator (uses Allgather/Allgatherv plus point-to-point
      exchanges).  Note: max_off_proc_elmts is unused here. */
   MPI_Comm comm = hypre_IJVectorComm(vector);
   hypre_ParVector *par_vector = ( hypre_ParVector *) hypre_IJVectorObject(vector);
   hypre_MPI_Request *requests = NULL;
   hypre_MPI_Status *status = NULL;
   HYPRE_Int i, j, j2, row;
   HYPRE_Int iii, indx, ip, first_index;
   HYPRE_Int proc_id, num_procs, my_id;
   HYPRE_Int num_sends, num_sends2;
   HYPRE_Int num_recvs;
   HYPRE_Int num_requests;
   HYPRE_Int vec_start, vec_len;
   HYPRE_Int *send_procs;
   HYPRE_Int *send_i;
   HYPRE_Int *send_map_starts;
   HYPRE_Int *recv_procs;
   HYPRE_Int *recv_i;
   HYPRE_Int *recv_vec_starts;
   HYPRE_Int *info;
   HYPRE_Int *int_buffer;
   HYPRE_Int *proc_id_mem;
   HYPRE_Int *partitioning;
   HYPRE_Int *displs;
   HYPRE_Int *recv_buf;
   HYPRE_Complex *send_data;
   HYPRE_Complex *recv_data;
   HYPRE_Complex *data = hypre_VectorData(hypre_ParVectorLocalVector(par_vector));

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   partitioning = hypre_IJVectorPartitioning(vector);
   first_index = partitioning[my_id];

   /* count, per destination rank, how many stash entries it owns */
   info = hypre_CTAlloc(HYPRE_Int,num_procs);
   proc_id_mem = hypre_CTAlloc(HYPRE_Int,current_num_elmts);
   for (i=0; i < current_num_elmts; i++)
   {
      row = off_proc_i[i];
      proc_id = hypre_FindProc(partitioning,row,num_procs);
      proc_id_mem[i] = proc_id;
      info[proc_id]++;
   }

   /* determine send_procs and amount of data to be sent */
   num_sends = 0;
   for (i=0; i < num_procs; i++)
   {
      if (info[i])
      {
         num_sends++;
      }
   }
   num_sends2 = 2*num_sends;
   send_procs = hypre_CTAlloc(HYPRE_Int,num_sends);
   send_map_starts = hypre_CTAlloc(HYPRE_Int,num_sends+1);
   /* int_buffer holds (destination, count) pairs to be published */
   int_buffer = hypre_CTAlloc(HYPRE_Int,num_sends2);
   j = 0;
   j2 = 0;
   send_map_starts[0] = 0;
   for (i=0; i < num_procs; i++)
   {
      if (info[i])
      {
         send_procs[j++] = i;
         send_map_starts[j] = send_map_starts[j-1]+info[i];
         int_buffer[j2++] = i;
         int_buffer[j2++] = info[i];
      }
   }

   /* every rank publishes its (destination, count) pairs so each rank
      can discover who will send to it */
   hypre_MPI_Allgather(&num_sends2,1,HYPRE_MPI_INT,info,1,HYPRE_MPI_INT,comm);

   displs = hypre_CTAlloc(HYPRE_Int, num_procs+1);
   displs[0] = 0;
   for (i=1; i < num_procs+1; i++)
      displs[i] = displs[i-1]+info[i-1];
   recv_buf = hypre_CTAlloc(HYPRE_Int, displs[num_procs]);

   hypre_MPI_Allgatherv(int_buffer,num_sends2,HYPRE_MPI_INT,recv_buf,info,displs,
                        HYPRE_MPI_INT,comm);

   hypre_TFree(int_buffer);
   hypre_TFree(info);

   /* determine recv procs and amount of data to be received */
   num_recvs = 0;
   for (j=0; j < displs[num_procs]; j+=2)
   {
      if (recv_buf[j] == my_id)
         num_recvs++;
   }

   recv_procs = hypre_CTAlloc(HYPRE_Int,num_recvs);
   recv_vec_starts = hypre_CTAlloc(HYPRE_Int,num_recvs+1);

   j2 = 0;
   recv_vec_starts[0] = 0;
   for (i=0; i < num_procs; i++)
   {
      for (j=displs[i]; j < displs[i+1]; j+=2)
      {
         if (recv_buf[j] == my_id)
         {
            recv_procs[j2++] = i;
            recv_vec_starts[j2] = recv_vec_starts[j2-1]+recv_buf[j+1];
         }
         if (j2 == num_recvs) break;
      }
   }
   hypre_TFree(recv_buf);
   hypre_TFree(displs);

   /* set up data to be sent to send procs */
   /* send_i contains for each send proc
      indices, send_data contains corresponding values */

   send_i = hypre_CTAlloc(HYPRE_Int,send_map_starts[num_sends]);
   send_data = hypre_CTAlloc(HYPRE_Complex,send_map_starts[num_sends]);
   recv_i = hypre_CTAlloc(HYPRE_Int,recv_vec_starts[num_recvs]);
   recv_data = hypre_CTAlloc(HYPRE_Complex,recv_vec_starts[num_recvs]);

   /* bucket the stash entries by destination, using send_map_starts
      as running fill pointers */
   for (i=0; i < current_num_elmts; i++)
   {
      proc_id = proc_id_mem[i];
      indx = hypre_BinarySearch(send_procs,proc_id,num_sends);
      iii = send_map_starts[indx];
      send_i[iii] = off_proc_i[i];
      send_data[iii] = off_proc_data[i];
      send_map_starts[indx]++;
   }
   hypre_TFree(proc_id_mem);

   /* shift send_map_starts back down to restore the offsets the
      bucketing loop advanced */
   for (i=num_sends; i > 0; i--)
   {
      send_map_starts[i] = send_map_starts[i-1];
   }
   send_map_starts[0] = 0;

   num_requests = num_recvs+num_sends;

   requests = hypre_CTAlloc(hypre_MPI_Request, num_requests);
   status = hypre_CTAlloc(hypre_MPI_Status, num_requests);

   /* first exchange: the global indices */
   j=0;
   for (i=0; i < num_recvs; i++)
   {
      vec_start = recv_vec_starts[i];
      vec_len = recv_vec_starts[i+1] - vec_start;
      ip = recv_procs[i];
      hypre_MPI_Irecv(&recv_i[vec_start], vec_len, HYPRE_MPI_INT,
                      ip, 0, comm, &requests[j++]);
   }
   for (i=0; i < num_sends; i++)
   {
      vec_start = send_map_starts[i];
      vec_len = send_map_starts[i+1] - vec_start;
      ip = send_procs[i];
      hypre_MPI_Isend(&send_i[vec_start], vec_len, HYPRE_MPI_INT,
                      ip, 0, comm, &requests[j++]);
   }

   if (num_requests)
   {
      hypre_MPI_Waitall(num_requests, requests, status);
   }

   /* second exchange: the corresponding values */
   j=0;
   for (i=0; i < num_recvs; i++)
   {
      vec_start = recv_vec_starts[i];
      vec_len = recv_vec_starts[i+1] - vec_start;
      ip = recv_procs[i];
      hypre_MPI_Irecv(&recv_data[vec_start], vec_len, HYPRE_MPI_COMPLEX,
                      ip, 0, comm, &requests[j++]);
   }
   for (i=0; i < num_sends; i++)
   {
      vec_start = send_map_starts[i];
      vec_len = send_map_starts[i+1] - vec_start;
      ip = send_procs[i];
      hypre_MPI_Isend(&send_data[vec_start], vec_len, HYPRE_MPI_COMPLEX,
                      ip, 0, comm, &requests[j++]);
   }

   if (num_requests)
   {
      hypre_MPI_Waitall(num_requests, requests, status);
   }

   hypre_TFree(requests);
   hypre_TFree(status);
   hypre_TFree(send_i);
   hypre_TFree(send_data);
   hypre_TFree(send_procs);
   hypre_TFree(send_map_starts);
   hypre_TFree(recv_procs);

   /* accumulate the received contributions into the local data */
   for (i=0; i < recv_vec_starts[num_recvs]; i++)
   {
      row = recv_i[i];
      j = row - first_index;
      data[j] += recv_data[i];
   }

   hypre_TFree(recv_vec_starts);
   hypre_TFree(recv_i);
   hypre_TFree(recv_data);

   return hypre_error_flag;
}
#else
/* assumed partition version */
HYPRE_Int
hypre_IJVectorAssembleOffProcValsPar( hypre_IJVector *vector,
                                      HYPRE_Int max_off_proc_elmts,
                                      HYPRE_Int current_num_elmts,
                                      HYPRE_Int *off_proc_i,
                                      HYPRE_Complex *off_proc_data)
{
   /* Assumed-partition version: ship the locally queued off-processor
      (index, value) pairs to their owner ranks and add them into each
      owner's local data, without a global partitioning array.  Phases:
      (1) map each row to its *assumed* owner, sort by row;
      (2) contact the assumed owners to learn the *actual* owners;
      (3) pack (#rows, row, value, ...) per owner into a void buffer
          (ints and complexes share one max-sized slot each);
      (4) exchange via hypre_DataExchangeList and unpack, accumulating
          into the local vector.
      Collective over the vector's communicator.
      Note: max_off_proc_elmts is unused here. */
   HYPRE_Int myid, global_num_rows;
   HYPRE_Int global_first_row;
   HYPRE_Int i, j, in, k;
   HYPRE_Int proc_id, last_proc, prev_id, tmp_id;
   HYPRE_Int max_response_size;
   HYPRE_Int ex_num_contacts = 0;
   HYPRE_Int range_start, range_end;
   HYPRE_Int storage;
   HYPRE_Int indx;
   HYPRE_Int row, num_ranges, row_count;
   HYPRE_Int num_recvs;
   HYPRE_Int counter, upper_bound;
   HYPRE_Int num_real_procs;

   HYPRE_Int *row_list=NULL;
   HYPRE_Int *a_proc_id=NULL, *orig_order=NULL;
   HYPRE_Int *real_proc_id = NULL, *us_real_proc_id = NULL;
   HYPRE_Int *ex_contact_procs = NULL, *ex_contact_vec_starts = NULL;
   HYPRE_Int *recv_starts=NULL;
   HYPRE_Int *response_buf = NULL, *response_buf_starts=NULL;
   HYPRE_Int *num_rows_per_proc = NULL;

   HYPRE_Int tmp_int;
   HYPRE_Int obj_size_bytes, int_size, complex_size;
   HYPRE_Int first_index;

   void *void_contact_buf = NULL;
   void *index_ptr;
   void *recv_data_ptr;

   HYPRE_Complex tmp_complex;
   HYPRE_Int *ex_contact_buf=NULL;
   HYPRE_Complex *vector_data;
   HYPRE_Complex value;

   hypre_DataExchangeResponse response_obj1, response_obj2;
   hypre_ProcListElements send_proc_obj;

   MPI_Comm comm = hypre_IJVectorComm(vector);
   hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);

   hypre_IJAssumedPart *apart;

   hypre_MPI_Comm_rank(comm, &myid);

   global_num_rows = hypre_IJVectorGlobalNumRows(vector);
   global_first_row = hypre_IJVectorGlobalFirstRow(vector);

   /* verify that we have created the assumed partition */
   if (hypre_IJVectorAssumedPart(vector) == NULL)
   {
      hypre_IJVectorCreateAssumedPartition(vector);
   }

   apart = (hypre_IJAssumedPart*) hypre_IJVectorAssumedPart(vector);

   /* get the assumed processor id for each row */
   a_proc_id = hypre_CTAlloc(HYPRE_Int, current_num_elmts);
   orig_order = hypre_CTAlloc(HYPRE_Int, current_num_elmts);
   real_proc_id = hypre_CTAlloc(HYPRE_Int, current_num_elmts);
   row_list = hypre_CTAlloc(HYPRE_Int, current_num_elmts);

   if (current_num_elmts > 0)
   {
      for (i=0; i < current_num_elmts; i++)
      {
         row = off_proc_i[i];
         row_list[i] = row;
         hypre_GetAssumedPartitionProcFromRow(comm, row, global_first_row,
                                              global_num_rows, &proc_id);
         a_proc_id[i] = proc_id;
         orig_order[i] = i;
      }

      /* now we need to find the actual order of each row - sort on row -
         this will result in proc ids sorted also...*/
      hypre_qsort3i(row_list, a_proc_id, orig_order, 0, current_num_elmts -1);

      /* calculate the number of contacts */
      ex_num_contacts = 1;
      last_proc = a_proc_id[0];
      for (i=1; i < current_num_elmts; i++)
      {
         if (a_proc_id[i] > last_proc)
         {
            ex_num_contacts++;
            last_proc = a_proc_id[i];
         }
      }
   }

   /* now we will go through a create a contact list - need to contact
      assumed processors and find out who the actual row owner is - we
      will contact with a range (2 numbers) */
   ex_contact_procs = hypre_CTAlloc(HYPRE_Int, ex_num_contacts);
   ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, ex_num_contacts+1);
   ex_contact_buf = hypre_CTAlloc(HYPRE_Int, ex_num_contacts*2);

   counter = 0;
   range_end = -1;
   for (i=0; i< current_num_elmts; i++)
   {
      /* a new contact range begins whenever a row falls past the
         current assumed-owner's range */
      if (row_list[i] > range_end)
      {
         /* assumed proc */
         proc_id = a_proc_id[i];

         /* end of prev. range */
         if (counter > 0) ex_contact_buf[counter*2 - 1] = row_list[i-1];

         /*start new range*/
         ex_contact_procs[counter] = proc_id;
         ex_contact_vec_starts[counter] = counter*2;
         ex_contact_buf[counter*2] = row_list[i];
         counter++;

         hypre_GetAssumedPartitionRowRange(comm, proc_id, global_first_row,
                                           global_num_rows, &range_start, &range_end);
      }
   }

   /*finish the starts*/
   ex_contact_vec_starts[counter] = counter*2;
   /*finish the last range*/
   if (counter > 0)
      ex_contact_buf[counter*2 - 1] = row_list[current_num_elmts - 1];

   /* create response object - can use same fill response as used in the commpkg
      routine */
   response_obj1.fill_response = hypre_RangeFillResponseIJDetermineRecvProcs;
   response_obj1.data1 = apart; /* this is necessary so we can fill responses*/
   response_obj1.data2 = NULL;

   max_response_size = 6; /* 6 means we can fit 3 ranges*/

   hypre_DataExchangeList(ex_num_contacts, ex_contact_procs,
                          ex_contact_buf, ex_contact_vec_starts, sizeof(HYPRE_Int),
                          sizeof(HYPRE_Int), &response_obj1, max_response_size, 4,
                          comm, (void**) &response_buf, &response_buf_starts);

   /* now response_buf contains a proc_id followed by an upper bound for the
      range. */

   hypre_TFree(ex_contact_procs);
   hypre_TFree(ex_contact_buf);
   hypre_TFree(ex_contact_vec_starts);

   hypre_TFree(a_proc_id);
   a_proc_id = NULL;

   /*how many ranges were returned?*/
   num_ranges = response_buf_starts[ex_num_contacts];
   num_ranges = num_ranges/2;

   prev_id = -1;
   j = 0;
   counter = 0;
   num_real_procs = 0;

   /* loop through ranges - create a list of actual processor ids*/
   for (i=0; i<num_ranges; i++)
   {
      upper_bound = response_buf[i*2+1];
      counter = 0;
      tmp_id = response_buf[i*2];

      /* loop through row_list entries - counting how many are in the range */
      while (j < current_num_elmts && row_list[j] <= upper_bound)
      {
         real_proc_id[j] = tmp_id;
         j++;
         counter++;
      }
      if (counter > 0 && tmp_id != prev_id)
      {
         num_real_procs++;
      }
      prev_id = tmp_id;
   }

   /* now we have the list of real procesors ids (real_proc_id) - and the number
      of distinct ones - so now we can set up data to be sent - we have
      HYPRE_Int and HYPRE_Complex data.  (row number and value) - we will send
      everything as a void since we may not know the rel sizes of ints and
      doubles */

   /* first find out how many elements to send per proc - so we can do
      storage */

   int_size = sizeof(HYPRE_Int);
   complex_size = sizeof(HYPRE_Complex);

   /* each packed slot is the larger of the two sizes, so ints and
      complexes can share one homogeneous buffer layout */
   obj_size_bytes = hypre_max(int_size, complex_size);

   ex_contact_procs = hypre_CTAlloc(HYPRE_Int, num_real_procs);
   num_rows_per_proc = hypre_CTAlloc(HYPRE_Int, num_real_procs);

   counter = 0;

   if (num_real_procs > 0 )
   {
      ex_contact_procs[0] = real_proc_id[0];
      num_rows_per_proc[0] = 1;

      /* loop through real procs - these are sorted (row_list is sorted also)*/
      for (i=1; i < current_num_elmts; i++)
      {
         if (real_proc_id[i] == ex_contact_procs[counter]) /* same processor */
         {
            num_rows_per_proc[counter] += 1; /*another row */
         }
         else /* new processor */
         {
            counter++;
            ex_contact_procs[counter] = real_proc_id[i];
            num_rows_per_proc[counter] = 1;
         }
      }
   }

   /* calculate total storage and make vec_starts arrays */
   storage = 0;
   ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, num_real_procs + 1);
   ex_contact_vec_starts[0] = -1;

   for (i=0; i < num_real_procs; i++)
   {
      /* one count slot plus (row, value) slot pairs per destination */
      storage += 1 + 2* num_rows_per_proc[i];
      ex_contact_vec_starts[i+1] = -storage-1; /* need negative for next loop */
   }

   /*void_contact_buf = hypre_MAlloc(storage*obj_size_bytes);*/
   void_contact_buf = hypre_CAlloc(storage,obj_size_bytes);
   index_ptr = void_contact_buf;  /* step through with this index */

   /* set up data to be sent to send procs */
   /* for each proc, ex_contact_buf_d contains #rows, row #, data, etc. */

   /* un-sort real_proc_id  - we want to access data arrays in order */

   us_real_proc_id =  hypre_CTAlloc(HYPRE_Int, current_num_elmts);
   for (i=0; i < current_num_elmts; i++)
   {
      us_real_proc_id[orig_order[i]] = real_proc_id[i];
   }
   hypre_TFree(real_proc_id);

   prev_id = -1;
   for (i=0; i < current_num_elmts; i++)
   {
      proc_id = us_real_proc_id[i];
      /* can't use row list[i] - you loose the negative signs that differentiate
         add/set values */
      row = off_proc_i[i];
      /* find position of this processor */
      indx = hypre_BinarySearch(ex_contact_procs, proc_id, num_real_procs);
      in =  ex_contact_vec_starts[indx];

      index_ptr = (void *) ((char *) void_contact_buf + in*obj_size_bytes);

      /* first time for this processor - add the number of rows to the buffer */
      if (in < 0)
      {
         in = -in - 1;
         /* re-calc. index_ptr since in_i was negative */
         index_ptr = (void *) ((char *) void_contact_buf + in*obj_size_bytes);

         tmp_int =  num_rows_per_proc[indx];
         memcpy( index_ptr, &tmp_int, int_size);
         index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);

         in++;
      }
      /* add row # */
      memcpy( index_ptr, &row, int_size);
      index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
      in++;

      /* add value */
      tmp_complex = off_proc_data[i];
      memcpy( index_ptr, &tmp_complex, complex_size);
      index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
      in++;

      /* increment the indexes to keep track of where we are - fix later */
      ex_contact_vec_starts[indx] = in;
   }

   /* some clean up */

   hypre_TFree(response_buf);
   hypre_TFree(response_buf_starts);

   hypre_TFree(us_real_proc_id);
   hypre_TFree(orig_order);
   hypre_TFree(row_list);
   hypre_TFree(num_rows_per_proc);

   /* shift the fill pointers back down to restore the real offsets */
   for (i=num_real_procs; i > 0; i--)
   {
      ex_contact_vec_starts[i] =   ex_contact_vec_starts[i-1];
   }

   ex_contact_vec_starts[0] = 0;

   /* now send the data */

   /***********************************/
   /* now get the info in send_proc_obj_d */

   /* the response we expect is just a confirmation*/
   response_buf = NULL;
   response_buf_starts = NULL;

   /*build the response object*/

   /* use the send_proc_obj for the info kept from contacts */
   /*estimate inital storage allocation */

   send_proc_obj.length = 0;
   send_proc_obj.storage_length = num_real_procs + 5;
   send_proc_obj.id = NULL; /* don't care who sent it to us */
   send_proc_obj.vec_starts =
      hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1);
   send_proc_obj.vec_starts[0] = 0;
   send_proc_obj.element_storage_length = storage + 20;
   send_proc_obj.v_elements =
      hypre_MAlloc(obj_size_bytes*send_proc_obj.element_storage_length);

   response_obj2.fill_response = hypre_FillResponseIJOffProcVals;
   response_obj2.data1 = NULL;
   response_obj2.data2 = &send_proc_obj;

   max_response_size = 0;

   hypre_DataExchangeList(num_real_procs, ex_contact_procs,
                          void_contact_buf, ex_contact_vec_starts, obj_size_bytes,
                          0, &response_obj2, max_response_size, 5,
                          comm,  (void **) &response_buf, &response_buf_starts);

   /***********************************/

   hypre_TFree(response_buf);
   hypre_TFree(response_buf_starts);

   hypre_TFree(ex_contact_procs);
   hypre_TFree(void_contact_buf);
   hypre_TFree(ex_contact_vec_starts);

   /* Now we can unpack the send_proc_objects and either set or add to the
      vector data */

   num_recvs = send_proc_obj.length;

   /* alias */
   recv_data_ptr = send_proc_obj.v_elements;
   recv_starts = send_proc_obj.vec_starts;

   vector_data = hypre_VectorData(hypre_ParVectorLocalVector(par_vector));
   first_index = hypre_ParVectorFirstIndex(par_vector);

   for (i=0; i < num_recvs; i++)
   {
      indx = recv_starts[i];

      /* get the number of rows for this recv */
      memcpy( &row_count, recv_data_ptr, int_size);
      recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
      indx++;

      for (j=0; j < row_count; j++) /* for each row: unpack info */
      {
         /* row # */
         memcpy( &row, recv_data_ptr, int_size);
         recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
         indx++;

         /* value */
         memcpy( &value, recv_data_ptr, complex_size);
         recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
         indx++;

         /* NOTE(review): local offset subtracts both first_index and
            global_first_row - presumably first_index is a global row
            number that includes global_first_row; confirm against
            hypre_ParVectorFirstIndex */
         k = row - first_index - global_first_row;
         vector_data[k] += value;
      }
   }

   hypre_TFree(send_proc_obj.v_elements);
   hypre_TFree(send_proc_obj.vec_starts);

   return hypre_error_flag;
}
#endif
| 46,277 | 30.142665 | 94 | c |
AMG | AMG-master/IJ_mv/IJ_assumed_part.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/*----------------------------------------------------
* Functions for the IJ assumed partition fir IJ_Matrix
*-----------------------------------------------------*/
#include "_hypre_IJ_mv.h"
/*------------------------------------------------------------------
* hypre_IJMatrixCreateAssumedPartition -
* Each proc gets it own range. Then
* each needs to reconcile its actual range with its assumed
* range - the result is essentila a partition of its assumed range -
* this is the assumed partition.
*--------------------------------------------------------------------*/
HYPRE_Int
hypre_IJMatrixCreateAssumedPartition( hypre_IJMatrix *matrix)
{
   /* Build the assumed partition for the matrix rows: compute this
      task's assumed row range, then reconcile it with the actual row
      range taken from the row partitioning.  The result is cached in
      the matrix until the matrix is destroyed. */
   HYPRE_Int num_rows_global;
   HYPRE_Int first_row_global;
   HYPRE_Int my_id;
   HYPRE_Int actual_row_start = 0, actual_row_end = 0;
   HYPRE_Int *row_part = hypre_IJMatrixRowPartitioning(matrix);
   MPI_Comm comm;
   hypre_IJAssumedPart *apart;

   num_rows_global  = hypre_IJMatrixGlobalNumRows(matrix);
   first_row_global = hypre_IJMatrixGlobalFirstRow(matrix);
   comm             = hypre_IJMatrixComm(matrix);

   /* this task's actual range of rows */
   actual_row_start = row_part[0];
   actual_row_end   = row_part[1] - 1;

   hypre_MPI_Comm_rank(comm, &my_id);

   apart = hypre_CTAlloc(hypre_IJAssumedPart, 1);

   /* We want the row (not column) partitioning here, since this assumed
      partition serves off-processor row values.  This differs from the
      parcsr matrix, whose matvec needs the column partitioning. */
   hypre_GetAssumedPartitionRowRange(comm, my_id, first_row_global,
                                     num_rows_global,
                                     &(apart->row_start), &(apart->row_end));

   /* storage for the owners of this assumed range; start with room
      for 10 owners (must be >= 1) */
   apart->length = 0;
   apart->storage_length = 10;
   apart->proc_list      = hypre_TAlloc(HYPRE_Int, apart->storage_length);
   apart->row_start_list = hypre_TAlloc(HYPRE_Int, apart->storage_length);
   apart->row_end_list   = hypre_TAlloc(HYPRE_Int, apart->storage_length);

   /* reconcile the actual partition with the assumed partition */
   hypre_LocateAssummedPartition(comm, actual_row_start, actual_row_end,
                                 first_row_global, num_rows_global,
                                 apart, my_id);

   /* saved in the matrix data structure until the matrix is destroyed */
   hypre_IJMatrixAssumedPart(matrix) = apart;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------
* hypre_IJVectorCreateAssumedPartition -
* Essentially the same as for a matrix!
* Each proc gets it own range. Then
* each needs to reconcile its actual range with its assumed
* range - the result is essentila a partition of its assumed range -
* this is the assumed partition.
*--------------------------------------------------------------------*/
HYPRE_Int
hypre_IJVectorCreateAssumedPartition( hypre_IJVector *vector)
{
   /* Build the assumed partition for an IJ vector (essentially the same
      as for a matrix): compute this task's assumed range, then reconcile
      the actual range (from the vector partitioning) against it.  The
      result is cached in the vector until the vector is destroyed. */
   HYPRE_Int global_num, global_first_row;
   HYPRE_Int myid;
   HYPRE_Int start = 0, end = 0;
   HYPRE_Int *partitioning = hypre_IJVectorPartitioning(vector);
   MPI_Comm comm;
   hypre_IJAssumedPart *apart;

   global_num       = hypre_IJVectorGlobalNumRows(vector);
   global_first_row = hypre_IJVectorGlobalFirstRow(vector);

   /* `vector` is a hypre_IJVector*, so use the IJ accessor; the previous
      hypre_ParVectorComm(vector) only worked because both macros expand
      to (vector)->comm */
   comm = hypre_IJVectorComm(vector);

   /* find out my actual range of rows */
   start = partitioning[0];
   end   = partitioning[1] - 1;

   hypre_MPI_Comm_rank(comm, &myid);

   /* allocate space */
   apart = hypre_CTAlloc(hypre_IJAssumedPart, 1);

   /* get my assumed partitioning - we want partitioning of the vector that
      the matrix multiplies - so we use the col start and end */
   hypre_GetAssumedPartitionRowRange(comm, myid, global_first_row,
                                     global_num,
                                     &(apart->row_start), &(apart->row_end));

   /* storage for the owners of this assumed range; start with room for
      10 owners (must be >= 1) */
   apart->length = 0;
   apart->storage_length = 10;
   apart->proc_list      = hypre_TAlloc(HYPRE_Int, apart->storage_length);
   apart->row_start_list = hypre_TAlloc(HYPRE_Int, apart->storage_length);
   apart->row_end_list   = hypre_TAlloc(HYPRE_Int, apart->storage_length);

   /* now we want to reconcile our actual partition with the assumed
      partition */
   hypre_LocateAssummedPartition(comm, start, end, global_first_row,
                                 global_num, apart, myid);

   /* this partition will be saved in the vector data structure until the
      vector is destroyed */
   hypre_IJVectorAssumedPart(vector) = apart;

   return hypre_error_flag;
}
| 5,686 | 35.22293 | 97 | c |
AMG | AMG-master/IJ_mv/IJ_matrix.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* Header info for the hypre_IJMatrix structures
*
*****************************************************************************/
#ifndef hypre_IJ_MATRIX_HEADER
#define hypre_IJ_MATRIX_HEADER
/*--------------------------------------------------------------------------
* hypre_IJMatrix:
*--------------------------------------------------------------------------*/
/* The IJ matrix wrapper: holds the distribution info and a pointer to the
   underlying storage object (e.g. a ParCSR matrix). */
typedef struct hypre_IJMatrix_struct
{
MPI_Comm comm;
HYPRE_Int *row_partitioning; /* distribution of rows across processors */
HYPRE_Int *col_partitioning; /* distribution of columns */
HYPRE_Int object_type; /* Indicates the type of "object" */
void *object; /* Structure for storing local portion */
void *translator; /* optional storage_type specific structure
for holding additional local info */
void *assumed_part; /* IJMatrix assumed partition */
HYPRE_Int assemble_flag; /* indicates whether matrix has been
assembled */
HYPRE_Int global_first_row; /* these four data items are necessary */
HYPRE_Int global_first_col; /* to be able to avoid using the global */
HYPRE_Int global_num_rows; /* global partition */
HYPRE_Int global_num_cols;
HYPRE_Int omp_flag;
HYPRE_Int print_level;
} hypre_IJMatrix;
/*--------------------------------------------------------------------------
* Accessor macros: hypre_IJMatrix
*--------------------------------------------------------------------------*/
#define hypre_IJMatrixComm(matrix) ((matrix) -> comm)
#define hypre_IJMatrixRowPartitioning(matrix) ((matrix) -> row_partitioning)
#define hypre_IJMatrixColPartitioning(matrix) ((matrix) -> col_partitioning)
#define hypre_IJMatrixObjectType(matrix) ((matrix) -> object_type)
#define hypre_IJMatrixObject(matrix) ((matrix) -> object)
#define hypre_IJMatrixTranslator(matrix) ((matrix) -> translator)
#define hypre_IJMatrixAssumedPart(matrix) ((matrix) -> assumed_part)
#define hypre_IJMatrixAssembleFlag(matrix) ((matrix) -> assemble_flag)
#define hypre_IJMatrixGlobalFirstRow(matrix) ((matrix) -> global_first_row)
#define hypre_IJMatrixGlobalFirstCol(matrix) ((matrix) -> global_first_col)
#define hypre_IJMatrixGlobalNumRows(matrix) ((matrix) -> global_num_rows)
#define hypre_IJMatrixGlobalNumCols(matrix) ((matrix) -> global_num_cols)
#define hypre_IJMatrixOMPFlag(matrix) ((matrix) -> omp_flag)
#define hypre_IJMatrixPrintLevel(matrix) ((matrix) -> print_level)
/*--------------------------------------------------------------------------
* prototypes for operations on local objects
*--------------------------------------------------------------------------*/
#ifdef PETSC_AVAILABLE
/* IJMatrix_petsc.c */
/* accessor: extract the underlying PETSc Mat from an IJ matrix */
HYPRE_Int
hypre_GetIJMatrixParCSRMatrix( HYPRE_IJMatrix IJmatrix, Mat *reference );
#endif
#ifdef ISIS_AVAILABLE
/* IJMatrix_isis.c */
/* accessor: extract the underlying ISIS RowMatrix from an IJ matrix */
HYPRE_Int
hypre_GetIJMatrixISISMatrix( HYPRE_IJMatrix IJmatrix, RowMatrix *reference );
#endif
#endif
| 4,194 | 41.373737 | 87 | h |
AMG | AMG-master/IJ_mv/IJ_vector.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* Header info for the hypre_IJMatrix structures
*
*****************************************************************************/
#ifndef hypre_IJ_VECTOR_HEADER
#define hypre_IJ_VECTOR_HEADER
/*--------------------------------------------------------------------------
* hypre_IJVector:
*--------------------------------------------------------------------------*/
/* The IJ vector wrapper: holds the distribution info and a pointer to the
   underlying storage object (e.g. a ParVector). */
typedef struct hypre_IJVector_struct
{
MPI_Comm comm;
HYPRE_Int *partitioning; /* Indicates partitioning over tasks */
HYPRE_Int object_type; /* Indicates the type of "local storage" */
void *object; /* Structure for storing local portion */
void *translator; /* Structure for storing off processor
information */
void *assumed_part; /* IJ Vector assumed partition */
HYPRE_Int global_first_row; /* these two data items are necessary */
HYPRE_Int global_num_rows; /* to be able to avoid using the global */
/* global partition */
HYPRE_Int print_level;
} hypre_IJVector;
/*--------------------------------------------------------------------------
* Accessor macros: hypre_IJVector
*--------------------------------------------------------------------------*/
#define hypre_IJVectorComm(vector) ((vector) -> comm)
#define hypre_IJVectorPartitioning(vector) ((vector) -> partitioning)
#define hypre_IJVectorObjectType(vector) ((vector) -> object_type)
#define hypre_IJVectorObject(vector) ((vector) -> object)
#define hypre_IJVectorTranslator(vector) ((vector) -> translator)
#define hypre_IJVectorAssumedPart(vector) ((vector) -> assumed_part)
#define hypre_IJVectorGlobalFirstRow(vector) ((vector) -> global_first_row)
#define hypre_IJVectorGlobalNumRows(vector) ((vector) -> global_num_rows)
#define hypre_IJVectorPrintLevel(vector) ((vector) -> print_level)
/*--------------------------------------------------------------------------
* prototypes for operations on local objects
*--------------------------------------------------------------------------*/
/* #include "./internal_protos.h" */
#endif
| 3,239 | 36.674419 | 86 | h |
AMG | AMG-master/IJ_mv/_hypre_IJ_mv.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/*#include <HYPRE_config.h>*/
#ifndef hypre_IJ_HEADER
#define hypre_IJ_HEADER
#include "_hypre_utilities.h"
#include "seq_mv.h"
#include "_hypre_parcsr_mv.h"
#include "HYPRE_IJ_mv.h"
#ifdef __cplusplus
extern "C" {
#endif
/******************************************************************************
*
* Header info for Auxiliary Parallel CSR Matrix data structures
*
* Note: this matrix currently uses 0-based indexing.
*
*****************************************************************************/
#ifndef hypre_AUX_PARCSR_MATRIX_HEADER
#define hypre_AUX_PARCSR_MATRIX_HEADER
/*--------------------------------------------------------------------------
* Auxiliary Parallel CSR Matrix
*--------------------------------------------------------------------------*/
/* Auxiliary workspace used while assembling a ParCSR matrix: per-row
   staging arrays plus a stash for entries destined for other processors. */
typedef struct
{
HYPRE_Int local_num_rows; /* defines number of rows on this processor */
HYPRE_Int local_num_cols; /* defines number of cols of diag */
HYPRE_Int need_aux; /* if need_aux = 1, aux_j, aux_data are used to
generate the parcsr matrix (default),
for need_aux = 0, data is put directly into
parcsr structure (requires the knowledge of
offd_i and diag_i ) */
HYPRE_Int *row_length; /* row_length_diag[i] contains number of stored
elements in i-th row */
HYPRE_Int *row_space; /* row_space_diag[i] contains space allocated to
i-th row */
HYPRE_Int **aux_j; /* contains collected column indices */
HYPRE_Complex **aux_data; /* contains collected data */
HYPRE_Int *indx_diag; /* indx_diag[i] points to first empty space of portion
in diag_j , diag_data assigned to row i */
HYPRE_Int *indx_offd; /* indx_offd[i] points to first empty space of portion
in offd_j , offd_data assigned to row i */
HYPRE_Int max_off_proc_elmts; /* length of off processor stash set for
SetValues and AddToValues */
HYPRE_Int current_num_elmts; /* current no. of elements stored in stash */
HYPRE_Int off_proc_i_indx; /* pointer to first empty space in
set_off_proc_i_set */
HYPRE_Int *off_proc_i; /* length 2*num_off_procs_elmts, contains info pairs
(code, no. of elmts) where code contains global
row no. if SetValues, and (-global row no. -1)
if AddToValues */
HYPRE_Int *off_proc_j; /* contains column indices */
HYPRE_Complex *off_proc_data; /* contains corresponding data */
HYPRE_Int cancel_indx; /* number of elements that have to be deleted due
to setting values from another processor */
} hypre_AuxParCSRMatrix;
/*--------------------------------------------------------------------------
* Accessor functions for the Parallel CSR Matrix structure
*--------------------------------------------------------------------------*/
#define hypre_AuxParCSRMatrixLocalNumRows(matrix) ((matrix) -> local_num_rows)
#define hypre_AuxParCSRMatrixLocalNumCols(matrix) ((matrix) -> local_num_cols)
#define hypre_AuxParCSRMatrixNeedAux(matrix) ((matrix) -> need_aux)
#define hypre_AuxParCSRMatrixRowLength(matrix) ((matrix) -> row_length)
#define hypre_AuxParCSRMatrixRowSpace(matrix) ((matrix) -> row_space)
#define hypre_AuxParCSRMatrixAuxJ(matrix) ((matrix) -> aux_j)
#define hypre_AuxParCSRMatrixAuxData(matrix) ((matrix) -> aux_data)
#define hypre_AuxParCSRMatrixIndxDiag(matrix) ((matrix) -> indx_diag)
#define hypre_AuxParCSRMatrixIndxOffd(matrix) ((matrix) -> indx_offd)
#define hypre_AuxParCSRMatrixMaxOffProcElmts(matrix) ((matrix) -> max_off_proc_elmts)
#define hypre_AuxParCSRMatrixCurrentNumElmts(matrix) ((matrix) -> current_num_elmts)
#define hypre_AuxParCSRMatrixOffProcIIndx(matrix) ((matrix) -> off_proc_i_indx)
#define hypre_AuxParCSRMatrixOffProcI(matrix) ((matrix) -> off_proc_i)
#define hypre_AuxParCSRMatrixOffProcJ(matrix) ((matrix) -> off_proc_j)
#define hypre_AuxParCSRMatrixOffProcData(matrix) ((matrix) -> off_proc_data)
#define hypre_AuxParCSRMatrixCancelIndx(matrix) ((matrix) -> cancel_indx)
#endif
/******************************************************************************
*
* Header info for Auxiliary Parallel Vector data structures
*
* Note: this vector currently uses 0-based indexing.
*
*****************************************************************************/
#ifndef hypre_AUX_PAR_VECTOR_HEADER
#define hypre_AUX_PAR_VECTOR_HEADER
/*--------------------------------------------------------------------------
* Auxiliary Parallel Vector
*--------------------------------------------------------------------------*/
/* Auxiliary workspace used while assembling a parallel vector: a stash
   for values destined for entries owned by other processors. */
typedef struct
{
HYPRE_Int max_off_proc_elmts; /* allocated length of the off-processor
stash used by SetValues and AddToValues */
HYPRE_Int current_num_elmts; /* current no. of elements stored in stash */
HYPRE_Int *off_proc_i; /* indices of stashed entries (presumably global
row numbers -- verify against assemble code) */
HYPRE_Complex *off_proc_data; /* contains corresponding data */
HYPRE_Int cancel_indx; /* number of elements that have to be deleted due
to setting values from another processor */
} hypre_AuxParVector;
/*--------------------------------------------------------------------------
* Accessor functions for the Parallel Vector structure
*--------------------------------------------------------------------------*/
#define hypre_AuxParVectorMaxOffProcElmts(matrix) ((matrix) -> max_off_proc_elmts)
#define hypre_AuxParVectorCurrentNumElmts(matrix) ((matrix) -> current_num_elmts)
#define hypre_AuxParVectorOffProcI(matrix) ((matrix) -> off_proc_i)
#define hypre_AuxParVectorOffProcData(matrix) ((matrix) -> off_proc_data)
#define hypre_AuxParVectorCancelIndx(matrix) ((matrix) -> cancel_indx)
#endif
/******************************************************************************
*
* Header info for the hypre_IJMatrix structures
*
*****************************************************************************/
#ifndef hypre_IJ_MATRIX_HEADER
#define hypre_IJ_MATRIX_HEADER
/*--------------------------------------------------------------------------
* hypre_IJMatrix:
*--------------------------------------------------------------------------*/
/* The IJ matrix wrapper: holds the distribution info and a pointer to the
   underlying storage object (e.g. a ParCSR matrix). */
typedef struct hypre_IJMatrix_struct
{
MPI_Comm comm;
HYPRE_Int *row_partitioning; /* distribution of rows across processors */
HYPRE_Int *col_partitioning; /* distribution of columns */
HYPRE_Int object_type; /* Indicates the type of "object" */
void *object; /* Structure for storing local portion */
void *translator; /* optional storage_type specific structure
for holding additional local info */
void *assumed_part; /* IJMatrix assumed partition */
HYPRE_Int assemble_flag; /* indicates whether matrix has been
assembled */
HYPRE_Int global_first_row; /* these four data items are necessary */
HYPRE_Int global_first_col; /* to be able to avoid using the */
HYPRE_Int global_num_rows; /* global partition */
HYPRE_Int global_num_cols;
HYPRE_Int omp_flag;
HYPRE_Int print_level;
} hypre_IJMatrix;
/*--------------------------------------------------------------------------
* Accessor macros: hypre_IJMatrix
*--------------------------------------------------------------------------*/
#define hypre_IJMatrixComm(matrix) ((matrix) -> comm)
#define hypre_IJMatrixRowPartitioning(matrix) ((matrix) -> row_partitioning)
#define hypre_IJMatrixColPartitioning(matrix) ((matrix) -> col_partitioning)
#define hypre_IJMatrixObjectType(matrix) ((matrix) -> object_type)
#define hypre_IJMatrixObject(matrix) ((matrix) -> object)
#define hypre_IJMatrixTranslator(matrix) ((matrix) -> translator)
#define hypre_IJMatrixAssumedPart(matrix) ((matrix) -> assumed_part)
#define hypre_IJMatrixAssembleFlag(matrix) ((matrix) -> assemble_flag)
#define hypre_IJMatrixGlobalFirstRow(matrix) ((matrix) -> global_first_row)
#define hypre_IJMatrixGlobalFirstCol(matrix) ((matrix) -> global_first_col)
#define hypre_IJMatrixGlobalNumRows(matrix) ((matrix) -> global_num_rows)
#define hypre_IJMatrixGlobalNumCols(matrix) ((matrix) -> global_num_cols)
#define hypre_IJMatrixOMPFlag(matrix) ((matrix) -> omp_flag)
#define hypre_IJMatrixPrintLevel(matrix) ((matrix) -> print_level)
#endif
/******************************************************************************
*
* Header info for the hypre_IJMatrix structures
*
*****************************************************************************/
#ifndef hypre_IJ_VECTOR_HEADER
#define hypre_IJ_VECTOR_HEADER
/*--------------------------------------------------------------------------
* hypre_IJVector:
*--------------------------------------------------------------------------*/
/* The IJ vector wrapper: holds the distribution info and a pointer to the
   underlying storage object (e.g. a ParVector). */
typedef struct hypre_IJVector_struct
{
MPI_Comm comm;
HYPRE_Int *partitioning; /* Indicates partitioning over tasks */
HYPRE_Int object_type; /* Indicates the type of "local storage" */
void *object; /* Structure for storing local portion */
void *translator; /* Structure for storing off processor
information */
void *assumed_part; /* IJ Vector assumed partition */
HYPRE_Int global_first_row; /* these two data items are necessary */
HYPRE_Int global_num_rows; /* to be able to avoid using the global */
/* global partition */
HYPRE_Int print_level;
} hypre_IJVector;
/*--------------------------------------------------------------------------
* Accessor macros: hypre_IJVector
*--------------------------------------------------------------------------*/
#define hypre_IJVectorComm(vector) ((vector) -> comm)
#define hypre_IJVectorPartitioning(vector) ((vector) -> partitioning)
#define hypre_IJVectorObjectType(vector) ((vector) -> object_type)
#define hypre_IJVectorObject(vector) ((vector) -> object)
#define hypre_IJVectorTranslator(vector) ((vector) -> translator)
#define hypre_IJVectorAssumedPart(vector) ((vector) -> assumed_part)
#define hypre_IJVectorGlobalFirstRow(vector) ((vector) -> global_first_row)
#define hypre_IJVectorGlobalNumRows(vector) ((vector) -> global_num_rows)
#define hypre_IJVectorPrintLevel(vector) ((vector) -> print_level)
/*--------------------------------------------------------------------------
* prototypes for operations on local objects
*--------------------------------------------------------------------------*/
/* #include "./internal_protos.h" */
#endif
/* aux_parcsr_matrix.c */
HYPRE_Int hypre_AuxParCSRMatrixCreate ( hypre_AuxParCSRMatrix **aux_matrix , HYPRE_Int local_num_rows , HYPRE_Int local_num_cols , HYPRE_Int *sizes );
HYPRE_Int hypre_AuxParCSRMatrixDestroy ( hypre_AuxParCSRMatrix *matrix );
HYPRE_Int hypre_AuxParCSRMatrixInitialize ( hypre_AuxParCSRMatrix *matrix );
HYPRE_Int hypre_AuxParCSRMatrixSetMaxOffPRocElmts ( hypre_AuxParCSRMatrix *matrix , HYPRE_Int max_off_proc_elmts );
/* aux_par_vector.c */
HYPRE_Int hypre_AuxParVectorCreate ( hypre_AuxParVector **aux_vector );
HYPRE_Int hypre_AuxParVectorDestroy ( hypre_AuxParVector *vector );
HYPRE_Int hypre_AuxParVectorInitialize ( hypre_AuxParVector *vector );
HYPRE_Int hypre_AuxParVectorSetMaxOffPRocElmts ( hypre_AuxParVector *vector , HYPRE_Int max_off_proc_elmts );
/* IJ_assumed_part.c */
HYPRE_Int hypre_IJMatrixCreateAssumedPartition ( hypre_IJMatrix *matrix );
HYPRE_Int hypre_IJVectorCreateAssumedPartition ( hypre_IJVector *vector );
/* IJMatrix.c */
HYPRE_Int hypre_IJMatrixGetRowPartitioning ( HYPRE_IJMatrix matrix , HYPRE_Int **row_partitioning );
HYPRE_Int hypre_IJMatrixGetColPartitioning ( HYPRE_IJMatrix matrix , HYPRE_Int **col_partitioning );
HYPRE_Int hypre_IJMatrixSetObject ( HYPRE_IJMatrix matrix , void *object );
/* IJMatrix_parcsr.c */
HYPRE_Int hypre_IJMatrixCreateParCSR ( hypre_IJMatrix *matrix );
HYPRE_Int hypre_IJMatrixSetRowSizesParCSR ( hypre_IJMatrix *matrix , const HYPRE_Int *sizes );
HYPRE_Int hypre_IJMatrixSetDiagOffdSizesParCSR ( hypre_IJMatrix *matrix , const HYPRE_Int *diag_sizes , const HYPRE_Int *offdiag_sizes );
HYPRE_Int hypre_IJMatrixSetMaxOffProcElmtsParCSR ( hypre_IJMatrix *matrix , HYPRE_Int max_off_proc_elmts );
HYPRE_Int hypre_IJMatrixInitializeParCSR ( hypre_IJMatrix *matrix );
HYPRE_Int hypre_IJMatrixGetRowCountsParCSR ( hypre_IJMatrix *matrix , HYPRE_Int nrows , HYPRE_Int *rows , HYPRE_Int *ncols );
HYPRE_Int hypre_IJMatrixGetValuesParCSR ( hypre_IJMatrix *matrix , HYPRE_Int nrows , HYPRE_Int *ncols , HYPRE_Int *rows , HYPRE_Int *cols , HYPRE_Complex *values );
HYPRE_Int hypre_IJMatrixSetValuesParCSR ( hypre_IJMatrix *matrix , HYPRE_Int nrows , HYPRE_Int *ncols , const HYPRE_Int *rows , const HYPRE_Int *cols , const HYPRE_Complex *values );
HYPRE_Int hypre_IJMatrixAddToValuesParCSR ( hypre_IJMatrix *matrix , HYPRE_Int nrows , HYPRE_Int *ncols , const HYPRE_Int *rows , const HYPRE_Int *cols , const HYPRE_Complex *values );
HYPRE_Int hypre_IJMatrixDestroyParCSR ( hypre_IJMatrix *matrix );
HYPRE_Int hypre_IJMatrixAssembleOffProcValsParCSR ( hypre_IJMatrix *matrix , HYPRE_Int off_proc_i_indx , HYPRE_Int max_off_proc_elmts , HYPRE_Int current_num_elmts , HYPRE_Int *off_proc_i , HYPRE_Int *off_proc_j , HYPRE_Complex *off_proc_data );
HYPRE_Int hypre_FillResponseIJOffProcVals ( void *p_recv_contact_buf , HYPRE_Int contact_size , HYPRE_Int contact_proc , void *ro , MPI_Comm comm , void **p_send_response_buf , HYPRE_Int *response_message_size );
HYPRE_Int hypre_FindProc ( HYPRE_Int *list , HYPRE_Int value , HYPRE_Int list_length );
HYPRE_Int hypre_IJMatrixAssembleParCSR ( hypre_IJMatrix *matrix );
HYPRE_Int hypre_IJMatrixSetValuesOMPParCSR ( hypre_IJMatrix *matrix , HYPRE_Int nrows , HYPRE_Int *ncols , const HYPRE_Int *rows , const HYPRE_Int *cols , const HYPRE_Complex *values );
HYPRE_Int hypre_IJMatrixAddToValuesOMPParCSR ( hypre_IJMatrix *matrix , HYPRE_Int nrows , HYPRE_Int *ncols , const HYPRE_Int *rows , const HYPRE_Int *cols , const HYPRE_Complex *values );
/* IJVector.c */
HYPRE_Int hypre_IJVectorDistribute ( HYPRE_IJVector vector , const HYPRE_Int *vec_starts );
HYPRE_Int hypre_IJVectorZeroValues ( HYPRE_IJVector vector );
/* IJVector_parcsr.c */
HYPRE_Int hypre_IJVectorCreatePar ( hypre_IJVector *vector , HYPRE_Int *IJpartitioning );
HYPRE_Int hypre_IJVectorDestroyPar ( hypre_IJVector *vector );
HYPRE_Int hypre_IJVectorInitializePar ( hypre_IJVector *vector );
HYPRE_Int hypre_IJVectorSetMaxOffProcElmtsPar ( hypre_IJVector *vector , HYPRE_Int max_off_proc_elmts );
HYPRE_Int hypre_IJVectorDistributePar ( hypre_IJVector *vector , const HYPRE_Int *vec_starts );
HYPRE_Int hypre_IJVectorZeroValuesPar ( hypre_IJVector *vector );
HYPRE_Int hypre_IJVectorSetValuesPar ( hypre_IJVector *vector , HYPRE_Int num_values , const HYPRE_Int *indices , const HYPRE_Complex *values );
HYPRE_Int hypre_IJVectorAddToValuesPar ( hypre_IJVector *vector , HYPRE_Int num_values , const HYPRE_Int *indices , const HYPRE_Complex *values );
HYPRE_Int hypre_IJVectorAssemblePar ( hypre_IJVector *vector );
HYPRE_Int hypre_IJVectorGetValuesPar ( hypre_IJVector *vector , HYPRE_Int num_values , const HYPRE_Int *indices , HYPRE_Complex *values );
HYPRE_Int hypre_IJVectorAssembleOffProcValsPar ( hypre_IJVector *vector , HYPRE_Int max_off_proc_elmts , HYPRE_Int current_num_elmts , HYPRE_Int *off_proc_i , HYPRE_Complex *off_proc_data );
/* HYPRE_IJMatrix.c */
HYPRE_Int HYPRE_IJMatrixCreate ( MPI_Comm comm , HYPRE_Int ilower , HYPRE_Int iupper , HYPRE_Int jlower , HYPRE_Int jupper , HYPRE_IJMatrix *matrix );
HYPRE_Int HYPRE_IJMatrixDestroy ( HYPRE_IJMatrix matrix );
HYPRE_Int HYPRE_IJMatrixInitialize ( HYPRE_IJMatrix matrix );
HYPRE_Int HYPRE_IJMatrixSetPrintLevel ( HYPRE_IJMatrix matrix , HYPRE_Int print_level );
HYPRE_Int HYPRE_IJMatrixSetValues ( HYPRE_IJMatrix matrix , HYPRE_Int nrows , HYPRE_Int *ncols , const HYPRE_Int *rows , const HYPRE_Int *cols , const HYPRE_Complex *values );
HYPRE_Int HYPRE_IJMatrixAddToValues ( HYPRE_IJMatrix matrix , HYPRE_Int nrows , HYPRE_Int *ncols , const HYPRE_Int *rows , const HYPRE_Int *cols , const HYPRE_Complex *values );
HYPRE_Int HYPRE_IJMatrixAssemble ( HYPRE_IJMatrix matrix );
HYPRE_Int HYPRE_IJMatrixGetRowCounts ( HYPRE_IJMatrix matrix , HYPRE_Int nrows , HYPRE_Int *rows , HYPRE_Int *ncols );
HYPRE_Int HYPRE_IJMatrixGetValues ( HYPRE_IJMatrix matrix , HYPRE_Int nrows , HYPRE_Int *ncols , HYPRE_Int *rows , HYPRE_Int *cols , HYPRE_Complex *values );
HYPRE_Int HYPRE_IJMatrixSetObjectType ( HYPRE_IJMatrix matrix , HYPRE_Int type );
HYPRE_Int HYPRE_IJMatrixGetObjectType ( HYPRE_IJMatrix matrix , HYPRE_Int *type );
HYPRE_Int HYPRE_IJMatrixGetLocalRange ( HYPRE_IJMatrix matrix , HYPRE_Int *ilower , HYPRE_Int *iupper , HYPRE_Int *jlower , HYPRE_Int *jupper );
HYPRE_Int HYPRE_IJMatrixGetObject ( HYPRE_IJMatrix matrix , void **object );
HYPRE_Int HYPRE_IJMatrixSetRowSizes ( HYPRE_IJMatrix matrix , const HYPRE_Int *sizes );
HYPRE_Int HYPRE_IJMatrixSetDiagOffdSizes ( HYPRE_IJMatrix matrix , const HYPRE_Int *diag_sizes , const HYPRE_Int *offdiag_sizes );
HYPRE_Int HYPRE_IJMatrixSetMaxOffProcElmts ( HYPRE_IJMatrix matrix , HYPRE_Int max_off_proc_elmts );
HYPRE_Int HYPRE_IJMatrixRead ( const char *filename , MPI_Comm comm , HYPRE_Int type , HYPRE_IJMatrix *matrix_ptr );
HYPRE_Int HYPRE_IJMatrixPrint ( HYPRE_IJMatrix matrix , const char *filename );
HYPRE_Int HYPRE_IJMatrixSetOMPFlag ( HYPRE_IJMatrix matrix , HYPRE_Int omp_flag );
/* HYPRE_IJVector.c */
HYPRE_Int HYPRE_IJVectorCreate ( MPI_Comm comm , HYPRE_Int jlower , HYPRE_Int jupper , HYPRE_IJVector *vector );
HYPRE_Int HYPRE_IJVectorDestroy ( HYPRE_IJVector vector );
HYPRE_Int HYPRE_IJVectorInitialize ( HYPRE_IJVector vector );
HYPRE_Int HYPRE_IJVectorSetPrintLevel ( HYPRE_IJVector vector , HYPRE_Int print_level );
HYPRE_Int HYPRE_IJVectorSetValues ( HYPRE_IJVector vector , HYPRE_Int nvalues , const HYPRE_Int *indices , const HYPRE_Complex *values );
HYPRE_Int HYPRE_IJVectorAddToValues ( HYPRE_IJVector vector , HYPRE_Int nvalues , const HYPRE_Int *indices , const HYPRE_Complex *values );
HYPRE_Int HYPRE_IJVectorAssemble ( HYPRE_IJVector vector );
HYPRE_Int HYPRE_IJVectorGetValues ( HYPRE_IJVector vector , HYPRE_Int nvalues , const HYPRE_Int *indices , HYPRE_Complex *values );
HYPRE_Int HYPRE_IJVectorSetMaxOffProcElmts ( HYPRE_IJVector vector , HYPRE_Int max_off_proc_elmts );
HYPRE_Int HYPRE_IJVectorSetObjectType ( HYPRE_IJVector vector , HYPRE_Int type );
HYPRE_Int HYPRE_IJVectorGetObjectType ( HYPRE_IJVector vector , HYPRE_Int *type );
HYPRE_Int HYPRE_IJVectorGetLocalRange ( HYPRE_IJVector vector , HYPRE_Int *jlower , HYPRE_Int *jupper );
HYPRE_Int HYPRE_IJVectorGetObject ( HYPRE_IJVector vector , void **object );
HYPRE_Int HYPRE_IJVectorRead ( const char *filename , MPI_Comm comm , HYPRE_Int type , HYPRE_IJVector *vector_ptr );
HYPRE_Int HYPRE_IJVectorPrint ( HYPRE_IJVector vector , const char *filename );
#ifdef __cplusplus
}
#endif
#endif
| 20,542 | 53.203166 | 245 | h |
AMG | AMG-master/IJ_mv/aux_par_vector.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* Member functions for hypre_AuxParVector class.
*
*****************************************************************************/
#include "_hypre_IJ_mv.h"
#include "aux_par_vector.h"
/*--------------------------------------------------------------------------
* hypre_AuxParVectorCreate
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_AuxParVectorCreate( hypre_AuxParVector **aux_vector)
{
   /* Allocate a zero-initialized auxiliary vector and install defaults. */
   hypre_AuxParVector *new_vec = hypre_CTAlloc(hypre_AuxParVector, 1);

   /* No off-processor stash until a maximum size is requested. */
   hypre_AuxParVectorMaxOffProcElmts(new_vec) = 0;
   hypre_AuxParVectorCurrentNumElmts(new_vec) = 0;
   hypre_AuxParVectorOffProcI(new_vec)        = NULL;
   hypre_AuxParVectorOffProcData(new_vec)     = NULL;

   *aux_vector = new_vec;
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_AuxParVectorDestroy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_AuxParVectorDestroy( hypre_AuxParVector *vector )
{
   /* Release the off-processor stash (if allocated) and the vector itself.
      A NULL argument is a no-op. */
   if (!vector)
      return 0;

   if (hypre_AuxParVectorOffProcI(vector))
      hypre_TFree(hypre_AuxParVectorOffProcI(vector));
   if (hypre_AuxParVectorOffProcData(vector))
      hypre_TFree(hypre_AuxParVectorOffProcData(vector));
   hypre_TFree(vector);

   return 0;
}
/*--------------------------------------------------------------------------
* hypre_AuxParVectorInitialize
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_AuxParVectorInitialize( hypre_AuxParVector *vector )
{
   /* Allocate the stash used to collect values destined for other
      processors (SetValues/AddToValues), sized by the configured maximum. */
   HYPRE_Int stash_size = hypre_AuxParVectorMaxOffProcElmts(vector);

   if (stash_size <= 0)
      return 0;   /* nothing to allocate */

   hypre_AuxParVectorOffProcI(vector)    = hypre_CTAlloc(HYPRE_Int, stash_size);
   hypre_AuxParVectorOffProcData(vector) = hypre_CTAlloc(HYPRE_Complex, stash_size);
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_AuxParVectorSetMaxOffProcElmts
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_AuxParVectorSetMaxOffPRocElmts( hypre_AuxParVector *vector,
                                      HYPRE_Int max_off_proc_elmts )
{
   /* Record the off-processor stash capacity consumed by Initialize().
      NOTE: the "PRoc" capitalization in the function name is historical
      and kept for link compatibility with existing callers. */
   hypre_AuxParVectorMaxOffProcElmts(vector) = max_off_proc_elmts;
   return 0;
}
| 3,643 | 33.704762 | 81 | c |
AMG | AMG-master/IJ_mv/aux_par_vector.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* Header info for Auxiliary Parallel Vector data structures
*
* Note: this vector currently uses 0-based indexing.
*
*****************************************************************************/
#ifndef hypre_AUX_PAR_VECTOR_HEADER
#define hypre_AUX_PAR_VECTOR_HEADER
/*--------------------------------------------------------------------------
* Auxiliary Parallel Vector
*--------------------------------------------------------------------------*/
typedef struct
{
   HYPRE_Int max_off_proc_elmts; /* length of off processor stash for
                                    SetValues and AddToValues*/
   HYPRE_Int current_num_elmts; /* current no. of elements stored in stash */
   HYPRE_Int *off_proc_i; /* contains column indices */
   HYPRE_Complex *off_proc_data; /* contains corresponding data */
   HYPRE_Int cancel_indx; /* number of elements that have to be deleted due
                             to setting values from another processor */
} hypre_AuxParVector;
/*--------------------------------------------------------------------------
 * Accessor functions for the Parallel Vector structure
 * NOTE: the macro parameter is named "matrix" but these operate on a
 * hypre_AuxParVector; the name is purely cosmetic.
 *--------------------------------------------------------------------------*/
#define hypre_AuxParVectorMaxOffProcElmts(matrix) ((matrix) -> max_off_proc_elmts)
#define hypre_AuxParVectorCurrentNumElmts(matrix) ((matrix) -> current_num_elmts)
#define hypre_AuxParVectorOffProcI(matrix) ((matrix) -> off_proc_i)
#define hypre_AuxParVectorOffProcData(matrix) ((matrix) -> off_proc_data)
#define hypre_AuxParVectorCancelIndx(matrix) ((matrix) -> cancel_indx)
#endif
| 2,631 | 46.854545 | 83 | h |
AMG | AMG-master/IJ_mv/aux_parcsr_matrix.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* Member functions for hypre_AuxParCSRMatrix class.
*
*****************************************************************************/
#include "_hypre_IJ_mv.h"
#include "aux_parcsr_matrix.h"
/*--------------------------------------------------------------------------
* hypre_AuxParCSRMatrixCreate
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_AuxParCSRMatrixCreate( hypre_AuxParCSRMatrix **aux_matrix,
                             HYPRE_Int local_num_rows,
                             HYPRE_Int local_num_cols,
                             HYPRE_Int *sizes)
{
   /* Allocate a zero-initialized auxiliary matrix, record its local shape,
      and install defaults.  `sizes` (optional, may be NULL) supplies the
      caller's per-row space estimates and is stored as-is. */
   hypre_AuxParCSRMatrix *mat = hypre_CTAlloc(hypre_AuxParCSRMatrix, 1);

   hypre_AuxParCSRMatrixLocalNumRows(mat) = local_num_rows;
   hypre_AuxParCSRMatrixLocalNumCols(mat) = local_num_cols;
   hypre_AuxParCSRMatrixRowSpace(mat)     = sizes;

   /* Defaults: aux (row-wise) assembly mode, no work arrays yet. */
   hypre_AuxParCSRMatrixNeedAux(mat)         = 1;
   hypre_AuxParCSRMatrixMaxOffProcElmts(mat) = 0;
   hypre_AuxParCSRMatrixCurrentNumElmts(mat) = 0;
   hypre_AuxParCSRMatrixOffProcIIndx(mat)    = 0;
   hypre_AuxParCSRMatrixRowLength(mat) = NULL;
   hypre_AuxParCSRMatrixAuxJ(mat)      = NULL;
   hypre_AuxParCSRMatrixAuxData(mat)   = NULL;
   hypre_AuxParCSRMatrixIndxDiag(mat)  = NULL;
   hypre_AuxParCSRMatrixIndxOffd(mat)  = NULL;

   /* Stash for setting or adding off-processor values. */
   hypre_AuxParCSRMatrixOffProcI(mat)    = NULL;
   hypre_AuxParCSRMatrixOffProcJ(mat)    = NULL;
   hypre_AuxParCSRMatrixOffProcData(mat) = NULL;

   *aux_matrix = mat;
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_AuxParCSRMatrixDestroy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_AuxParCSRMatrixDestroy( hypre_AuxParCSRMatrix *matrix )
{
   /* Free every array owned by the auxiliary matrix, then the struct itself.
      Safe to call with NULL (no-op).  NOTE(review): row_space may have been
      supplied by the caller in Create(), but it is freed here regardless --
      ownership transfers to this structure on creation. */
   HYPRE_Int ierr=0;
   HYPRE_Int i;
   HYPRE_Int num_rows;
   if (matrix)
   {
      num_rows = hypre_AuxParCSRMatrixLocalNumRows(matrix);
      if (hypre_AuxParCSRMatrixRowLength(matrix))
         hypre_TFree(hypre_AuxParCSRMatrixRowLength(matrix));
      if (hypre_AuxParCSRMatrixRowSpace(matrix))
         hypre_TFree(hypre_AuxParCSRMatrixRowSpace(matrix));
      /* aux_j / aux_data are arrays of per-row arrays: free each row first. */
      if (hypre_AuxParCSRMatrixAuxJ(matrix))
      {
         for (i=0; i < num_rows; i++)
            hypre_TFree(hypre_AuxParCSRMatrixAuxJ(matrix)[i]);
         hypre_TFree(hypre_AuxParCSRMatrixAuxJ(matrix));
      }
      if (hypre_AuxParCSRMatrixAuxData(matrix))
      {
         for (i=0; i < num_rows; i++)
            hypre_TFree(hypre_AuxParCSRMatrixAuxData(matrix)[i]);
         hypre_TFree(hypre_AuxParCSRMatrixAuxData(matrix));
      }
      if (hypre_AuxParCSRMatrixIndxDiag(matrix))
         hypre_TFree(hypre_AuxParCSRMatrixIndxDiag(matrix));
      if (hypre_AuxParCSRMatrixIndxOffd(matrix))
         hypre_TFree(hypre_AuxParCSRMatrixIndxOffd(matrix));
      /* Off-processor stash (allocated only when max_off_proc_elmts > 0). */
      if (hypre_AuxParCSRMatrixOffProcI(matrix))
         hypre_TFree(hypre_AuxParCSRMatrixOffProcI(matrix));
      if (hypre_AuxParCSRMatrixOffProcJ(matrix))
         hypre_TFree(hypre_AuxParCSRMatrixOffProcJ(matrix));
      if (hypre_AuxParCSRMatrixOffProcData(matrix))
         hypre_TFree(hypre_AuxParCSRMatrixOffProcData(matrix));
      hypre_TFree(matrix);
   }
   return ierr;
}
/*--------------------------------------------------------------------------
* hypre_AuxParCSRMatrixInitialize
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_AuxParCSRMatrixInitialize( hypre_AuxParCSRMatrix *matrix )
{
   /* Allocate the work arrays for matrix assembly.
      Returns -1 for a negative row count, 0 otherwise (0 rows is a no-op). */
   HYPRE_Int local_num_rows = hypre_AuxParCSRMatrixLocalNumRows(matrix);
   HYPRE_Int *row_space = hypre_AuxParCSRMatrixRowSpace(matrix);
   HYPRE_Int max_off_proc_elmts = hypre_AuxParCSRMatrixMaxOffProcElmts(matrix);
   HYPRE_Int **aux_j;
   HYPRE_Complex **aux_data;
   HYPRE_Int i;
   if (local_num_rows < 0)
      return -1;
   if (local_num_rows == 0)
      return 0;
   /* allocate stash for setting or adding off processor values;
      off_proc_i holds (row, count) pairs, hence the factor of 2 */
   if (max_off_proc_elmts > 0)
   {
      hypre_AuxParCSRMatrixOffProcI(matrix) = hypre_CTAlloc(HYPRE_Int,
                                                            2*max_off_proc_elmts);
      hypre_AuxParCSRMatrixOffProcJ(matrix) = hypre_CTAlloc(HYPRE_Int,
                                                            max_off_proc_elmts);
      hypre_AuxParCSRMatrixOffProcData(matrix) = hypre_CTAlloc(HYPRE_Complex,
                                                               max_off_proc_elmts);
   }
   if (hypre_AuxParCSRMatrixNeedAux(matrix))
   {
      /* aux mode: one (index, value) buffer per row, grown independently */
      aux_j = hypre_CTAlloc(HYPRE_Int *, local_num_rows);
      aux_data = hypre_CTAlloc(HYPRE_Complex *, local_num_rows);
      if (!hypre_AuxParCSRMatrixRowLength(matrix))
         hypre_AuxParCSRMatrixRowLength(matrix) =
            hypre_CTAlloc(HYPRE_Int, local_num_rows);
      if (row_space)
      {
         /* caller supplied per-row capacities: honor them exactly */
         for (i=0; i < local_num_rows; i++)
         {
            aux_j[i] = hypre_CTAlloc(HYPRE_Int, row_space[i]);
            aux_data[i] = hypre_CTAlloc(HYPRE_Complex, row_space[i]);
         }
      }
      else
      {
         /* no capacities given: start every row with a default of 30 slots */
         row_space = hypre_CTAlloc(HYPRE_Int, local_num_rows);
         for (i=0; i < local_num_rows; i++)
         {
            row_space[i] = 30;
            aux_j[i] = hypre_CTAlloc(HYPRE_Int, 30);
            aux_data[i] = hypre_CTAlloc(HYPRE_Complex, 30);
         }
         hypre_AuxParCSRMatrixRowSpace(matrix) = row_space;
      }
      hypre_AuxParCSRMatrixAuxJ(matrix) = aux_j;
      hypre_AuxParCSRMatrixAuxData(matrix) = aux_data;
   }
   else
   {
      /* direct mode: data goes straight into the parcsr structure; only
         per-row fill cursors into diag/offd are needed */
      hypre_AuxParCSRMatrixIndxDiag(matrix) = hypre_CTAlloc(HYPRE_Int,local_num_rows);
      hypre_AuxParCSRMatrixIndxOffd(matrix) = hypre_CTAlloc(HYPRE_Int,local_num_rows);
   }
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_AuxParCSRMatrixSetMaxOffProcElmts
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_AuxParCSRMatrixSetMaxOffPRocElmts( hypre_AuxParCSRMatrix *matrix,
                                         HYPRE_Int max_off_proc_elmts )
{
   /* Record the off-processor stash capacity consumed by Initialize().
      NOTE: the "PRoc" capitalization in the function name is historical
      and kept for link compatibility with existing callers. */
   hypre_AuxParCSRMatrixMaxOffProcElmts(matrix) = max_off_proc_elmts;
   return 0;
}
| 7,057 | 34.646465 | 86 | c |
AMG | AMG-master/IJ_mv/aux_parcsr_matrix.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* Header info for Auxiliary Parallel CSR Matrix data structures
*
* Note: this matrix currently uses 0-based indexing.
*
*****************************************************************************/
#ifndef hypre_AUX_PARCSR_MATRIX_HEADER
#define hypre_AUX_PARCSR_MATRIX_HEADER
/*--------------------------------------------------------------------------
* Auxiliary Parallel CSR Matrix
*--------------------------------------------------------------------------*/
typedef struct
{
   HYPRE_Int local_num_rows; /* defines number of rows on this processor */
   HYPRE_Int local_num_cols; /* defines number of cols of diag */
   HYPRE_Int need_aux; /* if need_aux = 1, aux_j, aux_data are used to
                          generate the parcsr matrix (default),
                          for need_aux = 0, data is put directly into
                          parcsr structure (requires the knowledge of
                          offd_i and diag_i ) */
   HYPRE_Int *row_length; /* row_length_diag[i] contains number of stored
                             elements in i-th row */
   HYPRE_Int *row_space; /* row_space_diag[i] contains space allocated to
                            i-th row */
   HYPRE_Int **aux_j; /* contains collected column indices */
   HYPRE_Complex **aux_data; /* contains collected data */
   HYPRE_Int *indx_diag; /* indx_diag[i] points to first empty space of portion
                            in diag_j , diag_data assigned to row i */
   HYPRE_Int *indx_offd; /* indx_offd[i] points to first empty space of portion
                            in offd_j , offd_data assigned to row i */
   HYPRE_Int max_off_proc_elmts; /* length of off processor stash set for
                                    SetValues and AddToValues */
   HYPRE_Int current_num_elmts; /* current no. of elements stored in stash */
   HYPRE_Int off_proc_i_indx; /* pointer to first empty space in
                                 set_off_proc_i_set */
   HYPRE_Int *off_proc_i; /* length 2*num_off_procs_elmts, contains info pairs
                             (code, no. of elmts) where code contains global
                             row no. if SetValues, and (-global row no. -1)
                             if AddToValues*/
   HYPRE_Int *off_proc_j; /* contains column indices */
   HYPRE_Complex *off_proc_data; /* contains corresponding data */
   HYPRE_Int cancel_indx; /* number of elements that have to be deleted due
                             to setting values from another processor */
} hypre_AuxParCSRMatrix;
/*--------------------------------------------------------------------------
 * Accessor functions for the Parallel CSR Matrix structure
 *--------------------------------------------------------------------------*/
#define hypre_AuxParCSRMatrixLocalNumRows(matrix) ((matrix) -> local_num_rows)
#define hypre_AuxParCSRMatrixLocalNumCols(matrix) ((matrix) -> local_num_cols)
#define hypre_AuxParCSRMatrixNeedAux(matrix) ((matrix) -> need_aux)
#define hypre_AuxParCSRMatrixRowLength(matrix) ((matrix) -> row_length)
#define hypre_AuxParCSRMatrixRowSpace(matrix) ((matrix) -> row_space)
#define hypre_AuxParCSRMatrixAuxJ(matrix) ((matrix) -> aux_j)
#define hypre_AuxParCSRMatrixAuxData(matrix) ((matrix) -> aux_data)
#define hypre_AuxParCSRMatrixIndxDiag(matrix) ((matrix) -> indx_diag)
#define hypre_AuxParCSRMatrixIndxOffd(matrix) ((matrix) -> indx_offd)
#define hypre_AuxParCSRMatrixMaxOffProcElmts(matrix) ((matrix) -> max_off_proc_elmts)
#define hypre_AuxParCSRMatrixCurrentNumElmts(matrix) ((matrix) -> current_num_elmts)
#define hypre_AuxParCSRMatrixOffProcIIndx(matrix) ((matrix) -> off_proc_i_indx)
#define hypre_AuxParCSRMatrixOffProcI(matrix) ((matrix) -> off_proc_i)
#define hypre_AuxParCSRMatrixOffProcJ(matrix) ((matrix) -> off_proc_j)
#define hypre_AuxParCSRMatrixOffProcData(matrix) ((matrix) -> off_proc_data)
#define hypre_AuxParCSRMatrixCancelIndx(matrix) ((matrix) -> cancel_indx)
#endif
| 5,131 | 53.021053 | 86 | h |
AMG | AMG-master/IJ_mv/headers.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "_hypre_IJ_mv.h"
#include "HYPRE_IJ_mv.h"
| 1,038 | 36.107143 | 81 | h |
AMG | AMG-master/krylov/HYPRE_MatvecFunctions.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#ifndef HYPRE_MATVEC_FUNCTIONS
#define HYPRE_MATVEC_FUNCTIONS
/* Table of matrix-vector product callbacks used by the Krylov solvers to
   stay independent of the underlying matrix/vector storage format. */
typedef struct
{
   void* (*MatvecCreate) ( void *A, void *x );
   /* y := alpha*A*x + beta*y */
   HYPRE_Int (*Matvec) ( void *matvec_data, HYPRE_Complex alpha, void *A,
                         void *x, HYPRE_Complex beta, void *y );
   HYPRE_Int (*MatvecDestroy) ( void *matvec_data );
   /* multivector variants of the three entries above */
   void* (*MatMultiVecCreate) ( void *A, void *x );
   HYPRE_Int (*MatMultiVec) ( void *data, HYPRE_Complex alpha, void *A,
                              void *x, HYPRE_Complex beta, void *y );
   HYPRE_Int (*MatMultiVecDestroy) ( void *data );
} HYPRE_MatvecFunctions;
#endif
| 1,565 | 42.5 | 81 | h |
AMG | AMG-master/krylov/HYPRE_gmres.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* HYPRE_GMRES interface
*
*****************************************************************************/
#include "krylov.h"
/*--------------------------------------------------------------------------
* HYPRE_GMRESDestroy
*--------------------------------------------------------------------------*/
/* to do, not trivial */
/*
HYPRE_Int
HYPRE_ParCSRGMRESDestroy( HYPRE_Solver solver )
{
return( hypre_GMRESDestroy( (void *) solver ) );
}
*/
/*--------------------------------------------------------------------------
* HYPRE_GMRESSetup
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_GMRESSetup( HYPRE_Solver solver,
                  HYPRE_Matrix A,
                  HYPRE_Vector b,
                  HYPRE_Vector x )
{
   /* Thin wrapper forwarding to the generic GMRES setup.  The explicit
      (void *) casts follow the convention used by every other wrapper in
      this file; the conversion is implicit in C, so this is purely a
      consistency/readability change with identical behavior. */
   return( hypre_GMRESSetup( (void *) solver,
                             (void *) A,
                             (void *) b,
                             (void *) x ) );
}
/*--------------------------------------------------------------------------
* HYPRE_GMRESSolve
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_GMRESSolve( HYPRE_Solver solver,
                  HYPRE_Matrix A,
                  HYPRE_Vector b,
                  HYPRE_Vector x )
{
   /* Thin wrapper forwarding to the generic GMRES solve.  The explicit
      (void *) casts follow the convention used by every other wrapper in
      this file; the conversion is implicit in C, so this is purely a
      consistency/readability change with identical behavior. */
   return( hypre_GMRESSolve( (void *) solver,
                             (void *) A,
                             (void *) b,
                             (void *) x ) );
}
/*--------------------------------------------------------------------------
* HYPRE_GMRESSetKDim, HYPRE_GMRESGetKDim
*--------------------------------------------------------------------------*/
/* The routines below are trivial forwarding wrappers: each casts the opaque
   solver handle to void * and calls the corresponding hypre_GMRES*
   implementation.  Set* routines store a parameter, Get* routines read it
   back through the out-pointer. */
HYPRE_Int
HYPRE_GMRESSetKDim( HYPRE_Solver solver,
                    HYPRE_Int k_dim )
{
   return( hypre_GMRESSetKDim( (void *) solver, k_dim ) );
}
HYPRE_Int
HYPRE_GMRESGetKDim( HYPRE_Solver solver,
                    HYPRE_Int * k_dim )
{
   return( hypre_GMRESGetKDim( (void *) solver, k_dim ) );
}
/*--------------------------------------------------------------------------
 * HYPRE_GMRESSetTol, HYPRE_GMRESGetTol
 *--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_GMRESSetTol( HYPRE_Solver solver,
                   HYPRE_Real tol )
{
   return( hypre_GMRESSetTol( (void *) solver, tol ) );
}
HYPRE_Int
HYPRE_GMRESGetTol( HYPRE_Solver solver,
                   HYPRE_Real * tol )
{
   return( hypre_GMRESGetTol( (void *) solver, tol ) );
}
/*--------------------------------------------------------------------------
 * HYPRE_GMRESSetAbsoluteTol, HYPRE_GMRESGetAbsoluteTol
 *--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_GMRESSetAbsoluteTol( HYPRE_Solver solver,
                           HYPRE_Real a_tol )
{
   return( hypre_GMRESSetAbsoluteTol( (void *) solver, a_tol ) );
}
HYPRE_Int
HYPRE_GMRESGetAbsoluteTol( HYPRE_Solver solver,
                           HYPRE_Real * a_tol )
{
   return( hypre_GMRESGetAbsoluteTol( (void *) solver, a_tol ) );
}
/*--------------------------------------------------------------------------
 * HYPRE_GMRESSetConvergenceFactorTol, HYPRE_GMRESGetConvergenceFactorTol
 *--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_GMRESSetConvergenceFactorTol( HYPRE_Solver solver,
                                    HYPRE_Real cf_tol )
{
   return( hypre_GMRESSetConvergenceFactorTol( (void *) solver, cf_tol ) );
}
HYPRE_Int
HYPRE_GMRESGetConvergenceFactorTol( HYPRE_Solver solver,
                                    HYPRE_Real * cf_tol )
{
   return( hypre_GMRESGetConvergenceFactorTol( (void *) solver, cf_tol ) );
}
/*--------------------------------------------------------------------------
* HYPRE_GMRESSetMinIter, HYPRE_GMRESGetMinIter
*--------------------------------------------------------------------------*/
/* Forwarding wrappers for iteration-count and residual-check parameters;
   each casts the opaque handle to void * and delegates to hypre_GMRES*. */
HYPRE_Int
HYPRE_GMRESSetMinIter( HYPRE_Solver solver,
                       HYPRE_Int min_iter )
{
   return( hypre_GMRESSetMinIter( (void *) solver, min_iter ) );
}
HYPRE_Int
HYPRE_GMRESGetMinIter( HYPRE_Solver solver,
                       HYPRE_Int * min_iter )
{
   return( hypre_GMRESGetMinIter( (void *) solver, min_iter ) );
}
/*--------------------------------------------------------------------------
 * HYPRE_GMRESSetMaxIter, HYPRE_GMRESGetMaxIter
 *--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_GMRESSetMaxIter( HYPRE_Solver solver,
                       HYPRE_Int max_iter )
{
   return( hypre_GMRESSetMaxIter( (void *) solver, max_iter ) );
}
HYPRE_Int
HYPRE_GMRESGetMaxIter( HYPRE_Solver solver,
                       HYPRE_Int * max_iter )
{
   return( hypre_GMRESGetMaxIter( (void *) solver, max_iter ) );
}
/*--------------------------------------------------------------------------
 * HYPRE_GMRESSetStopCrit, HYPRE_GMRESGetStopCrit - OBSOLETE
 * (kept only for backward compatibility with old callers)
 *--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_GMRESSetStopCrit( HYPRE_Solver solver,
                        HYPRE_Int stop_crit )
{
   return( hypre_GMRESSetStopCrit( (void *) solver, stop_crit ) );
}
HYPRE_Int
HYPRE_GMRESGetStopCrit( HYPRE_Solver solver,
                        HYPRE_Int * stop_crit )
{
   return( hypre_GMRESGetStopCrit( (void *) solver, stop_crit ) );
}
/*--------------------------------------------------------------------------
 * HYPRE_GMRESSetRelChange, HYPRE_GMRESGetRelChange
 *--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_GMRESSetRelChange( HYPRE_Solver solver,
                         HYPRE_Int rel_change )
{
   return( hypre_GMRESSetRelChange( (void *) solver, rel_change ) );
}
HYPRE_Int
HYPRE_GMRESGetRelChange( HYPRE_Solver solver,
                         HYPRE_Int * rel_change )
{
   return( hypre_GMRESGetRelChange( (void *) solver, rel_change ) );
}
/*--------------------------------------------------------------------------
 * HYPRE_GMRESSetSkipRealResidualCheck, HYPRE_GMRESGetSkipRealResidualCheck
 *--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_GMRESSetSkipRealResidualCheck( HYPRE_Solver solver,
                                     HYPRE_Int skip_real_r_check )
{
   return( hypre_GMRESSetSkipRealResidualCheck( (void *) solver, skip_real_r_check ) );
}
HYPRE_Int
HYPRE_GMRESGetSkipRealResidualCheck( HYPRE_Solver solver,
                                     HYPRE_Int *skip_real_r_check )
{
   return( hypre_GMRESGetSkipRealResidualCheck( (void *) solver, skip_real_r_check ) );
}
/*--------------------------------------------------------------------------
* HYPRE_GMRESSetPrecond
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_GMRESSetPrecond( HYPRE_Solver solver,
                       HYPRE_PtrToSolverFcn precond,
                       HYPRE_PtrToSolverFcn precond_setup,
                       HYPRE_Solver precond_solver )
{
   /* Install the preconditioner solve/setup callbacks and their data.
      The function-pointer casts re-type the public HYPRE_PtrToSolverFcn
      (typed handles) to the generic (void*,void*,void*,void*) signature
      used internally by hypre_GMRESSetPrecond. */
   return( hypre_GMRESSetPrecond( (void *) solver,
                                  (HYPRE_Int (*)(void*, void*, void*, void*))precond,
                                  (HYPRE_Int (*)(void*, void*, void*, void*))precond_setup,
                                  (void *) precond_solver ) );
}
/*--------------------------------------------------------------------------
 * HYPRE_GMRESGetPrecond
 *--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_GMRESGetPrecond( HYPRE_Solver solver,
                       HYPRE_Solver *precond_data_ptr )
{
   /* Retrieve the preconditioner data object installed by SetPrecond. */
   return( hypre_GMRESGetPrecond( (void *) solver,
                                  (HYPRE_Solver *) precond_data_ptr ) );
}
/*--------------------------------------------------------------------------
* HYPRE_GMRESSetPrintLevel, HYPRE_GMRESGetPrintLevel
*--------------------------------------------------------------------------*/
/* Forwarding wrappers for print-level and logging parameters. */
HYPRE_Int
HYPRE_GMRESSetPrintLevel( HYPRE_Solver solver,
                          HYPRE_Int level )
{
   return( hypre_GMRESSetPrintLevel( (void *) solver, level ) );
}
HYPRE_Int
HYPRE_GMRESGetPrintLevel( HYPRE_Solver solver,
                          HYPRE_Int * level )
{
   return( hypre_GMRESGetPrintLevel( (void *) solver, level ) );
}
/*--------------------------------------------------------------------------
 * HYPRE_GMRESSetLogging, HYPRE_GMRESGetLogging
 *--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_GMRESSetLogging( HYPRE_Solver solver,
                       HYPRE_Int level )
{
   return( hypre_GMRESSetLogging( (void *) solver, level ) );
}
HYPRE_Int
HYPRE_GMRESGetLogging( HYPRE_Solver solver,
                       HYPRE_Int * level )
{
   return( hypre_GMRESGetLogging( (void *) solver, level ) );
}
/*--------------------------------------------------------------------------
* HYPRE_GMRESGetNumIterations
*--------------------------------------------------------------------------*/
/* Forwarding wrappers that report solver results through out-parameters. */
HYPRE_Int
HYPRE_GMRESGetNumIterations( HYPRE_Solver solver,
                             HYPRE_Int *num_iterations )
{
   return( hypre_GMRESGetNumIterations( (void *) solver, num_iterations ) );
}
/*--------------------------------------------------------------------------
 * HYPRE_GMRESGetConverged
 *--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_GMRESGetConverged( HYPRE_Solver solver,
                         HYPRE_Int *converged )
{
   return( hypre_GMRESGetConverged( (void *) solver, converged ) );
}
/*--------------------------------------------------------------------------
 * HYPRE_GMRESGetFinalRelativeResidualNorm
 *--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_GMRESGetFinalRelativeResidualNorm( HYPRE_Solver solver,
                                         HYPRE_Real *norm )
{
   return( hypre_GMRESGetFinalRelativeResidualNorm( (void *) solver, norm ) );
}
/*--------------------------------------------------------------------------
 * HYPRE_GMRESGetResidual
 *--------------------------------------------------------------------------*/
HYPRE_Int HYPRE_GMRESGetResidual( HYPRE_Solver solver, void **residual )
{
   /* returns a pointer to the residual vector (no copy is made) */
   return hypre_GMRESGetResidual( (void *) solver, residual );
}
| 11,615 | 33.064516 | 87 | c |
AMG | AMG-master/krylov/HYPRE_krylov.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#ifndef HYPRE_KRYLOV_HEADER
#define HYPRE_KRYLOV_HEADER
#include "HYPRE_utilities.h"
#ifdef __cplusplus
extern "C" {
#endif
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/**
* @name Krylov Solvers
*
* These solvers support many of the matrix/vector storage schemes in hypre.
* They should be used in conjunction with the storage-specific interfaces,
* particularly the specific Create() and Destroy() functions.
*
* @memo A basic interface for Krylov solvers
**/
/*@{*/
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/**
* @name Krylov Solvers
**/
/*@{*/
#ifndef HYPRE_SOLVER_STRUCT
#define HYPRE_SOLVER_STRUCT
struct hypre_Solver_struct;
/**
* The solver object.
**/
typedef struct hypre_Solver_struct *HYPRE_Solver;
#endif
#ifndef HYPRE_MATRIX_STRUCT
#define HYPRE_MATRIX_STRUCT
struct hypre_Matrix_struct;
/**
* The matrix object.
**/
typedef struct hypre_Matrix_struct *HYPRE_Matrix;
#endif
#ifndef HYPRE_VECTOR_STRUCT
#define HYPRE_VECTOR_STRUCT
struct hypre_Vector_struct;
/**
* The vector object.
**/
typedef struct hypre_Vector_struct *HYPRE_Vector;
#endif
/* Generic solver callback: (solver, matrix, rhs b, solution x).
   Used for both the solve and setup entry points of preconditioners. */
typedef HYPRE_Int (*HYPRE_PtrToSolverFcn)(HYPRE_Solver,
                                          HYPRE_Matrix,
                                          HYPRE_Vector,
                                          HYPRE_Vector);
#ifndef HYPRE_MODIFYPC
#define HYPRE_MODIFYPC
/* Callback invoked to adjust the preconditioner between iterations:
   (precond_data, iteration number, relative residual norm). */
typedef HYPRE_Int (*HYPRE_PtrToModifyPCFcn)(HYPRE_Solver,
                                            HYPRE_Int,
                                            HYPRE_Real);
#endif
/*@}*/
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/**
* @name PCG Solver
**/
/*@{*/
/**
* Prepare to solve the system. The coefficient data in {\tt b} and {\tt x} is
* ignored here, but information about the layout of the data may be used.
**/
HYPRE_Int HYPRE_PCGSetup(HYPRE_Solver solver,
HYPRE_Matrix A,
HYPRE_Vector b,
HYPRE_Vector x);
/**
* Solve the system.
**/
HYPRE_Int HYPRE_PCGSolve(HYPRE_Solver solver,
HYPRE_Matrix A,
HYPRE_Vector b,
HYPRE_Vector x);
/**
* (Optional) Set the relative convergence tolerance.
**/
HYPRE_Int HYPRE_PCGSetTol(HYPRE_Solver solver,
HYPRE_Real tol);
/**
* (Optional) Set the absolute convergence tolerance (default is
* 0). If one desires the convergence test to check the absolute
* convergence tolerance {\it only}, then set the relative convergence
* tolerance to 0.0. (The default convergence test is $ <C*r,r> \leq$
* max(relative$\_$tolerance$^{2} \ast <C*b, b>$, absolute$\_$tolerance$^2$).)
**/
HYPRE_Int HYPRE_PCGSetAbsoluteTol(HYPRE_Solver solver,
HYPRE_Real a_tol);
/**
* (Optional) Set a residual-based convergence tolerance which checks if
* $\|r_{old}-r_{new}\| < rtol \|b\|$. This is useful when trying to converge to
* very low relative and/or absolute tolerances, in order to bail-out before
* roundoff errors affect the approximation.
**/
HYPRE_Int HYPRE_PCGSetResidualTol(HYPRE_Solver solver,
HYPRE_Real rtol);
/*
* RE-VISIT
**/
HYPRE_Int HYPRE_PCGSetAbsoluteTolFactor(HYPRE_Solver solver, HYPRE_Real abstolf);
/*
* RE-VISIT
**/
HYPRE_Int HYPRE_PCGSetConvergenceFactorTol(HYPRE_Solver solver, HYPRE_Real cf_tol);
/*
* RE-VISIT
**/
HYPRE_Int HYPRE_PCGSetStopCrit(HYPRE_Solver solver, HYPRE_Int stop_crit);
/**
* (Optional) Set maximum number of iterations.
**/
HYPRE_Int HYPRE_PCGSetMaxIter(HYPRE_Solver solver,
HYPRE_Int max_iter);
/**
* (Optional) Use the two-norm in stopping criteria.
**/
HYPRE_Int HYPRE_PCGSetTwoNorm(HYPRE_Solver solver,
HYPRE_Int two_norm);
/**
* (Optional) Additionally require that the relative difference in
* successive iterates be small.
**/
HYPRE_Int HYPRE_PCGSetRelChange(HYPRE_Solver solver,
HYPRE_Int rel_change);
/**
* (Optional) Recompute the residual at the end to double-check convergence.
**/
HYPRE_Int HYPRE_PCGSetRecomputeResidual(HYPRE_Solver solver,
HYPRE_Int recompute_residual);
/**
* (Optional) Periodically recompute the residual while iterating.
**/
HYPRE_Int HYPRE_PCGSetRecomputeResidualP(HYPRE_Solver solver,
HYPRE_Int recompute_residual_p);
/**
* (Optional) Set the preconditioner to use.
**/
HYPRE_Int HYPRE_PCGSetPrecond(HYPRE_Solver solver,
HYPRE_PtrToSolverFcn precond,
HYPRE_PtrToSolverFcn precond_setup,
HYPRE_Solver precond_solver);
/**
* (Optional) Set the amount of logging to do.
**/
HYPRE_Int HYPRE_PCGSetLogging(HYPRE_Solver solver,
HYPRE_Int logging);
/**
* (Optional) Set the amount of printing to do to the screen.
**/
HYPRE_Int HYPRE_PCGSetPrintLevel(HYPRE_Solver solver,
HYPRE_Int level);
/**
* Return the number of iterations taken.
**/
HYPRE_Int HYPRE_PCGGetNumIterations(HYPRE_Solver solver,
HYPRE_Int *num_iterations);
/**
* Return the norm of the final relative residual.
**/
HYPRE_Int HYPRE_PCGGetFinalRelativeResidualNorm(HYPRE_Solver solver,
HYPRE_Real *norm);
/**
* Return the residual.
**/
HYPRE_Int HYPRE_PCGGetResidual(HYPRE_Solver solver,
void **residual);
/**
**/
HYPRE_Int HYPRE_PCGGetTol(HYPRE_Solver solver,
HYPRE_Real *tol);
/**
**/
HYPRE_Int HYPRE_PCGGetResidualTol(HYPRE_Solver solver,
HYPRE_Real *rtol);
/*
* RE-VISIT
**/
HYPRE_Int HYPRE_PCGGetAbsoluteTolFactor(HYPRE_Solver solver, HYPRE_Real *abstolf);
/*
* RE-VISIT
**/
HYPRE_Int HYPRE_PCGGetConvergenceFactorTol(HYPRE_Solver solver, HYPRE_Real *cf_tol);
/*
* RE-VISIT
**/
HYPRE_Int HYPRE_PCGGetStopCrit(HYPRE_Solver solver, HYPRE_Int *stop_crit);
/**
**/
HYPRE_Int HYPRE_PCGGetMaxIter(HYPRE_Solver solver,
HYPRE_Int *max_iter);
/**
**/
HYPRE_Int HYPRE_PCGGetTwoNorm(HYPRE_Solver solver,
HYPRE_Int *two_norm);
/**
**/
HYPRE_Int HYPRE_PCGGetRelChange(HYPRE_Solver solver,
HYPRE_Int *rel_change);
/**
 * NOTE: this is the getter paired with HYPRE_GMRESSetSkipRealResidualCheck;
 * it is listed here out of place, in the PCG section.
 **/
HYPRE_Int HYPRE_GMRESGetSkipRealResidualCheck(HYPRE_Solver solver,
HYPRE_Int *skip_real_r_check);
/**
**/
HYPRE_Int HYPRE_PCGGetPrecond(HYPRE_Solver solver,
HYPRE_Solver *precond_data_ptr);
/**
**/
HYPRE_Int HYPRE_PCGGetLogging(HYPRE_Solver solver,
HYPRE_Int *level);
/**
**/
HYPRE_Int HYPRE_PCGGetPrintLevel(HYPRE_Solver solver,
HYPRE_Int *level);
/**
**/
HYPRE_Int HYPRE_PCGGetConverged(HYPRE_Solver solver,
HYPRE_Int *converged);
/*@}*/
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/**
* @name GMRES Solver
**/
/*@{*/
/**
* Prepare to solve the system. The coefficient data in {\tt b} and {\tt x} is
* ignored here, but information about the layout of the data may be used.
**/
HYPRE_Int HYPRE_GMRESSetup(HYPRE_Solver solver,
HYPRE_Matrix A,
HYPRE_Vector b,
HYPRE_Vector x);
/**
* Solve the system.
**/
HYPRE_Int HYPRE_GMRESSolve(HYPRE_Solver solver,
HYPRE_Matrix A,
HYPRE_Vector b,
HYPRE_Vector x);
/**
* (Optional) Set the relative convergence tolerance.
**/
HYPRE_Int HYPRE_GMRESSetTol(HYPRE_Solver solver,
HYPRE_Real tol);
/**
* (Optional) Set the absolute convergence tolerance (default is 0).
* If one desires
* the convergence test to check the absolute convergence tolerance {\it only}, then
* set the relative convergence tolerance to 0.0. (The convergence test is
* $\|r\| \leq$ max(relative$\_$tolerance$\ast \|b\|$, absolute$\_$tolerance).)
*
**/
HYPRE_Int HYPRE_GMRESSetAbsoluteTol(HYPRE_Solver solver,
HYPRE_Real a_tol);
/*
* RE-VISIT
**/
HYPRE_Int HYPRE_GMRESSetConvergenceFactorTol(HYPRE_Solver solver, HYPRE_Real cf_tol);
/*
* RE-VISIT
**/
HYPRE_Int HYPRE_GMRESSetStopCrit(HYPRE_Solver solver, HYPRE_Int stop_crit);
/*
* RE-VISIT
**/
HYPRE_Int HYPRE_GMRESSetMinIter(HYPRE_Solver solver, HYPRE_Int min_iter);
/**
* (Optional) Set maximum number of iterations.
**/
HYPRE_Int HYPRE_GMRESSetMaxIter(HYPRE_Solver solver,
HYPRE_Int max_iter);
/**
* (Optional) Set the maximum size of the Krylov space.
**/
HYPRE_Int HYPRE_GMRESSetKDim(HYPRE_Solver solver,
HYPRE_Int k_dim);
/**
* (Optional) Additionally require that the relative difference in
* successive iterates be small.
**/
HYPRE_Int HYPRE_GMRESSetRelChange(HYPRE_Solver solver,
HYPRE_Int rel_change);
/**
* (Optional) By default, hypre checks for convergence by evaluating the actual
 * residual before returning from GMRES (with restart if the true residual does
* not indicate convergence). This option allows users to skip the evaluation
* and the check of the actual residual for badly conditioned problems where
* restart is not expected to be beneficial.
**/
HYPRE_Int HYPRE_GMRESSetSkipRealResidualCheck(HYPRE_Solver solver,
HYPRE_Int skip_real_r_check);
/**
* (Optional) Set the preconditioner to use.
**/
HYPRE_Int HYPRE_GMRESSetPrecond(HYPRE_Solver solver,
HYPRE_PtrToSolverFcn precond,
HYPRE_PtrToSolverFcn precond_setup,
HYPRE_Solver precond_solver);
/**
* (Optional) Set the amount of logging to do.
**/
HYPRE_Int HYPRE_GMRESSetLogging(HYPRE_Solver solver,
HYPRE_Int logging);
/**
* (Optional) Set the amount of printing to do to the screen.
**/
HYPRE_Int HYPRE_GMRESSetPrintLevel(HYPRE_Solver solver,
HYPRE_Int level);
/**
* Return the number of iterations taken.
**/
HYPRE_Int HYPRE_GMRESGetNumIterations(HYPRE_Solver solver,
HYPRE_Int *num_iterations);
/**
* Return the norm of the final relative residual.
**/
HYPRE_Int HYPRE_GMRESGetFinalRelativeResidualNorm(HYPRE_Solver solver,
HYPRE_Real *norm);
/**
* Return the residual.
**/
HYPRE_Int HYPRE_GMRESGetResidual(HYPRE_Solver solver,
void **residual);
/**
**/
HYPRE_Int HYPRE_GMRESGetTol(HYPRE_Solver solver,
HYPRE_Real *tol);
/**
**/
HYPRE_Int HYPRE_GMRESGetAbsoluteTol(HYPRE_Solver solver,
HYPRE_Real *tol);
/*
* RE-VISIT
**/
HYPRE_Int HYPRE_GMRESGetConvergenceFactorTol(HYPRE_Solver solver, HYPRE_Real *cf_tol);
/*
* OBSOLETE
**/
HYPRE_Int HYPRE_GMRESGetStopCrit(HYPRE_Solver solver, HYPRE_Int *stop_crit);
/*
* RE-VISIT
**/
HYPRE_Int HYPRE_GMRESGetMinIter(HYPRE_Solver solver, HYPRE_Int *min_iter);
/**
**/
HYPRE_Int HYPRE_GMRESGetMaxIter(HYPRE_Solver solver,
HYPRE_Int *max_iter);
/**
**/
HYPRE_Int HYPRE_GMRESGetKDim(HYPRE_Solver solver,
HYPRE_Int *k_dim);
/**
**/
HYPRE_Int HYPRE_GMRESGetRelChange(HYPRE_Solver solver,
HYPRE_Int *rel_change);
/**
**/
HYPRE_Int HYPRE_GMRESGetPrecond(HYPRE_Solver solver,
HYPRE_Solver *precond_data_ptr);
/**
**/
HYPRE_Int HYPRE_GMRESGetLogging(HYPRE_Solver solver,
HYPRE_Int *level);
/**
**/
HYPRE_Int HYPRE_GMRESGetPrintLevel(HYPRE_Solver solver,
HYPRE_Int *level);
/**
**/
HYPRE_Int HYPRE_GMRESGetConverged(HYPRE_Solver solver,
HYPRE_Int *converged);
/*@}*/
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*@}*/
#ifdef __cplusplus
}
#endif
#endif
| 13,743 | 27.279835 | 86 | h |
AMG | AMG-master/krylov/HYPRE_pcg.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* HYPRE_PCG interface
*
*****************************************************************************/
#include "krylov.h"
/*--------------------------------------------------------------------------
* HYPRE_PCGCreate: Call class-specific function, e.g. HYPRE_ParCSRPCGCreate
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
* HYPRE_PCGDestroy: Call class-specific function
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
* HYPRE_PCGSetup
*--------------------------------------------------------------------------*/
/* Prepare the PCG solver for the system A x = b.  The coefficient values in
 * b and x are ignored here; only the layout of the data may be used.
 * The explicit (void *) casts match HYPRE_PCGSolve and the other wrappers in
 * this file, which previously made this function the one inconsistent case. */
HYPRE_Int
HYPRE_PCGSetup( HYPRE_Solver solver,
                HYPRE_Matrix A,
                HYPRE_Vector b,
                HYPRE_Vector x )
{
   return hypre_PCGSetup((void *) solver,
                         (void *) A,
                         (void *) b,
                         (void *) x);
}
/*--------------------------------------------------------------------------
* HYPRE_PCGSolve
*--------------------------------------------------------------------------*/
/* Solve A x = b with the previously set-up PCG solver. */
HYPRE_Int
HYPRE_PCGSolve( HYPRE_Solver solver, HYPRE_Matrix A, HYPRE_Vector b, HYPRE_Vector x )
{
   return hypre_PCGSolve((void *) solver, (void *) A, (void *) b, (void *) x);
}
/*--------------------------------------------------------------------------
* HYPRE_PCGSetTol, HYPRE_PCGGetTol
*--------------------------------------------------------------------------*/
/* Set the relative convergence tolerance. */
HYPRE_Int
HYPRE_PCGSetTol( HYPRE_Solver solver, HYPRE_Real tol )
{
   return hypre_PCGSetTol((void *) solver, tol);
}

/* Retrieve the relative convergence tolerance. */
HYPRE_Int
HYPRE_PCGGetTol( HYPRE_Solver solver, HYPRE_Real *tol )
{
   return hypre_PCGGetTol((void *) solver, tol);
}
/*--------------------------------------------------------------------------
* HYPRE_PCGSetAbsoluteTol, HYPRE_PCGGetAbsoluteTol
*--------------------------------------------------------------------------*/
/* Set the absolute convergence tolerance (default 0). */
HYPRE_Int
HYPRE_PCGSetAbsoluteTol( HYPRE_Solver solver, HYPRE_Real a_tol )
{
   return hypre_PCGSetAbsoluteTol((void *) solver, a_tol);
}

/* Retrieve the absolute convergence tolerance. */
HYPRE_Int
HYPRE_PCGGetAbsoluteTol( HYPRE_Solver solver, HYPRE_Real *a_tol )
{
   return hypre_PCGGetAbsoluteTol((void *) solver, a_tol);
}
/*--------------------------------------------------------------------------
* HYPRE_PCGSetResidualTol, HYPRE_PCGGetResidualTol
*--------------------------------------------------------------------------*/
/* Set the residual-based tolerance rtol: convergence is declared when
 * ||r_old - r_new|| < rtol * ||b|| (useful to bail out before roundoff
 * dominates at very tight tolerances). */
HYPRE_Int
HYPRE_PCGSetResidualTol( HYPRE_Solver solver, HYPRE_Real rtol )
{
   return hypre_PCGSetResidualTol((void *) solver, rtol);
}

/* Retrieve the residual-based tolerance. */
HYPRE_Int
HYPRE_PCGGetResidualTol( HYPRE_Solver solver, HYPRE_Real *rtol )
{
   return hypre_PCGGetResidualTol((void *) solver, rtol);
}
/*--------------------------------------------------------------------------
* HYPRE_PCGSetAbsoluteTolFactor, HYPRE_PCGGetAbsoluteTolFactor
*--------------------------------------------------------------------------*/
/* Set the absolute tolerance factor (legacy knob, marked RE-VISIT in the
 * public header). */
HYPRE_Int
HYPRE_PCGSetAbsoluteTolFactor( HYPRE_Solver solver, HYPRE_Real abstolf )
{
   return hypre_PCGSetAbsoluteTolFactor((void *) solver, abstolf);
}

/* Retrieve the absolute tolerance factor. */
HYPRE_Int
HYPRE_PCGGetAbsoluteTolFactor( HYPRE_Solver solver, HYPRE_Real *abstolf )
{
   return hypre_PCGGetAbsoluteTolFactor((void *) solver, abstolf);
}
/*--------------------------------------------------------------------------
* HYPRE_PCGSetConvergenceFactorTol, HYPRE_PCGGetConvergenceFactorTol
*--------------------------------------------------------------------------*/
/* Set the convergence-factor tolerance (marked RE-VISIT in the public
 * header). */
HYPRE_Int
HYPRE_PCGSetConvergenceFactorTol( HYPRE_Solver solver, HYPRE_Real cf_tol )
{
   return hypre_PCGSetConvergenceFactorTol((void *) solver, cf_tol);
}

/* Retrieve the convergence-factor tolerance. */
HYPRE_Int
HYPRE_PCGGetConvergenceFactorTol( HYPRE_Solver solver, HYPRE_Real *cf_tol )
{
   return hypre_PCGGetConvergenceFactorTol((void *) solver, cf_tol);
}
/*--------------------------------------------------------------------------
* HYPRE_PCGSetMaxIter, HYPRE_PCGGetMaxIter
*--------------------------------------------------------------------------*/
/* Set the maximum number of PCG iterations. */
HYPRE_Int
HYPRE_PCGSetMaxIter( HYPRE_Solver solver, HYPRE_Int max_iter )
{
   return hypre_PCGSetMaxIter((void *) solver, max_iter);
}

/* Retrieve the maximum number of PCG iterations. */
HYPRE_Int
HYPRE_PCGGetMaxIter( HYPRE_Solver solver, HYPRE_Int *max_iter )
{
   return hypre_PCGGetMaxIter((void *) solver, max_iter);
}
/*--------------------------------------------------------------------------
* HYPRE_PCGSetStopCrit, HYPRE_PCGGetStopCrit
*--------------------------------------------------------------------------*/
/* Set the stopping criterion flag (marked RE-VISIT in the public header). */
HYPRE_Int
HYPRE_PCGSetStopCrit( HYPRE_Solver solver, HYPRE_Int stop_crit )
{
   return hypre_PCGSetStopCrit((void *) solver, stop_crit);
}

/* Retrieve the stopping criterion flag. */
HYPRE_Int
HYPRE_PCGGetStopCrit( HYPRE_Solver solver, HYPRE_Int *stop_crit )
{
   return hypre_PCGGetStopCrit((void *) solver, stop_crit);
}
/*--------------------------------------------------------------------------
* HYPRE_PCGSetTwoNorm, HYPRE_PCGGetTwoNorm
*--------------------------------------------------------------------------*/
/* Enable/disable use of the two-norm in the stopping criteria. */
HYPRE_Int
HYPRE_PCGSetTwoNorm( HYPRE_Solver solver, HYPRE_Int two_norm )
{
   return hypre_PCGSetTwoNorm((void *) solver, two_norm);
}

/* Retrieve the two-norm flag. */
HYPRE_Int
HYPRE_PCGGetTwoNorm( HYPRE_Solver solver, HYPRE_Int *two_norm )
{
   return hypre_PCGGetTwoNorm((void *) solver, two_norm);
}
/*--------------------------------------------------------------------------
* HYPRE_PCGSetRelChange, HYPRE_PCGGetRelChange
*--------------------------------------------------------------------------*/
/* Additionally require that the relative difference in successive iterates
 * be small before declaring convergence. */
HYPRE_Int
HYPRE_PCGSetRelChange( HYPRE_Solver solver, HYPRE_Int rel_change )
{
   return hypre_PCGSetRelChange((void *) solver, rel_change);
}

/* Retrieve the relative-change flag. */
HYPRE_Int
HYPRE_PCGGetRelChange( HYPRE_Solver solver, HYPRE_Int *rel_change )
{
   return hypre_PCGGetRelChange((void *) solver, rel_change);
}
/*--------------------------------------------------------------------------
* HYPRE_PCGSetRecomputeResidual, HYPRE_PCGGetRecomputeResidual
*--------------------------------------------------------------------------*/
/* Recompute the residual at the end to double-check convergence. */
HYPRE_Int
HYPRE_PCGSetRecomputeResidual( HYPRE_Solver solver, HYPRE_Int recompute_residual )
{
   return hypre_PCGSetRecomputeResidual((void *) solver, recompute_residual);
}

/* Retrieve the recompute-residual flag. */
HYPRE_Int
HYPRE_PCGGetRecomputeResidual( HYPRE_Solver solver, HYPRE_Int *recompute_residual )
{
   return hypre_PCGGetRecomputeResidual((void *) solver, recompute_residual);
}
/*--------------------------------------------------------------------------
* HYPRE_PCGSetRecomputeResidualP, HYPRE_PCGGetRecomputeResidualP
*--------------------------------------------------------------------------*/
/* Periodically recompute the residual while iterating. */
HYPRE_Int
HYPRE_PCGSetRecomputeResidualP( HYPRE_Solver solver, HYPRE_Int recompute_residual_p )
{
   return hypre_PCGSetRecomputeResidualP((void *) solver, recompute_residual_p);
}

/* Retrieve the periodic recompute-residual setting. */
HYPRE_Int
HYPRE_PCGGetRecomputeResidualP( HYPRE_Solver solver, HYPRE_Int *recompute_residual_p )
{
   return hypre_PCGGetRecomputeResidualP((void *) solver, recompute_residual_p);
}
/*--------------------------------------------------------------------------
* HYPRE_PCGSetPrecond
*--------------------------------------------------------------------------*/
/* Install the preconditioner: its apply and setup callbacks plus its data
 * object.  The function pointers are re-typed to the generic (void *)
 * signatures expected by the core PCG implementation. */
HYPRE_Int
HYPRE_PCGSetPrecond( HYPRE_Solver         solver,
                     HYPRE_PtrToSolverFcn precond,
                     HYPRE_PtrToSolverFcn precond_setup,
                     HYPRE_Solver         precond_solver )
{
   return hypre_PCGSetPrecond((void *) solver,
                              (HYPRE_Int (*)(void*, void*, void*, void*)) precond,
                              (HYPRE_Int (*)(void*, void*, void*, void*)) precond_setup,
                              (void *) precond_solver);
}
/*--------------------------------------------------------------------------
* HYPRE_PCGGetPrecond
*--------------------------------------------------------------------------*/
/* Retrieve a handle to the installed preconditioner's data object. */
HYPRE_Int
HYPRE_PCGGetPrecond( HYPRE_Solver solver, HYPRE_Solver *precond_data_ptr )
{
   return hypre_PCGGetPrecond((void *) solver, (HYPRE_Solver *) precond_data_ptr);
}
/*--------------------------------------------------------------------------
* HYPRE_PCGSetLogging, HYPRE_PCGGetLogging
* SetLogging sets both the print and log level, for backwards compatibility.
* Soon the SetPrintLevel call should be deleted.
*--------------------------------------------------------------------------*/
/* Set the amount of logging to do (per the note above, this currently also
 * covers print behavior for backwards compatibility). */
HYPRE_Int
HYPRE_PCGSetLogging( HYPRE_Solver solver, HYPRE_Int level )
{
   return hypre_PCGSetLogging((void *) solver, level);
}

/* Retrieve the logging level. */
HYPRE_Int
HYPRE_PCGGetLogging( HYPRE_Solver solver, HYPRE_Int *level )
{
   return hypre_PCGGetLogging((void *) solver, level);
}
/*--------------------------------------------------------------------------
* HYPRE_PCGSetPrintLevel, HYPRE_PCGGetPrintLevel
*--------------------------------------------------------------------------*/
/* Set how much the solver prints to the screen. */
HYPRE_Int
HYPRE_PCGSetPrintLevel( HYPRE_Solver solver, HYPRE_Int level )
{
   return hypre_PCGSetPrintLevel((void *) solver, level);
}

/* Retrieve the print level. */
HYPRE_Int
HYPRE_PCGGetPrintLevel( HYPRE_Solver solver, HYPRE_Int *level )
{
   return hypre_PCGGetPrintLevel((void *) solver, level);
}
/*--------------------------------------------------------------------------
* HYPRE_PCGGetNumIterations
*--------------------------------------------------------------------------*/
/* Return the number of iterations taken by the last solve. */
HYPRE_Int
HYPRE_PCGGetNumIterations( HYPRE_Solver solver, HYPRE_Int *num_iterations )
{
   return hypre_PCGGetNumIterations((void *) solver, num_iterations);
}
/*--------------------------------------------------------------------------
* HYPRE_PCGGetConverged
*--------------------------------------------------------------------------*/
/* Report whether the last solve converged. */
HYPRE_Int
HYPRE_PCGGetConverged( HYPRE_Solver solver, HYPRE_Int *converged )
{
   return hypre_PCGGetConverged((void *) solver, converged);
}
/*--------------------------------------------------------------------------
* HYPRE_PCGGetFinalRelativeResidualNorm
*--------------------------------------------------------------------------*/
/* Return the norm of the final relative residual. */
HYPRE_Int
HYPRE_PCGGetFinalRelativeResidualNorm( HYPRE_Solver solver, HYPRE_Real *norm )
{
   return hypre_PCGGetFinalRelativeResidualNorm((void *) solver, norm);
}
/*--------------------------------------------------------------------------
* HYPRE_PCGGetResidual
*--------------------------------------------------------------------------*/
/* Hand back a pointer to the solver's internal residual vector (no copy). */
HYPRE_Int HYPRE_PCGGetResidual( HYPRE_Solver solver, void **residual )
{
   return hypre_PCGGetResidual((void *) solver, residual);
}
| 12,762 | 33.034667 | 85 | c |
AMG | AMG-master/krylov/gmres.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* GMRES gmres
*
*****************************************************************************/
#include "krylov.h"
#include "_hypre_utilities.h"
/*--------------------------------------------------------------------------
* hypre_GMRESFunctionsCreate
*--------------------------------------------------------------------------*/
hypre_GMRESFunctions *
hypre_GMRESFunctionsCreate(
   char * (*CAlloc) ( size_t count, size_t elt_size ),
   HYPRE_Int (*Free) ( char *ptr ),
   HYPRE_Int (*CommInfo) ( void *A, HYPRE_Int *my_id,
                           HYPRE_Int *num_procs ),
   void * (*CreateVector) ( void *vector ),
   void * (*CreateVectorArray) ( HYPRE_Int size, void *vectors ),
   HYPRE_Int (*DestroyVector) ( void *vector ),
   void * (*MatvecCreate) ( void *A, void *x ),
   HYPRE_Int (*Matvec) ( void *matvec_data, HYPRE_Complex alpha, void *A,
                         void *x, HYPRE_Complex beta, void *y ),
   HYPRE_Int (*MatvecDestroy) ( void *matvec_data ),
   HYPRE_Real (*InnerProd) ( void *x, void *y ),
   HYPRE_Int (*CopyVector) ( void *x, void *y ),
   HYPRE_Int (*ClearVector) ( void *x ),
   HYPRE_Int (*ScaleVector) ( HYPRE_Complex alpha, void *x ),
   HYPRE_Int (*Axpy) ( HYPRE_Complex alpha, void *x, void *y ),
   HYPRE_Int (*PrecondSetup) ( void *vdata, void *A, void *b, void *x ),
   HYPRE_Int (*Precond) ( void *vdata, void *A, void *b, void *x )
   )
{
   /* Build the table of storage-scheme-specific callbacks used by the
    * generic GMRES driver. */
   hypre_GMRESFunctions *fns =
      (hypre_GMRESFunctions *) CAlloc(1, sizeof(hypre_GMRESFunctions));

   /* memory and communication hooks (CommInfo/CreateVectorArray have no
    * counterpart in PCGFunctionsCreate) */
   fns->CAlloc            = CAlloc;
   fns->Free              = Free;
   fns->CommInfo          = CommInfo;

   /* vector lifetime */
   fns->CreateVector      = CreateVector;
   fns->CreateVectorArray = CreateVectorArray;
   fns->DestroyVector     = DestroyVector;

   /* matvec */
   fns->MatvecCreate      = MatvecCreate;
   fns->Matvec            = Matvec;
   fns->MatvecDestroy     = MatvecDestroy;

   /* BLAS-like vector operations */
   fns->InnerProd         = InnerProd;
   fns->CopyVector        = CopyVector;
   fns->ClearVector       = ClearVector;
   fns->ScaleVector       = ScaleVector;
   fns->Axpy              = Axpy;

   /* default preconditioner; can be replaced later via SetPrecond */
   fns->precond_setup     = PrecondSetup;
   fns->precond           = Precond;

   return fns;
}
/*--------------------------------------------------------------------------
* hypre_GMRESCreate
*--------------------------------------------------------------------------*/
void *
hypre_GMRESCreate( hypre_GMRESFunctions *gmres_functions )
{
   /* Allocate the solver object and fill in the defaults. */
   hypre_GMRESData *data = hypre_CTAllocF(hypre_GMRESData, 1, gmres_functions);

   data->functions = gmres_functions;

   /* solver parameter defaults */
   data->k_dim             = 5;
   data->tol               = 1.0e-06; /* relative residual tolerance */
   data->cf_tol            = 0.0;
   data->a_tol             = 0.0;     /* absolute residual tolerance */
   data->min_iter          = 0;
   data->max_iter          = 1000;
   data->rel_change        = 0;
   data->skip_real_r_check = 0;
   data->stop_crit         = 0;       /* rel. residual norm - this is obsolete! */
   data->converged         = 0;
   data->precond_data      = NULL;
   data->print_level       = 0;
   data->logging           = 0;

   /* work vectors and logs are allocated later, in Setup */
   data->p             = NULL;
   data->r             = NULL;
   data->w             = NULL;
   data->w_2           = NULL;
   data->matvec_data   = NULL;
   data->norms         = NULL;
   data->log_file_name = NULL;

   return (void *) data;
}
/*--------------------------------------------------------------------------
* hypre_GMRESDestroy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_GMRESDestroy( void *gmres_vdata )
{
   hypre_GMRESData *gmres_data = (hypre_GMRESData *) gmres_vdata;

   if (gmres_data)
   {
      hypre_GMRESFunctions *gmres_functions = gmres_data->functions;
      HYPRE_Int             j;

      /* norms log exists only when logging/printing was enabled */
      if ((gmres_data->logging > 0) || (gmres_data->print_level > 0))
      {
         if ((gmres_data->norms) != NULL)
         {
            hypre_TFreeF(gmres_data->norms, gmres_functions);
         }
      }

      if ((gmres_data->matvec_data) != NULL)
      {
         (*(gmres_functions->MatvecDestroy))(gmres_data->matvec_data);
      }
      if ((gmres_data->r) != NULL)
      {
         (*(gmres_functions->DestroyVector))(gmres_data->r);
      }
      if ((gmres_data->w) != NULL)
      {
         (*(gmres_functions->DestroyVector))(gmres_data->w);
      }
      if ((gmres_data->w_2) != NULL)
      {
         (*(gmres_functions->DestroyVector))(gmres_data->w_2);
      }

      if ((gmres_data->p) != NULL)
      {
         /* p holds the k_dim+1 Krylov basis vectors */
         for (j = 0; j < (gmres_data->k_dim + 1); j++)
         {
            if ((gmres_data->p)[j] != NULL)
            {
               (*(gmres_functions->DestroyVector))((gmres_data->p)[j]);
            }
         }
         hypre_TFreeF(gmres_data->p, gmres_functions);
      }

      /* free the data object first, then the function table (which also
       * provides the Free hook used to release it) */
      hypre_TFreeF(gmres_data, gmres_functions);
      hypre_TFreeF(gmres_functions, gmres_functions);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_GMRESGetResidual
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_GMRESGetResidual( void *gmres_vdata, void **residual )
{
   /* Expose the internal residual vector; no copy is made, so the pointer
    * is freed together with the solver in hypre_GMRESDestroy. */
   *residual = ((hypre_GMRESData *) gmres_vdata)->r;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_GMRESSetup
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_GMRESSetup( void *gmres_vdata,
void *A,
void *b,
void *x )
{
hypre_GMRESData *gmres_data = (hypre_GMRESData *)gmres_vdata;
hypre_GMRESFunctions *gmres_functions = gmres_data->functions;
HYPRE_Int k_dim = (gmres_data -> k_dim);
HYPRE_Int max_iter = (gmres_data -> max_iter);
HYPRE_Int (*precond_setup)(void*,void*,void*,void*) = (gmres_functions->precond_setup);
void *precond_data = (gmres_data -> precond_data);
HYPRE_Int rel_change = (gmres_data -> rel_change);
(gmres_data -> A) = A;
/*--------------------------------------------------
* The arguments for NewVector are important to
* maintain consistency between the setup and
* compute phases of matvec and the preconditioner.
*--------------------------------------------------*/
if ((gmres_data -> p) == NULL)
(gmres_data -> p) = (void**)(*(gmres_functions->CreateVectorArray))(k_dim+1,x);
if ((gmres_data -> r) == NULL)
(gmres_data -> r) = (*(gmres_functions->CreateVector))(b);
if ((gmres_data -> w) == NULL)
(gmres_data -> w) = (*(gmres_functions->CreateVector))(b);
if (rel_change)
{
if ((gmres_data -> w_2) == NULL)
(gmres_data -> w_2) = (*(gmres_functions->CreateVector))(b);
}
if ((gmres_data -> matvec_data) == NULL)
(gmres_data -> matvec_data) = (*(gmres_functions->MatvecCreate))(A, x);
precond_setup(precond_data, A, b, x);
/*-----------------------------------------------------
* Allocate space for log info
*-----------------------------------------------------*/
if ( (gmres_data->logging)>0 || (gmres_data->print_level) > 0 )
{
if ((gmres_data -> norms) == NULL)
(gmres_data -> norms) = hypre_CTAllocF(HYPRE_Real, max_iter + 1,gmres_functions);
}
if ( (gmres_data->print_level) > 0 ) {
if ((gmres_data -> log_file_name) == NULL)
(gmres_data -> log_file_name) = (char*)"gmres.out.log";
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_GMRESSolve
*-------------------------------------------------------------------------*/
/*
 * Solve A x = b with restarted, right-preconditioned GMRES.
 *
 * Builds a Krylov basis in p[0..k_dim], orthogonalized with modified
 * Gram-Schmidt, and maintains a QR factorization of the Hessenberg
 * matrix hh via Givens rotations so the residual norm is available
 * cheaply each step.  Restarts after k_dim inner steps.  Optional
 * checks: convergence-factor stall (cf_tol), relative-change in the
 * solution (rel_change), and recomputation of the true residual
 * (unless skip_real_r_check).
 *
 * Returns hypre_error_flag; sets num_iterations, rel_residual_norm and
 * converged on the solver object.  Assumes hypre_GMRESSetup has already
 * allocated r, w, w_2, p and (if logging) norms.
 */
HYPRE_Int
hypre_GMRESSolve(void  *gmres_vdata,
                 void  *A,
                 void  *b,
                 void  *x)
{
   hypre_GMRESData      *gmres_data      = (hypre_GMRESData *)gmres_vdata;
   hypre_GMRESFunctions *gmres_functions = gmres_data->functions;
   HYPRE_Int     k_dim             = (gmres_data -> k_dim);
   HYPRE_Int     min_iter          = (gmres_data -> min_iter);
   HYPRE_Int     max_iter          = (gmres_data -> max_iter);
   HYPRE_Int     rel_change        = (gmres_data -> rel_change);
   HYPRE_Int     skip_real_r_check = (gmres_data -> skip_real_r_check);
   HYPRE_Real    r_tol             = (gmres_data -> tol);
   HYPRE_Real    cf_tol            = (gmres_data -> cf_tol);
   HYPRE_Real    a_tol             = (gmres_data -> a_tol);
   void         *matvec_data       = (gmres_data -> matvec_data);
   void         *r                 = (gmres_data -> r);
   void         *w                 = (gmres_data -> w);
   /* note: w_2 is only allocated if rel_change = 1 */
   void         *w_2               = (gmres_data -> w_2);
   void        **p                 = (gmres_data -> p);
   HYPRE_Int   (*precond)(void*,void*,void*,void*) = (gmres_functions -> precond);
   HYPRE_Int    *precond_data      = (HYPRE_Int*)(gmres_data -> precond_data);
   HYPRE_Int     print_level       = (gmres_data -> print_level);
   HYPRE_Int     logging           = (gmres_data -> logging);
   HYPRE_Real   *norms             = (gmres_data -> norms);
   /* not used yet   char *log_file_name  = (gmres_data -> log_file_name);*/
   /*   FILE           *fp; */
   HYPRE_Int     break_value = 0;
   HYPRE_Int     i, j, k;
   /* rs: RHS of the least-squares problem; hh: Hessenberg matrix;
      c/s: Givens cosines/sines; rs_2: scratch copy for rel-change check */
   HYPRE_Real   *rs, **hh, *c, *s, *rs_2;
   HYPRE_Int     iter;
   HYPRE_Int     my_id, num_procs;
   HYPRE_Real    epsilon, gamma, t, r_norm, b_norm, den_norm, x_norm;
   HYPRE_Real    w_norm;
   HYPRE_Real    epsmac = 1.e-16;   /* guard against a zero Givens denominator */
   HYPRE_Real    ieee_check = 0.;
   HYPRE_Real    guard_zero_residual;
   HYPRE_Real    cf_ave_0 = 0.0;
   HYPRE_Real    cf_ave_1 = 0.0;
   HYPRE_Real    weight;
   HYPRE_Real    r_norm_0;
   HYPRE_Real    relative_error = 1.0;
   HYPRE_Int     rel_change_passed = 0, num_rel_change_check = 0;
   HYPRE_Real    real_r_norm_old, real_r_norm_new;
   (gmres_data -> converged) = 0;
   /*-----------------------------------------------------------------------
    * With relative change convergence test on, it is possible to attempt
    * another iteration with a zero residual. This causes the parameter
    * alpha to go NaN. The guard_zero_residual parameter is to circumvent
    * this. Perhaps it should be set to something non-zero (but small).
    *-----------------------------------------------------------------------*/
   guard_zero_residual = 0.0;
   (*(gmres_functions->CommInfo))(A,&my_id,&num_procs);
   if ( logging>0 || print_level>0 )
   {
      norms = (gmres_data -> norms);
   }
   /* initialize work arrays */
   rs = hypre_CTAllocF(HYPRE_Real,k_dim+1,gmres_functions);
   c = hypre_CTAllocF(HYPRE_Real,k_dim,gmres_functions);
   s = hypre_CTAllocF(HYPRE_Real,k_dim,gmres_functions);
   if (rel_change) rs_2 = hypre_CTAllocF(HYPRE_Real,k_dim+1,gmres_functions);
   hh = hypre_CTAllocF(HYPRE_Real*,k_dim+1,gmres_functions);
   for (i=0; i < k_dim+1; i++)
   {
      hh[i] = hypre_CTAllocF(HYPRE_Real,k_dim,gmres_functions);
   }
   (*(gmres_functions->CopyVector))(b,p[0]);
   /* compute initial residual: p[0] = b - A*x */
   (*(gmres_functions->Matvec))(matvec_data,-1.0, A, x, 1.0, p[0]);
   b_norm = sqrt((*(gmres_functions->InnerProd))(b,b));
   real_r_norm_old = b_norm;
   /* Since it is does not diminish performance, attempt to return an error flag
      and notify users when they supply bad input. */
   if (b_norm != 0.) ieee_check = b_norm/b_norm; /* INF -> NaN conversion */
   if (ieee_check != ieee_check)
   {
      /* ...INFs or NaNs in input can make ieee_check a NaN.  This test
         for ieee_check self-equality works on all IEEE-compliant compilers/
         machines, c.f. page 8 of "Lecture Notes on the Status of IEEE 754"
         by W. Kahan, May 31, 1996.  Currently (July 2002) this paper may be
         found at http://HTTP.CS.Berkeley.EDU/~wkahan/ieee754status/IEEE754.PDF */
      if (logging > 0 || print_level > 0)
      {
         hypre_printf("\n\nERROR detected by Hypre ... BEGIN\n");
         hypre_printf("ERROR -- hypre_GMRESSolve: INFs and/or NaNs detected in input.\n");
         hypre_printf("User probably placed non-numerics in supplied b.\n");
         hypre_printf("Returning error flag += 101.  Program not terminated.\n");
         hypre_printf("ERROR detected by Hypre ... END\n\n\n");
      }
      hypre_error(HYPRE_ERROR_GENERIC);
      return hypre_error_flag;
   }
   r_norm = sqrt((*(gmres_functions->InnerProd))(p[0],p[0]));
   r_norm_0 = r_norm;
   /* Since it is does not diminish performance, attempt to return an error flag
      and notify users when they supply bad input. */
   if (r_norm != 0.) ieee_check = r_norm/r_norm; /* INF -> NaN conversion */
   if (ieee_check != ieee_check)
   {
      /* ...INFs or NaNs in input can make ieee_check a NaN.  This test
         for ieee_check self-equality works on all IEEE-compliant compilers/
         machines, c.f. page 8 of "Lecture Notes on the Status of IEEE 754"
         by W. Kahan, May 31, 1996.  Currently (July 2002) this paper may be
         found at http://HTTP.CS.Berkeley.EDU/~wkahan/ieee754status/IEEE754.PDF */
      if (logging > 0 || print_level > 0)
      {
         hypre_printf("\n\nERROR detected by Hypre ... BEGIN\n");
         hypre_printf("ERROR -- hypre_GMRESSolve: INFs and/or NaNs detected in input.\n");
         hypre_printf("User probably placed non-numerics in supplied A or x_0.\n");
         hypre_printf("Returning error flag += 101.  Program not terminated.\n");
         hypre_printf("ERROR detected by Hypre ... END\n\n\n");
      }
      hypre_error(HYPRE_ERROR_GENERIC);
      return hypre_error_flag;
   }
   if ( logging>0 || print_level > 0)
   {
      norms[0] = r_norm;
      if ( print_level>1 && my_id == 0 )
      {
         hypre_printf("L2 norm of b: %e\n", b_norm);
         if (b_norm == 0.0)
            hypre_printf("Rel_resid_norm actually contains the residual norm\n");
         hypre_printf("Initial L2 norm of residual: %e\n", r_norm);
      }
   }
   iter = 0;
   if (b_norm > 0.0)
   {
      /* convergence criterion |r_i|/|b| <= accuracy if |b| > 0 */
      den_norm= b_norm;
   }
   else
   {
      /* convergence criterion |r_i|/|r0| <= accuracy if |b| = 0 */
      den_norm= r_norm;
   };
   /* convergence criteria: |r_i| <= max( a_tol, r_tol * den_norm)
      den_norm = |r_0| or |b|
      note: default for a_tol is 0.0, so relative residual criteria is used unless
      user specifies a_tol, or sets r_tol = 0.0, which means absolute
      tol only is checked  */
   epsilon = hypre_max(a_tol,r_tol*den_norm);
   /* so now our stop criteria is |r_i| <= epsilon */
   if ( print_level>1 && my_id == 0 )
   {
      if (b_norm > 0.0)
      {hypre_printf("=============================================\n\n");
         hypre_printf("Iters resid.norm conv.rate rel.res.norm\n");
         hypre_printf("----- ------------ ---------- ------------\n");
      }
      else
      {hypre_printf("=============================================\n\n");
         hypre_printf("Iters resid.norm conv.rate\n");
         hypre_printf("----- ------------ ----------\n");
      };
   }
   /* once the rel. change check has passed, we do not want to check it again */
   rel_change_passed = 0;
   /* outer iteration cycle */
   while (iter < max_iter)
   {
      /* initialize first term of hessenberg system */
      rs[0] = r_norm;
      if (r_norm == 0.0)
      {
         /* exact solution already; free work arrays and return success.
            NOTE(review): num_iterations/rel_residual_norm are not updated on
            this early path — confirm callers tolerate stale values here. */
         hypre_TFreeF(c,gmres_functions);
         hypre_TFreeF(s,gmres_functions);
         hypre_TFreeF(rs,gmres_functions);
         if (rel_change) hypre_TFreeF(rs_2,gmres_functions);
         for (i=0; i < k_dim+1; i++) hypre_TFreeF(hh[i],gmres_functions);
         hypre_TFreeF(hh,gmres_functions);
         return hypre_error_flag;
      }
      /* see if we are already converged and
         should print the final norm and exit */
      if (r_norm <= epsilon && iter >= min_iter)
      {
         if (!rel_change) /* shouldn't exit after no iterations if
                           * relative change is on*/
         {
            /* re-verify with the true residual before declaring victory */
            (*(gmres_functions->CopyVector))(b,r);
            (*(gmres_functions->Matvec))(matvec_data,-1.0,A,x,1.0,r);
            r_norm = sqrt((*(gmres_functions->InnerProd))(r,r));
            if (r_norm <= epsilon)
            {
               if ( print_level>1 && my_id == 0)
               {
                  hypre_printf("\n\n");
                  hypre_printf("Final L2 norm of residual: %e\n\n", r_norm);
               }
               break;
            }
            else
               if ( print_level>0 && my_id == 0)
                  hypre_printf("false convergence 1\n");
         }
      }
      /* normalize p[0] to start the Arnoldi process */
      t = 1.0 / r_norm;
      (*(gmres_functions->ScaleVector))(t,p[0]);
      i = 0;
      /***RESTART CYCLE (right-preconditioning) ***/
      while (i < k_dim && iter < max_iter)
      {
         i++;
         iter++;
         /* next Krylov vector: p[i] = A * M^{-1} * p[i-1] */
         (*(gmres_functions->ClearVector))(r);
         precond(precond_data, A, p[i-1], r);
         (*(gmres_functions->Matvec))(matvec_data, 1.0, A, r, 0.0, p[i]);
         /* modified Gram_Schmidt */
         for (j=0; j < i; j++)
         {
            hh[j][i-1] = (*(gmres_functions->InnerProd))(p[j],p[i]);
            (*(gmres_functions->Axpy))(-hh[j][i-1],p[j],p[i]);
         }
         t = sqrt((*(gmres_functions->InnerProd))(p[i],p[i]));
         hh[i][i-1] = t;
         if (t != 0.0)
         {
            t = 1.0/t;
            (*(gmres_functions->ScaleVector))(t,p[i]);
         }
         /* done with modified Gram_schmidt and Arnoldi step.
            update factorization of hh: apply previous Givens rotations
            to the new column, then compute a new rotation that zeroes
            the subdiagonal entry hh[i][i-1] */
         for (j = 1; j < i; j++)
         {
            t = hh[j-1][i-1];
            hh[j-1][i-1] = s[j-1]*hh[j][i-1] + c[j-1]*t;
            hh[j][i-1] = -s[j-1]*t + c[j-1]*hh[j][i-1];
         }
         t= hh[i][i-1]*hh[i][i-1];
         t+= hh[i-1][i-1]*hh[i-1][i-1];
         gamma = sqrt(t);
         if (gamma == 0.0) gamma = epsmac;
         c[i-1] = hh[i-1][i-1]/gamma;
         s[i-1] = hh[i][i-1]/gamma;
         rs[i] = -hh[i][i-1]*rs[i-1];
         rs[i]/= gamma;
         rs[i-1] = c[i-1]*rs[i-1];
         /* determine residual norm */
         hh[i-1][i-1] = s[i-1]*hh[i][i-1] + c[i-1]*hh[i-1][i-1];
         r_norm = fabs(rs[i]);
         /* print ? */
         if ( print_level>0 )
         {
            norms[iter] = r_norm;
            if ( print_level>1 && my_id == 0 )
            {
               if (b_norm > 0.0)
                  hypre_printf("% 5d %e %f %e\n", iter,
                               norms[iter],norms[iter]/norms[iter-1],
                               norms[iter]/b_norm);
               else
                  hypre_printf("% 5d %e %f\n", iter, norms[iter],
                               norms[iter]/norms[iter-1]);
            }
         }
         /*convergence factor tolerance: break out if the averaged
           convergence factor indicates stagnation */
         if (cf_tol > 0.0)
         {
            cf_ave_0 = cf_ave_1;
            cf_ave_1 = pow( r_norm / r_norm_0, 1.0/(2.0*iter));
            weight = fabs(cf_ave_1 - cf_ave_0);
            weight = weight / hypre_max(cf_ave_1, cf_ave_0);
            weight = 1.0 - weight;
#if 0
            hypre_printf("I = %d: cf_new = %e, cf_old = %e, weight = %e\n",
                         i, cf_ave_1, cf_ave_0, weight );
#endif
            if (weight * cf_ave_1 > cf_tol)
            {
               break_value = 1;
               break;
            }
         }
         /* should we exit the restart cycle? (conv. check) */
         if (r_norm <= epsilon && iter >= min_iter)
         {
            if (rel_change && !rel_change_passed)
            {
               /* To decide whether to break here: to actually
                  determine the relative change requires the approx
                  solution (so a triangular solve) and a
                  precond. solve - so if we have to do this many
                  times, it will be expensive...(unlike cg where is
                  is relatively straightforward)
                  previously, the intent (there was a bug), was to
                  exit the restart cycle based on the residual norm
                  and check the relative change outside the cycle.
                  Here we will check the relative here as we don't
                  want to exit the restart cycle prematurely */
               for (k=0; k<i; k++) /* extra copy of rs so we don't need
                                      to change the later solve */
                  rs_2[k] = rs[k];
               /* solve tri. system*/
               rs_2[i-1] = rs_2[i-1]/hh[i-1][i-1];
               for (k = i-2; k >= 0; k--)
               {
                  t = 0.0;
                  for (j = k+1; j < i; j++)
                  {
                     t -= hh[k][j]*rs_2[j];
                  }
                  t+= rs_2[k];
                  rs_2[k] = t/hh[k][k];
               }
               /* accumulate the (unpreconditioned) correction in w */
               (*(gmres_functions->CopyVector))(p[i-1],w);
               (*(gmres_functions->ScaleVector))(rs_2[i-1],w);
               for (j = i-2; j >=0; j--)
                  (*(gmres_functions->Axpy))(rs_2[j], p[j], w);
               (*(gmres_functions->ClearVector))(r);
               /* find correction (in r) */
               precond(precond_data, A, w, r);
               /* copy current solution (x) to w (don't want to over-write x)*/
               (*(gmres_functions->CopyVector))(x,w);
               /* add the correction */
               (*(gmres_functions->Axpy))(1.0,r,w);
               /* now w is the approx solution  - get the norm*/
               x_norm = sqrt( (*(gmres_functions->InnerProd))(w,w) );
               if ( !(x_norm <= guard_zero_residual ))
                  /* don't divide by zero */
               {  /* now get  x_i - x_i-1 */
                  if (num_rel_change_check)
                  {
                     /* have already checked once so we can avoid another precond.
                        solve */
                     (*(gmres_functions->CopyVector))(w, r);
                     (*(gmres_functions->Axpy))(-1.0, w_2, r);
                     /* now r contains x_i - x_i-1*/
                     /* save current soln w in w_2 for next time */
                     (*(gmres_functions->CopyVector))(w, w_2);
                  }
                  else
                  {
                     /* first time to check rel change*/
                     /* first save current soln w in w_2 for next time */
                     (*(gmres_functions->CopyVector))(w, w_2);
                     /* for relative change take x_(i-1) to be
                        x + M^{-1}[sum{j=0..i-2} rs_j p_j ].
                        Now
                        x_i - x_{i-1}= {x + M^{-1}[sum{j=0..i-1} rs_j p_j ]}
                        - {x + M^{-1}[sum{j=0..i-2} rs_j p_j ]}
                        = M^{-1} rs_{i-1}{p_{i-1}} */
                     (*(gmres_functions->ClearVector))(w);
                     (*(gmres_functions->Axpy))(rs_2[i-1], p[i-1], w);
                     (*(gmres_functions->ClearVector))(r);
                     /* apply the preconditioner */
                     precond(precond_data, A, w, r);
                     /* now r contains x_i - x_i-1 */
                  }
                  /* find the norm of x_i - x_i-1 */
                  w_norm = sqrt( (*(gmres_functions->InnerProd))(r,r) );
                  relative_error = w_norm/x_norm;
                  if (relative_error <= r_tol)
                  {
                     rel_change_passed = 1;
                     break;
                  }
               }
               else
               {
                  rel_change_passed = 1;
                  break;
               }
               num_rel_change_check++;
            }
            else /* no relative change */
            {
               break;
            }
         }
      } /*** end of restart cycle ***/
      /* now compute solution, first solve upper triangular system */
      if (break_value) break;
      rs[i-1] = rs[i-1]/hh[i-1][i-1];
      for (k = i-2; k >= 0; k--)
      {
         t = 0.0;
         for (j = k+1; j < i; j++)
         {
            t -= hh[k][j]*rs[j];
         }
         t+= rs[k];
         rs[k] = t/hh[k][k];
      }
      /* form linear combination of p's to get solution correction */
      (*(gmres_functions->CopyVector))(p[i-1],w);
      (*(gmres_functions->ScaleVector))(rs[i-1],w);
      for (j = i-2; j >=0; j--)
         (*(gmres_functions->Axpy))(rs[j], p[j], w);
      (*(gmres_functions->ClearVector))(r);
      /* find correction (in r) */
      precond(precond_data, A, w, r);
      /* update current solution x (in x) */
      (*(gmres_functions->Axpy))(1.0,r,x);
      /* check for convergence by evaluating the actual residual */
      if (r_norm <= epsilon && iter >= min_iter)
      {
         if (skip_real_r_check)
         {
            (gmres_data -> converged) = 1;
            break;
         }
         /* calculate actual residual norm*/
         (*(gmres_functions->CopyVector))(b,r);
         (*(gmres_functions->Matvec))(matvec_data,-1.0,A,x,1.0,r);
         real_r_norm_new = r_norm = sqrt( (*(gmres_functions->InnerProd))(r,r) );
         if (r_norm <= epsilon)
         {
            if (rel_change && !rel_change_passed) /* calculate the relative change */
            {
               /* calculate the norm of the solution */
               x_norm = sqrt( (*(gmres_functions->InnerProd))(x,x) );
               if ( !(x_norm <= guard_zero_residual ))
                  /* don't divide by zero */
               {
                  /* for relative change take x_(i-1) to be
                     x + M^{-1}[sum{j=0..i-2} rs_j p_j ].
                     Now
                     x_i - x_{i-1}= {x + M^{-1}[sum{j=0..i-1} rs_j p_j ]}
                     - {x + M^{-1}[sum{j=0..i-2} rs_j p_j ]}
                     = M^{-1} rs_{i-1}{p_{i-1}} */
                  (*(gmres_functions->ClearVector))(w);
                  (*(gmres_functions->Axpy))(rs[i-1], p[i-1], w);
                  (*(gmres_functions->ClearVector))(r);
                  /* apply the preconditioner */
                  precond(precond_data, A, w, r);
                  /* find the norm of x_i - x_i-1 */
                  w_norm = sqrt( (*(gmres_functions->InnerProd))(r,r) );
                  relative_error= w_norm/x_norm;
                  if ( relative_error < r_tol )
                  {
                     (gmres_data -> converged) = 1;
                     if ( print_level>1 && my_id == 0 )
                     {
                        hypre_printf("\n\n");
                        hypre_printf("Final L2 norm of residual: %e\n\n", r_norm);
                     }
                     break;
                  }
               }
               else
               {
                  (gmres_data -> converged) = 1;
                  if ( print_level>1 && my_id == 0 )
                  {
                     hypre_printf("\n\n");
                     hypre_printf("Final L2 norm of residual: %e\n\n", r_norm);
                  }
                  break;
               }
            }
            else /* don't need to check rel. change */
            {
               if ( print_level>1 && my_id == 0 )
               {
                  hypre_printf("\n\n");
                  hypre_printf("Final L2 norm of residual: %e\n\n", r_norm);
               }
               (gmres_data -> converged) = 1;
               break;
            }
         }
         else /* conv. has not occurred, according to true residual */
         {
            /* exit if the real residual norm has not decreased */
            if (real_r_norm_new >= real_r_norm_old)
            {
               if (print_level > 1 && my_id == 0)
               {
                  hypre_printf("\n\n");
                  hypre_printf("Final L2 norm of residual: %e\n\n", r_norm);
               }
               (gmres_data -> converged) = 1;
               break;
            }
            /* report discrepancy between real/GMRES residuals and restart */
            if ( print_level>0 && my_id == 0)
               hypre_printf("false convergence 2, L2 norm of residual: %e\n", r_norm);
            (*(gmres_functions->CopyVector))(r,p[0]);
            i = 0;
            real_r_norm_old = real_r_norm_new;
         }
      } /* end of convergence check */
      /* compute residual vector (in p[0]) from the rotation data and
         continue loop */
      for (j=i ; j > 0; j--)
      {
         rs[j-1] = -s[j-1]*rs[j];
         rs[j] = c[j-1]*rs[j];
      }
      if (i) (*(gmres_functions->Axpy))(rs[i]-1.0,p[i],p[i]);
      for (j=i-1 ; j > 0; j--)
         (*(gmres_functions->Axpy))(rs[j],p[j],p[i]);
      if (i)
      {
         (*(gmres_functions->Axpy))(rs[0]-1.0,p[0],p[0]);
         (*(gmres_functions->Axpy))(1.0,p[i],p[0]);
      }
   } /* END of iteration while loop */
   if ( print_level>1 && my_id == 0 )
      hypre_printf("\n\n");
   /* record iteration count and final relative residual norm */
   (gmres_data -> num_iterations) = iter;
   if (b_norm > 0.0)
      (gmres_data -> rel_residual_norm) = r_norm/b_norm;
   if (b_norm == 0.0)
      (gmres_data -> rel_residual_norm) = r_norm;
   if (iter >= max_iter && r_norm > epsilon) hypre_error(HYPRE_ERROR_CONV);
   hypre_TFreeF(c,gmres_functions);
   hypre_TFreeF(s,gmres_functions);
   hypre_TFreeF(rs,gmres_functions);
   if (rel_change) hypre_TFreeF(rs_2,gmres_functions);
   for (i=0; i < k_dim+1; i++)
   {
      hypre_TFreeF(hh[i],gmres_functions);
   }
   hypre_TFreeF(hh,gmres_functions);
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_GMRESSetKDim, hypre_GMRESGetKDim
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_GMRESSetKDim( void *gmres_vdata,
                    HYPRE_Int k_dim )
{
   /* Store the restart (Krylov subspace) dimension on the solver. */
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   data->k_dim = k_dim;

   return hypre_error_flag;
}
HYPRE_Int
hypre_GMRESGetKDim( void *gmres_vdata,
                    HYPRE_Int * k_dim )
{
   /* Read the restart (Krylov subspace) dimension back from the solver. */
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *k_dim = data->k_dim;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_GMRESSetTol, hypre_GMRESGetTol
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_GMRESSetTol( void *gmres_vdata,
                   HYPRE_Real tol )
{
   /* Set the relative convergence tolerance. */
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   data->tol = tol;

   return hypre_error_flag;
}
HYPRE_Int
hypre_GMRESGetTol( void *gmres_vdata,
                   HYPRE_Real * tol )
{
   /* Retrieve the relative convergence tolerance. */
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *tol = data->tol;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_GMRESSetAbsoluteTol, hypre_GMRESGetAbsoluteTol
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_GMRESSetAbsoluteTol( void *gmres_vdata,
                           HYPRE_Real a_tol )
{
   /* Set the absolute convergence tolerance. */
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   data->a_tol = a_tol;

   return hypre_error_flag;
}
HYPRE_Int
hypre_GMRESGetAbsoluteTol( void *gmres_vdata,
                           HYPRE_Real * a_tol )
{
   /* Retrieve the absolute convergence tolerance. */
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *a_tol = data->a_tol;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_GMRESSetConvergenceFactorTol, hypre_GMRESGetConvergenceFactorTol
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_GMRESSetConvergenceFactorTol( void *gmres_vdata,
                                    HYPRE_Real cf_tol )
{
   /* Set the convergence-factor (stagnation) tolerance. */
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   data->cf_tol = cf_tol;

   return hypre_error_flag;
}
HYPRE_Int
hypre_GMRESGetConvergenceFactorTol( void *gmres_vdata,
                                    HYPRE_Real * cf_tol )
{
   /* Retrieve the convergence-factor (stagnation) tolerance. */
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *cf_tol = data->cf_tol;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_GMRESSetMinIter, hypre_GMRESGetMinIter
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_GMRESSetMinIter( void *gmres_vdata,
                       HYPRE_Int min_iter )
{
   /* Set the minimum number of iterations before convergence is accepted. */
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   data->min_iter = min_iter;

   return hypre_error_flag;
}
HYPRE_Int
hypre_GMRESGetMinIter( void *gmres_vdata,
                       HYPRE_Int * min_iter )
{
   /* Retrieve the minimum iteration count. */
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *min_iter = data->min_iter;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_GMRESSetMaxIter, hypre_GMRESGetMaxIter
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_GMRESSetMaxIter( void *gmres_vdata,
                       HYPRE_Int max_iter )
{
   /* Set the maximum number of iterations. */
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   data->max_iter = max_iter;

   return hypre_error_flag;
}
HYPRE_Int
hypre_GMRESGetMaxIter( void *gmres_vdata,
                       HYPRE_Int * max_iter )
{
   /* Retrieve the maximum iteration count. */
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *max_iter = data->max_iter;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_GMRESSetRelChange, hypre_GMRESGetRelChange
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_GMRESSetRelChange( void *gmres_vdata,
                         HYPRE_Int rel_change )
{
   /* Enable/disable the relative-change convergence test. */
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   data->rel_change = rel_change;

   return hypre_error_flag;
}
HYPRE_Int
hypre_GMRESGetRelChange( void *gmres_vdata,
                         HYPRE_Int * rel_change )
{
   /* Retrieve the relative-change convergence test flag. */
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *rel_change = data->rel_change;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_GMRESSetSkipRealResidualCheck, hypre_GMRESGetSkipRealResidualCheck
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_GMRESSetSkipRealResidualCheck( void *gmres_vdata,
                                     HYPRE_Int skip_real_r_check )
{
   /* Enable/disable skipping the true-residual recomputation at convergence. */
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   data->skip_real_r_check = skip_real_r_check;

   return hypre_error_flag;
}
HYPRE_Int
hypre_GMRESGetSkipRealResidualCheck( void *gmres_vdata,
                                     HYPRE_Int *skip_real_r_check)
{
   /* Retrieve the skip-real-residual-check flag. */
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *skip_real_r_check = data->skip_real_r_check;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_GMRESSetStopCrit, hypre_GMRESGetStopCrit
*
* OBSOLETE
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_GMRESSetStopCrit( void *gmres_vdata,
                        HYPRE_Int stop_crit )
{
   /* OBSOLETE (kept for backward compatibility): set the stopping criterion. */
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   data->stop_crit = stop_crit;

   return hypre_error_flag;
}
HYPRE_Int
hypre_GMRESGetStopCrit( void *gmres_vdata,
                        HYPRE_Int * stop_crit )
{
   /* OBSOLETE (kept for backward compatibility): get the stopping criterion. */
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *stop_crit = data->stop_crit;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_GMRESSetPrecond
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_GMRESSetPrecond( void *gmres_vdata,
                       HYPRE_Int (*precond)(void*,void*,void*,void*),
                       HYPRE_Int (*precond_setup)(void*,void*,void*,void*),
                       void *precond_data )
{
   /* Install the preconditioner apply/setup hooks and their private data. */
   hypre_GMRESData      *data = (hypre_GMRESData *) gmres_vdata;
   hypre_GMRESFunctions *fns  = data->functions;

   fns->precond       = precond;
   fns->precond_setup = precond_setup;
   data->precond_data = precond_data;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_GMRESGetPrecond
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_GMRESGetPrecond( void *gmres_vdata,
                       HYPRE_Solver *precond_data_ptr )
{
   /* Hand back the preconditioner's private data, cast to HYPRE_Solver. */
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *precond_data_ptr = (HYPRE_Solver) (data->precond_data);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_GMRESSetPrintLevel, hypre_GMRESGetPrintLevel
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_GMRESSetPrintLevel( void *gmres_vdata,
                          HYPRE_Int level)
{
   /* Set the print verbosity (printing happens when level > 0). */
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   data->print_level = level;

   return hypre_error_flag;
}
HYPRE_Int
hypre_GMRESGetPrintLevel( void *gmres_vdata,
                          HYPRE_Int * level)
{
   /* Retrieve the print verbosity level. */
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *level = data->print_level;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_GMRESSetLogging, hypre_GMRESGetLogging
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_GMRESSetLogging( void *gmres_vdata,
                       HYPRE_Int level)
{
   /* Set the logging level (extra bookkeeping when level > 0). */
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   data->logging = level;

   return hypre_error_flag;
}
HYPRE_Int
hypre_GMRESGetLogging( void *gmres_vdata,
                       HYPRE_Int * level)
{
   /* Retrieve the logging level. */
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *level = data->logging;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_GMRESGetNumIterations
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_GMRESGetNumIterations( void *gmres_vdata,
                             HYPRE_Int *num_iterations )
{
   /* Report how many iterations the last solve performed. */
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *num_iterations = data->num_iterations;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_GMRESGetConverged
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_GMRESGetConverged( void *gmres_vdata,
                         HYPRE_Int *converged )
{
   /* Report whether the last solve converged. */
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *converged = data->converged;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_GMRESGetFinalRelativeResidualNorm
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_GMRESGetFinalRelativeResidualNorm( void *gmres_vdata,
                                         HYPRE_Real *relative_residual_norm )
{
   /* Report the relative residual norm recorded at the end of the last solve. */
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *relative_residual_norm = data->rel_residual_norm;

   return hypre_error_flag;
}
| 42,365 | 32.974338 | 99 | c |
AMG | AMG-master/krylov/gmres.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* GMRES gmres
*
*****************************************************************************/
#ifndef hypre_KRYLOV_GMRES_HEADER
#define hypre_KRYLOV_GMRES_HEADER
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/**
* @name Generic GMRES Interface
*
* A general description of the interface goes here...
*
* @memo A generic GMRES linear solver interface
* @version 0.1
* @author Jeffrey F. Painter
**/
/*@{*/
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
* hypre_GMRESData and hypre_GMRESFunctions
*--------------------------------------------------------------------------*/
/**
* @name GMRES structs
*
* Description...
**/
/*@{*/
/**
* The {\tt hypre\_GMRESFunctions} object ...
**/
/* Table of matrix/vector kernels and preconditioner hooks that the generic
   GMRES driver calls; filled in by hypre_GMRESFunctionsCreate. */
typedef struct
{
   char *       (*CAlloc)             ( size_t count, size_t elt_size );
   HYPRE_Int    (*Free)               ( char *ptr );
   HYPRE_Int    (*CommInfo)           ( void *A, HYPRE_Int *my_id,
                                        HYPRE_Int *num_procs );
   void *       (*CreateVector)       ( void *vector );
   void *       (*CreateVectorArray)  ( HYPRE_Int size, void *vectors );
   HYPRE_Int    (*DestroyVector)      ( void *vector );
   void *       (*MatvecCreate)       ( void *A, void *x );
   HYPRE_Int    (*Matvec)             ( void *matvec_data, HYPRE_Complex alpha, void *A,
                                        void *x, HYPRE_Complex beta, void *y );
   HYPRE_Int    (*MatvecDestroy)      ( void *matvec_data );
   HYPRE_Real   (*InnerProd)          ( void *x, void *y );
   HYPRE_Int    (*CopyVector)         ( void *x, void *y );
   HYPRE_Int    (*ClearVector)        ( void *x );
   HYPRE_Int    (*ScaleVector)        ( HYPRE_Complex alpha, void *x );
   HYPRE_Int    (*Axpy)               ( HYPRE_Complex alpha, void *x, void *y );
   /* Full prototypes instead of empty parentheses (which declare "unspecified
      arguments" in C): keeps this header consistent with the copy of this
      struct in krylov.h and lets the compiler type-check the hooks. */
   HYPRE_Int    (*precond)            ( void *vdata, void *A, void *b, void *x );
   HYPRE_Int    (*precond_setup)      ( void *vdata, void *A, void *b, void *x );
} hypre_GMRESFunctions;
/**
* The {\tt hypre\_GMRESData} object ...
**/
typedef struct
{
   HYPRE_Int      k_dim;             /* restart length (max Krylov basis size) */
   HYPRE_Int      min_iter;          /* iterate at least this many times */
   HYPRE_Int      max_iter;          /* iterate at most this many times */
   HYPRE_Int      rel_change;        /* also require small relative change in x */
   HYPRE_Int      skip_real_r_check; /* skip recomputing the true residual */
   HYPRE_Int      stop_crit;         /* OBSOLETE (see accessor comments) */
   HYPRE_Int      converged;         /* set to 1 by the solver on convergence */
   HYPRE_Real     tol;               /* relative convergence tolerance */
   HYPRE_Real     cf_tol;            /* convergence-factor (stagnation) tol */
   HYPRE_Real     a_tol;             /* absolute convergence tolerance */
   HYPRE_Real     rel_residual_norm; /* final |r|/|b| (or |r| if |b|=0) */
   void          *A;                 /* system matrix (opaque) */
   void          *r;                 /* residual work vector */
   void          *w;                 /* work vector */
   void          *w_2;               /* extra work vector; allocated only when
                                        rel_change is on */
   void         **p;                 /* Krylov basis vectors, k_dim+1 of them */
   void          *matvec_data;       /* opaque state for the Matvec kernel */
   void          *precond_data;      /* opaque state for the preconditioner */
   hypre_GMRESFunctions * functions; /* kernel/hook table used by the driver */
   /* log info (always logged) */
   HYPRE_Int      num_iterations;    /* iterations performed by the last solve */
   HYPRE_Int      print_level;       /* printing when print_level>0 */
   HYPRE_Int      logging;           /* extra computations for logging when logging>0 */
   HYPRE_Real    *norms;             /* per-iteration residual norms */
   char          *log_file_name;     /* defaults to "gmres.out.log" in Setup */
} hypre_GMRESData;
#ifdef __cplusplus
extern "C" {
#endif
/**
* @name generic GMRES Solver
*
* Description...
**/
/*@{*/
/**
* Description...
*
* @param param [IN] ...
**/
/* Bundle the supplied memory, communication, matrix/vector and
   preconditioner kernels into a hypre_GMRESFunctions table
   (definition not in view). */
hypre_GMRESFunctions *
hypre_GMRESFunctionsCreate(
   char *       (*CAlloc)             ( size_t count, size_t elt_size ),
   HYPRE_Int    (*Free)               ( char *ptr ),
   HYPRE_Int    (*CommInfo)           ( void *A, HYPRE_Int *my_id,
                                        HYPRE_Int *num_procs ),
   void *       (*CreateVector)       ( void *vector ),
   void *       (*CreateVectorArray)  ( HYPRE_Int size, void *vectors ),
   HYPRE_Int    (*DestroyVector)      ( void *vector ),
   void *       (*MatvecCreate)       ( void *A, void *x ),
   HYPRE_Int    (*Matvec)             ( void *matvec_data, HYPRE_Complex alpha, void *A,
                                        void *x, HYPRE_Complex beta, void *y ),
   HYPRE_Int    (*MatvecDestroy)      ( void *matvec_data ),
   HYPRE_Real   (*InnerProd)          ( void *x, void *y ),
   HYPRE_Int    (*CopyVector)         ( void *x, void *y ),
   HYPRE_Int    (*ClearVector)        ( void *x ),
   HYPRE_Int    (*ScaleVector)        ( HYPRE_Complex alpha, void *x ),
   HYPRE_Int    (*Axpy)               ( HYPRE_Complex alpha, void *x, void *y ),
   HYPRE_Int    (*PrecondSetup)       ( void *vdata, void *A, void *b, void *x ),
   HYPRE_Int    (*Precond)            ( void *vdata, void *A, void *b, void *x )
   );
/**
* Description...
*
* @param param [IN] ...
**/
/* Construct a GMRES solver object built on the given kernel table;
   presumably returns a hypre_GMRESData* cast to void* (definition not
   in view). */
void *
hypre_GMRESCreate( hypre_GMRESFunctions *gmres_functions );
#ifdef __cplusplus
}
#endif
#endif
| 5,459 | 30.37931 | 83 | h |
AMG | AMG-master/krylov/krylov.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include "HYPRE_krylov.h"
#ifndef hypre_KRYLOV_HEADER
#define hypre_KRYLOV_HEADER
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "_hypre_utilities.h"
#define hypre_CTAllocF(type, count, funcs) ( (type *)(*(funcs->CAlloc))((size_t)(count), (size_t)sizeof(type)) )
#define hypre_TFreeF( ptr, funcs ) ( (*(funcs->Free))((char *)ptr), ptr = NULL )
#ifdef __cplusplus
extern "C" {
#endif
/******************************************************************************
*
* GMRES gmres
*
*****************************************************************************/
#ifndef hypre_KRYLOV_GMRES_HEADER
#define hypre_KRYLOV_GMRES_HEADER
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/**
* @name Generic GMRES Interface
*
* A general description of the interface goes here...
*
* @memo A generic GMRES linear solver interface
* @version 0.1
* @author Jeffrey F. Painter
**/
/*@{*/
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
* hypre_GMRESData and hypre_GMRESFunctions
*--------------------------------------------------------------------------*/
/**
* @name GMRES structs
*
* Description...
**/
/*@{*/
/**
* The {\tt hypre\_GMRESFunctions} object ...
**/
/* Table of matrix/vector kernels and preconditioner hooks used by the
   generic GMRES driver (krylov.h copy; note precond/precond_setup carry
   full prototypes here). */
typedef struct
{
   char *       (*CAlloc)             ( size_t count, size_t elt_size );
   HYPRE_Int    (*Free)               ( char *ptr );
   HYPRE_Int    (*CommInfo)           ( void *A, HYPRE_Int *my_id,
                                        HYPRE_Int *num_procs );
   void *       (*CreateVector)       ( void *vector );
   void *       (*CreateVectorArray)  ( HYPRE_Int size, void *vectors );
   HYPRE_Int    (*DestroyVector)      ( void *vector );
   void *       (*MatvecCreate)       ( void *A, void *x );
   HYPRE_Int    (*Matvec)             ( void *matvec_data, HYPRE_Complex alpha, void *A,
                                        void *x, HYPRE_Complex beta, void *y );
   HYPRE_Int    (*MatvecDestroy)      ( void *matvec_data );
   HYPRE_Real   (*InnerProd)          ( void *x, void *y );
   HYPRE_Int    (*CopyVector)         ( void *x, void *y );
   HYPRE_Int    (*ClearVector)        ( void *x );
   HYPRE_Int    (*ScaleVector)        ( HYPRE_Complex alpha, void *x );
   HYPRE_Int    (*Axpy)               ( HYPRE_Complex alpha, void *x, void *y );
   HYPRE_Int    (*precond)       (void *vdata , void *A , void *b , void *x);
   HYPRE_Int    (*precond_setup) (void *vdata , void *A , void *b , void *x);
} hypre_GMRESFunctions;
/**
* The {\tt hypre\_GMRESData} object ...
**/
/*
 * Per-solver state for the generic GMRES driver.  Created by
 * hypre_GMRESCreate, populated by hypre_GMRESSetup, consumed by
 * hypre_GMRESSolve.
 */
typedef struct
{
HYPRE_Int k_dim; /* restart dimension: size of the Krylov basis (p holds k_dim+1 vectors) */
HYPRE_Int min_iter; /* iterate at least this many times */
HYPRE_Int max_iter; /* hard iteration cap */
HYPRE_Int rel_change; /* nonzero: also require small relative change in x */
HYPRE_Int skip_real_r_check; /* nonzero: skip recomputation of the true residual */
HYPRE_Int stop_crit; /* nonzero: pure absolute residual test (being phased out) */
HYPRE_Int converged; /* set by Solve: did the iteration converge? */
HYPRE_Real tol; /* relative convergence tolerance */
HYPRE_Real cf_tol; /* convergence-factor tolerance; >0 enables slow-convergence test */
HYPRE_Real a_tol; /* absolute convergence tolerance */
HYPRE_Real rel_residual_norm; /* final relative residual norm (output) */
void *A; /* operator, opaque to the driver */
void *r; /* residual vector */
void *w; /* work vector */
void *w_2; /* second work vector */
void **p; /* Krylov basis vectors, length k_dim+1 */
void *matvec_data; /* backend setup data for Matvec */
void *precond_data; /* opaque preconditioner state */
hypre_GMRESFunctions * functions; /* backend vtable; owns the allocator used for this struct */
/* log info (always logged) */
HYPRE_Int num_iterations;
HYPRE_Int print_level; /* printing when print_level>0 */
HYPRE_Int logging; /* extra computations for logging when logging>0 */
HYPRE_Real *norms; /* per-iteration residual norms (allocated when logging/printing) */
char *log_file_name;
} hypre_GMRESData;
#ifdef __cplusplus
extern "C" {
#endif
/**
* @name generic GMRES Solver
*
* Description...
**/
/*@{*/
/**
* Description...
*
* @param param [IN] ...
**/
hypre_GMRESFunctions *
hypre_GMRESFunctionsCreate(
char * (*CAlloc) ( size_t count, size_t elt_size ),
HYPRE_Int (*Free) ( char *ptr ),
HYPRE_Int (*CommInfo) ( void *A, HYPRE_Int *my_id,
HYPRE_Int *num_procs ),
void * (*CreateVector) ( void *vector ),
void * (*CreateVectorArray) ( HYPRE_Int size, void *vectors ),
HYPRE_Int (*DestroyVector) ( void *vector ),
void * (*MatvecCreate) ( void *A, void *x ),
HYPRE_Int (*Matvec) ( void *matvec_data, HYPRE_Complex alpha, void *A,
void *x, HYPRE_Complex beta, void *y ),
HYPRE_Int (*MatvecDestroy) ( void *matvec_data ),
HYPRE_Real (*InnerProd) ( void *x, void *y ),
HYPRE_Int (*CopyVector) ( void *x, void *y ),
HYPRE_Int (*ClearVector) ( void *x ),
HYPRE_Int (*ScaleVector) ( HYPRE_Complex alpha, void *x ),
HYPRE_Int (*Axpy) ( HYPRE_Complex alpha, void *x, void *y ),
HYPRE_Int (*PrecondSetup) ( void *vdata, void *A, void *b, void *x ),
HYPRE_Int (*Precond) ( void *vdata, void *A, void *b, void *x )
);
/**
* Description...
*
* @param param [IN] ...
**/
void *
hypre_GMRESCreate( hypre_GMRESFunctions *gmres_functions );
#ifdef __cplusplus
}
#endif
#endif
/******************************************************************************
*
* Preconditioned conjugate gradient (Omin) headers
*
*****************************************************************************/
#ifndef hypre_KRYLOV_PCG_HEADER
#define hypre_KRYLOV_PCG_HEADER
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/**
* @name Generic PCG Interface
*
* A general description of the interface goes here...
*
* @memo A generic PCG linear solver interface
* @version 0.1
* @author Jeffrey F. Painter
**/
/*@{*/
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
* hypre_PCGData and hypre_PCGFunctions
*--------------------------------------------------------------------------*/
/**
* @name PCG structs
*
* Description...
**/
/*@{*/
/**
* The {\tt hypre\_PCGFunctions} object ...
**/
/*
 * Virtual function table for the generic PCG driver — same pattern as
 * hypre_GMRESFunctions but without CommInfo/CreateVectorArray, which PCG
 * does not need.
 */
typedef struct
{
char * (*CAlloc) ( size_t count, size_t elt_size ); /* calloc-style allocation (presumably zero-initialized — confirm backend) */
HYPRE_Int (*Free) ( char *ptr ); /* release memory obtained from CAlloc */
HYPRE_Int (*CommInfo) ( void *A, HYPRE_Int *my_id,
HYPRE_Int *num_procs ); /* rank/size of the communicator behind A */
void * (*CreateVector) ( void *vector ); /* new vector compatible with `vector` */
HYPRE_Int (*DestroyVector) ( void *vector );
void * (*MatvecCreate) ( void *A, void *x ); /* setup data reused by Matvec */
HYPRE_Int (*Matvec) ( void *matvec_data, HYPRE_Complex alpha, void *A,
void *x, HYPRE_Complex beta, void *y ); /* y := alpha*A*x + beta*y */
HYPRE_Int (*MatvecDestroy) ( void *matvec_data );
HYPRE_Real (*InnerProd) ( void *x, void *y ); /* <x,y> */
HYPRE_Int (*CopyVector) ( void *x, void *y ); /* y := x */
HYPRE_Int (*ClearVector) ( void *x ); /* x := 0 */
HYPRE_Int (*ScaleVector) ( HYPRE_Complex alpha, void *x ); /* x := alpha*x */
HYPRE_Int (*Axpy) ( HYPRE_Complex alpha, void *x, void *y ); /* y := alpha*x + y */
HYPRE_Int (*precond)(void *vdata , void *A , void *b , void *x); /* apply preconditioner: x ~ M^{-1} b */
HYPRE_Int (*precond_setup)(void *vdata , void *A , void *b , void *x); /* one-time preconditioner setup */
} hypre_PCGFunctions;
/**
* The {\tt hypre\_PCGData} object ...
**/
/*
Summary of Parameters to Control Stopping Test:
- Standard (default) error tolerance: |delta-residual|/|right-hand-side|<tol
where the norm is an energy norm wrt preconditioner, |r|=sqrt(<Cr,r>).
- two_norm!=0 means: the norm is the L2 norm, |r|=sqrt(<r,r>)
- rel_change!=0 means: if pass the other stopping criteria, also check the
relative change in the solution x. Pass iff this relative change is small.
- tol = relative error tolerance, as above
-a_tol = absolute convergence tolerance (default is 0.0)
If one desires the convergence test to check the absolute
convergence tolerance *only*, then set the relative convergence
tolerance to 0.0. (The default convergence test is <C*r,r> <=
max(relative_tolerance^2 * <C*b, b>, absolute_tolerance^2)
- cf_tol = convergence factor tolerance; if >0 used for special test
for slow convergence
- stop_crit!=0 means (TO BE PHASED OUT):
pure absolute error tolerance rather than a pure relative
error tolerance on the residual. Never applies if rel_change!=0 or atolf!=0.
- atolf = absolute error tolerance factor to be used _together_ with the
relative error tolerance, |delta-residual| / ( atolf + |right-hand-side| ) < tol
(To BE PHASED OUT)
- recompute_residual means: when the iteration seems to be converged, recompute the
residual from scratch (r=b-Ax) and use this new residual to repeat the convergence test.
This can be expensive, use this only if you have seen a problem with the regular
residual computation.
- recompute_residual_p means: recompute the residual from scratch (r=b-Ax)
every "recompute_residual_p" iterations. This can be expensive and degrade the
convergence. Use it only if you have seen a problem with the regular residual
computation.
*/
/*
 * Per-solver state for the generic PCG driver.  The stopping-test
 * parameters are described in the long comment block above this struct.
 */
typedef struct
{
HYPRE_Real tol; /* relative convergence tolerance */
HYPRE_Real atolf; /* absolute tolerance factor, used with tol (being phased out) */
HYPRE_Real cf_tol; /* convergence-factor tolerance; >0 enables slow-convergence test */
HYPRE_Real a_tol; /* absolute convergence tolerance */
HYPRE_Real rtol; /* residual tolerance (see HYPRE_PCGSetResidualTol) */
HYPRE_Int max_iter; /* hard iteration cap */
HYPRE_Int two_norm; /* nonzero: use L2 norm |r|=sqrt(<r,r>) instead of the C-energy norm */
HYPRE_Int rel_change; /* nonzero: also require small relative change in x */
HYPRE_Int recompute_residual; /* recompute r=b-Ax from scratch when convergence is suspected */
HYPRE_Int recompute_residual_p; /* recompute r=b-Ax every this many iterations (0 = never) */
HYPRE_Int stop_crit; /* nonzero: pure absolute residual test (being phased out) */
HYPRE_Int converged; /* set by Solve: did the iteration converge? */
void *A; /* operator, opaque to the driver */
void *p; /* search-direction vector */
void *s; /* work vector (A*p) */
void *r; /* ...contains the residual. This is currently kept permanently.
If that is ever changed, it still must be kept if logging>1 */
HYPRE_Int owns_matvec_data; /* normally 1; if 0, don't delete it */
void *matvec_data; /* backend setup data for Matvec */
void *precond_data; /* opaque preconditioner state */
hypre_PCGFunctions * functions; /* backend vtable; owns the allocator used for this struct */
/* log info (always logged) */
HYPRE_Int num_iterations;
HYPRE_Real rel_residual_norm; /* final relative residual norm (output) */
HYPRE_Int print_level; /* printing when print_level>0 */
HYPRE_Int logging; /* extra computations for logging when logging>0 */
HYPRE_Real *norms; /* per-iteration residual norms */
HYPRE_Real *rel_norms; /* per-iteration relative residual norms */
} hypre_PCGData;
#define hypre_PCGDataOwnsMatvecData(pcgdata) ((pcgdata) -> owns_matvec_data)
#ifdef __cplusplus
extern "C" {
#endif
/**
* @name generic PCG Solver
*
* Description...
**/
/*@{*/
/**
* Description...
*
* @param param [IN] ...
**/
hypre_PCGFunctions *
hypre_PCGFunctionsCreate(
char * (*CAlloc) ( size_t count, size_t elt_size ),
HYPRE_Int (*Free) ( char *ptr ),
HYPRE_Int (*CommInfo) ( void *A, HYPRE_Int *my_id,
HYPRE_Int *num_procs ),
void * (*CreateVector) ( void *vector ),
HYPRE_Int (*DestroyVector) ( void *vector ),
void * (*MatvecCreate) ( void *A, void *x ),
HYPRE_Int (*Matvec) ( void *matvec_data, HYPRE_Complex alpha, void *A,
void *x, HYPRE_Complex beta, void *y ),
HYPRE_Int (*MatvecDestroy) ( void *matvec_data ),
HYPRE_Real (*InnerProd) ( void *x, void *y ),
HYPRE_Int (*CopyVector) ( void *x, void *y ),
HYPRE_Int (*ClearVector) ( void *x ),
HYPRE_Int (*ScaleVector) ( HYPRE_Complex alpha, void *x ),
HYPRE_Int (*Axpy) ( HYPRE_Complex alpha, void *x, void *y ),
HYPRE_Int (*PrecondSetup) ( void *vdata, void *A, void *b, void *x ),
HYPRE_Int (*Precond) ( void *vdata, void *A, void *b, void *x )
);
/**
* Description...
*
* @param param [IN] ...
**/
void *
hypre_PCGCreate( hypre_PCGFunctions *pcg_functions );
#ifdef __cplusplus
}
#endif
#endif
/* gmres.c */
void *hypre_GMRESCreate ( hypre_GMRESFunctions *gmres_functions );
HYPRE_Int hypre_GMRESDestroy ( void *gmres_vdata );
HYPRE_Int hypre_GMRESGetResidual ( void *gmres_vdata , void **residual );
HYPRE_Int hypre_GMRESSetup ( void *gmres_vdata , void *A , void *b , void *x );
HYPRE_Int hypre_GMRESSolve ( void *gmres_vdata , void *A , void *b , void *x );
HYPRE_Int hypre_GMRESSetKDim ( void *gmres_vdata , HYPRE_Int k_dim );
HYPRE_Int hypre_GMRESGetKDim ( void *gmres_vdata , HYPRE_Int *k_dim );
HYPRE_Int hypre_GMRESSetTol ( void *gmres_vdata , HYPRE_Real tol );
HYPRE_Int hypre_GMRESGetTol ( void *gmres_vdata , HYPRE_Real *tol );
HYPRE_Int hypre_GMRESSetAbsoluteTol ( void *gmres_vdata , HYPRE_Real a_tol );
HYPRE_Int hypre_GMRESGetAbsoluteTol ( void *gmres_vdata , HYPRE_Real *a_tol );
HYPRE_Int hypre_GMRESSetConvergenceFactorTol ( void *gmres_vdata , HYPRE_Real cf_tol );
HYPRE_Int hypre_GMRESGetConvergenceFactorTol ( void *gmres_vdata , HYPRE_Real *cf_tol );
HYPRE_Int hypre_GMRESSetMinIter ( void *gmres_vdata , HYPRE_Int min_iter );
HYPRE_Int hypre_GMRESGetMinIter ( void *gmres_vdata , HYPRE_Int *min_iter );
HYPRE_Int hypre_GMRESSetMaxIter ( void *gmres_vdata , HYPRE_Int max_iter );
HYPRE_Int hypre_GMRESGetMaxIter ( void *gmres_vdata , HYPRE_Int *max_iter );
HYPRE_Int hypre_GMRESSetRelChange ( void *gmres_vdata , HYPRE_Int rel_change );
HYPRE_Int hypre_GMRESGetRelChange ( void *gmres_vdata , HYPRE_Int *rel_change );
HYPRE_Int hypre_GMRESSetSkipRealResidualCheck ( void *gmres_vdata , HYPRE_Int skip_real_r_check );
HYPRE_Int hypre_GMRESGetSkipRealResidualCheck ( void *gmres_vdata , HYPRE_Int *skip_real_r_check );
HYPRE_Int hypre_GMRESSetStopCrit ( void *gmres_vdata , HYPRE_Int stop_crit );
HYPRE_Int hypre_GMRESGetStopCrit ( void *gmres_vdata , HYPRE_Int *stop_crit );
HYPRE_Int hypre_GMRESSetPrecond ( void *gmres_vdata , HYPRE_Int (*precond )(void*,void*,void*,void*), HYPRE_Int (*precond_setup )(void*,void*,void*,void*), void *precond_data );
HYPRE_Int hypre_GMRESGetPrecond ( void *gmres_vdata , HYPRE_Solver *precond_data_ptr );
HYPRE_Int hypre_GMRESSetPrintLevel ( void *gmres_vdata , HYPRE_Int level );
HYPRE_Int hypre_GMRESGetPrintLevel ( void *gmres_vdata , HYPRE_Int *level );
HYPRE_Int hypre_GMRESSetLogging ( void *gmres_vdata , HYPRE_Int level );
HYPRE_Int hypre_GMRESGetLogging ( void *gmres_vdata , HYPRE_Int *level );
HYPRE_Int hypre_GMRESGetNumIterations ( void *gmres_vdata , HYPRE_Int *num_iterations );
HYPRE_Int hypre_GMRESGetConverged ( void *gmres_vdata , HYPRE_Int *converged );
HYPRE_Int hypre_GMRESGetFinalRelativeResidualNorm ( void *gmres_vdata , HYPRE_Real *relative_residual_norm );
/* HYPRE_gmres.c */
HYPRE_Int HYPRE_GMRESSetup ( HYPRE_Solver solver , HYPRE_Matrix A , HYPRE_Vector b , HYPRE_Vector x );
HYPRE_Int HYPRE_GMRESSolve ( HYPRE_Solver solver , HYPRE_Matrix A , HYPRE_Vector b , HYPRE_Vector x );
HYPRE_Int HYPRE_GMRESSetKDim ( HYPRE_Solver solver , HYPRE_Int k_dim );
HYPRE_Int HYPRE_GMRESGetKDim ( HYPRE_Solver solver , HYPRE_Int *k_dim );
HYPRE_Int HYPRE_GMRESSetTol ( HYPRE_Solver solver , HYPRE_Real tol );
HYPRE_Int HYPRE_GMRESGetTol ( HYPRE_Solver solver , HYPRE_Real *tol );
HYPRE_Int HYPRE_GMRESSetAbsoluteTol ( HYPRE_Solver solver , HYPRE_Real a_tol );
HYPRE_Int HYPRE_GMRESGetAbsoluteTol ( HYPRE_Solver solver , HYPRE_Real *a_tol );
HYPRE_Int HYPRE_GMRESSetConvergenceFactorTol ( HYPRE_Solver solver , HYPRE_Real cf_tol );
HYPRE_Int HYPRE_GMRESGetConvergenceFactorTol ( HYPRE_Solver solver , HYPRE_Real *cf_tol );
HYPRE_Int HYPRE_GMRESSetMinIter ( HYPRE_Solver solver , HYPRE_Int min_iter );
HYPRE_Int HYPRE_GMRESGetMinIter ( HYPRE_Solver solver , HYPRE_Int *min_iter );
HYPRE_Int HYPRE_GMRESSetMaxIter ( HYPRE_Solver solver , HYPRE_Int max_iter );
HYPRE_Int HYPRE_GMRESGetMaxIter ( HYPRE_Solver solver , HYPRE_Int *max_iter );
HYPRE_Int HYPRE_GMRESSetStopCrit ( HYPRE_Solver solver , HYPRE_Int stop_crit );
HYPRE_Int HYPRE_GMRESGetStopCrit ( HYPRE_Solver solver , HYPRE_Int *stop_crit );
HYPRE_Int HYPRE_GMRESSetRelChange ( HYPRE_Solver solver , HYPRE_Int rel_change );
HYPRE_Int HYPRE_GMRESGetRelChange ( HYPRE_Solver solver , HYPRE_Int *rel_change );
HYPRE_Int HYPRE_GMRESSetSkipRealResidualCheck ( HYPRE_Solver solver , HYPRE_Int skip_real_r_check );
HYPRE_Int HYPRE_GMRESGetSkipRealResidualCheck ( HYPRE_Solver solver , HYPRE_Int *skip_real_r_check );
HYPRE_Int HYPRE_GMRESSetPrecond ( HYPRE_Solver solver , HYPRE_PtrToSolverFcn precond , HYPRE_PtrToSolverFcn precond_setup , HYPRE_Solver precond_solver );
HYPRE_Int HYPRE_GMRESGetPrecond ( HYPRE_Solver solver , HYPRE_Solver *precond_data_ptr );
HYPRE_Int HYPRE_GMRESSetPrintLevel ( HYPRE_Solver solver , HYPRE_Int level );
HYPRE_Int HYPRE_GMRESGetPrintLevel ( HYPRE_Solver solver , HYPRE_Int *level );
HYPRE_Int HYPRE_GMRESSetLogging ( HYPRE_Solver solver , HYPRE_Int level );
HYPRE_Int HYPRE_GMRESGetLogging ( HYPRE_Solver solver , HYPRE_Int *level );
HYPRE_Int HYPRE_GMRESGetNumIterations ( HYPRE_Solver solver , HYPRE_Int *num_iterations );
HYPRE_Int HYPRE_GMRESGetConverged ( HYPRE_Solver solver , HYPRE_Int *converged );
HYPRE_Int HYPRE_GMRESGetFinalRelativeResidualNorm ( HYPRE_Solver solver , HYPRE_Real *norm );
HYPRE_Int HYPRE_GMRESGetResidual ( HYPRE_Solver solver , void **residual );
/* HYPRE_pcg.c */
HYPRE_Int HYPRE_PCGSetup ( HYPRE_Solver solver , HYPRE_Matrix A , HYPRE_Vector b , HYPRE_Vector x );
HYPRE_Int HYPRE_PCGSolve ( HYPRE_Solver solver , HYPRE_Matrix A , HYPRE_Vector b , HYPRE_Vector x );
HYPRE_Int HYPRE_PCGSetTol ( HYPRE_Solver solver , HYPRE_Real tol );
HYPRE_Int HYPRE_PCGGetTol ( HYPRE_Solver solver , HYPRE_Real *tol );
HYPRE_Int HYPRE_PCGSetAbsoluteTol ( HYPRE_Solver solver , HYPRE_Real a_tol );
HYPRE_Int HYPRE_PCGGetAbsoluteTol ( HYPRE_Solver solver , HYPRE_Real *a_tol );
HYPRE_Int HYPRE_PCGSetAbsoluteTolFactor ( HYPRE_Solver solver , HYPRE_Real abstolf );
HYPRE_Int HYPRE_PCGGetAbsoluteTolFactor ( HYPRE_Solver solver , HYPRE_Real *abstolf );
HYPRE_Int HYPRE_PCGSetResidualTol ( HYPRE_Solver solver , HYPRE_Real rtol );
HYPRE_Int HYPRE_PCGGetResidualTol ( HYPRE_Solver solver , HYPRE_Real *rtol );
HYPRE_Int HYPRE_PCGSetConvergenceFactorTol ( HYPRE_Solver solver , HYPRE_Real cf_tol );
HYPRE_Int HYPRE_PCGGetConvergenceFactorTol ( HYPRE_Solver solver , HYPRE_Real *cf_tol );
HYPRE_Int HYPRE_PCGSetMaxIter ( HYPRE_Solver solver , HYPRE_Int max_iter );
HYPRE_Int HYPRE_PCGGetMaxIter ( HYPRE_Solver solver , HYPRE_Int *max_iter );
HYPRE_Int HYPRE_PCGSetStopCrit ( HYPRE_Solver solver , HYPRE_Int stop_crit );
HYPRE_Int HYPRE_PCGGetStopCrit ( HYPRE_Solver solver , HYPRE_Int *stop_crit );
HYPRE_Int HYPRE_PCGSetTwoNorm ( HYPRE_Solver solver , HYPRE_Int two_norm );
HYPRE_Int HYPRE_PCGGetTwoNorm ( HYPRE_Solver solver , HYPRE_Int *two_norm );
HYPRE_Int HYPRE_PCGSetRelChange ( HYPRE_Solver solver , HYPRE_Int rel_change );
HYPRE_Int HYPRE_PCGGetRelChange ( HYPRE_Solver solver , HYPRE_Int *rel_change );
HYPRE_Int HYPRE_PCGSetRecomputeResidual ( HYPRE_Solver solver , HYPRE_Int recompute_residual );
HYPRE_Int HYPRE_PCGGetRecomputeResidual ( HYPRE_Solver solver , HYPRE_Int *recompute_residual );
HYPRE_Int HYPRE_PCGSetRecomputeResidualP ( HYPRE_Solver solver , HYPRE_Int recompute_residual_p );
HYPRE_Int HYPRE_PCGGetRecomputeResidualP ( HYPRE_Solver solver , HYPRE_Int *recompute_residual_p );
HYPRE_Int HYPRE_PCGSetPrecond ( HYPRE_Solver solver , HYPRE_PtrToSolverFcn precond , HYPRE_PtrToSolverFcn precond_setup , HYPRE_Solver precond_solver );
HYPRE_Int HYPRE_PCGGetPrecond ( HYPRE_Solver solver , HYPRE_Solver *precond_data_ptr );
HYPRE_Int HYPRE_PCGSetLogging ( HYPRE_Solver solver , HYPRE_Int level );
HYPRE_Int HYPRE_PCGGetLogging ( HYPRE_Solver solver , HYPRE_Int *level );
HYPRE_Int HYPRE_PCGSetPrintLevel ( HYPRE_Solver solver , HYPRE_Int level );
HYPRE_Int HYPRE_PCGGetPrintLevel ( HYPRE_Solver solver , HYPRE_Int *level );
HYPRE_Int HYPRE_PCGGetNumIterations ( HYPRE_Solver solver , HYPRE_Int *num_iterations );
HYPRE_Int HYPRE_PCGGetConverged ( HYPRE_Solver solver , HYPRE_Int *converged );
HYPRE_Int HYPRE_PCGGetFinalRelativeResidualNorm ( HYPRE_Solver solver , HYPRE_Real *norm );
HYPRE_Int HYPRE_PCGGetResidual ( HYPRE_Solver solver , void **residual );
/* pcg.c */
void *hypre_PCGCreate ( hypre_PCGFunctions *pcg_functions );
HYPRE_Int hypre_PCGDestroy ( void *pcg_vdata );
HYPRE_Int hypre_PCGGetResidual ( void *pcg_vdata , void **residual );
HYPRE_Int hypre_PCGSetup ( void *pcg_vdata , void *A , void *b , void *x );
HYPRE_Int hypre_PCGSolve ( void *pcg_vdata , void *A , void *b , void *x );
HYPRE_Int hypre_PCGSetTol ( void *pcg_vdata , HYPRE_Real tol );
HYPRE_Int hypre_PCGGetTol ( void *pcg_vdata , HYPRE_Real *tol );
HYPRE_Int hypre_PCGSetAbsoluteTol ( void *pcg_vdata , HYPRE_Real a_tol );
HYPRE_Int hypre_PCGGetAbsoluteTol ( void *pcg_vdata , HYPRE_Real *a_tol );
HYPRE_Int hypre_PCGSetAbsoluteTolFactor ( void *pcg_vdata , HYPRE_Real atolf );
HYPRE_Int hypre_PCGGetAbsoluteTolFactor ( void *pcg_vdata , HYPRE_Real *atolf );
HYPRE_Int hypre_PCGSetResidualTol ( void *pcg_vdata , HYPRE_Real rtol );
HYPRE_Int hypre_PCGGetResidualTol ( void *pcg_vdata , HYPRE_Real *rtol );
HYPRE_Int hypre_PCGSetConvergenceFactorTol ( void *pcg_vdata , HYPRE_Real cf_tol );
HYPRE_Int hypre_PCGGetConvergenceFactorTol ( void *pcg_vdata , HYPRE_Real *cf_tol );
HYPRE_Int hypre_PCGSetMaxIter ( void *pcg_vdata , HYPRE_Int max_iter );
HYPRE_Int hypre_PCGGetMaxIter ( void *pcg_vdata , HYPRE_Int *max_iter );
HYPRE_Int hypre_PCGSetTwoNorm ( void *pcg_vdata , HYPRE_Int two_norm );
HYPRE_Int hypre_PCGGetTwoNorm ( void *pcg_vdata , HYPRE_Int *two_norm );
HYPRE_Int hypre_PCGSetRelChange ( void *pcg_vdata , HYPRE_Int rel_change );
HYPRE_Int hypre_PCGGetRelChange ( void *pcg_vdata , HYPRE_Int *rel_change );
HYPRE_Int hypre_PCGSetRecomputeResidual ( void *pcg_vdata , HYPRE_Int recompute_residual );
HYPRE_Int hypre_PCGGetRecomputeResidual ( void *pcg_vdata , HYPRE_Int *recompute_residual );
HYPRE_Int hypre_PCGSetRecomputeResidualP ( void *pcg_vdata , HYPRE_Int recompute_residual_p );
HYPRE_Int hypre_PCGGetRecomputeResidualP ( void *pcg_vdata , HYPRE_Int *recompute_residual_p );
HYPRE_Int hypre_PCGSetStopCrit ( void *pcg_vdata , HYPRE_Int stop_crit );
HYPRE_Int hypre_PCGGetStopCrit ( void *pcg_vdata , HYPRE_Int *stop_crit );
HYPRE_Int hypre_PCGGetPrecond ( void *pcg_vdata , HYPRE_Solver *precond_data_ptr );
HYPRE_Int hypre_PCGSetPrecond ( void *pcg_vdata , HYPRE_Int (*precond )(void*,void*,void*,void*), HYPRE_Int (*precond_setup )(void*,void*,void*,void*), void *precond_data );
HYPRE_Int hypre_PCGSetPrintLevel ( void *pcg_vdata , HYPRE_Int level );
HYPRE_Int hypre_PCGGetPrintLevel ( void *pcg_vdata , HYPRE_Int *level );
HYPRE_Int hypre_PCGSetLogging ( void *pcg_vdata , HYPRE_Int level );
HYPRE_Int hypre_PCGGetLogging ( void *pcg_vdata , HYPRE_Int *level );
HYPRE_Int hypre_PCGGetNumIterations ( void *pcg_vdata , HYPRE_Int *num_iterations );
HYPRE_Int hypre_PCGGetConverged ( void *pcg_vdata , HYPRE_Int *converged );
HYPRE_Int hypre_PCGPrintLogging ( void *pcg_vdata , HYPRE_Int myid );
HYPRE_Int hypre_PCGGetFinalRelativeResidualNorm ( void *pcg_vdata , HYPRE_Real *relative_residual_norm );
#ifdef __cplusplus
}
#endif
#endif
/* ====================================================================
 * File boundary: krylov/old_gmres.c (AMG)
 * ==================================================================== */
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* GMRES gmres
*
*****************************************************************************/
#include "krylov.h"
#include "utilities.h"
/*--------------------------------------------------------------------------
* hypre_GMRESFunctionsCreate
*--------------------------------------------------------------------------*/
/*
 * Build a hypre_GMRESFunctions vtable from the supplied backend
 * operations.  The table itself is allocated with the caller-provided
 * CAlloc, so it must later be released through the matching Free
 * (hypre_GMRESDestroy does this via hypre_TFreeF).
 *
 * Returns the populated table.
 */
hypre_GMRESFunctions *
hypre_GMRESFunctionsCreate(
char * (*CAlloc) ( HYPRE_Int count, HYPRE_Int elt_size ),
HYPRE_Int (*Free) ( char *ptr ),
HYPRE_Int (*CommInfo) ( void *A, HYPRE_Int *my_id, HYPRE_Int *num_procs ),
void * (*CreateVector) ( void *vector ),
void * (*CreateVectorArray) ( HYPRE_Int size, void *vectors ),
HYPRE_Int (*DestroyVector) ( void *vector ),
void * (*MatvecCreate) ( void *A, void *x ),
HYPRE_Int (*Matvec) ( void *matvec_data, HYPRE_Real alpha, void *A,
void *x, HYPRE_Real beta, void *y ),
HYPRE_Int (*MatvecDestroy) ( void *matvec_data ),
HYPRE_Real (*InnerProd) ( void *x, void *y ),
HYPRE_Int (*CopyVector) ( void *x, void *y ),
HYPRE_Int (*ClearVector) ( void *x ),
HYPRE_Int (*ScaleVector) ( HYPRE_Real alpha, void *x ),
HYPRE_Int (*Axpy) ( HYPRE_Real alpha, void *x, void *y ),
HYPRE_Int (*PrecondSetup) ( void *vdata, void *A, void *b, void *x ),
HYPRE_Int (*Precond) ( void *vdata, void *A, void *b, void *x )
)
{
hypre_GMRESFunctions *fns =
(hypre_GMRESFunctions *) CAlloc( 1, sizeof(hypre_GMRESFunctions) );

/* memory management */
fns->CAlloc = CAlloc;
fns->Free = Free;

/* communicator / vector lifecycle (CommInfo and CreateVectorArray have
no PCG counterpart) */
fns->CommInfo = CommInfo;
fns->CreateVector = CreateVector;
fns->CreateVectorArray = CreateVectorArray;
fns->DestroyVector = DestroyVector;

/* matrix-vector product */
fns->MatvecCreate = MatvecCreate;
fns->Matvec = Matvec;
fns->MatvecDestroy = MatvecDestroy;

/* BLAS-1 style vector operations */
fns->InnerProd = InnerProd;
fns->CopyVector = CopyVector;
fns->ClearVector = ClearVector;
fns->ScaleVector = ScaleVector;
fns->Axpy = Axpy;

/* default preconditioner; callers may override later via SetPrecond */
fns->precond_setup = PrecondSetup;
fns->precond = Precond;

return fns;
}
/*--------------------------------------------------------------------------
* hypre_GMRESCreate
*--------------------------------------------------------------------------*/
/*
 * Allocate a GMRES solver object and install its default parameters.
 * Vectors and the matvec context stay NULL until hypre_GMRESSetup.
 * Returns the solver as an opaque (void *) handle.
 */
void *
hypre_GMRESCreate( hypre_GMRESFunctions *gmres_functions )
{
hypre_GMRESData *data = hypre_CTAllocF(hypre_GMRESData, 1, gmres_functions);

data->functions = gmres_functions;

/* iteration controls */
data->k_dim = 5;
data->min_iter = 0;
data->max_iter = 1000;

/* stopping tests */
data->tol = 1.0e-06;
data->cf_tol = 0.0;
data->rel_change = 0;
data->stop_crit = 0; /* rel. residual norm */
data->converged = 0;

/* diagnostics */
data->print_level = 0;
data->logging = 0;
data->norms = NULL;
data->log_file_name = NULL;

/* deferred until Setup */
data->precond_data = NULL;
data->p = NULL;
data->r = NULL;
data->w = NULL;
data->matvec_data = NULL;

return (void *) data;
}
/*--------------------------------------------------------------------------
* hypre_GMRESDestroy
*--------------------------------------------------------------------------*/
/*
 * Tear down a GMRES solver object created by hypre_GMRESCreate.
 *
 * Frees the norms log, the matvec context, the work vectors r and w, the
 * k_dim+1 Krylov basis vectors in p, then the solver struct itself and
 * finally its function table.  The ordering matters: gmres_functions
 * supplies the Free callback, so it must be released last.
 *
 * Fix: the original dereferenced gmres_data->functions BEFORE the
 * `if (gmres_data)` guard, so destroying a NULL handle crashed instead of
 * being the no-op the guard intends.  The vtable is now fetched inside
 * the guard.
 *
 * Returns 0.
 */
HYPRE_Int
hypre_GMRESDestroy( void *gmres_vdata )
{
hypre_GMRESData *gmres_data = gmres_vdata;
HYPRE_Int i, ierr = 0;
if (gmres_data)
{
/* safe now: gmres_data is known non-NULL */
hypre_GMRESFunctions *gmres_functions = gmres_data->functions;
if ( (gmres_data->logging>0) || (gmres_data->print_level) > 0 )
{
if ( (gmres_data -> norms) != NULL )
hypre_TFreeF( gmres_data -> norms, gmres_functions );
}
if ( (gmres_data -> matvec_data) != NULL )
(*(gmres_functions->MatvecDestroy))(gmres_data -> matvec_data);
if ( (gmres_data -> r) != NULL )
(*(gmres_functions->DestroyVector))(gmres_data -> r);
if ( (gmres_data -> w) != NULL )
(*(gmres_functions->DestroyVector))(gmres_data -> w);
if ( (gmres_data -> p) != NULL )
{
for (i = 0; i < (gmres_data -> k_dim+1); i++)
{
if ( (gmres_data -> p)[i] != NULL )
(*(gmres_functions->DestroyVector))( (gmres_data -> p) [i]);
}
hypre_TFreeF( gmres_data->p, gmres_functions );
}
/* free the struct before the vtable: the vtable owns Free() */
hypre_TFreeF( gmres_data, gmres_functions );
hypre_TFreeF( gmres_functions, gmres_functions );
}
return(ierr);
}
/*--------------------------------------------------------------------------
* hypre_GMRESGetResidual
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_GMRESGetResidual( void *gmres_vdata, void **residual )
{
/* returns a poHYPRE_Inter to the residual vector */
HYPRE_Int ierr = 0;
hypre_GMRESData *gmres_data = gmres_vdata;
*residual = gmres_data->r;
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_GMRESSetup
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_GMRESSetup( void *gmres_vdata,
void *A,
void *b,
void *x )
{
hypre_GMRESData *gmres_data = gmres_vdata;
hypre_GMRESFunctions *gmres_functions = gmres_data->functions;
HYPRE_Int k_dim = (gmres_data -> k_dim);
HYPRE_Int max_iter = (gmres_data -> max_iter);
HYPRE_Int (*precond_setup)() = (gmres_functions->precond_setup);
void *precond_data = (gmres_data -> precond_data);
HYPRE_Int ierr = 0;
(gmres_data -> A) = A;
/*--------------------------------------------------
* The arguments for NewVector are important to
* maHYPRE_Intain consistency between the setup and
* compute phases of matvec and the preconditioner.
*--------------------------------------------------*/
if ((gmres_data -> p) == NULL)
(gmres_data -> p) = (*(gmres_functions->CreateVectorArray))(k_dim+1,x);
if ((gmres_data -> r) == NULL)
(gmres_data -> r) = (*(gmres_functions->CreateVector))(b);
if ((gmres_data -> w) == NULL)
(gmres_data -> w) = (*(gmres_functions->CreateVector))(b);
if ((gmres_data -> matvec_data) == NULL)
(gmres_data -> matvec_data) = (*(gmres_functions->MatvecCreate))(A, x);
ierr = precond_setup(precond_data, A, b, x);
/*-----------------------------------------------------
* Allocate space for log info
*-----------------------------------------------------*/
if ( (gmres_data->logging)>0 || (gmres_data->print_level) > 0 )
{
if ((gmres_data -> norms) == NULL)
(gmres_data -> norms) = hypre_CTAllocF(HYPRE_Real, max_iter + 1,gmres_functions);
}
if ( (gmres_data->print_level) > 0 ) {
if ((gmres_data -> log_file_name) == NULL)
(gmres_data -> log_file_name) = "gmres.out.log";
}
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_GMRESSolve
*-------------------------------------------------------------------------*/
/*
 * hypre_GMRESSolve: run restarted GMRES(k_dim) on A x = b, starting from the
 * incoming x and overwriting x with the approximate solution.
 *
 * Return value: 0 on success; 101 is added when INFs/NaNs are detected in the
 * input, and 1 is returned when max_iter is reached without satisfying the
 * convergence test.  (gmres_data -> converged) is set to 1 on true
 * convergence; num_iterations and rel_residual_norm are recorded on exit.
 */
HYPRE_Int
hypre_GMRESSolve(void *gmres_vdata,
void *A,
void *b,
void *x)
{
hypre_GMRESData *gmres_data = gmres_vdata;
hypre_GMRESFunctions *gmres_functions = gmres_data->functions;
/* unpack solver parameters and workspace created by the setup phase */
HYPRE_Int k_dim = (gmres_data -> k_dim);
HYPRE_Int min_iter = (gmres_data -> min_iter);
HYPRE_Int max_iter = (gmres_data -> max_iter);
HYPRE_Int rel_change = (gmres_data -> rel_change);
HYPRE_Int stop_crit = (gmres_data -> stop_crit);
HYPRE_Real accuracy = (gmres_data -> tol);
HYPRE_Real cf_tol = (gmres_data -> cf_tol);
void *matvec_data = (gmres_data -> matvec_data);
void *r = (gmres_data -> r);
void *w = (gmres_data -> w);
/* p[0..k_dim] are the Krylov basis vectors built by Arnoldi below */
void **p = (gmres_data -> p);
/* NOTE(review): legacy K&R-style pointer; precond_data is declared
   HYPRE_Int* here but is treated as opaque data by the precond call */
HYPRE_Int (*precond)() = (gmres_functions -> precond);
HYPRE_Int *precond_data = (gmres_data -> precond_data);
HYPRE_Int print_level = (gmres_data -> print_level);
HYPRE_Int logging = (gmres_data -> logging);
HYPRE_Real *norms = (gmres_data -> norms);
/* not used yet char *log_file_name = (gmres_data -> log_file_name);*/
/* FILE *fp; */
HYPRE_Int ierr = 0;
HYPRE_Int break_value = 0;
HYPRE_Int i, j, k;
/* rs: right-hand side of the Hessenberg least-squares problem;
   hh: (k_dim+1) x k_dim Hessenberg matrix from Arnoldi;
   c, s: cosines/sines of the plane (Givens) rotations applied to hh */
HYPRE_Real *rs, **hh, *c, *s;
HYPRE_Int iter;
HYPRE_Int my_id, num_procs;
HYPRE_Real epsilon, gamma, t, r_norm, b_norm, den_norm, x_norm;
/* epsmac guards the Givens rotation against a zero column norm */
HYPRE_Real epsmac = 1.e-16;
HYPRE_Real ieee_check = 0.;
HYPRE_Real guard_zero_residual;
HYPRE_Real cf_ave_0 = 0.0;
HYPRE_Real cf_ave_1 = 0.0;
HYPRE_Real weight;
HYPRE_Real r_norm_0;
HYPRE_Real relative_error;
(gmres_data -> converged) = 0;
/*-----------------------------------------------------------------------
* With relative change convergence test on, it is possible to attempt
* another iteration with a zero residual. This causes the parameter
* alpha to go NaN. The guard_zero_residual parameter is to circumvent
* this. Perhaps it should be set to something non-zero (but small).
*-----------------------------------------------------------------------*/
guard_zero_residual = 0.0;
(*(gmres_functions->CommInfo))(A,&my_id,&num_procs);
if ( logging>0 || print_level>0 )
{
norms = (gmres_data -> norms);
/* not used yet log_file_name = (gmres_data -> log_file_name);*/
/* fp = fopen(log_file_name,"w"); */
}
/* initialize work arrays (freed again before every return below) */
rs = hypre_CTAllocF(HYPRE_Real,k_dim+1,gmres_functions);
c = hypre_CTAllocF(HYPRE_Real,k_dim,gmres_functions);
s = hypre_CTAllocF(HYPRE_Real,k_dim,gmres_functions);
hh = hypre_CTAllocF(HYPRE_Real*,k_dim+1,gmres_functions);
for (i=0; i < k_dim+1; i++)
{
hh[i] = hypre_CTAllocF(HYPRE_Real,k_dim,gmres_functions);
}
(*(gmres_functions->CopyVector))(b,p[0]);
/* compute initial residual: p[0] = b - A*x */
(*(gmres_functions->Matvec))(matvec_data,-1.0, A, x, 1.0, p[0]);
b_norm = sqrt((*(gmres_functions->InnerProd))(b,b));
/* Since it does not diminish performance, attempt to return an error flag
and notify users when they supply bad input. */
if (b_norm != 0.) ieee_check = b_norm/b_norm; /* INF -> NaN conversion */
if (ieee_check != ieee_check)
{
/* ...INFs or NaNs in input can make ieee_check a NaN. This test
for ieee_check self-equality works on all IEEE-compliant compilers/
machines, c.f. page 8 of "Lecture Notes on the Status of IEEE 754"
by W. Kahan, May 31, 1996. Currently (July 2002) this paper may be
found at http://HTTP.CS.Berkeley.EDU/~wkahan/ieee754status/IEEE754.PDF */
if (logging > 0 || print_level > 0)
{
printf("\n\nERROR detected by Hypre ... BEGIN\n");
printf("ERROR -- hypre_GMRESSolve: INFs and/or NaNs detected in input.\n");
printf("User probably placed non-numerics in supplied b.\n");
printf("Returning error flag += 101. Program not terminated.\n");
printf("ERROR detected by Hypre ... END\n\n\n");
}
ierr += 101;
return ierr;
}
r_norm = sqrt((*(gmres_functions->InnerProd))(p[0],p[0]));
r_norm_0 = r_norm;
/* Since it does not diminish performance, attempt to return an error flag
and notify users when they supply bad input. */
if (r_norm != 0.) ieee_check = r_norm/r_norm; /* INF -> NaN conversion */
if (ieee_check != ieee_check)
{
/* ...INFs or NaNs in input can make ieee_check a NaN. This test
for ieee_check self-equality works on all IEEE-compliant compilers/
machines, c.f. page 8 of "Lecture Notes on the Status of IEEE 754"
by W. Kahan, May 31, 1996. Currently (July 2002) this paper may be
found at http://HTTP.CS.Berkeley.EDU/~wkahan/ieee754status/IEEE754.PDF */
if (logging > 0 || print_level > 0)
{
printf("\n\nERROR detected by Hypre ... BEGIN\n");
printf("ERROR -- hypre_GMRESSolve: INFs and/or NaNs detected in input.\n");
printf("User probably placed non-numerics in supplied A or x_0.\n");
printf("Returning error flag += 101. Program not terminated.\n");
printf("ERROR detected by Hypre ... END\n\n\n");
}
ierr += 101;
return ierr;
}
if ( logging>0 || print_level > 0)
{
norms[0] = r_norm;
if ( print_level>1 && my_id == 0 )
{
printf("L2 norm of b: %e\n", b_norm);
if (b_norm == 0.0)
printf("Rel_resid_norm actually contains the residual norm\n");
printf("Initial L2 norm of residual: %e\n", r_norm);
}
}
iter = 0;
/* choose the denominator of the relative stopping test */
if (b_norm > 0.0)
{
/* convergence criterion |r_i|/|b| <= accuracy if |b| > 0 */
den_norm= b_norm;
}
else
{
/* convergence criterion |r_i|/|r0| <= accuracy if |b| = 0 */
den_norm= r_norm;
};
epsilon= accuracy;
/* convergence criterion |r_i| <= accuracy , absolute residual norm*/
if ( stop_crit && !rel_change )
epsilon = accuracy;
if ( print_level>1 && my_id == 0 )
{
if (b_norm > 0.0)
{printf("=============================================\n\n");
printf("Iters resid.norm conv.rate rel.res.norm\n");
printf("----- ------------ ---------- ------------\n");
}
else
{printf("=============================================\n\n");
printf("Iters resid.norm conv.rate\n");
printf("----- ------------ ----------\n");
};
}
/* set the relative_error to initially bypass the stopping criterion */
if (rel_change)
{
relative_error= epsilon + 1.;
}
/* outer loop: one pass = one GMRES restart cycle of up to k_dim steps */
while (iter < max_iter)
{
/* initialize first term of hessenberg system */
rs[0] = r_norm;
/* exact zero residual: free workspace and report success */
if (r_norm == 0.0)
{
hypre_TFreeF(c,gmres_functions);
hypre_TFreeF(s,gmres_functions);
hypre_TFreeF(rs,gmres_functions);
for (i=0; i < k_dim+1; i++) hypre_TFreeF(hh[i],gmres_functions);
hypre_TFreeF(hh,gmres_functions);
ierr = 0;
return ierr;
}
/* restart-time convergence check against the TRUE residual b - A*x
(the recursively updated norm can drift from the true one) */
if (r_norm/den_norm <= epsilon && iter >= min_iter)
{
if (rel_change)
{
if (relative_error <= epsilon)
{
(*(gmres_functions->CopyVector))(b,r);
(*(gmres_functions->Matvec))(matvec_data,-1.0,A,x,1.0,r);
r_norm = sqrt((*(gmres_functions->InnerProd))(r,r));
if (r_norm/den_norm <= epsilon)
{
if ( print_level>1 && my_id == 0)
{
printf("\n\n");
printf("Final L2 norm of residual: %e\n\n", r_norm);
}
break;
}
else
if ( print_level>0 && my_id == 0)
printf("false convergence 1\n");
}
}
else
{
/* same true-residual check, without the relative-change test */
(*(gmres_functions->CopyVector))(b,r);
(*(gmres_functions->Matvec))(matvec_data,-1.0,A,x,1.0,r);
r_norm = sqrt((*(gmres_functions->InnerProd))(r,r));
if (r_norm/den_norm <= epsilon)
{
if ( print_level>1 && my_id == 0)
{
printf("\n\n");
printf("Final L2 norm of residual: %e\n\n", r_norm);
}
break;
}
else
if ( print_level>0 && my_id == 0)
printf("false convergence 1\n");
}
}
/* normalize p[0] to start the Arnoldi basis for this cycle */
t = 1.0 / r_norm;
(*(gmres_functions->ScaleVector))(t,p[0]);
i = 0;
/* inner loop: Arnoldi steps until the cycle fills up or we converge */
while (i < k_dim && ( (r_norm/den_norm > epsilon || iter < min_iter)
|| ((rel_change) && relative_error > epsilon) )
&& iter < max_iter)
{
i++;
iter++;
/* p[i] = A * M^{-1} * p[i-1]  (right preconditioning) */
(*(gmres_functions->ClearVector))(r);
precond(precond_data, A, p[i-1], r);
(*(gmres_functions->Matvec))(matvec_data, 1.0, A, r, 0.0, p[i]);
/* modified Gram_Schmidt */
for (j=0; j < i; j++)
{
hh[j][i-1] = (*(gmres_functions->InnerProd))(p[j],p[i]);
(*(gmres_functions->Axpy))(-hh[j][i-1],p[j],p[i]);
}
t = sqrt((*(gmres_functions->InnerProd))(p[i],p[i]));
hh[i][i-1] = t;
/* t == 0 means breakdown (p[i] in span of previous basis); skip scaling */
if (t != 0.0)
{
t = 1.0/t;
(*(gmres_functions->ScaleVector))(t,p[i]);
}
/* done with modified Gram_schmidt and Arnoldi step.
update factorization of hh */
/* apply all previous Givens rotations to the new column of hh */
for (j = 1; j < i; j++)
{
t = hh[j-1][i-1];
hh[j-1][i-1] = c[j-1]*t + s[j-1]*hh[j][i-1];
hh[j][i-1] = -s[j-1]*t + c[j-1]*hh[j][i-1];
}
/* form the new rotation annihilating hh[i][i-1] */
gamma = sqrt(hh[i-1][i-1]*hh[i-1][i-1] + hh[i][i-1]*hh[i][i-1]);
if (gamma == 0.0) gamma = epsmac;
c[i-1] = hh[i-1][i-1]/gamma;
s[i-1] = hh[i][i-1]/gamma;
rs[i] = -s[i-1]*rs[i-1];
rs[i-1] = c[i-1]*rs[i-1];
/* determine residual norm */
hh[i-1][i-1] = c[i-1]*hh[i-1][i-1] + s[i-1]*hh[i][i-1];
r_norm = fabs(rs[i]);
if ( print_level>0 )
{
norms[iter] = r_norm;
if ( print_level>1 && my_id == 0 )
{
if (b_norm > 0.0)
printf("% 5d %e %f %e\n", iter,
norms[iter],norms[iter]/norms[iter-1],
norms[iter]/b_norm);
else
printf("% 5d %e %f\n", iter, norms[iter],
norms[iter]/norms[iter-1]);
}
}
/* optional early exit when the running average convergence factor
stagnates above cf_tol */
if (cf_tol > 0.0)
{
cf_ave_0 = cf_ave_1;
cf_ave_1 = pow( r_norm / r_norm_0, 1.0/(2.0*iter));
weight = fabs(cf_ave_1 - cf_ave_0);
weight = weight / hypre_max(cf_ave_1, cf_ave_0);
weight = 1.0 - weight;
#if 0
printf("I = %d: cf_new = %e, cf_old = %e, weight = %e\n",
i, cf_ave_1, cf_ave_0, weight );
#endif
if (weight * cf_ave_1 > cf_tol)
{
break_value = 1;
break;
}
}
}
/* now compute solution, first solve upper triangular system */
if (break_value) break;
/* back-substitution: rs becomes the coefficients of the basis vectors */
rs[i-1] = rs[i-1]/hh[i-1][i-1];
for (k = i-2; k >= 0; k--)
{
t = rs[k];
for (j = k+1; j < i; j++)
{
t -= hh[k][j]*rs[j];
}
rs[k] = t/hh[k][k];
}
/* w = sum_j rs[j]*p[j]; then x += M^{-1} w (right preconditioning) */
(*(gmres_functions->CopyVector))(p[0],w);
(*(gmres_functions->ScaleVector))(rs[0],w);
for (j = 1; j < i; j++)
(*(gmres_functions->Axpy))(rs[j], p[j], w);
(*(gmres_functions->ClearVector))(r);
precond(precond_data, A, w, r);
(*(gmres_functions->Axpy))(1.0,r,x);
/* check for convergence, evaluate actual residual */
if (r_norm/den_norm <= epsilon && iter >= min_iter)
{
if (rel_change)
{
/* here r still holds the update M^{-1} w = x_i - x_(i-1) */
x_norm = sqrt( (*(gmres_functions->InnerProd))(x,x) );
if ( x_norm<=guard_zero_residual ) break; /* don't divide by 0 */
r_norm = sqrt( (*(gmres_functions->InnerProd))(r,r) );
relative_error= r_norm/x_norm;
}
(*(gmres_functions->CopyVector))(b,r);
(*(gmres_functions->Matvec))(matvec_data,-1.0,A,x,1.0,r);
r_norm = sqrt( (*(gmres_functions->InnerProd))(r,r) );
if (r_norm/den_norm <= epsilon)
{
if ( print_level>1 && my_id == 0 )
{
printf("\n\n");
printf("Final L2 norm of residual: %e\n\n", r_norm);
}
if (rel_change && r_norm > guard_zero_residual)
/* Also test on relative change of iterates, x_i - x_(i-1) */
{ /* At this point r = x_i - x_(i-1) */
x_norm = sqrt( (*(gmres_functions->InnerProd))(x,x) );
if ( x_norm<=guard_zero_residual ) break; /* don't divide by 0 */
if ( relative_error < epsilon )
{
(gmres_data -> converged) = 1;
break;
}
}
else
{
(gmres_data -> converged) = 1;
break;
}
}
else
{
/* updated norm passed but true residual did not: restart from the
true residual instead of declaring convergence */
if ( print_level>0 && my_id == 0)
printf("false convergence 2\n");
(*(gmres_functions->CopyVector))(r,p[0]);
i = 0;
}
}
/* compute residual vector and continue loop */
/* undo the rotations on rs, then combine basis vectors into p[0] so the
next cycle starts from the current residual without an extra matvec */
for (j=i ; j > 0; j--)
{
rs[j-1] = -s[j-1]*rs[j];
rs[j] = c[j-1]*rs[j];
}
if (i) (*(gmres_functions->Axpy))(rs[0]-1.0,p[0],p[0]);
for (j=1; j < i+1; j++)
(*(gmres_functions->Axpy))(rs[j],p[j],p[0]);
}
/* record statistics and release the cycle workspace */
if ( print_level>1 && my_id == 0 )
printf("\n\n");
(gmres_data -> num_iterations) = iter;
if (b_norm > 0.0)
(gmres_data -> rel_residual_norm) = r_norm/b_norm;
if (b_norm == 0.0)
(gmres_data -> rel_residual_norm) = r_norm;
if (iter >= max_iter && r_norm/den_norm > epsilon) ierr = 1;
hypre_TFreeF(c,gmres_functions);
hypre_TFreeF(s,gmres_functions);
hypre_TFreeF(rs,gmres_functions);
for (i=0; i < k_dim+1; i++)
{
hypre_TFreeF(hh[i],gmres_functions);
}
hypre_TFreeF(hh,gmres_functions);
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_GMRESSetKDim, hypre_GMRESGetKDim
*--------------------------------------------------------------------------*/
/* hypre_GMRESSetKDim: set the restart length (maximum Krylov dimension). */
HYPRE_Int
hypre_GMRESSetKDim( void *gmres_vdata,
                    HYPRE_Int k_dim )
{
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   data->k_dim = k_dim;
   return 0;
}
/* hypre_GMRESGetKDim: read back the restart length. */
HYPRE_Int
hypre_GMRESGetKDim( void *gmres_vdata,
                    HYPRE_Int * k_dim )
{
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *k_dim = data->k_dim;
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_GMRESSetTol, hypre_GMRESGetTol
*--------------------------------------------------------------------------*/
/* hypre_GMRESSetTol: set the relative convergence tolerance. */
HYPRE_Int
hypre_GMRESSetTol( void *gmres_vdata,
                   HYPRE_Real tol )
{
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   data->tol = tol;
   return 0;
}
/* hypre_GMRESGetTol: read back the relative convergence tolerance. */
HYPRE_Int
hypre_GMRESGetTol( void *gmres_vdata,
                   HYPRE_Real * tol )
{
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *tol = data->tol;
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_GMRESSetConvergenceFactorTol, hypre_GMRESGetConvergenceFactorTol
*--------------------------------------------------------------------------*/
/* hypre_GMRESSetConvergenceFactorTol: set the convergence-factor tolerance
   used for the early-stagnation exit (0.0 disables the test). */
HYPRE_Int
hypre_GMRESSetConvergenceFactorTol( void *gmres_vdata,
                                    HYPRE_Real cf_tol )
{
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   data->cf_tol = cf_tol;
   return 0;
}
/* hypre_GMRESGetConvergenceFactorTol: read back the convergence-factor
   tolerance. */
HYPRE_Int
hypre_GMRESGetConvergenceFactorTol( void *gmres_vdata,
                                    HYPRE_Real * cf_tol )
{
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *cf_tol = data->cf_tol;
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_GMRESSetMinIter, hypre_GMRESGetMinIter
*--------------------------------------------------------------------------*/
/* hypre_GMRESSetMinIter: set the minimum number of iterations to perform
   before the convergence test may stop the solve. */
HYPRE_Int
hypre_GMRESSetMinIter( void *gmres_vdata,
                       HYPRE_Int min_iter )
{
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   data->min_iter = min_iter;
   return 0;
}
/* hypre_GMRESGetMinIter: read back the minimum iteration count. */
HYPRE_Int
hypre_GMRESGetMinIter( void *gmres_vdata,
                       HYPRE_Int * min_iter )
{
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *min_iter = data->min_iter;
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_GMRESSetMaxIter, hypre_GMRESGetMaxIter
*--------------------------------------------------------------------------*/
/* hypre_GMRESSetMaxIter: cap the number of iterations. */
HYPRE_Int
hypre_GMRESSetMaxIter( void *gmres_vdata,
                       HYPRE_Int max_iter )
{
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   data->max_iter = max_iter;
   return 0;
}
/* hypre_GMRESGetMaxIter: read back the iteration cap. */
HYPRE_Int
hypre_GMRESGetMaxIter( void *gmres_vdata,
                       HYPRE_Int * max_iter )
{
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *max_iter = data->max_iter;
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_GMRESSetRelChange, hypre_GMRESGetRelChange
*--------------------------------------------------------------------------*/
/* hypre_GMRESSetRelChange: enable/disable the relative-change-of-iterates
   convergence test. */
HYPRE_Int
hypre_GMRESSetRelChange( void *gmres_vdata,
                         HYPRE_Int rel_change )
{
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   data->rel_change = rel_change;
   return 0;
}
/* hypre_GMRESGetRelChange: read back the relative-change flag. */
HYPRE_Int
hypre_GMRESGetRelChange( void *gmres_vdata,
                         HYPRE_Int * rel_change )
{
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *rel_change = data->rel_change;
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_GMRESSetStopCrit, hypre_GMRESGetStopCrit
*--------------------------------------------------------------------------*/
/* hypre_GMRESSetStopCrit: select the (legacy) absolute stopping criterion
   flag. */
HYPRE_Int
hypre_GMRESSetStopCrit( void *gmres_vdata,
                        HYPRE_Int stop_crit )
{
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   data->stop_crit = stop_crit;
   return 0;
}
/* hypre_GMRESGetStopCrit: read back the stopping-criterion flag. */
HYPRE_Int
hypre_GMRESGetStopCrit( void *gmres_vdata,
                        HYPRE_Int * stop_crit )
{
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *stop_crit = data->stop_crit;
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_GMRESSetPrecond
*--------------------------------------------------------------------------*/
/* hypre_GMRESSetPrecond: install the preconditioner apply/setup routines on
   the function table and their private data on the solver object. */
HYPRE_Int
hypre_GMRESSetPrecond( void *gmres_vdata,
                       HYPRE_Int (*precond)(),
                       HYPRE_Int (*precond_setup)(),
                       void *precond_data )
{
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;
   hypre_GMRESFunctions *fns = data->functions;

   fns->precond       = precond;
   fns->precond_setup = precond_setup;
   data->precond_data = precond_data;
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_GMRESGetPrecond
*--------------------------------------------------------------------------*/
/* hypre_GMRESGetPrecond: return the preconditioner data, cast to the public
   HYPRE_Solver handle type. */
HYPRE_Int
hypre_GMRESGetPrecond( void *gmres_vdata,
                       HYPRE_Solver *precond_data_ptr )
{
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *precond_data_ptr = (HYPRE_Solver) (data->precond_data);
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_GMRESSetPrintLevel, hypre_GMRESGetPrintLevel
*--------------------------------------------------------------------------*/
/* hypre_GMRESSetPrintLevel: set the verbosity of the solve-time printout. */
HYPRE_Int
hypre_GMRESSetPrintLevel( void *gmres_vdata,
                          HYPRE_Int level)
{
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   data->print_level = level;
   return 0;
}
/* hypre_GMRESGetPrintLevel: read back the print verbosity. */
HYPRE_Int
hypre_GMRESGetPrintLevel( void *gmres_vdata,
                          HYPRE_Int * level)
{
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *level = data->print_level;
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_GMRESSetLogging, hypre_GMRESGetLogging
*--------------------------------------------------------------------------*/
/* hypre_GMRESSetLogging: set the residual-norm logging level. */
HYPRE_Int
hypre_GMRESSetLogging( void *gmres_vdata,
                       HYPRE_Int level)
{
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   data->logging = level;
   return 0;
}
/* hypre_GMRESGetLogging: read back the logging level. */
HYPRE_Int
hypre_GMRESGetLogging( void *gmres_vdata,
                       HYPRE_Int * level)
{
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *level = data->logging;
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_GMRESGetNumIterations
*--------------------------------------------------------------------------*/
/* hypre_GMRESGetNumIterations: number of iterations taken by the last
   solve. */
HYPRE_Int
hypre_GMRESGetNumIterations( void *gmres_vdata,
                             HYPRE_Int *num_iterations )
{
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *num_iterations = data->num_iterations;
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_GMRESGetConverged
*--------------------------------------------------------------------------*/
/* hypre_GMRESGetConverged: whether the last solve satisfied the convergence
   test. */
HYPRE_Int
hypre_GMRESGetConverged( void *gmres_vdata,
                         HYPRE_Int *converged )
{
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *converged = data->converged;
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_GMRESGetFinalRelativeResidualNorm
*--------------------------------------------------------------------------*/
/* hypre_GMRESGetFinalRelativeResidualNorm: relative residual norm recorded
   at the end of the last solve. */
HYPRE_Int
hypre_GMRESGetFinalRelativeResidualNorm( void *gmres_vdata,
                                         HYPRE_Real *relative_residual_norm )
{
   hypre_GMRESData *data = (hypre_GMRESData *) gmres_vdata;

   *relative_residual_norm = data->rel_residual_norm;
   return 0;
}
| 32,181 | 31.376258 | 90 | c |
AMG | AMG-master/krylov/pcg.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* Preconditioned conjugate gradient (Omin) functions
*
*****************************************************************************/
/* This was based on the pcg.c formerly in struct_ls, with
changes (GetPrecond and stop_crit) for compatibility with the pcg.c
in parcsr_ls and elsewhere. Incompatibilities with the
parcsr_ls version:
- logging is different; no attempt has been made to be the same
- treatment of b=0 in Ax=b is different: this returns x=0; the parcsr
version iterates with a special stopping criterion
*/
#include "krylov.h"
#include "_hypre_utilities.h"
/*--------------------------------------------------------------------------
* hypre_PCGFunctionsCreate
*--------------------------------------------------------------------------*/
/*
 * hypre_PCGFunctionsCreate: bundle the caller-supplied kernel callbacks into
 * a freshly allocated hypre_PCGFunctions table.  PrecondSetup/Precond are
 * installed as the default preconditioner and can be replaced later.
 */
hypre_PCGFunctions *
hypre_PCGFunctionsCreate(
   char * (*CAlloc) ( size_t count, size_t elt_size ),
   HYPRE_Int (*Free) ( char *ptr ),
   HYPRE_Int (*CommInfo) ( void *A, HYPRE_Int *my_id,
   HYPRE_Int *num_procs ),
   void * (*CreateVector) ( void *vector ),
   HYPRE_Int (*DestroyVector) ( void *vector ),
   void * (*MatvecCreate) ( void *A, void *x ),
   HYPRE_Int (*Matvec) ( void *matvec_data, HYPRE_Complex alpha, void *A,
   void *x, HYPRE_Complex beta, void *y ),
   HYPRE_Int (*MatvecDestroy) ( void *matvec_data ),
   HYPRE_Real (*InnerProd) ( void *x, void *y ),
   HYPRE_Int (*CopyVector) ( void *x, void *y ),
   HYPRE_Int (*ClearVector) ( void *x ),
   HYPRE_Int (*ScaleVector) ( HYPRE_Complex alpha, void *x ),
   HYPRE_Int (*Axpy) ( HYPRE_Complex alpha, void *x, void *y ),
   HYPRE_Int (*PrecondSetup) ( void *vdata, void *A, void *b, void *x ),
   HYPRE_Int (*Precond) ( void *vdata, void *A, void *b, void *x )
   )
{
   hypre_PCGFunctions *fns =
      (hypre_PCGFunctions *) CAlloc( 1, sizeof(hypre_PCGFunctions) );

   /* memory management and parallel info */
   fns->CAlloc        = CAlloc;
   fns->Free          = Free;
   fns->CommInfo      = CommInfo;
   /* vector lifecycle */
   fns->CreateVector  = CreateVector;
   fns->DestroyVector = DestroyVector;
   /* matrix-vector product */
   fns->MatvecCreate  = MatvecCreate;
   fns->Matvec        = Matvec;
   fns->MatvecDestroy = MatvecDestroy;
   /* BLAS-1 style vector kernels */
   fns->InnerProd     = InnerProd;
   fns->CopyVector    = CopyVector;
   fns->ClearVector   = ClearVector;
   fns->ScaleVector   = ScaleVector;
   fns->Axpy          = Axpy;
   /* default preconditioner must be set here but can be changed later... */
   fns->precond_setup = PrecondSetup;
   fns->precond       = Precond;

   return fns;
}
/*--------------------------------------------------------------------------
* hypre_PCGCreate
*--------------------------------------------------------------------------*/
/*
 * hypre_PCGCreate: allocate a PCG solver object and fill in its default
 * parameters.  Work vectors, the matvec context, and the logging arrays are
 * allocated later, in hypre_PCGSetup.
 */
void *
hypre_PCGCreate( hypre_PCGFunctions *pcg_functions )
{
   hypre_PCGData *data = hypre_CTAllocF(hypre_PCGData, 1, pcg_functions);

   data->functions = pcg_functions;

   /* convergence controls */
   data->tol                  = 1.0e-06;
   data->atolf                = 0.0;
   data->cf_tol               = 0.0;
   data->a_tol                = 0.0;
   data->rtol                 = 0.0;
   data->max_iter             = 1000;
   data->two_norm             = 0;
   data->rel_change           = 0;
   data->recompute_residual   = 0;
   data->recompute_residual_p = 0;
   data->stop_crit            = 0;
   data->converged            = 0;
   /* state filled in by hypre_PCGSetup */
   data->owns_matvec_data     = 1;
   data->matvec_data          = NULL;
   data->precond_data         = NULL;
   data->print_level          = 0;
   data->logging              = 0;
   data->norms                = NULL;
   data->rel_norms            = NULL;
   data->p                    = NULL;
   data->s                    = NULL;
   data->r                    = NULL;

   return (void *) data;
}
/*--------------------------------------------------------------------------
* hypre_PCGDestroy
*--------------------------------------------------------------------------*/
/*
 * hypre_PCGDestroy: release everything owned by the solver object, then the
 * object itself and, last of all, the function table it allocates through.
 */
HYPRE_Int
hypre_PCGDestroy( void *pcg_vdata )
{
   hypre_PCGData *data = (hypre_PCGData *)pcg_vdata;

   if (data)
   {
      hypre_PCGFunctions *fns = data->functions;

      if (data->norms != NULL)
      {
         hypre_TFreeF( data->norms, fns );
         data->norms = NULL;
      }
      if (data->rel_norms != NULL)
      {
         hypre_TFreeF( data->rel_norms, fns );
         data->rel_norms = NULL;
      }
      /* destroy the matvec context only if this object created it */
      if (data->matvec_data != NULL && data->owns_matvec_data)
      {
         (*(fns->MatvecDestroy))(data->matvec_data);
         data->matvec_data = NULL;
      }
      if (data->p != NULL)
      {
         (*(fns->DestroyVector))(data->p);
         data->p = NULL;
      }
      if (data->s != NULL)
      {
         (*(fns->DestroyVector))(data->s);
         data->s = NULL;
      }
      if (data->r != NULL)
      {
         (*(fns->DestroyVector))(data->r);
         data->r = NULL;
      }
      /* free the object first, then the table itself */
      hypre_TFreeF( data, fns );
      hypre_TFreeF( fns, fns );
   }

   return(hypre_error_flag);
}
/*--------------------------------------------------------------------------
* hypre_PCGGetResidual
*--------------------------------------------------------------------------*/
/* hypre_PCGGetResidual: hand back a pointer to the solver's internal
   residual vector (no copy is made; the solver retains ownership). */
HYPRE_Int hypre_PCGGetResidual( void *pcg_vdata, void **residual )
{
   hypre_PCGData *data = (hypre_PCGData *)pcg_vdata;

   *residual = data->r;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_PCGSetup
*--------------------------------------------------------------------------*/
/*
 * hypre_PCGSetup: bind the solver to matrix A, (re)create the work vectors
 * p, s, r and the matvec context, run the preconditioner setup, and allocate
 * the logging arrays when logging or printing is enabled.
 */
HYPRE_Int
hypre_PCGSetup( void *pcg_vdata,
                void *A,
                void *b,
                void *x )
{
   hypre_PCGData      *data = (hypre_PCGData *)pcg_vdata;
   hypre_PCGFunctions *fns  = data->functions;
   HYPRE_Int           max_iter = data->max_iter;
   HYPRE_Int (*precond_setup)(void*,void*,void*,void*) = fns->precond_setup;
   void               *precond_data = data->precond_data;

   data->A = A;

   /*--------------------------------------------------
    * The arguments for CreateVector are important to
    * maintain consistency between the setup and
    * compute phases of matvec and the preconditioner.
    *--------------------------------------------------*/
   if (data->p != NULL)
      (*(fns->DestroyVector))(data->p);
   data->p = (*(fns->CreateVector))(x);

   if (data->s != NULL)
      (*(fns->DestroyVector))(data->s);
   data->s = (*(fns->CreateVector))(x);

   if (data->r != NULL)
      (*(fns->DestroyVector))(data->r);
   data->r = (*(fns->CreateVector))(b);

   if (data->matvec_data != NULL && data->owns_matvec_data)
      (*(fns->MatvecDestroy))(data->matvec_data);
   data->matvec_data = (*(fns->MatvecCreate))(A, x);

   precond_setup(precond_data, A, b, x);

   /* space for per-iteration (relative) residual norms */
   if ( data->logging > 0 || data->print_level > 0 )
   {
      if (data->norms != NULL)
         hypre_TFreeF( data->norms, fns );
      data->norms = hypre_CTAllocF( HYPRE_Real, max_iter + 1, fns );

      if (data->rel_norms != NULL)
         hypre_TFreeF( data->rel_norms, fns );
      data->rel_norms = hypre_CTAllocF( HYPRE_Real, max_iter + 1, fns );
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_PCGSolve
*--------------------------------------------------------------------------
*
* We use the following convergence test as the default (see Ashby, Holst,
* Manteuffel, and Saylor):
*
* ||e||_A ||r||_C
* ------- <= [kappa_A(C*A)]^(1/2) ------- < tol
* ||x||_A ||b||_C
*
* where we let (for the time being) kappa_A(CA) = 1.
* We implement the test as:
*
* gamma = <C*r,r>/<C*b,b> < (tol^2) = eps
*
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_PCGSolve( void *pcg_vdata,
void *A,
void *b,
void *x )
{
hypre_PCGData *pcg_data = (hypre_PCGData *)pcg_vdata;
hypre_PCGFunctions *pcg_functions = pcg_data->functions;
HYPRE_Real r_tol = (pcg_data -> tol);
HYPRE_Real a_tol = (pcg_data -> a_tol);
HYPRE_Real atolf = (pcg_data -> atolf);
HYPRE_Real cf_tol = (pcg_data -> cf_tol);
HYPRE_Real rtol = (pcg_data -> rtol);
HYPRE_Int max_iter = (pcg_data -> max_iter);
HYPRE_Int two_norm = (pcg_data -> two_norm);
HYPRE_Int rel_change = (pcg_data -> rel_change);
HYPRE_Int recompute_residual = (pcg_data -> recompute_residual);
HYPRE_Int recompute_residual_p = (pcg_data -> recompute_residual_p);
HYPRE_Int stop_crit = (pcg_data -> stop_crit);
/*
HYPRE_Int converged = (pcg_data -> converged);
*/
void *p = (pcg_data -> p);
void *s = (pcg_data -> s);
void *r = (pcg_data -> r);
void *matvec_data = (pcg_data -> matvec_data);
HYPRE_Int (*precond)(void*,void*,void*,void*) = (pcg_functions -> precond);
void *precond_data = (pcg_data -> precond_data);
HYPRE_Int print_level = (pcg_data -> print_level);
HYPRE_Int logging = (pcg_data -> logging);
HYPRE_Real *norms = (pcg_data -> norms);
HYPRE_Real *rel_norms = (pcg_data -> rel_norms);
HYPRE_Real alpha, beta;
HYPRE_Real gamma, gamma_old;
HYPRE_Real bi_prod, eps;
HYPRE_Real pi_prod, xi_prod;
HYPRE_Real ieee_check = 0.;
HYPRE_Real i_prod = 0.0;
HYPRE_Real i_prod_0 = 0.0;
HYPRE_Real cf_ave_0 = 0.0;
HYPRE_Real cf_ave_1 = 0.0;
HYPRE_Real weight;
HYPRE_Real ratio;
HYPRE_Real guard_zero_residual, sdotp;
HYPRE_Int tentatively_converged = 0;
HYPRE_Int recompute_true_residual = 0;
HYPRE_Int i = 0;
HYPRE_Int my_id, num_procs;
(pcg_data -> converged) = 0;
(*(pcg_functions->CommInfo))(A,&my_id,&num_procs);
/*-----------------------------------------------------------------------
* With relative change convergence test on, it is possible to attempt
* another iteration with a zero residual. This causes the parameter
* alpha to go NaN. The guard_zero_residual parameter is to circumvent
* this. Perhaps it should be set to something non-zero (but small).
*-----------------------------------------------------------------------*/
guard_zero_residual = 0.0;
/*-----------------------------------------------------------------------
* Start pcg solve
*-----------------------------------------------------------------------*/
/* compute eps */
if (two_norm)
{
/* bi_prod = <b,b> */
bi_prod = (*(pcg_functions->InnerProd))(b, b);
if (print_level > 1 && my_id == 0)
hypre_printf("<b,b>: %e\n",bi_prod);
}
else
{
/* bi_prod = <C*b,b> */
(*(pcg_functions->ClearVector))(p);
precond(precond_data, A, b, p);
bi_prod = (*(pcg_functions->InnerProd))(p, b);
if (print_level > 1 && my_id == 0)
hypre_printf("<C*b,b>: %e\n",bi_prod);
};
/* Since it does not diminish performance, attempt to return an error flag
and notify users when they supply bad input. */
if (bi_prod != 0.) ieee_check = bi_prod/bi_prod; /* INF -> NaN conversion */
if (ieee_check != ieee_check)
{
/* ...INFs or NaNs in input can make ieee_check a NaN. This test
for ieee_check self-equality works on all IEEE-compliant compilers/
machines, c.f. page 8 of "Lecture Notes on the Status of IEEE 754"
by W. Kahan, May 31, 1996. Currently (July 2002) this paper may be
found at http://HTTP.CS.Berkeley.EDU/~wkahan/ieee754status/IEEE754.PDF */
if (print_level > 0 || logging > 0)
{
hypre_printf("\n\nERROR detected by Hypre ... BEGIN\n");
hypre_printf("ERROR -- hypre_PCGSolve: INFs and/or NaNs detected in input.\n");
hypre_printf("User probably placed non-numerics in supplied b.\n");
hypre_printf("Returning error flag += 101. Program not terminated.\n");
hypre_printf("ERROR detected by Hypre ... END\n\n\n");
}
hypre_error(HYPRE_ERROR_GENERIC);
return hypre_error_flag;
}
eps = r_tol*r_tol; /* note: this may be re-assigned below */
if ( bi_prod > 0.0 ) {
if ( stop_crit && !rel_change && atolf<=0 ) { /* pure absolute tolerance */
eps = eps / bi_prod;
/* Note: this section is obsolete. Aside from backwards compatibility
concerns, we could delete the stop_crit parameter and related code,
using tol & atolf instead. */
}
else if ( atolf>0 ) /* mixed relative and absolute tolerance */
bi_prod += atolf;
else /* DEFAULT (stop_crit and atolf exist for backwards compatibility
        and are not in the reference manual) */
{
/* convergence criteria: <C*r,r> <= max( a_tol^2, r_tol^2 * <C*b,b> )
note: default for a_tol is 0.0, so relative residual criteria is used unless
user specifies a_tol, or sets r_tol = 0.0, which means absolute
tol only is checked */
eps = hypre_max(r_tol*r_tol, a_tol*a_tol/bi_prod);
}
}
else /* bi_prod==0.0: the rhs vector b is zero */
{
/* Set x equal to zero and return */
(*(pcg_functions->CopyVector))(b, x);
if (logging>0 || print_level>0)
{
norms[0] = 0.0;
rel_norms[i] = 0.0;
}
return hypre_error_flag;
/* In this case, for the original parcsr pcg, the code would take special
action to force iterations even though the exact value was known. */
};
/* r = b - Ax */
(*(pcg_functions->CopyVector))(b, r);
(*(pcg_functions->Matvec))(matvec_data, -1.0, A, x, 1.0, r);
/* p = C*r */
(*(pcg_functions->ClearVector))(p);
precond(precond_data, A, r, p);
/* gamma = <r,p> */
gamma = (*(pcg_functions->InnerProd))(r,p);
/* Since it does not diminish performance, attempt to return an error flag
   and notify users when they supply bad input. */
if (gamma != 0.) ieee_check = gamma/gamma; /* INF -> NaN conversion */
if (ieee_check != ieee_check)
{
/* ...INFs or NaNs in input can make ieee_check a NaN. This test
for ieee_check self-equality works on all IEEE-compliant compilers/
machines, c.f. page 8 of "Lecture Notes on the Status of IEEE 754"
by W. Kahan, May 31, 1996. Currently (July 2002) this paper may be
found at http://HTTP.CS.Berkeley.EDU/~wkahan/ieee754status/IEEE754.PDF */
if (print_level > 0 || logging > 0)
{
hypre_printf("\n\nERROR detected by Hypre ... BEGIN\n");
hypre_printf("ERROR -- hypre_PCGSolve: INFs and/or NaNs detected in input.\n");
hypre_printf("User probably placed non-numerics in supplied A or x_0.\n");
hypre_printf("Returning error flag += 101. Program not terminated.\n");
hypre_printf("ERROR detected by Hypre ... END\n\n\n");
}
hypre_error(HYPRE_ERROR_GENERIC);
return hypre_error_flag;
}
/* Set initial residual norm */
if ( logging>0 || print_level > 0 || cf_tol > 0.0 )
{
if (two_norm)
i_prod_0 = (*(pcg_functions->InnerProd))(r,r);
else
i_prod_0 = gamma;
if ( logging>0 || print_level>0 ) norms[0] = sqrt(i_prod_0);
}
if ( print_level > 1 && my_id==0 )
{
hypre_printf("\n\n");
if (two_norm)
{
if ( stop_crit && !rel_change && atolf==0 ) { /* pure absolute tolerance */
hypre_printf("Iters ||r||_2 conv.rate\n");
hypre_printf("----- ------------ ---------\n");
}
else {
hypre_printf("Iters ||r||_2 conv.rate ||r||_2/||b||_2\n");
hypre_printf("----- ------------ --------- ------------ \n");
}
}
else /* !two_norm */
{
hypre_printf("Iters ||r||_C conv.rate ||r||_C/||b||_C\n");
hypre_printf("----- ------------ --------- ------------ \n");
}
}
while ((i+1) <= max_iter)
{
/*--------------------------------------------------------------------
* the core CG calculations...
*--------------------------------------------------------------------*/
i++;
/* At user request, periodically recompute the residual from the formula
r = b - A x (instead of using the recursive definition). Note that this
is potentially expensive and can lead to degraded convergence (since it
essentially a "restarted CG"). */
recompute_true_residual = recompute_residual_p && !(i%recompute_residual_p);
/* s = A*p */
(*(pcg_functions->Matvec))(matvec_data, 1.0, A, p, 0.0, s);
/* alpha = gamma / <s,p> */
sdotp = (*(pcg_functions->InnerProd))(s, p);
if ( sdotp==0.0 )
{
/* ++ierr;*/
if (i==1) i_prod=i_prod_0;
break;
}
alpha = gamma / sdotp;
gamma_old = gamma;
/* x = x + alpha*p */
(*(pcg_functions->Axpy))(alpha, p, x);
/* r = r - alpha*s */
if ( !recompute_true_residual )
{
(*(pcg_functions->Axpy))(-alpha, s, r);
}
else
{
if (print_level > 1 && my_id == 0)
{
hypre_printf("Recomputing the residual...\n");
}
(*(pcg_functions->CopyVector))(b, r);
(*(pcg_functions->Matvec))(matvec_data, -1.0, A, x, 1.0, r);
}
/* residual-based stopping criteria: ||r_new-r_old|| < rtol ||b|| */
if (rtol && two_norm)
{
/* use that r_new-r_old = alpha * s */
HYPRE_Real drob2 = alpha*alpha*(*(pcg_functions->InnerProd))(s,s)/bi_prod;
if ( drob2 < rtol*rtol )
{
if (print_level > 1 && my_id == 0)
{
hypre_printf("\n\n||r_old-r_new||/||b||: %e\n", sqrt(drob2));
}
break;
}
}
/* s = C*r */
(*(pcg_functions->ClearVector))(s);
precond(precond_data, A, r, s);
/* gamma = <r,s> */
gamma = (*(pcg_functions->InnerProd))(r, s);
/* residual-based stopping criteria: ||r_new-r_old||_C < rtol ||b||_C */
if (rtol && !two_norm)
{
/* use that ||r_new-r_old||_C^2 = (r_new ,C r_new) + (r_old, C r_old) */
HYPRE_Real r2ob2 = (gamma + gamma_old)/bi_prod;
if ( r2ob2 < rtol*rtol)
{
if (print_level > 1 && my_id == 0)
{
hypre_printf("\n\n||r_old-r_new||_C/||b||_C: %e\n", sqrt(r2ob2));
}
break;
}
}
/* set i_prod for convergence test */
if (two_norm)
i_prod = (*(pcg_functions->InnerProd))(r,r);
else
i_prod = gamma;
/*--------------------------------------------------------------------
* optional output
*--------------------------------------------------------------------*/
#if 0
if (two_norm)
hypre_printf("Iter (%d): ||r||_2 = %e, ||r||_2/||b||_2 = %e\n",
i, sqrt(i_prod), (bi_prod ? sqrt(i_prod/bi_prod) : 0));
else
hypre_printf("Iter (%d): ||r||_C = %e, ||r||_C/||b||_C = %e\n",
i, sqrt(i_prod), (bi_prod ? sqrt(i_prod/bi_prod) : 0));
#endif
/* print norm info */
if ( logging>0 || print_level>0 )
{
norms[i] = sqrt(i_prod);
rel_norms[i] = bi_prod ? sqrt(i_prod/bi_prod) : 0;
}
if ( print_level > 1 && my_id==0 )
{
if (two_norm)
{
if ( stop_crit && !rel_change && atolf==0 ) { /* pure absolute tolerance */
hypre_printf("% 5d %e %f\n", i, norms[i],
norms[i]/norms[i-1] );
}
else
{
hypre_printf("% 5d %e %f %e\n", i, norms[i],
norms[i]/norms[i-1], rel_norms[i] );
}
}
else
{
hypre_printf("% 5d %e %f %e\n", i, norms[i],
norms[i]/norms[i-1], rel_norms[i] );
}
}
/*--------------------------------------------------------------------
* check for convergence
*--------------------------------------------------------------------*/
if (i_prod / bi_prod < eps) /* the basic convergence test */
tentatively_converged = 1;
if ( tentatively_converged && recompute_residual )
/* At user request, don't trust the convergence test until we've recomputed
   the residual from scratch.  This is expensive in the usual case where
   the norm is the energy norm.
   This calculation is coded on the assumption that r's accuracy is only a
   concern for problems where CG takes many iterations. */
{
/* r = b - Ax */
(*(pcg_functions->CopyVector))(b, r);
(*(pcg_functions->Matvec))(matvec_data, -1.0, A, x, 1.0, r);
/* set i_prod for convergence test */
if (two_norm)
i_prod = (*(pcg_functions->InnerProd))(r,r);
else
{
/* s = C*r */
(*(pcg_functions->ClearVector))(s);
precond(precond_data, A, r, s);
/* iprod = gamma = <r,s> */
i_prod = (*(pcg_functions->InnerProd))(r, s);
}
if (i_prod / bi_prod >= eps) tentatively_converged = 0;
}
if ( tentatively_converged && rel_change && (i_prod > guard_zero_residual ))
/* At user request, don't treat this as converged unless x didn't change
much in the last iteration. */
{
pi_prod = (*(pcg_functions->InnerProd))(p,p);
xi_prod = (*(pcg_functions->InnerProd))(x,x);
ratio = alpha*alpha*pi_prod/xi_prod;
if (ratio >= eps) tentatively_converged = 0;
}
if ( tentatively_converged )
/* we've passed all the convergence tests, it's for real */
{
(pcg_data -> converged) = 1;
break;
}
if ( (gamma<HYPRE_REAL_MIN) && ((-gamma)<HYPRE_REAL_MIN) ) {
/* ierr = 1;*/
hypre_error(HYPRE_ERROR_CONV);
break;
}
/* ... gamma should be >=0. IEEE subnormal numbers are < 2**(-1022)=2.2e-308
(and >= 2**(-1074)=4.9e-324). So a gamma this small means we're getting
dangerously close to subnormal or zero numbers (usually if gamma is small,
so will be other variables). Thus further calculations risk a crash.
Such small gamma generally means no hope of progress anyway. */
/*--------------------------------------------------------------------
* Optional test to see if adequate progress is being made.
* The average convergence factor is recorded and compared
* against the tolerance 'cf_tol'. The weighting factor is
* intended to pay more attention to the test when an accurate
* estimate for average convergence factor is available.
*--------------------------------------------------------------------*/
if (cf_tol > 0.0)
{
cf_ave_0 = cf_ave_1;
if ( i_prod_0<HYPRE_REAL_MIN ) {
/* i_prod_0 is zero, or (almost) subnormal, yet i_prod wasn't small
enough to pass the convergence test. Therefore initial guess was good,
and we're just calculating garbage - time to bail out before the
next step, which will be a divide by zero (or close to it). */
/* ierr = 1; */
hypre_error(HYPRE_ERROR_CONV);
break;
}
cf_ave_1 = pow( i_prod / i_prod_0, 1.0/(2.0*i));
weight = fabs(cf_ave_1 - cf_ave_0);
weight = weight / hypre_max(cf_ave_1, cf_ave_0);
weight = 1.0 - weight;
#if 0
hypre_printf("I = %d: cf_new = %e, cf_old = %e, weight = %e\n",
i, cf_ave_1, cf_ave_0, weight );
#endif
if (weight * cf_ave_1 > cf_tol) break;
}
/*--------------------------------------------------------------------
* back to the core CG calculations
*--------------------------------------------------------------------*/
/* beta = gamma / gamma_old */
beta = gamma / gamma_old;
/* p = s + beta p */
if ( !recompute_true_residual )
{
(*(pcg_functions->ScaleVector))(beta, p);
(*(pcg_functions->Axpy))(1.0, s, p);
}
else
(*(pcg_functions->CopyVector))(s, p);
}
/*--------------------------------------------------------------------
* Finish up with some outputs.
*--------------------------------------------------------------------*/
if ( print_level > 1 && my_id==0 )
hypre_printf("\n\n");
(pcg_data -> num_iterations) = i;
if (bi_prod > 0.0)
(pcg_data -> rel_residual_norm) = sqrt(i_prod/bi_prod);
else /* actually, we'll never get here... */
(pcg_data -> rel_residual_norm) = 0.0;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_PCGSetTol, hypre_PCGGetTol
*--------------------------------------------------------------------------*/
/* Set the relative convergence tolerance of the PCG solver. */
HYPRE_Int
hypre_PCGSetTol( void *pcg_vdata,
                 HYPRE_Real tol )
{
   /* Cast the opaque handle and store the new value in one step. */
   ((hypre_PCGData *) pcg_vdata) -> tol = tol;
   return hypre_error_flag;
}
/* Retrieve the relative convergence tolerance of the PCG solver. */
HYPRE_Int
hypre_PCGGetTol( void *pcg_vdata,
                 HYPRE_Real * tol )
{
   *tol = ((hypre_PCGData *) pcg_vdata) -> tol;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_PCGSetAbsoluteTol, hypre_PCGGetAbsoluteTol
*--------------------------------------------------------------------------*/
/* Set the absolute convergence tolerance (default 0.0; see pcg.h notes). */
HYPRE_Int
hypre_PCGSetAbsoluteTol( void *pcg_vdata,
                         HYPRE_Real a_tol )
{
   ((hypre_PCGData *) pcg_vdata) -> a_tol = a_tol;
   return hypre_error_flag;
}
/* Retrieve the absolute convergence tolerance. */
HYPRE_Int
hypre_PCGGetAbsoluteTol( void *pcg_vdata,
                         HYPRE_Real * a_tol )
{
   *a_tol = ((hypre_PCGData *) pcg_vdata) -> a_tol;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_PCGSetAbsoluteTolFactor, hypre_PCGGetAbsoluteTolFactor
*--------------------------------------------------------------------------*/
/* Set the absolute-tolerance factor (legacy option, to be phased out). */
HYPRE_Int
hypre_PCGSetAbsoluteTolFactor( void *pcg_vdata,
                               HYPRE_Real atolf )
{
   ((hypre_PCGData *) pcg_vdata) -> atolf = atolf;
   return hypre_error_flag;
}
/* Retrieve the absolute-tolerance factor (legacy option). */
HYPRE_Int
hypre_PCGGetAbsoluteTolFactor( void *pcg_vdata,
                               HYPRE_Real * atolf )
{
   *atolf = ((hypre_PCGData *) pcg_vdata) -> atolf;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_PCGSetResidualTol, hypre_PCGGetResidualTol
*--------------------------------------------------------------------------*/
/* Set rtol: tolerance on the change of the residual, ||r_new - r_old||,
   used as an extra stopping test inside hypre_PCGSolve. */
HYPRE_Int
hypre_PCGSetResidualTol( void *pcg_vdata,
                         HYPRE_Real rtol )
{
   ((hypre_PCGData *) pcg_vdata) -> rtol = rtol;
   return hypre_error_flag;
}
/* Retrieve the residual-change tolerance rtol. */
HYPRE_Int
hypre_PCGGetResidualTol( void *pcg_vdata,
                         HYPRE_Real * rtol )
{
   *rtol = ((hypre_PCGData *) pcg_vdata) -> rtol;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_PCGSetConvergenceFactorTol, hypre_PCGGetConvergenceFactorTol
*--------------------------------------------------------------------------*/
/* Set cf_tol: if > 0, the solver tests the average convergence factor
   against it and bails out when progress is too slow. */
HYPRE_Int
hypre_PCGSetConvergenceFactorTol( void *pcg_vdata,
                                  HYPRE_Real cf_tol )
{
   ((hypre_PCGData *) pcg_vdata) -> cf_tol = cf_tol;
   return hypre_error_flag;
}
/* Retrieve the convergence-factor tolerance cf_tol. */
HYPRE_Int
hypre_PCGGetConvergenceFactorTol( void *pcg_vdata,
                                  HYPRE_Real * cf_tol )
{
   *cf_tol = ((hypre_PCGData *) pcg_vdata) -> cf_tol;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_PCGSetMaxIter, hypre_PCGGetMaxIter
*--------------------------------------------------------------------------*/
/* Set the maximum number of PCG iterations. */
HYPRE_Int
hypre_PCGSetMaxIter( void *pcg_vdata,
                     HYPRE_Int max_iter )
{
   ((hypre_PCGData *) pcg_vdata) -> max_iter = max_iter;
   return hypre_error_flag;
}
/* Retrieve the maximum number of PCG iterations. */
HYPRE_Int
hypre_PCGGetMaxIter( void *pcg_vdata,
                     HYPRE_Int * max_iter )
{
   *max_iter = ((hypre_PCGData *) pcg_vdata) -> max_iter;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_PCGSetTwoNorm, hypre_PCGGetTwoNorm
*--------------------------------------------------------------------------*/
/* Set two_norm: nonzero means convergence is measured in the L2 norm
   <r,r> rather than the preconditioner energy norm <C*r,r>. */
HYPRE_Int
hypre_PCGSetTwoNorm( void *pcg_vdata,
                     HYPRE_Int two_norm )
{
   ((hypre_PCGData *) pcg_vdata) -> two_norm = two_norm;
   return hypre_error_flag;
}
/* Retrieve the two_norm flag. */
HYPRE_Int
hypre_PCGGetTwoNorm( void *pcg_vdata,
                     HYPRE_Int * two_norm )
{
   *two_norm = ((hypre_PCGData *) pcg_vdata) -> two_norm;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_PCGSetRelChange, hypre_PCGGetRelChange
*--------------------------------------------------------------------------*/
/* Set rel_change: nonzero means convergence additionally requires the
   relative change in the solution x to be small. */
HYPRE_Int
hypre_PCGSetRelChange( void *pcg_vdata,
                       HYPRE_Int rel_change )
{
   ((hypre_PCGData *) pcg_vdata) -> rel_change = rel_change;
   return hypre_error_flag;
}
/* Retrieve the rel_change flag. */
HYPRE_Int
hypre_PCGGetRelChange( void *pcg_vdata,
                       HYPRE_Int * rel_change )
{
   *rel_change = ((hypre_PCGData *) pcg_vdata) -> rel_change;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_PCGSetRecomputeResidual, hypre_PCGGetRecomputeResidual
*--------------------------------------------------------------------------*/
/* Set recompute_residual: nonzero means the residual is recomputed from
   scratch (r = b - Ax) before trusting the convergence test. */
HYPRE_Int
hypre_PCGSetRecomputeResidual( void *pcg_vdata,
                               HYPRE_Int recompute_residual )
{
   ((hypre_PCGData *) pcg_vdata) -> recompute_residual = recompute_residual;
   return hypre_error_flag;
}
/* Retrieve the recompute_residual flag. */
HYPRE_Int
hypre_PCGGetRecomputeResidual( void *pcg_vdata,
                               HYPRE_Int * recompute_residual )
{
   *recompute_residual = ((hypre_PCGData *) pcg_vdata) -> recompute_residual;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_PCGSetRecomputeResidualP, hypre_PCGGetRecomputeResidualP
*--------------------------------------------------------------------------*/
/* Set recompute_residual_p: if nonzero, recompute r = b - Ax every
   recompute_residual_p iterations (potentially expensive). */
HYPRE_Int
hypre_PCGSetRecomputeResidualP( void *pcg_vdata,
                                HYPRE_Int recompute_residual_p )
{
   ((hypre_PCGData *) pcg_vdata) -> recompute_residual_p = recompute_residual_p;
   return hypre_error_flag;
}
/* Retrieve the recompute_residual_p period. */
HYPRE_Int
hypre_PCGGetRecomputeResidualP( void *pcg_vdata,
                                HYPRE_Int * recompute_residual_p )
{
   *recompute_residual_p = ((hypre_PCGData *) pcg_vdata) -> recompute_residual_p;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_PCGSetStopCrit, hypre_PCGGetStopCrit
*--------------------------------------------------------------------------*/
/* Set stop_crit: legacy pure-absolute stopping criterion (to be phased out). */
HYPRE_Int
hypre_PCGSetStopCrit( void *pcg_vdata,
                      HYPRE_Int stop_crit )
{
   ((hypre_PCGData *) pcg_vdata) -> stop_crit = stop_crit;
   return hypre_error_flag;
}
/* Retrieve the legacy stop_crit flag. */
HYPRE_Int
hypre_PCGGetStopCrit( void *pcg_vdata,
                      HYPRE_Int * stop_crit )
{
   *stop_crit = ((hypre_PCGData *) pcg_vdata) -> stop_crit;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_PCGGetPrecond
*--------------------------------------------------------------------------*/
/* Return the preconditioner's opaque data handle through *precond_data_ptr. */
HYPRE_Int
hypre_PCGGetPrecond( void *pcg_vdata,
                     HYPRE_Solver *precond_data_ptr )
{
   hypre_PCGData *pcg_data = (hypre_PCGData *) pcg_vdata;
   *precond_data_ptr = (HYPRE_Solver) (pcg_data -> precond_data);
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_PCGSetPrecond
*--------------------------------------------------------------------------*/
/* Install a preconditioner: the apply and setup callbacks go into the
   function table; the opaque data pointer is kept on the solver itself. */
HYPRE_Int
hypre_PCGSetPrecond( void *pcg_vdata,
                     HYPRE_Int (*precond)(void*,void*,void*,void*),
                     HYPRE_Int (*precond_setup)(void*,void*,void*,void*),
                     void *precond_data )
{
   hypre_PCGData      *pcg_data = (hypre_PCGData *) pcg_vdata;
   hypre_PCGFunctions *fns      = pcg_data -> functions;
   fns -> precond           = precond;
   fns -> precond_setup     = precond_setup;
   pcg_data -> precond_data = precond_data;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_PCGSetPrintLevel, hypre_PCGGetPrintLevel
*--------------------------------------------------------------------------*/
/* Set the print level (printing happens when print_level > 0). */
HYPRE_Int
hypre_PCGSetPrintLevel( void *pcg_vdata,
                        HYPRE_Int level)
{
   ((hypre_PCGData *) pcg_vdata) -> print_level = level;
   return hypre_error_flag;
}
/* Retrieve the print level. */
HYPRE_Int
hypre_PCGGetPrintLevel( void *pcg_vdata,
                        HYPRE_Int * level)
{
   *level = ((hypre_PCGData *) pcg_vdata) -> print_level;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_PCGSetLogging, hypre_PCGGetLogging
*--------------------------------------------------------------------------*/
/* Set the logging level (extra norm bookkeeping when logging > 0). */
HYPRE_Int
hypre_PCGSetLogging( void *pcg_vdata,
                     HYPRE_Int level)
{
   ((hypre_PCGData *) pcg_vdata) -> logging = level;
   return hypre_error_flag;
}
/* Retrieve the logging level. */
HYPRE_Int
hypre_PCGGetLogging( void *pcg_vdata,
                     HYPRE_Int * level)
{
   *level = ((hypre_PCGData *) pcg_vdata) -> logging;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_PCGGetNumIterations
*--------------------------------------------------------------------------*/
/* Retrieve the number of iterations taken by the last solve. */
HYPRE_Int
hypre_PCGGetNumIterations( void *pcg_vdata,
                           HYPRE_Int *num_iterations )
{
   *num_iterations = ((hypre_PCGData *) pcg_vdata) -> num_iterations;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_PCGGetConverged
*--------------------------------------------------------------------------*/
/* Retrieve the converged flag set by the last solve. */
HYPRE_Int
hypre_PCGGetConverged( void *pcg_vdata,
                       HYPRE_Int *converged)
{
   *converged = ((hypre_PCGData *) pcg_vdata) -> converged;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_PCGPrintLogging
*--------------------------------------------------------------------------*/
/* Print, on rank 0 only, the logged residual norms of the last solve.
 *
 * pcg_vdata : opaque hypre_PCGData handle
 * myid      : caller's MPI rank (only rank 0 prints)
 *
 * Fix: guard against NULL norms/rel_norms.  The arrays are only filled
 * when logging/printing was enabled during the solve; without the guard,
 * calling this with print_level > 0 on a solver that never allocated the
 * arrays would dereference NULL. */
HYPRE_Int
hypre_PCGPrintLogging( void *pcg_vdata,
                       HYPRE_Int myid)
{
   hypre_PCGData *pcg_data = (hypre_PCGData *)pcg_vdata;
   HYPRE_Int num_iterations = (pcg_data -> num_iterations);
   HYPRE_Int print_level = (pcg_data -> print_level);
   HYPRE_Real *norms = (pcg_data -> norms);
   HYPRE_Real *rel_norms = (pcg_data -> rel_norms);
   HYPRE_Int i;
   if (myid == 0)
   {
      if (print_level > 0 && norms != NULL && rel_norms != NULL)
      {
         for (i = 0; i < num_iterations; i++)
         {
            hypre_printf("Residual norm[%d] = %e   ", i, norms[i]);
            hypre_printf("Relative residual norm[%d] = %e\n", i, rel_norms[i]);
         }
      }
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_PCGGetFinalRelativeResidualNorm
*--------------------------------------------------------------------------*/
/* Retrieve the relative residual norm recorded at the end of the last solve. */
HYPRE_Int
hypre_PCGGetFinalRelativeResidualNorm( void *pcg_vdata,
                                       HYPRE_Real *relative_residual_norm )
{
   *relative_residual_norm = ((hypre_PCGData *) pcg_vdata) -> rel_residual_norm;
   return hypre_error_flag;
}
| 39,887 | 32.379079 | 99 | c |
AMG | AMG-master/krylov/pcg.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* Preconditioned conjugate gradient (Omin) headers
*
*****************************************************************************/
#ifndef hypre_KRYLOV_PCG_HEADER
#define hypre_KRYLOV_PCG_HEADER
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/**
* @name Generic PCG Interface
*
* A general description of the interface goes here...
*
* @memo A generic PCG linear solver interface
* @version 0.1
* @author Jeffrey F. Painter
**/
/*@{*/
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
* hypre_PCGData and hypre_PCGFunctions
*--------------------------------------------------------------------------*/
/**
* @name PCG structs
*
* Description...
**/
/*@{*/
/**
 * The {\tt hypre\_PCGFunctions} object ...
**/
/* Table of function pointers through which the generic PCG driver performs
   all memory, communication, and linear-algebra operations, so the same
   Krylov code can be reused with different matrix/vector backends. */
typedef struct
{
   /* memory management */
   char *     (*CAlloc)        ( size_t count, size_t elt_size );
   HYPRE_Int  (*Free)          ( char *ptr );
   /* parallel bookkeeping: rank and size for the matrix's communicator */
   HYPRE_Int  (*CommInfo)      ( void *A, HYPRE_Int *my_id,
                                 HYPRE_Int *num_procs );
   /* vector lifecycle */
   void *     (*CreateVector)  ( void *vector );
   HYPRE_Int  (*DestroyVector) ( void *vector );
   /* matrix-vector product: y := alpha*A*x + beta*y, plus its work-space
      create/destroy pair */
   void *     (*MatvecCreate)  ( void *A, void *x );
   HYPRE_Int  (*Matvec)        ( void *matvec_data, HYPRE_Complex alpha, void *A,
                                 void *x, HYPRE_Complex beta, void *y );
   HYPRE_Int  (*MatvecDestroy) ( void *matvec_data );
   /* BLAS-like vector kernels used by the iteration */
   HYPRE_Real (*InnerProd)     ( void *x, void *y );
   HYPRE_Int  (*CopyVector)    ( void *x, void *y );
   HYPRE_Int  (*ClearVector)   ( void *x );
   HYPRE_Int  (*ScaleVector)   ( HYPRE_Complex alpha, void *x );
   HYPRE_Int  (*Axpy)          ( HYPRE_Complex alpha, void *x, void *y );
   /* preconditioner apply/setup; declared without prototypes (old C style)
      and filled in by hypre_PCGSetPrecond */
   HYPRE_Int  (*precond)();
   HYPRE_Int  (*precond_setup)();
} hypre_PCGFunctions;
/**
* The {\tt hypre\_PCGData} object ...
**/
/*
Summary of Parameters to Control Stopping Test:
- Standard (default) error tolerance: |delta-residual|/|right-hand-side|<tol
where the norm is an energy norm wrt preconditioner, |r|=sqrt(<Cr,r>).
- two_norm!=0 means: the norm is the L2 norm, |r|=sqrt(<r,r>)
- rel_change!=0 means: if pass the other stopping criteria, also check the
relative change in the solution x. Pass iff this relative change is small.
- tol = relative error tolerance, as above
-a_tol = absolute convergence tolerance (default is 0.0)
If one desires the convergence test to check the absolute
convergence tolerance *only*, then set the relative convergence
tolerance to 0.0. (The default convergence test is <C*r,r> <=
max(relative_tolerance^2 * <C*b, b>, absolute_tolerance^2)
- cf_tol = convergence factor tolerance; if >0 used for special test
for slow convergence
- stop_crit!=0 means (TO BE PHASED OUT):
pure absolute error tolerance rather than a pure relative
error tolerance on the residual. Never applies if rel_change!=0 or atolf!=0.
- atolf = absolute error tolerance factor to be used _together_ with the
relative error tolerance, |delta-residual| / ( atolf + |right-hand-side| ) < tol
(To BE PHASED OUT)
- recompute_residual means: when the iteration seems to be converged, recompute the
residual from scratch (r=b-Ax) and use this new residual to repeat the convergence test.
This can be expensive, use this only if you have seen a problem with the regular
residual computation.
- recompute_residual_p means: recompute the residual from scratch (r=b-Ax)
every "recompute_residual_p" iterations. This can be expensive and degrade the
convergence. Use it only if you have seen a problem with the regular residual
computation.
*/
/* State of one PCG solver instance; see the parameter summary above for
   the meaning of the stopping-test fields. */
typedef struct
{
   HYPRE_Real tol;                  /* relative convergence tolerance */
   HYPRE_Real atolf;                /* absolute tolerance factor (legacy) */
   HYPRE_Real cf_tol;               /* convergence-factor tolerance (if > 0) */
   HYPRE_Real a_tol;                /* absolute convergence tolerance */
   HYPRE_Real rtol;                 /* tolerance on ||r_new - r_old|| */
   HYPRE_Int max_iter;              /* iteration cap */
   HYPRE_Int two_norm;              /* nonzero: use <r,r> instead of <C*r,r> */
   HYPRE_Int rel_change;            /* nonzero: also require small change in x */
   HYPRE_Int recompute_residual;    /* recompute r=b-Ax before declaring convergence */
   HYPRE_Int recompute_residual_p;  /* recompute r=b-Ax every p iterations */
   HYPRE_Int stop_crit;             /* legacy pure-absolute criterion (to be phased out) */
   HYPRE_Int converged;             /* set by the solve when the test passed */
   void *A;                         /* system matrix */
   void *p;                         /* search-direction work vector */
   void *s;                         /* A*p / preconditioned-residual work vector */
   void *r; /* ...contains the residual.  This is currently kept permanently.
               If that is ever changed, it still must be kept if logging>1 */
   HYPRE_Int owns_matvec_data;  /* normally 1; if 0, don't delete it */
   void *matvec_data;           /* backend work space for Matvec */
   void *precond_data;          /* opaque preconditioner state */
   hypre_PCGFunctions * functions;  /* backend dispatch table */
   /* log info (always logged) */
   HYPRE_Int num_iterations;
   HYPRE_Real rel_residual_norm;
   HYPRE_Int print_level;  /* printing when print_level>0 */
   HYPRE_Int logging;      /* extra computations for logging when logging>0 */
   HYPRE_Real *norms;      /* per-iteration residual norms (when logged) */
   HYPRE_Real *rel_norms;  /* per-iteration relative residual norms (when logged) */
} hypre_PCGData;
#define hypre_PCGDataOwnsMatvecData(pcgdata) ((pcgdata) -> owns_matvec_data)
#ifdef __cplusplus
extern "C" {
#endif
/**
* @name generic PCG Solver
*
* Description...
**/
/*@{*/
/**
* Description...
*
* @param param [IN] ...
**/
hypre_PCGFunctions *
hypre_PCGFunctionsCreate(
char * (*CAlloc) ( size_t count, size_t elt_size ),
HYPRE_Int (*Free) ( char *ptr ),
HYPRE_Int (*CommInfo) ( void *A, HYPRE_Int *my_id,
HYPRE_Int *num_procs ),
void * (*CreateVector) ( void *vector ),
HYPRE_Int (*DestroyVector) ( void *vector ),
void * (*MatvecCreate) ( void *A, void *x ),
HYPRE_Int (*Matvec) ( void *matvec_data, HYPRE_Complex alpha, void *A,
void *x, HYPRE_Complex beta, void *y ),
HYPRE_Int (*MatvecDestroy) ( void *matvec_data ),
HYPRE_Real (*InnerProd) ( void *x, void *y ),
HYPRE_Int (*CopyVector) ( void *x, void *y ),
HYPRE_Int (*ClearVector) ( void *x ),
HYPRE_Int (*ScaleVector) ( HYPRE_Complex alpha, void *x ),
HYPRE_Int (*Axpy) ( HYPRE_Complex alpha, void *x, void *y ),
HYPRE_Int (*PrecondSetup) ( void *vdata, void *A, void *b, void *x ),
HYPRE_Int (*Precond) ( void *vdata, void *A, void *b, void *x )
);
/**
* Description...
*
* @param param [IN] ...
**/
void *
hypre_PCGCreate( hypre_PCGFunctions *pcg_functions );
#ifdef __cplusplus
}
#endif
#endif
| 7,406 | 34.440191 | 89 | h |
AMG | AMG-master/parcsr_ls/HYPRE_parcsr_amg.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include "_hypre_parcsr_ls.h"
/*--------------------------------------------------------------------------
* HYPRE_BoomerAMGCreate
*--------------------------------------------------------------------------*/
/* Allocate a new BoomerAMG solver object and return it through *solver.
   Flags argument 1 as erroneous when the output pointer is NULL. */
HYPRE_Int
HYPRE_BoomerAMGCreate( HYPRE_Solver *solver)
{
   if (solver == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   *solver = (HYPRE_Solver) hypre_BoomerAMGCreate();
   return hypre_error_flag;
}

/* Free all storage held by a BoomerAMG solver object. */
HYPRE_Int
HYPRE_BoomerAMGDestroy( HYPRE_Solver solver )
{
   return hypre_BoomerAMGDestroy((void *) solver);
}

/* Run the AMG setup phase for the system matrix A, building the
   multigrid hierarchy inside the solver object. */
HYPRE_Int
HYPRE_BoomerAMGSetup( HYPRE_Solver solver,
                      HYPRE_ParCSRMatrix A,
                      HYPRE_ParVector b,
                      HYPRE_ParVector x )
{
   return hypre_BoomerAMGSetup((void *) solver,
                               (hypre_ParCSRMatrix *) A,
                               (hypre_ParVector *) b,
                               (hypre_ParVector *) x);
}

/* Apply AMG cycles to solve A x = b using the hierarchy built by Setup. */
HYPRE_Int
HYPRE_BoomerAMGSolve( HYPRE_Solver solver,
                      HYPRE_ParCSRMatrix A,
                      HYPRE_ParVector b,
                      HYPRE_ParVector x )
{
   return hypre_BoomerAMGSolve((void *) solver,
                               (hypre_ParCSRMatrix *) A,
                               (hypre_ParVector *) b,
                               (hypre_ParVector *) x);
}
/*--------------------------------------------------------------------------
* HYPRE_BoomerAMGSetRestriction
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * Thin forwarding wrappers.  Each accessor below casts the opaque
 * HYPRE_Solver handle to the internal AMG data pointer and delegates to
 * the matching hypre_BoomerAMG routine.
 *--------------------------------------------------------------------------*/

/* Select the restriction operator variant. */
HYPRE_Int
HYPRE_BoomerAMGSetRestriction( HYPRE_Solver solver,
                               HYPRE_Int restr_par )
{
   return hypre_BoomerAMGSetRestriction((void *) solver, restr_par);
}

/* Maximum number of multigrid levels. */
HYPRE_Int
HYPRE_BoomerAMGSetMaxLevels( HYPRE_Solver solver,
                             HYPRE_Int max_levels )
{
   return hypre_BoomerAMGSetMaxLevels((void *) solver, max_levels);
}

HYPRE_Int
HYPRE_BoomerAMGGetMaxLevels( HYPRE_Solver solver,
                             HYPRE_Int * max_levels )
{
   return hypre_BoomerAMGGetMaxLevels((void *) solver, max_levels);
}

/* Largest allowed size of the coarsest grid. */
HYPRE_Int
HYPRE_BoomerAMGSetMaxCoarseSize( HYPRE_Solver solver,
                                 HYPRE_Int max_coarse_size )
{
   return hypre_BoomerAMGSetMaxCoarseSize((void *) solver, max_coarse_size);
}

HYPRE_Int
HYPRE_BoomerAMGGetMaxCoarseSize( HYPRE_Solver solver,
                                 HYPRE_Int * max_coarse_size )
{
   return hypre_BoomerAMGGetMaxCoarseSize((void *) solver, max_coarse_size);
}

/* Smallest allowed size of the coarsest grid. */
HYPRE_Int
HYPRE_BoomerAMGSetMinCoarseSize( HYPRE_Solver solver,
                                 HYPRE_Int min_coarse_size )
{
   return hypre_BoomerAMGSetMinCoarseSize((void *) solver, min_coarse_size);
}

HYPRE_Int
HYPRE_BoomerAMGGetMinCoarseSize( HYPRE_Solver solver,
                                 HYPRE_Int * min_coarse_size )
{
   return hypre_BoomerAMGGetMinCoarseSize((void *) solver, min_coarse_size);
}

/* Threshold for switching to a sequential coarse solve. */
HYPRE_Int
HYPRE_BoomerAMGSetSeqThreshold( HYPRE_Solver solver,
                                HYPRE_Int seq_threshold )
{
   return hypre_BoomerAMGSetSeqThreshold((void *) solver, seq_threshold);
}

HYPRE_Int
HYPRE_BoomerAMGGetSeqThreshold( HYPRE_Solver solver,
                                HYPRE_Int * seq_threshold )
{
   return hypre_BoomerAMGGetSeqThreshold((void *) solver, seq_threshold);
}

/* Redundant coarse-grid solve flag. */
HYPRE_Int
HYPRE_BoomerAMGSetRedundant( HYPRE_Solver solver,
                             HYPRE_Int redundant )
{
   return hypre_BoomerAMGSetRedundant((void *) solver, redundant);
}

HYPRE_Int
HYPRE_BoomerAMGGetRedundant( HYPRE_Solver solver,
                             HYPRE_Int * redundant )
{
   return hypre_BoomerAMGGetRedundant((void *) solver, redundant);
}
/*--------------------------------------------------------------------------
* HYPRE_BoomerAMGSetStrongThreshold, HYPRE_BoomerAMGGetStrongThreshold
*--------------------------------------------------------------------------*/
/* Strength-of-connection threshold used during coarsening. */
HYPRE_Int
HYPRE_BoomerAMGSetStrongThreshold( HYPRE_Solver solver,
                                   HYPRE_Real strong_threshold )
{
   return hypre_BoomerAMGSetStrongThreshold((void *) solver,
                                            strong_threshold);
}

HYPRE_Int
HYPRE_BoomerAMGGetStrongThreshold( HYPRE_Solver solver,
                                   HYPRE_Real * strong_threshold )
{
   return hypre_BoomerAMGGetStrongThreshold((void *) solver,
                                            strong_threshold);
}

/* Maximum row-sum parameter for dependency weakening. */
HYPRE_Int
HYPRE_BoomerAMGSetMaxRowSum( HYPRE_Solver solver,
                             HYPRE_Real max_row_sum )
{
   return hypre_BoomerAMGSetMaxRowSum((void *) solver, max_row_sum);
}

HYPRE_Int
HYPRE_BoomerAMGGetMaxRowSum( HYPRE_Solver solver,
                             HYPRE_Real * max_row_sum )
{
   return hypre_BoomerAMGGetMaxRowSum((void *) solver, max_row_sum);
}

/* Truncation factor for interpolation operators. */
HYPRE_Int
HYPRE_BoomerAMGSetTruncFactor( HYPRE_Solver solver,
                               HYPRE_Real trunc_factor )
{
   return hypre_BoomerAMGSetTruncFactor((void *) solver, trunc_factor);
}

HYPRE_Int
HYPRE_BoomerAMGGetTruncFactor( HYPRE_Solver solver,
                               HYPRE_Real * trunc_factor )
{
   return hypre_BoomerAMGGetTruncFactor((void *) solver, trunc_factor);
}

/* Cap on the number of elements kept per row of P. */
HYPRE_Int
HYPRE_BoomerAMGSetPMaxElmts( HYPRE_Solver solver,
                             HYPRE_Int P_max_elmts )
{
   return hypre_BoomerAMGSetPMaxElmts((void *) solver, P_max_elmts);
}

HYPRE_Int
HYPRE_BoomerAMGGetPMaxElmts( HYPRE_Solver solver,
                             HYPRE_Int * P_max_elmts )
{
   return hypre_BoomerAMGGetPMaxElmts((void *) solver, P_max_elmts);
}

/* Truncation threshold for Jacobi interpolation. */
HYPRE_Int
HYPRE_BoomerAMGSetJacobiTruncThreshold( HYPRE_Solver solver,
                                        HYPRE_Real jacobi_trunc_threshold )
{
   return hypre_BoomerAMGSetJacobiTruncThreshold((void *) solver,
                                                 jacobi_trunc_threshold);
}

HYPRE_Int
HYPRE_BoomerAMGGetJacobiTruncThreshold( HYPRE_Solver solver,
                                        HYPRE_Real * jacobi_trunc_threshold )
{
   return hypre_BoomerAMGGetJacobiTruncThreshold((void *) solver,
                                                 jacobi_trunc_threshold);
}
/*--------------------------------------------------------------------------
* HYPRE_BoomerAMGSetPostInterpType, HYPRE_BoomerAMGGetPostInterpType
* If >0, specifies something to do to improve a computed interpolation matrix.
* defaults to 0, for nothing.
*--------------------------------------------------------------------------*/
/* Post-processing applied to a computed interpolation matrix.
   A value > 0 selects an improvement step; 0 (the default) does nothing. */
HYPRE_Int
HYPRE_BoomerAMGSetPostInterpType( HYPRE_Solver solver,
                                  HYPRE_Int post_interp_type )
{
   return hypre_BoomerAMGSetPostInterpType((void *) solver,
                                           post_interp_type);
}

HYPRE_Int
HYPRE_BoomerAMGGetPostInterpType( HYPRE_Solver solver,
                                  HYPRE_Int * post_interp_type )
{
   return hypre_BoomerAMGGetPostInterpType((void *) solver,
                                           post_interp_type);
}

/* Switch value controlling creation of the comm package for S. */
HYPRE_Int
HYPRE_BoomerAMGSetSCommPkgSwitch( HYPRE_Solver solver,
                                  HYPRE_Real S_commpkg_switch )
{
   return hypre_BoomerAMGSetSCommPkgSwitch((void *) solver,
                                           S_commpkg_switch);
}

/* Interpolation operator variant. */
HYPRE_Int
HYPRE_BoomerAMGSetInterpType( HYPRE_Solver solver,
                              HYPRE_Int interp_type )
{
   return hypre_BoomerAMGSetInterpType((void *) solver, interp_type);
}

/* Separation-weight parameter. */
HYPRE_Int
HYPRE_BoomerAMGSetSepWeight( HYPRE_Solver solver,
                             HYPRE_Int sep_weight )
{
   return hypre_BoomerAMGSetSepWeight((void *) solver, sep_weight);
}

/* Minimum number of iterations before convergence may be declared. */
HYPRE_Int
HYPRE_BoomerAMGSetMinIter( HYPRE_Solver solver,
                           HYPRE_Int min_iter )
{
   return hypre_BoomerAMGSetMinIter((void *) solver, min_iter);
}

/* Maximum number of iterations. */
HYPRE_Int
HYPRE_BoomerAMGSetMaxIter( HYPRE_Solver solver,
                           HYPRE_Int max_iter )
{
   return hypre_BoomerAMGSetMaxIter((void *) solver, max_iter);
}

HYPRE_Int
HYPRE_BoomerAMGGetMaxIter( HYPRE_Solver solver,
                           HYPRE_Int * max_iter )
{
   return hypre_BoomerAMGGetMaxIter((void *) solver, max_iter);
}
/*--------------------------------------------------------------------------
* HYPRE_BoomerAMGSetCoarsenType, HYPRE_BoomerAMGGetCoarsenType
*--------------------------------------------------------------------------*/
/* Coarsening algorithm selector. */
HYPRE_Int
HYPRE_BoomerAMGSetCoarsenType( HYPRE_Solver solver,
                               HYPRE_Int coarsen_type )
{
   return hypre_BoomerAMGSetCoarsenType((void *) solver, coarsen_type);
}

HYPRE_Int
HYPRE_BoomerAMGGetCoarsenType( HYPRE_Solver solver,
                               HYPRE_Int * coarsen_type )
{
   return hypre_BoomerAMGGetCoarsenType((void *) solver, coarsen_type);
}

/* Measure type used by the coarsening algorithms. */
HYPRE_Int
HYPRE_BoomerAMGSetMeasureType( HYPRE_Solver solver,
                               HYPRE_Int measure_type )
{
   return hypre_BoomerAMGSetMeasureType((void *) solver, measure_type);
}

HYPRE_Int
HYPRE_BoomerAMGGetMeasureType( HYPRE_Solver solver,
                               HYPRE_Int * measure_type )
{
   return hypre_BoomerAMGGetMeasureType((void *) solver, measure_type);
}
/*--------------------------------------------------------------------------
* HYPRE_BoomerAMGSetOldDefault
*--------------------------------------------------------------------------*/
/* Restore the historical default configuration: Falgout coarsening (6),
   classical modified interpolation (0), and no interpolation truncation. */
HYPRE_Int
HYPRE_BoomerAMGSetOldDefault( HYPRE_Solver solver)
{
   HYPRE_BoomerAMGSetCoarsenType(solver, 6);  /* Falgout coarsening   */
   HYPRE_BoomerAMGSetInterpType(solver, 0);   /* classical modified   */
   HYPRE_BoomerAMGSetPMaxElmts(solver, 0);    /* no row truncation    */
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_BoomerAMGSetSetupType
*--------------------------------------------------------------------------*/
/* Setup phase variant. */
HYPRE_Int
HYPRE_BoomerAMGSetSetupType( HYPRE_Solver solver,
                             HYPRE_Int setup_type )
{
   return hypre_BoomerAMGSetSetupType((void *) solver, setup_type);
}

/* Cycle shape (e.g. V- or W-cycle). */
HYPRE_Int
HYPRE_BoomerAMGSetCycleType( HYPRE_Solver solver,
                             HYPRE_Int cycle_type )
{
   return hypre_BoomerAMGSetCycleType((void *) solver, cycle_type);
}

HYPRE_Int
HYPRE_BoomerAMGGetCycleType( HYPRE_Solver solver,
                             HYPRE_Int * cycle_type )
{
   return hypre_BoomerAMGGetCycleType((void *) solver, cycle_type);
}

/* Convergence tolerance. */
HYPRE_Int
HYPRE_BoomerAMGSetTol( HYPRE_Solver solver,
                       HYPRE_Real tol )
{
   return hypre_BoomerAMGSetTol((void *) solver, tol);
}

HYPRE_Int
HYPRE_BoomerAMGGetTol( HYPRE_Solver solver,
                       HYPRE_Real * tol )
{
   return hypre_BoomerAMGGetTol((void *) solver, tol);
}
/*--------------------------------------------------------------------------
* HYPRE_BoomerAMGSetNumGridSweeps
* DEPRECATED. There are memory management problems associated with the
* use of a user-supplied array (who releases it?).
* Use SetNumSweeps and SetCycleNumSweeps instead.
*--------------------------------------------------------------------------*/
/* DEPRECATED: takes ownership of a user-supplied array, which creates
   memory-management ambiguity (who frees it?).  Prefer SetNumSweeps and
   SetCycleNumSweeps. */
HYPRE_Int
HYPRE_BoomerAMGSetNumGridSweeps( HYPRE_Solver solver,
                                 HYPRE_Int *num_grid_sweeps )
{
   return hypre_BoomerAMGSetNumGridSweeps((void *) solver, num_grid_sweeps);
}

/* Uniform sweep count for all cycle phases.  There is no matching Get;
   query individual phases with GetCycleNumSweeps. */
HYPRE_Int
HYPRE_BoomerAMGSetNumSweeps( HYPRE_Solver solver,
                             HYPRE_Int num_sweeps )
{
   return hypre_BoomerAMGSetNumSweeps((void *) solver, num_sweeps);
}

/* Sweep count for cycle phase k. */
HYPRE_Int
HYPRE_BoomerAMGSetCycleNumSweeps( HYPRE_Solver solver,
                                  HYPRE_Int num_sweeps, HYPRE_Int k )
{
   return hypre_BoomerAMGSetCycleNumSweeps((void *) solver, num_sweeps, k);
}

HYPRE_Int
HYPRE_BoomerAMGGetCycleNumSweeps( HYPRE_Solver solver,
                                  HYPRE_Int * num_sweeps, HYPRE_Int k )
{
   return hypre_BoomerAMGGetCycleNumSweeps((void *) solver, num_sweeps, k);
}
/*--------------------------------------------------------------------------
* HYPRE_BoomerAMGInitGridRelaxation
*--------------------------------------------------------------------------*/
/* Allocate and populate the four relaxation-control arrays (fine grid,
   down cycle, up cycle, coarsest grid) and a per-level weight array.
   CF-ordered sweeps are installed when coarsen_type == 5; a simpler
   C/F then F/C pattern is used otherwise.  All relaxation types are
   hybrid Gauss-Seidel (type 3); every relaxation weight starts at 1.
   NOTE: ownership of the returned arrays passes to the caller. */
HYPRE_Int
HYPRE_BoomerAMGInitGridRelaxation( HYPRE_Int **num_grid_sweeps_ptr,
                                   HYPRE_Int **grid_relax_type_ptr,
                                   HYPRE_Int ***grid_relax_points_ptr,
                                   HYPRE_Int coarsen_type,
                                   HYPRE_Real **relax_weights_ptr,
                                   HYPRE_Int max_levels )
{
   HYPRE_Int lev;
   HYPRE_Int *num_grid_sweeps;
   HYPRE_Int *grid_relax_type;
   HYPRE_Int **grid_relax_points;
   HYPRE_Real *relax_weights;

   /* One slot per cycle phase: 0 = fine, 1 = down, 2 = up, 3 = coarsest. */
   *num_grid_sweeps_ptr = hypre_CTAlloc(HYPRE_Int, 4);
   *grid_relax_type_ptr = hypre_CTAlloc(HYPRE_Int, 4);
   *grid_relax_points_ptr = hypre_CTAlloc(HYPRE_Int*, 4);
   *relax_weights_ptr = hypre_CTAlloc(HYPRE_Real, max_levels);

   num_grid_sweeps = *num_grid_sweeps_ptr;
   grid_relax_type = *grid_relax_type_ptr;
   grid_relax_points = *grid_relax_points_ptr;
   relax_weights = *relax_weights_ptr;

   /* Coarsest grid: one sweep over all points (marker 0). */
   num_grid_sweeps[3] = 1;
   grid_relax_type[3] = 3;
   grid_relax_points[3] = hypre_CTAlloc(HYPRE_Int, 1);
   grid_relax_points[3][0] = 0;

   if (coarsen_type == 5)
   {
      /* Fine grid: coarse (-2), fine (-1), then C-points (1). */
      num_grid_sweeps[0] = 3;
      grid_relax_type[0] = 3;
      grid_relax_points[0] = hypre_CTAlloc(HYPRE_Int, 4);
      grid_relax_points[0][0] = -2;
      grid_relax_points[0][1] = -1;
      grid_relax_points[0][2] = 1;

      /* Down cycle: F, C, then two coarse sweeps. */
      num_grid_sweeps[1] = 4;
      grid_relax_type[1] = 3;
      grid_relax_points[1] = hypre_CTAlloc(HYPRE_Int, 4);
      grid_relax_points[1][0] = -1;
      grid_relax_points[1][1] = 1;
      grid_relax_points[1][2] = -2;
      grid_relax_points[1][3] = -2;

      /* Up cycle: two coarse sweeps, then C, then F. */
      num_grid_sweeps[2] = 4;
      grid_relax_type[2] = 3;
      grid_relax_points[2] = hypre_CTAlloc(HYPRE_Int, 4);
      grid_relax_points[2][0] = -2;
      grid_relax_points[2][1] = -2;
      grid_relax_points[2][2] = 1;
      grid_relax_points[2][3] = -1;
   }
   else
   {
      /* Fine grid and down cycle: C-points then F-points. */
      num_grid_sweeps[0] = 2;
      grid_relax_type[0] = 3;
      grid_relax_points[0] = hypre_CTAlloc(HYPRE_Int, 2);
      grid_relax_points[0][0] = 1;
      grid_relax_points[0][1] = -1;

      num_grid_sweeps[1] = 2;
      grid_relax_type[1] = 3;
      grid_relax_points[1] = hypre_CTAlloc(HYPRE_Int, 2);
      grid_relax_points[1][0] = 1;
      grid_relax_points[1][1] = -1;

      /* Up cycle: F-points then C-points. */
      num_grid_sweeps[2] = 2;
      grid_relax_type[2] = 3;
      grid_relax_points[2] = hypre_CTAlloc(HYPRE_Int, 2);
      grid_relax_points[2][0] = -1;
      grid_relax_points[2][1] = 1;
   }

   for (lev = 0; lev < max_levels; lev++)
   {
      relax_weights[lev] = 1.;
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_BoomerAMGSetGridRelaxType
* DEPRECATED. There are memory management problems associated with the
* use of a user-supplied array (who releases it?).
* Use SetRelaxType and SetCycleRelaxType instead.
*--------------------------------------------------------------------------*/
/* DEPRECATED: takes ownership of a user-supplied array (unclear who frees
   it).  Prefer SetRelaxType and SetCycleRelaxType. */
HYPRE_Int
HYPRE_BoomerAMGSetGridRelaxType( HYPRE_Solver solver,
                                 HYPRE_Int *grid_relax_type )
{
   return hypre_BoomerAMGSetGridRelaxType((void *) solver, grid_relax_type);
}

/* Smoother type used on every level. */
HYPRE_Int
HYPRE_BoomerAMGSetRelaxType( HYPRE_Solver solver,
                             HYPRE_Int relax_type )
{
   return hypre_BoomerAMGSetRelaxType((void *) solver, relax_type);
}

/* Smoother type for cycle phase k. */
HYPRE_Int
HYPRE_BoomerAMGSetCycleRelaxType( HYPRE_Solver solver,
                                  HYPRE_Int relax_type, HYPRE_Int k )
{
   return hypre_BoomerAMGSetCycleRelaxType((void *) solver, relax_type, k);
}

HYPRE_Int
HYPRE_BoomerAMGGetCycleRelaxType( HYPRE_Solver solver,
                                  HYPRE_Int * relax_type, HYPRE_Int k )
{
   return hypre_BoomerAMGGetCycleRelaxType((void *) solver, relax_type, k);
}

/* CF-ordering flag for relaxation. */
HYPRE_Int
HYPRE_BoomerAMGSetRelaxOrder( HYPRE_Solver solver,
                              HYPRE_Int relax_order)
{
   return hypre_BoomerAMGSetRelaxOrder((void *) solver, relax_order);
}

/* DEPRECATED: user-supplied array with ambiguous ownership; believed to
   be unused in practice. */
HYPRE_Int
HYPRE_BoomerAMGSetGridRelaxPoints( HYPRE_Solver solver,
                                   HYPRE_Int **grid_relax_points )
{
   return hypre_BoomerAMGSetGridRelaxPoints((void *) solver,
                                            grid_relax_points);
}

/* DEPRECATED: user-supplied array with ambiguous ownership.  Prefer
   SetRelaxWt and SetLevelRelaxWt. */
HYPRE_Int
HYPRE_BoomerAMGSetRelaxWeight( HYPRE_Solver solver,
                               HYPRE_Real *relax_weight )
{
   return hypre_BoomerAMGSetRelaxWeight((void *) solver, relax_weight);
}
/*--------------------------------------------------------------------------
* HYPRE_BoomerAMGSetRelaxWt
*--------------------------------------------------------------------------*/
/* Relaxation weight applied on all levels. */
HYPRE_Int
HYPRE_BoomerAMGSetRelaxWt( HYPRE_Solver solver,
                           HYPRE_Real relax_wt )
{
   return hypre_BoomerAMGSetRelaxWt((void *) solver, relax_wt);
}

/* Relaxation weight for one specific level. */
HYPRE_Int
HYPRE_BoomerAMGSetLevelRelaxWt( HYPRE_Solver solver,
                                HYPRE_Real relax_wt,
                                HYPRE_Int level )
{
   return hypre_BoomerAMGSetLevelRelaxWt((void *) solver, relax_wt, level);
}

/* Per-level outer-relaxation weight array. */
HYPRE_Int
HYPRE_BoomerAMGSetOmega( HYPRE_Solver solver,
                         HYPRE_Real *omega )
{
   return hypre_BoomerAMGSetOmega((void *) solver, omega);
}

/* Outer relaxation weight applied on all levels. */
HYPRE_Int
HYPRE_BoomerAMGSetOuterWt( HYPRE_Solver solver,
                           HYPRE_Real outer_wt )
{
   return hypre_BoomerAMGSetOuterWt((void *) solver, outer_wt);
}

/* Outer relaxation weight for one specific level. */
HYPRE_Int
HYPRE_BoomerAMGSetLevelOuterWt( HYPRE_Solver solver,
                                HYPRE_Real outer_wt,
                                HYPRE_Int level )
{
   return hypre_BoomerAMGSetLevelOuterWt((void *) solver, outer_wt, level);
}
/*--------------------------------------------------------------------------
* HYPRE_BoomerAMGSetLogging, HYPRE_BoomerAMGGetLogging
*--------------------------------------------------------------------------*/
/* Logging level.  Call before Setup: changing the logging level may
   require (de)allocating arrays, which currently happens only during
   Setup.  Changes at other times could be supported but are not needed. */
HYPRE_Int
HYPRE_BoomerAMGSetLogging( HYPRE_Solver solver,
                           HYPRE_Int logging )
{
   return hypre_BoomerAMGSetLogging((void *) solver, logging);
}

HYPRE_Int
HYPRE_BoomerAMGGetLogging( HYPRE_Solver solver,
                           HYPRE_Int * logging )
{
   return hypre_BoomerAMGGetLogging((void *) solver, logging);
}

/* Diagnostic print verbosity. */
HYPRE_Int
HYPRE_BoomerAMGSetPrintLevel( HYPRE_Solver solver,
                              HYPRE_Int print_level )
{
   return hypre_BoomerAMGSetPrintLevel((void *) solver, print_level);
}

HYPRE_Int
HYPRE_BoomerAMGGetPrintLevel( HYPRE_Solver solver,
                              HYPRE_Int * print_level )
{
   return hypre_BoomerAMGGetPrintLevel((void *) solver, print_level);
}

/* Destination file for diagnostic output. */
HYPRE_Int
HYPRE_BoomerAMGSetPrintFileName( HYPRE_Solver solver,
                                 const char *print_file_name )
{
   return hypre_BoomerAMGSetPrintFileName((void *) solver, print_file_name);
}

/* Debugging flag. */
HYPRE_Int
HYPRE_BoomerAMGSetDebugFlag( HYPRE_Solver solver,
                             HYPRE_Int debug_flag )
{
   return hypre_BoomerAMGSetDebugFlag((void *) solver, debug_flag);
}

HYPRE_Int
HYPRE_BoomerAMGGetDebugFlag( HYPRE_Solver solver,
                             HYPRE_Int * debug_flag )
{
   return hypre_BoomerAMGGetDebugFlag((void *) solver, debug_flag);
}
/*--------------------------------------------------------------------------
* HYPRE_BoomerAMGGetNumIterations
*--------------------------------------------------------------------------*/
/* Iterations performed by the most recent Solve. */
HYPRE_Int
HYPRE_BoomerAMGGetNumIterations( HYPRE_Solver solver,
                                 HYPRE_Int *num_iterations )
{
   return hypre_BoomerAMGGetNumIterations((void *) solver, num_iterations);
}

/* Cumulative iteration count across all solves. */
HYPRE_Int
HYPRE_BoomerAMGGetCumNumIterations( HYPRE_Solver solver,
                                    HYPRE_Int *cum_num_iterations )
{
   return hypre_BoomerAMGGetCumNumIterations((void *) solver,
                                             cum_num_iterations);
}

/* Cumulative nonzero count of the A*P products. */
HYPRE_Int
HYPRE_BoomerAMGGetCumNnzAP( HYPRE_Solver solver,
                            HYPRE_Real *cum_nnz_AP )
{
   return hypre_BoomerAMGGetCumNnzAP((void *) solver, cum_nnz_AP);
}

/* Borrowed reference to the final residual vector. */
HYPRE_Int
HYPRE_BoomerAMGGetResidual( HYPRE_Solver solver, HYPRE_ParVector * residual )
{
   return hypre_BoomerAMGGetResidual((void *) solver,
                                     (hypre_ParVector **) residual);
}

/* Relative residual norm reached by the most recent Solve. */
HYPRE_Int
HYPRE_BoomerAMGGetFinalRelativeResidualNorm( HYPRE_Solver solver,
                                             HYPRE_Real *rel_resid_norm )
{
   return hypre_BoomerAMGGetRelResidualNorm((void *) solver, rel_resid_norm);
}
/*--------------------------------------------------------------------------
* HYPRE_BoomerAMGSetNumFunctions, HYPRE_BoomerAMGGetNumFunctions
*--------------------------------------------------------------------------*/
/* Number of PDE functions (for systems of equations). */
HYPRE_Int
HYPRE_BoomerAMGSetNumFunctions( HYPRE_Solver solver,
                                HYPRE_Int num_functions )
{
   return hypre_BoomerAMGSetNumFunctions((void *) solver, num_functions);
}

HYPRE_Int
HYPRE_BoomerAMGGetNumFunctions( HYPRE_Solver solver,
                                HYPRE_Int * num_functions )
{
   return hypre_BoomerAMGGetNumFunctions((void *) solver, num_functions);
}

/* Nodal-systems coarsening flag. */
HYPRE_Int
HYPRE_BoomerAMGSetNodal( HYPRE_Solver solver,
                         HYPRE_Int nodal )
{
   return hypre_BoomerAMGSetNodal((void *) solver, nodal);
}

/* Number of levels using nodal coarsening. */
HYPRE_Int
HYPRE_BoomerAMGSetNodalLevels( HYPRE_Solver solver,
                               HYPRE_Int nodal_levels )
{
   return hypre_BoomerAMGSetNodalLevels((void *) solver, nodal_levels);
}

/* Diagonal treatment in nodal systems. */
HYPRE_Int
HYPRE_BoomerAMGSetNodalDiag( HYPRE_Solver solver,
                             HYPRE_Int nodal )
{
   return hypre_BoomerAMGSetNodalDiag((void *) solver, nodal);
}

/* Mapping from unknowns to functions.
   CAUTION: the solver takes ownership of dof_func and frees it in
   hypre_BoomerAMGDestroy.  A caller-owned array passed here will be
   released by the solver — an unwanted surprise in some usages, although
   this situation is expected to be rare. */
HYPRE_Int
HYPRE_BoomerAMGSetDofFunc( HYPRE_Solver solver,
                           HYPRE_Int *dof_func )
{
   return hypre_BoomerAMGSetDofFunc((void *) solver, dof_func);
}
/*--------------------------------------------------------------------------
* HYPRE_BoomerAMGSetNumPaths
*--------------------------------------------------------------------------*/
/* Number of paths used by aggressive coarsening. */
HYPRE_Int
HYPRE_BoomerAMGSetNumPaths( HYPRE_Solver solver,
                            HYPRE_Int num_paths )
{
   return hypre_BoomerAMGSetNumPaths((void *) solver, num_paths);
}

/* Number of levels on which aggressive coarsening is applied. */
HYPRE_Int
HYPRE_BoomerAMGSetAggNumLevels( HYPRE_Solver solver,
                                HYPRE_Int agg_num_levels )
{
   return hypre_BoomerAMGSetAggNumLevels((void *) solver, agg_num_levels);
}

/* Interpolation variant for aggressively coarsened levels. */
HYPRE_Int
HYPRE_BoomerAMGSetAggInterpType( HYPRE_Solver solver,
                                 HYPRE_Int agg_interp_type )
{
   return hypre_BoomerAMGSetAggInterpType((void *) solver, agg_interp_type);
}

/* Truncation factor for aggressive-coarsening interpolation. */
HYPRE_Int
HYPRE_BoomerAMGSetAggTruncFactor( HYPRE_Solver solver,
                                  HYPRE_Real agg_trunc_factor )
{
   return hypre_BoomerAMGSetAggTruncFactor((void *) solver,
                                           agg_trunc_factor);
}

/* Alias for SetMultAddTruncFactor (additive-variant truncation factor). */
HYPRE_Int
HYPRE_BoomerAMGSetAddTruncFactor( HYPRE_Solver solver,
                                  HYPRE_Real add_trunc_factor )
{
   return hypre_BoomerAMGSetMultAddTruncFactor((void *) solver,
                                               add_trunc_factor);
}

/* Truncation factor for the mult-additive AMG variant. */
HYPRE_Int
HYPRE_BoomerAMGSetMultAddTruncFactor( HYPRE_Solver solver,
                                      HYPRE_Real add_trunc_factor )
{
   return hypre_BoomerAMGSetMultAddTruncFactor((void *) solver,
                                               add_trunc_factor);
}

/* Relaxation weight for the additive variant. */
HYPRE_Int
HYPRE_BoomerAMGSetAddRelaxWt( HYPRE_Solver solver,
                              HYPRE_Real add_rlx_wt )
{
   return hypre_BoomerAMGSetAddRelaxWt((void *) solver, add_rlx_wt);
}

/* Relaxation type for the additive variant. */
HYPRE_Int
HYPRE_BoomerAMGSetAddRelaxType( HYPRE_Solver solver,
                                HYPRE_Int add_rlx_type )
{
   return hypre_BoomerAMGSetAddRelaxType((void *) solver, add_rlx_type);
}
/*--------------------------------------------------------------------------
* HYPRE_BoomerAMGSetAggP12TruncFactor
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_BoomerAMGSetAggP12TruncFactor( HYPRE_Solver solver,
HYPRE_Real agg_P12_trunc_factor )
{
return( hypre_BoomerAMGSetAggP12TruncFactor( (void *) solver, agg_P12_trunc_factor ) );
}
/*--------------------------------------------------------------------------
* HYPRE_BoomerAMGSetAggPMaxElmts
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_BoomerAMGSetAggPMaxElmts( HYPRE_Solver solver,
HYPRE_Int agg_P_max_elmts )
{
return( hypre_BoomerAMGSetAggPMaxElmts( (void *) solver, agg_P_max_elmts ) );
}
/*--------------------------------------------------------------------------
* HYPRE_BoomerAMGSetAddPMaxElmts
*--------------------------------------------------------------------------*/
HYPRE_Int
HYPRE_BoomerAMGSetAddPMaxElmts( HYPRE_Solver solver,
HYPRE_Int add_P_max_elmts )
{
return( hypre_BoomerAMGSetMultAddPMaxElmts( (void *) solver, add_P_max_elmts ) );
}
/*--------------------------------------------------------------------------
 * HYPRE_BoomerAMGSetMultAddPMaxElmts
 *--------------------------------------------------------------------------*/

/* Thin wrapper: forward the mult-additive interpolation row limit to the
 * internal BoomerAMG setter. */
HYPRE_Int
HYPRE_BoomerAMGSetMultAddPMaxElmts( HYPRE_Solver solver,
                                    HYPRE_Int    add_P_max_elmts )
{
   void *amg_vdata = (void *) solver;

   return hypre_BoomerAMGSetMultAddPMaxElmts(amg_vdata, add_P_max_elmts);
}
/*--------------------------------------------------------------------------
 * HYPRE_BoomerAMGSetAggP12MaxElmts
 *--------------------------------------------------------------------------*/

/* Thin wrapper: forward the max elements-per-row limit for the P1/P2
 * matrices of two-stage interpolation to the internal setter. */
HYPRE_Int
HYPRE_BoomerAMGSetAggP12MaxElmts( HYPRE_Solver solver,
                                  HYPRE_Int    agg_P12_max_elmts )
{
   void *amg_vdata = (void *) solver;

   return hypre_BoomerAMGSetAggP12MaxElmts(amg_vdata, agg_P12_max_elmts);
}
/*--------------------------------------------------------------------------
 * HYPRE_BoomerAMGSetChebyOrder
 *--------------------------------------------------------------------------*/

/* Thin wrapper: forward the Chebyshev smoother order to the internal
 * BoomerAMG setter. */
HYPRE_Int
HYPRE_BoomerAMGSetChebyOrder( HYPRE_Solver solver, HYPRE_Int order )
{
   void *amg_vdata = (void *) solver;

   return hypre_BoomerAMGSetChebyOrder(amg_vdata, order);
}
/*--------------------------------------------------------------------------
 * HYPRE_BoomerAMGSetChebyFraction
 *--------------------------------------------------------------------------*/

/* Thin wrapper: forward the Chebyshev eigenvalue-ratio parameter to the
 * internal BoomerAMG setter. */
HYPRE_Int
HYPRE_BoomerAMGSetChebyFraction( HYPRE_Solver solver, HYPRE_Real ratio )
{
   void *amg_vdata = (void *) solver;

   return hypre_BoomerAMGSetChebyFraction(amg_vdata, ratio);
}
/*--------------------------------------------------------------------------
 * HYPRE_BoomerAMGSetChebyScale
 *--------------------------------------------------------------------------*/

/* Thin wrapper: forward the Chebyshev scaling flag to the internal
 * BoomerAMG setter. */
HYPRE_Int
HYPRE_BoomerAMGSetChebyScale( HYPRE_Solver solver, HYPRE_Int scale )
{
   void *amg_vdata = (void *) solver;

   return hypre_BoomerAMGSetChebyScale(amg_vdata, scale);
}
/*--------------------------------------------------------------------------
 * HYPRE_BoomerAMGSetChebyVariant
 *--------------------------------------------------------------------------*/

/* Thin wrapper: forward the Chebyshev variant selector to the internal
 * BoomerAMG setter. */
HYPRE_Int
HYPRE_BoomerAMGSetChebyVariant( HYPRE_Solver solver, HYPRE_Int variant )
{
   void *amg_vdata = (void *) solver;

   return hypre_BoomerAMGSetChebyVariant(amg_vdata, variant);
}
/*--------------------------------------------------------------------------
 * HYPRE_BoomerAMGSetChebyEigEst
 *--------------------------------------------------------------------------*/

/* Thin wrapper: forward the Chebyshev eigenvalue-estimation setting to
 * the internal BoomerAMG setter. */
HYPRE_Int
HYPRE_BoomerAMGSetChebyEigEst( HYPRE_Solver solver, HYPRE_Int eig_est )
{
   void *amg_vdata = (void *) solver;

   return hypre_BoomerAMGSetChebyEigEst(amg_vdata, eig_est);
}
/*--------------------------------------------------------------------------
 * HYPRE_BoomerAMGSetAdditive, HYPRE_BoomerAMGGetAdditive
 *--------------------------------------------------------------------------*/

/* Thin wrapper: forward the additive-cycle start level to the internal
 * BoomerAMG setter. */
HYPRE_Int
HYPRE_BoomerAMGSetAdditive( HYPRE_Solver solver, HYPRE_Int additive )
{
   void *amg_vdata = (void *) solver;

   return hypre_BoomerAMGSetAdditive(amg_vdata, additive);
}
/* Thin wrapper: read back the additive-cycle start level via the
 * internal BoomerAMG getter. */
HYPRE_Int
HYPRE_BoomerAMGGetAdditive( HYPRE_Solver solver, HYPRE_Int *additive )
{
   void *amg_vdata = (void *) solver;

   return hypre_BoomerAMGGetAdditive(amg_vdata, additive);
}
/*--------------------------------------------------------------------------
 * HYPRE_BoomerAMGSetMultAdditive, HYPRE_BoomerAMGGetMultAdditive
 *--------------------------------------------------------------------------*/

/* Thin wrapper: forward the mult-additive start level to the internal
 * BoomerAMG setter. */
HYPRE_Int
HYPRE_BoomerAMGSetMultAdditive( HYPRE_Solver solver, HYPRE_Int mult_additive )
{
   void *amg_vdata = (void *) solver;

   return hypre_BoomerAMGSetMultAdditive(amg_vdata, mult_additive);
}
/* Thin wrapper: read back the mult-additive start level via the
 * internal BoomerAMG getter. */
HYPRE_Int
HYPRE_BoomerAMGGetMultAdditive( HYPRE_Solver solver, HYPRE_Int *mult_additive )
{
   void *amg_vdata = (void *) solver;

   return hypre_BoomerAMGGetMultAdditive(amg_vdata, mult_additive);
}
/*--------------------------------------------------------------------------
 * HYPRE_BoomerAMGSetSimple, HYPRE_BoomerAMGGetSimple
 *--------------------------------------------------------------------------*/

/* Thin wrapper: forward the simplified mult-additive start level to the
 * internal BoomerAMG setter. */
HYPRE_Int
HYPRE_BoomerAMGSetSimple( HYPRE_Solver solver, HYPRE_Int simple )
{
   void *amg_vdata = (void *) solver;

   return hypre_BoomerAMGSetSimple(amg_vdata, simple);
}
/* Thin wrapper: read back the simplified mult-additive start level via
 * the internal BoomerAMG getter. */
HYPRE_Int
HYPRE_BoomerAMGGetSimple( HYPRE_Solver solver, HYPRE_Int *simple )
{
   void *amg_vdata = (void *) solver;

   return hypre_BoomerAMGGetSimple(amg_vdata, simple);
}
/*--------------------------------------------------------------------------
 * HYPRE_BoomerAMGSetAddLastLvl
 *--------------------------------------------------------------------------*/

/* Thin wrapper: forward the last level of the additive/simple cycle to
 * the internal BoomerAMG setter. */
HYPRE_Int
HYPRE_BoomerAMGSetAddLastLvl( HYPRE_Solver solver, HYPRE_Int add_last_lvl )
{
   void *amg_vdata = (void *) solver;

   return hypre_BoomerAMGSetAddLastLvl(amg_vdata, add_last_lvl);
}
/*--------------------------------------------------------------------------
 * HYPRE_BoomerAMGSetNonGalerkinTol
 *--------------------------------------------------------------------------*/

/* Thin wrapper: forward the global non-Galerkin drop tolerance to the
 * internal BoomerAMG setter. */
HYPRE_Int
HYPRE_BoomerAMGSetNonGalerkinTol( HYPRE_Solver solver,
                                  HYPRE_Real   nongalerkin_tol )
{
   void *amg_vdata = (void *) solver;

   return hypre_BoomerAMGSetNonGalerkinTol(amg_vdata, nongalerkin_tol);
}
/*--------------------------------------------------------------------------
 * HYPRE_BoomerAMGSetLevelNonGalerkinTol
 *--------------------------------------------------------------------------*/

/* Thin wrapper: forward a per-level non-Galerkin drop tolerance to the
 * internal BoomerAMG setter. */
HYPRE_Int
HYPRE_BoomerAMGSetLevelNonGalerkinTol( HYPRE_Solver solver,
                                       HYPRE_Real   nongalerkin_tol,
                                       HYPRE_Int    level )
{
   void *amg_vdata = (void *) solver;

   return hypre_BoomerAMGSetLevelNonGalerkinTol(amg_vdata, nongalerkin_tol,
                                                level);
}
/*--------------------------------------------------------------------------
 * HYPRE_BoomerAMGSetNonGalerkTol
 *--------------------------------------------------------------------------*/

/* Thin wrapper (old-style non-Galerkin interface): forward an array of
 * drop tolerances and its length to the internal BoomerAMG setter. */
HYPRE_Int
HYPRE_BoomerAMGSetNonGalerkTol( HYPRE_Solver solver,
                                HYPRE_Int    nongalerk_num_tol,
                                HYPRE_Real  *nongalerk_tol )
{
   void *amg_vdata = (void *) solver;

   return hypre_BoomerAMGSetNonGalerkTol(amg_vdata, nongalerk_num_tol,
                                         nongalerk_tol);
}
/*--------------------------------------------------------------------------
 * HYPRE_BoomerAMGSetRAP2
 *--------------------------------------------------------------------------*/

/* Thin wrapper: forward the two-matrix RAP flag to the internal
 * BoomerAMG setter. */
HYPRE_Int
HYPRE_BoomerAMGSetRAP2( HYPRE_Solver solver, HYPRE_Int rap2 )
{
   void *amg_vdata = (void *) solver;

   return hypre_BoomerAMGSetRAP2(amg_vdata, rap2);
}
/*--------------------------------------------------------------------------
 * HYPRE_BoomerAMGSetKeepTranspose
 *--------------------------------------------------------------------------*/

/* Thin wrapper: forward the keep-transpose flag to the internal
 * BoomerAMG setter. */
HYPRE_Int
HYPRE_BoomerAMGSetKeepTranspose( HYPRE_Solver solver, HYPRE_Int keepTranspose )
{
   void *amg_vdata = (void *) solver;

   return hypre_BoomerAMGSetKeepTranspose(amg_vdata, keepTranspose);
}
| 45,262 | 35.297514 | 99 | c |
AMG | AMG-master/parcsr_ls/HYPRE_parcsr_gmres.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include "_hypre_parcsr_ls.h"
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRGMRESCreate
 *--------------------------------------------------------------------------*/

/* Create a GMRES solver object wired to the ParCSR matrix/vector kernels.
 * Flags an argument error and returns early when 'solver' is NULL;
 * otherwise stores the new solver handle in *solver and returns
 * hypre_error_flag. */
HYPRE_Int
HYPRE_ParCSRGMRESCreate( MPI_Comm comm, HYPRE_Solver *solver )
{
   hypre_GMRESFunctions *fcns;

   if (solver == NULL)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   /* Table of ParCSR-specific callbacks consumed by the generic GMRES
    * driver (allocation, matvec, inner product, vector ops, ...). */
   fcns = hypre_GMRESFunctionsCreate(
      hypre_CAlloc, hypre_ParKrylovFree, hypre_ParKrylovCommInfo,
      hypre_ParKrylovCreateVector,
      hypre_ParKrylovCreateVectorArray,
      hypre_ParKrylovDestroyVector, hypre_ParKrylovMatvecCreate,
      hypre_ParKrylovMatvec, hypre_ParKrylovMatvecDestroy,
      hypre_ParKrylovInnerProd, hypre_ParKrylovCopyVector,
      hypre_ParKrylovClearVector,
      hypre_ParKrylovScaleVector, hypre_ParKrylovAxpy,
      hypre_ParKrylovIdentitySetup, hypre_ParKrylovIdentity );

   *solver = (HYPRE_Solver) hypre_GMRESCreate(fcns);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRGMRESDestroy
 *--------------------------------------------------------------------------*/

/* Thin wrapper: release the GMRES solver via the generic destructor. */
HYPRE_Int
HYPRE_ParCSRGMRESDestroy( HYPRE_Solver solver )
{
   void *gmres_vdata = (void *) solver;

   return hypre_GMRESDestroy(gmres_vdata);
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRGMRESSetup
 *--------------------------------------------------------------------------*/

/* Thin wrapper: set up GMRES for a ParCSR system by reinterpreting the
 * ParCSR objects as the generic Krylov matrix/vector types. */
HYPRE_Int
HYPRE_ParCSRGMRESSetup( HYPRE_Solver solver,
                        HYPRE_ParCSRMatrix A,
                        HYPRE_ParVector b,
                        HYPRE_ParVector x )
{
   return HYPRE_GMRESSetup(solver, (HYPRE_Matrix) A,
                           (HYPRE_Vector) b, (HYPRE_Vector) x);
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRGMRESSolve
 *--------------------------------------------------------------------------*/

/* Thin wrapper: solve the ParCSR system with GMRES by reinterpreting the
 * ParCSR objects as the generic Krylov matrix/vector types. */
HYPRE_Int
HYPRE_ParCSRGMRESSolve( HYPRE_Solver solver,
                        HYPRE_ParCSRMatrix A,
                        HYPRE_ParVector b,
                        HYPRE_ParVector x )
{
   return HYPRE_GMRESSolve(solver, (HYPRE_Matrix) A,
                           (HYPRE_Vector) b, (HYPRE_Vector) x);
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRGMRESSetKDim
 *--------------------------------------------------------------------------*/

/* Thin wrapper: forward the Krylov subspace dimension to the generic
 * GMRES setter. */
HYPRE_Int
HYPRE_ParCSRGMRESSetKDim( HYPRE_Solver solver, HYPRE_Int k_dim )
{
   return HYPRE_GMRESSetKDim(solver, k_dim);
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRGMRESSetTol
 *--------------------------------------------------------------------------*/

/* Thin wrapper: forward the relative convergence tolerance to the
 * generic GMRES setter. */
HYPRE_Int
HYPRE_ParCSRGMRESSetTol( HYPRE_Solver solver, HYPRE_Real tol )
{
   return HYPRE_GMRESSetTol(solver, tol);
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRGMRESSetAbsoluteTol
 *--------------------------------------------------------------------------*/

/* Thin wrapper: forward the absolute convergence tolerance to the
 * generic GMRES setter. */
HYPRE_Int
HYPRE_ParCSRGMRESSetAbsoluteTol( HYPRE_Solver solver, HYPRE_Real a_tol )
{
   return HYPRE_GMRESSetAbsoluteTol(solver, a_tol);
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRGMRESSetMinIter
 *--------------------------------------------------------------------------*/

/* Thin wrapper: forward the minimum iteration count to the generic
 * GMRES setter. */
HYPRE_Int
HYPRE_ParCSRGMRESSetMinIter( HYPRE_Solver solver, HYPRE_Int min_iter )
{
   return HYPRE_GMRESSetMinIter(solver, min_iter);
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRGMRESSetMaxIter
 *--------------------------------------------------------------------------*/

/* Thin wrapper: forward the maximum iteration count to the generic
 * GMRES setter. */
HYPRE_Int
HYPRE_ParCSRGMRESSetMaxIter( HYPRE_Solver solver, HYPRE_Int max_iter )
{
   return HYPRE_GMRESSetMaxIter(solver, max_iter);
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRGMRESSetStopCrit - OBSOLETE
 *--------------------------------------------------------------------------*/

/* Thin wrapper (kept for backward compatibility, marked obsolete):
 * forward the stopping-criterion selector to the generic GMRES setter. */
HYPRE_Int
HYPRE_ParCSRGMRESSetStopCrit( HYPRE_Solver solver, HYPRE_Int stop_crit )
{
   return HYPRE_GMRESSetStopCrit(solver, stop_crit);
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRGMRESSetPrecond
 *--------------------------------------------------------------------------*/

/* Thin wrapper: register a preconditioner with GMRES, casting the
 * ParCSR-typed solve/setup callbacks to the generic solver-function type. */
HYPRE_Int
HYPRE_ParCSRGMRESSetPrecond( HYPRE_Solver solver,
                             HYPRE_PtrToParSolverFcn precond,
                             HYPRE_PtrToParSolverFcn precond_setup,
                             HYPRE_Solver precond_solver )
{
   HYPRE_PtrToSolverFcn solve_fcn = (HYPRE_PtrToSolverFcn) precond;
   HYPRE_PtrToSolverFcn setup_fcn = (HYPRE_PtrToSolverFcn) precond_setup;

   return HYPRE_GMRESSetPrecond(solver, solve_fcn, setup_fcn, precond_solver);
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRGMRESGetPrecond
 *--------------------------------------------------------------------------*/

/* Thin wrapper: fetch the registered preconditioner handle from the
 * generic GMRES object. */
HYPRE_Int
HYPRE_ParCSRGMRESGetPrecond( HYPRE_Solver solver,
                             HYPRE_Solver *precond_data_ptr )
{
   return HYPRE_GMRESGetPrecond(solver, precond_data_ptr);
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRGMRESSetLogging
 *--------------------------------------------------------------------------*/

/* Thin wrapper: forward the logging level to the generic GMRES setter. */
HYPRE_Int
HYPRE_ParCSRGMRESSetLogging( HYPRE_Solver solver, HYPRE_Int logging )
{
   return HYPRE_GMRESSetLogging(solver, logging);
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRGMRESSetPrintLevel
 *--------------------------------------------------------------------------*/

/* Thin wrapper: forward the print verbosity to the generic GMRES setter. */
HYPRE_Int
HYPRE_ParCSRGMRESSetPrintLevel( HYPRE_Solver solver, HYPRE_Int print_level )
{
   return HYPRE_GMRESSetPrintLevel(solver, print_level);
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRGMRESGetNumIterations
 *--------------------------------------------------------------------------*/

/* Thin wrapper: read the iteration count from the generic GMRES object. */
HYPRE_Int
HYPRE_ParCSRGMRESGetNumIterations( HYPRE_Solver solver,
                                   HYPRE_Int *num_iterations )
{
   return HYPRE_GMRESGetNumIterations(solver, num_iterations);
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRGMRESGetFinalRelativeResidualNorm
 *--------------------------------------------------------------------------*/

/* Thin wrapper: read the final relative residual norm from the generic
 * GMRES object. */
HYPRE_Int
HYPRE_ParCSRGMRESGetFinalRelativeResidualNorm( HYPRE_Solver solver,
                                               HYPRE_Real *norm )
{
   return HYPRE_GMRESGetFinalRelativeResidualNorm(solver, norm);
}
| 8,273 | 35.449339 | 81 | c |
AMG | AMG-master/parcsr_ls/HYPRE_parcsr_ls.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#ifndef HYPRE_PARCSR_LS_HEADER
#define HYPRE_PARCSR_LS_HEADER
#include "HYPRE.h"
#include "HYPRE_utilities.h"
#include "HYPRE_seq_mv.h"
#include "HYPRE_parcsr_mv.h"
#include "HYPRE_IJ_mv.h"
#ifdef __cplusplus
extern "C" {
#endif
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/**
* @name ParCSR Solvers
*
 * These solvers use matrix/vector storage schemes that are tailored
* for general sparse matrix systems.
*
* @memo Linear solvers for sparse matrix systems
**/
/*@{*/
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/**
* @name ParCSR Solvers
**/
/*@{*/
struct hypre_Solver_struct;
/**
* The solver object.
**/
#ifndef HYPRE_SOLVER_STRUCT
#define HYPRE_SOLVER_STRUCT
struct hypre_Solver_struct;
typedef struct hypre_Solver_struct *HYPRE_Solver;
#endif
typedef HYPRE_Int (*HYPRE_PtrToParSolverFcn)(HYPRE_Solver,
HYPRE_ParCSRMatrix,
HYPRE_ParVector,
HYPRE_ParVector);
#ifndef HYPRE_MODIFYPC
#define HYPRE_MODIFYPC
typedef HYPRE_Int (*HYPRE_PtrToModifyPCFcn)(HYPRE_Solver,
HYPRE_Int,
HYPRE_Real);
#endif
/*@}*/
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/**
* @name ParCSR BoomerAMG Solver and Preconditioner
*
* Parallel unstructured algebraic multigrid solver and preconditioner
**/
/*@{*/
/**
* Create a solver object.
**/
HYPRE_Int HYPRE_BoomerAMGCreate(HYPRE_Solver *solver);
/**
* Destroy a solver object.
**/
HYPRE_Int HYPRE_BoomerAMGDestroy(HYPRE_Solver solver);
/**
* Set up the BoomerAMG solver or preconditioner.
* If used as a preconditioner, this function should be passed
* to the iterative solver {\tt SetPrecond} function.
*
* @param solver [IN] object to be set up.
* @param A [IN] ParCSR matrix used to construct the solver/preconditioner.
* @param b Ignored by this function.
* @param x Ignored by this function.
**/
HYPRE_Int HYPRE_BoomerAMGSetup(HYPRE_Solver solver,
HYPRE_ParCSRMatrix A,
HYPRE_ParVector b,
HYPRE_ParVector x);
/**
* Solve the system or apply AMG as a preconditioner.
* If used as a preconditioner, this function should be passed
* to the iterative solver {\tt SetPrecond} function.
*
* @param solver [IN] solver or preconditioner object to be applied.
* @param A [IN] ParCSR matrix, matrix of the linear system to be solved
* @param b [IN] right hand side of the linear system to be solved
* @param x [OUT] approximated solution of the linear system to be solved
**/
HYPRE_Int HYPRE_BoomerAMGSolve(HYPRE_Solver solver,
HYPRE_ParCSRMatrix A,
HYPRE_ParVector b,
HYPRE_ParVector x);
/**
* Solve the transpose system $A^T x = b$ or apply AMG as a preconditioner
 * to the transpose system. Note that this function should only be used
* when preconditioning CGNR with BoomerAMG. It can only be used with
* Jacobi smoothing (relax_type 0 or 7) and without CF smoothing,
* i.e relax_order needs to be set to 0.
* If used as a preconditioner, this function should be passed
* to the iterative solver {\tt SetPrecond} function.
*
* @param solver [IN] solver or preconditioner object to be applied.
* @param A [IN] ParCSR matrix
* @param b [IN] right hand side of the linear system to be solved
* @param x [OUT] approximated solution of the linear system to be solved
**/
HYPRE_Int HYPRE_BoomerAMGSolveT(HYPRE_Solver solver,
HYPRE_ParCSRMatrix A,
HYPRE_ParVector b,
HYPRE_ParVector x);
/**
* Recovers old default for coarsening and interpolation, i.e Falgout
* coarsening and untruncated modified classical interpolation.
* This option might be preferred for 2 dimensional problems.
**/
HYPRE_Int HYPRE_BoomerAMGSetOldDefault(HYPRE_Solver solver);
/**
* Returns the residual.
**/
HYPRE_Int HYPRE_BoomerAMGGetResidual(HYPRE_Solver solver,
HYPRE_ParVector *residual);
/**
* Returns the number of iterations taken.
**/
HYPRE_Int HYPRE_BoomerAMGGetNumIterations(HYPRE_Solver solver,
HYPRE_Int *num_iterations);
/**
* Returns the norm of the final relative residual.
**/
HYPRE_Int HYPRE_BoomerAMGGetFinalRelativeResidualNorm(HYPRE_Solver solver,
HYPRE_Real *rel_resid_norm);
/**
* (Optional) Sets the size of the system of PDEs, if using the systems version.
* The default is 1, i.e. a scalar system.
**/
HYPRE_Int HYPRE_BoomerAMGSetNumFunctions(HYPRE_Solver solver,
HYPRE_Int num_functions);
/**
* (Optional) Sets the mapping that assigns the function to each variable,
* if using the systems version. If no assignment is made and the number of
* functions is k > 1, the mapping generated is (0,1,...,k-1,0,1,...,k-1,...).
**/
HYPRE_Int HYPRE_BoomerAMGSetDofFunc(HYPRE_Solver solver,
HYPRE_Int *dof_func);
/**
* (Optional) Set the convergence tolerance, if BoomerAMG is used
* as a solver. If it is used as a preconditioner, it should be set to 0.
* The default is 1.e-7.
**/
HYPRE_Int HYPRE_BoomerAMGSetTol(HYPRE_Solver solver,
HYPRE_Real tol);
/**
* (Optional) Sets maximum number of iterations, if BoomerAMG is used
* as a solver. If it is used as a preconditioner, it should be set to 1.
* The default is 20.
**/
HYPRE_Int HYPRE_BoomerAMGSetMaxIter(HYPRE_Solver solver,
HYPRE_Int max_iter);
/**
* (Optional)
**/
HYPRE_Int HYPRE_BoomerAMGSetMinIter(HYPRE_Solver solver,
HYPRE_Int min_iter);
/**
* (Optional) Sets maximum size of coarsest grid.
* The default is 9.
**/
HYPRE_Int HYPRE_BoomerAMGSetMaxCoarseSize(HYPRE_Solver solver,
HYPRE_Int max_coarse_size);
/**
* (Optional) Sets minimum size of coarsest grid.
* The default is 1.
**/
HYPRE_Int HYPRE_BoomerAMGSetMinCoarseSize(HYPRE_Solver solver,
HYPRE_Int min_coarse_size);
/**
* (Optional) Sets maximum number of multigrid levels.
* The default is 25.
**/
HYPRE_Int HYPRE_BoomerAMGSetMaxLevels(HYPRE_Solver solver,
HYPRE_Int max_levels);
/**
* (Optional) Sets AMG strength threshold. The default is 0.25.
* For 2d Laplace operators, 0.25 is a good value, for 3d Laplace
* operators, 0.5 or 0.6 is a better value. For elasticity problems,
* a large strength threshold, such as 0.9, is often better.
**/
HYPRE_Int HYPRE_BoomerAMGSetStrongThreshold(HYPRE_Solver solver,
HYPRE_Real strong_threshold);
/**
* (Optional) Defines the largest strength threshold for which
* the strength matrix S uses the communication package of the operator A.
* If the strength threshold is larger than this values,
* a communication package is generated for S. This can save
* memory and decrease the amount of data that needs to be communicated,
* if S is substantially sparser than A.
* The default is 1.0.
**/
HYPRE_Int HYPRE_BoomerAMGSetSCommPkgSwitch(HYPRE_Solver solver,
HYPRE_Real S_commpkg_switch);
/**
* (Optional) Sets a parameter to modify the definition of strength for
* diagonal dominant portions of the matrix. The default is 0.9.
* If max\_row\_sum is 1, no checking for diagonally dominant rows is
* performed.
**/
HYPRE_Int HYPRE_BoomerAMGSetMaxRowSum(HYPRE_Solver solver,
HYPRE_Real max_row_sum);
/**
* (Optional) Defines which parallel coarsening algorithm is used.
* There are the following options for coarsen\_type:
*
* \begin{tabular}{|c|l|} \hline
* 0 & CLJP-coarsening (a parallel coarsening algorithm using independent sets. \\
* 1 & classical Ruge-Stueben coarsening on each processor, no boundary treatment (not recommended!) \\
* 3 & classical Ruge-Stueben coarsening on each processor, followed by a third pass, which adds coarse \\
* & points on the boundaries \\
* 6 & Falgout coarsening (uses 1 first, followed by CLJP using the interior coarse points \\
* & generated by 1 as its first independent set) \\
* 7 & CLJP-coarsening (using a fixed random vector, for debugging purposes only) \\
* 8 & PMIS-coarsening (a parallel coarsening algorithm using independent sets, generating \\
* & lower complexities than CLJP, might also lead to slower convergence) \\
* 9 & PMIS-coarsening (using a fixed random vector, for debugging purposes only) \\
* 10 & HMIS-coarsening (uses one pass Ruge-Stueben on each processor independently, followed \\
* & by PMIS using the interior C-points generated as its first independent set) \\
* 11 & one-pass Ruge-Stueben coarsening on each processor, no boundary treatment (not recommended!) \\
* 21 & CGC coarsening by M. Griebel, B. Metsch and A. Schweitzer \\
* 22 & CGC-E coarsening by M. Griebel, B. Metsch and A.Schweitzer \\
* \hline
* \end{tabular}
*
* The default is 6.
**/
HYPRE_Int HYPRE_BoomerAMGSetCoarsenType(HYPRE_Solver solver,
HYPRE_Int coarsen_type);
/**
* (Optional) Defines the non-Galerkin drop-tolerance
* for sparsifying coarse grid operators and thus reducing communication.
* Value specified here is set on all levels.
* This routine should be used before HYPRE_BoomerAMGSetLevelNonGalerkinTol, which
* then can be used to change individual levels if desired
**/
HYPRE_Int HYPRE_BoomerAMGSetNonGalerkinTol (HYPRE_Solver solver,
HYPRE_Real nongalerkin_tol);
/**
* (Optional) Defines the level specific non-Galerkin drop-tolerances
* for sparsifying coarse grid operators and thus reducing communication.
* A drop-tolerance of 0.0 means to skip doing non-Galerkin on that
* level. The maximum drop tolerance for a level is 1.0, although
* much smaller values such as 0.03 or 0.01 are recommended.
*
* Note that if the user wants to set a specific tolerance on all levels,
 * HYPRE_BoomerAMGSetNonGalerkinTol should be used. Individual levels
* can then be changed using this routine.
*
* In general, it is safer to drop more aggressively on coarser levels.
* For instance, one could use 0.0 on the finest level, 0.01 on the second level and
* then using 0.05 on all remaining levels. The best way to achieve this is
* to set 0.05 on all levels with HYPRE_BoomerAMGSetNonGalerkinTol and then
* change the tolerance on level 0 to 0.0 and the tolerance on level 1 to 0.01
* with HYPRE_BoomerAMGSetLevelNonGalerkinTol.
* Like many AMG parameters, these drop tolerances can be tuned. It is also common
* to delay the start of the non-Galerkin process further to a later level than
* level 1.
*
* @param solver [IN] solver or preconditioner object to be applied.
* @param nongalerkin_tol [IN] level specific drop tolerance
* @param level [IN] level on which drop tolerance is used
**/
HYPRE_Int HYPRE_BoomerAMGSetLevelNonGalerkinTol (HYPRE_Solver solver,
HYPRE_Real nongalerkin_tol,
HYPRE_Int level);
/*
* (Optional) Defines the non-Galerkin drop-tolerance (old version)
**/
HYPRE_Int HYPRE_BoomerAMGSetNonGalerkTol (HYPRE_Solver solver,
HYPRE_Int nongalerk_num_tol,
HYPRE_Real *nongalerk_tol);
/**
* (Optional) Defines whether local or global measures are used.
**/
HYPRE_Int HYPRE_BoomerAMGSetMeasureType(HYPRE_Solver solver,
HYPRE_Int measure_type);
/**
* (Optional) Defines the number of levels of aggressive coarsening.
* The default is 0, i.e. no aggressive coarsening.
**/
HYPRE_Int HYPRE_BoomerAMGSetAggNumLevels(HYPRE_Solver solver,
HYPRE_Int agg_num_levels);
/**
* (Optional) Defines the degree of aggressive coarsening.
* The default is 1. Larger numbers lead to less aggressive
* coarsening.
**/
HYPRE_Int HYPRE_BoomerAMGSetNumPaths(HYPRE_Solver solver,
HYPRE_Int num_paths);
/**
 * (Optional) Defines the number of paths for CGC-coarsening.
**/
HYPRE_Int HYPRE_BoomerAMGSetCGCIts (HYPRE_Solver solver,
HYPRE_Int its);
/**
* (Optional) Sets whether to use the nodal systems coarsening.
* Should be used for linear systems generated from systems of PDEs.
* The default is 0 (unknown-based coarsening,
* only coarsens within same function).
* For the remaining options a nodal matrix is generated by
* applying a norm to the nodal blocks and applying the coarsening
* algorithm to this matrix.
* \begin{tabular}{|c|l|} \hline
* 1 & Frobenius norm \\
* 2 & sum of absolute values of elements in each block \\
* 3 & largest element in each block (not absolute value)\\
* 4 & row-sum norm \\
* 6 & sum of all values in each block \\
* \hline
* \end{tabular}
**/
HYPRE_Int HYPRE_BoomerAMGSetNodal(HYPRE_Solver solver,
HYPRE_Int nodal);
/**
* (Optional) Sets whether to give special treatment to diagonal elements in
* the nodal systems version.
* The default is 0.
* If set to 1, the diagonal entry is set to the negative sum of all off
* diagonal entries.
* If set to 2, the signs of all diagonal entries are inverted.
*/
HYPRE_Int HYPRE_BoomerAMGSetNodalDiag(HYPRE_Solver solver,
HYPRE_Int nodal_diag);
/**
* (Optional) Defines which parallel interpolation operator is used.
* There are the following options for interp\_type:
*
* \begin{tabular}{|c|l|} \hline
* 0 & classical modified interpolation \\
* 1 & LS interpolation (for use with GSMG) \\
* 2 & classical modified interpolation for hyperbolic PDEs \\
* 3 & direct interpolation (with separation of weights) \\
* 4 & multipass interpolation \\
* 5 & multipass interpolation (with separation of weights) \\
* 6 & extended+i interpolation \\
* 7 & extended+i (if no common C neighbor) interpolation \\
* 8 & standard interpolation \\
* 9 & standard interpolation (with separation of weights) \\
* 10 & classical block interpolation (for use with nodal systems version only) \\
* 11 & classical block interpolation (for use with nodal systems version only) \\
* & with diagonalized diagonal blocks \\
* 12 & FF interpolation \\
* 13 & FF1 interpolation \\
* 14 & extended interpolation \\
* \hline
* \end{tabular}
*
* The default is 0.
**/
HYPRE_Int HYPRE_BoomerAMGSetInterpType(HYPRE_Solver solver,
HYPRE_Int interp_type);
/**
* (Optional) Defines a truncation factor for the interpolation.
* The default is 0.
**/
HYPRE_Int HYPRE_BoomerAMGSetTruncFactor(HYPRE_Solver solver,
HYPRE_Real trunc_factor);
/**
* (Optional) Defines the maximal number of elements per row for the interpolation.
* The default is 0.
**/
HYPRE_Int HYPRE_BoomerAMGSetPMaxElmts(HYPRE_Solver solver,
HYPRE_Int P_max_elmts);
/**
* (Optional) Defines whether separation of weights is used
* when defining strength for standard interpolation or
* multipass interpolation.
* Default: 0, i.e. no separation of weights used.
**/
HYPRE_Int HYPRE_BoomerAMGSetSepWeight(HYPRE_Solver solver,
HYPRE_Int sep_weight);
/**
* (Optional) Defines the interpolation used on levels of aggressive coarsening
* The default is 4, i.e. multipass interpolation.
* The following options exist:
*
* \begin{tabular}{|c|l|} \hline
* 1 & 2-stage extended+i interpolation \\
* 2 & 2-stage standard interpolation \\
* 3 & 2-stage extended interpolation \\
* 4 & multipass interpolation \\
* \hline
* \end{tabular}
**/
HYPRE_Int HYPRE_BoomerAMGSetAggInterpType(HYPRE_Solver solver,
HYPRE_Int agg_interp_type);
/**
* (Optional) Defines the truncation factor for the
* interpolation used for aggressive coarsening.
* The default is 0.
**/
HYPRE_Int HYPRE_BoomerAMGSetAggTruncFactor(HYPRE_Solver solver,
HYPRE_Real agg_trunc_factor);
/**
* (Optional) Defines the truncation factor for the
* matrices P1 and P2 which are used to build 2-stage interpolation.
* The default is 0.
**/
HYPRE_Int HYPRE_BoomerAMGSetAggP12TruncFactor(HYPRE_Solver solver,
HYPRE_Real agg_P12_trunc_factor);
/**
* (Optional) Defines the maximal number of elements per row for the
* interpolation used for aggressive coarsening.
* The default is 0.
**/
HYPRE_Int HYPRE_BoomerAMGSetAggPMaxElmts(HYPRE_Solver solver,
HYPRE_Int agg_P_max_elmts);
/**
* (Optional) Defines the maximal number of elements per row for the
* matrices P1 and P2 which are used to build 2-stage interpolation.
* The default is 0.
**/
HYPRE_Int HYPRE_BoomerAMGSetAggP12MaxElmts(HYPRE_Solver solver,
HYPRE_Int agg_P12_max_elmts);
/**
* (Optional) Allows the user to incorporate additional vectors
* into the interpolation for systems AMG, e.g. rigid body modes for
* linear elasticity problems.
* This can only be used in context with nodal coarsening and still
* requires the user to choose an interpolation.
**/
HYPRE_Int HYPRE_BoomerAMGSetInterpVectors (HYPRE_Solver solver ,
HYPRE_Int num_vectors ,
HYPRE_ParVector *interp_vectors );
/**
* (Optional) Defines the interpolation variant used for
* HYPRE_BoomerAMGSetInterpVectors:
* \begin{tabular}{|c|l|} \hline
* 1 & GM approach 1 \\
* 2 & GM approach 2 (to be preferred over 1) \\
* 3 & LN approach \\
* \hline
* \end{tabular}
**/
HYPRE_Int HYPRE_BoomerAMGSetInterpVecVariant (HYPRE_Solver solver,
HYPRE_Int var );
/**
* (Optional) Defines the maximal elements per row for Q, the additional
* columns added to the original interpolation matrix P, to reduce complexity.
* The default is no truncation.
**/
HYPRE_Int HYPRE_BoomerAMGSetInterpVecQMax (HYPRE_Solver solver,
HYPRE_Int q_max );
/**
* (Optional) Defines a truncation factor for Q, the additional
* columns added to the original interpolation matrix P, to reduce complexity.
* The default is no truncation.
**/
HYPRE_Int HYPRE_BoomerAMGSetInterpVecAbsQTrunc (HYPRE_Solver solver,
HYPRE_Real q_trunc );
/**
* (Optional) Specifies the use of GSMG - geometrically smooth
* coarsening and interpolation. Currently any nonzero value for
* gsmg will lead to the use of GSMG.
* The default is 0, i.e. (GSMG is not used)
**/
HYPRE_Int HYPRE_BoomerAMGSetGSMG(HYPRE_Solver solver,
HYPRE_Int gsmg);
/**
* (Optional) Defines the number of sample vectors used in GSMG
* or LS interpolation.
**/
HYPRE_Int HYPRE_BoomerAMGSetNumSamples(HYPRE_Solver solver,
HYPRE_Int num_samples);
/**
* (Optional) Defines the type of cycle.
* For a V-cycle, set cycle\_type to 1, for a W-cycle
* set cycle\_type to 2. The default is 1.
**/
HYPRE_Int HYPRE_BoomerAMGSetCycleType(HYPRE_Solver solver,
HYPRE_Int cycle_type);
/**
* (Optional) Defines use of an additive V(1,1)-cycle using the
* classical additive method starting at level 'addlvl'.
* The multiplicative approach is used on levels 0, ...'addlvl+1'.
* 'addlvl' needs to be > -1 for this to have an effect.
* Can only be used with weighted Jacobi and l1-Jacobi(default).
*
* Can only be used when AMG is used as a preconditioner !!!
**/
HYPRE_Int HYPRE_BoomerAMGSetAdditive(HYPRE_Solver solver,
HYPRE_Int addlvl);
/**
* (Optional) Defines use of an additive V(1,1)-cycle using the
* mult-additive method starting at level 'addlvl'.
* The multiplicative approach is used on levels 0, ...'addlvl+1'.
* 'addlvl' needs to be > -1 for this to have an effect.
* Can only be used with weighted Jacobi and l1-Jacobi(default).
*
* Can only be used when AMG is used as a preconditioner !!!
**/
HYPRE_Int HYPRE_BoomerAMGSetMultAdditive(HYPRE_Solver solver,
HYPRE_Int addlvl);
/**
* (Optional) Defines use of an additive V(1,1)-cycle using the
* simplified mult-additive method starting at level 'addlvl'.
* The multiplicative approach is used on levels 0, ...'addlvl+1'.
* 'addlvl' needs to be > -1 for this to have an effect.
* Can only be used with weighted Jacobi and l1-Jacobi(default).
*
* Can only be used when AMG is used as a preconditioner !!!
**/
HYPRE_Int HYPRE_BoomerAMGSetSimple(HYPRE_Solver solver,
HYPRE_Int addlvl);
/**
* (Optional) Defines last level where additive, mult-additive
* or simple cycle is used.
* The multiplicative approach is used on levels > add_last_lvl.
*
* Can only be used when AMG is used as a preconditioner !!!
**/
HYPRE_Int HYPRE_BoomerAMGSetAddLastLvl(HYPRE_Solver solver,
HYPRE_Int add_last_lvl);
/**
* (Optional) Defines the truncation factor for the
* smoothed interpolation used for mult-additive or simple method.
* The default is 0.
**/
HYPRE_Int HYPRE_BoomerAMGSetMultAddTruncFactor(HYPRE_Solver solver,
HYPRE_Real add_trunc_factor);
/**
* (Optional) Defines the maximal number of elements per row for the
* smoothed interpolation used for mult-additive or simple method.
* The default is 0.
**/
HYPRE_Int HYPRE_BoomerAMGSetMultAddPMaxElmts(HYPRE_Solver solver,
HYPRE_Int add_P_max_elmts);
/**
* (Optional) Defines the relaxation type used in the (mult)additive cycle
* portion (also affects simple method.)
* The default is 18 (L1-Jacobi).
 * Currently the only other option allowed is 0 (Jacobi), which should be
 * used in combination with HYPRE_BoomerAMGSetAddRelaxWt.
**/
HYPRE_Int HYPRE_BoomerAMGSetAddRelaxType(HYPRE_Solver solver,
HYPRE_Int add_rlx_type);
/**
* (Optional) Defines the relaxation weight used for Jacobi within the
* (mult)additive or simple cycle portion.
* The default is 1.
 * The weight only affects the Jacobi method, and has no effect on L1-Jacobi.
**/
HYPRE_Int HYPRE_BoomerAMGSetAddRelaxWt(HYPRE_Solver solver,
HYPRE_Real add_rlx_wt);
/**
* (Optional) Sets maximal size for agglomeration or redundant coarse grid solve.
* When the system is smaller than this threshold, sequential AMG is used
* on process 0 or on all remaining active processes (if redundant = 1 ).
**/
HYPRE_Int HYPRE_BoomerAMGSetSeqThreshold(HYPRE_Solver solver,
HYPRE_Int seq_threshold);
/**
* (Optional) operates switch for redundancy. Needs to be used with
* HYPRE_BoomerAMGSetSeqThreshold. Default is 0, i.e. no redundancy.
**/
HYPRE_Int HYPRE_BoomerAMGSetRedundant(HYPRE_Solver solver,
HYPRE_Int redundant);
/*
* (Optional) Defines the number of sweeps for the fine and coarse grid,
* the up and down cycle.
*
* Note: This routine will be phased out!!!!
* Use HYPRE\_BoomerAMGSetNumSweeps or HYPRE\_BoomerAMGSetCycleNumSweeps instead.
**/
HYPRE_Int HYPRE_BoomerAMGSetNumGridSweeps(HYPRE_Solver solver,
HYPRE_Int *num_grid_sweeps);
/**
* (Optional) Sets the number of sweeps. On the finest level, the up and
* the down cycle the number of sweeps are set to num\_sweeps and on the
* coarsest level to 1. The default is 1.
**/
HYPRE_Int HYPRE_BoomerAMGSetNumSweeps(HYPRE_Solver solver,
HYPRE_Int num_sweeps);
/**
* (Optional) Sets the number of sweeps at a specified cycle.
* There are the following options for k:
*
* \begin{tabular}{|l|l|} \hline
* the down cycle & if k=1 \\
* the up cycle & if k=2 \\
* the coarsest level & if k=3.\\
* \hline
* \end{tabular}
**/
HYPRE_Int HYPRE_BoomerAMGSetCycleNumSweeps(HYPRE_Solver solver,
HYPRE_Int num_sweeps,
HYPRE_Int k);
/**
* (Optional) Defines which smoother is used on the fine and coarse grid,
* the up and down cycle.
*
* Note: This routine will be phased out!!!!
* Use HYPRE\_BoomerAMGSetRelaxType or HYPRE\_BoomerAMGSetCycleRelaxType instead.
**/
HYPRE_Int HYPRE_BoomerAMGSetGridRelaxType(HYPRE_Solver solver,
HYPRE_Int *grid_relax_type);
/**
* (Optional) Defines the smoother to be used. It uses the given
* smoother on the fine grid, the up and
* the down cycle and sets the solver on the coarsest level to Gaussian
* elimination (9). The default is Gauss-Seidel (3).
*
* There are the following options for relax\_type:
*
* \begin{tabular}{|c|l|} \hline
* 0 & Jacobi \\
* 1 & Gauss-Seidel, sequential (very slow!) \\
* 2 & Gauss-Seidel, interior points in parallel, boundary sequential (slow!) \\
* 3 & hybrid Gauss-Seidel or SOR, forward solve \\
* 4 & hybrid Gauss-Seidel or SOR, backward solve \\
* 5 & hybrid chaotic Gauss-Seidel (works only with OpenMP) \\
* 6 & hybrid symmetric Gauss-Seidel or SSOR \\
* 8 & $\ell_1$-scaled hybrid symmetric Gauss-Seidel\\
* 9 & Gaussian elimination (only on coarsest level) \\
* 15 & CG (warning - not a fixed smoother - may require FGMRES)\\
* 16 & Chebyshev\\
* 17 & FCF-Jacobi\\
* 18 & $\ell_1$-scaled jacobi\\
* \hline
* \end{tabular}
**/
HYPRE_Int HYPRE_BoomerAMGSetRelaxType(HYPRE_Solver solver,
HYPRE_Int relax_type);
/**
* (Optional) Defines the smoother at a given cycle.
* For options of relax\_type see
* description of HYPRE\_BoomerAMGSetRelaxType). Options for k are
*
* \begin{tabular}{|l|l|} \hline
* the down cycle & if k=1 \\
* the up cycle & if k=2 \\
* the coarsest level & if k=3. \\
* \hline
* \end{tabular}
**/
HYPRE_Int HYPRE_BoomerAMGSetCycleRelaxType(HYPRE_Solver solver,
HYPRE_Int relax_type,
HYPRE_Int k);
/**
* (Optional) Defines in which order the points are relaxed. There are
* the following options for
* relax\_order:
*
* \begin{tabular}{|c|l|} \hline
* 0 & the points are relaxed in natural or lexicographic
* order on each processor \\
* 1 & CF-relaxation is used, i.e on the fine grid and the down
* cycle the coarse points are relaxed first, \\
* & followed by the fine points; on the up cycle the F-points are relaxed
* first, followed by the C-points. \\
* & On the coarsest level, if an iterative scheme is used,
* the points are relaxed in lexicographic order. \\
* \hline
* \end{tabular}
*
* The default is 1 (CF-relaxation).
**/
HYPRE_Int HYPRE_BoomerAMGSetRelaxOrder(HYPRE_Solver solver,
HYPRE_Int relax_order);
/*
* (Optional) Defines in which order the points are relaxed.
*
* Note: This routine will be phased out!!!!
* Use HYPRE\_BoomerAMGSetRelaxOrder instead.
**/
HYPRE_Int HYPRE_BoomerAMGSetGridRelaxPoints(HYPRE_Solver solver,
HYPRE_Int **grid_relax_points);
/*
* (Optional) Defines the relaxation weight for smoothed Jacobi and hybrid SOR.
*
* Note: This routine will be phased out!!!!
* Use HYPRE\_BoomerAMGSetRelaxWt or HYPRE\_BoomerAMGSetLevelRelaxWt instead.
**/
HYPRE_Int HYPRE_BoomerAMGSetRelaxWeight(HYPRE_Solver solver,
HYPRE_Real *relax_weight);
/**
* (Optional) Defines the relaxation weight for smoothed Jacobi and hybrid SOR
* on all levels.
*
* \begin{tabular}{|l|l|} \hline
* relax\_weight > 0 & this assigns the given relaxation weight on all levels \\
* relax\_weight = 0 & the weight is determined on each level
* with the estimate $3 \over {4\|D^{-1/2}AD^{-1/2}\|}$,\\
* & where $D$ is the diagonal matrix of $A$ (this should only be used with Jacobi) \\
* relax\_weight = -k & the relaxation weight is determined with at most k CG steps
* on each level \\
* & this should only be used for symmetric positive definite problems) \\
* \hline
* \end{tabular}
*
* The default is 1.
**/
HYPRE_Int HYPRE_BoomerAMGSetRelaxWt(HYPRE_Solver solver,
HYPRE_Real relax_weight);
/**
* (Optional) Defines the relaxation weight for smoothed Jacobi and hybrid SOR
* on the user defined level. Note that the finest level is denoted 0, the
* next coarser level 1, etc. For nonpositive relax\_weight, the parameter is
* determined on the given level as described for HYPRE\_BoomerAMGSetRelaxWt.
* The default is 1.
**/
HYPRE_Int HYPRE_BoomerAMGSetLevelRelaxWt(HYPRE_Solver solver,
HYPRE_Real relax_weight,
HYPRE_Int level);
/**
* (Optional) Defines the outer relaxation weight for hybrid SOR.
* Note: This routine will be phased out!!!!
* Use HYPRE\_BoomerAMGSetOuterWt or HYPRE\_BoomerAMGSetLevelOuterWt instead.
**/
HYPRE_Int HYPRE_BoomerAMGSetOmega(HYPRE_Solver solver,
HYPRE_Real *omega);
/**
* (Optional) Defines the outer relaxation weight for hybrid SOR and SSOR
* on all levels.
*
* \begin{tabular}{|l|l|} \hline
* omega > 0 & this assigns the same outer relaxation weight omega on each level\\
* omega = -k & an outer relaxation weight is determined with at most k CG
* steps on each level \\
* & (this only makes sense for symmetric
* positive definite problems and smoothers, e.g. SSOR) \\
* \hline
* \end{tabular}
*
* The default is 1.
**/
HYPRE_Int HYPRE_BoomerAMGSetOuterWt(HYPRE_Solver solver,
HYPRE_Real omega);
/**
* (Optional) Defines the outer relaxation weight for hybrid SOR or SSOR
* on the user defined level. Note that the finest level is denoted 0, the
* next coarser level 1, etc. For nonpositive omega, the parameter is
* determined on the given level as described for HYPRE\_BoomerAMGSetOuterWt.
* The default is 1.
**/
HYPRE_Int HYPRE_BoomerAMGSetLevelOuterWt(HYPRE_Solver solver,
HYPRE_Real omega,
HYPRE_Int level);
/**
* (Optional) Defines the Order for Chebyshev smoother.
* The default is 2 (valid options are 1-4).
**/
HYPRE_Int HYPRE_BoomerAMGSetChebyOrder(HYPRE_Solver solver,
HYPRE_Int order);
/**
* (Optional) Fraction of the spectrum to use for the Chebyshev smoother.
* The default is .3 (i.e., damp on upper 30% of the spectrum).
**/
HYPRE_Int HYPRE_BoomerAMGSetChebyFraction (HYPRE_Solver solver,
HYPRE_Real ratio);
/*
* (Optional) Defines whether matrix should be scaled.
* The default is 1 (i.e., scaled).
**/
HYPRE_Int HYPRE_BoomerAMGSetChebyScale (HYPRE_Solver solver,
HYPRE_Int scale);
/*
* (Optional) Defines which polynomial variant should be used.
* The default is 0 (i.e., scaled).
**/
HYPRE_Int HYPRE_BoomerAMGSetChebyVariant (HYPRE_Solver solver,
HYPRE_Int variant);
/*
* (Optional) Defines how to estimate eigenvalues.
* The default is 10 (i.e., 10 CG iterations are used to find extreme
* eigenvalues.) If eig_est=0, the largest eigenvalue is estimated
* using Gershgorin, the smallest is set to 0.
* If eig_est is a positive number n, n iterations of CG are used to
* determine the smallest and largest eigenvalue.
**/
HYPRE_Int HYPRE_BoomerAMGSetChebyEigEst (HYPRE_Solver solver,
HYPRE_Int eig_est);
/**
* (Optional) Enables the use of more complex smoothers.
* The following options exist for smooth\_type:
*
* \begin{tabular}{|c|l|l|} \hline
* value & smoother & routines needed to set smoother parameters \\
* 6 & Schwarz smoothers & HYPRE\_BoomerAMGSetDomainType, HYPRE\_BoomerAMGSetOverlap, \\
* & & HYPRE\_BoomerAMGSetVariant, HYPRE\_BoomerAMGSetSchwarzRlxWeight \\
* 7 & Pilut & HYPRE\_BoomerAMGSetDropTol, HYPRE\_BoomerAMGSetMaxNzPerRow \\
* 8 & ParaSails & HYPRE\_BoomerAMGSetSym, HYPRE\_BoomerAMGSetLevel, \\
* & & HYPRE\_BoomerAMGSetFilter, HYPRE\_BoomerAMGSetThreshold \\
* 9 & Euclid & HYPRE\_BoomerAMGSetEuclidFile \\
* \hline
* \end{tabular}
*
* The default is 6. Also, if no smoother parameters are set via the routines mentioned in the table above,
* default values are used.
**/
HYPRE_Int HYPRE_BoomerAMGSetSmoothType(HYPRE_Solver solver,
HYPRE_Int smooth_type);
/**
* (Optional) Sets the number of levels for more complex smoothers.
* The smoothers,
* as defined by HYPRE\_BoomerAMGSetSmoothType, will be used
* on level 0 (the finest level) through level smooth\_num\_levels-1.
* The default is 0, i.e. no complex smoothers are used.
**/
HYPRE_Int HYPRE_BoomerAMGSetSmoothNumLevels(HYPRE_Solver solver,
HYPRE_Int smooth_num_levels);
/**
* (Optional) Sets the number of sweeps for more complex smoothers.
* The default is 1.
**/
HYPRE_Int HYPRE_BoomerAMGSetSmoothNumSweeps(HYPRE_Solver solver,
HYPRE_Int smooth_num_sweeps);
/**
* (Optional) Defines which variant of the Schwarz method is used.
* The following options exist for variant:
*
* \begin{tabular}{|c|l|} \hline
* 0 & hybrid multiplicative Schwarz method (no overlap across processor
* boundaries) \\
* 1 & hybrid additive Schwarz method (no overlap across processor
* boundaries) \\
* 2 & additive Schwarz method \\
* 3 & hybrid multiplicative Schwarz method (with overlap across processor
* boundaries) \\
* \hline
* \end{tabular}
*
* The default is 0.
**/
HYPRE_Int HYPRE_BoomerAMGSetVariant(HYPRE_Solver solver,
HYPRE_Int variant);
/**
* (Optional) Defines the overlap for the Schwarz method.
* The following options exist for overlap:
*
* \begin{tabular}{|c|l|} \hline
* 0 & no overlap \\
* 1 & minimal overlap (default) \\
* 2 & overlap generated by including all neighbors of domain boundaries \\
* \hline
* \end{tabular}
**/
HYPRE_Int HYPRE_BoomerAMGSetOverlap(HYPRE_Solver solver,
HYPRE_Int overlap);
/**
* (Optional) Defines the type of domain used for the Schwarz method.
* The following options exist for domain\_type:
*
* \begin{tabular}{|c|l|} \hline
* 0 & each point is a domain \\
* 1 & each node is a domain (only of interest in "systems" AMG) \\
* 2 & each domain is generated by agglomeration (default) \\
* \hline
* \end{tabular}
**/
HYPRE_Int HYPRE_BoomerAMGSetDomainType(HYPRE_Solver solver,
HYPRE_Int domain_type);
/**
* (Optional) Defines a smoothing parameter for the additive Schwarz method.
**/
HYPRE_Int HYPRE_BoomerAMGSetSchwarzRlxWeight(HYPRE_Solver solver,
HYPRE_Real schwarz_rlx_weight);
/**
* (Optional) Indicates that the aggregates may not be SPD for the Schwarz method.
* The following options exist for use\_nonsymm:
*
* \begin{tabular}{|c|l|} \hline
* 0 & assume SPD (default) \\
* 1 & assume non-symmetric \\
* \hline
* \end{tabular}
**/
HYPRE_Int HYPRE_BoomerAMGSetSchwarzUseNonSymm(HYPRE_Solver solver,
HYPRE_Int use_nonsymm);
/**
* (Optional) Defines symmetry for ParaSAILS.
* For further explanation see description of ParaSAILS.
**/
HYPRE_Int HYPRE_BoomerAMGSetSym(HYPRE_Solver solver,
HYPRE_Int sym);
/**
* (Optional) Defines number of levels for ParaSAILS.
* For further explanation see description of ParaSAILS.
**/
HYPRE_Int HYPRE_BoomerAMGSetLevel(HYPRE_Solver solver,
HYPRE_Int level);
/**
* (Optional) Defines threshold for ParaSAILS.
* For further explanation see description of ParaSAILS.
**/
HYPRE_Int HYPRE_BoomerAMGSetThreshold(HYPRE_Solver solver,
HYPRE_Real threshold);
/**
* (Optional) Defines filter for ParaSAILS.
* For further explanation see description of ParaSAILS.
**/
HYPRE_Int HYPRE_BoomerAMGSetFilter(HYPRE_Solver solver,
HYPRE_Real filter);
/**
* (Optional) Defines drop tolerance for PILUT.
* For further explanation see description of PILUT.
**/
HYPRE_Int HYPRE_BoomerAMGSetDropTol(HYPRE_Solver solver,
HYPRE_Real drop_tol);
/**
* (Optional) Defines maximal number of nonzeros for PILUT.
* For further explanation see description of PILUT.
**/
HYPRE_Int HYPRE_BoomerAMGSetMaxNzPerRow(HYPRE_Solver solver,
HYPRE_Int max_nz_per_row);
/**
* (Optional) Defines name of an input file for Euclid parameters.
* For further explanation see description of Euclid.
**/
HYPRE_Int HYPRE_BoomerAMGSetEuclidFile(HYPRE_Solver solver,
char *euclidfile);
/**
* (Optional) Defines number of levels for ILU(k) in Euclid.
* For further explanation see description of Euclid.
**/
HYPRE_Int HYPRE_BoomerAMGSetEuLevel(HYPRE_Solver solver,
HYPRE_Int eu_level);
/**
* (Optional) Defines filter for ILU(k) for Euclid.
* For further explanation see description of Euclid.
**/
HYPRE_Int HYPRE_BoomerAMGSetEuSparseA(HYPRE_Solver solver,
HYPRE_Real eu_sparse_A);
/**
* (Optional) Defines use of block jacobi ILUT for Euclid.
* For further explanation see description of Euclid.
**/
HYPRE_Int HYPRE_BoomerAMGSetEuBJ(HYPRE_Solver solver,
HYPRE_Int eu_bj);
/*
* (Optional)
**/
HYPRE_Int HYPRE_BoomerAMGSetRestriction(HYPRE_Solver solver,
HYPRE_Int restr_par);
/*
* (Optional) Name of file to which BoomerAMG will print;
* cf HYPRE\_BoomerAMGSetPrintLevel. (Presently this is ignored).
**/
HYPRE_Int HYPRE_BoomerAMGSetPrintFileName(HYPRE_Solver solver,
const char *print_file_name);
/**
* (Optional) Requests automatic printing of setup and solve information.
*
* \begin{tabular}{|c|l|} \hline
* 0 & no printout (default) \\
* 1 & print setup information \\
* 2 & print solve information \\
* 3 & print both setup and solve information \\
* \hline
* \end{tabular}
*
* Note, that if one desires to print information and uses BoomerAMG as a
* preconditioner, suggested print$\_$level is 1 to avoid excessive output,
* and use print$\_$level of solver for solve phase information.
**/
HYPRE_Int HYPRE_BoomerAMGSetPrintLevel(HYPRE_Solver solver,
HYPRE_Int print_level);
/**
* (Optional) Requests additional computations for diagnostic and similar
* data to be logged by the user. Default to 0 for do nothing. The latest
* residual will be available if logging > 1.
**/
HYPRE_Int HYPRE_BoomerAMGSetLogging(HYPRE_Solver solver,
HYPRE_Int logging);
/**
* (Optional)
**/
HYPRE_Int HYPRE_BoomerAMGSetDebugFlag(HYPRE_Solver solver,
HYPRE_Int debug_flag);
/**
* (Optional) This routine will be eliminated in the future.
**/
HYPRE_Int HYPRE_BoomerAMGInitGridRelaxation(HYPRE_Int **num_grid_sweeps_ptr,
HYPRE_Int **grid_relax_type_ptr,
HYPRE_Int ***grid_relax_points_ptr,
HYPRE_Int coarsen_type,
HYPRE_Real **relax_weights_ptr,
HYPRE_Int max_levels);
/**
* (Optional) If rap2 not equal 0, the triple matrix product RAP is
* replaced by two matrix products.
**/
HYPRE_Int HYPRE_BoomerAMGSetRAP2(HYPRE_Solver solver,
HYPRE_Int rap2);
/**
* (Optional) If set to 1, the local interpolation transposes will
* be saved to use more efficient matvecs instead of matvecTs
**/
HYPRE_Int HYPRE_BoomerAMGSetKeepTranspose(HYPRE_Solver solver,
HYPRE_Int keepTranspose);
/*@}*/
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/**
* @name ParCSR PCG Solver
*
* These routines should be used in conjunction with the generic interface in
* \Ref{PCG Solver}.
**/
/*@{*/
/**
* Create a solver object.
**/
HYPRE_Int HYPRE_ParCSRPCGCreate(MPI_Comm comm,
HYPRE_Solver *solver);
/**
* Destroy a solver object.
**/
HYPRE_Int HYPRE_ParCSRPCGDestroy(HYPRE_Solver solver);
HYPRE_Int HYPRE_ParCSRPCGSetup(HYPRE_Solver solver,
HYPRE_ParCSRMatrix A,
HYPRE_ParVector b,
HYPRE_ParVector x);
HYPRE_Int HYPRE_ParCSRPCGSolve(HYPRE_Solver solver,
HYPRE_ParCSRMatrix A,
HYPRE_ParVector b,
HYPRE_ParVector x);
HYPRE_Int HYPRE_ParCSRPCGSetTol(HYPRE_Solver solver,
HYPRE_Real tol);
HYPRE_Int HYPRE_ParCSRPCGSetAbsoluteTol(HYPRE_Solver solver,
HYPRE_Real tol);
HYPRE_Int HYPRE_ParCSRPCGSetMaxIter(HYPRE_Solver solver,
HYPRE_Int max_iter);
/*
* RE-VISIT
**/
HYPRE_Int HYPRE_ParCSRPCGSetStopCrit(HYPRE_Solver solver,
HYPRE_Int stop_crit);
HYPRE_Int HYPRE_ParCSRPCGSetTwoNorm(HYPRE_Solver solver,
HYPRE_Int two_norm);
HYPRE_Int HYPRE_ParCSRPCGSetRelChange(HYPRE_Solver solver,
HYPRE_Int rel_change);
HYPRE_Int HYPRE_ParCSRPCGSetPrecond(HYPRE_Solver solver,
HYPRE_PtrToParSolverFcn precond,
HYPRE_PtrToParSolverFcn precond_setup,
HYPRE_Solver precond_solver);
HYPRE_Int HYPRE_ParCSRPCGGetPrecond(HYPRE_Solver solver,
HYPRE_Solver *precond_data);
HYPRE_Int HYPRE_ParCSRPCGSetLogging(HYPRE_Solver solver,
HYPRE_Int logging);
HYPRE_Int HYPRE_ParCSRPCGSetPrintLevel(HYPRE_Solver solver,
HYPRE_Int print_level);
HYPRE_Int HYPRE_ParCSRPCGGetNumIterations(HYPRE_Solver solver,
HYPRE_Int *num_iterations);
HYPRE_Int HYPRE_ParCSRPCGGetFinalRelativeResidualNorm(HYPRE_Solver solver,
HYPRE_Real *norm);
/**
* Setup routine for diagonal preconditioning.
**/
HYPRE_Int HYPRE_ParCSRDiagScaleSetup(HYPRE_Solver solver,
HYPRE_ParCSRMatrix A,
HYPRE_ParVector y,
HYPRE_ParVector x);
/**
* Solve routine for diagonal preconditioning.
**/
HYPRE_Int HYPRE_ParCSRDiagScale(HYPRE_Solver solver,
HYPRE_ParCSRMatrix HA,
HYPRE_ParVector Hy,
HYPRE_ParVector Hx);
/*@}*/
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/**
* @name ParCSR GMRES Solver
*
* These routines should be used in conjunction with the generic interface in
* \Ref{GMRES Solver}.
**/
/*@{*/
/**
* Create a solver object.
**/
HYPRE_Int HYPRE_ParCSRGMRESCreate(MPI_Comm comm,
HYPRE_Solver *solver);
/**
* Destroy a solver object.
**/
HYPRE_Int HYPRE_ParCSRGMRESDestroy(HYPRE_Solver solver);
HYPRE_Int HYPRE_ParCSRGMRESSetup(HYPRE_Solver solver,
HYPRE_ParCSRMatrix A,
HYPRE_ParVector b,
HYPRE_ParVector x);
HYPRE_Int HYPRE_ParCSRGMRESSolve(HYPRE_Solver solver,
HYPRE_ParCSRMatrix A,
HYPRE_ParVector b,
HYPRE_ParVector x);
HYPRE_Int HYPRE_ParCSRGMRESSetKDim(HYPRE_Solver solver,
HYPRE_Int k_dim);
HYPRE_Int HYPRE_ParCSRGMRESSetTol(HYPRE_Solver solver,
HYPRE_Real tol);
HYPRE_Int HYPRE_ParCSRGMRESSetAbsoluteTol(HYPRE_Solver solver,
HYPRE_Real a_tol);
/*
* RE-VISIT
**/
HYPRE_Int HYPRE_ParCSRGMRESSetMinIter(HYPRE_Solver solver,
HYPRE_Int min_iter);
HYPRE_Int HYPRE_ParCSRGMRESSetMaxIter(HYPRE_Solver solver,
HYPRE_Int max_iter);
/*
* Obsolete
**/
HYPRE_Int HYPRE_ParCSRGMRESSetStopCrit(HYPRE_Solver solver,
HYPRE_Int stop_crit);
HYPRE_Int HYPRE_ParCSRGMRESSetPrecond(HYPRE_Solver solver,
HYPRE_PtrToParSolverFcn precond,
HYPRE_PtrToParSolverFcn precond_setup,
HYPRE_Solver precond_solver);
HYPRE_Int HYPRE_ParCSRGMRESGetPrecond(HYPRE_Solver solver,
HYPRE_Solver *precond_data);
HYPRE_Int HYPRE_ParCSRGMRESSetLogging(HYPRE_Solver solver,
HYPRE_Int logging);
HYPRE_Int HYPRE_ParCSRGMRESSetPrintLevel(HYPRE_Solver solver,
HYPRE_Int print_level);
HYPRE_Int HYPRE_ParCSRGMRESGetNumIterations(HYPRE_Solver solver,
HYPRE_Int *num_iterations);
HYPRE_Int HYPRE_ParCSRGMRESGetFinalRelativeResidualNorm(HYPRE_Solver solver,
HYPRE_Real *norm);
/*@}*/
/*--------------------------------------------------------------------------
* Miscellaneous: These probably do not belong in the interface.
*--------------------------------------------------------------------------*/
HYPRE_ParCSRMatrix GenerateLaplacian(MPI_Comm comm,
HYPRE_Int nx,
HYPRE_Int ny,
HYPRE_Int nz,
HYPRE_Int P,
HYPRE_Int Q,
HYPRE_Int R,
HYPRE_Int p,
HYPRE_Int q,
HYPRE_Int r,
HYPRE_Real *value);
HYPRE_ParCSRMatrix GenerateLaplacian27pt(MPI_Comm comm,
HYPRE_Int nx,
HYPRE_Int ny,
HYPRE_Int nz,
HYPRE_Int P,
HYPRE_Int Q,
HYPRE_Int R,
HYPRE_Int p,
HYPRE_Int q,
HYPRE_Int r,
HYPRE_Real *value);
HYPRE_ParCSRMatrix GenerateLaplacian9pt(MPI_Comm comm,
HYPRE_Int nx,
HYPRE_Int ny,
HYPRE_Int P,
HYPRE_Int Q,
HYPRE_Int p,
HYPRE_Int q,
HYPRE_Real *value);
HYPRE_ParCSRMatrix GenerateDifConv(MPI_Comm comm,
HYPRE_Int nx,
HYPRE_Int ny,
HYPRE_Int nz,
HYPRE_Int P,
HYPRE_Int Q,
HYPRE_Int R,
HYPRE_Int p,
HYPRE_Int q,
HYPRE_Int r,
HYPRE_Real *value);
HYPRE_ParCSRMatrix
GenerateRotate7pt(MPI_Comm comm,
HYPRE_Int nx,
HYPRE_Int ny,
HYPRE_Int P,
HYPRE_Int Q,
HYPRE_Int p,
HYPRE_Int q,
HYPRE_Real alpha,
HYPRE_Real eps );
HYPRE_ParCSRMatrix
GenerateVarDifConv(MPI_Comm comm,
HYPRE_Int nx,
HYPRE_Int ny,
HYPRE_Int nz,
HYPRE_Int P,
HYPRE_Int Q,
HYPRE_Int R,
HYPRE_Int p,
HYPRE_Int q,
HYPRE_Int r,
HYPRE_Real eps,
HYPRE_ParVector *rhs_ptr);
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*
* (Optional) Switches on use of Jacobi interpolation after computing
* an original interpolation
**/
HYPRE_Int HYPRE_BoomerAMGSetPostInterpType(HYPRE_Solver solver,
HYPRE_Int post_interp_type);
/*
* (Optional) Sets a truncation threshold for Jacobi interpolation.
**/
HYPRE_Int HYPRE_BoomerAMGSetJacobiTruncThreshold(HYPRE_Solver solver,
HYPRE_Real jacobi_trunc_threshold);
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*@}*/
#ifdef __cplusplus
}
#endif
#endif
| 52,977 | 36.895565 | 107 | h |
AMG | AMG-master/parcsr_ls/HYPRE_parcsr_pcg.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include "_hypre_parcsr_ls.h"
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGCreate
 *
 * Allocate a PCG solver object specialized for ParCSR matrices and
 * ParVectors: the generic Krylov driver is wired to the parallel CSR
 * kernels through a hypre_PCGFunctions vtable.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGCreate( MPI_Comm comm, HYPRE_Solver *solver )
{
   hypre_PCGFunctions *vtable;

   /* A NULL output pointer is a caller error: record it and bail out. */
   if (!solver)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   /* Bind the ParCSR-specific memory, communication, matvec and
      vector kernels into the generic PCG function table. */
   vtable = hypre_PCGFunctionsCreate(
      hypre_CAlloc, hypre_ParKrylovFree, hypre_ParKrylovCommInfo,
      hypre_ParKrylovCreateVector,
      hypre_ParKrylovDestroyVector, hypre_ParKrylovMatvecCreate,
      hypre_ParKrylovMatvec, hypre_ParKrylovMatvecDestroy,
      hypre_ParKrylovInnerProd, hypre_ParKrylovCopyVector,
      hypre_ParKrylovClearVector,
      hypre_ParKrylovScaleVector, hypre_ParKrylovAxpy,
      hypre_ParKrylovIdentitySetup, hypre_ParKrylovIdentity );

   *solver = (HYPRE_Solver) hypre_PCGCreate( vtable );

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGDestroy
 *
 * Free a PCG solver object created by HYPRE_ParCSRPCGCreate.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGDestroy( HYPRE_Solver solver )
{
   /* Forward directly to the generic PCG destructor. */
   return hypre_PCGDestroy( (void *) solver );
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGSetup
 *
 * Set up the PCG solver for the ParCSR system A x = b.  The ParCSR
 * objects are passed to the generic driver through opaque-handle casts.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGSetup( HYPRE_Solver solver,
                      HYPRE_ParCSRMatrix A,
                      HYPRE_ParVector b,
                      HYPRE_ParVector x )
{
   HYPRE_Matrix mat = (HYPRE_Matrix) A;
   HYPRE_Vector rhs = (HYPRE_Vector) b;
   HYPRE_Vector sol = (HYPRE_Vector) x;

   return HYPRE_PCGSetup( solver, mat, rhs, sol );
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGSolve
 *
 * Run the PCG iteration on the ParCSR system A x = b, starting from the
 * initial guess in x and overwriting x with the computed solution.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGSolve( HYPRE_Solver solver,
                      HYPRE_ParCSRMatrix A,
                      HYPRE_ParVector b,
                      HYPRE_ParVector x )
{
   HYPRE_Matrix mat = (HYPRE_Matrix) A;
   HYPRE_Vector rhs = (HYPRE_Vector) b;
   HYPRE_Vector sol = (HYPRE_Vector) x;

   return HYPRE_PCGSolve( solver, mat, rhs, sol );
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGSetTol
 *
 * Set the relative convergence tolerance; thin wrapper over the generic
 * PCG interface.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGSetTol( HYPRE_Solver solver, HYPRE_Real tol )
{
   return HYPRE_PCGSetTol( solver, tol );
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGSetAbsoluteTol
 *
 * Set the absolute convergence tolerance; thin wrapper over the generic
 * PCG interface.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGSetAbsoluteTol( HYPRE_Solver solver, HYPRE_Real a_tol )
{
   return HYPRE_PCGSetAbsoluteTol( solver, a_tol );
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGSetMaxIter
 *
 * Set the maximum number of PCG iterations; thin wrapper over the
 * generic PCG interface.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGSetMaxIter( HYPRE_Solver solver, HYPRE_Int max_iter )
{
   return HYPRE_PCGSetMaxIter( solver, max_iter );
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGSetStopCrit
 *
 * Select the stopping criterion.  Marked "RE-VISIT" in the public
 * header, i.e. slated for review/removal; kept as a pass-through.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGSetStopCrit( HYPRE_Solver solver, HYPRE_Int stop_crit )
{
   return HYPRE_PCGSetStopCrit( solver, stop_crit );
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGSetTwoNorm
 *
 * Toggle use of the two-norm in the convergence test; thin wrapper over
 * the generic PCG interface.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGSetTwoNorm( HYPRE_Solver solver, HYPRE_Int two_norm )
{
   return HYPRE_PCGSetTwoNorm( solver, two_norm );
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGSetRelChange
 *
 * Toggle the additional relative-change convergence test; thin wrapper
 * over the generic PCG interface.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGSetRelChange( HYPRE_Solver solver, HYPRE_Int rel_change )
{
   return HYPRE_PCGSetRelChange( solver, rel_change );
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGSetPrecond
 *
 * Install a preconditioner (solve and setup callbacks plus its data
 * object) into the PCG solver.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGSetPrecond( HYPRE_Solver solver,
                           HYPRE_PtrToParSolverFcn precond,
                           HYPRE_PtrToParSolverFcn precond_setup,
                           HYPRE_Solver precond_solver )
{
   /* Cast the ParCSR-typed callbacks to the generic solver-function
      signature expected by the Krylov layer. */
   HYPRE_PtrToSolverFcn solve_fcn = (HYPRE_PtrToSolverFcn) precond;
   HYPRE_PtrToSolverFcn setup_fcn = (HYPRE_PtrToSolverFcn) precond_setup;

   return HYPRE_PCGSetPrecond( solver, solve_fcn, setup_fcn, precond_solver );
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGGetPrecond
 *
 * Retrieve the preconditioner object previously installed on the
 * solver; thin wrapper over the generic PCG interface.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGGetPrecond( HYPRE_Solver solver, HYPRE_Solver *precond_data_ptr )
{
   return HYPRE_PCGGetPrecond( solver, precond_data_ptr );
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGSetPrintLevel
 *
 * Set the amount of diagnostic printing.  Obsolete wrapper kept for
 * backward compatibility; prefer the HYPRE_PCG* functions directly.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGSetPrintLevel( HYPRE_Solver solver, HYPRE_Int level )
{
   return HYPRE_PCGSetPrintLevel( solver, level );
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGSetLogging
 *
 * Set the logging level.  Obsolete wrapper kept for backward
 * compatibility; prefer the HYPRE_PCG* functions directly.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGSetLogging( HYPRE_Solver solver, HYPRE_Int level )
{
   return HYPRE_PCGSetLogging( solver, level );
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGGetNumIterations
 *
 * Return (through num_iterations) the number of iterations taken by the
 * last solve; thin wrapper over the generic PCG interface.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGGetNumIterations( HYPRE_Solver solver,
                                 HYPRE_Int *num_iterations )
{
   return HYPRE_PCGGetNumIterations( solver, num_iterations );
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRPCGGetFinalRelativeResidualNorm
 *
 * Return (through norm) the relative residual norm at the end of the
 * last solve; thin wrapper over the generic PCG interface.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRPCGGetFinalRelativeResidualNorm( HYPRE_Solver solver,
                                             HYPRE_Real *norm )
{
   return HYPRE_PCGGetFinalRelativeResidualNorm( solver, norm );
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRDiagScaleSetup
 *
 * Setup routine for diagonal (Jacobi) preconditioning.  Diagonal
 * scaling requires no setup work, so this is a deliberate no-op; the
 * parameters exist only so the function matches the
 * HYPRE_PtrToParSolverFcn signature expected by SetPrecond.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRDiagScaleSetup( HYPRE_Solver solver,
                            HYPRE_ParCSRMatrix A,
                            HYPRE_ParVector y,
                            HYPRE_ParVector x )
{
   return 0;
}
/*--------------------------------------------------------------------------
 * HYPRE_ParCSRDiagScale
 *
 * Apply diagonal (Jacobi) preconditioning: set Hx = diag(HA)^{-1} * Hy,
 * elementwise over the locally owned rows.  The solver handle is
 * unused; it is present only to match the preconditioner signature.
 *--------------------------------------------------------------------------*/

HYPRE_Int
HYPRE_ParCSRDiagScale( HYPRE_Solver solver,
                       HYPRE_ParCSRMatrix HA,
                       HYPRE_ParVector Hy,
                       HYPRE_ParVector Hx )
{
   hypre_CSRMatrix *diag  = hypre_ParCSRMatrixDiag((hypre_ParCSRMatrix *) HA);
   hypre_Vector    *x_loc = hypre_ParVectorLocalVector((hypre_ParVector *) Hx);
   hypre_Vector    *y_loc = hypre_ParVectorLocalVector((hypre_ParVector *) Hy);

   HYPRE_Real *xd = hypre_VectorData(x_loc);
   HYPRE_Real *yd = hypre_VectorData(y_loc);
   HYPRE_Real *ad = hypre_CSRMatrixData(diag);
   HYPRE_Int  *ai = hypre_CSRMatrixI(diag);

   HYPRE_Int n = hypre_VectorSize(x_loc);
   HYPRE_Int row;

   /* ad[ai[row]] is the first stored entry of each row of the diag part
      — presumably the diagonal coefficient by hypre's storage
      convention (NOTE(review): confirm diag-first ordering). */
   for (row = 0; row < n; row++)
   {
      xd[row] = yd[row] / ad[ai[row]];
   }

   return 0;
}
/*--------------------------------------------------------------------------
* HYPRE_ParCSRSymPrecondSetup
*--------------------------------------------------------------------------*/
/*
HYPRE_Int
HYPRE_ParCSRSymPrecondSetup( HYPRE_Solver solver,
HYPRE_ParCSRMatrix A,
HYPRE_ParVector b,
HYPRE_ParVector x )
{
hypre_ParCSRMatrix *A = (hypre_ParCSRMatrix *) A;
hypre_ParVector *y = (hypre_ParVector *) b;
hypre_ParVector *x = (hypre_ParVector *) x;
HYPRE_Real *x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
HYPRE_Real *y_data = hypre_VectorData(hypre_ParVectorLocalVector(y));
HYPRE_Real *A_diag = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A));
HYPRE_Real *A_offd = hypre_CSRMatrixData(hypre_ParCSRMatrixOffD(A));
HYPRE_Int i, ierr = 0;
hypre_ParCSRMatrix *Asym;
MPI_Comm comm;
HYPRE_Int global_num_rows;
HYPRE_Int global_num_cols;
HYPRE_Int *row_starts;
HYPRE_Int *col_starts;
HYPRE_Int num_cols_offd;
HYPRE_Int num_nonzeros_diag;
HYPRE_Int num_nonzeros_offd;
Asym = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
row_starts, col_starts, num_cols_offd,
num_nonzeros_diag, num_nonzeros_offd);
for (i=0; i < hypre_VectorSize(hypre_ParVectorLocalVector(x)); i++)
{
x_data[i] = y_data[i]/A_data[A_i[i]];
}
return ierr;
} */
| 11,088 | 34.428115 | 81 | c |
AMG | AMG-master/parcsr_ls/_hypre_parcsr_ls.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include "HYPRE_parcsr_ls.h"
#ifndef hypre_PARCSR_LS_HEADER
#define hypre_PARCSR_LS_HEADER
#include "_hypre_utilities.h"
#include "krylov.h"
#include "seq_mv.h"
#include "_hypre_parcsr_mv.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Doubly-linked-list node (indices into an external array rather than
   pointers); used by coarsening/measure bookkeeping in this package. */
typedef struct { HYPRE_Int prev; HYPRE_Int next; } Link;
#ifndef hypre_ParAMG_DATA_HEADER
#define hypre_ParAMG_DATA_HEADER
#define CUMNUMIT
/*--------------------------------------------------------------------------
* hypre_ParAMGData
*--------------------------------------------------------------------------*/
/* Master data object for a BoomerAMG solver instance.  Created by
   hypre_BoomerAMGCreate(), populated by hypre_BoomerAMGSetup(), consumed by
   hypre_BoomerAMGSolve()/hypre_BoomerAMGCycle().  Fields are accessed
   exclusively through the hypre_ParAMGData* macros defined below this
   struct; do not reorder or rename members without updating those macros. */
typedef struct
{
   /* setup params */
   HYPRE_Int     max_levels;              /* maximum number of MG levels to build */
   HYPRE_Real    strong_threshold;        /* strength-of-connection threshold for S */
   HYPRE_Real    max_row_sum;
   HYPRE_Real    trunc_factor;            /* interpolation truncation factor */
   HYPRE_Real    agg_trunc_factor;        /* truncation for aggressive-coarsening P */
   HYPRE_Real    agg_P12_trunc_factor;
   HYPRE_Real    jacobi_trunc_threshold;
   HYPRE_Real    S_commpkg_switch;
   HYPRE_Int     measure_type;
   HYPRE_Int     setup_type;
   HYPRE_Int     coarsen_type;            /* which coarsening algorithm to use */
   HYPRE_Int     P_max_elmts;             /* max nonzeros per row of interpolation P */
   HYPRE_Int     interp_type;
   HYPRE_Int     sep_weight;
   HYPRE_Int     agg_interp_type;
   HYPRE_Int     agg_P_max_elmts;
   HYPRE_Int     agg_P12_max_elmts;
   HYPRE_Int     restr_par;               /* restriction parameter (see SetRestriction) */
   HYPRE_Int     agg_num_levels;          /* number of levels using aggressive coarsening */
   HYPRE_Int     num_paths;
   HYPRE_Int     post_interp_type;
   HYPRE_Int     max_coarse_size;         /* stop coarsening above this grid size */
   HYPRE_Int     min_coarse_size;         /* stop coarsening below this grid size */
   HYPRE_Int     seq_threshold;           /* switch to sequential/redundant solve below this size */
   HYPRE_Int     redundant;
   HYPRE_Int     participate;

   /* solve params */
   HYPRE_Int     max_iter;
   HYPRE_Int     min_iter;
   HYPRE_Int     cycle_type;              /* 1 = V-cycle, 2 = W-cycle (standard hypre convention
                                             — NOTE(review): confirm against cycle code) */
   HYPRE_Int    *num_grid_sweeps;         /* sweeps per cycle position; indexed by fine/down/up/coarse */
   HYPRE_Int    *grid_relax_type;         /* smoother id per cycle position */
   HYPRE_Int   **grid_relax_points;
   HYPRE_Int     relax_order;             /* CF ordering of relaxation if nonzero */
   HYPRE_Int     user_coarse_relax_type;  /* user_* fields remember user choices before defaults */
   HYPRE_Int     user_relax_type;
   HYPRE_Int     user_num_sweeps;
   HYPRE_Real    user_relax_weight;
   HYPRE_Real    outer_wt;                /* outer relaxation weight (SOR-type smoothers) */
   HYPRE_Real   *relax_weight;            /* per-level relaxation weights */
   HYPRE_Real   *omega;                   /* per-level outer weights */
   HYPRE_Real    tol;                     /* convergence tolerance for the solve phase */

   /* problem data */
   hypre_ParCSRMatrix  *A;                /* fine-grid system matrix (level 0) */
   HYPRE_Int      num_variables;
   HYPRE_Int      num_functions;          /* unknowns per grid point (systems AMG) */
   HYPRE_Int      nodal;
   HYPRE_Int      nodal_levels;
   HYPRE_Int      nodal_diag;
   HYPRE_Int      num_points;
   HYPRE_Int     *dof_func;               /* maps each dof to its function/variable id */
   HYPRE_Int     *dof_point;
   HYPRE_Int     *point_dof_map;

   /* data generated in the setup phase */
   /* The *_array members are per-level arrays of length num_levels
      (P_array/R_array have num_levels-1 usable entries). */
   hypre_ParCSRMatrix **A_array;          /* coarse-grid operators per level */
   hypre_ParVector    **F_array;          /* per-level right-hand sides */
   hypre_ParVector    **U_array;          /* per-level solution/correction vectors */
   hypre_ParCSRMatrix **P_array;          /* interpolation operators between levels */
   hypre_ParCSRMatrix **R_array;          /* restriction operators between levels */
   HYPRE_Int          **CF_marker_array;  /* per-level coarse(+1)/fine(-1) markers */
   HYPRE_Int          **dof_func_array;
   HYPRE_Int          **dof_point_array;
   HYPRE_Int          **point_dof_map_array;
   HYPRE_Int            num_levels;       /* number of levels actually built */
   HYPRE_Real         **l1_norms;         /* per-level row l1 norms for l1-smoothers
                                             (see hypre_ParCSRComputeL1Norms) */
   HYPRE_Int            block_mode;

   /* data for more complex smoothers */
   HYPRE_Real          *max_eig_est;      /* per-level eigenvalue estimates for Chebyshev */
   HYPRE_Real          *min_eig_est;
   HYPRE_Int            cheby_eig_est;
   HYPRE_Int            cheby_order;      /* Chebyshev polynomial order */
   HYPRE_Int            cheby_variant;
   HYPRE_Int            cheby_scale;
   HYPRE_Real           cheby_fraction;
   HYPRE_Real         **cheby_ds;
   HYPRE_Real         **cheby_coefs;      /* precomputed Chebyshev coefficients per level */

   /* data needed for non-Galerkin option */
   HYPRE_Int           nongalerk_num_tol;
   HYPRE_Real         *nongalerk_tol;
   HYPRE_Real          nongalerkin_tol;
   HYPRE_Real         *nongal_tol_array;  /* per-level non-Galerkin drop tolerances */

   /* data generated in the solve phase */
   hypre_ParVector   *Vtemp;              /* work vectors reused across cycles */
   hypre_Vector      *Vtemp_local;
   HYPRE_Real        *Vtemp_local_data;
   HYPRE_Real         cycle_op_count;     /* operator complexity accounting */
   hypre_ParVector   *Rtemp;
   hypre_ParVector   *Ptemp;
   hypre_ParVector   *Ztemp;

   /* log info */
   HYPRE_Int      logging;
   HYPRE_Int      num_iterations;
#ifdef CUMNUMIT
   HYPRE_Int      cum_num_iterations;     /* iterations accumulated over repeated solves */
#endif
   HYPRE_Real     rel_resid_norm;
   hypre_ParVector *residual; /* available if logging>1 */

   /* output params */
   HYPRE_Int      print_level;
   char           log_file_name[256];
   HYPRE_Int      debug_flag;
   HYPRE_Real   cum_nnz_AP;

 /* enable redundant coarse grid solve */
   HYPRE_Solver   coarse_solver;          /* solver object for the (gathered) coarsest grid */
   hypre_ParCSRMatrix  *A_coarse;
   hypre_ParVector  *f_coarse;
   hypre_ParVector  *u_coarse;
   MPI_Comm   new_comm;                   /* sub-communicator of participating ranks */

 /* store matrix, vector and communication info for Gaussian elimination */
   HYPRE_Real *A_mat;                     /* dense copy of the coarsest-level matrix */
   HYPRE_Real *b_vec;
   HYPRE_Int *comm_info;

 /* information for multiplication with Lambda - additive AMG */
   HYPRE_Int      additive;               /* first level of additive cycling (-1 = off;
                                             NOTE(review): confirm sentinel in par_amg.c) */
   HYPRE_Int      mult_additive;
   HYPRE_Int      simple;
   HYPRE_Int      add_last_lvl;
   HYPRE_Int      add_P_max_elmts;
   HYPRE_Real     add_trunc_factor;
   HYPRE_Int      add_rlx_type;
   HYPRE_Real     add_rlx_wt;
   hypre_ParCSRMatrix *Lambda;            /* built by hypre_CreateLambda for additive cycles */
   hypre_ParCSRMatrix *Atilde;
   hypre_ParVector *Rtilde;
   hypre_ParVector *Xtilde;
   HYPRE_Real *D_inv;                     /* built by hypre_CreateDinv (simple additive) */

   HYPRE_Int      rap2;                   /* compute RAP as two products if nonzero */
   HYPRE_Int      keepTranspose;          /* keep P^T for reuse in RAP */
} hypre_ParAMGData;
/*--------------------------------------------------------------------------
* Accessor functions for the hypre_AMGData structure
*--------------------------------------------------------------------------*/
/* setup params */
#define hypre_ParAMGDataRestriction(amg_data) ((amg_data)->restr_par)
#define hypre_ParAMGDataMaxLevels(amg_data) ((amg_data)->max_levels)
#define hypre_ParAMGDataStrongThreshold(amg_data) \
((amg_data)->strong_threshold)
#define hypre_ParAMGDataMaxRowSum(amg_data) ((amg_data)->max_row_sum)
#define hypre_ParAMGDataTruncFactor(amg_data) ((amg_data)->trunc_factor)
#define hypre_ParAMGDataAggTruncFactor(amg_data) ((amg_data)->agg_trunc_factor)
#define hypre_ParAMGDataAggP12TruncFactor(amg_data) ((amg_data)->agg_P12_trunc_factor)
#define hypre_ParAMGDataJacobiTruncThreshold(amg_data) ((amg_data)->jacobi_trunc_threshold)
#define hypre_ParAMGDataSCommPkgSwitch(amg_data) ((amg_data)->S_commpkg_switch)
#define hypre_ParAMGDataInterpType(amg_data) ((amg_data)->interp_type)
#define hypre_ParAMGDataSepWeight(amg_data) ((amg_data)->sep_weight)
#define hypre_ParAMGDataAggInterpType(amg_data) ((amg_data)->agg_interp_type)
#define hypre_ParAMGDataCoarsenType(amg_data) ((amg_data)->coarsen_type)
#define hypre_ParAMGDataMeasureType(amg_data) ((amg_data)->measure_type)
#define hypre_ParAMGDataSetupType(amg_data) ((amg_data)->setup_type)
#define hypre_ParAMGDataPMaxElmts(amg_data) ((amg_data)->P_max_elmts)
#define hypre_ParAMGDataAggPMaxElmts(amg_data) ((amg_data)->agg_P_max_elmts)
#define hypre_ParAMGDataAggP12MaxElmts(amg_data) ((amg_data)->agg_P12_max_elmts)
#define hypre_ParAMGDataNumPaths(amg_data) ((amg_data)->num_paths)
#define hypre_ParAMGDataAggNumLevels(amg_data) ((amg_data)->agg_num_levels)
#define hypre_ParAMGDataPostInterpType(amg_data) ((amg_data)->post_interp_type)
#define hypre_ParAMGDataL1Norms(amg_data) ((amg_data)->l1_norms)
#define hypre_ParAMGDataMaxCoarseSize(amg_data) ((amg_data)->max_coarse_size)
#define hypre_ParAMGDataMinCoarseSize(amg_data) ((amg_data)->min_coarse_size)
#define hypre_ParAMGDataSeqThreshold(amg_data) ((amg_data)->seq_threshold)
/* solve params */
#define hypre_ParAMGDataMinIter(amg_data) ((amg_data)->min_iter)
#define hypre_ParAMGDataMaxIter(amg_data) ((amg_data)->max_iter)
#define hypre_ParAMGDataCycleType(amg_data) ((amg_data)->cycle_type)
#define hypre_ParAMGDataTol(amg_data) ((amg_data)->tol)
#define hypre_ParAMGDataNumGridSweeps(amg_data) ((amg_data)->num_grid_sweeps)
#define hypre_ParAMGDataUserCoarseRelaxType(amg_data) ((amg_data)->user_coarse_relax_type)
#define hypre_ParAMGDataUserRelaxType(amg_data) ((amg_data)->user_relax_type)
#define hypre_ParAMGDataUserRelaxWeight(amg_data) ((amg_data)->user_relax_weight)
#define hypre_ParAMGDataUserNumSweeps(amg_data) ((amg_data)->user_num_sweeps)
#define hypre_ParAMGDataGridRelaxType(amg_data) ((amg_data)->grid_relax_type)
#define hypre_ParAMGDataGridRelaxPoints(amg_data) \
((amg_data)->grid_relax_points)
#define hypre_ParAMGDataRelaxOrder(amg_data) ((amg_data)->relax_order)
#define hypre_ParAMGDataRelaxWeight(amg_data) ((amg_data)->relax_weight)
#define hypre_ParAMGDataOmega(amg_data) ((amg_data)->omega)
#define hypre_ParAMGDataOuterWt(amg_data) ((amg_data)->outer_wt)
/* problem data parameters */
#define hypre_ParAMGDataNumVariables(amg_data) ((amg_data)->num_variables)
#define hypre_ParAMGDataNumFunctions(amg_data) ((amg_data)->num_functions)
#define hypre_ParAMGDataNodal(amg_data) ((amg_data)->nodal)
#define hypre_ParAMGDataNodalLevels(amg_data) ((amg_data)->nodal_levels)
#define hypre_ParAMGDataNodalDiag(amg_data) ((amg_data)->nodal_diag)
#define hypre_ParAMGDataNumPoints(amg_data) ((amg_data)->num_points)
#define hypre_ParAMGDataDofFunc(amg_data) ((amg_data)->dof_func)
#define hypre_ParAMGDataDofPoint(amg_data) ((amg_data)->dof_point)
#define hypre_ParAMGDataPointDofMap(amg_data) ((amg_data)->point_dof_map)
/* data generated by the setup phase */
#define hypre_ParAMGDataCFMarkerArray(amg_data) ((amg_data)-> CF_marker_array)
#define hypre_ParAMGDataAArray(amg_data) ((amg_data)->A_array)
#define hypre_ParAMGDataFArray(amg_data) ((amg_data)->F_array)
#define hypre_ParAMGDataUArray(amg_data) ((amg_data)->U_array)
#define hypre_ParAMGDataPArray(amg_data) ((amg_data)->P_array)
#define hypre_ParAMGDataRArray(amg_data) ((amg_data)->R_array)
#define hypre_ParAMGDataDofFuncArray(amg_data) ((amg_data)->dof_func_array)
#define hypre_ParAMGDataDofPointArray(amg_data) ((amg_data)->dof_point_array)
#define hypre_ParAMGDataPointDofMapArray(amg_data) \
((amg_data)->point_dof_map_array)
#define hypre_ParAMGDataNumLevels(amg_data) ((amg_data)->num_levels)
#define hypre_ParAMGDataMaxEigEst(amg_data) ((amg_data)->max_eig_est)
#define hypre_ParAMGDataMinEigEst(amg_data) ((amg_data)->min_eig_est)
#define hypre_ParAMGDataChebyOrder(amg_data) ((amg_data)->cheby_order)
#define hypre_ParAMGDataChebyFraction(amg_data) ((amg_data)->cheby_fraction)
#define hypre_ParAMGDataChebyEigEst(amg_data) ((amg_data)->cheby_eig_est)
#define hypre_ParAMGDataChebyVariant(amg_data) ((amg_data)->cheby_variant)
#define hypre_ParAMGDataChebyScale(amg_data) ((amg_data)->cheby_scale)
#define hypre_ParAMGDataChebyDS(amg_data) ((amg_data)->cheby_ds)
#define hypre_ParAMGDataChebyCoefs(amg_data) ((amg_data)->cheby_coefs)
#define hypre_ParAMGDataBlockMode(amg_data) ((amg_data)->block_mode)
/* data generated in the solve phase */
#define hypre_ParAMGDataVtemp(amg_data) ((amg_data)->Vtemp)
#define hypre_ParAMGDataVtempLocal(amg_data) ((amg_data)->Vtemp_local)
#define hypre_ParAMGDataVtemplocalData(amg_data) ((amg_data)->Vtemp_local_data)
#define hypre_ParAMGDataCycleOpCount(amg_data) ((amg_data)->cycle_op_count)
#define hypre_ParAMGDataRtemp(amg_data) ((amg_data)->Rtemp)
#define hypre_ParAMGDataPtemp(amg_data) ((amg_data)->Ptemp)
#define hypre_ParAMGDataZtemp(amg_data) ((amg_data)->Ztemp)
/* log info data */
#define hypre_ParAMGDataLogging(amg_data) ((amg_data)->logging)
#define hypre_ParAMGDataNumIterations(amg_data) ((amg_data)->num_iterations)
#ifdef CUMNUMIT
#define hypre_ParAMGDataCumNumIterations(amg_data) ((amg_data)->cum_num_iterations)
#endif
#define hypre_ParAMGDataRelativeResidualNorm(amg_data) ((amg_data)->rel_resid_norm)
#define hypre_ParAMGDataResidual(amg_data) ((amg_data)->residual)
/* output parameters */
#define hypre_ParAMGDataPrintLevel(amg_data) ((amg_data)->print_level)
#define hypre_ParAMGDataLogFileName(amg_data) ((amg_data)->log_file_name)
#define hypre_ParAMGDataDebugFlag(amg_data) ((amg_data)->debug_flag)
#define hypre_ParAMGDataCumNnzAP(amg_data) ((amg_data)->cum_nnz_AP)
#define hypre_ParAMGDataCoarseSolver(amg_data) ((amg_data)->coarse_solver)
#define hypre_ParAMGDataACoarse(amg_data) ((amg_data)->A_coarse)
#define hypre_ParAMGDataFCoarse(amg_data) ((amg_data)->f_coarse)
#define hypre_ParAMGDataUCoarse(amg_data) ((amg_data)->u_coarse)
#define hypre_ParAMGDataNewComm(amg_data) ((amg_data)->new_comm)
#define hypre_ParAMGDataRedundant(amg_data) ((amg_data)->redundant)
#define hypre_ParAMGDataParticipate(amg_data) ((amg_data)->participate)
#define hypre_ParAMGDataAMat(amg_data) ((amg_data)->A_mat)
#define hypre_ParAMGDataBVec(amg_data) ((amg_data)->b_vec)
#define hypre_ParAMGDataCommInfo(amg_data) ((amg_data)->comm_info)
/* additive AMG parameters */
#define hypre_ParAMGDataAdditive(amg_data) ((amg_data)->additive)
#define hypre_ParAMGDataMultAdditive(amg_data) ((amg_data)->mult_additive)
#define hypre_ParAMGDataSimple(amg_data) ((amg_data)->simple)
#define hypre_ParAMGDataAddLastLvl(amg_data) ((amg_data)->add_last_lvl)
#define hypre_ParAMGDataMultAddPMaxElmts(amg_data) ((amg_data)->add_P_max_elmts)
#define hypre_ParAMGDataMultAddTruncFactor(amg_data) ((amg_data)->add_trunc_factor)
#define hypre_ParAMGDataAddRelaxType(amg_data) ((amg_data)->add_rlx_type)
#define hypre_ParAMGDataAddRelaxWt(amg_data) ((amg_data)->add_rlx_wt)
#define hypre_ParAMGDataLambda(amg_data) ((amg_data)->Lambda)
#define hypre_ParAMGDataAtilde(amg_data) ((amg_data)->Atilde)
#define hypre_ParAMGDataRtilde(amg_data) ((amg_data)->Rtilde)
#define hypre_ParAMGDataXtilde(amg_data) ((amg_data)->Xtilde)
#define hypre_ParAMGDataDinv(amg_data) ((amg_data)->D_inv)
/* non-Galerkin parameters */
#define hypre_ParAMGDataNonGalerkNumTol(amg_data) ((amg_data)->nongalerk_num_tol)
#define hypre_ParAMGDataNonGalerkTol(amg_data) ((amg_data)->nongalerk_tol)
#define hypre_ParAMGDataNonGalerkinTol(amg_data) ((amg_data)->nongalerkin_tol)
#define hypre_ParAMGDataNonGalTolArray(amg_data) ((amg_data)->nongal_tol_array)
#define hypre_ParAMGDataRAP2(amg_data) ((amg_data)->rap2)
#define hypre_ParAMGDataKeepTranspose(amg_data) ((amg_data)->keepTranspose)
#endif
/* ams.c */
HYPRE_Int hypre_ParCSRRelax ( hypre_ParCSRMatrix *A , hypre_ParVector *f , HYPRE_Int relax_type , HYPRE_Int relax_times , HYPRE_Real *l1_norms , HYPRE_Real relax_weight , HYPRE_Real omega , HYPRE_Real max_eig_est , HYPRE_Real min_eig_est , HYPRE_Int cheby_order , HYPRE_Real cheby_fraction , hypre_ParVector *u , hypre_ParVector *v , hypre_ParVector *z );
hypre_ParVector *hypre_ParVectorInRangeOf ( hypre_ParCSRMatrix *A );
hypre_ParVector *hypre_ParVectorInDomainOf ( hypre_ParCSRMatrix *A );
HYPRE_Int hypre_ParVectorBlockSplit ( hypre_ParVector *x , hypre_ParVector *x_ [3 ], HYPRE_Int dim );
HYPRE_Int hypre_ParVectorBlockGather ( hypre_ParVector *x , hypre_ParVector *x_ [3 ], HYPRE_Int dim );
HYPRE_Int hypre_BoomerAMGBlockSolve ( void *B , hypre_ParCSRMatrix *A , hypre_ParVector *b , hypre_ParVector *x );
HYPRE_Int hypre_ParCSRMatrixFixZeroRows ( hypre_ParCSRMatrix *A );
HYPRE_Int hypre_ParCSRComputeL1Norms ( hypre_ParCSRMatrix *A , HYPRE_Int option , HYPRE_Int *cf_marker , HYPRE_Real **l1_norm_ptr );
HYPRE_Int hypre_ParCSRMatrixSetDiagRows ( hypre_ParCSRMatrix *A , HYPRE_Real d );
void *hypre_AMSCreate ( void );
HYPRE_Int hypre_AMSDestroy ( void *solver );
HYPRE_Int hypre_AMSSetDimension ( void *solver , HYPRE_Int dim );
HYPRE_Int hypre_AMSSetDiscreteGradient ( void *solver , hypre_ParCSRMatrix *G );
HYPRE_Int hypre_AMSSetCoordinateVectors ( void *solver , hypre_ParVector *x , hypre_ParVector *y , hypre_ParVector *z );
HYPRE_Int hypre_AMSSetEdgeConstantVectors ( void *solver , hypre_ParVector *Gx , hypre_ParVector *Gy , hypre_ParVector *Gz );
HYPRE_Int hypre_AMSSetInterpolations ( void *solver , hypre_ParCSRMatrix *Pi , hypre_ParCSRMatrix *Pix , hypre_ParCSRMatrix *Piy , hypre_ParCSRMatrix *Piz );
HYPRE_Int hypre_AMSSetAlphaPoissonMatrix ( void *solver , hypre_ParCSRMatrix *A_Pi );
HYPRE_Int hypre_AMSSetBetaPoissonMatrix ( void *solver , hypre_ParCSRMatrix *A_G );
HYPRE_Int hypre_AMSSetInteriorNodes ( void *solver , hypre_ParVector *interior_nodes );
HYPRE_Int hypre_AMSSetProjectionFrequency ( void *solver , HYPRE_Int projection_frequency );
HYPRE_Int hypre_AMSSetMaxIter ( void *solver , HYPRE_Int maxit );
HYPRE_Int hypre_AMSSetTol ( void *solver , HYPRE_Real tol );
HYPRE_Int hypre_AMSSetCycleType ( void *solver , HYPRE_Int cycle_type );
HYPRE_Int hypre_AMSSetPrintLevel ( void *solver , HYPRE_Int print_level );
HYPRE_Int hypre_AMSSetSmoothingOptions ( void *solver , HYPRE_Int A_relax_type , HYPRE_Int A_relax_times , HYPRE_Real A_relax_weight , HYPRE_Real A_omega );
HYPRE_Int hypre_AMSSetChebySmoothingOptions ( void *solver , HYPRE_Int A_cheby_order , HYPRE_Int A_cheby_fraction );
HYPRE_Int hypre_AMSSetAlphaAMGOptions ( void *solver , HYPRE_Int B_Pi_coarsen_type , HYPRE_Int B_Pi_agg_levels , HYPRE_Int B_Pi_relax_type , HYPRE_Real B_Pi_theta , HYPRE_Int B_Pi_interp_type , HYPRE_Int B_Pi_Pmax );
HYPRE_Int hypre_AMSSetAlphaAMGCoarseRelaxType ( void *solver , HYPRE_Int B_Pi_coarse_relax_type );
HYPRE_Int hypre_AMSSetBetaAMGOptions ( void *solver , HYPRE_Int B_G_coarsen_type , HYPRE_Int B_G_agg_levels , HYPRE_Int B_G_relax_type , HYPRE_Real B_G_theta , HYPRE_Int B_G_interp_type , HYPRE_Int B_G_Pmax );
HYPRE_Int hypre_AMSSetBetaAMGCoarseRelaxType ( void *solver , HYPRE_Int B_G_coarse_relax_type );
HYPRE_Int hypre_AMSComputePi ( hypre_ParCSRMatrix *A , hypre_ParCSRMatrix *G , hypre_ParVector *Gx , hypre_ParVector *Gy , hypre_ParVector *Gz , HYPRE_Int dim , hypre_ParCSRMatrix **Pi_ptr );
HYPRE_Int hypre_AMSComputePixyz ( hypre_ParCSRMatrix *A , hypre_ParCSRMatrix *G , hypre_ParVector *Gx , hypre_ParVector *Gy , hypre_ParVector *Gz , HYPRE_Int dim , hypre_ParCSRMatrix **Pix_ptr , hypre_ParCSRMatrix **Piy_ptr , hypre_ParCSRMatrix **Piz_ptr );
HYPRE_Int hypre_AMSComputeGPi ( hypre_ParCSRMatrix *A , hypre_ParCSRMatrix *G , hypre_ParVector *Gx , hypre_ParVector *Gy , hypre_ParVector *Gz , HYPRE_Int dim , hypre_ParCSRMatrix **GPi_ptr );
HYPRE_Int hypre_AMSSetup ( void *solver , hypre_ParCSRMatrix *A , hypre_ParVector *b , hypre_ParVector *x );
HYPRE_Int hypre_AMSSolve ( void *solver , hypre_ParCSRMatrix *A , hypre_ParVector *b , hypre_ParVector *x );
HYPRE_Int hypre_ParCSRSubspacePrec ( hypre_ParCSRMatrix *A0 , HYPRE_Int A0_relax_type , HYPRE_Int A0_relax_times , HYPRE_Real *A0_l1_norms , HYPRE_Real A0_relax_weight , HYPRE_Real A0_omega , HYPRE_Real A0_max_eig_est , HYPRE_Real A0_min_eig_est , HYPRE_Int A0_cheby_order , HYPRE_Real A0_cheby_fraction , hypre_ParCSRMatrix **A , HYPRE_Solver *B , HYPRE_PtrToSolverFcn *HB , hypre_ParCSRMatrix **P , hypre_ParVector **r , hypre_ParVector **g , hypre_ParVector *x , hypre_ParVector *y , hypre_ParVector *r0 , hypre_ParVector *g0 , char *cycle , hypre_ParVector *z );
HYPRE_Int hypre_AMSGetNumIterations ( void *solver , HYPRE_Int *num_iterations );
HYPRE_Int hypre_AMSGetFinalRelativeResidualNorm ( void *solver , HYPRE_Real *rel_resid_norm );
HYPRE_Int hypre_AMSProjectOutGradients ( void *solver , hypre_ParVector *x );
HYPRE_Int hypre_AMSConstructDiscreteGradient ( hypre_ParCSRMatrix *A , hypre_ParVector *x_coord , HYPRE_Int *edge_vertex , HYPRE_Int edge_orientation , hypre_ParCSRMatrix **G_ptr );
HYPRE_Int hypre_AMSFEISetup ( void *solver , hypre_ParCSRMatrix *A , hypre_ParVector *b , hypre_ParVector *x , HYPRE_Int num_vert , HYPRE_Int num_local_vert , HYPRE_Int *vert_number , HYPRE_Real *vert_coord , HYPRE_Int num_edges , HYPRE_Int *edge_vertex );
HYPRE_Int hypre_AMSFEIDestroy ( void *solver );
HYPRE_Int hypre_ParCSRComputeL1NormsThreads ( hypre_ParCSRMatrix *A , HYPRE_Int option , HYPRE_Int num_threads , HYPRE_Int *cf_marker , HYPRE_Real **l1_norm_ptr );
HYPRE_Int hypre_ParCSRRelaxThreads ( hypre_ParCSRMatrix *A , hypre_ParVector *f , HYPRE_Int relax_type , HYPRE_Int relax_times , HYPRE_Real *l1_norms , HYPRE_Real relax_weight , HYPRE_Real omega , hypre_ParVector *u , hypre_ParVector *Vtemp , hypre_ParVector *z );
/* aux_interp.c */
HYPRE_Int hypre_alt_insert_new_nodes ( hypre_ParCSRCommPkg *comm_pkg , hypre_ParCSRCommPkg *extend_comm_pkg , HYPRE_Int *IN_marker , HYPRE_Int full_off_procNodes , HYPRE_Int *OUT_marker );
HYPRE_Int hypre_ParCSRFindExtendCommPkg ( hypre_ParCSRMatrix *A , HYPRE_Int newoff , HYPRE_Int *found , hypre_ParCSRCommPkg **extend_comm_pkg );
HYPRE_Int hypre_ssort ( HYPRE_Int *data , HYPRE_Int n );
HYPRE_Int hypre_index_of_minimum ( HYPRE_Int *data , HYPRE_Int n );
void hypre_swap_int ( HYPRE_Int *data , HYPRE_Int a , HYPRE_Int b );
void hypre_initialize_vecs ( HYPRE_Int diag_n , HYPRE_Int offd_n , HYPRE_Int *diag_ftc , HYPRE_Int *offd_ftc , HYPRE_Int *diag_pm , HYPRE_Int *offd_pm , HYPRE_Int *tmp_CF );
/*HYPRE_Int hypre_new_offd_nodes(HYPRE_Int **found , HYPRE_Int num_cols_A_offd , HYPRE_Int *A_ext_i , HYPRE_Int *A_ext_j, HYPRE_Int num_cols_S_offd, HYPRE_Int *col_map_offd, HYPRE_Int col_1, HYPRE_Int col_n, HYPRE_Int *Sop_i, HYPRE_Int *Sop_j, HYPRE_Int *CF_marker_offd );*/
HYPRE_Int hypre_exchange_marker(hypre_ParCSRCommPkg *comm_pkg, HYPRE_Int *IN_marker, HYPRE_Int *OUT_marker);
HYPRE_Int hypre_exchange_interp_data( HYPRE_Int **CF_marker_offd, HYPRE_Int **dof_func_offd, hypre_CSRMatrix **A_ext, HYPRE_Int *full_off_procNodes, hypre_CSRMatrix **Sop, hypre_ParCSRCommPkg **extend_comm_pkg, hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int skip_fine_or_same_sign);
void hypre_build_interp_colmap(hypre_ParCSRMatrix *P, HYPRE_Int full_off_procNodes, HYPRE_Int *tmp_CF_marker_offd, HYPRE_Int *fine_to_coarse_offd);
/* gen_redcs_mat.c */
HYPRE_Int hypre_seqAMGSetup ( hypre_ParAMGData *amg_data , HYPRE_Int p_level , HYPRE_Int coarse_threshold );
HYPRE_Int hypre_seqAMGCycle ( hypre_ParAMGData *amg_data , HYPRE_Int p_level , hypre_ParVector **Par_F_array , hypre_ParVector **Par_U_array );
HYPRE_Int hypre_GenerateSubComm ( MPI_Comm comm , HYPRE_Int participate , MPI_Comm *new_comm_ptr );
void hypre_merge_lists ( HYPRE_Int *list1 , HYPRE_Int *list2 , hypre_int *np1 , hypre_MPI_Datatype *dptr );
/* HYPRE_parcsr_amg.c */
HYPRE_Int HYPRE_BoomerAMGCreate ( HYPRE_Solver *solver );
HYPRE_Int HYPRE_BoomerAMGDestroy ( HYPRE_Solver solver );
HYPRE_Int HYPRE_BoomerAMGSetup ( HYPRE_Solver solver , HYPRE_ParCSRMatrix A , HYPRE_ParVector b , HYPRE_ParVector x );
HYPRE_Int HYPRE_BoomerAMGSolve ( HYPRE_Solver solver , HYPRE_ParCSRMatrix A , HYPRE_ParVector b , HYPRE_ParVector x );
HYPRE_Int HYPRE_BoomerAMGSolveT ( HYPRE_Solver solver , HYPRE_ParCSRMatrix A , HYPRE_ParVector b , HYPRE_ParVector x );
HYPRE_Int HYPRE_BoomerAMGSetRestriction ( HYPRE_Solver solver , HYPRE_Int restr_par );
HYPRE_Int HYPRE_BoomerAMGSetMaxLevels ( HYPRE_Solver solver , HYPRE_Int max_levels );
HYPRE_Int HYPRE_BoomerAMGGetMaxLevels ( HYPRE_Solver solver , HYPRE_Int *max_levels );
HYPRE_Int HYPRE_BoomerAMGSetMaxCoarseSize ( HYPRE_Solver solver , HYPRE_Int max_coarse_size );
HYPRE_Int HYPRE_BoomerAMGGetMaxCoarseSize ( HYPRE_Solver solver , HYPRE_Int *max_coarse_size );
HYPRE_Int HYPRE_BoomerAMGSetMinCoarseSize ( HYPRE_Solver solver , HYPRE_Int min_coarse_size );
HYPRE_Int HYPRE_BoomerAMGGetMinCoarseSize ( HYPRE_Solver solver , HYPRE_Int *min_coarse_size );
HYPRE_Int HYPRE_BoomerAMGSetSeqThreshold ( HYPRE_Solver solver , HYPRE_Int seq_threshold );
HYPRE_Int HYPRE_BoomerAMGGetSeqThreshold ( HYPRE_Solver solver , HYPRE_Int *seq_threshold );
HYPRE_Int HYPRE_BoomerAMGSetRedundant ( HYPRE_Solver solver , HYPRE_Int redundant );
HYPRE_Int HYPRE_BoomerAMGGetRedundant ( HYPRE_Solver solver , HYPRE_Int *redundant );
HYPRE_Int HYPRE_BoomerAMGSetStrongThreshold ( HYPRE_Solver solver , HYPRE_Real strong_threshold );
HYPRE_Int HYPRE_BoomerAMGGetStrongThreshold ( HYPRE_Solver solver , HYPRE_Real *strong_threshold );
HYPRE_Int HYPRE_BoomerAMGSetMaxRowSum ( HYPRE_Solver solver , HYPRE_Real max_row_sum );
HYPRE_Int HYPRE_BoomerAMGGetMaxRowSum ( HYPRE_Solver solver , HYPRE_Real *max_row_sum );
HYPRE_Int HYPRE_BoomerAMGSetTruncFactor ( HYPRE_Solver solver , HYPRE_Real trunc_factor );
HYPRE_Int HYPRE_BoomerAMGGetTruncFactor ( HYPRE_Solver solver , HYPRE_Real *trunc_factor );
HYPRE_Int HYPRE_BoomerAMGSetPMaxElmts ( HYPRE_Solver solver , HYPRE_Int P_max_elmts );
HYPRE_Int HYPRE_BoomerAMGGetPMaxElmts ( HYPRE_Solver solver , HYPRE_Int *P_max_elmts );
HYPRE_Int HYPRE_BoomerAMGSetJacobiTruncThreshold ( HYPRE_Solver solver , HYPRE_Real jacobi_trunc_threshold );
HYPRE_Int HYPRE_BoomerAMGGetJacobiTruncThreshold ( HYPRE_Solver solver , HYPRE_Real *jacobi_trunc_threshold );
HYPRE_Int HYPRE_BoomerAMGSetPostInterpType ( HYPRE_Solver solver , HYPRE_Int post_interp_type );
HYPRE_Int HYPRE_BoomerAMGGetPostInterpType ( HYPRE_Solver solver , HYPRE_Int *post_interp_type );
HYPRE_Int HYPRE_BoomerAMGSetSCommPkgSwitch ( HYPRE_Solver solver , HYPRE_Real S_commpkg_switch );
HYPRE_Int HYPRE_BoomerAMGSetInterpType ( HYPRE_Solver solver , HYPRE_Int interp_type );
HYPRE_Int HYPRE_BoomerAMGSetSepWeight ( HYPRE_Solver solver , HYPRE_Int sep_weight );
HYPRE_Int HYPRE_BoomerAMGSetMinIter ( HYPRE_Solver solver , HYPRE_Int min_iter );
HYPRE_Int HYPRE_BoomerAMGSetMaxIter ( HYPRE_Solver solver , HYPRE_Int max_iter );
HYPRE_Int HYPRE_BoomerAMGGetMaxIter ( HYPRE_Solver solver , HYPRE_Int *max_iter );
HYPRE_Int HYPRE_BoomerAMGSetCoarsenType ( HYPRE_Solver solver , HYPRE_Int coarsen_type );
HYPRE_Int HYPRE_BoomerAMGGetCoarsenType ( HYPRE_Solver solver , HYPRE_Int *coarsen_type );
HYPRE_Int HYPRE_BoomerAMGSetMeasureType ( HYPRE_Solver solver , HYPRE_Int measure_type );
HYPRE_Int HYPRE_BoomerAMGGetMeasureType ( HYPRE_Solver solver , HYPRE_Int *measure_type );
HYPRE_Int HYPRE_BoomerAMGSetSetupType ( HYPRE_Solver solver , HYPRE_Int setup_type );
HYPRE_Int HYPRE_BoomerAMGSetOldDefault ( HYPRE_Solver solver );
HYPRE_Int HYPRE_BoomerAMGSetCycleType ( HYPRE_Solver solver , HYPRE_Int cycle_type );
HYPRE_Int HYPRE_BoomerAMGGetCycleType ( HYPRE_Solver solver , HYPRE_Int *cycle_type );
HYPRE_Int HYPRE_BoomerAMGSetTol ( HYPRE_Solver solver , HYPRE_Real tol );
HYPRE_Int HYPRE_BoomerAMGGetTol ( HYPRE_Solver solver , HYPRE_Real *tol );
HYPRE_Int HYPRE_BoomerAMGSetNumGridSweeps ( HYPRE_Solver solver , HYPRE_Int *num_grid_sweeps );
HYPRE_Int HYPRE_BoomerAMGSetNumSweeps ( HYPRE_Solver solver , HYPRE_Int num_sweeps );
HYPRE_Int HYPRE_BoomerAMGSetCycleNumSweeps ( HYPRE_Solver solver , HYPRE_Int num_sweeps , HYPRE_Int k );
HYPRE_Int HYPRE_BoomerAMGGetCycleNumSweeps ( HYPRE_Solver solver , HYPRE_Int *num_sweeps , HYPRE_Int k );
HYPRE_Int HYPRE_BoomerAMGInitGridRelaxation ( HYPRE_Int **num_grid_sweeps_ptr , HYPRE_Int **grid_relax_type_ptr , HYPRE_Int ***grid_relax_points_ptr , HYPRE_Int coarsen_type , HYPRE_Real **relax_weights_ptr , HYPRE_Int max_levels );
HYPRE_Int HYPRE_BoomerAMGSetGridRelaxType ( HYPRE_Solver solver , HYPRE_Int *grid_relax_type );
HYPRE_Int HYPRE_BoomerAMGSetRelaxType ( HYPRE_Solver solver , HYPRE_Int relax_type );
HYPRE_Int HYPRE_BoomerAMGSetCycleRelaxType ( HYPRE_Solver solver , HYPRE_Int relax_type , HYPRE_Int k );
HYPRE_Int HYPRE_BoomerAMGGetCycleRelaxType ( HYPRE_Solver solver , HYPRE_Int *relax_type , HYPRE_Int k );
HYPRE_Int HYPRE_BoomerAMGSetRelaxOrder ( HYPRE_Solver solver , HYPRE_Int relax_order );
HYPRE_Int HYPRE_BoomerAMGSetGridRelaxPoints ( HYPRE_Solver solver , HYPRE_Int **grid_relax_points );
HYPRE_Int HYPRE_BoomerAMGSetRelaxWeight ( HYPRE_Solver solver , HYPRE_Real *relax_weight );
HYPRE_Int HYPRE_BoomerAMGSetRelaxWt ( HYPRE_Solver solver , HYPRE_Real relax_wt );
HYPRE_Int HYPRE_BoomerAMGSetLevelRelaxWt ( HYPRE_Solver solver , HYPRE_Real relax_wt , HYPRE_Int level );
HYPRE_Int HYPRE_BoomerAMGSetOmega ( HYPRE_Solver solver , HYPRE_Real *omega );
HYPRE_Int HYPRE_BoomerAMGSetOuterWt ( HYPRE_Solver solver , HYPRE_Real outer_wt );
HYPRE_Int HYPRE_BoomerAMGSetLevelOuterWt ( HYPRE_Solver solver , HYPRE_Real outer_wt , HYPRE_Int level );
HYPRE_Int HYPRE_BoomerAMGSetLogging ( HYPRE_Solver solver , HYPRE_Int logging );
HYPRE_Int HYPRE_BoomerAMGGetLogging ( HYPRE_Solver solver , HYPRE_Int *logging );
HYPRE_Int HYPRE_BoomerAMGSetPrintLevel ( HYPRE_Solver solver , HYPRE_Int print_level );
HYPRE_Int HYPRE_BoomerAMGGetPrintLevel ( HYPRE_Solver solver , HYPRE_Int *print_level );
HYPRE_Int HYPRE_BoomerAMGSetPrintFileName ( HYPRE_Solver solver , const char *print_file_name );
HYPRE_Int HYPRE_BoomerAMGSetDebugFlag ( HYPRE_Solver solver , HYPRE_Int debug_flag );
HYPRE_Int HYPRE_BoomerAMGGetDebugFlag ( HYPRE_Solver solver , HYPRE_Int *debug_flag );
HYPRE_Int HYPRE_BoomerAMGGetNumIterations ( HYPRE_Solver solver , HYPRE_Int *num_iterations );
HYPRE_Int HYPRE_BoomerAMGGetCumNumIterations ( HYPRE_Solver solver , HYPRE_Int *cum_num_iterations );
HYPRE_Int HYPRE_BoomerAMGGetCumNnzAP ( HYPRE_Solver solver , HYPRE_Real *cum_nnz_AP );
HYPRE_Int HYPRE_BoomerAMGGetResidual ( HYPRE_Solver solver , HYPRE_ParVector *residual );
HYPRE_Int HYPRE_BoomerAMGGetFinalRelativeResidualNorm ( HYPRE_Solver solver , HYPRE_Real *rel_resid_norm );
HYPRE_Int HYPRE_BoomerAMGSetNumFunctions ( HYPRE_Solver solver , HYPRE_Int num_functions );
HYPRE_Int HYPRE_BoomerAMGGetNumFunctions ( HYPRE_Solver solver , HYPRE_Int *num_functions );
HYPRE_Int HYPRE_BoomerAMGSetNodal ( HYPRE_Solver solver , HYPRE_Int nodal );
HYPRE_Int HYPRE_BoomerAMGSetNodalLevels ( HYPRE_Solver solver , HYPRE_Int nodal_levels );
HYPRE_Int HYPRE_BoomerAMGSetNodalDiag ( HYPRE_Solver solver , HYPRE_Int nodal );
HYPRE_Int HYPRE_BoomerAMGSetDofFunc ( HYPRE_Solver solver , HYPRE_Int *dof_func );
HYPRE_Int HYPRE_BoomerAMGSetNumPaths ( HYPRE_Solver solver , HYPRE_Int num_paths );
HYPRE_Int HYPRE_BoomerAMGSetAggNumLevels ( HYPRE_Solver solver , HYPRE_Int agg_num_levels );
HYPRE_Int HYPRE_BoomerAMGSetAggInterpType ( HYPRE_Solver solver , HYPRE_Int agg_interp_type );
HYPRE_Int HYPRE_BoomerAMGSetAggTruncFactor ( HYPRE_Solver solver , HYPRE_Real agg_trunc_factor );
HYPRE_Int HYPRE_BoomerAMGSetAddTruncFactor ( HYPRE_Solver solver , HYPRE_Real add_trunc_factor );
HYPRE_Int HYPRE_BoomerAMGSetMultAddTruncFactor ( HYPRE_Solver solver , HYPRE_Real add_trunc_factor );
HYPRE_Int HYPRE_BoomerAMGSetAggP12TruncFactor ( HYPRE_Solver solver , HYPRE_Real agg_P12_trunc_factor );
HYPRE_Int HYPRE_BoomerAMGSetAggPMaxElmts ( HYPRE_Solver solver , HYPRE_Int agg_P_max_elmts );
HYPRE_Int HYPRE_BoomerAMGSetAddPMaxElmts ( HYPRE_Solver solver , HYPRE_Int add_P_max_elmts );
HYPRE_Int HYPRE_BoomerAMGSetMultAddPMaxElmts ( HYPRE_Solver solver , HYPRE_Int add_P_max_elmts );
HYPRE_Int HYPRE_BoomerAMGSetAddRelaxType ( HYPRE_Solver solver , HYPRE_Int add_rlx_type );
HYPRE_Int HYPRE_BoomerAMGSetAddRelaxWt ( HYPRE_Solver solver , HYPRE_Real add_rlx_wt );
HYPRE_Int HYPRE_BoomerAMGSetAggP12MaxElmts ( HYPRE_Solver solver , HYPRE_Int agg_P12_max_elmts );
HYPRE_Int HYPRE_BoomerAMGSetChebyOrder ( HYPRE_Solver solver , HYPRE_Int order );
HYPRE_Int HYPRE_BoomerAMGSetChebyFraction ( HYPRE_Solver solver , HYPRE_Real ratio );
HYPRE_Int HYPRE_BoomerAMGSetChebyEigEst ( HYPRE_Solver solver , HYPRE_Int eig_est );
HYPRE_Int HYPRE_BoomerAMGSetChebyVariant ( HYPRE_Solver solver , HYPRE_Int variant );
HYPRE_Int HYPRE_BoomerAMGSetChebyScale ( HYPRE_Solver solver , HYPRE_Int scale );
HYPRE_Int HYPRE_BoomerAMGSetAdditive ( HYPRE_Solver solver , HYPRE_Int additive );
HYPRE_Int HYPRE_BoomerAMGGetAdditive ( HYPRE_Solver solver , HYPRE_Int *additive );
HYPRE_Int HYPRE_BoomerAMGSetMultAdditive ( HYPRE_Solver solver , HYPRE_Int mult_additive );
HYPRE_Int HYPRE_BoomerAMGGetMultAdditive ( HYPRE_Solver solver , HYPRE_Int *mult_additive );
HYPRE_Int HYPRE_BoomerAMGSetSimple ( HYPRE_Solver solver , HYPRE_Int simple );
HYPRE_Int HYPRE_BoomerAMGGetSimple ( HYPRE_Solver solver , HYPRE_Int *simple );
HYPRE_Int HYPRE_BoomerAMGSetAddLastLvl ( HYPRE_Solver solver , HYPRE_Int add_last_lvl );
HYPRE_Int HYPRE_BoomerAMGSetNonGalerkinTol ( HYPRE_Solver solver , HYPRE_Real nongalerkin_tol );
HYPRE_Int HYPRE_BoomerAMGSetLevelNonGalerkinTol ( HYPRE_Solver solver , HYPRE_Real nongalerkin_tol , HYPRE_Int level );
HYPRE_Int HYPRE_BoomerAMGSetNonGalerkTol ( HYPRE_Solver solver , HYPRE_Int nongalerk_num_tol , HYPRE_Real *nongalerk_tol );
HYPRE_Int HYPRE_BoomerAMGSetRAP2 ( HYPRE_Solver solver , HYPRE_Int rap2 );
HYPRE_Int HYPRE_BoomerAMGSetKeepTranspose ( HYPRE_Solver solver , HYPRE_Int keepTranspose );
/* HYPRE_parcsr_gmres.c */
HYPRE_Int HYPRE_ParCSRGMRESCreate ( MPI_Comm comm , HYPRE_Solver *solver );
HYPRE_Int HYPRE_ParCSRGMRESDestroy ( HYPRE_Solver solver );
HYPRE_Int HYPRE_ParCSRGMRESSetup ( HYPRE_Solver solver , HYPRE_ParCSRMatrix A , HYPRE_ParVector b , HYPRE_ParVector x );
HYPRE_Int HYPRE_ParCSRGMRESSolve ( HYPRE_Solver solver , HYPRE_ParCSRMatrix A , HYPRE_ParVector b , HYPRE_ParVector x );
HYPRE_Int HYPRE_ParCSRGMRESSetKDim ( HYPRE_Solver solver , HYPRE_Int k_dim );
HYPRE_Int HYPRE_ParCSRGMRESSetTol ( HYPRE_Solver solver , HYPRE_Real tol );
HYPRE_Int HYPRE_ParCSRGMRESSetAbsoluteTol ( HYPRE_Solver solver , HYPRE_Real a_tol );
HYPRE_Int HYPRE_ParCSRGMRESSetMinIter ( HYPRE_Solver solver , HYPRE_Int min_iter );
HYPRE_Int HYPRE_ParCSRGMRESSetMaxIter ( HYPRE_Solver solver , HYPRE_Int max_iter );
HYPRE_Int HYPRE_ParCSRGMRESSetStopCrit ( HYPRE_Solver solver , HYPRE_Int stop_crit );
HYPRE_Int HYPRE_ParCSRGMRESSetPrecond ( HYPRE_Solver solver , HYPRE_PtrToParSolverFcn precond , HYPRE_PtrToParSolverFcn precond_setup , HYPRE_Solver precond_solver );
HYPRE_Int HYPRE_ParCSRGMRESGetPrecond ( HYPRE_Solver solver , HYPRE_Solver *precond_data_ptr );
HYPRE_Int HYPRE_ParCSRGMRESSetLogging ( HYPRE_Solver solver , HYPRE_Int logging );
HYPRE_Int HYPRE_ParCSRGMRESSetPrintLevel ( HYPRE_Solver solver , HYPRE_Int print_level );
HYPRE_Int HYPRE_ParCSRGMRESGetNumIterations ( HYPRE_Solver solver , HYPRE_Int *num_iterations );
HYPRE_Int HYPRE_ParCSRGMRESGetFinalRelativeResidualNorm ( HYPRE_Solver solver , HYPRE_Real *norm );
/* HYPRE_parcsr_pcg.c */
HYPRE_Int HYPRE_ParCSRPCGCreate ( MPI_Comm comm , HYPRE_Solver *solver );
HYPRE_Int HYPRE_ParCSRPCGDestroy ( HYPRE_Solver solver );
HYPRE_Int HYPRE_ParCSRPCGSetup ( HYPRE_Solver solver , HYPRE_ParCSRMatrix A , HYPRE_ParVector b , HYPRE_ParVector x );
HYPRE_Int HYPRE_ParCSRPCGSolve ( HYPRE_Solver solver , HYPRE_ParCSRMatrix A , HYPRE_ParVector b , HYPRE_ParVector x );
HYPRE_Int HYPRE_ParCSRPCGSetTol ( HYPRE_Solver solver , HYPRE_Real tol );
HYPRE_Int HYPRE_ParCSRPCGSetAbsoluteTol ( HYPRE_Solver solver , HYPRE_Real a_tol );
HYPRE_Int HYPRE_ParCSRPCGSetMaxIter ( HYPRE_Solver solver , HYPRE_Int max_iter );
HYPRE_Int HYPRE_ParCSRPCGSetStopCrit ( HYPRE_Solver solver , HYPRE_Int stop_crit );
HYPRE_Int HYPRE_ParCSRPCGSetTwoNorm ( HYPRE_Solver solver , HYPRE_Int two_norm );
HYPRE_Int HYPRE_ParCSRPCGSetRelChange ( HYPRE_Solver solver , HYPRE_Int rel_change );
HYPRE_Int HYPRE_ParCSRPCGSetPrecond ( HYPRE_Solver solver , HYPRE_PtrToParSolverFcn precond , HYPRE_PtrToParSolverFcn precond_setup , HYPRE_Solver precond_solver );
HYPRE_Int HYPRE_ParCSRPCGGetPrecond ( HYPRE_Solver solver , HYPRE_Solver *precond_data_ptr );
HYPRE_Int HYPRE_ParCSRPCGSetPrintLevel ( HYPRE_Solver solver , HYPRE_Int level );
HYPRE_Int HYPRE_ParCSRPCGSetLogging ( HYPRE_Solver solver , HYPRE_Int level );
HYPRE_Int HYPRE_ParCSRPCGGetNumIterations ( HYPRE_Solver solver , HYPRE_Int *num_iterations );
HYPRE_Int HYPRE_ParCSRPCGGetFinalRelativeResidualNorm ( HYPRE_Solver solver , HYPRE_Real *norm );
HYPRE_Int HYPRE_ParCSRDiagScaleSetup ( HYPRE_Solver solver , HYPRE_ParCSRMatrix A , HYPRE_ParVector y , HYPRE_ParVector x );
HYPRE_Int HYPRE_ParCSRDiagScale ( HYPRE_Solver solver , HYPRE_ParCSRMatrix HA , HYPRE_ParVector Hy , HYPRE_ParVector Hx );
/* par_add_cycle.c */
HYPRE_Int hypre_BoomerAMGAdditiveCycle ( void *amg_vdata );
HYPRE_Int hypre_CreateLambda ( void *amg_vdata );
HYPRE_Int hypre_CreateDinv ( void *amg_vdata );
/* par_amg.c */
void *hypre_BoomerAMGCreate ( void );
HYPRE_Int hypre_BoomerAMGDestroy ( void *data );
HYPRE_Int hypre_BoomerAMGSetRestriction ( void *data , HYPRE_Int restr_par );
HYPRE_Int hypre_BoomerAMGSetMaxLevels ( void *data , HYPRE_Int max_levels );
HYPRE_Int hypre_BoomerAMGGetMaxLevels ( void *data , HYPRE_Int *max_levels );
HYPRE_Int hypre_BoomerAMGSetMaxCoarseSize ( void *data , HYPRE_Int max_coarse_size );
HYPRE_Int hypre_BoomerAMGGetMaxCoarseSize ( void *data , HYPRE_Int *max_coarse_size );
HYPRE_Int hypre_BoomerAMGSetMinCoarseSize ( void *data , HYPRE_Int min_coarse_size );
HYPRE_Int hypre_BoomerAMGGetMinCoarseSize ( void *data , HYPRE_Int *min_coarse_size );
HYPRE_Int hypre_BoomerAMGSetSeqThreshold ( void *data , HYPRE_Int seq_threshold );
HYPRE_Int hypre_BoomerAMGGetSeqThreshold ( void *data , HYPRE_Int *seq_threshold );
HYPRE_Int hypre_BoomerAMGSetRedundant ( void *data , HYPRE_Int redundant );
HYPRE_Int hypre_BoomerAMGGetRedundant ( void *data , HYPRE_Int *redundant );
HYPRE_Int hypre_BoomerAMGSetStrongThreshold ( void *data , HYPRE_Real strong_threshold );
HYPRE_Int hypre_BoomerAMGGetStrongThreshold ( void *data , HYPRE_Real *strong_threshold );
HYPRE_Int hypre_BoomerAMGSetMaxRowSum ( void *data , HYPRE_Real max_row_sum );
HYPRE_Int hypre_BoomerAMGGetMaxRowSum ( void *data , HYPRE_Real *max_row_sum );
HYPRE_Int hypre_BoomerAMGSetTruncFactor ( void *data , HYPRE_Real trunc_factor );
HYPRE_Int hypre_BoomerAMGGetTruncFactor ( void *data , HYPRE_Real *trunc_factor );
HYPRE_Int hypre_BoomerAMGSetPMaxElmts ( void *data , HYPRE_Int P_max_elmts );
HYPRE_Int hypre_BoomerAMGGetPMaxElmts ( void *data , HYPRE_Int *P_max_elmts );
HYPRE_Int hypre_BoomerAMGSetJacobiTruncThreshold ( void *data , HYPRE_Real jacobi_trunc_threshold );
HYPRE_Int hypre_BoomerAMGGetJacobiTruncThreshold ( void *data , HYPRE_Real *jacobi_trunc_threshold );
HYPRE_Int hypre_BoomerAMGSetPostInterpType ( void *data , HYPRE_Int post_interp_type );
HYPRE_Int hypre_BoomerAMGGetPostInterpType ( void *data , HYPRE_Int *post_interp_type );
HYPRE_Int hypre_BoomerAMGSetSCommPkgSwitch ( void *data , HYPRE_Real S_commpkg_switch );
HYPRE_Int hypre_BoomerAMGGetSCommPkgSwitch ( void *data , HYPRE_Real *S_commpkg_switch );
HYPRE_Int hypre_BoomerAMGSetInterpType ( void *data , HYPRE_Int interp_type );
HYPRE_Int hypre_BoomerAMGGetInterpType ( void *data , HYPRE_Int *interp_type );
HYPRE_Int hypre_BoomerAMGSetSepWeight ( void *data , HYPRE_Int sep_weight );
HYPRE_Int hypre_BoomerAMGSetMinIter ( void *data , HYPRE_Int min_iter );
HYPRE_Int hypre_BoomerAMGGetMinIter ( void *data , HYPRE_Int *min_iter );
HYPRE_Int hypre_BoomerAMGSetMaxIter ( void *data , HYPRE_Int max_iter );
HYPRE_Int hypre_BoomerAMGGetMaxIter ( void *data , HYPRE_Int *max_iter );
HYPRE_Int hypre_BoomerAMGSetCoarsenType ( void *data , HYPRE_Int coarsen_type );
HYPRE_Int hypre_BoomerAMGGetCoarsenType ( void *data , HYPRE_Int *coarsen_type );
HYPRE_Int hypre_BoomerAMGSetMeasureType ( void *data , HYPRE_Int measure_type );
HYPRE_Int hypre_BoomerAMGGetMeasureType ( void *data , HYPRE_Int *measure_type );
HYPRE_Int hypre_BoomerAMGSetSetupType ( void *data , HYPRE_Int setup_type );
HYPRE_Int hypre_BoomerAMGGetSetupType ( void *data , HYPRE_Int *setup_type );
HYPRE_Int hypre_BoomerAMGSetCycleType ( void *data , HYPRE_Int cycle_type );
HYPRE_Int hypre_BoomerAMGGetCycleType ( void *data , HYPRE_Int *cycle_type );
HYPRE_Int hypre_BoomerAMGSetTol ( void *data , HYPRE_Real tol );
HYPRE_Int hypre_BoomerAMGGetTol ( void *data , HYPRE_Real *tol );
HYPRE_Int hypre_BoomerAMGSetNumSweeps ( void *data , HYPRE_Int num_sweeps );
HYPRE_Int hypre_BoomerAMGSetCycleNumSweeps ( void *data , HYPRE_Int num_sweeps , HYPRE_Int k );
HYPRE_Int hypre_BoomerAMGGetCycleNumSweeps ( void *data , HYPRE_Int *num_sweeps , HYPRE_Int k );
HYPRE_Int hypre_BoomerAMGSetNumGridSweeps ( void *data , HYPRE_Int *num_grid_sweeps );
HYPRE_Int hypre_BoomerAMGGetNumGridSweeps ( void *data , HYPRE_Int **num_grid_sweeps );
HYPRE_Int hypre_BoomerAMGSetRelaxType ( void *data , HYPRE_Int relax_type );
HYPRE_Int hypre_BoomerAMGSetCycleRelaxType ( void *data , HYPRE_Int relax_type , HYPRE_Int k );
HYPRE_Int hypre_BoomerAMGGetCycleRelaxType ( void *data , HYPRE_Int *relax_type , HYPRE_Int k );
HYPRE_Int hypre_BoomerAMGSetRelaxOrder ( void *data , HYPRE_Int relax_order );
HYPRE_Int hypre_BoomerAMGGetRelaxOrder ( void *data , HYPRE_Int *relax_order );
HYPRE_Int hypre_BoomerAMGSetGridRelaxType ( void *data , HYPRE_Int *grid_relax_type );
HYPRE_Int hypre_BoomerAMGGetGridRelaxType ( void *data , HYPRE_Int **grid_relax_type );
HYPRE_Int hypre_BoomerAMGSetGridRelaxPoints ( void *data , HYPRE_Int **grid_relax_points );
HYPRE_Int hypre_BoomerAMGGetGridRelaxPoints ( void *data , HYPRE_Int ***grid_relax_points );
HYPRE_Int hypre_BoomerAMGSetRelaxWeight ( void *data , HYPRE_Real *relax_weight );
HYPRE_Int hypre_BoomerAMGGetRelaxWeight ( void *data , HYPRE_Real **relax_weight );
HYPRE_Int hypre_BoomerAMGSetRelaxWt ( void *data , HYPRE_Real relax_weight );
HYPRE_Int hypre_BoomerAMGSetLevelRelaxWt ( void *data , HYPRE_Real relax_weight , HYPRE_Int level );
HYPRE_Int hypre_BoomerAMGGetLevelRelaxWt ( void *data , HYPRE_Real *relax_weight , HYPRE_Int level );
HYPRE_Int hypre_BoomerAMGSetOmega ( void *data , HYPRE_Real *omega );
HYPRE_Int hypre_BoomerAMGGetOmega ( void *data , HYPRE_Real **omega );
HYPRE_Int hypre_BoomerAMGSetOuterWt ( void *data , HYPRE_Real omega );
HYPRE_Int hypre_BoomerAMGSetLevelOuterWt ( void *data , HYPRE_Real omega , HYPRE_Int level );
HYPRE_Int hypre_BoomerAMGGetLevelOuterWt ( void *data , HYPRE_Real *omega , HYPRE_Int level );
HYPRE_Int hypre_BoomerAMGSetLogging ( void *data , HYPRE_Int logging );
HYPRE_Int hypre_BoomerAMGGetLogging ( void *data , HYPRE_Int *logging );
HYPRE_Int hypre_BoomerAMGSetPrintLevel ( void *data , HYPRE_Int print_level );
HYPRE_Int hypre_BoomerAMGGetPrintLevel ( void *data , HYPRE_Int *print_level );
HYPRE_Int hypre_BoomerAMGSetPrintFileName ( void *data , const char *print_file_name );
HYPRE_Int hypre_BoomerAMGGetPrintFileName ( void *data , char **print_file_name );
HYPRE_Int hypre_BoomerAMGSetNumIterations ( void *data , HYPRE_Int num_iterations );
HYPRE_Int hypre_BoomerAMGSetDebugFlag ( void *data , HYPRE_Int debug_flag );
HYPRE_Int hypre_BoomerAMGGetDebugFlag ( void *data , HYPRE_Int *debug_flag );
HYPRE_Int hypre_BoomerAMGSetNumFunctions ( void *data , HYPRE_Int num_functions );
HYPRE_Int hypre_BoomerAMGGetNumFunctions ( void *data , HYPRE_Int *num_functions );
HYPRE_Int hypre_BoomerAMGSetNodal ( void *data , HYPRE_Int nodal );
HYPRE_Int hypre_BoomerAMGSetNodalLevels ( void *data , HYPRE_Int nodal_levels );
HYPRE_Int hypre_BoomerAMGSetNodalDiag ( void *data , HYPRE_Int nodal );
HYPRE_Int hypre_BoomerAMGSetNumPaths ( void *data , HYPRE_Int num_paths );
HYPRE_Int hypre_BoomerAMGSetAggNumLevels ( void *data , HYPRE_Int agg_num_levels );
HYPRE_Int hypre_BoomerAMGSetAggInterpType ( void *data , HYPRE_Int agg_interp_type );
HYPRE_Int hypre_BoomerAMGSetAggPMaxElmts ( void *data , HYPRE_Int agg_P_max_elmts );
HYPRE_Int hypre_BoomerAMGSetMultAddPMaxElmts ( void *data , HYPRE_Int add_P_max_elmts );
HYPRE_Int hypre_BoomerAMGSetAddRelaxType ( void *data , HYPRE_Int add_rlx_type );
HYPRE_Int hypre_BoomerAMGSetAddRelaxWt ( void *data , HYPRE_Real add_rlx_wt );
HYPRE_Int hypre_BoomerAMGSetAggP12MaxElmts ( void *data , HYPRE_Int agg_P12_max_elmts );
HYPRE_Int hypre_BoomerAMGSetAggTruncFactor ( void *data , HYPRE_Real agg_trunc_factor );
HYPRE_Int hypre_BoomerAMGSetMultAddTruncFactor ( void *data , HYPRE_Real add_trunc_factor );
HYPRE_Int hypre_BoomerAMGSetAggP12TruncFactor ( void *data , HYPRE_Real agg_P12_trunc_factor );
HYPRE_Int hypre_BoomerAMGSetNumPoints ( void *data , HYPRE_Int num_points );
HYPRE_Int hypre_BoomerAMGSetDofFunc ( void *data , HYPRE_Int *dof_func );
HYPRE_Int hypre_BoomerAMGSetPointDofMap ( void *data , HYPRE_Int *point_dof_map );
HYPRE_Int hypre_BoomerAMGSetDofPoint ( void *data , HYPRE_Int *dof_point );
HYPRE_Int hypre_BoomerAMGGetNumIterations ( void *data , HYPRE_Int *num_iterations );
HYPRE_Int hypre_BoomerAMGGetCumNumIterations ( void *data , HYPRE_Int *cum_num_iterations );
HYPRE_Int hypre_BoomerAMGGetCumNnzAP ( void *data , HYPRE_Real *cum_nnz_AP );
HYPRE_Int hypre_BoomerAMGGetResidual ( void *data , hypre_ParVector **resid );
HYPRE_Int hypre_BoomerAMGGetRelResidualNorm ( void *data , HYPRE_Real *rel_resid_norm );
HYPRE_Int hypre_BoomerAMGSetChebyOrder ( void *data , HYPRE_Int order );
HYPRE_Int hypre_BoomerAMGSetChebyFraction ( void *data , HYPRE_Real ratio );
HYPRE_Int hypre_BoomerAMGSetChebyEigEst ( void *data , HYPRE_Int eig_est );
HYPRE_Int hypre_BoomerAMGSetChebyVariant ( void *data , HYPRE_Int variant );
HYPRE_Int hypre_BoomerAMGSetChebyScale ( void *data , HYPRE_Int scale );
HYPRE_Int hypre_BoomerAMGSetAdditive ( void *data , HYPRE_Int additive );
HYPRE_Int hypre_BoomerAMGGetAdditive ( void *data , HYPRE_Int *additive );
HYPRE_Int hypre_BoomerAMGSetMultAdditive ( void *data , HYPRE_Int mult_additive );
HYPRE_Int hypre_BoomerAMGGetMultAdditive ( void *data , HYPRE_Int *mult_additive );
HYPRE_Int hypre_BoomerAMGSetSimple ( void *data , HYPRE_Int simple );
HYPRE_Int hypre_BoomerAMGGetSimple ( void *data , HYPRE_Int *simple );
HYPRE_Int hypre_BoomerAMGSetAddLastLvl ( void *data , HYPRE_Int add_last_lvl );
HYPRE_Int hypre_BoomerAMGSetNonGalerkinTol ( void *data , HYPRE_Real nongalerkin_tol );
HYPRE_Int hypre_BoomerAMGSetLevelNonGalerkinTol ( void *data , HYPRE_Real nongalerkin_tol , HYPRE_Int level );
HYPRE_Int hypre_BoomerAMGSetNonGalerkTol ( void *data , HYPRE_Int nongalerk_num_tol , HYPRE_Real *nongalerk_tol );
HYPRE_Int hypre_BoomerAMGSetRAP2 ( void *data , HYPRE_Int rap2 );
HYPRE_Int hypre_BoomerAMGSetKeepTranspose ( void *data , HYPRE_Int keepTranspose );
/* par_amg_setup.c */
HYPRE_Int hypre_BoomerAMGSetup ( void *amg_vdata , hypre_ParCSRMatrix *A , hypre_ParVector *f , hypre_ParVector *u );
/* par_amg_solve.c */
HYPRE_Int hypre_BoomerAMGSolve ( void *amg_vdata , hypre_ParCSRMatrix *A , hypre_ParVector *f , hypre_ParVector *u );
/* par_cg_relax_wt.c */
HYPRE_Int hypre_BoomerAMGCGRelaxWt ( void *amg_vdata , HYPRE_Int level , HYPRE_Int num_cg_sweeps , HYPRE_Real *rlx_wt_ptr );
HYPRE_Int hypre_Bisection ( HYPRE_Int n , HYPRE_Real *diag , HYPRE_Real *offd , HYPRE_Real y , HYPRE_Real z , HYPRE_Real tol , HYPRE_Int k , HYPRE_Real *ev_ptr );
/* par_cheby.c */
HYPRE_Int hypre_ParCSRRelax_Cheby_Setup ( hypre_ParCSRMatrix *A , HYPRE_Real max_eig , HYPRE_Real min_eig , HYPRE_Real fraction , HYPRE_Int order , HYPRE_Int scale , HYPRE_Int variant , HYPRE_Real **coefs_ptr , HYPRE_Real **ds_ptr );
HYPRE_Int hypre_ParCSRRelax_Cheby_Solve ( hypre_ParCSRMatrix *A , hypre_ParVector *f , HYPRE_Real *ds_data , HYPRE_Real *coefs , HYPRE_Int order , HYPRE_Int scale , HYPRE_Int variant , hypre_ParVector *u , hypre_ParVector *v , hypre_ParVector *r );
/* par_coarsen.c */
HYPRE_Int hypre_BoomerAMGCoarsen ( hypre_ParCSRMatrix *S , hypre_ParCSRMatrix *A , HYPRE_Int CF_init , HYPRE_Int debug_flag , HYPRE_Int **CF_marker_ptr );
HYPRE_Int hypre_BoomerAMGCoarsenRuge ( hypre_ParCSRMatrix *S , hypre_ParCSRMatrix *A , HYPRE_Int measure_type , HYPRE_Int coarsen_type , HYPRE_Int debug_flag , HYPRE_Int **CF_marker_ptr );
HYPRE_Int hypre_BoomerAMGCoarsenFalgout ( hypre_ParCSRMatrix *S , hypre_ParCSRMatrix *A , HYPRE_Int measure_type , HYPRE_Int debug_flag , HYPRE_Int **CF_marker_ptr );
HYPRE_Int hypre_BoomerAMGCoarsenHMIS ( hypre_ParCSRMatrix *S , hypre_ParCSRMatrix *A , HYPRE_Int measure_type , HYPRE_Int debug_flag , HYPRE_Int **CF_marker_ptr );
HYPRE_Int hypre_BoomerAMGCoarsenPMIS ( hypre_ParCSRMatrix *S , hypre_ParCSRMatrix *A , HYPRE_Int CF_init , HYPRE_Int debug_flag , HYPRE_Int **CF_marker_ptr );
/* par_coarse_parms.c */
HYPRE_Int hypre_BoomerAMGCoarseParms ( MPI_Comm comm , HYPRE_Int local_num_variables , HYPRE_Int num_functions , HYPRE_Int *dof_func , HYPRE_Int *CF_marker , HYPRE_Int **coarse_dof_func_ptr , HYPRE_Int **coarse_pnts_global_ptr );
/* par_cycle.c */
HYPRE_Int hypre_BoomerAMGCycle ( void *amg_vdata , hypre_ParVector **F_array , hypre_ParVector **U_array );
/* par_difconv.c */
HYPRE_ParCSRMatrix GenerateDifConv ( MPI_Comm comm , HYPRE_Int nx , HYPRE_Int ny , HYPRE_Int nz , HYPRE_Int P , HYPRE_Int Q , HYPRE_Int R , HYPRE_Int p , HYPRE_Int q , HYPRE_Int r , HYPRE_Real *value );
/* par_indepset.c */
HYPRE_Int hypre_BoomerAMGIndepSetInit ( hypre_ParCSRMatrix *S , HYPRE_Real *measure_array , HYPRE_Int seq_rand );
HYPRE_Int hypre_BoomerAMGIndepSet ( hypre_ParCSRMatrix *S , HYPRE_Real *measure_array , HYPRE_Int *graph_array , HYPRE_Int graph_array_size , HYPRE_Int *graph_array_offd , HYPRE_Int graph_array_offd_size , HYPRE_Int *IS_marker , HYPRE_Int *IS_marker_offd );
/* par_interp.c */
HYPRE_Int hypre_BoomerAMGBuildInterp ( hypre_ParCSRMatrix *A , HYPRE_Int *CF_marker , hypre_ParCSRMatrix *S , HYPRE_Int *num_cpts_global , HYPRE_Int num_functions , HYPRE_Int *dof_func , HYPRE_Int debug_flag , HYPRE_Real trunc_factor , HYPRE_Int max_elmts , HYPRE_Int *col_offd_S_to_A , hypre_ParCSRMatrix **P_ptr );
HYPRE_Int hypre_BoomerAMGBuildInterpHE ( hypre_ParCSRMatrix *A , HYPRE_Int *CF_marker , hypre_ParCSRMatrix *S , HYPRE_Int *num_cpts_global , HYPRE_Int num_functions , HYPRE_Int *dof_func , HYPRE_Int debug_flag , HYPRE_Real trunc_factor , HYPRE_Int max_elmts , HYPRE_Int *col_offd_S_to_A , hypre_ParCSRMatrix **P_ptr );
HYPRE_Int hypre_BoomerAMGBuildDirInterp ( hypre_ParCSRMatrix *A , HYPRE_Int *CF_marker , hypre_ParCSRMatrix *S , HYPRE_Int *num_cpts_global , HYPRE_Int num_functions , HYPRE_Int *dof_func , HYPRE_Int debug_flag , HYPRE_Real trunc_factor , HYPRE_Int max_elmts , HYPRE_Int *col_offd_S_to_A , hypre_ParCSRMatrix **P_ptr );
HYPRE_Int hypre_BoomerAMGInterpTruncation ( hypre_ParCSRMatrix *P , HYPRE_Real trunc_factor , HYPRE_Int max_elmts );
void hypre_qsort2abs ( HYPRE_Int *v , HYPRE_Real *w , HYPRE_Int left , HYPRE_Int right );
HYPRE_Int hypre_BoomerAMGBuildInterpModUnk ( hypre_ParCSRMatrix *A , HYPRE_Int *CF_marker , hypre_ParCSRMatrix *S , HYPRE_Int *num_cpts_global , HYPRE_Int num_functions , HYPRE_Int *dof_func , HYPRE_Int debug_flag , HYPRE_Real trunc_factor , HYPRE_Int max_elmts , HYPRE_Int *col_offd_S_to_A , hypre_ParCSRMatrix **P_ptr );
HYPRE_Int hypre_BoomerAMGTruncandBuild ( hypre_ParCSRMatrix *P , HYPRE_Real trunc_factor , HYPRE_Int max_elmts );
hypre_ParCSRMatrix *hypre_CreateC ( hypre_ParCSRMatrix *A , HYPRE_Real w );
/* par_jacobi_interp.c */
void hypre_BoomerAMGJacobiInterp ( hypre_ParCSRMatrix *A , hypre_ParCSRMatrix **P , hypre_ParCSRMatrix *S , HYPRE_Int num_functions , HYPRE_Int *dof_func , HYPRE_Int *CF_marker , HYPRE_Int level , HYPRE_Real truncation_threshold , HYPRE_Real truncation_threshold_minus );
void hypre_BoomerAMGJacobiInterp_1 ( hypre_ParCSRMatrix *A , hypre_ParCSRMatrix **P , hypre_ParCSRMatrix *S , HYPRE_Int *CF_marker , HYPRE_Int level , HYPRE_Real truncation_threshold , HYPRE_Real truncation_threshold_minus , HYPRE_Int *dof_func , HYPRE_Int *dof_func_offd , HYPRE_Real weight_AF );
void hypre_BoomerAMGTruncateInterp ( hypre_ParCSRMatrix *P , HYPRE_Real eps , HYPRE_Real dlt , HYPRE_Int *CF_marker );
HYPRE_Int hypre_ParCSRMatrix_dof_func_offd ( hypre_ParCSRMatrix *A , HYPRE_Int num_functions , HYPRE_Int *dof_func , HYPRE_Int **dof_func_offd );
/* par_laplace_27pt.c */
HYPRE_ParCSRMatrix GenerateLaplacian27pt ( MPI_Comm comm , HYPRE_Int nx , HYPRE_Int ny , HYPRE_Int nz , HYPRE_Int P , HYPRE_Int Q , HYPRE_Int R , HYPRE_Int p , HYPRE_Int q , HYPRE_Int r , HYPRE_Real *value );
HYPRE_Int hypre_map3 ( HYPRE_Int ix , HYPRE_Int iy , HYPRE_Int iz , HYPRE_Int p , HYPRE_Int q , HYPRE_Int r , HYPRE_Int P , HYPRE_Int Q , HYPRE_Int R , HYPRE_Int *nx_part , HYPRE_Int *ny_part , HYPRE_Int *nz_part , HYPRE_Int *global_part );
/* par_laplace_9pt.c */
HYPRE_ParCSRMatrix GenerateLaplacian9pt ( MPI_Comm comm , HYPRE_Int nx , HYPRE_Int ny , HYPRE_Int P , HYPRE_Int Q , HYPRE_Int p , HYPRE_Int q , HYPRE_Real *value );
HYPRE_Int hypre_map2 ( HYPRE_Int ix , HYPRE_Int iy , HYPRE_Int p , HYPRE_Int q , HYPRE_Int P , HYPRE_Int Q , HYPRE_Int *nx_part , HYPRE_Int *ny_part , HYPRE_Int *global_part );
/* par_laplace.c */
HYPRE_ParCSRMatrix GenerateLaplacian ( MPI_Comm comm , HYPRE_Int nx , HYPRE_Int ny , HYPRE_Int nz , HYPRE_Int P , HYPRE_Int Q , HYPRE_Int R , HYPRE_Int p , HYPRE_Int q , HYPRE_Int r , HYPRE_Real *value );
HYPRE_Int hypre_map ( HYPRE_Int ix , HYPRE_Int iy , HYPRE_Int iz , HYPRE_Int p , HYPRE_Int q , HYPRE_Int r , HYPRE_Int P , HYPRE_Int Q , HYPRE_Int R , HYPRE_Int *nx_part , HYPRE_Int *ny_part , HYPRE_Int *nz_part , HYPRE_Int *global_part );
HYPRE_ParCSRMatrix GenerateSysLaplacian ( MPI_Comm comm , HYPRE_Int nx , HYPRE_Int ny , HYPRE_Int nz , HYPRE_Int P , HYPRE_Int Q , HYPRE_Int R , HYPRE_Int p , HYPRE_Int q , HYPRE_Int r , HYPRE_Int num_fun , HYPRE_Real *mtrx , HYPRE_Real *value );
HYPRE_ParCSRMatrix GenerateSysLaplacianVCoef ( MPI_Comm comm , HYPRE_Int nx , HYPRE_Int ny , HYPRE_Int nz , HYPRE_Int P , HYPRE_Int Q , HYPRE_Int R , HYPRE_Int p , HYPRE_Int q , HYPRE_Int r , HYPRE_Int num_fun , HYPRE_Real *mtrx , HYPRE_Real *value );
/* par_lr_interp.c */
HYPRE_Int hypre_BoomerAMGBuildStdInterp ( hypre_ParCSRMatrix *A , HYPRE_Int *CF_marker , hypre_ParCSRMatrix *S , HYPRE_Int *num_cpts_global , HYPRE_Int num_functions , HYPRE_Int *dof_func , HYPRE_Int debug_flag , HYPRE_Real trunc_factor , HYPRE_Int max_elmts , HYPRE_Int sep_weight , HYPRE_Int *col_offd_S_to_A , hypre_ParCSRMatrix **P_ptr );
HYPRE_Int hypre_BoomerAMGBuildExtPIInterp ( hypre_ParCSRMatrix *A , HYPRE_Int *CF_marker , hypre_ParCSRMatrix *S , HYPRE_Int *num_cpts_global , HYPRE_Int num_functions , HYPRE_Int *dof_func , HYPRE_Int debug_flag , HYPRE_Real trunc_factor , HYPRE_Int max_elmts , HYPRE_Int *col_offd_S_to_A , hypre_ParCSRMatrix **P_ptr );
HYPRE_Int hypre_BoomerAMGBuildExtPICCInterp ( hypre_ParCSRMatrix *A , HYPRE_Int *CF_marker , hypre_ParCSRMatrix *S , HYPRE_Int *num_cpts_global , HYPRE_Int num_functions , HYPRE_Int *dof_func , HYPRE_Int debug_flag , HYPRE_Real trunc_factor , HYPRE_Int max_elmts , HYPRE_Int *col_offd_S_to_A , hypre_ParCSRMatrix **P_ptr );
HYPRE_Int hypre_BoomerAMGBuildFFInterp ( hypre_ParCSRMatrix *A , HYPRE_Int *CF_marker , hypre_ParCSRMatrix *S , HYPRE_Int *num_cpts_global , HYPRE_Int num_functions , HYPRE_Int *dof_func , HYPRE_Int debug_flag , HYPRE_Real trunc_factor , HYPRE_Int max_elmts , HYPRE_Int *col_offd_S_to_A , hypre_ParCSRMatrix **P_ptr );
HYPRE_Int hypre_BoomerAMGBuildFF1Interp ( hypre_ParCSRMatrix *A , HYPRE_Int *CF_marker , hypre_ParCSRMatrix *S , HYPRE_Int *num_cpts_global , HYPRE_Int num_functions , HYPRE_Int *dof_func , HYPRE_Int debug_flag , HYPRE_Real trunc_factor , HYPRE_Int max_elmts , HYPRE_Int *col_offd_S_to_A , hypre_ParCSRMatrix **P_ptr );
HYPRE_Int hypre_BoomerAMGBuildExtInterp ( hypre_ParCSRMatrix *A , HYPRE_Int *CF_marker , hypre_ParCSRMatrix *S , HYPRE_Int *num_cpts_global , HYPRE_Int num_functions , HYPRE_Int *dof_func , HYPRE_Int debug_flag , HYPRE_Real trunc_factor , HYPRE_Int max_elmts , HYPRE_Int *col_offd_S_to_A , hypre_ParCSRMatrix **P_ptr );
/* par_multi_interp.c */
HYPRE_Int hypre_BoomerAMGBuildMultipass ( hypre_ParCSRMatrix *A , HYPRE_Int *CF_marker , hypre_ParCSRMatrix *S , HYPRE_Int *num_cpts_global , HYPRE_Int num_functions , HYPRE_Int *dof_func , HYPRE_Int debug_flag , HYPRE_Real trunc_factor , HYPRE_Int P_max_elmts , HYPRE_Int weight_option , HYPRE_Int *col_offd_S_to_A , hypre_ParCSRMatrix **P_ptr );
/* par_nongalerkin.c */
HYPRE_Int hypre_GrabSubArray ( HYPRE_Int *indices , HYPRE_Int start , HYPRE_Int end , HYPRE_Int *array , HYPRE_Int *output );
void hypre_qsort2_abs ( HYPRE_Int *v , HYPRE_Real *w , HYPRE_Int left , HYPRE_Int right );
HYPRE_Int hypre_IntersectTwoArrays ( HYPRE_Int *x , HYPRE_Real *x_data , HYPRE_Int x_length , HYPRE_Int *y , HYPRE_Int y_length , HYPRE_Int *z , HYPRE_Real *output_x_data , HYPRE_Int *intersect_length );
HYPRE_Int hypre_SortedCopyParCSRData ( hypre_ParCSRMatrix *A , hypre_ParCSRMatrix *B );
HYPRE_Int hypre_BoomerAMG_MyCreateS ( hypre_ParCSRMatrix *A , HYPRE_Real strength_threshold , HYPRE_Real max_row_sum , HYPRE_Int num_functions , HYPRE_Int *dof_func , hypre_ParCSRMatrix **S_ptr );
HYPRE_Int hypre_NonGalerkinIJBufferInit ( HYPRE_Int *ijbuf_cnt , HYPRE_Int *ijbuf_rowcounter , HYPRE_Int *ijbuf_numcols );
HYPRE_Int hypre_NonGalerkinIJBufferNewRow ( HYPRE_Int *ijbuf_rownums , HYPRE_Int *ijbuf_numcols , HYPRE_Int *ijbuf_rowcounter , HYPRE_Int new_row );
HYPRE_Int hypre_NonGalerkinIJBufferCompressRow ( HYPRE_Int *ijbuf_cnt , HYPRE_Int ijbuf_rowcounter , HYPRE_Real *ijbuf_data , HYPRE_Int *ijbuf_cols , HYPRE_Int *ijbuf_rownums , HYPRE_Int *ijbuf_numcols );
HYPRE_Int hypre_NonGalerkinIJBufferCompress ( HYPRE_Int ijbuf_size , HYPRE_Int *ijbuf_cnt , HYPRE_Int *ijbuf_rowcounter , HYPRE_Real **ijbuf_data , HYPRE_Int **ijbuf_cols , HYPRE_Int **ijbuf_rownums , HYPRE_Int **ijbuf_numcols );
HYPRE_Int hypre_NonGalerkinIJBufferWrite ( HYPRE_IJMatrix B , HYPRE_Int *ijbuf_cnt , HYPRE_Int ijbuf_size , HYPRE_Int *ijbuf_rowcounter , HYPRE_Real **ijbuf_data , HYPRE_Int **ijbuf_cols , HYPRE_Int **ijbuf_rownums , HYPRE_Int **ijbuf_numcols , HYPRE_Int row_to_write , HYPRE_Int col_to_write , HYPRE_Real val_to_write );
HYPRE_Int hypre_NonGalerkinIJBufferEmpty ( HYPRE_IJMatrix B , HYPRE_Int ijbuf_size , HYPRE_Int *ijbuf_cnt , HYPRE_Int ijbuf_rowcounter , HYPRE_Real **ijbuf_data , HYPRE_Int **ijbuf_cols , HYPRE_Int **ijbuf_rownums , HYPRE_Int **ijbuf_numcols );
hypre_ParCSRMatrix * hypre_NonGalerkinSparsityPattern(hypre_ParCSRMatrix *R_IAP, hypre_ParCSRMatrix *RAP, HYPRE_Int * CF_marker, HYPRE_Real droptol, HYPRE_Int sym_collapse, HYPRE_Int collapse_beta );
HYPRE_Int hypre_BoomerAMGBuildNonGalerkinCoarseOperator( hypre_ParCSRMatrix **RAP_ptr, hypre_ParCSRMatrix *AP, HYPRE_Real strong_threshold, HYPRE_Real max_row_sum, HYPRE_Int num_functions, HYPRE_Int * dof_func_value, HYPRE_Real S_commpkg_switch, HYPRE_Int * CF_marker, HYPRE_Real droptol, HYPRE_Int sym_collapse, HYPRE_Real lump_percent, HYPRE_Int collapse_beta );
/* par_rap.c */
hypre_CSRMatrix *hypre_ExchangeRAPData ( hypre_CSRMatrix *RAP_int , hypre_ParCSRCommPkg *comm_pkg_RT );
HYPRE_Int hypre_BoomerAMGBuildCoarseOperator ( hypre_ParCSRMatrix *RT , hypre_ParCSRMatrix *A , hypre_ParCSRMatrix *P , hypre_ParCSRMatrix **RAP_ptr );
HYPRE_Int hypre_BoomerAMGBuildCoarseOperatorKT ( hypre_ParCSRMatrix *RT , hypre_ParCSRMatrix *A , hypre_ParCSRMatrix *P , HYPRE_Int keepTranspose, hypre_ParCSRMatrix **RAP_ptr );
/* par_rap_communication.c */
HYPRE_Int hypre_GetCommPkgRTFromCommPkgA ( hypre_ParCSRMatrix *RT , hypre_ParCSRMatrix *A , HYPRE_Int *fine_to_coarse_offd );
HYPRE_Int hypre_GenerateSendMapAndCommPkg ( MPI_Comm comm , HYPRE_Int num_sends , HYPRE_Int num_recvs , HYPRE_Int *recv_procs , HYPRE_Int *send_procs , HYPRE_Int *recv_vec_starts , hypre_ParCSRMatrix *A );
/* par_relax.c */
HYPRE_Int hypre_BoomerAMGRelax ( hypre_ParCSRMatrix *A , hypre_ParVector *f , HYPRE_Int *cf_marker , HYPRE_Int relax_type , HYPRE_Int relax_points , HYPRE_Real relax_weight , HYPRE_Real omega , HYPRE_Real *l1_norms , hypre_ParVector *u , hypre_ParVector *Vtemp , hypre_ParVector *Ztemp );
HYPRE_Int hypre_GaussElimSetup ( hypre_ParAMGData *amg_data , HYPRE_Int level , HYPRE_Int relax_type );
HYPRE_Int hypre_GaussElimSolve ( hypre_ParAMGData *amg_data , HYPRE_Int level , HYPRE_Int relax_type );
HYPRE_Int gselim ( HYPRE_Real *A , HYPRE_Real *x , HYPRE_Int n );
/* par_relax_interface.c */
HYPRE_Int hypre_BoomerAMGRelaxIF ( hypre_ParCSRMatrix *A , hypre_ParVector *f , HYPRE_Int *cf_marker , HYPRE_Int relax_type , HYPRE_Int relax_order , HYPRE_Int cycle_type , HYPRE_Real relax_weight , HYPRE_Real omega , HYPRE_Real *l1_norms , hypre_ParVector *u , hypre_ParVector *Vtemp , hypre_ParVector *Ztemp );
/* par_relax_more.c */
HYPRE_Int hypre_ParCSRMaxEigEstimate ( hypre_ParCSRMatrix *A , HYPRE_Int scale , HYPRE_Real *max_eig );
HYPRE_Int hypre_ParCSRMaxEigEstimateCG ( hypre_ParCSRMatrix *A , HYPRE_Int scale , HYPRE_Int max_iter , HYPRE_Real *max_eig , HYPRE_Real *min_eig );
HYPRE_Int hypre_ParCSRRelax_Cheby ( hypre_ParCSRMatrix *A , hypre_ParVector *f , HYPRE_Real max_eig , HYPRE_Real min_eig , HYPRE_Real fraction , HYPRE_Int order , HYPRE_Int scale , HYPRE_Int variant , hypre_ParVector *u , hypre_ParVector *v , hypre_ParVector *r );
HYPRE_Int hypre_BoomerAMGRelax_FCFJacobi ( hypre_ParCSRMatrix *A , hypre_ParVector *f , HYPRE_Int *cf_marker , HYPRE_Real relax_weight , hypre_ParVector *u , hypre_ParVector *Vtemp );
HYPRE_Int hypre_ParCSRRelax_CG ( HYPRE_Solver solver , hypre_ParCSRMatrix *A , hypre_ParVector *f , hypre_ParVector *u , HYPRE_Int num_its );
HYPRE_Int hypre_LINPACKcgtql1 ( HYPRE_Int *n , HYPRE_Real *d , HYPRE_Real *e , HYPRE_Int *ierr );
HYPRE_Real hypre_LINPACKcgpthy ( HYPRE_Real *a , HYPRE_Real *b );
HYPRE_Int hypre_ParCSRRelax_L1_Jacobi ( hypre_ParCSRMatrix *A , hypre_ParVector *f , HYPRE_Int *cf_marker , HYPRE_Int relax_points , HYPRE_Real relax_weight , HYPRE_Real *l1_norms , hypre_ParVector *u , hypre_ParVector *Vtemp );
/* par_rotate_7pt.c */
HYPRE_ParCSRMatrix GenerateRotate7pt ( MPI_Comm comm , HYPRE_Int nx , HYPRE_Int ny , HYPRE_Int P , HYPRE_Int Q , HYPRE_Int p , HYPRE_Int q , HYPRE_Real alpha , HYPRE_Real eps );
/* par_scaled_matnorm.c */
HYPRE_Int hypre_ParCSRMatrixScaledNorm ( hypre_ParCSRMatrix *A , HYPRE_Real *scnorm );
/* par_stats.c */
HYPRE_Int hypre_BoomerAMGSetupStats ( void *amg_vdata , hypre_ParCSRMatrix *A );
HYPRE_Int hypre_BoomerAMGWriteSolverParams ( void *data );
/* par_strength.c */
HYPRE_Int hypre_BoomerAMGCreateS ( hypre_ParCSRMatrix *A , HYPRE_Real strength_threshold , HYPRE_Real max_row_sum , HYPRE_Int num_functions , HYPRE_Int *dof_func , hypre_ParCSRMatrix **S_ptr );
HYPRE_Int hypre_BoomerAMGCreateSabs ( hypre_ParCSRMatrix *A , HYPRE_Real strength_threshold , HYPRE_Real max_row_sum , HYPRE_Int num_functions , HYPRE_Int *dof_func , hypre_ParCSRMatrix **S_ptr );
HYPRE_Int hypre_BoomerAMGCreateSCommPkg ( hypre_ParCSRMatrix *A , hypre_ParCSRMatrix *S , HYPRE_Int **col_offd_S_to_A_ptr );
HYPRE_Int hypre_BoomerAMGCreate2ndS ( hypre_ParCSRMatrix *S , HYPRE_Int *CF_marker , HYPRE_Int num_paths , HYPRE_Int *coarse_row_starts , hypre_ParCSRMatrix **C_ptr );
HYPRE_Int hypre_BoomerAMGCorrectCFMarker ( HYPRE_Int *CF_marker , HYPRE_Int num_var , HYPRE_Int *new_CF_marker );
HYPRE_Int hypre_BoomerAMGCorrectCFMarker2 ( HYPRE_Int *CF_marker , HYPRE_Int num_var , HYPRE_Int *new_CF_marker );
/* partial.c */
HYPRE_Int hypre_BoomerAMGBuildPartialExtPIInterp ( hypre_ParCSRMatrix *A , HYPRE_Int *CF_marker , hypre_ParCSRMatrix *S , HYPRE_Int *num_cpts_global , HYPRE_Int *num_old_cpts_global , HYPRE_Int num_functions , HYPRE_Int *dof_func , HYPRE_Int debug_flag , HYPRE_Real trunc_factor , HYPRE_Int max_elmts , HYPRE_Int *col_offd_S_to_A , hypre_ParCSRMatrix **P_ptr );
HYPRE_Int hypre_BoomerAMGBuildPartialStdInterp ( hypre_ParCSRMatrix *A , HYPRE_Int *CF_marker , hypre_ParCSRMatrix *S , HYPRE_Int *num_cpts_global , HYPRE_Int *num_old_cpts_global , HYPRE_Int num_functions , HYPRE_Int *dof_func , HYPRE_Int debug_flag , HYPRE_Real trunc_factor , HYPRE_Int max_elmts , HYPRE_Int sep_weight , HYPRE_Int *col_offd_S_to_A , hypre_ParCSRMatrix **P_ptr );
HYPRE_Int hypre_BoomerAMGBuildPartialExtInterp ( hypre_ParCSRMatrix *A , HYPRE_Int *CF_marker , hypre_ParCSRMatrix *S , HYPRE_Int *num_cpts_global , HYPRE_Int *num_old_cpts_global , HYPRE_Int num_functions , HYPRE_Int *dof_func , HYPRE_Int debug_flag , HYPRE_Real trunc_factor , HYPRE_Int max_elmts , HYPRE_Int *col_offd_S_to_A , hypre_ParCSRMatrix **P_ptr );
/* par_vardifconv.c */
HYPRE_ParCSRMatrix GenerateVarDifConv ( MPI_Comm comm , HYPRE_Int nx , HYPRE_Int ny , HYPRE_Int nz , HYPRE_Int P , HYPRE_Int Q , HYPRE_Int R , HYPRE_Int p , HYPRE_Int q , HYPRE_Int r , HYPRE_Real eps , HYPRE_ParVector *rhs_ptr );
HYPRE_Real afun ( HYPRE_Real xx , HYPRE_Real yy , HYPRE_Real zz );
HYPRE_Real bfun ( HYPRE_Real xx , HYPRE_Real yy , HYPRE_Real zz );
HYPRE_Real cfun ( HYPRE_Real xx , HYPRE_Real yy , HYPRE_Real zz );
HYPRE_Real dfun ( HYPRE_Real xx , HYPRE_Real yy , HYPRE_Real zz );
HYPRE_Real efun ( HYPRE_Real xx , HYPRE_Real yy , HYPRE_Real zz );
HYPRE_Real ffun ( HYPRE_Real xx , HYPRE_Real yy , HYPRE_Real zz );
HYPRE_Real gfun ( HYPRE_Real xx , HYPRE_Real yy , HYPRE_Real zz );
HYPRE_Real rfun ( HYPRE_Real xx , HYPRE_Real yy , HYPRE_Real zz );
HYPRE_Real bndfun ( HYPRE_Real xx , HYPRE_Real yy , HYPRE_Real zz );
/* pcg_par.c */
char *hypre_ParKrylovCAlloc ( HYPRE_Int count , HYPRE_Int elt_size );
HYPRE_Int hypre_ParKrylovFree ( char *ptr );
void *hypre_ParKrylovCreateVector ( void *vvector );
void *hypre_ParKrylovCreateVectorArray ( HYPRE_Int n , void *vvector );
HYPRE_Int hypre_ParKrylovDestroyVector ( void *vvector );
void *hypre_ParKrylovMatvecCreate ( void *A , void *x );
HYPRE_Int hypre_ParKrylovMatvec ( void *matvec_data , HYPRE_Complex alpha , void *A , void *x , HYPRE_Complex beta , void *y );
HYPRE_Int hypre_ParKrylovMatvecT ( void *matvec_data , HYPRE_Complex alpha , void *A , void *x , HYPRE_Complex beta , void *y );
HYPRE_Int hypre_ParKrylovMatvecDestroy ( void *matvec_data );
HYPRE_Real hypre_ParKrylovInnerProd ( void *x , void *y );
HYPRE_Int hypre_ParKrylovCopyVector ( void *x , void *y );
HYPRE_Int hypre_ParKrylovClearVector ( void *x );
HYPRE_Int hypre_ParKrylovScaleVector ( HYPRE_Complex alpha , void *x );
HYPRE_Int hypre_ParKrylovAxpy ( HYPRE_Complex alpha , void *x , void *y );
HYPRE_Int hypre_ParKrylovCommInfo ( void *A , HYPRE_Int *my_id , HYPRE_Int *num_procs );
HYPRE_Int hypre_ParKrylovIdentitySetup ( void *vdata , void *A , void *b , void *x );
HYPRE_Int hypre_ParKrylovIdentity ( void *vdata , void *A , void *b , void *x );
#ifdef __cplusplus
}
#endif
#endif
| 66,694 | 74.446833 | 566 | h |
AMG | AMG-master/parcsr_ls/ams.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include "_hypre_parcsr_ls.h"
#include "float.h"
#include "ams.h"
/*--------------------------------------------------------------------------
 * hypre_ParCSRRelax
 *
 * Relaxation on the ParCSR matrix A with right-hand side f and
 * initial guess u. Possible values for relax_type are:
 *
 * 1 = l1-scaled (or weighted) Jacobi
 * 2 = l1-scaled block Gauss-Seidel/SSOR
 * 3 = Kaczmarz
 * 4 = truncated version of 2 (Remark 6.2 in smoothers paper)
 * x = BoomerAMG relaxation with relax_type = |x|
 *     (16 = Cheby)
 *
 * The default value of relax_type is 2.
 *
 * Returns hypre_error_flag. The vectors v and z are used as scratch
 * space; u is updated in place.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRRelax(/* matrix to relax with */
                            hypre_ParCSRMatrix *A,
                            /* right-hand side */
                            hypre_ParVector *f,
                            /* relaxation type */
                            HYPRE_Int relax_type,
                            /* number of sweeps */
                            HYPRE_Int relax_times,
                            /* l1 norms of the rows of A */
                            HYPRE_Real *l1_norms,
                            /* damping coefficient (usually <= 1) */
                            HYPRE_Real relax_weight,
                            /* SOR parameter (usually in (0,2) */
                            HYPRE_Real omega,
                            /* for cheby smoothers */
                            HYPRE_Real max_eig_est,
                            HYPRE_Real min_eig_est,
                            HYPRE_Int cheby_order,
                            HYPRE_Real cheby_fraction,
                            /* initial/updated approximation */
                            hypre_ParVector *u,
                            /* temporary vector */
                            hypre_ParVector *v,
                            /* temporary vector */
                            hypre_ParVector *z)
{
   HYPRE_Int sweep;

   HYPRE_Real *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
   HYPRE_Real *f_data = hypre_VectorData(hypre_ParVectorLocalVector(f));
   HYPRE_Real *v_data = hypre_VectorData(hypre_ParVectorLocalVector(v));

   for (sweep = 0; sweep < relax_times; sweep++)
   {
      if (relax_type == 1) /* l1-scaled Jacobi */
      {
         HYPRE_Int i, num_rows = hypre_ParCSRMatrixNumRows(A);

         /* v <- w (f - A u), computed as a single parallel matvec */
         hypre_ParVectorCopy(f,v);
         hypre_ParCSRMatrixMatvec(-relax_weight, A, u, relax_weight, v);

         /* u += w D^{-1}(f - A u), where D_ii = ||A(i,:)||_1 */
         for (i = 0; i < num_rows; i++)
            u_data[i] += v_data[i] / l1_norms[i];
      }
      else if (relax_type == 2 || relax_type == 4) /* offd-l1-scaled block GS */
      {
         hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
         HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
         HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
         HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);

         hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
         HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
         HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
         HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);

         HYPRE_Int i, j;
         HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
         HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
         HYPRE_Real *u_offd_data = hypre_TAlloc(HYPRE_Real,num_cols_offd);

         HYPRE_Real res;

         HYPRE_Int num_procs;
         hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs);

         /* Copy off-diagonal values of u to the current processor.
            The off-processor entries are held fixed during the local
            sweeps below, which makes this a (block) hybrid GS/Jacobi. */
         if (num_procs > 1)
         {
            hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            HYPRE_Int num_sends;
            HYPRE_Real *u_buf_data;
            hypre_ParCSRCommHandle *comm_handle;

            HYPRE_Int index = 0, start;

            if (!comm_pkg)
            {
               hypre_MatvecCommPkgCreate(A);
               comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            }

            num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
            u_buf_data = hypre_TAlloc(HYPRE_Real,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));

            for (i = 0; i < num_sends; i++)
            {
               start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
               for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
                  u_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
            }
            comm_handle = hypre_ParCSRCommHandleCreate(1,comm_pkg,u_buf_data,u_offd_data);
            hypre_ParCSRCommHandleDestroy(comm_handle);

            hypre_TFree(u_buf_data);
         }

         if (relax_weight == 1.0 && omega == 1.0) /* symmetric Gauss-Seidel */
         {
            /* Forward local pass */
            for (i = 0; i < num_rows; i++)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += res / l1_norms[i];
            }
            /* Backward local pass */
            for (i = num_rows-1; i > -1; i--)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += res / l1_norms[i];
            }
         }
         else if (relax_weight == 1.0) /* SSOR */
         {
            /* Same as above with an omega over-relaxation factor. */
            /* Forward local pass */
            for (i = 0; i < num_rows; i++)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += omega * res / l1_norms[i];
            }
            /* Backward local pass */
            for (i = num_rows-1; i > -1; i--)
            {
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += omega * res / l1_norms[i];
            }
         }
         else /* scaled SSOR */
         {
            HYPRE_Real dif;
            HYPRE_Real c1 = omega * relax_weight;
            HYPRE_Real c2 = omega * (1.0 - relax_weight);

            /* Forward local pass (save initial guess in v_data) */
            for (i = 0; i < num_rows; i++)
            {
               dif = 0.0;
               v_data[i] = u_data[i];
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
                  /* dif accumulates the change already made to earlier
                     rows in this pass (v_data holds the pre-pass values) */
                  if (A_diag_J[j] < i)
                     dif += A_diag_data[j] * (v_data[A_diag_J[j]] - u_data[A_diag_J[j]]);
               }
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += (c1 * res + c2 * dif) / l1_norms[i];
            }
            /* Backward local pass */
            for (i = num_rows-1; i > -1; i--)
            {
               dif = 0.0;
               res = f_data[i];
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  res -= A_diag_data[j] * u_data[A_diag_J[j]];
                  if (A_diag_J[j] > i)
                     dif += A_diag_data[j] * (v_data[A_diag_J[j]] - u_data[A_diag_J[j]]);
               }
               if (num_cols_offd)
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
               u_data[i] += (c1 * res + c2 * dif) / l1_norms[i];
            }
         }

         hypre_TFree(u_offd_data);
      }
      else if (relax_type == 3) /* Kaczmarz */
      {
         hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
         HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
         HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
         HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);

         hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
         HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
         HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
         HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);

         HYPRE_Int i, j;
         HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
         HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
         HYPRE_Real *u_offd_data = hypre_TAlloc(HYPRE_Real,num_cols_offd);

         HYPRE_Real res;

         HYPRE_Int num_procs;
         hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs);

         /* Copy off-diagonal values of u to the current processor */
         if (num_procs > 1)
         {
            hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            HYPRE_Int num_sends;
            HYPRE_Real *u_buf_data;
            hypre_ParCSRCommHandle *comm_handle;

            HYPRE_Int index = 0, start;

            if (!comm_pkg)
            {
               hypre_MatvecCommPkgCreate(A);
               comm_pkg = hypre_ParCSRMatrixCommPkg(A);
            }

            num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
            u_buf_data = hypre_TAlloc(HYPRE_Real,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));

            for (i = 0; i < num_sends; i++)
            {
               start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
               for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
                  u_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
            }
            comm_handle = hypre_ParCSRCommHandleCreate(1,comm_pkg,u_buf_data,u_offd_data);
            hypre_ParCSRCommHandleDestroy(comm_handle);

            hypre_TFree(u_buf_data);
         }

         /* Forward local pass: project u onto the hyperplane of each
            row equation in turn, scaled by 1/l1_norms (here expected to
            hold the row l2 norms squared, see option 3 of
            hypre_ParCSRComputeL1Norms). Note that only the local (diag)
            part of u is updated. */
         for (i = 0; i < num_rows; i++)
         {
            res = f_data[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               res -= A_diag_data[j] * u_data[A_diag_J[j]];
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
            res /= l1_norms[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               u_data[A_diag_J[j]] += omega * res * A_diag_data[j];
         }

         /* Backward local pass */
         for (i = num_rows-1; i > -1; i--)
         {
            res = f_data[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               res -= A_diag_data[j] * u_data[A_diag_J[j]];
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  res -= A_offd_data[j] * u_offd_data[A_offd_J[j]];
            res /= l1_norms[i];
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               u_data[A_diag_J[j]] += omega * res * A_diag_data[j];
         }

         hypre_TFree(u_offd_data);
      }
      else /* call BoomerAMG relaxation */
      {
         if (relax_type == 16)
         {
            hypre_ParCSRRelax_Cheby(A,
                                    f,
                                    max_eig_est,
                                    min_eig_est,
                                    cheby_fraction, cheby_order, 1,
                                    0, u, v, z);
         }
         else
            hypre_BoomerAMGRelax(A, f, NULL, hypre_abs(relax_type), 0, relax_weight,
                                 omega, l1_norms, u, v, z);
      }
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_ParVectorInRangeOf
 *
 * Create and return a new parallel vector compatible with the range of
 * the given matrix (i.e. partitioned like the rows of A).
 *--------------------------------------------------------------------------*/
hypre_ParVector *hypre_ParVectorInRangeOf(hypre_ParCSRMatrix *A)
{
   hypre_ParVector *vec =
      hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                            hypre_ParCSRMatrixGlobalNumRows(A),
                            hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(vec);

   /* The vector owns its data, but borrows the matrix's row partitioning. */
   hypre_ParVectorOwnsData(vec) = 1;
   hypre_ParVectorOwnsPartitioning(vec) = 0;

   return vec;
}
/*--------------------------------------------------------------------------
 * hypre_ParVectorInDomainOf
 *
 * Create and return a new parallel vector compatible with the domain of
 * the given matrix (i.e. partitioned like the columns of A).
 *--------------------------------------------------------------------------*/
hypre_ParVector *hypre_ParVectorInDomainOf(hypre_ParCSRMatrix *A)
{
   hypre_ParVector *vec =
      hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                            hypre_ParCSRMatrixGlobalNumCols(A),
                            hypre_ParCSRMatrixColStarts(A));
   hypre_ParVectorInitialize(vec);

   /* The vector owns its data, but borrows the matrix's column partitioning. */
   hypre_ParVectorOwnsData(vec) = 1;
   hypre_ParVectorOwnsPartitioning(vec) = 0;

   return vec;
}
/*--------------------------------------------------------------------------
 * hypre_ParVectorBlockSplit
 *
 * Extract the dim sub-vectors x_0,...,x_{dim-1} composing a parallel
 * block vector x. It is assumed that &x[i] = [x_0[i],...,x_{dim-1}[i]],
 * i.e. the components of x are interleaved node by node.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParVectorBlockSplit(hypre_ParVector *x,
                                    hypre_ParVector *x_[3],
                                    HYPRE_Int dim)
{
   HYPRE_Int k, d;
   HYPRE_Real *interleaved = hypre_VectorData(hypre_ParVectorLocalVector(x));
   HYPRE_Real *component[3];
   HYPRE_Int nloc = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0]));

   for (d = 0; d < dim; d++)
      component[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d]));

   /* De-interleave: entry d of node k lives at interleaved[dim*k + d]. */
   for (d = 0; d < dim; d++)
      for (k = 0; k < nloc; k++)
         component[d][k] = interleaved[dim*k + d];

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_ParVectorBlockGather
 *
 * Compose a parallel block vector x from dim given sub-vectors
 * x_0,...,x_{dim-1}, such that &x[i] = [x_0[i],...,x_{dim-1}[i]].
 * This is the inverse of hypre_ParVectorBlockSplit.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParVectorBlockGather(hypre_ParVector *x,
                                     hypre_ParVector *x_[3],
                                     HYPRE_Int dim)
{
   HYPRE_Int k, d;
   HYPRE_Real *interleaved = hypre_VectorData(hypre_ParVectorLocalVector(x));
   HYPRE_Real *component[3];
   HYPRE_Int nloc = hypre_VectorSize(hypre_ParVectorLocalVector(x_[0]));

   for (d = 0; d < dim; d++)
      component[d] = hypre_VectorData(hypre_ParVectorLocalVector(x_[d]));

   /* Interleave: entry d of node k goes to interleaved[dim*k + d]. */
   for (d = 0; d < dim; d++)
      for (k = 0; k < nloc; k++)
         interleaved[dim*k + d] = component[d][k];

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGBlockSolve
 *
 * Apply the block-diagonal solver diag(B) to the system diag(A) x = b.
 * Here B is a given BoomerAMG solver for A, while x and b are "block"
 * parallel vectors: each scalar component is extracted, solved with B,
 * and gathered back.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_BoomerAMGBlockSolve(void *B,
                                    hypre_ParCSRMatrix *A,
                                    hypre_ParVector *b,
                                    hypre_ParVector *x)
{
   HYPRE_Int d;
   hypre_ParVector *rhs_[3], *sol_[3];

   /* The block size is implied by the vector/matrix sizes. */
   HYPRE_Int dim = hypre_ParVectorGlobalSize(x) / hypre_ParCSRMatrixGlobalNumRows(A);

   if (dim == 1)
   {
      /* Scalar case: nothing to split. */
      hypre_BoomerAMGSolve(B, A, b, x);
      return hypre_error_flag;
   }

   for (d = 0; d < dim; d++)
   {
      rhs_[d] = hypre_ParVectorInRangeOf(A);
      sol_[d] = hypre_ParVectorInRangeOf(A);
   }

   hypre_ParVectorBlockSplit(b, rhs_, dim);
   hypre_ParVectorBlockSplit(x, sol_, dim);

   /* One scalar AMG solve per component. */
   for (d = 0; d < dim; d++)
      hypre_BoomerAMGSolve(B, A, rhs_[d], sol_[d]);

   hypre_ParVectorBlockGather(x, sol_, dim);

   for (d = 0; d < dim; d++)
   {
      hypre_ParVectorDestroy(rhs_[d]);
      hypre_ParVectorDestroy(sol_[d]);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixFixZeroRows
 *
 * For every zero row in the matrix: set the diagonal element to 1
 * (and all other entries in that row to 0), so the matrix stays
 * nonsingular after eliminating degrees of freedom.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRMatrixFixZeroRows(hypre_ParCSRMatrix *A)
{
   HYPRE_Int row, jj;
   HYPRE_Real row_l1;

   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);

   hypre_CSRMatrix *A_diag   = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I       = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J       = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data   = hypre_CSRMatrixData(A_diag);

   hypre_CSRMatrix *A_offd   = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I       = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data   = hypre_CSRMatrixData(A_offd);
   HYPRE_Int num_cols_offd   = hypre_CSRMatrixNumCols(A_offd);

   /* a row will be considered zero if its l1 norm is less than eps */
   HYPRE_Real eps = 0.0; /* DBL_EPSILON * 1e+4; */

   for (row = 0; row < num_rows; row++)
   {
      /* l1 norm of the full (diag + offd) row */
      row_l1 = 0.0;
      for (jj = A_diag_I[row]; jj < A_diag_I[row+1]; jj++)
         row_l1 += fabs(A_diag_data[jj]);
      if (num_cols_offd)
         for (jj = A_offd_I[row]; jj < A_offd_I[row+1]; jj++)
            row_l1 += fabs(A_offd_data[jj]);

      if (row_l1 > eps)
         continue;

      /* Zero row: put 1 on the diagonal, 0 everywhere else. */
      for (jj = A_diag_I[row]; jj < A_diag_I[row+1]; jj++)
         A_diag_data[jj] = (A_diag_J[jj] == row) ? 1.0 : 0.0;
      if (num_cols_offd)
         for (jj = A_offd_I[row]; jj < A_offd_I[row+1]; jj++)
            A_offd_data[jj] = 0.0;
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_ParCSRComputeL1Norms
 *
 * Compute the l1 norms of the rows of a given matrix, depending on
 * the option parameter:
 *
 * option 1 = Compute the l1 norm of the rows
 * option 2 = Compute the l1 norm of the (processor) off-diagonal
 *            part of the rows plus the diagonal of A
 * option 3 = Compute the l2 norm^2 of the rows
 * option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
 *            Smoothers for Ultra-Parallel Computing"
 * option 5 = Store the diagonal of A (1.0 for zero diagonals), for Jacobi
 *            via matvec (relax type 7)
 *
 * The above computations are done in a CF manner, whenever the provided
 * cf_marker is not NULL.
 *
 * On success *l1_norm_ptr points to a newly allocated array of size
 * num_rows, owned by the caller. Options 2 and 4 assume the first entry
 * of each diag row is the diagonal element. Signs are flipped for rows
 * with negative diagonal (except option 5), and an error is recorded if
 * a computed norm is exactly zero.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRComputeL1Norms(hypre_ParCSRMatrix *A,
                                     HYPRE_Int option,
                                     HYPRE_Int *cf_marker,
                                     HYPRE_Real **l1_norm_ptr)
{
   HYPRE_Int i, j;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   HYPRE_Real diag;
   HYPRE_Real *l1_norm = hypre_TAlloc(HYPRE_Real, num_rows);
   HYPRE_Int *cf_marker_offd = NULL;
   HYPRE_Int cf_diag;

   /* collect the cf marker data from other procs */
   if (cf_marker != NULL)
   {
      HYPRE_Int index;
      HYPRE_Int num_sends;
      HYPRE_Int start;
      HYPRE_Int *int_buf_data = NULL;

      /* NOTE(review): comm_pkg is assumed to exist here (no
         hypre_MatvecCommPkgCreate fallback as in hypre_ParCSRRelax) —
         callers must have set it up beforehand. */
      hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      hypre_ParCSRCommHandle *comm_handle;

      if (num_cols_offd)
         cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
         int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));

      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }

      comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
                                                 cf_marker_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data);
   }

   if (option == 1)
   {
      for (i = 0; i < num_rows; i++)
      {
         l1_norm[i] = 0.0;
         if (cf_marker == NULL)
         {
            /* Add the l1 norm of the diag part of the ith row */
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               l1_norm[i] += fabs(A_diag_data[j]);
            /* Add the l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
         else
         {
            cf_diag = cf_marker[i];
            /* Add the CF l1 norm of the diag part of the ith row,
               i.e. restrict to entries with the same C/F flag */
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               if (cf_diag == cf_marker[A_diag_J[j]])
                  l1_norm[i] += fabs(A_diag_data[j]);
            /* Add the CF l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
      }
   }
   else if (option == 2)
   {
      for (i = 0; i < num_rows; i++)
      {
         /* Add the diag element of the ith row
            (assumes it is stored first in the row) */
         l1_norm[i] = fabs(A_diag_data[A_diag_I[i]]);
         if (cf_marker == NULL)
         {
            /* Add the l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
         else
         {
            cf_diag = cf_marker[i];
            /* Add the CF l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
      }
   }
   else if (option == 3)
   {
      /* Squared l2 norm of each full row (cf_marker is ignored here) */
      for (i = 0; i < num_rows; i++)
      {
         l1_norm[i] = 0.0;
         for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
            l1_norm[i] += A_diag_data[j] * A_diag_data[j];
         if (num_cols_offd)
            for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
               l1_norm[i] += A_offd_data[j] * A_offd_data[j];
      }
   }
   else if (option == 4)
   {
      for (i = 0; i < num_rows; i++)
      {
         /* Add the diag element of the ith row
            (assumes it is stored first in the row) */
         diag = l1_norm[i] = fabs(A_diag_data[A_diag_I[i]]);
         if (cf_marker == NULL)
         {
            /* Add the scaled l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += 0.5*fabs(A_offd_data[j]);
            }
         }
         else
         {
            cf_diag = cf_marker[i];
            /* Add the scaled CF l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     l1_norm[i] += 0.5*fabs(A_offd_data[j]);
            }
         }
         /* Truncate according to Remark 6.2 */
         if (l1_norm[i] <= 4.0/3.0*diag)
            l1_norm[i] = diag;
      }
   }
   else if (option == 5) /*stores diagonal of A for Jacobi using matvec, rlx 7 */
   {
      for (i = 0; i < num_rows; i++)
      {
         diag = A_diag_data[A_diag_I[i]];
         if (diag != 0.0) l1_norm[i] = diag;
         else l1_norm[i] = 1.0;
      }
      /* BUGFIX: free the offd CF markers before the early return;
         previously this path leaked cf_marker_offd. */
      hypre_TFree(cf_marker_offd);
      *l1_norm_ptr = l1_norm;
      return hypre_error_flag;
   }

   /* Handle negative definite matrices */
   for (i = 0; i < num_rows; i++)
      if (A_diag_data[A_diag_I[i]] < 0)
         l1_norm[i] = -l1_norm[i];

   for (i = 0; i < num_rows; i++)
      /* if (fabs(l1_norm[i]) < DBL_EPSILON) */
      if (fabs(l1_norm[i]) == 0.0)
      {
         hypre_error_in_arg(1);
         break;
      }

   hypre_TFree(cf_marker_offd);

   *l1_norm_ptr = l1_norm;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixSetDiagRows
 *
 * For every row containing only a diagonal element: set it to d.
 * Such rows typically correspond to eliminated degrees of freedom.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRMatrixSetDiagRows(hypre_ParCSRMatrix *A, HYPRE_Real d)
{
   HYPRE_Int row, first;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I     = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J     = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I     = hypre_CSRMatrixI(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   for (row = 0; row < num_rows; row++)
   {
      first = A_diag_I[row];

      /* the diag part must hold a single entry, on the diagonal ... */
      if (A_diag_I[row+1] != first + 1 || A_diag_J[first] != row)
         continue;
      /* ... and the offd part must be empty */
      if (num_cols_offd && A_offd_I[row+1] != A_offd_I[row])
         continue;

      A_diag_data[first] = d;
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSCreate
 *
 * Allocate the AMS solver structure and fill it with default parameters.
 * Returns the structure as an opaque (void *) solver handle; fields not
 * covered by a default are initialized below and set later through the
 * hypre_AMSSet* functions.
 *--------------------------------------------------------------------------*/
void * hypre_AMSCreate()
{
   hypre_AMSData *ams_data;

   ams_data = hypre_CTAlloc(hypre_AMSData, 1);

   /* Default parameters */

   ams_data -> dim = 3;                /* 3D problem */
   ams_data -> maxit = 20;             /* perform at most 20 iterations */
   ams_data -> tol = 1e-6;             /* convergence tolerance */
   ams_data -> print_level = 1;        /* print residual norm at each step */
   ams_data -> cycle_type = 1;         /* a 3-level multiplicative solver */
   ams_data -> A_relax_type = 2;       /* offd-l1-scaled GS */
   ams_data -> A_relax_times = 1;      /* one relaxation sweep */
   ams_data -> A_relax_weight = 1.0;   /* damping parameter */
   ams_data -> A_omega = 1.0;          /* SSOR coefficient */
   ams_data -> A_cheby_order = 2;      /* Cheby: order (1-4 are valid) */
   ams_data -> A_cheby_fraction = .3;  /* Cheby: fraction of spectrum to smooth */

   /* BoomerAMG defaults for the (vertex-space) beta Poisson solver B_G */
   ams_data -> B_G_coarsen_type = 10;  /* HMIS coarsening */
   ams_data -> B_G_agg_levels = 1;     /* Levels of aggressive coarsening */
   ams_data -> B_G_relax_type = 3;     /* hybrid G-S/Jacobi */
   ams_data -> B_G_theta = 0.25;       /* strength threshold */
   ams_data -> B_G_interp_type = 0;    /* interpolation type */
   ams_data -> B_G_Pmax = 0;           /* max nonzero elements in interp. rows */

   /* BoomerAMG defaults for the (nodal-space) alpha Poisson solver B_Pi */
   ams_data -> B_Pi_coarsen_type = 10; /* HMIS coarsening */
   ams_data -> B_Pi_agg_levels = 1;    /* Levels of aggressive coarsening */
   ams_data -> B_Pi_relax_type = 3;    /* hybrid G-S/Jacobi */
   ams_data -> B_Pi_theta = 0.25;      /* strength threshold */
   ams_data -> B_Pi_interp_type = 0;   /* interpolation type */
   ams_data -> B_Pi_Pmax = 0;          /* max nonzero elements in interp. rows */

   ams_data -> beta_is_zero = 0;       /* the problem has a mass term */

   /* By default, do l1-GS smoothing on the coarsest grid */
   ams_data -> B_G_coarse_relax_type  = 8;
   ams_data -> B_Pi_coarse_relax_type = 8;

   /* The rest of the fields are initialized using the Set functions */

   ams_data -> A    = NULL;
   ams_data -> G    = NULL;
   ams_data -> A_G  = NULL;
   ams_data -> B_G  = 0;
   ams_data -> Pi   = NULL;
   ams_data -> A_Pi = NULL;
   ams_data -> B_Pi = 0;

   ams_data -> x = NULL;
   ams_data -> y = NULL;
   ams_data -> z = NULL;

   ams_data -> Gx = NULL;
   ams_data -> Gy = NULL;
   ams_data -> Gz = NULL;

   /* work/residual vectors, allocated during setup */
   ams_data -> r0 = NULL;
   ams_data -> g0 = NULL;
   ams_data -> r1 = NULL;
   ams_data -> g1 = NULL;
   ams_data -> r2 = NULL;
   ams_data -> g2 = NULL;

   ams_data -> Pix = NULL;
   ams_data -> Piy = NULL;
   ams_data -> Piz = NULL;

   ams_data -> A_Pix = NULL;
   ams_data -> A_Piy = NULL;
   ams_data -> A_Piz = NULL;

   ams_data -> B_Pix = 0;
   ams_data -> B_Piy = 0;
   ams_data -> B_Piz = 0;

   /* zero-conductivity region handling */
   ams_data -> interior_nodes = NULL;
   ams_data -> G0 = NULL;
   ams_data -> A_G0 = NULL;
   ams_data -> B_G0 = 0;
   ams_data -> projection_frequency = 5;

   ams_data -> A_l1_norms = NULL;
   ams_data -> A_max_eig_est = 0;
   ams_data -> A_min_eig_est = 0;

   /* ownership flags consulted by hypre_AMSDestroy */
   ams_data -> owns_Pi   = 1;
   ams_data -> owns_A_G  = 0;
   ams_data -> owns_A_Pi = 0;

   return (void *) ams_data;
}
/*--------------------------------------------------------------------------
 * hypre_AMSDestroy
 *
 * Deallocate the AMS solver structure. Note that the input data (given
 * through the Set functions) is not destroyed; the ownership flags
 * (owns_Pi, owns_A_G, owns_A_Pi) distinguish internally-built objects
 * from user-supplied ones.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSDestroy(void *solver)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   if (!ams_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (ams_data -> owns_A_G)
      if (ams_data -> A_G)
         hypre_ParCSRMatrixDestroy(ams_data -> A_G);
   if (!ams_data -> beta_is_zero)
      if (ams_data -> B_G)
         HYPRE_BoomerAMGDestroy(ams_data -> B_G);

   if (ams_data -> owns_Pi && ams_data -> Pi)
      hypre_ParCSRMatrixDestroy(ams_data -> Pi);
   if (ams_data -> owns_A_Pi)
      if (ams_data -> A_Pi)
         hypre_ParCSRMatrixDestroy(ams_data -> A_Pi);
   if (ams_data -> B_Pi)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Pi);

   /* per-component interpolation matrices and their AMG solvers */
   if (ams_data -> owns_Pi && ams_data -> Pix)
      hypre_ParCSRMatrixDestroy(ams_data -> Pix);
   if (ams_data -> A_Pix)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Pix);
   if (ams_data -> B_Pix)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Pix);
   if (ams_data -> owns_Pi && ams_data -> Piy)
      hypre_ParCSRMatrixDestroy(ams_data -> Piy);
   if (ams_data -> A_Piy)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Piy);
   if (ams_data -> B_Piy)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Piy);
   if (ams_data -> owns_Pi && ams_data -> Piz)
      hypre_ParCSRMatrixDestroy(ams_data -> Piz);
   if (ams_data -> A_Piz)
      hypre_ParCSRMatrixDestroy(ams_data -> A_Piz);
   if (ams_data -> B_Piz)
      HYPRE_BoomerAMGDestroy(ams_data -> B_Piz);

   /* internally allocated work vectors */
   if (ams_data -> r0)
      hypre_ParVectorDestroy(ams_data -> r0);
   if (ams_data -> g0)
      hypre_ParVectorDestroy(ams_data -> g0);
   if (ams_data -> r1)
      hypre_ParVectorDestroy(ams_data -> r1);
   if (ams_data -> g1)
      hypre_ParVectorDestroy(ams_data -> g1);
   if (ams_data -> r2)
      hypre_ParVectorDestroy(ams_data -> r2);
   if (ams_data -> g2)
      hypre_ParVectorDestroy(ams_data -> g2);

   /* NOTE(review): when G0 is set, A is destroyed here as well — this
      looks like it relies on AMSSetup having replaced ams_data->A with
      an internally-built matrix in the zero-conductivity configuration.
      Verify A is not the user's matrix in that case. */
   if (ams_data -> G0)
      hypre_ParCSRMatrixDestroy(ams_data -> A);
   if (ams_data -> G0)
      hypre_ParCSRMatrixDestroy(ams_data -> G0);
   if (ams_data -> A_G0)
      hypre_ParCSRMatrixDestroy(ams_data -> A_G0);
   if (ams_data -> B_G0)
      HYPRE_BoomerAMGDestroy(ams_data -> B_G0);

   if (ams_data -> A_l1_norms)
      hypre_TFree(ams_data -> A_l1_norms);

   /* G, x, y ,z, Gx, Gy and Gz are not destroyed */

   if (ams_data)
      hypre_TFree(ams_data);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetDimension
 *
 * Set problem dimension (2 or 3). By default we assume dim = 3.
 * Any other value records an argument error but the dimension is
 * stored regardless.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetDimension(void *solver,
                                HYPRE_Int dim)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;

   if (dim != 2 && dim != 3)
      hypre_error_in_arg(2);

   data->dim = dim;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetDiscreteGradient
 *
 * Set the discrete gradient matrix G (edges x vertices).
 * This function should be called before hypre_AMSSetup()!
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetDiscreteGradient(void *solver,
                                       hypre_ParCSRMatrix *G)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;
   data->G = G;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetCoordinateVectors
 *
 * Set the x, y and z coordinates of the vertices in the mesh.
 *
 * Either SetCoordinateVectors or SetEdgeConstantVectors should be
 * called before hypre_AMSSetup()!
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetCoordinateVectors(void *solver,
                                        hypre_ParVector *x,
                                        hypre_ParVector *y,
                                        hypre_ParVector *z)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;

   data->x = x;
   data->y = y;
   data->z = z;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetEdgeConstantVectors
 *
 * Set the vectors Gx, Gy and Gz which give the representations of
 * the constant vector fields (1,0,0), (0,1,0) and (0,0,1) in the
 * edge element basis.
 *
 * Either SetCoordinateVectors or SetEdgeConstantVectors should be
 * called before hypre_AMSSetup()!
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetEdgeConstantVectors(void *solver,
                                          hypre_ParVector *Gx,
                                          hypre_ParVector *Gy,
                                          hypre_ParVector *Gz)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;

   data->Gx = Gx;
   data->Gy = Gy;
   data->Gz = Gz;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetInterpolations
 *
 * Set the (components of) the Nedelec interpolation matrix Pi=[Pix,Piy,Piz].
 *
 * This function is generally intended to be used only for high-order Nedelec
 * discretizations (in the lowest order case, Pi is constructed internally in
 * AMS from the discrete gradient matrix and the coordinates of the vertices),
 * though it can also be used in the lowest-order case or for other types of
 * discretizations (e.g. ones based on the second family of Nedelec elements).
 *
 * By definition, Pi is the matrix representation of the linear operator that
 * interpolates (high-order) vector nodal finite elements into the (high-order)
 * Nedelec space. The component matrices are defined as Pix phi = Pi (phi,0,0)
 * and similarly for Piy and Piz. Note that all these operators depend on the
 * choice of the basis and degrees of freedom in the high-order spaces.
 *
 * The column numbering of Pi should be node-based, i.e. the x/y/z components
 * of the first node (vertex or high-order dof) should be listed first,
 * followed by the x/y/z components of the second node and so on (see the
 * documentation of HYPRE_BoomerAMGSetDofFunc).
 *
 * If used, this function should be called before hypre_AMSSetup() and there is
 * no need to provide the vertex coordinates. Furthermore, only one of the sets
 * {Pi} and {Pix,Piy,Piz} needs to be specified (though it is OK to provide
 * both). If Pix is NULL, then scalar Pi-based AMS cycles, i.e. those with
 * cycle_type > 10, will be unavailable. Similarly, AMS cycles based on
 * monolithic Pi (cycle_type < 10) require that Pi is not NULL.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetInterpolations(void *solver,
                                     hypre_ParCSRMatrix *Pi,
                                     hypre_ParCSRMatrix *Pix,
                                     hypre_ParCSRMatrix *Piy,
                                     hypre_ParCSRMatrix *Piz)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;

   data->Pi  = Pi;
   data->Pix = Pix;
   data->Piy = Piy;
   data->Piz = Piz;

   /* User-supplied matrices are not destroyed by hypre_AMSDestroy. */
   data->owns_Pi = 0;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetAlphaPoissonMatrix
 *
 * Set the matrix corresponding to the Poisson problem with coefficient
 * alpha (the curl-curl term coefficient in the Maxwell problem).
 *
 * If this function is called, the coarse space solver on the range
 * of Pi^T is a block-diagonal version of A_Pi. If this function is not
 * called, the coarse space solver on the range of Pi^T is constructed
 * as Pi^T A Pi in hypre_AMSSetup().
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetAlphaPoissonMatrix(void *solver,
                                         hypre_ParCSRMatrix *A_Pi)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;

   data->A_Pi = A_Pi;

   /* Penalize the eliminated degrees of freedom */
   hypre_ParCSRMatrixSetDiagRows(A_Pi, HYPRE_REAL_MAX);

   /* Make sure that the first entry in each row is the diagonal one. */
   /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_Pi)); */

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetBetaPoissonMatrix
 *
 * Set the matrix corresponding to the Poisson problem with coefficient
 * beta (the mass term coefficient in the Maxwell problem).
 *
 * This function call is optional - if not given, the Poisson matrix will
 * be computed in hypre_AMSSetup(). If the given matrix is NULL, we assume
 * that beta is 0 and use two-level (instead of three-level) methods.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetBetaPoissonMatrix(void *solver,
                                        hypre_ParCSRMatrix *A_G)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;

   data->A_G = A_G;

   if (A_G == NULL)
   {
      /* NULL means beta = 0: drop the third (vertex-space) level. */
      data->beta_is_zero = 1;
      return hypre_error_flag;
   }

   /* Penalize the eliminated degrees of freedom */
   hypre_ParCSRMatrixSetDiagRows(A_G, HYPRE_REAL_MAX);

   /* Make sure that the first entry in each row is the diagonal one. */
   /* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A_G)); */

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetInteriorNodes
 *
 * Provide the indicator vector of nodes interior to the zero-conductivity
 * region: node i is interior iff interior_nodes[i] == 1.0.
 *
 * Must be called before hypre_AMSSetup()!
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetInteriorNodes(void *solver,
hypre_ParVector *interior_nodes)
{
   ((hypre_AMSData *) solver)->interior_nodes = interior_nodes;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetProjectionFrequency
 *
 * Set how often the r.h.s. is projected onto the compatible subspace
 * Ker(G0^T) while iterating with the solver. Default: every 5th iteration.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetProjectionFrequency(void *solver,
HYPRE_Int projection_frequency)
{
   ((hypre_AMSData *) solver)->projection_frequency = projection_frequency;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetMaxIter
 *
 * Set the maximum number of iterations of the three-level method.
 * Default: 20. To use AMS as a preconditioner, set maxit to 1,
 * tol to 0.0 and print_level to 0.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetMaxIter(void *solver,
HYPRE_Int maxit)
{
   ((hypre_AMSData *) solver)->maxit = maxit;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetTol
 *
 * Set the convergence tolerance used when AMS acts as a solver.
 * Default: 1e-6.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetTol(void *solver,
HYPRE_Real tol)
{
   ((hypre_AMSData *) solver)->tol = tol;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetCycleType
 *
 * Select which three-level solver to use. Supported values:
 *
 *   1 = 3-level multipl. solver (01210)   <-- small solution time
 *   2 = 3-level additive solver (0+1+2)
 *   3 = 3-level multipl. solver (02120)
 *   4 = 3-level additive solver (010+2)
 *   5 = 3-level multipl. solver (0102010) <-- small solution time
 *   6 = 3-level additive solver (1+020)
 *   7 = 3-level multipl. solver (0201020) <-- small number of iterations
 *   8 = 3-level additive solver (0(1+2)0) <-- small solution time
 *   9 = 3-level multipl. solver (01210) with discrete divergence
 *  11 = 5-level multipl. solver (013454310) <-- small solution time, memory
 *  12 = 5-level additive solver (0+1+3+4+5)
 *  13 = 5-level multipl. solver (034515430) <-- small solution time, memory
 *  14 = 5-level additive solver (01(3+4+5)10)
 *  20 = 2-level multipl. solver (0[12]0)
 *
 *   0 = a Hiptmair-like smoother (010)
 *
 * The default value is 1.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetCycleType(void *solver,
HYPRE_Int cycle_type)
{
   /* The value is only stored here; it is interpreted during Setup/Solve */
   ((hypre_AMSData *) solver)->cycle_type = cycle_type;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetPrintLevel
 *
 * Control the amount of output during the solution iterations.
 * Default: 1 (print the residual norm at each step).
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetPrintLevel(void *solver,
HYPRE_Int print_level)
{
   ((hypre_AMSData *) solver)->print_level = print_level;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetSmoothingOptions
 *
 * Set the relaxation parameters used when smoothing with A.
 * Default values: 2, 1, 1.0, 1.0.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetSmoothingOptions(void *solver,
HYPRE_Int A_relax_type,
HYPRE_Int A_relax_times,
HYPRE_Real A_relax_weight,
HYPRE_Real A_omega)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;
   ams->A_relax_type   = A_relax_type;
   ams->A_relax_times  = A_relax_times;
   ams->A_relax_weight = A_relax_weight;
   ams->A_omega        = A_omega;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetChebySmoothingOptions
 * AB: note: this could be added to the above,
 * but I didn't want to change parameter list)
 * Set parameters for the Chebyshev smoother for A. Default values: 2,.3.
 * NOTE(review): A_cheby_fraction is declared HYPRE_Int although the quoted
 * default is .3 — confirm the intended type against the public API.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetChebySmoothingOptions(void *solver,
HYPRE_Int A_cheby_order,
HYPRE_Int A_cheby_fraction)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;
   ams->A_cheby_order    = A_cheby_order;
   ams->A_cheby_fraction = A_cheby_fraction;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetAlphaAMGOptions
 *
 * Set the BoomerAMG parameters for the B_Pi subspace solver.
 * Default values: 10, 1, 3, 0.25, 0, 0.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetAlphaAMGOptions(void *solver,
HYPRE_Int B_Pi_coarsen_type,
HYPRE_Int B_Pi_agg_levels,
HYPRE_Int B_Pi_relax_type,
HYPRE_Real B_Pi_theta,
HYPRE_Int B_Pi_interp_type,
HYPRE_Int B_Pi_Pmax)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;
   ams->B_Pi_coarsen_type = B_Pi_coarsen_type;
   ams->B_Pi_agg_levels   = B_Pi_agg_levels;
   ams->B_Pi_relax_type   = B_Pi_relax_type;
   ams->B_Pi_theta        = B_Pi_theta;
   ams->B_Pi_interp_type  = B_Pi_interp_type;
   ams->B_Pi_Pmax         = B_Pi_Pmax;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetAlphaAMGCoarseRelaxType
 *
 * Set the coarsest-level AMG relaxation for B_Pi. Default value: 8.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetAlphaAMGCoarseRelaxType(void *solver,
HYPRE_Int B_Pi_coarse_relax_type)
{
   ((hypre_AMSData *) solver)->B_Pi_coarse_relax_type = B_Pi_coarse_relax_type;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetBetaAMGOptions
 *
 * Set the BoomerAMG parameters for the B_G subspace solver.
 * Default values: 10, 1, 3, 0.25, 0, 0.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetBetaAMGOptions(void *solver,
HYPRE_Int B_G_coarsen_type,
HYPRE_Int B_G_agg_levels,
HYPRE_Int B_G_relax_type,
HYPRE_Real B_G_theta,
HYPRE_Int B_G_interp_type,
HYPRE_Int B_G_Pmax)
{
   hypre_AMSData *ams = (hypre_AMSData *) solver;
   ams->B_G_coarsen_type = B_G_coarsen_type;
   ams->B_G_agg_levels   = B_G_agg_levels;
   ams->B_G_relax_type   = B_G_relax_type;
   ams->B_G_theta        = B_G_theta;
   ams->B_G_interp_type  = B_G_interp_type;
   ams->B_G_Pmax         = B_G_Pmax;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSSetBetaAMGCoarseRelaxType
 *
 * Set the coarsest-level AMG relaxation for B_G. Default value: 8.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetBetaAMGCoarseRelaxType(void *solver,
HYPRE_Int B_G_coarse_relax_type)
{
   ((hypre_AMSData *) solver)->B_G_coarse_relax_type = B_G_coarse_relax_type;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSComputePi
 *
 * Construct the Pi interpolation matrix, which maps the space of vector
 * linear finite elements to the space of edge finite elements.
 *
 * The construction is based on the fact that Pi = [Pi_x, Pi_y, Pi_z],
 * where each block has the same sparsity structure as G, and the entries
 * can be computed from the vectors Gx, Gy, Gz.
 *
 * The parameter A is not referenced in this routine; it is kept for
 * interface uniformity with the other hypre_AMSCompute* functions.
 * Output: *Pi_ptr, a newly created matrix whose columns interleave the
 * dim vector components node by node (column dim*n+d is component d of
 * node n).
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSComputePi(hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *G,
hypre_ParVector *Gx,
hypre_ParVector *Gy,
hypre_ParVector *Gz,
HYPRE_Int dim,
hypre_ParCSRMatrix **Pi_ptr)
{
hypre_ParCSRMatrix *Pi;
/* Compute Pi = [Pi_x, Pi_y, Pi_z] */
{
HYPRE_Int i, j, d;
HYPRE_Real *Gx_data, *Gy_data, *Gz_data;
MPI_Comm comm = hypre_ParCSRMatrixComm(G);
HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
/* Pi has dim interleaved copies of G's columns, hence all column
   counts and nonzero counts are scaled by dim */
HYPRE_Int global_num_cols = dim*hypre_ParCSRMatrixGlobalNumCols(G);
HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(G);
HYPRE_Int col_starts_size, *col_starts;
HYPRE_Int num_cols_offd = dim*hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
HYPRE_Int num_nonzeros_diag = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
HYPRE_Int num_nonzeros_offd = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
HYPRE_Int *col_starts_G = hypre_ParCSRMatrixColStarts(G);
#ifdef HYPRE_NO_GLOBAL_PARTITION
col_starts_size = 2;
#else
HYPRE_Int num_procs;
hypre_MPI_Comm_size(comm, &num_procs);
col_starts_size = num_procs+1;
#endif
/* Pi's column partitioning is G's scaled by dim; this array is owned
   by Pi (see OwnsColStarts below) and freed when Pi is destroyed */
col_starts = hypre_TAlloc(HYPRE_Int,col_starts_size);
for (i = 0; i < col_starts_size; i++)
col_starts[i] = dim * col_starts_G[i];
Pi = hypre_ParCSRMatrixCreate(comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
num_cols_offd,
num_nonzeros_diag,
num_nonzeros_offd);
/* Pi shares G's row_starts (not owned) but owns the col_starts
   allocated above */
hypre_ParCSRMatrixOwnsData(Pi) = 1;
hypre_ParCSRMatrixOwnsRowStarts(Pi) = 0;
hypre_ParCSRMatrixOwnsColStarts(Pi) = 1;
hypre_ParCSRMatrixInitialize(Pi);
Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
/* Gz is only read when dim == 3 (Gz_data stays unset for dim == 2) */
if (dim == 3)
Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));
/* Fill-in the diagonal part */
{
hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);
hypre_CSRMatrix *Pi_diag = hypre_ParCSRMatrixDiag(Pi);
HYPRE_Int *Pi_diag_I = hypre_CSRMatrixI(Pi_diag);
HYPRE_Int *Pi_diag_J = hypre_CSRMatrixJ(Pi_diag);
HYPRE_Real *Pi_diag_data = hypre_CSRMatrixData(Pi_diag);
/* Each G row expands to dim times as many entries */
for (i = 0; i < G_diag_nrows+1; i++)
Pi_diag_I[i] = dim * G_diag_I[i];
/* Column dim*G_J[i]+d addresses component d of the node G_J[i] */
for (i = 0; i < G_diag_nnz; i++)
for (d = 0; d < dim; d++)
Pi_diag_J[dim*i+d] = dim*G_diag_J[i]+d;
/* Entry (edge i, node j, component d) = 0.5*|G_ij| * G{x,y,z}[i];
   the writes advance Pi_diag_data in exactly the interleaved order
   set up for Pi_diag_J above */
for (i = 0; i < G_diag_nrows; i++)
for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
{
*Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
*Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
if (dim == 3)
*Pi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
}
}
/* Fill-in the off-diagonal part (same interleaving as the diagonal) */
{
hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);
hypre_CSRMatrix *Pi_offd = hypre_ParCSRMatrixOffd(Pi);
HYPRE_Int *Pi_offd_I = hypre_CSRMatrixI(Pi_offd);
HYPRE_Int *Pi_offd_J = hypre_CSRMatrixJ(Pi_offd);
HYPRE_Real *Pi_offd_data = hypre_CSRMatrixData(Pi_offd);
HYPRE_Int *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
HYPRE_Int *Pi_cmap = hypre_ParCSRMatrixColMapOffd(Pi);
/* Row pointers are only written when the offd block is nonempty */
if (G_offd_ncols)
for (i = 0; i < G_offd_nrows+1; i++)
Pi_offd_I[i] = dim * G_offd_I[i];
for (i = 0; i < G_offd_nnz; i++)
for (d = 0; d < dim; d++)
Pi_offd_J[dim*i+d] = dim*G_offd_J[i]+d;
for (i = 0; i < G_offd_nrows; i++)
for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
{
*Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
*Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
if (dim == 3)
*Pi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
}
/* The off-processor column map is interleaved the same way */
for (i = 0; i < G_offd_ncols; i++)
for (d = 0; d < dim; d++)
Pi_cmap[dim*i+d] = dim*G_cmap[i]+d;
}
}
*Pi_ptr = Pi;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSComputePixyz
 *
 * Construct the components Pix, Piy, Piz of the interpolation matrix Pi,
 * which maps the space of vector linear finite elements to the space of
 * edge finite elements.
 *
 * The construction is based on the fact that each component has the same
 * sparsity structure as G, and the entries can be computed from the vectors
 * Gx, Gy, Gz.
 *
 * The parameter A is not referenced in this routine; it is kept for
 * interface uniformity with the other hypre_AMSCompute* functions.
 * Outputs: *Pix_ptr and *Piy_ptr always; *Piz_ptr only when dim == 3
 * (for dim == 2 it is left untouched).
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSComputePixyz(hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *G,
hypre_ParVector *Gx,
hypre_ParVector *Gy,
hypre_ParVector *Gz,
HYPRE_Int dim,
hypre_ParCSRMatrix **Pix_ptr,
hypre_ParCSRMatrix **Piy_ptr,
hypre_ParCSRMatrix **Piz_ptr)
{
hypre_ParCSRMatrix *Pix, *Piy, *Piz;
/* Compute Pix, Piy, Piz */
{
HYPRE_Int i, j;
HYPRE_Real *Gx_data, *Gy_data, *Gz_data;
MPI_Comm comm = hypre_ParCSRMatrixComm(G);
/* Each component matrix has exactly G's dimensions and sparsity */
HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
HYPRE_Int global_num_cols = hypre_ParCSRMatrixGlobalNumCols(G);
HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(G);
HYPRE_Int *col_starts = hypre_ParCSRMatrixColStarts(G);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
HYPRE_Int num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
HYPRE_Int num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
Pix = hypre_ParCSRMatrixCreate(comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
num_cols_offd,
num_nonzeros_diag,
num_nonzeros_offd);
/* Row/column partitions are borrowed from G, so neither is owned */
hypre_ParCSRMatrixOwnsData(Pix) = 1;
hypre_ParCSRMatrixOwnsRowStarts(Pix) = 0;
hypre_ParCSRMatrixOwnsColStarts(Pix) = 0;
hypre_ParCSRMatrixInitialize(Pix);
Piy = hypre_ParCSRMatrixCreate(comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
num_cols_offd,
num_nonzeros_diag,
num_nonzeros_offd);
hypre_ParCSRMatrixOwnsData(Piy) = 1;
hypre_ParCSRMatrixOwnsRowStarts(Piy) = 0;
hypre_ParCSRMatrixOwnsColStarts(Piy) = 0;
hypre_ParCSRMatrixInitialize(Piy);
/* Piz is created (and later returned) only for 3D problems */
if (dim == 3)
{
Piz = hypre_ParCSRMatrixCreate(comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
num_cols_offd,
num_nonzeros_diag,
num_nonzeros_offd);
hypre_ParCSRMatrixOwnsData(Piz) = 1;
hypre_ParCSRMatrixOwnsRowStarts(Piz) = 0;
hypre_ParCSRMatrixOwnsColStarts(Piz) = 0;
hypre_ParCSRMatrixInitialize(Piz);
}
Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
if (dim == 3)
Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));
/* Fill-in the diagonal part. The 3D and 2D branches are identical
   except that the 2D one omits everything related to Piz. */
if (dim == 3)
{
hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);
hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);
hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);
hypre_CSRMatrix *Piz_diag = hypre_ParCSRMatrixDiag(Piz);
HYPRE_Int *Piz_diag_I = hypre_CSRMatrixI(Piz_diag);
HYPRE_Int *Piz_diag_J = hypre_CSRMatrixJ(Piz_diag);
HYPRE_Real *Piz_diag_data = hypre_CSRMatrixData(Piz_diag);
/* Copy G's sparsity pattern verbatim into each component */
for (i = 0; i < G_diag_nrows+1; i++)
{
Pix_diag_I[i] = G_diag_I[i];
Piy_diag_I[i] = G_diag_I[i];
Piz_diag_I[i] = G_diag_I[i];
}
for (i = 0; i < G_diag_nnz; i++)
{
Pix_diag_J[i] = G_diag_J[i];
Piy_diag_J[i] = G_diag_J[i];
Piz_diag_J[i] = G_diag_J[i];
}
/* Entry (edge i, node j) of component c is 0.5*|G_ij| * Gc[i] */
for (i = 0; i < G_diag_nrows; i++)
for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
{
*Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
*Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
*Piz_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
}
}
else
{
hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);
hypre_CSRMatrix *Pix_diag = hypre_ParCSRMatrixDiag(Pix);
HYPRE_Int *Pix_diag_I = hypre_CSRMatrixI(Pix_diag);
HYPRE_Int *Pix_diag_J = hypre_CSRMatrixJ(Pix_diag);
HYPRE_Real *Pix_diag_data = hypre_CSRMatrixData(Pix_diag);
hypre_CSRMatrix *Piy_diag = hypre_ParCSRMatrixDiag(Piy);
HYPRE_Int *Piy_diag_I = hypre_CSRMatrixI(Piy_diag);
HYPRE_Int *Piy_diag_J = hypre_CSRMatrixJ(Piy_diag);
HYPRE_Real *Piy_diag_data = hypre_CSRMatrixData(Piy_diag);
for (i = 0; i < G_diag_nrows+1; i++)
{
Pix_diag_I[i] = G_diag_I[i];
Piy_diag_I[i] = G_diag_I[i];
}
for (i = 0; i < G_diag_nnz; i++)
{
Pix_diag_J[i] = G_diag_J[i];
Piy_diag_J[i] = G_diag_J[i];
}
for (i = 0; i < G_diag_nrows; i++)
for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
{
*Pix_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
*Piy_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
}
}
/* Fill-in the off-diagonal part (same structure copy as the diagonal,
   plus the off-processor column maps) */
if (dim == 3)
{
hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);
hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);
hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy);
HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd);
HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd);
HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd);
hypre_CSRMatrix *Piz_offd = hypre_ParCSRMatrixOffd(Piz);
HYPRE_Int *Piz_offd_I = hypre_CSRMatrixI(Piz_offd);
HYPRE_Int *Piz_offd_J = hypre_CSRMatrixJ(Piz_offd);
HYPRE_Real *Piz_offd_data = hypre_CSRMatrixData(Piz_offd);
HYPRE_Int *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
HYPRE_Int *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);
HYPRE_Int *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy);
HYPRE_Int *Piz_cmap = hypre_ParCSRMatrixColMapOffd(Piz);
/* Row pointers are only written when the offd block is nonempty */
if (G_offd_ncols)
for (i = 0; i < G_offd_nrows+1; i++)
{
Pix_offd_I[i] = G_offd_I[i];
Piy_offd_I[i] = G_offd_I[i];
Piz_offd_I[i] = G_offd_I[i];
}
for (i = 0; i < G_offd_nnz; i++)
{
Pix_offd_J[i] = G_offd_J[i];
Piy_offd_J[i] = G_offd_J[i];
Piz_offd_J[i] = G_offd_J[i];
}
for (i = 0; i < G_offd_nrows; i++)
for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
{
*Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
*Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
*Piz_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
}
for (i = 0; i < G_offd_ncols; i++)
{
Pix_cmap[i] = G_cmap[i];
Piy_cmap[i] = G_cmap[i];
Piz_cmap[i] = G_cmap[i];
}
}
else
{
hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);
hypre_CSRMatrix *Pix_offd = hypre_ParCSRMatrixOffd(Pix);
HYPRE_Int *Pix_offd_I = hypre_CSRMatrixI(Pix_offd);
HYPRE_Int *Pix_offd_J = hypre_CSRMatrixJ(Pix_offd);
HYPRE_Real *Pix_offd_data = hypre_CSRMatrixData(Pix_offd);
hypre_CSRMatrix *Piy_offd = hypre_ParCSRMatrixOffd(Piy);
HYPRE_Int *Piy_offd_I = hypre_CSRMatrixI(Piy_offd);
HYPRE_Int *Piy_offd_J = hypre_CSRMatrixJ(Piy_offd);
HYPRE_Real *Piy_offd_data = hypre_CSRMatrixData(Piy_offd);
HYPRE_Int *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
HYPRE_Int *Pix_cmap = hypre_ParCSRMatrixColMapOffd(Pix);
HYPRE_Int *Piy_cmap = hypre_ParCSRMatrixColMapOffd(Piy);
if (G_offd_ncols)
for (i = 0; i < G_offd_nrows+1; i++)
{
Pix_offd_I[i] = G_offd_I[i];
Piy_offd_I[i] = G_offd_I[i];
}
for (i = 0; i < G_offd_nnz; i++)
{
Pix_offd_J[i] = G_offd_J[i];
Piy_offd_J[i] = G_offd_J[i];
}
for (i = 0; i < G_offd_nrows; i++)
for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
{
*Pix_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
*Piy_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
}
for (i = 0; i < G_offd_ncols; i++)
{
Pix_cmap[i] = G_cmap[i];
Piy_cmap[i] = G_cmap[i];
}
}
}
*Pix_ptr = Pix;
*Piy_ptr = Piy;
if (dim == 3)
*Piz_ptr = Piz;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_AMSComputeGPi
 *
 * Construct the matrix [G,Pi] which can be considered an interpolation
 * matrix from S_h^4 (4 copies of the scalar linear finite element space)
 * to the edge finite elements space.
 *
 * The parameter A is not referenced in this routine; it is kept for
 * interface uniformity with the other hypre_AMSCompute* functions.
 * Output: *GPi_ptr. Columns are interleaved node by node: component 0
 * is G itself, components 1..dim are Pi_x, Pi_y (and Pi_z in 3D).
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSComputeGPi(hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *G,
hypre_ParVector *Gx,
hypre_ParVector *Gy,
hypre_ParVector *Gz,
HYPRE_Int dim,
hypre_ParCSRMatrix **GPi_ptr)
{
hypre_ParCSRMatrix *GPi;
/* Take into account G */
/* From here on, dim = spatial dimension + 1 = number of interleaved
   components per node (so dim == 4 means a 3D problem) */
dim++;
/* Compute GPi = [Pi_x, Pi_y, Pi_z, G] */
{
HYPRE_Int i, j, d;
HYPRE_Real *Gx_data, *Gy_data, *Gz_data;
MPI_Comm comm = hypre_ParCSRMatrixComm(G);
HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(G);
/* GPi has dim interleaved copies of G's columns, hence all column
   counts and nonzero counts are scaled by dim */
HYPRE_Int global_num_cols = dim*hypre_ParCSRMatrixGlobalNumCols(G);
HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(G);
HYPRE_Int col_starts_size, *col_starts;
HYPRE_Int num_cols_offd = dim*hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(G));
HYPRE_Int num_nonzeros_diag = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(G));
HYPRE_Int num_nonzeros_offd = dim*hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(G));
HYPRE_Int *col_starts_G = hypre_ParCSRMatrixColStarts(G);
#ifdef HYPRE_NO_GLOBAL_PARTITION
col_starts_size = 2;
#else
HYPRE_Int num_procs;
hypre_MPI_Comm_size(comm, &num_procs);
col_starts_size = num_procs+1;
#endif
/* GPi's column partitioning is G's scaled by dim; owned by GPi */
col_starts = hypre_TAlloc(HYPRE_Int,col_starts_size);
for (i = 0; i < col_starts_size; i++)
col_starts[i] = dim * col_starts_G[i];
GPi = hypre_ParCSRMatrixCreate(comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
num_cols_offd,
num_nonzeros_diag,
num_nonzeros_offd);
/* GPi shares G's row_starts (not owned) but owns col_starts */
hypre_ParCSRMatrixOwnsData(GPi) = 1;
hypre_ParCSRMatrixOwnsRowStarts(GPi) = 0;
hypre_ParCSRMatrixOwnsColStarts(GPi) = 1;
hypre_ParCSRMatrixInitialize(GPi);
Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(Gx));
Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(Gy));
/* dim == 4 corresponds to a 3D problem (dim was incremented above) */
if (dim == 4)
Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(Gz));
/* Fill-in the diagonal part */
{
hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
HYPRE_Int *G_diag_I = hypre_CSRMatrixI(G_diag);
HYPRE_Int *G_diag_J = hypre_CSRMatrixJ(G_diag);
HYPRE_Real *G_diag_data = hypre_CSRMatrixData(G_diag);
HYPRE_Int G_diag_nrows = hypre_CSRMatrixNumRows(G_diag);
HYPRE_Int G_diag_nnz = hypre_CSRMatrixNumNonzeros(G_diag);
hypre_CSRMatrix *GPi_diag = hypre_ParCSRMatrixDiag(GPi);
HYPRE_Int *GPi_diag_I = hypre_CSRMatrixI(GPi_diag);
HYPRE_Int *GPi_diag_J = hypre_CSRMatrixJ(GPi_diag);
HYPRE_Real *GPi_diag_data = hypre_CSRMatrixData(GPi_diag);
for (i = 0; i < G_diag_nrows+1; i++)
GPi_diag_I[i] = dim * G_diag_I[i];
/* Column dim*G_J[i]+d addresses component d of node G_J[i] */
for (i = 0; i < G_diag_nnz; i++)
for (d = 0; d < dim; d++)
GPi_diag_J[dim*i+d] = dim*G_diag_J[i]+d;
/* Interleaved per entry: G itself first, then the Pi components
   0.5*|G_ij| * G{x,y,z}[i]; the pointer advances in the exact order
   set up for GPi_diag_J above */
for (i = 0; i < G_diag_nrows; i++)
for (j = G_diag_I[i]; j < G_diag_I[i+1]; j++)
{
*GPi_diag_data++ = G_diag_data[j];
*GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gx_data[i];
*GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gy_data[i];
if (dim == 4)
*GPi_diag_data++ = fabs(G_diag_data[j]) * 0.5 * Gz_data[i];
}
}
/* Fill-in the off-diagonal part (same interleaving as the diagonal) */
{
hypre_CSRMatrix *G_offd = hypre_ParCSRMatrixOffd(G);
HYPRE_Int *G_offd_I = hypre_CSRMatrixI(G_offd);
HYPRE_Int *G_offd_J = hypre_CSRMatrixJ(G_offd);
HYPRE_Real *G_offd_data = hypre_CSRMatrixData(G_offd);
HYPRE_Int G_offd_nrows = hypre_CSRMatrixNumRows(G_offd);
HYPRE_Int G_offd_ncols = hypre_CSRMatrixNumCols(G_offd);
HYPRE_Int G_offd_nnz = hypre_CSRMatrixNumNonzeros(G_offd);
hypre_CSRMatrix *GPi_offd = hypre_ParCSRMatrixOffd(GPi);
HYPRE_Int *GPi_offd_I = hypre_CSRMatrixI(GPi_offd);
HYPRE_Int *GPi_offd_J = hypre_CSRMatrixJ(GPi_offd);
HYPRE_Real *GPi_offd_data = hypre_CSRMatrixData(GPi_offd);
HYPRE_Int *G_cmap = hypre_ParCSRMatrixColMapOffd(G);
HYPRE_Int *GPi_cmap = hypre_ParCSRMatrixColMapOffd(GPi);
/* Row pointers are only written when the offd block is nonempty */
if (G_offd_ncols)
for (i = 0; i < G_offd_nrows+1; i++)
GPi_offd_I[i] = dim * G_offd_I[i];
for (i = 0; i < G_offd_nnz; i++)
for (d = 0; d < dim; d++)
GPi_offd_J[dim*i+d] = dim*G_offd_J[i]+d;
for (i = 0; i < G_offd_nrows; i++)
for (j = G_offd_I[i]; j < G_offd_I[i+1]; j++)
{
*GPi_offd_data++ = G_offd_data[j];
*GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gx_data[i];
*GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gy_data[i];
if (dim == 4)
*GPi_offd_data++ = fabs(G_offd_data[j]) * 0.5 * Gz_data[i];
}
/* The off-processor column map is interleaved the same way */
for (i = 0; i < G_offd_ncols; i++)
for (d = 0; d < dim; d++)
GPi_cmap[dim*i+d] = dim*G_cmap[i]+d;
}
}
*GPi_ptr = GPi;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSetup
*
* Construct the AMS solver components.
*
* The following functions need to be called before hypre_AMSSetup():
* - hypre_AMSSetDimension() (if solving a 2D problem)
* - hypre_AMSSetDiscreteGradient()
* - hypre_AMSSetCoordinateVectors() or hypre_AMSSetEdgeConstantVectors
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSetup(void *solver,
hypre_ParCSRMatrix *A,
hypre_ParVector *b,
hypre_ParVector *x)
{
hypre_AMSData *ams_data = (hypre_AMSData *) solver;
HYPRE_Int input_info = 0;
ams_data -> A = A;
/* Modifications for problems with zero-conductivity regions */
if (ams_data -> interior_nodes)
{
hypre_ParCSRMatrix *G0t, *Aorig = A;
/* Make sure that multiple Setup()+Solve() give identical results */
ams_data -> solve_counter = 0;
/* Construct the discrete gradient matrix for the zero-conductivity region
by eliminating the zero-conductivity nodes from G^t. The range of G0
represents the kernel of A, i.e. the gradients of nodal basis functions
supported in zero-conductivity regions. */
hypre_ParCSRMatrixTranspose(ams_data -> G, &G0t, 1);
{
HYPRE_Int i, j;
HYPRE_Int nv = hypre_ParCSRMatrixNumCols(ams_data -> G);
hypre_CSRMatrix *G0td = hypre_ParCSRMatrixDiag(G0t);
HYPRE_Int *G0tdI = hypre_CSRMatrixI(G0td);
HYPRE_Real *G0tdA = hypre_CSRMatrixData(G0td);
hypre_CSRMatrix *G0to = hypre_ParCSRMatrixOffd(G0t);
HYPRE_Int *G0toI = hypre_CSRMatrixI(G0to);
HYPRE_Real *G0toA = hypre_CSRMatrixData(G0to);
HYPRE_Real *interior_nodes_data=hypre_VectorData(
hypre_ParVectorLocalVector((hypre_ParVector*) ams_data -> interior_nodes));
for (i = 0; i < nv; i++)
{
if (interior_nodes_data[i] != 1)
{
for (j = G0tdI[i]; j < G0tdI[i+1]; j++)
G0tdA[j] = 0.0;
if (G0toI)
for (j = G0toI[i]; j < G0toI[i+1]; j++)
G0toA[j] = 0.0;
}
}
}
hypre_ParCSRMatrixTranspose(G0t, & ams_data -> G0, 1);
/* Construct the subspace matrix A_G0 = G0^T G0 */
ams_data -> A_G0 = hypre_ParMatmul(G0t, ams_data -> G0);
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G0);
/* Create AMG solver for A_G0 */
HYPRE_BoomerAMGCreate(&ams_data -> B_G0);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G0, ams_data -> B_G_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G0, ams_data -> B_G_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G0, ams_data -> B_G_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G0, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G0, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_G0, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G0, 3); /* use just a few V-cycles */
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G0, ams_data -> B_G_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_G0, ams_data -> B_G_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G0, ams_data -> B_G_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G0, 2); /* don't coarsen to 0 */
/* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G0, ams_data -> B_G_coarse_relax_type, 3);
HYPRE_BoomerAMGSetup(ams_data -> B_G0,
(HYPRE_ParCSRMatrix)ams_data -> A_G0,
0, 0);
/* Construct the preconditioner for ams_data->A = A + G0 G0^T.
NOTE: this can be optimized significantly by taking into account that
the sparsity pattern of A is subset of the sparsity pattern of G0 G0^T */
{
hypre_ParCSRMatrix *A = hypre_ParMatmul(ams_data -> G0, G0t);
hypre_ParCSRMatrix *B = Aorig;
hypre_ParCSRMatrix **C_ptr = &ams_data -> A;
hypre_ParCSRMatrix *C;
hypre_CSRMatrix *A_local, *B_local, *C_local, *C_tmp;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_Int global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(A);
HYPRE_Int *col_starts = hypre_ParCSRMatrixColStarts(A);
HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A));
HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A));
HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B));
HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B));
HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B));
A_local = hypre_MergeDiagAndOffd(A);
B_local = hypre_MergeDiagAndOffd(B);
/* scale (penalize) G0 G0^T before adding it to the matrix */
{
HYPRE_Int i, nnz = hypre_CSRMatrixNumNonzeros(A_local);
HYPRE_Real *data = hypre_CSRMatrixData(A_local);
HYPRE_Real *dataB = hypre_CSRMatrixData(B_local);
HYPRE_Int nnzB = hypre_CSRMatrixNumNonzeros(B_local);
HYPRE_Real factor, lfactor;
lfactor = -1;
for (i = 0; i < nnzB; i++)
if (fabs(dataB[i]) > lfactor)
lfactor = fabs(dataB[i]);
lfactor *= 1e-10; /* scaling factor: max|A_ij|*1e-10 */
hypre_MPI_Allreduce(&lfactor, &factor, 1, HYPRE_MPI_REAL, hypre_MPI_MAX,
hypre_ParCSRMatrixComm(A));
for (i = 0; i < nnz; i++)
data[i] *= factor;
}
C_tmp = hypre_CSRMatrixAdd(A_local, B_local);
C_local = hypre_CSRMatrixDeleteZeros(C_tmp,0.0);
if (C_local)
hypre_CSRMatrixDestroy(C_tmp);
else
C_local = C_tmp;
C = hypre_ParCSRMatrixCreate (comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
A_num_cols_offd + B_num_cols_offd,
A_num_nonzeros_diag + B_num_nonzeros_diag,
A_num_nonzeros_offd + B_num_nonzeros_offd);
GenerateDiagAndOffd(C_local, C,
hypre_ParCSRMatrixFirstColDiag(A),
hypre_ParCSRMatrixLastColDiag(A));
hypre_ParCSRMatrixOwnsRowStarts(C) = 0;
hypre_ParCSRMatrixOwnsColStarts(C) = 1;
hypre_ParCSRMatrixOwnsColStarts(G0t) = 0;
hypre_CSRMatrixDestroy(A_local);
hypre_CSRMatrixDestroy(B_local);
hypre_CSRMatrixDestroy(C_local);
hypre_ParCSRMatrixDestroy(A);
*C_ptr = C;
}
hypre_ParCSRMatrixDestroy(G0t);
}
/* Make sure that the first entry in each row is the diagonal one. */
/* hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(ams_data -> A)); */
/* Compute the l1 norm of the rows of A */
if (ams_data -> A_relax_type >= 1 && ams_data -> A_relax_type <= 4)
hypre_ParCSRComputeL1Norms(ams_data -> A, ams_data -> A_relax_type,
NULL, &ams_data -> A_l1_norms);
/* Chebyshev? */
if (ams_data -> A_relax_type == 16)
{
hypre_ParCSRMaxEigEstimateCG(ams_data->A, 1, 10,
&ams_data->A_max_eig_est,
&ams_data->A_min_eig_est);
}
/* If not given, compute Gx, Gy and Gz */
{
if (ams_data -> x != NULL && ams_data -> y != NULL &&
(ams_data -> dim == 2 || ams_data -> z != NULL))
input_info = 1;
if (ams_data -> Gx != NULL && ams_data -> Gy != NULL &&
(ams_data -> dim == 2 || ams_data -> Gz != NULL))
input_info = 2;
if (input_info == 1)
{
ams_data -> Gx = hypre_ParVectorInRangeOf(ams_data -> G);
hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> x, 0.0, ams_data -> Gx);
ams_data -> Gy = hypre_ParVectorInRangeOf(ams_data -> G);
hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> y, 0.0, ams_data -> Gy);
if (ams_data -> dim == 3)
{
ams_data -> Gz = hypre_ParVectorInRangeOf(ams_data -> G);
hypre_ParCSRMatrixMatvec (1.0, ams_data -> G, ams_data -> z, 0.0, ams_data -> Gz);
}
}
}
if (ams_data -> Pi == NULL && ams_data -> Pix == NULL)
{
if (ams_data -> cycle_type == 20)
hypre_AMSComputeGPi(ams_data -> A,
ams_data -> G,
ams_data -> Gx,
ams_data -> Gy,
ams_data -> Gz,
ams_data -> dim,
&ams_data -> Pi);
else if (ams_data -> cycle_type > 10)
/* Construct Pi{x,y,z} instead of Pi = [Pix,Piy,Piz] */
hypre_AMSComputePixyz(ams_data -> A,
ams_data -> G,
ams_data -> Gx,
ams_data -> Gy,
ams_data -> Gz,
ams_data -> dim,
&ams_data -> Pix,
&ams_data -> Piy,
&ams_data -> Piz);
else
/* Construct the Pi interpolation matrix */
hypre_AMSComputePi(ams_data -> A,
ams_data -> G,
ams_data -> Gx,
ams_data -> Gy,
ams_data -> Gz,
ams_data -> dim,
&ams_data -> Pi);
}
/* Keep Gx, Gy and Gz only if use the method with discrete divergence
stabilization (where we use them to compute the local mesh size). */
if (input_info == 1 && ams_data -> cycle_type != 9)
{
hypre_ParVectorDestroy(ams_data -> Gx);
hypre_ParVectorDestroy(ams_data -> Gy);
if (ams_data -> dim == 3)
hypre_ParVectorDestroy(ams_data -> Gz);
}
/* Create the AMG solver on the range of G^T */
if (!ams_data -> beta_is_zero && ams_data -> cycle_type != 20)
{
HYPRE_BoomerAMGCreate(&ams_data -> B_G);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_G, ams_data -> B_G_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_G, ams_data -> B_G_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_G, ams_data -> B_G_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_G, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_G, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_G, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_G, ams_data -> B_G_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_G, ams_data -> B_G_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_G, ams_data -> B_G_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_G, 2); /* don't coarsen to 0 */
/* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_G, ams_data -> B_G_coarse_relax_type, 3);
if (ams_data -> cycle_type == 0)
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_G, 2);
/* If not given, construct the coarse space matrix by RAP */
if (!ams_data -> A_G)
{
HYPRE_Int G_owned_col_starts;
if (!hypre_ParCSRMatrixCommPkg(ams_data -> G))
hypre_MatvecCommPkgCreate(ams_data -> G);
if (!hypre_ParCSRMatrixCommPkg(ams_data -> A))
hypre_MatvecCommPkgCreate(ams_data -> A);
G_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> G);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> G,
ams_data -> A,
ams_data -> G,
&ams_data -> A_G);
/* Make sure that A_G has no zero rows (this can happen
if beta is zero in part of the domain). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_G);
hypre_ParCSRMatrixOwnsColStarts(ams_data -> G) = G_owned_col_starts;
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_G) = 0;
ams_data -> owns_A_G = 1;
}
HYPRE_BoomerAMGSetup(ams_data -> B_G,
(HYPRE_ParCSRMatrix)ams_data -> A_G,
0, 0);
}
if (ams_data -> cycle_type > 10 && ams_data -> cycle_type != 20)
/* Create the AMG solvers on the range of Pi{x,y,z}^T */
{
HYPRE_Int P_owned_col_starts;
HYPRE_BoomerAMGCreate(&ams_data -> B_Pix);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pix, ams_data -> B_Pi_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pix, ams_data -> B_Pi_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pix, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_Pix, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pix, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pix, ams_data -> B_Pi_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pix, ams_data -> B_Pi_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pix, ams_data -> B_Pi_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pix, 2);
HYPRE_BoomerAMGCreate(&ams_data -> B_Piy);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piy, ams_data -> B_Pi_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piy, ams_data -> B_Pi_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piy, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_Piy, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piy, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piy, ams_data -> B_Pi_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piy, ams_data -> B_Pi_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piy, ams_data -> B_Pi_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piy, 2);
HYPRE_BoomerAMGCreate(&ams_data -> B_Piz);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Piz, ams_data -> B_Pi_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Piz, ams_data -> B_Pi_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Piz, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_Piz, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Piz, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Piz, ams_data -> B_Pi_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_Piz, ams_data -> B_Pi_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Piz, ams_data -> B_Pi_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Piz, 2);
/* Generally, don't use exact solve on the coarsest level (matrices may be singular) */
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pix, ams_data -> B_Pi_coarse_relax_type, 3);
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piy, ams_data -> B_Pi_coarse_relax_type, 3);
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Piz, ams_data -> B_Pi_coarse_relax_type, 3);
if (ams_data -> cycle_type == 0)
{
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pix, 2);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piy, 2);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Piz, 2);
}
/* Construct the coarse space matrices by RAP */
if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pix))
hypre_MatvecCommPkgCreate(ams_data -> Pix);
P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Pix);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pix,
ams_data -> A,
ams_data -> Pix,
&ams_data -> A_Pix);
if (!P_owned_col_starts)
{
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Pix) = 0;
hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Pix) = 0;
}
/* Make sure that A_Pix has no zero rows (this can happen
for some kinds of boundary conditions with contact). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pix);
HYPRE_BoomerAMGSetup(ams_data -> B_Pix,
(HYPRE_ParCSRMatrix)ams_data -> A_Pix,
0, 0);
if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piy))
hypre_MatvecCommPkgCreate(ams_data -> Piy);
P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Piy);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piy,
ams_data -> A,
ams_data -> Piy,
&ams_data -> A_Piy);
if (!P_owned_col_starts)
{
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Piy) = 0;
hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Piy) = 0;
}
/* Make sure that A_Piy has no zero rows (this can happen
for some kinds of boundary conditions with contact). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piy);
HYPRE_BoomerAMGSetup(ams_data -> B_Piy,
(HYPRE_ParCSRMatrix)ams_data -> A_Piy,
0, 0);
if (ams_data -> Piz)
{
if (!hypre_ParCSRMatrixCommPkg(ams_data -> Piz))
hypre_MatvecCommPkgCreate(ams_data -> Piz);
P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Piz);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Piz,
ams_data -> A,
ams_data -> Piz,
&ams_data -> A_Piz);
if (!P_owned_col_starts)
{
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Piz) = 0;
hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Piz) = 0;
}
/* Make sure that A_Piz has no zero rows (this can happen
for some kinds of boundary conditions with contact). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Piz);
HYPRE_BoomerAMGSetup(ams_data -> B_Piz,
(HYPRE_ParCSRMatrix)ams_data -> A_Piz,
0, 0);
}
}
else
/* Create the AMG solver on the range of Pi^T */
{
HYPRE_BoomerAMGCreate(&ams_data -> B_Pi);
HYPRE_BoomerAMGSetCoarsenType(ams_data -> B_Pi, ams_data -> B_Pi_coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(ams_data -> B_Pi, ams_data -> B_Pi_agg_levels);
HYPRE_BoomerAMGSetRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_relax_type);
HYPRE_BoomerAMGSetNumSweeps(ams_data -> B_Pi, 1);
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 25);
HYPRE_BoomerAMGSetTol(ams_data -> B_Pi, 0.0);
HYPRE_BoomerAMGSetMaxIter(ams_data -> B_Pi, 1);
HYPRE_BoomerAMGSetStrongThreshold(ams_data -> B_Pi, ams_data -> B_Pi_theta);
HYPRE_BoomerAMGSetInterpType(ams_data -> B_Pi, ams_data -> B_Pi_interp_type);
HYPRE_BoomerAMGSetPMaxElmts(ams_data -> B_Pi, ams_data -> B_Pi_Pmax);
HYPRE_BoomerAMGSetMinCoarseSize(ams_data -> B_Pi, 2); /* don't coarsen to 0 */
/* Generally, don't use exact solve on the coarsest level (matrix may be singular) */
HYPRE_BoomerAMGSetCycleRelaxType(ams_data -> B_Pi, ams_data -> B_Pi_coarse_relax_type, 3);
if (ams_data -> cycle_type == 0)
HYPRE_BoomerAMGSetMaxLevels(ams_data -> B_Pi, 2);
/* If not given, construct the coarse space matrix by RAP and
notify BoomerAMG that this is a dim x dim block system. */
if (!ams_data -> A_Pi)
{
HYPRE_Int P_owned_col_starts = hypre_ParCSRMatrixOwnsColStarts(ams_data -> Pi);
if (!hypre_ParCSRMatrixCommPkg(ams_data -> Pi))
hypre_MatvecCommPkgCreate(ams_data -> Pi);
if (!hypre_ParCSRMatrixCommPkg(ams_data -> A))
hypre_MatvecCommPkgCreate(ams_data -> A);
if (ams_data -> cycle_type == 9)
{
/* Add a discrete divergence term to A before computing Pi^t A Pi */
{
hypre_ParCSRMatrix *Gt, *GGt, *ApGGt;
hypre_ParCSRMatrixTranspose(ams_data -> G, &Gt, 1);
hypre_ParCSRMatrixOwnsColStarts(Gt) = 0;
hypre_ParCSRMatrixOwnsRowStarts(Gt) = 0;
/* scale GGt by h^2 */
{
HYPRE_Real h2;
HYPRE_Int i, j, k, ne;
hypre_CSRMatrix *Gt_diag = hypre_ParCSRMatrixDiag(Gt);
HYPRE_Int Gt_num_rows = hypre_CSRMatrixNumRows(Gt_diag);
HYPRE_Int *Gt_diag_I = hypre_CSRMatrixI(Gt_diag);
HYPRE_Int *Gt_diag_J = hypre_CSRMatrixJ(Gt_diag);
HYPRE_Real *Gt_diag_data = hypre_CSRMatrixData(Gt_diag);
hypre_CSRMatrix *Gt_offd = hypre_ParCSRMatrixOffd(Gt);
HYPRE_Int *Gt_offd_I = hypre_CSRMatrixI(Gt_offd);
HYPRE_Real *Gt_offd_data = hypre_CSRMatrixData(Gt_offd);
HYPRE_Real *Gx_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gx));
HYPRE_Real *Gy_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gy));
HYPRE_Real *Gz_data = hypre_VectorData(hypre_ParVectorLocalVector(ams_data -> Gz));
for (i = 0; i < Gt_num_rows; i++)
{
/* determine the characteristic mesh size for vertex i */
h2 = 0.0;
ne = 0;
for (j = Gt_diag_I[i]; j < Gt_diag_I[i+1]; j++)
{
k = Gt_diag_J[j];
h2 += Gx_data[k]*Gx_data[k]+Gy_data[k]*Gy_data[k]+Gz_data[k]*Gz_data[k];
ne++;
}
if (ne != 0)
{
h2 /= ne;
for (j = Gt_diag_I[i]; j < Gt_diag_I[i+1]; j++)
Gt_diag_data[j] *= h2;
for (j = Gt_offd_I[i]; j < Gt_offd_I[i+1]; j++)
Gt_offd_data[j] *= h2;
}
}
}
/* we only needed Gx, Gy and Gz to compute the local mesh size */
if (input_info == 1)
{
hypre_ParVectorDestroy(ams_data -> Gx);
hypre_ParVectorDestroy(ams_data -> Gy);
if (ams_data -> dim == 3)
hypre_ParVectorDestroy(ams_data -> Gz);
}
GGt = hypre_ParMatmul(ams_data -> G, Gt);
hypre_ParCSRMatrixDestroy(Gt);
/* hypre_ParCSRMatrixAdd(GGt, A, &ams_data -> A); */
{
hypre_ParCSRMatrix *A = GGt;
hypre_ParCSRMatrix *B = ams_data -> A;
hypre_ParCSRMatrix **C_ptr = &ApGGt;
hypre_ParCSRMatrix *C;
hypre_CSRMatrix *A_local, *B_local, *C_local;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_Int global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(A);
HYPRE_Int *col_starts = hypre_ParCSRMatrixColStarts(A);
HYPRE_Int A_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
HYPRE_Int A_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A));
HYPRE_Int A_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A));
HYPRE_Int B_num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(B));
HYPRE_Int B_num_nonzeros_diag = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(B));
HYPRE_Int B_num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(B));
A_local = hypre_MergeDiagAndOffd(A);
B_local = hypre_MergeDiagAndOffd(B);
C_local = hypre_CSRMatrixAdd(A_local, B_local);
C = hypre_ParCSRMatrixCreate (comm,
global_num_rows,
global_num_cols,
row_starts,
col_starts,
A_num_cols_offd + B_num_cols_offd,
A_num_nonzeros_diag + B_num_nonzeros_diag,
A_num_nonzeros_offd + B_num_nonzeros_offd);
GenerateDiagAndOffd(C_local, C,
hypre_ParCSRMatrixFirstColDiag(A),
hypre_ParCSRMatrixLastColDiag(A));
hypre_ParCSRMatrixOwnsRowStarts(C) = 0;
hypre_ParCSRMatrixOwnsColStarts(C) = 0;
hypre_CSRMatrixDestroy(A_local);
hypre_CSRMatrixDestroy(B_local);
hypre_CSRMatrixDestroy(C_local);
*C_ptr = C;
}
hypre_ParCSRMatrixDestroy(GGt);
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi,
ApGGt,
ams_data -> Pi,
&ams_data -> A_Pi);
}
}
else
{
hypre_BoomerAMGBuildCoarseOperator(ams_data -> Pi,
ams_data -> A,
ams_data -> Pi,
&ams_data -> A_Pi);
}
if (!P_owned_col_starts)
{
hypre_ParCSRMatrixOwnsRowStarts(ams_data -> A_Pi) = 0;
hypre_ParCSRMatrixOwnsColStarts(ams_data -> A_Pi) = 0;
}
ams_data -> owns_A_Pi = 1;
if (ams_data -> cycle_type != 20)
HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim);
else
HYPRE_BoomerAMGSetNumFunctions(ams_data -> B_Pi, ams_data -> dim + 1);
/* HYPRE_BoomerAMGSetNodal(ams_data -> B_Pi, 1); */
}
/* Make sure that A_Pi has no zero rows (this can happen for
some kinds of boundary conditions with contact). */
hypre_ParCSRMatrixFixZeroRows(ams_data -> A_Pi);
HYPRE_BoomerAMGSetup(ams_data -> B_Pi,
(HYPRE_ParCSRMatrix)ams_data -> A_Pi,
0, 0);
}
/* Allocate temporary vectors */
ams_data -> r0 = hypre_ParVectorInRangeOf(ams_data -> A);
ams_data -> g0 = hypre_ParVectorInRangeOf(ams_data -> A);
if (ams_data -> A_G)
{
ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_G);
ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_G);
}
if (ams_data -> r1 == NULL && ams_data -> A_Pix)
{
ams_data -> r1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix);
ams_data -> g1 = hypre_ParVectorInRangeOf(ams_data -> A_Pix);
}
if (ams_data -> Pi)
{
ams_data -> r2 = hypre_ParVectorInDomainOf(ams_data -> Pi);
ams_data -> g2 = hypre_ParVectorInDomainOf(ams_data -> Pi);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSSolve
*
* Solve the system A x = b.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSSolve(void *solver,
                         hypre_ParCSRMatrix *A,
                         hypre_ParVector *b,
                         hypre_ParVector *x)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;
   HYPRE_Int i, my_id = -1;
   /* NOTE: r0_norm, r_norm and b_norm are computed only when maxit > 1;
      relative_resid is pre-initialized so the convergence test below is
      well-defined in the maxit == 1 case. */
   HYPRE_Real r0_norm, r_norm, b_norm, relative_resid = 0, old_resid;
   /* Cycle descriptor interpreted by hypre_ParCSRSubspacePrec:
      '0' = fine-grid relaxation, '1'..'5' = correction in subspace 0..4,
      '(' = save the current residual, '+' = make the next correction
      additive (based on the saved residual), ')' = no-op. */
   char cycle[30];
   hypre_ParCSRMatrix *Ai[5], *Pi[5];
   HYPRE_Solver Bi[5];
   HYPRE_PtrToSolverFcn HBi[5];
   hypre_ParVector *ri[5], *gi[5];
   hypre_ParVector *z = NULL;
   /* Subspace data in the order expected by hypre_ParCSRSubspacePrec:
      0 = range of G, 1 = range of Pi, 2..4 = ranges of Pix/Piy/Piz. */
   Ai[0] = ams_data -> A_G; Pi[0] = ams_data -> G;
   Ai[1] = ams_data -> A_Pi; Pi[1] = ams_data -> Pi;
   Ai[2] = ams_data -> A_Pix; Pi[2] = ams_data -> Pix;
   Ai[3] = ams_data -> A_Piy; Pi[3] = ams_data -> Piy;
   Ai[4] = ams_data -> A_Piz; Pi[4] = ams_data -> Piz;
   /* The Pi subspace is solved with the block (unknown-based) AMG solve;
      all other subspaces use the scalar BoomerAMG solve. */
   Bi[0] = ams_data -> B_G; HBi[0] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[1] = ams_data -> B_Pi; HBi[1] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGBlockSolve;
   Bi[2] = ams_data -> B_Pix; HBi[2] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[3] = ams_data -> B_Piy; HBi[3] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   Bi[4] = ams_data -> B_Piz; HBi[4] = (HYPRE_PtrToSolverFcn) hypre_BoomerAMGSolve;
   /* r1/g1 serve both the G and the Pix/Piy/Piz corrections (they are
      allocated in hypre_AMSSetup to fit whichever of A_G/A_Pix exists). */
   ri[0] = ams_data -> r1; gi[0] = ams_data -> g1;
   ri[1] = ams_data -> r2; gi[1] = ams_data -> g2;
   ri[2] = ams_data -> r1; gi[2] = ams_data -> g1;
   ri[3] = ams_data -> r1; gi[3] = ams_data -> g1;
   ri[4] = ams_data -> r1; gi[4] = ams_data -> g1;
   /* may need to create an additional temporary vector for relaxation */
   if (hypre_NumThreads() > 1 || ams_data -> A_relax_type == 16)
   {
      z = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                hypre_ParCSRMatrixRowStarts(A));
      hypre_ParVectorInitialize(z);
      hypre_ParVectorSetPartitioningOwner(z,0);
   }
   if (ams_data -> print_level > 0)
      hypre_MPI_Comm_rank(hypre_ParCSRMatrixComm(A), &my_id);
   /* Compatible subspace projection for problems with zero-conductivity regions.
      Note that this modifies the input (r.h.s.) vector b! */
   if ( (ams_data -> B_G0) &&
        (++ams_data->solve_counter % ( ams_data -> projection_frequency ) == 0) )
   {
      /* hypre_printf("Projecting onto the compatible subspace...\n"); */
      hypre_AMSProjectOutGradients(ams_data, b);
   }
   /* Select the cycle string. When beta is zero the scalar-potential
      correction '1' is omitted (the A_G hierarchy was not built). */
   if (ams_data -> beta_is_zero)
   {
      switch (ams_data -> cycle_type)
      {
         case 0:
            hypre_sprintf(cycle,"%s","0");
            break;
         case 1:
         case 3:
         case 5:
         case 7:
         default:
            hypre_sprintf(cycle,"%s","020");
            break;
         case 2:
         case 4:
         case 6:
         case 8:
            hypre_sprintf(cycle,"%s","(0+2)");
            break;
         case 11:
         case 13:
            hypre_sprintf(cycle,"%s","0345430");
            break;
         case 12:
            hypre_sprintf(cycle,"%s","(0+3+4+5)");
            break;
         case 14:
            hypre_sprintf(cycle,"%s","0(+3+4+5)0");
            break;
      }
   }
   else
   {
      switch (ams_data -> cycle_type)
      {
         case 0:
            hypre_sprintf(cycle,"%s","010");
            break;
         case 1:
         default:
            hypre_sprintf(cycle,"%s","01210");
            break;
         case 2:
            hypre_sprintf(cycle,"%s","(0+1+2)");
            break;
         case 3:
            hypre_sprintf(cycle,"%s","02120");
            break;
         case 4:
            hypre_sprintf(cycle,"%s","(010+2)");
            break;
         case 5:
            hypre_sprintf(cycle,"%s","0102010");
            break;
         case 6:
            hypre_sprintf(cycle,"%s","(020+1)");
            break;
         case 7:
            hypre_sprintf(cycle,"%s","0201020");
            break;
         case 8:
            hypre_sprintf(cycle,"%s","0(+1+2)0");
            break;
         case 9:
            hypre_sprintf(cycle,"%s","01210");
            break;
         case 11:
            hypre_sprintf(cycle,"%s","013454310");
            break;
         case 12:
            hypre_sprintf(cycle,"%s","(0+1+3+4+5)");
            break;
         case 13:
            hypre_sprintf(cycle,"%s","034515430");
            break;
         case 14:
            hypre_sprintf(cycle,"%s","01(+3+4+5)10");
            break;
         case 20:
            hypre_sprintf(cycle,"%s","020");
            break;
      }
   }
   for (i = 0; i < ams_data -> maxit; i++)
   {
      /* Compute initial residual norms */
      if (ams_data -> maxit > 1 && i == 0)
      {
         hypre_ParVectorCopy(b, ams_data -> r0);
         hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0);
         r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0,ams_data -> r0));
         r0_norm = r_norm;
         b_norm = sqrt(hypre_ParVectorInnerProd(b, b));
         /* fall back to the absolute residual when the r.h.s. is zero */
         if (b_norm)
            relative_resid = r_norm / b_norm;
         else
            relative_resid = r_norm;
         if (my_id == 0 && ams_data -> print_level > 0)
         {
            hypre_printf("                                            relative\n");
            hypre_printf("               residual        factor       residual\n");
            hypre_printf("               --------        ------       --------\n");
            hypre_printf("    Initial    %e                 %e\n",
                         r_norm, relative_resid);
         }
      }
      /* Apply the preconditioner */
      hypre_ParCSRSubspacePrec(ams_data -> A,
                               ams_data -> A_relax_type,
                               ams_data -> A_relax_times,
                               ams_data -> A_l1_norms,
                               ams_data -> A_relax_weight,
                               ams_data -> A_omega,
                               ams_data -> A_max_eig_est,
                               ams_data -> A_min_eig_est,
                               ams_data -> A_cheby_order,
                               ams_data -> A_cheby_fraction,
                               Ai, Bi, HBi, Pi, ri, gi,
                               b, x,
                               ams_data -> r0,
                               ams_data -> g0,
                               cycle,
                               z);
      /* Compute new residual norms */
      if (ams_data -> maxit > 1)
      {
         old_resid = r_norm;
         hypre_ParVectorCopy(b, ams_data -> r0);
         hypre_ParCSRMatrixMatvec(-1.0, ams_data -> A, x, 1.0, ams_data -> r0);
         r_norm = sqrt(hypre_ParVectorInnerProd(ams_data -> r0,ams_data -> r0));
         if (b_norm)
            relative_resid = r_norm / b_norm;
         else
            relative_resid = r_norm;
         if (my_id == 0 && ams_data -> print_level > 0)
            hypre_printf("    Cycle %2d   %e    %f     %e \n",
                         i+1, r_norm, r_norm / old_resid, relative_resid);
      }
      /* Converged: bump i so it counts the cycles actually performed. */
      if (relative_resid < ams_data -> tol)
      {
         i++;
         break;
      }
   }
   if (my_id == 0 && ams_data -> print_level > 0 && ams_data -> maxit > 1)
      hypre_printf("\n\n Average Convergence Factor = %f\n\n",
                   pow((r_norm/r0_norm),(1.0/(HYPRE_Real) i)));
   /* Record iteration statistics for the Get* accessors below. */
   ams_data -> num_iterations = i;
   ams_data -> rel_resid_norm = relative_resid;
   if (ams_data -> num_iterations == ams_data -> maxit && ams_data -> tol > 0.0)
      hypre_error(HYPRE_ERROR_CONV);
   if (z)
      hypre_ParVectorDestroy(z);
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRSubspacePrec
*
* General subspace preconditioner for A0 y = x, based on ParCSR storage.
*
* P[i] and A[i] are the interpolation and coarse grid matrices for
* the (i+1)'th subspace. B[i] is an AMG solver for A[i]. r[i] and g[i]
* are temporary vectors. A0_* are the fine grid smoothing parameters.
*
* The default mode is multiplicative, '+' changes the next correction
* to additive, based on residual computed at '('.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRSubspacePrec(/* fine space matrix */
                                   hypre_ParCSRMatrix *A0,
                                   /* relaxation parameters */
                                   HYPRE_Int A0_relax_type,
                                   HYPRE_Int A0_relax_times,
                                   HYPRE_Real *A0_l1_norms,
                                   HYPRE_Real A0_relax_weight,
                                   HYPRE_Real A0_omega,
                                   HYPRE_Real A0_max_eig_est,
                                   HYPRE_Real A0_min_eig_est,
                                   HYPRE_Int A0_cheby_order,
                                   HYPRE_Real A0_cheby_fraction,
                                   /* subspace matrices */
                                   hypre_ParCSRMatrix **A,
                                   /* subspace preconditioners */
                                   HYPRE_Solver *B,
                                   /* hypre solver functions for B */
                                   HYPRE_PtrToSolverFcn *HB,
                                   /* subspace interpolations */
                                   hypre_ParCSRMatrix **P,
                                   /* temporary subspace vectors */
                                   hypre_ParVector **r,
                                   hypre_ParVector **g,
                                   /* right-hand side */
                                   hypre_ParVector *x,
                                   /* current approximation */
                                   hypre_ParVector *y,
                                   /* current residual */
                                   hypre_ParVector *r0,
                                   /* temporary vector */
                                   hypre_ParVector *g0,
                                   char *cycle,
                                   /* temporary vector */
                                   hypre_ParVector *z)
{
   char *op;
   HYPRE_Int use_saved_residual = 0;
   /* Interpret the cycle string one character at a time:
      ')' = no-op, '(' = save the residual in r0, '+' = make the next
      correction additive (based on the saved residual), '0' = fine-grid
      relaxation, '1' and above = subspace correction. */
   for (op = cycle; *op != '\0'; op++)
   {
      /* do nothing */
      if (*op == ')')
         continue;
      /* compute the residual: r = x - Ay */
      else if (*op == '(')
      {
         hypre_ParVectorCopy(x,r0);
         hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, r0);
      }
      /* switch to additive correction */
      else if (*op == '+')
      {
         use_saved_residual = 1;
         continue;
      }
      /* smooth: y += S (x - Ay) */
      else if (*op == '0')
      {
         hypre_ParCSRRelax(A0, x,
                           A0_relax_type,
                           A0_relax_times,
                           A0_l1_norms,
                           A0_relax_weight,
                           A0_omega,
                           A0_max_eig_est,
                           A0_min_eig_est,
                           A0_cheby_order,
                           A0_cheby_fraction,
                           y, g0, z);
      }
      /* subspace correction: y += P B^{-1} P^t r */
      else
      {
         HYPRE_Int i = *op - '1';
         if (i < 0)
         {
            /* Unrecognized cycle character: record the argument error and
               skip it, instead of falling through and indexing the subspace
               arrays with a negative index (out-of-bounds read in the
               original code). */
            hypre_error_in_arg(16);
            continue;
         }
         /* skip empty subspaces */
         if (!A[i]) continue;
         /* use the saved residual (additive mode) or recompute it from
            the current iterate, then restrict it to the subspace */
         if (use_saved_residual)
         {
            use_saved_residual = 0;
            hypre_ParCSRMatrixMatvecT(1.0, P[i], r0, 0.0, r[i]);
         }
         else
         {
            hypre_ParVectorCopy(x,g0);
            hypre_ParCSRMatrixMatvec(-1.0, A0, y, 1.0, g0);
            hypre_ParCSRMatrixMatvecT(1.0, P[i], g0, 0.0, r[i]);
         }
         /* one application of the subspace solver from a zero initial
            guess, then prolongate the correction and update y */
         hypre_ParVectorSetConstantValues(g[i], 0.0);
         (*HB[i]) (B[i], (HYPRE_Matrix)A[i],
                   (HYPRE_Vector)r[i], (HYPRE_Vector)g[i]);
         hypre_ParCSRMatrixMatvec(1.0, P[i], g[i], 0.0, g0);
         hypre_ParVectorAxpy(1.0, g0, y);
      }
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSGetNumIterations
*
* Get the number of AMS iterations.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSGetNumIterations(void *solver,
                                    HYPRE_Int *num_iterations)
{
   /* Report the iteration count recorded by the last hypre_AMSSolve call. */
   hypre_AMSData *data = (hypre_AMSData *) solver;
   *num_iterations = data -> num_iterations;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSGetFinalRelativeResidualNorm
*
* Get the final relative residual norm in AMS.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSGetFinalRelativeResidualNorm(void *solver,
                                                HYPRE_Real *rel_resid_norm)
{
   /* Report the relative residual recorded by the last hypre_AMSSolve call. */
   hypre_AMSData *data = (hypre_AMSData *) solver;
   *rel_resid_norm = data -> rel_resid_norm;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSProjectOutGradients
*
* For problems with zero-conductivity regions, project the vector onto the
* compatible subspace: x = (I - G0 (G0^t G0)^{-1} G0^T) x, where G0 is the
* discrete gradient restricted to the interior nodes of the regions with
* zero conductivity. This ensures that x is orthogonal to the gradients in
* the range of G0.
*
* This function is typically called after the solution iteration is complete,
* in order to facilitate the visualization of the computed field. Without it
* the values in the zero-conductivity regions contain kernel components.
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSProjectOutGradients(void *solver,
                                       hypre_ParVector *x)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;
   /* Nothing to project unless the G0 solver was set up. */
   if (!(data -> B_G0))
      return hypre_error_flag;
   /* r1 = G0^t x */
   hypre_ParCSRMatrixMatvecT(1.0, data -> G0, x, 0.0, data -> r1);
   /* g1 ~ (G0^t G0)^{-1} r1, via one AMG solve from a zero initial guess */
   hypre_ParVectorSetConstantValues(data -> g1, 0.0);
   hypre_BoomerAMGSolve(data -> B_G0, data -> A_G0, data -> r1, data -> g1);
   /* x -= G0 g1 */
   hypre_ParCSRMatrixMatvec(1.0, data -> G0, data -> g1, 0.0, data -> g0);
   hypre_ParVectorAxpy(-1.0, data -> g0, x);
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSConstructDiscreteGradient
*
* Construct and return the lowest-order discrete gradient matrix G, based on:
* - a matrix on the edges (e.g. the stiffness matrix A)
* - a vector on the vertices (e.g. the x coordinates)
* - the array edge_vertex, which lists the global indexes of the
* vertices of the local edges.
*
* We assume that edge_vertex lists the edge vertices consecutively,
* and that the orientation of all edges is consistent. More specifically:
* If edge_orientation = 1, the edges are already oriented.
* If edge_orientation = 2, the orientation of edge i depends only on the
* sign of edge_vertex[2*i+1] - edge_vertex[2*i].
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_AMSConstructDiscreteGradient(hypre_ParCSRMatrix *A,
                                             hypre_ParVector *x_coord,
                                             HYPRE_Int *edge_vertex,
                                             HYPRE_Int edge_orientation,
                                             hypre_ParCSRMatrix **G_ptr)
{
   hypre_ParCSRMatrix *G;
   HYPRE_Int nedges;
   /* One row of G per edge (row) of A */
   nedges = hypre_ParCSRMatrixNumRows(A);
   /* Construct the local part of G based on edge_vertex and the edge
      and vertex partitionings from A and x_coord */
   {
      /* Each row has exactly two entries (-1 and +1), so I[i] = 2*i. */
      HYPRE_Int i, *I = hypre_CTAlloc(HYPRE_Int, nedges+1);
      HYPRE_Int part_size, *row_starts, *col_starts;
      HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2*nedges);
      hypre_CSRMatrix *local = hypre_CSRMatrixCreate (nedges,
                                                      hypre_ParVectorGlobalSize(x_coord),
                                                      2*nedges);
      for (i = 0; i <= nedges; i++)
         I[i] = 2*i;
      if (edge_orientation == 1)
      {
         /* Assume that the edges are already oriented */
         for (i = 0; i < 2*nedges; i+=2)
         {
            data[i] = -1.0;
            data[i+1] = 1.0;
         }
      }
      else if (edge_orientation == 2)
      {
         /* Assume that the edge orientation is based on the vertex indexes */
         for (i = 0; i < 2*nedges; i+=2)
         {
            if (edge_vertex[i] < edge_vertex[i+1])
            {
               data[i] = -1.0;
               data[i+1] = 1.0;
            }
            else
            {
               data[i] = 1.0;
               data[i+1] = -1.0;
            }
         }
      }
      else
         /* Only edge_orientation values 1 and 2 are supported */
         hypre_error_in_arg(4);
      /* NOTE: J borrows the caller's edge_vertex array; it is detached
         again before 'local' is destroyed, so it is never freed here. */
      hypre_CSRMatrixI(local) = I;
      hypre_CSRMatrixJ(local) = edge_vertex;
      hypre_CSRMatrixData(local) = data;
      hypre_CSRMatrixRownnz(local) = NULL;
      hypre_CSRMatrixOwnsData(local) = 1;
      hypre_CSRMatrixNumRownnz(local) = nedges;
      /* Copy partitioning from A and x_coord (previously they were re-used) */
#ifdef HYPRE_NO_GLOBAL_PARTITION
      part_size = 2;
#else
      hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &part_size);
      part_size++;
#endif
      row_starts = hypre_TAlloc(HYPRE_Int,part_size);
      col_starts = hypre_TAlloc(HYPRE_Int,part_size);
      for (i = 0; i < part_size; i++)
      {
         row_starts[i] = hypre_ParCSRMatrixRowStarts(A)[i];
         col_starts[i] = hypre_ParVectorPartitioning(x_coord)[i];
      }
      /* Generate the discrete gradient matrix */
      G = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A),
                                   hypre_ParCSRMatrixGlobalNumRows(A),
                                   hypre_ParVectorGlobalSize(x_coord),
                                   row_starts, col_starts, 0, 0, 0);
      /* G owns the partitioning arrays allocated just above */
      hypre_ParCSRMatrixOwnsRowStarts(G) = 1;
      hypre_ParCSRMatrixOwnsColStarts(G) = 1;
      GenerateDiagAndOffd(local, G,
                          hypre_ParVectorFirstIndex(x_coord),
                          hypre_ParVectorLastIndex(x_coord));
      /* Account for empty rows in G. These may appear when A includes only
         the interior (non-Dirichlet b.c.) edges. */
      {
         hypre_CSRMatrix *G_diag = hypre_ParCSRMatrixDiag(G);
         G_diag->num_cols = hypre_VectorSize(hypre_ParVectorLocalVector(x_coord));
      }
      /* Free the local matrix */
      hypre_CSRMatrixJ(local) = NULL;
      hypre_CSRMatrixDestroy(local);
   }
   *G_ptr = G;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSFEISetup
*
* Construct an AMS solver object based on the following data:
*
* A - the edge element stiffness matrix
* num_vert - number of vertices (nodes) in the processor
* num_local_vert - number of vertices owned by the processor
* vert_number - global indexes of the vertices in the processor
* vert_coord - coordinates of the vertices in the processor
* num_edges - number of edges owned by the processor
* edge_vertex - the vertices of the edges owned by the processor.
* Vertices are in local numbering (the same as in
* vert_number), and edge orientation is always from
* the first to the second vertex.
*
* Here we distinguish between vertices that belong to elements in the
* current processor, and the subset of these vertices that is owned by
* the processor.
*
* This function is written specifically for input from the FEI and should
* be called before hypre_AMSSetup().
*--------------------------------------------------------------------------*/
/* Build the AMS auxiliary data from FEI-style mesh input: create the
   three parallel vertex-coordinate vectors (x,y,z) and the discrete
   gradient matrix G from the edge-vertex connectivity, and install them
   in the solver object.  See the comment block above for the meaning of
   the mesh arrays.  Note: b and x are accepted but not used here. */
HYPRE_Int hypre_AMSFEISetup(void *solver,
                            hypre_ParCSRMatrix *A,
                            hypre_ParVector *b,
                            hypre_ParVector *x,
                            HYPRE_Int num_vert,
                            HYPRE_Int num_local_vert,
                            HYPRE_Int *vert_number,
                            HYPRE_Real *vert_coord,
                            HYPRE_Int num_edges,
                            HYPRE_Int *edge_vertex)
{
   hypre_AMSData *ams_data = (hypre_AMSData *) solver;

   HYPRE_Int i, j;

   hypre_ParCSRMatrix *G;
   hypre_ParVector *x_coord, *y_coord, *z_coord;
   HYPRE_Real *x_data, *y_data, *z_data;

   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   HYPRE_Int *vert_part, num_global_vert;
   HYPRE_Int vert_start, vert_end;

   /* Find the processor partitioning of the vertices */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* Assumed partition: store only this processor's range
      [vert_part[0], vert_part[1]); a scan yields the end offset. */
   vert_part = hypre_TAlloc(HYPRE_Int,2);
   hypre_MPI_Scan(&num_local_vert, &vert_part[1], 1, HYPRE_MPI_INT, hypre_MPI_SUM, comm);
   vert_part[0] = vert_part[1] - num_local_vert;
   hypre_MPI_Allreduce(&num_local_vert, &num_global_vert, 1, HYPRE_MPI_INT, hypre_MPI_SUM, comm);
#else
   /* Global partition: gather all per-processor counts, then prefix-sum. */
   HYPRE_Int num_procs;
   hypre_MPI_Comm_size(comm, &num_procs);
   vert_part = hypre_TAlloc(HYPRE_Int,num_procs+1);
   hypre_MPI_Allgather(&num_local_vert, 1, HYPRE_MPI_INT, &vert_part[1], 1, HYPRE_MPI_INT, comm);
   vert_part[0] = 0;
   for (i = 0; i < num_procs; i++)
      vert_part[i+1] += vert_part[i];
   num_global_vert = vert_part[num_procs];
#endif

   /* Construct hypre parallel vectors for the vertex coordinates.
      All three vectors share vert_part, so none of them owns it here;
      ownership of the array passes to G's col_starts below. */
   x_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(x_coord);
   hypre_ParVectorOwnsData(x_coord) = 1;
   hypre_ParVectorOwnsPartitioning(x_coord) = 0;
   x_data = hypre_VectorData(hypre_ParVectorLocalVector(x_coord));

   y_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(y_coord);
   hypre_ParVectorOwnsData(y_coord) = 1;
   hypre_ParVectorOwnsPartitioning(y_coord) = 0;
   y_data = hypre_VectorData(hypre_ParVectorLocalVector(y_coord));

   z_coord = hypre_ParVectorCreate(comm, num_global_vert, vert_part);
   hypre_ParVectorInitialize(z_coord);
   hypre_ParVectorOwnsData(z_coord) = 1;
   hypre_ParVectorOwnsPartitioning(z_coord) = 0;
   z_data = hypre_VectorData(hypre_ParVectorLocalVector(z_coord));

   vert_start = hypre_ParVectorFirstIndex(x_coord);
   vert_end = hypre_ParVectorLastIndex(x_coord);

   /* Save coordinates of locally owned vertices (vert_coord is packed
      as x,y,z triples per vertex). */
   for (i = 0; i < num_vert; i++)
   {
      if (vert_number[i] >= vert_start && vert_number[i] <= vert_end)
      {
         j = vert_number[i] - vert_start;
         x_data[j] = vert_coord[3*i];
         y_data[j] = vert_coord[3*i+1];
         z_data[j] = vert_coord[3*i+2];
      }
   }

   /* Change vertex numbers from local to global (edge_vertex is
      modified in place). */
   for (i = 0; i < 2*num_edges; i++)
      edge_vertex[i] = vert_number[edge_vertex[i]];

   /* Construct the local part of G based on edge_vertex: each row has
      exactly two entries, +1 on the first vertex and -1 on the second. */
   {
      /* HYPRE_Int num_edges = hypre_ParCSRMatrixNumRows(A); */
      HYPRE_Int *I = hypre_CTAlloc(HYPRE_Int, num_edges+1);
      HYPRE_Real *data = hypre_CTAlloc(HYPRE_Real, 2*num_edges);
      hypre_CSRMatrix *local = hypre_CSRMatrixCreate (num_edges,
                                                      num_global_vert,
                                                      2*num_edges);

      for (i = 0; i <= num_edges; i++)
         I[i] = 2*i;

      /* Assume that the edge orientation is based on the vertex indexes */
      for (i = 0; i < 2*num_edges; i+=2)
      {
         data[i]   =  1.0;
         data[i+1] = -1.0;
      }

      hypre_CSRMatrixI(local) = I;
      hypre_CSRMatrixJ(local) = edge_vertex;
      hypre_CSRMatrixData(local) = data;

      hypre_CSRMatrixRownnz(local) = NULL;
      hypre_CSRMatrixOwnsData(local) = 1;
      hypre_CSRMatrixNumRownnz(local) = num_edges;

      G = hypre_ParCSRMatrixCreate(comm,
                                   hypre_ParCSRMatrixGlobalNumRows(A),
                                   num_global_vert,
                                   hypre_ParCSRMatrixRowStarts(A),
                                   vert_part,
                                   0, 0, 0);
      hypre_ParCSRMatrixOwnsRowStarts(G) = 0;
      hypre_ParCSRMatrixOwnsColStarts(G) = 1;

      GenerateDiagAndOffd(local, G, vert_start, vert_end);

      /* edge_vertex is caller-owned; detach it before destroying local */
      hypre_CSRMatrixJ(local) = NULL;
      hypre_CSRMatrixDestroy(local);
   }

   ams_data -> G = G;

   ams_data -> x = x_coord;
   ams_data -> y = y_coord;
   ams_data -> z = z_coord;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AMSFEIDestroy
*
* Free the additional memory allocated in hypre_AMSFEISetup().
*
* This function is written specifically for input from the FEI and should
* be called before hypre_AMSDestroy().
*--------------------------------------------------------------------------*/
/* Release the objects created by hypre_AMSFEISetup(): the discrete
   gradient G and the three vertex-coordinate vectors.  Should be called
   before hypre_AMSDestroy(). */
HYPRE_Int hypre_AMSFEIDestroy(void *solver)
{
   hypre_AMSData *data = (hypre_AMSData *) solver;

   if (data -> G)
   {
      hypre_ParCSRMatrixDestroy(data -> G);
   }

   if (data -> x)
   {
      hypre_ParVectorDestroy(data -> x);
   }
   if (data -> y)
   {
      hypre_ParVectorDestroy(data -> y);
   }
   if (data -> z)
   {
      hypre_ParVectorDestroy(data -> z);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRComputeL1Norms Threads
*
* Compute the l1 norms of the rows of a given matrix, depending on
* the option parameter:
*
* option 1 = Compute the l1 norm of the rows
* option 2 = Compute the l1 norm of the (processor) off-diagonal
* part of the rows plus the diagonal of A
* option 3 = Compute the l2 norm^2 of the rows
* option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
* Smoothers for Ultra-Parallel Computing"
*
* The above computations are done in a CF manner, whenever the provided
* cf_marker is not NULL.
*--------------------------------------------------------------------------*/
/* Compute l1-like norms of the rows of A (see the option descriptions in
   the comment block above), splitting the rows statically across
   num_threads threads.  When cf_marker != NULL the sums are restricted
   to entries whose column has the same C/F flag as the row (the offd
   flags are fetched from the neighboring processors first).

   Returns the norms in *l1_norm_ptr (newly allocated, caller frees).
   An error is recorded if any computed norm is exactly zero.

   Fix: in option 4, `diag` is now reset to 0.0 for every row.  It was
   previously left uninitialized, so the truncation test below read an
   indeterminate value for a row with no (kept) diagonal entry, and could
   silently reuse the previous row's diagonal. */
HYPRE_Int hypre_ParCSRComputeL1NormsThreads(hypre_ParCSRMatrix *A,
                                            HYPRE_Int option,
                                            HYPRE_Int num_threads,
                                            HYPRE_Int *cf_marker,
                                            HYPRE_Real **l1_norm_ptr)
{
   HYPRE_Int i, j, k;
   HYPRE_Int num_rows = hypre_ParCSRMatrixNumRows(A);

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_I = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_J = hypre_CSRMatrixJ(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   HYPRE_Real diag;
   HYPRE_Real *l1_norm = hypre_CTAlloc(HYPRE_Real, num_rows);
   HYPRE_Int ii, ns, ne, rest, size;

   HYPRE_Int *cf_marker_offd = NULL;
   HYPRE_Int cf_diag;

   /* collect the cf marker data from other procs */
   if (cf_marker != NULL)
   {
      HYPRE_Int index;
      HYPRE_Int num_sends;
      HYPRE_Int start;
      HYPRE_Int *int_buf_data = NULL;

      hypre_ParCSRCommPkg  *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      hypre_ParCSRCommHandle *comm_handle;

      if (num_cols_offd)
         cf_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
         int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
      /* pack the local C/F flags of the rows other processors need */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
                                                 cf_marker_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data);
   }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,k,ns,ne,rest,size,diag,cf_diag) HYPRE_SMP_SCHEDULE
#endif
   for (k = 0; k < num_threads; k++)
   {
      /* static partition of the rows: the first `rest` threads get one
         extra row each */
      size = num_rows/num_threads;
      rest = num_rows - size*num_threads;
      if (k < rest)
      {
         ns = k*size+k;
         ne = (k+1)*size+k+1;
      }
      else
      {
         ns = k*size+rest;
         ne = (k+1)*size+rest;
      }

      if (option == 1)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the l1 norm of the diag part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  l1_norm[i] += fabs(A_diag_data[j]);
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the CF l1 norm of the diag part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  if (cf_diag == cf_marker[A_diag_J[j]])
                     l1_norm[i] += fabs(A_diag_data[j]);
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
         }
      }
      else if (option == 2)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if (ii == i || ii < ns || ii >= ne)
                     l1_norm[i] += fabs(A_diag_data[j]);
               }
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if ((ii == i || ii < ns || ii >= ne) &&
                      (cf_diag == cf_marker[A_diag_J[j]]))
                     l1_norm[i] += fabs(A_diag_data[j]);
               }
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
         }
      }
      else if (option == 3)
      {
         /* squared l2 norm of the row */
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               l1_norm[i] += A_diag_data[j] * A_diag_data[j];
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += A_offd_data[j] * A_offd_data[j];
         }
      }
      else if (option == 4)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            diag = 0.0; /* reset per row; previously read uninitialized */
            if (cf_marker == NULL)
            {
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if (ii == i || ii < ns || ii >= ne)
                  {
                     if (ii == i)
                     {
                        diag = fabs(A_diag_data[j]);
                        l1_norm[i] += fabs(A_diag_data[j]);
                     }
                     else
                        l1_norm[i] += 0.5*fabs(A_diag_data[j]);
                  }
               }
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += 0.5*fabs(A_offd_data[j]);
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if ((ii == i || ii < ns || ii >= ne) &&
                      (cf_diag == cf_marker[A_diag_J[j]]))
                  {
                     if (ii == i)
                     {
                        diag = fabs(A_diag_data[j]);
                        l1_norm[i] += fabs(A_diag_data[j]);
                     }
                     else
                        l1_norm[i] += 0.5*fabs(A_diag_data[j]);
                  }
               }
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += 0.5*fabs(A_offd_data[j]);
               }
            }

            /* Truncate according to Remark 6.2 */
            if (l1_norm[i] <= 4.0/3.0*diag)
               l1_norm[i] = diag;
         }
      }

      /* Handle negative definite matrices.
         NOTE: assumes the diagonal entry is stored first in each row. */
      for (i = ns; i < ne; i++)
         if (A_diag_data[A_diag_I[i]] < 0)
            l1_norm[i] = -l1_norm[i];

      for (i = ns; i < ne; i++)
         /* if (fabs(l1_norm[i]) < DBL_EPSILON) */
         if (fabs(l1_norm[i]) == 0.0)
         {
            hypre_error_in_arg(1);
            break;
         }
   }

   hypre_TFree(cf_marker_offd);

   *l1_norm_ptr = l1_norm;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRRelaxThreads
* 1 = l1-scaled Jacobi
* 2 = l1-scaled block Gauss-Seidel/SSOR
*--------------------------------------------------------------------------*/
/* Threaded relaxation on A u = f:
     relax_type 1 = l1-scaled Jacobi
     relax_type 2 = l1-scaled hybrid (block) Gauss-Seidel/SSOR
   Each thread performs a forward then a backward sweep over its own
   contiguous block of rows; couplings to rows outside the block use the
   values saved in tmp_data/Vext_data, so the method is Jacobi-like
   across thread and processor boundaries.  l1_norms are the row scalings
   (e.g. from hypre_ParCSRComputeL1NormsThreads).  Vtemp and z are
   workspace vectors.  Returns relax_error (always 0 here). */
HYPRE_Int  hypre_ParCSRRelaxThreads(hypre_ParCSRMatrix *A,
                                    hypre_ParVector    *f,
                                    HYPRE_Int           relax_type,
                                    HYPRE_Int           relax_times,
                                    HYPRE_Real         *l1_norms,
                                    HYPRE_Real          relax_weight,
                                    HYPRE_Real          omega,
                                    hypre_ParVector    *u,
                                    hypre_ParVector    *Vtemp,
                                    hypre_ParVector    *z)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;

   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
   HYPRE_Real *u_data = hypre_VectorData(u_local);

   hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
   HYPRE_Real *f_data = hypre_VectorData(f_local);

   hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
   HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
   HYPRE_Real *Vext_data;    /* received off-processor values of u */
   HYPRE_Real *v_buf_data;   /* send buffer for the halo exchange */
   HYPRE_Real *tmp_data;     /* snapshot of u for cross-thread couplings */

   HYPRE_Int i, j;
   HYPRE_Int ii, jj;
   HYPRE_Int ns, ne, size, rest;
   HYPRE_Int relax_error = 0;
   HYPRE_Int num_sends;
   HYPRE_Int index, start;
   HYPRE_Int num_procs, num_threads, my_id;

   HYPRE_Real zero = 0.0;
   HYPRE_Real res, res2;

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();

   /* only allow jacobi and GS */
   if (relax_type > 2)
      relax_type = 2;

   /*-----------------------------------------------------------------
    * Exchange boundary values of u with the neighboring processors.
    * NOTE(review): when num_procs == 1, Vext_data/v_buf_data stay
    * unallocated; the offd loops below are assumed to be empty in that
    * case (num_cols_offd == 0) -- confirm.
    *-----------------------------------------------------------------*/
   if (num_procs > 1)
   {
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);

      v_buf_data = hypre_CTAlloc(HYPRE_Real,
                                 hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));

      Vext_data = hypre_CTAlloc(HYPRE_Real,num_cols_offd);

      if (num_cols_offd)
      {
         A_offd_j = hypre_CSRMatrixJ(A_offd);
         A_offd_data = hypre_CSRMatrixData(A_offd);
      }

      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
            v_buf_data[index++]
               = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }

      comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, v_buf_data,
                                                 Vext_data);

      /* wait for the halo exchange to complete */
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
   }

   if (relax_type == 1) /* Jacobi */
   {
      /* snapshot u so all updates use the old iterate */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < n; i++)
      {
         Vtemp_data[i] = u_data[i];
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,jj,res) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < n; i++)
      {
         /*-----------------------------------------------------------
          * If diagonal is nonzero, relax point i; otherwise, skip it.
          * (Assumes the diagonal entry is stored first in each row.)
          *-----------------------------------------------------------*/
         if (A_diag_data[A_diag_i[i]] != zero)
         {
            res = f_data[i];
            for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
            {
               ii = A_diag_j[jj];
               res -= A_diag_data[jj] * Vtemp_data[ii];
            }
            for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
            {
               ii = A_offd_j[jj];
               res -= A_offd_data[jj] * Vext_data[ii];
            }
            u_data[i] += (relax_weight*res)/l1_norms[i];
         }
      }
   }
   else if (relax_type == 2) /* GS */
   {
      if (relax_weight == 1 && omega == 1)
      {
         /* unweighted symmetric sweep */
         tmp_data = hypre_CTAlloc(HYPRE_Real,n);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < n; i++)
            tmp_data[i] = u_data[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
         for (j = 0; j < num_threads; j++)
         {
            /* rows [ns,ne) belong to thread j; the first `rest`
               threads get one extra row */
            size = n/num_threads;
            rest = n - size*num_threads;
            if (j < rest)
            {
               ns = j*size+j;
               ne = (j+1)*size+j+1;
            }
            else
            {
               ns = j*size+rest;
               ne = (j+1)*size+rest;
            }
            for (i = ns; i < ne; i++)   /* forward sweep over this thread's rows */
            {
               /*-----------------------------------------------------------
                * If diagonal is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if (A_diag_data[A_diag_i[i]] != zero)
               {
                  res = f_data[i];
                  for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     if (ii >= ns && ii < ne)
                     {
                        /* in-block coupling: use the updated value */
                        res -= A_diag_data[jj] * u_data[ii];
                     }
                     else
                        res -= A_diag_data[jj] * tmp_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] += res / l1_norms[i];
               }
            }
            for (i = ne-1; i > ns-1; i--)   /* backward sweep over this thread's rows */
            {
               /*-----------------------------------------------------------
                * If diagonal is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if (A_diag_data[A_diag_i[i]] != zero)
               {
                  res = f_data[i];
                  for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     if (ii >= ns && ii < ne)
                     {
                        res -= A_diag_data[jj] * u_data[ii];
                     }
                     else
                        res -= A_diag_data[jj] * tmp_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] += res / l1_norms[i];
               }
            }
         }
         hypre_TFree(tmp_data);
      }
      else
      {
         /* weighted SSOR-style sweep: c1 scales the residual, c2 scales
            the correction from already-updated in-block neighbors */
         HYPRE_Real c1 = omega*relax_weight;
         HYPRE_Real c2 = omega*(1.0-relax_weight);
         tmp_data = hypre_CTAlloc(HYPRE_Real,n);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < n; i++)
         {
            tmp_data[i] = u_data[i];
         }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,ii,j,jj,ns,ne,res,rest,size) HYPRE_SMP_SCHEDULE
#endif
         for (j = 0; j < num_threads; j++)
         {
            size = n/num_threads;
            rest = n - size*num_threads;
            if (j < rest)
            {
               ns = j*size+j;
               ne = (j+1)*size+j+1;
            }
            else
            {
               ns = j*size+rest;
               ne = (j+1)*size+rest;
            }
            for (i = ns; i < ne; i++)   /* forward sweep over this thread's rows */
            {
               /*-----------------------------------------------------------
                * If diagonal is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if (A_diag_data[A_diag_i[i]] != zero)
               {
                  res2 = 0.0;
                  res = f_data[i];
                  Vtemp_data[i] = u_data[i];
                  for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     if (ii >= ns && ii < ne)
                     {
                        res -= A_diag_data[jj] * u_data[ii];
                        if (ii < i)
                           res2 += A_diag_data[jj] * (Vtemp_data[ii] - u_data[ii]);
                     }
                     else
                        res -= A_diag_data[jj] * tmp_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] += (c1*res + c2*res2) / l1_norms[i];
               }
            }
            for (i = ne-1; i > ns-1; i--)   /* backward sweep over this thread's rows */
            {
               /*-----------------------------------------------------------
                * If diagonal is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if (A_diag_data[A_diag_i[i]] != zero)
               {
                  res2 = 0.0;
                  res = f_data[i];
                  for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     if (ii >= ns && ii < ne)
                     {
                        res -= A_diag_data[jj] * u_data[ii];
                        if (ii > i)
                           res2 += A_diag_data[jj] * (Vtemp_data[ii] - u_data[ii]);
                     }
                     else
                        res -= A_diag_data[jj] * tmp_data[ii];
                  }
                  for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
                  {
                     ii = A_offd_j[jj];
                     res -= A_offd_data[jj] * Vext_data[ii];
                  }
                  u_data[i] += (c1*res + c2*res2) / l1_norms[i];
               }
            }
         }
         hypre_TFree(tmp_data);
      }
   } /* end of Jacobi or G.S. */

   if (num_procs > 1)
   {
      hypre_TFree(Vext_data);
      hypre_TFree(v_buf_data);
   }
   return(relax_error);
}
| 149,125 | 37.494063 | 104 | c |
AMG | AMG-master/parcsr_ls/ams.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#ifndef hypre_AMS_DATA_HEADER
#define hypre_AMS_DATA_HEADER
/*--------------------------------------------------------------------------
* Auxiliary space Maxwell Solver data
*--------------------------------------------------------------------------*/
/* Internal data of the Auxiliary-space Maxwell Solver (AMS).
   Fields are accessed through the hypre_AMSData* macros below. */
typedef struct
{
   /* Space dimension (2 or 3) */
   HYPRE_Int dim;

   /* Edge element (ND1) stiffness matrix */
   hypre_ParCSRMatrix *A;

   /* Discrete gradient matrix (vertex-to-edge) */
   hypre_ParCSRMatrix *G;
   /* Coarse grid matrix on the range of G^T */
   hypre_ParCSRMatrix *A_G;
   /* AMG solver for A_G */
   HYPRE_Solver B_G;
   /* Is the mass term coefficient zero? */
   HYPRE_Int beta_is_zero;

   /* Nedelec nodal interpolation matrix (vertex^dim-to-edge) */
   hypre_ParCSRMatrix *Pi;
   /* Coarse grid matrix on the range of Pi^T */
   hypre_ParCSRMatrix *A_Pi;
   /* AMG solver for A_Pi */
   HYPRE_Solver B_Pi;

   /* Components of the Nedelec interpolation matrix (vertex-to-edge each) */
   hypre_ParCSRMatrix *Pix, *Piy, *Piz;
   /* Coarse grid matrices on the ranges of Pi{x,y,z}^T */
   hypre_ParCSRMatrix *A_Pix, *A_Piy, *A_Piz;
   /* AMG solvers for A_Pi{x,y,z} */
   HYPRE_Solver B_Pix, B_Piy, B_Piz;

   /* Does the solver own the Nedelec interpolations? */
   HYPRE_Int owns_Pi;
   /* Does the solver own the coarse grid matrices? */
   HYPRE_Int owns_A_G, owns_A_Pi;

   /* Coordinates of the vertices (z = 0 if dim == 2) */
   hypre_ParVector *x, *y, *z;

   /* Representations of the constant vectors in the Nedelec basis */
   hypre_ParVector *Gx, *Gy, *Gz;

   /* Nodes in the interior of the zero-conductivity region */
   hypre_ParVector *interior_nodes;
   /* Discrete gradient matrix for the interior nodes only */
   hypre_ParCSRMatrix *G0;
   /* Coarse grid matrix on the interior nodes */
   hypre_ParCSRMatrix *A_G0;
   /* AMG solver for A_G0 */
   HYPRE_Solver B_G0;
   /* How frequently to project the r.h.s. onto Ker(G0^T)? */
   HYPRE_Int projection_frequency;
   /* Internal counter to use with projection_frequency in PCG */
   HYPRE_Int solve_counter;

   /* Solver options */
   HYPRE_Int maxit;
   HYPRE_Real tol;
   HYPRE_Int cycle_type;
   HYPRE_Int print_level;

   /* Smoothing options for A */
   HYPRE_Int A_relax_type;
   HYPRE_Int A_relax_times;
   HYPRE_Real *A_l1_norms;
   HYPRE_Real A_relax_weight;
   HYPRE_Real A_omega;
   HYPRE_Real A_max_eig_est;
   HYPRE_Real A_min_eig_est;
   HYPRE_Int A_cheby_order;
   HYPRE_Real A_cheby_fraction;

   /* AMG options for B_G */
   HYPRE_Int B_G_coarsen_type;
   HYPRE_Int B_G_agg_levels;
   HYPRE_Int B_G_relax_type;
   HYPRE_Int B_G_coarse_relax_type;
   HYPRE_Real B_G_theta;
   HYPRE_Int B_G_interp_type;
   HYPRE_Int B_G_Pmax;
   /* AMG options for B_Pi */
   HYPRE_Int B_Pi_coarsen_type;
   HYPRE_Int B_Pi_agg_levels;
   HYPRE_Int B_Pi_relax_type;
   HYPRE_Int B_Pi_coarse_relax_type;
   HYPRE_Real B_Pi_theta;
   HYPRE_Int B_Pi_interp_type;
   HYPRE_Int B_Pi_Pmax;

   /* Temporary vectors */
   hypre_ParVector *r0, *g0, *r1, *g1, *r2, *g2;

   /* Output log info */
   HYPRE_Int num_iterations;
   HYPRE_Real rel_resid_norm;

} hypre_AMSData;
/* Accessor macros for hypre_AMSData fields (field meanings are
   documented on the struct above). */

/* Space dimension */
#define hypre_AMSDataDimension(ams_data) ((ams_data)->dim)

/* Edge stiffness matrix */
#define hypre_AMSDataA(ams_data) ((ams_data)->A)

/* Vertex space data */
#define hypre_AMSDataDiscreteGradient(ams_data) ((ams_data)->G)
#define hypre_AMSDataPoissonBeta(ams_data) ((ams_data)->A_G)
#define hypre_AMSDataPoissonBetaAMG(ams_data) ((ams_data)->B_G)
#define hypre_AMSDataOwnsPoissonBeta(ams_data) ((ams_data)->owns_A_G)
#define hypre_AMSDataBetaIsZero(ams_data) ((ams_data)->beta_is_zero)

/* Vector vertex space data */
#define hypre_AMSDataPiInterpolation(ams_data) ((ams_data)->Pi)
#define hypre_AMSDataOwnsPiInterpolation(ams_data) ((ams_data)->owns_Pi)
#define hypre_AMSDataPoissonAlpha(ams_data) ((ams_data)->A_Pi)
#define hypre_AMSDataPoissonAlphaAMG(ams_data) ((ams_data)->B_Pi)
#define hypre_AMSDataOwnsPoissonAlpha(ams_data) ((ams_data)->owns_A_Pi)

/* Coordinates of the vertices */
#define hypre_AMSDataVertexCoordinateX(ams_data) ((ams_data)->x)
#define hypre_AMSDataVertexCoordinateY(ams_data) ((ams_data)->y)
#define hypre_AMSDataVertexCoordinateZ(ams_data) ((ams_data)->z)

/* Representations of the constant vectors in the Nedelec basis */
#define hypre_AMSDataEdgeConstantX(ams_data) ((ams_data)->Gx)
#define hypre_AMSDataEdgeConstantY(ams_data) ((ams_data)->Gy)
#define hypre_AMSDataEdgeConstantZ(ams_data) ((ams_data)->Gz)

/* Solver options */
#define hypre_AMSDataMaxIter(ams_data) ((ams_data)->maxit)
#define hypre_AMSDataTol(ams_data) ((ams_data)->tol)
#define hypre_AMSDataCycleType(ams_data) ((ams_data)->cycle_type)
#define hypre_AMSDataPrintLevel(ams_data) ((ams_data)->print_level)

/* Smoothing and AMG options */
#define hypre_AMSDataARelaxType(ams_data) ((ams_data)->A_relax_type)
#define hypre_AMSDataARelaxTimes(ams_data) ((ams_data)->A_relax_times)
#define hypre_AMSDataAL1Norms(ams_data) ((ams_data)->A_l1_norms)
#define hypre_AMSDataARelaxWeight(ams_data) ((ams_data)->A_relax_weight)
#define hypre_AMSDataAOmega(ams_data) ((ams_data)->A_omega)
#define hypre_AMSDataAMaxEigEst(ams_data) ((ams_data)->A_max_eig_est)
#define hypre_AMSDataAMinEigEst(ams_data) ((ams_data)->A_min_eig_est)
#define hypre_AMSDataAChebyOrder(ams_data) ((ams_data)->A_cheby_order)
#define hypre_AMSDataAChebyFraction(ams_data) ((ams_data)->A_cheby_fraction)
#define hypre_AMSDataPoissonAlphaAMGCoarsenType(ams_data) ((ams_data)->B_Pi_coarsen_type)
#define hypre_AMSDataPoissonAlphaAMGAggLevels(ams_data) ((ams_data)->B_Pi_agg_levels)
#define hypre_AMSDataPoissonAlphaAMGRelaxType(ams_data) ((ams_data)->B_Pi_relax_type)
#define hypre_AMSDataPoissonAlphaAMGStrengthThreshold(ams_data) ((ams_data)->B_Pi_theta)
#define hypre_AMSDataPoissonBetaAMGCoarsenType(ams_data) ((ams_data)->B_G_coarsen_type)
#define hypre_AMSDataPoissonBetaAMGAggLevels(ams_data) ((ams_data)->B_G_agg_levels)
#define hypre_AMSDataPoissonBetaAMGRelaxType(ams_data) ((ams_data)->B_G_relax_type)
#define hypre_AMSDataPoissonBetaAMGStrengthThreshold(ams_data) ((ams_data)->B_G_theta)

/* Temporary vectors */
#define hypre_AMSDataTempEdgeVectorR(ams_data) ((ams_data)->r0)
#define hypre_AMSDataTempEdgeVectorG(ams_data) ((ams_data)->g0)
#define hypre_AMSDataTempVertexVectorR(ams_data) ((ams_data)->r1)
#define hypre_AMSDataTempVertexVectorG(ams_data) ((ams_data)->g1)
#define hypre_AMSDataTempVecVertexVectorR(ams_data) ((ams_data)->r2)
#define hypre_AMSDataTempVecVertexVectorG(ams_data) ((ams_data)->g2)
| 7,471 | 37.715026 | 89 | h |
AMG | AMG-master/parcsr_ls/aux_interp.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include "_hypre_parcsr_ls.h"
#include "aux_interp.h"
#include "hypre_hopscotch_hash.h"
/*---------------------------------------------------------------------------
 * Auxiliary routines for the long range interpolation methods.
* Implemented: "standard", "extended", "multipass", "FF"
*--------------------------------------------------------------------------*/
/* AHB 11/06: Modification of the above original - takes two
communication packages and inserts nodes to position expected for
OUT_marker
offd nodes from comm_pkg take up first chunk of CF_marker_offd, offd
   nodes from extend_comm_pkg take up the second chunk of CF_marker_offd. */
/* Exchange IN_marker values with neighbors using two communication
   packages: offd nodes from comm_pkg fill the first chunk of OUT_marker,
   offd nodes from extend_comm_pkg fill the second chunk (starting at
   comm_pkg's recv_vec_starts[num_recvs]).  A single send buffer, sized
   for the larger of the two packages, is reused for both exchanges.

   Cleanup: removed two dead `index = 0;` stores -- after sizing the
   buffer, `index` was never read again (the packing loops use i - begin). */
HYPRE_Int hypre_alt_insert_new_nodes(hypre_ParCSRCommPkg *comm_pkg,
                                     hypre_ParCSRCommPkg *extend_comm_pkg,
                                     HYPRE_Int *IN_marker,
                                     HYPRE_Int full_off_procNodes,
                                     HYPRE_Int *OUT_marker)
{
   hypre_ParCSRCommHandle *comm_handle;

   HYPRE_Int i, index, shift;

   HYPRE_Int num_sends, num_recvs;

   HYPRE_Int *recv_vec_starts;

   HYPRE_Int e_num_sends;

   HYPRE_Int *int_buf_data;
   HYPRE_Int *e_out_marker;

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);

   e_num_sends = hypre_ParCSRCommPkgNumSends(extend_comm_pkg);

   /* one buffer big enough for either package's send map */
   index = hypre_max(hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                     hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, e_num_sends));

   int_buf_data = hypre_CTAlloc(HYPRE_Int, index);

   /* orig commpkg data*/
   HYPRE_Int begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
   HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i = begin; i < end; ++i) {
      int_buf_data[i - begin] =
         IN_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                               OUT_marker);

   hypre_ParCSRCommHandleDestroy(comm_handle);
   comm_handle = NULL;

   /* now do the extend commpkg */

   /* first we need to shift our position in the OUT_marker */
   shift = recv_vec_starts[num_recvs];
   e_out_marker = OUT_marker + shift;

   begin = hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, 0);
   end = hypre_ParCSRCommPkgSendMapStart(extend_comm_pkg, e_num_sends);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (i = begin; i < end; ++i) {
      int_buf_data[i - begin] =
         IN_marker[hypre_ParCSRCommPkgSendMapElmt(extend_comm_pkg, i)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, extend_comm_pkg, int_buf_data,
                                               e_out_marker);

   hypre_ParCSRCommHandleDestroy(comm_handle);
   comm_handle = NULL;

   hypre_TFree(int_buf_data);

   return hypre_error_flag;
}
/* AHB 11/06 : alternate to the extend function below - creates a
* second comm pkg based on found - this makes it easier to use the
* global partition*/
/* Build a second communication package for the extended (distance-2)
   off-processor nodes listed in `found` (newoff entries), instead of
   A's own col_map_offd.  The new package is returned in
   *extend_comm_pkg; the caller is responsible for destroying it. */
HYPRE_Int
hypre_ParCSRFindExtendCommPkg(hypre_ParCSRMatrix *A, HYPRE_Int newoff, HYPRE_Int *found,
                              hypre_ParCSRCommPkg **extend_comm_pkg)

{
   HYPRE_Int num_sends;
   HYPRE_Int *send_procs;
   HYPRE_Int *send_map_starts;
   HYPRE_Int *send_map_elmts;

   HYPRE_Int num_recvs;
   HYPRE_Int *recv_procs;
   HYPRE_Int *recv_vec_starts;

   hypre_ParCSRCommPkg *new_comm_pkg;

   MPI_Comm comm = hypre_ParCSRMatrixComm(A);

   HYPRE_Int first_col_diag = hypre_ParCSRMatrixFirstColDiag(A);

   /* use found instead of col_map_offd in A, and newoff instead
      of num_cols_offd*/

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* Assumed-partition path: determine communication via A's assumed
      partition (created on demand). */
   HYPRE_Int row_start=0, row_end=0, col_start = 0, col_end = 0;
   HYPRE_Int global_num_cols;
   hypre_IJAssumedPart *apart;

   hypre_ParCSRMatrixGetLocalRange( A,
                                    &row_start, &row_end ,
                                    &col_start, &col_end );

   global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A);

   /* Create the assumed partition */
   if  (hypre_ParCSRMatrixAssumedPartition(A) == NULL)
   {
      hypre_ParCSRMatrixCreateAssumedPartition(A);
   }

   apart = hypre_ParCSRMatrixAssumedPartition(A);

   hypre_NewCommPkgCreate_core( comm, found, first_col_diag,
                                col_start, col_end,
                                newoff, global_num_cols,
                                &num_recvs, &recv_procs, &recv_vec_starts,
                                &num_sends, &send_procs, &send_map_starts,
                                &send_map_elmts, apart);

#else
   /* Global-partition path: use A's col_starts directly. */
   HYPRE_Int *col_starts = hypre_ParCSRMatrixColStarts(A);
   HYPRE_Int num_cols_diag = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A));

   hypre_MatvecCommPkgCreate_core
      (
         comm, found, first_col_diag, col_starts,
         num_cols_diag, newoff,
         first_col_diag, found,
         1,
         &num_recvs, &recv_procs, &recv_vec_starts,
         &num_sends, &send_procs, &send_map_starts,
         &send_map_elmts
         );

#endif

   new_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1);

   hypre_ParCSRCommPkgComm(new_comm_pkg) = comm;

   hypre_ParCSRCommPkgNumRecvs(new_comm_pkg) = num_recvs;
   hypre_ParCSRCommPkgRecvProcs(new_comm_pkg) = recv_procs;
   hypre_ParCSRCommPkgRecvVecStarts(new_comm_pkg) = recv_vec_starts;
   hypre_ParCSRCommPkgNumSends(new_comm_pkg) = num_sends;
   hypre_ParCSRCommPkgSendProcs(new_comm_pkg) = send_procs;
   hypre_ParCSRCommPkgSendMapStarts(new_comm_pkg) = send_map_starts;
   hypre_ParCSRCommPkgSendMapElmts(new_comm_pkg) = send_map_elmts;

   *extend_comm_pkg = new_comm_pkg;

   return hypre_error_flag;
}
/* sort for non-ordered arrays */
/* Selection sort (ascending) for small, possibly unordered integer
   arrays.  Returns 1 if any element had to be moved, 0 if the data was
   already sorted. */
HYPRE_Int hypre_ssort(HYPRE_Int *data, HYPRE_Int n)
{
   HYPRE_Int pos, target;
   HYPRE_Int changed = 0;

   if (n > 0)
   {
      /* Walk from the back: place the largest remaining entry at `pos`.
         (hypre_index_of_minimum selects the largest entry in
         data[0..pos], which yields ascending order here.) */
      for (pos = n - 1; pos > 0; pos--)
      {
         target = hypre_index_of_minimum(data, pos + 1);
         if (target != pos)
         {
            hypre_swap_int(data, pos, target);
            changed = 1;
         }
      }
   }
   return changed;
}
/* Auxiliary function for hypre_ssort */
/* Helper for hypre_ssort.
   NOTE(review): despite the name, this returns the index of the LARGEST
   entry in data[0..n-1] (the comparison is data[answer] < data[i]);
   hypre_ssort depends on this to produce ascending order, so do not
   "fix" the comparison without changing the caller. */
HYPRE_Int hypre_index_of_minimum(HYPRE_Int *data, HYPRE_Int n)
{
   HYPRE_Int answer;
   HYPRE_Int i;

   answer = 0;
   for(i = 1; i < n; i++)
      if(data[answer] < data[i])
         answer = i;

   return answer;
}
/* Exchange the entries at positions a and b of data. */
void hypre_swap_int(HYPRE_Int *data, HYPRE_Int a, HYPRE_Int b)
{
   const HYPRE_Int held = data[a];

   data[a] = data[b];
   data[b] = held;
}
/* Initialize CF_marker_offd, CF_marker, P_marker, P_marker_offd, tmp */
/* Initialize the marker arrays used by the long-range interpolation
   routines to -1: diag_ftc (length diag_n), offd_ftc and tmp_CF (length
   offd_n), and optionally diag_pm (length diag_n) / offd_pm (length
   offd_n) when non-NULL.  The two branches fill the shared prefix in one
   loop and the remainder in a second, instead of two full-length loops. */
void hypre_initialize_vecs(HYPRE_Int diag_n, HYPRE_Int offd_n, HYPRE_Int *diag_ftc, HYPRE_Int *offd_ftc,
                           HYPRE_Int *diag_pm, HYPRE_Int *offd_pm, HYPRE_Int *tmp_CF)
{
   HYPRE_Int i;

   /* Quicker initialization */
   if(offd_n < diag_n)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
      for(i = 0; i < offd_n; i++)
      {
         diag_ftc[i] = -1;
         offd_ftc[i] = -1;
         tmp_CF[i] = -1;
         if(diag_pm != NULL)
         {  diag_pm[i] = -1; }
         if(offd_pm != NULL)
         {  offd_pm[i] = -1;}
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
      for(i = offd_n; i < diag_n; i++)
      {
         /* diag-only tail */
         diag_ftc[i] = -1;
         if(diag_pm != NULL)
         {  diag_pm[i] = -1; }
      }
   }
   else
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
      for(i = 0; i < diag_n; i++)
      {
         diag_ftc[i] = -1;
         offd_ftc[i] = -1;
         tmp_CF[i] = -1;
         if(diag_pm != NULL)
         {  diag_pm[i] = -1;}
         if(offd_pm != NULL)
         {  offd_pm[i] = -1;}
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
      for(i = diag_n; i < offd_n; i++)
      {
         /* offd-only tail */
         offd_ftc[i] = -1;
         tmp_CF[i] = -1;
         if(offd_pm != NULL)
         {  offd_pm[i] = -1;}
      }
   }
   return;
}
/* Find nodes that are offd and are not contained in original offd
 * (neighbors of neighbors).
 *
 * Scans the off-processor rows of A (A_ext_i/A_ext_j) and S (Sop_i/Sop_j)
 * that belong to fine points (CF_marker_offd < 0) for column indices that
 * are neither local (in [col_1, col_n)) nor already present in
 * col_map_offd.  The new columns are returned in *found as a sorted,
 * duplicate-free array; the return value is their count.  As a side
 * effect every off-processor column index in A_ext_j / Sop_j is replaced
 * by a negative local code:
 *   -k-1                     for position k in col_map_offd, and
 *   -(num_cols_A_offd+m)-1   for position m in the new *found list.
 * NOTE(review): num_cols_S_offd is not referenced in this body.  */
static HYPRE_Int hypre_new_offd_nodes(HYPRE_Int **found, HYPRE_Int num_cols_A_offd, HYPRE_Int *A_ext_i, HYPRE_Int *A_ext_j,
                                      HYPRE_Int num_cols_S_offd, HYPRE_Int *col_map_offd, HYPRE_Int col_1,
                                      HYPRE_Int col_n, HYPRE_Int *Sop_i, HYPRE_Int *Sop_j,
                                      HYPRE_Int *CF_marker_offd)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime();
#endif
   HYPRE_Int i, i1, j, kk, k1;
   HYPRE_Int got_loc, loc_col;
   /*HYPRE_Int min;*/
   HYPRE_Int newoff = 0;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   /* Concurrent path: build a hash map for O(1) global->local lookups. */
   hypre_UnorderedIntMap col_map_offd_inverse;
   hypre_UnorderedIntMapCreate(&col_map_offd_inverse, 2*num_cols_A_offd, 16*hypre_NumThreads());
#pragma omp parallel for HYPRE_SMP_SCHEDULE
   for (i = 0; i < num_cols_A_offd; i++)
   {
      hypre_UnorderedIntMapPutIfAbsent(&col_map_offd_inverse, col_map_offd[i], i);
   }
   /* Find nodes that will be added to the off diag list */
   HYPRE_Int size_offP = A_ext_i[num_cols_A_offd];
   hypre_UnorderedIntSet set;
   hypre_UnorderedIntSetCreate(&set, size_offP, 16*hypre_NumThreads());
#pragma omp parallel private(i,j,i1)
   {
#pragma omp for HYPRE_SMP_SCHEDULE
      for (i = 0; i < num_cols_A_offd; i++)
      {
         if (CF_marker_offd[i] < 0)
         {
            for (j = A_ext_i[i]; j < A_ext_i[i+1]; j++)
            {
               i1 = A_ext_j[j];
               if(i1 < col_1 || i1 >= col_n)
               {
                  /* off-processor column: either already known
                   * (renumber immediately) or new (collect in the set) */
                  if (!hypre_UnorderedIntSetContains(&set, i1))
                  {
                     HYPRE_Int k = hypre_UnorderedIntMapGet(&col_map_offd_inverse, i1);
                     if (-1 == k)
                     {
                        hypre_UnorderedIntSetPut(&set, i1);
                     }
                     else
                     {
                        A_ext_j[j] = -k - 1;
                     }
                  }
               }
            }
            for (j = Sop_i[i]; j < Sop_i[i+1]; j++)
            {
               i1 = Sop_j[j];
               if(i1 < col_1 || i1 >= col_n)
               {
                  if (!hypre_UnorderedIntSetContains(&set, i1))
                  {
                     Sop_j[j] = -hypre_UnorderedIntMapGet(&col_map_offd_inverse, i1) - 1;
                  }
               }
            }
         } /* CF_marker_offd[i] < 0 */
      } /* for each row */
   } /* omp parallel */
   hypre_UnorderedIntMapDestroy(&col_map_offd_inverse);
   HYPRE_Int *tmp_found = hypre_UnorderedIntSetCopyToArray(&set, &newoff);
   hypre_UnorderedIntSetDestroy(&set);
   /* Put found in monotone increasing order */
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime();
#endif
   hypre_UnorderedIntMap tmp_found_inverse;
   if (newoff > 0)
   {
      hypre_sort_and_create_inverse_map(tmp_found, newoff, &tmp_found, &tmp_found_inverse);
   }
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime();
#endif
   /* Set column indices for Sop and A_ext such that offd nodes are
    * negatively indexed */
#pragma omp parallel for private(kk,k1,got_loc,loc_col) HYPRE_SMP_SCHEDULE
   for(i = 0; i < num_cols_A_offd; i++)
   {
      if (CF_marker_offd[i] < 0)
      {
         for(kk = Sop_i[i]; kk < Sop_i[i+1]; kk++)
         {
            k1 = Sop_j[kk];
            /* k1 > -1: still a global index (not renumbered above) */
            if(k1 > -1 && (k1 < col_1 || k1 >= col_n))
            {
               got_loc = hypre_UnorderedIntMapGet(&tmp_found_inverse, k1);
               loc_col = got_loc + num_cols_A_offd;
               Sop_j[kk] = -loc_col - 1;
            }
         }
         for (kk = A_ext_i[i]; kk < A_ext_i[i+1]; kk++)
         {
            k1 = A_ext_j[kk];
            if(k1 > -1 && (k1 < col_1 || k1 >= col_n))
            {
               got_loc = hypre_UnorderedIntMapGet(&tmp_found_inverse, k1);
               loc_col = got_loc + num_cols_A_offd;
               A_ext_j[kk] = -loc_col - 1;
            }
         }
      }
   }
   if (newoff)
   {
      hypre_UnorderedIntMapDestroy(&tmp_found_inverse);
   }
#else /* !HYPRE_CONCURRENT_HOPSCOTCH */
   /* Serial path: binary search against the sorted col_map_offd. */
   HYPRE_Int size_offP;
   HYPRE_Int *tmp_found;
   HYPRE_Int min;
   HYPRE_Int ifound;
   size_offP = A_ext_i[num_cols_A_offd]+Sop_i[num_cols_A_offd];
   tmp_found = hypre_CTAlloc(HYPRE_Int, size_offP);
   /* Find nodes that will be added to the off diag list */
   for (i = 0; i < num_cols_A_offd; i++)
   {
      if (CF_marker_offd[i] < 0)
      {
         for (j = A_ext_i[i]; j < A_ext_i[i+1]; j++)
         {
            i1 = A_ext_j[j];
            if(i1 < col_1 || i1 >= col_n)
            {
               ifound = hypre_BinarySearch(col_map_offd,i1,num_cols_A_offd);
               if(ifound == -1)
               {
                  /* may record duplicates; compressed after the sort below */
                  tmp_found[newoff]=i1;
                  newoff++;
               }
               else
               {
                  A_ext_j[j] = -ifound-1;
               }
            }
         }
         for (j = Sop_i[i]; j < Sop_i[i+1]; j++)
         {
            i1 = Sop_j[j];
            if(i1 < col_1 || i1 >= col_n)
            {
               ifound = hypre_BinarySearch(col_map_offd,i1,num_cols_A_offd);
               if(ifound == -1)
               {
                  tmp_found[newoff]=i1;
                  newoff++;
               }
               else
               {
                  Sop_j[j] = -ifound-1;
               }
            }
         }
      }
   }
   /* Put found in monotone increasing order */
   if (newoff > 0)
   {
      /* sort, then squeeze out duplicates in place */
      hypre_qsort0(tmp_found,0,newoff-1);
      ifound = tmp_found[0];
      min = 1;
      for (i=1; i < newoff; i++)
      {
         if (tmp_found[i] > ifound)
         {
            ifound = tmp_found[i];
            tmp_found[min++] = ifound;
         }
      }
      newoff = min;
   }
   /* Set column indices for Sop and A_ext such that offd nodes are
    * negatively indexed */
   for(i = 0; i < num_cols_A_offd; i++)
   {
      if (CF_marker_offd[i] < 0)
      {
         for(kk = Sop_i[i]; kk < Sop_i[i+1]; kk++)
         {
            k1 = Sop_j[kk];
            if(k1 > -1 && (k1 < col_1 || k1 >= col_n))
            {
               got_loc = hypre_BinarySearch(tmp_found,k1,newoff);
               /* NOTE(review): if got_loc == -1 here, loc_col keeps its
                * previous value before being used below; presumably every
                * such k1 is guaranteed to be in tmp_found (cf. the
                * unconditional assignment in the A_ext loop) -- confirm */
               if(got_loc > -1)
                  loc_col = got_loc + num_cols_A_offd;
               Sop_j[kk] = -loc_col - 1;
            }
         }
         for (kk = A_ext_i[i]; kk < A_ext_i[i+1]; kk++)
         {
            k1 = A_ext_j[kk];
            if(k1 > -1 && (k1 < col_1 || k1 >= col_n))
            {
               got_loc = hypre_BinarySearch(tmp_found,k1,newoff);
               loc_col = got_loc + num_cols_A_offd;
               A_ext_j[kk] = -loc_col - 1;
            }
         }
      }
   }
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */
   *found = tmp_found;
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime();
#endif
   return newoff;
}
/* Pack the IN_marker entries listed in the comm package's send map,
 * perform the integer exchange with the neighbor ranks, and wait for the
 * received values to land in OUT_marker.  Returns hypre_error_flag. */
HYPRE_Int hypre_exchange_marker(hypre_ParCSRCommPkg *comm_pkg,
                                HYPRE_Int *IN_marker,
                                HYPRE_Int *OUT_marker)
{
   hypre_ParCSRCommHandle *handle;
   HYPRE_Int nsends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   HYPRE_Int start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
   HYPRE_Int stop = hypre_ParCSRCommPkgSendMapStart(comm_pkg, nsends);
   HYPRE_Int *send_buf = hypre_CTAlloc(HYPRE_Int, stop);
   HYPRE_Int j;

   /* Gather the marker values that must be shipped out. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
   for (j = start; j < stop; ++j)
   {
      send_buf[j - start] = IN_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
   }

   /* Job 11 with HYPRE_Int buffers; create + destroy completes the
    * exchange. */
   handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_buf, OUT_marker);
   hypre_ParCSRCommHandleDestroy(handle);
   hypre_TFree(send_buf);
   return hypre_error_flag;
}
/* Gather all off-processor information needed to build long-range
 * interpolation for matrix A with strength matrix S:
 *   *CF_marker_offd / *dof_func_offd - CF marker and (if num_functions>1)
 *       function ids for the full extended off-processor set,
 *   *A_ext / *Sop                    - external rows of A and S,
 *   *full_off_procNodes              - size of the extended set, and
 *   *extend_comm_pkg                 - comm package for the distance-2
 *                                      (neighbor-of-neighbor) points.
 * Index communication is overlapped: the ExtractBExt_Overlap calls return
 * open handles that are completed (and their send buffers freed) further
 * down.  Returns hypre_error_flag. */
HYPRE_Int hypre_exchange_interp_data(
   HYPRE_Int **CF_marker_offd,
   HYPRE_Int **dof_func_offd,
   hypre_CSRMatrix **A_ext,
   HYPRE_Int *full_off_procNodes,
   hypre_CSRMatrix **Sop,
   hypre_ParCSRCommPkg **extend_comm_pkg,
   hypre_ParCSRMatrix *A,
   HYPRE_Int *CF_marker,
   hypre_ParCSRMatrix *S,
   HYPRE_Int num_functions,
   HYPRE_Int *dof_func,
   HYPRE_Int skip_fine_or_same_sign) // skip_fine_or_same_sign if we want to skip fine points in S and nnz with the same sign as diagonal in A
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_EXCHANGE_INTERP_DATA] -= hypre_MPI_Wtime();
#endif
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
   /* [col_1, col_n) is this rank's owned global column range */
   HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int col_n = col_1 + local_numrows;
   HYPRE_Int *found = NULL;
   /*----------------------------------------------------------------------
    * Get the off processors rows for A and S, associated with columns in
    * A_offd and S_offd.
    *---------------------------------------------------------------------*/
   *CF_marker_offd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd);
   hypre_exchange_marker(comm_pkg, CF_marker, *CF_marker_offd);
   hypre_ParCSRCommHandle *comm_handle_a_idx, *comm_handle_a_data;
   *A_ext = hypre_ParCSRMatrixExtractBExt_Overlap(A,A,1,&comm_handle_a_idx,&comm_handle_a_data,CF_marker,*CF_marker_offd,skip_fine_or_same_sign,skip_fine_or_same_sign);
   HYPRE_Int *A_ext_i = hypre_CSRMatrixI(*A_ext);
   HYPRE_Int *A_ext_j = hypre_CSRMatrixJ(*A_ext);
   HYPRE_Int A_ext_rows = hypre_CSRMatrixNumRows(*A_ext);
   hypre_ParCSRCommHandle *comm_handle_s_idx;
   *Sop = hypre_ParCSRMatrixExtractBExt_Overlap(S,A,0,&comm_handle_s_idx,NULL,CF_marker,*CF_marker_offd,skip_fine_or_same_sign,0);
   HYPRE_Int *Sop_i = hypre_CSRMatrixI(*Sop);
   HYPRE_Int *Sop_j = hypre_CSRMatrixJ(*Sop);
   HYPRE_Int Soprows = hypre_CSRMatrixNumRows(*Sop);
   /* complete the overlapped index exchanges and release their send
    * buffers before the indices are used below */
   HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_s_idx->send_data;
   hypre_ParCSRCommHandleDestroy(comm_handle_s_idx);
   hypre_TFree(send_idx);
   send_idx = (HYPRE_Int *)comm_handle_a_idx->send_data;
   hypre_ParCSRCommHandleDestroy(comm_handle_a_idx);
   hypre_TFree(send_idx);
   /* Find nodes that are neighbors of neighbors, not found in offd */
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_EXCHANGE_INTERP_DATA] += hypre_MPI_Wtime();
#endif
   HYPRE_Int newoff = hypre_new_offd_nodes(&found, A_ext_rows, A_ext_i, A_ext_j,
                                           Soprows, col_map_offd, col_1, col_n,
                                           Sop_i, Sop_j, *CF_marker_offd);
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_EXCHANGE_INTERP_DATA] -= hypre_MPI_Wtime();
#endif
   if(newoff >= 0)
      *full_off_procNodes = newoff + num_cols_A_offd;
   else
   {
      return hypre_error_flag;
   }
   /* Possibly add new points and new processors to the comm_pkg, all
    * processors need new_comm_pkg */
   /* AHB - create a new comm package just for extended info -
      this will work better with the assumed partition*/
   hypre_ParCSRFindExtendCommPkg(A, newoff, found,
                                 extend_comm_pkg);
   /* grow CF_marker_offd and fill in the markers of the new points */
   *CF_marker_offd = hypre_TReAlloc(*CF_marker_offd, HYPRE_Int, *full_off_procNodes);
   hypre_exchange_marker(*extend_comm_pkg, CF_marker, *CF_marker_offd + A_ext_rows);
   if(num_functions > 1)
   {
      if (*full_off_procNodes > 0)
         *dof_func_offd = hypre_CTAlloc(HYPRE_Int, *full_off_procNodes);
      hypre_alt_insert_new_nodes(comm_pkg, *extend_comm_pkg, dof_func,
                                 *full_off_procNodes, *dof_func_offd);
   }
   hypre_TFree(found);
   /* finally complete the overlapped data exchange for A_ext */
   HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_a_data->send_data;
   hypre_ParCSRCommHandleDestroy(comm_handle_a_data);
   hypre_TFree(send_data);
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_EXCHANGE_INTERP_DATA] += hypre_MPI_Wtime();
#endif
   return hypre_error_flag;
}
/* Build the final off-diagonal column map of the interpolation matrix P.
 * On entry P->offd->j holds indices into the extended off-processor set
 * (size full_off_procNodes).  On exit P->offd->j is renumbered into the
 * compressed set of columns actually used, hypre_ParCSRMatrixColMapOffd(P)
 * holds their coarse global ids (taken from fine_to_coarse_offd, sorted
 * ascending), and hypre_CSRMatrixNumCols(P->offd) is updated.
 * Only points with tmp_CF_marker_offd[.] >= 0 are kept. */
void hypre_build_interp_colmap(hypre_ParCSRMatrix *P, HYPRE_Int full_off_procNodes, HYPRE_Int *tmp_CF_marker_offd, HYPRE_Int *fine_to_coarse_offd)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime();
#endif
   HYPRE_Int i, index;
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(P->diag);
   HYPRE_Int P_offd_size = P->offd->i[n_fine];
   HYPRE_Int *P_offd_j = P->offd->j;
   HYPRE_Int *col_map_offd_P = NULL;
   HYPRE_Int *P_marker = NULL;
   if (full_off_procNodes)
      P_marker = hypre_TAlloc(HYPRE_Int, full_off_procNodes);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i=0; i < full_off_procNodes; i++)
      P_marker[i] = 0;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   /* These two loops set P_marker[i] to 1 if it appears in P_offd_j and if
    * tmp_CF_marker_offd has i marked. num_cols_P_offd is then set to the
    * total number of times P_marker is set */
#pragma omp parallel for private(i,index) HYPRE_SMP_SCHEDULE
   for (i=0; i < P_offd_size; i++)
   {
      index = P_offd_j[i];
      if(tmp_CF_marker_offd[index] >= 0)
      { P_marker[index] = 1; }
   }
   /* per-thread count + prefix sum assigns each thread a contiguous
    * slice of col_map_offd_P to fill */
   HYPRE_Int prefix_sum_workspace[hypre_NumThreads() + 1];
   HYPRE_Int num_cols_P_offd = 0;
#pragma omp parallel private(i)
   {
      HYPRE_Int i_begin, i_end;
      hypre_GetSimpleThreadPartition(&i_begin, &i_end, full_off_procNodes);
      HYPRE_Int local_num_cols_P_offd = 0;
      for (i = i_begin; i < i_end; i++)
      {
         if (P_marker[i] == 1) local_num_cols_P_offd++;
      }
      hypre_prefix_sum(&local_num_cols_P_offd, &num_cols_P_offd, prefix_sum_workspace);
#pragma omp master
      {
         if (num_cols_P_offd)
            col_map_offd_P = hypre_TAlloc(HYPRE_Int, num_cols_P_offd);
      }
#pragma omp barrier
      for (i = i_begin; i < i_end; i++)
      {
         if (P_marker[i] == 1)
         {
            col_map_offd_P[local_num_cols_P_offd++] = fine_to_coarse_offd[i];
         }
      }
   }
   hypre_UnorderedIntMap col_map_offd_P_inverse;
   hypre_sort_and_create_inverse_map(col_map_offd_P, num_cols_P_offd, &col_map_offd_P, &col_map_offd_P_inverse);
   // find old idx -> new idx map
#pragma omp parallel for
   for (i = 0; i < full_off_procNodes; i++)
      P_marker[i] = hypre_UnorderedIntMapGet(&col_map_offd_P_inverse, fine_to_coarse_offd[i]);
   if (num_cols_P_offd)
   {
      hypre_UnorderedIntMapDestroy(&col_map_offd_P_inverse);
   }
#pragma omp parallel for
   for(i = 0; i < P_offd_size; i++)
      P_offd_j[i] = P_marker[P_offd_j[i]];
#else /* HYPRE_CONCURRENT_HOPSCOTCH */
   HYPRE_Int num_cols_P_offd = 0;
   HYPRE_Int j;
   /* mark the extended off-processor points actually referenced by P */
   for (i=0; i < P_offd_size; i++)
   {
      index = P_offd_j[i];
      if (!P_marker[index])
      {
         if(tmp_CF_marker_offd[index] >= 0)
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }
   }
   if (num_cols_P_offd)
      col_map_offd_P = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd);
   /* first pass: record the (extended-local) indices of marked points */
   index = 0;
   for(i = 0; i < num_cols_P_offd; i++)
   {
      while( P_marker[index] == 0) index++;
      col_map_offd_P[i] = index++;
   }
   /* renumber P_offd_j into the compressed numbering */
   for(i = 0; i < P_offd_size; i++)
      P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
                                       P_offd_j[i],
                                       num_cols_P_offd);
   /* second pass: replace local indices with coarse global ids */
   index = 0;
   for(i = 0; i < num_cols_P_offd; i++)
   {
      while (P_marker[index] == 0) index++;
      col_map_offd_P[i] = fine_to_coarse_offd[index];
      index++;
   }
   /* Sort the col_map_offd_P and P_offd_j correctly */
   for(i = 0; i < num_cols_P_offd; i++)
      P_marker[i] = col_map_offd_P[i];
   /* Check if sort actually changed anything */
   if(hypre_ssort(col_map_offd_P,num_cols_P_offd))
   {
      /* remap each P_offd_j entry to its column's new sorted position */
      for(i = 0; i < P_offd_size; i++)
         for(j = 0; j < num_cols_P_offd; j++)
            if(P_marker[P_offd_j[i]] == col_map_offd_P[j])
            {
               P_offd_j[i] = j;
               j = num_cols_P_offd;
            }
   }
#endif /* HYPRE_CONCURRENT_HOPSCOTCH */
   hypre_TFree(P_marker);
   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P->offd) = num_cols_P_offd;
   }
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime();
#endif
}
| 24,688 | 28.603118 | 175 | c |
AMG | AMG-master/parcsr_ls/aux_interp.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#ifdef __cplusplus
extern "C" {
#endif
/* Extend A's communication package by the `newoff` additional
 * off-processor points listed in `found`; the pieces of the new package
 * are returned through the p_* output arguments. */
void
hypre_ParCSRCommExtendA(hypre_ParCSRMatrix *A, HYPRE_Int newoff, HYPRE_Int *found,
                        HYPRE_Int *p_num_recvs, HYPRE_Int **p_recv_procs,
                        HYPRE_Int **p_recv_vec_starts, HYPRE_Int *p_num_sends,
                        HYPRE_Int **p_send_procs, HYPRE_Int **p_send_map_starts,
                        HYPRE_Int **p_send_map_elmts, HYPRE_Int **p_node_add);
/* NOTE(review): several prototypes below lack the hypre_ prefix carried
 * by the corresponding definitions in aux_interp.c (e.g. index_of_minimum
 * vs. hypre_index_of_minimum) -- this header looks out of date; verify
 * which declarations are actually referenced by the build. */
/* Fill OUT_marker with IN_marker values for the extended off-proc set. */
HYPRE_Int alt_insert_new_nodes(hypre_ParCSRCommPkg *comm_pkg,
                               hypre_ParCSRCommPkg *extend_comm_pkg,
                               HYPRE_Int *IN_marker,
                               HYPRE_Int full_off_procNodes,
                               HYPRE_Int *OUT_marker);
/* In-place selection sort of data[0..n-1]; returns 1 if anything moved. */
HYPRE_Int hypre_ssort(HYPRE_Int *data, HYPRE_Int n);
/* Helper for hypre_ssort (see aux_interp.c for its actual semantics). */
HYPRE_Int index_of_minimum(HYPRE_Int *data, HYPRE_Int n);
/* Exchange entries a and b of data. */
void swap_int(HYPRE_Int *data, HYPRE_Int a, HYPRE_Int b);
/* Set the marker/scratch arrays used during interpolation to -1. */
void initialize_vecs(HYPRE_Int diag_n, HYPRE_Int offd_n, HYPRE_Int *diag_ftc, HYPRE_Int *offd_ftc,
                     HYPRE_Int *diag_pm, HYPRE_Int *offd_pm, HYPRE_Int *tmp_CF);
/* Gather off-processor CF markers, dof_func and external rows of A and S
 * needed to build long-range interpolation. */
HYPRE_Int exchange_interp_data(
   HYPRE_Int **CF_marker_offd,
   HYPRE_Int **dof_func_offd,
   hypre_CSRMatrix **A_ext,
   HYPRE_Int *full_off_procNodes,
   hypre_CSRMatrix **Sop,
   hypre_ParCSRCommPkg **extend_comm_pkg,
   hypre_ParCSRMatrix *A,
   HYPRE_Int *CF_marker,
   hypre_ParCSRMatrix *S,
   HYPRE_Int num_functions,
   HYPRE_Int *dof_func,
   HYPRE_Int skip_fine_or_same_sign);
/* Compress and sort P's off-diagonal column map after interpolation. */
void build_interp_colmap(hypre_ParCSRMatrix *P, HYPRE_Int full_off_procNodes, HYPRE_Int *tmp_CF_marker_offd, HYPRE_Int *fine_to_coarse_offd);
#ifdef __cplusplus
}
#endif
| 2,478 | 40.316667 | 141 | h |
AMG | AMG-master/parcsr_ls/gen_redcs_mat.c | #include "_hypre_parcsr_ls.h"
#include "par_amg.h"
#define USE_ALLTOALL 0
/* here we have the sequential setup and solve - called from the
 * parallel one - for the coarser levels */
/* Gather the level-p_level coarse matrix onto a sub-communicator of the
 * ranks that still own rows, assemble it as a one-process ParCSR matrix,
 * and set up a sequential BoomerAMG solver for it.  With redundant != 0
 * every participating rank receives the full matrix (Allgather); otherwise
 * only rank 0 of the sub-communicator does (Gather).  The solver, matrix
 * and work vectors are stored back into amg_data.
 * NOTE(review): coarse_threshold is not referenced in this body. */
HYPRE_Int hypre_seqAMGSetup( hypre_ParAMGData *amg_data,
                             HYPRE_Int p_level,
                             HYPRE_Int coarse_threshold)
{
   /* Par Data Structure variables */
   hypre_ParCSRMatrix **Par_A_array = hypre_ParAMGDataAArray(amg_data);
   MPI_Comm comm = hypre_ParCSRMatrixComm(Par_A_array[0]);
   MPI_Comm new_comm, seq_comm;
   hypre_ParCSRMatrix *A_seq = NULL;
   hypre_CSRMatrix *A_seq_diag;
   hypre_CSRMatrix *A_seq_offd;
   hypre_ParVector *F_seq = NULL;
   hypre_ParVector *U_seq = NULL;
   hypre_ParCSRMatrix *A;
   HYPRE_Int **dof_func_array;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int level;
   HYPRE_Int redundant;
   HYPRE_Int num_functions;
   HYPRE_Solver coarse_solver;
   /* misc */
   dof_func_array = hypre_ParAMGDataDofFuncArray(amg_data);
   num_functions = hypre_ParAMGDataNumFunctions(amg_data);
   redundant = hypre_ParAMGDataRedundant(amg_data);
   /*MPI Stuff */
   hypre_MPI_Comm_size(comm, &num_procs);
   /*initial */
   level = p_level;
   /* convert A at this level to sequential */
   A = Par_A_array[level];
   {
      HYPRE_Real *A_seq_data = NULL;
      HYPRE_Int *A_seq_i = NULL;
      HYPRE_Int *A_seq_offd_i = NULL;
      HYPRE_Int *A_seq_j = NULL;
      HYPRE_Int *seq_dof_func = NULL;
      HYPRE_Real *A_tmp_data = NULL;
      HYPRE_Int *A_tmp_i = NULL;
      HYPRE_Int *A_tmp_j = NULL;
      HYPRE_Int *info = NULL;
      HYPRE_Int *displs = NULL;
      HYPRE_Int *displs2 = NULL;
      HYPRE_Int i, j, size, num_nonzeros, total_nnz, cnt;
      hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
      hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
      HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
      HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
      HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
      HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
      HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
      HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
      HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
      HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
      HYPRE_Int first_row_index = hypre_ParCSRMatrixFirstRowIndex(A);
      HYPRE_Int new_num_procs, *row_starts;
      /* sub-communicator of ranks that still own rows at this level */
      hypre_GenerateSubComm(comm, num_rows, &new_comm);
      /*hypre_MPI_Group orig_group, new_group;
      HYPRE_Int *ranks, new_num_procs, *row_starts;
      info = hypre_CTAlloc(HYPRE_Int, num_procs);
      hypre_MPI_Allgather(&num_rows, 1, HYPRE_MPI_INT, info, 1, HYPRE_MPI_INT, comm);
      ranks = hypre_CTAlloc(HYPRE_Int, num_procs);
      new_num_procs = 0;
      for (i=0; i < num_procs; i++)
         if (info[i])
         {
            ranks[new_num_procs] = i;
            info[new_num_procs++] = info[i];
         }
      hypre_MPI_Comm_group(comm, &orig_group);
      hypre_MPI_Group_incl(orig_group, new_num_procs, ranks, &new_group);
      hypre_MPI_Comm_create(comm, new_group, &new_comm);
      hypre_MPI_Group_free(&new_group);
      hypre_MPI_Group_free(&orig_group); */
      if (num_rows)
      {
         hypre_ParAMGDataParticipate(amg_data) = 1;
         hypre_MPI_Comm_size(new_comm, &new_num_procs);
         hypre_MPI_Comm_rank(new_comm, &my_id);
         /* info[p] = number of rows owned by rank p of new_comm */
         info = hypre_CTAlloc(HYPRE_Int, new_num_procs);
         if (redundant)
            hypre_MPI_Allgather(&num_rows, 1, HYPRE_MPI_INT, info, 1, HYPRE_MPI_INT, new_comm);
         else
            hypre_MPI_Gather(&num_rows, 1, HYPRE_MPI_INT, info, 1, HYPRE_MPI_INT, 0, new_comm);
         /* alloc space in seq data structure only for participating procs*/
         if (redundant || my_id == 0)
         {
            /* configure the sequential coarse solver from the parallel
             * AMG parameters; one iteration, no tolerance check */
            HYPRE_BoomerAMGCreate(&coarse_solver);
            HYPRE_BoomerAMGSetMaxRowSum(coarse_solver,
                                        hypre_ParAMGDataMaxRowSum(amg_data));
            HYPRE_BoomerAMGSetStrongThreshold(coarse_solver,
                                              hypre_ParAMGDataStrongThreshold(amg_data));
            HYPRE_BoomerAMGSetCoarsenType(coarse_solver,
                                          hypre_ParAMGDataCoarsenType(amg_data));
            HYPRE_BoomerAMGSetInterpType(coarse_solver,
                                         hypre_ParAMGDataInterpType(amg_data));
            HYPRE_BoomerAMGSetTruncFactor(coarse_solver,
                                          hypre_ParAMGDataTruncFactor(amg_data));
            HYPRE_BoomerAMGSetPMaxElmts(coarse_solver,
                                        hypre_ParAMGDataPMaxElmts(amg_data));
            if (hypre_ParAMGDataUserRelaxType(amg_data) > -1)
               HYPRE_BoomerAMGSetRelaxType(coarse_solver,
                                           hypre_ParAMGDataUserRelaxType(amg_data));
            HYPRE_BoomerAMGSetRelaxOrder(coarse_solver,
                                         hypre_ParAMGDataRelaxOrder(amg_data));
            HYPRE_BoomerAMGSetRelaxWt(coarse_solver,
                                      hypre_ParAMGDataUserRelaxWeight(amg_data));
            if (hypre_ParAMGDataUserNumSweeps(amg_data) > -1)
               HYPRE_BoomerAMGSetNumSweeps(coarse_solver,
                                           hypre_ParAMGDataUserNumSweeps(amg_data));
            HYPRE_BoomerAMGSetNumFunctions(coarse_solver,
                                           num_functions);
            HYPRE_BoomerAMGSetMaxIter(coarse_solver, 1);
            HYPRE_BoomerAMGSetTol(coarse_solver, 0);
         }
         /* Create CSR Matrix, will be Diag part of new matrix */
         /* A_tmp_i[i] holds the local row length (diag + offd), converted
          * to global offsets after the gather below */
         A_tmp_i = hypre_CTAlloc(HYPRE_Int, num_rows+1);
         A_tmp_i[0] = 0;
         for (i=1; i < num_rows+1; i++)
            A_tmp_i[i] = A_diag_i[i]-A_diag_i[i-1]+A_offd_i[i]-A_offd_i[i-1];
         num_nonzeros = A_offd_i[num_rows]+A_diag_i[num_rows];
         A_tmp_j = hypre_CTAlloc(HYPRE_Int, num_nonzeros);
         A_tmp_data = hypre_CTAlloc(HYPRE_Real, num_nonzeros);
         /* merge diag and offd into one row with global column indices */
         cnt = 0;
         for (i=0; i < num_rows; i++)
         {
            for (j=A_diag_i[i]; j < A_diag_i[i+1]; j++)
            {
               A_tmp_j[cnt] = A_diag_j[j]+first_row_index;
               A_tmp_data[cnt++] = A_diag_data[j];
            }
            for (j=A_offd_i[i]; j < A_offd_i[i+1]; j++)
            {
               A_tmp_j[cnt] = col_map_offd[A_offd_j[j]];
               A_tmp_data[cnt++] = A_offd_data[j];
            }
         }
         /* displs[p] = first global row of rank p */
         displs = hypre_CTAlloc(HYPRE_Int, new_num_procs+1);
         displs[0] = 0;
         for (i=1; i < new_num_procs+1; i++)
            displs[i] = displs[i-1]+info[i-1];
         size = displs[new_num_procs];
         if (redundant || my_id == 0)
         {
            A_seq_i = hypre_CTAlloc(HYPRE_Int, size+1);
            A_seq_offd_i = hypre_CTAlloc(HYPRE_Int, size+1);
            if (num_functions > 1) seq_dof_func = hypre_CTAlloc(HYPRE_Int, size);
         }
         /* gather per-row lengths (and dof_func) from all ranks */
         if (redundant)
         {
            hypre_MPI_Allgatherv ( &A_tmp_i[1], num_rows, HYPRE_MPI_INT, &A_seq_i[1], info,
                                   displs, HYPRE_MPI_INT, new_comm );
            if (num_functions > 1)
            {
               hypre_MPI_Allgatherv ( dof_func_array[level], num_rows, HYPRE_MPI_INT,
                                      seq_dof_func, info, displs, HYPRE_MPI_INT, new_comm );
               HYPRE_BoomerAMGSetDofFunc(coarse_solver, seq_dof_func);
            }
         }
         else
         {
            /* on non-root ranks A_seq_i is NULL; pass a valid (ignored)
             * recv pointer either way */
            if (A_seq_i)
               hypre_MPI_Gatherv ( &A_tmp_i[1], num_rows, HYPRE_MPI_INT, &A_seq_i[1], info,
                                   displs, HYPRE_MPI_INT, 0, new_comm );
            else
               hypre_MPI_Gatherv ( &A_tmp_i[1], num_rows, HYPRE_MPI_INT, A_seq_i, info,
                                   displs, HYPRE_MPI_INT, 0, new_comm );
            if (num_functions > 1)
            {
               hypre_MPI_Gatherv ( dof_func_array[level], num_rows, HYPRE_MPI_INT,
                                   seq_dof_func, info, displs, HYPRE_MPI_INT, 0, new_comm );
               if (my_id == 0) HYPRE_BoomerAMGSetDofFunc(coarse_solver, seq_dof_func);
            }
         }
         if (redundant || my_id == 0)
         {
            /* prefix-sum the gathered row lengths into CSR row offsets,
             * and build displs2/info (in nonzeros) for the j/data gather */
            displs2 = hypre_CTAlloc(HYPRE_Int, new_num_procs+1);
            A_seq_i[0] = 0;
            displs2[0] = 0;
            for (j=1; j < displs[1]; j++)
               A_seq_i[j] = A_seq_i[j]+A_seq_i[j-1];
            for (i=1; i < new_num_procs; i++)
            {
               for (j=displs[i]; j < displs[i+1]; j++)
               {
                  A_seq_i[j] = A_seq_i[j]+A_seq_i[j-1];
               }
            }
            A_seq_i[size] = A_seq_i[size]+A_seq_i[size-1];
            displs2[new_num_procs] = A_seq_i[size];
            for (i=1; i < new_num_procs+1; i++)
            {
               displs2[i] = A_seq_i[displs[i]];
               info[i-1] = displs2[i] - displs2[i-1];
            }
            total_nnz = displs2[new_num_procs];
            A_seq_j = hypre_CTAlloc(HYPRE_Int, total_nnz);
            A_seq_data = hypre_CTAlloc(HYPRE_Real, total_nnz);
         }
         /* gather the column indices and values */
         if (redundant)
         {
            hypre_MPI_Allgatherv ( A_tmp_j, num_nonzeros, HYPRE_MPI_INT,
                                   A_seq_j, info, displs2,
                                   HYPRE_MPI_INT, new_comm );
            hypre_MPI_Allgatherv ( A_tmp_data, num_nonzeros, HYPRE_MPI_REAL,
                                   A_seq_data, info, displs2,
                                   HYPRE_MPI_REAL, new_comm );
         }
         else
         {
            hypre_MPI_Gatherv ( A_tmp_j, num_nonzeros, HYPRE_MPI_INT,
                                A_seq_j, info, displs2,
                                HYPRE_MPI_INT, 0, new_comm );
            hypre_MPI_Gatherv ( A_tmp_data, num_nonzeros, HYPRE_MPI_REAL,
                                A_seq_data, info, displs2,
                                HYPRE_MPI_REAL, 0, new_comm );
         }
         hypre_TFree(info);
         hypre_TFree(displs);
         hypre_TFree(A_tmp_i);
         hypre_TFree(A_tmp_j);
         hypre_TFree(A_tmp_data);
         if (redundant || my_id == 0)
         {
            /* wrap the gathered arrays in a one-process ParCSR matrix and
             * run the sequential BoomerAMG setup on it */
            hypre_TFree(displs2);
            row_starts = hypre_CTAlloc(HYPRE_Int,2);
            row_starts[0] = 0;
            row_starts[1] = size;
            /* Create 1 proc communicator */
            seq_comm = hypre_MPI_COMM_SELF;
            A_seq = hypre_ParCSRMatrixCreate(seq_comm,size,size,
                                             row_starts, row_starts,
                                             0,total_nnz,0);
            A_seq_diag = hypre_ParCSRMatrixDiag(A_seq);
            A_seq_offd = hypre_ParCSRMatrixOffd(A_seq);
            hypre_CSRMatrixData(A_seq_diag) = A_seq_data;
            hypre_CSRMatrixI(A_seq_diag) = A_seq_i;
            hypre_CSRMatrixJ(A_seq_diag) = A_seq_j;
            hypre_CSRMatrixI(A_seq_offd) = A_seq_offd_i;
            F_seq = hypre_ParVectorCreate(seq_comm, size, row_starts);
            U_seq = hypre_ParVectorCreate(seq_comm, size, row_starts);
            hypre_ParVectorOwnsPartitioning(F_seq) = 0;
            hypre_ParVectorOwnsPartitioning(U_seq) = 0;
            hypre_ParVectorInitialize(F_seq);
            hypre_ParVectorInitialize(U_seq);
            hypre_BoomerAMGSetup(coarse_solver,A_seq,F_seq,U_seq);
            hypre_ParAMGDataCoarseSolver(amg_data) = coarse_solver;
            hypre_ParAMGDataACoarse(amg_data) = A_seq;
            hypre_ParAMGDataFCoarse(amg_data) = F_seq;
            hypre_ParAMGDataUCoarse(amg_data) = U_seq;
         }
         hypre_ParAMGDataNewComm(amg_data) = new_comm;
      }
   }
   return 0;
}
/*--------------------------------------------------------------------------
 * hypre_seqAMGCycle
 *
 * Coarse-grid solve using the sequential solver built by
 * hypre_seqAMGSetup: gather f (and the initial u) from all participating
 * ranks into the stored sequential vectors, run one sequential BoomerAMG
 * solve, then return each rank's slice of the solution.  With
 * redundant != 0 every rank holds the full problem and simply copies its
 * own slice back; otherwise rank 0 solves and scatters.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_seqAMGCycle( hypre_ParAMGData *amg_data,
                   HYPRE_Int p_level,
                   hypre_ParVector **Par_F_array,
                   hypre_ParVector **Par_U_array )
{
   hypre_ParVector *Aux_U;
   hypre_ParVector *Aux_F;
   /* Local variables */
   HYPRE_Int Solve_err_flag = 0;
   HYPRE_Int n;
   HYPRE_Int i;
   hypre_Vector *u_local;
   HYPRE_Real *u_data;
   HYPRE_Int first_index;
   /* Acquire seq data */
   MPI_Comm new_comm = hypre_ParAMGDataNewComm(amg_data);
   HYPRE_Solver coarse_solver = hypre_ParAMGDataCoarseSolver(amg_data);
   hypre_ParCSRMatrix *A_coarse = hypre_ParAMGDataACoarse(amg_data);
   hypre_ParVector *F_coarse = hypre_ParAMGDataFCoarse(amg_data);
   hypre_ParVector *U_coarse = hypre_ParAMGDataUCoarse(amg_data);
   HYPRE_Int redundant = hypre_ParAMGDataRedundant(amg_data);
   Aux_U = Par_U_array[p_level];
   Aux_F = Par_F_array[p_level];
   first_index = hypre_ParVectorFirstIndex(Aux_U);
   u_local = hypre_ParVectorLocalVector(Aux_U);
   u_data = hypre_VectorData(u_local);
   n = hypre_VectorSize(u_local);
   /*if (A_coarse)*/
   if (hypre_ParAMGDataParticipate(amg_data))
   {
      HYPRE_Real *f_data;
      hypre_Vector *f_local;
      hypre_Vector *tmp_vec;
      HYPRE_Int nf;
      HYPRE_Int local_info;
      HYPRE_Real *recv_buf = NULL;
      HYPRE_Int *displs = NULL;
      HYPRE_Int *info = NULL;
      HYPRE_Int new_num_procs, my_id;
      hypre_MPI_Comm_size(new_comm, &new_num_procs);
      hypre_MPI_Comm_rank(new_comm, &my_id);
      f_local = hypre_ParVectorLocalVector(Aux_F);
      f_data = hypre_VectorData(f_local);
      nf = hypre_VectorSize(f_local);
      /* first f */
      /* info[p] = local vector length of rank p; displs = offsets */
      info = hypre_CTAlloc(HYPRE_Int, new_num_procs);
      local_info = nf;
      if (redundant)
         hypre_MPI_Allgather(&local_info, 1, HYPRE_MPI_INT, info, 1, HYPRE_MPI_INT, new_comm);
      else
         hypre_MPI_Gather(&local_info, 1, HYPRE_MPI_INT, info, 1, HYPRE_MPI_INT, 0, new_comm);
      if (redundant || my_id ==0)
      {
         displs = hypre_CTAlloc(HYPRE_Int, new_num_procs+1);
         displs[0] = 0;
         for (i=1; i < new_num_procs+1; i++)
            displs[i] = displs[i-1]+info[i-1];
         if (F_coarse)
         {
            tmp_vec = hypre_ParVectorLocalVector(F_coarse);
            recv_buf = hypre_VectorData(tmp_vec);
         }
      }
      /* gather f into F_coarse's local data */
      if (redundant)
         hypre_MPI_Allgatherv ( f_data, nf, HYPRE_MPI_REAL,
                                recv_buf, info, displs,
                                HYPRE_MPI_REAL, new_comm );
      else
         hypre_MPI_Gatherv ( f_data, nf, HYPRE_MPI_REAL,
                             recv_buf, info, displs,
                             HYPRE_MPI_REAL, 0, new_comm );
      if (redundant || my_id ==0)
      {
         tmp_vec = hypre_ParVectorLocalVector(U_coarse);
         recv_buf = hypre_VectorData(tmp_vec);
      }
      /*then u */
      if (redundant)
      {
         hypre_MPI_Allgatherv ( u_data, n, HYPRE_MPI_REAL,
                                recv_buf, info, displs,
                                HYPRE_MPI_REAL, new_comm );
         hypre_TFree(displs);
         hypre_TFree(info);
      }
      else
         hypre_MPI_Gatherv ( u_data, n, HYPRE_MPI_REAL,
                             recv_buf, info, displs,
                             HYPRE_MPI_REAL, 0, new_comm );
      /* clean up */
      if (redundant || my_id ==0)
      {
         hypre_BoomerAMGSolve(coarse_solver, A_coarse, F_coarse, U_coarse);
      }
      /*copy my part of U to parallel vector */
      if (redundant)
      {
         /* every rank holds the full solution: copy out the local slice */
         HYPRE_Real *local_data;
         local_data = hypre_VectorData(hypre_ParVectorLocalVector(U_coarse));
         for (i = 0; i < n; i++)
         {
            u_data[i] = local_data[first_index+i];
         }
      }
      else
      {
         /* only rank 0 solved: scatter the slices back */
         HYPRE_Real *local_data=NULL;
         if (my_id == 0)
            local_data = hypre_VectorData(hypre_ParVectorLocalVector(U_coarse));
         hypre_MPI_Scatterv ( local_data, info, displs, HYPRE_MPI_REAL,
                              u_data, n, HYPRE_MPI_REAL, 0, new_comm );
         /*if (my_id == 0)
            local_data = hypre_VectorData(hypre_ParVectorLocalVector(F_coarse));
         hypre_MPI_Scatterv ( local_data, info, displs, HYPRE_MPI_REAL,
                              f_data, n, HYPRE_MPI_REAL, 0, new_comm );*/
         if (my_id == 0) hypre_TFree(displs);
         hypre_TFree(info);
      }
   }
   return(Solve_err_flag);
}
/* generate sub communicator, which contains no idle processors */
/* Create *new_comm_ptr containing exactly the ranks of comm that call
 * with participate != 0.  If no rank participates, *new_comm_ptr is
 * hypre_MPI_COMM_NULL.  The participating ranks are discovered with an
 * Allreduce; their ids are collected either by a SUM reduction (single
 * participant) or by the custom sorted-list-merge reduction
 * hypre_merge_lists (list layout: [flag, length, entries...]). */
HYPRE_Int hypre_GenerateSubComm(MPI_Comm comm, HYPRE_Int participate, MPI_Comm *new_comm_ptr)
{
   MPI_Comm new_comm;
   hypre_MPI_Group orig_group, new_group;
   hypre_MPI_Op hypre_MPI_MERGE;
   HYPRE_Int *info, *ranks, new_num_procs, my_info, my_id, num_procs;
   HYPRE_Int *list_len;
   hypre_MPI_Comm_rank(comm,&my_id);
   if (participate)
      my_info = 1;
   else
      my_info = 0;
   /* count the participants */
   hypre_MPI_Allreduce(&my_info, &new_num_procs, 1, HYPRE_MPI_INT, hypre_MPI_SUM, comm);
   if (new_num_procs == 0)
   {
      new_comm = hypre_MPI_COMM_NULL;
      *new_comm_ptr = new_comm;
      return 0;
   }
   /* ranks[2..] will hold the sorted participating rank ids;
    * ranks[0]/ranks[1] are the list header (flag, length) */
   ranks = hypre_CTAlloc(HYPRE_Int, new_num_procs+2);
   if (new_num_procs == 1)
   {
      /* exactly one participant: its id is the sum of (id or 0) */
      if (participate) my_info = my_id;
      hypre_MPI_Allreduce(&my_info, &ranks[2], 1, HYPRE_MPI_INT, hypre_MPI_SUM, comm);
   }
   else
   {
      info = hypre_CTAlloc(HYPRE_Int, new_num_procs+2);
      list_len = hypre_CTAlloc(HYPRE_Int, 1);
      if (participate)
      {
         /* one-element list containing this rank's id */
         info[0] = 1;
         info[1] = 1;
         info[2] = my_id;
      }
      else
         info[0] = 0;
      list_len[0] = new_num_procs + 2;
      /* commute=0: hypre_merge_lists is order-sensitive */
      hypre_MPI_Op_create((hypre_MPI_User_function *)hypre_merge_lists, 0, &hypre_MPI_MERGE);
      hypre_MPI_Allreduce(info, ranks, list_len[0], HYPRE_MPI_INT, hypre_MPI_MERGE, comm);
      hypre_MPI_Op_free (&hypre_MPI_MERGE);
      hypre_TFree(list_len);
      hypre_TFree(info);
   }
   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_group(comm, &orig_group);
   hypre_MPI_Group_incl(orig_group, new_num_procs, &ranks[2], &new_group);
   hypre_MPI_Comm_create(comm, new_group, &new_comm);
   hypre_MPI_Group_free(&new_group);
   hypre_MPI_Group_free(&orig_group);
   hypre_TFree(ranks);
   *new_comm_ptr = new_comm;
   return 0;
}
/* MPI user-defined reduction used by hypre_GenerateSubComm: merge two
 * sorted rank lists.  Each list is laid out as
 *   [0] participation flag, [1] length, [2..length+1] ascending entries.
 * The result is accumulated into list2 (the in/out argument of the MPI
 * reduction), filled from the back so the in-place merge never clobbers
 * entries that are still unconsumed. */
void hypre_merge_lists (HYPRE_Int *list1, HYPRE_Int* list2, hypre_int *np1, hypre_MPI_Datatype *dptr)
{
   HYPRE_Int slot, n1, n2, p1, p2;

   /* Nothing to take from list1: list2 already holds the result. */
   if (list1[0] == 0)
   {
      return;
   }

   list2[0] = 1;
   n1 = list1[1];
   n2 = list2[1];
   list2[1] = n1 + n2;
   if ((hypre_int)(list2[1]) > *np1+2) printf("segfault in MPI User function merge_list\n");

   /* Backwards merge: p1/p2 index the last unconsumed entry of each
    * list (entries live in slots [2 .. len+1]). */
   p1 = n1 + 1;
   p2 = n2 + 1;
   for (slot = n1 + n2 + 1; slot > 1; slot--)
   {
      if (p1 > 1 && p2 > 1 && list1[p1] > list2[p2])
      {
         list2[slot] = list1[p1--];
      }
      else if (p2 > 1)
      {
         list2[slot] = list2[p2--];
      }
      else if (p1 > 1)
      {
         list2[slot] = list1[p1--];
      }
   }
}
| 18,580 | 30.871355 | 101 | c |
AMG | AMG-master/parcsr_ls/headers.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "_hypre_parcsr_ls.h"
| 1,017 | 36.703704 | 81 | h |
AMG | AMG-master/parcsr_ls/par_add_cycle.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* ParAMG cycling routine
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
/*--------------------------------------------------------------------------
* hypre_BoomerAMGCycle
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGAdditiveCycle
 *
 * Performs one V-cycle in which levels [addlvl, add_end] are treated
 * additively (smoothing via the composite operator Lambda / the simple
 * diagonal D_inv built in hypre_CreateLambda / hypre_CreateDinv), while the
 * remaining levels are treated with the usual multiplicative scheme.
 * Returns Solve_err_flag (0 on success).
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGAdditiveCycle( void *amg_vdata)
{
   hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata;
   /* Data Structure variables */
   hypre_ParCSRMatrix **A_array;
   hypre_ParCSRMatrix **P_array;
   hypre_ParCSRMatrix **R_array;
   hypre_ParCSRMatrix *Lambda;
   hypre_ParCSRMatrix *Atilde;
   hypre_ParVector **F_array;
   hypre_ParVector **U_array;
   hypre_ParVector *Vtemp;
   hypre_ParVector *Ztemp;
   hypre_ParVector *Xtilde, *Rtilde;
   HYPRE_Int **CF_marker_array;
   HYPRE_Int num_levels;
   HYPRE_Int addlvl, add_end;
   HYPRE_Int additive;
   HYPRE_Int mult_additive;
   HYPRE_Int simple;
   HYPRE_Int add_last_lvl;
   HYPRE_Int i, j, num_rows;
   HYPRE_Int n_global;
   HYPRE_Int rlx_order;
   /* Local variables */
   HYPRE_Int Solve_err_flag = 0;
   HYPRE_Int level;
   HYPRE_Int coarse_grid;
   HYPRE_Int fine_grid;
   HYPRE_Int rlx_down;
   HYPRE_Int rlx_up;
   HYPRE_Int rlx_coarse;
   HYPRE_Int *grid_relax_type;
   HYPRE_Int *num_grid_sweeps;
   HYPRE_Real **l1_norms;
   HYPRE_Real alpha, beta;
   HYPRE_Real *u_data;
   HYPRE_Real *v_data;
   HYPRE_Real *l1_norms_lvl;
   HYPRE_Real *D_inv;
   HYPRE_Real *x_global;
   HYPRE_Real *r_global;
   HYPRE_Real *relax_weight;
   HYPRE_Real *omega;
#if 0
   HYPRE_Real *D_mat;
   HYPRE_Real *S_vec;
#endif
   /* Acquire data and allocate storage */
   A_array = hypre_ParAMGDataAArray(amg_data);
   F_array = hypre_ParAMGDataFArray(amg_data);
   U_array = hypre_ParAMGDataUArray(amg_data);
   P_array = hypre_ParAMGDataPArray(amg_data);
   R_array = hypre_ParAMGDataRArray(amg_data);
   CF_marker_array = hypre_ParAMGDataCFMarkerArray(amg_data);
   Vtemp = hypre_ParAMGDataVtemp(amg_data);
   Ztemp = hypre_ParAMGDataZtemp(amg_data);
   num_levels = hypre_ParAMGDataNumLevels(amg_data);
   additive = hypre_ParAMGDataAdditive(amg_data);
   mult_additive = hypre_ParAMGDataMultAdditive(amg_data);
   simple = hypre_ParAMGDataSimple(amg_data);
   add_last_lvl = hypre_ParAMGDataAddLastLvl(amg_data);
   grid_relax_type = hypre_ParAMGDataGridRelaxType(amg_data);
   Lambda = hypre_ParAMGDataLambda(amg_data);
   Atilde = hypre_ParAMGDataAtilde(amg_data);
   Xtilde = hypre_ParAMGDataXtilde(amg_data);
   Rtilde = hypre_ParAMGDataRtilde(amg_data);
   l1_norms = hypre_ParAMGDataL1Norms(amg_data);
   D_inv = hypre_ParAMGDataDinv(amg_data);
   relax_weight = hypre_ParAMGDataRelaxWeight(amg_data);
   omega = hypre_ParAMGDataOmega(amg_data);
   rlx_order = hypre_ParAMGDataRelaxOrder(amg_data);
   num_grid_sweeps = hypre_ParAMGDataNumGridSweeps(amg_data);
   /* Initialize */
   /* The first additive level is the largest of the three mode settings;
    * levels before addlvl (and after add_end) run multiplicatively. */
   addlvl = hypre_max(additive, mult_additive);
   addlvl = hypre_max(addlvl, simple);
   if (add_last_lvl == -1 ) add_end = num_levels-1;
   else add_end = add_last_lvl;
   Solve_err_flag = 0;
   /*---------------------------------------------------------------------
    * Main loop of cycling --- multiplicative version --- V-cycle
    *--------------------------------------------------------------------*/
   /* down cycle */
   rlx_down = grid_relax_type[1];
   rlx_up = grid_relax_type[2];
   rlx_coarse = grid_relax_type[3];
   for (level = 0; level < num_levels-1; level++)
   {
      fine_grid = level;
      coarse_grid = level + 1;
      u_data = hypre_VectorData(hypre_ParVectorLocalVector(U_array[fine_grid]));
      v_data = hypre_VectorData(hypre_ParVectorLocalVector(Vtemp));
      l1_norms_lvl = l1_norms[level];
      hypre_ParVectorSetConstantValues(U_array[coarse_grid], 0.0);
      if (level < addlvl || level > add_end) /* multiplicative version */
      {
         /* smoothing step */
         if (rlx_down == 0)
         {
            /* Weighted Jacobi using the diagonal entry (first entry of each
             * CSR row in the diag part). */
            HYPRE_Real *A_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
            HYPRE_Int *A_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
            num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
            for (j=0; j < num_grid_sweeps[1]; j++)
            {
               hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
               for (i = 0; i < num_rows; i++)
                  u_data[i] = relax_weight[level]*v_data[i] / A_data[A_i[i]];
            }
         }
         else if (rlx_down != 18)
         {
            /*hypre_BoomerAMGRelax(A_array[fine_grid],F_array[fine_grid],NULL,rlx_down,0,*/
            for (j=0; j < num_grid_sweeps[1]; j++)
            {
               hypre_BoomerAMGRelaxIF(A_array[fine_grid],F_array[fine_grid],
                     CF_marker_array[fine_grid], rlx_down,rlx_order,1,
                     relax_weight[fine_grid], omega[fine_grid],
                     l1_norms[level], U_array[fine_grid], Vtemp, Ztemp);
               hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
            }
         }
         else
         {
            /* rlx_down == 18: additive-style l1-Jacobi update on u. */
            num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
            for (j=0; j < num_grid_sweeps[1]; j++)
            {
               hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
               for (i = 0; i < num_rows; i++)
                  u_data[i] += v_data[i] / l1_norms_lvl[i];
            }
         }
         /* Residual: Vtemp <- Vtemp - A*u, then restrict to the coarse rhs. */
         alpha = -1.0;
         beta = 1.0;
         hypre_ParCSRMatrixMatvec(alpha, A_array[fine_grid], U_array[fine_grid],
                     beta, Vtemp);
         alpha = 1.0;
         beta = 0.0;
         hypre_ParCSRMatrixMatvecT(alpha,R_array[fine_grid],Vtemp,
                      beta,F_array[coarse_grid]);
      }
      else /* additive version */
      {
         /* No smoothing here; just save the residual/solution at the first
          * additive level and restrict the rhs downward. */
         hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
         if (level == 0) /* compute residual */
         {
            hypre_ParVectorCopy(Vtemp, Rtilde);
            hypre_ParVectorCopy(U_array[fine_grid],Xtilde);
         }
         alpha = 1.0;
         beta = 0.0;
         hypre_ParCSRMatrixMatvecT(alpha,R_array[fine_grid],Vtemp,
                      beta,F_array[coarse_grid]);
      }
   }
   /* additive smoothing and solve coarse grid */
   if (addlvl < num_levels)
   {
      if (simple > -1)
      {
         /* "simple" mode: one diagonal (D_inv) update over the composite
          * vector holding all additive levels at once. */
         x_global = hypre_VectorData(hypre_ParVectorLocalVector(Xtilde));
         r_global = hypre_VectorData(hypre_ParVectorLocalVector(Rtilde));
         n_global = hypre_VectorSize(hypre_ParVectorLocalVector(Xtilde));
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i=0; i < n_global; i++)
            x_global[i] += D_inv[i]*r_global[i];
      }
      else
      {
         if (num_grid_sweeps[1] > 1)
         {
            /* Two-sweep variant: r <- 2r - Atilde*Lambda*r before applying
             * Lambda (polynomial-style composite smoothing). */
            n_global = hypre_VectorSize(hypre_ParVectorLocalVector(Rtilde));
            hypre_ParVector *Tmptilde = hypre_CTAlloc(hypre_ParVector, 1);
            hypre_Vector *Tmptilde_local = hypre_SeqVectorCreate(n_global);
            hypre_SeqVectorInitialize(Tmptilde_local);
            hypre_ParVectorLocalVector(Tmptilde) = Tmptilde_local;
            hypre_ParVectorOwnsData(Tmptilde) = 1;
            hypre_ParCSRMatrixMatvec(1.0, Lambda, Rtilde, 0.0, Tmptilde);
            hypre_ParVectorScale(2.0,Rtilde);
            hypre_ParCSRMatrixMatvec(-1.0, Atilde, Tmptilde, 1.0, Rtilde);
            hypre_ParVectorDestroy(Tmptilde);
         }
         hypre_ParCSRMatrixMatvec(1.0, Lambda, Rtilde, 1.0, Xtilde);
      }
      /* Level 0 shares storage with Xtilde only when addlvl > 0; otherwise
       * copy the composite result back into the level-0 solution. */
      if (addlvl == 0) hypre_ParVectorCopy(Xtilde, U_array[0]);
   }
   if (add_end < num_levels -1)
   {
      /* Coarsest level is outside the additive range: relax it directly. */
      fine_grid = num_levels -1;
      for (j=0; j < num_grid_sweeps[3]; j++)
         if (rlx_coarse == 18)
            hypre_ParCSRRelax(A_array[fine_grid], F_array[fine_grid],
                  1, 1, l1_norms[fine_grid],
                  1.0, 1.0 ,0,0,0,0,
                  U_array[fine_grid], Vtemp, Ztemp);
         else
            hypre_BoomerAMGRelaxIF(A_array[fine_grid],F_array[fine_grid],
                  NULL, rlx_coarse,0,0,
                  relax_weight[fine_grid], omega[fine_grid],
                  l1_norms[fine_grid], U_array[fine_grid], Vtemp, Ztemp);
   }
   /* up cycle */
   for (level = num_levels-1; level > 0; level--)
   {
      fine_grid = level - 1;
      coarse_grid = level;
      if (level <= addlvl || level > add_end+1) /* multiplicative version */
      {
         /* Prolongate the coarse correction, then post-smooth. */
         alpha = 1.0;
         beta = 1.0;
         hypre_ParCSRMatrixMatvec(alpha, P_array[fine_grid],
                     U_array[coarse_grid],
                     beta, U_array[fine_grid]);
         if (rlx_up != 18)
            /*hypre_BoomerAMGRelax(A_array[fine_grid],F_array[fine_grid],NULL,rlx_up,0,*/
            for (j=0; j < num_grid_sweeps[2]; j++)
               hypre_BoomerAMGRelaxIF(A_array[fine_grid],F_array[fine_grid],
                     CF_marker_array[fine_grid],
                     rlx_up,rlx_order,2,
                     relax_weight[fine_grid], omega[fine_grid],
                     l1_norms[fine_grid], U_array[fine_grid], Vtemp, Ztemp);
         else if (rlx_order)
         {
            /* l1-Jacobi with C-points relaxed before F-points (order 2,1
             * reversed relative to the down cycle). */
            HYPRE_Int loc_relax_points[2];
            loc_relax_points[0] = -1;
            loc_relax_points[1] = 1;
            for (j=0; j < num_grid_sweeps[2]; j++)
               for (i=0; i < 2; i++)
                  hypre_ParCSRRelax_L1_Jacobi(A_array[fine_grid],F_array[fine_grid],
                        CF_marker_array[fine_grid],
                        loc_relax_points[i],
                        1.0, l1_norms[fine_grid],
                        U_array[fine_grid], Vtemp);
         }
         else
            for (j=0; j < num_grid_sweeps[2]; j++)
               hypre_ParCSRRelax(A_array[fine_grid], F_array[fine_grid],
                     1, 1, l1_norms[fine_grid],
                     1.0, 1.0 ,0,0,0,0,
                     U_array[fine_grid], Vtemp, Ztemp);
      }
      else /* additive version */
      {
         /* Additive range: only interpolate the correction, no smoothing. */
         alpha = 1.0;
         beta = 1.0;
         hypre_ParCSRMatrixMatvec(alpha, P_array[fine_grid],
                     U_array[coarse_grid],
                     beta, U_array[fine_grid]);
      }
   }
   return(Solve_err_flag);
}
/*--------------------------------------------------------------------------
 * hypre_CreateLambda
 *
 * Builds the composite smoothing operator Lambda (and, for multiple sweeps,
 * the composite matrix Atilde) over all additive levels, together with the
 * composite work vectors Xtilde/Rtilde whose storage is shared with the
 * per-level U_array/F_array vectors (their data pointers are re-pointed
 * into the composite arrays).  Also assembles a merged communication
 * package (L_comm_pkg) covering the union of all per-level comm packages.
 * Results are stored back into amg_data.  Returns Solve_err_flag (0).
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_CreateLambda(void *amg_vdata)
{
   hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata;
   /* Data Structure variables */
   MPI_Comm comm;
   hypre_ParCSRMatrix **A_array;
   hypre_ParVector **F_array;
   hypre_ParVector **U_array;
   hypre_ParCSRMatrix *A_tmp;
   hypre_ParCSRMatrix *Lambda;
   hypre_CSRMatrix *L_diag;
   hypre_CSRMatrix *L_offd;
   hypre_ParCSRMatrix *Atilde;
   hypre_CSRMatrix *Atilde_diag;
   hypre_CSRMatrix *Atilde_offd;
   HYPRE_Real *Atilde_diag_data;
   HYPRE_Real *Atilde_offd_data;
   hypre_CSRMatrix *A_tmp_diag;
   hypre_CSRMatrix *A_tmp_offd;
   hypre_ParVector *Xtilde;
   hypre_ParVector *Rtilde;
   hypre_Vector *Xtilde_local;
   hypre_Vector *Rtilde_local;
   hypre_ParCSRCommPkg *comm_pkg;
   hypre_ParCSRCommPkg *L_comm_pkg = NULL;
   hypre_ParCSRCommHandle *comm_handle;
   HYPRE_Real *L_diag_data;
   HYPRE_Real *L_offd_data;
   HYPRE_Real *buf_data = NULL;
   HYPRE_Real *tmp_data;
   HYPRE_Real *x_data;
   HYPRE_Real *r_data;
   HYPRE_Real *l1_norms;
   HYPRE_Real *A_tmp_diag_data;
   HYPRE_Real *A_tmp_offd_data;
   HYPRE_Real *D_data = NULL;
   HYPRE_Real *D_data_offd = NULL;
   HYPRE_Int *L_diag_i;
   HYPRE_Int *L_diag_j;
   HYPRE_Int *L_offd_i;
   HYPRE_Int *L_offd_j;
   HYPRE_Int *Atilde_diag_i;
   HYPRE_Int *Atilde_diag_j;
   HYPRE_Int *Atilde_offd_i;
   HYPRE_Int *Atilde_offd_j;
   HYPRE_Int *A_tmp_diag_i;
   HYPRE_Int *A_tmp_offd_i;
   HYPRE_Int *A_tmp_diag_j;
   HYPRE_Int *A_tmp_offd_j;
   HYPRE_Int *L_recv_ptr = NULL;
   HYPRE_Int *L_send_ptr = NULL;
   HYPRE_Int *L_recv_procs = NULL;
   HYPRE_Int *L_send_procs = NULL;
   HYPRE_Int *L_send_map_elmts = NULL;
   HYPRE_Int *recv_procs;
   HYPRE_Int *send_procs;
   HYPRE_Int *send_map_elmts;
   HYPRE_Int *send_map_starts;
   HYPRE_Int *recv_vec_starts;
   HYPRE_Int *all_send_procs = NULL;
   HYPRE_Int *all_recv_procs = NULL;
   HYPRE_Int *remap = NULL;
   HYPRE_Int *level_start;
   HYPRE_Int addlvl;
   HYPRE_Int additive;
   HYPRE_Int mult_additive;
   HYPRE_Int num_levels;
   HYPRE_Int num_add_lvls;
   HYPRE_Int num_procs;
   HYPRE_Int num_sends, num_recvs;
   HYPRE_Int num_sends_L = 0;
   HYPRE_Int num_recvs_L = 0;
   HYPRE_Int send_data_L = 0;
   HYPRE_Int num_rows_L = 0;
   HYPRE_Int num_rows_tmp = 0;
   HYPRE_Int num_cols_offd_L = 0;
   HYPRE_Int num_cols_offd = 0;
   HYPRE_Int level, i, j, k;
   HYPRE_Int this_proc, cnt, cnt_diag, cnt_offd;
   HYPRE_Int A_cnt_diag, A_cnt_offd;
   HYPRE_Int cnt_recv, cnt_send, cnt_row, row_start;
   HYPRE_Int start_diag, start_offd, indx, cnt_map;
   HYPRE_Int start, j_indx, index, cnt_level;
   HYPRE_Int max_sends, max_recvs;
   HYPRE_Int ns;
   /* Local variables */
   HYPRE_Int Solve_err_flag = 0;
   HYPRE_Int num_nonzeros_diag;
   HYPRE_Int num_nonzeros_offd;
   HYPRE_Real **l1_norms_ptr = NULL;
   /*HYPRE_Real *relax_weight = NULL;
   HYPRE_Int relax_type; */
   HYPRE_Int add_rlx;
   HYPRE_Int add_last_lvl, add_end;
   HYPRE_Real add_rlx_wt;
   /* Acquire data and allocate storage */
   A_array = hypre_ParAMGDataAArray(amg_data);
   F_array = hypre_ParAMGDataFArray(amg_data);
   U_array = hypre_ParAMGDataUArray(amg_data);
   additive = hypre_ParAMGDataAdditive(amg_data);
   mult_additive = hypre_ParAMGDataMultAdditive(amg_data);
   add_last_lvl = hypre_ParAMGDataAddLastLvl(amg_data);
   num_levels = hypre_ParAMGDataNumLevels(amg_data);
   /*relax_weight = hypre_ParAMGDataRelaxWeight(amg_data);
   relax_type = hypre_ParAMGDataGridRelaxType(amg_data)[1];*/
   comm = hypre_ParCSRMatrixComm(A_array[0]);
   add_rlx = hypre_ParAMGDataAddRelaxType(amg_data);
   add_rlx_wt = hypre_ParAMGDataAddRelaxWt(amg_data);
   ns = hypre_ParAMGDataNumGridSweeps(amg_data)[1];
   hypre_MPI_Comm_size(comm,&num_procs);
   l1_norms_ptr = hypre_ParAMGDataL1Norms(amg_data);
   addlvl = hypre_max(additive, mult_additive);
   if (add_last_lvl != -1) add_end = add_last_lvl+1;
   else add_end = num_levels;
   num_add_lvls = add_end+1-addlvl;
   level_start = hypre_CTAlloc(HYPRE_Int, num_add_lvls+1);
   send_data_L = 0;
   num_rows_L = 0;
   num_cols_offd_L = 0;
   num_nonzeros_diag = 0;
   num_nonzeros_offd = 0;
   level_start[0] = 0;
   cnt = 1;
   max_sends = 0;
   max_recvs = 0;
   /* First pass over the additive levels: accumulate composite sizes
    * (rows, offd cols, nonzeros) and upper bounds on send/recv partners. */
   for (i=addlvl; i < add_end; i++)
   {
      A_tmp = A_array[i];
      A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp);
      A_tmp_offd = hypre_ParCSRMatrixOffd(A_tmp);
      A_tmp_diag_i = hypre_CSRMatrixI(A_tmp_diag);
      A_tmp_offd_i = hypre_CSRMatrixI(A_tmp_offd);
      num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag);
      num_cols_offd = hypre_CSRMatrixNumCols(A_tmp_offd);
      num_rows_L += num_rows_tmp;
      level_start[cnt] = level_start[cnt-1] + num_rows_tmp;
      cnt++;
      num_cols_offd_L += num_cols_offd;
      num_nonzeros_diag += A_tmp_diag_i[num_rows_tmp];
      num_nonzeros_offd += A_tmp_offd_i[num_rows_tmp];
      comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp);
      if (comm_pkg)
      {
         num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
         max_sends += num_sends;
         if (num_sends)
            send_data_L += hypre_ParCSRCommPkgSendMapStart(comm_pkg,num_sends);
         max_recvs += hypre_ParCSRCommPkgNumRecvs(comm_pkg);
      }
   }
   /* Cap partner counts at num_procs; past that point the dense
    * (per-process array) merge strategy below is cheaper. */
   if (max_sends >= num_procs ||max_recvs >= num_procs)
   {
      max_sends = num_procs;
      max_recvs = num_procs;
   }
   if (max_sends) all_send_procs = hypre_CTAlloc(HYPRE_Int, max_sends);
   if (max_recvs) all_recv_procs = hypre_CTAlloc(HYPRE_Int, max_recvs);
   cnt_send = 0;
   cnt_recv = 0;
   if (max_sends || max_recvs)
   {
      if (max_sends < num_procs && max_recvs < num_procs)
      {
         /* Sparse merge: concatenate all per-level partner lists, sort,
          * and deduplicate to obtain the union of processes. */
         for (i=addlvl; i < add_end; i++)
         {
            A_tmp = A_array[i];
            comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp);
            if (comm_pkg)
            {
               num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
               num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
               send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
               recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
               for (j = 0; j < num_sends; j++)
                  all_send_procs[cnt_send++] = send_procs[j];
               for (j = 0; j < num_recvs; j++)
                  all_recv_procs[cnt_recv++] = recv_procs[j];
            }
         }
         if (max_sends)
         {
            hypre_qsort0(all_send_procs, 0, max_sends-1);
            num_sends_L = 1;
            this_proc = all_send_procs[0];
            for (i=1; i < max_sends; i++)
            {
               if (all_send_procs[i] > this_proc)
               {
                  this_proc = all_send_procs[i];
                  all_send_procs[num_sends_L++] = this_proc;
               }
            }
            L_send_procs = hypre_CTAlloc(HYPRE_Int, num_sends_L);
            for (j=0; j < num_sends_L; j++)
               L_send_procs[j] = all_send_procs[j];
            hypre_TFree(all_send_procs);
         }
         if (max_recvs)
         {
            hypre_qsort0(all_recv_procs, 0, max_recvs-1);
            num_recvs_L = 1;
            this_proc = all_recv_procs[0];
            for (i=1; i < max_recvs; i++)
            {
               if (all_recv_procs[i] > this_proc)
               {
                  this_proc = all_recv_procs[i];
                  all_recv_procs[num_recvs_L++] = this_proc;
               }
            }
            L_recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs_L);
            for (j=0; j < num_recvs_L; j++)
               L_recv_procs[j] = all_recv_procs[j];
            hypre_TFree(all_recv_procs);
         }
         L_recv_ptr = hypre_CTAlloc(HYPRE_Int, num_recvs_L+1);
         L_send_ptr = hypre_CTAlloc(HYPRE_Int, num_sends_L+1);
         /* Accumulate per-partner message sizes into offset slot p+1. */
         for (i=addlvl; i < add_end; i++)
         {
            A_tmp = A_array[i];
            comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp);
            if (comm_pkg)
            {
               num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
               num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
               send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
               recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
               send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
               recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
            }
            else
            {
               num_sends = 0;
               num_recvs = 0;
            }
            for (k = 0; k < num_sends; k++)
            {
               this_proc = hypre_BinarySearch(L_send_procs,send_procs[k],num_sends_L);
               L_send_ptr[this_proc+1] += send_map_starts[k+1]-send_map_starts[k];
            }
            for (k = 0; k < num_recvs; k++)
            {
               this_proc = hypre_BinarySearch(L_recv_procs,recv_procs[k],num_recvs_L);
               L_recv_ptr[this_proc+1] += recv_vec_starts[k+1]-recv_vec_starts[k];
            }
         }
         /* Turn the per-partner counts into prefix-sum offsets. */
         L_recv_ptr[0] = 0;
         for (i=1; i < num_recvs_L; i++)
            L_recv_ptr[i+1] += L_recv_ptr[i];
         L_send_ptr[0] = 0;
         for (i=1; i < num_sends_L; i++)
            L_send_ptr[i+1] += L_send_ptr[i];
      }
      else
      {
         /* Dense merge: index by process rank directly (arrays of length
          * num_procs), counting data volumes per partner. */
         num_recvs_L = 0;
         num_sends_L = 0;
         for (i=addlvl; i < add_end; i++)
         {
            A_tmp = A_array[i];
            comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp);
            if (comm_pkg)
            {
               num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
               num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
               send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
               recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
               send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
               recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
               for (j = 0; j < num_sends; j++)
               {
                  this_proc = send_procs[j];
                  if (all_send_procs[this_proc] == 0)
                     num_sends_L++;
                  all_send_procs[this_proc] += send_map_starts[j+1]-send_map_starts[j];
               }
               for (j = 0; j < num_recvs; j++)
               {
                  this_proc = recv_procs[j];
                  if (all_recv_procs[this_proc] == 0)
                     num_recvs_L++;
                  all_recv_procs[this_proc] += recv_vec_starts[j+1]-recv_vec_starts[j];
               }
            }
         }
         if (max_sends)
         {
            L_send_procs = hypre_CTAlloc(HYPRE_Int, num_sends_L);
            L_send_ptr = hypre_CTAlloc(HYPRE_Int, num_sends_L+1);
            num_sends_L = 0;
            for (j=0; j < num_procs; j++)
            {
               this_proc = all_send_procs[j];
               if (this_proc)
               {
                  L_send_procs[num_sends_L++] = j;
                  L_send_ptr[num_sends_L] = this_proc + L_send_ptr[num_sends_L-1];
               }
            }
         }
         if (max_recvs)
         {
            L_recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs_L);
            L_recv_ptr = hypre_CTAlloc(HYPRE_Int, num_recvs_L+1);
            num_recvs_L = 0;
            for (j=0; j < num_procs; j++)
            {
               this_proc = all_recv_procs[j];
               if (this_proc)
               {
                  L_recv_procs[num_recvs_L++] = j;
                  L_recv_ptr[num_recvs_L] = this_proc + L_recv_ptr[num_recvs_L-1];
               }
            }
         }
      }
   }
   /* NOTE(review): in the sparse-merge branch these were already freed via
    * hypre_TFree above; presumably hypre_TFree NULLs its argument so this
    * second free is a no-op -- TODO confirm against the memory macros. */
   if (max_sends) hypre_TFree(all_send_procs);
   if (max_recvs) hypre_TFree(all_recv_procs);
   /* Allocate the composite (block-diagonal over levels) matrix Lambda. */
   L_diag = hypre_CSRMatrixCreate(num_rows_L, num_rows_L, num_nonzeros_diag);
   L_offd = hypre_CSRMatrixCreate(num_rows_L, num_cols_offd_L, num_nonzeros_offd);
   hypre_CSRMatrixInitialize(L_diag);
   hypre_CSRMatrixInitialize(L_offd);
   if (num_nonzeros_diag)
   {
      L_diag_data = hypre_CSRMatrixData(L_diag);
      L_diag_j = hypre_CSRMatrixJ(L_diag);
   }
   L_diag_i = hypre_CSRMatrixI(L_diag);
   if (num_nonzeros_offd)
   {
      L_offd_data = hypre_CSRMatrixData(L_offd);
      L_offd_j = hypre_CSRMatrixJ(L_offd);
   }
   L_offd_i = hypre_CSRMatrixI(L_offd);
   /* With more than one sweep we also need the composite system matrix. */
   if (ns > 1)
   {
      Atilde_diag = hypre_CSRMatrixCreate(num_rows_L, num_rows_L, num_nonzeros_diag);
      Atilde_offd = hypre_CSRMatrixCreate(num_rows_L, num_cols_offd_L, num_nonzeros_offd);
      hypre_CSRMatrixInitialize(Atilde_diag);
      hypre_CSRMatrixInitialize(Atilde_offd);
      if (num_nonzeros_diag)
      {
         Atilde_diag_data = hypre_CSRMatrixData(Atilde_diag);
         Atilde_diag_j = hypre_CSRMatrixJ(Atilde_diag);
      }
      Atilde_diag_i = hypre_CSRMatrixI(Atilde_diag);
      if (num_nonzeros_offd)
      {
         Atilde_offd_data = hypre_CSRMatrixData(Atilde_offd);
         Atilde_offd_j = hypre_CSRMatrixJ(Atilde_offd);
      }
      Atilde_offd_i = hypre_CSRMatrixI(Atilde_offd);
   }
   if (num_rows_L) D_data = hypre_CTAlloc(HYPRE_Real,num_rows_L);
   if (send_data_L)
   {
      L_send_map_elmts = hypre_CTAlloc(HYPRE_Int, send_data_L);
      buf_data = hypre_CTAlloc(HYPRE_Real,send_data_L);
   }
   if (num_cols_offd_L)
   {
      D_data_offd = hypre_CTAlloc(HYPRE_Real,num_cols_offd_L);
      /*L_col_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_L);*/
      remap = hypre_CTAlloc(HYPRE_Int, num_cols_offd_L);
   }
   /* Composite rhs/solution vectors spanning all additive levels. */
   Rtilde = hypre_CTAlloc(hypre_ParVector, 1);
   Rtilde_local = hypre_SeqVectorCreate(num_rows_L);
   hypre_SeqVectorInitialize(Rtilde_local);
   hypre_ParVectorLocalVector(Rtilde) = Rtilde_local;
   hypre_ParVectorOwnsData(Rtilde) = 1;
   Xtilde = hypre_CTAlloc(hypre_ParVector, 1);
   Xtilde_local = hypre_SeqVectorCreate(num_rows_L);
   hypre_SeqVectorInitialize(Xtilde_local);
   hypre_ParVectorLocalVector(Xtilde) = Xtilde_local;
   hypre_ParVectorOwnsData(Xtilde) = 1;
   x_data = hypre_VectorData(hypre_ParVectorLocalVector(Xtilde));
   r_data = hypre_VectorData(hypre_ParVectorLocalVector(Rtilde));
   cnt = 0;
   cnt_level = 0;
   cnt_diag = 0;
   cnt_offd = 0;
   cnt_row = 1;
   L_diag_i[0] = 0;
   L_offd_i[0] = 0;
   if (ns > 1)
   {
      A_cnt_diag = 0;
      A_cnt_offd = 0;
      Atilde_diag_i[0] = 0;
      Atilde_offd_i[0] = 0;
   }
   /* Second pass: fill Lambda (and Atilde) level by level. */
   for (level=addlvl; level < add_end; level++)
   {
      row_start = level_start[cnt_level];
      if (level != 0)
      {
         /* Re-point the per-level F/U vectors into the composite r/x
          * arrays so the additive cycle can operate on one long vector. */
         tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(F_array[level]));
         if (tmp_data) hypre_TFree(tmp_data);
         hypre_VectorData(hypre_ParVectorLocalVector(F_array[level])) = &r_data[row_start];
         hypre_VectorOwnsData(hypre_ParVectorLocalVector(F_array[level])) = 0;
         tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(U_array[level]));
         if (tmp_data) hypre_TFree(tmp_data);
         hypre_VectorData(hypre_ParVectorLocalVector(U_array[level])) = &x_data[row_start];
         hypre_VectorOwnsData(hypre_ParVectorLocalVector(U_array[level])) = 0;
      }
      cnt_level++;
      start_diag = L_diag_i[cnt_row-1];
      start_offd = L_offd_i[cnt_row-1];
      A_tmp = A_array[level];
      A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp);
      A_tmp_offd = hypre_ParCSRMatrixOffd(A_tmp);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp);
      A_tmp_diag_i = hypre_CSRMatrixI(A_tmp_diag);
      A_tmp_offd_i = hypre_CSRMatrixI(A_tmp_offd);
      A_tmp_diag_j = hypre_CSRMatrixJ(A_tmp_diag);
      A_tmp_offd_j = hypre_CSRMatrixJ(A_tmp_offd);
      A_tmp_diag_data = hypre_CSRMatrixData(A_tmp_diag);
      A_tmp_offd_data = hypre_CSRMatrixData(A_tmp_offd);
      num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag);
      if (comm_pkg)
      {
         num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
         num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
         send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
         recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
         send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
         send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);
         recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
      }
      else
      {
         num_sends = 0;
         num_recvs = 0;
      }
      /* Append this level's send elements (shifted by row_start) into the
       * merged send map; L_send_ptr is advanced in place and restored
       * to proper offsets after the level loop. */
      for (i=0; i < num_sends; i++)
      {
         this_proc = hypre_BinarySearch(L_send_procs,send_procs[i],num_sends_L);
         indx = L_send_ptr[this_proc];
         for (j=send_map_starts[i]; j < send_map_starts[i+1]; j++)
         {
            L_send_map_elmts[indx++] = row_start + send_map_elmts[j];
         }
         L_send_ptr[this_proc] = indx;
      }
      cnt_map = 0;
      /* remap translates this level's local offd column indices into
       * positions of the merged receive buffer. */
      for (i = 0; i < num_recvs; i++)
      {
         this_proc = hypre_BinarySearch(L_recv_procs,recv_procs[i],num_recvs_L);
         indx = L_recv_ptr[this_proc];
         for (j=recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++)
         {
            remap[cnt_map++] = indx++;
         }
         L_recv_ptr[this_proc] = indx;
      }
      /* Compute Lambda */
      if (add_rlx == 0)
      {
         /*HYPRE_Real rlx_wt = relax_weight[level];*/
#ifdef HYPRE_USING_OPENMP
#pragma omp for private(i) HYPRE_SMP_SCHEDULE
#endif
         /* Weighted-Jacobi smoother: D = w / diag(A). */
         for (i=0; i < num_rows_tmp; i++)
         {
            D_data[i] = add_rlx_wt/A_tmp_diag_data[A_tmp_diag_i[i]];
            L_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1];
            L_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1];
         }
         if (ns > 1)
            for (i=0; i < num_rows_tmp; i++)
            {
               Atilde_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1];
               Atilde_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1];
            }
      }
      else
      {
         /* l1-Jacobi smoother: D = 1 / l1_norms. */
         l1_norms = l1_norms_ptr[level];
#ifdef HYPRE_USING_OPENMP
#pragma omp for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i=0; i < num_rows_tmp; i++)
         {
            D_data[i] = 1.0/l1_norms[i];
            L_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1];
            L_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1];
         }
         if (ns > 1)
            for (i=0; i < num_rows_tmp; i++)
            {
               Atilde_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1];
               Atilde_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1];
            }
      }
      /* Exchange D values for this level's offd columns. */
      if (num_procs > 1)
      {
         index = 0;
         for (i=0; i < num_sends; i++)
         {
            start = send_map_starts[i];
            for (j=start; j < send_map_starts[i+1]; j++)
               buf_data[index++] = D_data[send_map_elmts[j]];
         }
         comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg,
                        buf_data, D_data_offd);
         hypre_ParCSRCommHandleDestroy(comm_handle);
      }
      /* Fill this level's rows: Lambda = D(2I - A D) restricted to the
       * level block (diagonal element is the first entry of each row). */
      for (i = 0; i < num_rows_tmp; i++)
      {
         j_indx = A_tmp_diag_i[i];
         if (ns > 1)
         {
            Atilde_diag_data[A_cnt_diag] = A_tmp_diag_data[j_indx];
            Atilde_diag_j[A_cnt_diag++] = i+row_start;
         }
         L_diag_data[cnt_diag] = (2.0 - A_tmp_diag_data[j_indx]*D_data[i])*D_data[i];
         L_diag_j[cnt_diag++] = i+row_start;
         for (j=A_tmp_diag_i[i]+1; j < A_tmp_diag_i[i+1]; j++)
         {
            j_indx = A_tmp_diag_j[j];
            L_diag_data[cnt_diag] = (- A_tmp_diag_data[j]*D_data[j_indx])*D_data[i];
            L_diag_j[cnt_diag++] = j_indx+row_start;
         }
         for (j=A_tmp_offd_i[i]; j < A_tmp_offd_i[i+1]; j++)
         {
            j_indx = A_tmp_offd_j[j];
            L_offd_data[cnt_offd] = (- A_tmp_offd_data[j]*D_data_offd[j_indx])*D_data[i];
            L_offd_j[cnt_offd++] = remap[j_indx];
         }
         if (ns > 1)
         {
            for (j=A_tmp_diag_i[i]+1; j < A_tmp_diag_i[i+1]; j++)
            {
               j_indx = A_tmp_diag_j[j];
               Atilde_diag_data[A_cnt_diag] = A_tmp_diag_data[j];
               Atilde_diag_j[A_cnt_diag++] = j_indx+row_start;
            }
            for (j=A_tmp_offd_i[i]; j < A_tmp_offd_i[i+1]; j++)
            {
               j_indx = A_tmp_offd_j[j];
               Atilde_offd_data[A_cnt_offd] = A_tmp_offd_data[j];
               Atilde_offd_j[A_cnt_offd++] = remap[j_indx];
            }
         }
      }
      cnt_row += num_rows_tmp;
   }
   /* L_send_ptr/L_recv_ptr were advanced to their end positions above;
    * shift them back down by one slot to recover the start offsets. */
   if (L_send_ptr)
   {
      for (i=num_sends_L-1; i > 0; i--)
         L_send_ptr[i] = L_send_ptr[i-1];
      L_send_ptr[0] = 0;
   }
   else
      L_send_ptr = hypre_CTAlloc(HYPRE_Int,1);
   if (L_recv_ptr)
   {
      for (i=num_recvs_L-1; i > 0; i--)
         L_recv_ptr[i] = L_recv_ptr[i-1];
      L_recv_ptr[0] = 0;
   }
   else
      L_recv_ptr = hypre_CTAlloc(HYPRE_Int,1);
   /* Assemble the merged communication package for Lambda. */
   L_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg,1);
   hypre_ParCSRCommPkgNumRecvs(L_comm_pkg) = num_recvs_L;
   hypre_ParCSRCommPkgNumSends(L_comm_pkg) = num_sends_L;
   hypre_ParCSRCommPkgRecvProcs(L_comm_pkg) = L_recv_procs;
   hypre_ParCSRCommPkgSendProcs(L_comm_pkg) = L_send_procs;
   hypre_ParCSRCommPkgRecvVecStarts(L_comm_pkg) = L_recv_ptr;
   hypre_ParCSRCommPkgSendMapStarts(L_comm_pkg) = L_send_ptr;
   hypre_ParCSRCommPkgSendMapElmts(L_comm_pkg) = L_send_map_elmts;
   hypre_ParCSRCommPkgComm(L_comm_pkg) = comm;
   Lambda = hypre_CTAlloc(hypre_ParCSRMatrix, 1);
   hypre_ParCSRMatrixDiag(Lambda) = L_diag;
   hypre_ParCSRMatrixOffd(Lambda) = L_offd;
   hypre_ParCSRMatrixCommPkg(Lambda) = L_comm_pkg;
   hypre_ParCSRMatrixComm(Lambda) = comm;
   hypre_ParCSRMatrixOwnsData(Lambda) = 1;
   if (ns > 1)
   {
      /*hypre_ParCSRCommPkg *A_comm_pkg = NULL;
      HYPRE_Int *A_recv_ptr = NULL;
      HYPRE_Int *A_send_ptr = NULL;
      HYPRE_Int *A_recv_procs = NULL;
      HYPRE_Int *A_send_procs = NULL;
      HYPRE_Int *A_send_map_elmts = NULL;
      A_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg,1);
      A_recv_ptr = hypre_CTAlloc(HYPRE_Int, num_recvs+1);
      A_send_ptr = hypre_CTAlloc(HYPRE_Int, num_sends+1);
      A_recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs_L);
      A_send_procs = hypre_CTAlloc(HYPRE_Int, num_sends_L);
      A_send_map_elmts = hypre_CTAlloc(HYPRE_Int, L_send_ptr[num_sends_L]);
      for (i=0; i<num_recvs_L+1; i++)
         A_recv_ptr[i] = L_recv_ptr[i];
      for (i=0; i<num_sends_L+1; i++)
         A_send_ptr[i] = L_send_ptr[i];
      for (i=0; i<num_recvs_L; i++)
         A_recv_procs[i] = L_recv_procs[i];
      for (i=0; i<num_sends_L; i++)
         A_send_procs[i] = L_send_procs[i];
      for (i=0; i < L_send_ptr[num_sends_L]; i++)
         A_send_map_elmts[i] = L_send_map_elmts[i];
      hypre_ParCSRCommPkgNumRecvs(A_comm_pkg) = num_recvs_L;
      hypre_ParCSRCommPkgNumSends(A_comm_pkg) = num_sends_L;
      hypre_ParCSRCommPkgRecvProcs(A_comm_pkg) = A_recv_procs;
      hypre_ParCSRCommPkgSendProcs(A_comm_pkg) = A_send_procs;
      hypre_ParCSRCommPkgRecvVecStarts(A_comm_pkg) = A_recv_ptr;
      hypre_ParCSRCommPkgSendMapStarts(A_comm_pkg) = A_send_ptr;
      hypre_ParCSRCommPkgSendMapElmts(A_comm_pkg) = A_send_map_elmts;
      hypre_ParCSRCommPkgComm(A_comm_pkg) = comm; */
      /* NOTE(review): Atilde shares L_comm_pkg with Lambda (the dedicated
       * copy above is commented out); whoever destroys both matrices must
       * not free the comm package twice -- TODO confirm ownership. */
      Atilde = hypre_CTAlloc(hypre_ParCSRMatrix, 1);
      hypre_ParCSRMatrixDiag(Atilde) = Atilde_diag;
      hypre_ParCSRMatrixOffd(Atilde) = Atilde_offd;
      hypre_ParCSRMatrixCommPkg(Atilde) = L_comm_pkg;
      hypre_ParCSRMatrixComm(Atilde) = comm;
      hypre_ParCSRMatrixOwnsData(Atilde) = 1;
      hypre_ParAMGDataAtilde(amg_data) = Atilde;
   }
   hypre_ParAMGDataLambda(amg_data) = Lambda;
   hypre_ParAMGDataRtilde(amg_data) = Rtilde;
   hypre_ParAMGDataXtilde(amg_data) = Xtilde;
   hypre_TFree(D_data_offd);
   hypre_TFree(D_data);
   /* NOTE(review): buf_data is freed both here (when num_procs > 1) and
    * unconditionally two lines below; harmless only if hypre_TFree NULLs
    * its argument -- TODO confirm against the memory macros. */
   if (num_procs > 1) hypre_TFree(buf_data);
   hypre_TFree(remap);
   hypre_TFree(buf_data);
   hypre_TFree(level_start);
   return Solve_err_flag;
}
/*--------------------------------------------------------------------------
 * hypre_CreateDinv
 *
 * Builds the composite inverse-diagonal D_inv (one entry per row over all
 * "simple" additive levels) together with the composite vectors
 * Xtilde/Rtilde.  As in hypre_CreateLambda, the per-level F/U vectors are
 * re-pointed into the composite arrays so the additive cycle can update
 * all levels with a single loop.  Results are stored back into amg_data.
 * Returns Solve_err_flag (0).
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_CreateDinv(void *amg_vdata)
{
   hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata;
   /* Data Structure variables */
   hypre_ParCSRMatrix **A_array;
   hypre_ParVector **F_array;
   hypre_ParVector **U_array;
   hypre_ParCSRMatrix *A_tmp;
   hypre_CSRMatrix *A_tmp_diag;
   hypre_ParVector *Xtilde;
   hypre_ParVector *Rtilde;
   hypre_Vector *Xtilde_local;
   hypre_Vector *Rtilde_local;
   HYPRE_Real *x_data;
   HYPRE_Real *r_data;
   HYPRE_Real *tmp_data;
   HYPRE_Real *D_inv = NULL;
   /*HYPRE_Real *relax_weight = NULL;
   HYPRE_Real relax_type;*/
   HYPRE_Int addlvl;
   HYPRE_Int num_levels;
   HYPRE_Int num_rows_L;
   HYPRE_Int num_rows_tmp;
   HYPRE_Int level, i;
   HYPRE_Int add_rlx;
   HYPRE_Real add_rlx_wt;
   HYPRE_Int add_last_lvl, add_end;
   /* Local variables */
   HYPRE_Int Solve_err_flag = 0;
   HYPRE_Real **l1_norms_ptr = NULL;
   HYPRE_Real *l1_norms;
   HYPRE_Int l1_start;
   /* Acquire data and allocate storage */
   A_array = hypre_ParAMGDataAArray(amg_data);
   F_array = hypre_ParAMGDataFArray(amg_data);
   U_array = hypre_ParAMGDataUArray(amg_data);
   addlvl = hypre_ParAMGDataSimple(amg_data);
   num_levels = hypre_ParAMGDataNumLevels(amg_data);
   add_rlx_wt = hypre_ParAMGDataAddRelaxWt(amg_data);
   add_rlx = hypre_ParAMGDataAddRelaxType(amg_data);
   add_last_lvl = hypre_ParAMGDataAddLastLvl(amg_data);
   /*relax_weight = hypre_ParAMGDataRelaxWeight(amg_data);
   relax_type = hypre_ParAMGDataGridRelaxType(amg_data)[1];*/
   l1_norms_ptr = hypre_ParAMGDataL1Norms(amg_data);
   /* smooth_option = hypre_ParAMGDataSmoothOption(amg_data); */
   if (add_last_lvl == -1 ) add_end = num_levels;
   else add_end = add_last_lvl;
   /* First pass: total row count over the additive levels. */
   num_rows_L = 0;
   for (i=addlvl; i < add_end; i++)
   {
      A_tmp = A_array[i];
      A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp);
      num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag);
      num_rows_L += num_rows_tmp;
   }
   /* Composite rhs/solution vectors spanning all additive levels. */
   Rtilde = hypre_CTAlloc(hypre_ParVector, 1);
   Rtilde_local = hypre_SeqVectorCreate(num_rows_L);
   hypre_SeqVectorInitialize(Rtilde_local);
   hypre_ParVectorLocalVector(Rtilde) = Rtilde_local;
   hypre_ParVectorOwnsData(Rtilde) = 1;
   Xtilde = hypre_CTAlloc(hypre_ParVector, 1);
   Xtilde_local = hypre_SeqVectorCreate(num_rows_L);
   hypre_SeqVectorInitialize(Xtilde_local);
   hypre_ParVectorLocalVector(Xtilde) = Xtilde_local;
   hypre_ParVectorOwnsData(Xtilde) = 1;
   x_data = hypre_VectorData(hypre_ParVectorLocalVector(Xtilde));
   r_data = hypre_VectorData(hypre_ParVectorLocalVector(Rtilde));
   D_inv = hypre_CTAlloc(HYPRE_Real, num_rows_L);
   l1_start = 0;
   /* Second pass: fill D_inv and re-point per-level vectors. */
   for (level=addlvl; level < add_end; level++)
   {
      if (level != 0)
      {
         /* Free each level's own storage and alias it into the composite
          * r/x arrays (ownership is cleared to avoid a double free). */
         tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(F_array[level]));
         if (tmp_data) hypre_TFree(tmp_data);
         hypre_VectorData(hypre_ParVectorLocalVector(F_array[level])) = &r_data[l1_start];
         hypre_VectorOwnsData(hypre_ParVectorLocalVector(F_array[level])) = 0;
         tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(U_array[level]));
         if (tmp_data) hypre_TFree(tmp_data);
         hypre_VectorData(hypre_ParVectorLocalVector(U_array[level])) = &x_data[l1_start];
         hypre_VectorOwnsData(hypre_ParVectorLocalVector(U_array[level])) = 0;
      }
      A_tmp = A_array[level];
      A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp);
      num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag);
      if (add_rlx == 0)
      {
         /* Weighted Jacobi: D_inv = w / diag(A); the diagonal entry is the
          * first entry of each CSR row in the diag part. */
         /*HYPRE_Real rlx_wt = relax_weight[level];*/
         HYPRE_Int *A_tmp_diag_i = hypre_CSRMatrixI(A_tmp_diag);
         HYPRE_Real *A_tmp_diag_data = hypre_CSRMatrixData(A_tmp_diag);
#ifdef HYPRE_USING_OPENMP
#pragma omp for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i=0; i < num_rows_tmp; i++)
            D_inv[l1_start+i] = add_rlx_wt/A_tmp_diag_data[A_tmp_diag_i[i]];
      }
      else
      {
         /* l1-Jacobi: D_inv = 1 / l1 row norms. */
         l1_norms = l1_norms_ptr[level];
#ifdef HYPRE_USING_OPENMP
#pragma omp for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i=0; i < num_rows_tmp; i++)
            D_inv[l1_start+i] = 1.0/l1_norms[i];
      }
      l1_start += num_rows_tmp;
   }
   hypre_ParAMGDataDinv(amg_data) = D_inv;
   hypre_ParAMGDataRtilde(amg_data) = Rtilde;
   hypre_ParAMGDataXtilde(amg_data) = Xtilde;
   return Solve_err_flag;
}
| 40,284 | 34.808889 | 97 | c |
AMG | AMG-master/parcsr_ls/par_amg.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* ParAMG functions
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
#include <assert.h>
/*--------------------------------------------------------------------------
* hypre_BoomerAMGCreate
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCreate
 *
 * Allocate a hypre_ParAMGData solver object and initialize every setup,
 * solve, additive, logging and output parameter to its default value.
 * Returns the object as an opaque (void *) handle; release it with
 * hypre_BoomerAMGDestroy.
 *
 * Fixes relative to the previous revision:
 *   - removed the dead store "interp_type = 0;" that was immediately
 *     overwritten by "interp_type = 6;"
 *   - the nodal_diag default is now installed via
 *     hypre_BoomerAMGSetNodalDiag; previously hypre_BoomerAMGSetNodal was
 *     called a second time, silently clobbering the nodal setting
 *   - removed a duplicated "DofPointArray = NULL" assignment and the
 *     unused locals CR_rate, CR_strong_th and plot_file_name
 *--------------------------------------------------------------------------*/
void *
hypre_BoomerAMGCreate()
{
   hypre_ParAMGData  *amg_data;

   /* setup params */
   HYPRE_Int    max_levels;
   HYPRE_Int    max_coarse_size;
   HYPRE_Int    min_coarse_size;
   HYPRE_Real   strong_threshold;
   HYPRE_Real   max_row_sum;
   HYPRE_Real   trunc_factor;
   HYPRE_Real   agg_trunc_factor;
   HYPRE_Real   agg_P12_trunc_factor;
   HYPRE_Real   jacobi_trunc_threshold;
   HYPRE_Real   S_commpkg_switch;
   HYPRE_Int    interp_type;
   HYPRE_Int    sep_weight;
   HYPRE_Int    coarsen_type;
   HYPRE_Int    measure_type;
   HYPRE_Int    setup_type;
   HYPRE_Int    P_max_elmts;
   HYPRE_Int    num_functions;
   HYPRE_Int    nodal, nodal_levels, nodal_diag;
   HYPRE_Int    num_paths;
   HYPRE_Int    agg_num_levels;
   HYPRE_Int    agg_interp_type;
   HYPRE_Int    agg_P_max_elmts;
   HYPRE_Int    agg_P12_max_elmts;
   HYPRE_Int    post_interp_type;
   HYPRE_Int    seq_threshold;
   HYPRE_Int    redundant;

   /* solve params */
   HYPRE_Int    min_iter;
   HYPRE_Int    max_iter;
   HYPRE_Int    cycle_type;
   HYPRE_Real   tol;
   HYPRE_Int    num_sweeps;
   HYPRE_Int    relax_down;
   HYPRE_Int    relax_up;
   HYPRE_Int    relax_coarse;
   HYPRE_Int    relax_order;
   HYPRE_Real   relax_wt;
   HYPRE_Real   outer_wt;
   HYPRE_Real   nongalerkin_tol;
   HYPRE_Int    cheby_order;
   HYPRE_Int    cheby_eig_est;
   HYPRE_Int    cheby_variant;
   HYPRE_Int    cheby_scale;
   HYPRE_Real   cheby_eig_ratio;

   HYPRE_Int    additive;
   HYPRE_Int    mult_additive;
   HYPRE_Int    simple;
   HYPRE_Int    add_last_lvl;
   HYPRE_Real   add_trunc_factor;
   HYPRE_Int    add_P_max_elmts;
   HYPRE_Int    add_rlx_type;
   HYPRE_Real   add_rlx_wt;

   /* log info */
   HYPRE_Int    num_iterations;
   HYPRE_Int    cum_num_iterations;

   /* output params */
   HYPRE_Int    print_level;
   HYPRE_Int    logging;
   /* HYPRE_Int cycle_op_count; */
   char         log_file_name[256];
   HYPRE_Int    debug_flag;

   /*-----------------------------------------------------------------------
    * Setup default values for parameters
    *-----------------------------------------------------------------------*/

   /* setup params */
   max_levels = 25;
   max_coarse_size = 9;
   min_coarse_size = 0;
   seq_threshold = 0;
   redundant = 0;
   strong_threshold = 0.25;
   max_row_sum = 0.9;
   trunc_factor = 0.0;
   agg_trunc_factor = 0.0;
   agg_P12_trunc_factor = 0.0;
   jacobi_trunc_threshold = 0.01;
   S_commpkg_switch = 1.0;
   sep_weight = 0;
   coarsen_type = 10;
   interp_type = 6;   /* single assignment; former dead store of 0 removed */
   measure_type = 0;
   setup_type = 1;
   P_max_elmts = 4;
   agg_P_max_elmts = 0;
   agg_P12_max_elmts = 0;
   num_functions = 1;
   nodal = 0;
   nodal_levels = max_levels;
   nodal_diag = 0;
   num_paths = 1;
   agg_num_levels = 0;
   post_interp_type = 0;
   agg_interp_type = 4;

   /* solve params */
   min_iter  = 0;
   max_iter  = 20;
   cycle_type = 1;
   tol = 1.0e-7;

   num_sweeps = 1;
   relax_down = 13;
   relax_up = 14;
   relax_coarse = 9;
   relax_order = 0;
   relax_wt = 1.0;
   outer_wt = 1.0;

   cheby_order = 2;
   cheby_variant = 0;
   cheby_scale = 1;
   cheby_eig_est = 10;
   cheby_eig_ratio = .3;

   additive = -1;
   mult_additive = -1;
   simple = -1;
   add_last_lvl = -1;
   add_trunc_factor = 0.0;
   add_P_max_elmts = 0;
   add_rlx_type = 18;
   add_rlx_wt = 1.0;

   /* log info */
   num_iterations = 0;
   cum_num_iterations = 0;

   /* output params */
   print_level = 0;
   logging = 0;
   hypre_sprintf(log_file_name, "%s", "amg.out.log");
   /* cycle_op_count = 0; */
   debug_flag = 0;

   nongalerkin_tol = 0.0;

   /*HYPRE_ANNOTATION_BEGIN("BoomerAMG.create");*/

   /*-----------------------------------------------------------------------
    * Create the hypre_ParAMGData structure and return
    *-----------------------------------------------------------------------*/

   amg_data = hypre_CTAlloc(hypre_ParAMGData, 1);

   /* fields without a public setter are assigned directly */
   hypre_ParAMGDataMaxLevels(amg_data) = max_levels;
   hypre_ParAMGDataUserCoarseRelaxType(amg_data) = 9;
   hypre_ParAMGDataUserRelaxType(amg_data) = -1;
   hypre_ParAMGDataUserNumSweeps(amg_data) = -1;
   hypre_ParAMGDataUserRelaxWeight(amg_data) = relax_wt;
   hypre_ParAMGDataOuterWt(amg_data) = outer_wt;
   hypre_BoomerAMGSetMaxCoarseSize(amg_data, max_coarse_size);
   hypre_BoomerAMGSetMinCoarseSize(amg_data, min_coarse_size);
   hypre_BoomerAMGSetStrongThreshold(amg_data, strong_threshold);
   hypre_BoomerAMGSetMaxRowSum(amg_data, max_row_sum);
   hypre_BoomerAMGSetTruncFactor(amg_data, trunc_factor);
   hypre_BoomerAMGSetAggTruncFactor(amg_data, agg_trunc_factor);
   hypre_BoomerAMGSetAggP12TruncFactor(amg_data, agg_P12_trunc_factor);
   hypre_BoomerAMGSetJacobiTruncThreshold(amg_data, jacobi_trunc_threshold);
   hypre_BoomerAMGSetSCommPkgSwitch(amg_data, S_commpkg_switch);
   hypre_BoomerAMGSetSepWeight(amg_data, sep_weight);
   hypre_BoomerAMGSetMeasureType(amg_data, measure_type);
   hypre_BoomerAMGSetCoarsenType(amg_data, coarsen_type);
   hypre_BoomerAMGSetInterpType(amg_data, interp_type);
   hypre_BoomerAMGSetSetupType(amg_data, setup_type);
   hypre_BoomerAMGSetPMaxElmts(amg_data, P_max_elmts);
   hypre_BoomerAMGSetAggPMaxElmts(amg_data, agg_P_max_elmts);
   hypre_BoomerAMGSetAggP12MaxElmts(amg_data, agg_P12_max_elmts);
   hypre_BoomerAMGSetNumFunctions(amg_data, num_functions);
   hypre_BoomerAMGSetNodal(amg_data, nodal);
   hypre_BoomerAMGSetNodalLevels(amg_data, nodal_levels);
   /* BUG FIX: previously called hypre_BoomerAMGSetNodal a second time,
      overwriting the nodal setting with nodal_diag */
   hypre_BoomerAMGSetNodalDiag(amg_data, nodal_diag);
   hypre_BoomerAMGSetNumPaths(amg_data, num_paths);
   hypre_BoomerAMGSetAggNumLevels(amg_data, agg_num_levels);
   hypre_BoomerAMGSetAggInterpType(amg_data, agg_interp_type);
   hypre_BoomerAMGSetPostInterpType(amg_data, post_interp_type);
   hypre_BoomerAMGSetMinIter(amg_data, min_iter);
   hypre_BoomerAMGSetMaxIter(amg_data, max_iter);
   hypre_BoomerAMGSetCycleType(amg_data, cycle_type);
   hypre_BoomerAMGSetTol(amg_data, tol);
   hypre_BoomerAMGSetNumSweeps(amg_data, num_sweeps);
   hypre_BoomerAMGSetCycleRelaxType(amg_data, relax_down, 1);
   hypre_BoomerAMGSetCycleRelaxType(amg_data, relax_up, 2);
   hypre_BoomerAMGSetCycleRelaxType(amg_data, relax_coarse, 3);
   hypre_BoomerAMGSetRelaxOrder(amg_data, relax_order);
   hypre_BoomerAMGSetRelaxWt(amg_data, relax_wt);
   hypre_BoomerAMGSetOuterWt(amg_data, outer_wt);
   hypre_BoomerAMGSetChebyOrder(amg_data, cheby_order);
   hypre_BoomerAMGSetChebyFraction(amg_data, cheby_eig_ratio);
   hypre_BoomerAMGSetChebyEigEst(amg_data, cheby_eig_est);
   hypre_BoomerAMGSetChebyVariant(amg_data, cheby_variant);
   hypre_BoomerAMGSetChebyScale(amg_data, cheby_scale);
   hypre_BoomerAMGSetNumIterations(amg_data, num_iterations);

   /* additive-cycle parameters */
   hypre_BoomerAMGSetAdditive(amg_data, additive);
   hypre_BoomerAMGSetMultAdditive(amg_data, mult_additive);
   hypre_BoomerAMGSetSimple(amg_data, simple);
   hypre_BoomerAMGSetMultAddPMaxElmts(amg_data, add_P_max_elmts);
   hypre_BoomerAMGSetMultAddTruncFactor(amg_data, add_trunc_factor);
   hypre_BoomerAMGSetAddRelaxType(amg_data, add_rlx_type);
   hypre_BoomerAMGSetAddRelaxWt(amg_data, add_rlx_wt);
   hypre_ParAMGDataAddLastLvl(amg_data) = add_last_lvl;
   hypre_ParAMGDataLambda(amg_data) = NULL;
   hypre_ParAMGDataXtilde(amg_data) = NULL;
   hypre_ParAMGDataRtilde(amg_data) = NULL;
   hypre_ParAMGDataDinv(amg_data) = NULL;

#ifdef CUMNUMIT
   hypre_ParAMGDataCumNumIterations(amg_data) = cum_num_iterations;
#endif
   hypre_BoomerAMGSetPrintLevel(amg_data, print_level);
   hypre_BoomerAMGSetLogging(amg_data, logging);
   hypre_BoomerAMGSetPrintFileName(amg_data, log_file_name);
   hypre_BoomerAMGSetDebugFlag(amg_data, debug_flag);
   hypre_BoomerAMGSetRestriction(amg_data, 0);

   /* arrays built later by the setup phase; CTAlloc already zero-fills,
      these explicit NULLs just document the invariant */
   hypre_ParAMGDataAArray(amg_data) = NULL;
   hypre_ParAMGDataPArray(amg_data) = NULL;
   hypre_ParAMGDataRArray(amg_data) = NULL;
   hypre_ParAMGDataCFMarkerArray(amg_data) = NULL;
   hypre_ParAMGDataVtemp(amg_data) = NULL;
   hypre_ParAMGDataRtemp(amg_data) = NULL;
   hypre_ParAMGDataPtemp(amg_data) = NULL;
   hypre_ParAMGDataZtemp(amg_data) = NULL;
   hypre_ParAMGDataFArray(amg_data) = NULL;
   hypre_ParAMGDataUArray(amg_data) = NULL;
   hypre_ParAMGDataDofFunc(amg_data) = NULL;
   hypre_ParAMGDataDofFuncArray(amg_data) = NULL;
   hypre_ParAMGDataDofPointArray(amg_data) = NULL;
   hypre_ParAMGDataPointDofMapArray(amg_data) = NULL;
   hypre_ParAMGDataL1Norms(amg_data) = NULL;

   /* Stuff for Chebyshev smoothing */
   hypre_ParAMGDataMaxEigEst(amg_data) = NULL;
   hypre_ParAMGDataMinEigEst(amg_data) = NULL;
   hypre_ParAMGDataChebyDS(amg_data) = NULL;
   hypre_ParAMGDataChebyCoefs(amg_data) = NULL;

   /* for redundant coarse grid solve */
   hypre_ParAMGDataSeqThreshold(amg_data) = seq_threshold;
   hypre_ParAMGDataRedundant(amg_data) = redundant;
   hypre_ParAMGDataCoarseSolver(amg_data) = NULL;
   hypre_ParAMGDataACoarse(amg_data) = NULL;
   hypre_ParAMGDataFCoarse(amg_data) = NULL;
   hypre_ParAMGDataUCoarse(amg_data) = NULL;
   hypre_ParAMGDataNewComm(amg_data) = hypre_MPI_COMM_NULL;

   /* for Gaussian elimination coarse grid solve */
   hypre_ParAMGDataAMat(amg_data) = NULL;
   hypre_ParAMGDataBVec(amg_data) = NULL;
   hypre_ParAMGDataCommInfo(amg_data) = NULL;

   hypre_ParAMGDataNonGalerkinTol(amg_data) = nongalerkin_tol;
   hypre_ParAMGDataNonGalTolArray(amg_data) = NULL;

   hypre_ParAMGDataRAP2(amg_data) = 0;
   hypre_ParAMGDataKeepTranspose(amg_data) = 0;
   /*HYPRE_ANNOTATION_END("BoomerAMG.create");*/

   return (void *) amg_data;
}
/*--------------------------------------------------------------------------
* hypre_BoomerAMGDestroy
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGDestroy
 *
 * Release every array, matrix, vector, sub-solver and communicator owned
 * by the hypre_ParAMGData object, then free the object itself.  Safe to
 * call on a solver that was created but never set up (all per-level
 * pointers start out NULL).  Returns hypre_error_flag.
 *
 * Fix: removed an unused, shadowing redeclaration of num_levels inside
 * the grid_relax_type branch.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGDestroy( void *data )
{
   hypre_ParAMGData  *amg_data = (hypre_ParAMGData*) data;
   HYPRE_Int num_levels = hypre_ParAMGDataNumLevels(amg_data);
   void *amg = hypre_ParAMGDataCoarseSolver(amg_data);
   MPI_Comm new_comm = hypre_ParAMGDataNewComm(amg_data);
   HYPRE_Int i;
   HYPRE_Int *grid_relax_type = hypre_ParAMGDataGridRelaxType(amg_data);

   /*HYPRE_ANNOTATION_BEGIN("BoomerAMG.destroy");*/

   /* scalar parameter arrays (one entry per level or per cycle position) */
   if (hypre_ParAMGDataMaxEigEst(amg_data))
   {
      hypre_TFree(hypre_ParAMGDataMaxEigEst(amg_data));
      hypre_ParAMGDataMaxEigEst(amg_data) = NULL;
   }
   if (hypre_ParAMGDataMinEigEst(amg_data))
   {
      hypre_TFree(hypre_ParAMGDataMinEigEst(amg_data));
      hypre_ParAMGDataMinEigEst(amg_data) = NULL;
   }
   if (hypre_ParAMGDataNumGridSweeps(amg_data))
   {
      hypre_TFree (hypre_ParAMGDataNumGridSweeps(amg_data));
      hypre_ParAMGDataNumGridSweeps(amg_data) = NULL;
   }
   if (grid_relax_type)
   {
      hypre_TFree (hypre_ParAMGDataGridRelaxType(amg_data));
      hypre_ParAMGDataGridRelaxType(amg_data) = NULL;
   }
   if (hypre_ParAMGDataRelaxWeight(amg_data))
   {
      hypre_TFree (hypre_ParAMGDataRelaxWeight(amg_data));
      hypre_ParAMGDataRelaxWeight(amg_data) = NULL;
   }
   if (hypre_ParAMGDataOmega(amg_data))
   {
      hypre_TFree (hypre_ParAMGDataOmega(amg_data));
      hypre_ParAMGDataOmega(amg_data) = NULL;
   }
   if (hypre_ParAMGDataNonGalTolArray(amg_data))
   {
      hypre_TFree (hypre_ParAMGDataNonGalTolArray(amg_data));
      hypre_ParAMGDataNonGalTolArray(amg_data) = NULL;
   }
   if (hypre_ParAMGDataDofFunc(amg_data))
   {
      hypre_TFree (hypre_ParAMGDataDofFunc(amg_data));
      hypre_ParAMGDataDofFunc(amg_data) = NULL;
   }
   if (hypre_ParAMGDataGridRelaxPoints(amg_data))
   {
      for (i=0; i < 4; i++)
         hypre_TFree (hypre_ParAMGDataGridRelaxPoints(amg_data)[i]);
      hypre_TFree (hypre_ParAMGDataGridRelaxPoints(amg_data));
      hypre_ParAMGDataGridRelaxPoints(amg_data) = NULL;
   }

   /* per-level vectors, operators and interpolation/CF data
      (level 0 entries of A, F, U alias the user's objects and are
      not destroyed here) */
   for (i=1; i < num_levels; i++)
   {
      hypre_ParVectorDestroy(hypre_ParAMGDataFArray(amg_data)[i]);
      hypre_ParVectorDestroy(hypre_ParAMGDataUArray(amg_data)[i]);

      if (hypre_ParAMGDataAArray(amg_data)[i])
         hypre_ParCSRMatrixDestroy(hypre_ParAMGDataAArray(amg_data)[i]);

      if (hypre_ParAMGDataPArray(amg_data)[i-1])
         hypre_ParCSRMatrixDestroy(hypre_ParAMGDataPArray(amg_data)[i-1]);

      hypre_TFree(hypre_ParAMGDataCFMarkerArray(amg_data)[i-1]);
   }

   /* additive-cycle workspace */
   if (hypre_ParAMGDataLambda(amg_data))
      hypre_ParCSRMatrixDestroy(hypre_ParAMGDataLambda(amg_data));

   if (hypre_ParAMGDataAtilde(amg_data))
   {
      /* Atilde's diag/offd are owned here but its row starts are not,
         so free the parts rather than the full ParCSR destructor */
      hypre_ParCSRMatrix *Atilde = hypre_ParAMGDataAtilde(amg_data);
      hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(Atilde));
      hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(Atilde));
      hypre_TFree (Atilde);
   }

   if (hypre_ParAMGDataXtilde(amg_data))
      hypre_ParVectorDestroy(hypre_ParAMGDataXtilde(amg_data));

   if (hypre_ParAMGDataRtilde(amg_data))
      hypre_ParVectorDestroy(hypre_ParAMGDataRtilde(amg_data));

   /* smoother data */
   if (hypre_ParAMGDataL1Norms(amg_data))
   {
      for (i=0; i < num_levels; i++)
         if (hypre_ParAMGDataL1Norms(amg_data)[i])
            hypre_TFree(hypre_ParAMGDataL1Norms(amg_data)[i]);
      hypre_TFree(hypre_ParAMGDataL1Norms(amg_data));
   }

   if (hypre_ParAMGDataChebyCoefs(amg_data))
   {
      for (i=0; i < num_levels; i++)
         if (hypre_ParAMGDataChebyCoefs(amg_data)[i])
            hypre_TFree(hypre_ParAMGDataChebyCoefs(amg_data)[i]);
      hypre_TFree(hypre_ParAMGDataChebyCoefs(amg_data));
   }

   if (hypre_ParAMGDataChebyDS(amg_data))
   {
      for (i=0; i < num_levels; i++)
         if (hypre_ParAMGDataChebyDS(amg_data)[i])
            hypre_TFree(hypre_ParAMGDataChebyDS(amg_data)[i]);
      hypre_TFree(hypre_ParAMGDataChebyDS(amg_data));
   }

   if (hypre_ParAMGDataDinv(amg_data))
      hypre_TFree(hypre_ParAMGDataDinv(amg_data));

   /* see comments in par_coarsen.c regarding special case for CF_marker */
   if (num_levels == 1)
   {
      hypre_TFree(hypre_ParAMGDataCFMarkerArray(amg_data)[0]);
   }
   hypre_ParVectorDestroy(hypre_ParAMGDataVtemp(amg_data));
   hypre_TFree(hypre_ParAMGDataFArray(amg_data));
   hypre_TFree(hypre_ParAMGDataUArray(amg_data));
   hypre_TFree(hypre_ParAMGDataAArray(amg_data));
   hypre_TFree(hypre_ParAMGDataPArray(amg_data));
   hypre_TFree(hypre_ParAMGDataCFMarkerArray(amg_data));
   if (hypre_ParAMGDataRtemp(amg_data))
      hypre_ParVectorDestroy(hypre_ParAMGDataRtemp(amg_data));
   if (hypre_ParAMGDataPtemp(amg_data))
      hypre_ParVectorDestroy(hypre_ParAMGDataPtemp(amg_data));
   if (hypre_ParAMGDataZtemp(amg_data))
      hypre_ParVectorDestroy(hypre_ParAMGDataZtemp(amg_data));

   if (hypre_ParAMGDataDofFuncArray(amg_data))
   {
      for (i=1; i < num_levels; i++)
         hypre_TFree(hypre_ParAMGDataDofFuncArray(amg_data)[i]);
      hypre_TFree(hypre_ParAMGDataDofFuncArray(amg_data));
      hypre_ParAMGDataDofFuncArray(amg_data) = NULL;
   }
   if (hypre_ParAMGDataRestriction(amg_data))
   {
      hypre_TFree(hypre_ParAMGDataRArray(amg_data));
      hypre_ParAMGDataRArray(amg_data) = NULL;
   }
   if (hypre_ParAMGDataDofPointArray(amg_data))
   {
      for (i=0; i < num_levels; i++)
         hypre_TFree(hypre_ParAMGDataDofPointArray(amg_data)[i]);
      hypre_TFree(hypre_ParAMGDataDofPointArray(amg_data));
      hypre_ParAMGDataDofPointArray(amg_data) = NULL;
   }
   if (hypre_ParAMGDataPointDofMapArray(amg_data))
   {
      for (i=0; i < num_levels; i++)
         hypre_TFree(hypre_ParAMGDataPointDofMapArray(amg_data)[i]);
      hypre_TFree(hypre_ParAMGDataPointDofMapArray(amg_data));
      hypre_ParAMGDataPointDofMapArray(amg_data) = NULL;
   }

   if ( hypre_ParAMGDataResidual(amg_data) ) {
      /* jfp: was... hypre_TFree( hypre_ParAMGDataResidual(amg_data) );*/
      hypre_ParVectorDestroy( hypre_ParAMGDataResidual(amg_data) );
      hypre_ParAMGDataResidual(amg_data) = NULL;
   }

   /* recursively destroy the redundant coarse-grid sub-solver */
   if (amg) hypre_BoomerAMGDestroy(amg);

   if (hypre_ParAMGDataACoarse(amg_data))
      hypre_ParCSRMatrixDestroy(hypre_ParAMGDataACoarse(amg_data));

   if (hypre_ParAMGDataUCoarse(amg_data))
      hypre_ParVectorDestroy(hypre_ParAMGDataUCoarse(amg_data));

   if (hypre_ParAMGDataFCoarse(amg_data))
      hypre_ParVectorDestroy(hypre_ParAMGDataFCoarse(amg_data));

   if (hypre_ParAMGDataAMat(amg_data)) hypre_TFree(hypre_ParAMGDataAMat(amg_data));
   if (hypre_ParAMGDataBVec(amg_data)) hypre_TFree(hypre_ParAMGDataBVec(amg_data));
   if (hypre_ParAMGDataCommInfo(amg_data)) hypre_TFree(hypre_ParAMGDataCommInfo(amg_data));

   if (new_comm != hypre_MPI_COMM_NULL)
   {
      hypre_MPI_Comm_free (&new_comm);
   }

   hypre_TFree(amg_data);
   /*HYPRE_ANNOTATION_END("BoomerAMG.destroy");*/
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Routines to set the setup phase parameters
*--------------------------------------------------------------------------*/
/* Select the restriction operator variant (0 = transpose of interpolation). */
HYPRE_Int
hypre_BoomerAMGSetRestriction( void *data, HYPRE_Int restr_par )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   hypre_ParAMGDataRestriction(amg) = restr_par;
   return hypre_error_flag;
}
/* Set the maximum number of multigrid levels (>= 1).  If the limit grows
   and per-level parameter arrays already exist, extend each one and fill
   the new entries with the corresponding user default. */
HYPRE_Int
hypre_BoomerAMGSetMaxLevels( void *data, HYPRE_Int max_levels )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;
   HYPRE_Int prev_max;

   if (!amg)           { hypre_error_in_arg(1); return hypre_error_flag; }
   if (max_levels < 1) { hypre_error_in_arg(2); return hypre_error_flag; }

   prev_max = hypre_ParAMGDataMaxLevels(amg);
   if (prev_max < max_levels)
   {
      HYPRE_Int   lvl;
      HYPRE_Real *arr;
      HYPRE_Real  fill;

      arr = hypre_ParAMGDataRelaxWeight(amg);
      if (arr)
      {
         fill = hypre_ParAMGDataUserRelaxWeight(amg);
         arr = hypre_TReAlloc(arr, HYPRE_Real, max_levels);
         for (lvl = prev_max; lvl < max_levels; lvl++)
            arr[lvl] = fill;
         hypre_ParAMGDataRelaxWeight(amg) = arr;
      }

      arr = hypre_ParAMGDataOmega(amg);
      if (arr)
      {
         fill = hypre_ParAMGDataOuterWt(amg);
         arr = hypre_TReAlloc(arr, HYPRE_Real, max_levels);
         for (lvl = prev_max; lvl < max_levels; lvl++)
            arr[lvl] = fill;
         hypre_ParAMGDataOmega(amg) = arr;
      }

      arr = hypre_ParAMGDataNonGalTolArray(amg);
      if (arr)
      {
         fill = hypre_ParAMGDataNonGalerkinTol(amg);
         arr = hypre_TReAlloc(arr, HYPRE_Real, max_levels);
         for (lvl = prev_max; lvl < max_levels; lvl++)
            arr[lvl] = fill;
         hypre_ParAMGDataNonGalTolArray(amg) = arr;
      }
   }

   hypre_ParAMGDataMaxLevels(amg) = max_levels;
   return hypre_error_flag;
}
/* Retrieve the maximum number of multigrid levels. */
HYPRE_Int
hypre_BoomerAMGGetMaxLevels( void *data, HYPRE_Int * max_levels )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   *max_levels = hypre_ParAMGDataMaxLevels(amg);
   return hypre_error_flag;
}
/* Set the maximum allowed size of the coarsest grid (must be >= 1). */
HYPRE_Int
hypre_BoomerAMGSetMaxCoarseSize( void *data, HYPRE_Int max_coarse_size )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg)                { hypre_error_in_arg(1); return hypre_error_flag; }
   if (max_coarse_size < 1) { hypre_error_in_arg(2); return hypre_error_flag; }

   hypre_ParAMGDataMaxCoarseSize(amg) = max_coarse_size;
   return hypre_error_flag;
}
/* Retrieve the maximum allowed size of the coarsest grid. */
HYPRE_Int
hypre_BoomerAMGGetMaxCoarseSize( void *data, HYPRE_Int * max_coarse_size )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   *max_coarse_size = hypre_ParAMGDataMaxCoarseSize(amg);
   return hypre_error_flag;
}
/* Set the minimum allowed size of the coarsest grid (must be >= 0). */
HYPRE_Int
hypre_BoomerAMGSetMinCoarseSize( void *data, HYPRE_Int min_coarse_size )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg)                { hypre_error_in_arg(1); return hypre_error_flag; }
   if (min_coarse_size < 0) { hypre_error_in_arg(2); return hypre_error_flag; }

   hypre_ParAMGDataMinCoarseSize(amg) = min_coarse_size;
   return hypre_error_flag;
}
/* Retrieve the minimum allowed size of the coarsest grid. */
HYPRE_Int
hypre_BoomerAMGGetMinCoarseSize( void *data, HYPRE_Int * min_coarse_size )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   *min_coarse_size = hypre_ParAMGDataMinCoarseSize(amg);
   return hypre_error_flag;
}
/* Set the grid size below which a sequential coarse solve is used (>= 0). */
HYPRE_Int
hypre_BoomerAMGSetSeqThreshold( void *data, HYPRE_Int seq_threshold )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg)               { hypre_error_in_arg(1); return hypre_error_flag; }
   if (seq_threshold < 0)  { hypre_error_in_arg(2); return hypre_error_flag; }

   hypre_ParAMGDataSeqThreshold(amg) = seq_threshold;
   return hypre_error_flag;
}
/* Retrieve the sequential coarse-solve threshold. */
HYPRE_Int
hypre_BoomerAMGGetSeqThreshold( void *data, HYPRE_Int * seq_threshold )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   *seq_threshold = hypre_ParAMGDataSeqThreshold(amg);
   return hypre_error_flag;
}
/* Enable/disable the redundant coarse-grid solve (value must be >= 0). */
HYPRE_Int
hypre_BoomerAMGSetRedundant( void *data, HYPRE_Int redundant )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg)          { hypre_error_in_arg(1); return hypre_error_flag; }
   if (redundant < 0) { hypre_error_in_arg(2); return hypre_error_flag; }

   hypre_ParAMGDataRedundant(amg) = redundant;
   return hypre_error_flag;
}
/* Retrieve the redundant coarse-grid solve setting. */
HYPRE_Int
hypre_BoomerAMGGetRedundant( void *data, HYPRE_Int * redundant )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   *redundant = hypre_ParAMGDataRedundant(amg);
   return hypre_error_flag;
}
/* Set the strength-of-connection threshold; must lie in [0, 1]. */
HYPRE_Int
hypre_BoomerAMGSetStrongThreshold( void *data, HYPRE_Real strong_threshold )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }
   if (strong_threshold < 0 || strong_threshold > 1)
             { hypre_error_in_arg(2); return hypre_error_flag; }

   hypre_ParAMGDataStrongThreshold(amg) = strong_threshold;
   return hypre_error_flag;
}
/* Retrieve the strength-of-connection threshold. */
HYPRE_Int
hypre_BoomerAMGGetStrongThreshold( void *data, HYPRE_Real * strong_threshold )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   *strong_threshold = hypre_ParAMGDataStrongThreshold(amg);
   return hypre_error_flag;
}
/* Set the maximum row-sum parameter; must lie in (0, 1]. */
HYPRE_Int
hypre_BoomerAMGSetMaxRowSum( void *data, HYPRE_Real max_row_sum )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }
   if (max_row_sum <= 0 || max_row_sum > 1)
             { hypre_error_in_arg(2); return hypre_error_flag; }

   hypre_ParAMGDataMaxRowSum(amg) = max_row_sum;
   return hypre_error_flag;
}
/* Retrieve the maximum row-sum parameter. */
HYPRE_Int
hypre_BoomerAMGGetMaxRowSum( void *data, HYPRE_Real * max_row_sum )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   *max_row_sum = hypre_ParAMGDataMaxRowSum(amg);
   return hypre_error_flag;
}
/* Set the interpolation truncation factor; must lie in [0, 1). */
HYPRE_Int
hypre_BoomerAMGSetTruncFactor( void *data, HYPRE_Real trunc_factor )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }
   if (trunc_factor < 0 || trunc_factor >= 1)
             { hypre_error_in_arg(2); return hypre_error_flag; }

   hypre_ParAMGDataTruncFactor(amg) = trunc_factor;
   return hypre_error_flag;
}
/* Retrieve the interpolation truncation factor. */
HYPRE_Int
hypre_BoomerAMGGetTruncFactor( void *data, HYPRE_Real * trunc_factor )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   *trunc_factor = hypre_ParAMGDataTruncFactor(amg);
   return hypre_error_flag;
}
/* Set the maximum number of elements per interpolation row (>= 0). */
HYPRE_Int
hypre_BoomerAMGSetPMaxElmts( void *data, HYPRE_Int P_max_elmts )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg)            { hypre_error_in_arg(1); return hypre_error_flag; }
   if (P_max_elmts < 0) { hypre_error_in_arg(2); return hypre_error_flag; }

   hypre_ParAMGDataPMaxElmts(amg) = P_max_elmts;
   return hypre_error_flag;
}
/* Retrieve the maximum number of elements per interpolation row. */
HYPRE_Int
hypre_BoomerAMGGetPMaxElmts( void *data, HYPRE_Int * P_max_elmts )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   *P_max_elmts = hypre_ParAMGDataPMaxElmts(amg);
   return hypre_error_flag;
}
/* Set the Jacobi interpolation truncation threshold; must lie in [0, 1). */
HYPRE_Int
hypre_BoomerAMGSetJacobiTruncThreshold( void *data, HYPRE_Real jacobi_trunc_threshold )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }
   if (jacobi_trunc_threshold < 0 || jacobi_trunc_threshold >= 1)
             { hypre_error_in_arg(2); return hypre_error_flag; }

   hypre_ParAMGDataJacobiTruncThreshold(amg) = jacobi_trunc_threshold;
   return hypre_error_flag;
}
/* Retrieve the Jacobi interpolation truncation threshold. */
HYPRE_Int
hypre_BoomerAMGGetJacobiTruncThreshold( void *data, HYPRE_Real * jacobi_trunc_threshold )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   *jacobi_trunc_threshold = hypre_ParAMGDataJacobiTruncThreshold(amg);
   return hypre_error_flag;
}
/* Set the post-interpolation type (>= 0). */
HYPRE_Int
hypre_BoomerAMGSetPostInterpType( void *data, HYPRE_Int post_interp_type )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg)                 { hypre_error_in_arg(1); return hypre_error_flag; }
   if (post_interp_type < 0) { hypre_error_in_arg(2); return hypre_error_flag; }

   hypre_ParAMGDataPostInterpType(amg) = post_interp_type;
   return hypre_error_flag;
}
/* Retrieve the post-interpolation type. */
HYPRE_Int
hypre_BoomerAMGGetPostInterpType( void *data, HYPRE_Int * post_interp_type )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   *post_interp_type = hypre_ParAMGDataPostInterpType(amg);
   return hypre_error_flag;
}
/* Set the switch controlling communication-package creation for S. */
HYPRE_Int
hypre_BoomerAMGSetSCommPkgSwitch( void *data, HYPRE_Real S_commpkg_switch )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   hypre_ParAMGDataSCommPkgSwitch(amg) = S_commpkg_switch;
   return hypre_error_flag;
}
/* Retrieve the S communication-package switch. */
HYPRE_Int
hypre_BoomerAMGGetSCommPkgSwitch( void *data, HYPRE_Real * S_commpkg_switch )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   *S_commpkg_switch = hypre_ParAMGDataSCommPkgSwitch(amg);
   return hypre_error_flag;
}
/* Set the interpolation operator type; valid values are 0..25. */
HYPRE_Int
hypre_BoomerAMGSetInterpType( void *data, HYPRE_Int interp_type )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }
   if (interp_type < 0 || interp_type > 25)
             { hypre_error_in_arg(2); return hypre_error_flag; }

   hypre_ParAMGDataInterpType(amg) = interp_type;
   return hypre_error_flag;
}
/* Retrieve the interpolation operator type. */
HYPRE_Int
hypre_BoomerAMGGetInterpType( void *data, HYPRE_Int * interp_type )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   *interp_type = hypre_ParAMGDataInterpType(amg);
   return hypre_error_flag;
}
/* Set the separation-of-weights flag used by some interpolation routines. */
HYPRE_Int
hypre_BoomerAMGSetSepWeight( void *data, HYPRE_Int sep_weight )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   hypre_ParAMGDataSepWeight(amg) = sep_weight;
   return hypre_error_flag;
}
/* Set the minimum number of solve iterations. */
HYPRE_Int
hypre_BoomerAMGSetMinIter( void *data, HYPRE_Int min_iter )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   hypre_ParAMGDataMinIter(amg) = min_iter;
   return hypre_error_flag;
}
/* Retrieve the minimum number of solve iterations. */
HYPRE_Int
hypre_BoomerAMGGetMinIter( void *data, HYPRE_Int * min_iter )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   *min_iter = hypre_ParAMGDataMinIter(amg);
   return hypre_error_flag;
}
/* Set the maximum number of solve iterations (>= 0). */
HYPRE_Int
hypre_BoomerAMGSetMaxIter( void *data, HYPRE_Int max_iter )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg)         { hypre_error_in_arg(1); return hypre_error_flag; }
   if (max_iter < 0) { hypre_error_in_arg(2); return hypre_error_flag; }

   hypre_ParAMGDataMaxIter(amg) = max_iter;
   return hypre_error_flag;
}
/* Retrieve the maximum number of solve iterations. */
HYPRE_Int
hypre_BoomerAMGGetMaxIter( void *data, HYPRE_Int * max_iter )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   *max_iter = hypre_ParAMGDataMaxIter(amg);
   return hypre_error_flag;
}
/* Select the coarsening algorithm. */
HYPRE_Int
hypre_BoomerAMGSetCoarsenType( void *data, HYPRE_Int coarsen_type )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   hypre_ParAMGDataCoarsenType(amg) = coarsen_type;
   return hypre_error_flag;
}
/* Retrieve the coarsening algorithm selector. */
HYPRE_Int
hypre_BoomerAMGGetCoarsenType( void *data, HYPRE_Int * coarsen_type )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   *coarsen_type = hypre_ParAMGDataCoarsenType(amg);
   return hypre_error_flag;
}
/* Select local (0) or global measures for coarsening. */
HYPRE_Int
hypre_BoomerAMGSetMeasureType( void *data, HYPRE_Int measure_type )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   hypre_ParAMGDataMeasureType(amg) = measure_type;
   return hypre_error_flag;
}
/* Retrieve the coarsening measure type. */
HYPRE_Int
hypre_BoomerAMGGetMeasureType( void *data, HYPRE_Int * measure_type )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   *measure_type = hypre_ParAMGDataMeasureType(amg);
   return hypre_error_flag;
}
/* Set the setup type. */
HYPRE_Int
hypre_BoomerAMGSetSetupType( void *data, HYPRE_Int setup_type )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   hypre_ParAMGDataSetupType(amg) = setup_type;
   return hypre_error_flag;
}
/* Retrieve the setup type. */
HYPRE_Int
hypre_BoomerAMGGetSetupType( void *data, HYPRE_Int * setup_type )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   *setup_type = hypre_ParAMGDataSetupType(amg);
   return hypre_error_flag;
}
/* Set the multigrid cycle type; valid values are 0..2 (e.g. V/W cycle). */
HYPRE_Int
hypre_BoomerAMGSetCycleType( void *data, HYPRE_Int cycle_type )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }
   if (cycle_type < 0 || cycle_type > 2)
             { hypre_error_in_arg(2); return hypre_error_flag; }

   hypre_ParAMGDataCycleType(amg) = cycle_type;
   return hypre_error_flag;
}
/* Retrieve the multigrid cycle type. */
HYPRE_Int
hypre_BoomerAMGGetCycleType( void *data, HYPRE_Int * cycle_type )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   *cycle_type = hypre_ParAMGDataCycleType(amg);
   return hypre_error_flag;
}
/* Set the relative convergence tolerance; must lie in [0, 1]. */
HYPRE_Int
hypre_BoomerAMGSetTol( void *data, HYPRE_Real tol )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg)                { hypre_error_in_arg(1); return hypre_error_flag; }
   if (tol < 0 || tol > 1)  { hypre_error_in_arg(2); return hypre_error_flag; }

   hypre_ParAMGDataTol(amg) = tol;
   return hypre_error_flag;
}
/* Retrieve the relative convergence tolerance. */
HYPRE_Int
hypre_BoomerAMGGetTol( void *data, HYPRE_Real * tol )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg) { hypre_error_in_arg(1); return hypre_error_flag; }

   *tol = hypre_ParAMGDataTol(amg);
   return hypre_error_flag;
}
/* The "Get" function for SetNumSweeps is GetCycleNumSweeps. */
/* Set the same number of relaxation sweeps (>= 1) for the down, up and
   fine cycle positions; the coarsest level always gets a single sweep.
   Allocates the 4-entry num_grid_sweeps array on first use. */
HYPRE_Int
hypre_BoomerAMGSetNumSweeps( void *data, HYPRE_Int num_sweeps )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;
   HYPRE_Int *sweeps;
   HYPRE_Int  pos;

   if (!amg)           { hypre_error_in_arg(1); return hypre_error_flag; }
   if (num_sweeps < 1) { hypre_error_in_arg(2); return hypre_error_flag; }

   if (hypre_ParAMGDataNumGridSweeps(amg) == NULL)
      hypre_ParAMGDataNumGridSweeps(amg) = hypre_CTAlloc(HYPRE_Int,4);

   sweeps = hypre_ParAMGDataNumGridSweeps(amg);
   for (pos = 0; pos < 3; pos++)
      sweeps[pos] = num_sweeps;
   sweeps[3] = 1;   /* coarsest level: always one sweep */

   hypre_ParAMGDataUserNumSweeps(amg) = num_sweeps;
   return hypre_error_flag;
}
/* Set the number of relaxation sweeps (>= 0) for one cycle position
   k (1 = down, 2 = up, 3 = coarsest).  Allocates the num_grid_sweeps
   array, filled with 1s, on first use. */
HYPRE_Int
hypre_BoomerAMGSetCycleNumSweeps( void *data, HYPRE_Int num_sweeps, HYPRE_Int k )
{
   hypre_ParAMGData *amg = (hypre_ParAMGData*) data;

   if (!amg)           { hypre_error_in_arg(1); return hypre_error_flag; }
   if (num_sweeps < 0) { hypre_error_in_arg(2); return hypre_error_flag; }
   if (k < 1 || k > 3) { hypre_error_in_arg(3); return hypre_error_flag; }

   if (hypre_ParAMGDataNumGridSweeps(amg) == NULL)
   {
      HYPRE_Int  pos;
      HYPRE_Int *sweeps = hypre_CTAlloc(HYPRE_Int,4);

      for (pos = 0; pos < 4; pos++)
         sweeps[pos] = 1;
      hypre_ParAMGDataNumGridSweeps(amg) = sweeps;
   }

   hypre_ParAMGDataNumGridSweeps(amg)[k] = num_sweeps;
   return hypre_error_flag;
}
HYPRE_Int
hypre_BoomerAMGGetCycleNumSweeps( void *data,
HYPRE_Int * num_sweeps,
HYPRE_Int k )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (k < 1 || k > 3)
{
hypre_error_in_arg(3);
return hypre_error_flag;
}
if (hypre_ParAMGDataNumGridSweeps(amg_data) == NULL)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*num_sweeps = hypre_ParAMGDataNumGridSweeps(amg_data)[k];
return hypre_error_flag;
}
/* Install a caller-provided sweep array, freeing any existing one.
NOTE: the solver takes ownership of the pointer (it is freed here on
replacement) -- the caller must not free or reuse it. */
HYPRE_Int
hypre_BoomerAMGSetNumGridSweeps( void *data,
HYPRE_Int *num_grid_sweeps )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (!num_grid_sweeps)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
if (hypre_ParAMGDataNumGridSweeps(amg_data))
hypre_TFree(hypre_ParAMGDataNumGridSweeps(amg_data));
hypre_ParAMGDataNumGridSweeps(amg_data) = num_grid_sweeps;
return hypre_error_flag;
}
/* Return the internal sweep array pointer (may be NULL; not a copy). */
HYPRE_Int
hypre_BoomerAMGGetNumGridSweeps( void *data,
HYPRE_Int ** num_grid_sweeps )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*num_grid_sweeps = hypre_ParAMGDataNumGridSweeps(amg_data);
return hypre_error_flag;
}
/* The "Get" function for SetRelaxType is GetCycleRelaxType. */
/* Set the same relaxation type on grid positions 0..2; position 3 (the
coarsest grid) is forced to type 9. Allocates the 4-entry array on
first use. relax_type must be >= 0. */
HYPRE_Int
hypre_BoomerAMGSetRelaxType( void *data,
HYPRE_Int relax_type )
{
HYPRE_Int i;
HYPRE_Int *grid_relax_type;
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (relax_type < 0)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
if (hypre_ParAMGDataGridRelaxType(amg_data) == NULL)
hypre_ParAMGDataGridRelaxType(amg_data) = hypre_CTAlloc(HYPRE_Int,4);
grid_relax_type = hypre_ParAMGDataGridRelaxType(amg_data);
for (i=0; i < 3; i++)
grid_relax_type[i] = relax_type;
grid_relax_type[3] = 9;
hypre_ParAMGDataUserCoarseRelaxType(amg_data) = 9;
hypre_ParAMGDataUserRelaxType(amg_data) = relax_type;
return hypre_error_flag;
}
/* Set the relaxation type for one cycle position k (k = 1..3 only).
First use allocates the array with defaults {3,3,3,9}. Setting k == 3
also records the user's coarse-grid relaxation choice. */
HYPRE_Int
hypre_BoomerAMGSetCycleRelaxType( void *data,
HYPRE_Int relax_type,
HYPRE_Int k )
{
HYPRE_Int i;
HYPRE_Int *grid_relax_type;
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (k < 1 || k > 3)
{
hypre_error_in_arg(3);
return hypre_error_flag;
}
if (relax_type < 0)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
if (hypre_ParAMGDataGridRelaxType(amg_data) == NULL)
{
grid_relax_type = hypre_CTAlloc(HYPRE_Int,4);
for (i=0; i < 3; i++)
grid_relax_type[i] = 3;
grid_relax_type[3] = 9;
hypre_ParAMGDataGridRelaxType(amg_data) = grid_relax_type;
}
hypre_ParAMGDataGridRelaxType(amg_data)[k] = relax_type;
if (k == 3)
hypre_ParAMGDataUserCoarseRelaxType(amg_data) = relax_type;
return hypre_error_flag;
}
/* Retrieve the relaxation type for cycle position k (k = 1..3). Fails
if the relax-type array has never been allocated. */
HYPRE_Int
hypre_BoomerAMGGetCycleRelaxType( void *data,
HYPRE_Int * relax_type,
HYPRE_Int k )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (k < 1 || k > 3)
{
hypre_error_in_arg(3);
return hypre_error_flag;
}
if (hypre_ParAMGDataGridRelaxType(amg_data) == NULL)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*relax_type = hypre_ParAMGDataGridRelaxType(amg_data)[k];
return hypre_error_flag;
}
/* Set the relaxation ordering flag (value is stored unvalidated). */
HYPRE_Int
hypre_BoomerAMGSetRelaxOrder( void *data,
HYPRE_Int relax_order)
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_ParAMGDataRelaxOrder(amg_data) = relax_order;
return hypre_error_flag;
}
/* Retrieve the relaxation ordering flag. */
HYPRE_Int
hypre_BoomerAMGGetRelaxOrder( void *data,
HYPRE_Int * relax_order)
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*relax_order = hypre_ParAMGDataRelaxOrder(amg_data);
return hypre_error_flag;
}
/* Install a caller-provided 4-entry relax-type array, freeing any
existing one; entry [3] becomes the user coarse relax type. The solver
takes ownership of the pointer. */
HYPRE_Int
hypre_BoomerAMGSetGridRelaxType( void *data,
HYPRE_Int *grid_relax_type )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (!grid_relax_type)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
if (hypre_ParAMGDataGridRelaxType(amg_data))
hypre_TFree(hypre_ParAMGDataGridRelaxType(amg_data));
hypre_ParAMGDataGridRelaxType(amg_data) = grid_relax_type;
hypre_ParAMGDataUserCoarseRelaxType(amg_data) = grid_relax_type[3];
return hypre_error_flag;
}
/* Return the internal relax-type array pointer (may be NULL; not a copy). */
HYPRE_Int
hypre_BoomerAMGGetGridRelaxType( void *data,
HYPRE_Int ** grid_relax_type )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*grid_relax_type = hypre_ParAMGDataGridRelaxType(amg_data);
return hypre_error_flag;
}
/* Install a caller-provided array of 4 relax-point vectors; any existing
vectors and the outer array are freed first. The solver takes ownership
of all the pointers. */
HYPRE_Int
hypre_BoomerAMGSetGridRelaxPoints( void *data,
HYPRE_Int **grid_relax_points )
{
HYPRE_Int i;
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (!grid_relax_points)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
if (hypre_ParAMGDataGridRelaxPoints(amg_data))
{
for (i=0; i < 4; i++)
hypre_TFree (hypre_ParAMGDataGridRelaxPoints(amg_data)[i]);
hypre_TFree(hypre_ParAMGDataGridRelaxPoints(amg_data));
}
hypre_ParAMGDataGridRelaxPoints(amg_data) = grid_relax_points;
return hypre_error_flag;
}
/* Return the internal relax-points pointer (may be NULL; not a copy). */
HYPRE_Int
hypre_BoomerAMGGetGridRelaxPoints( void *data,
HYPRE_Int *** grid_relax_points )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*grid_relax_points = hypre_ParAMGDataGridRelaxPoints(amg_data);
return hypre_error_flag;
}
/* Install a caller-provided per-level relaxation-weight array, freeing
any existing one. The solver takes ownership of the pointer. */
HYPRE_Int
hypre_BoomerAMGSetRelaxWeight( void *data,
HYPRE_Real *relax_weight )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (!relax_weight)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
if (hypre_ParAMGDataRelaxWeight(amg_data))
hypre_TFree(hypre_ParAMGDataRelaxWeight(amg_data));
hypre_ParAMGDataRelaxWeight(amg_data) = relax_weight;
return hypre_error_flag;
}
/* Return the internal relax-weight array pointer (may be NULL; not a copy). */
HYPRE_Int
hypre_BoomerAMGGetRelaxWeight( void *data,
HYPRE_Real ** relax_weight )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*relax_weight = hypre_ParAMGDataRelaxWeight(amg_data);
return hypre_error_flag;
}
/* Set one relaxation weight on every level (array sized to max_levels,
allocated on first use); also records it as the user weight. */
HYPRE_Int
hypre_BoomerAMGSetRelaxWt( void *data,
HYPRE_Real relax_weight )
{
HYPRE_Int i, num_levels;
HYPRE_Real *relax_weight_array;
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
num_levels = hypre_ParAMGDataMaxLevels(amg_data);
if (hypre_ParAMGDataRelaxWeight(amg_data) == NULL)
hypre_ParAMGDataRelaxWeight(amg_data) = hypre_CTAlloc(HYPRE_Real,num_levels);
relax_weight_array = hypre_ParAMGDataRelaxWeight(amg_data);
for (i=0; i < num_levels; i++)
relax_weight_array[i] = relax_weight;
hypre_ParAMGDataUserRelaxWeight(amg_data) = relax_weight;
return hypre_error_flag;
}
/* Set the relaxation weight for a single level; level must be in
[0, max_levels-1]. First use allocates the array defaulted to 1.0. */
HYPRE_Int
hypre_BoomerAMGSetLevelRelaxWt( void *data,
HYPRE_Real relax_weight,
HYPRE_Int level )
{
HYPRE_Int i, num_levels;
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
num_levels = hypre_ParAMGDataMaxLevels(amg_data);
if (level > num_levels-1 || level < 0)
{
hypre_error_in_arg(3);
return hypre_error_flag;
}
if (hypre_ParAMGDataRelaxWeight(amg_data) == NULL)
{
hypre_ParAMGDataRelaxWeight(amg_data) = hypre_CTAlloc(HYPRE_Real,num_levels);
for (i=0; i < num_levels; i++)
hypre_ParAMGDataRelaxWeight(amg_data)[i] = 1.0;
}
hypre_ParAMGDataRelaxWeight(amg_data)[level] = relax_weight;
return hypre_error_flag;
}
/* Retrieve the relaxation weight of one level; fails if the array was
never allocated or level is out of [0, max_levels-1]. */
HYPRE_Int
hypre_BoomerAMGGetLevelRelaxWt( void *data,
HYPRE_Real * relax_weight,
HYPRE_Int level )
{
HYPRE_Int num_levels;
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
num_levels = hypre_ParAMGDataMaxLevels(amg_data);
if (level > num_levels-1 || level < 0)
{
hypre_error_in_arg(3);
return hypre_error_flag;
}
if (hypre_ParAMGDataRelaxWeight(amg_data) == NULL)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*relax_weight = hypre_ParAMGDataRelaxWeight(amg_data)[level];
return hypre_error_flag;
}
/* Install a caller-provided per-level outer-weight (omega) array,
freeing any existing one. The solver takes ownership of the pointer. */
HYPRE_Int
hypre_BoomerAMGSetOmega( void *data,
HYPRE_Real *omega )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (!omega)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
if (hypre_ParAMGDataOmega(amg_data))
hypre_TFree(hypre_ParAMGDataOmega(amg_data));
hypre_ParAMGDataOmega(amg_data) = omega;
return hypre_error_flag;
}
/* Return the internal omega array pointer (may be NULL; not a copy). */
HYPRE_Int
hypre_BoomerAMGGetOmega( void *data,
HYPRE_Real ** omega )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*omega = hypre_ParAMGDataOmega(amg_data);
return hypre_error_flag;
}
/* Set one outer weight on every level (array sized to max_levels,
allocated on first use); also records it as the scalar outer weight. */
HYPRE_Int
hypre_BoomerAMGSetOuterWt( void *data,
HYPRE_Real omega )
{
HYPRE_Int i, num_levels;
HYPRE_Real *omega_array;
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
num_levels = hypre_ParAMGDataMaxLevels(amg_data);
if (hypre_ParAMGDataOmega(amg_data) == NULL)
hypre_ParAMGDataOmega(amg_data) = hypre_CTAlloc(HYPRE_Real,num_levels);
omega_array = hypre_ParAMGDataOmega(amg_data);
for (i=0; i < num_levels; i++)
omega_array[i] = omega;
hypre_ParAMGDataOuterWt(amg_data) = omega;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGSetLevelOuterWt
 *
 * Set the outer relaxation weight (omega) for a single level; the
 * per-level omega array (size max_levels) is allocated and defaulted to
 * 1.0 on first use.
 *
 * Fix: also reject negative levels, mirroring
 * hypre_BoomerAMGSetLevelRelaxWt; previously a negative level caused an
 * out-of-bounds write into the omega array.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGSetLevelOuterWt( void *data,
HYPRE_Real omega,
HYPRE_Int level )
{
HYPRE_Int i, num_levels;
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
num_levels = hypre_ParAMGDataMaxLevels(amg_data);
/* level must index the omega array of size num_levels */
if (level > num_levels-1 || level < 0)
{
hypre_error_in_arg(3);
return hypre_error_flag;
}
if (hypre_ParAMGDataOmega(amg_data) == NULL)
{
hypre_ParAMGDataOmega(amg_data) = hypre_CTAlloc(HYPRE_Real,num_levels);
for (i=0; i < num_levels; i++)
hypre_ParAMGDataOmega(amg_data)[i] = 1.0;
}
hypre_ParAMGDataOmega(amg_data)[level] = omega;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGGetLevelOuterWt
 *
 * Retrieve the outer relaxation weight of one level; fails if the omega
 * array was never allocated or level is out of [0, max_levels-1].
 *
 * Fix: also reject negative levels, mirroring
 * hypre_BoomerAMGGetLevelRelaxWt; previously a negative level caused an
 * out-of-bounds read.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGGetLevelOuterWt( void *data,
HYPRE_Real * omega,
HYPRE_Int level )
{
HYPRE_Int num_levels;
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
num_levels = hypre_ParAMGDataMaxLevels(amg_data);
/* level must index the omega array of size num_levels */
if (level > num_levels-1 || level < 0)
{
hypre_error_in_arg(3);
return hypre_error_flag;
}
if (hypre_ParAMGDataOmega(amg_data) == NULL)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*omega = hypre_ParAMGDataOmega(amg_data)[level];
return hypre_error_flag;
}
/* Set the logging level. */
HYPRE_Int
hypre_BoomerAMGSetLogging( void *data,
HYPRE_Int logging )
{
/* This function should be called before Setup. Logging changes
may require allocation or freeing of arrays, which is presently
only done there.
It may be possible to support logging changes at other times,
but there is little need.
*/
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_ParAMGDataLogging(amg_data) = logging;
return hypre_error_flag;
}
/* Retrieve the logging level. */
HYPRE_Int
hypre_BoomerAMGGetLogging( void *data,
HYPRE_Int * logging )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*logging = hypre_ParAMGDataLogging(amg_data);
return hypre_error_flag;
}
/* Set the print (verbosity) level; stored unvalidated. */
HYPRE_Int
hypre_BoomerAMGSetPrintLevel( void *data,
HYPRE_Int print_level )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_ParAMGDataPrintLevel(amg_data) = print_level;
return hypre_error_flag;
}
/* Retrieve the print (verbosity) level. */
HYPRE_Int
hypre_BoomerAMGGetPrintLevel( void *data,
HYPRE_Int * print_level )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*print_level = hypre_ParAMGDataPrintLevel(amg_data);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGSetPrintFileName
 *
 * Copy the given log-file name into the solver's internal buffer.
 * Fails (arg 2) if the name is too long.
 *
 * Fix: the length check previously accepted strlen == 256, which needs
 * 257 bytes including the terminating NUL and could overflow the
 * internal log_file_name buffer; reject names longer than 255 chars.
 * (NOTE(review): buffer declaration is outside this chunk -- hypre's
 * log_file_name field is 256 bytes; confirm against par_amg.h.)
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGSetPrintFileName( void *data,
const char *print_file_name )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*)data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
/* reserve one byte for the terminating NUL */
if( strlen(print_file_name) > 255 )
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
hypre_sprintf(hypre_ParAMGDataLogFileName(amg_data), "%s", print_file_name);
return hypre_error_flag;
}
/* Copy the stored log-file name into the caller's buffer; the caller
must pass a buffer large enough for the stored name. */
HYPRE_Int
hypre_BoomerAMGGetPrintFileName( void *data,
char ** print_file_name )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_sprintf( *print_file_name, "%s", hypre_ParAMGDataLogFileName(amg_data) );
return hypre_error_flag;
}
/* Store the iteration count (used internally by the solve phase). */
HYPRE_Int
hypre_BoomerAMGSetNumIterations( void *data,
HYPRE_Int num_iterations )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_ParAMGDataNumIterations(amg_data) = num_iterations;
return hypre_error_flag;
}
/* Set the debug flag; stored unvalidated. */
HYPRE_Int
hypre_BoomerAMGSetDebugFlag( void *data,
HYPRE_Int debug_flag )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_ParAMGDataDebugFlag(amg_data) = debug_flag;
return hypre_error_flag;
}
/* Retrieve the debug flag. */
HYPRE_Int
hypre_BoomerAMGGetDebugFlag( void *data,
HYPRE_Int * debug_flag )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*debug_flag = hypre_ParAMGDataDebugFlag(amg_data);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Routines to set the problem data parameters
*--------------------------------------------------------------------------*/
/* Set the number of functions (unknowns per point); must be >= 1. */
HYPRE_Int
hypre_BoomerAMGSetNumFunctions( void *data,
HYPRE_Int num_functions )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (num_functions < 1)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
hypre_ParAMGDataNumFunctions(amg_data) = num_functions;
return hypre_error_flag;
}
/* Retrieve the number of functions. */
HYPRE_Int
hypre_BoomerAMGGetNumFunctions( void *data,
HYPRE_Int * num_functions )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*num_functions = hypre_ParAMGDataNumFunctions(amg_data);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Indicate whether to use nodal systems function
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGSetNodal( void *data,
HYPRE_Int nodal )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_ParAMGDataNodal(amg_data) = nodal;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Indicate number of levels for nodal coarsening
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGSetNodalLevels( void *data,
HYPRE_Int nodal_levels )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_ParAMGDataNodalLevels(amg_data) = nodal_levels;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Indicate how to treat diag for primary matrix with nodal systems function
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGSetNodalDiag( void *data,
HYPRE_Int nodal )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_ParAMGDataNodalDiag(amg_data) = nodal;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Indicate the degree of aggressive coarsening
*--------------------------------------------------------------------------*/
/* num_paths must be >= 1. */
HYPRE_Int
hypre_BoomerAMGSetNumPaths( void *data,
HYPRE_Int num_paths )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (num_paths < 1)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
hypre_ParAMGDataNumPaths(amg_data) = num_paths;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Indicates the number of levels of aggressive coarsening
*--------------------------------------------------------------------------*/
/* agg_num_levels must be >= 0. */
HYPRE_Int
hypre_BoomerAMGSetAggNumLevels( void *data,
HYPRE_Int agg_num_levels )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (agg_num_levels < 0)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
hypre_ParAMGDataAggNumLevels(amg_data) = agg_num_levels;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Indicates the interpolation used with aggressive coarsening
*--------------------------------------------------------------------------*/
/* agg_interp_type must be in [0,4]. */
HYPRE_Int
hypre_BoomerAMGSetAggInterpType( void *data,
HYPRE_Int agg_interp_type )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (agg_interp_type < 0 || agg_interp_type > 4)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
hypre_ParAMGDataAggInterpType(amg_data) = agg_interp_type;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Indicates max number of elements per row for aggressive coarsening
* interpolation
*--------------------------------------------------------------------------*/
/* agg_P_max_elmts must be >= 0. */
HYPRE_Int
hypre_BoomerAMGSetAggPMaxElmts( void *data,
HYPRE_Int agg_P_max_elmts )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (agg_P_max_elmts < 0)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
hypre_ParAMGDataAggPMaxElmts(amg_data) = agg_P_max_elmts;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Indicates max number of elements per row for smoothed
* interpolation in mult-additive or simple method
*--------------------------------------------------------------------------*/
/* add_P_max_elmts must be >= 0. */
HYPRE_Int
hypre_BoomerAMGSetMultAddPMaxElmts( void *data,
HYPRE_Int add_P_max_elmts )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (add_P_max_elmts < 0)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
hypre_ParAMGDataMultAddPMaxElmts(amg_data) = add_P_max_elmts;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Indicates Relaxtion Type for Additive Cycle
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGSetAddRelaxType( void *data,
HYPRE_Int add_rlx_type )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_ParAMGDataAddRelaxType(amg_data) = add_rlx_type;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Indicates Relaxation Weight for Additive Cycle
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGSetAddRelaxWt( void *data,
HYPRE_Real add_rlx_wt )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_ParAMGDataAddRelaxWt(amg_data) = add_rlx_wt;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Indicates max number of elements per row for 1st stage of aggressive
* coarsening two-stage interpolation
*--------------------------------------------------------------------------*/
/* agg_P12_max_elmts must be >= 0. */
HYPRE_Int
hypre_BoomerAMGSetAggP12MaxElmts( void *data,
HYPRE_Int agg_P12_max_elmts )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (agg_P12_max_elmts < 0)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
hypre_ParAMGDataAggP12MaxElmts(amg_data) = agg_P12_max_elmts;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Indicates truncation factor for aggressive coarsening interpolation
*--------------------------------------------------------------------------*/
/* agg_trunc_factor must be >= 0. */
HYPRE_Int
hypre_BoomerAMGSetAggTruncFactor( void *data,
HYPRE_Real agg_trunc_factor )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (agg_trunc_factor < 0)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
hypre_ParAMGDataAggTruncFactor(amg_data) = agg_trunc_factor;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Indicates the truncation factor for smoothed interpolation when using
* mult-additive or simple method
*--------------------------------------------------------------------------*/
/* add_trunc_factor must be >= 0. */
HYPRE_Int
hypre_BoomerAMGSetMultAddTruncFactor( void *data,
HYPRE_Real add_trunc_factor )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (add_trunc_factor < 0)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
hypre_ParAMGDataMultAddTruncFactor(amg_data) = add_trunc_factor;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Indicates truncation factor for 1 stage of aggressive coarsening
* two stage interpolation
*--------------------------------------------------------------------------*/
/* agg_P12_trunc_factor must be >= 0. */
HYPRE_Int
hypre_BoomerAMGSetAggP12TruncFactor( void *data,
HYPRE_Real agg_P12_trunc_factor )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (agg_P12_trunc_factor < 0)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
hypre_ParAMGDataAggP12TruncFactor(amg_data) = agg_P12_trunc_factor;
return hypre_error_flag;
}
/* Set the number of points; stored unvalidated. */
HYPRE_Int
hypre_BoomerAMGSetNumPoints( void *data,
HYPRE_Int num_points )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_ParAMGDataNumPoints(amg_data) = num_points;
return hypre_error_flag;
}
/* Install a caller-provided dof->function map, freeing any existing
one. The solver takes ownership of the pointer. */
HYPRE_Int
hypre_BoomerAMGSetDofFunc( void *data,
HYPRE_Int *dof_func )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_TFree(hypre_ParAMGDataDofFunc(amg_data));
hypre_ParAMGDataDofFunc(amg_data) = dof_func;
return hypre_error_flag;
}
/* Install a caller-provided point->dof map, freeing any existing one.
The solver takes ownership of the pointer. */
HYPRE_Int
hypre_BoomerAMGSetPointDofMap( void *data,
HYPRE_Int *point_dof_map )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_TFree(hypre_ParAMGDataPointDofMap(amg_data));
hypre_ParAMGDataPointDofMap(amg_data) = point_dof_map;
return hypre_error_flag;
}
/* Install a caller-provided dof->point map, freeing any existing one.
The solver takes ownership of the pointer. */
HYPRE_Int
hypre_BoomerAMGSetDofPoint( void *data,
HYPRE_Int *dof_point )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_TFree(hypre_ParAMGDataDofPoint(amg_data));
hypre_ParAMGDataDofPoint(amg_data) = dof_point;
return hypre_error_flag;
}
/* Retrieve the iteration count of the last solve. */
HYPRE_Int
hypre_BoomerAMGGetNumIterations( void *data,
HYPRE_Int *num_iterations )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*num_iterations = hypre_ParAMGDataNumIterations(amg_data);
return hypre_error_flag;
}
/* Retrieve the cumulative iteration count; only written when the
library is built with CUMNUMIT, otherwise *cum_num_iterations is left
untouched. */
HYPRE_Int
hypre_BoomerAMGGetCumNumIterations( void *data,
HYPRE_Int *cum_num_iterations )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
#ifdef CUMNUMIT
*cum_num_iterations = hypre_ParAMGDataCumNumIterations(amg_data);
#endif
return hypre_error_flag;
}
/* Retrieve the cumulative nonzero count of the A and P operators. */
HYPRE_Int
hypre_BoomerAMGGetCumNnzAP( void *data,
HYPRE_Real *cum_nnz_AP )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*cum_nnz_AP = hypre_ParAMGDataCumNnzAP(amg_data);
return hypre_error_flag;
}
/* Return the internal residual vector pointer (not a copy). */
HYPRE_Int
hypre_BoomerAMGGetResidual( void * data, hypre_ParVector ** resid )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*resid = hypre_ParAMGDataResidual( amg_data );
return hypre_error_flag;
}
/* Retrieve the relative residual norm of the last solve. */
HYPRE_Int
hypre_BoomerAMGGetRelResidualNorm( void *data,
HYPRE_Real *rel_resid_norm )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*rel_resid_norm = hypre_ParAMGDataRelativeResidualNorm(amg_data);
return hypre_error_flag;
}
/* Set the Chebyshev smoother polynomial order; must be >= 1. */
HYPRE_Int
hypre_BoomerAMGSetChebyOrder( void *data,
HYPRE_Int order)
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (order < 1)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
hypre_ParAMGDataChebyOrder(amg_data) = order;
return hypre_error_flag;
}
/* Set the Chebyshev eigenvalue fraction; must lie in (0,1]. */
HYPRE_Int
hypre_BoomerAMGSetChebyFraction( void *data,
HYPRE_Real ratio)
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (ratio <= 0.0 || ratio > 1.0 )
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
hypre_ParAMGDataChebyFraction(amg_data) = ratio;
return hypre_error_flag;
}
/* Set the Chebyshev eigenvalue-estimate parameter; must be >= 0. */
HYPRE_Int
hypre_BoomerAMGSetChebyEigEst( void *data,
HYPRE_Int cheby_eig_est)
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (cheby_eig_est < 0)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
hypre_ParAMGDataChebyEigEst(amg_data) = cheby_eig_est;
return hypre_error_flag;
}
/* Set the Chebyshev variant flag; stored unvalidated. */
HYPRE_Int
hypre_BoomerAMGSetChebyVariant( void *data,
HYPRE_Int cheby_variant)
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_ParAMGDataChebyVariant(amg_data) = cheby_variant;
return hypre_error_flag;
}
/* Set the Chebyshev scaling flag; stored unvalidated. */
HYPRE_Int
hypre_BoomerAMGSetChebyScale( void *data,
HYPRE_Int cheby_scale)
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_ParAMGDataChebyScale(amg_data) = cheby_scale;
return hypre_error_flag;
}
/* Set the additive-cycle start level; stored unvalidated. */
HYPRE_Int
hypre_BoomerAMGSetAdditive( void *data,
HYPRE_Int additive )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_ParAMGDataAdditive(amg_data) = additive;
return hypre_error_flag;
}
/* Retrieve the additive-cycle setting. */
HYPRE_Int
hypre_BoomerAMGGetAdditive( void *data,
HYPRE_Int * additive )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*additive = hypre_ParAMGDataAdditive(amg_data);
return hypre_error_flag;
}
/* Set the mult-additive-cycle setting; stored unvalidated. */
HYPRE_Int
hypre_BoomerAMGSetMultAdditive( void *data,
HYPRE_Int mult_additive )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_ParAMGDataMultAdditive(amg_data) = mult_additive;
return hypre_error_flag;
}
/* Retrieve the mult-additive-cycle setting. */
HYPRE_Int
hypre_BoomerAMGGetMultAdditive( void *data,
HYPRE_Int * mult_additive )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*mult_additive = hypre_ParAMGDataMultAdditive(amg_data);
return hypre_error_flag;
}
/* Set the simple-cycle setting; stored unvalidated. */
HYPRE_Int
hypre_BoomerAMGSetSimple( void *data,
HYPRE_Int simple )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_ParAMGDataSimple(amg_data) = simple;
return hypre_error_flag;
}
/* Retrieve the simple-cycle setting. */
HYPRE_Int
hypre_BoomerAMGGetSimple( void *data,
HYPRE_Int * simple )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
*simple = hypre_ParAMGDataSimple(amg_data);
return hypre_error_flag;
}
/* Set the last level on which additive cycling is used; stored
unvalidated. */
HYPRE_Int
hypre_BoomerAMGSetAddLastLvl( void *data,
HYPRE_Int add_last_lvl )
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_ParAMGDataAddLastLvl(amg_data) = add_last_lvl;
return hypre_error_flag;
}
/* Set the same non-Galerkin drop tolerance (must be >= 0) on every
level; the per-level array (size max_levels) is allocated on first use. */
HYPRE_Int
hypre_BoomerAMGSetNonGalerkinTol( void *data,
HYPRE_Real nongalerkin_tol)
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
HYPRE_Int i, max_num_levels;
HYPRE_Real *nongal_tol_array;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (nongalerkin_tol < 0)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
max_num_levels = hypre_ParAMGDataMaxLevels(amg_data);
nongal_tol_array = hypre_ParAMGDataNonGalTolArray(amg_data);
if (nongal_tol_array == NULL)
{
nongal_tol_array = hypre_CTAlloc(HYPRE_Real, max_num_levels);
hypre_ParAMGDataNonGalTolArray(amg_data) = nongal_tol_array;
}
hypre_ParAMGDataNonGalerkinTol(amg_data) = nongalerkin_tol;
for (i=0; i < max_num_levels; i++)
nongal_tol_array[i] = nongalerkin_tol;
return hypre_error_flag;
}
/* Set the non-Galerkin drop tolerance (must be >= 0) for one level;
level must satisfy level+1 <= max_levels. The per-level array is
allocated (zero-initialized via CTAlloc) on first use. */
HYPRE_Int
hypre_BoomerAMGSetLevelNonGalerkinTol( void *data,
HYPRE_Real nongalerkin_tol,
HYPRE_Int level)
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
HYPRE_Real *nongal_tol_array;
HYPRE_Int max_num_levels;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
if (nongalerkin_tol < 0)
{
hypre_error_in_arg(2);
return hypre_error_flag;
}
nongal_tol_array = hypre_ParAMGDataNonGalTolArray(amg_data);
max_num_levels = hypre_ParAMGDataMaxLevels(amg_data);
if (nongal_tol_array == NULL)
{
nongal_tol_array = hypre_CTAlloc(HYPRE_Real, max_num_levels);
hypre_ParAMGDataNonGalTolArray(amg_data) = nongal_tol_array;
}
if (level+1 > max_num_levels)
{
hypre_error_in_arg(3);
return hypre_error_flag;
}
nongal_tol_array[level] = nongalerkin_tol;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGSetNonGalerkTol
 *
 * Install a caller-provided array of non-Galerkin tolerances and its
 * length. The solver keeps the pointer (no copy is made).
 *
 * Fix: guard against a NULL solver object, matching every other setter
 * in this file; previously a NULL data pointer was dereferenced.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGSetNonGalerkTol( void *data,
HYPRE_Int nongalerk_num_tol,
HYPRE_Real *nongalerk_tol)
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_ParAMGDataNonGalerkNumTol(amg_data) = nongalerk_num_tol;
hypre_ParAMGDataNonGalerkTol(amg_data) = nongalerk_tol;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGSetRAP2
 *
 * Set the flag selecting the two-matrix (RAP2) coarse-grid product.
 *
 * Fix: guard against a NULL solver object, matching every other setter
 * in this file; previously a NULL data pointer was dereferenced.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGSetRAP2( void *data,
HYPRE_Int rap2)
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_ParAMGDataRAP2(amg_data) = rap2;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGSetKeepTranspose
 *
 * Set the flag controlling whether interpolation transposes are kept.
 *
 * Fix: guard against a NULL solver object, matching every other setter
 * in this file; previously a NULL data pointer was dereferenced.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGSetKeepTranspose( void *data,
HYPRE_Int keepTranspose)
{
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
if (!amg_data)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
hypre_ParAMGDataKeepTranspose(amg_data) = keepTranspose;
return hypre_error_flag;
}
| 75,562 | 24.459232 | 91 | c |
AMG | AMG-master/parcsr_ls/par_amg.h | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#ifndef hypre_ParAMG_DATA_HEADER
#define hypre_ParAMG_DATA_HEADER
#define CUMNUMIT
/*#include "par_csr_block_matrix.h"*/
/*--------------------------------------------------------------------------
* hypre_ParAMGData
*--------------------------------------------------------------------------*/
/* Master data object for the BoomerAMG solver.  It groups (a) user-set
   parameters consumed by the setup phase, (b) the per-level operator
   hierarchy produced by setup, (c) scratch vectors used during the solve
   phase, and (d) logging/output state.  Fields are accessed exclusively
   through the hypre_ParAMGData* macros defined below this struct. */
typedef struct
{
/* setup params */
HYPRE_Int max_levels;
HYPRE_Real strong_threshold;
HYPRE_Real max_row_sum;
HYPRE_Real trunc_factor;
HYPRE_Real agg_trunc_factor;
HYPRE_Real agg_P12_trunc_factor;
HYPRE_Real jacobi_trunc_threshold;
HYPRE_Real S_commpkg_switch;
HYPRE_Int measure_type;
HYPRE_Int setup_type;
HYPRE_Int coarsen_type;
HYPRE_Int P_max_elmts;
HYPRE_Int interp_type;
HYPRE_Int sep_weight;
HYPRE_Int agg_interp_type;
HYPRE_Int agg_P_max_elmts;
HYPRE_Int agg_P12_max_elmts;
HYPRE_Int restr_par;
HYPRE_Int agg_num_levels;
HYPRE_Int num_paths;
HYPRE_Int post_interp_type;
HYPRE_Int max_coarse_size;
HYPRE_Int min_coarse_size;
HYPRE_Int seq_threshold;
HYPRE_Int redundant;
HYPRE_Int participate;
/* solve params */
HYPRE_Int max_iter;
HYPRE_Int min_iter;
HYPRE_Int cycle_type;
HYPRE_Int *num_grid_sweeps;
HYPRE_Int *grid_relax_type;
HYPRE_Int **grid_relax_points;
HYPRE_Int relax_order;
HYPRE_Int user_coarse_relax_type;
HYPRE_Int user_relax_type;
HYPRE_Int user_num_sweeps;
HYPRE_Real user_relax_weight;
HYPRE_Real outer_wt;
HYPRE_Real *relax_weight;
HYPRE_Real *omega;
HYPRE_Real tol;
/* problem data */
hypre_ParCSRMatrix *A;      /* fine-grid system matrix supplied by the user */
HYPRE_Int num_variables;
HYPRE_Int num_functions;
HYPRE_Int nodal;
HYPRE_Int nodal_levels;
HYPRE_Int nodal_diag;
HYPRE_Int num_points;
HYPRE_Int *dof_func;
HYPRE_Int *dof_point;
HYPRE_Int *point_dof_map;
/* data generated in the setup phase */
/* The *_array members are per-level arrays indexed 0..num_levels-1
   (interpolation/restriction arrays have one fewer entry). */
hypre_ParCSRMatrix **A_array;
hypre_ParVector **F_array;
hypre_ParVector **U_array;
hypre_ParCSRMatrix **P_array;
hypre_ParCSRMatrix **R_array;
HYPRE_Int **CF_marker_array;
HYPRE_Int **dof_func_array;
HYPRE_Int **dof_point_array;
HYPRE_Int **point_dof_map_array;
HYPRE_Int num_levels;
HYPRE_Real **l1_norms;
HYPRE_Int block_mode;
HYPRE_Real *max_eig_est;
HYPRE_Real *min_eig_est;
HYPRE_Int cheby_eig_est;
HYPRE_Int cheby_order;
HYPRE_Int cheby_variant;
HYPRE_Int cheby_scale;
HYPRE_Real cheby_fraction;
HYPRE_Real **cheby_ds;
HYPRE_Real **cheby_coefs;
/* data needed for non-Galerkin option */
HYPRE_Int nongalerk_num_tol;
HYPRE_Real *nongalerk_tol;
HYPRE_Real nongalerkin_tol;
HYPRE_Real *nongal_tol_array;
/* data generated in the solve phase */
hypre_ParVector *Vtemp;
hypre_Vector *Vtemp_local;
HYPRE_Real *Vtemp_local_data;
HYPRE_Real cycle_op_count;
hypre_ParVector *Rtemp;
hypre_ParVector *Ptemp;
hypre_ParVector *Ztemp;
/* log info */
HYPRE_Int logging;
HYPRE_Int num_iterations;
#ifdef CUMNUMIT
HYPRE_Int cum_num_iterations;
#endif
HYPRE_Real cum_nnz_A_P;
HYPRE_Real rel_resid_norm;
hypre_ParVector *residual; /* available if logging>1 */
/* output params */
HYPRE_Int print_level;
char log_file_name[256];   /* fixed-size buffer; writers must bound-check */
HYPRE_Int debug_flag;
/* enable redundant coarse grid solve */
HYPRE_Solver coarse_solver;
hypre_ParCSRMatrix *A_coarse;
hypre_ParVector *f_coarse;
hypre_ParVector *u_coarse;
MPI_Comm new_comm;
/* store matrix, vector and communication info for Gaussian elimination */
HYPRE_Real *A_mat;
HYPRE_Real *b_vec;
HYPRE_Int *comm_info;
/* information for multiplication with Lambda - additive AMG */
HYPRE_Int additive;
HYPRE_Int mult_additive;
HYPRE_Int simple;
HYPRE_Int add_last_lvl;
HYPRE_Int add_P_max_elmts;
HYPRE_Real add_trunc_factor;
HYPRE_Int add_rlx_type;
HYPRE_Real add_rlx_wt;
hypre_ParCSRMatrix *Lambda;
hypre_ParCSRMatrix *Atilde;
hypre_ParVector *Rtilde;
hypre_ParVector *Xtilde;
HYPRE_Real *D_inv;
/* Use 2 mat-mat-muls instead of triple product*/
HYPRE_Int rap2;
HYPRE_Int keepTranspose;
} hypre_ParAMGData;
/*--------------------------------------------------------------------------
* Accessor functions for the hypre_AMGData structure
*--------------------------------------------------------------------------*/
/* Accessor macros: each expands to an lvalue for the corresponding
   hypre_ParAMGData member, so they are used for both reads and writes. */
/* setup params */
#define hypre_ParAMGDataRestriction(amg_data) ((amg_data)->restr_par)
#define hypre_ParAMGDataMaxLevels(amg_data) ((amg_data)->max_levels)
#define hypre_ParAMGDataStrongThreshold(amg_data) \
((amg_data)->strong_threshold)
#define hypre_ParAMGDataMaxRowSum(amg_data) ((amg_data)->max_row_sum)
#define hypre_ParAMGDataTruncFactor(amg_data) ((amg_data)->trunc_factor)
#define hypre_ParAMGDataAggTruncFactor(amg_data) ((amg_data)->agg_trunc_factor)
#define hypre_ParAMGDataAggP12TruncFactor(amg_data) ((amg_data)->agg_P12_trunc_factor)
#define hypre_ParAMGDataJacobiTruncThreshold(amg_data) ((amg_data)->jacobi_trunc_threshold)
#define hypre_ParAMGDataSCommPkgSwitch(amg_data) ((amg_data)->S_commpkg_switch)
#define hypre_ParAMGDataInterpType(amg_data) ((amg_data)->interp_type)
#define hypre_ParAMGDataSepWeight(amg_data) ((amg_data)->sep_weight)
#define hypre_ParAMGDataAggInterpType(amg_data) ((amg_data)->agg_interp_type)
#define hypre_ParAMGDataCoarsenType(amg_data) ((amg_data)->coarsen_type)
#define hypre_ParAMGDataMeasureType(amg_data) ((amg_data)->measure_type)
#define hypre_ParAMGDataSetupType(amg_data) ((amg_data)->setup_type)
#define hypre_ParAMGDataPMaxElmts(amg_data) ((amg_data)->P_max_elmts)
#define hypre_ParAMGDataAggPMaxElmts(amg_data) ((amg_data)->agg_P_max_elmts)
#define hypre_ParAMGDataAggP12MaxElmts(amg_data) ((amg_data)->agg_P12_max_elmts)
#define hypre_ParAMGDataNumPaths(amg_data) ((amg_data)->num_paths)
#define hypre_ParAMGDataAggNumLevels(amg_data) ((amg_data)->agg_num_levels)
#define hypre_ParAMGDataPostInterpType(amg_data) ((amg_data)->post_interp_type)
#define hypre_ParAMGDataL1Norms(amg_data) ((amg_data)->l1_norms)
#define hypre_ParAMGDataMaxCoarseSize(amg_data) ((amg_data)->max_coarse_size)
#define hypre_ParAMGDataMinCoarseSize(amg_data) ((amg_data)->min_coarse_size)
#define hypre_ParAMGDataSeqThreshold(amg_data) ((amg_data)->seq_threshold)
/* solve params */
#define hypre_ParAMGDataMinIter(amg_data) ((amg_data)->min_iter)
#define hypre_ParAMGDataMaxIter(amg_data) ((amg_data)->max_iter)
#define hypre_ParAMGDataCycleType(amg_data) ((amg_data)->cycle_type)
#define hypre_ParAMGDataTol(amg_data) ((amg_data)->tol)
#define hypre_ParAMGDataNumGridSweeps(amg_data) ((amg_data)->num_grid_sweeps)
#define hypre_ParAMGDataUserCoarseRelaxType(amg_data) ((amg_data)->user_coarse_relax_type)
#define hypre_ParAMGDataUserRelaxType(amg_data) ((amg_data)->user_relax_type)
#define hypre_ParAMGDataUserRelaxWeight(amg_data) ((amg_data)->user_relax_weight)
#define hypre_ParAMGDataUserNumSweeps(amg_data) ((amg_data)->user_num_sweeps)
#define hypre_ParAMGDataGridRelaxType(amg_data) ((amg_data)->grid_relax_type)
#define hypre_ParAMGDataGridRelaxPoints(amg_data) \
((amg_data)->grid_relax_points)
#define hypre_ParAMGDataRelaxOrder(amg_data) ((amg_data)->relax_order)
#define hypre_ParAMGDataRelaxWeight(amg_data) ((amg_data)->relax_weight)
#define hypre_ParAMGDataOmega(amg_data) ((amg_data)->omega)
#define hypre_ParAMGDataOuterWt(amg_data) ((amg_data)->outer_wt)
/* problem data parameters */
#define hypre_ParAMGDataNumVariables(amg_data) ((amg_data)->num_variables)
#define hypre_ParAMGDataNumFunctions(amg_data) ((amg_data)->num_functions)
#define hypre_ParAMGDataNodal(amg_data) ((amg_data)->nodal)
#define hypre_ParAMGDataNodalLevels(amg_data) ((amg_data)->nodal_levels)
#define hypre_ParAMGDataNodalDiag(amg_data) ((amg_data)->nodal_diag)
#define hypre_ParAMGDataNumPoints(amg_data) ((amg_data)->num_points)
#define hypre_ParAMGDataDofFunc(amg_data) ((amg_data)->dof_func)
#define hypre_ParAMGDataDofPoint(amg_data) ((amg_data)->dof_point)
#define hypre_ParAMGDataPointDofMap(amg_data) ((amg_data)->point_dof_map)
/* data generated by the setup phase */
#define hypre_ParAMGDataCFMarkerArray(amg_data) ((amg_data)-> CF_marker_array)
#define hypre_ParAMGDataAArray(amg_data) ((amg_data)->A_array)
#define hypre_ParAMGDataFArray(amg_data) ((amg_data)->F_array)
#define hypre_ParAMGDataUArray(amg_data) ((amg_data)->U_array)
#define hypre_ParAMGDataPArray(amg_data) ((amg_data)->P_array)
#define hypre_ParAMGDataRArray(amg_data) ((amg_data)->R_array)
#define hypre_ParAMGDataDofFuncArray(amg_data) ((amg_data)->dof_func_array)
#define hypre_ParAMGDataDofPointArray(amg_data) ((amg_data)->dof_point_array)
#define hypre_ParAMGDataPointDofMapArray(amg_data) \
((amg_data)->point_dof_map_array)
#define hypre_ParAMGDataNumLevels(amg_data) ((amg_data)->num_levels)
#define hypre_ParAMGDataMaxEigEst(amg_data) ((amg_data)->max_eig_est)
#define hypre_ParAMGDataMinEigEst(amg_data) ((amg_data)->min_eig_est)
#define hypre_ParAMGDataChebyEigEst(amg_data) ((amg_data)->cheby_eig_est)
#define hypre_ParAMGDataChebyVariant(amg_data) ((amg_data)->cheby_variant)
#define hypre_ParAMGDataChebyScale(amg_data) ((amg_data)->cheby_scale)
#define hypre_ParAMGDataChebyOrder(amg_data) ((amg_data)->cheby_order)
#define hypre_ParAMGDataChebyFraction(amg_data) ((amg_data)->cheby_fraction)
#define hypre_ParAMGDataChebyDS(amg_data) ((amg_data)->cheby_ds)
#define hypre_ParAMGDataChebyCoefs(amg_data) ((amg_data)->cheby_coefs)
#define hypre_ParAMGDataBlockMode(amg_data) ((amg_data)->block_mode)
/* data generated in the solve phase */
#define hypre_ParAMGDataVtemp(amg_data) ((amg_data)->Vtemp)
#define hypre_ParAMGDataVtempLocal(amg_data) ((amg_data)->Vtemp_local)
#define hypre_ParAMGDataVtemplocalData(amg_data) ((amg_data)->Vtemp_local_data)
#define hypre_ParAMGDataCycleOpCount(amg_data) ((amg_data)->cycle_op_count)
#define hypre_ParAMGDataRtemp(amg_data) ((amg_data)->Rtemp)
#define hypre_ParAMGDataPtemp(amg_data) ((amg_data)->Ptemp)
#define hypre_ParAMGDataZtemp(amg_data) ((amg_data)->Ztemp)
/* fields used by GSMG */
/* NOTE(review): the hypre_ParAMGData struct above declares no 'gsmg' or
   'num_samples' members, so these macros would fail to compile at any use
   site.  Presumably vestigial from the full hypre library, where GSMG is
   supported — confirm before using, and consider removing if unused. */
#define hypre_ParAMGDataGSMG(amg_data) ((amg_data)->gsmg)
#define hypre_ParAMGDataNumSamples(amg_data) ((amg_data)->num_samples)
/* log info data */
#define hypre_ParAMGDataLogging(amg_data) ((amg_data)->logging)
#define hypre_ParAMGDataNumIterations(amg_data) ((amg_data)->num_iterations)
#ifdef CUMNUMIT
#define hypre_ParAMGDataCumNumIterations(amg_data) ((amg_data)->cum_num_iterations)
#endif
#define hypre_ParAMGDataRelativeResidualNorm(amg_data) ((amg_data)->rel_resid_norm)
#define hypre_ParAMGDataResidual(amg_data) ((amg_data)->residual)
/* output parameters */
#define hypre_ParAMGDataPrintLevel(amg_data) ((amg_data)->print_level)
#define hypre_ParAMGDataLogFileName(amg_data) ((amg_data)->log_file_name)
#define hypre_ParAMGDataDebugFlag(amg_data) ((amg_data)->debug_flag)
/* BUG FIX: the struct member is declared as 'cum_nnz_A_P'; this macro
   previously expanded to '->cum_nnz_AP', which is not a member of
   hypre_ParAMGData and would not compile at any use site. */
#define hypre_ParAMGDataCumNnzAP(amg_data) ((amg_data)->cum_nnz_A_P)
#define hypre_ParAMGDataCoarseSolver(amg_data) ((amg_data)->coarse_solver)
#define hypre_ParAMGDataACoarse(amg_data) ((amg_data)->A_coarse)
#define hypre_ParAMGDataFCoarse(amg_data) ((amg_data)->f_coarse)
#define hypre_ParAMGDataUCoarse(amg_data) ((amg_data)->u_coarse)
#define hypre_ParAMGDataNewComm(amg_data) ((amg_data)->new_comm)
#define hypre_ParAMGDataRedundant(amg_data) ((amg_data)->redundant)
#define hypre_ParAMGDataParticipate(amg_data) ((amg_data)->participate)
#define hypre_ParAMGDataAMat(amg_data) ((amg_data)->A_mat)
#define hypre_ParAMGDataBVec(amg_data) ((amg_data)->b_vec)
#define hypre_ParAMGDataCommInfo(amg_data) ((amg_data)->comm_info)
/* additive AMG parameters */
#define hypre_ParAMGDataAdditive(amg_data) ((amg_data)->additive)
#define hypre_ParAMGDataMultAdditive(amg_data) ((amg_data)->mult_additive)
#define hypre_ParAMGDataSimple(amg_data) ((amg_data)->simple)
#define hypre_ParAMGDataAddLastLvl(amg_data) ((amg_data)->add_last_lvl)
#define hypre_ParAMGDataMultAddPMaxElmts(amg_data) ((amg_data)->add_P_max_elmts)
#define hypre_ParAMGDataMultAddTruncFactor(amg_data) ((amg_data)->add_trunc_factor)
#define hypre_ParAMGDataAddRelaxType(amg_data) ((amg_data)->add_rlx_type)
#define hypre_ParAMGDataAddRelaxWt(amg_data) ((amg_data)->add_rlx_wt)
#define hypre_ParAMGDataLambda(amg_data) ((amg_data)->Lambda)
#define hypre_ParAMGDataAtilde(amg_data) ((amg_data)->Atilde)
#define hypre_ParAMGDataRtilde(amg_data) ((amg_data)->Rtilde)
#define hypre_ParAMGDataXtilde(amg_data) ((amg_data)->Xtilde)
#define hypre_ParAMGDataDinv(amg_data) ((amg_data)->D_inv)
/* non-Galerkin parameters */
#define hypre_ParAMGDataNonGalerkNumTol(amg_data) ((amg_data)->nongalerk_num_tol)
#define hypre_ParAMGDataNonGalerkTol(amg_data) ((amg_data)->nongalerk_tol)
#define hypre_ParAMGDataNonGalerkinTol(amg_data) ((amg_data)->nongalerkin_tol)
#define hypre_ParAMGDataNonGalTolArray(amg_data) ((amg_data)->nongal_tol_array)
#define hypre_ParAMGDataRAP2(amg_data) ((amg_data)->rap2)
#define hypre_ParAMGDataKeepTranspose(amg_data) ((amg_data)->keepTranspose)
#endif
| 14,437 | 41.589971 | 91 | h |
AMG | AMG-master/parcsr_ls/par_amg_setup.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
#define DEBUG 0
#define PRINT_CF 0
/*****************************************************************************
*
* Routine for driving the setup phase of AMG
*
*****************************************************************************/
/*****************************************************************************
* hypre_BoomerAMGSetup
*****************************************************************************/
HYPRE_Int
hypre_BoomerAMGSetup( void *amg_vdata,
hypre_ParCSRMatrix *A,
hypre_ParVector *f,
hypre_ParVector *u )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata;
/* Data Structure variables */
hypre_ParCSRMatrix **A_array;
hypre_ParVector **F_array;
hypre_ParVector **U_array;
hypre_ParVector *Vtemp = NULL;
hypre_ParVector *Rtemp = NULL;
hypre_ParVector *Ptemp = NULL;
hypre_ParVector *Ztemp = NULL;
hypre_ParCSRMatrix **P_array;
hypre_ParVector *Residual_array;
HYPRE_Int **CF_marker_array;
HYPRE_Int **dof_func_array;
HYPRE_Int *dof_func;
HYPRE_Int *col_offd_S_to_A;
HYPRE_Int *col_offd_SN_to_AN;
HYPRE_Real *relax_weight;
HYPRE_Real *omega;
HYPRE_Real schwarz_relax_wt = 1;
HYPRE_Real strong_threshold;
HYPRE_Real max_row_sum;
HYPRE_Real trunc_factor;
HYPRE_Real agg_trunc_factor, agg_P12_trunc_factor;
HYPRE_Real S_commpkg_switch;
HYPRE_Int relax_order;
HYPRE_Int max_levels;
HYPRE_Int amg_logging;
HYPRE_Int amg_print_level;
HYPRE_Int debug_flag;
HYPRE_Int dbg_flg;
HYPRE_Int local_num_vars;
HYPRE_Int P_max_elmts;
HYPRE_Int agg_P_max_elmts;
HYPRE_Int agg_P12_max_elmts;
HYPRE_Int mult_additive = hypre_ParAMGDataMultAdditive(amg_data);
HYPRE_Int additive = hypre_ParAMGDataAdditive(amg_data);
HYPRE_Int simple = hypre_ParAMGDataSimple(amg_data);
HYPRE_Int add_last_lvl = hypre_ParAMGDataAddLastLvl(amg_data);
HYPRE_Int add_P_max_elmts = hypre_ParAMGDataMultAddPMaxElmts(amg_data);
HYPRE_Real add_trunc_factor = hypre_ParAMGDataMultAddTruncFactor(amg_data);
HYPRE_Int add_rlx = hypre_ParAMGDataAddRelaxType(amg_data);
HYPRE_Real add_rlx_wt = hypre_ParAMGDataAddRelaxWt(amg_data);
/* Local variables */
HYPRE_Int *CF_marker;
HYPRE_Int *CFN_marker;
HYPRE_Int *CF2_marker;
hypre_ParCSRMatrix *S = NULL;
hypre_ParCSRMatrix *S2;
hypre_ParCSRMatrix *SN = NULL;
hypre_ParCSRMatrix *SCR;
hypre_ParCSRMatrix *P = NULL;
hypre_ParCSRMatrix *A_H;
hypre_ParCSRMatrix *AN = NULL;
hypre_ParCSRMatrix *P1;
hypre_ParCSRMatrix *P2;
hypre_ParCSRMatrix *Pnew = NULL;
HYPRE_Real *SmoothVecs = NULL;
HYPRE_Real **l1_norms = NULL;
HYPRE_Real **cheby_ds = NULL;
HYPRE_Real **cheby_coefs = NULL;
HYPRE_Int old_num_levels, num_levels;
HYPRE_Int level;
HYPRE_Int local_size, i;
HYPRE_Int first_local_row;
HYPRE_Int coarse_size;
HYPRE_Int coarsen_type;
HYPRE_Int measure_type;
HYPRE_Int setup_type;
HYPRE_Int fine_size;
HYPRE_Int rest, tms, indx;
HYPRE_Real size;
HYPRE_Int not_finished_coarsening = 1;
HYPRE_Int coarse_threshold = hypre_ParAMGDataMaxCoarseSize(amg_data);
HYPRE_Int min_coarse_size = hypre_ParAMGDataMinCoarseSize(amg_data);
HYPRE_Int seq_threshold = hypre_ParAMGDataSeqThreshold(amg_data);
HYPRE_Int j, k;
HYPRE_Int num_procs,my_id,num_threads;
HYPRE_Int *grid_relax_type = hypre_ParAMGDataGridRelaxType(amg_data);
HYPRE_Int num_functions = hypre_ParAMGDataNumFunctions(amg_data);
HYPRE_Int nodal = hypre_ParAMGDataNodal(amg_data);
HYPRE_Int num_paths = hypre_ParAMGDataNumPaths(amg_data);
HYPRE_Int agg_num_levels = hypre_ParAMGDataAggNumLevels(amg_data);
HYPRE_Int agg_interp_type = hypre_ParAMGDataAggInterpType(amg_data);
HYPRE_Int sep_weight = hypre_ParAMGDataSepWeight(amg_data);
HYPRE_Int *coarse_dof_func = NULL;
HYPRE_Int *coarse_pnts_global;
HYPRE_Int *coarse_pnts_global1;
HYPRE_Int num_cg_sweeps;
HYPRE_Real *max_eig_est = NULL;
HYPRE_Real *min_eig_est = NULL;
HYPRE_Int interp_type;
/* parameters for non-Galerkin stuff */
HYPRE_Int nongalerk_num_tol = hypre_ParAMGDataNonGalerkNumTol (amg_data);
HYPRE_Real *nongalerk_tol = hypre_ParAMGDataNonGalerkTol (amg_data);
HYPRE_Real nongalerk_tol_l = 0.0;
HYPRE_Real *nongal_tol_array = hypre_ParAMGDataNonGalTolArray (amg_data);
HYPRE_Int block_mode = 0;
HYPRE_Int mult_addlvl = hypre_max(mult_additive, simple);
HYPRE_Int addlvl = hypre_max(mult_addlvl, additive);
HYPRE_Int rap2 = hypre_ParAMGDataRAP2(amg_data);
HYPRE_Int keepTranspose = hypre_ParAMGDataKeepTranspose(amg_data);
HYPRE_Int *num_grid_sweeps = hypre_ParAMGDataNumGridSweeps(amg_data);
HYPRE_Int ns = num_grid_sweeps[1];
HYPRE_Real wall_time; /* for debugging instrumentation */
HYPRE_Int add_end;
HYPRE_Real cum_nnz_AP;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
old_num_levels = hypre_ParAMGDataNumLevels(amg_data);
max_levels = hypre_ParAMGDataMaxLevels(amg_data);
add_end = hypre_min(add_last_lvl, max_levels-1);
if (add_end == -1) add_end = max_levels-1;
amg_logging = hypre_ParAMGDataLogging(amg_data);
amg_print_level = hypre_ParAMGDataPrintLevel(amg_data);
coarsen_type = hypre_ParAMGDataCoarsenType(amg_data);
measure_type = hypre_ParAMGDataMeasureType(amg_data);
setup_type = hypre_ParAMGDataSetupType(amg_data);
debug_flag = hypre_ParAMGDataDebugFlag(amg_data);
relax_weight = hypre_ParAMGDataRelaxWeight(amg_data);
omega = hypre_ParAMGDataOmega(amg_data);
dof_func = hypre_ParAMGDataDofFunc(amg_data);
interp_type = hypre_ParAMGDataInterpType(amg_data);
relax_order = hypre_ParAMGDataRelaxOrder(amg_data);
hypre_ParCSRMatrixSetNumNonzeros(A);
hypre_ParCSRMatrixSetDNumNonzeros(A);
hypre_ParAMGDataNumVariables(amg_data) = hypre_ParCSRMatrixNumRows(A);
if (num_procs == 1) seq_threshold = 0;
if (setup_type == 0) return hypre_error_flag;
S = NULL;
A_array = hypre_ParAMGDataAArray(amg_data);
P_array = hypre_ParAMGDataPArray(amg_data);
CF_marker_array = hypre_ParAMGDataCFMarkerArray(amg_data);
dof_func_array = hypre_ParAMGDataDofFuncArray(amg_data);
local_size = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
grid_relax_type[3] = hypre_ParAMGDataUserCoarseRelaxType(amg_data);
/* change in definition of standard and multipass interpolation, by
eliminating interp_type 9 and 5 and setting sep_weight instead
when using separation of weights option */
if (interp_type == 9)
{
interp_type = 8;
sep_weight = 1;
}
else if (interp_type == 5)
{
interp_type = 4;
sep_weight = 1;
}
/* free up storage in case of new setup without prvious destroy */
if (A_array || P_array || CF_marker_array || dof_func_array)
{
for (j = 1; j < old_num_levels; j++)
{
if (A_array[j])
{
hypre_ParCSRMatrixDestroy(A_array[j]);
A_array[j] = NULL;
}
if (dof_func_array[j])
{
hypre_TFree(dof_func_array[j]);
dof_func_array[j] = NULL;
}
}
for (j = 0; j < old_num_levels-1; j++)
{
if (P_array[j])
{
hypre_ParCSRMatrixDestroy(P_array[j]);
P_array[j] = NULL;
}
}
/* Special case use of CF_marker_array when old_num_levels == 1
requires us to attempt this deallocation every time */
if (CF_marker_array[0])
{
hypre_TFree(CF_marker_array[0]);
CF_marker_array[0] = NULL;
}
for (j = 1; j < old_num_levels-1; j++)
{
if (CF_marker_array[j])
{
hypre_TFree(CF_marker_array[j]);
CF_marker_array[j] = NULL;
}
}
}
{
MPI_Comm new_comm = hypre_ParAMGDataNewComm(amg_data);
void *amg = hypre_ParAMGDataCoarseSolver(amg_data);
if (hypre_ParAMGDataRtemp(amg_data))
{
hypre_ParVectorDestroy(hypre_ParAMGDataRtemp(amg_data));
hypre_ParAMGDataRtemp(amg_data) = NULL;
}
if (hypre_ParAMGDataPtemp(amg_data))
{
hypre_ParVectorDestroy(hypre_ParAMGDataPtemp(amg_data));
hypre_ParAMGDataPtemp(amg_data) = NULL;
}
if (hypre_ParAMGDataZtemp(amg_data))
{
hypre_ParVectorDestroy(hypre_ParAMGDataZtemp(amg_data));
hypre_ParAMGDataZtemp(amg_data) = NULL;
}
if (hypre_ParAMGDataACoarse(amg_data))
{
hypre_ParCSRMatrixDestroy(hypre_ParAMGDataACoarse(amg_data));
hypre_ParAMGDataACoarse(amg_data) = NULL;
}
if (hypre_ParAMGDataUCoarse(amg_data))
{
hypre_ParVectorDestroy(hypre_ParAMGDataUCoarse(amg_data));
hypre_ParAMGDataUCoarse(amg_data) = NULL;
}
if (hypre_ParAMGDataFCoarse(amg_data))
{
hypre_ParVectorDestroy(hypre_ParAMGDataFCoarse(amg_data));
hypre_ParAMGDataFCoarse(amg_data) = NULL;
}
if (hypre_ParAMGDataAMat(amg_data))
{
hypre_TFree(hypre_ParAMGDataAMat(amg_data));
hypre_ParAMGDataAMat(amg_data) = NULL;
}
if (hypre_ParAMGDataBVec(amg_data))
{
hypre_TFree(hypre_ParAMGDataBVec(amg_data));
hypre_ParAMGDataBVec(amg_data) = NULL;
}
if (hypre_ParAMGDataCommInfo(amg_data))
{
hypre_TFree(hypre_ParAMGDataCommInfo(amg_data));
hypre_ParAMGDataCommInfo(amg_data) = NULL;
}
if (new_comm != hypre_MPI_COMM_NULL)
{
hypre_MPI_Comm_free (&new_comm);
hypre_ParAMGDataNewComm(amg_data) = hypre_MPI_COMM_NULL;
}
if (amg)
{
hypre_BoomerAMGDestroy (amg);
hypre_ParAMGDataCoarseSolver(amg_data) = NULL;
}
if (hypre_ParAMGDataMaxEigEst(amg_data))
{
hypre_TFree(hypre_ParAMGDataMaxEigEst(amg_data));
hypre_ParAMGDataMaxEigEst(amg_data) = NULL;
}
if (hypre_ParAMGDataMinEigEst(amg_data))
{
hypre_TFree(hypre_ParAMGDataMinEigEst(amg_data));
hypre_ParAMGDataMinEigEst(amg_data) = NULL;
}
if (hypre_ParAMGDataL1Norms(amg_data))
{
for (i=0; i < old_num_levels; i++)
if (hypre_ParAMGDataL1Norms(amg_data)[i])
hypre_TFree(hypre_ParAMGDataL1Norms(amg_data)[i]);
hypre_TFree(hypre_ParAMGDataL1Norms(amg_data));
}
if ( hypre_ParAMGDataResidual(amg_data) ) {
hypre_ParVectorDestroy( hypre_ParAMGDataResidual(amg_data) );
hypre_ParAMGDataResidual(amg_data) = NULL;
}
}
if (A_array == NULL)
A_array = hypre_CTAlloc(hypre_ParCSRMatrix*, max_levels);
if (P_array == NULL && max_levels > 1)
P_array = hypre_CTAlloc(hypre_ParCSRMatrix*, max_levels-1);
if (CF_marker_array == NULL)
CF_marker_array = hypre_CTAlloc(HYPRE_Int*, max_levels);
if (dof_func_array == NULL)
dof_func_array = hypre_CTAlloc(HYPRE_Int*, max_levels);
if (num_functions > 1 && dof_func == NULL)
{
first_local_row = hypre_ParCSRMatrixFirstRowIndex(A);
dof_func = hypre_CTAlloc(HYPRE_Int,local_size);
rest = first_local_row-((first_local_row/num_functions)*num_functions);
indx = num_functions-rest;
if (rest == 0) indx = 0;
k = num_functions - 1;
for (j = indx-1; j > -1; j--)
dof_func[j] = k--;
tms = local_size/num_functions;
if (tms*num_functions+indx > local_size) tms--;
for (j=0; j < tms; j++)
{
for (k=0; k < num_functions; k++)
dof_func[indx++] = k;
}
k = 0;
while (indx < local_size)
dof_func[indx++] = k++;
hypre_ParAMGDataDofFunc(amg_data) = dof_func;
}
A_array[0] = A;
dof_func_array[0] = dof_func;
hypre_ParAMGDataCFMarkerArray(amg_data) = CF_marker_array;
hypre_ParAMGDataDofFuncArray(amg_data) = dof_func_array;
hypre_ParAMGDataAArray(amg_data) = A_array;
hypre_ParAMGDataPArray(amg_data) = P_array;
hypre_ParAMGDataRArray(amg_data) = P_array;
Vtemp = hypre_ParAMGDataVtemp(amg_data);
if (Vtemp != NULL)
{
hypre_ParVectorDestroy(Vtemp);
Vtemp = NULL;
}
Vtemp = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_array[0]),
hypre_ParCSRMatrixGlobalNumRows(A_array[0]),
hypre_ParCSRMatrixRowStarts(A_array[0]));
hypre_ParVectorInitialize(Vtemp);
hypre_ParVectorSetPartitioningOwner(Vtemp,0);
hypre_ParAMGDataVtemp(amg_data) = Vtemp;
if (relax_weight[0] < 0 || omega[0] < 0)
{
Ptemp = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_array[0]),
hypre_ParCSRMatrixGlobalNumRows(A_array[0]),
hypre_ParCSRMatrixRowStarts(A_array[0]));
hypre_ParVectorInitialize(Ptemp);
hypre_ParVectorSetPartitioningOwner(Ptemp,0);
hypre_ParAMGDataPtemp(amg_data) = Ptemp;
Rtemp = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_array[0]),
hypre_ParCSRMatrixGlobalNumRows(A_array[0]),
hypre_ParCSRMatrixRowStarts(A_array[0]));
hypre_ParVectorInitialize(Rtemp);
hypre_ParVectorSetPartitioningOwner(Rtemp,0);
hypre_ParAMGDataRtemp(amg_data) = Rtemp;
}
/* See if we need the Ztemp vector */
if (relax_weight[0] < 0 || omega[0] < 0)
{
Ztemp = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_array[0]),
hypre_ParCSRMatrixGlobalNumRows(A_array[0]),
hypre_ParCSRMatrixRowStarts(A_array[0]));
hypre_ParVectorInitialize(Ztemp);
hypre_ParVectorSetPartitioningOwner(Ztemp,0);
hypre_ParAMGDataZtemp(amg_data) = Ztemp;
}
else if (grid_relax_type[0] == 16 || grid_relax_type[1] == 16 || grid_relax_type[2] == 16 || grid_relax_type[3] == 16)
{
/* Chebyshev */
Ztemp = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_array[0]),
hypre_ParCSRMatrixGlobalNumRows(A_array[0]),
hypre_ParCSRMatrixRowStarts(A_array[0]));
hypre_ParVectorInitialize(Ztemp);
hypre_ParVectorSetPartitioningOwner(Ztemp,0);
hypre_ParAMGDataZtemp(amg_data) = Ztemp;
}
else if (num_threads > 1)
{
/* we need the temp Z vector for relaxation 3 and 6 now if we are
* using threading */
for (j = 1; j < 4; j++)
{
if (grid_relax_type[j] == 3 || grid_relax_type[j] == 4 || grid_relax_type[j] == 6 ||
grid_relax_type[j] == 8 || grid_relax_type[j] == 13 || grid_relax_type[j] == 14)
{
Ztemp = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_array[0]),
hypre_ParCSRMatrixGlobalNumRows(A_array[0]),
hypre_ParCSRMatrixRowStarts(A_array[0]));
hypre_ParVectorInitialize(Ztemp);
hypre_ParVectorSetPartitioningOwner(Ztemp,0);
hypre_ParAMGDataZtemp(amg_data) = Ztemp;
break;
}
}
}
F_array = hypre_ParAMGDataFArray(amg_data);
U_array = hypre_ParAMGDataUArray(amg_data);
if (F_array != NULL || U_array != NULL)
{
for (j = 1; j < old_num_levels; j++)
{
if (F_array[j] != NULL)
{
hypre_ParVectorDestroy(F_array[j]);
F_array[j] = NULL;
}
if (U_array[j] != NULL)
{
hypre_ParVectorDestroy(U_array[j]);
U_array[j] = NULL;
}
}
}
if (F_array == NULL)
F_array = hypre_CTAlloc(hypre_ParVector*, max_levels);
if (U_array == NULL)
U_array = hypre_CTAlloc(hypre_ParVector*, max_levels);
F_array[0] = f;
U_array[0] = u;
hypre_ParAMGDataFArray(amg_data) = F_array;
hypre_ParAMGDataUArray(amg_data) = U_array;
/*----------------------------------------------------------
* Initialize hypre_ParAMGData
*----------------------------------------------------------*/
not_finished_coarsening = 1;
level = 0;
strong_threshold = hypre_ParAMGDataStrongThreshold(amg_data);
max_row_sum = hypre_ParAMGDataMaxRowSum(amg_data);
trunc_factor = hypre_ParAMGDataTruncFactor(amg_data);
agg_trunc_factor = hypre_ParAMGDataAggTruncFactor(amg_data);
agg_P12_trunc_factor = hypre_ParAMGDataAggP12TruncFactor(amg_data);
P_max_elmts = hypre_ParAMGDataPMaxElmts(amg_data);
agg_P_max_elmts = hypre_ParAMGDataAggPMaxElmts(amg_data);
agg_P12_max_elmts = hypre_ParAMGDataAggP12MaxElmts(amg_data);
S_commpkg_switch = hypre_ParAMGDataSCommPkgSwitch(amg_data);
/*-----------------------------------------------------
* Enter Coarsening Loop
*-----------------------------------------------------*/
while (not_finished_coarsening)
{
fine_size = hypre_ParCSRMatrixGlobalNumRows(A_array[level]);
if (level > 0)
{
if (block_mode == 0)
{
F_array[level] =
hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_array[level]),
hypre_ParCSRMatrixGlobalNumRows(A_array[level]),
hypre_ParCSRMatrixRowStarts(A_array[level]));
hypre_ParVectorInitialize(F_array[level]);
hypre_ParVectorSetPartitioningOwner(F_array[level],0);
U_array[level] =
hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_array[level]),
hypre_ParCSRMatrixGlobalNumRows(A_array[level]),
hypre_ParCSRMatrixRowStarts(A_array[level]));
hypre_ParVectorInitialize(U_array[level]);
hypre_ParVectorSetPartitioningOwner(U_array[level],0);
}
}
/*-------------------------------------------------------------
* Select coarse-grid points on 'level' : returns CF_marker
* for the level. Returns strength matrix, S
*--------------------------------------------------------------*/
if (debug_flag==1) wall_time = time_getWallclockSeconds();
if (debug_flag==3)
{
hypre_printf("\n ===== Proc = %d Level = %d =====\n",
my_id, level);
fflush(NULL);
}
if ( max_levels == 1)
{
S = NULL;
coarse_pnts_global = NULL;
CF_marker = hypre_CTAlloc(HYPRE_Int, local_size );
for (i=0; i < local_size ; i++)
CF_marker[i] = 1;
/* AB removed below - already allocated */
/* CF_marker_array = hypre_CTAlloc(HYPRE_Int*, 1);*/
CF_marker_array[level] = CF_marker;
coarse_size = fine_size;
}
else /* max_levels > 1 */
{
if (block_mode == 0)
{
local_num_vars =
hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_array[level]));
}
/**** Get the Strength Matrix ****/
if (nodal == 0)
{
hypre_BoomerAMGCreateS(A_array[level],
strong_threshold, max_row_sum,
num_functions, dof_func_array[level],&S);
col_offd_S_to_A = NULL;
if (strong_threshold > S_commpkg_switch)
hypre_BoomerAMGCreateSCommPkg(A_array[level],S,
&col_offd_S_to_A);
}
/**** Do the appropriate coarsening ****/
if (nodal == 0) /* no nodal coarsening */
{
if (coarsen_type == 6)
hypre_BoomerAMGCoarsenFalgout(S, A_array[level], measure_type,
debug_flag, &CF_marker);
else if (coarsen_type == 7)
hypre_BoomerAMGCoarsen(S, A_array[level], 2,
debug_flag, &CF_marker);
else if (coarsen_type == 8)
hypre_BoomerAMGCoarsenPMIS(S, A_array[level], 0,
debug_flag, &CF_marker);
else if (coarsen_type == 9)
hypre_BoomerAMGCoarsenPMIS(S, A_array[level], 2,
debug_flag, &CF_marker);
else if (coarsen_type == 10)
hypre_BoomerAMGCoarsenHMIS(S, A_array[level], measure_type,
debug_flag, &CF_marker);
else if (coarsen_type)
hypre_BoomerAMGCoarsenRuge(S, A_array[level],
measure_type, coarsen_type, debug_flag, &CF_marker);
else
hypre_BoomerAMGCoarsen(S, A_array[level], 0,
debug_flag, &CF_marker);
if (level < agg_num_levels)
{
hypre_BoomerAMGCoarseParms(comm, local_num_vars,
1, dof_func_array[level], CF_marker,
&coarse_dof_func,&coarse_pnts_global1);
hypre_BoomerAMGCreate2ndS (S, CF_marker, num_paths,
coarse_pnts_global1, &S2);
if (coarsen_type == 10)
hypre_BoomerAMGCoarsenHMIS(S2, S2, measure_type+3,
debug_flag, &CFN_marker);
else if (coarsen_type == 8)
hypre_BoomerAMGCoarsenPMIS(S2, S2, 3,
debug_flag, &CFN_marker);
else if (coarsen_type == 9)
hypre_BoomerAMGCoarsenPMIS(S2, S2, 4,
debug_flag, &CFN_marker);
else if (coarsen_type == 6)
hypre_BoomerAMGCoarsenFalgout(S2, S2, measure_type,
debug_flag, &CFN_marker);
else if (coarsen_type == 7)
hypre_BoomerAMGCoarsen(S2, S2, 2, debug_flag, &CFN_marker);
else if (coarsen_type)
hypre_BoomerAMGCoarsenRuge(S2, S2, measure_type, coarsen_type,
debug_flag, &CFN_marker);
else
hypre_BoomerAMGCoarsen(S2, S2, 0, debug_flag, &CFN_marker);
hypre_ParCSRMatrixDestroy(S2);
}
}
/*****xxxxxxxxxxxxx changes for min_coarse_size */
/* here we will determine the coarse grid size to be able to determine if it is not smaller
than requested minimal size */
if (level >= agg_num_levels)
{
if (block_mode == 0 )
{
hypre_BoomerAMGCoarseParms(comm, local_num_vars,
num_functions, dof_func_array[level], CF_marker,
&coarse_dof_func,&coarse_pnts_global);
}
#ifdef HYPRE_NO_GLOBAL_PARTITION
if (my_id == (num_procs -1)) coarse_size = coarse_pnts_global[1];
hypre_MPI_Bcast(&coarse_size, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
coarse_size = coarse_pnts_global[num_procs];
#endif
/* if no coarse-grid, stop coarsening, and set the
* coarsest solve to be a single sweep of default smoother or smoother set by user */
if ((coarse_size == 0) || (coarse_size == fine_size))
{
HYPRE_Int *num_grid_sweeps = hypre_ParAMGDataNumGridSweeps(amg_data);
HYPRE_Int **grid_relax_points = hypre_ParAMGDataGridRelaxPoints(amg_data);
if (grid_relax_type[3] == 9 || grid_relax_type[3] == 99
|| grid_relax_type[3] == 19 || grid_relax_type[3] == 98)
{
grid_relax_type[3] = grid_relax_type[0];
num_grid_sweeps[3] = 1;
if (grid_relax_points) grid_relax_points[3][0] = 0;
}
if (S) hypre_ParCSRMatrixDestroy(S);
if (SN) hypre_ParCSRMatrixDestroy(SN);
if (AN) hypre_ParCSRMatrixDestroy(AN);
hypre_TFree(CF_marker);
hypre_TFree(coarse_pnts_global);
if (level > 0)
{
/* note special case treatment of CF_marker is necessary
* to do CF relaxation correctly when num_levels = 1 */
hypre_TFree(CF_marker_array[level]);
hypre_ParVectorDestroy(F_array[level]);
hypre_ParVectorDestroy(U_array[level]);
}
coarse_size = fine_size;
break;
}
if (coarse_size < min_coarse_size)
{
if (S) hypre_ParCSRMatrixDestroy(S);
if (SN) hypre_ParCSRMatrixDestroy(SN);
if (AN) hypre_ParCSRMatrixDestroy(AN);
if (num_functions > 1) hypre_TFree(coarse_dof_func);
hypre_TFree(CF_marker);
hypre_TFree(coarse_pnts_global);
if (level > 0)
{
hypre_ParVectorDestroy(F_array[level]);
hypre_ParVectorDestroy(U_array[level]);
}
coarse_size = fine_size;
break;
}
}
/*****xxxxxxxxxxxxx changes for min_coarse_size end */
if (level < agg_num_levels)
{
if (nodal == 0)
{
if (agg_interp_type == 1)
hypre_BoomerAMGBuildExtPIInterp(A_array[level],
CF_marker, S, coarse_pnts_global1,
num_functions, dof_func_array[level], debug_flag,
agg_P12_trunc_factor, agg_P12_max_elmts, col_offd_S_to_A, &P1);
else if (agg_interp_type == 2)
hypre_BoomerAMGBuildStdInterp(A_array[level],
CF_marker, S, coarse_pnts_global1,
num_functions, dof_func_array[level], debug_flag,
agg_P12_trunc_factor, agg_P12_max_elmts, 0, col_offd_S_to_A, &P1);
else if (agg_interp_type == 3)
hypre_BoomerAMGBuildExtInterp(A_array[level],
CF_marker, S, coarse_pnts_global1,
num_functions, dof_func_array[level], debug_flag,
agg_P12_trunc_factor, agg_P12_max_elmts, col_offd_S_to_A, &P1);
if (agg_interp_type == 4)
{
hypre_BoomerAMGCorrectCFMarker (CF_marker, local_num_vars,
CFN_marker);
hypre_TFree(coarse_pnts_global1);
/*hypre_TFree(coarse_dof_func);
coarse_dof_func = NULL;*/
hypre_TFree(CFN_marker);
hypre_BoomerAMGCoarseParms(comm, local_num_vars,
num_functions, dof_func_array[level], CF_marker,
&coarse_dof_func,&coarse_pnts_global);
hypre_BoomerAMGBuildMultipass(A_array[level],
CF_marker, S, coarse_pnts_global,
num_functions, dof_func_array[level], debug_flag,
agg_trunc_factor, agg_P_max_elmts, sep_weight,
col_offd_S_to_A, &P);
}
else
{
hypre_BoomerAMGCorrectCFMarker2 (CF_marker, local_num_vars,
CFN_marker);
hypre_TFree(CFN_marker);
/*hypre_TFree(coarse_dof_func);
coarse_dof_func = NULL;*/
hypre_BoomerAMGCoarseParms(comm, local_num_vars,
num_functions, dof_func_array[level], CF_marker,
&coarse_dof_func,&coarse_pnts_global);
/*if (num_functions > 1 && nodal > -1 && (!block_mode) )
dof_func_array[level+1] = coarse_dof_func;*/
hypre_TFree(col_offd_S_to_A);
if (agg_interp_type == 1)
hypre_BoomerAMGBuildPartialExtPIInterp(A_array[level],
CF_marker, S, coarse_pnts_global,
coarse_pnts_global1, num_functions,
dof_func_array[level], debug_flag, agg_P12_trunc_factor,
agg_P12_max_elmts, col_offd_S_to_A, &P2);
else if (agg_interp_type == 2)
hypre_BoomerAMGBuildPartialStdInterp(A_array[level],
CF_marker, S, coarse_pnts_global,
coarse_pnts_global1, num_functions,
dof_func_array[level], debug_flag, agg_P12_trunc_factor,
agg_P12_max_elmts, sep_weight, col_offd_S_to_A, &P2);
else if (agg_interp_type == 3)
hypre_BoomerAMGBuildPartialExtInterp(A_array[level],
CF_marker, S, coarse_pnts_global,
coarse_pnts_global1, num_functions,
dof_func_array[level], debug_flag, agg_P12_trunc_factor,
agg_P12_max_elmts, col_offd_S_to_A, &P2);
P = hypre_ParMatmul(P1,P2);
hypre_BoomerAMGInterpTruncation(P, agg_trunc_factor,
agg_P_max_elmts);
hypre_MatvecCommPkgCreate(P);
hypre_ParCSRMatrixDestroy(P1);
hypre_ParCSRMatrixOwnsColStarts(P2) = 0;
hypre_ParCSRMatrixDestroy(P2);
hypre_ParCSRMatrixOwnsColStarts(P) = 1;
}
}
#ifdef HYPRE_NO_GLOBAL_PARTITION
if (my_id == (num_procs -1)) coarse_size = coarse_pnts_global[1];
hypre_MPI_Bcast(&coarse_size, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
coarse_size = coarse_pnts_global[num_procs];
#endif
}
else /* no aggressive coarsening */
{
/**** Get the coarse parameters ****/
/* xxxxxxxxxxxxxxxxxxxxxxxxx change for min_coarse_size
if (block_mode )
{
hypre_BoomerAMGCoarseParms(comm,
hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(AN)),
1, NULL, CF_marker, NULL, &coarse_pnts_global);
}
else
{
hypre_BoomerAMGCoarseParms(comm, local_num_vars,
num_functions, dof_func_array[level], CF_marker,
&coarse_dof_func,&coarse_pnts_global);
}
#ifdef HYPRE_NO_GLOBAL_PARTITION
if (my_id == (num_procs -1)) coarse_size = coarse_pnts_global[1];
hypre_MPI_Bcast(&coarse_size, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
coarse_size = coarse_pnts_global[num_procs];
#endif
xxxxxxxxxxxxxxxxxxxxxxxxx change for min_coarse_size */
if (debug_flag==1)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Level = %d Coarsen Time = %f\n",
my_id,level, wall_time);
fflush(NULL);
}
if (debug_flag==1) wall_time = time_getWallclockSeconds();
if (interp_type == 4)
{
hypre_BoomerAMGBuildMultipass(A_array[level], CF_marker,
S, coarse_pnts_global, num_functions, dof_func_array[level],
debug_flag, trunc_factor, P_max_elmts, sep_weight, col_offd_S_to_A, &P);
hypre_TFree(col_offd_S_to_A);
}
else if (interp_type == 2)
{
hypre_BoomerAMGBuildInterpHE(A_array[level], CF_marker,
S, coarse_pnts_global, num_functions, dof_func_array[level],
debug_flag, trunc_factor, P_max_elmts, col_offd_S_to_A, &P);
hypre_TFree(col_offd_S_to_A);
}
else if (interp_type == 3)
{
hypre_BoomerAMGBuildDirInterp(A_array[level], CF_marker,
S, coarse_pnts_global, num_functions, dof_func_array[level],
debug_flag, trunc_factor, P_max_elmts, col_offd_S_to_A, &P);
hypre_TFree(col_offd_S_to_A);
}
else if (interp_type == 6) /*Extended+i classical interpolation */
{
hypre_BoomerAMGBuildExtPIInterp(A_array[level], CF_marker,
S, coarse_pnts_global, num_functions, dof_func_array[level],
debug_flag, trunc_factor, P_max_elmts, col_offd_S_to_A, &P);
hypre_TFree(col_offd_S_to_A);
}
else if (interp_type == 14) /*Extended classical interpolation */
{
hypre_BoomerAMGBuildExtInterp(A_array[level], CF_marker,
S, coarse_pnts_global, num_functions, dof_func_array[level],
debug_flag, trunc_factor, P_max_elmts, col_offd_S_to_A, &P);
hypre_TFree(col_offd_S_to_A);
}
else if (interp_type == 7) /*Extended+i (if no common C) interpolation */
{
hypre_BoomerAMGBuildExtPICCInterp(A_array[level], CF_marker,
S, coarse_pnts_global, num_functions, dof_func_array[level],
debug_flag, trunc_factor, P_max_elmts, col_offd_S_to_A, &P);
hypre_TFree(col_offd_S_to_A);
}
else if (interp_type == 12) /*FF interpolation */
{
hypre_BoomerAMGBuildFFInterp(A_array[level], CF_marker,
S, coarse_pnts_global, num_functions, dof_func_array[level],
debug_flag, trunc_factor, P_max_elmts, col_offd_S_to_A, &P);
hypre_TFree(col_offd_S_to_A);
}
else if (interp_type == 13) /*FF1 interpolation */
{
hypre_BoomerAMGBuildFF1Interp(A_array[level], CF_marker,
S, coarse_pnts_global, num_functions, dof_func_array[level],
debug_flag, trunc_factor, P_max_elmts, col_offd_S_to_A, &P);
hypre_TFree(col_offd_S_to_A);
}
else if (interp_type == 8) /*Standard interpolation */
{
hypre_BoomerAMGBuildStdInterp(A_array[level], CF_marker,
S, coarse_pnts_global, num_functions, dof_func_array[level],
debug_flag, trunc_factor, P_max_elmts, sep_weight, col_offd_S_to_A, &P);
hypre_TFree(col_offd_S_to_A);
}
else
{
if (block_mode == 0)
{
if (nodal > -1) /* non-systems, or systems with unknown approach interpolation*/
{
/* if systems, do we want to use an interp. that uses the full strength matrix?*/
if ( (num_functions > 1) && (interp_type == 19 || interp_type == 18 || interp_type == 17 || interp_type == 16))
{
/* so create a second strength matrix and build interp with with num_functions = 1 */
hypre_BoomerAMGCreateS(A_array[level],
strong_threshold, max_row_sum,
1, dof_func_array[level],&S2);
col_offd_S_to_A = NULL;
switch (interp_type)
{
case 19:
dbg_flg = debug_flag;
if (amg_print_level) dbg_flg = -debug_flag;
hypre_BoomerAMGBuildInterp(A_array[level], CF_marker,
S2, coarse_pnts_global, 1,
dof_func_array[level],
dbg_flg, trunc_factor, P_max_elmts, col_offd_S_to_A, &P);
break;
case 18:
hypre_BoomerAMGBuildStdInterp(A_array[level], CF_marker,
S2, coarse_pnts_global, 1, dof_func_array[level],
debug_flag, trunc_factor, P_max_elmts, 0, col_offd_S_to_A, &P);
break;
case 17:
hypre_BoomerAMGBuildExtPIInterp(A_array[level], CF_marker,
S2, coarse_pnts_global, 1, dof_func_array[level],
debug_flag, trunc_factor, P_max_elmts, col_offd_S_to_A, &P);
break;
case 16:
dbg_flg = debug_flag;
if (amg_print_level) dbg_flg = -debug_flag;
hypre_BoomerAMGBuildInterpModUnk(A_array[level], CF_marker,
S2, coarse_pnts_global, num_functions, dof_func_array[level],
dbg_flg, trunc_factor, P_max_elmts, col_offd_S_to_A, &P);
break;
}
hypre_ParCSRMatrixDestroy(S2);
}
else /* one function only or unknown-based interpolation- */
{
dbg_flg = debug_flag;
if (amg_print_level) dbg_flg = -debug_flag;
hypre_BoomerAMGBuildInterp(A_array[level], CF_marker,
S, coarse_pnts_global, num_functions,
dof_func_array[level],
dbg_flg, trunc_factor, P_max_elmts, col_offd_S_to_A, &P);
}
hypre_TFree(col_offd_S_to_A);
}
}
}
} /* end of no aggressive coarsening */
/*dof_func_array[level+1] = NULL;
if (num_functions > 1 && nodal > -1 && (!block_mode) )
dof_func_array[level+1] = coarse_dof_func;*/
/* store the CF array */
CF_marker_array[level] = CF_marker;
dof_func_array[level+1] = NULL;
if (num_functions > 1 && nodal > -1 && (!block_mode) )
dof_func_array[level+1] = coarse_dof_func;
} /* end of if max_levels > 1 */
/* if no coarse-grid, stop coarsening, and set the
* coarsest solve to be a single sweep of Jacobi */
if ((coarse_size == 0) ||
(coarse_size == fine_size))
{
HYPRE_Int *num_grid_sweeps =
hypre_ParAMGDataNumGridSweeps(amg_data);
HYPRE_Int **grid_relax_points =
hypre_ParAMGDataGridRelaxPoints(amg_data);
if (grid_relax_type[3] == 9 || grid_relax_type[3] == 99
|| grid_relax_type[3] == 19 || grid_relax_type[3] == 98)
{
grid_relax_type[3] = grid_relax_type[0];
num_grid_sweeps[3] = 1;
if (grid_relax_points) grid_relax_points[3][0] = 0;
}
if (S)
hypre_ParCSRMatrixDestroy(S);
if (P)
hypre_ParCSRMatrixDestroy(P);
if (level > 0)
{
/* note special case treatment of CF_marker is necessary
* to do CF relaxation correctly when num_levels = 1 */
hypre_TFree(CF_marker_array[level]);
hypre_ParVectorDestroy(F_array[level]);
hypre_ParVectorDestroy(U_array[level]);
}
break;
}
if (level < agg_num_levels && coarse_size < min_coarse_size)
{
if (S)
hypre_ParCSRMatrixDestroy(S);
if (P)
hypre_ParCSRMatrixDestroy(P);
if (level > 0)
{
hypre_TFree(CF_marker_array[level]);
hypre_ParVectorDestroy(F_array[level]);
hypre_ParVectorDestroy(U_array[level]);
}
coarse_size = fine_size;
break;
}
/*-------------------------------------------------------------
* Build prolongation matrix, P, and place in P_array[level]
*--------------------------------------------------------------*/
if (!block_mode)
{
if (mult_addlvl > -1 && level >= mult_addlvl && level <= add_end)
{
HYPRE_Real *d_diag;
if (add_rlx == 0)
{
hypre_CSRMatrix *lvl_Adiag = hypre_ParCSRMatrixDiag(A_array[level]);
HYPRE_Int lvl_nrows = hypre_CSRMatrixNumRows(lvl_Adiag);
HYPRE_Int *lvl_i = hypre_CSRMatrixI(lvl_Adiag);
HYPRE_Real *lvl_data = hypre_CSRMatrixData(lvl_Adiag);
HYPRE_Real w_inv = 1.0/add_rlx_wt;
/*HYPRE_Real w_inv = 1.0/hypre_ParAMGDataRelaxWeight(amg_data)[level];*/
d_diag = hypre_CTAlloc(HYPRE_Real, lvl_nrows);
for (i=0; i < lvl_nrows; i++)
d_diag[i] = lvl_data[lvl_i[i]]*w_inv;
}
else
{
if (num_threads == 1)
hypre_ParCSRComputeL1Norms(A_array[level], 1, NULL, &d_diag);
else
hypre_ParCSRComputeL1NormsThreads(A_array[level], 1,
num_threads, NULL, &d_diag);
}
if (ns == 1)
{
hypre_ParCSRMatrix *Q = NULL;
Q = hypre_ParMatmul(A_array[level],P);
hypre_ParCSRMatrixAminvDB(P,Q,d_diag,&P_array[level]);
A_H = hypre_ParTMatmul(P,Q);
hypre_ParCSRMatrixRowStarts(A_H) = hypre_ParCSRMatrixColStarts(A_H);
hypre_ParCSRMatrixOwnsRowStarts(A_H) = 1;
hypre_ParCSRMatrixOwnsColStarts(A_H) = 0;
hypre_ParCSRMatrixOwnsColStarts(P) = 0;
if (num_procs > 1) hypre_MatvecCommPkgCreate(A_H);
/*hypre_ParCSRMatrixDestroy(P); */
hypre_TFree(d_diag);
/* Set NonGalerkin drop tol on each level */
if (level < nongalerk_num_tol) nongalerk_tol_l = nongalerk_tol[level];
if (nongal_tol_array) nongalerk_tol_l = nongal_tol_array[level];
if (nongalerk_tol_l > 0.0)
{
/* Build Non-Galerkin Coarse Grid */
hypre_ParCSRMatrix *Q = NULL;
hypre_BoomerAMGBuildNonGalerkinCoarseOperator(&A_H, Q,
0.333*strong_threshold, max_row_sum, num_functions,
dof_func_array[level+1], S_commpkg_switch, CF_marker_array[level],
/* nongalerk_tol, sym_collapse, lump_percent, beta );*/
nongalerk_tol_l, 1, 0.5, 1.0 );
hypre_ParCSRMatrixColStarts(P_array[level]) = hypre_ParCSRMatrixRowStarts(A_H);
if (!hypre_ParCSRMatrixCommPkg(A_H))
hypre_MatvecCommPkgCreate(A_H);
}
hypre_ParCSRMatrixDestroy(Q);
}
else
{
HYPRE_Int ns_tmp = ns;
hypre_ParCSRMatrix *C = NULL;
hypre_ParCSRMatrix *Ptmp = NULL;
/* Set NonGalerkin drop tol on each level */
if (level < nongalerk_num_tol)
nongalerk_tol_l = nongalerk_tol[level];
if (nongal_tol_array) nongalerk_tol_l = nongal_tol_array[level];
if (nongalerk_tol_l > 0.0)
{
/* Construct AP, and then RAP */
hypre_ParCSRMatrix *Q = NULL;
Q = hypre_ParMatmul(A_array[level],P_array[level]);
A_H = hypre_ParTMatmul(P_array[level],Q);
hypre_ParCSRMatrixRowStarts(A_H) = hypre_ParCSRMatrixColStarts(A_H);
hypre_ParCSRMatrixOwnsRowStarts(A_H) = 1;
hypre_ParCSRMatrixOwnsColStarts(A_H) = 0;
hypre_ParCSRMatrixOwnsColStarts(P_array[level]) = 0;
if (num_procs > 1) hypre_MatvecCommPkgCreate(A_H);
/* Build Non-Galerkin Coarse Grid */
hypre_BoomerAMGBuildNonGalerkinCoarseOperator(&A_H, Q,
0.333*strong_threshold, max_row_sum, num_functions,
dof_func_array[level+1], S_commpkg_switch, CF_marker_array[level],
/* nongalerk_tol, sym_collapse, lump_percent, beta );*/
nongalerk_tol_l, 1, 0.5, 1.0 );
if (!hypre_ParCSRMatrixCommPkg(A_H))
hypre_MatvecCommPkgCreate(A_H);
/* Delete AP */
hypre_ParCSRMatrixDestroy(Q);
}
else if (rap2)
{
/* Use two matrix products to generate A_H */
hypre_ParCSRMatrix *Q = NULL;
Q = hypre_ParMatmul(A_array[level],P_array[level]);
A_H = hypre_ParTMatmul(P_array[level],Q);
hypre_ParCSRMatrixOwnsRowStarts(A_H) = 1;
hypre_ParCSRMatrixOwnsColStarts(A_H) = 0;
hypre_ParCSRMatrixOwnsColStarts(P_array[level]) = 0;
if (num_procs > 1) hypre_MatvecCommPkgCreate(A_H);
/* Delete AP */
hypre_ParCSRMatrixDestroy(Q);
}
else
hypre_BoomerAMGBuildCoarseOperatorKT(P, A_array[level] , P,
keepTranspose, &A_H);
if (add_rlx == 18)
C = hypre_CreateC(A_array[level], 0.0);
else
C = hypre_CreateC(A_array[level], add_rlx_wt);
Ptmp = P;
while (ns_tmp > 0)
{
Pnew = Ptmp;
Ptmp = NULL;
Ptmp = hypre_ParMatmul(C,Pnew);
if (ns_tmp < ns)
hypre_ParCSRMatrixDestroy(Pnew);
ns_tmp--;
}
Pnew = Ptmp;
P_array[level] = Pnew;
hypre_ParCSRMatrixDestroy(C);
}
if (add_P_max_elmts || add_trunc_factor)
{
hypre_BoomerAMGTruncandBuild(P_array[level],
add_trunc_factor,add_P_max_elmts);
}
/*else
hypre_MatvecCommPkgCreate(P_array[level]); */
hypre_ParCSRMatrixDestroy(P);
}
else
P_array[level] = P;
}
if (S) hypre_ParCSRMatrixDestroy(S);
S = NULL;
if (debug_flag==1)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Level = %d Build Interp Time = %f\n",
my_id,level, wall_time);
fflush(NULL);
}
/*-------------------------------------------------------------
* Build coarse-grid operator, A_array[level+1] by R*A*P
*--------------------------------------------------------------*/
if (debug_flag==1) wall_time = time_getWallclockSeconds();
if (mult_addlvl == -1 || level < mult_addlvl || level > add_end)
{
/* Set NonGalerkin drop tol on each level */
if (level < nongalerk_num_tol)
nongalerk_tol_l = nongalerk_tol[level];
if (nongal_tol_array) nongalerk_tol_l = nongal_tol_array[level];
if (nongalerk_tol_l > 0.0)
{
/* Construct AP, and then RAP */
hypre_ParCSRMatrix *Q = NULL;
Q = hypre_ParMatmul(A_array[level],P_array[level]);
A_H = hypre_ParTMatmul(P_array[level],Q);
hypre_ParCSRMatrixRowStarts(A_H) = hypre_ParCSRMatrixColStarts(A_H);
hypre_ParCSRMatrixOwnsRowStarts(A_H) = 1;
hypre_ParCSRMatrixOwnsColStarts(A_H) = 0;
hypre_ParCSRMatrixOwnsColStarts(P_array[level]) = 0;
if (num_procs > 1) hypre_MatvecCommPkgCreate(A_H);
/* Build Non-Galerkin Coarse Grid */
hypre_BoomerAMGBuildNonGalerkinCoarseOperator(&A_H, Q,
0.333*strong_threshold, max_row_sum, num_functions,
dof_func_array[level+1], S_commpkg_switch, CF_marker_array[level],
/* nongalerk_tol, sym_collapse, lump_percent, beta );*/
nongalerk_tol_l, 1, 0.5, 1.0 );
if (!hypre_ParCSRMatrixCommPkg(A_H))
hypre_MatvecCommPkgCreate(A_H);
/* Delete AP */
hypre_ParCSRMatrixDestroy(Q);
}
else if (rap2)
{
/* Use two matrix products to generate A_H */
hypre_ParCSRMatrix *Q = NULL;
Q = hypre_ParMatmul(A_array[level],P_array[level]);
A_H = hypre_ParTMatmul(P_array[level],Q);
hypre_ParCSRMatrixOwnsRowStarts(A_H) = 1;
hypre_ParCSRMatrixOwnsColStarts(A_H) = 0;
hypre_ParCSRMatrixOwnsColStarts(P_array[level]) = 0;
if (num_procs > 1) hypre_MatvecCommPkgCreate(A_H);
/* Delete AP */
hypre_ParCSRMatrixDestroy(Q);
}
else
{
/* Compute standard Galerkin coarse-grid product */
hypre_BoomerAMGBuildCoarseOperatorKT(P_array[level], A_array[level] ,
P_array[level], keepTranspose, &A_H);
if (Pnew && ns==1)
{
hypre_ParCSRMatrixDestroy(P);
P_array[level] = Pnew;
}
}
}
if (debug_flag==1)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Level = %d Build Coarse Operator Time = %f\n",
my_id,level, wall_time);
fflush(NULL);
}
++level;
if (!block_mode)
{
hypre_ParCSRMatrixSetNumNonzeros(A_H);
hypre_ParCSRMatrixSetDNumNonzeros(A_H);
A_array[level] = A_H;
}
size = ((HYPRE_Real) fine_size )*.75;
if (coarsen_type > 0 && coarse_size >= (HYPRE_Int) size)
{
coarsen_type = 0;
}
{
HYPRE_Int max_thresh = hypre_max(coarse_threshold, seq_threshold);
if ( (level == max_levels-1) || (coarse_size <= max_thresh) )
{
not_finished_coarsening = 0;
}
}
}
/* redundant coarse grid solve */
if ( (seq_threshold >= coarse_threshold) && (coarse_size > coarse_threshold) && (level != max_levels-1))
{
hypre_seqAMGSetup( amg_data, level, coarse_threshold);
}
else if (grid_relax_type[3] == 9 || grid_relax_type[3] == 99) /*use of Gaussian elimination on coarsest level */
{
if (coarse_size <= coarse_threshold)
hypre_GaussElimSetup(amg_data, level, grid_relax_type[3]);
else
grid_relax_type[3] = grid_relax_type[1];
}
else if (grid_relax_type[3] == 19 || grid_relax_type[3] == 98) /*use of Gaussian elimination on coarsest level */
{
if (coarse_size > coarse_threshold)
grid_relax_type[3] = grid_relax_type[1];
}
if (level > 0)
{
if (block_mode == 0)
{
F_array[level] =
hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_array[level]),
hypre_ParCSRMatrixGlobalNumRows(A_array[level]),
hypre_ParCSRMatrixRowStarts(A_array[level]));
hypre_ParVectorInitialize(F_array[level]);
hypre_ParVectorSetPartitioningOwner(F_array[level],0);
U_array[level] =
hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_array[level]),
hypre_ParCSRMatrixGlobalNumRows(A_array[level]),
hypre_ParCSRMatrixRowStarts(A_array[level]));
hypre_ParVectorInitialize(U_array[level]);
hypre_ParVectorSetPartitioningOwner(U_array[level],0);
}
}
/*-----------------------------------------------------------------------
* enter all the stuff created, A[level], P[level], CF_marker[level],
* for levels 1 through coarsest, into amg_data data structure
*-----------------------------------------------------------------------*/
num_levels = level+1;
hypre_ParAMGDataNumLevels(amg_data) = num_levels;
/*-----------------------------------------------------------------------
* Setup of special smoothers when needed
*-----------------------------------------------------------------------*/
if (addlvl > -1 ||
grid_relax_type[1] == 7 || grid_relax_type[2] == 7 || grid_relax_type[3] == 7 ||
grid_relax_type[1] == 8 || grid_relax_type[2] == 8 || grid_relax_type[3] == 8 ||
grid_relax_type[1] == 13 || grid_relax_type[2] == 13 || grid_relax_type[3] == 13 ||
grid_relax_type[1] == 14 || grid_relax_type[2] == 14 || grid_relax_type[3] == 14 ||
grid_relax_type[1] == 18 || grid_relax_type[2] == 18 || grid_relax_type[3] == 18)
{
l1_norms = hypre_CTAlloc(HYPRE_Real *, num_levels);
hypre_ParAMGDataL1Norms(amg_data) = l1_norms;
}
if (grid_relax_type[0] == 16 ||grid_relax_type[1] == 16 || grid_relax_type[2] == 16 || grid_relax_type[3] == 16)
/* Chebyshev */
{
max_eig_est = hypre_CTAlloc(HYPRE_Real, num_levels);
min_eig_est = hypre_CTAlloc(HYPRE_Real, num_levels);
hypre_ParAMGDataMaxEigEst(amg_data) = max_eig_est;
hypre_ParAMGDataMinEigEst(amg_data) = min_eig_est;
cheby_ds = hypre_CTAlloc(HYPRE_Real *, num_levels);
cheby_coefs = hypre_CTAlloc(HYPRE_Real *, num_levels);
hypre_ParAMGDataChebyDS(amg_data) = cheby_ds;
hypre_ParAMGDataChebyCoefs(amg_data) = cheby_coefs;
}
if (addlvl == -1) addlvl = num_levels;
for (j = 0; j < addlvl; j++)
{
if (num_threads == 1)
{
if (j < num_levels-1 && (grid_relax_type[1] == 8 || grid_relax_type[1] == 13 ||
grid_relax_type[1] == 14 || grid_relax_type[2] == 8 || grid_relax_type[2] == 13 ||
grid_relax_type[2] == 14))
{
if (relax_order)
hypre_ParCSRComputeL1Norms(A_array[j], 4, CF_marker_array[j], &l1_norms[j]);
else
hypre_ParCSRComputeL1Norms(A_array[j], 4, NULL, &l1_norms[j]);
}
else if ((grid_relax_type[3] == 8 || grid_relax_type[3] == 13 || grid_relax_type[3] == 14)
&& j == num_levels-1)
{
hypre_ParCSRComputeL1Norms(A_array[j], 4, NULL, &l1_norms[j]);
}
if ((grid_relax_type[1] == 18 || grid_relax_type[2] == 18) && j < num_levels-1)
{
if (relax_order)
hypre_ParCSRComputeL1Norms(A_array[j], 1, CF_marker_array[j], &l1_norms[j]);
else
hypre_ParCSRComputeL1Norms(A_array[j], 1, NULL, &l1_norms[j]);
}
else if (grid_relax_type[3] == 18 && j == num_levels-1)
{
hypre_ParCSRComputeL1Norms(A_array[j], 1, NULL, &l1_norms[j]);
}
}
else
{
if (j < num_levels-1 && (grid_relax_type[1] == 8 || grid_relax_type[1] == 13 ||
grid_relax_type[1] == 14 || grid_relax_type[2] == 8 || grid_relax_type[2] == 13 ||
grid_relax_type[2] == 14))
{
if (relax_order)
hypre_ParCSRComputeL1NormsThreads(A_array[j], 4, num_threads, CF_marker_array[j] , &l1_norms[j]);
else
hypre_ParCSRComputeL1NormsThreads(A_array[j], 4, num_threads, NULL, &l1_norms[j]);
}
else if ((grid_relax_type[3] == 8 || grid_relax_type[3] == 13 || grid_relax_type[3] == 14)
&& j == num_levels-1)
{
hypre_ParCSRComputeL1NormsThreads(A_array[j], 4, num_threads, NULL, &l1_norms[j]);
}
if ((grid_relax_type[1] == 18 || grid_relax_type[2] == 18) && j < num_levels-1)
{
if (relax_order)
hypre_ParCSRComputeL1NormsThreads(A_array[j], 1, num_threads, CF_marker_array[j], &l1_norms[j]);
else
hypre_ParCSRComputeL1NormsThreads(A_array[j], 1, num_threads, NULL, &l1_norms[j]);
}
else if (grid_relax_type[3] == 18 && j == num_levels-1)
{
hypre_ParCSRComputeL1NormsThreads(A_array[j], 1, num_threads, NULL, &l1_norms[j]);
}
}
}
for (j = addlvl; j < hypre_min(add_end+1, num_levels) ; j++)
{
if (add_rlx == 18 )
{
if (num_threads == 1)
hypre_ParCSRComputeL1Norms(A_array[j], 1, NULL, &l1_norms[j]);
else
hypre_ParCSRComputeL1NormsThreads(A_array[j], 1, num_threads, NULL, &l1_norms[j]);
}
}
for (j = add_end+1; j < num_levels; j++)
{
if (num_threads == 1)
{
if (j < num_levels-1 && (grid_relax_type[1] == 8 || grid_relax_type[1] == 13 ||
grid_relax_type[1] == 14 || grid_relax_type[2] == 8 || grid_relax_type[2] == 13 ||
grid_relax_type[2] == 14))
{
if (relax_order)
hypre_ParCSRComputeL1Norms(A_array[j], 4, CF_marker_array[j], &l1_norms[j]);
else
hypre_ParCSRComputeL1Norms(A_array[j], 4, NULL, &l1_norms[j]);
}
else if ((grid_relax_type[3] == 8 || grid_relax_type[3] == 13 || grid_relax_type[3] == 14)
&& j == num_levels-1)
{
hypre_ParCSRComputeL1Norms(A_array[j], 4, NULL, &l1_norms[j]);
}
if ((grid_relax_type[1] == 18 || grid_relax_type[2] == 18) && j < num_levels-1)
{
if (relax_order)
hypre_ParCSRComputeL1Norms(A_array[j], 1, CF_marker_array[j], &l1_norms[j]);
else
hypre_ParCSRComputeL1Norms(A_array[j], 1, NULL, &l1_norms[j]);
}
else if (grid_relax_type[3] == 18 && j == num_levels-1)
{
hypre_ParCSRComputeL1Norms(A_array[j], 1, NULL, &l1_norms[j]);
}
}
else
{
if (j < num_levels-1 && (grid_relax_type[1] == 8 || grid_relax_type[1] == 13 ||
grid_relax_type[1] == 14 || grid_relax_type[2] == 8 || grid_relax_type[2] == 13 ||
grid_relax_type[2] == 14))
{
if (relax_order)
hypre_ParCSRComputeL1NormsThreads(A_array[j], 4, num_threads, CF_marker_array[j] , &l1_norms[j]);
else
hypre_ParCSRComputeL1NormsThreads(A_array[j], 4, num_threads, NULL, &l1_norms[j]);
}
else if ((grid_relax_type[3] == 8 || grid_relax_type[3] == 13 || grid_relax_type[3] == 14)
&& j == num_levels-1)
{
hypre_ParCSRComputeL1NormsThreads(A_array[j], 4, num_threads, NULL, &l1_norms[j]);
}
if ((grid_relax_type[1] == 18 || grid_relax_type[2] == 18) && j < num_levels-1)
{
if (relax_order)
hypre_ParCSRComputeL1NormsThreads(A_array[j], 1, num_threads, CF_marker_array[j], &l1_norms[j]);
else
hypre_ParCSRComputeL1NormsThreads(A_array[j], 1, num_threads, NULL, &l1_norms[j]);
}
else if (grid_relax_type[3] == 18 && j == num_levels-1)
{
hypre_ParCSRComputeL1NormsThreads(A_array[j], 1, num_threads, NULL, &l1_norms[j]);
}
}
}
for (j = 0; j < num_levels; j++)
{
if (grid_relax_type[1] == 7 || grid_relax_type[2] == 7 || (grid_relax_type[3] == 7 && j== (num_levels-1)))
{
hypre_ParCSRComputeL1Norms(A_array[j], 5, NULL, &l1_norms[j]);
}
else if (grid_relax_type[1] == 16 || grid_relax_type[2] == 16 || (grid_relax_type[3] == 16 && j== (num_levels-1)))
{
HYPRE_Int scale = hypre_ParAMGDataChebyScale(amg_data);;
HYPRE_Int variant = hypre_ParAMGDataChebyVariant(amg_data);
HYPRE_Real max_eig, min_eig = 0;
HYPRE_Real *coefs = NULL;
HYPRE_Real *ds = NULL;
HYPRE_Int cheby_order = hypre_ParAMGDataChebyOrder(amg_data);
HYPRE_Int cheby_eig_est = hypre_ParAMGDataChebyEigEst(amg_data);
HYPRE_Real cheby_fraction = hypre_ParAMGDataChebyFraction(amg_data);
if (cheby_eig_est)
hypre_ParCSRMaxEigEstimateCG(A_array[j], scale, cheby_eig_est,
&max_eig, &min_eig);
else
hypre_ParCSRMaxEigEstimate(A_array[j], scale, &max_eig);
max_eig_est[j] = max_eig;
min_eig_est[j] = min_eig;
hypre_ParCSRRelax_Cheby_Setup(A_array[j],max_eig, min_eig,
cheby_fraction, cheby_order, scale, variant, &coefs, &ds);
cheby_coefs[j] = coefs;
cheby_ds[j] = ds;
}
if (relax_weight[j] == 0.0)
{
hypre_ParCSRMatrixScaledNorm(A_array[j], &relax_weight[j]);
if (relax_weight[j] != 0.0)
relax_weight[j] = 4.0/3.0/relax_weight[j];
else
hypre_error_w_msg(HYPRE_ERROR_GENERIC," Warning ! Matrix norm is zero !!!");
}
if ((j < num_levels-1) || ((j == num_levels-1) && (grid_relax_type[3]!= 9 &&
grid_relax_type[3] != 99 && grid_relax_type[3] != 19 && grid_relax_type[3] != 98)
&& coarse_size > 9))
{
if (relax_weight[j] < 0 )
{
num_cg_sweeps = (HYPRE_Int) (-relax_weight[j]);
hypre_BoomerAMGCGRelaxWt(amg_data, j, num_cg_sweeps,
&relax_weight[j]);
}
if (omega[j] < 0 )
{
num_cg_sweeps = (HYPRE_Int) (-omega[j]);
hypre_BoomerAMGCGRelaxWt(amg_data, j, num_cg_sweeps,
&omega[j]);
}
}
} /* end of levels loop */
if ( amg_logging > 1 )
{
Residual_array=
hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_array[0]),
hypre_ParCSRMatrixGlobalNumRows(A_array[0]),
hypre_ParCSRMatrixRowStarts(A_array[0]) );
hypre_ParVectorInitialize(Residual_array);
hypre_ParVectorSetPartitioningOwner(Residual_array,0);
hypre_ParAMGDataResidual(amg_data) = Residual_array;
}
else
hypre_ParAMGDataResidual(amg_data) = NULL;
if (simple > -1 && simple < num_levels)
hypre_CreateDinv(amg_data);
else if ((mult_additive > -1 && mult_additive < num_levels) ||
(additive > -1 && additive < num_levels))
hypre_CreateLambda(amg_data);
cum_nnz_AP = hypre_ParCSRMatrixDNumNonzeros(A_array[0]);
for (j = 0; j < num_levels-1; j++)
{
hypre_ParCSRMatrixSetDNumNonzeros(P_array[j]);
cum_nnz_AP += hypre_ParCSRMatrixDNumNonzeros(P_array[j]);
cum_nnz_AP += hypre_ParCSRMatrixDNumNonzeros(A_array[j+1]);
}
hypre_ParAMGDataCumNnzAP(amg_data) = cum_nnz_AP;
/*-----------------------------------------------------------------------
* Print some stuff
*-----------------------------------------------------------------------*/
if (amg_print_level == 1 || amg_print_level == 3)
hypre_BoomerAMGSetupStats(amg_data,A);
/* print out matrices on all levels */
#if DEBUG
{
char filename[256];
if (block_mode)
{
hypre_ParCSRMatrix *temp_A;
for (level = 0; level < num_levels; level++)
{
hypre_sprintf(filename, "BoomerAMG.out.A_blk.%02d.ij", level);
temp_A = hypre_ParCSRBlockMatrixConvertToParCSRMatrix(
A_block_array[level]);
hypre_ParCSRMatrixPrintIJ(temp_A, 0, 0, filename);
hypre_ParCSRMatrixDestroy(temp_A);
}
}
else
{
for (level = 0; level < num_levels; level++)
{
hypre_sprintf(filename, "BoomerAMG.out.A.%02d.ij", level);
hypre_ParCSRMatrixPrintIJ(A_array[level], 0, 0, filename);
}
for (level = 0; level < (num_levels-1); level++)
{
hypre_sprintf(filename, "BoomerAMG.out.P.%02d.ij", level);
hypre_ParCSRMatrixPrintIJ(P_array[level], 0, 0, filename);
}
}
}
#endif
return(hypre_error_flag);
}
| 65,267 | 39.438662 | 134 | c |
AMG | AMG-master/parcsr_ls/par_amg_solve.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* AMG solve routine
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
/*--------------------------------------------------------------------
* hypre_BoomerAMGSolve
*--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
 * hypre_BoomerAMGSolve
 *
 * Iteratively solves A*u = f using the AMG hierarchy previously built
 * for this solver object.  Each iteration applies either one
 * multiplicative cycle (hypre_BoomerAMGCycle) or, when an additive
 * variant was requested, one additive cycle
 * (hypre_BoomerAMGAdditiveCycle), repeating until the relative
 * residual drops below the solver tolerance or max_iter cycles have
 * been performed.
 *
 * Parameters:
 *   amg_vdata - opaque AMG solver object (hypre_ParAMGData*)
 *   A         - fine-grid matrix; installed as level 0 of the hierarchy
 *   f         - right-hand side vector
 *   u         - initial guess on input; approximate solution on output
 *
 * Returns hypre_error_flag.  Sets HYPRE_ERROR_GENERIC if INFs/NaNs are
 * detected in the initial residual, and HYPRE_ERROR_CONV if the
 * iteration does not converge within max_iter cycles (tol > 0).
 *--------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGSolve( void               *amg_vdata,
                      hypre_ParCSRMatrix *A,
                      hypre_ParVector    *f,
                      hypre_ParVector    *u         )
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata;
   /* Data Structure variables */
   HYPRE_Int amg_print_level;   /* verbosity: >1 prints per-cycle residuals */
   HYPRE_Int amg_logging;       /* >1 keeps the residual vector in amg_data */
   HYPRE_Int cycle_count;
   HYPRE_Int num_levels;
   /* HYPRE_Int num_unknowns; */
   HYPRE_Real tol;
   hypre_ParCSRMatrix **A_array;
   hypre_ParVector **F_array;
   hypre_ParVector **U_array;
   /* Local variables */
   HYPRE_Int j;
   HYPRE_Int Solve_err_flag;
   HYPRE_Int min_iter;
   HYPRE_Int max_iter;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int additive;          /* first level of additive cycling, or -1 */
   HYPRE_Int mult_additive;     /* first level of mult-additive cycling, or -1 */
   HYPRE_Int simple;            /* first level of simplified additive cycling, or -1 */
   HYPRE_Real alpha = 1.0;
   HYPRE_Real beta = -1.0;
   HYPRE_Real cycle_op_count;
   HYPRE_Real total_coeffs;
   HYPRE_Real total_variables;
   HYPRE_Real *num_coeffs;
   HYPRE_Real *num_variables;
   HYPRE_Real cycle_cmplxty = 0.0;
   HYPRE_Real operat_cmplxty;
   HYPRE_Real grid_cmplxty;
   HYPRE_Real conv_factor = 0.0;
   HYPRE_Real resid_nrm = 1.0;
   HYPRE_Real resid_nrm_init = 0.0;
   HYPRE_Real relative_resid;
   HYPRE_Real rhs_norm = 0.0;
   HYPRE_Real old_resid;
   HYPRE_Real ieee_check = 0.;
   hypre_ParVector *Vtemp;
   hypre_ParVector *Residual;   /* only referenced when amg_logging > 1 */
   /*HYPRE_ANNOTATION_BEGIN("BoomerAMG.solve");*/
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   amg_print_level = hypre_ParAMGDataPrintLevel(amg_data);
   amg_logging = hypre_ParAMGDataLogging(amg_data);
   if ( amg_logging > 1 )
      Residual = hypre_ParAMGDataResidual(amg_data);
   /* num_unknowns = hypre_ParAMGDataNumUnknowns(amg_data); */
   num_levels = hypre_ParAMGDataNumLevels(amg_data);
   A_array = hypre_ParAMGDataAArray(amg_data);
   F_array = hypre_ParAMGDataFArray(amg_data);
   U_array = hypre_ParAMGDataUArray(amg_data);
   tol = hypre_ParAMGDataTol(amg_data);
   min_iter = hypre_ParAMGDataMinIter(amg_data);
   max_iter = hypre_ParAMGDataMaxIter(amg_data);
   additive = hypre_ParAMGDataAdditive(amg_data);
   simple = hypre_ParAMGDataSimple(amg_data);
   mult_additive = hypre_ParAMGDataMultAdditive(amg_data);
   /* install the caller's fine-grid system as level 0 of the hierarchy */
   A_array[0] = A;
   F_array[0] = f;
   U_array[0] = u;
   /* Vtemp = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_array[0]),
                 hypre_ParCSRMatrixGlobalNumRows(A_array[0]),
                 hypre_ParCSRMatrixRowStarts(A_array[0]));
   hypre_ParVectorInitialize(Vtemp);
   hypre_ParVectorSetPartitioningOwner(Vtemp,0);
   hypre_ParAMGDataVtemp(amg_data) = Vtemp;
   */
   Vtemp = hypre_ParAMGDataVtemp(amg_data);
   /*-----------------------------------------------------------------------
    *    Write the solver parameters
    *-----------------------------------------------------------------------*/
   if (my_id == 0 && amg_print_level > 1)
      hypre_BoomerAMGWriteSolverParams(amg_data);
   /*-----------------------------------------------------------------------
    *    Initialize the solver error flag and assorted bookkeeping variables
    *-----------------------------------------------------------------------*/
   Solve_err_flag = 0;
   total_coeffs = 0;
   total_variables = 0;
   cycle_count = 0;
   operat_cmplxty = 0;
   grid_cmplxty = 0;
   /*-----------------------------------------------------------------------
    *     write some initial info
    *-----------------------------------------------------------------------*/
   if (my_id == 0 && amg_print_level > 1 && tol > 0.)
     hypre_printf("\n\nAMG SOLUTION INFO:\n");
   /*-----------------------------------------------------------------------
    *    Compute initial fine-grid residual and print
    *-----------------------------------------------------------------------*/
   if (amg_print_level > 1 || amg_logging > 1 || tol > 0.)
   {
     /* residual r = f - A*u; kept in Residual (logging) or Vtemp (scratch) */
     if ( amg_logging > 1 ) {
        hypre_ParVectorCopy(F_array[0], Residual );
        if (tol > 0)
           hypre_ParCSRMatrixMatvec(alpha, A_array[0], U_array[0], beta, Residual );
        resid_nrm = sqrt(hypre_ParVectorInnerProd( Residual, Residual ));
     }
     else {
        hypre_ParVectorCopy(F_array[0], Vtemp);
        if (tol > 0)
           hypre_ParCSRMatrixMatvec(alpha, A_array[0], U_array[0], beta, Vtemp);
        resid_nrm = sqrt(hypre_ParVectorInnerProd(Vtemp, Vtemp));
     }
     /* Since it is does not diminish performance, attempt to return an error flag
        and notify users when they supply bad input. */
     if (resid_nrm != 0.) ieee_check = resid_nrm/resid_nrm; /* INF -> NaN conversion */
     if (ieee_check != ieee_check)
     {
        /* ...INFs or NaNs in input can make ieee_check a NaN.  This test
           for ieee_check self-equality works on all IEEE-compliant compilers/
           machines, c.f. page 8 of "Lecture Notes on the Status of IEEE 754"
           by W. Kahan, May 31, 1996.  Currently (July 2002) this paper may be
           found at http://HTTP.CS.Berkeley.EDU/~wkahan/ieee754status/IEEE754.PDF */
        if (amg_print_level > 0)
        {
          hypre_printf("\n\nERROR detected by Hypre ...  BEGIN\n");
          hypre_printf("ERROR -- hypre_BoomerAMGSolve: INFs and/or NaNs detected in input.\n");
          hypre_printf("User probably placed non-numerics in supplied A, x_0, or b.\n");
          hypre_printf("ERROR detected by Hypre ...  END\n\n\n");
        }
        hypre_error(HYPRE_ERROR_GENERIC);
        /*HYPRE_ANNOTATION_END("BoomerAMG.solve");*/
        return hypre_error_flag;
     }
     resid_nrm_init = resid_nrm;
     rhs_norm = sqrt(hypre_ParVectorInnerProd(f, f));
     /* guard against a zero right-hand side: fall back to absolute residual */
     if (rhs_norm)
     {
       relative_resid = resid_nrm_init / rhs_norm;
     }
     else
     {
       relative_resid = resid_nrm_init;
     }
   }
   else
   {
     relative_resid = 1.;
   }
   if (my_id == 0 && amg_print_level > 1)
   {
      hypre_printf("                                            relative\n");
      hypre_printf("               residual        factor       residual\n");
      hypre_printf("               --------        ------       --------\n");
      hypre_printf("    Initial    %e                 %e\n",resid_nrm_init,
              relative_resid);
   }
   /*-----------------------------------------------------------------------
    *    Main V-cycle loop
    *-----------------------------------------------------------------------*/
   while ((relative_resid >= tol || cycle_count < min_iter)
          && cycle_count < max_iter)
   {
      hypre_ParAMGDataCycleOpCount(amg_data) = 0;
      /* Op count only needed for one cycle */
      /* dispatch: plain multiplicative cycle unless any additive variant
         is active on some level of the hierarchy */
      if ((additive < 0 || additive >= num_levels)
	  && (mult_additive < 0 || mult_additive >= num_levels)
	  && (simple < 0 || simple >= num_levels) )
         hypre_BoomerAMGCycle(amg_data, F_array, U_array);
      else
         hypre_BoomerAMGAdditiveCycle(amg_data);
      /*---------------------------------------------------------------
       *    Compute  fine-grid residual and residual norm
       *----------------------------------------------------------------*/
      if (amg_print_level > 1 || amg_logging > 1 || tol > 0.)
      {
        old_resid = resid_nrm;
        if ( amg_logging > 1 ) {
           hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A_array[0], U_array[0], beta, F_array[0], Residual );
           resid_nrm = sqrt(hypre_ParVectorInnerProd( Residual, Residual ));
        }
        else {
           hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A_array[0], U_array[0], beta, F_array[0], Vtemp);
           resid_nrm = sqrt(hypre_ParVectorInnerProd(Vtemp, Vtemp));
        }
        /* per-cycle convergence factor = ||r_new|| / ||r_old|| */
        if (old_resid) conv_factor = resid_nrm / old_resid;
        else conv_factor = resid_nrm;
        if (rhs_norm)
        {
           relative_resid = resid_nrm / rhs_norm;
        }
        else
        {
           relative_resid = resid_nrm;
        }
        hypre_ParAMGDataRelativeResidualNorm(amg_data) = relative_resid;
      }
      ++cycle_count;
      hypre_ParAMGDataNumIterations(amg_data) = cycle_count;
#ifdef CUMNUMIT
      ++hypre_ParAMGDataCumNumIterations(amg_data);
#endif
      if (my_id == 0 && amg_print_level > 1)
      {
         hypre_printf("    Cycle %2d   %e    %f     %e \n", cycle_count,
                 resid_nrm, conv_factor, relative_resid);
      }
   }
   if (cycle_count == max_iter && tol > 0.)
   {
      Solve_err_flag = 1;
      hypre_error(HYPRE_ERROR_CONV);
   }
   /*-----------------------------------------------------------------------
    *    Compute closing statistics
    *-----------------------------------------------------------------------*/
   /* average convergence factor over all cycles (geometric mean) */
   if (cycle_count > 0 && resid_nrm_init)
     conv_factor = pow((resid_nrm/resid_nrm_init),(1.0/(HYPRE_Real) cycle_count));
   else
     conv_factor = 1.;
   if (amg_print_level > 1)
   {
      num_coeffs       = hypre_CTAlloc(HYPRE_Real, num_levels);
      num_variables    = hypre_CTAlloc(HYPRE_Real, num_levels);
      num_coeffs[0]    = hypre_ParCSRMatrixDNumNonzeros(A);
      num_variables[0] = hypre_ParCSRMatrixGlobalNumRows(A);
      for (j = 1; j < num_levels; j++)
      {
         num_coeffs[j]    = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(A_array[j]);
         num_variables[j] = (HYPRE_Real) hypre_ParCSRMatrixGlobalNumRows(A_array[j]);
      }
      for (j=0;j<hypre_ParAMGDataNumLevels(amg_data);j++)
      {
         total_coeffs += num_coeffs[j];
         total_variables += num_variables[j];
      }
      cycle_op_count = hypre_ParAMGDataCycleOpCount(amg_data);
      /* grid complexity: sum of grid sizes over fine-grid size;
         operator complexity: sum of nnz over fine-grid nnz */
      if (num_variables[0])
         grid_cmplxty = total_variables / num_variables[0];
      if (num_coeffs[0])
      {
         operat_cmplxty = total_coeffs / num_coeffs[0];
         cycle_cmplxty = cycle_op_count / num_coeffs[0];
      }
      if (my_id == 0)
      {
         if (Solve_err_flag == 1)
         {
            hypre_printf("\n\n==============================================");
            hypre_printf("\n NOTE: Convergence tolerance was not achieved\n");
            hypre_printf("      within the allowed %d V-cycles\n",max_iter);
            hypre_printf("==============================================");
         }
         hypre_printf("\n\n Average Convergence Factor = %f",conv_factor);
         hypre_printf("\n\n     Complexity:    grid = %f\n",grid_cmplxty);
         hypre_printf("                operator = %f\n",operat_cmplxty);
         hypre_printf("                   cycle = %f\n\n\n\n",cycle_cmplxty);
      }
      hypre_TFree(num_coeffs);
      hypre_TFree(num_variables);
   }
   /* HYPRE_ANNOTATION_END("BoomerAMG.solve"); */
   return hypre_error_flag;
}
| 12,285 | 33.804533 | 106 | c |
AMG | AMG-master/parcsr_ls/par_cg_relax_wt.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* ParAMG cycling routine
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
/*--------------------------------------------------------------------------
* hypre_BoomerAMGCycle
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCGRelaxWt
 *
 * Estimates an optimal relaxation weight for smoothed relaxation on the
 * given level.  Runs up to num_cg_sweeps preconditioned CG iterations on
 * a random right-hand side, accumulating the CG (Lanczos) tridiagonal
 * matrix; its largest eigenvalue (found by hypre_Bisection) approximates
 * the largest eigenvalue of the preconditioned operator, and the returned
 * weight is its reciprocal.  The loop stops early once the weight
 * estimate changes by less than 1e-3 between sweeps.
 *
 * Fix vs. original: removed the dead `if (smooth_option > 6 && ... < 10)
 * hypre_ParVectorDestroy(Utemp)` block.  `smooth_option` was hard-wired
 * to 0 (branch never taken) and `Utemp` was never created, so the call
 * would have destroyed an uninitialized pointer had the branch ever been
 * enabled.  Unused locals smooth_type/smooth_num_levels removed as well.
 *
 * Parameters:
 *   amg_vdata     - AMG solver object (hypre_ParAMGData*)
 *   level         - hierarchy level whose operator is probed
 *   num_cg_sweeps - maximum number of CG sweeps to run
 *   rlx_wt_ptr    - output: estimated relaxation weight
 *
 * Returns the error flag from hypre_BoomerAMGRelax (0 on success).
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCGRelaxWt( void       *amg_vdata,
                          HYPRE_Int   level,
                          HYPRE_Int   num_cg_sweeps,
                          HYPRE_Real *rlx_wt_ptr)
{
   hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata;
   MPI_Comm comm;
   /* Data Structure variables */
   hypre_ParCSRMatrix *A = hypre_ParAMGDataAArray(amg_data)[level];
   hypre_ParVector    *Vtemp;            /* shared scratch from amg_data */
   hypre_ParVector    *Ptemp;            /* CG search direction */
   hypre_ParVector    *Rtemp;            /* CG residual (random init) */
   hypre_ParVector    *Ztemp;            /* preconditioned residual */
   hypre_ParVector    *Qtemp = NULL;     /* extra scratch for threaded relax */
   HYPRE_Int          *CF_marker = hypre_ParAMGDataCFMarkerArray(amg_data)[level];
   HYPRE_Real         *Ptemp_data;
   HYPRE_Real         *Ztemp_data;
   HYPRE_Int          *grid_relax_type;
   /* Local variables */
   HYPRE_Int    Solve_err_flag;
   HYPRE_Int    i, j, jj;
   HYPRE_Int    num_sweeps;
   HYPRE_Int    relax_type;
   HYPRE_Int    local_size;
   HYPRE_Int    old_size;
   HYPRE_Int    my_id = 0;
   HYPRE_Real  *l1_norms = NULL;
   HYPRE_Real   alpha;
   HYPRE_Real   beta;
   HYPRE_Real   gamma = 1.0;
   HYPRE_Real   gammaold;
   HYPRE_Real  *tridiag;                 /* Lanczos tridiagonal: diagonal */
   HYPRE_Real  *trioffd;                 /* Lanczos tridiagonal: off-diagonal */
   HYPRE_Real   alphinv, row_sum = 0;
   HYPRE_Real   max_row_sum = 0;         /* Gershgorin bound for bisection */
   HYPRE_Real   rlx_wt = 0;
   HYPRE_Real   rlx_wt_old = 0;
   HYPRE_Real   lambda_max, lambda_max_old;
   HYPRE_Int    num_threads;

   num_threads = hypre_NumThreads();

   /* Acquire data and allocate storage */
   tridiag = hypre_CTAlloc(HYPRE_Real, num_cg_sweeps+1);
   trioffd = hypre_CTAlloc(HYPRE_Real, num_cg_sweeps+1);
   for (i=0; i < num_cg_sweeps+1; i++)
   {
      tridiag[i] = 0;
      trioffd[i] = 0;
   }
   Vtemp = hypre_ParAMGDataVtemp(amg_data);
   Rtemp = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                 hypre_ParCSRMatrixGlobalNumRows(A),
                                 hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(Rtemp);
   hypre_ParVectorSetPartitioningOwner(Rtemp,0);
   Ptemp = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                 hypre_ParCSRMatrixGlobalNumRows(A),
                                 hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(Ptemp);
   hypre_ParVectorSetPartitioningOwner(Ptemp,0);
   Ztemp = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                 hypre_ParCSRMatrixGlobalNumRows(A),
                                 hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(Ztemp);
   hypre_ParVectorSetPartitioningOwner(Ztemp,0);
   if (hypre_ParAMGDataL1Norms(amg_data) != NULL)
      l1_norms = hypre_ParAMGDataL1Norms(amg_data)[level];
   if (num_threads > 1)
   {
      Qtemp = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                    hypre_ParCSRMatrixGlobalNumRows(A),
                                    hypre_ParCSRMatrixRowStarts(A));
      hypre_ParVectorInitialize(Qtemp);
      hypre_ParVectorSetPartitioningOwner(Qtemp,0);
   }
   grid_relax_type = hypre_ParAMGDataGridRelaxType(amg_data);

   /* Initialize */
   Solve_err_flag = 0;
   comm = hypre_ParCSRMatrixComm(A);
   hypre_MPI_Comm_rank(comm,&my_id);

   relax_type = grid_relax_type[1];   /* down-cycle smoother on this level */
   num_sweeps = 1;
   local_size = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   /* temporarily shrink the shared Vtemp to this level's local size;
      restored before returning */
   old_size = hypre_VectorSize(hypre_ParVectorLocalVector(Vtemp));
   hypre_VectorSize(hypre_ParVectorLocalVector(Vtemp)) =
      hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   Ptemp_data = hypre_VectorData(hypre_ParVectorLocalVector(Ptemp));
   Ztemp_data = hypre_VectorData(hypre_ParVectorLocalVector(Ztemp));

   /* random initial residual; the seed is fixed for reproducibility */
   hypre_ParVectorSetRandomValues(Rtemp,5128);

   /*------------------------------------------------------------------
    * Preconditioned CG sweeps; one smoother application per sweep acts
    * as the preconditioner solve (z = M^{-1} r).
    *-----------------------------------------------------------------*/
   for (jj = 0; jj < num_cg_sweeps; jj++)
   {
      hypre_ParVectorSetConstantValues(Ztemp, 0.0);
      for (j = 0; j < num_sweeps; j++)
      {
         Solve_err_flag = hypre_BoomerAMGRelax(A,
                                               Rtemp,
                                               CF_marker,
                                               relax_type,
                                               0,
                                               1.0,
                                               1.0,
                                               l1_norms,
                                               Ztemp,
                                               Vtemp,
                                               Qtemp);
         if (Solve_err_flag != 0)
         {
            hypre_ParVectorDestroy(Ptemp);
            hypre_TFree(tridiag);
            hypre_TFree(trioffd);
            return(Solve_err_flag);
         }
      }
      gammaold = gamma;
      gamma = hypre_ParVectorInnerProd(Rtemp,Ztemp);
      if (jj == 0)
      {
         hypre_ParVectorCopy(Ztemp,Ptemp);
         beta = 1.0;
      }
      else
      {
         beta = gamma/gammaold;
         for (i=0; i < local_size; i++)
            Ptemp_data[i] = Ztemp_data[i] + beta*Ptemp_data[i];
      }
      hypre_ParCSRMatrixMatvec(1.0,A,Ptemp,0.0,Vtemp);
      alpha = gamma /hypre_ParVectorInnerProd(Ptemp,Vtemp);
      alphinv = 1.0/alpha;
      /* fold the CG scalars into the Lanczos tridiagonal */
      tridiag[jj+1] = alphinv;
      tridiag[jj] *= beta;
      tridiag[jj] += alphinv;
      trioffd[jj] *= sqrt(beta);
      trioffd[jj+1] = -alphinv;
      /* track the largest Gershgorin row sum as an eigenvalue upper bound */
      row_sum = fabs(tridiag[jj]) + fabs(trioffd[jj]);
      if (row_sum > max_row_sum) max_row_sum = row_sum;
      if (jj > 0)
      {
         row_sum = fabs(tridiag[jj-1]) + fabs(trioffd[jj-1])
                + fabs(trioffd[jj]);
         if (row_sum > max_row_sum) max_row_sum = row_sum;
         lambda_max_old = lambda_max;
         rlx_wt_old = rlx_wt;
         hypre_Bisection(jj+1, tridiag, trioffd, lambda_max_old,
                         max_row_sum, 1.e-3, jj+1, &lambda_max);
         rlx_wt = 1.0/lambda_max;
         /* stop once the weight estimate has settled */
         if (fabs(rlx_wt-rlx_wt_old) < 1.e-3 )
         {
            break;
         }
      }
      else
      {
         lambda_max = tridiag[0];
      }
      hypre_ParVectorAxpy(-alpha,Vtemp,Rtemp);
   }

   /* restore shared scratch and release the work vectors */
   hypre_VectorSize(hypre_ParVectorLocalVector(Vtemp)) = old_size;
   hypre_ParVectorDestroy(Ztemp);
   hypre_ParVectorDestroy(Ptemp);
   hypre_ParVectorDestroy(Rtemp);
   if (num_threads > 1)
      hypre_ParVectorDestroy(Qtemp);
   hypre_TFree(tridiag);
   hypre_TFree(trioffd);

   *rlx_wt_ptr = rlx_wt;

   return(Solve_err_flag);
}
/*--------------------------------------------------------------------------
* hypre_Bisection
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * hypre_Bisection
 *
 * Approximates the k-th smallest eigenvalue of the symmetric tridiagonal
 * matrix whose diagonal is diag[0..n-1] and whose off-diagonal entries
 * are offd[1..n-1], by bisection on the bracketing interval [y, z].
 * At each midpoint the Sturm sequence of leading principal minors is
 * evaluated; the number of sign changes equals the number of eigenvalues
 * below the midpoint, which determines the half-interval to keep.
 * Iterates until the interval width is below tol relative to |y|+|z|.
 * The eigenvalue estimate is stored in *ev_ptr; always returns 0.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_Bisection(HYPRE_Int n, HYPRE_Real *diag, HYPRE_Real *offd,
                HYPRE_Real y, HYPRE_Real z,
                HYPRE_Real tol, HYPRE_Int k, HYPRE_Real *ev_ptr)
{
   HYPRE_Real mid;
   HYPRE_Real pm2, pm1, pcur;
   HYPRE_Int  count;
   HYPRE_Int  row;

   while (fabs(y - z) > tol*(fabs(y) + fabs(z)))
   {
      mid = (y + z)/2;

      /* Sturm sequence at mid: recurrence over principal minors; each
         non-positive product marks a sign change (zeros count as one). */
      count = 0;
      pm2 = 1;
      pm1 = diag[0] - mid;
      if (pm2*pm1 <= 0) count++;
      for (row = 1; row < n; row++)
      {
         pcur = (diag[row] - mid)*pm1 - offd[row]*offd[row]*pm2;
         pm2 = pm1;
         pm1 = pcur;
         if (pm2*pm1 <= 0) count++;
      }

      /* keep whichever half still brackets the k-th eigenvalue */
      if (count >= k)
         z = mid;
      else
         y = mid;
   }

   *ev_ptr = (y + z)/2;

   return 0;
}
| 11,180 | 30.144847 | 81 | c |
AMG | AMG-master/parcsr_ls/par_cheby.c | /******************************************************************************
*
* Chebyshev setup and solve
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "_hypre_parcsr_mv.h"
#include "float.h"
/******************************************************************************
Chebyshev relaxation
Can specify order 1-4 (this is the order of the resid polynomial)- here we
explicitly code the coefficients (instead of
iteratively determining)
variant 0: standard chebyshev
this is rlx 11 if scale = 0, and 16 if scale == 1
variant 1: modified cheby: T(t)* f(t) where f(t) = (1-b/t)
this is rlx 15 if scale = 0, and 17 if scale == 1
ratio indicates the percentage of the whole spectrum to use (so .5
means half, and .1 means 10percent)
*******************************************************************************/
/*--------------------------------------------------------------------------
 * hypre_ParCSRRelax_Cheby_Setup
 *
 * Precomputes the polynomial coefficients (and, if scale!=0, the inverse
 * square roots of the matrix diagonal) used by
 * hypre_ParCSRRelax_Cheby_Solve.  The coefficients are those of s(t) in
 * the update u = u + s(A) r, where the residual polynomial
 * r(t) = 1 - t*s(t) is a (shifted/scaled) Chebyshev polynomial on the
 * eigenvalue interval [lower_bound, upper_bound]; they are hard-coded
 * per order (1-4) rather than generated iteratively.
 *
 * Parameters:
 *   A         - matrix to relax with
 *   max_eig   - estimate of the largest eigenvalue (inflated by 1.1)
 *   min_eig   - estimate of the smallest eigenvalue
 *   fraction  - portion of the spectrum to target (e.g. .5 = upper half)
 *   order     - polynomial order of the residual polynomial, clamped to 1-4
 *   scale     - nonzero: diagonal scaling D^{-1/2} A D^{-1/2} is used
 *   variant   - 0: standard Chebyshev; 1: modified, T(t)*f(t), f(t)=(1-b/t)
 *   coefs_ptr - output: newly allocated coefficient array (order entries used)
 *   ds_ptr    - output: newly allocated 1/sqrt(diag) array if scale, else NULL
 *
 * Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRRelax_Cheby_Setup(hypre_ParCSRMatrix *A, /* matrix to relax with */
                                        HYPRE_Real max_eig,
                                        HYPRE_Real min_eig,
                                        HYPRE_Real fraction,
                                        HYPRE_Int order,            /* polynomial order */
                                        HYPRE_Int scale,            /* scale by diagonal?*/
                                        HYPRE_Int variant,
                                        HYPRE_Real **coefs_ptr,
                                        HYPRE_Real **ds_ptr) /* initial/updated approximation */
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i    = hypre_CSRMatrixI(A_diag);
   HYPRE_Real theta, delta;     /* interval center and half-width */
   HYPRE_Real den;
   HYPRE_Real upper_bound, lower_bound;
   HYPRE_Int j;
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Real *coefs = NULL;
   HYPRE_Int cheby_order;
   HYPRE_Real *ds_data = NULL;
   HYPRE_Real diag;
   /* u = u + p(A)r */
   if (order > 4)
      order = 4;
   if (order < 1)
      order = 1;
   coefs = hypre_CTAlloc(HYPRE_Real, order+1);
   /* we are using the order of p(A) */
   cheby_order = order -1;
   /* make sure we are large enough -  Adams et al. 2003 */
   upper_bound = max_eig * 1.1;
   /* lower_bound = max_eig/fraction; */
   lower_bound = (upper_bound - min_eig)* fraction + min_eig;
   /* theta and delta */
   theta = (upper_bound + lower_bound)/2;
   delta = (upper_bound - lower_bound)/2;
   if (variant == 1 )
   {
      switch ( cheby_order ) /* these are the corresponding cheby polynomials: u = u_o + s(A)r_0  - so order is
                                one less than the resid poly: r(t) = 1 - t*s(t) */
      {
         case 0:
            coefs[0] = 1.0/theta;
            break;
         case 1:  /* (del - t + 2*th)/(th^2 + del*th) */
            den = (theta*theta + delta*theta);
            coefs[0] = (delta + 2*theta)/den;
            coefs[1] = -1.0/den;
            break;
         case 2:  /* (4*del*th - del^2 - t*(2*del + 6*th) + 2*t^2 + 6*th^2)/(2*del*th^2 - del^2*th - del^3 + 2*th^3)*/
            den = 2*delta*theta*theta - delta*delta*theta - pow(delta,3) + 2*pow(theta,3);
            coefs[0] = (4*delta*theta - pow(delta,2) +  6*pow(theta,2))/den;
            coefs[1] = -(2*delta + 6*theta)/den;
            coefs[2] =  2/den;
            break;
         case 3: /* -(6*del^2*th - 12*del*th^2 - t^2*(4*del + 16*th) + t*(12*del*th - 3*del^2 + 24*th^2) + 3*del^3 + 4*t^3 - 16*th^3)/(4*del*th^3 - 3*del^2*th^2 - 3*del^3*th + 4*th^4)*/
            den = - (4*delta*pow(theta,3) - 3*pow(delta,2)*pow(theta,2) - 3*pow(delta,3)*theta + 4*pow(theta,4) );
            coefs[0] = (6*pow(delta,2)*theta - 12*delta*pow(theta,2) + 3*pow(delta,3) - 16*pow(theta,3) )/den;
            coefs[1] = (12*delta*theta - 3*pow(delta,2) + 24*pow(theta,2))/den;
            coefs[2] = -( 4*delta + 16*theta)/den;
            coefs[3] = 4/den;
            break;
      }
   }
   else /* standard chebyshev */
   {
      switch ( cheby_order ) /* these are the corresponding cheby polynomials: u = u_o + s(A)r_0  - so order is
                                one less than the resid poly: r(t) = 1 - t*s(t) */
      {
         case 0:
            coefs[0] = 1.0/theta;
            break;
         case 1:  /* ( 2*t - 4*th)/(del^2 - 2*th^2) */
            den = delta*delta - 2*theta*theta;
            coefs[0] = -4*theta/den;
            coefs[1] = 2/den;
            break;
         case 2: /* (3*del^2 - 4*t^2 + 12*t*th - 12*th^2)/(3*del^2*th - 4*th^3)*/
            den = 3*(delta*delta)*theta - 4*(theta*theta*theta);
            coefs[0] = (3*delta*delta - 12 *theta*theta)/den;
            coefs[1] = 12*theta/den;
            coefs[2] = -4/den;
            break;
         case 3: /*(t*(8*del^2 - 48*th^2) - 16*del^2*th + 32*t^2*th - 8*t^3 + 32*th^3)/(del^4 - 8*del^2*th^2 + 8*th^4)*/
            den = pow(delta,4) - 8*delta*delta*theta*theta + 8*pow(theta,4);
            coefs[0] = (32*pow(theta,3)- 16*delta*delta*theta)/den;
            coefs[1] = (8*delta*delta - 48*theta*theta)/den;
            coefs[2] = 32*theta/den;
            coefs[3] = -8/den;
            break;
      }
   }
   *coefs_ptr = coefs;
   if (scale)
   {
      /*grab 1/sqrt(diagonal) */
      /* assumes each row's diagonal entry is stored first in the CSR row
         (standard hypre layout) and is positive -- TODO confirm for
         non-SPD inputs */
      ds_data = hypre_CTAlloc(HYPRE_Real, num_rows);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j,diag) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < num_rows; j++)
      {
         diag = A_diag_data[A_diag_i[j]];
         ds_data[j] = 1/sqrt(diag);
      }
   }/* end of scaling code */
   *ds_ptr = ds_data;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_ParCSRRelax_Cheby_Solve
 *
 * Applies one Chebyshev relaxation sweep: u = u + p(A) (f - A u), where
 * the coefficients of p were computed by hypre_ParCSRRelax_Cheby_Setup
 * (coefs) for the same order/scale/variant.  The polynomial is evaluated
 * by Horner-style recurrence on the residual.  When scale!=0 the sweep
 * operates on the symmetrically scaled operator D^{-1/2} A D^{-1/2},
 * with ds_data = 1/sqrt(diag(A)) from setup.
 *
 * Parameters:
 *   A       - matrix to relax with
 *   f       - right-hand side
 *   ds_data - 1/sqrt(diag) array from setup (used only if scale)
 *   coefs   - polynomial coefficients from setup
 *   order   - polynomial order, clamped to 1-4 (must match setup)
 *   scale   - nonzero: use diagonally scaled operator
 *   variant - unused here (coefficient choice already encodes it)
 *   u       - approximation, updated in place
 *   v, r    - temporary work vectors (overwritten)
 *
 * Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_ParCSRRelax_Cheby_Solve(hypre_ParCSRMatrix *A, /* matrix to relax with */
                                        hypre_ParVector *f,    /* right-hand side */
                                        HYPRE_Real *ds_data,
                                        HYPRE_Real *coefs,
                                        HYPRE_Int order,            /* polynomial order */
                                        HYPRE_Int scale,            /* scale by diagonal?*/
                                        HYPRE_Int variant,
                                        hypre_ParVector *u, /* initial/updated approximation */
                                        hypre_ParVector *v /* temporary vector */,
                                        hypre_ParVector *r /*another temp vector */  )
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
   HYPRE_Real *f_data = hypre_VectorData(hypre_ParVectorLocalVector(f));
   HYPRE_Real *v_data = hypre_VectorData(hypre_ParVectorLocalVector(v));
   HYPRE_Real *r_data = hypre_VectorData(hypre_ParVectorLocalVector(r));
   HYPRE_Int i, j;
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Real mult;
   HYPRE_Real *orig_u;    /* saved copy of incoming u; final u = orig_u + correction */
   HYPRE_Int cheby_order;
   HYPRE_Real *tmp_data;
   hypre_ParVector *tmp_vec;
   /* u = u + p(A)r */
   if (order > 4)
      order = 4;
   if (order < 1)
      order = 1;
   /* we are using the order of p(A) */
   cheby_order = order -1;
   orig_u = hypre_CTAlloc(HYPRE_Real, num_rows);
   if (!scale)
   {
      /* get residual: r = f - A*u */
      hypre_ParVectorCopy(f, r);
      hypre_ParCSRMatrixMatvec(-1.0, A, u, 1.0, r);
      /* seed the Horner recurrence with the highest-order coefficient */
      for ( i = 0; i < num_rows; i++ )
      {
         orig_u[i] = u_data[i];
         u_data[i] = r_data[i] * coefs[cheby_order];
      }
      /* Horner steps: u <- coefs[i]*r + A*u, from high to low order */
      for (i = cheby_order - 1; i >= 0; i-- )
      {
         hypre_ParCSRMatrixMatvec(1.0, A, u, 0.0, v);
         mult = coefs[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
         for ( j = 0; j < num_rows; j++ )
         {
            u_data[j] = mult * r_data[j] + v_data[j];
         }
      }
      /* add the polynomial correction to the original approximation */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for ( i = 0; i < num_rows; i++ )
      {
         u_data[i] = orig_u[i] + u_data[i];
      }
   }
   else /* scaling! */
   {
      /*grab 1/sqrt(diagonal) */
      tmp_vec = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                      hypre_ParCSRMatrixGlobalNumRows(A),
                                      hypre_ParCSRMatrixRowStarts(A));
      hypre_ParVectorInitialize(tmp_vec);
      hypre_ParVectorSetPartitioningOwner(tmp_vec,0);
      tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(tmp_vec));
      /* get ds_data and get scaled residual: r = D^(-1/2)f -
       * D^(-1/2)A*u */
      hypre_ParCSRMatrixMatvec(-1.0, A, u, 0.0, tmp_vec);
      /* note tmp holds -A*u, hence the addition below */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for ( j = 0; j < num_rows; j++ )
      {
         r_data[j] = ds_data[j] * (f_data[j] + tmp_data[j]);
         orig_u[j] = u_data[j]; /* orig, unscaled u */
         u_data[j] = r_data[j] * coefs[cheby_order];
      }
      /* now do the other coefficients */
      for (i = cheby_order - 1; i >= 0; i-- )
      {
         /* v = D^(-1/2)AD^(-1/2)u */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
         for ( j = 0; j < num_rows; j++ )
         {
            tmp_data[j]  =  ds_data[j] * u_data[j];
         }
         hypre_ParCSRMatrixMatvec(1.0, A, tmp_vec, 0.0, v);
         /* u_new = coef*r + v*/
         mult = coefs[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
         for ( j = 0; j < num_rows; j++ )
         {
            u_data[j] = mult * r_data[j] + ds_data[j]*v_data[j];
         }
      } /* end of cheby_order loop */
      /* now we have to scale u_data before adding it to u_orig*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for ( j = 0; j < num_rows; j++ )
      {
         u_data[j] = orig_u[j] + ds_data[j]*u_data[j];
      }
      hypre_ParVectorDestroy(tmp_vec);
   }/* end of scaling code */
   hypre_TFree(orig_u);
   return hypre_error_flag;
}
| 10,320 | 30.275758 | 185 | c |
AMG | AMG-master/parcsr_ls/par_coarse_parms.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
*****************************************************************************/
/* following should be in a header file */
#include "_hypre_parcsr_ls.h"
/*==========================================================================*/
/*==========================================================================*/
/**
Generates global coarse_size and dof_func for next coarser level
Notes:
\begin{itemize}
\item The routine returns the following:
\begin{itemize}
\item an integer array containing the
function values for the local coarse points
\item the global number of coarse points
\end{itemize}
\end{itemize}
{\bf Input files:}
_hypre_parcsr_ls.h
@return Error code.
@param comm [IN]
MPI Communicator
@param local_num_variables [IN]
number of points on local processor
@param dof_func [IN]
array that contains the function numbers for all local points
@param CF_marker [IN]
marker array for coarse points
@param coarse_dof_func_ptr [OUT]
pointer to array which contains the function numbers for local coarse points
@param coarse_pnts_global_ptr [OUT]
pointer to array which contains the number of the first coarse point on each processor and the total number of coarse points in its last element
@see */
/*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCoarseParms
 *
 * Determines the size data for the next coarser level: counts the local
 * C-points (CF_marker == 1), extracts their function numbers when
 * num_functions > 1, and computes the global coarse-point partitioning
 * across processors.
 *
 * Input:
 *   comm                - MPI communicator
 *   local_num_variables - number of points owned by this processor
 *   num_functions       - number of functions (systems of PDEs)
 *   dof_func            - function number of every local point
 *   CF_marker           - coarse/fine marker (1 = C-point)
 * Output:
 *   coarse_dof_func_ptr    - function numbers of the local C-points
 *                            (set only when num_functions > 1)
 *   coarse_pnts_global_ptr - with HYPRE_NO_GLOBAL_PARTITION: 2 entries,
 *                            first local coarse index and the next
 *                            processor's first index; otherwise a full
 *                            size-(num_procs+1) prefix-sum array
 *
 * Returns 0.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCoarseParms(MPI_Comm comm,
                           HYPRE_Int local_num_variables,
                           HYPRE_Int num_functions,
                           HYPRE_Int *dof_func,
                           HYPRE_Int *CF_marker,
                           HYPRE_Int **coarse_dof_func_ptr,
                           HYPRE_Int **coarse_pnts_global_ptr)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_COARSE_PARAMS] -= hypre_MPI_Wtime();
#endif

   HYPRE_Int pt;
   HYPRE_Int num_procs;
   HYPRE_Int n_coarse_local;
   HYPRE_Int *c_dof_func;
   HYPRE_Int *c_pnts_global;

   hypre_MPI_Comm_size(comm, &num_procs);

   /* count the C-points owned by this processor */
   n_coarse_local = 0;
   for (pt = 0; pt < local_num_variables; pt++)
   {
      if (CF_marker[pt] == 1)
      {
         n_coarse_local++;
      }
   }

   /* for systems, gather the function number of each local C-point */
   if (num_functions > 1)
   {
      HYPRE_Int next = 0;
      c_dof_func = hypre_CTAlloc(HYPRE_Int, n_coarse_local);
      for (pt = 0; pt < local_num_variables; pt++)
      {
         if (CF_marker[pt] == 1)
         {
            c_dof_func[next++] = dof_func[pt];
         }
      }
      *coarse_dof_func_ptr = c_dof_func;
   }

#ifdef HYPRE_NO_GLOBAL_PARTITION
   {
      HYPRE_Int scan_recv;

      c_pnts_global = hypre_CTAlloc(HYPRE_Int, 2);
      /* inclusive prefix sum of coarse counts over the processor ranks */
      hypre_MPI_Scan(&n_coarse_local, &scan_recv, 1, HYPRE_MPI_INT,
                     hypre_MPI_SUM, comm);
      /* first point in my range */
      c_pnts_global[0] = scan_recv - n_coarse_local;
      /* first point in next proc's range */
      c_pnts_global[1] = scan_recv;
   }
#else
   /* gather every processor's count, then turn it into a prefix sum */
   c_pnts_global = hypre_CTAlloc(HYPRE_Int, num_procs+1);
   hypre_MPI_Allgather(&n_coarse_local, 1, HYPRE_MPI_INT,
                       &c_pnts_global[1], 1, HYPRE_MPI_INT, comm);
   for (pt = 2; pt < num_procs+1; pt++)
   {
      c_pnts_global[pt] += c_pnts_global[pt-1];
   }
#endif

   *coarse_pnts_global_ptr = c_pnts_global;

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_COARSE_PARAMS] += hypre_MPI_Wtime();
#endif

   return 0;
}
| 4,527 | 29.802721 | 147 | c |
AMG | AMG-master/parcsr_ls/par_coarsen.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
*****************************************************************************/
/* following should be in a header file */
#include "_hypre_parcsr_ls.h"
/*==========================================================================*/
/*==========================================================================*/
/**
Selects a coarse "grid" based on the graph of a matrix.
Notes:
\begin{itemize}
\item The underlying matrix storage scheme is a hypre_ParCSR matrix.
\item The routine returns the following:
\begin{itemize}
\item S - a ParCSR matrix representing the "strength matrix". This is
used in the "build interpolation" routine.
\item CF\_marker - an array indicating both C-pts (value = 1) and
F-pts (value = -1)
\end{itemize}
\item We define the following temporary storage:
\begin{itemize}
\item measure\_array - an array containing the "measures" for each
of the fine-grid points
\item graph\_array - an array containing the list of points in the
"current subgraph" being considered in the coarsening process.
\end{itemize}
\item The graph of the "strength matrix" for A is a subgraph of the
graph of A, but requires nonsymmetric storage even if A is
symmetric. This is because of the directional nature of the
"strength of dependence" notion (see below). Since we are using
nonsymmetric storage for A right now, this is not a problem. If we
ever add the ability to store A symmetrically, then we could store
the strength graph as floats instead of doubles to save space.
\item This routine currently "compresses" the strength matrix. We
should consider the possibility of defining this matrix to have the
same "nonzero structure" as A. To do this, we could use the same
A\_i and A\_j arrays, and would need only define the S\_data array.
There are several pros and cons to discuss.
\end{itemize}
Terminology:
\begin{itemize}
\item Ruge's terminology: A point is "strongly connected to" $j$, or
"strongly depends on" $j$, if $-a_ij >= \theta max_{l != j} \{-a_il\}$.
\item Here, we retain some of this terminology, but with a more
generalized notion of "strength". We also retain the "natural"
graph notation for representing the directed graph of a matrix.
That is, the nonzero entry $a_ij$ is represented as: i --> j. In
the strength matrix, S, the entry $s_ij$ is also graphically denoted
as above, and means both of the following:
\begin{itemize}
\item $i$ "depends on" $j$ with "strength" $s_ij$
\item $j$ "influences" $i$ with "strength" $s_ij$
\end{itemize}
\end{itemize}
{\bf Input files:}
_hypre_parcsr_ls.h
@return Error code.
@param A [IN]
coefficient matrix
@param strength_threshold [IN]
threshold parameter used to define strength
@param S_ptr [OUT]
strength matrix
@param CF_marker_ptr [OUT]
array indicating C/F points
@see */
/*--------------------------------------------------------------------------*/
#define C_PT 1
#define F_PT -1
#define SF_PT -3
#define COMMON_C_PT 2
#define Z_PT -2
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCoarsen: parallel CLJP-style coarsening.
 *
 * Each point receives a "measure" (the number of points it influences,
 * i.e. the column sum of S, plus a tie-breaking fraction added by
 * hypre_BoomerAMGIndepSetInit).  The main loop repeatedly selects an
 * independent set of maximal-measure points as C-points, marks points
 * whose dependencies are all accounted for as F-points, and decrements
 * neighbor measures, until every point is classified.
 *
 * Parameters:
 *   S             [IN]     strength matrix (ParCSR)
 *   A             [IN]     original matrix; its comm package is used
 *                          when S carries none
 *   CF_init       [IN]     1: *CF_marker_ptr comes in pre-initialized and
 *                          is refined here; 2: alternate independent-set
 *                          initialization; otherwise fresh start
 *   debug_flag    [IN]     3 enables per-phase timing printouts
 *   CF_marker_ptr [IN/OUT] on exit, per-row flags: C_PT, F_PT, or SF_PT
 *
 * Returns: error code (ierr, which is never set nonzero in this routine).
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCoarsen( hypre_ParCSRMatrix *S,
hypre_ParCSRMatrix *A,
HYPRE_Int CF_init,
HYPRE_Int debug_flag,
HYPRE_Int **CF_marker_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(S);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = NULL;
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(S);
HYPRE_Int num_variables = hypre_CSRMatrixNumRows(S_diag);
HYPRE_Int col_1 = hypre_ParCSRMatrixFirstColDiag(S);
HYPRE_Int col_n = col_1 + hypre_CSRMatrixNumCols(S_diag);
HYPRE_Int num_cols_offd = 0;
hypre_CSRMatrix *S_ext;
HYPRE_Int *S_ext_i = NULL;
HYPRE_Int *S_ext_j = NULL;
HYPRE_Int num_sends = 0;
HYPRE_Int *int_buf_data;
HYPRE_Real *buf_data;
HYPRE_Int *CF_marker;
HYPRE_Int *CF_marker_offd;
HYPRE_Real *measure_array;
HYPRE_Int *graph_array;
HYPRE_Int *graph_array_offd;
HYPRE_Int graph_size;
HYPRE_Int graph_offd_size;
HYPRE_Int global_graph_size;
HYPRE_Int i, j, k, kc, jS, kS, ig, elmt;
HYPRE_Int index, start, my_id, num_procs, jrow, cnt;
HYPRE_Int ierr = 0;
HYPRE_Int use_commpkg_A = 0;
HYPRE_Int break_var = 1;
HYPRE_Real wall_time;
HYPRE_Int iter = 0;
/* NOTE(review): `iter` is also declared inside the disabled debug block
   below; enabling that block would redeclare it. */
#if 0 /* debugging */
char filename[256];
FILE *fp;
HYPRE_Int iter = 0;
#endif
/*--------------------------------------------------------------
 * Compute a ParCSR strength matrix, S.
 *
 * For now, the "strength" of dependence/influence is defined in
 * the following way: i depends on j if
 * aij > hypre_max (k != i) aik, aii < 0
 * or
 * aij < hypre_min (k != i) aik, aii >= 0
 * Then S_ij = 1, else S_ij = 0.
 *
 * NOTE: the entries are negative initially, corresponding
 * to "unaccounted-for" dependence.
 *----------------------------------------------------------------*/
S_ext = NULL;
if (debug_flag == 3) wall_time = time_getWallclockSeconds();
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
/* fall back to A's communication package when S carries none */
if (!comm_pkg)
{
use_commpkg_A = 1;
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends));
buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends));
num_cols_offd = hypre_CSRMatrixNumCols(S_offd);
S_diag_j = hypre_CSRMatrixJ(S_diag);
if (num_cols_offd)
{
S_offd_j = hypre_CSRMatrixJ(S_offd);
}
/*----------------------------------------------------------
 * Compute the measures
 *
 * The measures are currently given by the column sums of S.
 * Hence, measure_array[i] is the number of influences
 * of variable i.
 *
 * The measures are augmented by a random number
 * between 0 and 1.
 *----------------------------------------------------------*/
measure_array = hypre_CTAlloc(HYPRE_Real, num_variables+num_cols_offd);
/* accumulate off-diagonal column sums in the tail of measure_array */
for (i=0; i < S_offd_i[num_variables]; i++)
{
measure_array[num_variables + S_offd_j[i]] += 1.0;
}
/* start sending the off-processor contributions back to their owners;
   the diag-part accumulation below only touches indices < num_variables,
   so it can overlap with this communication */
if (num_procs > 1)
comm_handle = hypre_ParCSRCommHandleCreate(2, comm_pkg,
&measure_array[num_variables], buf_data);
for (i=0; i < S_diag_i[num_variables]; i++)
{
measure_array[S_diag_j[i]] += 1.0;
}
if (num_procs > 1)
hypre_ParCSRCommHandleDestroy(comm_handle);
/* fold the received off-processor contributions into the owned measures */
index = 0;
for (i=0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
measure_array[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]
+= buf_data[index++];
}
for (i=num_variables; i < num_variables+num_cols_offd; i++)
{
measure_array[i] = 0;
}
/* this augments the measures */
if (CF_init == 2)
hypre_BoomerAMGIndepSetInit(S, measure_array, 1);
else
hypre_BoomerAMGIndepSetInit(S, measure_array, 0);
/*---------------------------------------------------
 * Initialize the graph array
 * graph_array contains interior points in elements 0 ... num_variables-1
 * followed by boundary values
 *---------------------------------------------------*/
graph_array = hypre_CTAlloc(HYPRE_Int, num_variables);
if (num_cols_offd)
graph_array_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
else
graph_array_offd = NULL;
/* initialize measure array and graph array */
for (ig = 0; ig < num_cols_offd; ig++)
graph_array_offd[ig] = ig;
/*---------------------------------------------------
 * Initialize the C/F marker array
 * C/F marker array contains interior points in elements 0 ...
 * num_variables-1 followed by boundary values
 *---------------------------------------------------*/
graph_offd_size = num_cols_offd;
if (CF_init==1)
{
/* caller supplied a pre-initialized CF_marker; refine it */
CF_marker = *CF_marker_ptr;
cnt = 0;
for (i=0; i < num_variables; i++)
{
if ( (S_offd_i[i+1]-S_offd_i[i]) > 0
|| CF_marker[i] == -1)
{
CF_marker[i] = 0;
}
if ( CF_marker[i] == Z_PT)
{
if (measure_array[i] >= 1.0 ||
(S_diag_i[i+1]-S_diag_i[i]) > 0)
{
CF_marker[i] = 0;
graph_array[cnt++] = i;
}
else
{
CF_marker[i] = F_PT;
}
}
else if (CF_marker[i] == SF_PT)
measure_array[i] = 0;
else
graph_array[cnt++] = i;
}
}
else
{
CF_marker = hypre_CTAlloc(HYPRE_Int, num_variables);
cnt = 0;
for (i=0; i < num_variables; i++)
{
CF_marker[i] = 0;
/* completely unconnected rows become "special" fine points */
if ( (S_diag_i[i+1]-S_diag_i[i]) == 0
&& (S_offd_i[i+1]-S_offd_i[i]) == 0)
{
CF_marker[i] = SF_PT;
measure_array[i] = 0;
}
else
graph_array[cnt++] = i;
}
}
graph_size = cnt;
if (num_cols_offd)
CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
else
CF_marker_offd = NULL;
for (i=0; i < num_cols_offd; i++)
CF_marker_offd[i] = 0;
/*---------------------------------------------------
 * Loop until all points are either fine or coarse.
 *---------------------------------------------------*/
if (num_procs > 1)
{
/* external rows of S needed for the common-C-point heuristic below */
if (use_commpkg_A)
S_ext = hypre_ParCSRMatrixExtractBExt(S,A,0);
else
S_ext = hypre_ParCSRMatrixExtractBExt(S,S,0);
S_ext_i = hypre_CSRMatrixI(S_ext);
S_ext_j = hypre_CSRMatrixJ(S_ext);
}
/* compress S_ext and convert column numbers:
   local columns become 0-based local indices; off-processor columns
   found in col_map_offd are encoded as -kc-1 (decoded where used) */
index = 0;
for (i=0; i < num_cols_offd; i++)
{
for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++)
{
k = S_ext_j[j];
if (k >= col_1 && k < col_n)
{
S_ext_j[index++] = k - col_1;
}
else
{
kc = hypre_BinarySearch(col_map_offd,k,num_cols_offd);
if (kc > -1) S_ext_j[index++] = -kc-1;
}
}
S_ext_i[i] = index;
}
/* shift row pointers down one slot to restore CSR form after compression */
for (i = num_cols_offd; i > 0; i--)
S_ext_i[i] = S_ext_i[i-1];
if (num_procs > 1) S_ext_i[0] = 0;
if (debug_flag == 3)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Initialize CLJP phase = %f\n",
my_id, wall_time);
}
while (1)
{
/*------------------------------------------------
 * Exchange boundary data, i.e. get measures and S_ext_data
 *------------------------------------------------*/
if (num_procs > 1)
comm_handle = hypre_ParCSRCommHandleCreate(2, comm_pkg,
&measure_array[num_variables], buf_data);
if (num_procs > 1)
hypre_ParCSRCommHandleDestroy(comm_handle);
index = 0;
for (i=0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
measure_array[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]
+= buf_data[index++];
}
/*------------------------------------------------
 * Set F-pts and update subgraph
 *------------------------------------------------*/
if (iter || (CF_init != 1))
{
for (ig = 0; ig < graph_size; ig++)
{
i = graph_array[ig];
if ( (CF_marker[i] != C_PT) && (measure_array[i] < 1) )
{
/* set to be an F-pt */
CF_marker[i] = F_PT;
/* make sure all dependencies have been accounted for;
   a positive column index means a still-active edge */
for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++)
{
if (S_diag_j[jS] > -1)
{
CF_marker[i] = 0;
}
}
for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++)
{
if (S_offd_j[jS] > -1)
{
CF_marker[i] = 0;
}
}
}
if (CF_marker[i])
{
measure_array[i] = 0;
/* take point out of the subgraph (swap with last active entry) */
graph_size--;
graph_array[ig] = graph_array[graph_size];
graph_array[graph_size] = i;
ig--;
}
}
}
/*------------------------------------------------
 * Exchange boundary data, i.e. get measures
 *------------------------------------------------*/
if (debug_flag == 3) wall_time = time_getWallclockSeconds();
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
jrow = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j);
buf_data[index++] = measure_array[jrow];
}
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, buf_data,
&measure_array[num_variables]);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
/*------------------------------------------------
 * Debugging:
 *
 * Uncomment the sections of code labeled
 * "debugging" to generate several files that
 * can be visualized using the `coarsen.m'
 * matlab routine.
 *------------------------------------------------*/
#if 0 /* debugging */
/* print out measures */
hypre_sprintf(filename, "coarsen.out.measures.%04d", iter);
fp = fopen(filename, "w");
for (i = 0; i < num_variables; i++)
{
hypre_fprintf(fp, "%f\n", measure_array[i]);
}
fclose(fp);
/* print out strength matrix */
hypre_sprintf(filename, "coarsen.out.strength.%04d", iter);
hypre_CSRMatrixPrint(S, filename);
/* print out C/F marker */
hypre_sprintf(filename, "coarsen.out.CF.%04d", iter);
fp = fopen(filename, "w");
for (i = 0; i < num_variables; i++)
{
hypre_fprintf(fp, "%d\n", CF_marker[i]);
}
fclose(fp);
iter++;
#endif
/*------------------------------------------------
 * Test for convergence: stop once no processor has
 * unclassified points left
 *------------------------------------------------*/
hypre_MPI_Allreduce(&graph_size,&global_graph_size,1,HYPRE_MPI_INT,hypre_MPI_SUM,comm);
if (global_graph_size == 0)
break;
/*------------------------------------------------
 * Pick an independent set of points with
 * maximal measure.
 *------------------------------------------------*/
if (iter || (CF_init != 1))
{
hypre_BoomerAMGIndepSet(S, measure_array, graph_array,
graph_size,
graph_array_offd, graph_offd_size,
CF_marker, CF_marker_offd);
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg,
CF_marker_offd, int_buf_data);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
/* if a neighboring processor vetoed a boundary point
   (returned 0 for it), demote our local selection too */
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1);j++) {
elmt = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j);
if (!int_buf_data[index++] && CF_marker[elmt] > 0)
{
CF_marker[elmt] = 0;
}
}
}
}
iter++;
/*------------------------------------------------
 * Exchange boundary data for CF_marker
 *------------------------------------------------*/
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
elmt = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j);
int_buf_data[index++] = CF_marker[elmt];
}
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
for (ig = 0; ig < graph_offd_size; ig++)
{
i = graph_array_offd[ig];
if (CF_marker_offd[i] < 0)
{
/* take point out of the subgraph */
graph_offd_size--;
graph_array_offd[ig] = graph_array_offd[graph_offd_size];
graph_array_offd[graph_offd_size] = i;
ig--;
}
}
if (debug_flag == 3)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d iter %d comm. and subgraph update = %f\n",
my_id, iter, wall_time);
}
/*------------------------------------------------
 * Set C_pts and apply heuristics.
 *------------------------------------------------*/
for (i=num_variables; i < num_variables+num_cols_offd; i++)
{
measure_array[i] = 0;
}
if (debug_flag == 3) wall_time = time_getWallclockSeconds();
for (ig = 0; ig < graph_size; ig++)
{
i = graph_array[ig];
/*---------------------------------------------
 * Heuristic: C-pts don't interpolate from
 * neighbors that influence them.
 *---------------------------------------------*/
if (CF_marker[i] > 0)
{
/* set to be a C-pt */
CF_marker[i] = C_PT;
for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++)
{
j = S_diag_j[jS];
if (j > -1)
{
/* "remove" edge from S: encode column j as -j-1 */
S_diag_j[jS] = -S_diag_j[jS]-1;
/* decrement measures of unmarked neighbors */
if (!CF_marker[j])
{
measure_array[j]--;
}
}
}
for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++)
{
j = S_offd_j[jS];
if (j > -1)
{
/* "remove" edge from S */
S_offd_j[jS] = -S_offd_j[jS]-1;
/* decrement measures of unmarked neighbors */
if (!CF_marker_offd[j])
{
measure_array[j+num_variables]--;
}
}
}
}
else
{
/* marked dependencies */
for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++)
{
j = S_diag_j[jS];
if (j < 0) j = -j-1;
if (CF_marker[j] > 0)
{
if (S_diag_j[jS] > -1)
{
/* "remove" edge from S */
S_diag_j[jS] = -S_diag_j[jS]-1;
}
/* IMPORTANT: consider all dependencies */
/* temporarily modify CF_marker */
CF_marker[j] = COMMON_C_PT;
}
else if (CF_marker[j] == SF_PT)
{
if (S_diag_j[jS] > -1)
{
/* "remove" edge from S */
S_diag_j[jS] = -S_diag_j[jS]-1;
}
}
}
for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++)
{
j = S_offd_j[jS];
if (j < 0) j = -j-1;
if (CF_marker_offd[j] > 0)
{
if (S_offd_j[jS] > -1)
{
/* "remove" edge from S */
S_offd_j[jS] = -S_offd_j[jS]-1;
}
/* IMPORTANT: consider all dependencies */
/* temporarily modify CF_marker */
CF_marker_offd[j] = COMMON_C_PT;
}
else if (CF_marker_offd[j] == SF_PT)
{
if (S_offd_j[jS] > -1)
{
/* "remove" edge from S */
S_offd_j[jS] = -S_offd_j[jS]-1;
}
}
}
/* unmarked dependencies */
for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++)
{
if (S_diag_j[jS] > -1)
{
j = S_diag_j[jS];
break_var = 1;
/* check for common C-pt */
for (kS = S_diag_i[j]; kS < S_diag_i[j+1]; kS++)
{
k = S_diag_j[kS];
if (k < 0) k = -k-1;
/* IMPORTANT: consider all dependencies */
if (CF_marker[k] == COMMON_C_PT)
{
/* "remove" edge from S and update measure*/
S_diag_j[jS] = -S_diag_j[jS]-1;
measure_array[j]--;
break_var = 0;
break;
}
}
if (break_var)
{
for (kS = S_offd_i[j]; kS < S_offd_i[j+1]; kS++)
{
k = S_offd_j[kS];
if (k < 0) k = -k-1;
/* IMPORTANT: consider all dependencies */
if ( CF_marker_offd[k] == COMMON_C_PT)
{
/* "remove" edge from S and update measure*/
S_diag_j[jS] = -S_diag_j[jS]-1;
measure_array[j]--;
break;
}
}
}
}
}
for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++)
{
if (S_offd_j[jS] > -1)
{
j = S_offd_j[jS];
/* check for common C-pt; S_ext columns use the encoding
   set up during compression: >= 0 local, -kc-1 off-proc */
for (kS = S_ext_i[j]; kS < S_ext_i[j+1]; kS++)
{
k = S_ext_j[kS];
if (k >= 0)
{
/* IMPORTANT: consider all dependencies */
if (CF_marker[k] == COMMON_C_PT)
{
/* "remove" edge from S and update measure*/
S_offd_j[jS] = -S_offd_j[jS]-1;
measure_array[j+num_variables]--;
break;
}
}
else
{
kc = -k-1;
if (kc > -1 && CF_marker_offd[kc] == COMMON_C_PT)
{
/* "remove" edge from S and update measure*/
S_offd_j[jS] = -S_offd_j[jS]-1;
measure_array[j+num_variables]--;
break;
}
}
}
}
}
}
/* reset CF_marker: undo the temporary COMMON_C_PT tags */
for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++)
{
j = S_diag_j[jS];
if (j < 0) j = -j-1;
if (CF_marker[j] == COMMON_C_PT)
{
CF_marker[j] = C_PT;
}
}
for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++)
{
j = S_offd_j[jS];
if (j < 0) j = -j-1;
if (CF_marker_offd[j] == COMMON_C_PT)
{
CF_marker_offd[j] = C_PT;
}
}
}
if (debug_flag == 3)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d CLJP phase = %f graph_size = %d nc_offd = %d\n",
my_id, wall_time, graph_size, num_cols_offd);
}
}
/*---------------------------------------------------
 * Clean up and return
 *---------------------------------------------------*/
/* Reset S_matrix: decode the -j-1 "removed" columns back to j */
for (i=0; i < S_diag_i[num_variables]; i++)
{
if (S_diag_j[i] < 0)
S_diag_j[i] = -S_diag_j[i]-1;
}
for (i=0; i < S_offd_i[num_variables]; i++)
{
if (S_offd_j[i] < 0)
S_offd_j[i] = -S_offd_j[i]-1;
}
/*for (i=0; i < num_variables; i++)
if (CF_marker[i] == SF_PT) CF_marker[i] = F_PT;*/
hypre_TFree(measure_array);
hypre_TFree(graph_array);
if (num_cols_offd) hypre_TFree(graph_array_offd);
hypre_TFree(buf_data);
hypre_TFree(int_buf_data);
hypre_TFree(CF_marker_offd);
if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext);
*CF_marker_ptr = CF_marker;
return (ierr);
}
/*==========================================================================
* Ruge's coarsening algorithm
*==========================================================================*/
#define C_PT 1
#define F_PT -1
#define Z_PT -2
#define SF_PT -3 /* special fine points */
#define SC_PT 3 /* special coarse points */
#define UNDECIDED 0
/**************************************************************
*
* Ruge Coarsening routine
*
**************************************************************/
HYPRE_Int
hypre_BoomerAMGCoarsenRuge( hypre_ParCSRMatrix *S,
hypre_ParCSRMatrix *A,
HYPRE_Int measure_type,
HYPRE_Int coarsen_type,
HYPRE_Int debug_flag,
HYPRE_Int **CF_marker_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(S);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_j = hypre_CSRMatrixJ(S_diag);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = NULL;
HYPRE_Int num_variables = hypre_CSRMatrixNumRows(S_diag);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(S_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(S);
hypre_CSRMatrix *S_ext = NULL;
HYPRE_Int *S_ext_i = NULL;
HYPRE_Int *S_ext_j = NULL;
hypre_CSRMatrix *ST;
HYPRE_Int *ST_i;
HYPRE_Int *ST_j;
HYPRE_Int *CF_marker;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int ci_tilde = -1;
HYPRE_Int ci_tilde_mark = -1;
HYPRE_Int ci_tilde_offd = -1;
HYPRE_Int ci_tilde_offd_mark = -1;
HYPRE_Int *measure_array;
HYPRE_Int *graph_array;
HYPRE_Int *int_buf_data = NULL;
HYPRE_Int *ci_array = NULL;
HYPRE_Int i, j, k, jS;
HYPRE_Int ji, jj, jk, jm, index;
HYPRE_Int set_empty = 1;
HYPRE_Int C_i_nonempty = 0;
HYPRE_Int num_nonzeros;
HYPRE_Int num_procs, my_id;
HYPRE_Int num_sends = 0;
HYPRE_Int first_col, start;
HYPRE_Int col_0, col_n;
hypre_LinkList LoL_head;
hypre_LinkList LoL_tail;
HYPRE_Int *lists, *where;
HYPRE_Int measure, new_meas;
HYPRE_Int meas_type = 0;
HYPRE_Int agg_2 = 0;
HYPRE_Int num_left, elmt;
HYPRE_Int nabor, nabor_two;
HYPRE_Int ierr = 0;
HYPRE_Int use_commpkg_A = 0;
HYPRE_Int break_var = 0;
HYPRE_Int f_pnt = F_PT;
HYPRE_Real wall_time;
if (coarsen_type < 0) coarsen_type = -coarsen_type;
if (measure_type == 1 || measure_type == 4) meas_type = 1;
if (measure_type == 4 || measure_type == 3) agg_2 = 1;
/*-------------------------------------------------------
* Initialize the C/F marker, LoL_head, LoL_tail arrays
*-------------------------------------------------------*/
LoL_head = NULL;
LoL_tail = NULL;
lists = hypre_CTAlloc(HYPRE_Int, num_variables);
where = hypre_CTAlloc(HYPRE_Int, num_variables);
#if 0 /* debugging */
char filename[256];
FILE *fp;
HYPRE_Int iter = 0;
#endif
/*--------------------------------------------------------------
* Compute a CSR strength matrix, S.
*
* For now, the "strength" of dependence/influence is defined in
* the following way: i depends on j if
* aij > hypre_max (k != i) aik, aii < 0
* or
* aij < hypre_min (k != i) aik, aii >= 0
* Then S_ij = 1, else S_ij = 0.
*
* NOTE: the entries are negative initially, corresponding
* to "unaccounted-for" dependence.
*----------------------------------------------------------------*/
if (debug_flag == 3) wall_time = time_getWallclockSeconds();
first_col = hypre_ParCSRMatrixFirstColDiag(S);
col_0 = first_col-1;
col_n = col_0+num_variables;
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
if (!comm_pkg)
{
use_commpkg_A = 1;
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
if (num_cols_offd) S_offd_j = hypre_CSRMatrixJ(S_offd);
jS = S_i[num_variables];
ST = hypre_CSRMatrixCreate(num_variables, num_variables, jS);
ST_i = hypre_CTAlloc(HYPRE_Int,num_variables+1);
ST_j = hypre_CTAlloc(HYPRE_Int,jS);
hypre_CSRMatrixI(ST) = ST_i;
hypre_CSRMatrixJ(ST) = ST_j;
/*----------------------------------------------------------
* generate transpose of S, ST
*----------------------------------------------------------*/
for (i=0; i <= num_variables; i++)
ST_i[i] = 0;
for (i=0; i < jS; i++)
{
ST_i[S_j[i]+1]++;
}
for (i=0; i < num_variables; i++)
{
ST_i[i+1] += ST_i[i];
}
for (i=0; i < num_variables; i++)
{
for (j=S_i[i]; j < S_i[i+1]; j++)
{
index = S_j[j];
ST_j[ST_i[index]] = i;
ST_i[index]++;
}
}
for (i = num_variables; i > 0; i--)
{
ST_i[i] = ST_i[i-1];
}
ST_i[0] = 0;
/*----------------------------------------------------------
* Compute the measures
*
* The measures are given by the row sums of ST.
* Hence, measure_array[i] is the number of influences
* of variable i.
* correct actual measures through adding influences from
* neighbor processors
*----------------------------------------------------------*/
measure_array = hypre_CTAlloc(HYPRE_Int, num_variables);
for (i = 0; i < num_variables; i++)
{
measure_array[i] = ST_i[i+1]-ST_i[i];
}
/* special case for Falgout coarsening */
if (coarsen_type == 6)
{
f_pnt = Z_PT;
coarsen_type = 1;
}
if (coarsen_type == 10)
{
f_pnt = Z_PT;
coarsen_type = 11;
}
if ((meas_type || (coarsen_type != 1 && coarsen_type != 11))
&& num_procs > 1)
{
if (use_commpkg_A)
S_ext = hypre_ParCSRMatrixExtractBExt(S,A,0);
else
S_ext = hypre_ParCSRMatrixExtractBExt(S,S,0);
S_ext_i = hypre_CSRMatrixI(S_ext);
S_ext_j = hypre_CSRMatrixJ(S_ext);
num_nonzeros = S_ext_i[num_cols_offd];
/*first_col = hypre_ParCSRMatrixFirstColDiag(S);
col_0 = first_col-1;
col_n = col_0+num_variables; */
if (meas_type)
{
for (i=0; i < num_nonzeros; i++)
{
index = S_ext_j[i] - first_col;
if (index > -1 && index < num_variables)
measure_array[index]++;
}
}
}
/*---------------------------------------------------
* Loop until all points are either fine or coarse.
*---------------------------------------------------*/
if (debug_flag == 3) wall_time = time_getWallclockSeconds();
/* first coarsening phase */
/*************************************************************
*
* Initialize the lists
*
*************************************************************/
CF_marker = hypre_CTAlloc(HYPRE_Int, num_variables);
num_left = 0;
for (j = 0; j < num_variables; j++)
{
if ((S_i[j+1]-S_i[j])== 0 &&
(S_offd_i[j+1]-S_offd_i[j]) == 0)
{
CF_marker[j] = SF_PT;
if (agg_2) CF_marker[j] = SC_PT;
measure_array[j] = 0;
}
else
{
CF_marker[j] = UNDECIDED;
num_left++;
}
}
for (j = 0; j < num_variables; j++)
{
measure = measure_array[j];
if (CF_marker[j] != SF_PT && CF_marker[j] != SC_PT)
{
if (measure > 0)
{
hypre_enter_on_lists(&LoL_head, &LoL_tail, measure, j, lists, where);
}
else
{
if (measure < 0) hypre_printf("negative measure!\n");
CF_marker[j] = f_pnt;
for (k = S_i[j]; k < S_i[j+1]; k++)
{
nabor = S_j[k];
if (CF_marker[nabor] != SF_PT && CF_marker[nabor] != SC_PT)
{
if (nabor < j)
{
new_meas = measure_array[nabor];
if (new_meas > 0)
hypre_remove_point(&LoL_head, &LoL_tail, new_meas,
nabor, lists, where);
new_meas = ++(measure_array[nabor]);
hypre_enter_on_lists(&LoL_head, &LoL_tail, new_meas,
nabor, lists, where);
}
else
{
new_meas = ++(measure_array[nabor]);
}
}
}
--num_left;
}
}
}
/****************************************************************
*
* Main loop of Ruge-Stueben first coloring pass.
*
* WHILE there are still points to classify DO:
* 1) find first point, i, on list with max_measure
* make i a C-point, remove it from the lists
* 2) For each point, j, in S_i^T,
* a) Set j to be an F-point
* b) For each point, k, in S_j
* move k to the list in LoL with measure one
* greater than it occupies (creating new LoL
* entry if necessary)
* 3) For each point, j, in S_i,
* move j to the list in LoL with measure one
* smaller than it occupies (creating new LoL
* entry if necessary)
*
****************************************************************/
while (num_left > 0)
{
index = LoL_head -> head;
CF_marker[index] = C_PT;
measure = measure_array[index];
measure_array[index] = 0;
--num_left;
hypre_remove_point(&LoL_head, &LoL_tail, measure, index, lists, where);
for (j = ST_i[index]; j < ST_i[index+1]; j++)
{
nabor = ST_j[j];
if (CF_marker[nabor] == UNDECIDED)
{
CF_marker[nabor] = F_PT;
measure = measure_array[nabor];
hypre_remove_point(&LoL_head, &LoL_tail, measure, nabor, lists, where);
--num_left;
for (k = S_i[nabor]; k < S_i[nabor+1]; k++)
{
nabor_two = S_j[k];
if (CF_marker[nabor_two] == UNDECIDED)
{
measure = measure_array[nabor_two];
hypre_remove_point(&LoL_head, &LoL_tail, measure,
nabor_two, lists, where);
new_meas = ++(measure_array[nabor_two]);
hypre_enter_on_lists(&LoL_head, &LoL_tail, new_meas,
nabor_two, lists, where);
}
}
}
}
for (j = S_i[index]; j < S_i[index+1]; j++)
{
nabor = S_j[j];
if (CF_marker[nabor] == UNDECIDED)
{
measure = measure_array[nabor];
hypre_remove_point(&LoL_head, &LoL_tail, measure, nabor, lists, where);
measure_array[nabor] = --measure;
if (measure > 0)
hypre_enter_on_lists(&LoL_head, &LoL_tail, measure, nabor,
lists, where);
else
{
CF_marker[nabor] = F_PT;
--num_left;
for (k = S_i[nabor]; k < S_i[nabor+1]; k++)
{
nabor_two = S_j[k];
if (CF_marker[nabor_two] == UNDECIDED)
{
new_meas = measure_array[nabor_two];
hypre_remove_point(&LoL_head, &LoL_tail, new_meas,
nabor_two, lists, where);
new_meas = ++(measure_array[nabor_two]);
hypre_enter_on_lists(&LoL_head, &LoL_tail, new_meas,
nabor_two, lists, where);
}
}
}
}
}
}
hypre_TFree(measure_array);
hypre_CSRMatrixDestroy(ST);
if (debug_flag == 3)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Coarsen 1st pass = %f\n",
my_id, wall_time);
}
hypre_TFree(lists);
hypre_TFree(where);
hypre_TFree(LoL_head);
hypre_TFree(LoL_tail);
for (i=0; i < num_variables; i++)
if (CF_marker[i] == SC_PT) CF_marker[i] = C_PT;
if (coarsen_type == 11)
{
*CF_marker_ptr = CF_marker;
if (meas_type && num_procs > 1)
hypre_CSRMatrixDestroy(S_ext);
return 0;
}
/* second pass, check fine points for coarse neighbors
for coarsen_type = 2, the second pass includes
off-processore boundary points */
/*---------------------------------------------------
* Initialize the graph array
*---------------------------------------------------*/
graph_array = hypre_CTAlloc(HYPRE_Int, num_variables);
for (i = 0; i < num_variables; i++)
{
graph_array[i] = -1;
}
if (debug_flag == 3) wall_time = time_getWallclockSeconds();
if (coarsen_type == 2)
{
/*------------------------------------------------
* Exchange boundary data for CF_marker
*------------------------------------------------*/
CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends));
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
ci_array = hypre_CTAlloc(HYPRE_Int,num_cols_offd);
for (i=0; i < num_cols_offd; i++)
ci_array[i] = -1;
for (i=0; i < num_variables; i++)
{
if (ci_tilde_mark != i) ci_tilde = -1;
if (ci_tilde_offd_mark != i) ci_tilde_offd = -1;
if (CF_marker[i] == -1)
{
break_var = 1;
for (ji = S_i[i]; ji < S_i[i+1]; ji++)
{
j = S_j[ji];
if (CF_marker[j] > 0)
graph_array[j] = i;
}
for (ji = S_offd_i[i]; ji < S_offd_i[i+1]; ji++)
{
j = S_offd_j[ji];
if (CF_marker_offd[j] > 0)
ci_array[j] = i;
}
for (ji = S_i[i]; ji < S_i[i+1]; ji++)
{
j = S_j[ji];
if (CF_marker[j] == -1)
{
set_empty = 1;
for (jj = S_i[j]; jj < S_i[j+1]; jj++)
{
index = S_j[jj];
if (graph_array[index] == i)
{
set_empty = 0;
break;
}
}
if (set_empty)
{
for (jj = S_offd_i[j]; jj < S_offd_i[j+1]; jj++)
{
index = S_offd_j[jj];
if (ci_array[index] == i)
{
set_empty = 0;
break;
}
}
}
if (set_empty)
{
if (C_i_nonempty)
{
CF_marker[i] = 1;
if (ci_tilde > -1)
{
CF_marker[ci_tilde] = -1;
ci_tilde = -1;
}
if (ci_tilde_offd > -1)
{
CF_marker_offd[ci_tilde_offd] = -1;
ci_tilde_offd = -1;
}
C_i_nonempty = 0;
break_var = 0;
break;
}
else
{
ci_tilde = j;
ci_tilde_mark = i;
CF_marker[j] = 1;
C_i_nonempty = 1;
i--;
break_var = 0;
break;
}
}
}
}
if (break_var)
{
for (ji = S_offd_i[i]; ji < S_offd_i[i+1]; ji++)
{
j = S_offd_j[ji];
if (CF_marker_offd[j] == -1)
{
set_empty = 1;
for (jj = S_ext_i[j]; jj < S_ext_i[j+1]; jj++)
{
index = S_ext_j[jj];
if (index > col_0 && index < col_n) /* index interior */
{
if (graph_array[index-first_col] == i)
{
set_empty = 0;
break;
}
}
else
{
jk = hypre_BinarySearch(col_map_offd,index,num_cols_offd);
if (jk != -1)
{
if (ci_array[jk] == i)
{
set_empty = 0;
break;
}
}
}
}
if (set_empty)
{
if (C_i_nonempty)
{
CF_marker[i] = 1;
if (ci_tilde > -1)
{
CF_marker[ci_tilde] = -1;
ci_tilde = -1;
}
if (ci_tilde_offd > -1)
{
CF_marker_offd[ci_tilde_offd] = -1;
ci_tilde_offd = -1;
}
C_i_nonempty = 0;
break;
}
else
{
ci_tilde_offd = j;
ci_tilde_offd_mark = i;
CF_marker_offd[j] = 1;
C_i_nonempty = 1;
i--;
break;
}
}
}
}
}
}
}
}
else
{
for (i=0; i < num_variables; i++)
{
if (ci_tilde_mark != i) ci_tilde = -1;
if (CF_marker[i] == -1)
{
for (ji = S_i[i]; ji < S_i[i+1]; ji++)
{
j = S_j[ji];
if (CF_marker[j] > 0)
graph_array[j] = i;
}
for (ji = S_i[i]; ji < S_i[i+1]; ji++)
{
j = S_j[ji];
if (CF_marker[j] == -1)
{
set_empty = 1;
for (jj = S_i[j]; jj < S_i[j+1]; jj++)
{
index = S_j[jj];
if (graph_array[index] == i)
{
set_empty = 0;
break;
}
}
if (set_empty)
{
if (C_i_nonempty)
{
CF_marker[i] = 1;
if (ci_tilde > -1)
{
CF_marker[ci_tilde] = -1;
ci_tilde = -1;
}
C_i_nonempty = 0;
break;
}
else
{
ci_tilde = j;
ci_tilde_mark = i;
CF_marker[j] = 1;
C_i_nonempty = 1;
i--;
break;
}
}
}
}
}
}
}
if (debug_flag == 3 && coarsen_type != 2)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Coarsen 2nd pass = %f\n",
my_id, wall_time);
}
/* third pass, check boundary fine points for coarse neighbors */
if (coarsen_type == 3 || coarsen_type == 4)
{
if (debug_flag == 3) wall_time = time_getWallclockSeconds();
CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends));
/*------------------------------------------------
* Exchange boundary data for CF_marker
*------------------------------------------------*/
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
ci_array = hypre_CTAlloc(HYPRE_Int,num_cols_offd);
for (i=0; i < num_cols_offd; i++)
ci_array[i] = -1;
}
if (coarsen_type > 1 && coarsen_type < 5)
{
for (i=0; i < num_variables; i++)
graph_array[i] = -1;
for (i=0; i < num_cols_offd; i++)
{
if (ci_tilde_mark != i) ci_tilde = -1;
if (ci_tilde_offd_mark != i) ci_tilde_offd = -1;
if (CF_marker_offd[i] == -1)
{
for (ji = S_ext_i[i]; ji < S_ext_i[i+1]; ji++)
{
j = S_ext_j[ji];
if (j > col_0 && j < col_n)
{
j = j - first_col;
if (CF_marker[j] > 0)
graph_array[j] = i;
}
else
{
jj = hypre_BinarySearch(col_map_offd,j,num_cols_offd);
if (jj != -1 && CF_marker_offd[jj] > 0)
ci_array[jj] = i;
}
}
for (ji = S_ext_i[i]; ji < S_ext_i[i+1]; ji++)
{
j = S_ext_j[ji];
if (j > col_0 && j < col_n)
{
j = j - first_col;
if ( CF_marker[j] == -1)
{
set_empty = 1;
for (jj = S_i[j]; jj < S_i[j+1]; jj++)
{
index = S_j[jj];
if (graph_array[index] == i)
{
set_empty = 0;
break;
}
}
for (jj = S_offd_i[j]; jj < S_offd_i[j+1]; jj++)
{
index = S_offd_j[jj];
if (ci_array[index] == i)
{
set_empty = 0;
break;
}
}
if (set_empty)
{
if (C_i_nonempty)
{
CF_marker_offd[i] = 1;
if (ci_tilde > -1)
{
CF_marker[ci_tilde] = -1;
ci_tilde = -1;
}
if (ci_tilde_offd > -1)
{
CF_marker_offd[ci_tilde_offd] = -1;
ci_tilde_offd = -1;
}
C_i_nonempty = 0;
break;
}
else
{
ci_tilde = j;
ci_tilde_mark = i;
CF_marker[j] = 1;
C_i_nonempty = 1;
i--;
break;
}
}
}
}
else
{
jm = hypre_BinarySearch(col_map_offd,j,num_cols_offd);
if (jm != -1 && CF_marker_offd[jm] == -1)
{
set_empty = 1;
for (jj = S_ext_i[jm]; jj < S_ext_i[jm+1]; jj++)
{
index = S_ext_j[jj];
if (index > col_0 && index < col_n)
{
if (graph_array[index-first_col] == i)
{
set_empty = 0;
break;
}
}
else
{
jk = hypre_BinarySearch(col_map_offd,index,num_cols_offd);
if (jk != -1)
{
if (ci_array[jk] == i)
{
set_empty = 0;
break;
}
}
}
}
if (set_empty)
{
if (C_i_nonempty)
{
CF_marker_offd[i] = 1;
if (ci_tilde > -1)
{
CF_marker[ci_tilde] = -1;
ci_tilde = -1;
}
if (ci_tilde_offd > -1)
{
CF_marker_offd[ci_tilde_offd] = -1;
ci_tilde_offd = -1;
}
C_i_nonempty = 0;
break;
}
else
{
ci_tilde_offd = jm;
ci_tilde_offd_mark = i;
CF_marker_offd[jm] = 1;
C_i_nonempty = 1;
i--;
break;
}
}
}
}
}
}
}
/*------------------------------------------------
* Send boundary data for CF_marker back
*------------------------------------------------*/
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg, CF_marker_offd,
int_buf_data);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
/* only CF_marker entries from larger procs are accepted
if coarsen_type = 4 coarse points are not overwritten */
index = 0;
if (coarsen_type != 4)
{
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
if (hypre_ParCSRCommPkgSendProc(comm_pkg,i) > my_id)
{
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)] =
int_buf_data[index++];
}
else
{
index += hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1) - start;
}
}
}
else
{
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
if (hypre_ParCSRCommPkgSendProc(comm_pkg,i) > my_id)
{
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
elmt = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j);
if (CF_marker[elmt] != 1)
CF_marker[elmt] = int_buf_data[index];
index++;
}
}
else
{
index += hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1) - start;
}
}
}
if (debug_flag == 3)
{
wall_time = time_getWallclockSeconds() - wall_time;
if (coarsen_type == 4)
hypre_printf("Proc = %d Coarsen 3rd pass = %f\n",
my_id, wall_time);
if (coarsen_type == 3)
hypre_printf("Proc = %d Coarsen 3rd pass = %f\n",
my_id, wall_time);
if (coarsen_type == 2)
hypre_printf("Proc = %d Coarsen 2nd pass = %f\n",
my_id, wall_time);
}
}
if (coarsen_type == 5)
{
/*------------------------------------------------
* Exchange boundary data for CF_marker
*------------------------------------------------*/
if (debug_flag == 3) wall_time = time_getWallclockSeconds();
CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends));
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
ci_array = hypre_CTAlloc(HYPRE_Int,num_cols_offd);
for (i=0; i < num_cols_offd; i++)
ci_array[i] = -1;
for (i=0; i < num_variables; i++)
graph_array[i] = -1;
for (i=0; i < num_variables; i++)
{
if (CF_marker[i] == -1 && (S_offd_i[i+1]-S_offd_i[i]) > 0)
{
break_var = 1;
for (ji = S_i[i]; ji < S_i[i+1]; ji++)
{
j = S_j[ji];
if (CF_marker[j] > 0)
graph_array[j] = i;
}
for (ji = S_offd_i[i]; ji < S_offd_i[i+1]; ji++)
{
j = S_offd_j[ji];
if (CF_marker_offd[j] > 0)
ci_array[j] = i;
}
for (ji = S_offd_i[i]; ji < S_offd_i[i+1]; ji++)
{
j = S_offd_j[ji];
if (CF_marker_offd[j] == -1)
{
set_empty = 1;
for (jj = S_ext_i[j]; jj < S_ext_i[j+1]; jj++)
{
index = S_ext_j[jj];
if (index > col_0 && index < col_n) /* index interior */
{
if (graph_array[index-first_col] == i)
{
set_empty = 0;
break;
}
}
else
{
jk = hypre_BinarySearch(col_map_offd,index,num_cols_offd);
if (jk != -1)
{
if (ci_array[jk] == i)
{
set_empty = 0;
break;
}
}
}
}
if (set_empty)
{
if (C_i_nonempty)
{
CF_marker[i] = -2;
C_i_nonempty = 0;
break;
}
else
{
C_i_nonempty = 1;
i--;
break;
}
}
}
}
}
}
if (debug_flag == 3)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Coarsen special points = %f\n",
my_id, wall_time);
}
}
/*---------------------------------------------------
* Clean up and return
*---------------------------------------------------*/
/*if (coarsen_type != 1)
{ */
hypre_TFree(CF_marker_offd);
hypre_TFree(int_buf_data);
hypre_TFree(ci_array);
/*} */
hypre_TFree(graph_array);
if ((meas_type || (coarsen_type != 1 && coarsen_type != 11))
&& num_procs > 1)
hypre_CSRMatrixDestroy(S_ext);
*CF_marker_ptr = CF_marker;
return (ierr);
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCoarsenFalgout
 *
 * Falgout coarsening: run Ruge-Stueben coarsening (variant 6) first, then
 * refine the resulting CF_marker with a CLJP sweep.  Error codes from the
 * two phases are accumulated and returned.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGCoarsenFalgout( hypre_ParCSRMatrix    *S,
                               hypre_ParCSRMatrix    *A,
                               HYPRE_Int              measure_type,
                               HYPRE_Int              debug_flag,
                               HYPRE_Int            **CF_marker_ptr)
{
   HYPRE_Int ierr;

   /* Phase 1: Ruge coarsening, coarsen_type = 6 */
   ierr  = hypre_BoomerAMGCoarsenRuge(S, A, measure_type, 6, debug_flag,
                                      CF_marker_ptr);

   /* Phase 2: CLJP coarsening seeded by the Ruge result */
   ierr += hypre_BoomerAMGCoarsen(S, A, 1, debug_flag, CF_marker_ptr);

   return ierr;
}
/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCoarsenHMIS
 *
 * HMIS coarsening: a first pass of Ruge coarsening (variant 10, interior
 * points only) followed by a PMIS sweep on the result.  Returns the sum of
 * the two phases' error codes through ierr.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGCoarsenHMIS( hypre_ParCSRMatrix    *S,
                            hypre_ParCSRMatrix    *A,
                            HYPRE_Int              measure_type,
                            HYPRE_Int              debug_flag,
                            HYPRE_Int            **CF_marker_ptr)
{
   HYPRE_Int              ierr = 0;

   /*-------------------------------------------------------
    * Perform Ruge coarsening followed by PMIS coarsening
    * (note: PMIS, not CLJP -- see the call below)
    *-------------------------------------------------------*/

   ierr += hypre_BoomerAMGCoarsenRuge (S, A, measure_type, 10, debug_flag,
                                       CF_marker_ptr);

   ierr += hypre_BoomerAMGCoarsenPMIS (S, A, 1, debug_flag,
                                       CF_marker_ptr);

   return (ierr);
}
/*--------------------------------------------------------------------------*/
/* CF_marker point classifications used by the PMIS coarsening below */
#define C_PT 1         /* coarse point */
#define F_PT -1        /* fine point */
#define SF_PT -3       /* isolated fine point (no strong connections) */
#define COMMON_C_PT 2  /* not referenced in this routine -- presumably a
                          temporary "common C-point" tag; confirm elsewhere */
#define Z_PT -2        /* special marker handled during CF_init == 1 setup */
/* begin HANS added */
/**************************************************************
*
* Modified Independent Set Coarsening routine
* (don't worry about strong F-F connections
* without a common C point)
*
**************************************************************/
HYPRE_Int
hypre_BoomerAMGCoarsenPMIS( hypre_ParCSRMatrix    *S,
                            hypre_ParCSRMatrix    *A,
                            HYPRE_Int              CF_init,
                            HYPRE_Int              debug_flag,
                            HYPRE_Int            **CF_marker_ptr)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PMIS] -= hypre_MPI_Wtime();
#endif

   MPI_Comm                comm          = hypre_ParCSRMatrixComm(S);
   hypre_ParCSRCommPkg    *comm_pkg      = hypre_ParCSRMatrixCommPkg(S);
   hypre_ParCSRCommHandle *comm_handle;

   hypre_CSRMatrix        *S_diag        = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int              *S_diag_i      = hypre_CSRMatrixI(S_diag);
   HYPRE_Int              *S_diag_j      = hypre_CSRMatrixJ(S_diag);

   hypre_CSRMatrix        *S_offd        = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int              *S_offd_i      = hypre_CSRMatrixI(S_offd);
   HYPRE_Int              *S_offd_j;

   HYPRE_Int               num_variables = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int               num_cols_offd = 0;

   /* hypre_CSRMatrix     *S_ext;
   HYPRE_Int              *S_ext_i;
   HYPRE_Int              *S_ext_j; */

   HYPRE_Int               num_sends = 0;
   HYPRE_Int              *int_buf_data;
   HYPRE_Real             *buf_data;

   HYPRE_Int              *CF_marker;        /* local point classification */
   HYPRE_Int              *CF_marker_offd;   /* classification of external (ghost) points */

   HYPRE_Real             *measure_array;    /* measures for local + external nodes */
   HYPRE_Int              *graph_array;      /* local nodes still unassigned */
   HYPRE_Int              *graph_array_offd; /* external nodes still unassigned */
   HYPRE_Int               graph_size;
   HYPRE_Int               graph_offd_size;
   HYPRE_Int               global_graph_size;

   HYPRE_Int               i, j, jj, jS, ig;
   HYPRE_Int               index, start, my_id, num_procs, jrow, cnt, elmt;

   HYPRE_Int               ierr = 0;

   HYPRE_Real              wall_time;
   HYPRE_Int               iter = 0;

   HYPRE_Int              *prefix_sum_workspace;

#if 0 /* debugging */
   char  filename[256];
   FILE *fp;
   HYPRE_Int   iter = 0;
#endif

   /*******************************************************************************
     BEFORE THE INDEPENDENT SET COARSENING LOOP:
       measure_array: calculate the measures, and communicate them
         (this array contains measures for both local and external nodes)
       CF_marker, CF_marker_offd: initialize CF_marker
         (separate arrays for local and external; 0=unassigned, negative=F point, positive=C point)
   ******************************************************************************/

   /*--------------------------------------------------------------
    * Use the ParCSR strength matrix, S.
    *
    * For now, the "strength" of dependence/influence is defined in
    * the following way: i depends on j if
    *     aij > hypre_max (k != i) aik,    aii < 0
    * or
    *     aij < hypre_min (k != i) aik,    aii >= 0
    * Then S_ij = 1, else S_ij = 0.
    *
    * NOTE: S_data is not used; in stead, only strong columns are retained
    *       in S_j, which can then be used like S_data
    *----------------------------------------------------------------*/

   /*S_ext = NULL; */
   if (debug_flag == 3) wall_time = time_getWallclockSeconds();
   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);

   /* S may not carry a comm package; fall back to A's, building it if needed */
   if (!comm_pkg)
   {
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);

   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                                                           num_sends));
   buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                                                        num_sends));

   num_cols_offd = hypre_CSRMatrixNumCols(S_offd);

   S_diag_j = hypre_CSRMatrixJ(S_diag);  /* re-read (already set at declaration) */

   if (num_cols_offd)
   {
      S_offd_j = hypre_CSRMatrixJ(S_offd);
   }

   /*----------------------------------------------------------
    * Compute the measures
    *
    * The measures are currently given by the column sums of S.
    * Hence, measure_array[i] is the number of influences
    * of variable i.
    *
    * The measures are augmented by a random number
    * between 0 and 1.
    *----------------------------------------------------------*/

   measure_array = hypre_CTAlloc(HYPRE_Real, num_variables+num_cols_offd);

   /* first calculate the local part of the sums for the external nodes */
#ifdef HYPRE_USING_OPENMP
   /* integer scratch array so the counting can use atomics; copied to the
      real-valued measure_array afterwards */
   HYPRE_Int *measure_array_temp = hypre_CTAlloc(HYPRE_Int, num_variables+num_cols_offd);

#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
   for (i=0; i < S_offd_i[num_variables]; i++)
   {
#pragma omp atomic
      measure_array_temp[num_variables + S_offd_j[i]]++;
   }

#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
   for (i=0; i < num_cols_offd; i++)
   {
      measure_array[i + num_variables] = measure_array_temp[i + num_variables];
   }
#else
   for (i=0; i < S_offd_i[num_variables]; i++)
   {
      measure_array[num_variables + S_offd_j[i]] += 1.0;
   }
#endif // HYPRE_USING_OPENMP

   /* now send those locally calculated values for the external nodes to the neighboring processors */
   if (num_procs > 1)
      comm_handle = hypre_ParCSRCommHandleCreate(2, comm_pkg,
                                                 &measure_array[num_variables], buf_data);

   /* calculate the local part for the local nodes */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
   for (i=0; i < S_diag_i[num_variables]; i++)
   {
#pragma omp atomic
      measure_array_temp[S_diag_j[i]]++;
   }

#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
   for (i=0; i < num_variables; i++)
   {
      measure_array[i] = measure_array_temp[i];
   }

   hypre_TFree(measure_array_temp);
#else
   for (i=0; i < S_diag_i[num_variables]; i++)
   {
      measure_array[S_diag_j[i]] += 1.0;
   }
#endif // HYPRE_USING_OPENMP

   /* finish the communication */
   if (num_procs > 1)
      hypre_ParCSRCommHandleDestroy(comm_handle);

   /* now add the externally calculated part of the local nodes to the local nodes */
   index = 0;
   for (i=0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         measure_array[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]
            += buf_data[index++];
   }

   /* set the measures of the external nodes to zero */
   for (i=num_variables; i < num_variables+num_cols_offd; i++)
   {
      measure_array[i] = 0;
   }

   /* this augments the measures with a random number between 0 and 1 */
   /* (only for the local part) */
   /* this augments the measures */
   if (CF_init == 2 || CF_init == 4)
      hypre_BoomerAMGIndepSetInit(S, measure_array, 1);
   else
      hypre_BoomerAMGIndepSetInit(S, measure_array, 0);

   /*---------------------------------------------------
    * Initialize the graph arrays, and CF_marker arrays
    *---------------------------------------------------*/

   /* first the off-diagonal part of the graph array */
   if (num_cols_offd)
      graph_array_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
   else
      graph_array_offd = NULL;

   for (ig = 0; ig < num_cols_offd; ig++)
      graph_array_offd[ig] = ig;

   graph_offd_size = num_cols_offd;

   /* now the local part of the graph array, and the local CF_marker array */
   graph_array = hypre_CTAlloc(HYPRE_Int, num_variables);

   if (CF_init==1)
   {
      /* CF_init == 1: an initial CF_marker was supplied by the caller;
         reset boundary and F points and re-evaluate Z_PT / SF_PT markers */
      CF_marker = *CF_marker_ptr;
      cnt = 0;
      for (i=0; i < num_variables; i++)
      {
         if ( (S_offd_i[i+1]-S_offd_i[i]) > 0 || CF_marker[i] == -1)
         {
            CF_marker[i] = 0;
         }
         if ( CF_marker[i] == Z_PT)
         {
            if (measure_array[i] >= 1.0 ||
                (S_diag_i[i+1]-S_diag_i[i]) > 0)
            {
               CF_marker[i] = 0;
               graph_array[cnt++] = i;
            }
            else
            {
               CF_marker[i] = F_PT;
            }
         }
         else if (CF_marker[i] == SF_PT)
            measure_array[i] = 0;
         else
            graph_array[cnt++] = i;
      }
   }
   else
   {
      CF_marker = hypre_CTAlloc(HYPRE_Int, num_variables);
      cnt = 0;
      for (i=0; i < num_variables; i++)
      {
         CF_marker[i] = 0;
         if ( (S_diag_i[i+1]-S_diag_i[i]) == 0
              && (S_offd_i[i+1]-S_offd_i[i]) == 0)
         {
            CF_marker[i] = SF_PT;  /* an isolated fine grid */
            if (CF_init == 3 || CF_init == 4) CF_marker[i] = C_PT;
            measure_array[i] = 0;
         }
         else
            graph_array[cnt++] = i;
      }
   }
   graph_size = cnt;

   /* now the off-diagonal part of CF_marker */
   if (num_cols_offd)
      CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
   else
      CF_marker_offd = NULL;

   for (i=0; i < num_cols_offd; i++)
      CF_marker_offd[i] = 0;

   /*------------------------------------------------
    * Communicate the local measures, which are complete,
      to the external nodes
    *------------------------------------------------*/
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
      {
         jrow = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j);
         buf_data[index++] = measure_array[jrow];
      }
   }

   if (num_procs > 1)
   {
      comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, buf_data,
                                                 &measure_array[num_variables]);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }

   if (debug_flag == 3)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d    Initialize CLJP phase = %f\n",
                   my_id, wall_time);
   }

   /* double-buffered graph arrays: the surviving (still unassigned) nodes
      are compacted into these at the end of each sweep, then swapped */
   HYPRE_Int *graph_array2 = hypre_CTAlloc(HYPRE_Int, num_variables);
   HYPRE_Int *graph_array_offd2 = NULL;
   if (num_cols_offd)
      graph_array_offd2 = hypre_CTAlloc(HYPRE_Int, num_cols_offd);

   /*******************************************************************************
     THE INDEPENDENT SET COARSENING LOOP:
   ******************************************************************************/

   /*---------------------------------------------------
    * Loop until all points are either fine or coarse.
    *---------------------------------------------------*/

   while (1)
   {
      /* stop the coarsening if nothing left to be coarsened */
      hypre_MPI_Allreduce(&graph_size,&global_graph_size,1,HYPRE_MPI_INT,hypre_MPI_SUM,comm);

      if (global_graph_size == 0)
         break;

      /*     hypre_printf("\n");
             hypre_printf("*** MIS iteration %d\n",iter);
             hypre_printf("graph_size remaining %d\n",graph_size);*/

      /*------------------------------------------------
       * Pick an independent set of points with
       * maximal measure.
         At the end, CF_marker is complete, but still needs to be
         communicated to CF_marker_offd
       *------------------------------------------------*/
      /* skipped on the very first sweep when an initial CF_marker was
         supplied (CF_init != 0 and iter == 0) */
      if (!CF_init || iter)
      {
         /*hypre_BoomerAMGIndepSet(S, measure_array, graph_array,
                                   graph_size,
                                   graph_array_offd, graph_offd_size,
                                   CF_marker, CF_marker_offd);*/

         /* candidates: every remaining node with measure > 1 tentatively
            joins the independent set */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ig, i) HYPRE_SMP_SCHEDULE
#endif
         for (ig = 0; ig < graph_size; ig++)
         {
            i = graph_array[ig];
            if (measure_array[i] > 1)
            {
               CF_marker[i] = 1;
            }
         }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ig, i) HYPRE_SMP_SCHEDULE
#endif
         for (ig = 0; ig < graph_offd_size; ig++)
         {
            i = graph_array_offd[ig];
            if (measure_array[i+num_variables] > 1)
            {
               CF_marker_offd[i] = 1;
            }
         }

         /*-------------------------------------------------------
          * Remove nodes from the initial independent set
          * (of two strongly coupled candidates, only the one with
          * the larger measure survives)
          *-------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ig, i, jS, j, jj) HYPRE_SMP_SCHEDULE
#endif
         for (ig = 0; ig < graph_size; ig++)
         {
            i = graph_array[ig];
            if (measure_array[i] > 1)
            {
               for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++)
               {
                  j = S_diag_j[jS];
                  if (measure_array[j] > 1)
                  {
                     if (measure_array[i] > measure_array[j])
                        CF_marker[j] = 0;
                     else if (measure_array[j] > measure_array[i])
                        CF_marker[i] = 0;
                  }
               } /* for each local neighbor j of i */
               for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++)
               {
                  jj = S_offd_j[jS];
                  j = num_variables+jj;
                  if (measure_array[j] > 1)
                  {
                     if (measure_array[i] > measure_array[j])
                        CF_marker_offd[jj] = 0;
                     else if (measure_array[j] > measure_array[i])
                        CF_marker[i] = 0;
                  }
               }
            } /* for each node with measure > 1 */
         } /* for each node i */

         /*------------------------------------------------
          * Exchange boundary data for CF_marker: send internal
            points to external points
          *------------------------------------------------*/
         if (num_procs > 1)
         {
            comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg,
                                                       CF_marker_offd, int_buf_data);
            hypre_ParCSRCommHandleDestroy(comm_handle);
         }

         /* reconcile: a local boundary point loses its tentative C status
            when the receiving side removed it */
         index = 0;
         for (i = 0; i < num_sends; i++)
         {
            start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            {
               elmt = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j);
               if (!int_buf_data[index] && CF_marker[elmt] > 0)
               {
                  CF_marker[elmt] = 0;
                  index++;
               }
               else
               {
                  int_buf_data[index++] = CF_marker[elmt];
               }
            }
         }

         if (num_procs > 1)
         {
            comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
                                                       CF_marker_offd);
            hypre_ParCSRCommHandleDestroy(comm_handle);
         }
      }

      iter++;

      /*------------------------------------------------
       * Set C-pts and F-pts.
       *------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ig, i, jS, j) HYPRE_SMP_SCHEDULE
#endif
      for (ig = 0; ig < graph_size; ig++) {
         i = graph_array[ig];

         /*---------------------------------------------
          * If the measure of i is smaller than 1, then
          * make i and F point (because it does not influence
          * any other point)
          *---------------------------------------------*/
         if(measure_array[i]<1.) CF_marker[i]= F_PT;

         /*---------------------------------------------
          * First treat the case where point i is in the
          * independent set: make i a C point,
          *---------------------------------------------*/
         if (CF_marker[i] > 0) CF_marker[i] = C_PT;

         /*---------------------------------------------
          * Now treat the case where point i is not in the
          * independent set: loop over
          * all the points j that influence equation i; if
          * j is a C point, then make i an F point.
          *---------------------------------------------*/
         else
         {
            /* first the local part */
            for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++)
            {
               /* j is the column number, or the local number of the point influencing i */
               j = S_diag_j[jS];
               if (CF_marker[j] > 0) /* j is a C-point */
                  CF_marker[i] = F_PT;
            }
            /* now the external part */
            for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++)
            {
               j = S_offd_j[jS];
               if (CF_marker_offd[j] > 0) /* j is a C-point */
                  CF_marker[i] = F_PT;
            }
         } /* end else */
      } /* end first loop over graph */

      /* now communicate CF_marker to CF_marker_offd, to make
         sure that new external F points are known on this processor */

      /*------------------------------------------------
       * Exchange boundary data for CF_marker: send internal
         points to external points
       *------------------------------------------------*/
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++]
               = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }

      if (num_procs > 1)
      {
         comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
                                                    CF_marker_offd);
         hypre_ParCSRCommHandleDestroy(comm_handle);
      }

      /*------------------------------------------------
       * Update subgraph: compact the still-unassigned nodes into the
       * second graph buffers (thread-parallel via a prefix sum), then
       * swap buffers for the next sweep.
       *------------------------------------------------*/
      /*HYPRE_Int prefix_sum_workspace[2*(hypre_NumThreads() + 1)];*/
      prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(hypre_NumThreads() + 1));

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(ig,i)
#endif
      {
         HYPRE_Int private_graph_size_cnt = 0;
         HYPRE_Int private_graph_offd_size_cnt = 0;
         HYPRE_Int ig_begin, ig_end;
         hypre_GetSimpleThreadPartition(&ig_begin, &ig_end, graph_size);
         HYPRE_Int ig_offd_begin, ig_offd_end;
         hypre_GetSimpleThreadPartition(&ig_offd_begin, &ig_offd_end, graph_offd_size);

         for (ig = ig_begin; ig < ig_end; ig++)
         {
            i = graph_array[ig];
            if (CF_marker[i]!=0) /* C or F point */
            {
               /* the independent set subroutine needs measure 0 for
                  removed nodes */
               measure_array[i] = 0;
            }
            else
            {
               private_graph_size_cnt++;
            }
         }

         for (ig = ig_offd_begin; ig < ig_offd_end; ig++)
         {
            i = graph_array_offd[ig];
            if (CF_marker_offd[i]!=0) /* C of F point */
            {
               /* the independent set subroutine needs measure 0 for
                  removed nodes */
               measure_array[i + num_variables] = 0;
            }
            else
            {
               private_graph_offd_size_cnt++;
            }
         }

         /* turns the per-thread counts into write offsets and the
            global graph sizes */
         hypre_prefix_sum_pair(&private_graph_size_cnt, &graph_size, &private_graph_offd_size_cnt, &graph_offd_size, prefix_sum_workspace);

         for (ig = ig_begin; ig < ig_end; ig++)
         {
            i = graph_array[ig];
            if (CF_marker[i]==0)
            {
               graph_array2[private_graph_size_cnt++] = i;
            }
         }

         for (ig = ig_offd_begin; ig < ig_offd_end; ig++)
         {
            i = graph_array_offd[ig];
            if (CF_marker_offd[i]==0)
            {
               graph_array_offd2[private_graph_offd_size_cnt++] = i;
            }
         }
      } /* omp parallel */

      HYPRE_Int *temp = graph_array;
      graph_array = graph_array2;
      graph_array2 = temp;

      temp = graph_array_offd;
      graph_array_offd = graph_array_offd2;
      graph_array_offd2 = temp;

      hypre_TFree(prefix_sum_workspace);
   } /* end while */

   /*   hypre_printf("*** MIS iteration %d\n",iter);
        hypre_printf("graph_size remaining %d\n",graph_size);
        hypre_printf("num_cols_offd %d\n",num_cols_offd);
        for (i=0;i<num_variables;i++)
        {
           if(CF_marker[i]==1)
              hypre_printf("node %d CF %d\n",i,CF_marker[i]);
        }*/

   /*---------------------------------------------------
    * Clean up and return
    *---------------------------------------------------*/

   hypre_TFree(measure_array);
   hypre_TFree(graph_array);
   hypre_TFree(graph_array2);
   hypre_TFree(graph_array_offd2);
   if (num_cols_offd) hypre_TFree(graph_array_offd);
   hypre_TFree(buf_data);
   hypre_TFree(int_buf_data);
   hypre_TFree(CF_marker_offd);
   /*if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext);*/

   *CF_marker_ptr = CF_marker;

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PMIS] += hypre_MPI_Wtime();
#endif

   return (ierr);
}
| 82,574 | 30.445164 | 138 | c |
AMG | AMG-master/parcsr_ls/par_coordinates.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include "_hypre_parcsr_ls.h"
/*--------------------------------------------------------------------------
* GenerateCoordinates
*--------------------------------------------------------------------------*/
/*--------------------------------------------------------------------------
 * GenerateCoordinates
 *
 * Builds the coordinate array for this process's box of an nx x ny x nz
 * global grid partitioned over a P x Q x R process grid; (p,q,r) is this
 * process's position.  Only directions with more than one grid point
 * contribute a component.  Returns a freshly allocated array, or NULL if
 * coorddim is outside [1,3].
 *--------------------------------------------------------------------------*/

float *
GenerateCoordinates( MPI_Comm comm,
                     HYPRE_Int      nx,
                     HYPRE_Int      ny,
                     HYPRE_Int      nz,
                     HYPRE_Int      P,
                     HYPRE_Int      Q,
                     HYPRE_Int      R,
                     HYPRE_Int      p,
                     HYPRE_Int      q,
                     HYPRE_Int      r,
                     HYPRE_Int      coorddim)
{
   HYPRE_Int  *part_x, *part_y, *part_z;
   HYPRE_Int   i, j, k, pos;
   HYPRE_Int   rows_local;
   float      *coords = NULL;

   /* reject unsupported coordinate dimensions up front */
   if (coorddim < 1 || coorddim > 3)
   {
      return NULL;
   }

   hypre_GeneratePartitioning(nx, P, &part_x);
   hypre_GeneratePartitioning(ny, Q, &part_y);
   hypre_GeneratePartitioning(nz, R, &part_z);

   rows_local = (part_x[p+1] - part_x[p])
              * (part_y[q+1] - part_y[q])
              * (part_z[r+1] - part_z[r]);

   coords = hypre_CTAlloc(float, coorddim*rows_local);

   /* walk the local box in z / y / x order, appending one component per
      non-degenerate direction */
   pos = 0;
   for (k = part_z[r]; k < part_z[r+1]; k++)
   {
      for (j = part_y[q]; j < part_y[q+1]; j++)
      {
         for (i = part_x[p]; i < part_x[p+1]; i++)
         {
            /* set coordinates BM Oct 17, 2006 */
            if (coords)
            {
               if (nx > 1) coords[pos++] = i;
               if (ny > 1) coords[pos++] = j;
               if (nz > 1) coords[pos++] = k;
            }
         }
      }
   }

   hypre_TFree(part_x);
   hypre_TFree(part_y);
   hypre_TFree(part_z);

   return coords;
}
| 2,564 | 26.880435 | 81 | c |
AMG | AMG-master/parcsr_ls/par_cycle.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
* ParAMG cycling routine
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
#ifdef HYPRE_USING_CALIPER
#include <caliper/cali.h>
#endif
/*--------------------------------------------------------------------------
* hypre_BoomerAMGCycle
*--------------------------------------------------------------------------*/
/*
 * Perform one AMG cycle (V-, W-, or F-cycle depending on cycle_type) on the
 * hierarchy stored in amg_vdata, smoothing/correcting U_array against the
 * right-hand sides in F_array on every level.
 *
 * amg_vdata : opaque pointer to the hypre_ParAMGData setup object.
 * F_array   : per-level right-hand-side vectors (level 0 = finest).
 * U_array   : per-level solution/correction vectors, updated in place.
 *
 * Returns the error flag reported by the relaxation routines (0 on success).
 */
HYPRE_Int
hypre_BoomerAMGCycle( void *amg_vdata,
                      hypre_ParVector **F_array,
                      hypre_ParVector **U_array )
{
   hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata;
   /* Data Structure variables */
   hypre_ParCSRMatrix **A_array;
   hypre_ParCSRMatrix **P_array;
   hypre_ParCSRMatrix **R_array;
   hypre_ParVector *Utemp;
   hypre_ParVector *Vtemp;
   hypre_ParVector *Rtemp;
   hypre_ParVector *Ptemp;
   hypre_ParVector *Ztemp;
   hypre_ParVector *Aux_U;
   hypre_ParVector *Aux_F;
   HYPRE_Real *Ztemp_data;
   HYPRE_Real *Ptemp_data;
   HYPRE_Int **CF_marker_array;
   /* HYPRE_Int **unknown_map_array;
   HYPRE_Int **point_map_array;
   HYPRE_Int **v_at_point_array; */
   HYPRE_Real cycle_op_count;
   HYPRE_Int cycle_type;
   HYPRE_Int num_levels;
   HYPRE_Int max_levels;
   HYPRE_Real *num_coeffs;
   HYPRE_Int *num_grid_sweeps;
   HYPRE_Int *grid_relax_type;
   HYPRE_Int **grid_relax_points;
   HYPRE_Int cheby_order;
   /* Local variables */
   HYPRE_Int *lev_counter;
   HYPRE_Int Solve_err_flag;
   HYPRE_Int k;
   HYPRE_Int i, j, jj;
   HYPRE_Int level;
   HYPRE_Int cycle_param;
   HYPRE_Int coarse_grid;
   HYPRE_Int fine_grid;
   HYPRE_Int Not_Finished;
   HYPRE_Int num_sweep;
   HYPRE_Int cg_num_sweep = 1;
   HYPRE_Int relax_type;
   HYPRE_Int relax_points;
   HYPRE_Int relax_order;
   HYPRE_Int relax_local;
   HYPRE_Int old_version = 0;
   HYPRE_Real *relax_weight;
   HYPRE_Real *omega;
   HYPRE_Real alfa, beta, gammaold;
   HYPRE_Real gamma = 1.0;
   HYPRE_Int local_size;
   HYPRE_Int num_threads, my_id;
   HYPRE_Real alpha;
   HYPRE_Real **l1_norms = NULL;
   HYPRE_Real *l1_norms_level;
   HYPRE_Real **ds = hypre_ParAMGDataChebyDS(amg_data);
   HYPRE_Real **coefs = hypre_ParAMGDataChebyCoefs(amg_data);
   HYPRE_Int seq_cg = 0;
   MPI_Comm comm;
#if 0
   HYPRE_Real *D_mat;
   HYPRE_Real *S_vec;
#endif
#ifdef HYPRE_USING_CALIPER
   /* Caliper attribute used to tag timings with the current grid level. */
   cali_id_t iter_attr =
     cali_create_attribute("hypre.par_cycle.level", CALI_TYPE_INT, CALI_ATTR_DEFAULT);
#endif
   /* Acquire data and allocate storage */
   num_threads = hypre_NumThreads();
   A_array           = hypre_ParAMGDataAArray(amg_data);
   P_array           = hypre_ParAMGDataPArray(amg_data);
   R_array           = hypre_ParAMGDataRArray(amg_data);
   CF_marker_array   = hypre_ParAMGDataCFMarkerArray(amg_data);
   Vtemp             = hypre_ParAMGDataVtemp(amg_data);
   Rtemp             = hypre_ParAMGDataRtemp(amg_data);
   Ptemp             = hypre_ParAMGDataPtemp(amg_data);
   Ztemp             = hypre_ParAMGDataZtemp(amg_data);
   num_levels        = hypre_ParAMGDataNumLevels(amg_data);
   max_levels        = hypre_ParAMGDataMaxLevels(amg_data);
   cycle_type        = hypre_ParAMGDataCycleType(amg_data);
   num_grid_sweeps     = hypre_ParAMGDataNumGridSweeps(amg_data);
   grid_relax_type     = hypre_ParAMGDataGridRelaxType(amg_data);
   grid_relax_points   = hypre_ParAMGDataGridRelaxPoints(amg_data);
   relax_order         = hypre_ParAMGDataRelaxOrder(amg_data);
   relax_weight        = hypre_ParAMGDataRelaxWeight(amg_data);
   omega               = hypre_ParAMGDataOmega(amg_data);
   l1_norms            = hypre_ParAMGDataL1Norms(amg_data);
   /*max_eig_est = hypre_ParAMGDataMaxEigEst(amg_data);
   min_eig_est = hypre_ParAMGDataMinEigEst(amg_data);
   cheby_fraction = hypre_ParAMGDataChebyFraction(amg_data);*/
   cheby_order         = hypre_ParAMGDataChebyOrder(amg_data);
   cycle_op_count = hypre_ParAMGDataCycleOpCount(amg_data);
   lev_counter = hypre_CTAlloc(HYPRE_Int, num_levels);
   /* If this rank participates in the sequential coarse solve, use it on
      the coarsest level instead of the usual relaxation. */
   if (hypre_ParAMGDataParticipate(amg_data)) seq_cg = 1;
   /* Initialize */
   Solve_err_flag = 0;
   /* A non-NULL grid_relax_points array signals the old (pre-CF-ordering)
      interface, which selects C/F points explicitly per sweep. */
   if (grid_relax_points) old_version = 1;
   num_coeffs = hypre_CTAlloc(HYPRE_Real, num_levels);
   num_coeffs[0]    = hypre_ParCSRMatrixDNumNonzeros(A_array[0]);
   comm = hypre_ParCSRMatrixComm(A_array[0]);
   hypre_MPI_Comm_rank(comm,&my_id);
   for (j = 1; j < num_levels; j++)
      num_coeffs[j] = hypre_ParCSRMatrixDNumNonzeros(A_array[j]);
   /*---------------------------------------------------------------------
    * Initialize cycling control counter
    *
    * Cycling is controlled using a level counter: lev_counter[k]
    *
    * Each time relaxation is performed on level k, the
    * counter is decremented by 1. If the counter is then
    * negative, we go to the next finer level. If non-
    * negative, we go to the next coarser level. The
    * following actions control cycling:
    *
    * a. lev_counter[0] is initialized to 1.
    * b. lev_counter[k] is initialized to cycle_type for k>0.
    *
    * c. During cycling, when going down to level k, lev_counter[k]
    *    is set to the max of (lev_counter[k],cycle_type)
    *---------------------------------------------------------------------*/
   Not_Finished = 1;
   lev_counter[0] = 1;
   for (k = 1; k < num_levels; ++k)
   {
      lev_counter[k] = cycle_type;
   }
   level = 0;
   /* cycle_param: 1 = pre-smoothing (going down), 2 = post-smoothing
      (coming up), 3 = coarsest-level solve. */
   cycle_param = 1;
   /*---------------------------------------------------------------------
    * Main loop of cycling
    *--------------------------------------------------------------------*/
#ifdef HYPRE_USING_CALIPER
   cali_set_int(iter_attr, level);
#endif
   while (Not_Finished)
   {
      if (num_levels > 1)
      {
         /* Shrink Vtemp's local view to match this level's vector size. */
         local_size
            = hypre_VectorSize(hypre_ParVectorLocalVector(F_array[level]));
         hypre_VectorSize(hypre_ParVectorLocalVector(Vtemp)) = local_size;
         cg_num_sweep = 1;
         num_sweep = num_grid_sweeps[cycle_param];
         Aux_U = U_array[level];
         Aux_F = F_array[level];
         relax_type = grid_relax_type[cycle_param];
      }
      else /* AB: 4/08: removed the max_levels > 1 check - should do this when max-levels = 1 also */
      {
         /* If no coarsening occurred, apply a simple smoother once */
         Aux_U = U_array[level];
         Aux_F = F_array[level];
         num_sweep = 1;
         /* TK: Use the user relax type (instead of 0) to allow for setting a
            convergent smoother (e.g. in the solution of singular problems). */
         relax_type = hypre_ParAMGDataUserRelaxType(amg_data);
         if (relax_type == -1) relax_type = 6;
      }
      if (l1_norms != NULL)
         l1_norms_level = l1_norms[level];
      else
         l1_norms_level = NULL;
      if (cycle_param == 3 && seq_cg)
      {
         /* Coarsest level: redundant sequential AMG solve. */
         hypre_seqAMGCycle(amg_data, level, F_array, U_array);
      }
      else
      {
         /*------------------------------------------------------------------
          * Do the relaxation num_sweep times
          *-----------------------------------------------------------------*/
         for (jj = 0; jj < cg_num_sweep; jj++)
         {
            for (j = 0; j < num_sweep; j++)
            {
               if (num_levels == 1 && max_levels > 1)
               {
                  relax_points = 0;
                  relax_local = 0;
               }
               else
               {
                  if (old_version)
                     relax_points = grid_relax_points[cycle_param][j];
                  relax_local = relax_order;
               }
               /*-----------------------------------------------
                * VERY sloppy approximation to cycle complexity
                *-----------------------------------------------*/
               if (old_version && level < num_levels -1)
               {
                  switch (relax_points)
                  {
                     case 1: /* C-point sweep: charge coarse-level nnz */
                        cycle_op_count += num_coeffs[level+1];
                        break;
                     case -1: /* F-point sweep: charge fine-minus-coarse nnz */
                        cycle_op_count += (num_coeffs[level]-num_coeffs[level+1]);
                        break;
                  }
               }
               else
               {
                  cycle_op_count += num_coeffs[level];
               }
               /*-----------------------------------------------
                  Choose Smoother
                  -----------------------------------------------*/
               if (relax_type == 9 || relax_type == 99)
               { /* Gaussian elimination */
                  hypre_GaussElimSolve(amg_data, level, relax_type);
               }
               else if (relax_type == 18)
               {   /* L1 - Jacobi*/
                  if (relax_order == 1 && cycle_param < 3)
                  {
                     /* need to do CF - so can't use the AMS one */
                     HYPRE_Int i;
                     HYPRE_Int loc_relax_points[2];
                     if (cycle_type < 2)
                     {
                        /* down-sweep: C points first, then F points */
                        loc_relax_points[0] = 1;
                        loc_relax_points[1] = -1;
                     }
                     else
                     {
                        loc_relax_points[0] = -1;
                        loc_relax_points[1] = 1;
                     }
                     for (i=0; i < 2; i++)
                        hypre_ParCSRRelax_L1_Jacobi(A_array[level],
                                                    Aux_F,
                                                    CF_marker_array[level],
                                                    loc_relax_points[i],
                                                    relax_weight[level],
                                                    l1_norms[level],
                                                    Aux_U,
                                                    Vtemp);
                  }
                  else  /* not CF - so use through AMS */
                  {
                     if (num_threads == 1)
                        hypre_ParCSRRelax(A_array[level],
                                          Aux_F,
                                          1,
                                          1,
                                          l1_norms_level,
                                          relax_weight[level],
                                          omega[level],0,0,0,0,
                                          Aux_U,
                                          Vtemp,
                                          Ztemp);
                     else
                        hypre_ParCSRRelaxThreads(A_array[level],
                                                 Aux_F,
                                                 1,
                                                 1,
                                                 l1_norms_level,
                                                 relax_weight[level],
                                                 omega[level],
                                                 Aux_U,
                                                 Vtemp,
                                                 Ztemp);
                  }
               }
               else if (relax_type == 16)
               { /* scaled Chebyshev */
                  HYPRE_Int scale = hypre_ParAMGDataChebyScale(amg_data);
                  HYPRE_Int variant = hypre_ParAMGDataChebyVariant(amg_data);
                  hypre_ParCSRRelax_Cheby_Solve(A_array[level], Aux_F,
                                                ds[level], coefs[level],
                                                cheby_order, scale,
                                                variant, Aux_U, Vtemp, Ztemp );
               }
               else if (relax_type ==17)
               {
                  /* FCF-Jacobi: F-sweep, C-sweep, F-sweep */
                  hypre_BoomerAMGRelax_FCFJacobi(A_array[level],
                                                 Aux_F,
                                                 CF_marker_array[level],
                                                 relax_weight[level],
                                                 Aux_U,
                                                 Vtemp);
               }
               else if (old_version)
               {
                  Solve_err_flag = hypre_BoomerAMGRelax(A_array[level],
                                                        Aux_F,
                                                        CF_marker_array[level],
                                                        relax_type, relax_points,
                                                        relax_weight[level],
                                                        omega[level],
                                                        l1_norms_level,
                                                        Aux_U,
                                                        Vtemp,
                                                        Ztemp);
               }
               else
               {
                  /* smoother than can have CF ordering */
                  Solve_err_flag = hypre_BoomerAMGRelaxIF(A_array[level],
                                                          Aux_F,
                                                          CF_marker_array[level],
                                                          relax_type,
                                                          relax_local,
                                                          cycle_param,
                                                          relax_weight[level],
                                                          omega[level],
                                                          l1_norms_level,
                                                          Aux_U,
                                                          Vtemp,
                                                          Ztemp);
               }
               /* NOTE(review): this early return leaks lev_counter and
                  num_coeffs (allocated above) — consider freeing before
                  returning; confirm against upstream hypre. */
               if (Solve_err_flag != 0)
                  return(Solve_err_flag);
            }
         }
      }
      /*------------------------------------------------------------------
       * Decrement the control counter and determine which grid to visit next
       *-----------------------------------------------------------------*/
      --lev_counter[level];
      if (lev_counter[level] >= 0 && level != num_levels-1)
      {
         /*---------------------------------------------------------------
          * Visit coarser level next.
          * Compute residual using hypre_ParCSRMatrixMatvec.
          * Perform restriction using hypre_ParCSRMatrixMatvecT.
          * Reset counters and cycling parameters for coarse level
          *--------------------------------------------------------------*/
         fine_grid = level;
         coarse_grid = level + 1;
         hypre_ParVectorSetConstantValues(U_array[coarse_grid], 0.0);
         alpha = -1.0;
         beta = 1.0;
         // JSP: avoid unnecessary copy using out-of-place version of SpMV
         /* Vtemp = F - A*U  (residual on the fine grid) */
         hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A_array[fine_grid], U_array[fine_grid],
                                            beta, F_array[fine_grid], Vtemp);
         alpha = 1.0;
         beta = 0.0;
         /* F_coarse = R^T * residual (restriction) */
         hypre_ParCSRMatrixMatvecT(alpha,R_array[fine_grid],Vtemp,
                                   beta,F_array[coarse_grid]);
         ++level;
         lev_counter[level] = hypre_max(lev_counter[level],cycle_type);
         cycle_param = 1;
         if (level == num_levels-1) cycle_param = 3;
#ifdef HYPRE_USING_CALIPER
         cali_set_int(iter_attr, level); /* set the level for caliper here */
#endif
      }
      else if (level != 0)
      {
         /*---------------------------------------------------------------
          * Visit finer level next.
          * Interpolate and add correction using hypre_ParCSRMatrixMatvec.
          * Reset counters and cycling parameters for finer level.
          *--------------------------------------------------------------*/
         fine_grid = level - 1;
         coarse_grid = level;
         alpha = 1.0;
         beta = 1.0;
         /* U_fine += P * U_coarse (prolongate and correct) */
         hypre_ParCSRMatrixMatvec(alpha, P_array[fine_grid],
                                  U_array[coarse_grid],
                                  beta, U_array[fine_grid]);
         --level;
         cycle_param = 2;
#ifdef HYPRE_USING_CALIPER
         cali_set_int(iter_attr, level); /* set the level for caliper here */
#endif
      }
      else
      {
         /* Back on the finest level with an exhausted counter: done. */
         Not_Finished = 0;
      }
   }
#ifdef HYPRE_USING_CALIPER
   cali_end(iter_attr);  /* unset "iter" */
#endif
   hypre_ParAMGDataCycleOpCount(amg_data) = cycle_op_count;
   hypre_TFree(lev_counter);
   hypre_TFree(num_coeffs);
   return(Solve_err_flag);
}
| 17,740 | 35.730849 | 101 | c |
AMG | AMG-master/parcsr_ls/par_difconv.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include "_hypre_parcsr_ls.h"
/*--------------------------------------------------------------------------
* hypre_GenerateDifConv
*--------------------------------------------------------------------------*/
/*
 * Build the parallel CSR matrix for a 3-D 7-point diffusion-convection
 * stencil on an nx x ny x nz grid, distributed over a P x Q x R logical
 * process grid; (p,q,r) are this process's coordinates in that grid.
 *
 * value[] holds the 7 stencil coefficients in the order:
 *   value[0] = diagonal,
 *   value[1] = west  (x-1), value[2] = south (y-1), value[3] = below (z-1),
 *   value[4] = east  (x+1), value[5] = north (y+1), value[6] = above (z+1).
 *
 * Returns the assembled matrix as a HYPRE_ParCSRMatrix.
 */
HYPRE_ParCSRMatrix
GenerateDifConv( MPI_Comm comm,
                 HYPRE_Int      nx,
                 HYPRE_Int      ny,
                 HYPRE_Int      nz,
                 HYPRE_Int      P,
                 HYPRE_Int      Q,
                 HYPRE_Int      R,
                 HYPRE_Int      p,
                 HYPRE_Int      q,
                 HYPRE_Int      r,
                 HYPRE_Real  *value )
{
   hypre_ParCSRMatrix *A;
   hypre_CSRMatrix *diag;
   hypre_CSRMatrix *offd;
   HYPRE_Int    *diag_i;
   HYPRE_Int    *diag_j;
   HYPRE_Real *diag_data;
   HYPRE_Int    *offd_i;
   HYPRE_Int    *offd_j;
   HYPRE_Real *offd_data;
   HYPRE_Int *global_part;
   HYPRE_Int ix, iy, iz;
   HYPRE_Int cnt, o_cnt;
   HYPRE_Int local_num_rows;
   HYPRE_Int *col_map_offd;
   HYPRE_Int row_index;
   HYPRE_Int i,j;
   HYPRE_Int nx_local, ny_local, nz_local;
   HYPRE_Int nx_size, ny_size, nz_size;
   HYPRE_Int num_cols_offd;
   HYPRE_Int grid_size;
   HYPRE_Int *nx_part;
   HYPRE_Int *ny_part;
   HYPRE_Int *nz_part;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int P_busy, Q_busy, R_busy;
   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   grid_size = nx*ny*nz;
   /* 1-D partitions of each axis over the process grid dimensions. */
   hypre_GeneratePartitioning(nx,P,&nx_part);
   hypre_GeneratePartitioning(ny,Q,&ny_part);
   hypre_GeneratePartitioning(nz,R,&nz_part);
   /* global_part[k] = first global row owned by process k (row-major in
      (p,q,r) with p fastest). */
   global_part = hypre_CTAlloc(HYPRE_Int,P*Q*R+1);
   global_part[0] = 0;
   cnt = 1;
   for (iz = 0; iz < R; iz++)
   {
      nz_size = nz_part[iz+1]-nz_part[iz];
      for (iy = 0; iy < Q; iy++)
      {
         ny_size = ny_part[iy+1]-ny_part[iy];
         for (ix = 0; ix < P; ix++)
         {
            nx_size = nx_part[ix+1] - nx_part[ix];
            global_part[cnt] = global_part[cnt-1];
            global_part[cnt++] += nx_size*ny_size*nz_size;
         }
      }
   }
   nx_local = nx_part[p+1] - nx_part[p];
   ny_local = ny_part[q+1] - ny_part[q];
   nz_local = nz_part[r+1] - nz_part[r];
   /* Recompute my_id/num_procs from the logical grid coordinates; these
      override the MPI values obtained above. */
   my_id = r*(P*Q) + q*P + p;
   num_procs = P*Q*R;
   local_num_rows = nx_local*ny_local*nz_local;
   diag_i = hypre_CTAlloc(HYPRE_Int, local_num_rows+1);
   offd_i = hypre_CTAlloc(HYPRE_Int, local_num_rows+1);
   P_busy = hypre_min(nx,P);
   Q_busy = hypre_min(ny,Q);
   R_busy = hypre_min(nz,R);
   /* Upper bound on off-processor neighbor columns: one grid face per
      neighboring process that actually holds points. */
   num_cols_offd = 0;
   if (p) num_cols_offd += ny_local*nz_local;
   if (p < P_busy-1) num_cols_offd += ny_local*nz_local;
   if (q) num_cols_offd += nx_local*nz_local;
   if (q < Q_busy-1) num_cols_offd += nx_local*nz_local;
   if (r) num_cols_offd += nx_local*ny_local;
   if (r < R_busy-1) num_cols_offd += nx_local*ny_local;
   if (!local_num_rows) num_cols_offd = 0;
   col_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
   /* First pass: count nonzeros per row for diag (on-process) and offd
      (off-process) parts. */
   cnt = 1;
   o_cnt = 1;
   diag_i[0] = 0;
   offd_i[0] = 0;
   for (iz = nz_part[r]; iz < nz_part[r+1]; iz++)
   {
      for (iy = ny_part[q];  iy < ny_part[q+1]; iy++)
      {
         for (ix = nx_part[p]; ix < nx_part[p+1]; ix++)
         {
            diag_i[cnt] = diag_i[cnt-1];
            offd_i[o_cnt] = offd_i[o_cnt-1];
            diag_i[cnt]++;  /* diagonal entry */
            if (iz > nz_part[r])
               diag_i[cnt]++;
            else
            {
               if (iz)
               {
                  offd_i[o_cnt]++;
               }
            }
            if (iy > ny_part[q])
               diag_i[cnt]++;
            else
            {
               if (iy)
               {
                  offd_i[o_cnt]++;
               }
            }
            if (ix > nx_part[p])
               diag_i[cnt]++;
            else
            {
               if (ix)
               {
                  offd_i[o_cnt]++;
               }
            }
            if (ix+1 < nx_part[p+1])
               diag_i[cnt]++;
            else
            {
               if (ix+1 < nx)
               {
                  offd_i[o_cnt]++;
               }
            }
            if (iy+1 < ny_part[q+1])
               diag_i[cnt]++;
            else
            {
               if (iy+1 < ny)
               {
                  offd_i[o_cnt]++;
               }
            }
            if (iz+1 < nz_part[r+1])
               diag_i[cnt]++;
            else
            {
               if (iz+1 < nz)
               {
                  offd_i[o_cnt]++;
               }
            }
            cnt++;
            o_cnt++;
         }
      }
   }
   diag_j = hypre_CTAlloc(HYPRE_Int, diag_i[local_num_rows]);
   diag_data = hypre_CTAlloc(HYPRE_Real, diag_i[local_num_rows]);
   if (num_procs > 1)
   {
      offd_j = hypre_CTAlloc(HYPRE_Int, offd_i[local_num_rows]);
      offd_data = hypre_CTAlloc(HYPRE_Real, offd_i[local_num_rows]);
   }
   /* Second pass: fill column indices and coefficients; mirrors the
      counting pass above.  hypre_map converts 3-D grid coordinates on a
      neighboring process into a global row number. */
   row_index = 0;
   cnt = 0;
   o_cnt = 0;
   for (iz = nz_part[r]; iz < nz_part[r+1]; iz++)
   {
      for (iy = ny_part[q];  iy < ny_part[q+1]; iy++)
      {
         for (ix = nx_part[p]; ix < nx_part[p+1]; ix++)
         {
            diag_j[cnt] = row_index;
            diag_data[cnt++] = value[0];
            if (iz > nz_part[r])
            {
               diag_j[cnt] = row_index-nx_local*ny_local;
               diag_data[cnt++] = value[3];
            }
            else
            {
               if (iz)
               {
                  offd_j[o_cnt] = hypre_map(ix,iy,iz-1,p,q,r-1,P,Q,R,
                                      nx_part,ny_part,nz_part,global_part);
                  offd_data[o_cnt++] = value[3];
               }
            }
            if (iy > ny_part[q])
            {
               diag_j[cnt] = row_index-nx_local;
               diag_data[cnt++] = value[2];
            }
            else
            {
               if (iy)
               {
                  offd_j[o_cnt] = hypre_map(ix,iy-1,iz,p,q-1,r,P,Q,R,
                                      nx_part,ny_part,nz_part,global_part);
                  offd_data[o_cnt++] = value[2];
               }
            }
            if (ix > nx_part[p])
            {
               diag_j[cnt] = row_index-1;
               diag_data[cnt++] = value[1];
            }
            else
            {
               if (ix)
               {
                  offd_j[o_cnt] = hypre_map(ix-1,iy,iz,p-1,q,r,P,Q,R,
                                      nx_part,ny_part,nz_part,global_part);
                  offd_data[o_cnt++] = value[1];
               }
            }
            if (ix+1 < nx_part[p+1])
            {
               diag_j[cnt] = row_index+1;
               diag_data[cnt++] = value[4];
            }
            else
            {
               if (ix+1 < nx)
               {
                  offd_j[o_cnt] = hypre_map(ix+1,iy,iz,p+1,q,r,P,Q,R,
                                      nx_part,ny_part,nz_part,global_part);
                  offd_data[o_cnt++] = value[4];
               }
            }
            if (iy+1 < ny_part[q+1])
            {
               diag_j[cnt] = row_index+nx_local;
               diag_data[cnt++] = value[5];
            }
            else
            {
               if (iy+1 < ny)
               {
                  offd_j[o_cnt] = hypre_map(ix,iy+1,iz,p,q+1,r,P,Q,R,
                                      nx_part,ny_part,nz_part,global_part);
                  offd_data[o_cnt++] = value[5];
               }
            }
            if (iz+1 < nz_part[r+1])
            {
               diag_j[cnt] = row_index+nx_local*ny_local;
               diag_data[cnt++] = value[6];
            }
            else
            {
               if (iz+1 < nz)
               {
                  offd_j[o_cnt] = hypre_map(ix,iy,iz+1,p,q,r+1,P,Q,R,
                                      nx_part,ny_part,nz_part,global_part);
                  offd_data[o_cnt++] = value[6];
               }
            }
            row_index++;
         }
      }
   }
   /* Compress offd column indices: sort the distinct global columns into
      col_map_offd and rewrite offd_j as local indices into that map.
      (Quadratic search; acceptable for test-problem generation.) */
   if (num_procs > 1)
   {
      for (i=0; i < num_cols_offd; i++)
         col_map_offd[i] = offd_j[i];
      hypre_qsort0(col_map_offd, 0, num_cols_offd-1);
      for (i=0; i < num_cols_offd; i++)
         for (j=0; j < num_cols_offd; j++)
            if (offd_j[i] == col_map_offd[j])
            {
               offd_j[i] = j;
               break;
            }
   }
#ifdef HYPRE_NO_GLOBAL_PARTITION
/* ideally we would use less storage earlier in this function, but this is fine
   for testing */
   {
      HYPRE_Int tmp1, tmp2;
      tmp1 = global_part[my_id];
      tmp2 = global_part[my_id + 1];
      hypre_TFree(global_part);
      global_part = hypre_CTAlloc(HYPRE_Int, 2);
      global_part[0] = tmp1;
      global_part[1] = tmp2;
   }
#endif
   A = hypre_ParCSRMatrixCreate(comm, grid_size, grid_size,
                                global_part, global_part, num_cols_offd,
                                diag_i[local_num_rows],
                                offd_i[local_num_rows]);
   hypre_ParCSRMatrixColMapOffd(A) = col_map_offd;
   diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrixI(diag) = diag_i;
   hypre_CSRMatrixJ(diag) = diag_j;
   hypre_CSRMatrixData(diag) = diag_data;
   offd = hypre_ParCSRMatrixOffd(A);
   hypre_CSRMatrixI(offd) = offd_i;
   if (num_cols_offd)
   {
      hypre_CSRMatrixJ(offd) = offd_j;
      hypre_CSRMatrixData(offd) = offd_data;
   }
   hypre_TFree(nx_part);
   hypre_TFree(ny_part);
   hypre_TFree(nz_part);
   return (HYPRE_ParCSRMatrix) A;
}
| 10,430 | 27.422343 | 81 | c |
AMG | AMG-master/parcsr_ls/par_indepset.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
/*==========================================================================*/
/*==========================================================================*/
/**
Augments measures by some random value between 0 and 1.
{\bf Input files:}
_hypre_parcsr_ls.h
@return Error code.
@param S [IN]
parent graph matrix in CSR format
@param measure_array [IN/OUT]
measures assigned to each node of the parent graph
@see hypre_AMGIndepSet */
/*--------------------------------------------------------------------------*/
/*
 * Perturb every local measure by a pseudo-random value in [0,1) so that
 * ties between neighboring nodes are broken during independent-set
 * selection.  With seq_rand set, every rank seeds identically and skips
 * ahead past the rows owned by lower ranks, so the perturbation sequence
 * is independent of the processor count.  Returns 0.
 */
HYPRE_Int
hypre_BoomerAMGIndepSetInit( hypre_ParCSRMatrix *S,
                             HYPRE_Real         *measure_array ,
                             HYPRE_Int   seq_rand)
{
   hypre_CSRMatrix *S_diag          = hypre_ParCSRMatrixDiag(S);
   MPI_Comm         comm            = hypre_ParCSRMatrixComm(S);
   HYPRE_Int        num_local_nodes = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int        seed;
   HYPRE_Int        node;
   HYPRE_Int        my_id;

   hypre_MPI_Comm_rank(comm, &my_id);

   /* Sequential mode uses a rank-independent seed; otherwise each rank
      offsets the base seed by its rank. */
   seed = seq_rand ? 2747 : 2747 + my_id;
   hypre_SeedRand(seed);

   if (seq_rand)
   {
      /* Discard the random values belonging to rows on lower ranks. */
      HYPRE_Int skip;
      for (skip = 0; skip < hypre_ParCSRMatrixFirstRowIndex(S); skip++)
      {
         hypre_Rand();
      }
   }

   for (node = 0; node < num_local_nodes; node++)
   {
      measure_array[node] += hypre_Rand();
   }

   return 0;
}
/*==========================================================================*/
/*==========================================================================*/
/**
Select an independent set from a graph. This graph is actually a
subgraph of some parent graph. The parent graph is described as a
matrix in compressed sparse row format, where edges in the graph are
represented by nonzero matrix coefficients (zero coefficients are
ignored). A positive measure is given for each node in the
subgraph, and this is used to pick the independent set. A measure
of zero must be given for all other nodes in the parent graph. The
subgraph is a collection of nodes in the parent graph.
Positive entries in the `IS\_marker' array indicate nodes in the
independent set. All other entries are zero.
The algorithm proceeds by first setting all nodes in `graph\_array'
to be in the independent set. Nodes are then removed from the
independent set by simply comparing the measures of adjacent nodes.
{\bf Input files:}
_hypre_parcsr_ls.h
@return Error code.
@param S [IN]
parent graph matrix in CSR format
@param measure_array [IN]
measures assigned to each node of the parent graph
@param graph_array [IN]
node numbers in the subgraph to be partitioned
@param graph_array_size [IN]
number of nodes in the subgraph to be partitioned
@param IS_marker [IN/OUT]
marker array for independent set
@see hypre_InitAMGIndepSet */
/*--------------------------------------------------------------------------*/
/*
 * Select an independent set from the (sub)graph described by the strength
 * matrix S.  Nodes with measure > 1 are the active subgraph; all of them
 * are first placed in the set, then a node is removed whenever a strongly
 * connected neighbor carries a strictly larger measure.  On-process
 * neighbors update IS_marker; off-process neighbors update IS_marker_offd
 * (their measures live at offset local_num_vars in measure_array).
 * Returns 0.
 */
HYPRE_Int
hypre_BoomerAMGIndepSet( hypre_ParCSRMatrix *S,
                         HYPRE_Real         *measure_array,
                         HYPRE_Int          *graph_array,
                         HYPRE_Int           graph_array_size,
                         HYPRE_Int          *graph_array_offd,
                         HYPRE_Int           graph_array_offd_size,
                         HYPRE_Int          *IS_marker,
                         HYPRE_Int          *IS_marker_offd )
{
   hypre_CSRMatrix *S_diag        = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int       *S_diag_i      = hypre_CSRMatrixI(S_diag);
   HYPRE_Int       *S_diag_j      = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix *S_offd        = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int       *S_offd_i      = hypre_CSRMatrixI(S_offd);
   HYPRE_Int       *S_offd_j      = NULL;
   HYPRE_Int        local_num_vars = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int        ig, row, nabor, edge;

   if (hypre_CSRMatrixNumCols(S_offd))
   {
      S_offd_j = hypre_CSRMatrixJ(S_offd);
   }

   /*-------------------------------------------------------
    * Phase 1: tentatively put every active node into the set.
    *-------------------------------------------------------*/
   for (ig = 0; ig < graph_array_size; ig++)
   {
      row = graph_array[ig];
      if (measure_array[row] > 1)
      {
         IS_marker[row] = 1;
      }
   }
   for (ig = 0; ig < graph_array_offd_size; ig++)
   {
      row = graph_array_offd[ig];
      if (measure_array[local_num_vars + row] > 1)
      {
         IS_marker_offd[row] = 1;
      }
   }

   /*-------------------------------------------------------
    * Phase 2: compare measures along strong edges and drop
    * the smaller endpoint from the set.
    *-------------------------------------------------------*/
   for (ig = 0; ig < graph_array_size; ig++)
   {
      row = graph_array[ig];
      if (measure_array[row] <= 1)
      {
         continue;  /* inactive node: nothing to compare */
      }

      /* on-process (diag) neighbors */
      for (edge = S_diag_i[row]; edge < S_diag_i[row+1]; edge++)
      {
         nabor = S_diag_j[edge];
         if (nabor < 0)
         {
            nabor = -nabor - 1;  /* decode negated column index */
         }
         /* only consider valid graph edges */
         if (measure_array[nabor] <= 1)
         {
            continue;
         }
         if (measure_array[row] > measure_array[nabor])
         {
            IS_marker[nabor] = 0;
         }
         else if (measure_array[nabor] > measure_array[row])
         {
            IS_marker[row] = 0;
         }
      }

      /* off-process (offd) neighbors */
      for (edge = S_offd_i[row]; edge < S_offd_i[row+1]; edge++)
      {
         HYPRE_Int col = S_offd_j[edge];
         if (col < 0)
         {
            col = -col - 1;  /* decode negated column index */
         }
         nabor = local_num_vars + col;
         /* only consider valid graph edges */
         if (measure_array[nabor] <= 1)
         {
            continue;
         }
         if (measure_array[row] > measure_array[nabor])
         {
            IS_marker_offd[col] = 0;
         }
         else if (measure_array[nabor] > measure_array[row])
         {
            IS_marker[row] = 0;
         }
      }
   }

   return 0;
}
| 7,220 | 32.742991 | 81 | c |
AMG | AMG-master/parcsr_ls/par_interp.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include "_hypre_parcsr_ls.h"
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterp
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterp( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_Int *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrix *P;
HYPRE_Int *col_map_offd_P;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data = NULL;
HYPRE_Int *A_ext_i = NULL;
HYPRE_Int *A_ext_j = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int strong_f_marker;
HYPRE_Int *fine_to_coarse;
HYPRE_Int *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_Int total_global_cpts;
HYPRE_Int num_cols_P_offd,my_first_cpt;
HYPRE_Int i,i1,i2;
HYPRE_Int j,jl,jj,jj1;
HYPRE_Int k,kc;
HYPRE_Int start;
HYPRE_Int sgn;
HYPRE_Int c_num;
HYPRE_Real diagonal;
HYPRE_Real sum;
HYPRE_Real distribute;
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int print_level = 0;
HYPRE_Int *int_buf_data;
HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int col_n = col_1 + local_numrows;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag < 0)
{
debug_flag = -debug_flag;
print_level = 1;
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
if (num_functions > 1 && num_cols_A_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends));
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*----------------------------------------------------------------------
* Get the ghost rows of A
*---------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_procs > 1)
{
A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1);
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
}
index = 0;
for (i=0; i < num_cols_A_offd; i++)
{
for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
{
k = A_ext_j[j];
if (k >= col_1 && k < col_n)
{
A_ext_j[index] = k - col_1;
A_ext_data[index++] = A_ext_data[j];
}
else
{
kc = hypre_BinarySearch(col_map_offd,k,num_cols_A_offd);
if (kc > -1)
{
A_ext_j[index] = -kc-1;
A_ext_data[index++] = A_ext_data[j];
}
}
}
A_ext_i[i] = index;
}
for (i = num_cols_A_offd; i > 0; i--)
A_ext_i[i] = A_ext_i[i-1];
if (num_procs > 1) A_ext_i[0] = 0;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 2 Get A_ext = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i.
*--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size);
/*-----------------------------------------------------------------------
* Intialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
fine_to_coarse[i] += my_first_cpt+coarse_shift;
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
strong_f_marker = -2;
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
/*--------------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*--------------------------------------------------------------*/
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
* If neighbor i1 is an F-point, mark it as a strong F-point
* whose connection needs to be distributed.
*-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
}
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
/* Loop over ith row of A. First, the diagonal part of A */
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
/*--------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and strongly influences i,
 * distribute a_{i,i1} to C-points that strongly influence i.
* Note: currently no distribution to the diagonal in this case.
*--------------------------------------------------------------*/
else if (P_marker[i1] == strong_f_marker)
{
sum = zero;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*-----------------------------------------------------------*/
sgn = 1;
if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row &&
(sgn*A_diag_data[jj1]) < 0)
{
sum += A_diag_data[jj1];
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
sum += A_offd_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_diag_data[jj] / sum;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and do the distribution.
*-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_diag_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_diag_data[jj1];
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
P_offd_data[P_marker_offd[i2]]
+= distribute * A_offd_data[jj1];
}
}
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
/*--------------------------------------------------------------
* Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
* into the diagonal.
*--------------------------------------------------------------*/
else if (CF_marker[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
/*----------------------------------------------------------------
* Still looping over ith row of A. Next, loop over the
* off-diagonal part of A
*---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
}
/*------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and strongly influences i,
 * distribute a_{i,i1} to C-points that strongly influence i.
* Note: currently no distribution to the diagonal in this case.
*-----------------------------------------------------------*/
else if (P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*---------------------------------------------------------*/
/* find row number */
c_num = A_offd_j[jj];
sgn = 1;
if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = A_ext_j[jj1];
if (i2 > -1)
{
/* in the diagonal block */
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and do
* the distribution.
*--------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = A_ext_j[jj1];
if (i2 > -1) /* in the diagonal block */
{
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
P_offd_data[P_marker_offd[-i2-1]]
+= distribute * A_ext_data[jj1];
}
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
/*-----------------------------------------------------------
* Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
* into the diagonal.
*-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
if (diagonal == 0.0)
{
if (print_level)
hypre_printf(" Warning! zero diagonal! Proc id %d row %d\n", my_id,i);
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] = 0.0;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] = 0.0;
}
}
else
{
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] /= -diagonal;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] /= -diagonal;
}
}
}
strong_f_marker--;
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker);
hypre_TFree(P_marker_offd);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_A_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_Int,num_cols_P_offd);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
col_map_offd_P[i] = index++;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker);
}
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd);
hypre_TFree(dof_func_offd);
hypre_TFree(int_buf_data);
hypre_TFree(fine_to_coarse);
hypre_TFree(fine_to_coarse_offd);
hypre_TFree(coarse_counter);
hypre_TFree(jj_count);
hypre_TFree(jj_count_offd);
if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext);
return(0);
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterpHE
* interpolation routine for hyperbolic PDEs
* treats weak fine connections like strong fine connections
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterpHE( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_Int *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrix *P;
HYPRE_Int *col_map_offd_P;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data = NULL;
HYPRE_Int *A_ext_i = NULL;
HYPRE_Int *A_ext_j = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int *fine_to_coarse;
HYPRE_Int *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_Int total_global_cpts;
HYPRE_Int num_cols_P_offd,my_first_cpt;
HYPRE_Int i,i1,i2;
HYPRE_Int j,jl,jj,jj1;
HYPRE_Int k, kc;
HYPRE_Int start;
HYPRE_Int sgn;
HYPRE_Int c_num;
HYPRE_Real diagonal;
HYPRE_Real sum;
HYPRE_Real distribute;
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int col_n = col_1 + local_numrows;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
* Get the CF_marker data for the off-processor columns
*-------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
if (num_functions > 1 && num_cols_A_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends));
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*----------------------------------------------------------------------
* Get the ghost rows of A
*---------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_procs > 1)
{
A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1);
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
}
index = 0;
for (i=0; i < num_cols_A_offd; i++)
{
for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
{
k = A_ext_j[j];
if (k >= col_1 && k < col_n)
{
A_ext_j[index] = k - col_1;
A_ext_data[index++] = A_ext_data[j];
}
else
{
kc = hypre_BinarySearch(col_map_offd,k,num_cols_A_offd);
if (kc > -1)
{
A_ext_j[index] = -kc-1;
A_ext_data[index++] = A_ext_data[j];
}
}
}
A_ext_i[i] = index;
}
for (i = num_cols_A_offd; i > 0; i--)
A_ext_i[i] = A_ext_i[i-1];
if (num_procs > 1) A_ext_i[0] = 0;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 2 Get A_ext = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a C-point, interpolation is the identity. Also set up
* mapping vector.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i.
*--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size);
/*-----------------------------------------------------------------------
* Intialize some stuff.
*-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Send and receive fine_to_coarse info.
*-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
fine_to_coarse[i] += my_first_cpt+coarse_shift;
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_offd_j
* and initialize interpolation weight to zero.
*-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
/* Loop over ith row of A. First, the diagonal part of A */
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
/*--------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and influences i,
* distribute a_{i,i1} to C-points that strongly influence i.
* Note: currently no distribution to the diagonal in this case.
*--------------------------------------------------------------*/
else
{
sum = zero;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*-----------------------------------------------------------*/
sgn = 1;
if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row &&
(sgn*A_diag_data[jj1]) < 0)
{
sum += A_diag_data[jj1];
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
sum += A_offd_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_diag_data[jj] / sum;
/*-----------------------------------------------------------
* Loop over row of A for point i1 and do the distribution.
*-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_diag_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_diag_data[jj1];
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
P_offd_data[P_marker_offd[i2]]
+= distribute * A_offd_data[jj1];
}
}
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
}
/*----------------------------------------------------------------
* Still looping over ith row of A. Next, loop over the
* off-diagonal part of A
*---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
/*--------------------------------------------------------------
* Case 1: neighbor i1 is a C-point and strongly influences i,
* accumulate a_{i,i1} into the interpolation weight.
*--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
}
/*------------------------------------------------------------
* Case 2: neighbor i1 is an F-point and influences i,
* distribute a_{i,i1} to C-points that strongly infuence i.
* Note: currently no distribution to the diagonal in this case.
*-----------------------------------------------------------*/
else
{
sum = zero;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and calculate the sum
* of the connections to c-points that strongly influence i.
*---------------------------------------------------------*/
/* find row number */
c_num = A_offd_j[jj];
sgn = 1;
if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = A_ext_j[jj1];
if (i2 > -1)
{
/* in the diagonal block */
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
/*---------------------------------------------------------
* Loop over row of A_ext for point i1 and do
* the distribution.
*--------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = A_ext_j[jj1];
if (i2 > -1) /* in the diagonal block */
{
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
P_offd_data[P_marker_offd[-i2-1]]
+= distribute * A_ext_data[jj1];
}
}
}
else
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
}
/*-----------------------------------------------------------------
* Set interpolation weight by dividing by the diagonal.
*-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] /= -diagonal;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] /= -diagonal;
}
}
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker);
hypre_TFree(P_marker_offd);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_A_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_Int,num_cols_P_offd);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
col_map_offd_P[i] = index++;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker);
}
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,A,fine_to_coarse_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd);
hypre_TFree(dof_func_offd);
hypre_TFree(int_buf_data);
hypre_TFree(fine_to_coarse);
hypre_TFree(fine_to_coarse_offd);
hypre_TFree(coarse_counter);
hypre_TFree(jj_count);
hypre_TFree(jj_count_offd);
if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext);
return(0);
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildDirInterp
*--------------------------------------------------------------------------*/
/*
 * Build the "direct" interpolation operator P.
 *
 * Each C-point interpolates to itself with weight one.  Each F-point i
 * receives weights only from the C-points that strongly influence it
 * (read from the rows of S); the weights are taken directly from the
 * corresponding entries of A and rescaled so that positive and negative
 * couplings are treated separately (see the alfa/beta scaling below).
 *
 * A                - fine-grid system matrix (ParCSR)
 * CF_marker        - C/F splitting: >= 0 marks C-points, < 0 F-points
 * S                - strength-of-connection matrix
 * num_cpts_global  - global partitioning of the coarse points
 * num_functions,
 * dof_func         - unknown-based system-AMG info (num_functions == 1
 *                    means a scalar problem and dof_func is ignored)
 * debug_flag       - 4 enables timing printout of the comm phases
 * trunc_factor,
 * max_elmts        - truncation controls, forwarded to
 *                    hypre_BoomerAMGInterpTruncation
 * col_offd_S_to_A  - optional map from S's off-diag columns to A's
 *                    (NULL when the column maps coincide)
 * P_ptr            - output: the assembled prolongation matrix
 *
 * Returns 0 on success.
 */
HYPRE_Int
hypre_BoomerAMGBuildDirInterp( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_Int *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrix *P;
HYPRE_Int *col_map_offd_P;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int *fine_to_coarse;
HYPRE_Int *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_Int total_global_cpts;
HYPRE_Int num_cols_P_offd,my_first_cpt;
HYPRE_Int i,i1;
HYPRE_Int j,jl,jj;
HYPRE_Int start;
HYPRE_Real diagonal;
/* Per-row accumulators: sums over all neighbors (N) and over the
 * interpolatory set (P), split by sign of the matrix entry. */
HYPRE_Real sum_N_pos, sum_P_pos;
HYPRE_Real sum_N_neg, sum_P_neg;
/* alfa scales the negative weights, beta the positive ones; both start
 * at 1.0 and are only updated when the corresponding P-sum is nonzero. */
HYPRE_Real alfa = 1.0;
HYPRE_Real beta = 1.0;
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
/* Determine the first locally owned coarse point and the global number
 * of coarse points, under either partitioning scheme. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
 * Get the CF_marker data for the off-processor columns
 *-------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
if (num_functions > 1 && num_cols_A_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends));
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
/* For systems, also exchange the unknown-function map. */
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
 * First Pass: Determine size of P and fill in fine_to_coarse mapping.
 *-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
 * Intialize counters and allocate mapping vector.
 *-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
 * Loop over fine grid.
 *-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
/* Split the fine rows into contiguous per-thread ranges [ns,ne);
 * the first 'rest' threads take one extra row each. */
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
 * If i is a C-point, interpolation is the identity. Also set up
 * mapping vector.
 *--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
 * If i is an F-point, interpolation is from the C-points that
 * strongly influence i.
 *--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
if (CF_marker_offd[i1] > 0)
{
jj_count_offd[j]++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] > 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
}
/*-----------------------------------------------------------------------
 * Allocate arrays.
 *-----------------------------------------------------------------------*/
/* Prefix-sum the per-thread counters so each entry holds the totals up
 * to and including that thread. */
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size);
/*-----------------------------------------------------------------------
 * Intialize some stuff.
 *-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
 * Send and receive fine_to_coarse info.
 *-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
/* Shift local coarse indices to global numbering for the exchange;
 * each thread offsets by the coarse count of all lower threads. */
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
fine_to_coarse[i] += my_first_cpt+coarse_shift;
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/* Undo the global shift locally: fine_to_coarse is used with local
 * coarse indices below. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
/*-----------------------------------------------------------------------
 * Loop over fine grid points.
 *-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,diagonal,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
/* Each thread resumes its counters from the prefix sums of pass 1,
 * so writes into P's arrays are disjoint across threads. */
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
/* P_marker maps a column index to its slot in the current row of P
 * (-1 when the column is not interpolatory for this row). */
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
 * If i is a c-point, interpolation is the identity.
 *--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
 * If i is an F-point, build interpolation.
 *--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
 * If neighbor i1 is a C-point, set column number in P_diag_j
 * and initialize interpolation weight to zero.
 *--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
/*-----------------------------------------------------------
 * If neighbor i1 is a C-point, set column number in P_offd_j
 * and initialize interpolation weight to zero.
 *-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
 * If neighbor i1 is a C-point, set column number in P_offd_j
 * and initialize interpolation weight to zero.
 *-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
jj_end_row_offd = jj_counter_offd;
/* A's rows store the diagonal entry first. */
diagonal = A_diag_data[A_diag_i[i]];
/* Loop over ith row of A. First, the diagonal part of A */
sum_N_pos = 0;
sum_N_neg = 0;
sum_P_pos = 0;
sum_P_neg = 0;
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
if (num_functions == 1 || dof_func[i1] == dof_func[i])
{
if (A_diag_data[jj] > 0)
sum_N_pos += A_diag_data[jj];
else
sum_N_neg += A_diag_data[jj];
}
/*--------------------------------------------------------------
 * Case 1: neighbor i1 is a C-point and strongly influences i,
 * accumulate a_{i,i1} into the interpolation weight.
 *--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
if (A_diag_data[jj] > 0)
sum_P_pos += A_diag_data[jj];
else
sum_P_neg += A_diag_data[jj];
}
}
/*----------------------------------------------------------------
 * Still looping over ith row of A. Next, loop over the
 * off-diagonal part of A
 *---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if (num_functions == 1 || dof_func_offd[i1] == dof_func[i])
{
if (A_offd_data[jj] > 0)
sum_N_pos += A_offd_data[jj];
else
sum_N_neg += A_offd_data[jj];
}
/*--------------------------------------------------------------
 * Case 1: neighbor i1 is a C-point and strongly influences i,
 * accumulate a_{i,i1} into the interpolation weight.
 *--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
if (A_offd_data[jj] > 0)
sum_P_pos += A_offd_data[jj];
else
sum_P_neg += A_offd_data[jj];
}
}
}
/* Scale factors: (full row sum)/(interpolatory sum)/diagonal,
 * computed separately for negative (alfa) and positive (beta)
 * entries.  NOTE(review): when a P-sum is zero the factor keeps
 * its previous value (initially 1.0) rather than being reset. */
if (sum_P_neg) alfa = sum_N_neg/sum_P_neg/diagonal;
if (sum_P_pos) beta = sum_N_pos/sum_P_pos/diagonal;
/*-----------------------------------------------------------------
 * Set interpolation weight by dividing by the diagonal.
 *-----------------------------------------------------------------*/
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
if (P_diag_data[jj]> 0)
P_diag_data[jj] *= -beta;
else
P_diag_data[jj] *= -alfa;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
if (P_offd_data[jj]> 0)
P_offd_data[jj] *= -beta;
else
P_offd_data[jj] *= -alfa;
}
}
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker);
hypre_TFree(P_marker_offd);
}
/* Assemble the ParCSR matrix P from the arrays built above. */
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
/* Truncation may reallocate the CSR arrays; refresh all pointers. */
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* Build the compressed off-diagonal column map: mark every column that
 * actually appears in P_offd_j, then renumber the entries to the
 * compressed 0..num_cols_P_offd-1 range. */
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_A_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_Int,num_cols_P_offd);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
col_map_offd_P[i] = index++;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker);
}
/* Restore special F-point markers (-3) to ordinary F-points (-1). */
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,A,fine_to_coarse_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd);
hypre_TFree(dof_func_offd);
hypre_TFree(int_buf_data);
hypre_TFree(fine_to_coarse);
hypre_TFree(fine_to_coarse_offd);
hypre_TFree(coarse_counter);
hypre_TFree(jj_count);
hypre_TFree(jj_count_offd);
return(0);
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGInterpTruncation
 *
 * Truncate the interpolation matrix P in place:
 *  (1) if trunc_factor > 0, drop every entry whose magnitude is below
 *      trunc_factor times the largest magnitude in its row, and
 *  (2) if max_elmts > 0, keep only the max_elmts largest-magnitude
 *      entries of each row (diag and offd parts considered together).
 * After either phase each surviving row is rescaled so the original row
 * sum is preserved.
 *
 * The work is threaded: each thread drops and compresses entries only
 * inside its own contiguous row range, recording how many it dropped;
 * afterwards the per-thread chunks are stitched into new process-wide
 * CSR arrays and the row pointers are made globally consistent.
 *
 * Returns 0 (ierr).
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGInterpTruncation( hypre_ParCSRMatrix *P,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts)
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] -= hypre_MPI_Wtime();
#endif
hypre_CSRMatrix *P_diag = hypre_ParCSRMatrixDiag(P);
HYPRE_Int *P_diag_i = hypre_CSRMatrixI(P_diag);
HYPRE_Int *P_diag_j = hypre_CSRMatrixJ(P_diag);
HYPRE_Real *P_diag_data = hypre_CSRMatrixData(P_diag);
HYPRE_Int *P_diag_j_new;
HYPRE_Real *P_diag_data_new;
hypre_CSRMatrix *P_offd = hypre_ParCSRMatrixOffd(P);
HYPRE_Int *P_offd_i = hypre_CSRMatrixI(P_offd);
HYPRE_Int *P_offd_j = hypre_CSRMatrixJ(P_offd);
HYPRE_Real *P_offd_data = hypre_CSRMatrixData(P_offd);
HYPRE_Int *P_offd_j_new;
HYPRE_Real *P_offd_data_new;
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(P_diag);
HYPRE_Int num_cols = hypre_CSRMatrixNumCols(P_diag);
HYPRE_Int i, j, start_j;
HYPRE_Int ierr = 0;
HYPRE_Int next_open;
HYPRE_Int now_checking;
HYPRE_Int num_lost;
HYPRE_Int num_lost_global=0;
HYPRE_Int next_open_offd;
HYPRE_Int now_checking_offd;
HYPRE_Int num_lost_offd;
HYPRE_Int num_lost_global_offd;
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int num_elmts;
HYPRE_Int cnt, cnt_diag, cnt_offd;
HYPRE_Real max_coef;
HYPRE_Real row_sum;
HYPRE_Real scale;
/* Threading variables. Entry i of num_lost_(offd_)per_thread holds the
 * number of dropped entries over thread i's row range. Cum_lost_per_thread
 * will temporarily store the cumulative number of dropped entries up to
 * each thread. */
HYPRE_Int my_thread_num, num_threads, start, stop;
HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1);
HYPRE_Int * cum_lost_per_thread;
HYPRE_Int * num_lost_per_thread;
HYPRE_Int * num_lost_offd_per_thread;
/* Initialize threading variables */
max_num_threads[0] = hypre_NumThreads();
cum_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]);
num_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]);
num_lost_offd_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]);
for(i=0; i < max_num_threads[0]; i++)
{
num_lost_per_thread[i] = 0;
num_lost_offd_per_thread[i] = 0;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,my_thread_num,num_threads,max_coef,j,start_j,row_sum,scale,num_lost,now_checking,next_open,num_lost_offd,now_checking_offd,next_open_offd,start,stop,cnt_diag,cnt_offd,num_elmts,cnt)
#endif
{
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
/* Compute each thread's range of rows to truncate and compress. Note,
 * that i, j and data are all compressed as entries are dropped, but
 * that the compression only occurs locally over each thread's row
 * range. P_diag_i is only made globally consistent at the end of this
 * routine. During the dropping phases, P_diag_i[stop] will point to
 * the start of the next thread's row range. */
/* my row range */
start = (n_fine/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{ stop = n_fine; }
else
{ stop = (n_fine/num_threads)*(my_thread_num+1); }
/*
 * Truncate based on truncation tolerance
 */
if (trunc_factor > 0)
{
num_lost = 0;
num_lost_offd = 0;
/* In-place compaction cursors: next_open is where the next kept
 * entry is written, now_checking is the entry being examined. */
next_open = P_diag_i[start];
now_checking = P_diag_i[start];
next_open_offd = P_offd_i[start];;
now_checking_offd = P_offd_i[start];;
for (i = start; i < stop; i++)
{
/* Row-wise drop threshold: trunc_factor times the largest
 * magnitude over both the diag and offd parts of the row. */
max_coef = 0;
for (j = P_diag_i[i]; j < P_diag_i[i+1]; j++)
max_coef = (max_coef < fabs(P_diag_data[j])) ?
fabs(P_diag_data[j]) : max_coef;
for (j = P_offd_i[i]; j < P_offd_i[i+1]; j++)
max_coef = (max_coef < fabs(P_offd_data[j])) ?
fabs(P_offd_data[j]) : max_coef;
max_coef *= trunc_factor;
start_j = P_diag_i[i];
if (num_lost) P_diag_i[i] -= num_lost;
row_sum = 0;
scale = 0;
for (j = start_j; j < P_diag_i[i+1]; j++)
{
row_sum += P_diag_data[now_checking];
if (fabs(P_diag_data[now_checking]) < max_coef)
{
num_lost++;
now_checking++;
}
else
{
scale += P_diag_data[now_checking];
P_diag_data[next_open] = P_diag_data[now_checking];
P_diag_j[next_open] = P_diag_j[now_checking];
now_checking++;
next_open++;
}
}
start_j = P_offd_i[i];
if (num_lost_offd) P_offd_i[i] -= num_lost_offd;
for (j = start_j; j < P_offd_i[i+1]; j++)
{
row_sum += P_offd_data[now_checking_offd];
if (fabs(P_offd_data[now_checking_offd]) < max_coef)
{
num_lost_offd++;
now_checking_offd++;
}
else
{
scale += P_offd_data[now_checking_offd];
P_offd_data[next_open_offd] = P_offd_data[now_checking_offd];
P_offd_j[next_open_offd] = P_offd_j[now_checking_offd];
now_checking_offd++;
next_open_offd++;
}
}
/* normalize row of P */
/* Rescale the kept entries so the row sum is unchanged. */
if (scale != 0.)
{
if (scale != row_sum)
{
scale = row_sum/scale;
for (j = P_diag_i[i]; j < (P_diag_i[i+1]-num_lost); j++)
P_diag_data[j] *= scale;
for (j = P_offd_i[i]; j < (P_offd_i[i+1]-num_lost_offd); j++)
P_offd_data[j] *= scale;
}
}
} /* end loop for (i = 0; i < n_fine; i++) */
/* store number of dropped elements and number of threads */
if(my_thread_num == 0)
{ max_num_threads[0] = num_threads; }
num_lost_per_thread[my_thread_num] = num_lost;
num_lost_offd_per_thread[my_thread_num] = num_lost_offd;
} /* end if (trunc_factor > 0) */
/*
 * Truncate based on capping the nnz per row
 *
 */
if (max_elmts > 0)
{
HYPRE_Int P_mxnum, cnt1, last_index, last_index_offd;
HYPRE_Int *P_aux_j;
HYPRE_Real *P_aux_data;
/* find maximum row length locally over this row range */
P_mxnum = 0;
for (i=start; i<stop; i++)
{
/* Note P_diag_i[stop] is the starting point for the next thread
 * in j and data, not the stop point for this thread */
last_index = P_diag_i[i+1];
last_index_offd = P_offd_i[i+1];
if(i == stop-1)
{
last_index -= num_lost_per_thread[my_thread_num];
last_index_offd -= num_lost_offd_per_thread[my_thread_num];
}
cnt1 = last_index-P_diag_i[i] + last_index_offd-P_offd_i[i];
if (cnt1 > P_mxnum) P_mxnum = cnt1;
}
/* Some rows exceed max_elmts, and require truncation. Essentially,
 * each thread truncates and compresses its range of rows locally. */
if (P_mxnum > max_elmts)
{
num_lost = 0;
num_lost_offd = 0;
/* two temporary arrays to hold row i for temporary operations */
P_aux_j = hypre_CTAlloc(HYPRE_Int, P_mxnum);
P_aux_data = hypre_CTAlloc(HYPRE_Real, P_mxnum);
cnt_diag = P_diag_i[start];
cnt_offd = P_offd_i[start];
for (i = start; i < stop; i++)
{
/* Note P_diag_i[stop] is the starting point for the next thread
 * in j and data, not the stop point for this thread */
last_index = P_diag_i[i+1];
last_index_offd = P_offd_i[i+1];
if(i == stop-1)
{
last_index -= num_lost_per_thread[my_thread_num];
last_index_offd -= num_lost_offd_per_thread[my_thread_num];
}
row_sum = 0;
num_elmts = last_index-P_diag_i[i] + last_index_offd-P_offd_i[i];
if (max_elmts < num_elmts)
{
/* copy both diagonal and off-diag parts of row i to _aux_ arrays */
cnt = 0;
for (j = P_diag_i[i]; j < last_index; j++)
{
P_aux_j[cnt] = P_diag_j[j];
P_aux_data[cnt++] = P_diag_data[j];
row_sum += P_diag_data[j];
}
/* Provisionally count every entry as lost; the kept ones are
 * subtracted back after the copy-out below. */
num_lost += cnt;
cnt1 = cnt;
/* Off-diag columns are offset by num_cols so diag and offd
 * entries can be distinguished after sorting together. */
for (j = P_offd_i[i]; j < last_index_offd; j++)
{
P_aux_j[cnt] = P_offd_j[j]+num_cols;
P_aux_data[cnt++] = P_offd_data[j];
row_sum += P_offd_data[j];
}
num_lost_offd += cnt-cnt1;
/* sort data */
hypre_qsort2abs(P_aux_j,P_aux_data,0,cnt-1);
scale = 0;
if (i > start)
{
P_diag_i[i] = cnt_diag;
P_offd_i[i] = cnt_offd;
}
/* Copy back only the max_elmts largest-magnitude entries. */
for (j = 0; j < max_elmts; j++)
{
scale += P_aux_data[j];
if (P_aux_j[j] < num_cols)
{
P_diag_j[cnt_diag] = P_aux_j[j];
P_diag_data[cnt_diag++] = P_aux_data[j];
}
else
{
P_offd_j[cnt_offd] = P_aux_j[j]-num_cols;
P_offd_data[cnt_offd++] = P_aux_data[j];
}
}
num_lost -= cnt_diag-P_diag_i[i];
num_lost_offd -= cnt_offd-P_offd_i[i];
/* normalize row of P */
if (scale != 0.)
{
if (scale != row_sum)
{
scale = row_sum/scale;
for (j = P_diag_i[i]; j < cnt_diag; j++)
P_diag_data[j] *= scale;
for (j = P_offd_i[i]; j < cnt_offd; j++)
P_offd_data[j] *= scale;
}
}
} /* end if (max_elmts < num_elmts) */
else
{
/* nothing dropped from this row, but still have to shift entries back
 * by the number dropped so far */
if (P_diag_i[i] != cnt_diag)
{
start_j = P_diag_i[i];
P_diag_i[i] = cnt_diag;
for (j = start_j; j < last_index; j++)
{
P_diag_j[cnt_diag] = P_diag_j[j];
P_diag_data[cnt_diag++] = P_diag_data[j];
}
}
else
cnt_diag += last_index-P_diag_i[i];
if (P_offd_i[i] != cnt_offd)
{
start_j = P_offd_i[i];
P_offd_i[i] = cnt_offd;
for (j = start_j; j < last_index_offd; j++)
{
P_offd_j[cnt_offd] = P_offd_j[j];
P_offd_data[cnt_offd++] = P_offd_data[j];
}
}
else
cnt_offd += last_index_offd-P_offd_i[i];
}
} /* end for (i = 0; i < n_fine; i++) */
num_lost_per_thread[my_thread_num] += num_lost;
num_lost_offd_per_thread[my_thread_num] += num_lost_offd;
hypre_TFree(P_aux_j);
hypre_TFree(P_aux_data);
} /* end if (P_mxnum > max_elmts) */
} /* end if (max_elmts > 0) */
/* Sum up num_lost_global */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if(my_thread_num == 0)
{
num_lost_global = 0;
num_lost_global_offd = 0;
for(i = 0; i < max_num_threads[0]; i++)
{
num_lost_global += num_lost_per_thread[i];
num_lost_global_offd += num_lost_offd_per_thread[i];
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/*
 * Synchronize and create new diag data structures
 */
if (num_lost_global)
{
/* Each thread has it's own locally compressed CSR matrix from rows start
 * to stop. Now, we have to copy each thread's chunk into the new
 * process-wide CSR data structures
 *
 * First, we compute the new process-wide number of nonzeros (i.e.,
 * P_diag_size), and compute cum_lost_per_thread[k] so that this
 * entry holds the cumulative sum of entries dropped up to and
 * including thread k. */
if(my_thread_num == 0)
{
P_diag_size = P_diag_i[n_fine];
for(i = 0; i < max_num_threads[0]; i++)
{
P_diag_size -= num_lost_per_thread[i];
if(i > 0)
{ cum_lost_per_thread[i] = num_lost_per_thread[i] + cum_lost_per_thread[i-1]; }
else
{ cum_lost_per_thread[i] = num_lost_per_thread[i]; }
}
P_diag_j_new = hypre_CTAlloc(HYPRE_Int,P_diag_size);
P_diag_data_new = hypre_CTAlloc(HYPRE_Real,P_diag_size);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* points to next open spot in new data structures for this thread */
if(my_thread_num == 0)
{ next_open = 0; }
else
{
/* remember, cum_lost_per_thread[k] stores the num dropped up to and
 * including thread k */
next_open = P_diag_i[start] - cum_lost_per_thread[my_thread_num-1];
}
/* copy the j and data arrays over */
for(i = P_diag_i[start]; i < P_diag_i[stop] - num_lost_per_thread[my_thread_num]; i++)
{
P_diag_j_new[next_open] = P_diag_j[i];
P_diag_data_new[next_open] = P_diag_data[i];
next_open += 1;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* update P_diag_i with number of dropped entries by all lower ranked
 * threads */
if(my_thread_num > 0)
{
for(i=start; i<stop; i++)
{
P_diag_i[i] -= cum_lost_per_thread[my_thread_num-1];
}
}
if(my_thread_num == 0)
{
/* Set last entry */
P_diag_i[n_fine] = P_diag_size ;
hypre_TFree(P_diag_j);
hypre_TFree(P_diag_data);
hypre_CSRMatrixJ(P_diag) = P_diag_j_new;
hypre_CSRMatrixData(P_diag) = P_diag_data_new;
hypre_CSRMatrixNumNonzeros(P_diag) = P_diag_size;
}
}
/*
 * Synchronize and create new offd data structures
 */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (num_lost_global_offd)
{
/* Repeat process for off-diagonal */
if(my_thread_num == 0)
{
P_offd_size = P_offd_i[n_fine];
for(i = 0; i < max_num_threads[0]; i++)
{
P_offd_size -= num_lost_offd_per_thread[i];
if(i > 0)
{ cum_lost_per_thread[i] = num_lost_offd_per_thread[i] + cum_lost_per_thread[i-1]; }
else
{ cum_lost_per_thread[i] = num_lost_offd_per_thread[i]; }
}
P_offd_j_new = hypre_CTAlloc(HYPRE_Int,P_offd_size);
P_offd_data_new = hypre_CTAlloc(HYPRE_Real,P_offd_size);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* points to next open spot in new data structures for this thread */
if(my_thread_num == 0)
{ next_open = 0; }
else
{
/* remember, cum_lost_per_thread[k] stores the num dropped up to and
 * including thread k */
next_open = P_offd_i[start] - cum_lost_per_thread[my_thread_num-1];
}
/* copy the j and data arrays over */
for(i = P_offd_i[start]; i < P_offd_i[stop] - num_lost_offd_per_thread[my_thread_num]; i++)
{
P_offd_j_new[next_open] = P_offd_j[i];
P_offd_data_new[next_open] = P_offd_data[i];
next_open += 1;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* update P_offd_i with number of dropped entries by all lower ranked
 * threads */
if(my_thread_num > 0)
{
for(i=start; i<stop; i++)
{
P_offd_i[i] -= cum_lost_per_thread[my_thread_num-1];
}
}
if(my_thread_num == 0)
{
/* Set last entry */
P_offd_i[n_fine] = P_offd_size ;
hypre_TFree(P_offd_j);
hypre_TFree(P_offd_data);
hypre_CSRMatrixJ(P_offd) = P_offd_j_new;
hypre_CSRMatrixData(P_offd) = P_offd_data_new;
hypre_CSRMatrixNumNonzeros(P_offd) = P_offd_size;
}
}
} /* end parallel region */
hypre_TFree(max_num_threads);
hypre_TFree(cum_lost_per_thread);
hypre_TFree(num_lost_per_thread);
hypre_TFree(num_lost_offd_per_thread);
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] += hypre_MPI_Wtime();
#endif
return ierr;
}
/* Quicksort v and w together, in place, into order of decreasing |w|;
 * only the magnitudes in w drive the comparisons, v just tags along. */
void hypre_qsort2abs( HYPRE_Int *v,
                      HYPRE_Real *w,
                      HYPRE_Int left,
                      HYPRE_Int right )
{
   HYPRE_Int pos, boundary;

   /* Ranges with fewer than two elements are already sorted. */
   if (right <= left)
   {
      return;
   }

   /* Move the middle element to the front to act as the pivot. */
   hypre_swap2(v, w, left, (left + right) / 2);

   /* Partition: entries with |w| strictly greater than the pivot's are
    * swapped into the prefix [left+1, boundary]. */
   boundary = left;
   pos = left + 1;
   while (pos <= right)
   {
      if (fabs(w[pos]) > fabs(w[left]))
      {
         boundary++;
         hypre_swap2(v, w, boundary, pos);
      }
      pos++;
   }

   /* Place the pivot between the partitions, then recurse on each side. */
   hypre_swap2(v, w, left, boundary);
   hypre_qsort2abs(v, w, left, boundary - 1);
   hypre_qsort2abs(v, w, boundary + 1, right);
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterpModUnk - this is a modified interpolation for the unknown approach.
* here we need to pass in a strength matrix built on the entire matrix.
*
*--------------------------------------------------------------------------*/
/* Build the classical (direct/standard style) interpolation operator P for the
 * "unknown approach": S is built on the entire matrix, but F-point
 * distribution and weak-connection lumping are restricted to entries of the
 * SAME function type (dof_func).  The resulting ParCSR matrix is returned in
 * *P_ptr.  Returns 0 on success.
 *
 * Parameters mirror the other BoomerAMG interpolation builders:
 *   A                - fine-grid operator
 *   CF_marker        - >=0 for C-points, <0 for F-points (-3 = special F, reset to -1 on exit)
 *   S                - strength matrix (built on the whole of A)
 *   num_cpts_global  - global coarse-point partitioning
 *   num_functions / dof_func - system-AMG function information
 *   debug_flag       - 4 enables timing prints; negative enables warnings
 *   trunc_factor / max_elmts - truncation controls applied to P after build
 *   col_offd_S_to_A  - optional map from S's offd columns to A's offd columns
 *   P_ptr            - output interpolation matrix
 */
HYPRE_Int
hypre_BoomerAMGBuildInterpModUnk( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_Int *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
hypre_ParCSRMatrix *P;
HYPRE_Int *col_map_offd_P;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data = NULL;
HYPRE_Int *A_ext_i = NULL;
HYPRE_Int *A_ext_j = NULL;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int strong_f_marker;
HYPRE_Int *fine_to_coarse;
HYPRE_Int *fine_to_coarse_offd;
HYPRE_Int *coarse_counter;
HYPRE_Int coarse_shift;
HYPRE_Int total_global_cpts;
HYPRE_Int num_cols_P_offd,my_first_cpt;
HYPRE_Int i,i1,i2;
HYPRE_Int j,jl,jj,jj1;
HYPRE_Int k,kc;
HYPRE_Int start;
HYPRE_Int sgn;
HYPRE_Int c_num;
HYPRE_Real diagonal;
HYPRE_Real sum;
HYPRE_Real distribute;
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int print_level = 0;
HYPRE_Int *int_buf_data;
HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int col_n = col_1 + local_numrows;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
 * Get the CF_marker data for the off-processor columns
 *-------------------------------------------------------------------*/
/* a negative debug_flag doubles as "print warnings" (e.g. zero diagonal) */
if (debug_flag < 0)
{
debug_flag = -debug_flag;
print_level = 1;
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
if (num_functions > 1 && num_cols_A_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends));
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
/* systems AMG: also exchange the function type of each off-proc column */
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*----------------------------------------------------------------------
 * Get the ghost rows of A
 *---------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_procs > 1)
{
A_ext = hypre_ParCSRMatrixExtractBExt(A,A,1);
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
}
/* Compress A_ext in place: local columns become (global - col_1);
 * recognized off-proc columns are encoded as -kc-1 (kc = local offd index);
 * unrecognized columns are dropped.  A_ext_i is rewritten to the new ends
 * and then shifted one slot right to restore a proper row-pointer array. */
index = 0;
for (i=0; i < num_cols_A_offd; i++)
{
for (j=A_ext_i[i]; j < A_ext_i[i+1]; j++)
{
k = A_ext_j[j];
if (k >= col_1 && k < col_n)
{
A_ext_j[index] = k - col_1;
A_ext_data[index++] = A_ext_data[j];
}
else
{
kc = hypre_BinarySearch(col_map_offd,k,num_cols_A_offd);
if (kc > -1)
{
A_ext_j[index] = -kc-1;
A_ext_data[index++] = A_ext_data[j];
}
}
}
A_ext_i[i] = index;
}
for (i = num_cols_A_offd; i > 0; i--)
A_ext_i[i] = A_ext_i[i-1];
if (num_procs > 1) A_ext_i[0] = 0;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 2 Get A_ext = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
 * First Pass: Determine size of P and fill in fine_to_coarse mapping.
 *-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
 * Initialize counters and allocate mapping vector.
 *-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
 * Loop over fine grid.
 *-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
/* rows are partitioned into num_threads near-equal contiguous ranges;
 * each thread counts its own nonzeros / coarse points into per-thread
 * slots that are prefix-summed afterwards */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
 * If i is a C-point, interpolation is the identity. Also set up
 * mapping vector.
 *--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
 * If i is an F-point, interpolation is from the C-points that
 * strongly influence i.
 *--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
}
/*-----------------------------------------------------------------------
 * Allocate arrays.
 *-----------------------------------------------------------------------*/
/* prefix-sum the per-thread counters so thread t knows its offset */
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size);
/*-----------------------------------------------------------------------
 * Initialize some stuff.
 *-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
/* NOTE(review): wall_time was not re-read since the previous print,
 * so this "Internal work 1" timing looks suspect — confirm intent */
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
 * Send and receive fine_to_coarse info.
 *-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
/* temporarily shift fine_to_coarse to *global* coarse indices so the
 * exchanged values are meaningful on the receiving process */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
coarse_shift = 0;
if (j > 0) coarse_shift = coarse_counter[j-1];
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
fine_to_coarse[i] += my_first_cpt+coarse_shift;
}
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
my_id, wall_time);
fflush(NULL);
}
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/* undo the global shift: fine_to_coarse is local again from here on */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;
/*-----------------------------------------------------------------------
 * Loop over fine grid points.
 *-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,diagonal,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,sgn,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
/* each thread starts writing at the offset computed in the first pass */
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
if (num_cols_A_offd)
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
else
P_marker_offd = NULL;
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_A_offd; i++)
{
P_marker_offd[i] = -1;
}
/* strong_f_marker is decremented per row so stale marks never match */
strong_f_marker = -2;
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
 * If i is a c-point, interpolation is the identity.
 *--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
 * If i is an F-point, build interpolation.
 *--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
 * If neighbor i1 is a C-point, set column number in P_diag_j
 * and initialize interpolation weight to zero.
 *--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
/*--------------------------------------------------------------
 * If neighbor i1 is an F-point, mark it as a strong F-point
 * whose connection needs to be distributed.
 *--------------------------------------------------------------*/
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
if (col_offd_S_to_A)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = col_offd_S_to_A[S_offd_j[jj]];
/*-----------------------------------------------------------
 * If neighbor i1 is a C-point, set column number in P_offd_j
 * and initialize interpolation weight to zero.
 *-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/* store the *local* offd index for now; columns are
 * compressed/renumbered after the build */
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
 * If neighbor i1 is an F-point, mark it as a strong F-point
 * whose connection needs to be distributed.
 *-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
else
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
 * If neighbor i1 is a C-point, set column number in P_offd_j
 * and initialize interpolation weight to zero.
 *-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
/*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
 * If neighbor i1 is an F-point, mark it as a strong F-point
 * whose connection needs to be distributed.
 *-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
}
jj_end_row_offd = jj_counter_offd;
/* a_ii is stored first in each CSR row of A_diag */
diagonal = A_diag_data[A_diag_i[i]];
/* Loop over ith row of A. First, the diagonal part of A */
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{
i1 = A_diag_j[jj];
/*--------------------------------------------------------------
 * Case 1: neighbor i1 is a C-point and strongly influences i,
 * accumulate a_{i,i1} into the interpolation weight.
 *--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
/*--------------------------------------------------------------
 * Case 2: neighbor i1 is an F-point and strongly influences i,
 * distribute a_{i,i1} to C-points that strongly influence i.
 * Note: currently no distribution to the diagonal in this case.
 HERE, we only want to distribute to points of the SAME function type
 *--------------------------------------------------------------*/
else if (P_marker[i1] == strong_f_marker)
{
sum = zero;
/*-----------------------------------------------------------
 * Loop over row of A for point i1 and calculate the sum
 * of the connections to c-points that strongly influence i.
 *-----------------------------------------------------------*/
sgn = 1;
if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (P_marker[i2] >= jj_begin_row &&
(sgn*A_diag_data[jj1]) < 0 )
{
sum += A_diag_data[jj1];
}
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
sum += A_offd_data[jj1];
}
}
}
}
if (sum != 0)
{
distribute = A_diag_data[jj] / sum;
/*-----------------------------------------------------------
 * Loop over row of A for point i1 and do the distribution.
 *-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_diag_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_diag_data[jj1];
}
}
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (P_marker_offd[i2] >= jj_begin_row_offd
&& (sgn*A_offd_data[jj1]) < 0)
{
P_offd_data[P_marker_offd[i2]]
+= distribute * A_offd_data[jj1];
}
}
}
}
}
else /* sum = 0 - only add to diag if the same function type */
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
/*--------------------------------------------------------------
 * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
 * into the diagonal. (only if the same function type)
 *--------------------------------------------------------------*/
else if (CF_marker[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
/*----------------------------------------------------------------
 * Still looping over ith row of A. Next, loop over the
 * off-diagonal part of A
 *---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
/*--------------------------------------------------------------
 * Case 1: neighbor i1 is a C-point and strongly influences i,
 * accumulate a_{i,i1} into the interpolation weight.
 *--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
}
/*------------------------------------------------------------
 * Case 2: neighbor i1 is an F-point and strongly influences i,
 * distribute a_{i,i1} to C-points that strongly influence i.
 * Note: currently no distribution to the diagonal in this case.
 AGAIN, we only want to distribute to points of the SAME function type
 *-----------------------------------------------------------*/
else if (P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
/*---------------------------------------------------------
 * Loop over row of A_ext for point i1 and calculate the sum
 * of the connections to c-points that strongly influence i.
 *---------------------------------------------------------*/
/* find row number */
c_num = A_offd_j[jj];
sgn = 1;
if (A_ext_data[A_ext_i[c_num]] < 0) sgn = -1;
/* A_ext columns were pre-encoded: >=0 means diag block,
 * negative means offd block at index (-i2-1) */
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = A_ext_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (i2 > -1)
{
/* in the diagonal block */
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
{
sum += A_ext_data[jj1];
}
}
}
}
if (sum != 0)
{
distribute = A_offd_data[jj] / sum;
/*---------------------------------------------------------
 * Loop over row of A_ext for point i1 and do
 * the distribution.
 *--------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = A_ext_i[c_num]; jj1 < A_ext_i[c_num+1]; jj1++)
{
i2 = A_ext_j[jj1];
if (num_functions == 1 || dof_func[i1] == dof_func[i2])
{
if (i2 > -1) /* in the diagonal block */
{
if (P_marker[i2] >= jj_begin_row
&& (sgn*A_ext_data[jj1]) < 0)
{
P_diag_data[P_marker[i2]]
+= distribute * A_ext_data[jj1];
}
}
else
{
/* in the off_diagonal block */
if (P_marker_offd[-i2-1] >= jj_begin_row_offd
&& (sgn*A_ext_data[jj1]) < 0)
P_offd_data[P_marker_offd[-i2-1]]
+= distribute * A_ext_data[jj1];
}
}
}
}
else /* sum = 0 */
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
/*-----------------------------------------------------------
 * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
 * into the diagonal.
 *-----------------------------------------------------------*/
else if (CF_marker_offd[i1] != -3)
{
if (num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
/*-----------------------------------------------------------------
 * Set interpolation weight by dividing by the diagonal.
 *-----------------------------------------------------------------*/
if (diagonal == 0.0)
{
if (print_level)
hypre_printf(" Warning! zero diagonal! Proc id %d row %d\n", my_id,i);
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] = 0.0;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] = 0.0;
}
}
else
{
for (jj = jj_begin_row; jj < jj_end_row; jj++)
{
P_diag_data[jj] /= -diagonal;
}
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
{
P_offd_data[jj] /= -diagonal;
}
}
}
strong_f_marker--;
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker);
hypre_TFree(P_marker_offd);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
/* truncation may have reallocated the CSR arrays — re-fetch them */
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* compress P_offd's column space: keep only columns actually used and
 * renumber P_offd_j against the new compressed map */
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_A_offd; i++)
P_marker[i] = 0;
num_cols_P_offd = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
num_cols_P_offd++;
P_marker[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_Int,num_cols_P_offd);
index = 0;
for (i=0; i < num_cols_P_offd; i++)
{
while (P_marker[index]==0) index++;
col_map_offd_P[i] = index++;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(col_map_offd_P,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker);
}
/* reset the special F-point marker -3 back to an ordinary F-point */
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd);
hypre_TFree(dof_func_offd);
hypre_TFree(int_buf_data);
hypre_TFree(fine_to_coarse);
hypre_TFree(fine_to_coarse_offd);
hypre_TFree(coarse_counter);
hypre_TFree(jj_count);
hypre_TFree(jj_count_offd);
if (num_procs > 1) hypre_CSRMatrixDestroy(A_ext);
return(0);
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGTruncandBuild
*--------------------------------------------------------------------------*/
/* Truncate P (drop entries below trunc_factor*max and/or keep at most
 * max_elmts per row), then compress P's offd column space and rebuild
 * its communication package.  Returns 0.
 *
 * The compressed column map is built in TWO passes over P_marker:
 *   pass 1 fills new_col_map_offd with the surviving *local* column
 *   indices, which is exactly what the hypre_BinarySearch renumbering of
 *   P_offd_j needs; pass 2 then overwrites the map entries with the
 *   corresponding *global* ids from col_map_offd.  The order of these
 *   passes is essential — do not merge them naively. */
HYPRE_Int
hypre_BoomerAMGTruncandBuild( hypre_ParCSRMatrix *P,
HYPRE_Real trunc_factor,
HYPRE_Int max_elmts)
{
hypre_CSRMatrix *P_offd = hypre_ParCSRMatrixOffd(P);
hypre_ParCSRCommPkg *commpkg_P = hypre_ParCSRMatrixCommPkg(P);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(P);
HYPRE_Int *P_offd_i = hypre_CSRMatrixI(P_offd);
HYPRE_Int *P_offd_j = hypre_CSRMatrixJ(P_offd);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(P_offd);
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(P_offd);
HYPRE_Int *new_col_map_offd;
HYPRE_Int P_offd_size=0, new_num_cols_offd;
HYPRE_Int *P_marker;
HYPRE_Int i;
HYPRE_Int index;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
/* truncation may reallocate the CSR arrays — re-fetch them */
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_size = P_offd_i[n_fine];
}
new_num_cols_offd = 0;
if (P_offd_size)
{
/* mark every offd column that still appears in P_offd_j */
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
/*#define HYPRE_SMP_PRIVATE i
#include "../utilities/hypre_smp_forloop.h"*/
for (i=0; i < num_cols_offd; i++)
P_marker[i] = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker[index])
{
new_num_cols_offd++;
P_marker[index] = 1;
}
}
if (new_num_cols_offd)
new_col_map_offd = hypre_CTAlloc(HYPRE_Int,new_num_cols_offd);
/* pass 1: surviving LOCAL column indices (consumed by the search below) */
index = 0;
for (i=0; i < new_num_cols_offd; i++)
{
while (P_marker[index]==0) index++;
new_col_map_offd[i] = index++;
}
/*#define HYPRE_SMP_PRIVATE i
#include "../utilities/hypre_smp_forloop.h"*/
/* renumber P_offd_j from old local indices to compressed indices */
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(new_col_map_offd,
P_offd_j[i],
new_num_cols_offd);
}
/* pass 2: replace the map entries with the GLOBAL column ids
 * (loop body never executes when P_offd_size == 0, so P_marker is safe) */
index = 0;
for(i = 0; i < new_num_cols_offd; i++)
{
while (P_marker[index] == 0) index++;
new_col_map_offd[i] = col_map_offd[index];
index++;
}
if (P_offd_size) hypre_TFree(P_marker);
if (new_num_cols_offd)
{
hypre_TFree(col_map_offd);
hypre_ParCSRMatrixColMapOffd(P) = new_col_map_offd;
hypre_CSRMatrixNumCols(P_offd) = new_num_cols_offd;
}
/* the old comm package refers to the old column map — rebuild it */
if (commpkg_P != NULL) hypre_MatvecCommPkgDestroy(commpkg_P);
hypre_MatvecCommPkgCreate(P);
return(0);
}
/* Build the weighted-Jacobi iteration matrix C = I - w * D^{-1} * A,
 * where D is the diagonal of A.  For w == 0 the per-row weight is taken
 * as the l1-norm of the row (sum of |a_ij| over diag and offd parts)
 * instead of the diagonal entry.  C shares A's sparsity pattern and
 * row partitioning (row/col starts are not owned by C). */
hypre_ParCSRMatrix *hypre_CreateC( hypre_ParCSRMatrix *A,
HYPRE_Real w)
{
   MPI_Comm            comm            = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix    *A_diag          = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real         *A_diag_data     = hypre_CSRMatrixData(A_diag);
   HYPRE_Int          *A_diag_i        = hypre_CSRMatrixI(A_diag);
   HYPRE_Int          *A_diag_j        = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix    *A_offd          = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real         *A_offd_data     = hypre_CSRMatrixData(A_offd);
   HYPRE_Int          *A_offd_i        = hypre_CSRMatrixI(A_offd);
   HYPRE_Int          *A_offd_j        = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int          *row_starts      = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_Int          *col_map_offd_A  = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_Int           num_rows        = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int           num_cols_offd   = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int           global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);

   hypre_ParCSRMatrix *C;
   hypre_CSRMatrix    *C_diag;
   hypre_CSRMatrix    *C_offd;
   HYPRE_Real         *C_diag_data;
   HYPRE_Int          *C_diag_i;
   HYPRE_Int          *C_diag_j;
   HYPRE_Real         *C_offd_data;
   HYPRE_Int          *C_offd_i;
   HYPRE_Int          *C_offd_j;
   HYPRE_Int          *col_map_offd_C;

   HYPRE_Int           row, k, diag_pos;
   HYPRE_Real          scale;            /* -w / a_ii (or -1/row_weight)   */
   HYPRE_Real          row_weight = w;   /* per-row weight when w == 0     */

   /* C is square with the same global size, partitioning, and nnz as A */
   C = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_rows, row_starts,
                                row_starts, num_cols_offd, A_diag_i[num_rows], A_offd_i[num_rows]);
   hypre_ParCSRMatrixInitialize(C);

   C_diag      = hypre_ParCSRMatrixDiag(C);
   C_offd      = hypre_ParCSRMatrixOffd(C);
   C_diag_i    = hypre_CSRMatrixI(C_diag);
   C_diag_j    = hypre_CSRMatrixJ(C_diag);
   C_diag_data = hypre_CSRMatrixData(C_diag);
   C_offd_i    = hypre_CSRMatrixI(C_offd);
   C_offd_j    = hypre_CSRMatrixJ(C_offd);
   C_offd_data = hypre_CSRMatrixData(C_offd);
   col_map_offd_C = hypre_ParCSRMatrixColMapOffd(C);

   /* row/col starts are borrowed from A, so C must not free them */
   hypre_ParCSRMatrixOwnsRowStarts(C) = 0;
   hypre_ParCSRMatrixOwnsColStarts(C) = 0;

   for (row = 0; row < num_cols_offd; row++)
      col_map_offd_C[row] = col_map_offd_A[row];

   for (row = 0; row < num_rows; row++)
   {
      /* a_ii is stored first in each CSR row of A_diag */
      diag_pos = A_diag_i[row];
      scale = -w/A_diag_data[diag_pos];
      C_diag_data[diag_pos] = 1.0-w;
      C_diag_j[diag_pos] = A_diag_j[diag_pos];
      if (w == 0)
      {
         /* w == 0: scale by the l1-norm of the whole row instead */
         row_weight = fabs(A_diag_data[diag_pos]);
         for (k = diag_pos+1; k < A_diag_i[row+1]; k++)
            row_weight += fabs(A_diag_data[k]);
         for (k = A_offd_i[row]; k < A_offd_i[row+1]; k++)
            row_weight += fabs(A_offd_data[k]);
         scale = -1/row_weight;
         C_diag_data[diag_pos] = 1.0-A_diag_data[diag_pos]/row_weight;
      }
      C_diag_i[row] = diag_pos;
      C_offd_i[row] = A_offd_i[row];
      /* off-diagonal entries of the row: c_ij = scale * a_ij */
      for (k = diag_pos+1; k < A_diag_i[row+1]; k++)
      {
         C_diag_data[k] = A_diag_data[k]*scale;
         C_diag_j[k] = A_diag_j[k];
      }
      for (k = A_offd_i[row]; k < A_offd_i[row+1]; k++)
      {
         C_offd_data[k] = A_offd_data[k]*scale;
         C_offd_j[k] = A_offd_j[k];
      }
   }
   C_diag_i[num_rows] = A_diag_i[num_rows];
   C_offd_i[num_rows] = A_offd_i[num_rows];
   return C;
}
| 150,307 | 33.006335 | 246 | c |
AMG | AMG-master/parcsr_ls/par_lr_interp.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include "_hypre_parcsr_ls.h"
#include "aux_interp.h"
#define MAX_C_CONNECTIONS 100
#define HAVE_COMMON_C 1
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildStdInterp
 *
 * Builds the "standard" (distance-two) interpolation operator P for
 * BoomerAMG. For each F-point i, interpolation is from the C-points that
 * strongly influence i plus the C-points that strongly influence F-points
 * that strongly influence i. Weights are computed by eliminating strong
 * F-neighbors into row i (the "ahat" row), then normalizing by the diagonal.
 *
 * Parameters:
 *   A              - fine-grid operator (ParCSR)
 *   CF_marker      - per-row flag: >= 0 C-point, -1 F-point, -3 special
 *                    F-point excluded from interpolation (reset to -1 on exit)
 *   S              - strength-of-connection matrix (pattern only)
 *   num_cpts_global- global coarse-point partitioning
 *   num_functions/dof_func - system-AMG function info (distribution is only
 *                    applied between unknowns of the same function)
 *   trunc_factor/max_elmts - truncation controls applied to P afterwards
 *   sep_weight     - if 1, positive and negative off-diagonals are scaled
 *                    separately (beta/alfa); otherwise a single scaling
 *   col_offd_S_to_A- optional map from S's offd columns to A's (may be NULL)
 *   P_ptr          - output: the interpolation matrix
 *
 * Returns hypre_error_flag.
 *
 * Comment: The interpolatory weighting can be changed with the sep_weight
 *          variable. This can enable not separating negative and positive
 *          off diagonals in the weight formula.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildStdInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
                              hypre_ParCSRMatrix *S, HYPRE_Int *num_cpts_global,
                              HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
                              HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
                              HYPRE_Int sep_weight, HYPRE_Int *col_offd_S_to_A,
                              hypre_ParCSRMatrix **P_ptr)
{
   /* Communication Variables */
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int my_id, num_procs;

   /* Variables to store input variables */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);

   /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
     HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   /* [col_1, col_n) is this rank's global column (= row) range; used below
    * to decide whether a global column index from A_ext/Sop is local. */
   HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int col_n = col_1 + local_numrows;
   HYPRE_Int total_global_cpts, my_first_cpt;

   /* Variables to store strong connection matrix info */
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);

   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);

   /* Interpolation matrix P */
   hypre_ParCSRMatrix *P;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;

   HYPRE_Real *P_diag_data = NULL;
   HYPRE_Int *P_diag_i, *P_diag_j = NULL;
   HYPRE_Real *P_offd_data = NULL;
   HYPRE_Int *P_offd_i, *P_offd_j = NULL;

   /* HYPRE_Int *col_map_offd_P = NULL;*/
   HYPRE_Int P_diag_size;
   HYPRE_Int P_offd_size;
   /* Marker arrays record, per fine row, which columns are already in the
    * current P row (value >= current row start) or are strong F-points
    * (value == strong_f_marker). */
   HYPRE_Int *P_marker = NULL;
   HYPRE_Int *P_marker_offd = NULL;
   HYPRE_Int *CF_marker_offd = NULL;
   HYPRE_Int *tmp_CF_marker_offd = NULL;
   HYPRE_Int *dof_func_offd = NULL;

   /* Full row information for columns of A that are off diag*/
   hypre_CSRMatrix *A_ext;
   HYPRE_Real *A_ext_data;
   HYPRE_Int *A_ext_i;
   HYPRE_Int *A_ext_j;

   HYPRE_Int *fine_to_coarse = NULL;
   HYPRE_Int *fine_to_coarse_offd = NULL;

   HYPRE_Int loc_col;
   HYPRE_Int full_off_procNodes;

   /* Sop holds the strength pattern of the external (off-processor) rows;
    * its column entries are global indices (local offd columns are encoded
    * as negative values, decoded via loc_col = -k1 - 1). */
   hypre_CSRMatrix *Sop;
   HYPRE_Int *Sop_i;
   HYPRE_Int *Sop_j;

   /* Variables to keep count of interpolatory points */
   HYPRE_Int jj_counter, jj_counter_offd;
   HYPRE_Int jj_begin_row, jj_end_row;
   HYPRE_Int jj_begin_row_offd = 0;
   HYPRE_Int jj_end_row_offd = 0;
   HYPRE_Int coarse_counter;

   /* ihat maps a fine column to its slot in the compressed work row ahat;
    * ipnt is the inverse map (slot -> fine column) used to reset ihat. */
   HYPRE_Int *ihat = NULL;
   HYPRE_Int *ihat_offd = NULL;
   HYPRE_Int *ipnt = NULL;
   HYPRE_Int *ipnt_offd = NULL;
   HYPRE_Int strong_f_marker = -2;

   /* Interpolation weight variables */
   HYPRE_Real *ahat = NULL;
   HYPRE_Real *ahat_offd = NULL;
   HYPRE_Real sum_pos, sum_pos_C, sum_neg, sum_neg_C, sum, sum_C;
   HYPRE_Real diagonal, distribute;
   HYPRE_Real alfa = 1.;
   HYPRE_Real beta = 1.;

   /* Loop variables */
   // HYPRE_Int index;
   HYPRE_Int start_indexing = 0;
   HYPRE_Int i, i1, j1, jj, kk, k1;
   HYPRE_Int cnt_c, cnt_f, cnt_c_offd, cnt_f_offd, indx;

   /* Definitions */
   HYPRE_Real zero = 0.0;
   HYPRE_Real one = 1.0;
   HYPRE_Real wall_time;
   HYPRE_Real wall_1 = 0;
   HYPRE_Real wall_2 = 0;
   HYPRE_Real wall_3 = 0;

   hypre_ParCSRCommPkg *extend_comm_pkg = NULL;

   if (debug_flag== 4) wall_time = time_getWallclockSeconds();

   /* BEGIN */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* Last rank knows the total number of coarse points; broadcast it. */
   my_first_cpt = num_cpts_global[0];
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
   my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   /* Set up off processor information (specifically for neighbors of
    * neighbors */
   full_off_procNodes = 0;
   if (num_procs > 1)
   {
      /* Gathers CF_marker_offd, dof_func_offd, external rows of A (A_ext)
       * and the external strength pattern (Sop), covering distance-2
       * neighbors. full_off_procNodes is the size of that extended halo. */
      hypre_exchange_interp_data(
         &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
         A, CF_marker, S, num_functions, dof_func, 0);
      {
#ifdef HYPRE_PROFILE
         hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
      }

      A_ext_i = hypre_CSRMatrixI(A_ext);
      A_ext_j = hypre_CSRMatrixJ(A_ext);
      A_ext_data = hypre_CSRMatrixData(A_ext);

      Sop_i = hypre_CSRMatrixI(Sop);
      Sop_j = hypre_CSRMatrixJ(Sop);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Intialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);

   if (n_fine)
   {
      fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
   }

   if (full_off_procNodes)
   {
      P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
      fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
      tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
   }

   hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
                         fine_to_coarse_offd, P_marker, P_marker_offd,
                         tmp_CF_marker_offd);

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   coarse_counter = 0;

   /*-----------------------------------------------------------------------
    * Loop over fine grid: count nonzeros per row of P (structure only).
    *-----------------------------------------------------------------------*/
   for (i = 0; i < n_fine; i++)
   {
      P_diag_i[i] = jj_counter;
      if (num_procs > 1)
         P_offd_i[i] = jj_counter_offd;

      if (CF_marker[i] >= 0)
      {
         /* C-point: interpolates only from itself (identity row). */
         jj_counter++;
         fine_to_coarse[i] = coarse_counter;
         coarse_counter++;
      }

      /*--------------------------------------------------------------------
       *  If i is an F-point, interpolation is from the C-points that
       *  strongly influence i, or C-points that stronly influence F-points
       *  that strongly influence i.
       *--------------------------------------------------------------------*/
      else if (CF_marker[i] != -3)
      {
         for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
         {
            i1 = S_diag_j[jj];
            if (CF_marker[i1] >= 0)
            { /* i1 is a C point */
               /* P_marker < row start means "not yet counted for this row" */
               if (P_marker[i1] < P_diag_i[i])
               {
                  P_marker[i1] = jj_counter;
                  jj_counter++;
               }
            }
            else if (CF_marker[i1] != -3)
            { /* i1 is a F point, loop through it's strong neighbors */
               for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
               {
                  k1 = S_diag_j[kk];
                  if (CF_marker[k1] >= 0)
                  {
                     if(P_marker[k1] < P_diag_i[i])
                     {
                        P_marker[k1] = jj_counter;
                        jj_counter++;
                     }
                  }
               }
               if(num_procs > 1)
               {
                  for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
                  {
                     if(col_offd_S_to_A)
                        k1 = col_offd_S_to_A[S_offd_j[kk]];
                     else
                        k1 = S_offd_j[kk];
                     if (CF_marker_offd[k1] >= 0)
                     {
                        if(P_marker_offd[k1] < P_offd_i[i])
                        {
                           tmp_CF_marker_offd[k1] = 1;
                           P_marker_offd[k1] = jj_counter_offd;
                           jj_counter_offd++;
                        }
                     }
                  }
               }
            }
         }
         /* Look at off diag strong connections of i */
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            {
               i1 = S_offd_j[jj];
               if(col_offd_S_to_A)
                  i1 = col_offd_S_to_A[i1];
               if (CF_marker_offd[i1] >= 0)
               {
                  if(P_marker_offd[i1] < P_offd_i[i])
                  {
                     tmp_CF_marker_offd[i1] = 1;
                     P_marker_offd[i1] = jj_counter_offd;
                     jj_counter_offd++;
                  }
               }
               else if (CF_marker_offd[i1] != -3)
               { /* F point; look at neighbors of i1. Sop contains global col
                  * numbers and entries that could be in S_diag or S_offd or
                  * neither. */
                  for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
                  {
                     k1 = Sop_j[kk];
                     if(k1 >= col_1 && k1 < col_n)
                     { /* In S_diag */
                        loc_col = k1-col_1;
                        if(CF_marker[loc_col] >= 0)
                        {
                           if(P_marker[loc_col] < P_diag_i[i])
                           {
                              P_marker[loc_col] = jj_counter;
                              jj_counter++;
                           }
                        }
                     }
                     else
                     {
                        /* Negative entries encode local offd columns. */
                        loc_col = -k1 - 1;
                        if(CF_marker_offd[loc_col] >= 0)
                        {
                           if(P_marker_offd[loc_col] < P_offd_i[i])
                           {
                              P_marker_offd[loc_col] = jj_counter_offd;
                              tmp_CF_marker_offd[loc_col] = 1;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
         }
      }
   }

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     determine structure    %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   /*-----------------------------------------------------------------------
    *  Allocate  arrays.
    *-----------------------------------------------------------------------*/

   P_diag_size = jj_counter;
   P_offd_size = jj_counter_offd;

   if (P_diag_size)
   {
      P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size);
      P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size);
   }

   if (P_offd_size)
   {
      P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size);
      P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size);
   }

   P_diag_i[n_fine] = jj_counter;
   P_offd_i[n_fine] = jj_counter_offd;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /* Fine to coarse mapping */
   if(num_procs > 1)
   {
      /* Temporarily shift to global coarse indices for the halo exchange,
       * then shift back to local. */
      for (i = 0; i < n_fine; i++)
         fine_to_coarse[i] += my_first_cpt;

      hypre_alt_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
                                 full_off_procNodes,
                                 fine_to_coarse_offd);

      for (i = 0; i < n_fine; i++)
         fine_to_coarse[i] -= my_first_cpt;
   }

   /* Initialize ahat, which is a modification to a, used in the standard
    * interpolation routine. */
   if (n_fine)
   {
      ahat = hypre_CTAlloc(HYPRE_Real, n_fine);
      ihat = hypre_CTAlloc(HYPRE_Int, n_fine);
      ipnt = hypre_CTAlloc(HYPRE_Int, n_fine);
   }
   if (full_off_procNodes)
   {
      ahat_offd = hypre_CTAlloc(HYPRE_Real, full_off_procNodes);
      ihat_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
      ipnt_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
   }

   for (i = 0; i < n_fine; i++)
   {
      P_marker[i] = -1;
      ahat[i] = 0;
      ihat[i] = -1;
   }
   for (i = 0; i < full_off_procNodes; i++)
   {
      P_marker_offd[i] = -1;
      ahat_offd[i] = 0;
      ihat_offd[i] = -1;
   }

   /*-----------------------------------------------------------------------
    *  Second Pass: loop over fine grid points, filling the column pattern
    *  of each P row and then computing the interpolation weights.
    *-----------------------------------------------------------------------*/
   for (i = 0; i < n_fine; i++)
   {
      jj_begin_row = jj_counter;
      if(num_procs > 1)
         jj_begin_row_offd = jj_counter_offd;

      /*--------------------------------------------------------------------
       *  If i is a c-point, interpolation is the identity.
       *--------------------------------------------------------------------*/

      if (CF_marker[i] >= 0)
      {
         P_diag_j[jj_counter]    = fine_to_coarse[i];
         P_diag_data[jj_counter] = one;
         jj_counter++;
      }

      /*--------------------------------------------------------------------
       *  If i is an F-point, build interpolation.
       *--------------------------------------------------------------------*/

      else if (CF_marker[i] != -3)
      {
         if (debug_flag==4) wall_time = time_getWallclockSeconds();
         /* Decrement so each F-row gets a fresh marker value; avoids
          * resetting P_marker between rows. */
         strong_f_marker--;

         /* --- pattern fill: same traversal as the counting pass --- */
         for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
         {
            i1 = S_diag_j[jj];

            /*--------------------------------------------------------------
             * If neighbor i1 is a C-point, set column number in P_diag_j
             * and initialize interpolation weight to zero.
             *--------------------------------------------------------------*/

            if (CF_marker[i1] >= 0)
            {
               if (P_marker[i1] < jj_begin_row)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter]    = i1;
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }
            }
            else  if (CF_marker[i1] != -3)
            {
               /* Strong F-neighbor: tag it so the weight pass knows to
                * distribute its row, and add its strong C-neighbors. */
               P_marker[i1] = strong_f_marker;
               for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
               {
                  k1 = S_diag_j[kk];
                  if (CF_marker[k1] >= 0)
                  {
                     if(P_marker[k1] < jj_begin_row)
                     {
                        P_marker[k1] = jj_counter;
                        P_diag_j[jj_counter] = k1;
                        P_diag_data[jj_counter] = zero;
                        jj_counter++;
                     }
                  }
               }
               if(num_procs > 1)
               {
                  for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
                  {
                     if(col_offd_S_to_A)
                        k1 = col_offd_S_to_A[S_offd_j[kk]];
                     else
                        k1 = S_offd_j[kk];
                     if(CF_marker_offd[k1] >= 0)
                     {
                        if(P_marker_offd[k1] < jj_begin_row_offd)
                        {
                           P_marker_offd[k1] = jj_counter_offd;
                           P_offd_j[jj_counter_offd] = k1;
                           P_offd_data[jj_counter_offd] = zero;
                           jj_counter_offd++;
                        }
                     }
                  }
               }
            }
         }

         if ( num_procs > 1)
         {
            for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            {
               i1 = S_offd_j[jj];
               if(col_offd_S_to_A)
                  i1 = col_offd_S_to_A[i1];
               if ( CF_marker_offd[i1] >= 0)
               {
                  if(P_marker_offd[i1] < jj_begin_row_offd)
                  {
                     P_marker_offd[i1] = jj_counter_offd;
                     P_offd_j[jj_counter_offd]=i1;
                     P_offd_data[jj_counter_offd] = zero;
                     jj_counter_offd++;
                  }
               }
               else if (CF_marker_offd[i1] != -3)
               {
                  P_marker_offd[i1] = strong_f_marker;
                  for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
                  {
                     k1 = Sop_j[kk];
                     if(k1 >= col_1 && k1 < col_n)
                     {
                        loc_col = k1-col_1;
                        if(CF_marker[loc_col] >= 0)
                        {
                           if(P_marker[loc_col] < jj_begin_row)
                           {
                              P_marker[loc_col] = jj_counter;
                              P_diag_j[jj_counter] = loc_col;
                              P_diag_data[jj_counter] = zero;
                              jj_counter++;
                           }
                        }
                     }
                     else
                     {
                        loc_col = -k1 - 1;
                        if(CF_marker_offd[loc_col] >= 0)
                        {
                           if(P_marker_offd[loc_col] < jj_begin_row_offd)
                           {
                              P_marker_offd[loc_col] = jj_counter_offd;
                              P_offd_j[jj_counter_offd]=loc_col;
                              P_offd_data[jj_counter_offd] = zero;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
         }

         jj_end_row = jj_counter;
         jj_end_row_offd = jj_counter_offd;

         if (debug_flag==4)
         {
            wall_time = time_getWallclockSeconds() - wall_time;
            wall_1 += wall_time;
            fflush(NULL);
         }
         if (debug_flag==4) wall_time = time_getWallclockSeconds();

         /* --- build the compressed work row ahat ---
          * Slots [0, cnt_c) hold C-point entries, slot cnt_c ends up holding
          * the diagonal, slots (cnt_c, cnt_f) hold F-point entries. cnt_f
          * starts at the number of C interp points so the two ranges never
          * collide. Strong F-neighbors (P_marker == strong_f_marker) are
          * eliminated: their rows are distributed into ahat scaled by
          * a_ij / a_jj. */
         cnt_c = 0;
         cnt_f = jj_end_row-jj_begin_row;
         cnt_c_offd = 0;
         cnt_f_offd = jj_end_row_offd-jj_begin_row_offd;
         ihat[i] = cnt_f;
         ipnt[cnt_f] = i;
         ahat[cnt_f++] = A_diag_data[A_diag_i[i]];
         for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
         { /* i1 is direct neighbor */
            i1 = A_diag_j[jj];
            if (P_marker[i1] != strong_f_marker)
            {
               indx = ihat[i1];
               if (indx > -1)
                  ahat[indx] += A_diag_data[jj];
               else if (P_marker[i1] >= jj_begin_row)
               {
                  ihat[i1] = cnt_c;
                  ipnt[cnt_c] = i1;
                  ahat[cnt_c++] += A_diag_data[jj];
               }
               else if (CF_marker[i1] != -3)
               {
                  ihat[i1] = cnt_f;
                  ipnt[cnt_f] = i1;
                  ahat[cnt_f++] += A_diag_data[jj];
               }
            }
            else
            {
               /* Only distribute within the same function/unknown. */
               if(num_functions == 1 || dof_func[i] == dof_func[i1])
               {
                  distribute = A_diag_data[jj]/A_diag_data[A_diag_i[i1]];
                  for (kk = A_diag_i[i1]+1; kk < A_diag_i[i1+1]; kk++)
                  {
                     k1 = A_diag_j[kk];
                     indx = ihat[k1];
                     if (indx > -1)
                        ahat[indx] -= A_diag_data[kk]*distribute;
                     else if (P_marker[k1] >= jj_begin_row)
                     {
                        ihat[k1] = cnt_c;
                        ipnt[cnt_c] = k1;
                        ahat[cnt_c++] -= A_diag_data[kk]*distribute;
                     }
                     else
                     {
                        ihat[k1] = cnt_f;
                        ipnt[cnt_f] = k1;
                        ahat[cnt_f++] -= A_diag_data[kk]*distribute;
                     }
                  }
                  if(num_procs > 1)
                  {
                     for (kk = A_offd_i[i1]; kk < A_offd_i[i1+1]; kk++)
                     {
                        k1 = A_offd_j[kk];
                        indx = ihat_offd[k1];
                        if(num_functions == 1 || dof_func[i1] == dof_func_offd[k1])
                        {
                           if (indx > -1)
                              ahat_offd[indx] -= A_offd_data[kk]*distribute;
                           else if (P_marker_offd[k1] >= jj_begin_row_offd)
                           {
                              ihat_offd[k1] = cnt_c_offd;
                              ipnt_offd[cnt_c_offd] = k1;
                              ahat_offd[cnt_c_offd++] -= A_offd_data[kk]*distribute;
                           }
                           else
                           {
                              ihat_offd[k1] = cnt_f_offd;
                              ipnt_offd[cnt_f_offd] = k1;
                              ahat_offd[cnt_f_offd++] -= A_offd_data[kk]*distribute;
                           }
                        }
                     }
                  }
               }
            }
         }
         if(num_procs > 1)
         {
            for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
            {
               i1 = A_offd_j[jj];
               if(P_marker_offd[i1] != strong_f_marker)
               {
                  indx = ihat_offd[i1];
                  if (indx > -1)
                     ahat_offd[indx] += A_offd_data[jj];
                  else if (P_marker_offd[i1] >= jj_begin_row_offd)
                  {
                     ihat_offd[i1] = cnt_c_offd;
                     ipnt_offd[cnt_c_offd] = i1;
                     ahat_offd[cnt_c_offd++] += A_offd_data[jj];
                  }
                  else if (CF_marker_offd[i1] != -3)
                  {
                     ihat_offd[i1] = cnt_f_offd;
                     ipnt_offd[cnt_f_offd] = i1;
                     ahat_offd[cnt_f_offd++] += A_offd_data[jj];
                  }
               }
               else
               {
                  if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                  {
                     /* External strong F-row comes from A_ext (global cols). */
                     distribute = A_offd_data[jj]/A_ext_data[A_ext_i[i1]];
                     for (kk = A_ext_i[i1]+1; kk < A_ext_i[i1+1]; kk++)
                     {
                        k1 = A_ext_j[kk];
                        if(k1 >= col_1 && k1 < col_n)
                        { /*diag*/
                           loc_col = k1 - col_1;
                           indx = ihat[loc_col];
                           if (indx > -1)
                              ahat[indx] -= A_ext_data[kk]*distribute;
                           else if (P_marker[loc_col] >= jj_begin_row)
                           {
                              ihat[loc_col] = cnt_c;
                              ipnt[cnt_c] = loc_col;
                              ahat[cnt_c++] -= A_ext_data[kk]*distribute;
                           }
                           else
                           {
                              ihat[loc_col] = cnt_f;
                              ipnt[cnt_f] = loc_col;
                              ahat[cnt_f++] -= A_ext_data[kk]*distribute;
                           }
                        }
                        else
                        {
                           loc_col = -k1 - 1;
                           if(num_functions == 1 ||
                              dof_func_offd[loc_col] == dof_func_offd[i1])
                           {
                              indx = ihat_offd[loc_col];
                              if (indx > -1)
                                 ahat_offd[indx] -= A_ext_data[kk]*distribute;
                              else if(P_marker_offd[loc_col] >= jj_begin_row_offd)
                              {
                                 ihat_offd[loc_col] = cnt_c_offd;
                                 ipnt_offd[cnt_c_offd] = loc_col;
                                 ahat_offd[cnt_c_offd++] -= A_ext_data[kk]*distribute;
                              }
                              else
                              {
                                 ihat_offd[loc_col] = cnt_f_offd;
                                 ipnt_offd[cnt_f_offd] = loc_col;
                                 ahat_offd[cnt_f_offd++] -= A_ext_data[kk]*distribute;
                              }
                           }
                        }
                     }
                  }
               }
            }
         }

         if (debug_flag==4)
         {
            wall_time = time_getWallclockSeconds() - wall_time;
            wall_2 += wall_time;
            fflush(NULL);
         }

         if (debug_flag==4) wall_time = time_getWallclockSeconds();
         /* After the loop, cnt_c equals the number of C interp points, so
          * ahat[cnt_c] is the slot where row i's diagonal was accumulated. */
         diagonal = ahat[cnt_c];
         ahat[cnt_c] = 0;
         sum_pos = 0;
         sum_pos_C = 0;
         sum_neg = 0;
         sum_neg_C = 0;
         sum = 0;
         sum_C = 0;
         if(sep_weight == 1)
         {
            /* Separate scaling: negative and positive entries get their
             * own distribution factors (alfa resp. beta). */
            for (jj=0; jj < cnt_c; jj++)
            {
               if (ahat[jj] > 0)
               {
                  sum_pos_C += ahat[jj];
               }
               else
               {
                  sum_neg_C += ahat[jj];
               }
            }
            if(num_procs > 1)
            {
               for (jj=0; jj < cnt_c_offd; jj++)
               {
                  if (ahat_offd[jj] > 0)
                  {
                     sum_pos_C += ahat_offd[jj];
                  }
                  else
                  {
                     sum_neg_C += ahat_offd[jj];
                  }
               }
            }
            sum_pos = sum_pos_C;
            sum_neg = sum_neg_C;
            for (jj=cnt_c+1; jj < cnt_f; jj++)
            {
               if (ahat[jj] > 0)
               {
                  sum_pos += ahat[jj];
               }
               else
               {
                  sum_neg += ahat[jj];
               }
               ahat[jj] = 0;
            }
            if(num_procs > 1)
            {
               for (jj=cnt_c_offd; jj < cnt_f_offd; jj++)
               {
                  if (ahat_offd[jj] > 0)
                  {
                     sum_pos += ahat_offd[jj];
                  }
                  else
                  {
                     sum_neg += ahat_offd[jj];
                  }
                  ahat_offd[jj] = 0;
               }
            }
            /* Guard against zero denominators (keeps alfa/beta at 1). */
            if (sum_neg_C*diagonal) alfa = sum_neg/sum_neg_C/diagonal;
            if (sum_pos_C*diagonal) beta = sum_pos/sum_pos_C/diagonal;

            /*-----------------------------------------------------------------
             * Set interpolation weight by dividing by the diagonal.
             *-----------------------------------------------------------------*/

            for (jj = jj_begin_row; jj < jj_end_row; jj++)
            {
               j1 = ihat[P_diag_j[jj]];
               if (ahat[j1] > 0)
                  P_diag_data[jj] = -beta*ahat[j1];
               else
                  P_diag_data[jj] = -alfa*ahat[j1];

               /* Convert local fine column to local coarse column. */
               P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]];
               ahat[j1] = 0;
            }
            /* Reset ihat for the next row via the ipnt inverse map. */
            for (jj=0; jj < cnt_f; jj++)
               ihat[ipnt[jj]] = -1;
            if(num_procs > 1)
            {
               for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
               {
                  j1 = ihat_offd[P_offd_j[jj]];
                  if (ahat_offd[j1] > 0)
                     P_offd_data[jj] = -beta*ahat_offd[j1];
                  else
                     P_offd_data[jj] = -alfa*ahat_offd[j1];

                  ahat_offd[j1] = 0;
               }
               for (jj=0; jj < cnt_f_offd; jj++)
                  ihat_offd[ipnt_offd[jj]] = -1;
            }
         }
         else
         {
            /* Single scaling factor for all entries. */
            for (jj=0; jj < cnt_c; jj++)
            {
               sum_C += ahat[jj];
            }
            if(num_procs > 1)
            {
               for (jj=0; jj < cnt_c_offd; jj++)
               {
                  sum_C += ahat_offd[jj];
               }
            }
            sum = sum_C;
            for (jj=cnt_c+1; jj < cnt_f; jj++)
            {
               sum += ahat[jj];
               ahat[jj] = 0;
            }
            if(num_procs > 1)
            {
               for (jj=cnt_c_offd; jj < cnt_f_offd; jj++)
               {
                  sum += ahat_offd[jj];
                  ahat_offd[jj] = 0;
               }
            }
            if (sum_C*diagonal) alfa = sum/sum_C/diagonal;

            /*-----------------------------------------------------------------
             * Set interpolation weight by dividing by the diagonal.
             *-----------------------------------------------------------------*/

            for (jj = jj_begin_row; jj < jj_end_row; jj++)
            {
               j1 = ihat[P_diag_j[jj]];
               P_diag_data[jj] = -alfa*ahat[j1];
               P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]];
               ahat[j1] = 0;
            }
            for (jj=0; jj < cnt_f; jj++)
               ihat[ipnt[jj]] = -1;
            if(num_procs > 1)
            {
               for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
               {
                  j1 = ihat_offd[P_offd_j[jj]];
                  P_offd_data[jj] = -alfa*ahat_offd[j1];
                  ahat_offd[j1] = 0;
               }
               for (jj=0; jj < cnt_f_offd; jj++)
                  ihat_offd[ipnt_offd[jj]] = -1;
            }
         }
         if (debug_flag==4)
         {
            wall_time = time_getWallclockSeconds() - wall_time;
            wall_3 += wall_time;
            fflush(NULL);
         }
      }
   }

   if (debug_flag==4)
   {
      hypre_printf("Proc = %d fill part 1 %f part 2 %f  part 3 %f\n",
                   my_id, wall_1, wall_2, wall_3);
      fflush(NULL);
   }

   /* Assemble the ParCSR interpolation matrix from the filled arrays.
    * Note: P shares A's col_starts as its row partitioning, hence
    * OwnsRowStarts is cleared below. */
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;

   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      /* Truncation may reallocate; refresh the local pointers. */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* This builds col_map, col_map should be monotone increasing and contain
    * global numbers. */
   if(P_offd_size)
   {
      hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
   }

   hypre_MatvecCommPkgCreate(P);

   /* Restore the special F-point marker to a regular F-point flag. */
   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;

   *P_ptr = P;

   /* Deallocate memory */
   hypre_TFree(fine_to_coarse);
   hypre_TFree(P_marker);
   hypre_TFree(ahat);
   hypre_TFree(ihat);
   hypre_TFree(ipnt);

   if (full_off_procNodes)
   {
      hypre_TFree(ahat_offd);
      hypre_TFree(ihat_offd);
      hypre_TFree(ipnt_offd);
   }
   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(Sop);
      hypre_CSRMatrixDestroy(A_ext);
      hypre_TFree(fine_to_coarse_offd);
      hypre_TFree(P_marker_offd);
      hypre_TFree(CF_marker_offd);
      hypre_TFree(tmp_CF_marker_offd);
      /* dof_func_offd is only allocated by the exchange when
       * num_functions > 1, so it is only freed in that case. */
      if(num_functions > 1)
         hypre_TFree(dof_func_offd);

      hypre_MatvecCommPkgDestroy(extend_comm_pkg);
   }

   return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildExtPIInterp
* Comment:
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildExtPIInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_Int *num_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] -= hypre_MPI_Wtime();
#endif
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int col_n = col_1 + local_numrows;
HYPRE_Int total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_Int *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_Int *fine_to_coarse_offd = NULL;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_Int *Sop_j;
HYPRE_Int sgn = 1;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
/* Interpolation weight variables */
HYPRE_Real sum, diagonal, distribute;
HYPRE_Int strong_f_marker;
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int start_indexing = 0;
HYPRE_Int i, i1, i2, jj, kk, k1, jj1;
/* Threading variables */
HYPRE_Int my_thread_num, num_threads, start, stop;
HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1);
HYPRE_Int * diag_offset;
HYPRE_Int * fine_to_coarse_offset;
HYPRE_Int * offd_offset;
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Real wall_time;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
* neighbors */
full_off_procNodes = 0;
if (num_procs > 1)
{
hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 1);
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
if (n_fine)
{
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
}
if (full_off_procNodes)
{
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
}
/* This function is smart enough to check P_marker and P_marker_offd only,
* and set them if they are not NULL. The other vectors are set regardless.*/
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
/*-----------------------------------------------------------------------
* Initialize threading variables
*-----------------------------------------------------------------------*/
max_num_threads[0] = hypre_NumThreads();
diag_offset = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]);
fine_to_coarse_offset = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]);
offd_offset = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]);
for(i=0; i < max_num_threads[0]; i++)
{
diag_offset[i] = 0;
fine_to_coarse_offset[i] = 0;
offd_offset[i] = 0;
}
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,my_thread_num,num_threads,start,stop,coarse_counter,jj_counter,jj_counter_offd, P_marker, P_marker_offd,jj,kk,i1,k1,loc_col,jj_begin_row,jj_begin_row_offd,jj_end_row,jj_end_row_offd,diagonal,sum,sgn,jj1,i2,distribute,strong_f_marker)
#endif
{
/* Parallelize by computing only over each thread's range of rows.
*
* The first large for loop computes ~locally~ for each thread P_diag_i,
* P_offd_i and fine_to_coarse. Then, the arrays are stitched together
* For eaxample the first phase would compute
* P_diag_i = [0, 2, 4, 7, 2, 5, 6]
* for two threads. P_diag_i[stop] points to the end of that
* thread's data, but P_diag_i[start] points to the end of the
* previous thread's row range. This is then stitched together at the
* end to yield,
* P_diag_i = [0, 2, 4, 7, 9, 14, 15].
*
* The second large for loop computes interpolation weights and is
* relatively straight-forward to thread.
*/
/* initialize thread-wise variables */
strong_f_marker = -2;
coarse_counter = 0;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (n_fine)
{
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
for (i = 0; i < n_fine; i++)
{ P_marker[i] = -1; }
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
for (i = 0; i < full_off_procNodes; i++)
{ P_marker_offd[i] = -1;}
}
/* this thread's row range */
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
start = (n_fine/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{ stop = n_fine; }
else
{ stop = (n_fine/num_threads)*(my_thread_num+1); }
/* loop over rows */
for (i = start; i < stop; i++)
{
P_diag_i[i] = jj_counter;
if (num_procs > 1)
P_offd_i[i] = jj_counter_offd;
if (CF_marker[i] >= 0)
{
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i, or C-points that stronly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{ /* i1 is a C point */
if (P_marker[i1] < P_diag_i[i])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{ /* i1 is a F point, loop through it's strong neighbors */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < P_diag_i[i])
{
P_marker[k1] = jj_counter;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < P_offd_i[i])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if (CF_marker_offd[i1] >= 0)
{
if(P_marker_offd[i1] < P_offd_i[i])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{ /* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
k1 = Sop_j[kk];
if(k1 >= col_1 && k1 < col_n)
{ /* In S_diag */
loc_col = k1-col_1;
if(P_marker[loc_col] < P_diag_i[i])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
}
}
else
{
loc_col = -k1 - 1;
if(P_marker_offd[loc_col] < P_offd_i[i])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
}
}
}
}
}
}
}
}
/*-----------------------------------------------------------------------
* End loop over fine grid.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
P_diag_i[stop] = jj_counter;
P_offd_i[stop] = jj_counter_offd;
fine_to_coarse_offset[my_thread_num] = coarse_counter;
diag_offset[my_thread_num] = jj_counter;
offd_offset[my_thread_num] = jj_counter_offd;
/* Stitch P_diag_i, P_offd_i and fine_to_coarse together */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if(my_thread_num == 0)
{
/* Calculate the offset for P_diag_i and P_offd_i for each thread */
for (i = 1; i < num_threads; i++)
{
diag_offset[i] = diag_offset[i-1] + diag_offset[i];
fine_to_coarse_offset[i] = fine_to_coarse_offset[i-1] + fine_to_coarse_offset[i];
offd_offset[i] = offd_offset[i-1] + offd_offset[i];
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if(my_thread_num > 0)
{
/* update row pointer array with offset,
* making sure to update the row stop index */
for (i = start+1; i <= stop; i++)
{
P_diag_i[i] += diag_offset[my_thread_num-1];
P_offd_i[i] += offd_offset[my_thread_num-1];
}
/* update fine_to_coarse by offsetting with the offset
* from the preceding thread */
for (i = start; i < stop; i++)
{
if(fine_to_coarse[i] >= 0)
{ fine_to_coarse[i] += fine_to_coarse_offset[my_thread_num-1]; }
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if(my_thread_num == 0)
{
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d determine structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
if (debug_flag== 4) wall_time = time_getWallclockSeconds();
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size);
}
}
/* Fine to coarse mapping */
if(num_procs > 1 && my_thread_num == 0)
{
for (i = 0; i < n_fine; i++)
fine_to_coarse[i] += my_first_cpt;
hypre_alt_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes,
fine_to_coarse_offd);
for (i = 0; i < n_fine; i++)
fine_to_coarse[i] -= my_first_cpt;
}
for (i = 0; i < n_fine; i++)
P_marker[i] = -1;
for (i = 0; i < full_off_procNodes; i++)
P_marker_offd[i] = -1;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (i = start; i < stop; i++)
{
jj_begin_row = P_diag_i[i];
jj_begin_row_offd = P_offd_i[i];
jj_counter = jj_begin_row;
jj_counter_offd = jj_begin_row_offd;
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else if (CF_marker[i1] != -3)
{
P_marker[i1] = strong_f_marker;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if(CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if ( CF_marker_offd[i1] >= 0)
{
if(P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
else if (CF_marker_offd[i1] != -3)
{
P_marker_offd[i1] = strong_f_marker;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
k1 = Sop_j[kk];
/* Find local col number */
if(k1 >= col_1 && k1 < col_n)
{
loc_col = k1-col_1;
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
else
{
loc_col = -k1 - 1;
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is a c-point and strongly influences i, accumulate
* a_(i,i1) into interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if(P_marker[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly influence i. */
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if((P_marker[i2] >= jj_begin_row || i2 == i) && (sgn*A_diag_data[jj1]) < 0)
sum += A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
sum += A_offd_data[jj1];
}
}
if(sum != 0)
{
distribute = A_diag_data[jj]/sum;
/* Loop over row of A for point i1 and do the distribution */
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute*A_diag_data[jj1];
if(i2 == i && (sgn*A_diag_data[jj1]) < 0)
diagonal += distribute*A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute*A_offd_data[jj1];
}
}
}
else
{
diagonal += A_diag_data[jj];
}
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
* diagonal */
else if (CF_marker[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
if(num_procs > 1)
{
for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if(P_marker_offd[i1] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
else if(P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
k1 = A_ext_j[jj1];
if(k1 >= col_1 && k1 < col_n)
{ /* diag */
loc_col = k1 - col_1;
if(P_marker[loc_col] >= jj_begin_row || loc_col == i)
sum += A_ext_data[jj1];
}
else
{
loc_col = -k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd)
sum += A_ext_data[jj1];
}
}
if(sum != 0)
{
distribute = A_offd_data[jj] / sum;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
k1 = A_ext_j[jj1];
if(k1 >= col_1 && k1 < col_n)
{ /* diag */
loc_col = k1 - col_1;
if(P_marker[loc_col] >= jj_begin_row)
P_diag_data[P_marker[loc_col]] += distribute*
A_ext_data[jj1];
if(loc_col == i)
diagonal += distribute*A_ext_data[jj1];
}
else
{
loc_col = -k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[loc_col]] += distribute*
A_ext_data[jj1];
}
}
}
else
{
diagonal += A_offd_data[jj];
}
}
else if (CF_marker_offd[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
if (diagonal)
{
for(jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= -diagonal;
for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= -diagonal;
}
}
strong_f_marker--;
}
/*-----------------------------------------------------------------------
* End large for loop over nfine
*-----------------------------------------------------------------------*/
if (n_fine)
{ hypre_TFree(P_marker); }
if (full_off_procNodes)
{ hypre_TFree(P_marker_offd); }
}
/*-----------------------------------------------------------------------
* End PAR_REGION
*-----------------------------------------------------------------------*/
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d fill structure %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] -= hypre_MPI_Wtime();
#endif
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if(P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(max_num_threads);
hypre_TFree(fine_to_coarse);
hypre_TFree(diag_offset);
hypre_TFree(offd_offset);
hypre_TFree(fine_to_coarse_offset);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd);
hypre_TFree(CF_marker_offd);
hypre_TFree(tmp_CF_marker_offd);
if(num_functions > 1)
hypre_TFree(dof_func_offd);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildExtPICCInterp
 * Comment: An F-F connection extends the interpolation set only when the
 *          two F points share no common strongly-influencing C point.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildExtPICCInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_Int *num_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int col_n = col_1 + local_numrows;
HYPRE_Int total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/*HYPRE_Int **ext_p, **ext_p_offd;*/
/*HYPRE_Int ccounter_offd;
HYPRE_Int *clist_offd;*/
HYPRE_Int common_c;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_Int *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_Int *fine_to_coarse_offd = NULL;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_Int *Sop_j;
HYPRE_Int sgn = 1;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
/* Interpolation weight variables */
HYPRE_Real sum, diagonal, distribute;
HYPRE_Int strong_f_marker = -2;
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int start_indexing = 0;
HYPRE_Int i, i1, i2, jj, kk, k1, jj1;
/*HYPRE_Int ccounter;
HYPRE_Int *clist, ccounter;*/
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
* neighbors */
full_off_procNodes = 0;
if (num_procs > 1)
{
hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 1);
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
if (n_fine)
{
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
}
/*clist = hypre_CTAlloc(HYPRE_Int, MAX_C_CONNECTIONS);
for(i = 0; i < MAX_C_CONNECTIONS; i++)
clist[i] = 0;
if(num_procs > 1)
{
clist_offd = hypre_CTAlloc(HYPRE_Int, MAX_C_CONNECTIONS);
for(i = 0; i < MAX_C_CONNECTIONS; i++)
clist_offd[i] = 0;
}*/
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
P_diag_i[i] = jj_counter;
if (num_procs > 1)
P_offd_i[i] = jj_counter_offd;
if (CF_marker[i] >= 0)
{
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i, or C-points that stronly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
/* Initialize ccounter for each f point */
/*ccounter = 0;
ccounter_offd = 0;*/
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* search through diag to find all c neighbors */
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{ /* i1 is a C point */
CF_marker[i1] = 2;
if (P_marker[i1] < P_diag_i[i])
{
/*clist[ccounter++] = i1;*/
P_marker[i1] = jj_counter;
jj_counter++;
}
}
}
/*qsort0(clist,0,ccounter-1);*/
if(num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{ /* search through offd to find all c neighbors */
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if(CF_marker_offd[i1] > 0)
{ /* i1 is a C point direct neighbor */
CF_marker_offd[i1] = 2;
if(P_marker_offd[i1] < P_offd_i[i])
{
/*clist_offd[ccounter_offd++] = i1;*/
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
/*qsort0(clist_offd,0,ccounter_offd-1);*/
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search diag to find f neighbors and determine if common c point */
i1 = S_diag_j[jj];
if (CF_marker[i1] == -1)
{ /* i1 is a F point, loop through it's strong neighbors */
common_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] == 2)
{
/*if(hypre_BinarySearch(clist,k1,ccounter) >= 0)
{*/
common_c = 1;
break;
/*kk = S_diag_i[i1+1];
}*/
}
}
if(num_procs > 1 && common_c == 0)
{ /* no common c point yet, check offd */
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] == 2)
{ /* k1 is a c point check if it is common */
/*if(hypre_BinarySearch(clist_offd,k1,ccounter_offd) >= 0)
{*/
common_c = 1;
break;
/*kk = S_offd_i[i1+1];
}*/
}
}
}
if(!common_c)
{ /* No common c point, extend the interp set */
for(kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if(CF_marker[k1] > 0)
{
if(P_marker[k1] < P_diag_i[i])
{
P_marker[k1] = jj_counter;
jj_counter++;
/*break;*/
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if(P_marker_offd[k1] < P_offd_i[i])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
/*break;*/
}
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if (CF_marker_offd[i1] == -1)
{ /* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
common_c = 0;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
k1 = Sop_j[kk];
if(k1 >= col_1 && k1 < col_n)
{ /* In S_diag */
loc_col = k1-col_1;
if(CF_marker[loc_col] == 2)
{
/*if(hypre_BinarySearch(clist,loc_col,ccounter) >= 0)
{*/
common_c = 1;
break;
/*kk = Sop_i[i1+1];
}*/
}
}
else
{
loc_col = -k1 - 1;
if(CF_marker_offd[loc_col] == 2)
{
/*if(hypre_BinarySearch(clist_offd,loc_col,ccounter_offd) >=
0)
{*/
common_c = 1;
break;
/*kk = Sop_i[i1+1];
}*/
}
}
}
if(!common_c)
{
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
k1 = Sop_j[kk];
if(k1 >= col_1 && k1 < col_n)
{ /* In S_diag */
loc_col = k1-col_1;
if(P_marker[loc_col] < P_diag_i[i])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
/*break;*/
}
}
else
{
loc_col = -k1 - 1;
if(P_marker_offd[loc_col] < P_offd_i[i])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
/*break;*/
}
}
}
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* search through diag to find all c neighbors */
i1 = S_diag_j[jj];
if (CF_marker[i1] == 2)
CF_marker[i1] = 1;
}
if(num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{ /* search through offd to find all c neighbors */
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if(CF_marker_offd[i1] == 2)
{ /* i1 is a C point direct neighbor */
CF_marker_offd[i1] = 1;
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size);
}
P_diag_i[n_fine] = jj_counter;
P_offd_i[n_fine] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*ccounter = start_indexing;
ccounter_offd = start_indexing;*/
/* Fine to coarse mapping */
if(num_procs > 1)
{
for (i = 0; i < n_fine; i++)
fine_to_coarse[i] += my_first_cpt;
hypre_alt_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes,
fine_to_coarse_offd);
for (i = 0; i < n_fine; i++)
fine_to_coarse[i] -= my_first_cpt;
}
for (i = 0; i < n_fine; i++)
P_marker[i] = -1;
for (i = 0; i < full_off_procNodes; i++)
P_marker_offd[i] = -1;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
jj_begin_row = jj_counter;
if(num_procs > 1)
jj_begin_row_offd = jj_counter_offd;
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
/*ccounter = 0;
ccounter_offd = 0;*/
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search C points only */
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] > 0)
{
CF_marker[i1] = 2;
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
/*clist[ccounter++] = i1;*/
}
}
}
/*qsort0(clist,0,ccounter-1);*/
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] > 0)
{
CF_marker_offd[i1] = 2;
if(P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
/*clist_offd[ccounter_offd++] = i1;*/
}
}
}
/*qsort0(clist_offd,0,ccounter_offd-1);*/
}
for(jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search through F points */
i1 = S_diag_j[jj];
if(CF_marker[i1] == -1)
{
P_marker[i1] = strong_f_marker;
common_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] == 2)
{
/*if(hypre_BinarySearch(clist,k1,ccounter) >= 0)
{*/
common_c = 1;
break;
/*kk = S_diag_i[i1+1];
}*/
}
}
if(num_procs > 1 && common_c == 0)
{ /* no common c point yet, check offd */
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] == 2)
{ /* k1 is a c point check if it is common */
/*if(hypre_BinarySearch(clist_offd,k1,ccounter_offd) >= 0)
{*/
common_c = 1;
break;
/*kk = S_offd_i[i1+1];
}*/
}
}
}
if(!common_c)
{ /* No common c point, extend the interp set */
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
/*break;*/
}
}
}
if(num_procs > 1)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if(CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
/*break;*/
}
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if(CF_marker_offd[i1] == -1)
{ /* F points that are off proc */
P_marker_offd[i1] = strong_f_marker;
common_c = 0;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
k1 = Sop_j[kk];
if(k1 >= col_1 && k1 < col_n)
{ /* In S_diag */
loc_col = k1-col_1;
if(CF_marker[loc_col] == 2)
{
/*if(hypre_BinarySearch(clist,loc_col,ccounter) >= 0)
{*/
common_c = 1;
break;
/*kk = Sop_i[i1+1];
}*/
}
}
else
{
loc_col = -k1 - 1;
if(CF_marker_offd[loc_col] == 2)
{
/*if(hypre_BinarySearch(clist_offd,loc_col,ccounter_offd) >=
0)
{*/
common_c = 1;
break;
/*kk = Sop_i[i1+1];
}*/
}
}
}
if(!common_c)
{
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
k1 = Sop_j[kk];
/* Find local col number */
if(k1 >= col_1 && k1 < col_n)
{
loc_col = k1-col_1;
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
/*break;*/
}
}
else
{
loc_col = -k1 - 1;
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
/*break;*/
}
}
}
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search C points only */
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] == 2)
{
CF_marker[i1] = 1;
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] == 2)
{
CF_marker_offd[i1] = 1;
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is a c-point and strongly influences i, accumulate
* a_(i,i1) into interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if(P_marker[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if((P_marker[i2] >= jj_begin_row || i2 == i) && (sgn*A_diag_data[jj1]) < 0)
sum += A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
sum += A_offd_data[jj1];
}
}
if(sum != 0)
{
distribute = A_diag_data[jj]/sum;
/* Loop over row of A for point i1 and do the distribution */
for(jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute*A_diag_data[jj1];
if(i2 == i && (sgn*A_diag_data[jj1]) < 0)
diagonal += distribute*A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute*A_offd_data[jj1];
}
}
}
else
diagonal += A_diag_data[jj];
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
* diagonal */
else if (CF_marker[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
if(num_procs > 1)
{
for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if(P_marker_offd[i1] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
else if(P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
sgn = 1;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
k1 = A_ext_j[jj1];
if(k1 >= col_1 && k1 < col_n)
{ /* diag */
loc_col = k1 - col_1;
if(P_marker[loc_col] >= jj_begin_row || loc_col == i)
sum += A_ext_data[jj1];
}
else
{
loc_col = -k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd)
sum += A_ext_data[jj1];
}
}
if(sum != 0)
{
distribute = A_offd_data[jj] / sum;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
k1 = A_ext_j[jj1];
if(k1 >= col_1 && k1 < col_n)
{ /* diag */
loc_col = k1 - col_1;
if(P_marker[loc_col] >= jj_begin_row)
P_diag_data[P_marker[loc_col]] += distribute*
A_ext_data[jj1];
if(loc_col == i)
diagonal += distribute*A_ext_data[jj1];
}
else
{
loc_col = -k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[loc_col]] += distribute*
A_ext_data[jj1];
}
}
}
else
diagonal += A_offd_data[jj];
}
else if (CF_marker_offd[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
if (diagonal)
{
for(jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= -diagonal;
for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= -diagonal;
}
}
strong_f_marker--;
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if(P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse);
hypre_TFree(P_marker);
/*hypre_TFree(clist);*/
if (num_procs > 1)
{
/*hypre_TFree(clist_offd);*/
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd);
hypre_TFree(P_marker_offd);
hypre_TFree(CF_marker_offd);
hypre_TFree(tmp_CF_marker_offd);
if(num_functions > 1)
hypre_TFree(dof_func_offd);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildFFInterp
 * Comment: Only use FF when there is no common c point.
 *
 * Builds the "FF" interpolation operator P for BoomerAMG.  For each F-point
 * i, interpolation is taken from the C-points that strongly influence i.
 * A strongly-influencing F-neighbor i1 contributes its own strong C-points
 * to i's interpolation set only when i and i1 share no common C-point
 * (hence "FF": the F-F connection is expanded only in that case).
 *
 * The routine makes two passes over the fine grid:
 *   Pass 1 - count the entries of P per row (fills P_diag_i / P_offd_i) and
 *            build the fine-to-coarse index map.
 *   Pass 2 - fill P_diag_j/P_offd_j and compute the interpolation weights
 *            by classical distribution of strong-F connections.
 *
 * Parameters:
 *   A               - fine-grid operator (ParCSR).
 *   CF_marker       - C/F splitting: > 0 C-point, < 0 F-point, -3 special
 *                     (skipped; restored to -1 before return).  Entries are
 *                     temporarily set to 2 inside this routine while a row's
 *                     direct C-neighbors are being scanned, and reset to 1
 *                     before the next row.
 *   S               - strength-of-connection matrix matching A.
 *   num_cpts_global - coarse-point partitioning across ranks.
 *   num_functions,
 *   dof_func        - number of PDE functions and per-dof function labels;
 *                     cross-function couplings are lumped into the diagonal.
 *   debug_flag      - unused here.
 *   trunc_factor,
 *   max_elmts       - truncation controls applied to the finished P.
 *   col_offd_S_to_A - optional map from S's offd column space to A's.
 *   P_ptr           - output: the interpolation matrix.
 *
 * Returns hypre_error_flag.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildFFInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
                             hypre_ParCSRMatrix *S, HYPRE_Int *num_cpts_global,
                             HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
                             HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
                             HYPRE_Int *col_offd_S_to_A,
                             hypre_ParCSRMatrix **P_ptr)
{
   /* Communication Variables */
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int my_id, num_procs;
   /* Variables to store input variables */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
     HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   /* [col_1, col_n) is this rank's global row/column ownership range;
    * used below to decide whether a global column index is local. */
   HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int col_n = col_1 + local_numrows;
   HYPRE_Int total_global_cpts, my_first_cpt;
   /* Variables to store strong connection matrix info */
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
   /* Interpolation matrix P */
   hypre_ParCSRMatrix *P;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data = NULL;
   HYPRE_Int *P_diag_i, *P_diag_j = NULL;
   HYPRE_Real *P_offd_data = NULL;
   HYPRE_Int *P_offd_i, *P_offd_j = NULL;
   /*HYPRE_Int *col_map_offd_P = NULL;*/
   HYPRE_Int P_diag_size;
   HYPRE_Int P_offd_size;
   /* P_marker / P_marker_offd record, per column, the position in the
    * current row of P that the column occupies (or a stale value from an
    * earlier row; staleness is detected by comparing against the row's
    * starting counter or the per-row strong_f_marker). */
   HYPRE_Int *P_marker = NULL;
   HYPRE_Int *P_marker_offd = NULL;
   HYPRE_Int *CF_marker_offd = NULL;
   HYPRE_Int *tmp_CF_marker_offd = NULL;
   HYPRE_Int *dof_func_offd = NULL;
   /*HYPRE_Int ccounter_offd;*/
   HYPRE_Int common_c;
   /* Full row information for columns of A that are off diag*/
   hypre_CSRMatrix *A_ext;
   HYPRE_Real *A_ext_data;
   HYPRE_Int *A_ext_i;
   HYPRE_Int *A_ext_j;
   HYPRE_Int *fine_to_coarse = NULL;
   HYPRE_Int *fine_to_coarse_offd = NULL;
   HYPRE_Int loc_col;
   HYPRE_Int full_off_procNodes;
   /* Sop holds the strong connections of off-processor neighbor rows;
    * its column entries are global indices, with off-diag columns encoded
    * as negative values (decoded below as loc_col = -k1 - 1). */
   hypre_CSRMatrix *Sop;
   HYPRE_Int *Sop_i;
   HYPRE_Int *Sop_j;
   /* Variables to keep count of interpolatory points */
   HYPRE_Int jj_counter, jj_counter_offd;
   HYPRE_Int jj_begin_row, jj_end_row;
   HYPRE_Int jj_begin_row_offd = 0;
   HYPRE_Int jj_end_row_offd = 0;
   HYPRE_Int coarse_counter;
   /* Interpolation weight variables */
   HYPRE_Real sum, diagonal, distribute;
   /* strong_f_marker is decremented once per F-row processed, so a value
    * written into P_marker for one row can never be mistaken for the
    * strong-F tag of a later row. */
   HYPRE_Int strong_f_marker = -2;
   HYPRE_Int sgn = 1;
   /* Loop variables */
   /*HYPRE_Int index;*/
   HYPRE_Int start_indexing = 0;
   HYPRE_Int i, i1, i2, jj, kk, k1, jj1;
   /*HYPRE_Int ccounter;
     HYPRE_Int *clist, ccounter;*/
   /* Definitions */
   HYPRE_Real zero = 0.0;
   HYPRE_Real one = 1.0;
   hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
   /* BEGIN */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* With no global partition, only the last rank knows the total number
    * of coarse points; broadcast it to everyone. */
   my_first_cpt = num_cpts_global[0];
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
   my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   /* Set up off processor information (specifically for neighbors of
    * neighbors */
   full_off_procNodes = 0;
   if (num_procs > 1)
   {
      /* Gathers CF_marker/dof_func for all distance-2 off-proc nodes, plus
       * the full rows of A (A_ext) and S (Sop) for off-proc neighbors. */
      hypre_exchange_interp_data(
         &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
         A, CF_marker, S, num_functions, dof_func, 1);
      {
#ifdef HYPRE_PROFILE
         hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
      }
      A_ext_i = hypre_CSRMatrixI(A_ext);
      A_ext_j = hypre_CSRMatrixJ(A_ext);
      A_ext_data = hypre_CSRMatrixData(A_ext);
      Sop_i = hypre_CSRMatrixI(Sop);
      Sop_j = hypre_CSRMatrixJ(Sop);
   }
   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/
   /*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
   if (n_fine)
   {
      fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
   }
   if (full_off_procNodes)
   {
      P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
      fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
      tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
   }
   hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
                         fine_to_coarse_offd, P_marker, P_marker_offd,
                         tmp_CF_marker_offd);
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   coarse_counter = 0;
   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/
   for (i = 0; i < n_fine; i++)
   {
      P_diag_i[i] = jj_counter;
      if (num_procs > 1)
         P_offd_i[i] = jj_counter_offd;
      if (CF_marker[i] >= 0)
      {
         /* C-point: one identity entry, and record its coarse index. */
         jj_counter++;
         fine_to_coarse[i] = coarse_counter;
         coarse_counter++;
      }
      /*--------------------------------------------------------------------
       * If i is an F-point, interpolation is from the C-points that
       * strongly influence i, or C-points that strongly influence F-points
       * that strongly influence i.
       *--------------------------------------------------------------------*/
      else
      {
         /* Initialize ccounter for each f point */
         /*ccounter = 0;
           ccounter_offd = 0;*/
         for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
         { /* search through diag to find all c neighbors */
            i1 = S_diag_j[jj];
            if (CF_marker[i1] > 0)
            { /* i1 is a C point */
               /* Temporarily tag direct C-neighbors of i with 2 so the
                * common-C search below can test membership cheaply;
                * restored to 1 at the end of this row. */
               CF_marker[i1] = 2;
               if (P_marker[i1] < P_diag_i[i])
               {
                  P_marker[i1] = jj_counter;
                  jj_counter++;
               }
            }
         }
         if(num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            { /* search through offd to find all c neighbors */
               if(col_offd_S_to_A)
                  i1 = col_offd_S_to_A[S_offd_j[jj]];
               else
                  i1 = S_offd_j[jj];
               if(CF_marker_offd[i1] > 0)
               { /* i1 is a C point direct neighbor */
                  CF_marker_offd[i1] = 2;
                  if(P_marker_offd[i1] < P_offd_i[i])
                  {
                     tmp_CF_marker_offd[i1] = 1;
                     P_marker_offd[i1] = jj_counter_offd;
                     jj_counter_offd++;
                  }
               }
            }
         }
         for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
         { /* Search diag to find f neighbors and determine if common c point */
            i1 = S_diag_j[jj];
            if (CF_marker[i1] < 0)
            { /* i1 is a F point, loop through it's strong neighbors */
               common_c = 0;
               for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
               {
                  k1 = S_diag_j[kk];
                  if (CF_marker[k1] == 2)
                  {
                     common_c = 1;
                     break;
                  }
               }
               if(num_procs > 1 && common_c == 0)
               { /* no common c point yet, check offd */
                  for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
                  {
                     if(col_offd_S_to_A)
                        k1 = col_offd_S_to_A[S_offd_j[kk]];
                     else
                        k1 = S_offd_j[kk];
                     if (CF_marker_offd[k1] == 2)
                     {
                        common_c = 1;
                        break;
                     }
                  }
               }
               if(!common_c)
               { /* No common c point, extend the interp set */
                  for(kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
                  {
                     k1 = S_diag_j[kk];
                     if(CF_marker[k1] > 0)
                     {
                        if(P_marker[k1] < P_diag_i[i])
                        {
                           P_marker[k1] = jj_counter;
                           jj_counter++;
                        }
                     }
                  }
                  if(num_procs > 1)
                  {
                     for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
                     {
                        if(col_offd_S_to_A)
                           k1 = col_offd_S_to_A[S_offd_j[kk]];
                        else
                           k1 = S_offd_j[kk];
                        if (CF_marker_offd[k1] > 0)
                        {
                           if(P_marker_offd[k1] < P_offd_i[i])
                           {
                              tmp_CF_marker_offd[k1] = 1;
                              P_marker_offd[k1] = jj_counter_offd;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
         }
         /* Look at off diag strong connections of i */
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            {
               i1 = S_offd_j[jj];
               if(col_offd_S_to_A)
                  i1 = col_offd_S_to_A[i1];
               if (CF_marker_offd[i1] < 0)
               { /* F point; look at neighbors of i1. Sop contains global col
                  * numbers and entries that could be in S_diag or S_offd or
                  * neither. */
                  common_c = 0;
                  for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
                  { /* Check if common c */
                     k1 = Sop_j[kk];
                     if(k1 >= col_1 && k1 < col_n)
                     { /* In S_diag */
                        loc_col = k1-col_1;
                        if(CF_marker[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                     else
                     {
                        /* Negative entries encode off-diag local columns
                         * as -(loc_col)-1; decode accordingly. */
                        loc_col = -k1 - 1;
                        if(CF_marker_offd[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                  }
                  if(!common_c)
                  {
                     for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
                     { /* Check if common c */
                        k1 = Sop_j[kk];
                        if(k1 >= col_1 && k1 < col_n)
                        { /* In S_diag */
                           loc_col = k1-col_1;
                           if(P_marker[loc_col] < P_diag_i[i])
                           {
                              P_marker[loc_col] = jj_counter;
                              jj_counter++;
                           }
                        }
                        else
                        {
                           loc_col = -k1 - 1;
                           if(P_marker_offd[loc_col] < P_offd_i[i])
                           {
                              P_marker_offd[loc_col] = jj_counter_offd;
                              tmp_CF_marker_offd[loc_col] = 1;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
         }
         /* Undo the temporary "2" tags for this row (restore to plain C). */
         for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
         { /* search through diag to find all c neighbors */
            i1 = S_diag_j[jj];
            if (CF_marker[i1] == 2)
               CF_marker[i1] = 1;
         }
         if(num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            { /* search through offd to find all c neighbors */
               if(col_offd_S_to_A)
                  i1 = col_offd_S_to_A[S_offd_j[jj]];
               else
                  i1 = S_offd_j[jj];
               if(CF_marker_offd[i1] == 2)
               { /* i1 is a C point direct neighbor */
                  CF_marker_offd[i1] = 1;
               }
            }
         }
      }
   }
   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   P_diag_size = jj_counter;
   P_offd_size = jj_counter_offd;
   if (P_diag_size)
   {
      P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size);
      P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size);
   }
   if (P_offd_size)
   {
      P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size);
      P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size);
   }
   P_diag_i[n_fine] = jj_counter;
   P_offd_i[n_fine] = jj_counter_offd;
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   /*ccounter = start_indexing;
     ccounter_offd = start_indexing;*/
   /* Fine to coarse mapping */
   if(num_procs > 1)
   {
      /* Shift to global coarse indices, exchange so neighbors' maps are
       * available in fine_to_coarse_offd, then shift back to local. */
      for (i = 0; i < n_fine; i++)
         fine_to_coarse[i] += my_first_cpt;
      hypre_alt_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
                                 full_off_procNodes,
                                 fine_to_coarse_offd);
      for (i = 0; i < n_fine; i++)
         fine_to_coarse[i] -= my_first_cpt;
   }
   for (i = 0; i < n_fine; i++)
      P_marker[i] = -1;
   for (i = 0; i < full_off_procNodes; i++)
      P_marker_offd[i] = -1;
   /*-----------------------------------------------------------------------
    * Loop over fine grid points.
    *-----------------------------------------------------------------------*/
   jj_begin_row_offd = 0;
   for (i = 0; i < n_fine; i++)
   {
      jj_begin_row = jj_counter;
      if(num_procs > 1)
         jj_begin_row_offd = jj_counter_offd;
      /*--------------------------------------------------------------------
       * If i is a c-point, interpolation is the identity.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] >= 0)
      {
         P_diag_j[jj_counter] = fine_to_coarse[i];
         P_diag_data[jj_counter] = one;
         jj_counter++;
      }
      /*--------------------------------------------------------------------
       * If i is an F-point, build interpolation.
       * (CF_marker[i] == -3 rows are skipped entirely; they get empty rows
       * and the marker is restored to -1 after the loop.)
       *--------------------------------------------------------------------*/
      else if (CF_marker[i] != -3)
      {
         /*ccounter = 0;
           ccounter_offd = 0;*/
         strong_f_marker--;
         for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
         { /* Search C points only */
            i1 = S_diag_j[jj];
            /*--------------------------------------------------------------
             * If neighbor i1 is a C-point, set column number in P_diag_j
             * and initialize interpolation weight to zero.
             *--------------------------------------------------------------*/
            if (CF_marker[i1] > 0)
            {
               CF_marker[i1] = 2;
               if (P_marker[i1] < jj_begin_row)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }
            }
         }
         if ( num_procs > 1)
         {
            for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            {
               if(col_offd_S_to_A)
                  i1 = col_offd_S_to_A[S_offd_j[jj]];
               else
                  i1 = S_offd_j[jj];
               if ( CF_marker_offd[i1] > 0)
               {
                  CF_marker_offd[i1] = 2;
                  if(P_marker_offd[i1] < jj_begin_row_offd)
                  {
                     P_marker_offd[i1] = jj_counter_offd;
                     /* P_offd_j holds local offd indices here; they are
                      * remapped to the final column map by
                      * hypre_build_interp_colmap below. */
                     P_offd_j[jj_counter_offd] = i1;
                     P_offd_data[jj_counter_offd] = zero;
                     jj_counter_offd++;
                  }
               }
            }
         }
         for(jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
         { /* Search through F points */
            i1 = S_diag_j[jj];
            if(CF_marker[i1] == -1)
            {
               /* Tag i1 as a strong F-neighbor of this row; its connections
                * get distributed in the weight loop further down. */
               P_marker[i1] = strong_f_marker;
               common_c = 0;
               for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
               {
                  k1 = S_diag_j[kk];
                  if (CF_marker[k1] == 2)
                  {
                     common_c = 1;
                     break;
                  }
               }
               if(num_procs > 1 && common_c == 0)
               { /* no common c point yet, check offd */
                  for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
                  {
                     if(col_offd_S_to_A)
                        k1 = col_offd_S_to_A[S_offd_j[kk]];
                     else
                        k1 = S_offd_j[kk];
                     if (CF_marker_offd[k1] == 2)
                     {
                        common_c = 1;
                        break;
                     }
                  }
               }
               if(!common_c)
               { /* No common c point, extend the interp set */
                  for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
                  {
                     k1 = S_diag_j[kk];
                     if (CF_marker[k1] >= 0)
                     {
                        if(P_marker[k1] < jj_begin_row)
                        {
                           P_marker[k1] = jj_counter;
                           P_diag_j[jj_counter] = fine_to_coarse[k1];
                           P_diag_data[jj_counter] = zero;
                           jj_counter++;
                        }
                     }
                  }
                  if(num_procs > 1)
                  {
                     for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
                     {
                        if(col_offd_S_to_A)
                           k1 = col_offd_S_to_A[S_offd_j[kk]];
                        else
                           k1 = S_offd_j[kk];
                        if(CF_marker_offd[k1] >= 0)
                        {
                           if(P_marker_offd[k1] < jj_begin_row_offd)
                           {
                              P_marker_offd[k1] = jj_counter_offd;
                              P_offd_j[jj_counter_offd] = k1;
                              P_offd_data[jj_counter_offd] = zero;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
         }
         if ( num_procs > 1)
         {
            for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            {
               i1 = S_offd_j[jj];
               if(col_offd_S_to_A)
                  i1 = col_offd_S_to_A[i1];
               if(CF_marker_offd[i1] == -1)
               { /* F points that are off proc */
                  P_marker_offd[i1] = strong_f_marker;
                  common_c = 0;
                  for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
                  { /* Check if common c */
                     k1 = Sop_j[kk];
                     if(k1 >= col_1 && k1 < col_n)
                     { /* In S_diag */
                        loc_col = k1-col_1;
                        if(CF_marker[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                     else
                     {
                        loc_col = -k1 - 1;
                        if(CF_marker_offd[loc_col] == 2)
                        {
                           common_c = 1;
                           break;
                        }
                     }
                  }
                  if(!common_c)
                  {
                     for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
                     {
                        k1 = Sop_j[kk];
                        /* Find local col number */
                        if(k1 >= col_1 && k1 < col_n)
                        {
                           loc_col = k1-col_1;
                           if(P_marker[loc_col] < jj_begin_row)
                           {
                              P_marker[loc_col] = jj_counter;
                              P_diag_j[jj_counter] = fine_to_coarse[loc_col];
                              P_diag_data[jj_counter] = zero;
                              jj_counter++;
                           }
                        }
                        else
                        {
                           loc_col = -k1 - 1;
                           if(P_marker_offd[loc_col] < jj_begin_row_offd)
                           {
                              P_marker_offd[loc_col] = jj_counter_offd;
                              P_offd_j[jj_counter_offd]=loc_col;
                              P_offd_data[jj_counter_offd] = zero;
                              jj_counter_offd++;
                           }
                        }
                     }
                  }
               }
            }
         }
         /* Restore the temporary "2" tags to plain C-point markers. */
         for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
         { /* Search C points only */
            i1 = S_diag_j[jj];
            /*--------------------------------------------------------------
             * If neighbor i1 is a C-point, set column number in P_diag_j
             * and initialize interpolation weight to zero.
             *--------------------------------------------------------------*/
            if (CF_marker[i1] == 2)
            {
               CF_marker[i1] = 1;
            }
         }
         if ( num_procs > 1)
         {
            for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            {
               if(col_offd_S_to_A)
                  i1 = col_offd_S_to_A[S_offd_j[jj]];
               else
                  i1 = S_offd_j[jj];
               if ( CF_marker_offd[i1] == 2)
               {
                  CF_marker_offd[i1] = 1;
               }
            }
         }
         jj_end_row = jj_counter;
         jj_end_row_offd = jj_counter_offd;
         /* Weight computation.  Relies on the diagonal coefficient being
          * stored first in each CSR row of A_diag (hypre convention —
          * NOTE(review): assumed, confirm against the matrix setup). */
         diagonal = A_diag_data[A_diag_i[i]];
         for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
         { /* i1 is a c-point and strongly influences i, accumulate
            * a_(i,i1) into interpolation weight */
            i1 = A_diag_j[jj];
            if (P_marker[i1] >= jj_begin_row)
            {
               P_diag_data[P_marker[i1]] += A_diag_data[jj];
            }
            else if(P_marker[i1] == strong_f_marker)
            {
               /* i1 is a strong F-neighbor: distribute a_(i,i1) over the
                * C-points in i's interp set, weighted by i1's (sign-
                * filtered) connections to those C-points. */
               sum = zero;
               /* NOTE(review): sgn is set to -1 when i1's diagonal is
                * negative but is never reset to +1 for a later i1 with a
                * positive diagonal — confirm whether a "sgn = 1;" reset is
                * intended here (other interp builders in this file reset
                * sgn per row). */
               if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
               /* Loop over row of A for point i1 and calculate the sum
                * of the connections to c-points that strongly influence i. */
               for(jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
               {
                  i2 = A_diag_j[jj1];
                  if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
                     sum += A_diag_data[jj1];
               }
               if(num_procs > 1)
               {
                  for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
                  {
                     i2 = A_offd_j[jj1];
                     if(P_marker_offd[i2] >= jj_begin_row_offd &&
                        (sgn*A_offd_data[jj1]) < 0)
                        sum += A_offd_data[jj1];
                  }
               }
               if(sum != 0)
               {
                  distribute = A_diag_data[jj]/sum;
                  /* Loop over row of A for point i1 and do the distribution */
                  for(jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
                  {
                     i2 = A_diag_j[jj1];
                     if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
                        P_diag_data[P_marker[i2]] +=
                           distribute*A_diag_data[jj1];
                  }
                  if(num_procs > 1)
                  {
                     for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                     {
                        i2 = A_offd_j[jj1];
                        if(P_marker_offd[i2] >= jj_begin_row_offd &&
                           (sgn*A_offd_data[jj1]) < 0)
                           P_offd_data[P_marker_offd[i2]] +=
                              distribute*A_offd_data[jj1];
                     }
                  }
               }
               else
                  /* Nothing to distribute to: lump a_(i,i1) into the diagonal. */
                  diagonal += A_diag_data[jj];
            }
            /* neighbor i1 weakly influences i, accumulate a_(i,i1) into
             * diagonal */
            else if (CF_marker[i1] != -3)
            {
               if(num_functions == 1 || dof_func[i] == dof_func[i1])
                  diagonal += A_diag_data[jj];
            }
         }
         if(num_procs > 1)
         {
            for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
            {
               i1 = A_offd_j[jj];
               if(P_marker_offd[i1] >= jj_begin_row_offd)
                  P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
               else if(P_marker_offd[i1] == strong_f_marker)
               {
                  /* Off-proc strong F-neighbor: distribute using its full
                   * row from A_ext (global/encoded column indices). */
                  sum = zero;
                  for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
                  {
                     k1 = A_ext_j[jj1];
                     if(k1 >= col_1 && k1 < col_n)
                     { /* diag */
                        loc_col = k1 - col_1;
                        if(P_marker[loc_col] >= jj_begin_row)
                           sum += A_ext_data[jj1];
                     }
                     else
                     {
                        loc_col = -k1 - 1;
                        if(P_marker_offd[loc_col] >= jj_begin_row_offd)
                           sum += A_ext_data[jj1];
                     }
                  }
                  if(sum != 0)
                  {
                     distribute = A_offd_data[jj] / sum;
                     for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
                     {
                        k1 = A_ext_j[jj1];
                        if(k1 >= col_1 && k1 < col_n)
                        { /* diag */
                           loc_col = k1 - col_1;
                           if(P_marker[loc_col] >= jj_begin_row)
                              P_diag_data[P_marker[loc_col]] += distribute*
                                 A_ext_data[jj1];
                        }
                        else
                        {
                           loc_col = -k1 - 1;
                           if(P_marker_offd[loc_col] >= jj_begin_row_offd)
                              P_offd_data[P_marker_offd[loc_col]] += distribute*
                                 A_ext_data[jj1];
                        }
                     }
                  }
                  else
                     diagonal += A_offd_data[jj];
               }
               else if (CF_marker_offd[i1] != -3)
               {
                  if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                     diagonal += A_offd_data[jj];
               }
            }
         }
         if (diagonal)
         {
            /* Classical interpolation weights: w = -a_(i,c) / diagonal. */
            for(jj = jj_begin_row; jj < jj_end_row; jj++)
               P_diag_data[jj] /= -diagonal;
            for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
               P_offd_data[jj] /= -diagonal;
         }
      }
      strong_f_marker--;
   }
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   /* Row starts are borrowed from A; P must not free them. */
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      /* Truncation reallocates the CSR arrays; re-fetch the pointers. */
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }
   /* This builds col_map, col_map should be monotone increasing and contain
    * global numbers. */
   if(P_offd_size)
   {
      hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
   }
   hypre_MatvecCommPkgCreate(P);
   /* Restore the special -3 markers to ordinary F-point markers. */
   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;
   *P_ptr = P;
   /* Deallocate memory */
   hypre_TFree(fine_to_coarse);
   hypre_TFree(P_marker);
   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(Sop);
      hypre_CSRMatrixDestroy(A_ext);
      hypre_TFree(fine_to_coarse_offd);
      hypre_TFree(P_marker_offd);
      hypre_TFree(CF_marker_offd);
      hypre_TFree(tmp_CF_marker_offd);
      if(num_functions > 1)
         hypre_TFree(dof_func_offd);
      hypre_MatvecCommPkgDestroy(extend_comm_pkg);
   }
   return hypre_error_flag;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildFF1Interp
* Comment: Only use FF when there is no common c point.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildFF1Interp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S, HYPRE_Int *num_cpts_global,
HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr)
{
/* Communication Variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int my_id, num_procs;
/* Variables to store input variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int col_n = col_1 + local_numrows;
HYPRE_Int total_global_cpts, my_first_cpt;
/* Variables to store strong connection matrix info */
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
/* Interpolation matrix P */
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data = NULL;
HYPRE_Int *P_diag_i, *P_diag_j = NULL;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i, *P_offd_j = NULL;
/*HYPRE_Int *col_map_offd_P = NULL;*/
HYPRE_Int P_diag_size;
HYPRE_Int P_offd_size;
HYPRE_Int *P_marker = NULL;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *tmp_CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
/*HYPRE_Int ccounter_offd;*/
HYPRE_Int common_c;
/* Full row information for columns of A that are off diag*/
hypre_CSRMatrix *A_ext;
HYPRE_Real *A_ext_data;
HYPRE_Int *A_ext_i;
HYPRE_Int *A_ext_j;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_Int *fine_to_coarse_offd = NULL;
HYPRE_Int loc_col;
HYPRE_Int full_off_procNodes;
hypre_CSRMatrix *Sop;
HYPRE_Int *Sop_i;
HYPRE_Int *Sop_j;
/* Variables to keep count of interpolatory points */
HYPRE_Int jj_counter, jj_counter_offd;
HYPRE_Int jj_begin_row, jj_end_row;
HYPRE_Int jj_begin_row_offd = 0;
HYPRE_Int jj_end_row_offd = 0;
HYPRE_Int coarse_counter;
/* Interpolation weight variables */
HYPRE_Real sum, diagonal, distribute;
HYPRE_Int strong_f_marker = -2;
HYPRE_Int sgn = 1;
/* Loop variables */
/*HYPRE_Int index;*/
HYPRE_Int start_indexing = 0;
HYPRE_Int i, i1, i2, jj, kk, k1, jj1;
/*HYPRE_Int ccounter;*/
HYPRE_Int found_c = 0;
/* Definitions */
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
hypre_ParCSRCommPkg *extend_comm_pkg = NULL;
/* BEGIN */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
/* Set up off processor information (specifically for neighbors of
* neighbors */
full_off_procNodes = 0;
if (num_procs > 1)
{
hypre_exchange_interp_data(
&CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
A, CF_marker, S, num_functions, dof_func, 1);
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
}
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixJ(A_ext);
A_ext_data = hypre_CSRMatrixData(A_ext);
Sop_i = hypre_CSRMatrixI(Sop);
Sop_j = hypre_CSRMatrixJ(Sop);
}
/*-----------------------------------------------------------------------
* First Pass: Determine size of P and fill in fine_to_coarse mapping.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
if (n_fine)
{
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
}
if (full_off_procNodes)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
}
hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
fine_to_coarse_offd, P_marker, P_marker_offd,
tmp_CF_marker_offd);
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
coarse_counter = 0;
/*-----------------------------------------------------------------------
* Loop over fine grid.
*-----------------------------------------------------------------------*/
for (i = 0; i < n_fine; i++)
{
P_diag_i[i] = jj_counter;
if (num_procs > 1)
P_offd_i[i] = jj_counter_offd;
if (CF_marker[i] >= 0)
{
jj_counter++;
fine_to_coarse[i] = coarse_counter;
coarse_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, interpolation is from the C-points that
* strongly influence i, or C-points that stronly influence F-points
* that strongly influence i.
*--------------------------------------------------------------------*/
else
{
/* Initialize ccounter for each f point */
/*ccounter = 0;
ccounter_offd = 0;*/
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* search through diag to find all c neighbors */
i1 = S_diag_j[jj];
if (CF_marker[i1] > 0)
{ /* i1 is a C point */
CF_marker[i1] = 2;
if (P_marker[i1] < P_diag_i[i])
{
P_marker[i1] = jj_counter;
jj_counter++;
}
}
}
if(num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{ /* search through offd to find all c neighbors */
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if(CF_marker_offd[i1] > 0)
{ /* i1 is a C point direct neighbor */
CF_marker_offd[i1] = 2;
if(P_marker_offd[i1] < P_offd_i[i])
{
tmp_CF_marker_offd[i1] = 1;
P_marker_offd[i1] = jj_counter_offd;
jj_counter_offd++;
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search diag to find f neighbors and determine if common c point */
i1 = S_diag_j[jj];
if (CF_marker[i1] < 0)
{ /* i1 is a F point, loop through it's strong neighbors */
common_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] == 2)
{
common_c = 1;
break;
}
}
if(num_procs > 1 && common_c == 0)
{ /* no common c point yet, check offd */
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] == 2)
{ /* k1 is a c point check if it is common */
common_c = 1;
break;
}
}
}
if(!common_c)
{ /* No common c point, extend the interp set */
found_c = 0;
for(kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if(CF_marker[k1] > 0)
{
if(P_marker[k1] < P_diag_i[i])
{
P_marker[k1] = jj_counter;
jj_counter++;
found_c = 1;
break;
}
}
}
if(num_procs > 1 && !found_c)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] > 0)
{
if(P_marker_offd[k1] < P_offd_i[i])
{
tmp_CF_marker_offd[k1] = 1;
P_marker_offd[k1] = jj_counter_offd;
jj_counter_offd++;
break;
}
}
}
}
}
}
}
/* Look at off diag strong connections of i */
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if (CF_marker_offd[i1] < 0)
{ /* F point; look at neighbors of i1. Sop contains global col
* numbers and entries that could be in S_diag or S_offd or
* neither. */
common_c = 0;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
k1 = Sop_j[kk];
if(k1 >= col_1 && k1 < col_n)
{ /* In S_diag */
loc_col = k1-col_1;
if(CF_marker[loc_col] == 2)
{
common_c = 1;
break;
}
}
else
{
loc_col = -k1 - 1;
if(CF_marker_offd[loc_col] == 2)
{
common_c = 1;
break;
}
}
}
if(!common_c)
{
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
k1 = Sop_j[kk];
if(k1 >= col_1 && k1 < col_n)
{ /* In S_diag */
loc_col = k1-col_1;
if(P_marker[loc_col] < P_diag_i[i])
{
P_marker[loc_col] = jj_counter;
jj_counter++;
break;
}
}
else
{
loc_col = -k1 - 1;
if(P_marker_offd[loc_col] < P_offd_i[i])
{
P_marker_offd[loc_col] = jj_counter_offd;
tmp_CF_marker_offd[loc_col] = 1;
jj_counter_offd++;
break;
}
}
}
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* search through diag to find all c neighbors */
i1 = S_diag_j[jj];
if (CF_marker[i1] == 2)
CF_marker[i1] = 1;
}
if(num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{ /* search through offd to find all c neighbors */
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if(CF_marker_offd[i1] == 2)
{ /* i1 is a C point direct neighbor */
CF_marker_offd[i1] = 1;
}
}
}
}
}
/*-----------------------------------------------------------------------
* Allocate arrays.
*-----------------------------------------------------------------------*/
P_diag_size = jj_counter;
P_offd_size = jj_counter_offd;
if (P_diag_size)
{
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size);
}
if (P_offd_size)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size);
}
P_diag_i[n_fine] = jj_counter;
P_offd_i[n_fine] = jj_counter_offd;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*ccounter = start_indexing;
ccounter_offd = start_indexing;*/
/* Fine to coarse mapping */
if(num_procs > 1)
{
for (i = 0; i < n_fine; i++)
fine_to_coarse[i] += my_first_cpt;
hypre_alt_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
full_off_procNodes,
fine_to_coarse_offd);
for (i = 0; i < n_fine; i++)
fine_to_coarse[i] -= my_first_cpt;
}
for (i = 0; i < n_fine; i++)
P_marker[i] = -1;
for (i = 0; i < full_off_procNodes; i++)
P_marker_offd[i] = -1;
/*-----------------------------------------------------------------------
* Loop over fine grid points.
*-----------------------------------------------------------------------*/
jj_begin_row_offd = 0;
for (i = 0; i < n_fine; i++)
{
jj_begin_row = jj_counter;
if(num_procs > 1)
jj_begin_row_offd = jj_counter_offd;
/*--------------------------------------------------------------------
* If i is a c-point, interpolation is the identity.
*--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
* If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/
else if (CF_marker[i] != -3)
{
/*ccounter = 0;
ccounter_offd = 0;*/
strong_f_marker--;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search C points only */
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] > 0)
{
CF_marker[i1] = 2;
if (P_marker[i1] < jj_begin_row)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] > 0)
{
CF_marker_offd[i1] = 2;
if(P_marker_offd[i1] < jj_begin_row_offd)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
}
}
}
for(jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search through F points */
i1 = S_diag_j[jj];
if(CF_marker[i1] == -1)
{
P_marker[i1] = strong_f_marker;
common_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] == 2)
{
common_c = 1;
break;
}
}
if(num_procs > 1 && common_c == 0)
{ /* no common c point yet, check offd */
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if (CF_marker_offd[k1] == 2)
{ /* k1 is a c point check if it is common */
common_c = 1;
break;
}
}
}
if(!common_c)
{ /* No common c point, extend the interp set */
found_c = 0;
for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
{
k1 = S_diag_j[kk];
if (CF_marker[k1] >= 0)
{
if(P_marker[k1] < jj_begin_row)
{
P_marker[k1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[k1];
P_diag_data[jj_counter] = zero;
jj_counter++;
found_c = 1;
break;
}
}
}
if(num_procs > 1 && !found_c)
{
for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
{
if(col_offd_S_to_A)
k1 = col_offd_S_to_A[S_offd_j[kk]];
else
k1 = S_offd_j[kk];
if(CF_marker_offd[k1] >= 0)
{
if(P_marker_offd[k1] < jj_begin_row_offd)
{
P_marker_offd[k1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = k1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
break;
}
}
}
}
}
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[i1];
if(CF_marker_offd[i1] == -1)
{ /* F points that are off proc */
P_marker_offd[i1] = strong_f_marker;
common_c = 0;
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{ /* Check if common c */
k1 = Sop_j[kk];
if(k1 >= col_1 && k1 < col_n)
{ /* In S_diag */
loc_col = k1-col_1;
if(CF_marker[loc_col] == 2)
{
common_c = 1;
break;
}
}
else
{
loc_col = -k1 - 1;
if(CF_marker_offd[loc_col] == 2)
{
common_c = 1;
break;
}
}
}
if(!common_c)
{
for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
{
k1 = Sop_j[kk];
/* Find local col number */
if(k1 >= col_1 && k1 < col_n)
{
loc_col = k1-col_1;
if(P_marker[loc_col] < jj_begin_row)
{
P_marker[loc_col] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[loc_col];
P_diag_data[jj_counter] = zero;
jj_counter++;
break;
}
}
else
{
loc_col = -k1 - 1;
if(P_marker_offd[loc_col] < jj_begin_row_offd)
{
P_marker_offd[loc_col] = jj_counter_offd;
P_offd_j[jj_counter_offd]=loc_col;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
break;
}
}
}
}
}
}
}
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{ /* Search C points only */
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
* If neighbor i1 is a C-point, set column number in P_diag_j
* and initialize interpolation weight to zero.
*--------------------------------------------------------------*/
if (CF_marker[i1] == 2)
{
CF_marker[i1] = 1;
}
}
if ( num_procs > 1)
{
for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
if(col_offd_S_to_A)
i1 = col_offd_S_to_A[S_offd_j[jj]];
else
i1 = S_offd_j[jj];
if ( CF_marker_offd[i1] == 2)
{
CF_marker_offd[i1] = 1;
}
}
}
jj_end_row = jj_counter;
jj_end_row_offd = jj_counter_offd;
diagonal = A_diag_data[A_diag_i[i]];
for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
{ /* i1 is a c-point and strongly influences i, accumulate
* a_(i,i1) into interpolation weight */
i1 = A_diag_j[jj];
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += A_diag_data[jj];
}
else if(P_marker[i1] == strong_f_marker)
{
sum = zero;
if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
/* Loop over row of A for point i1 and calculate the sum
* of the connections to c-points that strongly influence i. */
for(jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
sum += A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
sum += A_offd_data[jj1];
}
}
if(sum != 0)
{
distribute = A_diag_data[jj]/sum;
/* Loop over row of A for point i1 and do the distribution */
for(jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++)
{
i2 = A_diag_j[jj1];
if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
P_diag_data[P_marker[i2]] +=
distribute*A_diag_data[jj1];
}
if(num_procs > 1)
{
for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
{
i2 = A_offd_j[jj1];
if(P_marker_offd[i2] >= jj_begin_row_offd &&
(sgn*A_offd_data[jj1]) < 0)
P_offd_data[P_marker_offd[i2]] +=
distribute*A_offd_data[jj1];
}
}
}
else
diagonal += A_diag_data[jj];
}
/* neighbor i1 weakly influences i, accumulate a_(i,i1) into
* diagonal */
else if (CF_marker[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func[i1])
diagonal += A_diag_data[jj];
}
}
if(num_procs > 1)
{
for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
i1 = A_offd_j[jj];
if(P_marker_offd[i1] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
else if(P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
k1 = A_ext_j[jj1];
if(k1 >= col_1 && k1 < col_n)
{ /* diag */
loc_col = k1 - col_1;
if(P_marker[loc_col] >= jj_begin_row)
sum += A_ext_data[jj1];
}
else
{
loc_col = -k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd)
sum += A_ext_data[jj1];
}
}
if(sum != 0)
{
distribute = A_offd_data[jj] / sum;
for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
{
k1 = A_ext_j[jj1];
if(k1 >= col_1 && k1 < col_n)
{ /* diag */
loc_col = k1 - col_1;
if(P_marker[loc_col] >= jj_begin_row)
P_diag_data[P_marker[loc_col]] += distribute*
A_ext_data[jj1];
}
else
{
loc_col = -k1 - 1;
if(P_marker_offd[loc_col] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[loc_col]] += distribute*
A_ext_data[jj1];
}
}
}
else
diagonal += A_offd_data[jj];
}
else if (CF_marker_offd[i1] != -3)
{
if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
diagonal += A_offd_data[jj];
}
}
}
if (diagonal)
{
for(jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= -diagonal;
for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= -diagonal;
}
}
strong_f_marker--;
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0 || max_elmts > 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* This builds col_map, col_map should be monotone increasing and contain
* global numbers. */
if(P_offd_size)
{
hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
}
hypre_MatvecCommPkgCreate(P);
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
*P_ptr = P;
/* Deallocate memory */
hypre_TFree(fine_to_coarse);
hypre_TFree(P_marker);
/*hynre_TFree(clist);*/
if (num_procs > 1)
{
/*hypre_TFree(clist_offd);*/
hypre_CSRMatrixDestroy(Sop);
hypre_CSRMatrixDestroy(A_ext);
hypre_TFree(fine_to_coarse_offd);
hypre_TFree(P_marker_offd);
hypre_TFree(CF_marker_offd);
hypre_TFree(tmp_CF_marker_offd);
if(num_functions > 1)
hypre_TFree(dof_func_offd);
hypre_MatvecCommPkgDestroy(extend_comm_pkg);
}
return hypre_error_flag;
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildExtInterp
 * Comment: Builds the extended ("ext") distance-two classical interpolation
 * operator P for BoomerAMG.  Each F-point interpolates from the C-points
 * that strongly influence it and, through its strong F-neighbors, from
 * C-points up to distance two away.  Two sweeps over the fine grid are made:
 * pass 1 determines the sparsity pattern (sizes of P_diag / P_offd), pass 2
 * fills in column indices and interpolation weights.  Strong-F connections
 * are distributed among the interpolatory C-points, weak connections are
 * lumped into the diagonal, and each row is finally scaled by -1/diagonal.
 * Off-processor data (CF_marker_offd, A_ext, Sop, ...) is gathered up front
 * by hypre_exchange_interp_data.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildExtInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker,
                              hypre_ParCSRMatrix *S, HYPRE_Int *num_cpts_global,
                              HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag,
                              HYPRE_Real trunc_factor, HYPRE_Int max_elmts,
                              HYPRE_Int *col_offd_S_to_A,
                              hypre_ParCSRMatrix **P_ptr)
{
   /* Communication Variables */
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   HYPRE_Int my_id, num_procs;

   /* Variables to store input variables */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);

   /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/

   /* [col_1, col_n) is the range of global column indices owned by this
    * processor; global indices outside it belong to the offd part. */
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int col_1 = hypre_ParCSRMatrixFirstRowIndex(A);
   HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int col_n = col_1 + local_numrows;
   HYPRE_Int total_global_cpts, my_first_cpt;

   /* Variables to store strong connection matrix info */
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);

   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);

   /* Interpolation matrix P */
   hypre_ParCSRMatrix *P;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;

   HYPRE_Real *P_diag_data = NULL;
   HYPRE_Int *P_diag_i, *P_diag_j = NULL;
   HYPRE_Real *P_offd_data = NULL;
   HYPRE_Int *P_offd_i, *P_offd_j = NULL;

   /*HYPRE_Int *col_map_offd_P = NULL;*/
   HYPRE_Int P_diag_size;
   HYPRE_Int P_offd_size;

   /* P_marker / P_marker_offd record, per column, the jj_counter value at
    * which the column was last added to a row of P; a value below the
    * current row's start index means "not yet in this row". */
   HYPRE_Int *P_marker = NULL;
   HYPRE_Int *P_marker_offd = NULL;
   HYPRE_Int *CF_marker_offd = NULL;
   HYPRE_Int *tmp_CF_marker_offd = NULL;
   HYPRE_Int *dof_func_offd = NULL;

   /* Full row information for columns of A that are off diag*/
   hypre_CSRMatrix *A_ext;
   HYPRE_Real *A_ext_data;
   HYPRE_Int *A_ext_i;
   HYPRE_Int *A_ext_j;

   HYPRE_Int *fine_to_coarse = NULL;
   HYPRE_Int *fine_to_coarse_offd = NULL;

   HYPRE_Int loc_col;
   HYPRE_Int full_off_procNodes;

   hypre_CSRMatrix *Sop;
   HYPRE_Int *Sop_i;
   HYPRE_Int *Sop_j;

   HYPRE_Int sgn = 1;

   /* Variables to keep count of interpolatory points */
   HYPRE_Int jj_counter, jj_counter_offd;
   HYPRE_Int jj_begin_row, jj_end_row;
   HYPRE_Int jj_begin_row_offd = 0;
   HYPRE_Int jj_end_row_offd = 0;
   HYPRE_Int coarse_counter;

   /* Interpolation weight variables */
   HYPRE_Real sum, diagonal, distribute;
   /* Marker for strong F-neighbors of the current row; decremented for every
    * row so that a stale marker from an earlier row can never be mistaken
    * for the current one. */
   HYPRE_Int strong_f_marker = -2;

   /* Loop variables */
   /*HYPRE_Int index;*/
   HYPRE_Int start_indexing = 0;
   HYPRE_Int i, i1, i2, jj, kk, k1, jj1;

   /* Definitions */
   HYPRE_Real zero = 0.0;
   HYPRE_Real one = 1.0;
   HYPRE_Real wall_time;

   hypre_ParCSRCommPkg *extend_comm_pkg = NULL;

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   /* BEGIN */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);

   /* Determine this proc's first coarse point and the global C-point count;
    * with no global partition the last proc owns the total and broadcasts it. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   my_first_cpt = num_cpts_global[0];
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
   my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   /* Set up off processor information (specifically for neighbors of
    * neighbors */
   full_off_procNodes = 0;
   if (num_procs > 1)
   {
      hypre_exchange_interp_data(
         &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg,
         A, CF_marker, S, num_functions, dof_func, 1);
      {
         /* NOTE(review): the profile timer is incremented here without a
          * matching decrement visible in this routine — confirm intended. */
#ifdef HYPRE_PROFILE
         hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime();
#endif
      }

      A_ext_i = hypre_CSRMatrixI(A_ext);
      A_ext_j = hypre_CSRMatrixJ(A_ext);
      A_ext_data = hypre_CSRMatrixData(A_ext);

      Sop_i = hypre_CSRMatrixI(Sop);
      Sop_j = hypre_CSRMatrixJ(Sop);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);

   if (n_fine)
   {
      fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine);
   }

   if (full_off_procNodes)
   {
      P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
      fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
      tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes);
   }

   hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse,
                         fine_to_coarse_offd, P_marker, P_marker_offd,
                         tmp_CF_marker_offd);

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   coarse_counter = 0;

   /*-----------------------------------------------------------------------
    * Loop over fine grid: count nonzeros of each row of P.  C-points get a
    * single identity entry; F-points (CF_marker == -1; -3 marks excluded
    * points, restored to -1 at the end) collect C-points at distance one
    * and two through strong connections.
    *-----------------------------------------------------------------------*/
   for (i = 0; i < n_fine; i++)
   {
      P_diag_i[i] = jj_counter;
      if (num_procs > 1)
         P_offd_i[i] = jj_counter_offd;

      if (CF_marker[i] >= 0)
      {
         jj_counter++;
         fine_to_coarse[i] = coarse_counter;
         coarse_counter++;
      }
      /*--------------------------------------------------------------------
       * If i is an F-point, interpolation is from the C-points that
       * strongly influence i, or C-points that strongly influence F-points
       * that strongly influence i.
       *--------------------------------------------------------------------*/
      else if (CF_marker[i] != -3)
      {
         for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
         {
            i1 = S_diag_j[jj];
            if (CF_marker[i1] >= 0)
            { /* i1 is a C point */
               if (P_marker[i1] < P_diag_i[i])
               {
                  P_marker[i1] = jj_counter;
                  jj_counter++;
               }
            }
            else if (CF_marker[i1] != -3)
            { /* i1 is a F point, loop through it's strong neighbors */
               for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
               {
                  k1 = S_diag_j[kk];
                  if (CF_marker[k1] >= 0)
                  {
                     if(P_marker[k1] < P_diag_i[i])
                     {
                        P_marker[k1] = jj_counter;
                        jj_counter++;
                     }
                  }
               }
               if(num_procs > 1)
               {
                  for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
                  {
                     if(col_offd_S_to_A)
                        k1 = col_offd_S_to_A[S_offd_j[kk]];
                     else
                        k1 = S_offd_j[kk];
                     if (CF_marker_offd[k1] >= 0)
                     {
                        if(P_marker_offd[k1] < P_offd_i[i])
                        {
                           tmp_CF_marker_offd[k1] = 1;
                           P_marker_offd[k1] = jj_counter_offd;
                           jj_counter_offd++;
                        }
                     }
                  }
               }
            }
         }
         /* Look at off diag strong connections of i */
         if (num_procs > 1)
         {
            for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            {
               i1 = S_offd_j[jj];
               if(col_offd_S_to_A)
                  i1 = col_offd_S_to_A[i1];
               if (CF_marker_offd[i1] >= 0)
               {
                  if(P_marker_offd[i1] < P_offd_i[i])
                  {
                     tmp_CF_marker_offd[i1] = 1;
                     P_marker_offd[i1] = jj_counter_offd;
                     jj_counter_offd++;
                  }
               }
               else if (CF_marker_offd[i1] != -3)
               { /* F point; look at neighbors of i1. Sop contains global col
                  * numbers and entries that could be in S_diag or S_offd or
                  * neither. */
                  for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
                  {
                     k1 = Sop_j[kk];
                     if(k1 >= col_1 && k1 < col_n)
                     { /* In S_diag */
                        loc_col = k1-col_1;
                        if(P_marker[loc_col] < P_diag_i[i])
                        {
                           P_marker[loc_col] = jj_counter;
                           jj_counter++;
                        }
                     }
                     else
                     {
                        /* Off-processor column: encoded as -(local offd
                         * index)-1 (encoding produced by
                         * hypre_exchange_interp_data — see that routine). */
                        loc_col = -k1 - 1;
                        if(P_marker_offd[loc_col] < P_offd_i[i])
                        {
                           P_marker_offd[loc_col] = jj_counter_offd;
                           tmp_CF_marker_offd[loc_col] = 1;
                           jj_counter_offd++;
                        }
                     }
                  }
               }
            }
         }
      }
   }

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d determine structure %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   if (debug_flag== 4) wall_time = time_getWallclockSeconds();

   P_diag_size = jj_counter;
   P_offd_size = jj_counter_offd;

   if (P_diag_size)
   {
      P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size);
      P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size);
   }

   if (P_offd_size)
   {
      P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size);
      P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size);
   }

   P_diag_i[n_fine] = jj_counter;
   P_offd_i[n_fine] = jj_counter_offd;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /* Fine to coarse mapping */
   if(num_procs > 1)
   {
      /* Shift to global coarse numbering for the exchange, then back. */
      for (i = 0; i < n_fine; i++)
         fine_to_coarse[i] += my_first_cpt;

      hypre_alt_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse,
                                 full_off_procNodes,
                                 fine_to_coarse_offd);

      for (i = 0; i < n_fine; i++)
         fine_to_coarse[i] -= my_first_cpt;
   }

   for (i = 0; i < n_fine; i++)
      P_marker[i] = -1;

   for (i = 0; i < full_off_procNodes; i++)
      P_marker_offd[i] = -1;

   /*-----------------------------------------------------------------------
    * Loop over fine grid points: second pass fills column indices and
    * accumulates interpolation weights, then scales each row by -1/diagonal.
    *-----------------------------------------------------------------------*/
   for (i = 0; i < n_fine; i++)
   {
      jj_begin_row = jj_counter;
      jj_begin_row_offd = jj_counter_offd;

      /*--------------------------------------------------------------------
       * If i is a c-point, interpolation is the identity.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] >= 0)
      {
         P_diag_j[jj_counter] = fine_to_coarse[i];
         P_diag_data[jj_counter] = one;
         jj_counter++;
      }
      /*--------------------------------------------------------------------
       * If i is an F-point, build interpolation.
       *--------------------------------------------------------------------*/
      else if (CF_marker[i] != -3)
      {
         strong_f_marker--;

         /* Diag part of S: enter distance-one C-points; tag strong
          * F-neighbors and enter their C-neighbors (distance two). */
         for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
         {
            i1 = S_diag_j[jj];

            /*--------------------------------------------------------------
             * If neighbor i1 is a C-point, set column number in P_diag_j
             * and initialize interpolation weight to zero.
             *--------------------------------------------------------------*/
            if (CF_marker[i1] >= 0)
            {
               if (P_marker[i1] < jj_begin_row)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  P_diag_data[jj_counter] = zero;
                  jj_counter++;
               }
            }
            else if (CF_marker[i1] != -3)
            {
               P_marker[i1] = strong_f_marker;
               for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
               {
                  k1 = S_diag_j[kk];
                  if (CF_marker[k1] >= 0)
                  {
                     if(P_marker[k1] < jj_begin_row)
                     {
                        P_marker[k1] = jj_counter;
                        P_diag_j[jj_counter] = fine_to_coarse[k1];
                        P_diag_data[jj_counter] = zero;
                        jj_counter++;
                     }
                  }
               }
               if(num_procs > 1)
               {
                  for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
                  {
                     if(col_offd_S_to_A)
                        k1 = col_offd_S_to_A[S_offd_j[kk]];
                     else
                        k1 = S_offd_j[kk];
                     if(CF_marker_offd[k1] >= 0)
                     {
                        if(P_marker_offd[k1] < jj_begin_row_offd)
                        {
                           P_marker_offd[k1] = jj_counter_offd;
                           P_offd_j[jj_counter_offd] = k1;
                           P_offd_data[jj_counter_offd] = zero;
                           jj_counter_offd++;
                        }
                     }
                  }
               }
            }
         }

         /* Offd part of S: same treatment for off-processor neighbors,
          * using Sop for rows of off-processor F-points. */
         if ( num_procs > 1)
         {
            for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
            {
               i1 = S_offd_j[jj];
               if(col_offd_S_to_A)
                  i1 = col_offd_S_to_A[i1];
               if ( CF_marker_offd[i1] >= 0)
               {
                  if(P_marker_offd[i1] < jj_begin_row_offd)
                  {
                     P_marker_offd[i1] = jj_counter_offd;
                     P_offd_j[jj_counter_offd] = i1;
                     P_offd_data[jj_counter_offd] = zero;
                     jj_counter_offd++;
                  }
               }
               else if (CF_marker_offd[i1] != -3)
               {
                  P_marker_offd[i1] = strong_f_marker;
                  for(kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
                  {
                     k1 = Sop_j[kk];
                     /* Find local col number */
                     if(k1 >= col_1 && k1 < col_n)
                     {
                        loc_col = k1-col_1;
                        if(P_marker[loc_col] < jj_begin_row)
                        {
                           P_marker[loc_col] = jj_counter;
                           P_diag_j[jj_counter] = fine_to_coarse[loc_col];
                           P_diag_data[jj_counter] = zero;
                           jj_counter++;
                        }
                     }
                     else
                     {
                        /* negative entry: -(local offd index)-1 */
                        loc_col = -k1 - 1;
                        if(P_marker_offd[loc_col] < jj_begin_row_offd)
                        {
                           P_marker_offd[loc_col] = jj_counter_offd;
                           P_offd_j[jj_counter_offd]=loc_col;
                           P_offd_data[jj_counter_offd] = zero;
                           jj_counter_offd++;
                        }
                     }
                  }
               }
            }
         }

         jj_end_row = jj_counter;
         jj_end_row_offd = jj_counter_offd;

         /* Accumulate weights: start from the diagonal entry of row i. */
         diagonal = A_diag_data[A_diag_i[i]];

         for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
         { /* i1 is a c-point and strongly influences i, accumulate
            * a_(i,i1) into interpolation weight */
            i1 = A_diag_j[jj];
            if (P_marker[i1] >= jj_begin_row)
            {
               P_diag_data[P_marker[i1]] += A_diag_data[jj];
            }
            else if(P_marker[i1] == strong_f_marker)
            {
               /* i1 is a strong F-neighbor: distribute a_(i,i1) among the
                * interpolatory points of row i in proportion to row i1's
                * connections to them, keeping only entries whose sign is
                * opposite to i1's diagonal (sgn filter). */
               sum = zero;
               sgn = 1;
               if(A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
               /* Loop over row of A for point i1 and calculate the sum
                * of the connections to c-points that strongly influence i. */
               for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
               {
                  i2 = A_diag_j[jj1];
                  if((P_marker[i2] >= jj_begin_row ) && (sgn*A_diag_data[jj1]) < 0)
                     sum += A_diag_data[jj1];
               }
               if(num_procs > 1)
               {
                  for(jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++)
                  {
                     i2 = A_offd_j[jj1];
                     if(P_marker_offd[i2] >= jj_begin_row_offd &&
                        (sgn*A_offd_data[jj1]) < 0)
                        sum += A_offd_data[jj1];
                  }
               }
               if(sum != 0)
               {
                  distribute = A_diag_data[jj]/sum;
                  /* Loop over row of A for point i1 and do the distribution */
                  for(jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++)
                  {
                     i2 = A_diag_j[jj1];
                     if(P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0)
                        P_diag_data[P_marker[i2]] +=
                           distribute*A_diag_data[jj1];
                  }
                  if(num_procs > 1)
                  {
                     for(jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++)
                     {
                        i2 = A_offd_j[jj1];
                        if(P_marker_offd[i2] >= jj_begin_row_offd &&
                           (sgn*A_offd_data[jj1]) < 0)
                           P_offd_data[P_marker_offd[i2]] +=
                              distribute*A_offd_data[jj1];
                     }
                  }
               }
               else
               {
                  /* No common C-point with matching sign: lump into diagonal. */
                  diagonal += A_diag_data[jj];
               }
            }
            /* neighbor i1 weakly influences i, accumulate a_(i,i1) into
             * diagonal */
            else if (CF_marker[i1] != -3)
            {
               if(num_functions == 1 || dof_func[i] == dof_func[i1])
                  diagonal += A_diag_data[jj];
            }
         }
         if(num_procs > 1)
         {
            for(jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
            {
               i1 = A_offd_j[jj];
               if(P_marker_offd[i1] >= jj_begin_row_offd)
                  P_offd_data[P_marker_offd[i1]] += A_offd_data[jj];
               else if(P_marker_offd[i1] == strong_f_marker)
               {
                  /* Off-processor strong F-neighbor: use its full row A_ext
                   * (global column numbers) for the distribution.  NOTE: no
                   * sgn filter is applied here, unlike the diag branch. */
                  sum = zero;
                  for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
                  {
                     k1 = A_ext_j[jj1];
                     if(k1 >= col_1 && k1 < col_n)
                     { /* diag */
                        loc_col = k1 - col_1;
                        if(P_marker[loc_col] >= jj_begin_row )
                           sum += A_ext_data[jj1];
                     }
                     else
                     {
                        loc_col = -k1 - 1;
                        if(P_marker_offd[loc_col] >= jj_begin_row_offd)
                           sum += A_ext_data[jj1];
                     }
                  }
                  if(sum != 0)
                  {
                     distribute = A_offd_data[jj] / sum;
                     for(jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++)
                     {
                        k1 = A_ext_j[jj1];
                        if(k1 >= col_1 && k1 < col_n)
                        { /* diag */
                           loc_col = k1 - col_1;
                           if(P_marker[loc_col] >= jj_begin_row)
                              P_diag_data[P_marker[loc_col]] += distribute*
                                 A_ext_data[jj1];
                        }
                        else
                        {
                           loc_col = -k1 - 1;
                           if(P_marker_offd[loc_col] >= jj_begin_row_offd)
                              P_offd_data[P_marker_offd[loc_col]] += distribute*
                                 A_ext_data[jj1];
                        }
                     }
                  }
                  else
                  {
                     diagonal += A_offd_data[jj];
                  }
               }
               else if (CF_marker_offd[i1] != -3)
               {
                  if(num_functions == 1 || dof_func[i] == dof_func_offd[i1])
                     diagonal += A_offd_data[jj];
               }
            }
         }
         /* Finish the classical formula: w_ij = -accumulated_ij / diagonal. */
         if (diagonal)
         {
            for(jj = jj_begin_row; jj < jj_end_row; jj++)
               P_diag_data[jj] /= -diagonal;
            for(jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
               P_offd_data[jj] /= -diagonal;
         }
      }
      /* Unconditional decrement keeps marker values unique across rows. */
      strong_f_marker--;
   }

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d fill structure %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      /* Truncation may reallocate; re-fetch the raw arrays. */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* This builds col_map, col_map should be monotone increasing and contain
    * global numbers. */
   if(P_offd_size)
   {
      hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd);
   }

   hypre_MatvecCommPkgCreate(P);

   /* Restore points excluded from interpolation (-3) to plain F-points. */
   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;

   *P_ptr = P;

   /* Deallocate memory */
   hypre_TFree(fine_to_coarse);
   hypre_TFree(P_marker);

   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(Sop);
      hypre_CSRMatrixDestroy(A_ext);
      hypre_TFree(fine_to_coarse_offd);
      hypre_TFree(P_marker_offd);
      hypre_TFree(CF_marker_offd);
      hypre_TFree(tmp_CF_marker_offd);
      if(num_functions > 1)
         hypre_TFree(dof_func_offd);

      hypre_MatvecCommPkgDestroy(extend_comm_pkg);
   }

   return hypre_error_flag;
}
| 160,866 | 29.243843 | 264 | c |
AMG | AMG-master/parcsr_ls/par_multi_interp.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include "_hypre_parcsr_ls.h"
/*--------------------------------------------------------------------------
* hypre_ParAMGBuildMultipass
* This routine implements Stuben's direct interpolation with multiple passes.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildMultipass( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_Int *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int P_max_elmts,
HYPRE_Int weight_option,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MULTIPASS_INTERP] -= hypre_MPI_Wtime();
#endif
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S);
hypre_ParCSRCommHandle *comm_handle;
hypre_ParCSRCommPkg *tmp_comm_pkg;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = NULL;
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = NULL;
HYPRE_Int *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = NULL;
HYPRE_Int *col_map_offd_S = hypre_ParCSRMatrixColMapOffd(S);
HYPRE_Int num_cols_offd_S = hypre_CSRMatrixNumCols(S_offd);
HYPRE_Int *col_map_offd = NULL;
HYPRE_Int num_cols_offd;
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i; /*at first counter of nonzero cols for each row,
finally will be pointer to start of row */
HYPRE_Int *P_diag_j;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i; /*at first counter of nonzero cols for each row,
finally will be pointer to start of row */
HYPRE_Int *P_offd_j = NULL;
HYPRE_Int num_sends = 0;
HYPRE_Int *int_buf_data = NULL;
HYPRE_Int *send_map_start;
HYPRE_Int *send_map_elmt;
HYPRE_Int *send_procs;
HYPRE_Int num_recvs = 0;
HYPRE_Int *recv_vec_start;
HYPRE_Int *recv_procs;
HYPRE_Int *new_recv_vec_start = NULL;
HYPRE_Int **Pext_send_map_start = NULL;
HYPRE_Int **Pext_recv_vec_start = NULL;
HYPRE_Int *Pext_start = NULL;
HYPRE_Int *P_ncols = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
HYPRE_Int *P_marker;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *C_array;
HYPRE_Int *C_array_offd = NULL;
HYPRE_Int *pass_array = NULL; /* contains points ordered according to pass */
HYPRE_Int *pass_pointer = NULL; /* pass_pointer[j] contains pointer to first
point of pass j contained in pass_array */
HYPRE_Int *P_diag_start;
HYPRE_Int *P_offd_start = NULL;
HYPRE_Int **P_diag_pass;
HYPRE_Int **P_offd_pass = NULL;
HYPRE_Int **Pext_pass = NULL;
HYPRE_Int **new_elmts = NULL; /* new neighbors generated in each pass */
HYPRE_Int *new_counter = NULL; /* contains no. of new neighbors for
each pass */
HYPRE_Int *loc = NULL; /* contains locations for new neighbor
connections in int_o_buffer to avoid searching */
HYPRE_Int *Pext_i = NULL; /*contains P_diag_i and P_offd_i info for nonzero
cols of off proc neighbors */
HYPRE_Int *Pext_send_buffer = NULL; /* used to collect global nonzero
col ids in P_diag for send_map_elmts */
HYPRE_Int *map_S_to_new = NULL;
/*HYPRE_Int *map_A_to_new = NULL;*/
HYPRE_Int *map_A_to_S = NULL;
HYPRE_Int *new_col_map_offd = NULL;
HYPRE_Int *col_map_offd_P = NULL;
HYPRE_Int *permute = NULL;
HYPRE_Int cnt;
HYPRE_Int cnt_nz;
HYPRE_Int total_nz;
HYPRE_Int pass;
HYPRE_Int num_passes;
HYPRE_Int max_num_passes = 10;
HYPRE_Int n_fine;
HYPRE_Int n_coarse = 0;
HYPRE_Int n_coarse_offd = 0;
HYPRE_Int n_SF = 0;
HYPRE_Int n_SF_offd = 0;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_Int *fine_to_coarse_offd = NULL;
HYPRE_Int *assigned = NULL;
HYPRE_Int *assigned_offd = NULL;
HYPRE_Real *Pext_send_data = NULL;
HYPRE_Real *Pext_data = NULL;
HYPRE_Real sum_C, sum_N;
HYPRE_Real sum_C_pos, sum_C_neg;
HYPRE_Real sum_N_pos, sum_N_neg;
HYPRE_Real diagonal;
HYPRE_Real alfa = 1.0;
HYPRE_Real beta = 1.0;
HYPRE_Int j_start;
HYPRE_Int j_end;
HYPRE_Int i,i1;
HYPRE_Int j,j1;
HYPRE_Int k,k1,k2,k3;
HYPRE_Int pass_array_size;
HYPRE_Int global_pass_array_size;
HYPRE_Int local_pass_array_size;
HYPRE_Int my_id, num_procs;
HYPRE_Int index, start;
HYPRE_Int my_first_cpt;
HYPRE_Int total_global_cpts;
HYPRE_Int p_cnt;
HYPRE_Int total_nz_offd;
HYPRE_Int cnt_nz_offd;
HYPRE_Int cnt_offd, cnt_new;
HYPRE_Int no_break;
HYPRE_Int not_found;
HYPRE_Int Pext_send_size;
HYPRE_Int Pext_recv_size;
HYPRE_Int old_Pext_send_size;
HYPRE_Int old_Pext_recv_size;
HYPRE_Int P_offd_size = 0;
HYPRE_Int local_index = -1;
HYPRE_Int new_num_cols_offd = 0;
HYPRE_Int num_cols_offd_P;
/* Threading variables */
HYPRE_Int my_thread_num, num_threads, thread_start, thread_stop;
HYPRE_Int pass_length;
HYPRE_Int *tmp_marker, *tmp_marker_offd;
HYPRE_Int *tmp_array, *tmp_array_offd;
HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1);
HYPRE_Int * cnt_nz_per_thread;
HYPRE_Int * cnt_nz_offd_per_thread;
/* HYPRE_Real wall_time;
wall_time = hypre_MPI_Wtime(); */
/* Initialize threading variables */
max_num_threads[0] = hypre_NumThreads();
cnt_nz_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]);
cnt_nz_offd_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0]);
for(i=0; i < max_num_threads[0]; i++)
{
cnt_nz_offd_per_thread[i] = 0;
cnt_nz_per_thread[i] = 0;
}
/*-----------------------------------------------------------------------
* Access the CSR vectors for A and S. Also get size of fine grid.
*-----------------------------------------------------------------------*/
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
/* total_global_cpts = 0; */
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
if (!comm_pkg)
{
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
col_offd_S_to_A = NULL;
}
if (col_offd_S_to_A)
{
col_map_offd = col_map_offd_S;
num_cols_offd = num_cols_offd_S;
}
else
{
col_map_offd = col_map_offd_A;
num_cols_offd = num_cols_offd_A;
}
if (num_cols_offd_A)
{
A_offd_data = hypre_CSRMatrixData(A_offd);
A_offd_j = hypre_CSRMatrixJ(A_offd);
}
if (num_cols_offd)
S_offd_j = hypre_CSRMatrixJ(S_offd);
n_fine = hypre_CSRMatrixNumRows(A_diag);
/*-----------------------------------------------------------------------
* Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
if (n_fine) fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine);
n_coarse = 0;
n_SF = 0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:n_coarse,n_SF ) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < n_fine; i++)
if (CF_marker[i] == 1) n_coarse++;
else if (CF_marker[i] == -3) n_SF++;
pass_array_size = n_fine-n_coarse-n_SF;
if (pass_array_size) pass_array = hypre_CTAlloc(HYPRE_Int, pass_array_size);
pass_pointer = hypre_CTAlloc(HYPRE_Int, max_num_passes+1);
if (n_fine) assigned = hypre_CTAlloc(HYPRE_Int, n_fine);
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1);
if (n_coarse) C_array = hypre_CTAlloc(HYPRE_Int, n_coarse);
if (num_cols_offd)
{
CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
if (num_functions > 1) dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
}
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
send_map_start = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
send_map_elmt = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
recv_vec_start = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
if (send_map_start[num_sends])
int_buf_data = hypre_CTAlloc(HYPRE_Int, send_map_start[num_sends]);
}
index = 0;
for (i=0; i < num_sends; i++)
{
start = send_map_start[i];
for (j = start; j < send_map_start[i+1]; j++)
int_buf_data[index++] = CF_marker[send_map_elmt[j]];
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (num_functions > 1)
{
index = 0;
for (i=0; i < num_sends; i++)
{
start = send_map_start[i];
for (j = start; j < send_map_start[i+1]; j++)
int_buf_data[index++] = dof_func[send_map_elmt[j]];
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
}
n_coarse_offd = 0;
n_SF_offd = 0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:n_coarse_offd,n_SF_offd) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_offd; i++)
if (CF_marker_offd[i] == 1) n_coarse_offd++;
else if (CF_marker_offd[i] == -3) n_SF_offd++;
if (num_cols_offd)
{
assigned_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
map_S_to_new = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
new_col_map_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd);
}
/*-----------------------------------------------------------------------
* First Pass: determine the maximal size of P, and elementsPerRow[i].
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Assigned points are points for which we know an interpolation
* formula already, and which are thus available to interpolate from.
* assigned[i]=0 for C points, and 1, 2, 3, ... for F points, depending
* in which pass their interpolation formula is determined.
*
* pass_array contains the points ordered according to its pass, i.e.
* | C-points | points of pass 1 | points of pass 2 | ....
* C_points are points 0 through pass_pointer[1]-1,
* points of pass k (0 < k < num_passes) are contained in points
* pass_pointer[k] through pass_pointer[k+1]-1 of pass_array .
*
* pass_array is also used to avoid going through all points for each pass,
    * i.e. at the beginning it contains all points in descending order starting
* with n_fine-1. Then starting from the last point, we evaluate whether
* it is a C_point (pass 0). If it is the point is brought to the front
* and the length of the points to be searched is shortened. This is
* done until the parameter cnt (which determines the first point of
* pass_array to be searched) becomes n_fine. Then all points have been
* assigned a pass number.
*-----------------------------------------------------------------------*/
cnt = 0;
p_cnt = pass_array_size-1;
P_diag_i[0] = 0;
P_offd_i[0] = 0;
for (i = 0; i < n_fine; i++)
{
if (CF_marker[i] == 1)
{
fine_to_coarse[i] = cnt; /* this C point is assigned index
coarse_counter on coarse grid,
and in column of P */
C_array[cnt++] = i;
assigned[i] = 0;
P_diag_i[i+1] = 1; /* one element in row i1 of P */
P_offd_i[i+1] = 0;
}
else if (CF_marker[i] == -1)
{
pass_array[p_cnt--] = i;
P_diag_i[i+1] = 0;
P_offd_i[i+1] = 0;
assigned[i] = -1;
fine_to_coarse[i] = -1;
}
else
{
P_diag_i[i+1] = 0;
P_offd_i[i+1] = 0;
assigned[i] = -1;
fine_to_coarse[i] = -1;
}
}
index = 0;
for (i=0; i < num_sends; i++)
{
start = send_map_start[i];
for (j = start; j < send_map_start[i+1]; j++)
{
int_buf_data[index] = fine_to_coarse[send_map_elmt[j]];
if (int_buf_data[index] > -1)
int_buf_data[index] += my_first_cpt;
index++;
}
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
new_recv_vec_start = hypre_CTAlloc(HYPRE_Int,num_recvs+1);
if (n_coarse_offd)
C_array_offd = hypre_CTAlloc(HYPRE_Int,n_coarse_offd);
cnt = 0;
new_recv_vec_start[0] = 0;
for (j = 0; j < num_recvs; j++)
{
for (i = recv_vec_start[j]; i < recv_vec_start[j+1]; i++)
{
if (CF_marker_offd[i] == 1)
{
map_S_to_new[i] = cnt;
C_array_offd[cnt] = i;
new_col_map_offd[cnt++] = fine_to_coarse_offd[i];
assigned_offd[i] = 0;
}
else
{
assigned_offd[i] = -1;
map_S_to_new[i] = -1;
}
}
new_recv_vec_start[j+1] = cnt;
}
cnt = 0;
hypre_TFree(fine_to_coarse_offd);
if (col_offd_S_to_A)
{
map_A_to_S = hypre_CTAlloc(HYPRE_Int,num_cols_offd_A);
for (i=0; i < num_cols_offd_A; i++)
{
if (cnt < num_cols_offd && col_map_offd_A[i] == col_map_offd[cnt])
map_A_to_S[i] = cnt++;
else
map_A_to_S[i] = -1;
}
}
/*-----------------------------------------------------------------------
* Mark all local neighbors of C points as 'assigned'.
*-----------------------------------------------------------------------*/
pass_pointer[0] = 0;
pass_pointer[1] = 0;
total_nz = n_coarse; /* accumulates total number of nonzeros in P_diag */
total_nz_offd = 0; /* accumulates total number of nonzeros in P_offd */
cnt = 0;
cnt_offd = 0;
cnt_nz = 0;
cnt_nz_offd = 0;
for (i = pass_array_size-1; i > cnt-1; i--)
{
i1 = pass_array[i];
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (CF_marker[j1] == 1)
{
P_diag_i[i1+1]++;
cnt_nz++;
assigned[i1] = 1;
}
}
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (CF_marker_offd[j1] == 1)
{
P_offd_i[i1+1]++;
cnt_nz_offd++;
assigned[i1] = 1;
}
}
if (assigned[i1] == 1)
{
pass_array[i++] = pass_array[cnt];
pass_array[cnt++] = i1;
}
}
pass_pointer[2] = cnt;
/*-----------------------------------------------------------------------
* All local neighbors are assigned, now need to exchange the boundary
* info for assigned strong neighbors.
*-----------------------------------------------------------------------*/
index = 0;
for (i=0; i < num_sends; i++)
{
start = send_map_start[i];
for (j = start; j < send_map_start[i+1]; j++)
{ int_buf_data[index++] = assigned[send_map_elmt[j]]; }
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
assigned_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
/*-----------------------------------------------------------------------
* Now we need to determine strong neighbors of points of pass 1, etc.
* we need to update assigned_offd after each pass
*-----------------------------------------------------------------------*/
pass = 2;
local_pass_array_size = pass_array_size - cnt;
hypre_MPI_Allreduce(&local_pass_array_size, &global_pass_array_size, 1, HYPRE_MPI_INT,
hypre_MPI_SUM, comm);
while (global_pass_array_size && pass < max_num_passes)
{
for (i = pass_array_size-1; i > cnt-1; i--)
{
i1 = pass_array[i];
no_break = 1;
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (assigned[j1] == pass-1)
{
pass_array[i++] = pass_array[cnt];
pass_array[cnt++] = i1;
assigned[i1] = pass;
no_break = 0;
break;
}
}
if (no_break)
{
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (assigned_offd[j1] == pass-1)
{
pass_array[i++] = pass_array[cnt];
pass_array[cnt++] = i1;
assigned[i1] = pass;
break;
}
}
}
}
/*hypre_printf("pass %d remaining points %d \n", pass, local_pass_array_size);*/
pass++;
pass_pointer[pass] = cnt;
local_pass_array_size = pass_array_size - cnt;
hypre_MPI_Allreduce(&local_pass_array_size, &global_pass_array_size, 1, HYPRE_MPI_INT,
hypre_MPI_SUM, comm);
index = 0;
for (i=0; i < num_sends; i++)
{
start = send_map_start[i];
for (j = start; j < send_map_start[i+1]; j++)
{ int_buf_data[index++] = assigned[send_map_elmt[j]]; }
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
assigned_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
}
hypre_TFree(int_buf_data);
num_passes = pass;
P_diag_pass = hypre_CTAlloc(HYPRE_Int*,num_passes); /* P_diag_pass[i] will contain
all column numbers for points of pass i */
P_diag_pass[1] = hypre_CTAlloc(HYPRE_Int,cnt_nz);
P_diag_start = hypre_CTAlloc(HYPRE_Int, n_fine); /* P_diag_start[i] contains
pointer to begin of column numbers in P_pass for point i,
P_diag_i[i+1] contains number of columns for point i */
P_offd_start = hypre_CTAlloc(HYPRE_Int, n_fine);
if (num_procs > 1)
{
P_offd_pass = hypre_CTAlloc(HYPRE_Int*,num_passes);
if (cnt_nz_offd)
P_offd_pass[1] = hypre_CTAlloc(HYPRE_Int,cnt_nz_offd);
else
P_offd_pass[1] = NULL;
new_elmts = hypre_CTAlloc(HYPRE_Int*,num_passes);
new_counter = hypre_CTAlloc(HYPRE_Int, num_passes+1);
new_counter[0] = 0;
new_counter[1] = n_coarse_offd;
new_num_cols_offd = n_coarse_offd;
new_elmts[0] = new_col_map_offd;
}
/*-----------------------------------------------------------------------
* Pass 1: now we consider points of pass 1, with strong C_neighbors,
*-----------------------------------------------------------------------*/
cnt_nz = 0;
cnt_nz_offd = 0;
/* JBS: Possible candidate for threading */
for (i=pass_pointer[1]; i < pass_pointer[2]; i++)
{
i1 = pass_array[i];
P_diag_start[i1] = cnt_nz;
P_offd_start[i1] = cnt_nz_offd;
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (CF_marker[j1] == 1)
{ P_diag_pass[1][cnt_nz++] = fine_to_coarse[j1]; }
}
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (CF_marker_offd[j1] == 1)
{ P_offd_pass[1][cnt_nz_offd++] = map_S_to_new[j1]; }
}
}
total_nz += cnt_nz;
total_nz_offd += cnt_nz_offd;
if (num_procs > 1)
{
tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg,1);
Pext_send_map_start = hypre_CTAlloc(HYPRE_Int*,num_passes);
Pext_recv_vec_start = hypre_CTAlloc(HYPRE_Int*,num_passes);
Pext_pass = hypre_CTAlloc(HYPRE_Int*,num_passes);
Pext_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd+1);
if (num_cols_offd) Pext_start = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
if (send_map_start[num_sends])
P_ncols = hypre_CTAlloc(HYPRE_Int,send_map_start[num_sends]);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_offd+1; i++)
{ Pext_i[i] = 0; }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < send_map_start[num_sends]; i++)
{ P_ncols[i] = 0; }
}
old_Pext_send_size = 0;
old_Pext_recv_size = 0;
for (pass=2; pass < num_passes; pass++)
{
if (num_procs > 1)
{
Pext_send_map_start[pass] = hypre_CTAlloc(HYPRE_Int, num_sends+1);
Pext_recv_vec_start[pass] = hypre_CTAlloc(HYPRE_Int, num_recvs+1);
Pext_send_size = 0;
Pext_send_map_start[pass][0] = 0;
for (i=0; i < num_sends; i++)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j,j1) reduction(+:Pext_send_size) HYPRE_SMP_SCHEDULE
#endif
for (j=send_map_start[i]; j < send_map_start[i+1]; j++)
{
j1 = send_map_elmt[j];
if (assigned[j1] == pass-1)
{
P_ncols[j] = P_diag_i[j1+1] + P_offd_i[j1+1];
Pext_send_size += P_ncols[j];
}
}
Pext_send_map_start[pass][i+1] = Pext_send_size;
}
comm_handle = hypre_ParCSRCommHandleCreate (11, comm_pkg,
P_ncols, &Pext_i[1]);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (Pext_send_size > old_Pext_send_size)
{
hypre_TFree(Pext_send_buffer);
Pext_send_buffer = hypre_CTAlloc(HYPRE_Int, Pext_send_size);
}
old_Pext_send_size = Pext_send_size;
}
cnt_offd = 0;
for (i=0; i < num_sends; i++)
{
for (j=send_map_start[i]; j < send_map_start[i+1]; j++)
{
j1 = send_map_elmt[j];
if (assigned[j1] == pass-1)
{
j_start = P_diag_start[j1];
j_end = j_start+P_diag_i[j1+1];
for (k=j_start; k < j_end; k++)
{
Pext_send_buffer[cnt_offd++] = my_first_cpt
+P_diag_pass[pass-1][k];
}
j_start = P_offd_start[j1];
j_end = j_start+P_offd_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = P_offd_pass[pass-1][k];
k3 = 0;
while (k3 < pass-1)
{
if (k1 < new_counter[k3+1])
{
k2 = k1-new_counter[k3];
Pext_send_buffer[cnt_offd++] = new_elmts[k3][k2];
break;
}
k3++;
}
}
}
}
}
if (num_procs > 1)
{
Pext_recv_size = 0;
Pext_recv_vec_start[pass][0] = 0;
cnt_offd = 0;
for (i=0; i < num_recvs; i++)
{
for (j=recv_vec_start[i]; j<recv_vec_start[i+1]; j++)
{
if (assigned_offd[j] == pass-1)
{
Pext_start[j] = cnt_offd;
cnt_offd += Pext_i[j+1];
}
}
Pext_recv_size = cnt_offd;
Pext_recv_vec_start[pass][i+1] = Pext_recv_size;
}
hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm;
hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = send_procs;
hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) =
Pext_send_map_start[pass];
hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = recv_procs;
hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) =
Pext_recv_vec_start[pass];
if (Pext_recv_size)
{
Pext_pass[pass] = hypre_CTAlloc(HYPRE_Int, Pext_recv_size);
new_elmts[pass-1] = hypre_CTAlloc(HYPRE_Int,Pext_recv_size);
}
else
{
Pext_pass[pass] = NULL;
new_elmts[pass-1] = NULL;
}
comm_handle = hypre_ParCSRCommHandleCreate (11, tmp_comm_pkg,
Pext_send_buffer, Pext_pass[pass]);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (Pext_recv_size > old_Pext_recv_size)
{
hypre_TFree(loc);
loc = hypre_CTAlloc(HYPRE_Int,Pext_recv_size);
}
old_Pext_recv_size = Pext_recv_size;
}
cnt_new = 0;
cnt_offd = 0;
/* JBS: Possible candidate for threading */
for (i=0; i < num_recvs; i++)
{
for (j=recv_vec_start[i]; j < recv_vec_start[i+1]; j++)
{
if (assigned_offd[j] == pass-1)
{
for (j1 = cnt_offd; j1 < cnt_offd+Pext_i[j+1]; j1++)
{
k1 = Pext_pass[pass][j1];
k2 = k1 - my_first_cpt;
if (k2 > -1 && k2 < n_coarse)
{ Pext_pass[pass][j1] = -k2-1; }
else
{
not_found = 1;
k3 = 0;
while (k3 < pass-1 && not_found)
{
k2 = hypre_BinarySearch(new_elmts[k3], k1,
(new_counter[k3+1]-new_counter[k3]));
if (k2 > -1)
{
Pext_pass[pass][j1] = k2 + new_counter[k3];
not_found = 0;
}
else
{
k3++;
}
}
if (not_found)
{
new_elmts[pass-1][cnt_new] = Pext_pass[pass][j1];
loc[cnt_new++] = j1;
}
}
}
cnt_offd += Pext_i[j+1];
}
}
}
if (cnt_new)
{
hypre_qsort2i(new_elmts[pass-1],loc,0,cnt_new-1);
cnt = 0;
local_index = new_counter[pass-1];
Pext_pass[pass][loc[0]] = local_index;
for (i=1; i < cnt_new; i++)
{
if (new_elmts[pass-1][i] > new_elmts[pass-1][cnt])
{
new_elmts[pass-1][++cnt] = new_elmts[pass-1][i];
local_index++;
}
Pext_pass[pass][loc[i]] = local_index;
}
new_counter[pass] = local_index+1;
}
else if (num_procs > 1)
new_counter[pass] = new_counter[pass-1];
if (new_num_cols_offd < local_index+1)
{ new_num_cols_offd = local_index+1; }
pass_length = pass_pointer[pass+1] - pass_pointer[pass];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,my_thread_num,num_threads,thread_start,thread_stop,cnt_nz,cnt_nz_offd,i1,j,j1,j_start,j_end,k1,k,P_marker,P_marker_offd)
#endif
{
/* Thread by computing the sparsity structure for this pass only over
* each thread's range of rows. Rows are divided up evenly amongst
* the threads. The necessary thread-wise temporary arrays, like
* P_marker, are initialized and de-allocated internally to the
* parallel region. */
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
thread_start = (pass_length/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{ thread_stop = pass_length; }
else
{ thread_stop = (pass_length/num_threads)*(my_thread_num+1); }
thread_start += pass_pointer[pass];
thread_stop += pass_pointer[pass];
/* Local initializations */
cnt_nz = 0;
cnt_nz_offd = 0;
/* This block of code is to go to the top of the parallel region starting before
* the loop over num_passes. */
P_marker = hypre_CTAlloc(HYPRE_Int, n_coarse); /* marks points to see if they're counted */
for (i=0; i < n_coarse; i++)
{ P_marker[i] = -1; }
if (new_num_cols_offd == local_index+1)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int,new_num_cols_offd);
for (i=0; i < new_num_cols_offd; i++)
{ P_marker_offd[i] = -1; }
}
else if (n_coarse_offd)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd);
for (i=0; i < n_coarse_offd; i++)
{ P_marker_offd[i] = -1; }
}
/* Need some variables to store each threads cnt_nz and cnt_nz_offd, and
* then stitch things together as in par_interp.c
* This loop writes
* P_diag_i, P_offd_i: data parallel here, and require no special treatment
* P_diag_start, P_offd_start: are not data parallel, require special treatment
*/
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
P_diag_start[i1] = cnt_nz;
P_offd_start[i1] = cnt_nz_offd;
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (assigned[j1] == pass-1)
{
j_start = P_diag_start[j1];
j_end = j_start+P_diag_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = P_diag_pass[pass-1][k];
if (P_marker[k1] != i1)
{
cnt_nz++;
P_diag_i[i1+1]++;
P_marker[k1] = i1;
}
}
j_start = P_offd_start[j1];
j_end = j_start+P_offd_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = P_offd_pass[pass-1][k];
if (P_marker_offd[k1] != i1)
{
cnt_nz_offd++;
P_offd_i[i1+1]++;
P_marker_offd[k1] = i1;
}
}
}
}
j_start = 0;
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (assigned_offd[j1] == pass-1)
{
j_start = Pext_start[j1];
j_end = j_start+Pext_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = Pext_pass[pass][k];
if (k1 < 0)
{
if (P_marker[-k1-1] != i1)
{
cnt_nz++;
P_diag_i[i1+1]++;
P_marker[-k1-1] = i1;
}
}
else if (P_marker_offd[k1] != i1)
{
cnt_nz_offd++;
P_offd_i[i1+1]++;
P_marker_offd[k1] = i1;
}
}
}
}
}
/* Update P_diag_start, P_offd_start with cumulative
* nonzero counts over all threads */
if(my_thread_num == 0)
{ max_num_threads[0] = num_threads; }
cnt_nz_offd_per_thread[my_thread_num] = cnt_nz_offd;
cnt_nz_per_thread[my_thread_num] = cnt_nz;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if(my_thread_num == 0)
{
for(i = 1; i < max_num_threads[0]; i++)
{
cnt_nz_offd_per_thread[i] += cnt_nz_offd_per_thread[i-1];
cnt_nz_per_thread[i] += cnt_nz_per_thread[i-1];
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if(my_thread_num > 0)
{
/* update this thread's section of P_diag_start and P_offd_start
* with the num of nz's counted by previous threads */
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
P_diag_start[i1] += cnt_nz_per_thread[my_thread_num-1];
P_offd_start[i1] += cnt_nz_offd_per_thread[my_thread_num-1];
}
}
else /* if my_thread_num == 0 */
{
/* Grab the nz count for all threads */
cnt_nz = cnt_nz_per_thread[max_num_threads[0]-1];
cnt_nz_offd = cnt_nz_offd_per_thread[max_num_threads[0]-1];
/* Updated total nz count */
total_nz += cnt_nz;
total_nz_offd += cnt_nz_offd;
/* Allocate P_diag_pass and P_offd_pass for all threads */
P_diag_pass[pass] = hypre_CTAlloc(HYPRE_Int, cnt_nz);
if (cnt_nz_offd)
P_offd_pass[pass] = hypre_CTAlloc(HYPRE_Int, cnt_nz_offd);
else if (num_procs > 1)
P_offd_pass[pass] = NULL;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* offset cnt_nz and cnt_nz_offd to point to the starting
* point in P_diag_pass and P_offd_pass for each thread */
if(my_thread_num > 0)
{
cnt_nz = cnt_nz_per_thread[my_thread_num-1];
cnt_nz_offd = cnt_nz_offd_per_thread[my_thread_num-1];
}
else
{
cnt_nz = 0;
cnt_nz_offd = 0;
}
/* Set P_diag_pass and P_offd_pass */
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (assigned[j1] == pass-1)
{
j_start = P_diag_start[j1];
j_end = j_start+P_diag_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = P_diag_pass[pass-1][k];
if (P_marker[k1] != -i1-1)
{
P_diag_pass[pass][cnt_nz++] = k1;
P_marker[k1] = -i1-1;
}
}
j_start = P_offd_start[j1];
j_end = j_start+P_offd_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = P_offd_pass[pass-1][k];
if (P_marker_offd[k1] != -i1-1)
{
P_offd_pass[pass][cnt_nz_offd++] = k1;
P_marker_offd[k1] = -i1-1;
}
}
}
}
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (assigned_offd[j1] == pass-1)
{
j_start = Pext_start[j1];
j_end = j_start+Pext_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = Pext_pass[pass][k];
if (k1 < 0)
{
if (P_marker[-k1-1] != -i1-1)
{
P_diag_pass[pass][cnt_nz++] = -k1-1;
P_marker[-k1-1] = -i1-1;
}
}
else if (P_marker_offd[k1] != -i1-1)
{
P_offd_pass[pass][cnt_nz_offd++] = k1;
P_marker_offd[k1] = -i1-1;
}
}
}
}
}
hypre_TFree(P_marker);
if ( (n_coarse_offd) || (new_num_cols_offd == local_index+1) )
{ hypre_TFree(P_marker_offd); }
} /* End parallel region */
}
hypre_TFree(loc);
hypre_TFree(P_ncols);
hypre_TFree(Pext_send_buffer);
hypre_TFree(new_recv_vec_start);
hypre_TFree(cnt_nz_per_thread);
hypre_TFree(cnt_nz_offd_per_thread);
hypre_TFree(max_num_threads);
P_diag_j = hypre_CTAlloc(HYPRE_Int,total_nz);
P_diag_data = hypre_CTAlloc(HYPRE_Real,total_nz);
if (total_nz_offd)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int,total_nz_offd);
P_offd_data = hypre_CTAlloc(HYPRE_Real,total_nz_offd);
}
for (i=0; i < n_fine; i++)
{
P_diag_i[i+1] += P_diag_i[i];
P_offd_i[i+1] += P_offd_i[i];
}
/* determine P for coarse points */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,i1) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < n_coarse; i++)
{
i1 = C_array[i];
P_diag_j[P_diag_i[i1]] = fine_to_coarse[i1];
P_diag_data[P_diag_i[i1]] = 1.0;
}
if (weight_option) /*if this is set, weights are separated into
negative and positive offdiagonals and accumulated
accordingly */
{
pass_length = pass_pointer[2]-pass_pointer[1];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,P_marker,P_marker_offd,i,i1,sum_C_pos,sum_C_neg,sum_N_pos,sum_N_neg,j_start,j_end,j,k1,cnt,j1,cnt_offd,diagonal,alfa,beta)
#endif
{
/* Sparsity structure is now finished. Next, calculate interpolation
* weights for pass one. Thread by computing the interpolation
* weights only over each thread's range of rows. Rows are divided
* up evenly amongst the threads. */
P_marker = hypre_CTAlloc(HYPRE_Int,n_fine);
for (i=0; i < n_fine; i++)
{ P_marker[i] = -1; }
if (num_cols_offd)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int,num_cols_offd);
for (i=0; i < num_cols_offd; i++)
P_marker_offd[i] = -1;
}
/* Compute this thread's range of pass_length */
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
thread_start = pass_pointer[1] + (pass_length/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{ thread_stop = pass_pointer[1] + pass_length; }
else
{ thread_stop = pass_pointer[1] + (pass_length/num_threads)*(my_thread_num+1); }
/* determine P for points of pass 1, i.e. neighbors of coarse points */
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
sum_C_pos = 0;
sum_C_neg = 0;
sum_N_pos = 0;
sum_N_neg = 0;
j_start = P_diag_start[i1];
j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_diag_pass[1][j];
P_marker[C_array[k1]] = i1;
}
cnt = P_diag_i[i1];
for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++)
{
j1 = A_diag_j[j];
if (CF_marker[j1] != -3 &&
(num_functions == 1 || dof_func[i1] == dof_func[j1]))
{
if (A_diag_data[j] < 0)
sum_N_neg += A_diag_data[j];
else
sum_N_pos += A_diag_data[j];
}
if (j1 != -1 && P_marker[j1] == i1)
{
P_diag_data[cnt] = A_diag_data[j];
P_diag_j[cnt++] = fine_to_coarse[j1];
if (A_diag_data[j] < 0)
sum_C_neg += A_diag_data[j];
else
sum_C_pos += A_diag_data[j];
}
}
j_start = P_offd_start[i1];
j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_offd_pass[1][j];
P_marker_offd[C_array_offd[k1]] = i1;
}
cnt_offd = P_offd_i[i1];
for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++)
{
if (col_offd_S_to_A)
j1 = map_A_to_S[A_offd_j[j]];
else
j1 = A_offd_j[j];
if (CF_marker_offd[j1] != -3 &&
(num_functions == 1 || dof_func[i1] == dof_func_offd[j1]))
{
if (A_offd_data[j] < 0)
sum_N_neg += A_offd_data[j];
else
sum_N_pos += A_offd_data[j];
}
if (j1 != -1 && P_marker_offd[j1] == i1)
{
P_offd_data[cnt_offd] = A_offd_data[j];
P_offd_j[cnt_offd++] = map_S_to_new[j1];
if (A_offd_data[j] < 0)
sum_C_neg += A_offd_data[j];
else
sum_C_pos += A_offd_data[j];
}
}
diagonal = A_diag_data[A_diag_i[i1]];
if (sum_C_neg*diagonal) alfa = -sum_N_neg/(sum_C_neg*diagonal);
if (sum_C_pos*diagonal) beta = -sum_N_pos/(sum_C_pos*diagonal);
for (j=P_diag_i[i1]; j < cnt; j++)
if (P_diag_data[j] < 0)
P_diag_data[j] *= alfa;
else
P_diag_data[j] *= beta;
for (j=P_offd_i[i1]; j < cnt_offd; j++)
if (P_offd_data[j] < 0)
P_offd_data[j] *= alfa;
else
P_offd_data[j] *= beta;
}
hypre_TFree(P_marker);
if (num_cols_offd)
{ hypre_TFree(P_marker_offd); }
} /* End Parallel Region */
old_Pext_send_size = 0;
old_Pext_recv_size = 0;
/*if (!col_offd_S_to_A) hypre_TFree(map_A_to_new);*/
if (n_coarse) hypre_TFree(C_array);
hypre_TFree(C_array_offd);
hypre_TFree(P_diag_pass[1]);
if (num_procs > 1) hypre_TFree(P_offd_pass[1]);
for (pass = 2; pass < num_passes; pass++)
{
if (num_procs > 1)
{
Pext_send_size = Pext_send_map_start[pass][num_sends];
if (Pext_send_size > old_Pext_send_size)
{
hypre_TFree(Pext_send_data);
Pext_send_data = hypre_CTAlloc(HYPRE_Real, Pext_send_size);
}
old_Pext_send_size = Pext_send_size;
cnt_offd = 0;
for (i=0; i < num_sends; i++)
{
for (j=send_map_start[i]; j < send_map_start[i+1]; j++)
{
j1 = send_map_elmt[j];
if (assigned[j1] == pass-1)
{
j_start = P_diag_i[j1];
j_end = P_diag_i[j1+1];
for (k=j_start; k < j_end; k++)
{ Pext_send_data[cnt_offd++] = P_diag_data[k]; }
j_start = P_offd_i[j1];
j_end = P_offd_i[j1+1];
for (k=j_start; k < j_end; k++)
{ Pext_send_data[cnt_offd++] = P_offd_data[k]; }
}
}
}
hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) =
Pext_send_map_start[pass];
hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) =
Pext_recv_vec_start[pass];
Pext_recv_size = Pext_recv_vec_start[pass][num_recvs];
if (Pext_recv_size > old_Pext_recv_size)
{
hypre_TFree(Pext_data);
Pext_data = hypre_CTAlloc(HYPRE_Real, Pext_recv_size);
}
old_Pext_recv_size = Pext_recv_size;
comm_handle = hypre_ParCSRCommHandleCreate (1, tmp_comm_pkg,
Pext_send_data, Pext_data);
hypre_ParCSRCommHandleDestroy(comm_handle);
hypre_TFree(Pext_send_map_start[pass]);
hypre_TFree(Pext_recv_vec_start[pass]);
}
pass_length = pass_pointer[pass+1]-pass_pointer[pass];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,P_marker,P_marker_offd,i,i1,sum_C_neg,sum_C_pos,sum_N_neg,sum_N_pos,j_start,j_end,cnt,j,k1,cnt_offd,j1,k,alfa,beta,diagonal,C_array,C_array_offd)
#endif
{
/* Sparsity structure is now finished. Next, calculate interpolation
* weights for passes >= 2. Thread by computing the interpolation
* weights only over each thread's range of rows. Rows are divided
* up evenly amongst the threads. */
P_marker = hypre_CTAlloc(HYPRE_Int,n_fine);
for (i=0; i < n_fine; i++)
{ P_marker[i] = -1; }
if (num_cols_offd)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int,num_cols_offd);
for (i=0; i < num_cols_offd; i++)
P_marker_offd[i] = -1;
}
C_array = NULL;
C_array_offd = NULL;
if (n_coarse)
{ C_array = hypre_CTAlloc(HYPRE_Int, n_coarse); }
if (new_num_cols_offd > n_coarse_offd)
{ C_array_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd); }
else if (n_coarse_offd)
{ C_array_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd); }
/* Compute this thread's range of pass_length */
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
thread_start = pass_pointer[pass] + (pass_length/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{ thread_stop = pass_pointer[pass] + pass_length; }
else
{ thread_stop = pass_pointer[pass] + (pass_length/num_threads)*(my_thread_num+1); }
/* Loop over each thread's row-range */
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
sum_C_neg = 0;
sum_C_pos = 0;
sum_N_neg = 0;
sum_N_pos = 0;
j_start = P_diag_start[i1];
j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1];
cnt = P_diag_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_diag_pass[pass][j];
C_array[k1] = cnt;
P_diag_data[cnt] = 0;
P_diag_j[cnt++] = k1;
}
j_start = P_offd_start[i1];
j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1];
cnt_offd = P_offd_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_offd_pass[pass][j];
C_array_offd[k1] = cnt_offd;
P_offd_data[cnt_offd] = 0;
P_offd_j[cnt_offd++] = k1;
}
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (assigned[j1] == pass-1)
P_marker[j1] = i1;
}
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (assigned_offd[j1] == pass-1)
P_marker_offd[j1] = i1;
}
for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++)
{
j1 = A_diag_j[j];
if (P_marker[j1] == i1)
{
for (k=P_diag_i[j1]; k < P_diag_i[j1+1]; k++)
{
k1 = P_diag_j[k];
alfa = A_diag_data[j]*P_diag_data[k];
P_diag_data[C_array[k1]] += alfa;
if (alfa < 0)
{
sum_C_neg += alfa;
sum_N_neg += alfa;
}
else
{
sum_C_pos += alfa;
sum_N_pos += alfa;
}
}
for (k=P_offd_i[j1]; k < P_offd_i[j1+1]; k++)
{
k1 = P_offd_j[k];
alfa = A_diag_data[j]*P_offd_data[k];
P_offd_data[C_array_offd[k1]] += alfa;
if (alfa < 0)
{
sum_C_neg += alfa;
sum_N_neg += alfa;
}
else
{
sum_C_pos += alfa;
sum_N_pos += alfa;
}
}
}
else
{
if (CF_marker[j1] != -3 &&
(num_functions == 1 || dof_func[i1] == dof_func[j1]))
{
if (A_diag_data[j] < 0)
sum_N_neg += A_diag_data[j];
else
sum_N_pos += A_diag_data[j];
}
}
}
for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++)
{
if (col_offd_S_to_A)
j1 = map_A_to_S[A_offd_j[j]];
else
j1 = A_offd_j[j];
if (j1 > -1 && P_marker_offd[j1] == i1)
{
j_start = Pext_start[j1];
j_end = j_start+Pext_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = Pext_pass[pass][k];
alfa = A_offd_data[j]*Pext_data[k];
if (k1 < 0)
P_diag_data[C_array[-k1-1]] += alfa;
else
P_offd_data[C_array_offd[k1]] += alfa;
if (alfa < 0)
{
sum_C_neg += alfa;
sum_N_neg += alfa;
}
else
{
sum_C_pos += alfa;
sum_N_pos += alfa;
}
}
}
else
{
if (CF_marker_offd[j1] != -3 &&
(num_functions == 1 || dof_func_offd[j1] == dof_func[i1]))
{
if ( A_offd_data[j] < 0)
sum_N_neg += A_offd_data[j];
else
sum_N_pos += A_offd_data[j];
}
}
}
diagonal = A_diag_data[A_diag_i[i1]];
if (sum_C_neg*diagonal) alfa = -sum_N_neg/(sum_C_neg*diagonal);
if (sum_C_pos*diagonal) beta = -sum_N_pos/(sum_C_pos*diagonal);
for (j=P_diag_i[i1]; j < P_diag_i[i1+1]; j++)
if (P_diag_data[j] < 0)
P_diag_data[j] *= alfa;
else
P_diag_data[j] *= beta;
for (j=P_offd_i[i1]; j < P_offd_i[i1+1]; j++)
if (P_offd_data[j] < 0)
P_offd_data[j] *= alfa;
else
P_offd_data[j] *= beta;
}
hypre_TFree(C_array);
hypre_TFree(C_array_offd);
hypre_TFree(P_marker);
if (num_cols_offd)
{ hypre_TFree(P_marker_offd); }
} /* End OMP Parallel Section */
hypre_TFree(P_diag_pass[pass]);
if (num_procs > 1)
{
hypre_TFree(P_offd_pass[pass]);
hypre_TFree(Pext_pass[pass]);
}
} /* End num_passes for-loop */
}
else /* no distinction between positive and negative offdiagonal element */
{
pass_length = pass_pointer[2]-pass_pointer[1];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,k,k1,i,i1,j,j1,sum_C,sum_N,j_start,j_end,cnt,tmp_marker,tmp_marker_offd,cnt_offd,diagonal,alfa)
#endif
{
/* Sparsity structure is now finished. Next, calculate interpolation
* weights for pass one. Thread by computing the interpolation
* weights only over each thread's range of rows. Rows are divided
* up evenly amongst the threads. */
/* Initialize thread-wise variables */
tmp_marker = NULL;
if (n_fine)
{ tmp_marker = hypre_CTAlloc(HYPRE_Int,n_fine); }
tmp_marker_offd = NULL;
if (num_cols_offd)
{ tmp_marker_offd = hypre_CTAlloc(HYPRE_Int,num_cols_offd); }
for (i=0; i < n_fine; i++)
{ tmp_marker[i] = -1; }
for (i=0; i < num_cols_offd; i++)
{ tmp_marker_offd[i] = -1; }
/* Compute this thread's range of pass_length */
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
thread_start = pass_pointer[1] + (pass_length/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{ thread_stop = pass_pointer[1] + pass_length; }
else
{ thread_stop = pass_pointer[1] + (pass_length/num_threads)*(my_thread_num+1); }
/* determine P for points of pass 1, i.e. neighbors of coarse points */
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
sum_C = 0;
sum_N = 0;
j_start = P_diag_start[i1];
j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_diag_pass[1][j];
tmp_marker[C_array[k1]] = i1;
}
cnt = P_diag_i[i1];
for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++)
{
j1 = A_diag_j[j];
if (CF_marker[j1] != -3 &&
(num_functions == 1 || dof_func[i1] == dof_func[j1]))
sum_N += A_diag_data[j];
if (j1 != -1 && tmp_marker[j1] == i1)
{
P_diag_data[cnt] = A_diag_data[j];
P_diag_j[cnt++] = fine_to_coarse[j1];
sum_C += A_diag_data[j];
}
}
j_start = P_offd_start[i1];
j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_offd_pass[1][j];
tmp_marker_offd[C_array_offd[k1]] = i1;
}
cnt_offd = P_offd_i[i1];
for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++)
{
if (col_offd_S_to_A)
j1 = map_A_to_S[A_offd_j[j]];
else
j1 = A_offd_j[j];
if (CF_marker_offd[j1] != -3 &&
(num_functions == 1 || dof_func[i1] == dof_func_offd[j1]))
sum_N += A_offd_data[j];
if (j1 != -1 && tmp_marker_offd[j1] == i1)
{
P_offd_data[cnt_offd] = A_offd_data[j];
P_offd_j[cnt_offd++] = map_S_to_new[j1];
sum_C += A_offd_data[j];
}
}
diagonal = A_diag_data[A_diag_i[i1]];
if (sum_C*diagonal) alfa = -sum_N/(sum_C*diagonal);
for (j=P_diag_i[i1]; j < cnt; j++)
P_diag_data[j] *= alfa;
for (j=P_offd_i[i1]; j < cnt_offd; j++)
P_offd_data[j] *= alfa;
}
hypre_TFree(tmp_marker);
hypre_TFree(tmp_marker_offd);
} /* end OMP parallel region */
old_Pext_send_size = 0;
old_Pext_recv_size = 0;
if (n_coarse) hypre_TFree(C_array);
hypre_TFree(C_array_offd);
hypre_TFree(P_diag_pass[1]);
if (num_procs > 1) hypre_TFree(P_offd_pass[1]);
for (pass = 2; pass < num_passes; pass++)
{
if (num_procs > 1)
{
Pext_send_size = Pext_send_map_start[pass][num_sends];
if (Pext_send_size > old_Pext_send_size)
{
hypre_TFree(Pext_send_data);
Pext_send_data = hypre_CTAlloc(HYPRE_Real, Pext_send_size);
}
old_Pext_send_size = Pext_send_size;
cnt_offd = 0;
for (i=0; i < num_sends; i++)
{
for (j=send_map_start[i]; j < send_map_start[i+1]; j++)
{
j1 = send_map_elmt[j];
if (assigned[j1] == pass-1)
{
j_start = P_diag_i[j1];
j_end = P_diag_i[j1+1];
for (k=j_start; k < j_end; k++)
{
Pext_send_data[cnt_offd++] = P_diag_data[k];
}
j_start = P_offd_i[j1];
j_end = P_offd_i[j1+1];
for (k=j_start; k < j_end; k++)
{
Pext_send_data[cnt_offd++] = P_offd_data[k];
}
}
}
}
hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) =
Pext_send_map_start[pass];
hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) =
Pext_recv_vec_start[pass];
Pext_recv_size = Pext_recv_vec_start[pass][num_recvs];
if (Pext_recv_size > old_Pext_recv_size)
{
hypre_TFree(Pext_data);
Pext_data = hypre_CTAlloc(HYPRE_Real, Pext_recv_size);
}
old_Pext_recv_size = Pext_recv_size;
comm_handle = hypre_ParCSRCommHandleCreate (1, tmp_comm_pkg,
Pext_send_data, Pext_data);
hypre_ParCSRCommHandleDestroy(comm_handle);
hypre_TFree(Pext_send_map_start[pass]);
hypre_TFree(Pext_recv_vec_start[pass]);
}
pass_length = pass_pointer[pass+1]-pass_pointer[pass];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,k,k1,i,i1,j,j1,sum_C,sum_N,j_start,j_end,cnt,tmp_marker,tmp_marker_offd,cnt_offd,diagonal,alfa,tmp_array,tmp_array_offd)
#endif
{
/* Sparsity structure is now finished. Next, calculate interpolation
* weights for passes >= 2. Thread by computing the interpolation
* weights only over each thread's range of rows. Rows are divided
* up evenly amongst the threads. */
/* Initialize thread-wise variables */
tmp_marker = NULL;
if (n_fine)
{ tmp_marker = hypre_CTAlloc(HYPRE_Int,n_fine); }
tmp_marker_offd = NULL;
if (num_cols_offd)
{ tmp_marker_offd = hypre_CTAlloc(HYPRE_Int,num_cols_offd); }
tmp_array = NULL;
if (n_coarse)
{ tmp_array = hypre_CTAlloc(HYPRE_Int,n_coarse); }
tmp_array_offd = NULL;
if (new_num_cols_offd > n_coarse_offd)
{ tmp_array_offd = hypre_CTAlloc(HYPRE_Int,new_num_cols_offd); }
else
{ tmp_array_offd = hypre_CTAlloc(HYPRE_Int,n_coarse_offd);}
for (i=0; i < n_fine; i++)
{ tmp_marker[i] = -1; }
for (i=0; i < num_cols_offd; i++)
{ tmp_marker_offd[i] = -1; }
/* Compute this thread's range of pass_length */
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
thread_start = pass_pointer[pass] + (pass_length/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{ thread_stop = pass_pointer[pass] + pass_length; }
else
{ thread_stop = pass_pointer[pass] + (pass_length/num_threads)*(my_thread_num+1); }
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
sum_C = 0;
sum_N = 0;
j_start = P_diag_start[i1];
j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1];
cnt = P_diag_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_diag_pass[pass][j];
tmp_array[k1] = cnt;
P_diag_data[cnt] = 0;
P_diag_j[cnt++] = k1;
}
j_start = P_offd_start[i1];
j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1];
cnt_offd = P_offd_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_offd_pass[pass][j];
tmp_array_offd[k1] = cnt_offd;
P_offd_data[cnt_offd] = 0;
P_offd_j[cnt_offd++] = k1;
}
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (assigned[j1] == pass-1)
tmp_marker[j1] = i1;
}
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (assigned_offd[j1] == pass-1)
tmp_marker_offd[j1] = i1;
}
for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++)
{
j1 = A_diag_j[j];
if (tmp_marker[j1] == i1)
{
for (k=P_diag_i[j1]; k < P_diag_i[j1+1]; k++)
{
k1 = P_diag_j[k];
alfa = A_diag_data[j]*P_diag_data[k];
P_diag_data[tmp_array[k1]] += alfa;
sum_C += alfa;
sum_N += alfa;
}
for (k=P_offd_i[j1]; k < P_offd_i[j1+1]; k++)
{
k1 = P_offd_j[k];
alfa = A_diag_data[j]*P_offd_data[k];
P_offd_data[tmp_array_offd[k1]] += alfa;
sum_C += alfa;
sum_N += alfa;
}
}
else
{
if (CF_marker[j1] != -3 &&
(num_functions == 1 || dof_func[i1] == dof_func[j1]))
sum_N += A_diag_data[j];
}
}
for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++)
{
if (col_offd_S_to_A)
j1 = map_A_to_S[A_offd_j[j]];
else
j1 = A_offd_j[j];
if (j1 > -1 && tmp_marker_offd[j1] == i1)
{
j_start = Pext_start[j1];
j_end = j_start+Pext_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = Pext_pass[pass][k];
alfa = A_offd_data[j]*Pext_data[k];
if (k1 < 0)
P_diag_data[tmp_array[-k1-1]] += alfa;
else
P_offd_data[tmp_array_offd[k1]] += alfa;
sum_C += alfa;
sum_N += alfa;
}
}
else
{
if (CF_marker_offd[j1] != -3 &&
(num_functions == 1 || dof_func_offd[j1] == dof_func[i1]))
sum_N += A_offd_data[j];
}
}
diagonal = A_diag_data[A_diag_i[i1]];
if (sum_C*diagonal) alfa = -sum_N/(sum_C*diagonal);
for (j=P_diag_i[i1]; j < P_diag_i[i1+1]; j++)
P_diag_data[j] *= alfa;
for (j=P_offd_i[i1]; j < P_offd_i[i1+1]; j++)
P_offd_data[j] *= alfa;
}
hypre_TFree(tmp_marker);
hypre_TFree(tmp_marker_offd);
hypre_TFree(tmp_array);
hypre_TFree(tmp_array_offd);
} /* End OMP Parallel Section */
hypre_TFree(P_diag_pass[pass]);
if (num_procs > 1)
{
hypre_TFree(P_offd_pass[pass]);
hypre_TFree(Pext_pass[pass]);
}
}
}
hypre_TFree(CF_marker_offd);
hypre_TFree(Pext_send_map_start);
hypre_TFree(Pext_recv_vec_start);
hypre_TFree(dof_func_offd);
hypre_TFree(Pext_send_data);
hypre_TFree(Pext_data);
hypre_TFree(P_diag_pass);
hypre_TFree(P_offd_pass);
hypre_TFree(Pext_pass);
hypre_TFree(P_diag_start);
hypre_TFree(P_offd_start);
hypre_TFree(Pext_start);
hypre_TFree(Pext_i);
hypre_TFree(fine_to_coarse);
hypre_TFree(assigned);
hypre_TFree(assigned_offd);
hypre_TFree(pass_pointer);
hypre_TFree(pass_array);
hypre_TFree(map_S_to_new);
hypre_TFree(map_A_to_S);
if (num_procs > 1) hypre_TFree(tmp_comm_pkg);
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
   /* Compress P, removing coefficients smaller than trunc_factor * Max
      and/or keep at most <P_max_elmts> largest-magnitude coefficients per row */
if (trunc_factor != 0.0 || P_max_elmts != 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, P_max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
}
P_offd_size = P_offd_i[n_fine];
num_cols_offd_P = 0;
if (P_offd_size)
{
if (new_num_cols_offd > num_cols_offd)
{ P_marker_offd = hypre_CTAlloc(HYPRE_Int,new_num_cols_offd); }
else
{ P_marker_offd = hypre_CTAlloc(HYPRE_Int,num_cols_offd); }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < new_num_cols_offd; i++)
{ P_marker_offd[i] = 0; }
num_cols_offd_P = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker_offd[index])
{
num_cols_offd_P++;
P_marker_offd[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_Int,num_cols_offd_P);
permute = hypre_CTAlloc(HYPRE_Int, new_counter[num_passes-1]);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < new_counter[num_passes-1]; i++)
permute[i] = -1;
cnt = 0;
for (i=0; i < num_passes-1; i++)
{
for (j=new_counter[i]; j < new_counter[i+1]; j++)
{
if (P_marker_offd[j])
{
col_map_offd_P[cnt] = new_elmts[i][j-new_counter[i]];
permute[j] = col_map_offd_P[cnt++];
}
}
}
hypre_qsort0(col_map_offd_P,0,num_cols_offd_P-1);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,k1) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < new_counter[num_passes-1]; i++)
{
k1 = permute[i];
if (k1 != -1)
permute[i] = hypre_BinarySearch(col_map_offd_P,k1,num_cols_offd_P);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
{ P_offd_j[i] = permute[P_offd_j[i]]; }
hypre_TFree(P_marker_offd);
}
if (num_procs > 1)
{
for (i=0; i < num_passes-1; i++)
hypre_TFree(new_elmts[i]);
}
hypre_TFree(permute);
hypre_TFree(new_elmts);
hypre_TFree(new_counter);
if (num_cols_offd_P)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_offd_P;
}
if (n_SF)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
}
if (num_procs > 1)
{
hypre_MatvecCommPkgCreate(P);
}
*P_ptr = P;
/* wall_time = hypre_MPI_Wtime() - wall_time;
hypre_printf("TOTAL TIME %1.2e \n",wall_time); */
/*-----------------------------------------------------------------------
* Build and return dof_func array for coarse grid.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Free mapping vector and marker array.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MULTIPASS_INTERP] += hypre_MPI_Wtime();
#endif
return(0);
}
| 74,513 | 35.13676 | 225 | c |
AMG | AMG-master/parcsr_ls/par_nodal_systems.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
/******************************************************************************
*
*****************************************************************************/
/* following should be in a header file */
#include "_hypre_parcsr_ls.h"
/*==========================================================================*/
/*==========================================================================*/
/**
  Generates the nodal norm matrix for use with the nodal-systems version
{\bf Input files:}
_hypre_parcsr_ls.h
@return Error code.
@param A [IN]
coefficient matrix
@param AN_ptr [OUT]
nodal norm matrix
@see */
/*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCreateNodalA(hypre_ParCSRMatrix *A,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int option,
HYPRE_Int diag_option,
hypre_ParCSRMatrix **AN_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(A);
HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_nonzeros_offd = 0;
HYPRE_Int num_cols_offd = 0;
hypre_ParCSRMatrix *AN;
hypre_CSRMatrix *AN_diag;
HYPRE_Int *AN_diag_i;
HYPRE_Int *AN_diag_j;
HYPRE_Real *AN_diag_data;
hypre_CSRMatrix *AN_offd;
HYPRE_Int *AN_offd_i;
HYPRE_Int *AN_offd_j;
HYPRE_Real *AN_offd_data;
HYPRE_Int *col_map_offd_AN;
HYPRE_Int *new_col_map_offd;
HYPRE_Int *row_starts_AN;
HYPRE_Int AN_num_nonzeros_diag = 0;
HYPRE_Int AN_num_nonzeros_offd = 0;
HYPRE_Int num_cols_offd_AN;
HYPRE_Int new_num_cols_offd;
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int num_sends;
HYPRE_Int num_recvs;
HYPRE_Int *send_procs;
HYPRE_Int *send_map_starts;
HYPRE_Int *send_map_elmts = NULL;
HYPRE_Int *new_send_map_elmts;
HYPRE_Int *recv_procs;
HYPRE_Int *recv_vec_starts;
hypre_ParCSRCommPkg *comm_pkg_AN;
HYPRE_Int *send_procs_AN;
HYPRE_Int *send_map_starts_AN;
HYPRE_Int *send_map_elmts_AN;
HYPRE_Int *recv_procs_AN;
HYPRE_Int *recv_vec_starts_AN;
HYPRE_Int i, j, k, k_map;
HYPRE_Int index, row;
HYPRE_Int start_index;
HYPRE_Int num_procs;
HYPRE_Int node, cnt;
HYPRE_Int mode;
HYPRE_Int new_send_elmts_size;
HYPRE_Int global_num_nodes;
HYPRE_Int num_nodes;
HYPRE_Int num_fun2;
HYPRE_Int *map_to_node;
HYPRE_Int *map_to_map;
HYPRE_Int *counter;
HYPRE_Real sum;
HYPRE_Real *data;
hypre_MPI_Comm_size(comm,&num_procs);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
mode = hypre_abs(option);
comm_pkg_AN = NULL;
col_map_offd_AN = NULL;
#ifdef HYPRE_NO_GLOBAL_PARTITION
row_starts_AN = hypre_CTAlloc(HYPRE_Int, 2);
for (i=0; i < 2; i++)
{
row_starts_AN[i] = row_starts[i]/num_functions;
if (row_starts_AN[i]*num_functions < row_starts[i])
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"nodes not properly aligned or incomplete info!\n");
return hypre_error_flag;
}
}
global_num_nodes = hypre_ParCSRMatrixGlobalNumRows(A)/num_functions;
#else
row_starts_AN = hypre_CTAlloc(HYPRE_Int, num_procs+1);
for (i=0; i < num_procs+1; i++)
{
row_starts_AN[i] = row_starts[i]/num_functions;
if (row_starts_AN[i]*num_functions < row_starts[i])
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"nodes not properly aligned or incomplete info!\n");
return hypre_error_flag;
}
}
global_num_nodes = row_starts_AN[num_procs];
#endif
num_nodes = num_variables/num_functions;
num_fun2 = num_functions*num_functions;
map_to_node = hypre_CTAlloc(HYPRE_Int, num_variables);
AN_diag_i = hypre_CTAlloc(HYPRE_Int, num_nodes+1);
counter = hypre_CTAlloc(HYPRE_Int, num_nodes);
for (i=0; i < num_variables; i++)
map_to_node[i] = i/num_functions;
for (i=0; i < num_nodes; i++)
counter[i] = -1;
AN_num_nonzeros_diag = 0;
row = 0;
for (i=0; i < num_nodes; i++)
{
AN_diag_i[i] = AN_num_nonzeros_diag;
for (j=0; j < num_functions; j++)
{
for (k=A_diag_i[row]; k < A_diag_i[row+1]; k++)
{
k_map = map_to_node[A_diag_j[k]];
if (counter[k_map] < i)
{
counter[k_map] = i;
AN_num_nonzeros_diag++;
}
}
row++;
}
}
AN_diag_i[num_nodes] = AN_num_nonzeros_diag;
AN_diag_j = hypre_CTAlloc(HYPRE_Int, AN_num_nonzeros_diag);
AN_diag_data = hypre_CTAlloc(HYPRE_Real, AN_num_nonzeros_diag);
AN_diag = hypre_CSRMatrixCreate(num_nodes,num_nodes,AN_num_nonzeros_diag);
hypre_CSRMatrixI(AN_diag) = AN_diag_i;
hypre_CSRMatrixJ(AN_diag) = AN_diag_j;
hypre_CSRMatrixData(AN_diag) = AN_diag_data;
for (i=0; i < num_nodes; i++)
counter[i] = -1;
index = 0;
start_index = 0;
row = 0;
switch (mode)
{
case 1: /* frobenius norm */
{
for (i=0; i < num_nodes; i++)
{
for (j=0; j < num_functions; j++)
{
for (k=A_diag_i[row]; k < A_diag_i[row+1]; k++)
{
k_map = map_to_node[A_diag_j[k]];
if (counter[k_map] < start_index)
{
counter[k_map] = index;
AN_diag_j[index] = k_map;
AN_diag_data[index] = A_diag_data[k]*A_diag_data[k];
index++;
}
else
{
AN_diag_data[counter[k_map]] +=
A_diag_data[k]*A_diag_data[k];
}
}
row++;
}
start_index = index;
}
for (i=0; i < AN_num_nonzeros_diag; i++)
AN_diag_data[i] = sqrt(AN_diag_data[i]);
}
break;
case 2: /* sum of abs. value of all elements in each block */
{
for (i=0; i < num_nodes; i++)
{
for (j=0; j < num_functions; j++)
{
for (k=A_diag_i[row]; k < A_diag_i[row+1]; k++)
{
k_map = map_to_node[A_diag_j[k]];
if (counter[k_map] < start_index)
{
counter[k_map] = index;
AN_diag_j[index] = k_map;
AN_diag_data[index] = fabs(A_diag_data[k]);
index++;
}
else
{
AN_diag_data[counter[k_map]] += fabs(A_diag_data[k]);
}
}
row++;
}
start_index = index;
}
for (i=0; i < AN_num_nonzeros_diag; i++)
AN_diag_data[i] /= num_fun2;
}
break;
case 3: /* largest element of each block (sets true value - not abs. value) */
{
for (i=0; i < num_nodes; i++)
{
for (j=0; j < num_functions; j++)
{
for (k=A_diag_i[row]; k < A_diag_i[row+1]; k++)
{
k_map = map_to_node[A_diag_j[k]];
if (counter[k_map] < start_index)
{
counter[k_map] = index;
AN_diag_j[index] = k_map;
AN_diag_data[index] = A_diag_data[k];
index++;
}
else
{
if (fabs(A_diag_data[k]) >
fabs(AN_diag_data[counter[k_map]]))
AN_diag_data[counter[k_map]] = A_diag_data[k];
}
}
row++;
}
start_index = index;
}
}
break;
case 4: /* inf. norm (row-sum) */
{
data = hypre_CTAlloc(HYPRE_Real, AN_num_nonzeros_diag*num_functions);
for (i=0; i < num_nodes; i++)
{
for (j=0; j < num_functions; j++)
{
for (k=A_diag_i[row]; k < A_diag_i[row+1]; k++)
{
k_map = map_to_node[A_diag_j[k]];
if (counter[k_map] < start_index)
{
counter[k_map] = index;
AN_diag_j[index] = k_map;
data[index*num_functions + j] = fabs(A_diag_data[k]);
index++;
}
else
{
data[(counter[k_map])*num_functions + j] += fabs(A_diag_data[k]);
}
}
row++;
}
start_index = index;
}
for (i=0; i < AN_num_nonzeros_diag; i++)
{
AN_diag_data[i] = data[i*num_functions];
for (j=1; j< num_functions; j++)
{
AN_diag_data[i] = hypre_max( AN_diag_data[i],data[i*num_functions+j]);
}
}
hypre_TFree(data);
}
break;
case 6: /* sum of all elements in each block */
{
for (i=0; i < num_nodes; i++)
{
for (j=0; j < num_functions; j++)
{
for (k=A_diag_i[row]; k < A_diag_i[row+1]; k++)
{
k_map = map_to_node[A_diag_j[k]];
if (counter[k_map] < start_index)
{
counter[k_map] = index;
AN_diag_j[index] = k_map;
AN_diag_data[index] = (A_diag_data[k]);
index++;
}
else
{
AN_diag_data[counter[k_map]] += (A_diag_data[k]);
}
}
row++;
}
start_index = index;
}
}
break;
}
if (diag_option ==1 )
{
/* make the diag entry the negative of the sum of off-diag entries (DO MORE BELOW) */
for (i=0; i < num_nodes; i++)
{
index = AN_diag_i[i];
sum = 0.0;
for (k = AN_diag_i[i]+1; k < AN_diag_i[i+1]; k++)
{
sum += AN_diag_data[k];
}
AN_diag_data[index] = -sum;
}
}
else if (diag_option == 2)
{
/* make all diagonal entries negative */
/* the diagonal is the first element listed in each row - */
for (i=0; i < num_nodes; i++)
{
index = AN_diag_i[i];
AN_diag_data[index] = - AN_diag_data[index];
}
}
num_nonzeros_offd = A_offd_i[num_variables];
AN_offd_i = hypre_CTAlloc(HYPRE_Int, num_nodes+1);
num_cols_offd_AN = 0;
if (comm_pkg)
{
comm_pkg_AN = hypre_CTAlloc(hypre_ParCSRCommPkg,1);
hypre_ParCSRCommPkgComm(comm_pkg_AN) = comm;
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
hypre_ParCSRCommPkgNumSends(comm_pkg_AN) = num_sends;
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
hypre_ParCSRCommPkgNumRecvs(comm_pkg_AN) = num_recvs;
send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);
recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
send_procs_AN = NULL;
send_map_elmts_AN = NULL;
if (num_sends)
{
send_procs_AN = hypre_CTAlloc(HYPRE_Int,num_sends);
send_map_elmts_AN = hypre_CTAlloc(HYPRE_Int,send_map_starts[num_sends]);
}
send_map_starts_AN = hypre_CTAlloc(HYPRE_Int,num_sends+1);
recv_vec_starts_AN = hypre_CTAlloc(HYPRE_Int,num_recvs+1);
recv_procs_AN = NULL;
if (num_recvs) recv_procs_AN = hypre_CTAlloc(HYPRE_Int,num_recvs);
for (i=0; i < num_sends; i++)
send_procs_AN[i] = send_procs[i];
for (i=0; i < num_recvs; i++)
recv_procs_AN[i] = recv_procs[i];
send_map_starts_AN[0] = 0;
cnt = 0;
for (i=0; i < num_sends; i++)
{
k_map = send_map_starts[i];
if (send_map_starts[i+1]-k_map)
send_map_elmts_AN[cnt++] = send_map_elmts[k_map]/num_functions;
for (j=send_map_starts[i]+1; j < send_map_starts[i+1]; j++)
{
node = send_map_elmts[j]/num_functions;
if (node > send_map_elmts_AN[cnt-1])
send_map_elmts_AN[cnt++] = node;
}
send_map_starts_AN[i+1] = cnt;
}
hypre_ParCSRCommPkgSendProcs(comm_pkg_AN) = send_procs_AN;
hypre_ParCSRCommPkgSendMapStarts(comm_pkg_AN) = send_map_starts_AN;
hypre_ParCSRCommPkgSendMapElmts(comm_pkg_AN) = send_map_elmts_AN;
hypre_ParCSRCommPkgRecvProcs(comm_pkg_AN) = recv_procs_AN;
hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_AN) = recv_vec_starts_AN;
}
num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
if (num_cols_offd)
{
if (num_cols_offd > num_variables)
{
hypre_TFree(map_to_node);
map_to_node = hypre_CTAlloc(HYPRE_Int,num_cols_offd);
}
num_cols_offd_AN = 1;
map_to_node[0] = col_map_offd[0]/num_functions;
for (i=1; i < num_cols_offd; i++)
{
map_to_node[i] = col_map_offd[i]/num_functions;
if (map_to_node[i] > map_to_node[i-1]) num_cols_offd_AN++;
}
if (num_cols_offd_AN > num_nodes)
{
hypre_TFree(counter);
counter = hypre_CTAlloc(HYPRE_Int,num_cols_offd_AN);
}
map_to_map = NULL;
col_map_offd_AN = NULL;
map_to_map = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
col_map_offd_AN = hypre_CTAlloc(HYPRE_Int,num_cols_offd_AN);
col_map_offd_AN[0] = map_to_node[0];
recv_vec_starts_AN[0] = 0;
cnt = 1;
for (i=0; i < num_recvs; i++)
{
for (j=recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++)
{
node = map_to_node[j];
if (node > col_map_offd_AN[cnt-1])
{
col_map_offd_AN[cnt++] = node;
}
map_to_map[j] = cnt-1;
}
recv_vec_starts_AN[i+1] = cnt;
}
for (i=0; i < num_cols_offd_AN; i++)
counter[i] = -1;
AN_num_nonzeros_offd = 0;
row = 0;
for (i=0; i < num_nodes; i++)
{
AN_offd_i[i] = AN_num_nonzeros_offd;
for (j=0; j < num_functions; j++)
{
for (k=A_offd_i[row]; k < A_offd_i[row+1]; k++)
{
k_map = map_to_map[A_offd_j[k]];
if (counter[k_map] < i)
{
counter[k_map] = i;
AN_num_nonzeros_offd++;
}
}
row++;
}
}
AN_offd_i[num_nodes] = AN_num_nonzeros_offd;
}
AN_offd = hypre_CSRMatrixCreate(num_nodes,num_cols_offd_AN,
AN_num_nonzeros_offd);
hypre_CSRMatrixI(AN_offd) = AN_offd_i;
if (AN_num_nonzeros_offd)
{
AN_offd_j = hypre_CTAlloc(HYPRE_Int, AN_num_nonzeros_offd);
AN_offd_data = hypre_CTAlloc(HYPRE_Real, AN_num_nonzeros_offd);
hypre_CSRMatrixJ(AN_offd) = AN_offd_j;
hypre_CSRMatrixData(AN_offd) = AN_offd_data;
for (i=0; i < num_cols_offd_AN; i++)
counter[i] = -1;
index = 0;
row = 0;
AN_offd_i[0] = 0;
start_index = 0;
switch (mode)
{
case 1: /* frobenius norm */
{
for (i=0; i < num_nodes; i++)
{
for (j=0; j < num_functions; j++)
{
for (k=A_offd_i[row]; k < A_offd_i[row+1]; k++)
{
k_map = map_to_map[A_offd_j[k]];
if (counter[k_map] < start_index)
{
counter[k_map] = index;
AN_offd_j[index] = k_map;
AN_offd_data[index] = A_offd_data[k]*A_offd_data[k];
index++;
}
else
{
AN_offd_data[counter[k_map]] +=
A_offd_data[k]*A_offd_data[k];
}
}
row++;
}
start_index = index;
}
for (i=0; i < AN_num_nonzeros_offd; i++)
AN_offd_data[i] = sqrt(AN_offd_data[i]);
}
break;
case 2: /* sum of abs. value of all elements in block */
{
for (i=0; i < num_nodes; i++)
{
for (j=0; j < num_functions; j++)
{
for (k=A_offd_i[row]; k < A_offd_i[row+1]; k++)
{
k_map = map_to_map[A_offd_j[k]];
if (counter[k_map] < start_index)
{
counter[k_map] = index;
AN_offd_j[index] = k_map;
AN_offd_data[index] = fabs(A_offd_data[k]);
index++;
}
else
{
AN_offd_data[counter[k_map]] += fabs(A_offd_data[k]);
}
}
row++;
}
start_index = index;
}
for (i=0; i < AN_num_nonzeros_offd; i++)
AN_offd_data[i] /= num_fun2;
}
break;
case 3: /* largest element in each block (not abs. value ) */
{
for (i=0; i < num_nodes; i++)
{
for (j=0; j < num_functions; j++)
{
for (k=A_offd_i[row]; k < A_offd_i[row+1]; k++)
{
k_map = map_to_map[A_offd_j[k]];
if (counter[k_map] < start_index)
{
counter[k_map] = index;
AN_offd_j[index] = k_map;
AN_offd_data[index] = A_offd_data[k];
index++;
}
else
{
if (fabs(A_offd_data[k]) >
fabs(AN_offd_data[counter[k_map]]))
AN_offd_data[counter[k_map]] = A_offd_data[k];
}
}
row++;
}
start_index = index;
}
}
break;
case 4: /* inf. norm (row-sum) */
{
data = hypre_CTAlloc(HYPRE_Real, AN_num_nonzeros_offd*num_functions);
for (i=0; i < num_nodes; i++)
{
for (j=0; j < num_functions; j++)
{
for (k=A_offd_i[row]; k < A_offd_i[row+1]; k++)
{
k_map = map_to_map[A_offd_j[k]];
if (counter[k_map] < start_index)
{
counter[k_map] = index;
AN_offd_j[index] = k_map;
data[index*num_functions + j] = fabs(A_offd_data[k]);
index++;
}
else
{
data[(counter[k_map])*num_functions + j] += fabs(A_offd_data[k]);
}
}
row++;
}
start_index = index;
}
for (i=0; i < AN_num_nonzeros_offd; i++)
{
AN_offd_data[i] = data[i*num_functions];
for (j=1; j< num_functions; j++)
{
AN_offd_data[i] = hypre_max( AN_offd_data[i],data[i*num_functions+j]);
}
}
hypre_TFree(data);
}
break;
case 6: /* sum of value of all elements in block */
{
for (i=0; i < num_nodes; i++)
{
for (j=0; j < num_functions; j++)
{
for (k=A_offd_i[row]; k < A_offd_i[row+1]; k++)
{
k_map = map_to_map[A_offd_j[k]];
if (counter[k_map] < start_index)
{
counter[k_map] = index;
AN_offd_j[index] = k_map;
AN_offd_data[index] = (A_offd_data[k]);
index++;
}
else
{
AN_offd_data[counter[k_map]] += (A_offd_data[k]);
}
}
row++;
}
start_index = index;
}
}
break;
}
hypre_TFree(map_to_map);
}
if (diag_option ==1 )
{
/* make the diag entry the negative of the sum of off-diag entries (here we are adding the
off_diag contribution)*/
/* the diagonal is the first element listed in each row of AN_diag_data - */
for (i=0; i < num_nodes; i++)
{
sum = 0.0;
for (k = AN_offd_i[i]; k < AN_offd_i[i+1]; k++)
{
sum += AN_offd_data[k];
}
index = AN_diag_i[i];/* location of diag entry in data */
AN_diag_data[index] -= sum; /* subtract from current value */
}
}
AN = hypre_ParCSRMatrixCreate(comm, global_num_nodes, global_num_nodes,
row_starts_AN, row_starts_AN, num_cols_offd_AN,
AN_num_nonzeros_diag, AN_num_nonzeros_offd);
/* we already created the diag and offd matrices - so we don't need the ones
created above */
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(AN));
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(AN));
hypre_ParCSRMatrixDiag(AN) = AN_diag;
hypre_ParCSRMatrixOffd(AN) = AN_offd;
hypre_ParCSRMatrixColMapOffd(AN) = col_map_offd_AN;
hypre_ParCSRMatrixCommPkg(AN) = comm_pkg_AN;
new_num_cols_offd = num_functions*num_cols_offd_AN;
if (new_num_cols_offd > num_cols_offd)
{
new_col_map_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd);
cnt = 0;
for (i=0; i < num_cols_offd_AN; i++)
{
for (j=0; j < num_functions; j++)
{
new_col_map_offd[cnt++] = num_functions*col_map_offd_AN[i]+j;
}
}
cnt = 0;
for (i=0; i < num_cols_offd; i++)
{
while (col_map_offd[i] > new_col_map_offd[cnt])
cnt++;
col_map_offd[i] = cnt++;
}
for (i=0; i < num_recvs+1; i++)
{
recv_vec_starts[i] = num_functions*recv_vec_starts_AN[i];
}
for (i=0; i < num_nonzeros_offd; i++)
{
j = A_offd_j[i];
A_offd_j[i] = col_map_offd[j];
}
hypre_ParCSRMatrixColMapOffd(A) = new_col_map_offd;
hypre_CSRMatrixNumCols(A_offd) = new_num_cols_offd;
hypre_TFree(col_map_offd);
}
hypre_TFree(map_to_node);
new_send_elmts_size = send_map_starts_AN[num_sends]*num_functions;
if (new_send_elmts_size > send_map_starts[num_sends])
{
new_send_map_elmts = hypre_CTAlloc(HYPRE_Int,new_send_elmts_size);
cnt = 0;
send_map_starts[0] = 0;
for (i=0; i < num_sends; i++)
{
send_map_starts[i+1] = send_map_starts_AN[i+1]*num_functions;
for (j=send_map_starts_AN[i]; j < send_map_starts_AN[i+1]; j++)
{
for (k=0; k < num_functions; k++)
new_send_map_elmts[cnt++] = send_map_elmts_AN[j]*num_functions+k;
}
}
hypre_TFree(send_map_elmts);
hypre_ParCSRCommPkgSendMapElmts(comm_pkg) = new_send_map_elmts;
}
*AN_ptr = AN;
hypre_TFree(counter);
return hypre_error_flag;
}
/* This creates a scalar version of the CF_marker, dof_array and strength matrix (SN) */
HYPRE_Int
hypre_BoomerAMGCreateScalarCFS(hypre_ParCSRMatrix *SN,
                               HYPRE_Int *CFN_marker,
                               HYPRE_Int *col_offd_SN_to_AN,
                               HYPRE_Int num_functions,
                               HYPRE_Int nodal,
                               HYPRE_Int data,
                               HYPRE_Int **dof_func_ptr,
                               HYPRE_Int **CF_marker_ptr,
                               HYPRE_Int **col_offd_S_to_A_ptr,
                               hypre_ParCSRMatrix **S_ptr)
{
   /* Expand the nodal strength matrix SN (one row per node) into a scalar
    * strength matrix S (num_functions rows per node), and expand the nodal
    * CF marker into a scalar one.  Every nodal quantity (row/col starts,
    * comm package, column maps, sparsity pattern) is scaled/replicated by
    * num_functions.  If 'data' is nonzero, SN's data array is replicated
    * into S as well. */
   MPI_Comm comm = hypre_ParCSRMatrixComm(SN);
   hypre_ParCSRMatrix *S;
   hypre_CSRMatrix *S_diag;
   HYPRE_Int *S_diag_i;
   HYPRE_Int *S_diag_j;
   HYPRE_Real *S_diag_data;
   hypre_CSRMatrix *S_offd;
   HYPRE_Int *S_offd_i = NULL;
   HYPRE_Int *S_offd_j = NULL;
   HYPRE_Real *S_offd_data;
   HYPRE_Int *row_starts_S;
   HYPRE_Int *col_starts_S;
   HYPRE_Int *row_starts_SN = hypre_ParCSRMatrixRowStarts(SN);
   HYPRE_Int *col_starts_SN = hypre_ParCSRMatrixColStarts(SN);
   hypre_CSRMatrix *SN_diag = hypre_ParCSRMatrixDiag(SN);
   HYPRE_Int *SN_diag_i = hypre_CSRMatrixI(SN_diag);
   HYPRE_Int *SN_diag_j = hypre_CSRMatrixJ(SN_diag);
   HYPRE_Real *SN_diag_data;
   hypre_CSRMatrix *SN_offd = hypre_ParCSRMatrixOffd(SN);
   HYPRE_Int *SN_offd_i = hypre_CSRMatrixI(SN_offd);
   HYPRE_Int *SN_offd_j = hypre_CSRMatrixJ(SN_offd);
   HYPRE_Real *SN_offd_data = NULL;
   HYPRE_Int *CF_marker;
   HYPRE_Int *col_map_offd_SN = hypre_ParCSRMatrixColMapOffd(SN);
   HYPRE_Int *col_map_offd_S;
   HYPRE_Int *dof_func;
   HYPRE_Int num_nodes = hypre_CSRMatrixNumRows(SN_diag);
   HYPRE_Int num_variables;
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(SN);
   HYPRE_Int num_sends;
   HYPRE_Int num_recvs;
   HYPRE_Int *send_procs;
   HYPRE_Int *send_map_starts;
   HYPRE_Int *send_map_elmts;
   HYPRE_Int *recv_procs;
   HYPRE_Int *recv_vec_starts;
   hypre_ParCSRCommPkg *comm_pkg_S;
   HYPRE_Int *send_procs_S;
   HYPRE_Int *send_map_starts_S;
   HYPRE_Int *send_map_elmts_S;
   HYPRE_Int *recv_procs_S;
   HYPRE_Int *recv_vec_starts_S;
   HYPRE_Int *col_offd_S_to_A = NULL;
   HYPRE_Int num_coarse_nodes;
   HYPRE_Int i,j,k,k1,jj,cnt;
   HYPRE_Int row, start, end;
   HYPRE_Int num_procs;
   HYPRE_Int num_cols_offd_SN = hypre_CSRMatrixNumCols(SN_offd);
   HYPRE_Int num_cols_offd_S;
   HYPRE_Int SN_num_nonzeros_diag;
   HYPRE_Int SN_num_nonzeros_offd;
   HYPRE_Int S_num_nonzeros_diag;
   HYPRE_Int S_num_nonzeros_offd;
   HYPRE_Int global_num_vars;
   HYPRE_Int global_num_cols;
   HYPRE_Int global_num_nodes;
   hypre_MPI_Comm_size(comm, &num_procs);
   num_variables = num_functions*num_nodes;
   /* Replicate the nodal CF marker num_functions times per node.
    * For nodal < 0, additionally build dof_func listing the function
    * number (0..num_functions-1) for each dof of each coarse node. */
   CF_marker = hypre_CTAlloc(HYPRE_Int, num_variables);
   if (nodal < 0)
   {
      cnt = 0;
      num_coarse_nodes = 0;
      for (i=0; i < num_nodes; i++)
      {
         if (CFN_marker[i] == 1) num_coarse_nodes++;
         for (j=0; j < num_functions; j++)
            CF_marker[cnt++] = CFN_marker[i];
      }
      dof_func = hypre_CTAlloc(HYPRE_Int,num_coarse_nodes*num_functions);
      cnt = 0;
      for (i=0; i < num_nodes; i++)
      {
         if (CFN_marker[i] == 1)
         {
            for (k=0; k < num_functions; k++)
               dof_func[cnt++] = k;
         }
      }
      *dof_func_ptr = dof_func;
   }
   else
   {
      cnt = 0;
      for (i=0; i < num_nodes; i++)
         for (j=0; j < num_functions; j++)
            CF_marker[cnt++] = CFN_marker[i];
   }
   *CF_marker_ptr = CF_marker;
   /* Scale the nodal row/column partitionings by num_functions.  Only
    * allocate separate column starts when SN actually has distinct ones. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   row_starts_S = hypre_CTAlloc(HYPRE_Int,2);
   for (i=0; i < 2; i++)
      row_starts_S[i] = num_functions*row_starts_SN[i];
   if (row_starts_SN != col_starts_SN)
   {
      col_starts_S = hypre_CTAlloc(HYPRE_Int,2);
      for (i=0; i < 2; i++)
         col_starts_S[i] = num_functions*col_starts_SN[i];
   }
   else
   {
      col_starts_S = row_starts_S;
   }
#else
   row_starts_S = hypre_CTAlloc(HYPRE_Int,num_procs+1);
   for (i=0; i < num_procs+1; i++)
      row_starts_S[i] = num_functions*row_starts_SN[i];
   if (row_starts_SN != col_starts_SN)
   {
      col_starts_S = hypre_CTAlloc(HYPRE_Int,num_procs+1);
      for (i=0; i < num_procs+1; i++)
         col_starts_S[i] = num_functions*col_starts_SN[i];
   }
   else
   {
      col_starts_S = row_starts_S;
   }
#endif
   /* Every nodal nonzero becomes a num_functions x num_functions-free
    * scalar pattern of num_functions entries per scalar row, so all
    * counts scale linearly by num_functions. */
   SN_num_nonzeros_diag = SN_diag_i[num_nodes];
   SN_num_nonzeros_offd = SN_offd_i[num_nodes];
   global_num_nodes = hypre_ParCSRMatrixGlobalNumRows(SN);
   global_num_cols = hypre_ParCSRMatrixGlobalNumCols(SN)*num_functions;
   global_num_vars = global_num_nodes*num_functions;
   S_num_nonzeros_diag = num_functions*SN_num_nonzeros_diag;
   S_num_nonzeros_offd = num_functions*SN_num_nonzeros_offd;
   num_cols_offd_S = num_functions*num_cols_offd_SN;
   S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_cols,
                                row_starts_S, col_starts_S, num_cols_offd_S,
                                S_num_nonzeros_diag, S_num_nonzeros_offd);
   S_diag = hypre_ParCSRMatrixDiag(S);
   S_offd = hypre_ParCSRMatrixOffd(S);
   S_diag_i = hypre_CTAlloc(HYPRE_Int, num_variables+1);
   S_offd_i = hypre_CTAlloc(HYPRE_Int, num_variables+1);
   S_diag_j = hypre_CTAlloc(HYPRE_Int, S_num_nonzeros_diag);
   hypre_CSRMatrixI(S_diag) = S_diag_i;
   hypre_CSRMatrixJ(S_diag) = S_diag_j;
   /* Data arrays only exist when the caller asked for them ('data' != 0). */
   if (data)
   {
      SN_diag_data = hypre_CSRMatrixData(SN_diag);
      S_diag_data = hypre_CTAlloc(HYPRE_Real, S_num_nonzeros_diag);
      hypre_CSRMatrixData(S_diag) = S_diag_data;
      if (num_cols_offd_S)
      {
         SN_offd_data = hypre_CSRMatrixData(SN_offd);
         S_offd_data = hypre_CTAlloc(HYPRE_Real, S_num_nonzeros_offd);
         hypre_CSRMatrixData(S_offd) = S_offd_data;
      }
   }
   hypre_CSRMatrixI(S_offd) = S_offd_i;
   /* Build S's communication package by scaling SN's: same processors,
    * with every map start/element expanded by num_functions. */
   if (comm_pkg)
   {
      comm_pkg_S = hypre_CTAlloc(hypre_ParCSRCommPkg,1);
      hypre_ParCSRCommPkgComm(comm_pkg_S) = comm;
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      hypre_ParCSRCommPkgNumSends(comm_pkg_S) = num_sends;
      num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
      hypre_ParCSRCommPkgNumRecvs(comm_pkg_S) = num_recvs;
      send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
      send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
      send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);
      recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
      recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
      send_procs_S = NULL;
      send_map_elmts_S = NULL;
      if (num_sends)
      {
         send_procs_S = hypre_CTAlloc(HYPRE_Int,num_sends);
         send_map_elmts_S = hypre_CTAlloc(HYPRE_Int,
                                          num_functions*send_map_starts[num_sends]);
      }
      send_map_starts_S = hypre_CTAlloc(HYPRE_Int,num_sends+1);
      recv_vec_starts_S = hypre_CTAlloc(HYPRE_Int,num_recvs+1);
      recv_procs_S = NULL;
      if (num_recvs) recv_procs_S = hypre_CTAlloc(HYPRE_Int,num_recvs);
      send_map_starts_S[0] = 0;
      for (i=0; i < num_sends; i++)
      {
         send_procs_S[i] = send_procs[i];
         send_map_starts_S[i+1] = num_functions*send_map_starts[i+1];
      }
      recv_vec_starts_S[0] = 0;
      for (i=0; i < num_recvs; i++)
      {
         recv_procs_S[i] = recv_procs[i];
         recv_vec_starts_S[i+1] = num_functions*recv_vec_starts[i+1];
      }
      /* Each nodal send element expands to the num_functions dofs of
       * that node. */
      cnt = 0;
      for (i=0; i < send_map_starts[num_sends]; i++)
      {
         k1 = num_functions*send_map_elmts[i];
         for (j=0; j < num_functions; j++)
         {
            send_map_elmts_S[cnt++] = k1+j;
         }
      }
      hypre_ParCSRCommPkgSendProcs(comm_pkg_S) = send_procs_S;
      hypre_ParCSRCommPkgSendMapStarts(comm_pkg_S) = send_map_starts_S;
      hypre_ParCSRCommPkgSendMapElmts(comm_pkg_S) = send_map_elmts_S;
      hypre_ParCSRCommPkgRecvProcs(comm_pkg_S) = recv_procs_S;
      hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_S) = recv_vec_starts_S;
      hypre_ParCSRMatrixCommPkg(S) = comm_pkg_S;
   }
   /* Expand the off-diagonal column map: nodal column c becomes scalar
    * columns c*num_functions .. c*num_functions+num_functions-1. */
   if (num_cols_offd_S)
   {
      S_offd_j = hypre_CTAlloc(HYPRE_Int, S_num_nonzeros_offd);
      hypre_CSRMatrixJ(S_offd) = S_offd_j;
      col_map_offd_S = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S);
      cnt = 0;
      for (i=0; i < num_cols_offd_SN; i++)
      {
         k1 = col_map_offd_SN[i]*num_functions;
         for (j=0; j < num_functions; j++)
            col_map_offd_S[cnt++] = k1+j;
      }
      hypre_ParCSRMatrixColMapOffd(S) = col_map_offd_S;
   }
   /* Same expansion for the optional SN->AN column translation array. */
   if (col_offd_SN_to_AN)
   {
      col_offd_S_to_A = hypre_CTAlloc(HYPRE_Int, num_cols_offd_S);
      cnt = 0;
      for (i=0; i < num_cols_offd_SN; i++)
      {
         k1 = col_offd_SN_to_AN[i]*num_functions;
         for (j=0; j < num_functions; j++)
            col_offd_S_to_A[cnt++] = k1+j;
      }
      *col_offd_S_to_A_ptr = col_offd_S_to_A;
   }
   /* Expand the diag part: the first scalar row of node i copies SN's
    * row i with columns scaled by num_functions; the k1-loop replicates
    * that row num_functions-1 more times with column shift +k1. */
   cnt = 0;
   row = 0;
   for (i=0; i < num_nodes; i++)
   {
      row++;
      start = cnt;
      for (j=SN_diag_i[i]; j < SN_diag_i[i+1]; j++)
      {
         jj = SN_diag_j[j];
         if (data) S_diag_data[cnt] = SN_diag_data[j];
         S_diag_j[cnt++] = jj*num_functions;
      }
      end = cnt;
      S_diag_i[row] = cnt;
      for (k1=1; k1 < num_functions; k1++)
      {
         row++;
         for (k=start; k < end; k++)
         {
            if (data) S_diag_data[cnt] = S_diag_data[k];
            S_diag_j[cnt++] = S_diag_j[k]+k1;
         }
         S_diag_i[row] = cnt;
      }
   }
   /* Expand the offd part with the same replicate-and-shift scheme. */
   cnt = 0;
   row = 0;
   for (i=0; i < num_nodes; i++)
   {
      row++;
      start = cnt;
      for (j=SN_offd_i[i]; j < SN_offd_i[i+1]; j++)
      {
         jj = SN_offd_j[j];
         if (data) S_offd_data[cnt] = SN_offd_data[j];
         S_offd_j[cnt++] = jj*num_functions;
      }
      end = cnt;
      S_offd_i[row] = cnt;
      for (k1=1; k1 < num_functions; k1++)
      {
         row++;
         for (k=start; k < end; k++)
         {
            if (data) S_offd_data[cnt] = S_offd_data[k];
            S_offd_j[cnt++] = S_offd_j[k]+k1;
         }
         S_offd_i[row] = cnt;
      }
   }
   *S_ptr = S;
   return hypre_error_flag;
}
/* This function just finds the scalar CF_marker and dof_func */
HYPRE_Int
hypre_BoomerAMGCreateScalarCF(HYPRE_Int *CFN_marker,
                              HYPRE_Int num_functions,
                              HYPRE_Int num_nodes,
                              HYPRE_Int **dof_func_ptr,
                              HYPRE_Int **CF_marker_ptr)
{
   /* Expand a nodal CF marker into its scalar counterpart: each node's
    * marker is replicated once per function.  Also build dof_func, which
    * lists the function number (0..num_functions-1) for every dof of
    * every coarse (CFN_marker == 1) node. */
   HYPRE_Int *CF_marker;
   HYPRE_Int *dof_func;
   HYPRE_Int num_variables;
   HYPRE_Int num_coarse_nodes;
   HYPRE_Int node, fn, pos;

   num_variables = num_functions*num_nodes;
   CF_marker = hypre_CTAlloc(HYPRE_Int, num_variables);

   /* Single pass: replicate markers and count coarse nodes. */
   num_coarse_nodes = 0;
   pos = 0;
   for (node = 0; node < num_nodes; node++)
   {
      if (CFN_marker[node] == 1)
      {
         num_coarse_nodes++;
      }
      for (fn = 0; fn < num_functions; fn++)
      {
         CF_marker[pos++] = CFN_marker[node];
      }
   }

   /* One dof per function for each coarse node, numbered by function. */
   dof_func = hypre_CTAlloc(HYPRE_Int,num_coarse_nodes*num_functions);
   pos = 0;
   for (node = 0; node < num_nodes; node++)
   {
      if (CFN_marker[node] == 1)
      {
         for (fn = 0; fn < num_functions; fn++)
         {
            dof_func[pos++] = fn;
         }
      }
   }

   *dof_func_ptr = dof_func;
   *CF_marker_ptr = CF_marker;
   return hypre_error_flag;
}
| 36,939 | 29.604805 | 97 | c |
AMG | AMG-master/parcsr_ls/par_nongalerkin.c | /*BHEADER**********************************************************************
* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
* This file is part of AMG. See files README and COPYRIGHT for details.
*
* AMG is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
***********************************************************************EHEADER*/
#include "_hypre_parcsr_ls.h"
#include "../HYPRE.h"
/* This file contains the routines for constructing non-Galerkin coarse grid
* operators, based on the original Galerkin coarse grid
*/
/* Gather array[ indices[start] ], ..., array[ indices[end] ] into
 * output[0..end-start], preserving order.
 * Assumptions:
 *      output is of length end-start+1
 *      indices never contains an index that goes out of bounds in array
 * */
HYPRE_Int
hypre_GrabSubArray(HYPRE_Int * indices,
                   HYPRE_Int start,
                   HYPRE_Int end,
                   HYPRE_Int * array,
                   HYPRE_Int * output)
{
   HYPRE_Int pos;

   for (pos = start; pos <= end; pos++)
   {
      output[pos - start] = array[ indices[pos] ];
   }
   return 0;
}
/* Quicksort v into ascending order of |w| (w is HYPRE_Real); w is
 * permuted in lockstep with v. */
void hypre_qsort2_abs( HYPRE_Int *v,
                       HYPRE_Real *w,
                       HYPRE_Int left,
                       HYPRE_Int right )
{
   HYPRE_Int pos, split;

   if (left >= right)
   {
      return;
   }

   /* Use the middle element as pivot; partition around |pivot|. */
   hypre_swap2( v, w, left, (left+right)/2);
   split = left;
   for (pos = left+1; pos <= right; pos++)
   {
      if (fabs(w[pos]) < fabs(w[left]))
      {
         hypre_swap2(v, w, ++split, pos);
      }
   }
   hypre_swap2(v, w, left, split);

   /* Recurse on the two sides of the pivot. */
   hypre_qsort2_abs(v, w, left, split-1);
   hypre_qsort2_abs(v, w, split+1, right);
}
/* Compute the intersection of x and y, placing
* the intersection in z. Additionally, the array
* x_data is associated with x, i.e., the entries
* that we grab from x, we also grab from x_data.
* If x[k] is placed in z[m], then x_data[k] goes to
* output_x_data[m].
*
* Assumptions:
* z is of length min(x_length, y_length)
* x and y are sorted
 *      x_length and y_length are similar in size; if one array is much
 *      longer than the other, binary searching in the longer array is faster.
* */
HYPRE_Int
hypre_IntersectTwoArrays(HYPRE_Int *x,
                         HYPRE_Real *x_data,
                         HYPRE_Int x_length,
                         HYPRE_Int *y,
                         HYPRE_Int y_length,
                         HYPRE_Int *z,
                         HYPRE_Real *output_x_data,
                         HYPRE_Int *intersect_length)
{
   /* Classic sorted two-pointer merge: advance the cursor of whichever
    * array holds the smaller value; on a match, record the entry (and its
    * x_data companion) and advance both. */
   HYPRE_Int ix = 0;
   HYPRE_Int iy = 0;

   *intersect_length = 0;
   while ( (ix < x_length) && (iy < y_length) )
   {
      if (x[ix] < y[iy])
      {
         ix++;
      }
      else if (x[ix] > y[iy])
      {
         iy++;
      }
      else
      {
         z[*intersect_length] = x[ix];
         output_x_data[*intersect_length] = x_data[ix];
         (*intersect_length)++;
         ix++;
         iy++;
      }
   }
   return 1;
}
/* Copy CSR matrix A to CSR matrix B. The column indices are
* assumed to be sorted, and the sparsity pattern of B is a subset
* of the sparsity pattern of A.
*
* Assumptions:
* Column indices of A and B are sorted
* Sparsity pattern of B is a subset of A's
* A and B are the same size and have same data layout
**/
HYPRE_Int
hypre_SortedCopyParCSRData(hypre_ParCSRMatrix *A,
                           hypre_ParCSRMatrix *B)
{
   /* Grab off A and B's data structures */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
   HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag);
   HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag);
   HYPRE_Real *B_diag_data = hypre_CSRMatrixData(B_diag);
   hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
   HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd);
   HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd);
   HYPRE_Real *B_offd_data = hypre_CSRMatrixData(B_offd);
   HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int *temp_int_array = NULL; /* scratch for intersections, grown on demand */
   HYPRE_Int temp_int_array_length=0;
   HYPRE_Int i, length, offset_A, offset_B;
   for(i = 0; i < num_variables; i++)
   {
      /* Deal with the first row entries, which may be diagonal elements */
      /* NOTE(review): this indexes A_diag_j[A_diag_i[i]] and
       * B_diag_j[B_diag_i[i]] unconditionally -- assumes no empty diag
       * rows; confirm against callers. */
      if( A_diag_j[A_diag_i[i]] == i)
      { offset_A = 1; }
      else
      { offset_A = 0; }
      if( B_diag_j[B_diag_i[i]] == i)
      { offset_B = 1; }
      else
      { offset_B = 0; }
      /* Copy the diagonal value directly when both rows carry one. */
      if( (offset_B == 1) && (offset_A == 1) )
      { B_diag_data[B_diag_i[i]] = A_diag_data[A_diag_i[i]]; }
      /* This finds the intersection of the column indices, and
       * also copies the matching data in A to the data array in B
       **/
      /* Grow the scratch array only when this row is longer than any
       * previous one, avoiding a per-row allocation. */
      if( (A_diag_i[i+1] - A_diag_i[i] - offset_A) > temp_int_array_length )
      {
         hypre_TFree(temp_int_array);
         temp_int_array_length = (A_diag_i[i+1] - A_diag_i[i] - offset_A);
         temp_int_array = hypre_CTAlloc(HYPRE_Int, temp_int_array_length);
      }
      /* Intersect the (sorted) off-diagonal-of-row portions of A's and
       * B's diag rows; matching A values land in B's data array. */
      hypre_IntersectTwoArrays(&(A_diag_j[A_diag_i[i] + offset_A]),
                               &(A_diag_data[A_diag_i[i] + offset_A]),
                               A_diag_i[i+1] - A_diag_i[i] - offset_A,
                               &(B_diag_j[B_diag_i[i] + offset_B]),
                               B_diag_i[i+1] - B_diag_i[i] - offset_B,
                               temp_int_array,
                               &(B_diag_data[B_diag_i[i] + offset_B]),
                               &length);
      if( (A_offd_i[i+1] - A_offd_i[i]) > temp_int_array_length )
      {
         hypre_TFree(temp_int_array);
         temp_int_array_length = (A_offd_i[i+1] - A_offd_i[i]);
         temp_int_array = hypre_CTAlloc(HYPRE_Int, temp_int_array_length);
      }
      /* Same intersection-copy for the offd rows (no diagonal offsets). */
      hypre_IntersectTwoArrays(&(A_offd_j[A_offd_i[i]]),
                               &(A_offd_data[A_offd_i[i]]),
                               A_offd_i[i+1] - A_offd_i[i],
                               &(B_offd_j[B_offd_i[i]]),
                               B_offd_i[i+1] - B_offd_i[i],
                               temp_int_array,
                               &(B_offd_data[B_offd_i[i]]),
                               &length);
   }
   if(temp_int_array)
   {  hypre_TFree(temp_int_array); }
   return 1;
}
/*
* Equivalent to hypre_BoomerAMGCreateS, except, the data array of S
* is not Null and contains the data entries from A.
*/
HYPRE_Int
hypre_BoomerAMG_MyCreateS(hypre_ParCSRMatrix *A,
                          HYPRE_Real strength_threshold,
                          HYPRE_Real max_row_sum,
                          HYPRE_Int num_functions,
                          HYPRE_Int *dof_func,
                          hypre_ParCSRMatrix **S_ptr)
{
   /* Build the strength-of-connection matrix S for A, like
    * hypre_BoomerAMGCreateS, except S keeps a data array holding the
    * corresponding entries of A (needed by the non-Galerkin routines). */
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = NULL;
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_Int num_nonzeros_diag;
   HYPRE_Int num_nonzeros_offd = 0;
   HYPRE_Int num_cols_offd = 0;
   hypre_ParCSRMatrix *S;
   hypre_CSRMatrix *S_diag;
   HYPRE_Int *S_diag_i;
   HYPRE_Int *S_diag_j;
   HYPRE_Real *S_diag_data;
   hypre_CSRMatrix *S_offd;
   HYPRE_Int *S_offd_i = NULL;
   HYPRE_Int *S_offd_j = NULL;
   HYPRE_Real *S_offd_data;
   HYPRE_Real diag, row_scale, row_sum;
   HYPRE_Int i, jA, jS;
   HYPRE_Int ierr = 0;
   HYPRE_Int *dof_func_offd;
   HYPRE_Int num_sends;
   HYPRE_Int *int_buf_data;
   HYPRE_Int index, start, j;
   /*--------------------------------------------------------------
    * Compute a  ParCSR strength matrix, S.
    *
    * For now, the "strength" of dependence/influence is defined in
    * the following way: i depends on j if
    *     aij > hypre_max (k != i) aik,    aii < 0
    * or
    *     aij < hypre_min (k != i) aik,    aii >= 0
    * Then S_ij = aij, else S_ij = 0.
    *
    * NOTE: the entries are negative initially, corresponding
    * to "unaccounted-for" dependence.
    *----------------------------------------------------------------*/
   num_nonzeros_diag = A_diag_i[num_variables];
   num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   A_offd_i = hypre_CSRMatrixI(A_offd);
   num_nonzeros_offd = A_offd_i[num_variables];
   /* Initialize S with the same pattern sizes as A. */
   S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars,
                                row_starts, row_starts,
                                num_cols_offd, num_nonzeros_diag, num_nonzeros_offd);
   /* row_starts is owned by A, col_starts = row_starts */
   hypre_ParCSRMatrixSetRowStartsOwner(S,0);
   S_diag = hypre_ParCSRMatrixDiag(S);
   hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables+1);
   hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag);
   hypre_CSRMatrixData(S_diag) = hypre_CTAlloc(HYPRE_Real, num_nonzeros_diag);
   S_offd = hypre_ParCSRMatrixOffd(S);
   hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables+1);
   S_diag_i = hypre_CSRMatrixI(S_diag);
   S_diag_j = hypre_CSRMatrixJ(S_diag);
   S_diag_data = hypre_CSRMatrixData(S_diag);
   S_offd_i = hypre_CSRMatrixI(S_offd);
   dof_func_offd = NULL;
   if (num_cols_offd)
   {
      A_offd_data = hypre_CSRMatrixData(A_offd);
      hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd);
      hypre_CSRMatrixData(S_offd) = hypre_CTAlloc(HYPRE_Real, num_nonzeros_offd);
      S_offd_j = hypre_CSRMatrixJ(S_offd);
      S_offd_data = hypre_CSRMatrixData(S_offd);
      hypre_ParCSRMatrixColMapOffd(S) = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
      if (num_functions > 1)
         dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd);
   }
   /*-------------------------------------------------------------------
    * Get the dof_func data for the off-processor columns
    *-------------------------------------------------------------------*/
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   if (num_functions > 1)
   {
      /* Exchange the function numbers of the dofs this rank sends so
       * that dof_func_offd covers all off-processor columns. */
      int_buf_data = hypre_CTAlloc(HYPRE_Int,hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                                                             num_sends));
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                  dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data);
   }
   /* give S same nonzero structure as A */
   hypre_ParCSRMatrixCopy(A,S,1);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,diag,row_scale,row_sum,jA) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < num_variables; i++)
   {
      /* First entry of each diag row is taken as the diagonal value. */
      diag = A_diag_data[A_diag_i[i]];
      /* compute scaling factor and row sum */
      row_scale = 0.0;
      row_sum = diag;
      /* The four branches below differ only in whether the extreme
       * (max for diag<0, min otherwise) and the row sum are restricted
       * to entries of the same function (num_functions > 1). */
      if (num_functions > 1)
      {
         if (diag < 0)
         {
            for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
            {
               if (dof_func[i] == dof_func[A_diag_j[jA]])
               {
                  row_scale = hypre_max(row_scale, A_diag_data[jA]);
                  row_sum += A_diag_data[jA];
               }
            }
            for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
            {
               if (dof_func[i] == dof_func_offd[A_offd_j[jA]])
               {
                  row_scale = hypre_max(row_scale, A_offd_data[jA]);
                  row_sum += A_offd_data[jA];
               }
            }
         }
         else
         {
            for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
            {
               if (dof_func[i] == dof_func[A_diag_j[jA]])
               {
                  row_scale = hypre_min(row_scale, A_diag_data[jA]);
                  row_sum += A_diag_data[jA];
               }
            }
            for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
            {
               if (dof_func[i] == dof_func_offd[A_offd_j[jA]])
               {
                  row_scale = hypre_min(row_scale, A_offd_data[jA]);
                  row_sum += A_offd_data[jA];
               }
            }
         }
      }
      else
      {
         if (diag < 0)
         {
            for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
            {
               row_scale = hypre_max(row_scale, A_diag_data[jA]);
               row_sum += A_diag_data[jA];
            }
            for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
            {
               row_scale = hypre_max(row_scale, A_offd_data[jA]);
               row_sum += A_offd_data[jA];
            }
         }
         else
         {
            for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
            {
               row_scale = hypre_min(row_scale, A_diag_data[jA]);
               row_sum += A_diag_data[jA];
            }
            for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
            {
               row_scale = hypre_min(row_scale, A_offd_data[jA]);
               row_sum += A_offd_data[jA];
            }
         }
      }
      /* compute row entries of S: a column index of -1 marks a weak
       * (dropped) connection; surviving entries keep A's values. */
      S_diag_j[A_diag_i[i]] = -1;
      if ((fabs(row_sum) > fabs(diag)*max_row_sum) && (max_row_sum < 1.0))
      {
         /* make all dependencies weak */
         for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
         {
            S_diag_j[jA] = -1;
         }
         for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
         {
            S_offd_j[jA] = -1;
         }
      }
      else
      {
         if (num_functions > 1)
         {
            if (diag < 0)
            {
               for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
               {
                  if (A_diag_data[jA] <= strength_threshold * row_scale
                      || dof_func[i] != dof_func[A_diag_j[jA]])
                  {
                     S_diag_j[jA] = -1;
                  }
               }
               for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
               {
                  if (A_offd_data[jA] <= strength_threshold * row_scale
                      || dof_func[i] != dof_func_offd[A_offd_j[jA]])
                  {
                     S_offd_j[jA] = -1;
                  }
               }
            }
            else
            {
               for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
               {
                  if (A_diag_data[jA] >= strength_threshold * row_scale
                      || dof_func[i] != dof_func[A_diag_j[jA]])
                  {
                     S_diag_j[jA] = -1;
                  }
               }
               for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
               {
                  if (A_offd_data[jA] >= strength_threshold * row_scale
                      || dof_func[i] != dof_func_offd[A_offd_j[jA]])
                  {
                     S_offd_j[jA] = -1;
                  }
               }
            }
         }
         else
         {
            if (diag < 0)
            {
               for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
               {
                  if (A_diag_data[jA] <= strength_threshold * row_scale)
                  {
                     S_diag_j[jA] = -1;
                  }
               }
               for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
               {
                  if (A_offd_data[jA] <= strength_threshold * row_scale)
                  {
                     S_offd_j[jA] = -1;
                  }
               }
            }
            else
            {
               for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
               {
                  if (A_diag_data[jA] >= strength_threshold * row_scale)
                  {
                     S_diag_j[jA] = -1;
                  }
               }
               for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
               {
                  if (A_offd_data[jA] >= strength_threshold * row_scale)
                  {
                     S_offd_j[jA] = -1;
                  }
               }
            }
         }
      }
   }
   /*--------------------------------------------------------------
    * "Compress" the strength matrix.
    *
    * NOTE: S has *NO DIAGONAL ELEMENT* on any row.  Caveat Emptor!
    *
    * NOTE: This "compression" section of code may not be removed, the
    * non-Galerkin routine depends on it.
    *----------------------------------------------------------------*/
   /* RDF: not sure if able to thread this loop */
   /* Squeeze out the -1 (weak) entries, rewriting S_diag in place. */
   jS = 0;
   for (i = 0; i < num_variables; i++)
   {
      S_diag_i[i] = jS;
      for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++)
      {
         if (S_diag_j[jA] > -1)
         {
            S_diag_j[jS]    = S_diag_j[jA];
            S_diag_data[jS] = S_diag_data[jA];
            jS++;
         }
      }
   }
   S_diag_i[num_variables] = jS;
   hypre_CSRMatrixNumNonzeros(S_diag) = jS;
   /* RDF: not sure if able to thread this loop */
   /* Same compression for S_offd.  NOTE(review): when num_cols_offd == 0
    * the inner loop never executes (all A_offd row bounds coincide), so
    * the unallocated S_offd_data is not touched -- confirm this invariant
    * holds for all callers. */
   jS = 0;
   for (i = 0; i < num_variables; i++)
   {
      S_offd_i[i] = jS;
      for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
      {
         if (S_offd_j[jA] > -1)
         {
            S_offd_j[jS]    = S_offd_j[jA];
            S_offd_data[jS] = S_offd_data[jA];
            jS++;
         }
      }
   }
   S_offd_i[num_variables] = jS;
   hypre_CSRMatrixNumNonzeros(S_offd) = jS;
   hypre_ParCSRMatrixCommPkg(S) = NULL;
   *S_ptr        = S;
   hypre_TFree(dof_func_offd);
   return (ierr);
}
/**
 * Reset the IJBuffer counters to the empty state
 **/
HYPRE_Int
hypre_NonGalerkinIJBufferInit( HYPRE_Int *ijbuf_cnt,      /* See NonGalerkinIJBufferWrite for parameter descriptions */
                               HYPRE_Int *ijbuf_rowcounter,
                               HYPRE_Int *ijbuf_numcols )
{
   /* No entries yet; one (still empty) current row is always open. */
   *ijbuf_cnt        = 0;
   *ijbuf_rowcounter = 1;   /* Always points to the next row */
   ijbuf_numcols[0]  = 0;
   return 0;
}
/**
 * Open a new row in the buffer (reusing the current slot if it is empty)
 **/
HYPRE_Int
hypre_NonGalerkinIJBufferNewRow(HYPRE_Int *ijbuf_rownums,  /* See NonGalerkinIJBufferWrite for parameter descriptions */
                                HYPRE_Int *ijbuf_numcols,
                                HYPRE_Int *ijbuf_rowcounter,
                                HYPRE_Int new_row)
{
   HYPRE_Int last = (*ijbuf_rowcounter) - 1;

   if ( ijbuf_numcols[last] == 0 )
   {
      /* The current row never received an entry -- overwrite its slot. */
      ijbuf_rownums[last] = new_row;
   }
   else
   {
      /* Start a fresh row right after the current one. */
      ijbuf_rownums[last + 1] = new_row;
      ijbuf_numcols[last + 1] = 0;
      (*ijbuf_rowcounter)++;
   }
   return 0;
}
/**
 * Compress the current (last) row in an IJ Buffer: sort its entries by
 * column and sum duplicates, decrementing the buffer count accordingly
 **/
HYPRE_Int
hypre_NonGalerkinIJBufferCompressRow( HYPRE_Int *ijbuf_cnt,      /* See NonGalerkinIJBufferWrite for parameter descriptions */
                                      HYPRE_Int ijbuf_rowcounter,
                                      HYPRE_Real *ijbuf_data,
                                      HYPRE_Int *ijbuf_cols,
                                      HYPRE_Int *ijbuf_rownums,
                                      HYPRE_Int *ijbuf_numcols)
{
   HYPRE_Int row_len, first, last, k, ndup;

   /* The current row occupies the last row_len slots of the buffer. */
   row_len = ijbuf_numcols[ ijbuf_rowcounter-1 ];
   first   = (*ijbuf_cnt) - row_len;
   last    = (*ijbuf_cnt) - 1;

   /* Sort by column index so that duplicates become adjacent. */
   hypre_qsort1(ijbuf_cols, ijbuf_data, first, last);

   /* Fold each duplicate into its predecessor, shifting survivors
    * down by the number of duplicates seen so far. */
   ndup = 0;
   for (k = first + 1; k <= last; k++)
   {
      if ( ijbuf_cols[k] == ijbuf_cols[k-1] )
      {
         ndup++;
         ijbuf_data[k - ndup] += ijbuf_data[k];
      }
      else if (ndup > 0)
      {
         ijbuf_data[k - ndup] = ijbuf_data[k];
         ijbuf_cols[k - ndup] = ijbuf_cols[k];
      }
   }

   (*ijbuf_cnt) -= ndup;
   ijbuf_numcols[ ijbuf_rowcounter-1 ] -= ndup;
   return 0;
}
/**
 * Compress the entire buffer, removing duplicate rows
 **/
HYPRE_Int
hypre_NonGalerkinIJBufferCompress( HYPRE_Int ijbuf_size,
                                   HYPRE_Int *ijbuf_cnt,      /* See NonGalerkinIJBufferWrite for parameter descriptions */
                                   HYPRE_Int *ijbuf_rowcounter,
                                   HYPRE_Real **ijbuf_data,
                                   HYPRE_Int **ijbuf_cols,
                                   HYPRE_Int **ijbuf_rownums,
                                   HYPRE_Int **ijbuf_numcols)
{
   HYPRE_Int ierr = 0;
   HYPRE_Int *indys = hypre_CTAlloc(HYPRE_Int, (*ijbuf_rowcounter) );
   HYPRE_Int i, j, duplicate, cnt_new, rowcounter_new, prev_row;
   HYPRE_Int row_start, row_stop, row_loc, row;
   HYPRE_Real *data_new;
   HYPRE_Int *cols_new;
   HYPRE_Int *rownums_new;
   HYPRE_Int *numcols_new;
   /* Do a sort on rownums, but store the original order in indys.
    * Then see if there are any duplicate rows */
   for(i = 0; i < (*ijbuf_rowcounter); i++)
   {   indys[i] = i; }
   hypre_qsort2i((*ijbuf_rownums), indys, 0, (*ijbuf_rowcounter)-1);
   /* If the sort left indys a strictly consecutive sequence, the rows
    * were already in order with no repeats; any break in the sequence
    * flags a possible duplicate. */
   duplicate = 0;
   for(i = 1; i < (*ijbuf_rowcounter); i++)
   {
      if(indys[i] != (indys[i-1]+1))
      {
         duplicate = 1;
         break;
      }
   }
   /* Compress duplicate rows */
   if(duplicate)
   {
      /* Accumulate numcols, so that it functions like a CSR row-pointer */
      for(i = 1; i < (*ijbuf_rowcounter); i++)
      {   (*ijbuf_numcols)[i] += (*ijbuf_numcols)[i-1]; }
      /* Initialize new buffer */
      prev_row       = -1;    /* sentinel: no row copied yet */
      rowcounter_new = 0;
      cnt_new        = 0;
      data_new       = hypre_CTAlloc(HYPRE_Real, ijbuf_size);
      cols_new       = hypre_CTAlloc(HYPRE_Int, ijbuf_size);
      rownums_new    = hypre_CTAlloc(HYPRE_Int, ijbuf_size);
      numcols_new    = hypre_CTAlloc(HYPRE_Int, ijbuf_size);
      numcols_new[0] = 0;
      /* Cycle through each row (in sorted global-row order, so equal
       * rows are visited consecutively and merge into one new row). */
      for(i = 0; i < (*ijbuf_rowcounter); i++)
      {
         /* Find which row this is in local and global numberings, and where
          * this row's data starts and stops in the buffer*/
         row_loc = indys[i];
         row = (*ijbuf_rownums)[i];
         if(row_loc > 0)
         {
            row_start = (*ijbuf_numcols)[row_loc-1];
            row_stop  = (*ijbuf_numcols)[row_loc];
         }
         else
         {
            row_start = 0;
            row_stop  = (*ijbuf_numcols)[row_loc];
         }
         /* Is this a new row?  If so, compress previous row, and add a new
          * one.  Noting that prev_row = -1 is a special value */
         if(row != prev_row)
         {
            if(prev_row != -1)
            {
               /* Compress previous row */
               hypre_NonGalerkinIJBufferCompressRow(&cnt_new, rowcounter_new, data_new,
                                                    cols_new, rownums_new, numcols_new);
            }
            prev_row = row;
            numcols_new[rowcounter_new] = 0;
            rownums_new[rowcounter_new] = row;
            rowcounter_new++;
         }
         /* Copy row into new buffer */
         for(j = row_start; j < row_stop; j++)
         {
            data_new[cnt_new] = (*ijbuf_data)[j];
            cols_new[cnt_new] = (*ijbuf_cols)[j];
            numcols_new[rowcounter_new-1]++;
            cnt_new++;
         }
      }
      /* Compress the final row (i equals the old rowcounter here, which
       * is >= 2 whenever the duplicate branch is taken). */
      if(i > 1)
      {
         hypre_NonGalerkinIJBufferCompressRow(&cnt_new, rowcounter_new, data_new,
                                              cols_new, rownums_new, numcols_new);
      }
      *ijbuf_cnt = cnt_new;
      *ijbuf_rowcounter = rowcounter_new;
      /* Point to the new buffer (freeing the old arrays first). */
      hypre_TFree(*ijbuf_data);
      hypre_TFree(*ijbuf_cols);
      hypre_TFree(*ijbuf_rownums);
      hypre_TFree(*ijbuf_numcols);
      (*ijbuf_data)    = data_new;
      (*ijbuf_cols)    = cols_new;
      (*ijbuf_rownums) = rownums_new;
      (*ijbuf_numcols) = numcols_new;
   }
   hypre_TFree(indys);
   return ierr;
}
/**
 * Do a buffered write to an IJ matrix.
 * That is, write to the buffer until the buffer is full. When the
 * buffer is full, flush it to the IJ matrix and reset the buffer
 * counters. In effect, this buffers the operation
 *    A[row_to_write, col_to_write] += val_to_write
 **/
HYPRE_Int
hypre_NonGalerkinIJBufferWrite( HYPRE_IJMatrix B, /* Unassembled matrix to add an entry to */
                                HYPRE_Int *ijbuf_cnt, /* current number of entries in the buffer */
                                HYPRE_Int ijbuf_size, /* max buffer size (allocated length of the arrays below) */
                                HYPRE_Int *ijbuf_rowcounter, /* num of rows in rownums, (i.e., size of rownums) */
                                /* This counter will increase as you call this function for multiple rows */
                                HYPRE_Real **ijbuf_data, /* Array of values, of size ijbuf_size */
                                HYPRE_Int **ijbuf_cols, /* Array of col indices, of size ijbuf_size */
                                HYPRE_Int **ijbuf_rownums, /* Holds row-indices that with numcols makes for a CSR-like data structure*/
                                HYPRE_Int **ijbuf_numcols, /* rownums[i] is the row num, and numcols holds the number of entries being added */
                                /* for that row. Note numcols is not cumulative like an actual CSR data structure*/
                                HYPRE_Int row_to_write, /* Entry to add to the buffer */
                                HYPRE_Int col_to_write, /* Ditto */
                                HYPRE_Real val_to_write ) /* Ditto */
{
   HYPRE_Int ierr = 0;

   /* Entries for a given row are assumed to arrive consecutively; we only
    * compare against the LAST buffered row to decide if a new row starts. */
   if( (*ijbuf_cnt) == 0 )
   {
      /* brand new buffer: increment buffer structures for the new row */
      hypre_NonGalerkinIJBufferNewRow((*ijbuf_rownums), (*ijbuf_numcols), ijbuf_rowcounter, row_to_write);
   }
   else if((*ijbuf_rownums)[ (*ijbuf_rowcounter)-1 ] != row_to_write)
   {
      /* If this is a new row, compress the previous row */
      hypre_NonGalerkinIJBufferCompressRow(ijbuf_cnt, (*ijbuf_rowcounter), (*ijbuf_data),
                                           (*ijbuf_cols), (*ijbuf_rownums), (*ijbuf_numcols));
      /* increment buffer structures for the new row */
      hypre_NonGalerkinIJBufferNewRow( (*ijbuf_rownums), (*ijbuf_numcols), ijbuf_rowcounter, row_to_write);
   }

   /* Add new entry to buffer: append (col, val) and credit it to the
    * current (last) row's entry count */
   (*ijbuf_cols)[(*ijbuf_cnt)] = col_to_write;
   (*ijbuf_data)[(*ijbuf_cnt)] = val_to_write;
   (*ijbuf_numcols)[ (*ijbuf_rowcounter)-1 ]++;
   (*ijbuf_cnt)++;

   /* Buffer is full, write to the matrix object.
    * NOTE(review): the flush triggers at ijbuf_size-1, deliberately leaving
    * the last slot unused (conservative; keeps one slot of slack). */
   if ( (*ijbuf_cnt) == (ijbuf_size-1) )
   {
      /* If the last row is empty, decrement rowcounter */
      if( (*ijbuf_numcols)[ (*ijbuf_rowcounter)-1 ] == 0)
      {   (*ijbuf_rowcounter)--; }
      /* Compress and Add Entries */
      hypre_NonGalerkinIJBufferCompressRow(ijbuf_cnt, (*ijbuf_rowcounter), (*ijbuf_data),
                                           (*ijbuf_cols), (*ijbuf_rownums), (*ijbuf_numcols));
      hypre_NonGalerkinIJBufferCompress(ijbuf_size, ijbuf_cnt, ijbuf_rowcounter, ijbuf_data,
                                        ijbuf_cols, ijbuf_rownums, ijbuf_numcols);
      ierr += HYPRE_IJMatrixAddToValues(B, *ijbuf_rowcounter, (*ijbuf_numcols), (*ijbuf_rownums), (*ijbuf_cols), (*ijbuf_data));
      /* Reinitialize the buffer, then re-register the current row so that
       * subsequent writes for row_to_write continue seamlessly */
      hypre_NonGalerkinIJBufferInit( ijbuf_cnt, ijbuf_rowcounter, (*ijbuf_numcols));
      hypre_NonGalerkinIJBufferNewRow((*ijbuf_rownums), (*ijbuf_numcols), ijbuf_rowcounter, row_to_write);
   }

   return ierr;
}
/**
* Empty the IJ Buffer with a final AddToValues.
**/
HYPRE_Int
hypre_NonGalerkinIJBufferEmpty(HYPRE_IJMatrix B, /* See NonGalerkinIJBufferWrite for parameter descriptions */
                               HYPRE_Int ijbuf_size,
                               HYPRE_Int *ijbuf_cnt,
                               HYPRE_Int ijbuf_rowcounter,
                               HYPRE_Real **ijbuf_data,
                               HYPRE_Int **ijbuf_cols,
                               HYPRE_Int **ijbuf_rownums,
                               HYPRE_Int **ijbuf_numcols)
{
   HYPRE_Int ierr = 0;

   /* Only write when the buffer actually holds unwritten entries */
   if( (*ijbuf_cnt) > 0)
   {
      /* Finalize the last (possibly partial) row in the buffer */
      hypre_NonGalerkinIJBufferCompressRow(ijbuf_cnt, ijbuf_rowcounter, (*ijbuf_data),
                                           (*ijbuf_cols), (*ijbuf_rownums), (*ijbuf_numcols));

      /* Merge any duplicate rows so the buffer forms valid AddToValues input */
      hypre_NonGalerkinIJBufferCompress(ijbuf_size, ijbuf_cnt, &ijbuf_rowcounter, ijbuf_data,
                                        ijbuf_cols, ijbuf_rownums, ijbuf_numcols);

      /* Hand everything that remains to the IJ matrix */
      ierr += HYPRE_IJMatrixAddToValues(B, ijbuf_rowcounter, (*ijbuf_numcols),
                                        (*ijbuf_rownums), (*ijbuf_cols), (*ijbuf_data));
   }

   /* The buffer is empty from here on, whether or not anything was written */
   *ijbuf_cnt = 0;

   return ierr;
}
/*
 * Construct the sparsity pattern based on R_I * A * P, plus any entries
 * required by the drop tolerance
 */
hypre_ParCSRMatrix *
hypre_NonGalerkinSparsityPattern(hypre_ParCSRMatrix *R_IAP,
                                 hypre_ParCSRMatrix *RAP,
                                 HYPRE_Int * CF_marker,
                                 HYPRE_Real droptol,
                                 HYPRE_Int sym_collapse,
                                 HYPRE_Int collapse_beta )
{
   /* MPI Communicator */
   MPI_Comm comm = hypre_ParCSRMatrixComm(RAP);
   /* Declare R_IAP (diag/offd CSR blocks and offd column map) */
   hypre_CSRMatrix *R_IAP_diag = hypre_ParCSRMatrixDiag(R_IAP);
   HYPRE_Int *R_IAP_diag_i = hypre_CSRMatrixI(R_IAP_diag);
   HYPRE_Int *R_IAP_diag_j = hypre_CSRMatrixJ(R_IAP_diag);
   hypre_CSRMatrix *R_IAP_offd = hypre_ParCSRMatrixOffd(R_IAP);
   HYPRE_Int *R_IAP_offd_i = hypre_CSRMatrixI(R_IAP_offd);
   HYPRE_Int *R_IAP_offd_j = hypre_CSRMatrixJ(R_IAP_offd);
   HYPRE_Int *col_map_offd_R_IAP = hypre_ParCSRMatrixColMapOffd(R_IAP);
   /* Declare RAP */
   hypre_CSRMatrix *RAP_diag = hypre_ParCSRMatrixDiag(RAP);
   HYPRE_Int *RAP_diag_i = hypre_CSRMatrixI(RAP_diag);
   HYPRE_Real *RAP_diag_data = hypre_CSRMatrixData(RAP_diag);
   HYPRE_Int *RAP_diag_j = hypre_CSRMatrixJ(RAP_diag);
   HYPRE_Int first_col_diag_RAP = hypre_ParCSRMatrixFirstColDiag(RAP);
   HYPRE_Int num_cols_diag_RAP = hypre_CSRMatrixNumCols(RAP_diag);
   HYPRE_Int last_col_diag_RAP = first_col_diag_RAP + num_cols_diag_RAP - 1;
   hypre_CSRMatrix *RAP_offd = hypre_ParCSRMatrixOffd(RAP);
   HYPRE_Int *RAP_offd_i = hypre_CSRMatrixI(RAP_offd);
   HYPRE_Real *RAP_offd_data = NULL;
   HYPRE_Int *RAP_offd_j = hypre_CSRMatrixJ(RAP_offd);
   HYPRE_Int *col_map_offd_RAP = hypre_ParCSRMatrixColMapOffd(RAP);
   HYPRE_Int num_cols_RAP_offd = hypre_CSRMatrixNumCols(RAP_offd);
   HYPRE_Int num_variables = hypre_CSRMatrixNumRows(RAP_diag);
   /* Declare A */
   HYPRE_Int num_fine_variables = hypre_CSRMatrixNumRows(R_IAP_diag);
   /* Declare IJ matrices */
   HYPRE_IJMatrix Pattern;
   hypre_ParCSRMatrix *Pattern_CSR = NULL;
   /* Buffered IJAddToValues */
   HYPRE_Int ijbuf_cnt, ijbuf_size, ijbuf_rowcounter;
   HYPRE_Real *ijbuf_data;
   HYPRE_Int *ijbuf_cols, *ijbuf_rownums, *ijbuf_numcols;
   /* Buffered IJAddToValues for Symmetric Entries */
   HYPRE_Int ijbuf_sym_cnt, ijbuf_sym_rowcounter;
   HYPRE_Real *ijbuf_sym_data;
   HYPRE_Int *ijbuf_sym_cols, *ijbuf_sym_rownums, *ijbuf_sym_numcols;
   /* Other Declarations */
   HYPRE_Int ierr = 0;
   HYPRE_Real max_entry = 0.0;
   HYPRE_Real max_entry_offd = 0.0;
   HYPRE_Int * rownz = NULL;
   HYPRE_Int i, j, Cpt, row_start, row_end, global_row, global_col;
   /* Other Setup: offd data pointer is only valid when offd has columns */
   if (num_cols_RAP_offd)
   { RAP_offd_data = hypre_CSRMatrixData(RAP_offd); }
   /*
    * Initialize the IJ matrix, leveraging our rough knowledge of the
    * nonzero structure of Pattern based on RAP
    *
    * ilower, iupper, jlower, jupper */
   ierr += HYPRE_IJMatrixCreate(comm, first_col_diag_RAP, last_col_diag_RAP, first_col_diag_RAP, last_col_diag_RAP, &Pattern);
   ierr += HYPRE_IJMatrixSetObjectType(Pattern, HYPRE_PARCSR);
   /* Estimate each row's size as 120% of the corresponding RAP row
    * (implicit double->int truncation); the 20% head room is for the
    * extra drop-tolerance entries added below */
   rownz = hypre_CTAlloc (HYPRE_Int, num_variables);
   for(i = 0; i < num_variables; i++)
   { rownz[i] = 1.2*(RAP_diag_i[i+1] - RAP_diag_i[i]) + 1.2*(RAP_offd_i[i+1] - RAP_offd_i[i]); }
   HYPRE_IJMatrixSetRowSizes(Pattern, rownz);
   ierr += HYPRE_IJMatrixInitialize(Pattern);
   hypre_TFree(rownz);
   /*
    * For efficiency, we do a buffered IJAddToValues.
    * Here, we initialize the buffer and then initialize the buffer counters
    */
   ijbuf_size = 1000;
   ijbuf_data = hypre_CTAlloc(HYPRE_Real, ijbuf_size);
   ijbuf_cols = hypre_CTAlloc(HYPRE_Int, ijbuf_size);
   ijbuf_rownums = hypre_CTAlloc(HYPRE_Int, ijbuf_size);
   ijbuf_numcols = hypre_CTAlloc(HYPRE_Int, ijbuf_size);
   /* NOTE(review): ijbuf_cols is passed here where other call sites of
    * BufferInit pass the numcols array -- harmless if Init only zeroes a
    * freshly CTAlloc'd (already zeroed) array, but confirm against Init */
   hypre_NonGalerkinIJBufferInit( &ijbuf_cnt, &ijbuf_rowcounter, ijbuf_cols );
   if(sym_collapse)
   {
      ijbuf_sym_data = hypre_CTAlloc(HYPRE_Real, ijbuf_size);
      ijbuf_sym_cols = hypre_CTAlloc(HYPRE_Int, ijbuf_size);
      ijbuf_sym_rownums= hypre_CTAlloc(HYPRE_Int, ijbuf_size);
      ijbuf_sym_numcols= hypre_CTAlloc(HYPRE_Int, ijbuf_size);
      hypre_NonGalerkinIJBufferInit( &ijbuf_sym_cnt, &ijbuf_sym_rowcounter, ijbuf_sym_cols );
   }
   /*
    * Place entries in R_IAP into Pattern.
    * Coarse row i of Pattern is filled from the row of R_IAP belonging to
    * the i-th C-point (fine-grid index Cpt) in CF_marker.
    */
   Cpt = -1; /* Cpt contains the fine grid index of the i-th Cpt */
   for(i = 0; i < num_variables; i++)
   {
      global_row = i+first_col_diag_RAP;
      /* Find the next Coarse Point in CF_marker */
      for(j = Cpt+1; j < num_fine_variables; j++)
      {
         if(CF_marker[j] == 1) /* Found Next C-point */
         {
            Cpt = j;
            break;
         }
      }
      /* Diag Portion: add a pattern entry (value 1.0) for each nonzero */
      row_start = R_IAP_diag_i[Cpt];
      row_end = R_IAP_diag_i[Cpt+1];
      for(j = row_start; j < row_end; j++)
      {
         global_col = R_IAP_diag_j[j] + first_col_diag_RAP;
         /* This call adds a 1 x 1 to i j data */
         hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
                                         &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
                                         global_col, 1.0);
         if (sym_collapse)
         {
            /* Mirror entry (col, row) keeps the pattern symmetric */
            hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_sym_cnt,
                                            ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data,
                                            &ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols,
                                            global_col, global_row, 1.0);
         }
      }
      /* Offdiag Portion: same, but columns come from the offd col map */
      row_start = R_IAP_offd_i[Cpt];
      row_end = R_IAP_offd_i[Cpt+1];
      for(j = row_start; j < row_end; j++)
      {
         global_col = col_map_offd_R_IAP[ R_IAP_offd_j[j] ];
         /* This call adds a 1 x 1 to i j data */
         hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
                                         &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
                                         global_col, 1.0);
         if (sym_collapse)
         {
            hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_sym_cnt,
                                            ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data,
                                            &ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols,
                                            global_col, global_row, 1.0);
         }
      }
   }
   /*
    * Use drop-tolerance to compute new entries for sparsity pattern
    */
   /*#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,max_entry,max_entry_offd,global_col,global_row) HYPRE_SMP_SCHEDULE
   #endif*/
   for(i = 0; i < num_variables; i++)
   {
      global_row = i+first_col_diag_RAP;
      /* Compute the drop tolerance for this row, which is just
       * abs(max of row i)*droptol.  The diagonal entry itself is
       * excluded from the max in the diag block. */
      max_entry = -1.0;
      for(j = RAP_diag_i[i]; j < RAP_diag_i[i+1]; j++)
      {
         if( (RAP_diag_j[j] != i) && (max_entry < fabs(RAP_diag_data[j]) ) )
         { max_entry = fabs(RAP_diag_data[j]); }
      }
      for(j = RAP_offd_i[i]; j < RAP_offd_i[i+1]; j++)
      {
         {
            if( max_entry < fabs(RAP_offd_data[j]) )
            { max_entry = fabs(RAP_offd_data[j]); }
         }
      }
      max_entry *= droptol;
      /* collapse_beta scales the (typically looser) offd threshold */
      max_entry_offd = max_entry*collapse_beta;
      /* Loop over diag portion, adding all entries that are "strong" */
      for(j = RAP_diag_i[i]; j < RAP_diag_i[i+1]; j++)
      {
         if( fabs(RAP_diag_data[j]) > max_entry )
         {
            global_col = RAP_diag_j[j] + first_col_diag_RAP;
            /*#ifdef HYPRE_USING_OPENMP
            #pragma omp critical (IJAdd)
            #endif
            {*/
            /* For efficiency, we do a buffered IJAddToValues
             * A[global_row, global_col] += 1.0 */
            hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
                                            &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
                                            global_col, 1.0 );
            if(sym_collapse)
            {
               hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_sym_cnt,
                                               ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data,
                                               &ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols,
                                               global_col, global_row, 1.0 );
            }
            /*}*/
         }
      }
      /* Loop over offd portion, adding all entries that are "strong" */
      for(j = RAP_offd_i[i]; j < RAP_offd_i[i+1]; j++)
      {
         if( fabs(RAP_offd_data[j]) > max_entry_offd )
         {
            global_col = col_map_offd_RAP[ RAP_offd_j[j] ];
            /*#ifdef HYPRE_USING_OPENMP
            #pragma omp critical (IJAdd)
            #endif
            {*/
            /* For efficiency, we do a buffered IJAddToValues
             * A[global_row, global_col] += 1.0 */
            hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
                                            &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
                                            global_col, 1.0 );
            if(sym_collapse)
            {
               hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_sym_cnt,
                                               ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data,
                                               &ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols,
                                               global_col, global_row, 1.0 );
            }
            /*}*/
         }
      }
   }
   /* For efficiency, we do a buffered IJAddToValues.
    * This empties the buffer of any remaining values */
   hypre_NonGalerkinIJBufferEmpty(Pattern, ijbuf_size, &ijbuf_cnt, ijbuf_rowcounter,
                                  &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols);
   if(sym_collapse)
      hypre_NonGalerkinIJBufferEmpty(Pattern, ijbuf_size, &ijbuf_sym_cnt, ijbuf_sym_rowcounter,
                                     &ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
                                     &ijbuf_sym_numcols);
   /* Finalize Construction of Pattern */
   ierr += HYPRE_IJMatrixAssemble(Pattern);
   ierr += HYPRE_IJMatrixGetObject( Pattern, (void**) &Pattern_CSR );
   /* Deallocate.  NOTE(review): setting the object type to -1 before
    * Destroy appears intended to keep Destroy from freeing the ParCSR
    * object extracted above (which is returned) -- confirm against the
    * IJMatrixDestroy implementation */
   ierr += HYPRE_IJMatrixSetObjectType(Pattern, -1);
   ierr += HYPRE_IJMatrixDestroy(Pattern);
   hypre_TFree(ijbuf_data);
   hypre_TFree(ijbuf_cols);
   hypre_TFree(ijbuf_rownums);
   hypre_TFree(ijbuf_numcols);
   if(sym_collapse)
   {
      hypre_TFree(ijbuf_sym_data);
      hypre_TFree(ijbuf_sym_cols);
      hypre_TFree(ijbuf_sym_rownums);
      hypre_TFree(ijbuf_sym_numcols);
   }
   return Pattern_CSR;
}
HYPRE_Int
hypre_BoomerAMGBuildNonGalerkinCoarseOperator( hypre_ParCSRMatrix **RAP_ptr,
hypre_ParCSRMatrix *AP,
HYPRE_Real strong_threshold,
HYPRE_Real max_row_sum,
HYPRE_Int num_functions,
HYPRE_Int * dof_func_value,
HYPRE_Real S_commpkg_switch,
HYPRE_Int * CF_marker,
HYPRE_Real droptol, HYPRE_Int sym_collapse,
HYPRE_Real lump_percent, HYPRE_Int collapse_beta )
{
/* Initializations */
MPI_Comm comm = hypre_ParCSRMatrixComm(*RAP_ptr);
hypre_ParCSRMatrix *S = NULL;
hypre_ParCSRMatrix *RAP = *RAP_ptr;
HYPRE_Int *col_offd_S_to_A = NULL;
HYPRE_Int i, j, k, row_start, row_end, value, num_cols_offd_Sext, num_procs;
HYPRE_Int S_ext_diag_size, S_ext_offd_size, last_col_diag_RAP, cnt_offd, cnt_diag, cnt;
HYPRE_Int col_indx_Pattern, current_Pattern_j, col_indx_RAP;
/* HYPRE_Real start_time = hypre_MPI_Wtime(); */
/* HYPRE_Real end_time; */
HYPRE_Int * temp = NULL;
HYPRE_Int ierr = 0;
char filename[256];
/* Lumping related variables */
HYPRE_IJMatrix ijmatrix;
HYPRE_Int * Pattern_offd_indices = NULL;
HYPRE_Int * S_offd_indices = NULL;
HYPRE_Int * offd_intersection = NULL;
HYPRE_Real * offd_intersection_data = NULL;
HYPRE_Int * diag_intersection = NULL;
HYPRE_Real * diag_intersection_data = NULL;
HYPRE_Int Pattern_offd_indices_len = 0;
HYPRE_Int Pattern_offd_indices_allocated_len= 0;
HYPRE_Int S_offd_indices_len = 0;
HYPRE_Int S_offd_indices_allocated_len = 0;
HYPRE_Int offd_intersection_len = 0;
HYPRE_Int offd_intersection_allocated_len = 0;
HYPRE_Int diag_intersection_len = 0;
HYPRE_Int diag_intersection_allocated_len = 0;
HYPRE_Real intersection_len = 0;
HYPRE_Int * Pattern_indices_ptr = NULL;
HYPRE_Int Pattern_diag_indices_len = 0;
HYPRE_Int global_row = 0;
HYPRE_Int has_row_ended = 0;
HYPRE_Real lump_value = 0.;
HYPRE_Real diagonal_lump_value = 0.;
HYPRE_Real neg_lump_value = 0.;
HYPRE_Real sum_strong_neigh = 0.;
HYPRE_Int * rownz = NULL;
/* offd and diag portions of RAP */
hypre_CSRMatrix *RAP_diag = hypre_ParCSRMatrixDiag(RAP);
HYPRE_Int *RAP_diag_i = hypre_CSRMatrixI(RAP_diag);
HYPRE_Real *RAP_diag_data = hypre_CSRMatrixData(RAP_diag);
HYPRE_Int *RAP_diag_j = hypre_CSRMatrixJ(RAP_diag);
HYPRE_Int first_col_diag_RAP = hypre_ParCSRMatrixFirstColDiag(RAP);
HYPRE_Int num_cols_diag_RAP = hypre_CSRMatrixNumCols(RAP_diag);
hypre_CSRMatrix *RAP_offd = hypre_ParCSRMatrixOffd(RAP);
HYPRE_Int *RAP_offd_i = hypre_CSRMatrixI(RAP_offd);
HYPRE_Real *RAP_offd_data = NULL;
HYPRE_Int *RAP_offd_j = hypre_CSRMatrixJ(RAP_offd);
HYPRE_Int *col_map_offd_RAP = hypre_ParCSRMatrixColMapOffd(RAP);
HYPRE_Int num_cols_RAP_offd = hypre_CSRMatrixNumCols(RAP_offd);
HYPRE_Int num_variables = hypre_CSRMatrixNumRows(RAP_diag);
HYPRE_Int global_num_vars = hypre_ParCSRMatrixGlobalNumRows(RAP);
/* offd and diag portions of S */
hypre_CSRMatrix *S_diag = NULL;
HYPRE_Int *S_diag_i = NULL;
HYPRE_Real *S_diag_data = NULL;
HYPRE_Int *S_diag_j = NULL;
hypre_CSRMatrix *S_offd = NULL;
HYPRE_Int *S_offd_i = NULL;
HYPRE_Real *S_offd_data = NULL;
HYPRE_Int *S_offd_j = NULL;
HYPRE_Int *col_map_offd_S = NULL;
HYPRE_Int num_cols_offd_S;
/* HYPRE_Int num_nonzeros_S_diag; */
/* off processor portions of S */
hypre_CSRMatrix *S_ext = NULL;
HYPRE_Int *S_ext_i = NULL;
HYPRE_Real *S_ext_data = NULL;
HYPRE_Int *S_ext_j = NULL;
HYPRE_Int *S_ext_diag_i = NULL;
HYPRE_Real *S_ext_diag_data = NULL;
HYPRE_Int *S_ext_diag_j = NULL;
HYPRE_Int *S_ext_offd_i = NULL;
HYPRE_Real *S_ext_offd_data = NULL;
HYPRE_Int *S_ext_offd_j = NULL;
HYPRE_Int *col_map_offd_Sext = NULL;
/* HYPRE_Int num_nonzeros_S_ext_diag;
HYPRE_Int num_nonzeros_S_ext_offd;
HYPRE_Int num_rows_Sext = 0; */
HYPRE_Int row_indx_Sext = 0;
/* offd and diag portions of Pattern */
hypre_ParCSRMatrix *Pattern = NULL;
hypre_CSRMatrix *Pattern_diag = NULL;
HYPRE_Int *Pattern_diag_i = NULL;
HYPRE_Real *Pattern_diag_data = NULL;
HYPRE_Int *Pattern_diag_j = NULL;
hypre_CSRMatrix *Pattern_offd = NULL;
HYPRE_Int *Pattern_offd_i = NULL;
HYPRE_Real *Pattern_offd_data = NULL;
HYPRE_Int *Pattern_offd_j = NULL;
HYPRE_Int *col_map_offd_Pattern = NULL;
HYPRE_Int num_cols_Pattern_offd;
HYPRE_Int my_id;
/* Buffered IJAddToValues */
HYPRE_Int ijbuf_cnt, ijbuf_size, ijbuf_rowcounter;
HYPRE_Real *ijbuf_data;
HYPRE_Int *ijbuf_cols, *ijbuf_rownums, *ijbuf_numcols;
/* Buffered IJAddToValues for Symmetric Entries */
HYPRE_Int ijbuf_sym_cnt, ijbuf_sym_rowcounter;
HYPRE_Real *ijbuf_sym_data;
HYPRE_Int *ijbuf_sym_cols, *ijbuf_sym_rownums, *ijbuf_sym_numcols;
/* Further Initializations */
if (num_cols_RAP_offd)
{ RAP_offd_data = hypre_CSRMatrixData(RAP_offd); }
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
/* Compute Sparsity Pattern */
Pattern = hypre_NonGalerkinSparsityPattern(AP, RAP, CF_marker, droptol, sym_collapse, collapse_beta);
Pattern_diag = hypre_ParCSRMatrixDiag(Pattern);
Pattern_diag_i = hypre_CSRMatrixI(Pattern_diag);
Pattern_diag_data = hypre_CSRMatrixData(Pattern_diag);
Pattern_diag_j = hypre_CSRMatrixJ(Pattern_diag);
Pattern_offd = hypre_ParCSRMatrixOffd(Pattern);
Pattern_offd_i = hypre_CSRMatrixI(Pattern_offd);
Pattern_offd_j = hypre_CSRMatrixJ(Pattern_offd);
col_map_offd_Pattern = hypre_ParCSRMatrixColMapOffd(Pattern);
num_cols_Pattern_offd = hypre_CSRMatrixNumCols(Pattern_offd);
if (num_cols_Pattern_offd)
{ Pattern_offd_data = hypre_CSRMatrixData(Pattern_offd); }
/**
* Fill in the entries of Pattern with entries from RAP
**/
/* First, sort column indices in RAP and Pattern */
for(i = 0; i < num_variables; i++)
{
/* The diag matrices store the diagonal as first element in each row.
* We maintain that for the case of Pattern and RAP, because the
* strength of connection routine relies on it and we need to ignore
* diagonal entries in Pattern later during set intersections.
* */
/* Sort diag portion of RAP */
row_start = RAP_diag_i[i];
if( RAP_diag_j[row_start] == i)
{ row_start = row_start + 1; }
row_end = RAP_diag_i[i+1];
hypre_qsort1(RAP_diag_j, RAP_diag_data, row_start, row_end-1 );
/* Sort diag portion of Pattern */
row_start = Pattern_diag_i[i];
if( Pattern_diag_j[row_start] == i)
{ row_start = row_start + 1; }
row_end = Pattern_diag_i[i+1];
hypre_qsort1(Pattern_diag_j, Pattern_diag_data, row_start, row_end-1 );
/* Sort offd portion of RAP */
row_start = RAP_offd_i[i];
row_end = RAP_offd_i[i+1];
hypre_qsort1(RAP_offd_j, RAP_offd_data, row_start, row_end-1 );
/* Sort offd portion of Pattern */
/* Be careful to map coarse dof i with CF_marker into Pattern */
row_start = Pattern_offd_i[i];
row_end = Pattern_offd_i[i+1];
hypre_qsort1(Pattern_offd_j, Pattern_offd_data, row_start, row_end-1 );
}
/* Create Strength matrix based on RAP or Pattern. If Pattern is used,
* then the SortedCopyParCSRData(...) function call must also be commented
* back in */
/* hypre_SortedCopyParCSRData(RAP, Pattern); */
if(0)
{
/* hypre_BoomerAMG_MyCreateS(Pattern, strong_threshold, max_row_sum, */
hypre_BoomerAMG_MyCreateS(RAP, strong_threshold, max_row_sum,
num_functions, dof_func_value, &S);
}
else
{
/* Passing in "1, NULL" because dof_array is not needed
* because we assume that the number of functions is 1 */
/* hypre_BoomerAMG_MyCreateS(Pattern, strong_threshold, max_row_sum,*/
hypre_BoomerAMG_MyCreateS(RAP, strong_threshold, max_row_sum,
1, NULL, &S);
}
/*if (0)*/ /*(strong_threshold > S_commpkg_switch)*/
/*{ hypre_BoomerAMG_MyCreateSCommPkg(RAP, S, &col_offd_S_to_A); }*/
/* Grab diag and offd parts of S */
S_diag = hypre_ParCSRMatrixDiag(S);
S_diag_i = hypre_CSRMatrixI(S_diag);
S_diag_j = hypre_CSRMatrixJ(S_diag);
S_diag_data = hypre_CSRMatrixData(S_diag);
S_offd = hypre_ParCSRMatrixOffd(S);
S_offd_i = hypre_CSRMatrixI(S_offd);
S_offd_j = hypre_CSRMatrixJ(S_offd);
S_offd_data = hypre_CSRMatrixData(S_offd);
col_map_offd_S = hypre_ParCSRMatrixColMapOffd(S);
num_cols_offd_S = hypre_CSRMatrixNumCols(S_offd);
/* num_nonzeros_S_diag = S_diag_i[num_variables]; */
/* Grab part of S that is distance one away from the local rows
* This is needed later for the stencil collapsing. This section
* of the code mimics par_rap.c when it extracts Ps_ext.
* When moving from par_rap.c, the variable name changes were:
* A --> RAP
* P --> S
* Ps_ext --> S_ext
* P_ext_diag --> S_ext_diag
* P_ext_offd --> S_ext_offd
*
* The data layout of S_ext as returned by ExtractBExt gives you only global
* column indices, and must be converted to the local numbering. This code
* section constructs S_ext_diag and S_ext_offd, which are the distance 1
* couplings in S based on the sparsity structure in RAP.
* --> S_ext_diag corresponds to the same column slice that RAP_diag
* corresponds to. Thus, the column indexing is the same as in
* RAP_diag such that S_ext_diag_j[k] just needs to be offset by
* the RAP_diag first global dof offset.
* --> S_ext_offd column indexing is a little more complicated, and
* requires the computation below of col_map_S_ext_offd, which
* maps the local 0,1,2,... column indexing in S_ext_offd to global
* dof numbers. Note, that the num_cols_RAP_offd is NOT equal to
* num_cols_offd_S_ext
* --> The row indexing of S_ext_diag|offd is as follows. Use
* col_map_offd_RAP, where the first index corresponds to the
* first global row index in S_ext_diag|offd. Remember that ExtractBExt
* grabs the information from S required for locally computing
* (RAP*S)[proc_k row slice, :] */
if (num_procs > 1)
{
S_ext = hypre_ParCSRMatrixExtractBExt(S,RAP,1);
S_ext_data = hypre_CSRMatrixData(S_ext);
S_ext_i = hypre_CSRMatrixI(S_ext);
S_ext_j = hypre_CSRMatrixJ(S_ext);
}
/* This uses the num_cols_RAP_offd to set S_ext_diag|offd_i, because S_ext
* is the off-processor information needed to compute RAP*S. That is,
* num_cols_RAP_offd represents the number of rows needed from S_ext for
* the multiplication */
S_ext_diag_i = hypre_CTAlloc(HYPRE_Int,num_cols_RAP_offd+1);
S_ext_offd_i = hypre_CTAlloc(HYPRE_Int,num_cols_RAP_offd+1);
S_ext_diag_size = 0;
S_ext_offd_size = 0;
/* num_rows_Sext = num_cols_RAP_offd; */
last_col_diag_RAP = first_col_diag_RAP + num_cols_diag_RAP - 1;
/* construct the S_ext_diag and _offd row-pointer arrays by counting elements
* This looks to create offd and diag blocks related to the local rows belonging
* to this processor...we may not need to split up S_ext this way...or we could.
* be the bottle neck so LETS SPLIT THIS UP Between offd and diag*/
for (i=0; i < num_cols_RAP_offd; i++)
{
for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++)
if (S_ext_j[j] < first_col_diag_RAP || S_ext_j[j] > last_col_diag_RAP)
S_ext_offd_size++;
else
S_ext_diag_size++;
S_ext_diag_i[i+1] = S_ext_diag_size;
S_ext_offd_i[i+1] = S_ext_offd_size;
}
if (S_ext_diag_size)
{
S_ext_diag_j = hypre_CTAlloc(HYPRE_Int, S_ext_diag_size);
S_ext_diag_data = hypre_CTAlloc(HYPRE_Real, S_ext_diag_size);
}
if (S_ext_offd_size)
{
S_ext_offd_j = hypre_CTAlloc(HYPRE_Int, S_ext_offd_size);
S_ext_offd_data = hypre_CTAlloc(HYPRE_Real, S_ext_offd_size);
}
/* This copies over the column indices into the offd and diag parts.
* The diag portion has it's local column indices shifted to start at 0.
* The offd portion requires more work to construct the col_map_offd array
* and a local column ordering. */
cnt_offd = 0;
cnt_diag = 0;
cnt = 0;
for (i=0; i < num_cols_RAP_offd; i++)
{
for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++)
if (S_ext_j[j] < first_col_diag_RAP || S_ext_j[j] > last_col_diag_RAP)
{
S_ext_offd_data[cnt_offd] = S_ext_data[j];
S_ext_offd_j[cnt_offd++] = S_ext_j[j];
}
else
{
S_ext_diag_data[cnt_diag] = S_ext_data[j];
S_ext_diag_j[cnt_diag++] = S_ext_j[j] - first_col_diag_RAP;
}
}
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(S_ext);
S_ext = NULL;
}
/* This creates col_map_offd_Sext */
if (S_ext_offd_size || num_cols_offd_S)
{
temp = hypre_CTAlloc(HYPRE_Int, S_ext_offd_size+num_cols_offd_S);
for (i=0; i < S_ext_offd_size; i++)
temp[i] = S_ext_offd_j[i];
cnt = S_ext_offd_size;
for (i=0; i < num_cols_offd_S; i++)
temp[cnt++] = col_map_offd_S[i];
}
if (cnt)
{
/* after this, the first so many entries of temp will hold the
* unique column indices in S_ext_offd_j unioned with the indices
* in col_map_offd_S */
hypre_qsort0(temp, 0, cnt-1);
num_cols_offd_Sext = 1;
value = temp[0];
for (i=1; i < cnt; i++)
{
if (temp[i] > value)
{
value = temp[i];
temp[num_cols_offd_Sext++] = value;
}
}
}
else
{
num_cols_offd_Sext = 0;
}
/* num_nonzeros_S_ext_diag = cnt_diag;
num_nonzeros_S_ext_offd = S_ext_offd_size; */
if (num_cols_offd_Sext)
col_map_offd_Sext = hypre_CTAlloc(HYPRE_Int, num_cols_offd_Sext);
for (i=0; i < num_cols_offd_Sext; i++)
col_map_offd_Sext[i] = temp[i];
if (S_ext_offd_size || num_cols_offd_S)
hypre_TFree(temp);
/* look for S_ext_offd_j[i] in col_map_offd_Sext, and set S_ext_offd_j[i]
* to the index of that column value in col_map_offd_Sext */
for (i=0 ; i < S_ext_offd_size; i++)
S_ext_offd_j[i] = hypre_BinarySearch(col_map_offd_Sext,
S_ext_offd_j[i],
num_cols_offd_Sext);
/* Need to sort column indices in S and S_ext */
for(i = 0; i < num_variables; i++)
{
/* Re-Sort diag portion of Pattern, placing the diagonal entry in a
* sorted position */
row_start = Pattern_diag_i[i];
row_end = Pattern_diag_i[i+1];
hypre_qsort1(Pattern_diag_j, Pattern_diag_data, row_start, row_end-1 );
/* Sort diag portion of S, noting that no diagonal entry */
/* S has not "data" array...it's just NULL */
row_start = S_diag_i[i];
row_end = S_diag_i[i+1];
hypre_qsort1(S_diag_j, S_diag_data, row_start, row_end-1 );
/* Sort offd portion of S */
/* S has no "data" array...it's just NULL */
row_start = S_offd_i[i];
row_end = S_offd_i[i+1];
hypre_qsort1(S_offd_j, S_offd_data, row_start, row_end-1 );
}
/* Sort S_ext
* num_cols_RAP_offd equals num_rows for S_ext*/
for(i = 0; i < num_cols_RAP_offd; i++)
{
/* Sort diag portion of S_ext */
row_start = S_ext_diag_i[i];
row_end = S_ext_diag_i[i+1];
hypre_qsort1(S_ext_diag_j, S_ext_diag_data, row_start, row_end-1 );
/* Sort offd portion of S_ext */
row_start = S_ext_offd_i[i];
row_end = S_ext_offd_i[i+1];
hypre_qsort1(S_ext_offd_j, S_ext_offd_data, row_start, row_end-1 );
}
/*
* Now, for the fun stuff -- Computing the Non-Galerkin Operator
*/
/* Initialize the ijmatrix, leveraging our knowledge of the nonzero
* structure in Pattern */
ierr += HYPRE_IJMatrixCreate(comm, first_col_diag_RAP, last_col_diag_RAP,
first_col_diag_RAP, last_col_diag_RAP, &ijmatrix);
ierr += HYPRE_IJMatrixSetObjectType(ijmatrix, HYPRE_PARCSR);
rownz = hypre_CTAlloc (HYPRE_Int, num_variables);
for(i = 0; i < num_variables; i++)
{ rownz[i] = 1.2*(Pattern_diag_i[i+1] - Pattern_diag_i[i]) + 1.2*(Pattern_offd_i[i+1] - Pattern_offd_i[i]); }
HYPRE_IJMatrixSetRowSizes(ijmatrix, rownz);
ierr += HYPRE_IJMatrixInitialize(ijmatrix);
hypre_TFree(rownz);
/*
*For efficiency, we do a buffered IJAddToValues.
* Here, we initialize the buffer and then initialize the buffer counters
*/
ijbuf_size = 1000;
ijbuf_data = hypre_CTAlloc(HYPRE_Real,ijbuf_size);
ijbuf_cols = hypre_CTAlloc(HYPRE_Int, ijbuf_size);
ijbuf_rownums = hypre_CTAlloc(HYPRE_Int, ijbuf_size);
ijbuf_numcols = hypre_CTAlloc(HYPRE_Int, ijbuf_size);
hypre_NonGalerkinIJBufferInit( &ijbuf_cnt, &ijbuf_rowcounter, ijbuf_cols );
if(sym_collapse)
{
ijbuf_sym_data = hypre_CTAlloc(HYPRE_Real,ijbuf_size);
ijbuf_sym_cols = hypre_CTAlloc(HYPRE_Int, ijbuf_size);
ijbuf_sym_rownums= hypre_CTAlloc(HYPRE_Int, ijbuf_size);
ijbuf_sym_numcols= hypre_CTAlloc(HYPRE_Int, ijbuf_size);
hypre_NonGalerkinIJBufferInit( &ijbuf_sym_cnt, &ijbuf_sym_rowcounter, ijbuf_sym_cols );
}
/*
* Eliminate Entries In RAP_diag
* */
for(i = 0; i < num_variables; i++)
{
global_row = i+first_col_diag_RAP;
row_start = RAP_diag_i[i];
row_end = RAP_diag_i[i+1];
has_row_ended = 0;
/* Only do work if row has nonzeros */
if( row_start < row_end)
{
/* Grab pointer to current entry in Pattern_diag */
current_Pattern_j = Pattern_diag_i[i];
col_indx_Pattern = Pattern_diag_j[current_Pattern_j];
/* Grab this row's indices out of Pattern offd and diag. This will
* be for computing index set intersections for lumping */
/* Ensure adequate length */
Pattern_offd_indices_len = Pattern_offd_i[i+1] - Pattern_offd_i[i];
if(Pattern_offd_indices_allocated_len < Pattern_offd_indices_len)
{
hypre_TFree(Pattern_offd_indices);
Pattern_offd_indices = hypre_CTAlloc(HYPRE_Int, Pattern_offd_indices_len);
Pattern_offd_indices_allocated_len = Pattern_offd_indices_len;
}
/* Grab sub array from col_map, corresponding to the slice of Pattern_offd_j */
hypre_GrabSubArray(Pattern_offd_j,
Pattern_offd_i[i], Pattern_offd_i[i+1]-1,
col_map_offd_Pattern, Pattern_offd_indices);
/* No need to grab info out of Pattern_diag_j[...], here we just start from
* Pattern_diag_i[i] and end at index Pattern_diag_i[i+1] - 1. We do need to
* ignore the diagonal entry in Pattern, because we don't lump entries there */
if( Pattern_diag_j[Pattern_diag_i[i]] == i )
{
Pattern_indices_ptr = &( Pattern_diag_j[Pattern_diag_i[i]+1]);
Pattern_diag_indices_len = Pattern_diag_i[i+1] - Pattern_diag_i[i] - 1;
}
else
{
Pattern_indices_ptr = &( Pattern_diag_j[Pattern_diag_i[i]]);
Pattern_diag_indices_len = Pattern_diag_i[i+1] - Pattern_diag_i[i];
}
}
for(j = row_start; j < row_end; j++)
{
col_indx_RAP = RAP_diag_j[j];
/* Ignore zero entries in RAP */
if( RAP_diag_data[j] != 0.0)
{
/* Don't change the diagonal, just write it */
if(col_indx_RAP == i)
{
/*#ifdef HY PRE_USING_OPENMP
#pragma omp critical (IJAdd)
#endif
{*/
/* For efficiency, we do a buffered IJAddToValues.
* A[global_row, global_row] += RAP_diag_data[j] */
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_row, RAP_diag_data[j] );
/*}*/
}
/* The entry in RAP does not appear in Pattern, so LUMP it */
else if( (col_indx_RAP < col_indx_Pattern) || has_row_ended)
{
/* Lump entry (i, col_indx_RAP) in RAP */
/* Grab the indices for row col_indx_RAP of S_offd and diag. This will
* be for computing lumping locations */
S_offd_indices_len = S_offd_i[col_indx_RAP+1] - S_offd_i[col_indx_RAP];
if(S_offd_indices_allocated_len < S_offd_indices_len)
{
hypre_TFree(S_offd_indices);
S_offd_indices = hypre_CTAlloc(HYPRE_Int, S_offd_indices_len);
S_offd_indices_allocated_len = S_offd_indices_len;
}
/* Grab sub array from col_map, corresponding to the slice of S_offd_j */
hypre_GrabSubArray(S_offd_j, S_offd_i[col_indx_RAP], S_offd_i[col_indx_RAP+1]-1,
col_map_offd_S, S_offd_indices);
/* No need to grab info out of S_diag_j[...], here we just start from
* S_diag_i[col_indx_RAP] and end at index S_diag_i[col_indx_RAP+1] - 1 */
/* Intersect the diag and offd pieces, remembering that the
* diag array will need to have the offset +first_col_diag_RAP */
cnt = hypre_max(S_offd_indices_len, Pattern_offd_indices_len);
if(offd_intersection_allocated_len < cnt)
{
hypre_TFree(offd_intersection);
hypre_TFree(offd_intersection_data);
offd_intersection = hypre_CTAlloc(HYPRE_Int, cnt);
offd_intersection_data = hypre_CTAlloc(HYPRE_Real, cnt);
offd_intersection_allocated_len = cnt;
}
/* This intersection also tracks S_offd_data and assumes that
* S_offd_indices is the first argument here */
hypre_IntersectTwoArrays(S_offd_indices,
&(S_offd_data[ S_offd_i[col_indx_RAP] ]),
S_offd_indices_len,
Pattern_offd_indices,
Pattern_offd_indices_len,
offd_intersection,
offd_intersection_data,
&offd_intersection_len);
/* Now, intersect the indices for the diag block. Note that S_diag_j does
* not have a diagonal entry, so no lumping occurs to the diagonal. */
cnt = hypre_max(Pattern_diag_indices_len,
S_diag_i[col_indx_RAP+1] - S_diag_i[col_indx_RAP] );
if(diag_intersection_allocated_len < cnt)
{
hypre_TFree(diag_intersection);
hypre_TFree(diag_intersection_data);
diag_intersection = hypre_CTAlloc(HYPRE_Int, cnt);
diag_intersection_data = hypre_CTAlloc(HYPRE_Real, cnt);
diag_intersection_allocated_len = cnt;
}
/* There is no diagonal entry in first position of S */
hypre_IntersectTwoArrays( &(S_diag_j[S_diag_i[col_indx_RAP]]),
&(S_diag_data[ S_diag_i[col_indx_RAP] ]),
S_diag_i[col_indx_RAP+1] - S_diag_i[col_indx_RAP],
Pattern_indices_ptr,
Pattern_diag_indices_len,
diag_intersection,
diag_intersection_data,
&diag_intersection_len);
/* Loop over these intersections, and lump a constant fraction of
* RAP_diag_data[j] to each entry */
intersection_len = diag_intersection_len + offd_intersection_len;
if(intersection_len > 0)
{
/* Sum the strength-of-connection values from row
* col_indx_RAP in S, corresponding to the indices we are
* collapsing to in row i This will give us our collapsing
* weights. */
sum_strong_neigh = 0.0;
for(k = 0; k < diag_intersection_len; k++)
{ sum_strong_neigh += fabs(diag_intersection_data[k]); }
for(k = 0; k < offd_intersection_len; k++)
{ sum_strong_neigh += fabs(offd_intersection_data[k]); }
sum_strong_neigh = RAP_diag_data[j]/sum_strong_neigh;
/* When lumping with the diag_intersection, must offset column index */
for(k = 0; k < diag_intersection_len; k++)
{
lump_value = lump_percent * fabs(diag_intersection_data[k])*sum_strong_neigh;
diagonal_lump_value = (1.0 - lump_percent) * fabs(diag_intersection_data[k])*sum_strong_neigh;
neg_lump_value = -1.0 * lump_value;
cnt = diag_intersection[k]+first_col_diag_RAP;
/*#ifdef HY PRE_USING_OPENMP
#pragma omp critical (IJAdd)
#endif
{*/
/* For efficiency, we do a buffered IJAddToValues.
* A[global_row, cnt] += RAP_diag_data[j] */
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
cnt, lump_value );
if (lump_percent < 1.0)
{
/* Preserve row sum by updating diagonal */
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_row, diagonal_lump_value );
}
/* Update mirror entries, if symmetric collapsing */
if(sym_collapse)
{
/* Update mirror entry */
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, cnt, global_row, lump_value );
/* Update mirror entry diagonal */
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, cnt, cnt, neg_lump_value );
}
/*}*/
}
/* The offd_intersection has global column indices, i.e., the
* col_map arrays contain global indices */
for(k = 0; k < offd_intersection_len; k++)
{
lump_value = lump_percent * fabs(offd_intersection_data[k])*sum_strong_neigh;
diagonal_lump_value = (1.0 - lump_percent) * fabs(offd_intersection_data[k])*sum_strong_neigh;
neg_lump_value = -1.0 * lump_value;
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
offd_intersection[k], lump_value );
if (lump_percent < 1.0)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_row, diagonal_lump_value );
}
/* Update mirror entries, if symmetric collapsing */
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, offd_intersection[k],
global_row, lump_value );
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, offd_intersection[k],
offd_intersection[k], neg_lump_value );
}
}
}
/* If intersection is empty, do not eliminate entry */
else
{
/* Don't forget to update mirror entry if collapsing symmetrically */
if (sym_collapse)
{ lump_value = 0.5*RAP_diag_data[j]; }
else
{ lump_value = RAP_diag_data[j]; }
cnt = col_indx_RAP+first_col_diag_RAP;
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
cnt, lump_value );
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, cnt, global_row, lump_value );
}
}
}
/* The entry in RAP appears in Pattern, so keep it */
else if(col_indx_RAP == col_indx_Pattern)
{
cnt = col_indx_RAP+first_col_diag_RAP;
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
cnt, RAP_diag_data[j] );
/* Only go to the next entry in Pattern, if this is not the end of a row */
if( current_Pattern_j < Pattern_diag_i[i+1]-1 )
{
current_Pattern_j += 1;
col_indx_Pattern = Pattern_diag_j[current_Pattern_j];
}
else
{ has_row_ended = 1;}
}
/* Increment col_indx_Pattern, and repeat this loop iter for current
* col_ind_RAP value */
else if(col_indx_RAP > col_indx_Pattern)
{
for(; current_Pattern_j < Pattern_diag_i[i+1]; current_Pattern_j++)
{
col_indx_Pattern = Pattern_diag_j[current_Pattern_j];
if(col_indx_RAP <= col_indx_Pattern)
{ break;}
}
/* If col_indx_RAP is still greater (i.e., we've reached a row end), then
* we need to lump everything else in this row */
if(col_indx_RAP > col_indx_Pattern)
{ has_row_ended = 1; }
/* Decrement j, in order to repeat this loop iteration for the current
* col_indx_RAP value */
j--;
}
}
}
}
/*
* Eliminate Entries In RAP_offd
* Structure of this for-loop is very similar to the RAP_diag for-loop
* */
if(num_cols_RAP_offd)
{
for(i = 0; i < num_variables; i++)
{
global_row = i+first_col_diag_RAP;
row_start = RAP_offd_i[i];
row_end = RAP_offd_i[i+1];
has_row_ended = 0;
/* Only do work if row has nonzeros */
if( row_start < row_end)
{
current_Pattern_j = Pattern_offd_i[i];
Pattern_offd_indices_len = Pattern_offd_i[i+1] - Pattern_offd_i[i];
if( (Pattern_offd_j != NULL) && (Pattern_offd_indices_len > 0) )
{ col_indx_Pattern = col_map_offd_Pattern[ Pattern_offd_j[current_Pattern_j] ]; }
else
{ /* if Pattern_offd_j is not allocated or this is a zero length row,
then all entries need to be lumped.
This is an analagous situation to has_row_ended=1. */
col_indx_Pattern = -1;
has_row_ended = 1;
}
/* Grab this row's indices out of Pattern offd and diag. This will
* be for computing index set intersections for lumping. The above
* loop over RAP_diag ensures adequate length of Pattern_offd_indices */
/* Ensure adequate length */
hypre_GrabSubArray(Pattern_offd_j,
Pattern_offd_i[i], Pattern_offd_i[i+1]-1,
col_map_offd_Pattern, Pattern_offd_indices);
/* No need to grab info out of Pattern_diag_j[...], here we just start from
* Pattern_diag_i[i] and end at index Pattern_diag_i[i+1] - 1. We do need to
* ignore the diagonal entry in Pattern, because we don't lump entries there */
if( Pattern_diag_j[Pattern_diag_i[i]] == i )
{
Pattern_indices_ptr = &( Pattern_diag_j[Pattern_diag_i[i]+1]);
Pattern_diag_indices_len = Pattern_diag_i[i+1] - Pattern_diag_i[i] - 1;
}
else
{
Pattern_indices_ptr = &( Pattern_diag_j[Pattern_diag_i[i]]);
Pattern_diag_indices_len = Pattern_diag_i[i+1] - Pattern_diag_i[i];
}
}
for(j = row_start; j < row_end; j++)
{
/* Ignore zero entries in RAP */
if( RAP_offd_data[j] != 0.0)
{
/* In general for all the offd_j arrays, we have to indirectly
* index with the col_map_offd array to get a global index */
col_indx_RAP = col_map_offd_RAP[ RAP_offd_j[j] ];
/* The entry in RAP does not appear in Pattern, so LUMP it */
if( (col_indx_RAP < col_indx_Pattern) || has_row_ended)
{
/* The row_indx_Sext would be found with:
row_indx_Sext = hypre_BinarySearch(col_map_offd_RAP, col_indx_RAP, num_cols_RAP_offd);
But, we already know the answer to this with, */
row_indx_Sext = RAP_offd_j[j];
/* Grab the indices for row row_indx_Sext from the offd and diag parts. This will
* be for computing lumping locations */
S_offd_indices_len = S_ext_offd_i[row_indx_Sext+1] - S_ext_offd_i[row_indx_Sext];
if(S_offd_indices_allocated_len < S_offd_indices_len)
{
hypre_TFree(S_offd_indices);
S_offd_indices = hypre_CTAlloc(HYPRE_Int, S_offd_indices_len);
S_offd_indices_allocated_len = S_offd_indices_len;
}
/* Grab sub array from col_map, corresponding to the slice of S_ext_offd_j */
hypre_GrabSubArray(S_ext_offd_j, S_ext_offd_i[row_indx_Sext], S_ext_offd_i[row_indx_Sext+1]-1,
col_map_offd_Sext, S_offd_indices);
/* No need to grab info out of S_ext_diag_j[...], here we just start from
* S_ext_diag_i[row_indx_Sext] and end at index S_ext_diag_i[row_indx_Sext+1] - 1 */
/* Intersect the diag and offd pieces, remembering that the
* diag array will need to have the offset +first_col_diag_RAP */
cnt = hypre_max(S_offd_indices_len, Pattern_offd_indices_len);
if(offd_intersection_allocated_len < cnt)
{
hypre_TFree(offd_intersection);
hypre_TFree(offd_intersection_data);
offd_intersection = hypre_CTAlloc(HYPRE_Int, cnt);
offd_intersection_data = hypre_CTAlloc(HYPRE_Real, cnt);
offd_intersection_allocated_len = cnt;
}
hypre_IntersectTwoArrays(S_offd_indices,
&(S_ext_offd_data[ S_ext_offd_i[row_indx_Sext] ]),
S_offd_indices_len,
Pattern_offd_indices,
Pattern_offd_indices_len,
offd_intersection,
offd_intersection_data,
&offd_intersection_len);
/* Now, intersect the indices for the diag block. */
cnt = hypre_max(Pattern_diag_indices_len,
S_ext_diag_i[row_indx_Sext+1] - S_ext_diag_i[row_indx_Sext] );
if(diag_intersection_allocated_len < cnt)
{
hypre_TFree(diag_intersection);
hypre_TFree(diag_intersection_data);
diag_intersection = hypre_CTAlloc(HYPRE_Int, cnt);
diag_intersection_data = hypre_CTAlloc(HYPRE_Real, cnt);
diag_intersection_allocated_len = cnt;
}
hypre_IntersectTwoArrays( &(S_ext_diag_j[S_ext_diag_i[row_indx_Sext]]),
&(S_ext_diag_data[ S_ext_diag_i[row_indx_Sext] ]),
S_ext_diag_i[row_indx_Sext+1] - S_ext_diag_i[row_indx_Sext],
Pattern_indices_ptr,
Pattern_diag_indices_len,
diag_intersection,
diag_intersection_data,
&diag_intersection_len);
/* Loop over these intersections, and lump a constant fraction of
* RAP_offd_data[j] to each entry */
intersection_len = diag_intersection_len + offd_intersection_len;
if(intersection_len > 0)
{
/* Sum the strength-of-connection values from row
* row_indx_Sext in S, corresponding to the indices we are
* collapsing to in row i. This will give us our collapsing
* weights. */
sum_strong_neigh = 0.0;
for(k = 0; k < diag_intersection_len; k++)
{ sum_strong_neigh += fabs(diag_intersection_data[k]); }
for(k = 0; k < offd_intersection_len; k++)
{ sum_strong_neigh += fabs(offd_intersection_data[k]); }
sum_strong_neigh = RAP_offd_data[j]/sum_strong_neigh;
/* When lumping with the diag_intersection, must offset column index */
for(k = 0; k < diag_intersection_len; k++)
{
lump_value = lump_percent * fabs(diag_intersection_data[k])*sum_strong_neigh;
diagonal_lump_value = (1.0 - lump_percent) * fabs(diag_intersection_data[k])*sum_strong_neigh;
neg_lump_value = -1.0 * lump_value;
cnt = diag_intersection[k]+first_col_diag_RAP;
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, cnt, lump_value );
if (lump_percent < 1.0)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, global_row,
diagonal_lump_value );
}
/* Update mirror entries, if symmetric collapsing */
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size,
&ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, cnt, global_row, lump_value);
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size,
&ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, cnt, cnt, neg_lump_value );
}
}
/* The offd_intersection has global column indices, i.e., the
* col_map arrays contain global indices */
for(k = 0; k < offd_intersection_len; k++)
{
lump_value = lump_percent * fabs(offd_intersection_data[k])*sum_strong_neigh;
diagonal_lump_value = (1.0 - lump_percent) * fabs(offd_intersection_data[k])*sum_strong_neigh;
neg_lump_value = -1.0 * lump_value;
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
offd_intersection[k], lump_value );
if (lump_percent < 1.0)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, global_row,
diagonal_lump_value );
}
/* Update mirror entries, if symmetric collapsing */
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size,
&ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, offd_intersection[k],
global_row, lump_value );
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size,
&ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, offd_intersection[k],
offd_intersection[k], neg_lump_value );
}
}
}
/* If intersection is empty, do not eliminate entry */
else
{
/* Don't forget to update mirror entry if collapsing symmetrically */
if (sym_collapse)
{ lump_value = 0.5*RAP_offd_data[j]; }
else
{ lump_value = RAP_offd_data[j]; }
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, col_indx_RAP,
lump_value );
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, col_indx_RAP, global_row,
lump_value );
}
}
}
/* The entry in RAP appears in Pattern, so keep it */
else if (col_indx_RAP == col_indx_Pattern)
{
/* For the offd structure, col_indx_RAP is a global dof number */
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, col_indx_RAP,
RAP_offd_data[j]);
/* Only go to the next entry in Pattern, if this is not the end of a row */
if( current_Pattern_j < Pattern_offd_i[i+1]-1 )
{
current_Pattern_j += 1;
col_indx_Pattern = col_map_offd_Pattern[ Pattern_offd_j[current_Pattern_j] ];
}
else
{ has_row_ended = 1;}
}
/* Increment col_indx_Pattern, and repeat this loop iter for current
* col_ind_RAP value */
else if(col_indx_RAP > col_indx_Pattern)
{
for(; current_Pattern_j < Pattern_offd_i[i+1]; current_Pattern_j++)
{
col_indx_Pattern = col_map_offd_Pattern[ Pattern_offd_j[current_Pattern_j] ];
if(col_indx_RAP <= col_indx_Pattern)
{ break;}
}
/* If col_indx_RAP is still greater (i.e., we've reached a row end), then
* we need to lump everything else in this row */
if(col_indx_RAP > col_indx_Pattern)
{ has_row_ended = 1; }
/* Decrement j, in order to repeat this loop iteration for the current
* col_indx_RAP value */
j--;
}
}
}
}
}
/* For efficiency, we do a buffered IJAddToValues.
* This empties the buffer of any remaining values */
hypre_NonGalerkinIJBufferEmpty(ijmatrix, ijbuf_size, &ijbuf_cnt, ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols);
if(sym_collapse)
hypre_NonGalerkinIJBufferEmpty(ijmatrix, ijbuf_size, &ijbuf_sym_cnt, ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols);
/* Assemble non-Galerkin Matrix, and overwrite current RAP*/
ierr += HYPRE_IJMatrixAssemble (ijmatrix);
ierr += HYPRE_IJMatrixGetObject( ijmatrix, (void**) RAP_ptr);
/* Optional diagnostic matrix printing */
if (0)
{
hypre_sprintf(filename, "Pattern_%d.ij", global_num_vars);
hypre_ParCSRMatrixPrintIJ(Pattern, 0, 0, filename);
hypre_sprintf(filename, "Strength_%d.ij", global_num_vars);
hypre_ParCSRMatrixPrintIJ(S, 0, 0, filename);
hypre_sprintf(filename, "RAP_%d.ij", global_num_vars);
hypre_ParCSRMatrixPrintIJ(RAP, 0, 0, filename);
hypre_sprintf(filename, "RAPc_%d.ij", global_num_vars);
hypre_ParCSRMatrixPrintIJ(*RAP_ptr, 0, 0, filename);
hypre_sprintf(filename, "AP_%d.ij", global_num_vars);
hypre_ParCSRMatrixPrintIJ(AP, 0, 0, filename);
}
/* Free matrices and variables and arrays */
hypre_TFree(ijbuf_data);
hypre_TFree(ijbuf_cols);
hypre_TFree(ijbuf_rownums);
hypre_TFree(ijbuf_numcols);
if(sym_collapse)
{
hypre_TFree(ijbuf_sym_data);
hypre_TFree(ijbuf_sym_cols);
hypre_TFree(ijbuf_sym_rownums);
hypre_TFree(ijbuf_sym_numcols);
}
hypre_TFree(Pattern_offd_indices);
hypre_TFree(S_ext_diag_i);
hypre_TFree(S_ext_offd_i);
hypre_TFree(S_offd_indices);
hypre_TFree(offd_intersection);
hypre_TFree(offd_intersection_data);
hypre_TFree(diag_intersection);
hypre_TFree(diag_intersection_data);
if (S_ext_diag_size)
{
hypre_TFree(S_ext_diag_j);
hypre_TFree(S_ext_diag_data);
}
if (S_ext_offd_size)
{
hypre_TFree(S_ext_offd_j);
hypre_TFree(S_ext_offd_data);
}
if (num_cols_offd_Sext)
{ hypre_TFree(col_map_offd_Sext); }
if (0) /*(strong_threshold > S_commpkg_switch)*/
{ hypre_TFree(col_offd_S_to_A); }
ierr += hypre_ParCSRMatrixDestroy(Pattern);
ierr += hypre_ParCSRMatrixDestroy(RAP);
ierr += hypre_ParCSRMatrixDestroy(S);
ierr += HYPRE_IJMatrixSetObjectType(ijmatrix, -1);
ierr += HYPRE_IJMatrixDestroy(ijmatrix);
/*end_time = hypre_MPI_Wtime();
if(my_id == 0)
{ fprintf(stdout, "NonGalerkin Time: %1.2e\n", end_time-start_time); } */
return ierr;
}