Search is not available for this dataset
repo
stringlengths
2
152
file
stringlengths
15
239
code
stringlengths
0
58.4M
file_length
int64
0
58.4M
avg_line_length
float64
0
1.81M
max_line_length
int64
0
12.7M
extension_type
stringclasses
364 values
null
r-mae-main/pretrain/dataset/processor/processors.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import os
import random

import torchvision.transforms as transforms

from pretrain.utils.distributed import is_master
from pretrain.dataset.processor import functional as F

# Global name -> class registry populated via @register_processor.
PROCESSOR_REGISTRY = {}


def register_processor(name):
    """Class decorator that registers a processor class under ``name``.

    Raises:
        ValueError: if ``name`` is already registered.
    """

    def register_processor_cls(cls):
        if name in PROCESSOR_REGISTRY:
            raise ValueError("Cannot register duplicate process ({})".format(name))
        PROCESSOR_REGISTRY[name] = cls
        return cls

    return register_processor_cls


def build_processor(config):
    """Instantiate a registered processor from a config.

    Args:
        config: mapping with a ``type`` key naming the processor and an
            optional ``params`` mapping of constructor keyword arguments.

    Returns:
        The constructed processor instance.

    Raises:
        AttributeError: if ``config`` has no ``type`` attribute.
        ValueError: if ``config["type"]`` is not registered.
    """
    if not hasattr(config, "type"):
        raise AttributeError(
            "Config must have 'type' attribute to specify type of processor"
        )

    if config["type"] in PROCESSOR_REGISTRY:
        processor_class = PROCESSOR_REGISTRY[config["type"]]
    else:
        raise ValueError("Unknown processor type {}".format(config["type"]))

    # BUGFIX: previously, a config without 'params' on a NON-master rank fell
    # into the else-branch and crashed on config["params"]; now every rank
    # defaults to {} and only the master rank prints the warning.
    params = {}
    if hasattr(config, "params"):
        params = config["params"]
    elif is_master():
        # BUGFIX: the original ended with "default \{\}", which is an invalid
        # escape printing literal backslashes; "{{}}" is the correct
        # str.format escape for a literal "{}".
        print(
            "Config doesn't have 'params' attribute to "
            "specify parameters of the processor "
            "of type {}. Setting to default {{}}".format(config["type"])
        )

    try:
        processor_instance = processor_class(**params)
    except Exception:
        print("Error in", processor_class.__name__)
        raise  # bare raise preserves the original traceback

    return processor_instance


class BaseProcessor:
    """Base class for all processors; copies ``params`` entries to attributes."""

    def __init__(self, params=None):
        # BUGFIX: was ``params={}`` — a shared mutable default argument.
        if params is None:
            params = {}
        for kk, vv in params.items():
            setattr(self, kk, vv)

    def __call__(self, item, *args, **kwargs):
        # Identity transform by default.
        return item


@register_processor("answer")
class AnswerProcessor(BaseProcessor):
    """Maps answer strings to class indices using a vocabulary file.

    Args:
        class_file: path to the vocabulary file (one class per line).
        data_root_dir: optional root joined with a relative ``class_file``.
    """

    NO_OBJECT = "<nobj>"

    def __init__(self, class_file, data_root_dir=None):
        defaults = dict(class_file=class_file, data_root_dir=data_root_dir)
        super().__init__(defaults)
        if not os.path.isabs(class_file) and data_root_dir is not None:
            class_file = os.path.join(data_root_dir, class_file)
        if not os.path.exists(class_file):
            raise RuntimeError(
                "Vocab file {} for vocab dict doesn't exist!".format(class_file)
            )

        self.word_list = self._load_str_list(class_file)
        # BUGFIX: cls2idx referenced self.word2idx_dict, which was never built
        # anywhere and raised AttributeError on first use.
        self.word2idx_dict = {w: n_w for n_w, w in enumerate(self.word_list)}

    def _load_str_list(self, class_file):
        """Read the vocab file and return the list of normalized class names."""
        with open(class_file) as f:
            lines = f.readlines()
        lines = [self._process_answer(l) for l in lines]
        return lines

    def _process_answer(self, answer):
        # Normalize: lowercase, drop commas/question marks, split possessive 's.
        remove = [",", "?"]
        answer = answer.lower()
        for item in remove:
            answer = answer.replace(item, "")
        answer = answer.replace("'s", " 's")
        return answer.strip()

    def get_size(self):
        return len(self.word_list)

    def idx2cls(self, n_w):
        """Index -> class string."""
        return self.word_list[n_w]

    def cls2idx(self, w):
        """Class string -> index; raises ValueError for unknown classes."""
        if w in self.word2idx_dict:
            return self.word2idx_dict[w]
        else:
            raise ValueError("class %s not in dictionary" % w)

    def __len__(self):
        return len(self.word_list)


# =========================== #
# --------- 2d ops ---------- #
# =========================== #
@register_processor("to_tensor")
class ToTensor(BaseProcessor):
    """Converts (sample, target) to tensors via functional.to_tensor."""

    def __init__(self):
        super().__init__()

    def __call__(self, sample, target=None):
        sample, target = F.to_tensor(sample, target)
        return sample, target


@register_processor("normalize")
class Normalize(BaseProcessor):
    """Channel-wise normalization with the given mean/std."""

    def __init__(self, mean, std):
        defaults = dict(mean=mean, std=std)
        super().__init__(defaults)

    def __call__(self, sample, target=None):
        sample, target = F.normalize(sample, target, mean=self.mean, std=self.std)
        return sample, target


@register_processor("random_size_crop")
class RandomSizeCrop(BaseProcessor):
    """Crops a random region with side lengths in [min_size, max_size]."""

    def __init__(self, min_size, max_size):
        defaults = dict(min_size=min_size, max_size=max_size)
        super().__init__(defaults)

    def __call__(self, sample, target=None):
        img = sample["image"]
        # Crop size is clamped by the actual image dimensions.
        w = random.randint(self.min_size, min(img.width, self.max_size))
        h = random.randint(self.min_size, min(img.height, self.max_size))
        region = transforms.RandomCrop.get_params(img, [h, w])
        return F.crop(sample, target, region)


@register_processor("random_resize")
class RandomResize(BaseProcessor):
    """Resizes to a size sampled from ``min_size`` (int or range spec)."""

    def __init__(self, min_size, max_size=None):
        # ``min_size`` may be a single int or a (start, stop[, step]) range spec.
        if isinstance(min_size, int):
            min_size = (min_size,)
        else:
            min_size = list(range(*min_size))
        defaults = dict(min_size=min_size, max_size=max_size)
        super().__init__(defaults)

    def __call__(self, sample, target=None):
        size = random.choice(self.min_size)
        sample, target = F.resize(sample, target, size, self.max_size)
        return sample, target


@register_processor("random_horizontal_flip")
class RandomHorizontalFlip(BaseProcessor):
    """Horizontally flips with probability ``prob``."""

    def __init__(self, prob=0.5):
        super().__init__(dict(p=prob))

    def __call__(self, sample, target=None):
        if random.random() < self.p:
            sample, target = F.hflip(sample, target)
        return sample, target


@register_processor("random_select")
class RandomSelect(BaseProcessor):
    """Applies one of ``preprocessors``, sampled with probabilities ``probs``."""

    def __init__(self, preprocessors, probs):
        super().__init__(dict(preprocessors=preprocessors, p=probs))
        self.preprocessors = []
        for preprocessor in preprocessors:
            self.preprocessors.append(build_processor(preprocessor))
        assert len(self.preprocessors) == len(self.p)

    def __call__(self, sample, target=None):
        idx = random.choices(list(range(len(self.preprocessors))), weights=self.p)[0]
        sample, target = self.preprocessors[idx](sample, target)
        return sample, target


@register_processor("resize_scale")
class ResizeScale(BaseProcessor):
    """Resizes by a scale sampled uniformly from [min_scale, max_scale]."""

    def __init__(self, min_scale, max_scale, image_size, interpolation=2):
        super().__init__(
            dict(
                min_scale=min_scale,
                max_scale=max_scale,
                image_size=image_size,
                interpolation=interpolation,
            )
        )

    def __call__(self, sample, target=None):
        scale = random.uniform(self.min_scale, self.max_scale)
        return F.resize_scale(
            sample, target, scale, self.image_size, self.image_size, self.interpolation
        )


@register_processor("fixed_size_crop")
class FixedSizeCrop(BaseProcessor):
    """Fixed-size random crop, padding with ``pad_value`` when needed."""

    def __init__(self, image_size, pad_value=0):
        crop_size = (image_size, image_size)
        super().__init__(
            dict(image_size=image_size, pad_value=pad_value, crop_size=crop_size)
        )

    def __call__(self, sample, target=None):
        return F.random_crop(
            sample, target, self.crop_size, is_fixed=True, pad_value=self.pad_value
        )


@register_processor("random_size_crop_v2")
class RandomSizeCropv2(BaseProcessor):
    """Random crop (non-fixed variant) of size ``image_size``."""

    def __init__(self, image_size):
        crop_size = (image_size, image_size)
        super().__init__(dict(image_size=image_size, crop_size=crop_size))

    def __call__(self, sample, target=None):
        return F.random_crop(sample, target, self.crop_size, is_fixed=False)


@register_processor("random_resize_crop")
class RandomResizeCrop(BaseProcessor):
    """RandomResizedCrop-style augmentation delegated to functional."""

    def __init__(
        self, image_size, scale, ratio=(0.75, 1.3333333333333333), interpolation=3
    ):
        super().__init__(
            dict(
                image_size=image_size,
                scale=scale,
                ratio=ratio,
                interpolation=interpolation,
            )
        )

    def __call__(self, sample, target=None):
        return F.random_resize_crop(
            sample, target, self.scale, self.ratio, self.image_size, self.interpolation
        )


@register_processor("random_resize_crop_w_loop")
class RandomResizeCropWithLoop(BaseProcessor):
    """Looping variant of RandomResizeCrop delegated to functional."""

    def __init__(
        self, image_size, scale, ratio=(0.75, 1.3333333333333333), interpolation=3
    ):
        super().__init__(
            dict(
                image_size=image_size,
                scale=scale,
                ratio=ratio,
                interpolation=interpolation,
            )
        )

    def __call__(self, sample, target=None):
        return F.random_resize_crop_with_loop(
            sample, target, self.scale, self.ratio, self.image_size, self.interpolation
        )
@register_processor("resize")
class Resize(BaseProcessor):
    """Deterministically resizes sample/target to ``image_size``."""

    def __init__(self, image_size, interpolation=3):
        super().__init__(dict(image_size=image_size, interpolation=interpolation))

    def __call__(self, sample, target=None):
        resized_sample, resized_target = F.resize(
            sample, target, self.image_size, interpolation=self.interpolation
        )
        return resized_sample, resized_target


@register_processor("center_crop")
class CenterCrop(BaseProcessor):
    """Crops the central ``image_size`` region of sample/target."""

    def __init__(self, image_size):
        size = (image_size, image_size) if isinstance(image_size, int) else image_size
        super().__init__(dict(image_size=size))

    def __call__(self, sample, target=None):
        cropped_sample, cropped_target = F.center_crop(sample, target, self.image_size)
        return cropped_sample, cropped_target


@register_processor("compose")
class Compose(BaseProcessor):
    """Chains several processors, threading (sample, target) through each."""

    def __init__(self, preprocessors):
        super().__init__(dict(preprocessors=preprocessors))
        self.preprocessors = [build_processor(cfg) for cfg in preprocessors]

    def __call__(self, sample, target=None):
        for transform in self.preprocessors:
            sample, target = transform(sample, target)
        return sample, target

    def __repr__(self):
        entries = ["\n\t{0}".format(t) for t in self.preprocessors]
        return self.__class__.__name__ + "(" + "".join(entries) + "\n"
10,114
28.75
87
py
null
r-mae-main/pretrain/dataset/reader/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. from pretrain.dataset.reader.image_reader import ImageReader __all__ = ["ImageReader"]
286
27.7
61
py
null
r-mae-main/pretrain/dataset/reader/image_reader.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import os

import cv2
from PIL import Image


class ImageReader:
    """Lazily-constructed image reader resolving paths against ``base_path``.

    Args:
        base_path: directory every ``image_path`` passed to :meth:`read`
            is joined against.
        reader_type: backend selector, either ``"cv2"`` or ``"pil"``.
    """

    def __init__(self, base_path, reader_type):
        self.base_path = base_path
        self.reader_type = reader_type
        self.image_reader = None  # backend created lazily on first read()

    def _init_reader(self):
        """Instantiate the backend named by ``reader_type``."""
        if self.reader_type == "cv2":
            self.image_reader = Cv2Reader()
        elif self.reader_type == "pil":
            self.image_reader = PILReader()
        else:
            raise TypeError("unknown image reader type")

    def read(self, image_path):
        """Read ``base_path/image_path`` with the configured backend."""
        image_path = os.path.join(self.base_path, image_path)
        if self.image_reader is None:
            self._init_reader()
        return self.image_reader.read(image_path)


class Cv2Reader:
    """Reads an image with OpenCV, returning an RGB numpy array."""

    def read(self, image_path):
        # BUGFIX: cv2.CV_LOAD_IMAGE_COLOR was removed in OpenCV 3+ and raises
        # AttributeError on modern installs; cv2.IMREAD_COLOR is the supported
        # equivalent (same value, forces a 3-channel BGR load).
        image = cv2.imread(image_path, cv2.IMREAD_COLOR)
        if image is None:
            # cv2.imread silently returns None on a missing/corrupt file,
            # which would make cvtColor fail with an opaque error later.
            raise FileNotFoundError("cv2 failed to read image: {}".format(image_path))
        return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # numpy object


class PILReader:
    """Reads an image with PIL, returning an RGB ``Image`` object."""

    def read(self, image_path):
        return Image.open(image_path).convert("RGB")  # Image object
1,271
24.959184
78
py
null
r-mae-main/pretrain/model/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import importlib
import os

from pretrain.model.base_model import BaseModel

# Global architecture-name -> model-class registry.
ARCH_REGISTRY = {}

__all__ = ["BaseModel"]


def build_model(config, num_classes):
    """Construct and build the model named by ``config.model``.

    Args:
        config: global config; ``config.model_config[config.model]`` is passed
            as the model's own config.
        num_classes: optional class count forwarded to the constructor.

    Raises:
        ValueError: if the architecture name is not registered.
    """
    model_name = config.model
    model_config = config.model_config[model_name]

    if model_name not in ARCH_REGISTRY:
        raise ValueError("Model architecture ({}) is not found.".format(model_name))

    if num_classes is not None:
        model = ARCH_REGISTRY[model_name](
            model_config, num_classes, global_config=config
        )
    else:
        model = ARCH_REGISTRY[model_name](model_config, global_config=config)

    if hasattr(model, "build"):
        model.build()

    return model


def register_model(name):
    """Class decorator registering a BaseModel subclass under ``name``."""

    def register_model_cls(cls):
        if name in ARCH_REGISTRY:
            raise ValueError("Cannot register duplicate model ({})".format(name))
        elif not issubclass(cls, BaseModel):
            raise ValueError(
                "Model ({}: {}) must extend BaseModel".format(name, cls.__name__),
            )
        ARCH_REGISTRY[name] = cls
        return cls

    return register_model_cls


def get_arch_list():
    """Return the registered architecture names as a tuple."""
    return tuple(ARCH_REGISTRY.keys())


# Auto-import every module in this package so @register_model decorators run.
models_dir = os.path.dirname(__file__)
for file in os.listdir(models_dir):
    path = os.path.join(models_dir, file)
    if (
        not file.startswith("_")
        and not file.startswith(".")
        and (file.endswith(".py") or os.path.isdir(path))
    ):
        # BUGFIX: ``file[: file.find(".py")]`` truncated at the FIRST ".py"
        # occurrence, mangling names like "copy.py" -> "co"; strip the
        # trailing suffix instead.
        model_name = file[: -len(".py")] if file.endswith(".py") else file
        importlib.import_module("pretrain.model." + model_name)
1,777
25.537313
84
py
null
r-mae-main/pretrain/model/base_model.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from torch import nn


class BaseModel(nn.Module):
    """For integration with the trainer, datasets and other features, models
    needs to inherit this class, call `super`, write a build function, write a
    forward function taking a ``SampleList`` as input and returning a dict as
    output and finally, register it using ``@registry.register_model``

    Args:
        config (DictConfig): ``model_config`` configuration from global config.
    """

    def __init__(self, config, global_config):
        super().__init__()
        self.config = config
        self._global_config = global_config

    def _build(self):
        """Function to be implemented by the child class, in case they need to
        build their model separately than ``__init__``. All model related
        downloads should also happen here.
        """
        raise NotImplementedError(
            "Build method not implemented in the child model class."
        )

    def build(self):
        self._build()
        self.inference(False)

    def _set_inferencing(self, mode):
        """Propagate the ``inferencing`` flag to self and every submodule.

        BUGFIX(cleanup): the original duplicated an if/else whose two branches
        were identical (``module.inferencing = mode`` vs ``setattr(...)``) in
        both ``inference`` and ``train``; setattr alone covers both cases.
        """
        self.inferencing = mode
        for module in self.modules():
            setattr(module, "inferencing", mode)

    def inference(self, mode=True):
        """Toggle inference mode; entering it also switches off train mode."""
        if mode:
            super().train(False)
        self._set_inferencing(mode)

    def train(self, mode=True):
        """Standard ``nn.Module.train``; entering train mode clears the
        ``inferencing`` flag on every submodule first."""
        if mode:
            self._set_inferencing(False)
        super().train(mode)
1,873
31.310345
79
py
null
r-mae-main/pretrain/model/mae.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import copy

from pretrain.model import BaseModel, register_model
from pretrain.module import build_mae, build_rmae
from pretrain.utils.modeling import get_mae_parameters


@register_model("mae")
class MaskedAutoencoder(BaseModel):
    """Trainer-facing wrapper around the MAE ViT backbone."""

    def __init__(self, config, **kwargs):
        super().__init__(config, global_config=kwargs["global_config"])
        self.mask_ratio = config["mask_ratio"]

    def get_optimizer_parameters(self):
        """Parameter groups with separate weight decay for norm/bias terms."""
        opt_params = self._global_config.optimizer.params
        return get_mae_parameters(
            self.mae_vit, wd_norm=opt_params.wd_norm, wd_bias=opt_params.wd_bias
        )

    def _build(self):
        self.mae_vit = build_mae(copy.deepcopy(self.config.mae_vit))

    def state_dict(self, *args, **kwargs):
        # (De)serialization is delegated entirely to the wrapped backbone.
        return self.mae_vit.state_dict(*args, **kwargs)

    def load_state_dict(self, state_dict, *args, **kwargs):
        return self.mae_vit.load_state_dict(state_dict, *args, **kwargs)

    def forward(self, sample, target=None):
        loss, pred, metric, mask = self.mae_vit(sample["image"], self.mask_ratio)
        if not self.training:
            return loss, pred, mask
        return {"losses": {"pretrain_loss": loss}, "metrics": {"mae_loss": metric}}


@register_model("rmae")
class RegionMaskedAutoencoder(MaskedAutoencoder):
    """MAE variant that additionally reconstructs region maps (R-MAE)."""

    def __init__(self, config, **kwargs):
        super().__init__(config, global_config=kwargs["global_config"])
        self.num_region = config.mae_vit.params.num_region
        self.region_sample_type = config.mae_vit.params.region_sample_type

    def _build(self):
        self.mae_vit = build_rmae(copy.deepcopy(self.config.mae_vit))

    def forward(self, sample, target=None):
        imgs = sample["image"]
        region = sample["region"] if self.num_region > 0 else None

        if self.region_sample_type == "random_fg":
            assert self.num_region > 0, "num_region should be greater than 0"
            shuffle_ids = sample["shuffle_ids"]
        else:
            shuffle_ids = None

        loss, pred, metric, mask = self.mae_vit(
            imgs, self.mask_ratio, region=region, shuffle_ids=shuffle_ids
        )
        if not self.training:
            # pred is (image reconstruction, region reconstruction) here.
            return loss, pred[0], pred[1], region, mask

        mae_loss, proposal_loss = metric
        return {
            "losses": {"pretrain_loss": loss},
            "metrics": {"mae_loss": mae_loss, "region_loss": proposal_loss},
        }
2,822
31.448276
87
py
null
r-mae-main/pretrain/module/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. from .mae import build_mae from .rmae import build_rmae __all__ = [ "build_mae", "build_rmae", ]
305
19.4
61
py
null
r-mae-main/pretrain/module/layers.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import torch
import torch.nn as nn

from pretrain.utils.functional import drop_path


class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob=None, scale_by_keep=True):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob
        self.scale_by_keep = scale_by_keep

    def forward(self, x):
        # Only active when self.training is True.
        return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)


class Mlp(nn.Module):
    """MLP as used in Vision Transformer, MLP-Mixer and related networks"""

    def __init__(
        self,
        in_features,
        hidden_features=None,
        out_features=None,
        act_layer=nn.GELU,
        drop=0.0,
    ):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        # ``drop`` may be a single probability or a (drop1, drop2) pair.
        if isinstance(drop, tuple):
            drop_probs = drop
        else:
            drop_probs = (drop, drop)

        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.drop1 = nn.Dropout(drop_probs[0])
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop2 = nn.Dropout(drop_probs[1])

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop1(x)
        x = self.fc2(x)
        x = self.drop2(x)
        return x


class PatchEmbed(nn.Module):
    """2D Image to Patch Embedding"""

    def __init__(
        self,
        img_size=224,
        patch_size=16,
        in_chans=3,
        embed_dim=768,
        norm_layer=None,
        flatten=True,
    ):
        super().__init__()
        # BUGFIX(cleanup): the original if/else assigned ``img_size = img_size``
        # in the tuple branch (a no-op); only the int case needs conversion.
        if not isinstance(img_size, tuple):
            img_size = (img_size, img_size)
        if not isinstance(patch_size, tuple):
            patch_size = (patch_size, patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
        self.num_patches = self.grid_size[0] * self.grid_size[1]
        self.flatten = flatten

        # Non-overlapping conv == per-patch linear projection.
        self.proj = nn.Conv2d(
            in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
        )
        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, x):
        x = self.proj(x)
        bchw = x.shape  # shape after projection, before flattening
        if self.flatten:
            x = x.flatten(2).transpose(1, 2)  # BCHW -> BNC
        x = self.norm(x)
        return x, bchw


class Attention(nn.Module):
    """Multi-head self-attention; also returns the pre-softmax logits."""

    def __init__(
        self,
        dim,
        num_heads=8,
        qkv_bias=False,
        attn_drop=0.0,
        proj_drop=0.0,
        use_cls_token=True,
    ):
        super().__init__()
        assert dim % num_heads == 0, "dim should be divisible by num_heads"
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim**-0.5
        self.use_cls_token = use_cls_token
        self.qkv_bias = qkv_bias

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, rel_pos_bias=None, attn_mask=None):
        # attn_mask: B x N
        B, N, C = x.shape
        qkv = (
            self.qkv(x)
            .reshape(B, N, 3, self.num_heads, C // self.num_heads)
            .permute(2, 0, 3, 1, 4)
        )
        q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)

        attn = torch.matmul(q * self.scale, k.transpose(-2, -1))
        if attn_mask is not None:
            # -65504 is the most negative normal fp16 value: masked entries
            # vanish after softmax without overflowing in half precision.
            attn = attn.masked_fill(attn_mask.unsqueeze(1), -65504.0)
        if rel_pos_bias is not None:
            if not self.use_cls_token:
                rel_pos_bias = rel_pos_bias[:, 1:, 1:]
            attn = attn + rel_pos_bias
        attn_wo_softmax = attn
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        self.attention = attn  # cached for external inspection/visualization

        x = torch.matmul(attn, v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x, attn_wo_softmax


class Block(nn.Module):
    """Standard pre-norm Transformer encoder block (self-attention + MLP)."""

    def __init__(
        self,
        dim,
        num_heads,
        mlp_ratio=4.0,
        qkv_bias=False,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
        win_size=0,
        use_cls_token=True,
    ):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            attn_drop=attn_drop,
            proj_drop=drop,
            use_cls_token=use_cls_token,
        )
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=mlp_hidden_dim,
            act_layer=act_layer,
            drop=drop,
        )
        self.win_size = win_size
        # NOTE(review): win_size/hw are stored but never read in this class —
        # presumably consumed by external code; verify before removing.
        self.hw = None

    def forward(self, x, rel_pos_bias=None, attn_mask=None):
        ori_x = x
        x = self.norm1(x)
        x, attn = self.attn(x, rel_pos_bias=rel_pos_bias, attn_mask=attn_mask)
        x = ori_x + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x, attn


class CrossAttention(nn.Module):
    """Multi-head cross-attention from ``query`` onto ``memory``."""

    def __init__(
        self,
        qdim,
        kvdim,
        num_heads=8,
        qkv_bias=False,
        attn_drop=0.0,
        proj_drop=0.0,
        use_cls_token=True,
    ):
        super().__init__()
        assert qdim % num_heads == 0, "dim should be divisible by num_heads"
        self.num_heads = num_heads
        head_dim = qdim // num_heads
        self.scale = head_dim**-0.5
        self.use_cls_token = use_cls_token
        self.qkv_bias = qkv_bias

        # Keys/values are projected from the (possibly wider) memory dim.
        self.q = nn.Linear(qdim, qdim, bias=qkv_bias)
        self.kv = nn.Linear(kvdim, qdim * 2, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(qdim, qdim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(
        self, query, memory, rel_pos_bias=None, attn_mask=None, with_mask=False
    ):
        # attn_mask: B x N
        B, N, C = query.shape
        L = memory.shape[1]
        q = (
            self.q(query)
            .reshape(B, N, self.num_heads, C // self.num_heads)
            .permute(0, 2, 1, 3)
        )
        k, v = (
            self.kv(memory)
            .reshape(B, L, 2, self.num_heads, C // self.num_heads)
            .permute(2, 0, 3, 1, 4)
            .unbind(0)
        )

        attn = torch.matmul(q * self.scale, k.transpose(-2, -1))
        if attn_mask is not None:
            # -65504 = most negative normal fp16 value (see Attention).
            attn = attn.masked_fill(attn_mask.unsqueeze(1), -65504.0)
        if rel_pos_bias is not None:
            if not self.use_cls_token:
                rel_pos_bias = rel_pos_bias[:, 1:, 1:]
            attn = attn + rel_pos_bias
        attn_wo_softmax = attn
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        self.attention = attn  # cached for external inspection/visualization

        x = torch.matmul(attn, v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x, attn_wo_softmax


class DecoderBlock(nn.Module):
    """Decoder block: self-attention, cross-attention onto memory, then MLP."""

    def __init__(
        self,
        dim,
        enc_dim,
        num_heads,
        mlp_ratio=4.0,
        qkv_bias=False,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
        use_cls_token=True,
    ):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            attn_drop=attn_drop,
            proj_drop=drop,
            use_cls_token=use_cls_token,
        )
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = norm_layer(dim)
        self.cross_attn = CrossAttention(
            dim,
            enc_dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            attn_drop=attn_drop,
            proj_drop=drop,
            use_cls_token=use_cls_token,
        )
        self.norm3 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=mlp_hidden_dim,
            act_layer=act_layer,
            drop=drop,
        )

    def forward(self, x, memory, rel_pos_bias=None, attn_mask=None):
        ori_x = x
        x = self.norm1(x)
        x, attn = self.attn(x, rel_pos_bias=rel_pos_bias, attn_mask=attn_mask)
        x = ori_x + self.drop_path(x)

        ori_x = x
        x = self.norm2(x)
        x, attn = self.cross_attn(
            x,
            memory,
            rel_pos_bias=rel_pos_bias,
            attn_mask=attn_mask,
        )
        x = ori_x + self.drop_path(x)

        ori_x = x
        # NOTE(review): drop_path is applied twice around the MLP here (inner
        # and outer), unlike Block's single application; preserved as-is —
        # confirm this is intentional.
        x = self.drop_path(self.mlp(self.norm3(x)))
        x = ori_x + self.drop_path(x)
        return x, attn


class DecoderBlockWithExpansion(nn.Module):
    """Decoder block that expands tokens pairwise against projected memory."""

    def __init__(
        self,
        dim,
        enc_dim,
        num_heads,
        qkv_bias=False,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
        use_cls_token=True,
    ):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            attn_drop=attn_drop,
            proj_drop=drop,
            use_cls_token=use_cls_token,
        )
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = nn.Identity()
        self.proj = nn.Linear(enc_dim, dim)
        self.norm3 = norm_layer(dim)
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=dim,
            act_layer=act_layer,
            drop=drop,
        )

    def forward(self, x, memory, rel_pos_bias=None, attn_mask=None):
        ori_x = x
        x = self.norm1(x)
        x, attn = self.attn(x, rel_pos_bias=rel_pos_bias, attn_mask=attn_mask)
        x = ori_x + self.drop_path(x)

        ori_x = x
        x = self.norm2(x)
        x_mask = self.proj(memory)
        # Broadcasted pairwise sum: every token meets every memory slot.
        x_mask = ori_x.unsqueeze(2) + self.drop_path(x_mask).unsqueeze(1)
        x_mask = self.mlp(self.norm3(x_mask))
        return x_mask, attn
11,237
27.165414
99
py
null
r-mae-main/pretrain/module/mae.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import copy
import warnings
from functools import partial

import omegaconf
import torch
import torch.nn as nn

from pretrain.module.layers import PatchEmbed, Block
from pretrain.utils.functional import get_2d_sincos_pos_embed


class MaskedAutoencoderViT(nn.Module):
    """Masked Autoencoder with a ViT backbone (encoder + lightweight decoder)."""

    def __init__(
        self,
        img_size=224,
        patch_size=16,
        in_chans=3,
        embed_dim=1024,
        depth=24,
        num_heads=16,
        decoder_embed_dim=512,
        decoder_depth=8,
        decoder_num_heads=16,
        mlp_ratio=4.0,
        norm_layer=nn.LayerNorm,
        norm_pix_loss=False,
        use_mae_loss=True,
        **kwargs,
    ):
        super().__init__()
        if len(list(kwargs.keys())) > 0:
            warnings.warn(
                f"Arguments {list(kwargs.keys())} are unused in {self.__class__.__name__}"
            )

        # --------------------------------------------------------------------------
        # MAE encoder specifics
        self.patch_embed = PatchEmbed(img_size, patch_size, in_chans, embed_dim)
        num_patches = self.patch_embed.num_patches

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(
            torch.zeros(1, num_patches + 1, embed_dim), requires_grad=False
        )  # fixed sin-cos embedding

        self.blocks = nn.ModuleList(
            [
                Block(
                    embed_dim,
                    num_heads,
                    mlp_ratio,
                    qkv_bias=True,
                    norm_layer=norm_layer,
                )
                for i in range(depth)
            ]
        )
        self.norm = norm_layer(embed_dim)
        # --------------------------------------------------------------------------

        if use_mae_loss:
            # --------------------------------------------------------------------------
            # MAE decoder specifics
            self.decoder_embed = nn.Linear(embed_dim, decoder_embed_dim, bias=True)

            self.mask_token = nn.Parameter(torch.zeros(1, 1, decoder_embed_dim))

            self.decoder_pos_embed = nn.Parameter(
                torch.zeros(1, num_patches + 1, decoder_embed_dim), requires_grad=False
            )  # fixed sin-cos embedding

            self.decoder_blocks = nn.ModuleList(
                [
                    Block(
                        decoder_embed_dim,
                        decoder_num_heads,
                        mlp_ratio,
                        qkv_bias=True,
                        norm_layer=norm_layer,
                    )
                    for i in range(decoder_depth)
                ]
            )

            self.decoder_norm = norm_layer(decoder_embed_dim)
            self.decoder_pred = nn.Linear(
                decoder_embed_dim, (patch_size**2) * in_chans, bias=True
            )  # decoder to patch
            # --------------------------------------------------------------------------

        self.norm_pix_loss = norm_pix_loss
        self.use_mae_loss = use_mae_loss
        self.patch_size = patch_size

        self.initialize_weights()

    def initialize_weights(self):
        print("Initializing MAE params...")
        # initialization
        # initialize (and freeze) pos_embed by sin-cos embedding
        pos_embed = get_2d_sincos_pos_embed(
            self.pos_embed.shape[-1],
            int(self.patch_embed.num_patches**0.5),
            cls_token=True,
        )
        self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0))

        # initialize patch_embed like nn.Linear (instead of nn.Conv2d)
        w = self.patch_embed.proj.weight.data
        torch.nn.init.xavier_uniform_(w.view([w.shape[0], -1]))

        # timm's trunc_normal_(std=.02) is effectively normal_(std=0.02) as cutoff is too big (2.)
        torch.nn.init.normal_(self.cls_token, std=0.02)

        if self.use_mae_loss:
            decoder_pos_embed = get_2d_sincos_pos_embed(
                self.decoder_pos_embed.shape[-1],
                int(self.patch_embed.num_patches**0.5),
                cls_token=True,
            )
            self.decoder_pos_embed.data.copy_(
                torch.from_numpy(decoder_pos_embed).float().unsqueeze(0)
            )
            torch.nn.init.normal_(self.mask_token, std=0.02)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            # we use xavier_uniform following official JAX ViT:
            torch.nn.init.xavier_uniform_(m.weight)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def patchify(self, imgs):
        """
        imgs: (N, C, H, W)
        x: (N, L, patch_size**2 *C)
        """
        c = imgs.shape[1]
        p = self.patch_embed.patch_size[0]
        assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0

        h = w = imgs.shape[2] // p
        x = imgs.reshape(shape=(imgs.shape[0], c, h, p, w, p))
        x = torch.einsum("nchpwq->nhwpqc", x)
        x = x.reshape(shape=(imgs.shape[0], h * w, (p**2) * c))
        return x

    def random_masking(self, x, mask_ratio):
        """
        Perform per-sample random masking by per-sample shuffling.
        Per-sample shuffling is done by argsort random noise.
        x: [N, L, D], sequence
        """
        N, L, D = x.shape  # batch, length, dim
        len_keep = int(L * (1 - mask_ratio))

        noise = torch.rand(N, L, device=x.device)  # noise in [0, 1]

        # sort noise for each sample
        ids_shuffle = torch.argsort(
            noise, dim=1
        )  # ascend: small is keep, large is remove
        ids_restore = torch.argsort(ids_shuffle, dim=1)

        # keep the first subset
        ids_keep = ids_shuffle[:, :len_keep]
        x_masked = torch.gather(
            x, dim=1, index=ids_keep.unsqueeze(-1).expand(-1, -1, D)
        )

        # generate the binary mask: 0 is keep, 1 is remove
        mask = torch.ones([N, L], device=x.device)
        mask[:, :len_keep] = 0
        # unshuffle to get the binary mask
        mask = torch.gather(mask, dim=1, index=ids_restore)

        return x_masked, mask, ids_restore

    def forward_encoder(self, x, mask_ratio):
        # embed patches
        x = self.patch_embed(x)[0]

        # add pos embed w/o cls token
        x = x + self.pos_embed[:, 1:, :]

        # masking: length -> length * mask_ratio
        # BUGFIX: random_masking returns three values; the previous two-way
        # unpacking raised ValueError and dropped ids_restore, which the
        # decoder needs to unshuffle tokens.
        x, mask, ids_restore = self.random_masking(x, mask_ratio)

        # append cls token
        cls_token = self.cls_token + self.pos_embed[:, :1, :]
        cls_tokens = cls_token.expand(x.shape[0], -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)

        # apply Transformer blocks
        for blk in self.blocks:
            x = blk(x)[0]
        x = self.norm(x)

        return x, mask, ids_restore

    def forward_decoder(self, x, ids_restore):
        # embed tokens
        x = self.decoder_embed(x)

        # append mask tokens to sequence
        mask_tokens = self.mask_token.expand(
            x.shape[0], ids_restore.shape[1] + 1 - x.shape[1], -1
        )
        x_ = torch.cat([x[:, 1:, :], mask_tokens], dim=1)  # no cls token
        x_ = torch.gather(
            x_, dim=1, index=ids_restore.unsqueeze(-1).expand(-1, -1, x.shape[2])
        )  # unshuffle
        x = torch.cat([x[:, :1, :], x_], dim=1)  # append cls token

        # add pos embed
        x = x + self.decoder_pos_embed

        # apply Transformer blocks
        for blk in self.decoder_blocks:
            x = blk(x)[0]
        x = self.decoder_norm(x)

        # predictor projection
        x = self.decoder_pred(x)

        # remove cls token
        x = x[:, 1:, :]

        return x

    def _forward_mae_loss(self, pred, pred_target):
        # Per-patch mean squared error.
        l2_loss = (pred - pred_target) ** 2
        l2_loss = l2_loss.mean(dim=-1)  # [N, L], mean loss per patch
        return l2_loss

    def forward_loss(self, imgs, pred, mask):
        """
        imgs: [N, 3, H, W]
        pred: [N, L, p*p*3]
        mask: [N, L], 0 is keep, 1 is remove,

        Returns (loss, metric) — identical here, kept as a pair so subclasses
        can report a different metric.
        """
        target = self.patchify(imgs)
        if self.norm_pix_loss:
            mean = target.mean(dim=-1, keepdim=True)
            var = target.var(dim=-1, keepdim=True)
            target = (target - mean) / (var + 1.0e-6) ** 0.5

        mae_loss = self._forward_mae_loss(pred, target)
        mae_loss = (mae_loss * mask).sum() / mask.sum()  # mean loss on removed patches
        return mae_loss, mae_loss

    def forward(self, imgs, mask_ratio=0.75):
        """Full MAE pass returning (loss, pred, metric, mask).

        BUGFIX: this method was mistakenly also named ``forward_loss``,
        shadowing the loss method above and making its internal
        ``self.forward_loss(imgs, pred, mask)`` call a TypeError; callers
        (e.g. the MaskedAutoencoder model wrapper) invoke the module as
        ``self.mae_vit(imgs, mask_ratio)``, i.e. ``forward``.
        """
        latent, mask, ids_restore = self.forward_encoder(imgs, mask_ratio)
        pred = self.forward_decoder(latent, ids_restore)
        loss, metric = self.forward_loss(imgs, pred, mask)
        return loss, pred, metric, mask


def build_mae(config):
    """Build a MaskedAutoencoderViT from a config with ``arch`` and ``params``."""
    arch = config["arch"]
    params = copy.deepcopy(config["params"])
    with omegaconf.open_dict(params):
        if "mae_base_patch16" in arch:
            patch_size = 16
            embed_dim = 768
            depth = 12
            num_heads = 12
        elif "mae_large_patch16" in arch:
            patch_size = 16
            embed_dim = 1024
            depth = 24
            num_heads = 16
        elif "mae_huge_patch14" in arch:
            patch_size = 14
            embed_dim = 1280
            depth = 32
            num_heads = 16
        else:
            raise ValueError(
                "Only support mae_base_patch16|mae_large_patch16|mae_huge_patch14"
            )
        net = MaskedAutoencoderViT(
            patch_size=patch_size,
            embed_dim=embed_dim,
            depth=depth,
            num_heads=num_heads,
            mlp_ratio=4,
            norm_layer=partial(nn.LayerNorm, eps=1e-6),
            **params,
        )
    return net
10,108
30.990506
98
py
null
r-mae-main/pretrain/module/rmae.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import copy import warnings from functools import partial import omegaconf import torch import torch.nn as nn import torch.nn.functional as F from .mae import MaskedAutoencoderViT from pretrain.utils.functional import get_2d_sincos_pos_embed from pretrain.module.layers import ( DecoderBlockWithExpansion, DecoderBlock, PatchEmbed, Block, ) class RegionMaskedAutoencoderViT(MaskedAutoencoderViT): def __init__( self, img_size=224, patch_size=16, in_chans=3, embed_dim=1024, depth=24, num_heads=16, decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16, mlp_ratio=4.0, norm_layer=nn.LayerNorm, norm_pix_loss=False, num_region=0, use_mae_loss=True, mae_loss_weight=1.0, bg_loss_weight=1.0, region_loss_weight=1.0, region_mask_ratio=0.75, region_enc_dim=768, region_sample_type="random", ): if use_mae_loss is False and num_region == 0: raise ValueError( "There should be at least one loss in training. Found use_mae_loss=False and num_region=0!" 
) super().__init__( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, depth=depth, num_heads=num_heads, decoder_embed_dim=decoder_embed_dim, decoder_depth=decoder_depth, decoder_num_heads=decoder_num_heads, mlp_ratio=mlp_ratio, norm_layer=norm_layer, norm_pix_loss=norm_pix_loss, use_mae_loss=use_mae_loss, ) self.num_region = num_region self.mae_loss_weight = mae_loss_weight self.bg_loss_weight = bg_loss_weight self.region_loss_weight = region_loss_weight self.region_mask_ratio = region_mask_ratio self.region_enc_dim = region_enc_dim assert region_sample_type in ( "random", "random_fg", ), "Only random|random_fg are allowed for region_sample_type" self.region_sample_type = region_sample_type def random_masking(self, x, mask_ratio, region, shuffle_ids): """ Perform per-sample random masking by per-sample shuffling. Per-sample shuffling is done by argsort random noise. x: [N, L, D], sequence region: [N, num_region, L, region_enc_dim] """ N, L, D = x.shape # batch, length, dim len_keep = int(L * (1 - mask_ratio)) if self.num_region > 0: len_region_keep = int(L * (1 - self.region_mask_ratio)) if self.region_sample_type == "random": noise = torch.rand(N, L, device=x.device) # ascend: small is keep, large is remove region_shuffle = torch.argsort(noise, dim=1) elif self.region_sample_type == "random_fg": region_shuffle = shuffle_ids region_restore = torch.argsort(region_shuffle, dim=1) region_keep = region_shuffle[:, :len_region_keep] region_keep = region_keep.unsqueeze(-1).expand(-1, -1, self.region_enc_dim) region_mask = torch.ones([N, L], device=x.device) region_mask[:, :len_region_keep] = 0 region_mask = torch.gather(region_mask, dim=1, index=region_restore) if region is not None: if region_keep.dim() < region.dim(): region_keep = region_keep.unsqueeze(1).expand( -1, self.num_region, -1, -1 ) region_masked = torch.gather(region, dim=-2, index=region_keep) else: region_masked = None else: region_mask = None region_masked = None 
region_restore = None if self.num_region > 0: ids_shuffle = region_shuffle ids_restore = region_restore else: noise = torch.rand(N, L, device=x.device) # noise in [0, 1] # sort noise for each sample ids_shuffle = torch.argsort( noise, dim=1 ) # ascend: small is keep, large is remove ids_restore = torch.argsort(ids_shuffle, dim=1) # keep the first subset ids_keep = ids_shuffle[:, :len_keep] x_masked = torch.gather( x, dim=1, index=ids_keep.unsqueeze(-1).expand(-1, -1, D) ) # generate the binary mask: 0 is keep, 1 is remove mask = torch.ones([N, L], device=x.device) mask[:, :len_keep] = 0 # unshuffle to get the binary mask mask = torch.gather(mask, dim=1, index=ids_restore) return x_masked, mask, ids_restore, region_masked, region_mask, region_restore def forward_encoder(self, x, mask_ratio, shuffle_ids=None, region=None): # embed patches x = self.patch_embed(x)[0] # add pos embed w/o cls token x = x + self.pos_embed[:, 1:, :] # masking: length -> length * mask_ratio ( x, mask, ids_restore, region, region_mask, region_restore, ) = self.random_masking(x, mask_ratio, region, shuffle_ids) # append cls token cls_token = self.cls_token + self.pos_embed[:, :1, :] cls_tokens = cls_token.expand(x.shape[0], -1, -1) x = torch.cat((cls_tokens, x), dim=1) # apply Transformer blocks for blk in self.blocks: x = blk(x)[0] x = self.norm(x) return x, mask, ids_restore, region, region_mask, region_restore def forward_region_decoder(self): raise NotImplementedError def _forward_region_loss(self, pred, pred_target): region_loss = F.binary_cross_entropy_with_logits( pred, pred_target, reduction="none" ) if self.bg_loss_weight != 1.0: weight_loss = pred_target.detach().clone() weight_loss[pred_target == 0] = self.bg_loss_weight region_loss = region_loss * weight_loss region_loss = region_loss.mean(dim=-1) return region_loss def forward_loss(self, imgs, pred, mask, pred_region, region_mask, target_region): """ imgs: [N, 3, H, W] pred: [N, L, p*p*3] mask: [N, L], 0 is keep, 1 is remove, """ 
if self.use_mae_loss: target = self.patchify(imgs) if self.norm_pix_loss: mean = target.mean(dim=-1, keepdim=True) var = target.var(dim=-1, keepdim=True) target = (target - mean) / (var + 1.0e-6) ** 0.5 mae_loss = self._forward_mae_loss(pred, target) else: mae_loss = torch.zeros_like(mask) if self.num_region > 0: region_loss = self._forward_region_loss(pred_region, target_region) else: region_loss = torch.zeros_like(mae_loss) region_mask = torch.ones_like(mask) mae_loss = (mae_loss * mask).sum() / mask.sum() # mean loss on removed patches region_loss = ( region_loss * region_mask ).sum() / region_mask.sum() # mean loss on removed patches loss = mae_loss * self.mae_loss_weight + region_loss * self.region_loss_weight return loss, mae_loss, region_loss def forward(self): raise NotImplementedError class RegionQueryRMAE(RegionMaskedAutoencoderViT): def __init__( self, img_size=224, patch_size=16, in_chans=3, embed_dim=1024, depth=24, num_heads=16, decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=16, mlp_ratio=4.0, norm_layer=nn.LayerNorm, norm_pix_loss=False, num_region=0, use_mae_loss=True, mae_loss_weight=1.0, bg_loss_weight=1.0, region_loss_weight=1.0, region_mask_ratio=0.75, region_enc_dim=768, region_enc_depth=1, region_enc_num_heads=8, region_dec_dim=128, region_dec_depth=1, region_dec_num_heads=8, region_sample_type="random", region_cross_layer=8, ): super().__init__( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, depth=depth, num_heads=num_heads, decoder_embed_dim=decoder_embed_dim, decoder_depth=decoder_depth if use_mae_loss else region_cross_layer, decoder_num_heads=decoder_num_heads, mlp_ratio=mlp_ratio, norm_layer=norm_layer, norm_pix_loss=norm_pix_loss, num_region=num_region, use_mae_loss=use_mae_loss, mae_loss_weight=mae_loss_weight, bg_loss_weight=bg_loss_weight, region_loss_weight=region_loss_weight, region_mask_ratio=region_mask_ratio, region_enc_dim=region_enc_dim, region_sample_type=region_sample_type, ) 
self.use_mae_loss = use_mae_loss if use_mae_loss is False: self.decoder_norm = None self.decoder_pred = None # -------------------------------------------------------------------------- # MAE region specifics if num_region > 0: num_patches = self.patch_embed.num_patches self.region_cross_pos_embed = nn.Parameter( torch.zeros(1, num_patches + 1, region_dec_dim), requires_grad=False, ) # fixed sin-cos embedding self.region_cross_embed = nn.Linear(embed_dim, region_dec_dim, bias=True) self.region_dec_norm = norm_layer(region_dec_dim) # self.region_dec_norm = nn.Identity() self.region_cross_mask_token = nn.Parameter( torch.zeros(1, 1, region_dec_dim) ) self.region_cross_blocks = nn.ModuleList() for _ in range(region_cross_layer): self.region_cross_blocks.append( Block( region_dec_dim, region_dec_num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer, ) ) cross_embed_dim = region_dec_dim self.region_patch_embed = PatchEmbed( img_size, patch_size, 1, region_enc_dim ) self.region_pos_embed = nn.Parameter( torch.zeros(1, num_patches + 1, region_enc_dim), requires_grad=False ) # fixed sin-cos embedding self.region_enc_blocks = nn.ModuleList( [ Block( region_enc_dim, region_enc_num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer, ) for i in range(region_enc_depth) ] ) self.region_proj = nn.Sequential( norm_layer(region_enc_dim), nn.Linear(region_enc_dim, region_dec_dim), ) self.region_dec_blocks = nn.ModuleList() for _ in range(region_dec_depth - 1): self.region_dec_blocks.append( DecoderBlock( region_dec_dim, cross_embed_dim, region_dec_num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer, ) ) self.region_dec_blocks.append( DecoderBlockWithExpansion( region_dec_dim, cross_embed_dim, region_dec_num_heads, qkv_bias=True, norm_layer=norm_layer, ) ) self.region_pred = nn.Sequential( nn.GELU(), nn.Linear(region_dec_dim, patch_size**2, bias=True), ) self.region_cross_layer = region_cross_layer self.initialize_rmae_weights() def initialize_rmae_weights(self): 
super().initialize_weights() if self.num_region > 0: region_pos_embed = get_2d_sincos_pos_embed( self.region_pos_embed.shape[-1], int(self.patch_embed.num_patches**0.5), cls_token=True, ) self.region_pos_embed.data.copy_( torch.from_numpy(region_pos_embed).float().unsqueeze(0) ) # initialize region_patch_embed like nn.Linear (instead of nn.Conv2d) w = self.region_patch_embed.proj.weight.data torch.nn.init.xavier_uniform_(w.view([w.shape[0], -1])) torch.nn.init.normal_(self.region_cross_mask_token, std=0.02) region_cross_pos_embed = get_2d_sincos_pos_embed( self.region_cross_pos_embed.shape[-1], int(self.patch_embed.num_patches**0.5), cls_token=True, ) self.region_cross_pos_embed.data.copy_( torch.from_numpy(region_cross_pos_embed).float().unsqueeze(0) ) # initialize nn.Linear and nn.LayerNorm self.apply(self._init_weights) def forward_encoder(self, x, mask_ratio, shuffle_ids=None, region=None): if self.num_region > 0: region = region + self.region_pos_embed[None, :, 1:, :] return super().forward_encoder( x, mask_ratio, shuffle_ids=shuffle_ids, region=region, ) def forward_decoder(self, x, ids_restore): x_cross = None if self.num_region > 0: x_cross = self.region_cross_embed(x) cross_mask_tokens = self.region_cross_mask_token.expand( x.shape[0], ids_restore.shape[1] + 1 - x.shape[1], -1 ) x_cross_ = torch.cat([x_cross[:, 1:, :], cross_mask_tokens], dim=1) x_cross_ = torch.gather( x_cross_, dim=1, index=ids_restore.unsqueeze(-1).expand(-1, -1, x_cross.shape[2]), ) x_cross = torch.cat([x_cross[:, :1, :], x_cross_], dim=1) x_cross = x_cross + self.region_cross_pos_embed for blk in self.region_cross_blocks: x_cross = blk(x_cross)[0] x_cross = self.region_dec_norm(x_cross) if self.use_mae_loss: # embed tokens x = self.decoder_embed(x) # append mask tokens to sequence mask_tokens = self.mask_token.expand( x.shape[0], ids_restore.shape[1] + 1 - x.shape[1], -1 ) x_ = torch.cat([x[:, 1:, :], mask_tokens], dim=1) # no cls token x_ = torch.gather( x_, dim=1, 
index=ids_restore.unsqueeze(-1).expand(-1, -1, x.shape[2]) ) # unshuffle x = torch.cat([x[:, :1, :], x_], dim=1) # append cls token # add pos embed x = x + self.decoder_pos_embed # apply Transformer blocks for i, blk in enumerate(self.decoder_blocks): x = blk(x)[0] if self.use_mae_loss: x = self.decoder_norm(x) # predictor projection x = self.decoder_pred(x) # remove cls token x = x[:, 1:, :] else: x = None return x, x_cross def forward_region_encoder(self, region): l = region.shape[1] for blk in self.region_enc_blocks: region = blk(region)[0] region = region.view(-1, self.num_region, l, self.region_enc_dim) region = region.mean(dim=2) return region def forward_region_decoder(self, region, memory): region = self.region_proj(region) for blk in self.region_dec_blocks: region = blk(region, memory)[0] region = self.region_pred(region) region = region[:, :, 1:, :] return region def _forward_region_loss(self, pred, pred_target): region_loss = super()._forward_region_loss(pred, pred_target) region_loss = region_loss.mean(dim=1) return region_loss def forward(self, imgs, mask_ratio=0.75, region=None, shuffle_ids=None): if region is not None: b, c, h, w = region.shape region = region.view(b * c, 1, h, w) target_region = self.patchify(region).view(b, c, -1, self.patch_size**2) region = self.region_patch_embed(region - 0.5)[0] region = region.view(b, c, -1, self.region_enc_dim) else: region = None target_region = None ( latent, mask, ids_restore, region_masked, region_mask, _, ) = self.forward_encoder( imgs, mask_ratio, shuffle_ids=shuffle_ids, region=region, ) if region is not None: region_latent = self.forward_region_encoder(region_masked.flatten(0, 1)) else: region_latent = None pred, memory = self.forward_decoder(latent, ids_restore) if self.num_region > 0: pred_region = self.forward_region_decoder(region_latent, memory) else: pred_region = None loss, mae_loss, region_loss = self.forward_loss( imgs, pred, mask, pred_region, region_mask, target_region ) return loss, (pred, 
pred_region), (mae_loss, region_loss), (mask, region_mask) def build_rmae(config): arch = config["arch"] params = copy.deepcopy(config["params"]) with omegaconf.open_dict(params): if "rmae_base_patch16" in arch: patch_size = 16 embed_dim = 768 depth = 12 num_heads = 12 elif "rmae_large_patch16" in arch: patch_size = 16 embed_dim = 1024 depth = 24 num_heads = 16 elif "rmae_huge_patch14" in arch: patch_size = 14 embed_dim = 1280 depth = 32 num_heads = 16 else: raise ValueError( "Only support rmae_base_patch16|rmae_large_patch16|rmae_huge_patch14" ) net = RegionQueryRMAE( patch_size=patch_size, embed_dim=embed_dim, depth=depth, num_heads=num_heads, mlp_ratio=4, norm_layer=partial(nn.LayerNorm, eps=1e-6), **params, ) return net
19,154
32.313043
107
py
null
r-mae-main/pretrain/optim/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import importlib import collections.abc from lib2to3.pgen2.token import OP import os import copy import torch import torch.optim as optim import omegaconf from pretrain.optim.oss import OSS from pretrain.optim.lars import LARS from pretrain.utils.general import get_optimizer_parameters OPTIM_REGISTRY = {"sgd": optim.SGD, "adamw": optim.AdamW, "lars": LARS} def build_optimizer(config, model): optim_type = config.optimizer["type"] optim_config = copy.deepcopy(config.optimizer["params"]) with omegaconf.open_dict(optim_config): use_oss = optim_config.pop("use_oss", False) redundants = ["lr_decay_rate", "wd_norm", "wd_bias"] for redundant in redundants: if redundant in optim_config: optim_config.pop(redundant) if optim_type not in OPTIM_REGISTRY: raise ValueError("Optimizer ({}) is not found.".format(optim_type)) model_params = get_optimizer_parameters(model) if isinstance(model_params[0], collections.abc.Sequence): param_groups = [] backbone_group, transformer_group = model_params with omegaconf.open_dict(optim_config): lr_backbone = optim_config.pop("lr_backbone", optim_config["lr"]) for bgroup in backbone_group: if "lr_multi" in bgroup: bgroup["lr"] = lr_backbone * bgroup.pop("lr_multi") else: bgroup["lr"] = lr_backbone param_groups.append(bgroup) for tgroup in transformer_group: if "lr_multi" in tgroup: tgroup["lr"] = optim_config["lr"] * tgroup.pop("lr_multi") param_groups.append(tgroup) elif isinstance(model_params[0], collections.abc.Mapping): param_groups = model_params else: param_groups = [{"lr": optim_config["lr"], "params": model_params}] if use_oss: optimizer = OSS( params=param_groups, optim=OPTIM_REGISTRY[optim_type], **optim_config ) else: optimizer = OPTIM_REGISTRY[optim_type](param_groups, **optim_config) return optimizer def register_optim(name): def 
register_optim_cls(cls): if name in OPTIM_REGISTRY: raise ValueError("Cannot register duplicate optimizer ({})".format(name)) elif not issubclass(cls, torch.optim.Optimizer): raise ValueError( "Optimizer ({}: {}) must extend torch.optim.Optimizer".format( name, cls.__name__ ) ) OPTIM_REGISTRY[name] = cls return cls return register_optim_cls optims_dir = os.path.dirname(__file__) for file in os.listdir(optims_dir): path = os.path.join(optims_dir, file) if ( not file.startswith("_") and not file.startswith(".") and (file.endswith(".py") or os.path.isdir(path)) ): optim_name = file[: file.find(".py")] if file.endswith(".py") else file importlib.import_module("pretrain.optim." + optim_name)
3,246
31.148515
85
py
null
r-mae-main/pretrain/optim/lars.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import torch class LARS(torch.optim.Optimizer): """ LARS optimizer, no rate scaling or weight decay for parameters <= 1D. """ def __init__( self, params, lr=0, weight_decay=0, momentum=0.9, trust_coefficient=0.001 ): defaults = dict( lr=lr, weight_decay=weight_decay, momentum=momentum, trust_coefficient=trust_coefficient, ) super().__init__(params, defaults) @torch.no_grad() def step(self): for g in self.param_groups: for p in g["params"]: dp = p.grad if dp is None: continue if p.ndim > 1: # if not normalization gamma/beta or bias dp = dp.add(p, alpha=g["weight_decay"]) param_norm = torch.norm(p) update_norm = torch.norm(dp) one = torch.ones_like(param_norm) q = torch.where( param_norm > 0.0, torch.where( update_norm > 0, (g["trust_coefficient"] * param_norm / update_norm), one, ), one, ) dp = dp.mul(q) param_state = self.state[p] if "mu" not in param_state: param_state["mu"] = torch.zeros_like(p) mu = param_state["mu"] mu.mul_(g["momentum"]).add_(dp) p.add_(mu, alpha=-g["lr"])
1,815
30.859649
81
py
null
r-mae-main/pretrain/optim/oss.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import copy import io import logging from itertools import chain from math import inf from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Type, Union from collections import OrderedDict import torch import torch.profiler as profiler import torch.distributed as dist from pretrain.utils.params import ( ParamBucket, calc_grad_norm, recursive_copy_to_device, get_global_rank, ) from pretrain.utils.distributed import is_dist_avail_and_initialized __all__ = ["OSS"] if TYPE_CHECKING: # pragma: no cover from torch.optim.optimizer import _params_t else: _params_t = Any _gpu_is_old: Optional[bool] = None def _gpu_capabilities_older_than_50() -> bool: """Return True if the GPU's compute capability is older than SM50.""" global _gpu_is_old if _gpu_is_old is None: for i in range(torch.cuda.device_count()): major, minor = torch.cuda.get_device_capability(f"cuda:{i}") if major <= 5: _gpu_is_old = True if _gpu_is_old is None: _gpu_is_old = False return _gpu_is_old def _broadcast_object( obj: Any, src_rank: int, group: object = dist.group.WORLD, dist_device: torch.device = torch.device("cpu"), ) -> Any: """ Either broadcast from master to the fleet (default), or use the src setting as the original rank. This is only needed for some older GPUs where dist.broadcast_object_list seems to hang. Also the hang behavior persist across processes once it happens. I.e. once we call dist.broadcast_object_list, subsequent calls with _broadcast_object also hang. 
""" if dist.get_rank() == src_rank: # Emit data buffer = io.BytesIO() torch.save(obj, buffer) data = bytearray(buffer.getbuffer()) length_tensor = torch.LongTensor([len(data)]).to(dist_device) data_send_tensor = torch.ByteTensor(data).to(dist_device) dist.broadcast(length_tensor, src=src_rank, group=group, async_op=False) dist.broadcast(data_send_tensor, src=src_rank, group=group, async_op=False) else: # Fetch from the source length_tensor = torch.LongTensor([0]).to(dist_device) dist.broadcast(length_tensor, src=src_rank, group=group, async_op=False) data_recv_tensor = torch.empty( [int(length_tensor.item())], dtype=torch.uint8, device=dist_device ) dist.broadcast(data_recv_tensor, src=src_rank, group=group, async_op=False) buffer = io.BytesIO(data_recv_tensor.cpu().numpy()) obj = torch.load(buffer, map_location=dist_device) return obj class OSS(torch.optim.Optimizer): """Wraps an arbitrary :class:`optim.Optimizer <torch.optim.Optimizer>` optimizer and shards its state as described by ZeRO_. :: opt = OSS(params, optim=torch.optim.Adam, lr=0.01) .. _ZeRO: https://arxiv.org/abs/1910.02054 We use a greedy algorithm to pack a number of parameters at each rank. Each parameter belongs to a single rank and is not divided among rank. After each rank completed their parameter update, they broadcast the new version of the parameters to all other ranks to synchronize the parameters for next round forward/backward computation. Args: params (list of tensors): parameters to be optimized Keyword Args: optim (torch.nn.Optimizer): optimizer to shard (default: SGD) group (group): torch.distributed group (default: group.WORLD) broadcast_buffer_size (int): (deprecated) used to cap the size of the broadcast buffers, not being used anymore. broadcast_fp16 (bool): Compress the model shards in fp16 before sharing them in between ranks. This is safe to use when PyTorch AMP is activated. Without torch AMP this will lead to a slight degradation in terms of accuracy. .. 
warning: the communication patterns that OSS use depend on the "trainability" graph, meaning that all the parameters which `require_grad` are handled differently. This is not reevaluated at every step, please use `refresh_trainable()` if your model changed (freeze or unfreeze for instance). If used with :class:<fairscale.nn.ShardedDDP> then an automatic change detection is possible, via the `auto_refresh_trainable` parameter. """ #: The optimizer used for a given shard optim: torch.optim.Optimizer in_super_constructor: bool def __init__( self, params: _params_t, optim: Type[torch.optim.Optimizer] = torch.optim.SGD, group: Optional[Any] = None, broadcast_buffer_size: int = -1, broadcast_fp16: bool = False, **default: Any, ): if not is_dist_avail_and_initialized(): raise RuntimeError("OSS should be used with distributed training!") # Hold all the model params in the root .param_groups self.in_super_constructor = True super().__init__(params, default) self.in_super_constructor = False # Partition information. 
lazy evaluation, computed when requested self.__per_device_params: Dict[ torch.device, List[List[torch.nn.Parameter]] ] = OrderedDict() # device, rank, params self.__param_rank: Dict[torch.Tensor, int] = {} self._partition_parameters: List[List[dict]] = [] self.__param_to_index: Dict[int, int] = {} self.__local_params: Optional[List[torch.Tensor]] = None # Default empty values + immutables self._optim_defaults = default self._optim_constructor = optim self.group = group if group is not None else dist.group.WORLD self.world_size = dist.get_world_size(self.group) self.backend = dist.get_backend(self.group) self.rank = dist.get_rank(self.group) self.global_rank = get_global_rank(self.group, self.rank) self._local_to_global_rank = [ get_global_rank(self.group, i) for i in range(self.world_size) ] self.broadcast_fp16 = broadcast_fp16 self.buckets: Dict[torch.device, Dict[int, ParamBucket]] = {} self._all_states: List[ Dict[str, Any] ] = [] # Optional consolidated optimizer state self._default_device = torch.device("cpu") # Setup everything which is related to the parameters to be trained # (partition and optimizer for the shard) self.refresh_trainable() # Partition helpers def partition_parameters(self) -> List[List[dict]]: """Partitions parameters across distributed data parallel ranks. Returns a list of param_groups (which is a list of dict) where each element of the list contains the param_groups for a rank. Element 0 corresponds to rank 0, etc. We need all the ranks for the broadcast inside step(). """ if len(self._partition_parameters) == 0: self._partition_parameters = [list() for _ in range(self.world_size)] sizes = [0] * self.world_size for param_group in self.param_groups: param_lists: List[List] = [list() for _ in range(self.world_size)] for param in param_group["params"]: # Add this param to rank with smallest size. 
rank = sizes.index(min(sizes)) param_lists[rank].append(param) # We're partitioning the optimizer state, # so trainable parameters are the ones which really count if param.requires_grad: sizes[rank] += param.numel() else: # Spread frozen params on a per-tensor basis # Mostly useful for balance partitions for fine tuning for instance # Not required strictly speaking sizes[rank] += 1 for rank, params in enumerate(param_lists): param_group_rank = copy.copy(param_group) param_group_rank["params"] = params self._partition_parameters[rank].append(param_group_rank) return self._partition_parameters # NOTE(msb) We add a kwargs in order to support Optimizer sub-classes that support extra kwargs. # For example, the apex library contains fused optimizers with a step that supports extra kwargs. def step( self, closure: Optional[Callable[[], float]] = None, **kwargs: Any ) -> Optional[float]: """Performs a single optimization step (parameter update). Arguments: closure (callable): A closure that reevaluates the model and returns the loss. Optional for most optimizers. .. note: Any extra parameter is passed to the base optimizer as-is""" # Sync oss param_groups attributes in case they've been updated by a scheduler. 
OSS._sync_param_groups(self.param_groups, self.optim.param_groups) # Catch a possible change of devices in between OSS construction and step() with profiler.record_function("fairscale::oss::refresh_trainable"): if ( self._default_device.type != self.param_groups[0]["params"][0].device.type ): logging.info( "OSS detected that the parameter changed devices, re-allocating buffers" ) self._clear_cache() self.refresh_trainable() # Run the optimizer step on this shard only: with profiler.record_function("fairscale::oss::optim_step"): if closure is not None: loss = self.optim.step(closure=closure, **kwargs) # type: ignore else: loss = self.optim.step(**kwargs) # Sync all the updated shards in between the ranks self._broadcast_params() # Sync hypothethical new results from the wrapped optimizer to the exposed param_groups OSS._sync_param_groups(self.optim.param_groups, self.param_groups) return loss def clip_grad_norm( self, max_norm: Union[float, int], norm_type: Union[float, int] = 2.0, filter_params_fn: Callable[[Any], Any] = None, ) -> torch.Tensor: """ Clip all gradients at this point in time. The norm is computed over all gradients together, as if they were concatenated into a single vector. Gradients are modified in-place. Arguments: max_norm (float or int): max norm of the gradients norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm. Returns: Total norm of the parameters (viewed as a single vector). .. note: This is analogous to `torch.nn.utils.clip_grad_norm_` but handles the partitioning and multiple devices per rank under the hood. The default torch util is not applicable here, because each rank only has a partial view of all the grads in the model, so calling it in the OSS context would lead to different scaling being applied per subset of model parameters .. 
warning: This needs to be called on all ranks, since synchronization primitives will be used """ # Compute the max norm for this shards's worth of gradients max_norm = float(max_norm) norm_type = float(norm_type) with profiler.record_function("fairscale::oss::clip_grad_norm"): # Option to filter parameters from the grad_norm calculation. This is useful for model parallelism. # To avoid double counting, only consider parameters on rank zero + anything marked 'model_parallel' # 'model_parallel' flag is set in Megatron-LM: # https://github.com/NVIDIA/Megatron-LM/blob/19301985dd31c8b612095cbad15bd903e8ddd497/megatron/mpu/layers.py#L54 local_params = ( filter_params_fn(self._local_params) if filter_params_fn is not None else self._local_params ) local_norm = calc_grad_norm(local_params, norm_type).to( self._default_device ) # Compute the norm on this grad set, # then sync all the norms from all ranks if norm_type == inf: total_norm = local_norm # all reduce over data parallel and model parallel workers dist.all_reduce( total_norm, op=torch.distributed.ReduceOp.MAX, group=dist.group.WORLD, ) else: # local norm result can be accumulated with the remote ones if put to the right power # n_i = sum_rank(a^p)^1/p # -> n_total = all_reduce(n_i^p)^(1/p) = sum_i(n_i^p)^1/p = sum_i(sum_rank(a^p))^1/p # all reduce over data parallel and model parallel workers total_norm = local_norm**norm_type dist.all_reduce(total_norm) total_norm = total_norm ** (1.0 / norm_type) clip_coef = torch.tensor( max_norm, dtype=total_norm.dtype, device=total_norm.device ) / (total_norm + 1e-6) if clip_coef < 1: for device, device_params in self._per_device_params.items(): for p in filter( lambda x: x.grad is not None, device_params[self.rank] ): p.grad.detach().mul_(clip_coef.to(device)) return total_norm # State dict interfaces def consolidate_state_dict(self, recipient_rank: int = 0) -> None: """Update the consolidated state_dict list, one per rank. 
Arguments: recipient_rank (int): on which rank to materialize the full state dict. -1 is a special value, which means that all ranks should have the state .. warning: This needs to be called on all replicas""" # Sync lr and other attributes in case its been updated OSS._sync_param_groups(self.param_groups, self.optim.param_groups) # Pull the sharded state from all the other replicas # Store all the states in order, rank by rank logging.debug("Pulling the sharded optimizer state from all replicas") self._all_states = [] should_collect_state = self.rank == recipient_rank or recipient_rank == -1 should_send_state = self.rank != recipient_rank # NCCL requires CUDA tensors for all communication primitives dist_device = ( torch.device("cuda") if self.backend == dist.Backend.NCCL else self._default_device ) for rank in range(self.world_size): if rank == self.rank: if should_collect_state: logging.debug("Saving self state") self._all_states.append( recursive_copy_to_device( self.optim.state_dict(), non_blocking=True, device=torch.device("cpu"), ) ) # Sync with other replicas state_to_share = ( self.optim.state_dict() if should_send_state else torch.tensor([0], dtype=torch.uint8, device=dist_device) ) if _gpu_capabilities_older_than_50(): _broadcast_object( state_to_share, src_rank=self.global_rank, group=self.group, dist_device=dist_device, ) else: obj_list = [state_to_share] dist.broadcast_object_list( obj_list, src=self.global_rank, group=self.group, ) else: # Fetch the optim state from the other replicas if _gpu_capabilities_older_than_50(): replica_state = _broadcast_object( torch.tensor([0], dtype=torch.uint8, device=dist_device), src_rank=self._local_to_global_rank[rank], group=self.group, dist_device=dist_device, ) else: obj_list = [ torch.tensor([0], dtype=torch.uint8, device=dist_device) ] dist.broadcast_object_list( obj_list, src=self._local_to_global_rank[rank], group=self.group, ) replica_state = obj_list[0] if should_collect_state: self._all_states.append( 
recursive_copy_to_device( replica_state, non_blocking=True, device=torch.device("cpu") ) ) logging.debug("State from rank %s received", rank) def state_dict(self, all_ranks: bool = False) -> Dict[str, Any]: """Return the last known global optimizer state. The returned state is compatible with Pytorch, in that the sharded properties are not exposed. Arguments: all_ranks (bool): materialize the state on all ranks. In that case, `.state_dict()` needs to be called on all ranks Returns: a dict with two entries * state - a dict holding current optimization state. Its content differs between optimizer classes. * param_groups - a dict containing all parameter groups .. warning: Returning the global state is limited to the replica which was responsible for the consolidation, if `all_ranks` was not set to `True`. In that case, the state may also not be up to date, depending on when `consolidate_state_dict` was last called. """ if not all_ranks and len(self._all_states) == 0: raise RuntimeError( "Optimizer state has not been consolidated on this rank. \ Please call `consolidate_state_dict()` on all ranks beforehand if you meant to save the global state" ) if all_ranks: # Consolidate the state on every rank self.consolidate_state_dict(recipient_rank=-1) # Unify the shard states and the state that pytorch would expect, given the model. 
# Indexation needs several redirections, since each shard only knows a limited scope of the model # - get the pytorch compliant parameter indexing state_dict = super().state_dict() # - go through the per-shard states, which are all indexed locally for rank, s in enumerate(self._all_states): # -- match the local indexing and the global partition, update the corresponding saved state globally for local_pg, global_pg in zip( s["param_groups"], self.partition_parameters()[rank] ): local_index_to_param_id = { i_param: id(global_pg["params"][i]) for i, i_param in enumerate(local_pg["params"]) } for local_param_index in local_pg["params"]: # Update the state, if any if local_param_index in s["state"].keys(): global_id = self._param_to_index[ local_index_to_param_id[local_param_index] ] state_dict["state"][global_id] = s["state"][local_param_index] # Make sure that the parameters are sorted in the state, as expected for a pytorch dict state_dict["state"] = dict(sorted(state_dict["state"].items())) return state_dict def load_state_dict(self, state_dict: Dict[str, Any]) -> None: """Restore the global parameter groups as well as the shard. Arguments: state_dict (dict): optimizer state. 
Should be an object returned from a call to :meth:`state_dict` """ # Update the state, trusting the ordering in param_groups # Apart from the removal of states not owned by this rank, the pytorch logic is kept # (See torch.optim.optimizer) id_map = { old_id: p for old_id, p in zip( chain.from_iterable((g["params"] for g in state_dict["param_groups"])), chain.from_iterable((g["params"] for g in self.param_groups)), ) } for key, value in state_dict["state"].items(): param = id_map[key] # Populate the sharded optimizer state on the fly, # remove the params that this rank does not own if self._param_to_rank[param] != self.rank: state_dict["state"][key] = {} else: self.optim.state[param] = recursive_copy_to_device( value, non_blocking=True, device=param.device ) super().load_state_dict(state_dict) # Sync with the optimizer param groups OSS._sync_param_groups(state_dict["param_groups"], self.param_groups) OSS._sync_param_groups(self.param_groups, self.optim.param_groups) def refresh_trainable(self) -> None: """Updates the partitioning and communication patterns if the trainability (`requires_grad`) of some parameters changed. """ # Make sure that we capture the current default device self._default_device = list(self._per_device_params.keys())[0] # Create the optim which will work on the param shard if not hasattr(self, "optim"): self._clear_cache() self.optim = self._optim_constructor( self.partition_parameters()[self.rank], **self._optim_defaults ) OSS._sync_param_groups(self.optim.param_groups, self.param_groups) self._setup_flat_buffers() def add_param_group(self, param_group: dict) -> None: """Add a param group to the :class:`Optimizer` s `param_groups`. This can be useful when fine tuning a pre-trained network as frozen layers can be made trainable and added to the :class:`Optimizer` as training progresses. Arguments: param_group (dict): Specifies what Tensors should be optimized along with group specific optimization options .. 
warning: This handles updating the shards on all partitions, but needs to be called on all ranks. """ super().add_param_group(param_group) if not self.in_super_constructor: # Force a re-partitioning self._clear_cache() # Update the partition param_groups = self.partition_parameters()[self.rank] if len(param_groups) == len(self.optim.param_groups) + 1: self.optim.add_param_group(param_groups[-1]) # Update the bucketing strategy accordingly self._setup_flat_buffers() @property def _local_params(self) -> List[torch.Tensor]: """Iterable which goes through the parameters that this rank owns""" if self.__local_params is None: self.__local_params = list( chain( *[ list( filter( lambda x: x.grad is not None, device_params[self.rank] ) ) for device_params in self._per_device_params.values() ] ) ) # Make sure that the iterator is not consumed, only expose a copy return self.__local_params @property def _param_to_index(self) -> Dict[int, int]: """Hash table in between parameter indices in the global optimizer scheme, and the actual params""" if len(self.__param_to_index) == 0: self.__param_to_index = { id(p): i for i, p in enumerate(chain(*(g["params"] for g in self.param_groups))) } return self.__param_to_index @property def _per_device_params(self) -> Dict[torch.device, List[List[torch.nn.Parameter]]]: """Sorted list of all the params, first per device then per rank. Within a list params are sorted per number of elements to allow for an easy bucketing. 
""" if len(self.__per_device_params) == 0: # Go through all params, log them per device # The ordering is important here, needs to be the same on all ranks # So that ulterior broadcast calls are matching for param_group in self.param_groups: for param in param_group["params"]: device = param.device if self.__per_device_params.get(device) is None: self.__per_device_params[device] = [ [] for _ in range(self.world_size) ] self.__per_device_params[device][self._param_to_rank[param]] += [ param ] # Sort param_lists by size for device in self.__per_device_params.keys(): for rank_params in self.__per_device_params[device]: rank_params.sort(key=lambda x: x.numel()) return self.__per_device_params @property def _param_to_rank(self) -> Dict[torch.Tensor, int]: """Map the params to the rank which owns them""" if len(self.__param_rank) == 0: for rank, param_groups in enumerate(self.partition_parameters()): for param_group in param_groups: for param in param_group["params"]: self.__param_rank[param] = rank logging.debug( "FairScale OSS: Parameters dispatched to ranks %s " % list(self.__param_rank.values()) ) return self.__param_rank def _clear_cache(self) -> None: self._partition_parameters.clear() self.__per_device_params.clear() self.__param_rank.clear() self.__param_to_index.clear() self.__local_params = None @staticmethod def _sync_param_groups( source: List[Dict[Any, Any]], destination: List[Dict[Any, Any]] ) -> None: """Sync learning rate and other optimizer attributes (needed to support schedulers).""" for source_group, destination_group in zip(source, destination): # Sync everything but the parameters for k in filter(lambda x: x != "params", source_group.keys()): destination_group[k] = source_group[k] @torch.no_grad() def _broadcast_params(self) -> None: """Helper function to broadcast all the parameters from a given device""" with profiler.record_function("fairscale::oss::refresh_trainable"): # if NCCL broadcasts will be done in an independent stream # make sure that 
prior compute work is complete if torch.device("cuda").type == self._default_device.type: for device in self._per_device_params.keys(): torch.cuda.synchronize(device=device) work_handles = ( [] ) # Work handles are consumed within this scope, no callback # Populate the fp16 shards if self.broadcast_fp16: for device in self.buckets.keys(): for dst_rank, bucket in self.buckets[device].items(): bucket.to( dtype=torch.float16, device=device, non_blocking=True, keep_param_alignment=False, ) if torch.cuda.is_available(): torch.cuda.synchronize() # Exchange all the shards with the other ranks for device in self.buckets.keys(): for dst_rank, bucket in self.buckets[device].items(): work_handles.append( dist.broadcast( tensor=bucket.buffer, src=self._local_to_global_rank[dst_rank], group=self.group, async_op=True, ) ) _ = list(filter(lambda x: x.wait(), work_handles)) # Populate back the fp32 shards if self.broadcast_fp16: for device in self.buckets.keys(): for dst_rank, bucket in self.buckets[device].items(): bucket.to( dtype=torch.float32, device=device, non_blocking=True, keep_param_alignment=True, ) def _setup_flat_buffers(self) -> None: """Make all params which are on the same device and tied to the same rank views of a single buffer. This is used at construction time, and anytime parameter trainability is changed (frozen or unfrozen) and `refresh_trainability` is called. 
""" for device, per_rank_params in self._per_device_params.items(): # Only wipe the existing buckets if there are none # (could be that this is called twice, when trainability changes) if device not in self.buckets.keys(): self.buckets[device] = {} # Make parameters a view of the bucket for dst_rank, params in enumerate(per_rank_params): if len(params) > 0: # Clone the non-trainable params, if in a bucket it will get destroyed for param in filter(lambda x: not x.requires_grad, params): param.data = param.data.detach().clone() # Merge all the trainable params in a single bucket trainable_params = list(filter(lambda x: x.requires_grad, params)) if trainable_params: buffer_size = sum(map(lambda x: x.numel(), trainable_params)) bucket = ParamBucket( size=buffer_size, dtype=trainable_params[0].dtype, device=device, ) for param in trainable_params: bucket.add_param(param) self.buckets[device][dst_rank] = bucket # Clear the buffer keys which are not in use anymore (could be that the devices changed) devices_in_use = list(self._per_device_params.keys()) devices_to_pop = list( filter(lambda x: x not in devices_in_use, self.buckets.keys()) ) for d in devices_to_pop: self.buckets.pop(d)
31,784
42.541096
135
py
null
r-mae-main/pretrain/optim/scheduler/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.


"""LR-scheduler registry: build/register schedulers and auto-import submodules."""

import importlib
import os
import warnings

from pretrain.optim.scheduler.lr_scheduler import BaseScheduler

# Maps scheduler type name -> BaseScheduler subclass; filled by @register_scheduler.
SCHEDULER_REGISTRY = {}

__all__ = ["BaseScheduler"]


def build_scheduler(config, optimizer):
    """Instantiate the LR scheduler named by ``config.scheduler.type``.

    Args:
        config: global config; its ``scheduler`` entry must carry a ``type``
            key and may carry a ``params`` mapping forwarded to the scheduler.
        optimizer: optimizer whose param-group learning rates are scheduled.

    Returns:
        A registered ``BaseScheduler`` instance.

    Raises:
        ValueError: if ``type`` is missing or not registered.
    """
    scheduler_config = config.get("scheduler", {})
    if not hasattr(scheduler_config, "type"):
        # Fix: message previously said "type of optimizer" (copy-paste from the
        # optimizer builder); this function builds a scheduler.
        raise ValueError(
            "LRScheduler attributes must have a 'type' key "
            "specifying the type of scheduler. "
            "(Custom or PyTorch)"
        )
    scheduler_type = scheduler_config.type

    if not hasattr(scheduler_config, "params"):
        # Fix: typo "schduler" in the original warning text.
        warnings.warn("scheduler attributes has no params defined, defaulting to {}.")

    if scheduler_type not in SCHEDULER_REGISTRY:
        raise ValueError("Scheduler ({}) is not found.".format(scheduler_type))

    params = getattr(scheduler_config, "params", {})
    scheduler = SCHEDULER_REGISTRY[scheduler_type](params, optimizer)

    return scheduler


def register_scheduler(name):
    """Class decorator registering a ``BaseScheduler`` subclass under ``name``."""

    def register_scheduler_cls(cls):
        if name in SCHEDULER_REGISTRY:
            raise ValueError("Cannot register duplicate lr_scheduler ({})".format(name))
        elif not issubclass(cls, BaseScheduler):
            raise ValueError(
                "LR_Scheduler ({}: {}) must extend BaseScheduler".format(
                    name, cls.__name__
                )
            )
        SCHEDULER_REGISTRY[name] = cls
        return cls

    return register_scheduler_cls


# Import every sibling module/package so their @register_scheduler decorators run.
schedulers_dir = os.path.dirname(__file__)
for file in os.listdir(schedulers_dir):
    path = os.path.join(schedulers_dir, file)
    if (
        not file.startswith("_")
        and not file.startswith(".")
        and (file.endswith(".py") or os.path.isdir(path))
    ):
        scheduler_name = file[: file.find(".py")] if file.endswith(".py") else file
        importlib.import_module("pretrain.optim.scheduler." + scheduler_name)
2,087
29.26087
88
py
null
r-mae-main/pretrain/optim/scheduler/cosine_scheduler.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.


import math

from pretrain.optim.scheduler import register_scheduler, BaseScheduler


@register_scheduler("cosine_annealing")
class CosineAnnealingScheduler(BaseScheduler):
    """Cosine-annealing LR schedule with optional linear warmup.

    During warmup the lr ramps linearly from ``warmup_factor * base_lr`` to
    ``base_lr``; afterwards it follows a half-cosine from ``base_lr`` down to a
    per-group ``eta_min`` over ``T_max`` (warmup-adjusted) iterations.
    """

    def __init__(self, config, optimizer):
        """Read schedule hyper-parameters from ``config`` and initialize the base.

        Args:
            config: mapping with ``eta_min``, ``T_max``, ``use_warmup``,
                ``warmup_iterations`` and ``warmup_factor``.
            optimizer: optimizer whose param groups provide the base lrs.
        """
        eta_min = config["eta_min"]
        self.T_max = config["T_max"]
        self.use_warmup = config["use_warmup"]
        self.warmup_iterations = config["warmup_iterations"] if self.use_warmup else 0
        self.warmup_factor = config["warmup_factor"]

        base_lrs = list(map(lambda group: group["lr"], optimizer.param_groups))
        max_base_lr = max(base_lrs)
        # Scale eta_min per param group so each group keeps its relative lr ratio.
        self.eta_mins = [lr / max_base_lr * eta_min for lr in base_lrs]
        if self.use_warmup and self.warmup_iterations > 0:
            # Cosine decay only covers the post-warmup portion of training.
            self.T_max -= self.warmup_iterations
        super(CosineAnnealingScheduler, self).__init__(config, optimizer)

    def get_iter_lr(self):
        """Return the per-group lr for the current iteration (``self.last_iter``)."""
        # Fix: require warmup_iterations > 0 before taking the warmup branch —
        # the alpha division below raised ZeroDivisionError at iteration 0 when
        # use_warmup was True with zero warmup iterations.
        if (
            self.use_warmup is True
            and self.warmup_iterations > 0
            and self.last_iter <= self.warmup_iterations
        ):
            alpha = float(self.last_iter) / float(self.warmup_iterations)
            lr_ratio = self.warmup_factor * (1.0 - alpha) + alpha
            return [base_lr * lr_ratio for base_lr in self.base_lrs]
        else:
            return [
                self.eta_mins[i]
                + (base_lr - self.eta_mins[i])
                * (
                    1
                    + math.cos(
                        math.pi * (self.last_iter - self.warmup_iterations) / self.T_max
                    )
                )
                / 2
                for i, base_lr in enumerate(self.base_lrs)
            ]
1,776
36.020833
88
py
null
r-mae-main/pretrain/optim/scheduler/lr_scheduler.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.


import warnings
import weakref
from functools import wraps

import torch


class BaseScheduler(object):
    """Base LR scheduler supporting both per-iteration and per-epoch stepping.

    Subclasses override ``get_iter_lr`` / ``get_epoch_lr`` to return one lr per
    optimizer param group (or ``None`` entries to leave a group untouched).
    """

    def __init__(self, config, optimizer):
        """Wire the scheduler to ``optimizer`` and restore iter/epoch counters.

        Args:
            config: mapping that may carry ``last_epoch`` / ``last_iter``
                (both -1 for a fresh run; both set when resuming).
            optimizer: a ``torch.optim.Optimizer`` whose param groups are stepped.

        Raises:
            ValueError: if ``optimizer`` has the wrong type, or only one of
                ``last_epoch`` / ``last_iter`` indicates a resume.
        """
        super(BaseScheduler, self).__init__()
        if not isinstance(optimizer, torch.optim.Optimizer):
            raise ValueError("optimizer must be an instance of torch.optim.Optimizer")
        self.config = config
        self.optimizer = optimizer
        last_epoch = config.get("last_epoch", -1)
        last_iter = config.get("last_iter", -1)

        # Following https://github.com/pytorch/pytorch/issues/20124
        # We would like to ensure that `lr_scheduler.step()` is called after
        # `optimizer.step()`
        def with_counter(method):
            # Wraps optimizer.step so every call bumps optimizer._step_count,
            # letting step()/step_epoch() warn about wrong call ordering.
            if getattr(method, "_with_counter", False):
                # `optimizer.step()` has already been replaced, return.
                return method

            # Keep a weak reference to the optimizer instance to prevent
            # cyclic references.
            instance_ref = weakref.ref(method.__self__)
            # Get the unbound method for the same purpose.
            func = method.__func__
            cls = instance_ref().__class__
            del method

            @wraps(func)
            def wrapper(*args, **kwargs):
                instance = instance_ref()
                instance._step_count += 1
                wrapped = func.__get__(instance, cls)
                return wrapped(*args, **kwargs)

            # Note that the returned function here is no longer a bound method,
            # so attributes like `__func__` and `__self__` no longer exist.
            wrapper._with_counter = True
            return wrapper

        self.optimizer.step = with_counter(self.optimizer.step)
        self.optimizer._step_count = 0

        if last_epoch == -1:
            # Fresh run: remember each group's starting lr.
            for group in self.optimizer.param_groups:
                group.setdefault("initial_lr", group["lr"])
        else:
            # Resume: the checkpointed param groups must already carry initial_lr.
            for i, group in enumerate(self.optimizer.param_groups):
                if "initial_lr" not in group:
                    raise KeyError(
                        "param 'initial_lr' is not specified "
                        "in param_groups[{}] when resuming an optimizer".format(i)
                    )
        # last_epoch and last_iter must be restored together, or not at all.
        if last_epoch == -1 and last_iter != -1:
            raise ValueError(
                "Found last_epoch = -1 but last_iter = {}".format(last_iter)
            )
        elif last_epoch != -1 and last_iter == -1:
            raise ValueError(
                "Found last_epoch = {} but last_iter = -1".format(last_epoch)
            )
        elif last_epoch == -1 and last_iter == -1:
            last_epoch = 0
            last_iter = 0
        self.base_lrs = list(
            map(lambda group: group["initial_lr"], self.optimizer.param_groups)
        )
        self.last_iter = last_iter
        self.last_epoch = last_epoch
        self._step_count = 0
        self._step_epoch_count = 0
        # Apply the schedule once so lrs are correct before the first update.
        self.step(last_iter)
        self.step_epoch(last_epoch)

    def state_dict(self):
        """Return scheduler state, excluding the optimizer and config references."""
        return {
            key: value
            for key, value in self.__dict__.items()
            if key not in ("optimizer", "config")
        }

    def load_state_dict(self, state_dict):
        """Restore state produced by :meth:`state_dict` and re-apply the schedule."""
        self.__dict__.update(state_dict)
        print(
            "Step to the last epoch {}, last iter {}".format(
                self.last_epoch, self.last_iter
            )
        )
        self.step(self.last_iter)
        self.step_epoch(self.last_epoch)

    def get_iter_lr(self):
        """Per-group lr for the current iteration; ``None`` entries are skipped."""
        return [None for _ in self.base_lrs]

    def get_epoch_lr(self):
        """Per-group lr for the current epoch; ``None`` entries are skipped."""
        return [None for _ in self.base_lrs]

    def step(self, iter=None):
        """Advance the iteration counter (default: +1) and update param-group lrs."""
        if self._step_count == 1:
            if self.optimizer._step_count < 1:
                warnings.warn(
                    "Detected call of 'lr_scheduler.step()' before 'optimizer.step()'. "
                    "In Pytorch 1.1.0 and later, you should call them in the opposite order: "
                    "'optimizer.step()' before 'lr_scheduler.step()'. Failure to do this "
                    "will result in Pytorch skipping the first value of the learning rate schedule."
                )
        self._step_count += 1

        if iter is None:
            iter = self.last_iter + 1
        self.last_iter = iter
        for param_group, lr in zip(self.optimizer.param_groups, self.get_iter_lr()):
            if lr is not None:
                param_group["lr"] = lr

    def step_epoch(self, epoch=None):
        """Advance the epoch counter (default: +1) and update param-group lrs."""
        if self._step_epoch_count == 1:
            if self.optimizer._step_count < 1:
                warnings.warn(
                    "Detected call of 'lr_scheduler.step_epoch()' before 'optimizer.step_epoch()'. "
                    "In Pytorch 1.1.0 and later, you should call them in the opposite order: "
                    "'optimizer.step_epoch()' before 'lr_scheduler.step_epoch()'. Failure to do this "
                    "will result in Pytorch skipping the first value of the learning rate schedule."
                )
        self._step_epoch_count += 1

        if epoch is None:
            epoch = self.last_epoch + 1
        self.last_epoch = epoch
        for param_group, lr in zip(self.optimizer.param_groups, self.get_epoch_lr()):
            if lr is not None:
                param_group["lr"] = lr
5,543
35.715232
102
py
null
r-mae-main/pretrain/optim/scheduler/multi_step_scheduler.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.


from bisect import bisect_right

from pretrain.optim.scheduler import register_scheduler, BaseScheduler


@register_scheduler("multi_step")
class MultiStepScheduler(BaseScheduler):
    """Multi-step LR decay (multiply by ``lr_ratio`` at each milestone) with
    optional linear warmup. Milestones are counted in iterations or epochs
    depending on ``mode``.
    """

    def __init__(self, config, optimizer):
        """Read schedule hyper-parameters from ``config`` and initialize the base.

        Args:
            config: mapping with ``use_warmup``, ``mode`` ("iter" or "epoch"),
                ``lr_steps`` (sorted milestones), ``lr_ratio``, and optional
                ``warmup_iterations`` / ``warmup_factor``.
            optimizer: optimizer whose param groups provide the base lrs.
        """
        self.use_warmup = config["use_warmup"]
        self.mode = config["mode"]
        self.lr_steps = config["lr_steps"]
        self.lr_ratio = config["lr_ratio"]
        self.warmup_iterations = config.get("warmup_iterations", 0)
        self.warmup_factor = config.get("warmup_factor", 1)
        assert self.warmup_iterations < self.lr_steps[0]
        assert self.mode in ["iter", "epoch"], "Only iter|epoch are accepted!"
        super(MultiStepScheduler, self).__init__(config, optimizer)

    def get_iter_lr(self):
        """Per-group lr for the current iteration; ``None`` in epoch mode."""
        # Fix: require warmup_iterations > 0 before taking the warmup branch.
        # warmup_iterations defaults to 0, so use_warmup=True with the default
        # previously raised ZeroDivisionError at iteration 0.
        if (
            self.use_warmup is True
            and self.warmup_iterations > 0
            and self.last_iter <= self.warmup_iterations
        ):
            alpha = float(self.last_iter) / float(self.warmup_iterations)
            lr_ratio = self.warmup_factor * (1.0 - alpha) + alpha
            return [base_lr * lr_ratio for base_lr in self.base_lrs]

        if self.mode == "iter":
            # bisect_right counts how many milestones have already passed.
            return [
                base_lr * self.lr_ratio ** bisect_right(self.lr_steps, self.last_iter)
                for base_lr in self.base_lrs
            ]
        return [None for _ in self.base_lrs]

    def get_epoch_lr(self):
        """Per-group lr for the current epoch; ``None`` in iter mode."""
        if self.mode == "epoch":
            return [
                base_lr * self.lr_ratio ** bisect_right(self.lr_steps, self.last_epoch)
                for base_lr in self.base_lrs
            ]
        return [None for _ in self.base_lrs]
1,763
35.75
87
py
null
r-mae-main/pretrain/optim/scheduler/step_scheduler.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.


from pretrain.optim.scheduler import register_scheduler, BaseScheduler


@register_scheduler("step")
class StepScheduler(BaseScheduler):
    """Fixed-interval LR decay (multiply by ``lr_ratio`` every ``step_size``
    iterations or epochs, per ``mode``) with optional linear warmup.
    """

    def __init__(self, config, optimizer):
        """Read schedule hyper-parameters from ``config`` and initialize the base.

        Args:
            config: mapping with ``use_warmup``, ``mode`` ("iter" or "epoch"),
                ``step_size``, ``lr_ratio``, and optional ``warmup_iterations``
                / ``warmup_factor``.
            optimizer: optimizer whose param groups provide the base lrs.
        """
        self.use_warmup = config["use_warmup"]
        self.mode = config["mode"]
        self.step_size = config["step_size"]
        self.lr_ratio = config["lr_ratio"]
        self.warmup_iterations = config.get("warmup_iterations", 0)
        self.warmup_factor = config.get("warmup_factor", 1)
        assert self.mode in ["iter", "epoch"], "Only iter|epoch are accepted!"
        super().__init__(config, optimizer)

    def get_iter_lr(self):
        """Per-group lr for the current iteration; ``None`` in epoch mode."""
        # Fix: require warmup_iterations > 0 before taking the warmup branch —
        # warmup_iterations defaults to 0, so use_warmup=True with the default
        # previously raised ZeroDivisionError at iteration 0.
        if (
            self.use_warmup is True
            and self.warmup_iterations > 0
            and self.last_iter <= self.warmup_iterations
        ):
            alpha = float(self.last_iter) / float(self.warmup_iterations)
            lr_ratio = self.warmup_factor * (1.0 - alpha) + alpha
            return [base_lr * lr_ratio for base_lr in self.base_lrs]

        if self.mode == "iter":
            # Fix: the iter-mode branch previously decayed on last_epoch, which
            # froze the schedule within an epoch; iter mode must key on
            # last_iter (as MultiStepScheduler does).
            return [
                base_lr * self.lr_ratio ** (self.last_iter // self.step_size)
                for base_lr in self.base_lrs
            ]
        return [None for _ in self.base_lrs]

    def get_epoch_lr(self):
        """Per-group lr for the current epoch; ``None`` in iter mode."""
        if self.mode == "epoch":
            return [
                base_lr * self.lr_ratio ** (self.last_epoch // self.step_size)
                for base_lr in self.base_lrs
            ]
        return [None for _ in self.base_lrs]
1,623
35.088889
80
py
null
r-mae-main/pretrain/trainer/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.


import importlib
import os

# Maps trainer name -> trainer class; filled by the @register_trainer decorator.
TRAINER_REGISTRY = {}


def build_trainer(configuration, *args, **kwargs):
    """Freeze ``configuration`` and instantiate the trainer it names.

    The trainer class is looked up in TRAINER_REGISTRY under
    ``config.training.trainer``.
    """
    configuration.freeze()
    trainer_cls = TRAINER_REGISTRY[configuration.get_config().training.trainer]
    return trainer_cls(configuration)


def register_trainer(name):
    """Class decorator registering a trainer class under ``name``."""

    def register_trainer_cls(cls):
        if name in TRAINER_REGISTRY:
            raise ValueError("Cannot register duplicate trainer ({})".format(name))
        TRAINER_REGISTRY[name] = cls
        return cls

    return register_trainer_cls


# Import every sibling module/package so their @register_trainer decorators run.
trainers_dir = os.path.dirname(__file__)
for file in os.listdir(trainers_dir):
    if file.startswith(("_", ".")):
        continue
    path = os.path.join(trainers_dir, file)
    is_py = file.endswith(".py")
    if is_py or os.path.isdir(path):
        trainer_name = file[: file.find(".py")] if is_py else file
        importlib.import_module("pretrain.trainer." + trainer_name)
1,178
25.2
83
py
null
r-mae-main/pretrain/trainer/base_trainer.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.


import time
import os
import collections

import torch
from torch.cuda.amp import GradScaler

from pretrain.utils.meter import Meter
from pretrain.utils.checkpoint import Checkpoint
from pretrain.utils.distributed import (
    is_master,
    is_dist_avail_and_initialized,
    synchronize,
)
from pretrain.utils.general import print_model_parameters
from pretrain.utils.logger import Logger, TensorboardLogger
from pretrain.utils.timer import Timer
from pretrain.model import build_model
from pretrain.optim import build_optimizer
from pretrain.optim.scheduler import build_scheduler
from pretrain.dataset import build_dataset, build_dataloader
from pretrain.trainer import register_trainer
from pretrain.trainer.engine import build_engine


@register_trainer("base_trainer")
class BaseTrainer:
    """Orchestrates a full run: device/dataset/model setup, the training loop
    (delegated per-epoch to an engine), evaluation, logging and checkpointing.

    Call :meth:`load` once before :meth:`train`.
    """

    def __init__(self, configuration):
        # Only stores configuration-derived handles; heavy setup happens in load().
        self.configuration = configuration
        self.config = self.configuration.get_config()
        self.profiler = Timer()
        self.total_timer = Timer()
        self.running_config = self.config.training
        if self.configuration is not None:
            self.args = self.configuration.args

    @property
    def model_without_ddp(self):
        """The raw model, unwrapped from DistributedDataParallel if active."""
        if self.parallel:
            return self.model.module
        else:
            return self.model

    def load(self):
        """Set up device, logger, datasets, model and optimizer (in that order)."""
        self._set_device()
        self.run_type = self.running_config.get("run_type", "train")
        self.writer = Logger(
            self.running_config.save_dir,
            self.running_config.logger_level,
            self.running_config.log_format,
            self.running_config.should_not_log,
        )
        self.configuration.pretty_print(self.writer)
        self.config_based_setup()
        self.load_task()
        self.load_model_and_optimizer()

    def _set_device(self):
        """Pick the compute device: rank-pinned CUDA in distributed mode,
        plain CUDA if available, else CPU."""
        self.local_rank = self.config.device_id
        if self.config.distributed.init_method is not None:
            self.device = torch.device("cuda", self.local_rank)
        elif torch.cuda.is_available():
            self.device = torch.device("cuda")
        else:
            self.device = torch.device("cpu")

    def config_based_setup(self):
        """Validate the seed and configure cuDNN flags."""
        seed = self.running_config.seed
        if seed is None:
            # guard against training without seed
            raise ValueError("seed should not be None")
        torch.backends.cudnn.deterministic = False
        torch.backends.cudnn.benchmark = False

    def load_task(self):
        """Build datasets/dataloaders/samplers for train, val and test splits."""
        self.writer.write("Loading datasets", "info")
        self.datasets = {}
        self.dataloaders = {}
        self.samplers = {}
        self._load_split_task(["train", "val", "test"])

    def _load_split_task(self, splits):
        """Populate the per-split dicts; splits absent from run_type get None."""
        self.splits = []
        for split in splits:
            dataset, dataloader, sampler = None, None, None
            if split in self.run_type:
                dataset = build_dataset(self.config, split, self.device)
                dataloader, sampler = build_dataloader(self.config, split, dataset)
                self.splits.append(split)
            self.datasets[split] = dataset
            self.dataloaders[split] = dataloader
            self.samplers[split] = sampler
        for split, dataset in self.datasets.items():
            if dataset is not None:
                print(f"{split}: {len(dataset)} images")
                print(f"{split}: {dataset}")

    def load_model_and_optimizer(self):
        """Build model/optimizer/scheduler, move to device, wrap for DDP."""
        self.writer.write("Loading model and optimizer", "info")
        # Class count comes from the first split that was actually built.
        num_classes = self.datasets[self.splits[0]].get_answer_size()
        self.model = build_model(self.config, num_classes)
        if "cuda" in str(self.device):
            device_info = "CUDA Device {} is: {}".format(
                self.config.distributed.rank,
                torch.cuda.get_device_name(self.local_rank),
            )
            self.writer.write(device_info, log_all=True)
        self.model = self.model.to(self.device)
        self.optimizer = build_optimizer(self.config, self.model)
        self.lr_scheduler = build_scheduler(self.config, self.optimizer)
        print(self.optimizer)
        self._parallelize_model()
        self._init_params_and_checkpoint()

    def _parallelize_model(self):
        """Wrap the model in DistributedDataParallel when running distributed."""
        self.parallel = False
        if "cuda" in str(self.device) and is_dist_avail_and_initialized():
            find_unused_parameters = self.running_config.find_unused_parameters
            self.model = torch.nn.parallel.DistributedDataParallel(
                self.model,
                device_ids=[self.local_rank],
                output_device=self.local_rank,
                find_unused_parameters=find_unused_parameters,
            )
            self.parallel = True

    def _init_params_and_checkpoint(self):
        """Resolve run-length/interval settings, meters, checkpoint resume and
        the optional tensorboard writer."""
        self.writer.write("Torch version is: " + torch.__version__)
        self.use_fp16 = (
            False
            if self.running_config.use_fp16 == "none"
            else self.running_config.use_fp16
        )
        self.grad_scaler = GradScaler() if self.use_fp16 else None
        self.checkpoint = Checkpoint(self)
        self.engine = build_engine(self.config, self)
        self.log_interval = self.running_config.log_interval
        self.eval_interval = self.running_config.evaluation_interval
        self.save_interval = self.running_config.checkpoint_interval
        self.iter_per_update = self.running_config.iter_per_update
        self.iou_type = (
            tuple(self.running_config.iou_type)
            if self.running_config.iou_type is not None
            else None
        )
        self.max_update = self.running_config.max_update
        self.max_epoch = self.running_config.max_epoch
        if self.max_epoch is not None and self.max_update is not None:
            raise ValueError("max_epoch and max_update are mutually exclusive!")
        batch_size = self.running_config.batch_size
        if self.dataloaders["train"] is not None:
            update_per_epoch = len(self.datasets["train"]) // batch_size
            if self.max_epoch is not None:
                # Convert epoch-based limits/intervals into update counts.
                self.max_update = self.max_epoch * update_per_epoch
                self.eval_interval = int(self.eval_interval * update_per_epoch)
                self.save_interval = int(self.save_interval * update_per_epoch)
        else:
            self.max_update = 0
        self.meters = {
            "train": Meter(),
            "val": Meter(),
            "test": Meter(),
        }
        self.timers = {"train": Timer(), "val": Timer(), "test": Timer()}
        self.eval_iteration = 0
        if self.datasets["val"] is not None:
            self.eval_iteration = len(self.datasets["val"]) // batch_size
        self.current_epoch = 0
        self.current_update = 0
        # load_state_dict returns whether a checkpoint was actually restored.
        self.is_resumed = self.checkpoint.load_state_dict()
        self.not_debug = self.running_config.logger_level != "debug"
        self.tb_writer = None
        if self.running_config.tensorboard:
            tb_log_folder = os.path.join(self.writer.log_folder, "tensorboard")
            if self.running_config.tensorboard_logdir:
                tb_log_folder = self.running_config.tensorboard_logdir
            # Only the master rank creates the directory; others wait at the barrier.
            if is_master() and not os.path.exists(tb_log_folder):
                os.makedirs(tb_log_folder)
            synchronize()
            self.tb_writer = TensorboardLogger(tb_log_folder)

    def train(self):
        """Run the training loop until max_update, then finalize.

        Falls through to inference-only when 'train' is not in run_type.
        """
        self.writer.write("===== Model =====")
        self.writer.write(self.model)
        print_model_parameters(self.model, self.writer)
        if "train" not in self.run_type:
            self.inference()
            return
        self.model.train()
        self.profile("Setup Time")
        torch.autograd.set_detect_anomaly(False)
        self.writer.write("Starting training...")
        self._resume_training_state_if_needed()
        while self.current_update < self.max_update:
            self.current_epoch += 1
            self.engine.train_epoch(0)
        self.finalize()

    def _resume_training_state_if_needed(self):
        """On resume, advance the epoch-level scheduler to the restored position."""
        trained_idx = self.current_update % len(self.dataloaders["train"])
        is_epoch_finished = trained_idx == 0
        if self.is_resumed:
            self.writer.write(f"Resuming training at {self.current_update}...")
            if is_epoch_finished:
                self.lr_scheduler.step_epoch(self.current_epoch)
            else:
                # Mid-epoch resume: count the partially-trained epoch as started.
                self.current_epoch += 1
                self.lr_scheduler.step_epoch(self.current_epoch)

    def finalize(self):
        """Run the final evaluation pass and close checkpoint/tensorboard."""
        self.writer.write("Stepping into final check")
        self.inference()
        self.writer.write(
            "Finished run in {}".format(self.total_timer.get_time_since_start())
        )
        self.checkpoint.finalize()
        if self.tb_writer is not None:
            self.tb_writer.close()
            del self.tb_writer

    def inference(self):
        """Evaluate on val and/or test depending on run_type."""
        # NOTE(review): the `% self.eval_interval != 0` clause skips the val pass
        # when the final update lands exactly on an eval interval — presumably to
        # avoid re-running an evaluation that just happened; confirm intent.
        if "val" in self.run_type and (
            (self.current_update % self.eval_interval != 0)
            or ("train" not in self.run_type)
        ):
            self.writer.write("Starting inference on val set")
            self.engine.evaluate("val")
        if "inference" in self.run_type or "test" in self.run_type:
            self.writer.write("Starting inference on test set")
            self.engine.evaluate("test")
        self.writer.write("The inference finished!")

    def _update_tensorboard(self, split):
        """Push the split's scalar meters to tensorboard at the current update."""
        if self.running_config.tensorboard:
            stats_dict = self.meters[split].get_scalar_dict()
            self.tb_writer.add_scalars(stats_dict, self.current_update)

    # NOTE(review): mutable default `stats={}` is shared across calls; it is
    # only read here, but callers must not rely on mutating it.
    def _print_log(self, split, stats={}):
        """Emit a progress line: update counter, split meters, extra stats."""
        log_dict = collections.OrderedDict()
        log_dict.update(
            {f"progress on {split}": f"{self.current_update}/{self.max_update}"}
        )
        log_dict.update(self.meters[split].get_log_dict(split))
        log_dict["\n"] = "-" * 20
        log_dict.update(stats)
        self.writer.log_progress(log_dict)

    def _calculate_time_left(self):
        """Estimate remaining wall time from the elapsed time of the current
        log window, scaled by remaining log and eval cycles."""
        # assumes Timer.start is in milliseconds (matches time.time() * 1000) —
        # TODO confirm against pretrain.utils.timer.
        time_taken_for_log = time.time() * 1000 - self.timers["train"].start
        iterations_left = self.max_update - self.current_update
        num_logs_left = iterations_left / self.log_interval
        time_left = num_logs_left * time_taken_for_log
        eval_to_log_ratio = self.eval_iteration / self.log_interval
        num_eval_left = iterations_left / self.eval_interval
        time_left += num_eval_left * eval_to_log_ratio * time_taken_for_log
        return self.timers["train"].get_time_hhmmss(gap=time_left)

    def profile(self, text):
        """In debug logging mode only: log elapsed time since the last profile
        call under the given label, then reset the profiler."""
        if self.not_debug:
            return
        synchronize()
        self.writer.write(text + ": " + self.profiler.get_time_since_start(), "debug")
        self.profiler.reset()
10,912
34.780328
86
py
null
r-mae-main/pretrain/trainer/engine/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.


import importlib
import os

# Maps task name -> BaseEngine subclass; filled by the @register_engine decorator.
ENGINE_REGISTRY = {}

# Imported after the registry exists so base_engine can be loaded safely.
from .base_engine import BaseEngine


def build_engine(config, trainer):
    """Instantiate the engine registered under ``config.task`` for ``trainer``."""
    engine_cls = ENGINE_REGISTRY[config.task]
    return engine_cls(trainer)


def register_engine(*name_list):
    """Class decorator registering a BaseEngine subclass under one or more names."""

    def register_engine_cls(cls):
        for name in name_list:
            if name in ENGINE_REGISTRY:
                raise ValueError("Cannot register duplicate engine ({})".format(name))
            if not issubclass(cls, BaseEngine):
                raise ValueError(
                    "Engine ({}: {}) must extend BaseEngine".format(name, cls.__name__)
                )
            ENGINE_REGISTRY[name] = cls
        return cls

    return register_engine_cls


# Import every sibling module/package so their @register_engine decorators run.
trainers_dir = os.path.dirname(__file__)
for file in os.listdir(trainers_dir):
    if file.startswith(("_", ".")):
        continue
    path = os.path.join(trainers_dir, file)
    is_py = file.endswith(".py")
    if is_py or os.path.isdir(path):
        engine_name = file[: file.find(".py")] if is_py else file
        importlib.import_module("pretrain.trainer.engine." + engine_name)
1,352
26.06
87
py
null
r-mae-main/pretrain/trainer/engine/base_engine.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import torch from pretrain.utils.distributed import reduce_dict from pretrain.utils.general import clip_grad_norm, filter_grads class BaseEngine: def __init__(self, trainer): self.trainer = trainer self.model = self.trainer.model self.optimizer = self.trainer.optimizer self.lr_scheduler = self.trainer.lr_scheduler self.dataloaders = self.trainer.dataloaders self.datasets = self.trainer.datasets self.params = filter_grads(self.model.parameters()) self.num_skip = 0 @torch.no_grad() def evaluate(self, split): raise NotImplementedError @property def current_epoch(self): current_update = self.trainer.current_update batch_size = self.trainer.running_config.batch_size if self.datasets["train"] is not None: update_per_epoch = len(self.datasets["train"]) // batch_size else: update_per_epoch = 1 return (current_update + update_per_epoch - 1) // update_per_epoch def train_epoch(self, trained_batch_idx): raise NotImplementedError def _compute_loss(self, output, target): raise NotImplementedError def _forward(self, batch, **kwargs): self.trainer.profile("Batch prepare time") sample, target = batch if self.trainer.use_fp16: assert self.trainer.use_fp16 in ("float16", "bfloat16") dtype = ( torch.bfloat16 if self.trainer.use_fp16 == "bfloat16" else torch.float16 ) with torch.autocast(device_type="cuda", dtype=dtype): output = self.model(sample, target) output = self._compute_loss(output, target, **kwargs) else: output = self.model(sample, target) output = self._compute_loss(output, target, **kwargs) self.trainer.profile("Forward time") return output, target def _backward(self, output): loss = output["losses"] if self.trainer.use_fp16: self.trainer.grad_scaler.scale(loss).backward() else: loss.backward() self.trainer.profile("Backward time") def _step(self, current_update): max_norm = 
self.trainer.running_config.max_norm if self.trainer.use_fp16: self.trainer.grad_scaler.unscale_(self.optimizer) self.trainer.profile("Unscale time") norm = clip_grad_norm(self.params, max_norm) self.trainer.profile("Clip grad time") if self.trainer.use_fp16: self.trainer.grad_scaler.step(self.optimizer) self.trainer.profile("Step time") self.trainer.grad_scaler.update() self.trainer.profile("Update time") else: self.optimizer.step() self.trainer.profile("Step time") if torch.isnan(norm).item() or torch.isinf(norm).item(): self.num_skip += 1 if self.num_skip >= 100: raise RuntimeError("Skipping iteration for more than 100 steps...") return current_update else: self.num_skip = 0 if self.trainer.tb_writer is not None: self.trainer.tb_writer.add_scalars({"total_norm": norm}, current_update) current_update += 1 return current_update def _sync_losses_and_metrics(self, split, output): losses = output["losses_stat"] metrics = output["metrics"] reduced_losses = reduce_dict(losses) reduced_metrics = reduce_dict(metrics) update_dict = {} update_dict.update(reduced_losses) update_dict.update(reduced_metrics) batch_size = self.trainer.running_config.batch_size self.trainer.meters[split].update(update_dict, batch_size) @torch.no_grad() def _update_info(self, split): current_update = self.trainer.current_update log_interval = self.trainer.log_interval if split == "train" and current_update % log_interval == 0: stats = {} ups = log_interval / self.trainer.timers["train"].unix_time_since_start() if "cuda" in str(self.trainer.device): stats["max mem"] = torch.cuda.max_memory_allocated() / 1000 stats["max mem"] //= 1000 stats.update( { "epoch": self.current_epoch, "data_epoch": self.trainer.current_epoch, "update": current_update, "max_update": self.trainer.max_update, "lr": [ param_group["lr"] for param_group in self.optimizer.param_groups ], "ups": "{:.2f}".format(ups), "time": self.trainer.timers["train"].get_time_since_start(), "time_since_start": 
self.trainer.total_timer.get_time_since_start(), "eta": self.trainer._calculate_time_left(), } ) self.trainer._print_log(split, stats) self.trainer._update_tensorboard(split) self.trainer.timers["train"].reset() self.trainer.profile("Update info time")
5,423
34.45098
88
py
null
r-mae-main/pretrain/trainer/engine/pretrain_engine.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import gc import collections import torch import torchvision.transforms as transforms import torchvision.transforms.functional as F from torchvision.utils import make_grid from pretrain.dataset.helper import Prefetcher from pretrain.trainer.engine import register_engine from pretrain.utils.distributed import is_master, synchronize from pretrain.utils.functional import patchify, unpatchify from pretrain.trainer.engine.base_engine import BaseEngine @register_engine("pretrain") class PretrainEngine(BaseEngine): def __init__(self, trainer): super().__init__(trainer) def _compute_loss(self, model_output, target): if self.model.training: iter_per_update = self.trainer.iter_per_update # Make sure theat the output from the model is a Mapping assert isinstance( model_output, collections.abc.Mapping ), "A dict must be returned from the forward of the model." assert "losses" in model_output, "losses should be returned in model_output" assert isinstance( model_output["losses"], collections.abc.Mapping ), "'losses' must be a dict." loss_dict = { k: v / iter_per_update for k, v in model_output["losses"].items() } losses_stat = {} total_loss = sum(loss_dict[k] for k in loss_dict.keys()) losses_stat.update({k: v for k, v in loss_dict.items()}) losses_stat["total_loss"] = total_loss model_output["losses"] = total_loss model_output["losses_stat"] = losses_stat assert ( "metrics" in model_output ), "metrics should be returned in model_output" assert isinstance( model_output["metrics"], collections.abc.Mapping ), "'metrics' must be a dict." for kk, vv in model_output["metrics"].items(): model_output["metrics"][kk] = vv.detach() return model_output @torch.no_grad() def evaluate(self, split): self.trainer.writer.write(f"Evaluation time. 
Running on full {split} set...") self.trainer.timers[split].reset() self.model.eval() invTrans = transforms.Compose( [ transforms.Normalize( mean=[0.0, 0.0, 0.0], std=[1 / 0.229, 1 / 0.224, 1 / 0.225] ), transforms.Normalize( mean=[-0.485, -0.456, -0.406], std=[1.0, 1.0, 1.0] ), # transforms.ToPILImage(), ] ) prefetcher = Prefetcher( self.dataloaders[split], self.datasets[split], prefetch=False ) for i in range(15): batch = prefetcher.get_next_sample() output = self._forward(batch)[0] sample = batch[0] _, pred, pred_region, mask_label, (mask, region_mask) = output b, c, h, w = sample["image"].shape num_region = ( 0 if not getattr(self.datasets[split], "rmae_sampling", False) else self.datasets[split].num_region ) if num_region > 0: patch_size = self.datasets[split].patch_size num_patches = h // patch_size if region_mask.dim() < pred_region.dim() - 1: pred_region = torch.where( ~(region_mask.unsqueeze(-1).unsqueeze(1).bool()), torch.tensor( [0.5], dtype=torch.float, device=pred_region.device ), (pred_region >= 0).float(), ) else: pred_region = torch.where( ~(region_mask.unsqueeze(-1).bool()), torch.tensor( [0.5], dtype=torch.float, device=pred_region.device ), (pred_region >= 0).float(), ) pred_region = pred_region.view( b, -1, num_patches, num_patches, patch_size, patch_size ) pred_region = pred_region.permute(0, 1, 2, 4, 3, 5).reshape(b, -1, h, w) patches = patchify(sample["image"], patch_size) masked_image = patches.masked_fill(mask.unsqueeze(-1).bool(), 0.0) masked_image = unpatchify(masked_image, patch_size=patch_size) if pred is not None: mean = patches.mean(dim=-1, keepdim=True) var = patches.var(dim=-1, keepdim=True) pred = pred * ((var + 1.0e-6) ** 0.5) + mean pred_image = unpatchify(pred, patch_size=patch_size) pred_masked_image = unpatchify( torch.where(mask.unsqueeze(-1).bool(), pred, patches), patch_size=patch_size, ) image_i = invTrans(sample["image"][0].cpu()).clamp(min=0, max=1) masked_image_i = invTrans(masked_image[0].cpu()).clamp(min=0, max=1) 
image_vis = make_grid([image_i, masked_image_i], nrow=2, pad_value=1.0) self.trainer.tb_writer.add_image( f"{i}_image", image_vis, self.current_epoch ) if pred is not None: pred_image_i = invTrans(pred_image[0].cpu()).clamp(min=0, max=1) pred_masked_image_i = invTrans(pred_masked_image[0].cpu()).clamp( min=0, max=1 ) pred_image_vis = make_grid( [pred_image_i, pred_masked_image_i], nrow=2, pad_value=1.0 ) self.trainer.tb_writer.add_image( f"{i}_pred_image", pred_image_vis, self.current_epoch ) if pred_region is not None: for j in range(pred_region.shape[1]): region = make_grid( [ mask_label[0, [j, j, j]].cpu().clamp(min=0, max=1), pred_region[0, [j, j, j]].cpu().clamp(min=0, max=1), ], nrow=2, pad_value=1.0, ) self.trainer.tb_writer.add_image( f"{i}_pred_mask_{j}", region, self.current_epoch, ) self.model.train() gc.collect() if "cuda" in str(self.trainer.device): torch.cuda.empty_cache() self.trainer.timers["train"].reset() def train_epoch(self, trained_batch_idx): current_epoch = self.trainer.current_epoch current_update = self.trainer.current_update max_update = self.trainer.max_update iter_per_update = self.trainer.iter_per_update eval_interval = self.trainer.eval_interval save_interval = self.trainer.save_interval prefetcher = Prefetcher( self.trainer.dataloaders["train"], self.trainer.datasets["train"], prefetch=False, ) if self.trainer.samplers["train"] is not None and self.trainer.parallel: self.trainer.samplers["train"].set_epoch(current_epoch) for idx in range(len(self.trainer.dataloaders["train"])): # for idx, batch in enumerate(trainer.dataloaders["train"]): batch = prefetcher.get_next_sample() if idx < trained_batch_idx: continue self.optimizer.zero_grad(set_to_none=True) # for param in self.params: # param.grad = None if iter_per_update > 1: assert iter_per_update == len(batch) for idx, splitted_batch in enumerate(batch): if (idx + 1) < iter_per_update: with self.model.no_sync(): output = self._forward(splitted_batch)[0] if output is None: continue 
self._sync_losses_and_metrics("train", output) self._backward(output) else: output = self._forward(splitted_batch)[0] if output is None: continue self._sync_losses_and_metrics("train", output) self._backward(output) else: output = self._forward(batch)[0] self._sync_losses_and_metrics("train", output) self._backward(output) current_update = self._step(current_update) if current_update == self.trainer.current_update: self.trainer.writer.write("Skipping iteration...", "warning") continue self.lr_scheduler.step(current_update) assert self.trainer.current_update == (current_update - 1) self.trainer.current_update = current_update self._update_info("train") if current_update % save_interval == 0: self.trainer.writer.write("Checkpoint time. Saving a checkpoint...") self.trainer.checkpoint.save(current_update) if current_update % eval_interval == 0 and "val" in self.trainer.run_type: self.evaluate("val") if current_update > max_update: break self.lr_scheduler.step_epoch(current_epoch)
9,868
36.957692
88
py
null
r-mae-main/pretrain/utils/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree.
196
38.4
61
py
null
r-mae-main/pretrain/utils/box_ops.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. from typing import List import torch import torch.nn.functional as F from torchvision.ops.boxes import box_area from pycocotools import mask as coco_mask def convert_to_xywh(boxes): xmin, ymin, xmax, ymax = boxes.unbind(1) return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1) def box_xywh_to_xyxy(x) -> torch.Tensor: x, y, w, h = x.unbind(-1) return torch.stack((x, y, x + w, y + h), dim=-1) def box_cxcywh_to_xyxy(x) -> torch.Tensor: x_c, y_c, w, h = x.unbind(-1) b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)] return torch.stack(b, dim=-1) def box_xyxy_to_cxcywh(x) -> torch.Tensor: x0, y0, x1, y1 = x.unbind(-1) b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)] return torch.stack(b, dim=-1) def box_intersect(boxes1, boxes2) -> torch.Tensor: lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] wh = (rb - lt).clamp(min=0) # [N,M,2] inter = wh[:, :, 0] * wh[:, :, 1] # [N,M] return inter def sample_boxes(num_sample, num_boxes, device, dtype): boxes = torch.rand(num_sample, num_boxes, 4, device=device, dtype=dtype) boxes = box_cxcywh_to_xyxy(boxes).clamp(min=0.01, max=0.99) return box_xyxy_to_cxcywh(boxes) # modified from torchvision to also return the union def box_iou(boxes1, boxes2) -> List[torch.Tensor]: area1 = box_area(boxes1) area2 = box_area(boxes2) inter = box_intersect(boxes1, boxes2) union = area1[:, None] + area2 - inter iou = inter / union return iou, union def box_iou_detectron(boxes1, boxes2) -> torch.Tensor: area1 = box_area(boxes1) area2 = box_area(boxes2) inter = box_intersect(boxes1, boxes2) # handle empty boxes iou = torch.where( inter > 0, inter / (area1[:, None] + area2 - inter), torch.zeros(1, dtype=inter.dtype, device=inter.device), ) return iou 
def generalized_box_iou(boxes1, boxes2) -> torch.Tensor: """ Generalized IoU from https://giou.stanford.edu/ The boxes should be in [x0, y0, x1, y1] format Returns a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2) """ assert (boxes1[:, 2:] >= boxes1[:, :2]).all() assert (boxes2[:, 2:] >= boxes2[:, :2]).all() iou, union = box_iou(boxes1, boxes2) lt = torch.min(boxes1[:, None, :2], boxes2[:, :2]) rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) wh = (rb - lt).clamp(min=0) # [N, M, 2] area = wh[:, :, 0] * wh[:, :, 1] return iou - (area - union) / area def dice_cost(masks1: torch.Tensor, masks2: torch.Tensor) -> torch.Tensor: """ masks1: [N, C] masks2: [M, C] """ masks1 = masks1.flatten(1) masks2 = masks2.flatten(1) numerator = 2 * torch.einsum("nc,mc->nm", masks1, masks2) denominator = masks1.sum(-1).unsqueeze(1) + masks2.sum(-1).unsqueeze(0) return -(numerator + 1) / (denominator + 1) def focal_mask_cost( masks1: torch.Tensor, masks2: torch.Tensor, alpha: float = 0.25, gamma: float = 2.0 ) -> torch.Tensor: """ masks1: [N, C] masks2: [M, C] """ masks1 = masks1.flatten(1) masks2 = masks2.flatten(1) hw = masks1.shape[1] neg_cost = (1 - alpha) * (masks1**gamma) * (-(1 - masks1 + 1e-6).log()) pos_cost = alpha * ((1 - masks1) ** gamma) * (-(masks1 + 1e-6).log()) cost = torch.einsum("nc,mc->nm", pos_cost, masks2) + torch.einsum( "nc,mc->nm", neg_cost, (1 - masks2) ) return cost / hw def bce_mask_cost(masks1: torch.Tensor, masks2: torch.Tensor) -> torch.Tensor: """ Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). 
Returns: Loss tensor """ masks1 = masks1.flatten(1) masks2 = masks2.flatten(1) hw = masks1.shape[1] pos = F.binary_cross_entropy_with_logits( masks1, torch.ones_like(masks1), reduction="none" ) neg = F.binary_cross_entropy_with_logits( masks1, torch.zeros_like(masks1), reduction="none" ) loss = torch.einsum("nc,mc->nm", pos, masks2) + torch.einsum( "nc,mc->nm", neg, (1 - masks2) ) return loss / hw def focal_cls_cost( out_prob: torch.Tensor, tgt_ids: torch.Tensor, alpha: float = 0.25, gamma: float = 2.0, ) -> torch.Tensor: """ out_prob: [N, C] tgt_ids: [N,] """ neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log()) pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log()) cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids] return cost_class def bce_cls_cost(out_prob: torch.Tensor, tgt_ids: torch.Tensor) -> torch.Tensor: neg_cost_class = -(1 - out_prob + 1e-8).log() pos_cost_class = -(out_prob + 1e-8).log() cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids] return cost_class def ce_cls_cost(out_prob: torch.Tensor, tgt_ids: torch.Tensor) -> torch.Tensor: """ out_prob: [N, C] tgt_ids: [N,] """ cost_class = -out_prob[:, tgt_ids] return cost_class def masks_to_boxes(masks) -> torch.Tensor: """ Compute the bounding boxes around the provided masks The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. 
Returns a [N, 4] tensors, with the boxes in xyxy format """ if masks.numel() == 0: return torch.zeros(0, 4, device=masks.device) h, w = masks.shape[-2:] y = torch.arange(h, dtype=torch.float) x = torch.arange(w, dtype=torch.float) y, x = torch.meshgrid(y, x) x_mask = masks * x.unsqueeze(0) x_max = x_mask.flatten(1).max(-1)[0] x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] y_mask = masks * y.unsqueeze(0) y_max = y_mask.flatten(1).max(-1)[0] y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] return torch.stack([x_min, y_min, x_max, y_max], 1) def iou_with_ign(boxes1, boxes2) -> torch.Tensor: """ Computes the amount of overlap of boxes2 has within boxes1, which is handy for dealing with ignore areas. Hence, assume that boxes2 are ignore regions and boxes1 are anchor boxes, then we may want to know how much overlap the anchors have inside the ignore regions boxes2. boxes1: (M, 4) [x1, y1, x2, y2] boxes2: (N, 4) [x1, y1, x2, y2] mode: if 'elementwise', M needs to be equal to N and we compute intersection of M pairs in boxes1 and boxes2 elementwise. Otherwise, we compute intersection of NxM pairs. 
""" area1 = box_area(boxes1) intersect = box_intersect(boxes1, boxes2) iou_w_ign = intersect / area1 return iou_w_ign def create_ref_windows(tensor_list, mask_list, ref_size, ref_size_ratios=None): ref_windows = [] eps = 1e-6 for i, tensor in enumerate(tensor_list): if mask_list is not None: not_mask = ~(mask_list[i]) y_embed = not_mask.cumsum(1, dtype=tensor.dtype) x_embed = not_mask.cumsum(2, dtype=tensor.dtype) size_h = not_mask[:, :, 0].sum(dim=-1, dtype=tensor.dtype) size_w = not_mask[:, 0, :].sum(dim=-1, dtype=tensor.dtype) else: size_h, size_w = tensor.shape[-2:] y_embed = torch.arange( 1, size_h + 1, dtype=tensor.dtype, device=tensor.device ) x_embed = torch.arange( 1, size_w + 1, dtype=tensor.dtype, device=tensor.device ) y_embed, x_embed = torch.meshgrid(y_embed, x_embed, indexing="ij") x_embed = x_embed.unsqueeze(0).repeat(tensor.shape[0], 1, 1) y_embed = y_embed.unsqueeze(0).repeat(tensor.shape[0], 1, 1) size_h = torch.tensor( [size_h] * tensor.shape[0], dtype=tensor.dtype, device=tensor.device ) size_w = torch.tensor( [size_w] * tensor.shape[0], dtype=tensor.dtype, device=tensor.device ) y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) center = torch.stack([x_embed, y_embed], dim=-1).flatten(1, 2) # b x l x 2 if ref_size_ratios is not None: center = center.unsqueeze(-2).expand( -1, -1, len(ref_size_ratios), -1 ) # b x l x nh x 2 ref_size = ref_size * torch.FloatTensor(list(ref_size_ratios)).to( tensor_list[0] ) h_embed = ref_size / size_h.unsqueeze(1) # b x nh w_embed = ref_size / size_w.unsqueeze(1) # b x nh else: h_embed = ref_size / size_h w_embed = ref_size / size_w size = torch.stack([w_embed, h_embed], dim=-1) # b x nh x 2 size = size.unsqueeze(1).expand_as(center) # b x l x nh x 2 ref_box = torch.cat([center, size], dim=-1) ref_windows.append(ref_box) ref_windows = torch.cat(ref_windows, dim=1) return ref_windows def extract_grid( x, x_mask, boxes, grid_size=15, align_corners=False, 
roi_align=False, mode="bilinear", ): """ Params: :x: (B, C, H, W) :x_mask: (B, H, W) :boxes: (B, L, 4) Return: :grid: (B, L, grid_size, grid_size, C) """ b, l = boxes.shape[:2] c = x.shape[1] if b == 0: return torch.zeros( 0, l, grid_size, grid_size, c, device=x.device, dtype=x.dtype ) grid_size = grid_size * 2 if roi_align else grid_size if align_corners: indices = torch.arange(0, grid_size, device=x.device, dtype=x.dtype) step = 1.0 / (grid_size - 1) else: indices = 0.5 + torch.arange(0, grid_size, device=x.device, dtype=x.dtype) step = 1.0 / grid_size i, j = torch.meshgrid(indices, indices, indexing="ij") grid_indices = torch.stack([j, i], dim=-1) # 7 x 7 x 2 boxes = box_cxcywh_to_xyxy(boxes) if x_mask is not None: not_x_mask = ~x_mask size_h = not_x_mask[:, :, 0].sum(dim=1, dtype=x.dtype) size_w = not_x_mask[:, 0, :].sum(dim=1, dtype=x.dtype) h, w = x.shape[-2:] ratio_h = size_h / h ratio_w = size_w / w ratio = torch.stack([ratio_w, ratio_h, ratio_w, ratio_h], dim=-1) boxes = boxes * ratio.unsqueeze(1) boxes1, boxes2 = boxes.unsqueeze(-2).unsqueeze(-2).split(2, dim=-1) grid = grid_indices * step * (boxes2 - boxes1) + boxes1 grid = grid * 2 - 1 grid = grid.view(b, l, grid_size * grid_size, 2) grid = F.grid_sample(x, grid, align_corners=False, mode=mode) if roi_align: grid = grid.view(b, -1, l, grid_size // 2, 2, grid_size // 2, 2) grid = grid.max(-1)[0].max(-2)[0] else: grid = grid.view(b, -1, l, grid_size, grid_size) grid = grid.permute(0, 2, 3, 4, 1) return grid def paste_grid(seg_mask, boxes, x_size): # seg_mask: l x 7 x 7 # boxes: l x 4 assert seg_mask.dim() == 3 assert boxes.shape[0] == seg_mask.shape[0] nq = boxes.shape[0] h, w = x_size x1, y1, x2, y2 = boxes.unsqueeze(-2).unsqueeze(-2).unbind(-1) img_x = torch.arange(w, device=boxes.device, dtype=boxes.dtype) + 0.5 img_y = torch.arange(h, device=boxes.device, dtype=boxes.dtype) + 0.5 img_y, img_x = torch.meshgrid(img_y, img_x, indexing="ij") # l x h x w img_y = (img_y - y1) / (y2 - y1) * 2 - 1 img_x = 
(img_x - x1) / (x2 - x1) * 2 - 1 img_grid = torch.stack([img_x, img_y], dim=-1) img_grid = img_grid.view(nq, h, w, 2) img = F.grid_sample(seg_mask[:, None], img_grid, align_corners=False) img = img.view(nq, h, w) return img def convert_coco_poly_to_mask(segmentations, height, width): masks = [] for polygons in segmentations: rles = coco_mask.frPyObjects(polygons, height, width) mask = coco_mask.decode(rles) if len(mask.shape) < 3: mask = mask[..., None] mask = torch.as_tensor(mask, dtype=torch.uint8) mask = mask.any(dim=2) masks.append(mask) if masks: masks = torch.stack(masks, dim=0) else: masks = torch.zeros((0, height, width), dtype=torch.uint8) return masks def mask_process(pred_mask, img_size, output_height, output_width): pred_mask = pred_mask[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1) pred_mask = F.interpolate( pred_mask, size=(output_height, output_width), mode="bilinear", align_corners=False, )[0] return pred_mask def get_uncertain_point_coords_with_randomness( coarse_logits, uncertainty_func, num_points, oversample_ratio, importance_sample_ratio, logits_mask=None, ): """ Sample points in [0, 1] x [0, 1] coordinate space based on their uncertainty. The unceratinties are calculated for each point using 'uncertainty_func' function that takes point's logit prediction as input. See PointRend paper for details. Args: coarse_logits (Tensor): A tensor of shape (N, C, Hmask, Wmask) or (N, 1, Hmask, Wmask) for class-specific or class-agnostic prediction. uncertainty_func: A function that takes a Tensor of shape (N, C, P) or (N, 1, P) that contains logit predictions for P points and returns their uncertainties as a Tensor of shape (N, 1, P). num_points (int): The number of points P to sample. oversample_ratio (int): Oversampling parameter. importance_sample_ratio (float): Ratio of points that are sampled via importnace sampling. logits_mask (Tensor): A tensor of shape (N, Hmask, Wmask) for masking the padded region. 
Returns: point_coords (Tensor): A tensor of shape (N, P, 2) that contains the coordinates of P sampled points. """ assert oversample_ratio >= 1 assert importance_sample_ratio <= 1 and importance_sample_ratio >= 0 num_boxes = coarse_logits.shape[0] num_sampled = int(num_points * oversample_ratio) point_coords = torch.rand(num_boxes, num_sampled, 2, device=coarse_logits.device) point_logits = point_sample( coarse_logits, point_coords, input_mask=logits_mask, align_corners=False ) # It is crucial to calculate uncertainty based on the sampled prediction value for the points. # Calculating uncertainties of the coarse predictions first and sampling them for points leads # to incorrect results. # To illustrate this: assume uncertainty_func(logits)=-abs(logits), a sampled point between # two coarse predictions with -1 and 1 logits has 0 logits, and therefore 0 uncertainty value. # However, if we calculate uncertainties for the coarse predictions first, # both will have -1 uncertainty, and the sampled point will get -1 uncertainty. point_uncertainties = uncertainty_func(point_logits) num_uncertain_points = int(importance_sample_ratio * num_points) num_random_points = num_points - num_uncertain_points idx = torch.topk(point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1] shift = num_sampled * torch.arange( num_boxes, dtype=torch.long, device=coarse_logits.device ) idx += shift[:, None] point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view( num_boxes, num_uncertain_points, 2 ) if num_random_points > 0: point_coords = torch.cat( [ point_coords, torch.rand( num_boxes, num_random_points, 2, device=coarse_logits.device ), ], dim=1, ) return point_coords def calculate_uncertainty(logits): """ We estimate uncerainty as L1 distance between 0.0 and the logit prediction in 'logits' for the foreground class in `classes`. Args: logits (Tensor): A tensor of shape (R, 1, ...) 
for class-specific or class-agnostic, where R is the total number of predicted masks in all images and C is the number of foreground classes. The values are logits. Returns: scores (Tensor): A tensor of shape (R, 1, ...) that contains uncertainty scores with the most uncertain locations having the highest uncertainty score. """ assert logits.shape[1] == 1 gt_class_logits = logits.clone() return -(torch.abs(gt_class_logits)) def point_sample(input, point_coords, input_mask=None, **kwargs): """ A wrapper around :function:`torch.nn.functional.grid_sample` to support 3D point_coords tensors. Unlike :function:`torch.nn.functional.grid_sample` it assumes `point_coords` to lie inside [0, 1] x [0, 1] square. Args: input (Tensor): A tensor of shape (N, C, H, W) that contains features map on a H x W grid. point_coords (Tensor): A tensor of shape (N, P, 2) or (N, Hgrid, Wgrid, 2) that contains [0, 1] x [0, 1] normalized point coordinates. input_mask (Tensor): A mask of input (N, H, W). Returns: output (Tensor): A tensor of shape (N, C, P) or (N, C, Hgrid, Wgrid) that contains features for points in `point_coords`. The features are obtained via bilinear interplation from `input` the same way as :function:`torch.nn.functional.grid_sample`. 
""" if input_mask is not None: not_mask = ~input_mask size_h = not_mask[:, :, 0].sum(dim=1, dtype=input.dtype) size_w = not_mask[:, 0, :].sum(dim=1, dtype=input.dtype) h, w = input.shape[-2:] ratio_h = size_h / h ratio_w = size_w / w ratio = torch.stack([ratio_w, ratio_h], dim=-1) if point_coords.dim() == 3: point_coords = point_coords * ratio.unsqueeze(1) elif point_coords.dim() == 4: point_coords = point_coords * ratio.unsqueeze(1).unsqueeze(2) add_dim = False if point_coords.dim() == 3: add_dim = True point_coords = point_coords.unsqueeze(2) output = F.grid_sample(input, 2.0 * point_coords - 1.0, **kwargs) if add_dim: output = output.squeeze(3) return output def iou_with_ign(boxes1, boxes2) -> torch.Tensor: """ Computes the amount of overlap of boxes2 has within boxes1, which is handy for dealing with ignore areas. Hence, assume that boxes2 are ignore regions and boxes1 are anchor boxes, then we may want to know how much overlap the anchors have inside the ignore regions boxes2. boxes1: (M, 4) [x1, y1, x2, y2] boxes2: (N, 4) [x1, y1, x2, y2] mode: if 'elementwise', M needs to be equal to N and we compute intersection of M pairs in boxes1 and boxes2 elementwise. Otherwise, we compute intersection of NxM pairs. """ area1 = box_area(boxes1) intersect = box_intersect(boxes1, boxes2) iou_w_ign = intersect / area1 return iou_w_ign def proposals_to_boxes(proposals, masks) -> torch.Tensor: """ Compute the bounding boxes around the provided masks The proposals should be in format [N, L, H, W] where N is the number of masks, (H, W) are the spatial dimensions. 
The masks should be in format [N, H, W] Returns a [N, L, 4] tensors, with the boxes in xyxy format """ if proposals.numel() == 0: return torch.zeros(0, 0, 4, device=proposals.device) n, l, h, w = proposals.shape if masks is None: not_masks = proposals.new_ones(n, h, w, dtype=torch.bool) else: not_masks = ~masks y = torch.arange(h, dtype=proposals.dtype, device=proposals.device) x = torch.arange(w, dtype=proposals.dtype, device=proposals.device) y, x = torch.meshgrid(y, x, indexing="ij") proposals = proposals * not_masks.float().unsqueeze(1) proposals = (proposals.view(n * l, h, w) >= 0.5).float() x_mask = proposals * x.unsqueeze(0) x_max = x_mask.flatten(1).max(-1)[0] x_min = x_mask.masked_fill(~(proposals.bool()), 1e8).flatten(1).min(-1)[0] y_mask = proposals * y.unsqueeze(0) y_max = y_mask.flatten(1).max(-1)[0] y_min = y_mask.masked_fill(~(proposals.bool()), 1e8).flatten(1).min(-1)[0] boxes = torch.stack([x_min, y_min, x_max, y_max], 1).view(n, l, 4) img_h = not_masks[:, :, 0].sum(dim=-1, dtype=proposals.dtype) img_w = not_masks[:, 0, :].sum(dim=-1, dtype=proposals.dtype) img_size = torch.stack([img_w, img_h, img_w, img_h], dim=-1).unsqueeze(1) return boxes / img_size
21,006
33.494253
100
py
null
r-mae-main/pretrain/utils/checkpoint.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import os import glob import warnings import torch from omegaconf import OmegaConf TORCH_VERSION = tuple(int(x) for x in torch.__version__.split(".")[:2]) from pretrain.utils.distributed import is_master def load_pretraind_state_dict(state_dict, trainer, strict=False): model_dict = {} # remove 'module.' in module keys of state_dict for name in state_dict["model"]: processed_name = name if name.startswith("module."): processed_name = processed_name.replace("module.", "", 1) if name.startswith("_orig_mod.") and ( TORCH_VERSION < (2, 0) or not trainer.running_config.use_compile ): processed_name = processed_name.replace("_orig_mod.", "", 1) model_dict[processed_name] = state_dict["model"][name] print( "Loading model checkpoint:", trainer.model_without_ddp.load_state_dict(model_dict, strict=strict), ) if "optimizer" in state_dict: trainer.optimizer.load_state_dict(state_dict["optimizer"]) else: warnings.warn( "'optimizer' key is not present in the " "checkpoint asked to be loaded. Skipping." ) if "update" in state_dict: trainer.current_update = state_dict["update"] trainer.current_epoch = state_dict["epoch"] else: warnings.warn( "'update' and 'epoch' key is not present in the " "checkpoint asked to be loaded. Skipping." ) if "lr_scheduler" in state_dict: trainer.lr_scheduler.load_state_dict(state_dict["lr_scheduler"]) else: warnings.warn( "'lr_scheduler' key is not present in the " "checkpoint asked to be loaded. Skipping." 
) if "grad_scaler" in state_dict and trainer.grad_scaler is not None: trainer.grad_scaler.load_state_dict(state_dict["grad_scaler"]) del state_dict del model_dict class Checkpoint: def __init__(self, trainer): self.trainer = trainer self.config = self.trainer.config self.save_dir = self.config.training.save_dir self.num_checkpoint = self.config.training.num_checkpoint self.model_name = self.config.model self.ckpt_foldername = self.save_dir self.device = trainer.device self.ckpt_prefix = "" if hasattr(self.trainer.model, "get_ckpt_name"): self.ckpt_prefix = self.trainer.model.get_ckpt_name() + "_" self.pth_filepath = os.path.join( self.ckpt_foldername, self.ckpt_prefix + self.model_name + "_final.pth" ) self.models_foldername = os.path.join(self.ckpt_foldername, "models") if is_master(): if not os.path.exists(self.models_foldername): os.makedirs(self.models_foldername, exist_ok=True) self.save_config() def _process_config(self): save_config = OmegaConf.create(OmegaConf.to_yaml(self.config, resolve=True)) save_config.distributed.init_method = None save_config.distributed.rank = 0 save_config.distributed.port = -1 save_config.distributed.backend = "nccl" save_config.distributed.world_size = 1 save_config.distributed.no_spawn = False return save_config def save_config(self): cfg_file = os.path.join(self.ckpt_foldername, "config.yaml") save_config = self._process_config() with open(cfg_file, "w") as f: f.write(OmegaConf.to_yaml(save_config, resolve=True)) def _extract_iter(self, path): return int(path.split("_")[-1].split(".")[0]) def load_state_dict(self): tp = self.config.training if tp.resume: if tp.resume_file is not None: self.trainer.writer.write( "Loading weights from {}".format(tp.resume_file) ) if os.path.exists(tp.resume_file): self._load(tp.resume_file) return True else: raise RuntimeError("{} doesn't exist".format(tp.resume_file)) else: self.trainer.writer.write("Loading weights the last checkpoint") ckpt_file_paths = sorted( 
glob.glob(os.path.join(self.models_foldername, "model_*.ckpt")), key=self._extract_iter, ) if len(ckpt_file_paths) > 0: ckpt_filepath = ckpt_file_paths[-1] self._load(ckpt_filepath) return True else: warnings.warn("No checkpoint found!") return False def _load(self, file): self.trainer.writer.write("Loading checkpoint") ckpt = self._torch_load(file) if "model" in ckpt: state_dict = ckpt else: state_dict = {"model": ckpt} load_pretraind_state_dict(state_dict, self.trainer) if "cuda" in str(self.device): torch.cuda.empty_cache() self.trainer.writer.write("Checkpoint loaded") def _torch_load(self, file): if "cuda" in str(self.device): return torch.load(file, map_location=self.device) else: return torch.load(file, map_location=lambda storage, loc: storage) def save(self, update): # Only save in main process if not is_master(): return ckpt_filepath = os.path.join(self.models_foldername, "model_%d.ckpt" % update) ckpt = { "model": self.trainer.model_without_ddp.state_dict(), "optimizer": self.trainer.optimizer.state_dict(), "lr_scheduler": self.trainer.lr_scheduler.state_dict(), "epoch": self.trainer.current_epoch, "update": self.trainer.current_update, "config": OmegaConf.to_container(self.config, resolve=True), } if self.trainer.grad_scaler is not None: ckpt["grad_scaler"] = self.trainer.grad_scaler.state_dict() torch.save(ckpt, ckpt_filepath) ckpt_file_paths = sorted( glob.glob(os.path.join(self.models_foldername, "model_*.ckpt")), key=self._extract_iter, reverse=True, ) while len(ckpt_file_paths) > self.num_checkpoint: file_path = ckpt_file_paths.pop() os.remove(file_path) def finalize(self): if is_master(): torch.save(self.trainer.model_without_ddp.state_dict(), self.pth_filepath)
6,758
32.132353
86
py
null
r-mae-main/pretrain/utils/configuration.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import collections import json import os import warnings from ast import literal_eval import torch from omegaconf import OmegaConf from pretrain.utils.general import get_root def load_yaml(f): # Convert to absolute path for loading includes f = os.path.abspath(f) mapping = OmegaConf.load(f) if mapping is None: mapping = OmegaConf.create() includes = mapping.get("includes", []) if not isinstance(includes, collections.abc.Sequence): raise AttributeError( "Includes must be a list, {} provided".format(type(includes)) ) include_mapping = OmegaConf.create() root_dir = get_root() for include in includes: original_include_path = include include = os.path.join(root_dir, include) print("include path:", include) if not os.path.exists(include): include = os.path.join(os.path.dirname(f), original_include_path) current_include_mapping = load_yaml(include) include_mapping = OmegaConf.merge(include_mapping, current_include_mapping) mapping.pop("includes", None) mapping = OmegaConf.merge(include_mapping, mapping) return mapping class Configuration: def __init__(self, args): self.config = {} self.args = args self._register_resolvers() default_config = self._build_default_config() user_config = self._build_user_config(args.config) self._default_config = default_config self._user_config = user_config self.config = OmegaConf.merge(default_config, user_config) self.config = self._merge_with_dotlist(self.config, args.opts) self._update_specific(self.config, args) def _build_default_config(self): self.default_config_path = self._get_default_config_path() default_config = load_yaml(self.default_config_path) return default_config def _build_user_config(self, config_path): user_config = {} # Update user_config with opts if passed self.config_path = config_path if self.config_path is not None: 
user_config = load_yaml(self.config_path) return user_config def get_config(self): self._register_resolvers() return self.config def _register_resolvers(self): OmegaConf.clear_resolvers() # Device count resolver device_count = max(1, torch.cuda.device_count()) OmegaConf.register_new_resolver("device_count", lambda: device_count) def _merge_with_dotlist(self, config, opts): # TODO: To remove technical debt, a possible solution is to use # struct mode to update with dotlist OmegaConf node. Look into this # in next iteration if opts is None: opts = [] if len(opts) == 0: return config # Support equal e.g. model=visual_bert for better future hydra support has_equal = opts[0].find("=") != -1 if has_equal: opt_values = [opt.split("=") for opt in opts] else: assert len(opts) % 2 == 0, "Number of opts should be multiple of 2" opt_values = zip(opts[0::2], opts[1::2]) for opt, value in opt_values: splits = opt.split(".") current = config for idx, field in enumerate(splits): array_index = -1 if field.find("[") != -1 and field.find("]") != -1: stripped_field = field[: field.find("[")] array_index = int(field[field.find("[") + 1 : field.find("]")]) else: stripped_field = field if stripped_field not in current: raise AttributeError( "While updating configuration" " option {} is missing from" " configuration at field {}".format(opt, stripped_field) ) if isinstance(current[stripped_field], collections.abc.Mapping): current = current[stripped_field] elif ( isinstance(current[stripped_field], collections.abc.Sequence) and array_index != -1 ): current_value = current[stripped_field][array_index] # Case where array element to be updated is last element if not isinstance( current_value, (collections.abc.Mapping, collections.abc.Sequence), ): print("Overriding option {} to {}".format(opt, value)) current[stripped_field][array_index] = self._decode_value(value) else: # Otherwise move on down the chain current = current_value else: if idx == len(splits) - 1: print("Overriding option {} to 
{}".format(opt, value)) current[stripped_field] = self._decode_value(value) else: raise AttributeError( "While updating configuration", "option {} is not present " "after field {}".format(opt, stripped_field), ) return config def _decode_value(self, value): # https://github.com/rbgirshick/yacs/blob/master/yacs/config.py#L400 if not isinstance(value, str): return value if value == "None": value = None try: value = literal_eval(value) except ValueError: pass except SyntaxError: pass return value def freeze(self): OmegaConf.set_struct(self.config, True) def defrost(self): OmegaConf.set_struct(self.config, False) def pretty_print(self, writer=None): if not self.config.training.log_detailed_config: return if writer is not None: self.writer = writer self.writer.write("===== Training Parameters =====", "info") self.writer.write(self._convert_node_to_json(self.config.training), "info") self.writer.write(self._convert_node_to_json(self.config.distributed), "info") self.writer.write("====== Dataset Attributes ======", "info") task = self.config.task if task in self.config.dataset_config: self.writer.write("======== {} =======".format(task), "info") dataset_config = self.config.dataset_config[task] self.writer.write(self._convert_node_to_json(dataset_config), "info") else: self.writer.write( "No dataset named '{}' in config. 
Skipping".format(task), "warning", ) self.writer.write("====== Optimizer Attributes ======", "info") self.writer.write(self._convert_node_to_json(self.config.optimizer), "info") self.writer.write("====== LR_Scheduler Attributes ======", "info") self.writer.write(self._convert_node_to_json(self.config.scheduler), "info") if self.config.model not in self.config.model_config: raise ValueError( "{} not present in model attributes".format(self.config.model) ) self.writer.write( "====== Model ({}) Attributes ======".format(self.config.model), "info" ) self.writer.write( self._convert_node_to_json(self.config.model_config[self.config.model]), "info", ) def _convert_node_to_json(self, node): container = OmegaConf.to_container(node, resolve=True) return json.dumps(container, indent=4, sort_keys=True) def _get_default_config_path(self): directory = os.path.dirname(os.path.abspath(__file__)) return os.path.join(directory, "..", "config", "default.yaml") def _update_specific(self, config, args): if not torch.cuda.is_available() and "cuda" in config.training.device: warnings.warn( "Device specified is 'cuda' but cuda is not present. Switching to CPU version" ) config.training.device = "cpu" # update task and model to config config.task = args.task config.model = args.model config.dataset = args.dataset return config
8,726
34.189516
94
py
null
r-mae-main/pretrain/utils/distributed.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import os import numpy as np import socket import subprocess import warnings import functools import torch from torch import distributed as dist def synchronize(): if not dist.is_available(): return if not dist.is_initialized(): return world_size = dist.get_world_size() if world_size == 1: return if dist.get_backend() == dist.Backend.NCCL: # This argument is needed to avoid warnings. # It's valid only for NCCL backend. dist.barrier(device_ids=[torch.cuda.current_device()]) else: dist.barrier() def get_rank(): if not dist.is_nccl_available(): return 0 if not dist.is_initialized(): return 0 return dist.get_rank() def is_master(): return get_rank() == 0 def is_dist_avail_and_initialized(): if not dist.is_available(): return False if not dist.is_initialized(): return False return True def get_world_size(): if not dist.is_nccl_available(): return 1 if not dist.is_initialized(): return 1 return dist.get_world_size() def broadcast_tensor(tensor, src=0): world_size = get_world_size() if world_size < 2: return tensor with torch.no_grad(): dist.broadcast(tensor, src=0) return tensor def broadcast_scalar(scalar, src=0, device="cpu"): scalar_tensor = torch.tensor(scalar).to(device) scalar_tensor = broadcast_tensor(scalar_tensor, src) return scalar_tensor.item() def reduce_tensor(tensor): world_size = get_world_size() if world_size < 2: return tensor with torch.no_grad(): dist.reduce(tensor, dst=0) if dist.get_rank() == 0: tensor = tensor.div(world_size) return tensor def gather_tensor(tensor): world_size = get_world_size() if world_size < 2: return tensor with torch.no_grad(): tensor_list = [] for _ in range(world_size): tensor_list.append(torch.zeros_like(tensor)) dist.all_gather(tensor_list, tensor) tensor_list = torch.stack(tensor_list, dim=0) return tensor_list def 
reduce_dict(dictionary): world_size = get_world_size() if world_size < 2: return dictionary with torch.no_grad(): keys, values = zip(*sorted(dictionary.items())) values = torch.stack(values, dim=0) dist.reduce(values, dst=0) if dist.get_rank() == 0: # only main process gets accumulated, so only divide by # world_size in this case values /= world_size reduced_dict = {k: v for k, v in zip(keys, values)} return reduced_dict @functools.lru_cache() def _get_global_gloo_group(): """ Return a process group based on gloo backend, containing all the ranks The result is cached. """ if dist.get_backend() == "nccl": return dist.new_group(backend="gloo") else: return dist.group.WORLD def gather(data, dst=0, group=None): """ Run gather on arbitrary picklable data (not necessarily tensors). Args: data: any picklable object dst (int): destination rank group: a torch process group. By default, will use a group which contains all ranks on gloo backend. Returns: list[data]: on dst, a list of data gathered from each rank. Otherwise, an empty list. """ if get_world_size() == 1: return [data] if group is None: group = _get_global_gloo_group() world_size = dist.get_world_size(group=group) if world_size == 1: return [data] rank = dist.get_rank(group=group) if rank == dst: output = [None for _ in range(world_size)] dist.gather_object(data, output, dst=dst, group=group) return output else: dist.gather_object(data, None, dst=dst, group=group) return [] def all_reduce_dict(dictionary): world_size = get_world_size() if world_size < 2: return dictionary with torch.no_grad(): keys, values = zip(*sorted(dictionary.items())) values = torch.stack(values, dim=0) dist.all_reduce(values) values /= world_size reduced_dict = {k: v for k, v in zip(keys, values)} return reduced_dict def all_gather(data, group=None): """ Run all_gather on arbitrary picklable data (not necessarily tensors). Args: data: any picklable object group: a torch process group. 
By default, will use a group which contains all ranks on gloo backend. Returns: list[data]: list of data gathered from each rank """ if get_world_size() == 1: return [data] if group is None: group = ( _get_global_gloo_group() ) # use CPU group by default, to reduce GPU RAM usage. world_size = dist.get_world_size(group) if world_size == 1: return [data] output = [None for _ in range(world_size)] dist.all_gather_object(output, data, group=group) return output def shared_random_seed(): """ Returns: int: a random number that is the same across all workers. If workers need a shared RNG, they can use this shared seed to create one. All workers must call this function, otherwise it will deadlock. """ ints = np.random.randint(2 ** 31) all_ints = all_gather(ints) return all_ints[0] def infer_init_method(config): if config.distributed.init_method is not None: return # support torch.distributed.launch if all( key in os.environ for key in ["MASTER_ADDR", "MASTER_PORT", "WORLD_SIZE", "RANK"] ): print("support launch") config.distributed.init_method = "env://" config.distributed.world_size = int(os.environ["WORLD_SIZE"]) config.distributed.rank = int(os.environ["RANK"]) # we can determine the init method automatically for Slurm else: node_list = os.environ.get("SLURM_STEP_NODELIST") if node_list is None: node_list = os.environ.get("SLURM_JOB_NODELIST") if node_list is not None: if config.distributed.port < 0: config.distributed.port = 16749 try: nnodes = int(os.environ.get("SLURM_NNODES")) # don't need to initialize distributed training on a single gpu if nnodes == 1 and torch.cuda.device_count() == 1: return hostnames = subprocess.check_output( ["scontrol", "show", "hostnames", node_list] ) config.distributed.init_method = "tcp://{host}:{port}".format( host=hostnames.split()[0].decode("utf-8"), port=config.distributed.port, ) ntasks_per_node = os.environ.get("SLURM_NTASKS_PER_NODE") if ntasks_per_node is not None: ntasks_per_node = int(ntasks_per_node) else: ntasks = 
int(os.environ.get("SLURM_NTASKS")) assert ntasks % nnodes == 0, f"ntasks: {ntasks}, nnodes: {nnodes}" ntasks_per_node = int(ntasks / nnodes) gpus_per_node = torch.cuda.device_count() config.distributed.world_size = nnodes * gpus_per_node if ntasks_per_node == 1: node_id = int(os.environ.get("SLURM_NODEID")) config.distributed.rank = node_id * gpus_per_node else: assert ( gpus_per_node == ntasks_per_node ), f"gpus_per_node: {gpus_per_node}, ntasks_per_node: {ntasks_per_node}" config.distributed.no_spawn = True config.distributed.rank = int(os.environ.get("SLURM_PROCID")) config.device_id = int(os.environ.get("SLURM_LOCALID")) except subprocess.CalledProcessError as e: # scontrol failed raise e except FileNotFoundError: # Slurm is not installed pass def distributed_init(config): if config.distributed.world_size == 1: raise ValueError("Cannot initialize distributed with distributed_world_size=1") if dist.is_initialized(): warnings.warn("Distributed is already initialized, cannot initialize twice!") else: print( "Distributed Init (Rank {}): {}".format( config.distributed.rank, config.distributed.init_method ), flush=True, ) dist.init_process_group( backend=config.distributed.backend, init_method=config.distributed.init_method, world_size=config.distributed.world_size, rank=config.distributed.rank, ) print( "Initialized Host {} as Rank {}".format( socket.gethostname(), config.distributed.rank ), flush=True, ) # perform a dummy all-reduce to initialize the NCCL communicator dist.all_reduce(torch.zeros(1).cuda()) suppress_output(is_master()) config.distributed.rank = dist.get_rank() return config.distributed.rank def suppress_output(is_master): """Suppress printing on the current device. 
Force printing with `force=True`.""" import builtins as __builtin__ builtin_print = __builtin__.print def print(*args, **kwargs): force = kwargs.pop("force", False) if is_master or force: builtin_print(*args, **kwargs) __builtin__.print = print import warnings builtin_warn = warnings.warn def warn(*args, **kwargs): force = kwargs.pop("force", False) if is_master or force: builtin_warn(*args, **kwargs) # Log warnings only once warnings.warn = warn warnings.simplefilter("once", UserWarning)
10,206
27.997159
92
py
null
r-mae-main/pretrain/utils/env.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import argparse import os import random import datetime import numpy as np import torch import torch.backends.cudnn as cudnn from pretrain.dataset import get_task_list from pretrain.model import get_arch_list from pretrain.utils.distributed import get_rank def set_seed(seed): if seed: if seed == -1: # From detectron2 seed = ( os.getpid() + int(datetime.datetime.now().strftime("%S%f")) + int.from_bytes(os.urandom(2), "big") ) else: seed = seed + get_rank() np.random.seed(seed) torch.manual_seed(seed) random.seed(seed) cudnn.benchmark = True return seed def _configure_libraries(): # Disable opencl in opencv since its interaction with cuda often has negative effects # This envvar is supported after OpenCV 3.4.0 os.environ["OPENCV_OPENCL_RUNTIME"] = "disabled" try: import cv2 if int(cv2.__version__.split(".")[0]) >= 3: cv2.ocl.setUseOpenCL(False) except ModuleNotFoundError: # Other types of ImportError, if happened, should not be ignored. # Because a failed opencv import could mess up address space # https://github.com/skvark/opencv-python/issues/381 pass def get_version(module, digit=2): return tuple(map(int, module.__version__.split(".")[:digit])) import yaml assert get_version(yaml) >= (5, 1), "Requires pyyaml>=5.1" assert get_version(torch) >= (1, 4), "Requires torch>=1.4" _ENV_SETUP_DONE = False def setup_environment(): """Perform environment setup work. The default setup is a no-op, but this function allows the user to specify a Python source file or a module in the $DETECTRON2_ENV_MODULE environment variable, that performs custom setup work that may be necessary to their computing environment. 
""" global _ENV_SETUP_DONE if _ENV_SETUP_DONE: return _ENV_SETUP_DONE = True _configure_libraries() def get_parser(): parser = argparse.ArgumentParser() parser.add_argument_group("Core Arguments") parser.add_argument( "--config", type=str, required=True, help="Path to the config file." ) parser.add_argument( "--model", type=str, required=True, choices=get_arch_list(), help="The architecture for training and testing.", ) parser.add_argument( "--task", type=str, required=True, choices=get_task_list(), help="The working task.", ) parser.add_argument( "--dataset", type=str, required=True, help="The corresponding dataset.", ) parser.add_argument( "opts", default=None, nargs="*", help="Modify config options from command line", ) return parser
3,093
25.672414
89
py
null
r-mae-main/pretrain/utils/functional.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import math import warnings import collections from functools import partial import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.init import _calculate_fan_in_and_fan_out string_classes = str ############################################# # init # ############################################# def _no_grad_trunc_normal_(tensor, mean, std, a, b): # Cut & paste from PyTorch official master until it's in a few official releases - RW # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf def norm_cdf(x): # Computes standard normal cumulative distribution function return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0 if (mean < a - 2 * std) or (mean > b + 2 * std): warnings.warn( "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " "The distribution of values may be incorrect.", stacklevel=2, ) with torch.no_grad(): # Values are generated by using a truncated uniform distribution and # then using the inverse CDF for the normal distribution. # Get upper and lower cdf values l = norm_cdf((a - mean) / std) u = norm_cdf((b - mean) / std) # Uniformly fill tensor with values from [l, u], then translate to # [2l-1, 2u-1]. tensor.uniform_(2 * l - 1, 2 * u - 1) # Use inverse cdf transform for normal distribution to get truncated # standard normal tensor.erfinv_() # Transform to proper mean, std tensor.mul_(std * math.sqrt(2.0)) tensor.add_(mean) # Clamp to ensure it's in the proper range tensor.clamp_(min=a, max=b) return tensor def trunc_normal_( tensor: torch.Tensor, mean: float = 0.0, std: float = 1.0, a: float = -2.0, b: float = 2.0, ) -> torch.Tensor: r"""Fills the input Tensor with values drawn from a truncated normal distribution. 
The values are effectively drawn from the normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values outside :math:`[a, b]` redrawn until they are within the bounds. The method used for generating the random values works best when :math:`a \leq \text{mean} \leq b`. Args: tensor: an n-dimensional `torch.Tensor` mean: the mean of the normal distribution std: the standard deviation of the normal distribution a: the minimum cutoff value b: the maximum cutoff value Examples: >>> w = torch.empty(3, 5) >>> nn.init.trunc_normal_(w) """ return _no_grad_trunc_normal_(tensor, mean, std, a, b) def variance_scaling_(tensor, scale=1.0, mode="fan_in", distribution="normal"): fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) if mode == "fan_in": denom = fan_in elif mode == "fan_out": denom = fan_out elif mode == "fan_avg": denom = (fan_in + fan_out) / 2 variance = scale / denom if distribution == "truncated_normal": # constant is stddev of standard normal truncated to (-2, 2) nn.init.trunc_normal_(tensor, std=math.sqrt(variance) / 0.87962566103423978) elif distribution == "normal": tensor.normal_(std=math.sqrt(variance)) elif distribution == "uniform": bound = math.sqrt(3 * variance) tensor.uniform_(-bound, bound) else: raise ValueError(f"invalid distribution {distribution}") def lecun_normal_(tensor): variance_scaling_(tensor, mode="fan_in", distribution="truncated_normal") def init_weights_vit(module: nn.Module, name: str = ""): """ViT weight initialization, original impl (for reproducibility)""" if isinstance(module, nn.Linear): if hasattr(module, "final_linear") and module.final_linear: print("final_linear") nn.init.constant_(module.weight, 0.0) else: nn.init.trunc_normal_(module.weight, std=0.02) if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, nn.Conv2d): if hasattr(module, "final_conv") and module.final_conv: print("final_conv") nn.init.constant_(module.weight, 0.0) else: nn.init.kaiming_normal_(module.weight, 
mode="fan_out", nonlinearity="relu") if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm2d)): nn.init.constant_(module.bias, 0.0) nn.init.constant_( module.weight, 0.0 if hasattr(module, "final_norm") and module.final_norm else 1.0, ) if hasattr(module, "final_norm") and module.final_norm: print("final_norm") if ( hasattr(module, "final_norm") or hasattr(module, "final_conv") or hasattr(module, "final_linear") ): nn.init.constant_(module.weight, 0.0) if module.bias is not None: nn.init.zeros_(module.bias) print("final: zero_init") def init_weights_vit_jax(module: nn.Module, name: str = "", head_bias: float = 0.0): """ViT weight initialization, matching JAX (Flax) impl""" if isinstance(module, nn.Linear): if name.startswith("head"): nn.init.zeros_(module.weight) nn.init.constant_(module.bias, head_bias) elif name.startswith("pre_logits"): lecun_normal_(module.weight) nn.init.zeros_(module.bias) else: nn.init.xavier_uniform_(module.weight) if module.bias is not None: nn.init.normal_( module.bias, std=1e-6 ) if "mlp" in name else nn.init.zeros_(module.bias) elif isinstance(module, nn.Conv2d): lecun_normal_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) def init_weights_vit_moco(module: nn.Module, name: str = ""): """ViT weight initialization, matching moco-v3 impl minus fixed PatchEmbed""" if isinstance(module, nn.Linear): if "qkv" in name: # treat the weights of Q, K, V separately val = math.sqrt( 6.0 / float(module.weight.shape[0] // 3 + module.weight.shape[1]) ) nn.init.uniform_(module.weight, -val, val) else: nn.init.xavier_uniform_(module.weight) if module.bias is not None: nn.init.zeros_(module.bias) def get_init_weights_vit(mode="jax", head_bias: float = 0.0): if "jax" in mode: return partial(init_weights_vit_jax, head_bias=head_bias) elif "moco" in mode: return init_weights_vit_moco else: return init_weights_vit ############################################# # ViT # 
############################################# def patchify(imgs, patch_size, padding=True, channels=3): """ imgs: (N, 3, H, W) x: (N, L, patch_size**2 *3) """ assert imgs.shape[2] == imgs.shape[3] assert padding or imgs.shape[2] % patch_size == 0 if padding and imgs.shape[2] % patch_size != 0: num_patch = math.ceil(float(imgs.shape[2]) / patch_size) pad_size = num_patch * patch_size - imgs.shape[2] imgs = F.pad(imgs, (0, pad_size, 0, pad_size)) h = w = imgs.shape[2] // patch_size x = imgs.reshape(shape=(imgs.shape[0], channels, h, patch_size, w, patch_size)) x = torch.einsum("nchpwq->nhwpqc", x) x = x.reshape(shape=(imgs.shape[0], h * w, (patch_size**2) * channels)) return x def unpatchify(x, patch_size, channels=3): """ x: (N, L, patch_size**2 *3) imgs: (N, 3, H, W) """ h = w = int(x.shape[1] ** 0.5) assert h * w == x.shape[1] x = x.reshape(shape=(x.shape[0], h, w, patch_size, patch_size, channels)) x = torch.einsum("nhwpqc->nchpwq", x) imgs = x.reshape(shape=(x.shape[0], channels, h * patch_size, w * patch_size)) return imgs def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False): """ grid_size: int of the grid height and width return: pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token) """ grid_h = np.arange(grid_size, dtype=np.float32) grid_w = np.arange(grid_size, dtype=np.float32) grid = np.meshgrid(grid_w, grid_h) # here w goes first grid = np.stack(grid, axis=0) grid = grid.reshape([2, 1, grid_size, grid_size]) pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid) if cls_token: pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0) return pos_embed def get_2d_sincos_pos_embed_from_grid(embed_dim, grid): assert embed_dim % 2 == 0 # use half of dimensions to encode grid_h emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2) emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2) emb = np.concatenate([emb_h, emb_w], 
axis=1) # (H*W, D) return emb def get_1d_sincos_pos_embed_from_grid(embed_dim, pos): """ embed_dim: output dimension for each position pos: a list of positions to be encoded: size (M,) out: (M, D) """ assert embed_dim % 2 == 0 omega = np.arange(embed_dim // 2, dtype=np.float32) omega /= embed_dim / 2.0 omega = 1.0 / 10000**omega # (D/2,) pos = pos.reshape(-1) # (M,) out = np.einsum("m,d->md", pos, omega) # (M, D/2), outer product emb_sin = np.sin(out) # (M, D/2) emb_cos = np.cos(out) # (M, D/2) emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D) return emb # -------------------------------------------------------- # Interpolate position embeddings for high-resolution # References: # DeiT: https://github.com/facebookresearch/deit # -------------------------------------------------------- def interpolate_pos_embed(model, checkpoint_model): if "pos_embed" in checkpoint_model: pos_embed_checkpoint = checkpoint_model["pos_embed"] embedding_size = pos_embed_checkpoint.shape[-1] num_patches = model.patch_embed.num_patches num_extra_tokens = model.pos_embed.shape[-2] - num_patches # height (== width) for the checkpoint position embedding orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5) # height (== width) for the new position embedding new_size = int(num_patches**0.5) # class_token and dist_token are kept unchanged if orig_size != new_size: print( "Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size) ) extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] # only the position tokens are interpolated pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] pos_tokens = pos_tokens.reshape( -1, orig_size, orig_size, embedding_size ).permute(0, 3, 1, 2) pos_tokens = torch.nn.functional.interpolate( pos_tokens, size=(new_size, new_size), mode="bicubic", align_corners=False, ) pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) 
checkpoint_model["pos_embed"] = new_pos_embed def get_mae_pos_embed(proposals, hidden_dim): assert hidden_dim % 2 == 0 embed_dim = hidden_dim // 2 omega = torch.arange(embed_dim // 2, dtype=proposals.dtype, device=proposals.device) omega /= embed_dim / 2.0 omega = 1.0 / 10000**omega pos_x, pos_y = proposals.unbind(-1) pos_x = pos_x.reshape(-1) pos_y = pos_y.reshape(-1) out_x = torch.einsum("m,d->md", pos_x, omega) out_y = torch.einsum("m,d->md", pos_y, omega) emb_sin_x = torch.sin(out_x) emb_cos_x = torch.cos(out_x) emb_sin_y = torch.sin(out_y) emb_cos_y = torch.cos(out_y) emb = torch.cat([emb_sin_x, emb_cos_x, emb_sin_y, emb_cos_y], dim=1) return emb def make_window(x, hw, win_size, shift_size=0): B, _, C = x.shape H, W = hw x = x.view(B, H, W, C) pad_h = (win_size - H % win_size) % win_size pad_w = (win_size - W % win_size) % win_size if pad_h > 0 or pad_w > 0: x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) Hp, Wp = H + pad_h, W + pad_w if shift_size > 0: x = torch.roll(x, shifts=(-shift_size, -shift_size), dims=(1, 2)) x = x.view(B, Hp // win_size, win_size, Wp // win_size, win_size, C) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, win_size * win_size, C) return x, (Hp, Wp) def revert_window(x, pad_hw, hw, win_size, shift_size=0): Hp, Wp = pad_hw H, W = hw B = x.shape[0] // (Hp * Wp // win_size // win_size) # B * nWin, win_size, win_size, C -> B, H, W, C x = x.view(B, Hp // win_size, Wp // win_size, win_size, win_size, -1) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1) if shift_size > 0: x = torch.roll(x, shifts=(shift_size, shift_size), dims=(1, 2)) if Hp > H or Wp > W: x = x[:, :H, :W, :].contiguous() x = x.view(B, H * W, -1) return x def drop_path( x, drop_prob: float = 0.0, training: bool = False, scale_by_keep: bool = True ): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0],) + (1,) * ( x.ndim - 1 ) # work with diff dim tensors, not just 2D ConvNets random_tensor = x.new_empty(shape).bernoulli_(keep_prob) if keep_prob > 0.0 and scale_by_keep: random_tensor.div_(keep_prob) return x * random_tensor def window_partition(x, window_size): """ Partition into non-overlapping windows with padding if needed. Args: x (tensor): input tokens with [B, H, W, C]. window_size (int): window size. Returns: windows: windows after partition with [B * num_windows, window_size, window_size, C]. (Hp, Wp): padded height and width before partition """ B, H, W, C = x.shape pad_h = (window_size - H % window_size) % window_size pad_w = (window_size - W % window_size) % window_size if pad_h > 0 or pad_w > 0: x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) Hp, Wp = H + pad_h, W + pad_w x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C) windows = ( x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) ) return windows, (Hp, Wp) def window_unpartition(windows, window_size, pad_hw, hw): """ Window unpartition into original sequences and removing padding. Args: x (tensor): input tokens with [B * num_windows, window_size, window_size, C]. window_size (int): window size. pad_hw (Tuple): padded height and width (Hp, Wp). hw (Tuple): original height and width (H, W) before padding. Returns: x: unpartitioned sequences with [B, H, W, C]. 
""" Hp, Wp = pad_hw H, W = hw B = windows.shape[0] // (Hp * Wp // window_size // window_size) x = windows.view( B, Hp // window_size, Wp // window_size, window_size, window_size, -1 ) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1) if Hp > H or Wp > W: x = x[:, :H, :W, :].contiguous() return x def get_rel_pos(q_size, k_size, rel_pos): """ Get relative positional embeddings according to the relative positions of query and key sizes. Args: q_size (int): size of query q. k_size (int): size of key k. rel_pos (Tensor): relative position embeddings (L, C). Returns: Extracted positional embeddings according to relative positions. """ max_rel_dist = int(2 * max(q_size, k_size) - 1) # Interpolate rel pos if needed. if rel_pos.shape[0] != max_rel_dist: # Interpolate rel pos. rel_pos_resized = F.interpolate( rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), size=max_rel_dist, mode="linear", ) rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) else: rel_pos_resized = rel_pos # Scale the coords with short length if shapes for q and k are different. q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) return rel_pos_resized[relative_coords.long()] def add_decomposed_rel_pos(attn, q, rel_pos_h, rel_pos_w, q_size, k_size): """ Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950 Args: attn (Tensor): attention map. q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C). rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis. rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis. q_size (Tuple): spatial sequence size of query q with (q_h, q_w). 
k_size (Tuple): spatial sequence size of key k with (k_h, k_w). Returns: attn (Tensor): attention map with added relative positional embeddings. """ q_h, q_w = q_size k_h, k_w = k_size Rh = get_rel_pos(q_h, k_h, rel_pos_h) Rw = get_rel_pos(q_w, k_w, rel_pos_w) B, _, dim = q.shape r_q = q.reshape(B, q_h, q_w, dim) rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh) rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw) attn = ( attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :] ).view(B, q_h * q_w, k_h * k_w) return attn def get_abs_pos(abs_pos, has_cls_token, hw): """ Calculate absolute positional embeddings. If needed, resize embeddings and remove cls_token dimension for the original embeddings. Args: abs_pos (Tensor): absolute positional embeddings with (1, num_position, C). has_cls_token (bool): If true, has 1 embedding in abs_pos for cls token. hw (Tuple): size of input image tokens. Returns: Absolute positional embeddings after processing with shape (1, H, W, C) """ h, w = hw if has_cls_token: abs_pos = abs_pos[:, 1:] xy_num = abs_pos.shape[1] size = int(math.sqrt(xy_num)) assert size * size == xy_num if size != h or size != w: new_abs_pos = F.interpolate( abs_pos.reshape(1, size, size, -1).permute(0, 3, 1, 2), size=(h, w), mode="bicubic", align_corners=False, ) return new_abs_pos.permute(0, 2, 3, 1) else: return abs_pos.reshape(1, h, w, -1) ############################################# # detection # ############################################# def get_detr_pos_embed(embed_dim, grid_size, cls_token=False): eps = 1e-6 grid_h = torch.arange(1, grid_size + 1, dtype=torch.float32) grid_w = torch.arange(1, grid_size + 1, dtype=torch.float32) y_embed, x_embed = torch.meshgrid(grid_h, grid_w, indexing="ij") y_embed = (y_embed - 0.5) / (y_embed[-1:, :] + eps) x_embed = (x_embed - 0.5) / (x_embed[:, -1:] + eps) grid = torch.stack([x_embed, y_embed], dim=-1).flatten(0, 1) pos_embed = get_proposal_pos_embed(grid, embed_dim) if cls_token: pos_embed 
= torch.cat([torch.zeros(1, embed_dim), pos_embed], dim=0) return pos_embed def get_proposal_pos_embed(proposals, hidden_dim, detr_pos=True): assert hidden_dim % proposals.shape[-1] == 0 num_pos_feats = int(hidden_dim / proposals.shape[-1]) temperature = 10000 if detr_pos: scale = 2 * math.pi else: scale = 1.0 dim_t = torch.arange(num_pos_feats, dtype=proposals.dtype, device=proposals.device) dim_t = temperature ** (2 * (dim_t.div(2, rounding_mode="floor")) / num_pos_feats) proposals = proposals * scale proposals = proposals.unbind(-1) pos = [] for proposal in proposals: proposal = proposal[..., None] / dim_t proposal = torch.stack( (proposal[..., 0::2].sin(), proposal[..., 1::2].cos()), dim=-1 ).flatten(-2) pos.append(proposal) pos = torch.cat(pos, dim=-1) return pos def flatten_with_shape(tensor_list, mask_list): """ Params: :tensor_list: [(B, C, H1, W1), ..., (B, C, HN, WN)] :mask_list: [(B, H1, W1), ..., (B, HN, WN)] Return: :tensor_flatten: (B, L, C) :mask_flatten: (B, L) :tensor_shape: (N, 2) """ assert isinstance(tensor_list, collections.abc.Sequence) assert len(tensor_list) > 0 N = len(tensor_list) tensor_shape = torch.zeros(N, 2, dtype=torch.int64, device=tensor_list[0].device) tensor_flatten = [] if mask_list is not None: mask_flatten = [] for i, tensor in enumerate(tensor_list): new_tensor = tensor.flatten(2).permute(0, 2, 1) tensor_flatten.append(new_tensor) if mask_list is not None: mask = mask_list[i] new_mask = mask.flatten(1) mask_flatten.append(new_mask) assert tensor.shape[2] == mask.shape[1] assert tensor.shape[3] == mask.shape[2] tensor_shape[i, 0] = tensor.shape[2] tensor_shape[i, 1] = tensor.shape[3] mask_flatten = torch.cat(mask_flatten, dim=1) if mask_list is not None else None tensor_flatten = torch.cat(tensor_flatten, dim=1) return tensor_flatten, mask_flatten, tensor_shape def view_with_shape(tensor_flatten, mask_flatten, tensor_shape): """ Params: :tensor_flatten: (B, L, C) :mask_flatten: (B, L) :tensor_shape: (N, 2) Return: 
:tensor_list: [(B, C, H1, W1), ..., (B, C, HN, WN)] :mask_list: [(B, H1, W1), ..., (B, HN, WN)] """ chunk_sizes = (tensor_shape[:, 0] * tensor_shape[:, 1]).tolist() N = tensor_shape.shape[0] if tensor_flatten is None and mask_flatten is None: raise ValueError("Both tensor and mask are None") B = tensor_flatten.shape[0] if tensor_flatten is not None else mask_flatten.shape[0] if tensor_flatten is not None: tensor_list = torch.split(tensor_flatten, chunk_sizes, dim=1) if mask_flatten is not None: mask_list = torch.split(mask_flatten, chunk_sizes, dim=1) tensor2d_list = [] if tensor_flatten is not None else None mask2d_list = [] if mask_flatten is not None else None for i in range(N): H, W = tensor_shape[i].tolist() if tensor_flatten is not None: tensor2d_list.append( tensor_list[i].view(B, H, W, -1).permute(0, 3, 1, 2).contiguous() ) if mask_flatten is not None: mask2d_list.append(mask_list[i].view(B, H, W)) return tensor2d_list, mask2d_list def split_with_shape(tensor_flatten, mask_flatten, tensor_shape): """ Params: :tensor_flatten: (B, L, C) :mask_flatten: (B, L) :tensor_shape: (N, 2) Return: :tensor_list: [(B, H1 * W1, C), ..., (B, HN * WN, C)] :mask_list: [(B, H1 * W1), ..., (B, HN * WN)] """ chunk_sizes = (tensor_shape[:, 0] * tensor_shape[:, 1]).tolist() if tensor_flatten is None and mask_flatten is None: raise ValueError("Both tensor and mask are None") if tensor_flatten is not None: tensor_list = torch.split(tensor_flatten, chunk_sizes, dim=1) else: tensor_list = None if mask_flatten is not None: mask_list = torch.split(mask_flatten, chunk_sizes, dim=1) else: mask_list = None return tensor_list, mask_list def inverse_sigmoid(x, eps=1.0e-6): x = x.clamp(min=0, max=1) x1 = x.clamp(min=eps) x2 = (1 - x).clamp(min=eps) return torch.log(x1 / x2) def concat_and_pad_masks(tensor_list): spatial_shape = (tensor_list[i].shape[-2:] for i in range(len(tensor_list))) num_tensor = sum(tensor_list[i].shape[0] for i in range(len(tensor_list))) shape = (num_tensor, 
*(max(elem) for elem in zip(*spatial_shape))) tensor = tensor_list[0].new_zeros(shape) mask = tensor_list[0].new_ones(shape).bool() idx = 0 for item in tensor_list: b, h, w = item.shape tensor[idx : idx + b, :h, :w].copy_(item) mask[idx : idx + b, :h, :w] = False idx += b assert idx == num_tensor return tensor, mask
25,610
33.331099
129
py
null
r-mae-main/pretrain/utils/general.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import os import copy import collections import re import sys import torch import torch.nn as nn import torchvision import torch.nn.functional as F from pretrain.utils.distributed import get_world_size, synchronize string_classes = str def clip_grad_norm(params, max_norm, with_name=False): if max_norm > 0: return torch.nn.utils.clip_grad_norm_(params, max_norm) else: if with_name: device = params[0][1].grad.device for name, p in params: if p.grad is None: print(name) synchronize() total_norm = torch.norm( torch.stack( [torch.norm(p.grad.detach(), 2.0).to(device) for _, p in params] ), 2.0, ) else: device = params[0].grad.device total_norm = torch.norm( torch.stack( [torch.norm(p.grad.detach(), 2.0).to(device) for p in params] ), 2.0, ) return total_norm def get_clones(module, N): if N == 0: return [] return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) def get_norm(norm, out_channels): if norm == "LN": return nn.LayerNorm(out_channels) if norm == "BN": return nn.BatchNorm2d(out_channels) if norm == "GN": return nn.GroupNorm(32, out_channels) raise RuntimeError(f"norm layer should be BN | LN | GN, not {norm}") def get_activation_fn(activation): """Return an activation function given a string""" if activation == "relu": return F.relu if activation == "gelu": return F.gelu if activation == "glu": return F.glu raise RuntimeError(f"activation should be relu/gelu, not {activation}.") def filter_grads(parameters, with_name=False): if with_name: return [(name, param) for name, param in parameters if param.requires_grad] return [param for param in parameters if param.requires_grad] def get_root(): root_folder = os.path.dirname(os.path.abspath(__file__)) root_folder = os.path.abspath(os.path.join(root_folder, "..")) return root_folder def get_cache_dir(cache_dir): # If 
cache_dir path exists do not join to mmf root if not os.path.exists(cache_dir): cache_dir = os.path.join(get_root(), cache_dir) return cache_dir def get_batch_size(batch_size): world_size = get_world_size() if batch_size % world_size != 0: raise RuntimeError( "Batch size {} must be divisible by number " "of GPUs {} used.".format(batch_size, world_size) ) return batch_size // world_size def get_absolute_path(paths): # String check should be first as Sequence would pass for string too if isinstance(paths, str): if not os.path.isabs(paths): root_dir = get_root() paths = os.path.join(root_dir, paths) return paths elif isinstance(paths, collections.abc.Iterable): return [get_absolute_path(path) for path in paths] else: raise TypeError("Paths passed to dataset should either be " "string or list") def print_cuda_usage(): print("Memory Allocated:", torch.cuda.memory_allocated() / (1024 * 1024)) print("Max Memory Allocated:", torch.cuda.max_memory_allocated() / (1024 * 1024)) print("Memory Cached:", torch.cuda.memory_cached() / (1024 * 1024)) print("Max Memory Cached:", torch.cuda.max_memory_cached() / (1024 * 1024)) def print_model_parameters(model, writer, return_only=False): total_params = sum(p.numel() for p in model.parameters()) trained_params = sum(p.numel() for p in model.parameters() if p.requires_grad) if not return_only: writer.write( "Total Parameters: {}. 
Trained Parameters: {}".format( total_params, trained_params ) ) return total_params, trained_params def get_optimizer_parameters(model): is_parallel = isinstance(model, nn.DataParallel) or isinstance( model, nn.parallel.DistributedDataParallel ) has_custom = ( hasattr(model.module, "get_optimizer_parameters") if is_parallel else hasattr(model, "get_optimizer_parameters") ) if has_custom: parameters = ( model.module.get_optimizer_parameters() if is_parallel else model.get_optimizer_parameters() ) else: parameters = filter_grads(model.parameters()) return parameters def interpolate( input, size=None, scale_factor=None, mode="nearest", align_corners=None ): """ Equivalent to nn.functional.interpolate, but with support for empty batch sizes. This will eventually be supported natively by PyTorch, and this class can go away. """ return torchvision.ops.misc.interpolate( input, size, scale_factor, mode, align_corners ) np_str_obj_array_pattern = re.compile(r"[SaUO]") def data_to_tensor(data): data_type = type(data) if isinstance(data, torch.Tensor): return data elif ( data_type.__module__ == "numpy" and data_type.__name__ != "str_" and data_type.__name__ != "string_" ): if data_type.__name__ == "ndarray" or data_type.__name__ == "memmap": # array of string classes and object if np_str_obj_array_pattern.search(data.dtype.str) is not None: return data return torch.as_tensor(data) elif data.shape == (): return torch.as_tensor([data.item()]) elif isinstance(data, float): return torch.tensor([data], dtype=torch.float32) elif isinstance(data, int): return torch.tensor([data]) elif isinstance(data, string_classes): return data elif isinstance(data, collections.abc.Mapping): return {key: data_to_tensor(value) for key, value in data.items()} elif isinstance(data, tuple) and hasattr(data, "_fields"): # namedtuple return data_type(*(data_to_tensor(elem) for elem in data)) elif isinstance(data, collections.abc.Sequence): return [data_to_tensor(elem) for elem in data] # Disable def 
blockPrint(): sys.stdout = open(os.devnull, "w") # Restore def enablePrint(): sys.stdout = sys.__stdout__
6,570
28.334821
85
py
null
r-mae-main/pretrain/utils/grad_checkpoint.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import functools import threading import weakref from typing import Any, Dict, List, Generator, Optional, Tuple, Union from contextlib import contextmanager from dataclasses import dataclass import torch from torch import Tensor import torch.nn as nn import torch.utils.checkpoint as torch_checkpoint from torch.nn.modules.batchnorm import _BatchNorm def pack_kwargs(*args: Any, **kwargs: Any) -> Tuple[Tuple[str, ...], Tuple[Any, ...]]: """ Turn argument list into separate key list and value list (unpack_kwargs does the opposite) Usage:: kwarg_keys, flat_args = pack_kwargs(1, 2, a=3, b=4) assert kwarg_keys == ("a", "b") assert flat_args == (1, 2, 3, 4) args, kwargs = unpack_kwargs(kwarg_keys, flat_args) assert args == (1, 2) assert kwargs == {"a": 3, "b": 4} """ kwarg_keys: List[str] = [] flat_args: List[Any] = list(args) for k, v in kwargs.items(): kwarg_keys.append(k) flat_args.append(v) return tuple(kwarg_keys), tuple(flat_args) def unpack_kwargs( kwarg_keys: Tuple[str, ...], flat_args: Tuple[Any, ...] ) -> Tuple[Tuple[Any, ...], Dict[str, Any]]: """See pack_kwargs.""" assert len(kwarg_keys) <= len( flat_args ), f"too many keys {len(kwarg_keys)} vs. {len(flat_args)}" if len(kwarg_keys) == 0: return flat_args, {} args = flat_args[: -len(kwarg_keys)] kwargs = {k: v for k, v in zip(kwarg_keys, flat_args[-len(kwarg_keys) :])} return args, kwargs def split_non_tensors( mixed: Union[torch.Tensor, Tuple[Any, ...]] ) -> Tuple[Tuple[torch.Tensor, ...], Optional[Dict[str, List[Any]]]]: """ Split a tuple into a list of tensors and the rest with information for later reconstruction. 
When called with a tensor X, will return: (x,), None Usage:: x = torch.Tensor([1]) y = torch.Tensor([2]) tensors, packed_non_tensors = split_non_tensors((x, y, None, 3)) assert tensors == (x, y) assert packed_non_tensors == { "is_tensor": [True, True, False, False], "objects": [None, 3], } recon = unpack_non_tensors(tensors, packed_non_tensors) assert recon == (x, y, None, 3) """ if isinstance(mixed, torch.Tensor): return (mixed,), None tensors: List[torch.Tensor] = [] packed_non_tensors: Dict[str, List[Any]] = {"is_tensor": [], "objects": []} for o in mixed: if isinstance(o, torch.Tensor): packed_non_tensors["is_tensor"].append(True) tensors.append(o) else: packed_non_tensors["is_tensor"].append(False) packed_non_tensors["objects"].append(o) return tuple(tensors), packed_non_tensors def unpack_non_tensors( tensors: Tuple[torch.Tensor, ...], packed_non_tensors: Optional[Dict[str, List[Any]]], ) -> Tuple[Any, ...]: """See split_non_tensors.""" if packed_non_tensors is None: return tensors assert isinstance(packed_non_tensors, dict), type(packed_non_tensors) mixed: List[Any] = [] is_tensor_list = packed_non_tensors["is_tensor"] objects = packed_non_tensors["objects"] assert len(tensors) + len(objects) == len(is_tensor_list), ( f"len(tensors) {len(tensors)} len(objects) {len(objects)} " f"len(is_tensor_list) {len(is_tensor_list)}" ) obj_i = tnsr_i = 0 for is_tensor in is_tensor_list: if is_tensor: mixed.append(tensors[tnsr_i]) tnsr_i += 1 else: mixed.append(objects[obj_i]) obj_i += 1 return tuple(mixed) # https://docs.python.org/3/library/threading.html#thread-local-data # Manage the checkpoint context with thread-local data. 
@dataclass class ThreadLocalCheckpointingState(threading.local): is_checkpointing: bool = False is_recomputing: bool = False is_checkpointing_disabled: bool = False thread_local = ThreadLocalCheckpointingState() @contextmanager def disable_checkpointing() -> Generator[None, None, None]: """Makes :func:`is_checkpointing_disabled` return :data:`True` within a context.""" orig = thread_local.is_checkpointing_disabled thread_local.is_checkpointing_disabled = True try: yield finally: thread_local.is_checkpointing_disabled = orig @contextmanager def enable_checkpointing() -> Generator[None, None, None]: """Makes :func:`is_checkpointing` return :data:`True` within a context.""" orig = thread_local.is_checkpointing thread_local.is_checkpointing = True try: yield finally: thread_local.is_checkpointing = orig @contextmanager def enable_recomputing() -> Generator[None, None, None]: """Makes :func:`is_recomputing` return :data:`True` within a context.""" orig = thread_local.is_recomputing thread_local.is_recomputing = True try: yield finally: thread_local.is_recomputing = orig def is_checkpointing() -> bool: """Whether the current forward propagation is under checkpointing. Returns: bool: :data:`True` if it's under checkpointing. """ return thread_local.is_checkpointing def is_recomputing() -> bool: """Whether the current forward propagation is under checkpoint recomputation. Use this to prevent duplicated side-effects at forward propagation:: class Counter(nn.Module): def __init__(self): super().__init__() self.counter = 0 def forward(self, input): if not is_recomputing(): self.counter += 1 return input Returns: bool: :data:`True` if it's under checkpoint recomputation. """ return thread_local.is_recomputing def checkpoint_wrapper( module: nn.Module, offload_to_cpu: bool = False, ) -> nn.Module: """ A friendlier wrapper for performing activation checkpointing. 
Compared to the PyTorch version, this version: - wraps an nn.Module, so that all subsequent calls will use checkpointing - handles keyword arguments in the forward - handles non-Tensor outputs from the forward - supports offloading activations to CPU Usage:: checkpointed_module = checkpoint_wrapper(my_module, offload_to_cpu=True) a, b = checkpointed_module(x, y=3, z=torch.Tensor([1])) To understand the benefits of checkpointing and the `offload_to_cpu` flag, let's divide activations into 2 types: inner activations and outer activations w.r.t. the checkpointed modules. The inner ones are saved by activation checkpointing, the outer ones are saved by offload_to_cpu. In terms of GPU memory savings: - When inner ones are large in size and outer ones are small, checkpointing helps a lot, offload_to_cpu may help a little. - When inner ones are small and outer ones are large, checkpointing helps little, offload_to_cpu helps a lot. - When both inner and outer are large, both help and the benefit is additive. ..Note:: The first and last layers are not likely to benefit from the `offload_to_cpu` flag because (1) there are typically other references to the first layer's input, so the GPU memory won't be freed; (2) the input to the last layer is immediately used by the backward pass and won't result in memory savings. Args: module (nn.Module): The module to be wrapped offload_to_cpu (bool): Whether to offload activations to CPU. Returns: (nn.Module): Wrapped module """ # Patch the batchnorm layers in case there are any in this module. patch_batchnorm(module) # The use of weakref here is to prevent creating a ref cycle: m -> m.forward -> m. # When such cycle exists, gc won't collect the module when the module is freed. # That causes GPU memory to be leaked. See the unit test for how we catch that. # # We prefer this over a class wrapper since the class wrapper would have to # proxy a lot of fields and methods. 
module.forward = functools.partial( # type: ignore _checkpointed_forward, type(module).forward, weakref.ref(module), offload_to_cpu ) return module def _checkpointed_forward( original_forward: Any, weak_self: Any, offload_to_cpu: bool, *args: Any, **kwargs: Any, ) -> Any: module = weak_self() # If gradients are disabled, just use original `.forward()` method directly. if not torch.is_grad_enabled() or thread_local.is_checkpointing_disabled: return original_forward(module, *args, **kwargs) # Autograd Functions in PyTorch work best with positional args, since # the backward must return gradients (or None) for every input argument. # We can flatten keyword arguments to make this easier. args = (module,) + args kwarg_keys, flat_args = pack_kwargs(*args, **kwargs) parent_ctx_dict: Dict[str, Any] = { "offload": offload_to_cpu, } # Dummy tensor with grad is used to ensure the backward pass is called. This is needed # when original_forward's input are non-tensor (i.e. a tuple). Using this dummy tensor # avoids requiring users to set their input tensors's requires_grad flag. In the case # of tuple type inputs, setting the flag won't even trigger the backward pass. # # One implication of this is that since we always feed in a dummy tensor # needing grad, then the output will always require grad, even if it originally # wouldn't, such as if the module and original input both do not require grad. # We get around this by saving the desired requires_grad value in output and # detaching the output if needed. 
output = CheckpointFunction.apply( torch.tensor([], requires_grad=True), original_forward, parent_ctx_dict, kwarg_keys, *flat_args, ) output_requires_grad = parent_ctx_dict["output_requires_grad"] if not isinstance(output, torch.Tensor): # If output should not require grad, then detach it, since otherwise it will # always have requires_grad = True due to our dummy tensor input above that # requires_grad output = [x.detach() if not output_requires_grad else x for x in output] packed_non_tensor_outputs = parent_ctx_dict["packed_non_tensor_outputs"] if packed_non_tensor_outputs: output = unpack_non_tensors(output, packed_non_tensor_outputs) else: # If output should not require grad, then detach it, since otherwise it will # always have requires_grad = True due to our dummy tensor input above that # requires_grad if not output_requires_grad: output = output.detach() return output def get_rng_state() -> Dict[str, Any]: state = {"torch_rng_state": torch.get_rng_state()} if torch.cuda.is_available(): state["cuda_rng_state"] = torch.cuda.get_rng_state() return state def set_rng_state(state: Dict[str, Any]) -> None: torch.set_rng_state(state["torch_rng_state"]) if torch.cuda.is_available(): torch.cuda.set_rng_state(state["cuda_rng_state"]) def is_autocast_enabled() -> bool: """Similar to torch.is_autocast_enabled, but compatible with torch 1.5.1""" if hasattr(torch, "is_autocast_enabled"): return torch.is_autocast_enabled() return False @contextmanager def autocast(enabled: bool) -> Generator: """Similar to torch.cuda.amp.autocast, but compatible with torch 1.5.1""" if enabled: with torch.cuda.amp.autocast(enabled): yield else: yield class CheckpointFunction(torch.autograd.Function): """Similar to the torch version, but support non-Tensor outputs. The caller is expected to provide a dict (*parent_ctx_dict*) that will hold the non-Tensor outputs. These should be combined with the Tensor *outputs* by calling :func:`unpack_non_tensors`. 
""" @staticmethod def forward( # type: ignore ctx: Any, dummy_tensor_requires_grad: torch.Tensor, run_function: Any, parent_ctx_dict: Dict[str, Any], kwarg_keys: Tuple[str, ...], *args: Any, **kwargs: Any, ) -> Any: torch_checkpoint.check_backward_validity(args) ctx.run_function = run_function ctx.kwarg_keys = kwarg_keys ctx.fwd_rng_state = get_rng_state() ctx.had_autocast_in_fwd = is_autocast_enabled() tensor_inputs, packed_non_tensor_inputs = split_non_tensors(args) if parent_ctx_dict["offload"]: ctx.fwd_device = tuple(x.device for x in tensor_inputs) ctx.grad_requirements = tuple(x.requires_grad for x in tensor_inputs) tensor_inputs = tuple(x.to("cpu", non_blocking=True) for x in tensor_inputs) else: ctx.fwd_device, ctx.grad_requirements = None, None ctx.save_for_backward(*tensor_inputs) ctx.packed_non_tensor_inputs = packed_non_tensor_inputs with torch.no_grad(), enable_checkpointing(): unpacked_args, unpacked_kwargs = unpack_kwargs(kwarg_keys, args) outputs = run_function(*unpacked_args, **unpacked_kwargs) the_module = unpacked_args[0] # Because we run with torch.no_grad(), we can't actually access # outputs.requires_grad. Instead, we manually compute it by # checking if either the input or the module needs grads parameters = list(the_module.parameters()) # If the module is wrapped by FlattenParamsWrapper, then the # parameters would have been deleted. If so, we need to access # the views into the flattened parameters. if hasattr(the_module, "_unflattened_param_views"): parameters += the_module._unflattened_param_views output_requires_grad = any(param.requires_grad for param in parameters) or any( x.requires_grad for x in tensor_inputs ) parent_ctx_dict["output_requires_grad"] = output_requires_grad if not isinstance(outputs, torch.Tensor): # Autograd Functions don't like non-Tensor outputs. We can split the # non-Tensor and Tensor outputs, returning the former by reference # through *parent_ctx_dict* and returning the latter directly. 
outputs, packed_non_tensor_outputs = split_non_tensors(outputs) parent_ctx_dict["packed_non_tensor_outputs"] = packed_non_tensor_outputs return outputs @staticmethod def backward(ctx: Any, *args: Any) -> Tuple[Optional[Tensor], ...]: if not torch.autograd._is_checkpoint_valid(): raise RuntimeError( "Checkpointing is not compatible with .grad(), please use .backward() if possible" ) tensor_inputs: Tuple = ctx.saved_tensors tensor_inputs = torch_checkpoint.detach_variable(tensor_inputs) if ctx.fwd_device is not None: tensor_inputs = tuple( t.to(ctx.fwd_device[i], non_blocking=True) for i, t in enumerate(tensor_inputs) ) for i, need_grad in enumerate(ctx.grad_requirements): tensor_inputs[i].requires_grad = need_grad inputs = unpack_non_tensors(tensor_inputs, ctx.packed_non_tensor_inputs) # Store the current states. bwd_rng_state = get_rng_state() # Set the states to what it used to be before the forward pass. set_rng_state(ctx.fwd_rng_state) with torch.enable_grad(), enable_recomputing(), autocast( ctx.had_autocast_in_fwd ): unpacked_args, unpacked_kwargs = unpack_kwargs(ctx.kwarg_keys, inputs) outputs = ctx.run_function(*unpacked_args, **unpacked_kwargs) tensor_outputs, _ = split_non_tensors(outputs) # Set the states back to what it was at the start of this function. set_rng_state(bwd_rng_state) # Run backward() with only Tensors that require grad outputs_with_grad = [] args_with_grad = [] for i in range(len(tensor_outputs)): if tensor_outputs[i].requires_grad: outputs_with_grad.append(tensor_outputs[i]) args_with_grad.append(args[i]) if len(outputs_with_grad) == 0: raise RuntimeError( "None of the outputs have requires_grad=True, " "this checkpoint() is not necessary" ) torch.autograd.backward(outputs_with_grad, args_with_grad) grads = tuple( inp.grad if isinstance(inp, torch.Tensor) else None for inp in inputs ) return (None, None, None, None) + grads def patch_batchnorm(module: nn.Module) -> List: """Patch all batchnorm instances (1d, 2d, 3d, sync_bn, etc.) 
of a module so that they don't track running stats when torch.no_grad() is enabled. This is important in activation checkpointing to ensure stats are tracked correctly as if there were no activation checkpointing. The reason is that activation checkpointing runs the forward function twice, first with torch.no_grad(), then with torch.grad(). Args: module (nn.Module): The module to be patched in-place. Returns: (list): A list of hook handles, late can be freed. """ def pre_forward(module: _BatchNorm, input: Tensor) -> None: if torch.is_grad_enabled(): return module._track_running_stats_backup = module.track_running_stats module.track_running_stats = False def post_forward(module: _BatchNorm, input: Tensor, result: Tensor) -> None: if torch.is_grad_enabled(): return module.track_running_stats = module._track_running_stats_backup hooks = [] for name, child in module.named_modules(): # _BatchNorm is base for bn1d, bn2d, bn3d and sync_bn, apex_sync_bn, etc. if isinstance(child, _BatchNorm) and not hasattr( child, "disable_patch_batchnorm" ): # Register the pre/post hooks. pre_handle = child.register_forward_pre_hook(pre_forward) post_handle = child.register_forward_hook(post_forward) hooks += [pre_handle, post_handle] return hooks
18,667
37.411523
98
py
null
r-mae-main/pretrain/utils/logger.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import collections import json import logging import os import sys from torch.utils.tensorboard import SummaryWriter from pretrain.utils.distributed import is_master from pretrain.utils.timer import Timer class Logger: def __init__(self, save_dir, logger_level, log_format, should_not_log): self.logger = None self._is_master = is_master() self.timer = Timer() self.save_dir = save_dir self.debug = logger_level == "debug" self.log_format = log_format self.time_format = "%Y-%m-%dT%H:%M:%S" self.log_filename = "train_" self.log_filename += self.timer.get_time_hhmmss(None, format=self.time_format) self.log_filename += ".log" self.log_folder = os.path.join(self.save_dir, "logs") if not os.path.exists(self.log_folder): os.makedirs(self.log_folder, exist_ok=True) self.log_filename = os.path.join(self.log_folder, self.log_filename) if not self._is_master: return if self._is_master: print("Logging to:", self.log_filename) logging.captureWarnings(True) self.logger = logging.getLogger(__name__) self._file_only_logger = logging.getLogger(__name__) warnings_logger = logging.getLogger("py.warnings") # Set level self.logger.setLevel(getattr(logging, logger_level.upper())) self._file_only_logger.setLevel(getattr(logging, logger_level.upper())) formatter = logging.Formatter( "%(asctime)s %(levelname)s: %(message)s", datefmt="%Y-%m-%dT%H:%M:%S" ) # Add handler to file channel = logging.FileHandler(filename=self.log_filename, mode="a") channel.setFormatter(formatter) self.logger.addHandler(channel) self._file_only_logger.addHandler(channel) warnings_logger.addHandler(channel) # Add handler to stdout channel = logging.StreamHandler(sys.stdout) channel.setFormatter(formatter) self.logger.addHandler(channel) warnings_logger.addHandler(channel) self.should_log = not should_not_log # Single log 
wrapper map self._single_log_map = set() def write(self, x, level="info", donot_print=False, log_all=False): if self.logger is None: return if log_all is False and not self._is_master: return # if it should not log then just print it if self.should_log: if hasattr(self.logger, level): if donot_print: getattr(self._file_only_logger, level)(str(x)) else: getattr(self.logger, level)(str(x)) else: self.logger.error("Unknown log level type: %s" % level) else: print(str(x) + "\n") def log_progress(self, info): if not isinstance(info, collections.Mapping): self.write(info) if not self._is_master: return if self.log_format == "simple": output = ", ".join( ["{}: {}".format(key, value) for key, value in info.items()] ) elif self.log_format == "json": output = json.dumps(info) else: output = str(info) self.write(output) def single_write(self, x, level="info", log_all=False): if self.logger is None: return if log_all is False and not self._is_master: return if x + "_" + level in self._single_log_map: return else: self.write(x, level) class TensorboardLogger: def __init__(self, log_folder="./logs"): self.summary_writer = None self._is_master = is_master() self.log_folder = log_folder if self._is_master: self.summary_writer = SummaryWriter(self.log_folder) def __del__(self): if getattr(self, "summary_writer", None) is not None: self.summary_writer.close() def close(self): if getattr(self, "summary_writer", None) is not None: self.summary_writer.close() def _should_log_tensorboard(self): if self.summary_writer is None or not self._is_master: return False else: return True def add_scalar(self, key, value, iteration): if not self._should_log_tensorboard(): return self.summary_writer.add_scalar(key, value, iteration) def add_scalars(self, scalar_dict, iteration): if not self._should_log_tensorboard(): return for key, val in scalar_dict.items(): self.summary_writer.add_scalar(key, val, iteration) def add_histogram_for_model(self, model, iteration): if not 
self._should_log_tensorboard(): return for name, param in model.named_parameters(): np_param = param.clone().cpu().data.numpy() self.summary_writer.add_histogram(name, np_param, iteration) def add_image(self, tag, image, iteration): if not self._should_log_tensorboard(): return self.summary_writer.add_image(tag, image, iteration) def add_images(self, image_dict, iteration): if not self._should_log_tensorboard(): return for tag, image in image_dict.items(): self.summary_writer.add_image(tag, image, iteration)
5,647
29.695652
86
py
null
r-mae-main/pretrain/utils/meter.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import datetime import time from collections import deque, defaultdict import torch import torch.distributed as dist from pretrain.utils.distributed import is_dist_avail_and_initialized class SmoothedValue(object): """Track a series of values and provide access to smoothed values over a window or the global series average. """ def __init__(self, window_size=20, fmt=None): if fmt is None: fmt = "{median:.4f} ({global_avg:.4f})" self.deque = deque(maxlen=window_size) self.total = 0.0 self.count = 0 self.fmt = fmt def update(self, value, n=1): self.deque.append(value) self.count += n self.total += value * n def synchronize_between_processes(self): """ Warning: does not synchronize the deque! """ if not is_dist_avail_and_initialized(): return t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda") dist.barrier() dist.all_reduce(t) t = t.tolist() self.count = int(t[0]) self.total = t[1] @property def median(self): d = torch.tensor(list(self.deque)) return d.median().item() @property def avg(self): d = torch.tensor(list(self.deque), dtype=torch.float32) return d.mean().item() @property def global_avg(self): return self.total / self.count @property def max(self): return max(self.deque) @property def value(self): return self.deque[-1] def __str__(self): return self.fmt.format( median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value, ) class Meter(object): def __init__(self, window_size=20, delimiter=", "): self.meters = {} self.delimiter = delimiter self.window_size = window_size def update(self, update_dict, batch_size=1): for k, v in update_dict.items(): if isinstance(v, torch.Tensor): if v.dim() != 0: v = v.mean() v = v.item() assert isinstance(v, (float, int)) if k not in self.meters: self.meters[k] = 
SmoothedValue(self.window_size) self.meters[k].update(v, batch_size) def update_from_meter(self, meter): for key, value in meter.meters.items(): assert isinstance(value, SmoothedValue) if key not in self.meters: self.meters[key] = SmoothedValue(self.window_size) self.meters[key] = value def __getattr__(self, attr): if attr in self.meters: return self.meters[attr] if attr in self.__dict__: return self.__dict__[attr] raise AttributeError( "'{}' object has no attribute '{}'".format(type(self).__name__, attr) ) def get_scalar_dict(self): scalar_dict = {} for k, v in self.meters.items(): scalar_dict[k] = v.avg return scalar_dict def get_log_dict(self, split): log_dict = {} for k, v in self.meters.items(): if "train" == split: log_dict[k] = "{:.4f}".format(v.median) log_dict["{}/avg".format(k)] = "{:.4f}".format(v.avg) else: log_dict[k] = "{:.4f}".format(v.global_avg) return log_dict def __str__(self): loss_str = [] for name, meter in self.meters.items(): if "train" in name: loss_str.append( "{}: {:.4f} ({:.4f})".format(name, meter.median, meter.global_avg) ) else: # In case of val print global avg loss_str.append("{}: {:.4f}".format(name, meter.global_avg)) return self.delimiter.join(loss_str) class MetricLogger(object): def __init__(self, delimiter="\t"): self.meters = defaultdict(SmoothedValue) self.delimiter = delimiter def update(self, **kwargs): for k, v in kwargs.items(): if v is None: continue if isinstance(v, torch.Tensor): v = v.item() assert isinstance(v, (float, int)) self.meters[k].update(v) def __getattr__(self, attr): if attr in self.meters: return self.meters[attr] if attr in self.__dict__: return self.__dict__[attr] raise AttributeError( "'{}' object has no attribute '{}'".format(type(self).__name__, attr) ) def __str__(self): loss_str = [] for name, meter in self.meters.items(): loss_str.append("{}: {}".format(name, str(meter))) return self.delimiter.join(loss_str) def synchronize_between_processes(self): for meter in self.meters.values(): 
meter.synchronize_between_processes() def add_meter(self, name, meter): self.meters[name] = meter def log_every(self, iterable, print_freq, header=None): i = 0 if not header: header = "" start_time = time.time() end = time.time() iter_time = SmoothedValue(fmt="{avg:.4f}") data_time = SmoothedValue(fmt="{avg:.4f}") space_fmt = ":" + str(len(str(len(iterable)))) + "d" log_msg = [ header, "[{0" + space_fmt + "}/{1}]", "eta: {eta}", "{meters}", "time: {time}", "data: {data}", ] if torch.cuda.is_available(): log_msg.append("max mem: {memory:.0f}") log_msg = self.delimiter.join(log_msg) MB = 1024.0 * 1024.0 for obj in iterable: data_time.update(time.time() - end) yield obj iter_time.update(time.time() - end) if i % print_freq == 0 or i == len(iterable) - 1: eta_seconds = iter_time.global_avg * (len(iterable) - i) eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) if torch.cuda.is_available(): print( log_msg.format( i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time), memory=torch.cuda.max_memory_allocated() / MB, ) ) else: print( log_msg.format( i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time), ) ) i += 1 end = time.time() total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) print( "{} Total time: {} ({:.4f} s / it)".format( header, total_time_str, total_time / len(iterable) ) )
7,593
30.510373
86
py
null
r-mae-main/pretrain/utils/modeling.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. from typing import Optional, List, Set, Dict, Any import torch def get_layer_id(layer_name, num_layers): if "net.pos_embed" in layer_name: return 0 elif "net.patch_embed" in layer_name: return 0 elif "net.blocks." in layer_name: layer_id = int(layer_name[layer_name.find("net.blocks.") :].split(".")[2]) return layer_id + 1 return num_layers - 1 norm_module_types = ( torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, # NaiveSyncBatchNorm inherits from BatchNorm2d torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm, ) def get_parameters( model: torch.nn.Module, lr_multi: Optional[float] = 1.0, lr_module: Optional[List[str]] = [], wd_norm: Optional[float] = None, module_except: Optional[List[str]] = [], ): param_group_wd_norm = {"params": []} param_group_lr_multi = {"params": []} param_group_others = {"params": []} for module_name, module in model.named_modules(): if any(nd in module_name for nd in module_except): continue for param_name, param in module.named_parameters(recurse=False): if not param.requires_grad: continue if isinstance(module, norm_module_types): param_group_wd_norm["params"].append(param) elif any(nd in param_name for nd in lr_module): param_group_lr_multi["params"].append(param) else: param_group_others["params"].append(param) if lr_multi is not None and lr_multi != 1.0: param_group_lr_multi["lr_multi"] = lr_multi if wd_norm is not None: param_group_wd_norm["weight_decay"] = wd_norm optimizer_grouped_parameters = [ param_group_wd_norm, param_group_lr_multi, param_group_others, ] return optimizer_grouped_parameters def get_vit_parameters( model: torch.nn.Module, wd_except: Optional[List[str]] = None, wd_bias: Optional[float] = 
None, wd_norm: Optional[float] = None, lr_decay_rate: Optional[float] = None, num_layers: Optional[int] = None, ): memo: Set[torch.nn.parameter.Parameter] = set() if lr_decay_rate is not None: assert num_layers is not None num_layers += 2 if lr_decay_rate is not None: param_group_decay = [{"params": []} for _ in range(num_layers + 1)] param_group_no_decay = [ {"params": [], "weight_decay": 0.0} for _ in range(num_layers + 1) ] else: param_group_decay = [{"params": []}] param_group_no_decay = [{"params": [], "weight_decay": 0.0}] for module_name, module in model.named_modules(): for param_name, param in module.named_parameters(recurse=False): if not param.requires_grad: continue if param in memo: continue memo.add(param) no_decay = False if isinstance(module, norm_module_types) and wd_norm == 0.0: no_decay = True if "bias" in param_name and wd_bias == 0.0: no_decay = True if wd_except is not None and any(nd in param_name for nd in wd_except): no_decay = True if lr_decay_rate is not None: layer_id = get_layer_id(f"{module_name}.{param_name}", num_layers) if no_decay: param_group_no_decay[layer_id]["params"].append(param) param_group_no_decay[layer_id]["lr_multi"] = lr_decay_rate ** ( num_layers - 1 - layer_id ) else: param_group_decay[layer_id]["params"].append(param) param_group_decay[layer_id]["lr_multi"] = lr_decay_rate ** ( num_layers - 1 - layer_id ) else: if no_decay: param_group_no_decay[0]["params"].append(param) else: param_group_decay[0]["params"].append(param) optimizer_grouped_parameters = param_group_decay + param_group_no_decay return optimizer_grouped_parameters def get_mae_parameters( model: torch.nn.Module, wd_except: Optional[List[str]] = None, wd_bias: Optional[float] = None, wd_norm: Optional[float] = None, ): memo: Set[torch.nn.parameter.Parameter] = set() param_group_decay = {"params": []} param_group_no_decay = {"params": [], "weight_decay": 0.0} for module in model.modules(): for param_name, param in module.named_parameters(recurse=False): if 
not param.requires_grad: continue if param in memo: continue memo.add(param) no_decay = False if isinstance(module, norm_module_types) and wd_norm == 0.0: no_decay = True if "bias" in param_name and wd_bias == 0.0: no_decay = True if wd_except is not None and any(nd in param_name for nd in wd_except): no_decay = True if no_decay: param_group_no_decay["params"].append(param) else: param_group_decay["params"].append(param) optimizer_grouped_parameters = [param_group_decay, param_group_no_decay] return optimizer_grouped_parameters
5,763
31.382022
83
py
null
r-mae-main/pretrain/utils/params.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. from collections import abc from math import inf from typing import Any, Dict, List, Optional, Union, Callable import torch import torch.distributed as dist def get_global_rank(group: Any, rank: int) -> int: if group is dist.group.WORLD: return rank return dist.distributed_c10d._get_global_rank(group, rank) def recursive_copy_to_device( value: Any, non_blocking: bool, device: torch.device ) -> Any: """ Recursively searches lists, tuples, dicts and copies tensors to device if possible. Non-tensor values are passed as-is in the result. NOTE: These are all copies, so if there are two objects that reference the same object, then after this call, there will be two different objects referenced on the device. """ if isinstance(value, torch.Tensor): return value.to(device, non_blocking=non_blocking) if isinstance(value, (list, tuple)): values = [] for val in value: values.append( recursive_copy_to_device(val, non_blocking=non_blocking, device=device) ) return values if isinstance(value, list) else tuple(values) if isinstance(value, abc.Mapping): device_val: Dict[str, Any] = {} for key, val in value.items(): device_val[key] = recursive_copy_to_device( val, non_blocking=non_blocking, device=device ) return device_val return value def calc_grad_norm(parameters: List[torch.nn.Parameter], p: float) -> torch.Tensor: r"""Calculate gradient norm of an iterable of parameters. Returns: Total norm of the parameters (viewed as a single vector). 
""" if isinstance(parameters, torch.Tensor): parameters = [parameters] parameters = list(filter(lambda par: par.grad is not None, parameters)) if len(parameters) == 0: return torch.tensor(0.0) p = float(p) if p == inf: local_norm = max(par.grad.detach().abs().max() for par in parameters) # type: ignore else: # Compute the norm in full precision no matter what local_norm = torch.norm(torch.stack([torch.norm(par.grad.detach(), p, dtype=torch.float32) for par in parameters]), p).to(dtype=parameters[0].dtype) # type: ignore return local_norm class Workhandle: def __init__(self, handle: Any, callback: Optional[Callable]) -> None: self.handle = handle self.callback = callback class Bucket: """ Helper class to simplify the handling of buckets, which unify the underlying storage of multiple tensors """ def __init__(self, size: int, dtype: torch.dtype, device: torch.device) -> None: self._params: List[torch.Tensor] = [] self._param_ids: List[int] = [] self._fill = 0 # The actual flat tensor self.buffer: torch.Tensor = torch.zeros(size, dtype=dtype, device=device) def to( # type: ignore self, device: Optional[Union[int, torch.device]], dtype: Optional[torch.dtype] = None, non_blocking: bool = False, keep_param_alignment: bool = True, ) -> "ParamBucket": """ Move the underlying buffer """ assert ( self.buffer is not None ), "Cannot move a collapsed bucket, please rebuild it" self.buffer = self.buffer.to(device, dtype, non_blocking) class ParamBucket(Bucket): """ Helper class to simplify the handling of parameter buckets """ def __init__(self, size: int, dtype: torch.dtype, device: torch.device) -> None: super().__init__(size, dtype, device) def to( # type: ignore self, device: Optional[Union[int, torch.device]], dtype: Optional[torch.dtype] = None, non_blocking: bool = False, keep_param_alignment: bool = True, ) -> "ParamBucket": """ Move the underlying buffer """ super().to(device, dtype, non_blocking) if keep_param_alignment: self._reattach_params() @torch.no_grad() def 
add_param(self, param: torch.Tensor) -> None: """ Add a new parameter gradient to the bucket. Param becomes a view of this bucket buffer """ assert ( id(param) not in self._param_ids ), "The same param cannot be checked in twice" self._add_param_as_view(param) self._params.append(param) self._param_ids.append(id(param)) @torch.no_grad() def _add_param_as_view( self, param: torch.Tensor, keep_existing_value: bool = True ) -> None: assert self.buffer is not None assert ( param.dtype == self.buffer.dtype ), f"Different types for the bucket and the param, cannot proceed: {param.dtype} - {self.buffer.dtype}" assert ( param.device == self.buffer.device ), f"Different devices for the bucket and the param, cannot proceed: {param.device} - {self.buffer.device}" fill_next = self._fill + param.numel() assert fill_next <= self.buffer.numel() # Copy the current param value if keep_existing_value: self.buffer[self._fill : fill_next].copy_(param.data.flatten()) param.data = self.buffer[self._fill : fill_next].view_as(param.data) self._fill = fill_next @torch.no_grad() def _reattach_params(self) -> None: """ Given the parameters which have been registered previously, rebuild the whole bucket """ assert len(self._params) > 0 self._fill = 0 for p in self._params: if p.dtype != self.buffer.dtype: p.data = p.data.to(self.buffer.dtype) self._add_param_as_view(p, keep_existing_value=False) class GradBucket(Bucket): """ Helper class to simplify the handling of gradient buckets """ def __init__( self, size: int, dtype: torch.dtype, device: torch.device, destination: int ) -> None: super().__init__(size, dtype, device) self._max_size = size self._is_collapsed = False self.params_checked_in = 0 self.destination = destination self.sent = True self.callback: Optional[Callable[[Any], None]] = None def reset_checked_in(self) -> None: """Reset the counter of the parameter grads which have been checked in""" self.params_checked_in = 0 self.sent = False @property def all_checked_in(self) -> bool: 
"""Have all the expected gradient check-in happened ?""" return len(self._params) == self.params_checked_in def can_add_grad_view(self, param: torch.Tensor) -> bool: """Is there enough room in the bucket to add this parameter gradient, and is this param not already checked in ?""" return ( self._fill + param.numel() < self._max_size and id(param) not in self._param_ids ) def to( # type: ignore self, device: Optional[Union[int, torch.device]], dtype: Optional[torch.dtype] = None, non_blocking: bool = False, keep_param_alignment: bool = True, ) -> "GradBucket": """ Move the underlying buffer """ if self._is_collapsed: self.rebuild() super().to(device, dtype, non_blocking) if keep_param_alignment: self._reattach_grads() def zero(self) -> None: """ Set all the grads to zero """ self.buffer.fill_(0.0) @torch.no_grad() def add_grad(self, param: torch.Tensor) -> None: """ Add a new parameter gradient to the bucket. Param.grad becomes a view of this bucket buffer """ assert ( id(param) not in self._param_ids ), "The same gradients cannot be checked in twice" if param.grad is None: param.grad = torch.zeros_like(param) self._add_grad_as_view(param) self._params.append(param) self._param_ids.append(id(param)) @torch.no_grad() def collapse(self) -> None: """ Release the buffer from memory. 
The bucket will need to be rebuilt before use """ if not self._is_collapsed: for p in self._params: assert p.grad is not None p.grad.detach_() p.grad = None self.buffer = torch.zeros( 0, dtype=self.buffer.dtype, device=self.buffer.device ) self._fill = 0 self.params_checked_in = 0 self._is_collapsed = True @torch.no_grad() def rebuild(self) -> None: """ Given the parameter gradients which have been registered previously, rebuild the whole bucket """ assert len(self._params) > 0 if self._is_collapsed: self.buffer = torch.zeros( self._max_size, dtype=self._params[0].dtype, device=self._params[0].device, ) for p in self._params: self._add_grad_as_view(p) self._is_collapsed = False @torch.no_grad() def shrink(self) -> None: """ Shrink the buffer to the size of the parameter gradients currently checked in, release the extra memory """ assert ( self.buffer.numel() > 0 ), "Cannot shrink a collapsed bucket, please rebuild" self.buffer = self.buffer.resize_(self._fill).clone() self._fill = 0 for p in self._params: self._add_grad_as_view(p) self._max_size = self._fill @torch.no_grad() def _reattach_grads(self) -> None: """ Given the parameters gradients which have been registered previously, rebuild the whole bucket """ assert len(self._params) > 0 self._fill = 0 for p in self._params: self._add_grad_as_view(p, keep_existing_value=False) @torch.no_grad() def _add_grad_as_view( self, param: torch.Tensor, keep_existing_value: bool = True ) -> None: assert ( self.buffer.numel() > 0 ), "Cannot add a gradient to a collapsed bucket, please rebuild" assert param.dtype == self.buffer.dtype assert param.device == self.buffer.device fill_next = self._fill + param.numel() assert fill_next <= self.buffer.numel() # Copy the current grad value, if any if param.grad is not None: # keep param.grad in place if keep_existing_value: self.buffer[self._fill : fill_next].copy_(param.grad.data.flatten()) param.grad.data = self.buffer[self._fill : fill_next].view_as(param.data) else: param.grad = 
self.buffer[self._fill : fill_next].view_as(param.data) self._fill = fill_next
11,141
31.202312
172
py
null
r-mae-main/pretrain/utils/timer.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import time class Timer: DEFAULT_TIME_FORMAT_DATE_TIME = "%Y/%m/%d %H:%M:%S" DEFAULT_TIME_FORMAT = ["%03dms", "%02ds", "%02dm", "%02dh"] def __init__(self): self.start = time.time() * 1000 def get_current(self): return self.get_time_hhmmss(self.start) def reset(self): self.start = time.time() * 1000 def get_time_since_start(self, format=None): return self.get_time_hhmmss(self.start, format) def unix_time_since_start(self, in_seconds=True): gap = time.time() * 1000 - self.start if in_seconds: gap = gap // 1000 # Prevent 0 division errors if gap == 0: gap = 1 return gap def get_time_hhmmss(self, start=None, end=None, gap=None, format=None): """ Calculates time since `start` and formats as a string. """ if start is None and gap is None: if format is None: format = self.DEFAULT_TIME_FORMAT_DATE_TIME return time.strftime(format) if end is None: end = time.time() * 1000 if gap is None: gap = end - start s, ms = divmod(gap, 1000) m, s = divmod(s, 60) h, m = divmod(m, 60) if format is None: format = self.DEFAULT_TIME_FORMAT items = [ms, s, m, h] assert len(items) == len(format), "Format length should be same as items" time_str = "" for idx, item in enumerate(items): if item != 0: time_str = format[idx] % item + " " + time_str # Means no more time is left. if len(time_str) == 0: time_str = "0ms" return time_str.strip()
1,929
25.438356
81
py
null
r-mae-main/tools/run.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import random import os os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" import torch import pretrain from pretrain.trainer import build_trainer from pretrain.utils.configuration import Configuration from pretrain.utils.distributed import distributed_init, infer_init_method from pretrain.utils.env import set_seed, get_parser def main(configuration, init_distributed=False): config = configuration.get_config() if torch.cuda.is_available(): torch.cuda.set_device(config.device_id) torch.cuda.init() if init_distributed: distributed_init(config) config.training.seed = set_seed(config.training.seed) print("Using seed {}".format(config.training.seed)) trainer = build_trainer(configuration) trainer.load() trainer.train() def distributed_main(device_id, configuration): config = configuration.get_config() config.device_id = device_id if config.distributed.rank is None: config.distributed.rank = config.start_rank + device_id main(configuration, init_distributed=True) def run(args): configuration = Configuration(args) configuration.args = args config = configuration.get_config() config.start_rank = 0 if config.distributed.init_method is None: print("Infering distributed setting...") infer_init_method(config) if config.distributed.init_method is not None: if not config.distributed.no_spawn: config.start_rank = config.distributed.rank config.distributed.rank = None torch.multiprocessing.spawn( fn=distributed_main, args=(configuration,), nprocs=torch.cuda.device_count(), ) else: main(configuration, init_distributed=True) elif config.distributed.world_size > 1: assert config.distributed.world_size <= torch.cuda.device_count() port = random.randint(10000, 20000) config.distributed.init_method = "tcp://localhost:{port}".format(port=port) config.distributed.rank = None torch.multiprocessing.spawn( 
fn=distributed_main, args=(configuration,), nprocs=config.distributed.world_size, ) else: config.device_id = 0 main(configuration) if __name__ == "__main__": parser = get_parser() args = parser.parse_args() run(args)
2,599
28.213483
83
py
null
r-mae-main/tools/preprocess/README.md
# Pre-Process ## Generate FH masks for COCO Datasets As shown in the repository, the datasets are assumed to exist in a directory specified by the environment variable $E2E_DATASETS. In order to make it consistent, we want to generate FH mask proposals and save them to ```fh_train2017``` and ```fh_unlabeled2017``` folders under $E2E_DATASETS. ``` $E2E_DATASETS/ └── coco/ ├── annotations/ ├── instances_train2017.json ├── image_info_unlabeled2017.json ├── instances_val2017.json └── image_info_test-dev2017.json ├── image/ ├── train2017/ ├── fh_train2017/ ├── unlabeled2017/ ├── fh_unlabeled2017/ ├── val2017/ └── test2017/ └── vocabs/ └── coco_categories.txt - the mapping from coco categories to indices. ├── imagenet/ ├── fh_train/ ├── fh_val/ ``` The command for generating ```fh_train2017``` is as following: ```bash python create_fh_mask_for_coco.py --root_path $E2E_DATASETS/coco/image --image_folder train2017 --output_folder fh_train2017 --fh_scales '500,1000,1500' --fh_min_sizes '500,1000,1500' ``` The command for generating ```fh_unlabeled2017``` is as following: ```bash python create_fh_mask_for_coco.py --root_path $E2E_DATASETS/coco/image --image_folder unlabeled2017 --output_folder fh_unlabeled2017 --fh_scales '500,1000,1500' --fh_min_sizes '500,1000,1500' ``` ## Generate FH masks for ImageNet Datasets As shown in the repository, the datasets are assumed to exist in a directory specified by the environment variable $E2E_DATASETS. In order to make it consistent, we want to generate FH mask proposals and save them to ```fh_train``` and ```fh_val``` folders under $E2E_DATASETS. 
``` $E2E_DATASETS/ └── coco/ ├── annotations/ ├── instances_train2017.json ├── image_info_unlabeled2017.json ├── instances_val2017.json ├── sam_instances_train2017.json ├── sam_image_info_unlabeled2017.json ├── sam_instances_val2017.json └── image_info_test-dev2017.json ├── image/ ├── train2017/ ├── unlabeled2017/ ├── val2017/ └── test2017/ └── vocabs/ └── coco_categories.txt - the mapping from coco categories to indices. ├── imagenet/ ├── sam_train/ ├── sam_val/ ``` The command for generating ```fh_train``` is as following: ```bash python create_fh_mask_for_imnet.py --root_path /datasets01/imagenet_full_size/061417 --image_folder train --output_path $HOME/proposal/data/imnet --output_folder fh_train --fh_scales '1000' --fh_min_sizes '1000' ```
2,426
31.797297
211
md
null
r-mae-main/tools/preprocess/__init__.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree.
196
38.4
61
py
null
r-mae-main/tools/preprocess/create_fh_mask_for_coco.py
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import argparse import os import glob from functools import partial from multiprocessing import Pool import numpy as np import skimage.segmentation from PIL import Image def compute_fh_segmentation(image_np, scales, min_sizes): """Compute FSZ segmentation on image and record stats.""" fh_segmentations = [] for scale, min_size in zip(scales, min_sizes): segmented_image = skimage.segmentation.felzenszwalb( image_np, scale=scale, min_size=min_size ) segmented_image = segmented_image.astype(np.dtype("<u1")) fh_segmentations.append(segmented_image) fh_segmentations = np.stack(fh_segmentations) return fh_segmentations def _process_image(filename, fh_scales=[1000], fh_min_sizes=[1000], output_folder=None): image_name = filename.split("/")[-1].replace("jpg", "npy") fh_image_path = os.path.join(output_folder, image_name) image_data = Image.open(filename).convert("RGB") image = np.array(image_data) fh_segmentations = compute_fh_segmentation(image, fh_scales, fh_min_sizes) assert len(image.shape) == 3 height = image.shape[0] width = image.shape[1] assert image.shape[2] == 3 np.save(fh_image_path, fh_segmentations) def main(args): if not os.path.isabs(args.image_folder): image_folder = os.path.join(args.root_path, args.image_folder) else: image_folder = args.image_folder if not os.path.exists(image_folder): raise RuntimeError("image_folder does not exist") if not os.path.isabs(args.output_folder): output_folder = os.path.join(args.root_path, args.output_folder) else: output_folder = args.output_folder assert ( image_folder != output_folder ), "image_folder should be different from output_folder" if not os.path.exists(output_folder): os.makedirs(output_folder) image_files = glob.glob(image_folder + "/*.jpg") fh_scales = [int(n) for n in args.fh_scales.split(",")] fh_min_sizes = [int(n) 
for n in args.fh_min_sizes.split(",")] print("fh_scales:", fh_scales) print("fh_min_scales:", fh_min_sizes) with Pool(args.ntasks) as p: p.map( partial( _process_image, fh_scales=fh_scales, fh_min_sizes=fh_min_sizes, output_folder=output_folder, ), image_files, ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--root_path", type=str, default="./") parser.add_argument("--image_folder", type=str, default="train2017") parser.add_argument("--output_folder", type=str, default="fh_train2017") parser.add_argument("--fh_scales", type=str, default="1000") parser.add_argument("--fh_min_sizes", type=str, default="1000") parser.add_argument("--ntasks", type=int, default=32) args = parser.parse_args() main(args)
3,134
30.989796
88
py
null
r-mae-main/tools/preprocess/create_fh_mask_for_imnet.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Precompute Felzenszwalb-Huttenlocher (FH) segmentations for an ImageNet tree.

For every image under ``--image_folder`` the script writes a ``.npy`` file with
the stacked FH segment-label maps (one per scale) to a mirrored directory
structure under ``--output_folder``.
"""
import argparse
import os
import glob
from functools import partial
from multiprocessing import Pool

import numpy as np
import skimage.segmentation
from torchvision.datasets import ImageFolder
from PIL import Image


def compute_fh_segmentation(image_np, scales, min_sizes):
    """Compute FH segmentations of ``image_np`` at each (scale, min_size) pair.

    Args:
        image_np: H x W x C image array.
        scales: iterable of Felzenszwalb ``scale`` parameters.
        min_sizes: iterable of matching ``min_size`` parameters (zipped with
            ``scales``; extra entries in the longer iterable are ignored).

    Returns:
        uint8 array of shape (len(scales), H, W) of segment labels.
        NOTE: labels are cast to uint8, so an image with more than 256
        segments wraps around -- keep scale/min_size large enough to avoid it.
    """
    fh_segmentations = []
    for scale, min_size in zip(scales, min_sizes):
        segmented_image = skimage.segmentation.felzenszwalb(
            image_np, scale=scale, min_size=min_size
        )
        # "<u1" == uint8: keep the maps compact on disk.
        fh_segmentations.append(segmented_image.astype(np.dtype("<u1")))
    return np.stack(fh_segmentations)


# (Removed dead commented-out _is_png/_is_cmyk helpers that listed ImageNet
# files with unusual encodings; Image.open(...).convert("RGB") below handles
# both PNG-mislabelled and CMYK JPEGs.)


def _process_image(
    filename,
    fh_scales=(1000,),
    fh_min_sizes=(1000,),
    dataset_folder=None,
    output_folder=None,
):
    """Segment one image and save the result mirrored under ``output_folder``.

    The output path is the input path with ``dataset_folder`` swapped for
    ``output_folder`` and the file extension replaced by ``.npy``.
    Defaults are tuples (not lists) to avoid the mutable-default pitfall.
    """
    base, _ = os.path.splitext(filename.replace(dataset_folder, output_folder))
    fh_image_path = base + ".npy"
    os.makedirs(os.path.dirname(fh_image_path), exist_ok=True)

    image_data = Image.open(filename).convert("RGB")
    image = np.array(image_data)
    # convert("RGB") guarantees an H x W x 3 array.
    assert len(image.shape) == 3
    assert image.shape[2] == 3
    fh_segmentations = compute_fh_segmentation(image, fh_scales, fh_min_sizes)
    np.save(fh_image_path, fh_segmentations)


def _get_imnet_structure(dataset_folder):
    """Return the list of image file paths found by torchvision's ImageFolder."""
    imnet = ImageFolder(dataset_folder)
    print(f"Pre-processing {len(imnet)} images from ImageNet")
    return [path for path, _ in imnet.imgs]


def main(args):
    """Resolve folders, list images, and run segmentation in a process pool."""
    if not os.path.isabs(args.image_folder):
        image_folder = os.path.join(args.root_path, args.image_folder)
    else:
        image_folder = args.image_folder
    if not os.path.exists(image_folder):
        raise RuntimeError("image_folder does not exist")

    if not os.path.isabs(args.output_folder):
        output_folder = os.path.join(args.output_path, args.output_folder)
    else:
        output_folder = args.output_folder
    # Writing in place would overwrite source images' directory tree.
    assert (
        image_folder != output_folder
    ), "image_folder should be different from output_folder"
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    image_files = _get_imnet_structure(image_folder)

    fh_scales = [int(n) for n in args.fh_scales.split(",")]
    fh_min_sizes = [int(n) for n in args.fh_min_sizes.split(",")]
    print("fh_scales:", fh_scales)
    print("fh_min_scales:", fh_min_sizes)

    with Pool(args.ntasks) as p:
        p.map(
            partial(
                _process_image,
                fh_scales=fh_scales,
                fh_min_sizes=fh_min_sizes,
                dataset_folder=image_folder,
                output_folder=output_folder,
            ),
            image_files,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--root_path", type=str, default="/datasets01/imagenet_full_size/061417"
    )
    parser.add_argument("--image_folder", type=str, default="train")
    parser.add_argument("--output_path", type=str, default="./")
    parser.add_argument("--output_folder", type=str, default="fh_train")
    parser.add_argument("--fh_scales", type=str, default="1000")
    parser.add_argument("--fh_min_sizes", type=str, default="1000")
    parser.add_argument("--ntasks", type=int, default=64)
    args = parser.parse_args()
    main(args)
5,301
29.825581
91
py
PB-DFS
PB-DFS-master/README.md
# Learning Primal Heuristics for Mixed Integer Programs

## Requirements

#### Python Code Dependencies
1. Python version 3.6.9.
2. Cuda version 10.0 (required by TRIG-GNN)
3. Cmake version >= 3.15
4. python3-venv (installed by running 'sudo apt-get install python3-venv')
5. Two virtual environments that contain different versions of tensorflow, tf1 and tf2. (created by running 'python3 -m venv [env_name]').
6. Latest pip (upgraded by running 'pip3 install -U pip')
7. Install dependencies for the two environments:
    - Activate the environment tf1 (source tf1/bin/activate), run 'pip3 install -r requirements_1.txt'
    - Activate the environment tf2 (source tf2/bin/activate), run 'pip3 install -r requirements_2.txt'

#### C++ Code Dependencies
1. The C++ boost library is required. The library can be downloaded here https://www.boost.org/users/download/.
2. The SCIP solver **version 6.0.1** is required, which can be downloaded at https://www.scipopt.org/index.php#download. An academic license can be applied for at https://www.scipopt.org/index.php#license.
3. After the setup, build the C++ code with cmake to obtain the executable 'CO'.

#### Datasets
Datasets are available at https://drive.google.com/file/d/1HBBdwtQ1fa31inb9wVNcT-Tslu4WAeny/view?usp=sharing.

## Model Training
- To train the GG-GCN, XGBoost, and LR models, activate the tf1 environment and then run the bash script ./model_train.sh.
- To train the TRIG-GCN model, activate the tf2 environment and then run the bash script ./model_train_trig_gcn.sh

## Model Testing
- To test the GG-GCN, XGBoost, and LR models, activate the tf1 environment and then run the bash script './model_test.sh'.
- To test the TRIG-GCN model, activate the tf2 environment and then run the bash script './model_test_trig_gcn.sh'.

The testing results are output to the folder 'ret_model'. These results correspond to the results presented in Table 1 of our paper.
## Evaluation of Heuristics
- Run the bash script './model_predict.sh' to produce solution predictions for the proposed heuristic. Or skip this step and use the provided solution predictions in the dataset.
- Run the bash script ./heur_eval.sh. It takes several hours to obtain the results. Please note that each process should run on a single CPU. The intermediate results are output to the folder ret_solver.
- Once the previous step is finished, run the bash script './calc_stats.sh' (under the tf1 environment) to generate the mean statistics, which are output to the folder 'ret_solver'. These results correspond to the statistics in Table 2 and Table 3 of our paper.

## Generating your own training/test problem instances
- If you need to generate your own training and test instances, you can use the code in the "data_generator" directory. Each problem directory contains two files:
    - gen_inst_*: generate problem instances with different parameters and/or solve the problem instances to optimality.
    - make_sample_*: extract features for problem instances and make training data.

Two python packages are required for data generation:
- gurobipy (for solving training instances to optimality).
- PySCIPOpt (for feature extraction). Note that for the feature extraction code, please install our version of PySCIPOpt included in this project.

## FAQ
- Currently, the ML models are implemented in the Python code, and their predictions are written into the filesystem to be used by the SCIP solver implemented in C++.
- If you need to predict and search interactively, you may want to have a look at "PySCIPOpt", which binds Python and C++ using cython.
- If you have any questions, please contact me at shenyunzhuang@outlook.com. Hopefully, the code can be helpful for your own research.
3,698
60.65
253
md
PB-DFS
PB-DFS-master/calc_stats.sh
#! /bin/bash
# Aggregate per-problem statistics sequentially, one output file per problem.
for prob in mis vc ca ds; do
    python3 stats.py ${prob} > ret_solver/${prob}.txt
done
176
24.285714
41
sh
PB-DFS
PB-DFS-master/heur_eval.sh
#! /bin/bash prefix=build # Combinatorial Auction Problem nohup ${prefix}/CO -p 6 -h 0 & nohup ${prefix}/CO -p 6 -h 2 & nohup ${prefix}/CO -p 6 -h 4 & nohup ${prefix}/CO -p 6 -h 4 -t 50 & nohup ${prefix}/CO -p 6 -h 6 -t 50 & nohup ${prefix}/CO -p 6 -h 7 -t 50 & nohup ${prefix}/CO -p 6 -h 8 -t 50 & nohup ${prefix}/CO -p 6 -h 9 -t 50 & nohup ${prefix}/CO -p 6 -h 10 -t 50 & # Dominant Set Problem nohup ${prefix}/CO -p 5 -h 0 & nohup ${prefix}/CO -p 5 -h 2 & nohup ${prefix}/CO -p 5 -h 3 & nohup ${prefix}/CO -p 5 -h 3 -t 50 & nohup ${prefix}/CO -p 5 -h 5 -t 50 & nohup ${prefix}/CO -p 5 -h 7 -t 50 & nohup ${prefix}/CO -p 5 -h 8 -t 50 & nohup ${prefix}/CO -p 5 -h 9 -t 50 & nohup ${prefix}/CO -p 5 -h 10 -t 50 & # # Vertex Cover Problem nohup ${prefix}/CO -p 4 -h 0 & nohup ${prefix}/CO -p 4 -h 2 & nohup ${prefix}/CO -p 4 -h 4 & nohup ${prefix}/CO -p 4 -h 4 -t 50 & nohup ${prefix}/CO -p 4 -h 6 -t 50 & nohup ${prefix}/CO -p 4 -h 7 -t 50 & nohup ${prefix}/CO -p 4 -h 8 -t 50 & nohup ${prefix}/CO -p 4 -h 9 -t 50 & nohup ${prefix}/CO -p 4 -h 10 -t 50 & # # Maximum Independent Set Problem nohup ${prefix}/CO -p 0 -h 0 & nohup ${prefix}/CO -p 0 -h 2 & nohup ${prefix}/CO -p 0 -h 4 & nohup ${prefix}/CO -p 0 -h 4 -t 50 & nohup ${prefix}/CO -p 0 -h 6 -t 50 & nohup ${prefix}/CO -p 0 -h 7 -t 50 & nohup ${prefix}/CO -p 0 -h 8 -t 50 & nohup ${prefix}/CO -p 0 -h 9 -t 50 & nohup ${prefix}/CO -p 0 -h 10 -t 50 &
1,417
26.803922
39
sh
PB-DFS
PB-DFS-master/model_predict.sh
#! /bin/bash
# Produce solution predictions for all four problems with the GCN model and
# the LR baseline; every job runs detached in the background.
for prob in mis vc ds ca; do
    nohup python3 GG-GCN/pred_gcn.py ${prob} &
done
for prob in mis vc ds ca; do
    nohup python3 GG-GCN/pred_baselines.py ${prob} -m lr &
done
368
32.545455
50
sh
PB-DFS
PB-DFS-master/model_test.sh
#! /bin/bash
# Test the GG-GCN model and the LR / XGBoost baselines on all four problems;
# every job runs detached in the background.
for prob in mis vc ds ca; do
    nohup python3 GG-GCN/test_gcn.py ${prob} &
done
for model in lr xgb; do
    for prob in mis vc ds ca; do
        nohup python3 GG-GCN/test_baselines.py ${prob} -m ${model} &
    done
done
574
34.9375
51
sh
PB-DFS
PB-DFS-master/model_test_trig_gcn.sh
#! /bin/bash
# Test the TRIG-GCN model on every problem type, detached in the background.
for prob in mis vc ds ca; do
    nohup python3 TRIG-GCN/test.py ${prob} &
done
159
21.857143
36
sh
PB-DFS
PB-DFS-master/model_train.sh
#! /bin/bash
# Train the GG-GCN model and the LR / XGBoost baselines on all four problems;
# every job runs detached in the background.
for prob in mis vc ds ca; do
    nohup python3 GG-GCN/train_gcn.py ${prob} &
done
for model in lr xgb; do
    for prob in mis vc ds ca; do
        nohup python3 GG-GCN/train_baselines.py ${prob} -m ${model} &
    done
done
586
35.6875
52
sh
PB-DFS
PB-DFS-master/model_train_trig_gcn.sh
#! /bin/bash
# Train the TRIG-GCN model on every problem type, detached in the background.
for prob in mis vc ds ca; do
    nohup python3 TRIG-GCN/train.py ${prob} &
done
162
26.166667
37
sh
PB-DFS
PB-DFS-master/stats.py
import sys,os
import pandas as pd
import argparse
import shutil
import numpy as np
from scipy.stats.mstats import gmean


def analyse(ret_dir, problem):
    """Aggregate per-instance solver statistics for one problem.

    Reads every ``*.csv`` result file under ``<ret_dir>/<problem>/<method>/``,
    concatenates the rows per method, and prints shifted geometric means of
    the relevant columns.

    Args:
        ret_dir: root folder holding one sub-folder per problem.
        problem: problem name, e.g. 'mis', 'vc', 'ds' or 'ca'.
    """

    def geo_mean(col, mask=None):
        # Shifted geometric mean (shift = 1) so zero-valued entries do not
        # blow up the log; entries selected by `mask` are excluded.
        if mask is None:
            values = col.to_numpy().astype(float)
        else:
            values = col.to_numpy()[~mask].astype(float)
        shift = 1
        log_a = np.log(values + shift)
        return np.abs(np.exp(log_a.mean(axis=0)) - shift)

    prob_dir = f'{ret_dir}/{problem}'
    methods = [sub_dir for sub_dir in os.listdir(prob_dir)
               if os.path.isdir(os.path.join(prob_dir, sub_dir))]
    method_dir_paths = [os.path.join(prob_dir, sub_dir) for sub_dir in methods]
    for method_name, method_dir in zip(methods, method_dir_paths):
        print(method_name)
        fnames = [os.path.join(method_dir, fname)
                  for fname in os.listdir(method_dir) if '.csv' in fname]
        fcontents = []
        for fname in fnames:
            with open(fname, 'r') as f:
                fcontents.append([line.strip() for line in f.readlines()])
        if not fcontents:
            # No result files for this method yet -- nothing to aggregate
            # (previously crashed on fcontents[0]).
            continue
        # Concatenate all files, keeping only the first file's header row.
        content = fcontents[0]
        for fcontent in fcontents[1:]:
            content.extend(fcontent[1:])
        rows = [line.split(',') for line in content]
        # Transpose rows into {column name: tuple of column values}.
        dic = {arr[0]: arr[1:] for arr in zip(*rows)}
        df = pd.DataFrame.from_dict(dic)
        print(f"problem: {problem} method: {method_name}")
        try:
            if method_name in ['ml_dfs1', 'ml_dfs2', 'scip_agg', 'ml_ding', 'scip_def'] or 'exact' in method_name:
                # Full solver runs report an optimality gap and solution stats.
                print('opt_gap', geo_mean(df['opt_gap']))
                print('best_sol_obj', geo_mean(df['best_sol_obj']))
                print('best_sol_time', geo_mean(df['best_sol_time']))
                print('heur_tot_time', geo_mean(df['heur_tot_time']))
            else:
                # Heuristic-only runs: mask instances where no solution was found.
                mask = df['best_heur_sol_obj'].to_numpy() == '0'
                print('nproblems not find solution:', np.sum(mask.astype(int)))
                print('best_heur_sol_obj', geo_mean(df['best_heur_sol_obj'], mask))
                print('best_heur_sol_time', geo_mean(df['best_heur_sol_time'], mask))
                print('heur_ncalls', geo_mean(df['heur_ncalls']))
                print('heur_tot_time', geo_mean(df['heur_tot_time']))
        except Exception as exc:
            # Best effort: a result file may lack some columns. Report the
            # failure instead of silently swallowing every error (previously a
            # bare `except: pass`, which also ate KeyboardInterrupt).
            print(f'skipping incomplete stats: {exc}')
        print("\n")


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'problem',
        choices=['mis', 'vc', 'ds', 'ca'],
    )
    args = parser.parse_args()
    analyse('ret_solver', args.problem)
2,719
35.756757
114
py
PB-DFS
PB-DFS-master/GG-GCN/pred_baselines.py
"""Write per-variable solution-probability files for a trained LR/XGB baseline.

For every evaluation sample of the chosen problem, loads the pickled sklearn /
xgboost model, predicts the probability of each variable belonging to the
optimal solution, and writes one '<index> <probability>' line per variable.
"""
import os
import sys
# sys.path.append( f'{os.path.dirname(os.path.realpath(__file__))}/../')
import importlib
import argparse
import csv
import numpy as np
import time
import pickle
import pathlib
import gzip
import warnings
warnings.filterwarnings("ignore")
from utils import load_flat_samples
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import ExtraTreesRegressor
import xgboost as xgb
import time


def load_data(filepath):
    """Load one flat-feature sample file and z-normalize its features.

    NOTE(review): normalization uses this file's own mean/std rather than the
    training-set statistics -- verify this matches how the model was trained.
    """
    xs, _ = load_flat_samples(filepath, augment_feats=False, normalize_feats=True)
    x_shift = xs.mean(axis=0)
    x_scale = xs.std(axis=0)
    x_scale[x_scale == 0] = 1  # guard constant features against divide-by-zero
    xs = (xs - x_shift) / x_scale
    return xs


if __name__ == '__main__':
    filedir = os.path.dirname(__file__)
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'problem',
        help='the problem to test',
        choices=['mis', 'ds', 'ca', 'vc'],
    )
    parser.add_argument(
        '-m', '--model',
        choices=['lr', 'xgb'],
    )
    args = parser.parse_args()

    running_dir = f'{filedir}/../trained_models/{args.problem}/{args.model}'
    data_path = f'{filedir}/../datasets/{args.problem}/eval_large'
    files = list(pathlib.Path(data_path).glob('sample_*.pkl'))

    # load model
    with open(f"{running_dir}/model.pkl", 'rb') as f:
        model = pickle.load(f)

    t1 = time.time()
    for filepath in files:
        # load data
        xss = load_data(filepath)
        # column 1 = probability of the positive class (variable in solution)
        yhss = model.predict_proba(xss)[:, 1]
        # write probability map to file
        # NOTE(review): the output suffix is hard-coded to 'lr_prob' even when
        # --model xgb is chosen -- confirm the downstream C++ reader expects this.
        print(filepath)
        with open(str(filepath)[:-3] + 'lr_prob', 'w+') as f:
            print('write to ' + str(filepath)[:-3] + 'lr_prob')
            for idx, prob in enumerate(yhss):
                # 1-based variable index, one variable per line
                f.write(f'{idx+1} {prob}\n')
    print(f'average time used: {(time.time() - t1)/len(files)}')
1,843
26.939394
82
py
PB-DFS
PB-DFS-master/GG-GCN/pred_gcn.py
"""Write per-variable solution-probability files for a trained GG-GCN model.

Restores the TF1 checkpoint of the chosen problem's GCN and, for each
evaluation sample, writes one '<varname> <probability>' line per variable.
The TF graph/session are built at module level, as required by the TF1 API.
"""
from __future__ import division
from __future__ import print_function

import sys
import os
sys.path.append( f'{os.path.dirname(os.path.realpath(__file__))}/gcn')
from os.path import expanduser
home = expanduser("~")
import time
import scipy.io as sio
import numpy as np
import scipy.sparse as sp
from copy import deepcopy
import warnings
warnings.simplefilter("ignore")
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
from utils import *
from models import GCN_DEEP_DIVER
import time
import argparse
import pathlib

# Number of input features per node (must match the trained checkpoint).
N_bd = 57

# Settings
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('model', 'gcn_cheby', 'Model string.')  # 'gcn', 'gcn_cheby', 'dense'
flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
flags.DEFINE_integer('epochs', 201, 'Number of epochs to train.')
flags.DEFINE_integer('hidden1', 32, 'Number of units in hidden layer 1.')
flags.DEFINE_integer('diver_num', 1, 'Number of outputs.')
flags.DEFINE_float('dropout', 0, 'Dropout rate (1 - keep probaNUmbility).')
flags.DEFINE_float('weight_decay', 5e-4, 'Weight for L2 loss on embedding matrix.')
flags.DEFINE_integer('early_stopping', 1000, 'Tolerance for early stopping (# of epochs).')
flags.DEFINE_integer('max_degree', 1, 'Maximum Chebyshev polynomial degree.')
flags.DEFINE_integer('num_layer', 20, 'number of layers.')
flags.DEFINE_string('matrix_type', 'sparse', 'Model string.')  # 'sparse', 'dense'

# Some preprocessing: Chebyshev order k needs k+1 support matrices.
num_supports = 1 + FLAGS.max_degree
model_func = GCN_DEEP_DIVER

# use gpu 0 (str(-0) evaluates to "0")
os.environ['CUDA_VISIBLE_DEVICES']=str(-0)

# Initialize session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)


# Define model evaluation function
def evaluate(features, support, placeholders):
    """Run one forward pass; return (elapsed seconds, softmax outputs)."""
    t_test = time.time()
    feed_dict_val = construct_feed_dict4pred(features, support, placeholders)
    outs_val = sess.run([model.outputs_softmax], feed_dict=feed_dict_val)
    return (time.time() - t_test), outs_val[0]


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'problem',
        choices=['mis', 'ds', 'vc', 'ca'],
    )
    args = parser.parse_args()

    feat_dim = 57
    # Define placeholders (sparse vs dense variants selected by matrix_type)
    placeholders = {
        'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)] if FLAGS.matrix_type == 'sparse' else [tf.placeholder(tf.float32) for _ in range(num_supports)],
        'features': tf.sparse_placeholder(tf.float32, shape=(None, feat_dim)) if FLAGS.matrix_type == 'sparse' else tf.placeholder(tf.float32, shape=(None, feat_dim)),  # featureless: #points
        'labels': tf.placeholder(tf.float32, shape=(None, 2)),  # 0: not linked, 1:linked
        'labels_mask': tf.placeholder(tf.int32),
        'dropout': tf.placeholder_with_default(0., shape=()),
        'num_features_nonzero': tf.placeholder(tf.int32)  # helper variable for sparse dropout
    }
    # Create model
    model = model_func(placeholders, input_dim=N_bd, logging=True)
    args = parser.parse_args()  # NOTE(review): redundant second parse_args(); harmless
    filedir = os.path.dirname(__file__)
    model_dir = f'{filedir}/../trained_models/{args.problem}/GG-GCN'
    data_path = f'{filedir}/../datasets/{args.problem}/eval_large'
    # Evaluation set is the first 30 samples, by fixed file-name convention.
    data_files = [f'{data_path}/sample_{i}.pkl' for i in range(30)]

    saver=tf.train.Saver(max_to_keep=1000)
    sess.run(tf.global_variables_initializer())
    ckpt=tf.train.get_checkpoint_state(model_dir)
    print('loaded '+ckpt.model_checkpoint_path)
    saver.restore(sess,ckpt.model_checkpoint_path)

    t1 = time.time()
    ct=0
    for data_file in data_files:
        print('processing data file ' + data_file + '\n')
        data = read_data_general(data_file, lp_feat=True)
        ct += 1
        xs, ys, adj, names, = data
        if FLAGS.matrix_type == 'sparse':
            xs = sparse_to_tuple(sp.lil_matrix(xs))
            support = simple_polynomials(adj, FLAGS.max_degree) if FLAGS.model == 'gcn_cheby' else [preprocess_adj_sparse(adj)]
        else:
            support = simple_polynomials_to_dense(adj, FLAGS.max_degree) if FLAGS.model == 'gcn_cheby' else [preprocess_adj(adj)]
        _, z_out = evaluate(xs, support, placeholders)
        # column 1 = probability of the positive class for each variable
        prob_map = z_out[:, 1].tolist()
        assert(len(names) == len(prob_map))
        # write probability map to file (sample_<i>.pkl -> sample_<i>.prob)
        with open(data_file[:-3] + 'prob', 'w+') as f:
            print('write to ' + data_file[:-3] + 'prob')
            for varname, prob in zip(names, prob_map):
                f.write(f'{varname} {prob}\n')
    print(f'average time used: {(time.time() - t1)/len(data_files)}')
4,646
35.880952
133
py
PB-DFS
PB-DFS-master/GG-GCN/test_baselines.py
"""Evaluate a trained LR/XGB baseline model on the small and medium test sets.

Loads the pickled model, predicts per-variable solution probabilities for up
to 100 sample files per test set, and logs classification metrics.
"""
import os
import sys
# sys.path.append( f'{os.path.dirname(os.path.realpath(__file__))}/../')
import importlib
import argparse
import csv
import numpy as np
import time
import pickle
import pathlib
import gzip
import warnings
warnings.filterwarnings("ignore")
from utils import log, load_samples, calc_classification_metrics, calc_classification_metrics_top
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import ExtraTreesRegressor
import xgboost as xgb


def load_data(prob_folder, logfile):
    """Load up to 100 sample files from ``prob_folder`` and z-normalize features.

    Returns (features, labels, per-instance candidate counts).
    NOTE(review): normalization uses the test set's own mean/std rather than
    training statistics -- verify this matches training-time preprocessing.
    """
    files = list(pathlib.Path(prob_folder).glob('sample_*.pkl'))[:100]
    xs, ys, cands = load_samples(files, logfile)
    x_shift = xs.mean(axis=0)
    x_scale = xs.std(axis=0)
    x_scale[x_scale == 0] = 1  # guard constant features against divide-by-zero
    xs = (xs - x_shift) / x_scale
    return xs, ys, cands


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'problem',
        help='the problem to test',
        choices=['mis', 'vc', 'ds', 'ca'],
    )
    parser.add_argument(
        '-m', '--model',
        choices=['lr', 'xgb'],
    )
    args = parser.parse_args()

    filedir = os.path.dirname(__file__)
    running_dir = f'{filedir}/../trained_models/{args.problem}/{args.model}'
    for data_dir in ['test_small', 'test_medium']:
        data_path = f'{filedir}/../datasets/{args.problem}/{data_dir}'
        os.makedirs(f'{filedir}/../ret_model', exist_ok=True)
        logfile = f'{filedir}/../ret_model/{args.problem}_{data_dir}_{args.model}.txt'

        # load model
        with open(f"{running_dir}/model.pkl", 'rb') as f:
            model = pickle.load(f)

        # load data
        xss, yss, ncands = load_data(data_path, logfile)
        nsamples = len(ncands)
        log(f"test problem: {args.problem} model: {args.model}", logfile)
        log(f'test dataset: <{data_path}>, number of instances: {nsamples}', logfile)
        log(f'log write to: <{logfile}>', logfile)

        # column 1 = probability of the positive class
        yhss = model.predict_proba(xss)[:,1]
        line, stats = calc_classification_metrics(yss, yhss, ncands)
        log(line, logfile)
        line, stats = calc_classification_metrics_top(yss, yhss, ncands)
        log(line, logfile)
2,176
30.550725
97
py
PB-DFS
PB-DFS-master/GG-GCN/test_gcn.py
"""Evaluate a trained GG-GCN checkpoint on the small and medium test sets.

Restores the TF1 checkpoint for the chosen problem, runs a forward pass per
test instance, and logs aggregate classification metrics.
"""
import sys
import os
sys.path.append( f'{os.path.dirname(os.path.realpath(__file__))}/gcn')
# sys.path.append( f'{os.path.dirname(os.path.realpath(__file__))}/../')
import warnings
warnings.filterwarnings('ignore')
from os.path import expanduser
import time
import scipy.io as sio
import numpy as np
from copy import deepcopy
import scipy.sparse as sp
import sklearn.metrics as metrics
from utils import *
from models import GCN_DEEP_DIVER
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
import argparse
import pathlib


# Define model evaluation function
def evaluate(features, support, labels, placeholders, masks=None):
    """Run one forward pass; return [loss, accuracy, softmax outputs].

    Uses the module-level ``sess`` and ``model`` created in __main__.
    """
    t_test = time.time()
    feed_dict_val = construct_feed_dict(features, support, labels, placeholders, masks)
    outs_val = sess.run([model.loss, model.accuracy, model.outputs_softmax], feed_dict=feed_dict_val)
    return outs_val


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'problem',
        choices=['mis', 'vc', 'ds', 'ca'],
    )
    args = parser.parse_args()
    filedir = os.path.dirname(__file__)
    model_dir = f'{filedir}/../trained_models/{args.problem}/GG-GCN'

    # Settings (must match the flags the checkpoint was trained with)
    feat_dim = 57
    flags = tf.app.flags
    FLAGS = flags.FLAGS
    flags.DEFINE_string('model', 'gcn_cheby', 'Model string.')  # 'gcn', 'gcn_cheby', 'dense'
    flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
    flags.DEFINE_integer('epochs', 101, 'Number of epochs to train.')
    flags.DEFINE_integer('hidden1', 32, 'Number of units in hidden layer 1.')
    flags.DEFINE_integer('diver_num', 1, 'Number of outputs.')
    flags.DEFINE_float('dropout', 0, 'Dropout rate (1 - keep probaNUmbility).')
    flags.DEFINE_float('weight_decay', 5e-4, 'Weight for L2 loss on embedding matrix.')
    flags.DEFINE_integer('early_stopping', 1000, 'Tolerance for early stopping (# of epochs).')
    flags.DEFINE_integer('max_degree', 1, 'Maximum Chebyshev polynomial degree.')
    flags.DEFINE_integer('num_layer', 20, 'number of layers.')
    flags.DEFINE_string('matrix_type', 'dense', 'Model string.')  # 'sparse', 'dense'
    # Chebyshev order k needs k+1 support matrices.
    num_supports = 1 + FLAGS.max_degree

    ####### model #######
    model_func = GCN_DEEP_DIVER
    placeholders = {
        'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)] if FLAGS.matrix_type == 'sparse' else [tf.placeholder(tf.float32) for _ in range(num_supports)],
        'features': tf.sparse_placeholder(tf.float32, shape=(None, feat_dim)) if FLAGS.matrix_type == 'sparse' else tf.placeholder(tf.float32, shape=(None, feat_dim)),  # featureless: #points
        'labels': tf.placeholder(tf.float32, shape=(None, 2)),  # 0: not linked, 1:linked
        'labels_mask': tf.placeholder(tf.int32),
        'dropout': tf.placeholder_with_default(0., shape=()),
        'num_features_nonzero': tf.placeholder(tf.int32)  # helper variable for sparse dropout
    }
    model = model_func(placeholders, input_dim=feat_dim, logging=True)

    # Select the per-problem sample reader. NOTE(review): 'tsp'/'vrp'/'sc'
    # branches are unreachable given the argparse choices above.
    if args.problem == 'tsp':
        read_data = read_data_tsp
    elif args.problem == 'vrp':
        read_data = read_data_vrp
    elif args.problem in ['mis', 'ds', 'vc', 'ca']:
        read_data = read_data_general
    elif args.problem == 'sc':
        read_data = read_data_sc
    else:
        raise Exception('unknown problem!')

    ####### session #######
    config = tf.ConfigProto()
    tf.device('/cpu:0')
    sess = tf.Session(config=config)
    saver=tf.train.Saver(max_to_keep=1000)
    sess.run(tf.global_variables_initializer())
    ckpt=tf.train.get_checkpoint_state(model_dir)
    print('loaded '+ckpt.model_checkpoint_path)
    saver.restore(sess,ckpt.model_checkpoint_path)

    ####### Train model #######
    ####### data #######
    for data_dir in ["test_small", 'test_medium']:
        data_path = f'{filedir}/../datasets/{args.problem}/{data_dir}'
        data_files = list(pathlib.Path(data_path).glob('sample_*.pkl'))
        # Cap evaluation at 100 instances per test set.
        data_files = [str(data_file) for data_file in data_files][:100]
        os.makedirs(f'{filedir}/../ret_model', exist_ok=True)
        logfile = f'{filedir}/../ret_model/{args.problem}_{data_dir}_GG_GCN.txt'
        nsamples = len(data_files)
        log(f'test dataset: <{data_path}>, number of instances: {nsamples}', logfile)
        log(f'log write to: <{logfile}>', logfile)

        t1 = time.time()
        ct=0
        yss = []
        yhss = []
        ncands = []
        for idd in range(nsamples):
            ct+=1
            data = read_data(data_files[idd], lp_feat = True)
            # NOTE(review): ct is incremented a second time here (twice per
            # iteration); it is not used afterwards, so this is harmless.
            ct += 1
            xs, ys, adj, names = data
            if FLAGS.matrix_type == 'sparse':
                xs = sparse_to_tuple(sp.lil_matrix(xs))
                support = simple_polynomials(adj, FLAGS.max_degree) if FLAGS.model == 'gcn_cheby' else [preprocess_adj_sparse(adj)]
            else:
                support = simple_polynomials_to_dense(adj, FLAGS.max_degree) if FLAGS.model == 'gcn_cheby' else [preprocess_adj(adj)]
            # testing step
            outs = evaluate(xs, support, ys, placeholders, None)
            probs = outs[2]
            # collect predictions/labels to calculate precision/recall/F1 later
            y_pred = probs[:,1]
            y_true = np.argmax(ys,axis=1)
            ncand = len(y_true)
            yss.append(y_true); yhss.append(y_pred); ncands.append(ncand)
        t2 = time.time()
        log(f'time per instance: {(t2-t1)/nsamples}', logfile=logfile)
        yss = np.concatenate(yss, axis=None)
        yhss = np.concatenate(yhss, axis=None)
        ncands = np.concatenate(ncands, axis=None)
        line, stats = calc_classification_metrics(yss, yhss, ncands)
        log(line, logfile)
        line, stats = calc_classification_metrics_top(yss, yhss, ncands)
        log(line, logfile)
        sys.stdout.flush()
5,899
38.072848
134
py
PB-DFS
PB-DFS-master/GG-GCN/train_baselines.py
"""Train an LR or XGBoost baseline model on flat per-variable features.

Loads up to 500 training sample files for the chosen problem, fits the model,
and pickles it into the per-problem trained_models directory.
"""
import pickle
import sys
import os
# sys.path.append( f'{os.path.dirname(os.path.realpath(__file__))}/../')
import argparse
import numpy as np
import pathlib
import shutil
import warnings
warnings.filterwarnings("ignore")
from utils import log, load_samples, calc_classification_metrics
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import ExtraTreesRegressor
import xgboost as xgb


def load_data(prob_folder, train_size=500):
    """Load and z-normalize the first ``train_size`` shuffled sample files.

    Args:
        prob_folder: folder containing ``sample_*.pkl`` training files.
        train_size: number of files to use.

    Returns:
        (features, labels) arrays ready for model.fit.

    NOTE: relies on the module-level ``logfile`` defined in __main__.
    """
    # Fixed seed so the train subset is reproducible across runs.
    rng = np.random.RandomState(0)
    files = rng.permutation(list(pathlib.Path(prob_folder).glob('sample_*.pkl')))

    def load(begin, to):
        # Data loading
        xs, ys, cands = load_samples(files[begin:to], logfile)
        log(f"  {xs.shape[0]} training samples", logfile)
        # Data normalization: zero mean / unit variance per feature,
        # guarding constant features (std == 0) against divide-by-zero.
        x_shift = xs.mean(axis=0)
        x_scale = xs.std(axis=0)
        x_scale[x_scale == 0] = 1
        xs = (xs - x_shift) / x_scale
        return xs, ys, cands

    train_x, train_y, _ = load(begin=0, to=train_size)
    return train_x, train_y


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'problem',
        help='MILP instance type to process.',
        choices=['mis', 'ca', 'ds', 'vc'],
    )
    parser.add_argument(
        '-m', '--model',
        help='Model to be trained.',
        type=str,
        choices=['lr', 'xgb'],
    )
    args = parser.parse_args()

    home = os.path.expanduser("~")
    filedir = os.path.dirname(__file__)
    data_path = f'{filedir}/../datasets/{args.problem}/train'
    running_dir = f'{filedir}/../trained_models/{args.problem}/{args.model}'
    os.makedirs(running_dir, exist_ok=True)
    logfile = f"{running_dir}/log.txt"

    if args.model in ['lr', 'xgb']:
        log(f"Logfile for {args.model} model on {args.problem}", logfile)
        # Fix: these two lines previously omitted the logfile argument, so
        # they only reached stdout while every other message was logged.
        log(f"training files from {data_path}", logfile)
        log(f"model saves to {running_dir}", logfile)
        train_xs, train_ys = load_data(data_path)
        if args.model == 'lr':
            model = LogisticRegression()
        elif args.model == 'xgb':
            model = xgb.XGBClassifier()

        ###### train session ######
        log("Starting training", logfile)
        model.fit(train_xs, train_ys)
        with open(f"{running_dir}/model.pkl", "wb") as file:
            pickle.dump(model, file)
        log("Done training", logfile)
2,368
30.171053
81
py
PB-DFS
PB-DFS-master/GG-GCN/train_gcn.py
"""Train the GG-GCN model (TF1) on per-variable graph samples of one problem.

Reads up to 500 training sample files, trains for FLAGS.epochs epochs, and
checkpoints the model whenever the mean epoch loss improves.
"""
import sys
import os
sys.path.append( f'{os.path.dirname(os.path.realpath(__file__))}/gcn')
from os.path import expanduser
import time
import scipy.io as sio
import numpy as np
from copy import deepcopy
import scipy.sparse as sp
import sklearn.metrics as metrics
from utils import *
from models import GCN_DEEP_DIVER
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
import warnings
import pathlib
warnings.simplefilter("ignore")
import gzip
import pickle
import argparse


def log(line, f=None, stdout=True):
    """Print ``line`` (optionally) and append it to file handle ``f`` if given."""
    if stdout:
        print(line)
        sys.stdout.flush()
    if f is not None:
        f.write(f'{line}\n')
        f.flush()


def set_train_params():
    """Register all TF1 training flags once and return the FLAGS object."""
    flags = tf.app.flags
    FLAGS = flags.FLAGS
    flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
    flags.DEFINE_integer('epochs', 101, 'Number of epochs to train.')
    flags.DEFINE_integer('hidden1', 32, 'Number of units in hidden layer 1.')
    flags.DEFINE_integer('diver_num', 1, 'Number of outputs.')
    flags.DEFINE_float('dropout', 0, 'Dropout rate (1 - keep probaNUmbility).')
    flags.DEFINE_float('weight_decay', 5e-4, 'Weight for L2 loss on embedding matrix.')
    flags.DEFINE_integer('early_stopping', 1000, 'Tolerance for early stopping (# of epochs).')
    flags.DEFINE_integer('max_degree', 1, 'Maximum Chebyshev polynomial degree.')
    flags.DEFINE_integer('num_supports', 2, 'number of supports')
    flags.DEFINE_integer('num_layer', 20, 'number of layers.')
    flags.DEFINE_string('model', 'gcn_cheby', 'Model string.')  # 'gcn', 'gcn_cheby', 'dense'
    # dense matrix can enjoey tf parallelism
    # but if the problem have a graph that is too large to fit into memory, we need to use sparse matrix
    flags.DEFINE_string('matrix_type', 'dense', 'Model string.')  # 'sparse', 'dense'
    return FLAGS


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'problem',
        choices=['mis', 'vc', 'ds', 'ca'],
    )
    args = parser.parse_args()
    filedir = os.path.dirname(__file__)
    save_model_to = f'{filedir}/../trained_models/{args.problem}/GG-GCN'
    os.makedirs(save_model_to, exist_ok=True)

    ####### data #######
    data_path = f"{filedir}/../datasets/{args.problem}/train"
    data_files = list(pathlib.Path(data_path).glob('sample_*.pkl'))
    # Cap the training set at 500 sample files.
    data_files = [str(data_file) for data_file in data_files][:500]
    # Select the per-problem sample reader. NOTE(review): 'tsp'/'vrp'/'sc'
    # branches are unreachable given the argparse choices above.
    if args.problem == 'tsp':
        read_data = read_data_tsp
    elif args.problem == 'vrp':
        read_data = read_data_vrp
    elif args.problem in ['mis', 'ds', 'vc', 'ca']:
        read_data = read_data_general
    elif args.problem == 'sc':
        read_data = read_data_sc
    else:
        raise Exception('unknown problem!')

    ####### model #######
    feat_dim = 57  # per-node input feature dimension
    FLAGS = set_train_params()
    placeholders = {
        'support': [tf.sparse_placeholder(tf.float32) for _ in range(FLAGS.num_supports)] if FLAGS.matrix_type == 'sparse' else [tf.placeholder(tf.float32) for _ in range(FLAGS.num_supports)],
        'features': tf.sparse_placeholder(tf.float32, shape=(None, feat_dim)) if FLAGS.matrix_type == 'sparse' else tf.placeholder(tf.float32, shape=(None, feat_dim)),  # featureless: #points
        'labels': tf.placeholder(tf.float32, shape=(None, 2)),  # 0: not linked, 1:linked
        'labels_mask': tf.placeholder(tf.int32),
        'dropout': tf.placeholder_with_default(0., shape=()),
        'num_features_nonzero': tf.placeholder(tf.int32)  # helper variable for sparse dropout
    }
    model = GCN_DEEP_DIVER(placeholders, input_dim=feat_dim, logging=True)

    ####### session #######
    config = tf.ConfigProto()
    tf.device('/cpu:0')
    sess = tf.Session(config=config)
    saver=tf.train.Saver(max_to_keep=1000)
    sess.run(tf.global_variables_initializer())

    ####### Train model #######
    log_file=open(f"{save_model_to}/score.txt",'w+')
    samples_per_epoch = len(data_files)
    # Emit a progress line roughly 10 times per epoch.
    samples_per_log = samples_per_epoch // 10
    print(f'dataset size: {len(data_files)}, samples_per_log: {samples_per_log}')
    best_loss = 1e9
    for epoch in range(FLAGS.epochs):
        ct = 0
        t1 = time.time()
        all_loss = []
        all_acc = []
        for idd in range(samples_per_epoch):
            t2 = time.time()
            data = read_data(data_files[idd], lp_feat = True)
            ct += 1
            xs, ys, adj, names = data
            if FLAGS.matrix_type == 'sparse':
                xs = sparse_to_tuple(sp.lil_matrix(xs))
                support = simple_polynomials(adj, FLAGS.max_degree) if FLAGS.model == 'gcn_cheby' else [preprocess_adj_sparse(adj)]
            else:
                support = simple_polynomials_to_dense(adj, FLAGS.max_degree) if FLAGS.model == 'gcn_cheby' else [preprocess_adj(adj)]
            # Construct feed dictionary
            feed_dict = construct_feed_dict(xs, support, ys, placeholders, None)
            feed_dict.update({placeholders['dropout']: FLAGS.dropout})
            # Training step
            outs = sess.run([model.opt_op, model.loss, model.accuracy, model.outputs], feed_dict=feed_dict)
            all_loss.append(outs[1])
            all_acc.append(outs[2])
            if ct % samples_per_log == 0:
                # Running means over the most recent samples_per_log samples.
                line = '{} {} loss={:.4f} acc={:.4f} time_sample={:.1f}'.format(
                    epoch + 1, ct, np.mean(all_loss[-samples_per_log:]),
                    np.mean(all_acc[-samples_per_log:]), time.time() - t2)
                log(line, log_file)
        loss_cur_epoch = np.mean(all_loss)
        line = '[{} finished!] loss={:.4f} acc={:.4f} time_epoch={:.1f}'.format(
            epoch + 1, loss_cur_epoch, np.mean(all_acc), time.time() - t1)
        log(line, log_file)
        # Checkpoint only when the mean epoch loss improves.
        if loss_cur_epoch < best_loss:
            log(f'best model currently, save to {save_model_to}', log_file)
            saver.save(sess,f"{save_model_to}/model.ckpt")
            best_loss = loss_cur_epoch
        sys.stdout.flush()
    log_file.flush(); log_file.close()
    print("Optimization Finished!")
6,169
37.322981
134
py
PB-DFS
PB-DFS-master/GG-GCN/utils.py
import numpy as np import pickle import networkx as nx import scipy.sparse as sp from scipy.sparse.linalg.eigen.arpack import eigsh, eigs import sys import datetime import scipy.io as sio import sklearn.metrics as sk_metrics import gzip import math # import pyscipopt as scip import time def parse_index_file(filename): """Parse index file.""" index = [] for line in open(filename): index.append(int(line.strip())) return index def sample_mask(idx, l): """Create mask.""" mask = np.zeros(l) mask[idx] = 1 return np.array(mask, dtype=np.bool) def sparse_to_tuple(sparse_mx): """Convert sparse matrix to tuple representation.""" def to_tuple(mx): if not sp.isspmatrix_coo(mx): mx = mx.tocoo() coords = np.vstack((mx.row, mx.col)).transpose() values = mx.data shape = mx.shape return coords, values, shape if isinstance(sparse_mx, list): for i in range(len(sparse_mx)): sparse_mx[i] = to_tuple(sparse_mx[i]) else: sparse_mx = to_tuple(sparse_mx) return sparse_mx def preprocess_features(features): """Row-normalize feature matrix and convert to tuple representation""" rowsum = np.array(features.sum(1)) r_inv = np.power(rowsum, -1).flatten() r_inv[np.isinf(r_inv)] = 0. r_mat_inv = sp.diags(r_inv) features = r_mat_inv.dot(features) # features = features/features.shape[1] return sparse_to_tuple(features) def normalize_adj(adj): """Symmetrically normalize adjacency matrix.""" adj = sp.coo_matrix(adj) rowsum = np.array(adj.sum(1)) with np.errstate(divide='ignore'): d_inv_sqrt = np.power(rowsum, -0.5).flatten() d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0. 
d_mat_inv_sqrt = sp.diags(d_inv_sqrt) return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo() def construct_feed_dict(features, support, labels, placeholders, masks=None): """Construct feed dictionary.""" feed_dict = dict() feed_dict.update({placeholders['labels']: labels}) feed_dict.update({placeholders['features']: features}) feed_dict.update({placeholders['support'][i]: support[i] for i in range(len(support))}) if masks is None: masks = np.ones([len(labels)], dtype=np.int32) feed_dict.update({placeholders['labels_mask']: masks}) feed_dict.update({placeholders['num_features_nonzero']: features[1].shape}) return feed_dict def construct_feed_dict4pred(features, support, placeholders): """Construct feed dictionary.""" feed_dict = dict() feed_dict.update({placeholders['features']: features}) feed_dict.update({placeholders['support'][i]: support[i] for i in range(len(support))}) feed_dict.update({placeholders['num_features_nonzero']: features[1].shape}) return feed_dict def chebyshev_polynomials(adj, k): """Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation).""" # print("Calculating Chebyshev polynomials up to order {}...".format(k)) adj_normalized = normalize_adj(adj) laplacian = sp.eye(adj.shape[0]) - adj_normalized largest_eigval, _ = eigs(laplacian, 1, which='LR', maxiter=5000) scaled_laplacian = (2. 
/ largest_eigval[0]) * laplacian - sp.eye(adj.shape[0]) t_k = list() t_k.append(sp.eye(adj.shape[0])) t_k.append(scaled_laplacian) def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap): s_lap = sp.csr_matrix(scaled_lap, copy=True) return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two for i in range(2, k+1): t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian)) return sparse_to_tuple(t_k) def preprocess_adj(adj): """Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation.""" # adj_normalized = normalize_adj(adj) adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0])) # adj_normalized = sp.coo_matrix(adj) return sparse_to_tuple(adj_normalized) def simple_polynomials(adj, k): """Calculate polynomials up to order k. Return a list of sparse matrices (tuple representation).""" # print("Calculating polynomials up to order {}...".format(k)) adj_normalized = normalize_adj(adj) laplacian = sp.eye(adj.shape[0]) - adj_normalized t_k = list() t_k.append(sp.eye(adj.shape[0])) t_k.append(laplacian) for i in range(2, k+1): t_new = t_k[-1]*laplacian t_k.append(t_new) return sparse_to_tuple(t_k) def simple_polynomials_to_dense(adj, k): """Calculate polynomials up to order k. 
Return a list of sparse matrices (tuple representation).""" adj_normalized = normalize_adj(adj) laplacian = sp.eye(adj.shape[0]) - adj_normalized t_k = list() t_k.append(sp.eye(adj.shape[0])) t_k.append(laplacian) for i in range(2, k+1): t_new = t_k[-1]*laplacian t_k.append(t_new) for i in range(len(t_k)): t_k[i] = t_k[i].toarray() return t_k def log(line, logfile=None): line = f'[{datetime.datetime.now()}] {line}' if line is not None else "\n\n\n\n" print(line) if logfile is not None: with open(logfile, mode='a') as f: print(line, file=f) sys.stdout.flush() def calc_classification_metrics_top(y_true, y_pred, ncands=None, top_percentages=[0.80, 0.90 ,0.95, 1], threshold=0.5): def calc_single(y_true_single, y_pred_single): test_yhats_roundings = (y_pred_single > threshold).astype(int) acc = np.sum(y_true_single == test_yhats_roundings) / len(y_true_single) precision, recall, f1_score, _ = sk_metrics.precision_recall_fscore_support( y_true_single, test_yhats_roundings, labels=[0,1]) return acc, f1_score[0], f1_score[1], precision[0], precision[1] if ncands is None: ret = {} y_pred_confidence = np.where( y_pred < threshold, y_pred, 1 - y_pred) sortedargs = np.argsort(y_pred_confidence) for cur_percentage in top_percentages: num_vars_choosen = int(len(y_true)*cur_percentage) sortedargs_cur_percentage = sortedargs[:num_vars_choosen] stats = calc_single(y_true[sortedargs_cur_percentage], y_pred[sortedargs_cur_percentage]) ret[cur_percentage] = [[stats[i]] for i in range(len(stats))] else: ncands = np.insert(ncands, 0,0) slices = np.cumsum(ncands) mean_stats = {key: [] for key in top_percentages} for i in range(len(slices)-1): begin = slices[i]; end = slices[i+1] y_true_sinlge = y_true[begin:end]; y_pred_single = y_pred[begin:end] y_pred_confidence = np.where( y_pred_single < threshold, y_pred_single, 1 - y_pred_single) sortedargs = np.argsort(y_pred_confidence) for cur_percentage in top_percentages: num_vars_choosen = int(len(y_true_sinlge)*cur_percentage) # 
print(num_vars_choosen, len(y_true)) sortedargs_cur_percentage = sortedargs[:num_vars_choosen] # print(len(sortedargs_cur_percentage), len(sortedargs)) stats = calc_single(y_true_sinlge[sortedargs_cur_percentage], y_pred_single[sortedargs_cur_percentage]) mean_stats[cur_percentage].append(stats) ret = {key: [] for key in top_percentages} for cur_percentage in top_percentages: for stat in zip(*mean_stats[cur_percentage]): ret[cur_percentage].append((np.mean(stat) * 100, np.std(stat) * 100)) line = "" for p in top_percentages: line += f'percentage vars: {p} mean - acc: {ret[p][0][0]:0.2f}, f1_0: {ret[p][1][0]:0.2f}, f1_1: {ret[p][2][0]:0.2f}, p_0: {ret[p][3][0]:0.2f}, p1: {ret[p][4][0]:0.2f}\n' line += f'percentage vars: {p} std - acc: {ret[p][0][1]:0.2f}, f1_0: {ret[p][1][1]:0.2f}, f1_1: {ret[p][2][1]:0.2f}, p_0: {ret[p][3][1]:0.2f}, p1: {ret[p][4][1]:0.2f}\n\n' return line, ret def calc_classification_metrics(y_true, y_pred, ncands=None, threshold=0.5): def calc_single(y_true_single, y_pred_single): test_yhats_roundings = (y_pred_single > threshold).astype(int) acc = np.sum(y_true_single == test_yhats_roundings) / len(y_true_single) precision, recall, f1_score, _ = sk_metrics.precision_recall_fscore_support( y_true_single, test_yhats_roundings, labels=[0,1]) avg_precision = sk_metrics.average_precision_score(y_true_single, y_pred_single) return acc, np.nan_to_num(avg_precision), precision[0], recall[0], f1_score[0], precision[1], recall[1], f1_score[1] if ncands is None: mean_stats = calc_single(y_true, y_pred) mean_stats = [[mean_stats[i]] for i in range(len(mean_stats))] else: ncands = np.insert(ncands, 0,0) slices = np.cumsum(ncands) metricss = []; APs = [] for i in range(len(slices)-1): begin = slices[i]; end = slices[i+1] metrics = calc_single(y_true[begin:end], y_pred[begin:end]) metricss.append(metrics) APs.append(str(float(metrics[1]))) mean_stats = [] for stat in zip(*metricss): mean_stats.append(np.mean(stat) * 100) line = f'acc: {mean_stats[0]:0.2f}, ap: 
{mean_stats[1]:0.2f}\ \np_0: {mean_stats[2]:0.2f}, r_0: {mean_stats[3]:0.2f}, f1_0: {mean_stats[4]:0.2f},\ \np_1: {mean_stats[5]:0.2f}, r_1: {mean_stats[6]:0.2f}, f1_1: {mean_stats[7]:0.2f}' # line += f"\n APs: {','.join(APs)}" return line, mean_stats # def init_scip_params(model, seed, heuristics=False, presolving=False, separating=False, conflict=True): # seed = seed % 2147483648 # SCIP seed range # # set up randomization # model.setBoolParam('randomization/permutevars', False) # model.setIntParam('randomization/permutationseed', seed) # model.setIntParam('randomization/randomseedshift', seed) # # separation only at root node # model.setIntParam('separating/maxrounds', 0) # # if asked, disable presolving # if not presolving: # model.setIntParam('presolving/maxrounds', 0) # model.setIntParam('presolving/maxrestarts', 0) # # if asked, disable separating (cuts) # if not separating: # model.setIntParam('separating/maxroundsroot', 0) # # if asked, disable conflict analysis (more cuts) # if not conflict: # model.setBoolParam('conflict/enable', False) # # if asked, disable primal heuristics # if not heuristics: # model.setHeuristics(scip.SCIP_PARAMSETTING.OFF) # def extract_ding_variable_features(model): # """ # Extract features following Khalil et al. (2016) Learning to Branch in Mixed Integer Programming. # Parameters # ---------- # model : pyscipopt.scip.Model # The current model. # candidates : list of pyscipopt.scip.Variable's # A list of variables for which to compute the variable features. # root_buffer : dict # A buffer to avoid re-extracting redundant root node information (None to deactivate buffering). # Returns # ------- # variable_features : 2D np.ndarray # The features associated with the candidate variables. 
# """ # col_state = model.getDingStateCols() # col_feature_names = sorted(col_state) # for index, name in enumerate(col_feature_names): # if name == 'col_coefs': # break # col_state = np.stack([col_state[feature_name] for feature_name in col_feature_names], axis=1) # row_state = model.getDingStateRows() # row_feature_names = sorted(row_state) # row_state = np.stack([row_state[feature_name] for feature_name in row_feature_names], axis=1) # vc, vo, co = model.getDingStateLPgraph() # return (col_state, row_state, vc, vo, co), index def load_samples(filenames, logfile=None): x, y, ncands = [], [], [] total_ncands = 0 for i, filename in enumerate(filenames): # try: cand_x, cand_y = load_flat_samples(filename, augment_feats=False, normalize_feats=True) # except: # continue x.append(cand_x) y.append(cand_y) ncands.append(cand_x.shape[0]) total_ncands += ncands[-1] if (i + 1) % 100 == 0: log(f" {i+1}/{len(filenames)} files processed ({total_ncands} candidate variables)", logfile) x = np.concatenate(x) y = np.concatenate(y) ncands = np.asarray(ncands) return x, y, ncands def load_flat_samples(filename, augment_feats=False, normalize_feats=True): with gzip.open(filename, 'rb') as file: sample = pickle.load(file) ding_state, col_1s, col_cands, obj_idx = sample['data'] col_state, row_state, cv, vo, co = ding_state col_cands = np.array(col_cands) col_1s = np.array(col_1s) col_state = np.nan_to_num(col_state[col_cands]) ys = np.isin(col_cands, col_1s) * 1 # feature preprocessing col_state = preprocess_variable_features(col_state, interaction_augmentation=augment_feats, normalization=normalize_feats) col_state = np.nan_to_num(col_state) return col_state, ys def preprocess_variable_features(features, interaction_augmentation, normalization): """ Features preprocessing following Khalil et al. (2016) Learning to Branch in Mixed Integer Programming. Parameters ---------- features : 2D np.ndarray The candidate variable features to preprocess. 
interaction_augmentation : bool Whether to augment features with 2-degree interactions (useful for linear models such as SVMs). normalization : bool Wether to normalize features in [0, 1] (i.e., query-based normalization). Returns ------- variable_features : 2D np.ndarray The preprocessed variable features. """ # 2-degree polynomial feature augmentation if interaction_augmentation: interactions = ( np.expand_dims(features, axis=-1) * \ np.expand_dims(features, axis=-2) ).reshape((features.shape[0], -1)) features = np.concatenate([features, interactions], axis=1) # query-based normalization in [0, 1] if normalization: features -= features.min(axis=0, keepdims=True) max_val = features.max(axis=0, keepdims=True) max_val[max_val == 0] = 1 features /= max_val return features def construct_adj_from_lp(cv): adj = np.zeros([cv.shape[1], cv.shape[1]], dtype=float) for cons_arr in cv: indices = np.nonzero(cons_arr)[0] for i in range(len(indices)): for j in range(i): adj[indices[i],indices[j]] = 1 adj[indices[j],indices[i]] = 1 return adj def read_data_general(lp_path, lp_feat=True): with gzip.open(lp_path, 'rb') as f: sample = pickle.load(f) state_ding, label_cols, cand_cols, obj_idx = sample['data'] col_state, row_state, cv, vo, co = state_ding if lp_feat: col_state = np.nan_to_num(col_state) else: col_state = np.repeat(np.expand_dims(col_state[:, obj_idx], axis=1), 32, axis=1) label_cols = np.squeeze(label_cols) cand_cols = np.squeeze(cand_cols) # graph structure cv = np.take(cv, cand_cols, axis=1) adj = construct_adj_from_lp(cv) # features xs = col_state[cand_cols] xs_min = np.min(xs, axis=0, keepdims=True) xs_max = np.max(xs, axis=0, keepdims=True) xs_delta = xs_max - xs_min xs = (xs - xs_min)/xs_delta xs[np.isnan(xs)] = 1 # labels ys = np.expand_dims(np.isin(cand_cols, label_cols), axis=1) ys = np.concatenate([1-ys, ys],axis=1) mapping = {val:key[3:] for key, val in sample['mapping'].items()} names = [ mapping[cand_col] for cand_col in cand_cols] return xs, ys, adj, 
names def read_data_tsp(lp_path, lp_feat=True): with gzip.open(lp_path, 'rb') as f: sample = pickle.load(f) state_ding, label_cols, cands, obj_idx = sample['data'] col_state, row_state, cv, vo, co = state_ding cv = np.take(cv, cand_cols, axis=1) if lp_feat: col_state = np.nan_to_num(col_state) else: col_state = np.repeat(np.expand_dims(col_state[:, obj_idx], axis=1), 32, axis=1) label_cols = [int(label_col) for label_col in label_cols] label_cols = set(label_cols) name_index_mapping = sample['mapping'] try: ncity = int(lp_path.split('/')[-2].split('_')[1]) except: ncity = len(label_cols) gdata = sio.loadmat(f'/home/ubuntu/storage1/instances/tsp/dual_graph/{ncity}.dual', appendmat=False) orderednames = gdata['names']; orderednames = [name.strip() for name in orderednames]; adj = gdata['adj']; adj = adj + adj.transpose() xs = []; ys = [] for name in orderednames: col_idx = int(name_index_mapping[f't_x{name}']) xs.append(col_state[col_idx]) ys.append(1 if col_idx in label_cols else 0) xs = np.array(xs); ys = np.array(ys) xs_min = np.min(xs, axis=0, keepdims=True) xs_max = np.max(xs, axis=0, keepdims=True) xs_delta = xs_max - xs_min xs_delta[xs_delta==0] = 1 xs = (xs - xs_min)/xs_delta xs[np.isnan(xs)] = 0 return xs, ys, adj, None def read_data_sc(lp_path, lp_feat=True): tokens = lp_path.split('/') g_path = '/'.join(tokens[:-1]) + '/' + tokens[-1][7:-3] + 'sc' with gzip.open(lp_path, 'rb') as f: sample = pickle.load(f) state_ding, label_cols, cands, obj_idx = sample['data'] col_state, row_state, cv, vo, co = state_ding cv = np.take(cv, cand_cols, axis=1) if lp_feat: col_state = np.nan_to_num(col_state) else: col_state = np.repeat(np.expand_dims(col_state[:, obj_idx], axis=1), 32, axis=1) label_cols = [int(label_col) for label_col in label_cols] label_cols = set(label_cols) name_index_mapping = sample['mapping'] gdata = sio.loadmat(g_path, appendmat=False) orderednames = gdata['names']; orderednames = [name.strip() for name in orderednames]; adj = gdata['adj2']; adj = 
adj + adj.transpose() xs = []; ys = [] for name in orderednames: trans_name = f't_{name}' if 'x' in name else f't_x{name}' if 'y' in trans_name: continue assert(trans_name in name_index_mapping) col_idx = int(name_index_mapping[trans_name]) xs.append(col_state[col_idx]) ys.append(1 if col_idx in label_cols else 0) xs = np.array(xs); ys = np.array(ys) xs_min = np.min(xs, axis=0, keepdims=True) xs_max = np.max(xs, axis=0, keepdims=True) xs_delta = xs_max - xs_min xs_delta[xs_delta==0] = 1 xs = (xs - xs_min)/xs_delta return xs, ys, adj, None def read_data_vrp(lp_path, lp_feat=True): with gzip.open(lp_path, 'rb') as f: sample = pickle.load(f) state_ding, label_cols, cands, obj_idx = sample['data'] col_state, row_state, cv, vo, co = state_ding cv = np.take(cv, cand_cols, axis=1) if lp_feat: col_state = np.nan_to_num(col_state) else: col_state = np.repeat(np.expand_dims(col_state[:, obj_idx], axis=1), 32, axis=1) label_cols = [int(label_col) for label_col in label_cols] label_cols = set(label_cols) name_index_mapping = sample['mapping'] try: ncity = int(lp_path.split('/')[-2].split('_')[1]) except: ncity = int(np.ceil(np.sqrt(len(name_index_mapping)))) gdata = sio.loadmat(f'/home/ubuntu/storage1/instances/vrp/dual_graph/{ncity}.dual', appendmat=False) orderednames = gdata['names']; orderednames = [name.strip() for name in orderednames]; adj = gdata['adj']; adj = adj + adj.transpose() assert(ncity == gdata['nnodes']) xs = []; ys = []; for name in orderednames: col_idx = int(name_index_mapping[name]) xs.append(col_state[col_idx]) ys.append(1 if col_idx in label_cols else 0) xs = np.array(xs); ys = np.array(ys) xs_min = np.min(xs, axis=0, keepdims=True) xs_max = np.max(xs, axis=0, keepdims=True) xs_delta = xs_max - xs_min xs_delta[xs_delta==0] = 1 xs = (xs - xs_min)/xs_delta return xs, ys, adj, None def read_data_mis0(lp_path, lp_feat=True): tokens = lp_path.split('/') g_path = '/'.join(tokens[:-1]) + '/' + tokens[-1][7:-3] + 'adj' with gzip.open(lp_path, 'rb') as f: sample 
= pickle.load(f) state_ding, label_cols, cands, obj_idx = sample['data'] col_state, _, _, _, _ = state_ding if lp_feat: col_state = np.nan_to_num(col_state) else: col_state = np.repeat(np.expand_dims(col_state[:, obj_idx], axis=1), 32, axis=1) label_cols = [int(label_col) for label_col in label_cols] label_cols = set(label_cols) name_index_mapping = sample['mapping'] gdata = sio.loadmat(g_path, appendmat=False) adj = gdata['adj']; adj = adj + adj.transpose() orderednames = [str(i) for i in range(1, len(adj)+1)] xs = []; ys = [] for name in orderednames: trans_name = f't_x{name}' col_idx = int(name_index_mapping[trans_name]) xs.append(col_state[col_idx]) ys.append(1 if col_idx in label_cols else 0) xs = np.array(xs); ys = np.array(ys) xs_min = np.min(xs, axis=0, keepdims=True) xs_max = np.max(xs, axis=0, keepdims=True) xs_delta = xs_max - xs_min xs = (xs - xs_min)/xs_delta xs[np.isnan(xs)] = 1 # labels ys = np.concatenate([1-np.expand_dims(ys, axis=1), np.expand_dims(ys, axis=1)],axis=1) return xs, ys, adj, None
21,805
36.022071
179
py
PB-DFS
PB-DFS-master/GG-GCN/gcn/inits.py
import tensorflow.compat.v1 as tf import numpy as np def uniform(shape, scale=0.05, name=None): """Uniform init.""" initial = tf.random_uniform(shape, minval=-scale, maxval=scale, dtype=tf.float32) return tf.Variable(initial, name=name) def glorot(shape, name=None): """Glorot & Bengio (AISTATS 2010) init.""" init_range = np.sqrt(6.0/(shape[0]+shape[1])) initial = tf.random_uniform(shape, minval=-init_range, maxval=init_range, dtype=tf.float32) return tf.Variable(initial, name=name) def zeros(shape, name=None): """All zeros.""" initial = tf.zeros(shape, dtype=tf.float32) return tf.Variable(initial, name=name) def ones(shape, name=None): """All ones.""" initial = tf.ones(shape, dtype=tf.float32) return tf.Variable(initial, name=name)
801
28.703704
95
py
PB-DFS
PB-DFS-master/GG-GCN/gcn/layers.py
from inits import * import tensorflow.compat.v1 as tf flags = tf.app.flags FLAGS = flags.FLAGS # global unique layer ID dictionary for layer name assignment _LAYER_UIDS = {} def get_layer_uid(layer_name=''): """Helper function, assigns unique layer IDs.""" if layer_name not in _LAYER_UIDS: _LAYER_UIDS[layer_name] = 1 return 1 else: _LAYER_UIDS[layer_name] += 1 return _LAYER_UIDS[layer_name] def sparse_dropout(x, keep_prob, noise_shape): """Dropout for sparse tensors.""" random_tensor = keep_prob random_tensor += tf.random_uniform(noise_shape) dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool) pre_out = tf.sparse_retain(x, dropout_mask) return pre_out * (1./keep_prob) def dot(x, y, sparse=False): """Wrapper for tf.matmul (sparse vs dense).""" if sparse: res = tf.sparse_tensor_dense_matmul(x, y) else: res = tf.matmul(x, y) return res class Layer(object): """Base layer class. Defines basic API for all layer objects. Implementation inspired by keras (http://keras.io). # Properties name: String, defines the variable scope of the layer. logging: Boolean, switches Tensorflow histogram logging on/off # Methods _call(inputs): Defines computation graph of layer (i.e. 
takes input, returns output) __call__(inputs): Wrapper for _call() _log_vars(): Log all variables """ def __init__(self, **kwargs): allowed_kwargs = {'name', 'logging'} for kwarg in kwargs.keys(): assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg name = kwargs.get('name') if not name: layer = self.__class__.__name__.lower() name = layer + '_' + str(get_layer_uid(layer)) self.name = name self.vars = {} logging = kwargs.get('logging', False) self.logging = logging self.sparse_inputs = False def _call(self, inputs): return inputs def __call__(self, inputs): with tf.name_scope(self.name): if self.logging and not self.sparse_inputs: tf.summary.histogram(self.name + '/inputs', inputs) outputs = self._call(inputs) if self.logging: tf.summary.histogram(self.name + '/outputs', outputs) return outputs def _log_vars(self): for var in self.vars: tf.summary.histogram(self.name + '/vars/' + var, self.vars[var]) class Dense(Layer): """Dense layer.""" def __init__(self, input_dim, output_dim, placeholders, dropout=0., sparse_inputs=False, act=tf.nn.relu, bias=False, featureless=False, **kwargs): super(Dense, self).__init__(**kwargs) if dropout: self.dropout = placeholders['dropout'] else: self.dropout = 0. 
self.act = act self.sparse_inputs = sparse_inputs self.featureless = featureless self.bias = bias # helper variable for sparse dropout self.num_features_nonzero = placeholders['num_features_nonzero'] with tf.variable_scope(self.name + '_vars'): self.vars['weights'] = glorot([input_dim, output_dim], name='weights') if self.bias: self.vars['bias'] = zeros([output_dim], name='bias') if self.logging: self._log_vars() def _call(self, inputs): x = inputs # dropout if self.sparse_inputs: x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero) else: x = tf.nn.dropout(x, 1-self.dropout) # transform output = dot(x, self.vars['weights'], sparse=self.sparse_inputs) # bias if self.bias: output += self.vars['bias'] return self.act(output) class GraphConvolution(Layer): """Graph convolution layer.""" def __init__(self, input_dim, output_dim, placeholders, dropout=0., sparse_inputs=False, act=tf.nn.relu, bias=False, featureless=False, **kwargs): super(GraphConvolution, self).__init__(**kwargs) if dropout: self.dropout = placeholders['dropout'] else: self.dropout = 0. 
self.act = act self.support = placeholders['support'] self.sparse_inputs = sparse_inputs self.featureless = featureless self.bias = bias # helper variable for sparse dropout self.num_features_nonzero = placeholders['num_features_nonzero'] with tf.variable_scope(self.name + '_vars'): for i in range(len(self.support)): self.vars['weights_' + str(i)] = glorot([input_dim, output_dim], name='weights_' + str(i)) if self.bias: self.vars['bias'] = zeros([output_dim], name='bias') if self.logging: self._log_vars() def _call(self, inputs): x = inputs # dropout if self.sparse_inputs: x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero) else: x = tf.nn.dropout(x, 1-self.dropout) # convolve supports = list() for i in range(len(self.support)): if not self.featureless: pre_sup = dot(x, self.vars['weights_' + str(i)], sparse=self.sparse_inputs) else: pre_sup = self.vars['weights_' + str(i)] support = dot(self.support[i], pre_sup, sparse=FLAGS.matrix_type == 'sparse') supports.append(support) output = tf.add_n(supports) # bias if self.bias: output += self.vars['bias'] return self.act(output)
5,917
30.312169
92
py
PB-DFS
PB-DFS-master/GG-GCN/gcn/metrics.py
import tensorflow.compat.v1 as tf def my_softmax_cross_entropy(preds, labels): """Softmax cross-entropy loss with masking.""" loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels) return tf.reduce_mean(loss) def my_accuracy(preds, labels): """Accuracy with masking.""" correct_prediction = tf.equal(tf.argmax(preds, 1), tf.argmax(labels, 1)) accuracy_all = tf.cast(correct_prediction, tf.float32) return tf.reduce_mean(accuracy_all) def masked_softmax_cross_entropy(preds, labels, mask): """Softmax cross-entropy loss with masking.""" loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels) mask = tf.cast(mask, dtype=tf.float32) mask /= tf.reduce_mean(mask) loss *= mask return tf.reduce_mean(loss) def masked_accuracy(preds, labels, mask): """Accuracy with masking.""" correct_prediction = tf.equal(tf.argmax(preds, 1), tf.argmax(labels, 1)) accuracy_all = tf.cast(correct_prediction, tf.float32) mask = tf.cast(mask, dtype=tf.float32) mask /= tf.reduce_mean(mask) accuracy_all *= mask return tf.reduce_mean(accuracy_all)
1,153
33.969697
79
py
PB-DFS
PB-DFS-master/GG-GCN/gcn/models.py
from layers import * from metrics import * from layers import _LAYER_UIDS flags = tf.app.flags FLAGS = flags.FLAGS def lrelu(x): return tf.maximum(x*0.2,x) class Model(object): def __init__(self, **kwargs): allowed_kwargs = {'name', 'logging'} for kwarg in kwargs.keys(): assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg name = kwargs.get('name') if not name: name = self.__class__.__name__.lower() self.name = name logging = kwargs.get('logging', False) self.logging = logging self.vars = {} self.placeholders = {} self.layers = [] self.activations = [] self.inputs = None self.outputs = None self.outputs_softmax = None self.pred = None self.output_dim = None self.loss = 0 self.accuracy = 0 self.optimizer = None self.opt_op = None def _build(self): raise NotImplementedError def build(self): """ Wrapper for _build() """ with tf.variable_scope(self.name): self._build() # Build sequential layer model layer_id = 0 self.activations.append(self.inputs) for layer in self.layers: if self.name == 'gcn_deep' and layer_id % 2 == 0 and layer_id > 0 and layer_id < len(self.layers)-1: hidden = layer(self.activations[-1]) self.activations.append(tf.nn.relu(hidden+self.activations[-2])) layer_id = layer_id + 1 elif layer_id < len(self.layers)-1: hidden = tf.nn.relu(layer(self.activations[-1])) self.activations.append(hidden) layer_id = layer_id + 1 else: hidden = layer(self.activations[-1]) self.activations.append(hidden) layer_id = layer_id + 1 self.outputs = self.activations[-1] if self.name != 'gcn_dqn': self.outputs_softmax = tf.nn.softmax(self.outputs[:,0:2]) if self.name == 'gcn_deep_diver': for out_id in range(1, FLAGS.diver_num): self.outputs_softmax = tf.concat([self.outputs_softmax, tf.nn.softmax(self.outputs[:,self.output_dim*out_id:self.output_dim*(out_id+1)])], axis=1) if self.name == 'gcn_dqn': self.pred = tf.argmax(self.outputs) # Store model variables for easy access variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name) 
self.vars = {var.name: var for var in variables} # Build metrics self._loss() self._accuracy() self.opt_op = self.optimizer.minimize(self.loss) def predict(self): pass def _loss(self): raise NotImplementedError def _loss_reg(self): raise NotImplementedError def _accuracy(self): raise NotImplementedError def save(self, sess=None): if not sess: raise AttributeError("TensorFlow session not provided.") saver = tf.train.Saver(self.vars) save_path = saver.save(sess, "tmp/%s.ckpt" % self.name) print("Model saved in file: %s" % save_path) def load(self, sess=None): if not sess: raise AttributeError("TensorFlow session not provided.") saver = tf.train.Saver(self.vars) save_path = "tmp/%s.ckpt" % self.name saver.restore(sess, save_path) print("Model restored from file: %s" % save_path) class MLP(Model): def __init__(self, placeholders, input_dim, **kwargs): super(MLP, self).__init__(**kwargs) self.inputs = placeholders['features'] self.input_dim = input_dim # self.input_dim = self.inputs.get_shape().as_list()[1] # To be supported in future Tensorflow versions self.output_dim = placeholders['labels'].get_shape().as_list()[1] self.placeholders = placeholders self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate) self.build() def _loss(self): # Weight decay loss for var in self.layers[0].vars.values(): self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var) # Cross entropy error self.loss += my_softmax_cross_entropy(self.outputs, self.placeholders['labels']) def _loss_reg(self): # Weight decay loss for var in self.layers[0].vars.values(): self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var) # regression loss self.loss += tf.reduce_mean(tf.square(self.outputs-self.placeholders['labels'])) def _accuracy(self): self.accuracy = my_accuracy(self.outputs, self.placeholders['labels']) def _build(self): self.layers.append(Dense(input_dim=self.input_dim, output_dim=FLAGS.hidden1, placeholders=self.placeholders, act=tf.nn.relu, dropout=True, sparse_inputs=True, 
logging=self.logging)) self.layers.append(Dense(input_dim=FLAGS.hidden1, output_dim=self.output_dim, placeholders=self.placeholders, act=lambda x: x, dropout=True, logging=self.logging)) def predict(self): return tf.nn.softmax(self.outputs) class GCN_DEEP_DIVER(Model): def __init__(self, placeholders, input_dim, **kwargs): super(GCN_DEEP_DIVER, self).__init__(**kwargs) self.inputs = placeholders['features'] self.input_dim = input_dim # self.input_dim = self.inputs.get_shape().as_list()[1] # To be supported in future Tensorflow versions self.output_dim = placeholders['labels'].get_shape().as_list()[1] self.placeholders = placeholders self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate) self.build() def _loss(self): # Weight decay loss for var in self.layers[0].vars.values(): self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var) # 32 outputs diver_loss = masked_softmax_cross_entropy(self.outputs[:,0:self.output_dim], self.placeholders['labels'], self.placeholders['labels_mask']) for i in range(1,FLAGS.diver_num): diver_loss = tf.reduce_min([diver_loss, masked_softmax_cross_entropy(self.outputs[:, 2*i:2*i + self.output_dim], self.placeholders['labels'], self.placeholders['labels_mask'])]) self.loss += diver_loss def _loss_reg(self): # Weight decay loss for var in self.layers[0].vars.values(): self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var) # regression loss self.loss += tf.reduce_mean(tf.abs(self.outputs-self.placeholders['labels'])) def _accuracy(self): # 32 outputs acc = masked_accuracy(self.outputs[:,0:self.output_dim], self.placeholders['labels'], self.placeholders['labels_mask']) # acc = my_accuracy(self.outputs[:,0:self.output_dim], self.placeholders['labels']) for i in range(1,FLAGS.diver_num): acc = tf.reduce_max([acc, masked_accuracy(self.outputs[:,2*i:2*i+self.output_dim], self.placeholders['labels'], self.placeholders['labels_mask'])]) self.accuracy = acc def _build(self): _LAYER_UIDS['graphconvolution'] = 0 
self.layers.append(GraphConvolution(input_dim=self.input_dim, output_dim=FLAGS.hidden1, placeholders=self.placeholders, act=tf.nn.relu, dropout=True, sparse_inputs= FLAGS.matrix_type == 'sparse' , logging=self.logging)) for i in range(FLAGS.num_layer-2): self.layers.append(GraphConvolution(input_dim=FLAGS.hidden1, output_dim=FLAGS.hidden1, placeholders=self.placeholders, act=tf.nn.relu, dropout=True, logging=self.logging)) self.layers.append(GraphConvolution(input_dim=FLAGS.hidden1, output_dim=2*FLAGS.diver_num, placeholders=self.placeholders, act=lambda x: x, dropout=True, logging=self.logging)) def predict(self): return tf.nn.softmax(self.outputs)
9,002
38.660793
189
py
PB-DFS
PB-DFS-master/PySCIPOpt/.landscape.yml
doc-warnings: true test-warnings: true ignore-paths: - examples/unfinished - src/pyscipopt/__init__.py python-targets: - 2 - 3
135
14.111111
29
yml
PB-DFS
PB-DFS-master/PySCIPOpt/.travis.yml
os: linux dist: xenial sudo: true language: python matrix: include: - python: 2.7 - python: 3.5 - python: 3.6 - python: 3.7 env: TRAVIS_BUILD_DOCS=$TRAVIS_TAG addons: apt: packages: - doxygen - graphviz env: global: - secure: "CML6W6GUTFcZxZavt2x9vT3pUeg9jA2tber8Wl+34zBI9QxXel8PxKlw896OI2jnGPMvL7ANRElklE6/WNaVvogjgZXKcXnqaGKPoPlJNsGenHw0poxjqrrs9VnuX2XU56h53ESsOZ9mq53oFNaimS6fbtIAs7xlS27nY0KJk42ZEicaS2E9cbzH/XqOIEzdIZCHy8NjViMXFCspE9fhndv04T3ic2opXmGDy2veoZ/oF2zbOcz0e9XLEjTs0yXz5qir8AGEnRS4lwI6hb3jkMBOxbNKIPx63gsno3xUHjXYjiwb4iQV9eybhY0csli/5br8isIX81vlg5xeoEfvSy6sZvZ8rErx3Eos5OdCu4vnxqtMZvpb+2pCVQU2IldZTl9B3/lv4ehZhKurF3l89rnqKW14eh4p2eT6WQ2s0tjPd5NuPdow4hT5x7WWSeS1395exlJJGgv1bt4ASM+KNFfA/4CK4TjszZJ7xLttiJ7nOgo/8KtSd/dM0PfBWeeBQxi/0YgCyD781ieL009ZUPwvKf4B0RJ8pPaSDePypKHvzmcm7UGgT86zz1FnCxsIEmHFJQGazXbdBmi0OvPAo1fCrAdMXipppf+ckAotckWjOLIK6IN9RlrF/E9YFll/SfSiXi6EdB0P+T6m8iBqNEToJbUiRqKhMznr7A4+JLs=" - VERSION=6.0.1 notifications: email: false before_install: - wget http://scip.zib.de/download/release/SCIPOptSuite-$VERSION-Linux.deb - sudo apt-get update && sudo apt install -y ./SCIPOptSuite-$VERSION-Linux.deb install: - pip install cython networkx pytest-cov - python setup.py install script: py.test #--cov after_success: # Generate the docs only if version tag present and we can use secure variables (no PR) - if [[ $TRAVIS_BUILD_DOCS =~ v[0-9].[0-9].[0-9]* && $TRAVIS_PULL_REQUEST = "false" ]]; then echo "setup ssh config"; openssl aes-256-cbc -K $encrypted_c65e740238ca_key -iv $encrypted_c65e740238ca_iv -in travis.enc -out ~/.ssh/travis -d; chmod 600 ~/.ssh/travis; printf "%s\n" \ "Host github.com" \ " IdentityFile ~/.ssh/travis" \ " LogLevel ERROR" >> ~/.ssh/config; echo "generate documentation"; ./generate-docs.sh || travis_terminate 1; else echo $TRAVIS_BUILD_DOCS $TRAVIS_PULL_REQUEST; fi # - codecov deploy: provider: pypi user: pyscipopt password: secure: 
ePfiLq2vOJC4O5zYFChHk5wa+quza+m/lsCGPfKXBVpIyb7TvzTHaFDBYtYVZK7710LIKRIcHxvmJPELyKeK1l9QyLxi1x/jOHwk0VbKpf3f5fJjjPaYfXgAUKMMeUplrdhvzU6cgUMrsGhlUE1EIHxc97x5xOa2xlv3lis3j5yjdFUbP6e7MBCEb6c8yU88CclPU2BeHDATzOtMZp0dsyzFTjP9DI7fWbEvOfGy66e5uB/Cjk07mguBZVAUFoukkwKD0KUgBB7RlrAdE61uFVHG8nE5q+G9SZIhQcwULxPLz4v18osJf1aea0g/grZnnrgdG5F24rCA6dSBlvUhnA6aDJXDSgd/dCJ7FV/w3okwhsn18esnycBeM+i3O1pleHsmkq+yFCf2wTbZlm68Hxu+WSirKjie5AtzlSOHa82jQkTjkZI1AHE2syiShnWGvaWpPtoecJKr7aHdFylbJpKwyGvptsObRerWJH5GARXnOoH+FVJ4LrAKcahwCdx0CB63HU2s5p4JgYqAlQV+hFD6yfTDvcKO97/u+8BKlLe9Jnq+fSefEJW1ndOi4mJQ4xGG93sOCub13UCo6zGLvnFlO7R7vwHJeSMDL9Z0Jqmpo2sLhKmaYMr6PhyWvWpXauZOmLTaJEutcnJZ2cjXTU2VuULWwhNYzgXLu9rnVB0= on: tags: true skip_existing: true
2,813
43.666667
700
yml
PB-DFS
PB-DFS-master/PySCIPOpt/CONTRIBUTING.md
Contributing to PySCIPOpt ========================= Code contributions are very welcome and should comply to a few rules: 0. Read Design principles of PySCIPOpt\_. 1. Compatibility with both Python-2 and Python-3. 2. All tests defined in the Continuous Integration setup need to pass: - [.travis.yml](../../.travis.yml) - [appveyor.yml](../../appveyor.yml) 3. New features should be covered by tests *and* examples. Please extend [tests](tests) and [examples](examples). Tests uses pytest and examples are meant to be accessible for PySCIPOpt newcomers (even advanced examples). 4. New code should be documented in the same style as the rest of the code. 5. New code should be [pep8-compliant](https://www.python.org/dev/peps/pep-0008/). Help yourself with the [style guide checker](https://pypi.org/project/pep8/). 6. Before implementing a new PySCIPOpt feature, check whether the feature exists in SCIP. If so, implement it as a pure wrapper, mimicking SCIP whenever possible. If the new feature does not exist in SCIP but it is close to an existing one, consider if implementing that way is substantially convenient (e.g. Pythonic). If it does something completely different, you are welcome to pull your request and discuss the implementation. 7. PySCIPOpt uses [semantic versioning](https://semver.org/). Version number increase only happens on master and must be tagged to build a new PyPI release. For general reference, we suggest: - [PySCIPOpt README](README.md); - [SCIP documentation](http://scip.zib.de/doc/html/); - [SCIP mailing list](https://listserv.zib.de/mailman/listinfo/scip/) which can be easily searched with search engines (e.g. [Google](http://www.google.com/#q=site:listserv.zib.de%2Fpipermail%2Fscip)); - [open and closed PySCIPOpt issues](https://github.com/SCIP-Interfaces/PySCIPOpt/issues?utf8=%E2%9C%93&q=is%3Aissue); - [SCIP/PySCIPOpt Stack Exchange](https://stackoverflow.com/questions/tagged/scip). If you find this contributing guide unclear, please open an issue! 
:) Design principles of PySCIPOpt ============================== PySCIPOpt is meant to be a fast-prototyping interface of the pure SCIP C API. By design, we distinguish different functions in PySCIPOPT: - pure wrapping functions of SCIP; - convenience functions. **PySCIPOpt wrappers of SCIP functions** should act: - with an expected behavior - and parameters, returns, attributes, ... - as close to SCIP as possible - without *"breaking"* Python and the purpose for what the language it is meant. Ideally speaking, we want every SCIP function to be wrapped in PySCIPOpt. **Convenience functions** are additional, non-detrimental features meant to help prototyping the Python way. Since these functions are not in SCIP, we wish to limit them to prevent difference in features between SCIP and PySCIPOPT, which are always difficult to maintain. A few convenience functions survive in PySCIPOpt when keeping them is doubtless beneficial. Admittedly, *there is a middle ground where functions are not completely wrappers or just convenient*. That is the case, for instance, of fundamental `Model`{.sourceCode} methods like `addCons`{.sourceCode} or `writeProblem`{.sourceCode}. We want to leave their development to negotiation.
3,362
42.115385
93
md
PB-DFS
PB-DFS-master/PySCIPOpt/INSTALL.md
Requirements ============ PySCIPOpt requires a working installation of the [SCIP Optimization Suite](http://scip.zib.de/). If SCIP is not installed in the global path you need to specify the install location using the environment variable `SCIPOPTDIR`: - on Linux and OS X:\ `export SCIPOPTDIR=<path_to_install_dir>` - on Windows:\ `set SCIPOPTDIR=<path_to_install_dir>` `SCIPOPTDIR` needs to have a subdirectory `lib` that contains the library, e.g. `libscip.so` (for Linux) and a subdirectory `include` that contains the corresponding header files: SCIPOPTDIR > lib > libscip.so ... > include > scip > lpi > nlpi > ... If you are not using the installer packages, you need to [install the SCIP Optimization Suite using CMake](http://scip.zib.de/doc/html/CMAKE.php). The Makefile system is not compatible with PySCIPOpt! On Windows it is highly recommended to use the [Anaconda Python Platform](https://www.anaconda.com/). Installation from PyPI ====================== `pip install pyscipopt` On Windows you may need to ensure that the `scip` library can be found at runtime by adjusting your `PATH` environment variable: - on Windows: `set PATH=%PATH%;%SCIPOPTDIR%\bin` On Linux and OS X this is encoded in the generated PySCIPOpt library and therefore not necessary. Building everything from source =============================== PySCIPOpt requires [Cython](http://cython.org/), at least version 0.21 (`pip install cython`). Furthermore, you need to have the Python development files installed on your system (error message "Python.h not found"): sudo apt-get install python-dev # for Python 2, on Linux sudo apt-get install python3-dev # for Python 3, on Linux After setting up `SCIPOPTDIR` as specified above, please run python setup.py install You may use the additional options `--user` or `--prefix=<custom-python-path>`, to build the interface locally. 
Building with debug information =============================== To use debug information in PySCIPOpt you need to build it like this: python setup.py install --debug Be aware that you will need the **debug library** of the SCIP Optimization Suite for this to work (`cmake .. -DCMAKE_BUILD_TYPE=Debug`). Testing new installation ======================== To test your brand-new installation of PySCIPOpt you need [pytest](https://pytest.org/) on your system. Here is the [installation procedure](https://docs.pytest.org/en/latest/getting-started.html). Tests can be run in the `PySCIPOpt` directory with: : py.test # all the available tests py.test tests/test_name.py # a specific tests/test_name.py (Unix) Ideally, the status of your tests must be passed or skipped. Running tests with pytest creates the `__pycache__` directory in `tests` and, occasionally, a `model` file in the working directory. They can be removed harmlessly. Common errors ============= - readline: `libreadline.so.6: undefined symbol: PC` This is a readline/ncurses compatibility issue that can be fixed like this (when using `conda`): conda install -c conda-forge readline=6.2
3,161
29.699029
77
md
PB-DFS
PB-DFS-master/PySCIPOpt/README.md
PySCIPOpt ========= This project provides an interface from Python to the [SCIP Optimization Suite](http://scip.zib.de). [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/PySCIPOpt/Lobby) [![PySCIPOpt on PyPI](https://img.shields.io/pypi/v/pyscipopt.svg)](https://pypi.python.org/pypi/pyscipopt) [![TravisCI Status](https://travis-ci.org/SCIP-Interfaces/PySCIPOpt.svg?branch=master)](https://travis-ci.org/SCIP-Interfaces/PySCIPOpt) [![AppVeyor Status](https://ci.appveyor.com/api/projects/status/fsa896vkl8be79j9?svg=true)](https://ci.appveyor.com/project/mattmilten/pyscipopt) Documentation ------------- Please consult the [online documentation](http://scip-interfaces.github.io/PySCIPOpt/docs/html) or use the `help()` function directly in Python or `?` in IPython/Jupyter. Installation ------------ See [INSTALL.md](INSTALL.md) for instructions. Building and solving a model ---------------------------- There are several [examples](examples/finished) and [tutorials](examples/tutorial). These display some functionality of the interface and can serve as an entry point for writing more complex code. You might also want to have a look at this article about PySCIPOpt: <https://opus4.kobv.de/opus4-zib/frontdoor/index/index/docId/6045>. The following steps are always required when using the interface: 1) It is necessary to import python-scip in your code. This is achieved by including the line ``` {.sourceCode .python} from pyscipopt import Model ``` 2) Create a solver instance. ``` {.sourceCode .python} model = Model("Example") # model name is optional ``` 3) Access the methods in the `scip.pyx` file using the solver/model instance `model`, e.g.: ``` {.sourceCode .python} x = model.addVar("x") y = model.addVar("y", vtype="INTEGER") model.setObjective(x + y) model.addCons(2*x - y*y >= 0) model.optimize() ``` Writing new plugins ------------------- The Python interface can be used to define custom plugins to extend the functionality of SCIP. 
You may write a pricer, heuristic or even constraint handler using pure Python code and SCIP can call their methods using the callback system. Every available plugin has a base class that you need to extend, overwriting the predefined but empty callbacks. Please see `test_pricer.py` and `test_heur.py` for two simple examples. Please notice that in most cases one needs to use a `dictionary` to specify the return values needed by SCIP. Extending the interface ----------------------- PySCIPOpt already covers many of the SCIP callable library methods. You may also extend it to increase the functionality of this interface. The following will provide some directions on how this can be achieved: The two most important files in PySCIPOpt are the `scip.pxd` and `scip.pyx`. These two files specify the public functions of SCIP that can be accessed from your python code. To make PySCIPOpt aware of the public functions you would like to access, you must add them to `scip.pxd`. There are two things that must be done in order to properly add the functions: 1) Ensure any `enum`s, `struct`s or SCIP variable types are included in `scip.pxd` <br> 2) Add the prototype of the public function you wish to access to `scip.pxd` After following the previous two steps, it is then possible to create functions in python that reference the SCIP public functions included in `scip.pxd`. This is achieved by modifying the `scip.pyx` file to add the functionality you require. We are always happy to accept pull request containing patches or extensions! Please have a look at our [contribution guidelines](CONTRIBUTING.md). Gotchas ------- ### Ranged constraints While ranged constraints of the form ``` {.sourceCode .} lhs <= expression <= rhs ``` are supported, the Python syntax for [chained comparisons](https://docs.python.org/3.5/reference/expressions.html#comparisons) can't be hijacked with operator overloading. 
Instead, parenthesis must be used, e.g., ``` {.sourceCode .} lhs <= (expression <= rhs) ``` Alternatively, you may call `model.chgRhs(cons, newrhs)` or `model.chgLhs(cons, newlhs)` after the single-sided constraint has been created. ### Variable objects You can't use `Variable` objects as elements of `set`s or as keys of `dict`s. They are not hashable and comparable. The issue is that comparisons such as `x == y` will be interpreted as linear constraints, since `Variable`s are also `Expr` objects. ### Dual values While PySCIPOpt supports access to the dual values of a solution, there are some limitations involved: - Can only be used when presolving and propagation is disabled to ensure that the LP solver - which is providing the dual information - actually solves the unmodified problem. - Heuristics should also be disabled to avoid that the problem is solved before the LP solver is called. - There should be no bound constraints, i.e., constraints with only one variable. This can cause incorrect values as explained in [\#136](https://github.com/SCIP-Interfaces/PySCIPOpt/issues/136) Therefore, you should use the following settings when trying to work with dual information: ``` {.sourceCode .python} model.setPresolve(pyscipopt.SCIP_PARAMSETTING.OFF) model.setHeuristics(pyscipopt.SCIP_PARAMSETTING.OFF) model.disablePropagation() ```
5,331
33.623377
170
md
PB-DFS
PB-DFS-master/PySCIPOpt/appveyor.yml
version: '{build}' environment: SCIPOPTDIR: C:\scipoptdir pypipw: secure: HEa8MAJyyfSv33snyK3Gleflk9SIfZBxbnTiS39hlWM= optipw: secure: mi/mkS8vYK1Yza0A1FB4/Q== matrix: - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 PYTHON: C:\Python27-x64 PIP: C:\Python27-x64\Scripts\pip PYTEST: C:\Python27-x64\Scripts\pytest TWINE: C:\Python27-x64\Scripts\twine INCLUDE_REQUIRED: true - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 PYTHON: C:\Python35-x64 PIP: C:\Python35-x64\Scripts\pip PYTEST: C:\Python35-x64\Scripts\pytest TWINE: C:\Python35-x64\Scripts\twine - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 PYTHON: C:\Python36-x64 PIP: C:\Python36-x64\Scripts\pip PYTEST: C:\Python36-x64\Scripts\pytest TWINE: C:\Python36-x64\Scripts\twine install: # - ps: $uri = 'http://opti-test.zib.de/v600-rc06/scip/download/release/SCIPOptSuite-6.0.0-win64-VS15.exe' # - ps: $user = 'opti-test' # - ps: $pass = $env:optipw | ConvertTo-SecureString -AsPlainText -Force # - ps: $cred = New-Object Management.Automation.PSCredential ($user, ($pass)) # - ps: Invoke-WebRequest -Uri $uri -Credential $cred -OutFile 'scipopt-installer.exe' - ps: wget http://scip.zib.de/download/release/SCIPOptSuite-6.0.1-win64-VS15.exe -outfile scipopt-installer.exe - scipopt-installer.exe /S /D=%SCIPOPTDIR% - set PATH=%SCIPOPTDIR%\bin;%PYTHON%;%PATH% - if [%INCLUDE_REQUIRED%]==[true] copy .\VC9-include\* %SCIPOPTDIR%\include - cmd: "%PIP% install cython networkx pytest wheel twine" build_script: - python setup.py install test_script: - cmd: "%PYTEST% tests" artifacts: - path: dist\* after_test: - cmd: "echo [pypi] > %USERPROFILE%\\.pypirc" - cmd: "echo username: pyscipopt >> %USERPROFILE%\\.pypirc" - cmd: "echo password: %pypipw% >> %USERPROFILE%\\.pypirc" - python setup.py bdist_wheel on_success: - cmd: "if [%APPVEYOR_REPO_TAG%]==[true] %TWINE% upload dist\\*.whl"
1,997
34.052632
113
yml
PB-DFS
PB-DFS-master/PySCIPOpt/generate-docs.sh
#!/bin/bash # get repo info GH_REPO_ORG=`echo $TRAVIS_REPO_SLUG | cut -d "/" -f 1` GH_REPO_NAME=`echo $TRAVIS_REPO_SLUG | cut -d "/" -f 2` GH_REPO_REF="github.com/$GH_REPO_ORG/$GH_REPO_NAME.git" #get SCIP TAGFILE echo "Downloading SCIP tagfile to create links to SCIP docu" wget -q -O docs/scip.tag https://scip.zib.de/doc/scip.tag # generate html documentation in docs/html echo "Generating documentation" doxygen docs/doxy # fix broken links to SCIP online documentation sed -i "s/\.php\.html/\.php/g" docs/html/*.html # clone the docu branch git clone -b gh-pages git@github.com:${GH_REPO_ORG}/${GH_REPO_NAME} code_docs cd code_docs ##### Configure git. # Set the push default to simple i.e. push only the current branch. git config --global push.default simple # Pretend to be an user called Travis CI. git config user.name "Travis Deployment Bot" git config user.email "deploy@travis-ci.org" # go back to first commit git reset --hard `git rev-list --max-parents=0 --abbrev-commit HEAD` # copy new docu files to gh-pages mkdir -p docs/html mv ../docs/html/* docs/html/ git add --all git commit -m "Deploy docs to GitHub Pages, Travis build: ${TRAVIS_BUILD_NUMBER}" -m "Commit: ${TRAVIS_COMMIT}" # Force push to the remote gh-pages branch. # The ouput is redirected to /dev/null to hide any sensitive credential data # that might otherwise be exposed. git push --force git@github.com:${GH_REPO_ORG}/${GH_REPO_NAME} > /dev/null 2>&1
1,445
32.627907
111
sh
PB-DFS
PB-DFS-master/PySCIPOpt/setup.py
from setuptools import setup, Extension import numpy import os, platform, sys, re # look for environment variable that specifies path to SCIP Opt lib and headers scipoptdir = os.environ.get('SCIPOPTDIR', '').strip('"') includedir = os.path.abspath(os.path.join(scipoptdir, 'include')) libdir = os.path.abspath(os.path.join(scipoptdir, 'lib')) libname = 'scip' cythonize = True packagedir = os.path.join('src', 'pyscipopt') with open(os.path.join(packagedir, '__init__.py'), 'r') as initfile: version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', initfile.read(), re.MULTILINE).group(1) try: from Cython.Build import cythonize except ImportError: if not os.path.exists(os.path.join(packagedir, 'scip.c')): print('Cython is required') quit(1) cythonize = False if not os.path.exists(os.path.join(packagedir, 'scip.pyx')): cythonize = False ext = '.pyx' if cythonize else '.c' # set runtime libraries runtime_library_dirs = [] extra_link_args = [] if platform.system() in ['Linux', 'Darwin']: extra_link_args.append('-Wl,-rpath,'+libdir) # enable debug mode if requested extra_compile_args = [] if "--debug" in sys.argv: extra_compile_args.append('-UNDEBUG') sys.argv.remove("--debug") extensions = [Extension('pyscipopt.scip', [os.path.join(packagedir, 'scip'+ext)], include_dirs=[includedir, numpy.get_include()], library_dirs=[libdir], libraries=[libname], runtime_library_dirs=runtime_library_dirs, extra_compile_args = extra_compile_args, extra_link_args=extra_link_args )] if cythonize: extensions = cythonize(extensions, compiler_directives={'language_level': 3}) # extensions = cythonize(extensions, compiler_directives={'linetrace': True}) with open('README.md') as f: long_description = f.read() setup( name = 'PySCIPOpt', version = version, description = 'Python interface and modeling environment for SCIP', long_description = long_description, long_description_content_type='text/markdown', url = 'https://github.com/SCIP-Interfaces/PySCIPOpt', author = 'Zuse Institute Berlin', 
author_email = 'scip@zib.de', license = 'MIT', classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Science/Research', 'Intended Audience :: Education', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', 'Programming Language :: Cython', 'Topic :: Scientific/Engineering :: Mathematics'], ext_modules = extensions, packages = ['pyscipopt'], package_dir = {'pyscipopt': packagedir}, package_data = {'pyscipopt': ['scip.pyx', 'scip.pxd', '*.pxi']} )
2,899
33.52381
81
py
PB-DFS
PB-DFS-master/PySCIPOpt/VC9-include/stdint.h
// ISO C9x compliant stdint.h for Microsoft Visual Studio // Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 // // Copyright (c) 2006-2008 Alexander Chemeris // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // 3. The name of the author may be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO // EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////////// #ifndef _MSC_VER // [ #error "Use this header only with Microsoft Visual C++ compilers!" 
#endif // _MSC_VER ] #ifndef _MSC_STDINT_H_ // [ #define _MSC_STDINT_H_ #if _MSC_VER > 1000 #pragma once #endif #include <limits.h> // For Visual Studio 6 in C++ mode and for many Visual Studio versions when // compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}' // or compiler give many errors like this: // error C2733: second C linkage of overloaded function 'wmemchr' not allowed #ifdef __cplusplus extern "C" { #endif # include <wchar.h> #ifdef __cplusplus } #endif // Define _W64 macros to mark types changing their size, like intptr_t. #ifndef _W64 # if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 # define _W64 __w64 # else # define _W64 # endif #endif // 7.18.1 Integer types // 7.18.1.1 Exact-width integer types // Visual Studio 6 and Embedded Visual C++ 4 doesn't // realize that, e.g. char has the same size as __int8 // so we give up on __intX for them. #if (_MSC_VER < 1300) typedef signed char int8_t; typedef signed short int16_t; typedef signed int int32_t; typedef unsigned char uint8_t; typedef unsigned short uint16_t; typedef unsigned int uint32_t; #else typedef signed __int8 int8_t; typedef signed __int16 int16_t; typedef signed __int32 int32_t; typedef unsigned __int8 uint8_t; typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; #endif typedef signed __int64 int64_t; typedef unsigned __int64 uint64_t; // 7.18.1.2 Minimum-width integer types typedef int8_t int_least8_t; typedef int16_t int_least16_t; typedef int32_t int_least32_t; typedef int64_t int_least64_t; typedef uint8_t uint_least8_t; typedef uint16_t uint_least16_t; typedef uint32_t uint_least32_t; typedef uint64_t uint_least64_t; // 7.18.1.3 Fastest minimum-width integer types typedef int8_t int_fast8_t; typedef int16_t int_fast16_t; typedef int32_t int_fast32_t; typedef int64_t int_fast64_t; typedef uint8_t uint_fast8_t; typedef uint16_t uint_fast16_t; typedef uint32_t uint_fast32_t; typedef uint64_t uint_fast64_t; // 7.18.1.4 
Integer types capable of holding object pointers #ifdef _WIN64 // [ typedef signed __int64 intptr_t; typedef unsigned __int64 uintptr_t; #else // _WIN64 ][ typedef _W64 signed int intptr_t; typedef _W64 unsigned int uintptr_t; #endif // _WIN64 ] // 7.18.1.5 Greatest-width integer types typedef int64_t intmax_t; typedef uint64_t uintmax_t; // 7.18.2 Limits of specified-width integer types #if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 // 7.18.2.1 Limits of exact-width integer types #define INT8_MIN ((int8_t)_I8_MIN) #define INT8_MAX _I8_MAX #define INT16_MIN ((int16_t)_I16_MIN) #define INT16_MAX _I16_MAX #define INT32_MIN ((int32_t)_I32_MIN) #define INT32_MAX _I32_MAX #define INT64_MIN ((int64_t)_I64_MIN) #define INT64_MAX _I64_MAX #define UINT8_MAX _UI8_MAX #define UINT16_MAX _UI16_MAX #define UINT32_MAX _UI32_MAX #define UINT64_MAX _UI64_MAX // 7.18.2.2 Limits of minimum-width integer types #define INT_LEAST8_MIN INT8_MIN #define INT_LEAST8_MAX INT8_MAX #define INT_LEAST16_MIN INT16_MIN #define INT_LEAST16_MAX INT16_MAX #define INT_LEAST32_MIN INT32_MIN #define INT_LEAST32_MAX INT32_MAX #define INT_LEAST64_MIN INT64_MIN #define INT_LEAST64_MAX INT64_MAX #define UINT_LEAST8_MAX UINT8_MAX #define UINT_LEAST16_MAX UINT16_MAX #define UINT_LEAST32_MAX UINT32_MAX #define UINT_LEAST64_MAX UINT64_MAX // 7.18.2.3 Limits of fastest minimum-width integer types #define INT_FAST8_MIN INT8_MIN #define INT_FAST8_MAX INT8_MAX #define INT_FAST16_MIN INT16_MIN #define INT_FAST16_MAX INT16_MAX #define INT_FAST32_MIN INT32_MIN #define INT_FAST32_MAX INT32_MAX #define INT_FAST64_MIN INT64_MIN #define INT_FAST64_MAX INT64_MAX #define UINT_FAST8_MAX UINT8_MAX #define UINT_FAST16_MAX UINT16_MAX #define UINT_FAST32_MAX UINT32_MAX #define UINT_FAST64_MAX UINT64_MAX // 7.18.2.4 Limits of integer types capable of holding object pointers #ifdef _WIN64 // [ # define INTPTR_MIN INT64_MIN # define INTPTR_MAX INT64_MAX # 
define UINTPTR_MAX UINT64_MAX #else // _WIN64 ][ # define INTPTR_MIN INT32_MIN # define INTPTR_MAX INT32_MAX # define UINTPTR_MAX UINT32_MAX #endif // _WIN64 ] // 7.18.2.5 Limits of greatest-width integer types #define INTMAX_MIN INT64_MIN #define INTMAX_MAX INT64_MAX #define UINTMAX_MAX UINT64_MAX // 7.18.3 Limits of other integer types #ifdef _WIN64 // [ # define PTRDIFF_MIN _I64_MIN # define PTRDIFF_MAX _I64_MAX #else // _WIN64 ][ # define PTRDIFF_MIN _I32_MIN # define PTRDIFF_MAX _I32_MAX #endif // _WIN64 ] #define SIG_ATOMIC_MIN INT_MIN #define SIG_ATOMIC_MAX INT_MAX #ifndef SIZE_MAX // [ # ifdef _WIN64 // [ # define SIZE_MAX _UI64_MAX # else // _WIN64 ][ # define SIZE_MAX _UI32_MAX # endif // _WIN64 ] #endif // SIZE_MAX ] // WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h> #ifndef WCHAR_MIN // [ # define WCHAR_MIN 0 #endif // WCHAR_MIN ] #ifndef WCHAR_MAX // [ # define WCHAR_MAX _UI16_MAX #endif // WCHAR_MAX ] #define WINT_MIN 0 #define WINT_MAX _UI16_MAX #endif // __STDC_LIMIT_MACROS ] // 7.18.4 Limits of other integer types #if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 // 7.18.4.1 Macros for minimum-width integer constants #define INT8_C(val) val##i8 #define INT16_C(val) val##i16 #define INT32_C(val) val##i32 #define INT64_C(val) val##i64 #define UINT8_C(val) val##ui8 #define UINT16_C(val) val##ui16 #define UINT32_C(val) val##ui32 #define UINT64_C(val) val##ui64 // 7.18.4.2 Macros for greatest-width integer constants #define INTMAX_C INT64_C #define UINTMAX_C UINT64_C #endif // __STDC_CONSTANT_MACROS ] #endif // _MSC_STDINT_H_ ]
7,728
30.165323
122
h
PB-DFS
PB-DFS-master/PySCIPOpt/docs/customdoxygen.css
/* The standard CSS for doxygen 1.8.11 */ body, table, div, p, dl { font: 400 14px/22px Roboto,sans-serif; } /* @group Heading Levels */ h1.groupheader { font-size: 150%; } .title { font: 400 14px/28px Roboto,sans-serif; font-size: 150%; font-weight: bold; margin: 10px 2px; } h2.groupheader { border-bottom: 1px solid #879ECB; color: #354C7B; font-size: 150%; font-weight: normal; margin-top: 1.75em; padding-top: 8px; padding-bottom: 4px; width: 100%; } h3.groupheader { font-size: 100%; } h1, h2, h3, h4, h5, h6 { -webkit-transition: text-shadow 0.5s linear; -moz-transition: text-shadow 0.5s linear; -ms-transition: text-shadow 0.5s linear; -o-transition: text-shadow 0.5s linear; transition: text-shadow 0.5s linear; margin-right: 15px; } h1.glow, h2.glow, h3.glow, h4.glow, h5.glow, h6.glow { text-shadow: 0 0 15px cyan; } dt { font-weight: bold; } div.multicol { -moz-column-gap: 1em; -webkit-column-gap: 1em; -moz-column-count: 3; -webkit-column-count: 3; } p.startli, p.startdd { margin-top: 2px; } p.starttd { margin-top: 0px; } p.endli { margin-bottom: 0px; } p.enddd { margin-bottom: 4px; } p.endtd { margin-bottom: 2px; } /* @end */ caption { font-weight: bold; } span.legend { font-size: 70%; text-align: center; } h3.version { font-size: 90%; text-align: center; } div.qindex, div.navtab{ background-color: #EBEFF6; border: 1px solid #A3B4D7; text-align: center; } div.qindex, div.navpath { width: 100%; line-height: 140%; } div.navtab { margin-right: 15px; } /* @group Link Styling */ a { color: #3D578C; font-weight: normal; text-decoration: none; } .contents a:visited { color: #4665A2; } a:hover { text-decoration: underline; } a.qindex { font-weight: bold; } a.qindexHL { font-weight: bold; background-color: #9CAFD4; color: #ffffff; border: 1px double #869DCA; } .contents a.qindexHL:visited { color: #ffffff; } a.el { font-weight: bold; } a.elRef { } a.code, a.code:visited, a.line, a.line:visited { color: #4665A2; } a.codeRef, a.codeRef:visited, a.lineRef, a.lineRef:visited 
{ color: #4665A2; } /* @end */ dl.el { margin-left: -1cm; } pre.fragment { border: 1px solid #C4CFE5; background-color: #FBFCFD; padding: 4px 6px; margin: 4px 8px 4px 2px; overflow: auto; word-wrap: break-word; font-size: 9pt; line-height: 125%; font-family: monospace, fixed; font-size: 105%; } div.fragment { padding: 4px 6px; margin: 4px 8px 4px 2px; background-color: #FBFCFD; border: 1px solid #C4CFE5; } div.line { font-family: monospace, fixed; font-size: 13px; min-height: 13px; line-height: 1.0; text-wrap: unrestricted; white-space: -moz-pre-wrap; /* Moz */ white-space: -pre-wrap; /* Opera 4-6 */ white-space: -o-pre-wrap; /* Opera 7 */ white-space: pre-wrap; /* CSS3 */ word-wrap: break-word; /* IE 5.5+ */ text-indent: -53px; padding-left: 53px; padding-bottom: 0px; margin: 0px; -webkit-transition-property: background-color, box-shadow; -webkit-transition-duration: 0.5s; -moz-transition-property: background-color, box-shadow; -moz-transition-duration: 0.5s; -ms-transition-property: background-color, box-shadow; -ms-transition-duration: 0.5s; -o-transition-property: background-color, box-shadow; -o-transition-duration: 0.5s; transition-property: background-color, box-shadow; transition-duration: 0.5s; } div.line:after { content:"\000A"; white-space: pre; } div.line.glow { background-color: cyan; box-shadow: 0 0 10px cyan; } span.lineno { padding-right: 4px; text-align: right; border-right: 2px solid #0F0; background-color: #E8E8E8; white-space: pre; } span.lineno a { background-color: #D8D8D8; } span.lineno a:hover { background-color: #C8C8C8; } div.ah, span.ah { background-color: black; font-weight: bold; color: #ffffff; margin-bottom: 3px; margin-top: 3px; padding: 0.2em; border: solid thin #333; border-radius: 0.5em; -webkit-border-radius: .5em; -moz-border-radius: .5em; box-shadow: 2px 2px 3px #999; -webkit-box-shadow: 2px 2px 3px #999; -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px; background-image: -webkit-gradient(linear, left top, left bottom, 
from(#eee), to(#000),color-stop(0.3, #444)); background-image: -moz-linear-gradient(center top, #eee 0%, #444 40%, #000 110%); } div.classindex ul { list-style: none; padding-left: 0; } div.classindex span.ai { display: inline-block; } div.groupHeader { margin-left: 16px; margin-top: 12px; font-weight: bold; } div.groupText { margin-left: 16px; font-style: italic; } body { background-color: white; color: black; margin: 0; } div.contents { margin-top: 10px; margin-left: 12px; margin-right: 8px; } td.indexkey { background-color: #EBEFF6; font-weight: bold; border: 1px solid #C4CFE5; margin: 2px 0px 2px 0; padding: 2px 10px; white-space: nowrap; vertical-align: top; } td.indexvalue { background-color: #EBEFF6; border: 1px solid #C4CFE5; padding: 2px 10px; margin: 2px 0px; } tr.memlist { background-color: #EEF1F7; } p.formulaDsp { text-align: center; } img.formulaDsp { } img.formulaInl { vertical-align: middle; } div.center { text-align: center; margin-top: 0px; margin-bottom: 0px; padding: 0px; } div.center img { border: 0px; } address.footer { text-align: right; padding-right: 12px; } img.footer { border: 0px; vertical-align: middle; } /* @group Code Colorization */ span.keyword { color: #008000 } span.keywordtype { color: #604020 } span.keywordflow { color: #e08000 } span.comment { color: #800000 } span.preprocessor { color: #806020 } span.stringliteral { color: #002080 } span.charliteral { color: #008080 } span.vhdldigit { color: #ff00ff } span.vhdlchar { color: #000000 } span.vhdlkeyword { color: #700070 } span.vhdllogic { color: #ff0000 } blockquote { background-color: #F7F8FB; border-left: 2px solid #9CAFD4; margin: 0 24px 0 4px; padding: 0 12px 0 16px; } /* @end */ /* .search { color: #003399; font-weight: bold; } form.search { margin-bottom: 0px; margin-top: 0px; } input.search { font-size: 75%; color: #000080; font-weight: normal; background-color: #e8eef2; } */ td.tiny { font-size: 75%; } .dirtab { padding: 4px; border-collapse: collapse; border: 1px solid 
#A3B4D7; } th.dirtab { background: #EBEFF6; font-weight: bold; } hr { height: 0px; border: none; border-top: 1px solid #4A6AAA; } hr.footer { height: 1px; } /* @group Member Descriptions */ table.memberdecls { border-spacing: 0px; padding: 0px; } .memberdecls td, .fieldtable tr { -webkit-transition-property: background-color, box-shadow; -webkit-transition-duration: 0.5s; -moz-transition-property: background-color, box-shadow; -moz-transition-duration: 0.5s; -ms-transition-property: background-color, box-shadow; -ms-transition-duration: 0.5s; -o-transition-property: background-color, box-shadow; -o-transition-duration: 0.5s; transition-property: background-color, box-shadow; transition-duration: 0.5s; } .memberdecls td.glow, .fieldtable tr.glow { background-color: cyan; box-shadow: 0 0 15px cyan; } .mdescLeft, .mdescRight, .memItemLeft, .memItemRight, .memTemplItemLeft, .memTemplItemRight, .memTemplParams { background-color: #F9FAFC; border: none; margin: 4px; padding: 1px 0 0 8px; } .mdescLeft, .mdescRight { padding: 0px 8px 4px 8px; color: #555; } .memSeparator { border-bottom: 1px solid #DEE4F0; line-height: 1px; margin: 0px; padding: 0px; } .memItemLeft, .memTemplItemLeft { white-space: nowrap; } .memItemRight { width: 100%; } .memTemplParams { color: #4665A2; white-space: nowrap; font-size: 80%; } /* @end */ /* @group Member Details */ /* Styles for detailed member documentation */ .memtemplate { font-size: 80%; color: #4665A2; font-weight: normal; margin-left: 9px; } .memnav { background-color: #EBEFF6; border: 1px solid #A3B4D7; text-align: center; margin: 2px; margin-right: 15px; padding: 2px; } .mempage { width: 100%; } .memitem { padding: 0; margin-bottom: 10px; margin-right: 5px; -webkit-transition: box-shadow 0.5s linear; -moz-transition: box-shadow 0.5s linear; -ms-transition: box-shadow 0.5s linear; -o-transition: box-shadow 0.5s linear; transition: box-shadow 0.5s linear; display: table !important; width: 100%; } .memitem.glow { box-shadow: 0 0 15px 
cyan; } .memname { font-weight: bold; margin-left: 6px; } .memname td { vertical-align: bottom; } .memproto, dl.reflist dt { border-top: 1px solid #A8B8D9; border-left: 1px solid #A8B8D9; border-right: 1px solid #A8B8D9; padding: 6px 0px 6px 0px; color: #253555; font-weight: bold; text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); background-image:url('nav_f.png'); background-repeat:repeat-x; background-color: #E2E8F2; /* opera specific markup */ box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); border-top-right-radius: 4px; border-top-left-radius: 4px; /* firefox specific markup */ -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px; -moz-border-radius-topright: 4px; -moz-border-radius-topleft: 4px; /* webkit specific markup */ -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); -webkit-border-top-right-radius: 4px; -webkit-border-top-left-radius: 4px; } .memdoc, dl.reflist dd { border-bottom: 1px solid #A8B8D9; border-left: 1px solid #A8B8D9; border-right: 1px solid #A8B8D9; padding: 6px 10px 2px 10px; background-color: #FBFCFD; border-top-width: 0; background-image:url('nav_g.png'); background-repeat:repeat-x; background-color: #FFFFFF; /* opera specific markup */ border-bottom-left-radius: 4px; border-bottom-right-radius: 4px; box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); /* firefox specific markup */ -moz-border-radius-bottomleft: 4px; -moz-border-radius-bottomright: 4px; -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px; /* webkit specific markup */ -webkit-border-bottom-left-radius: 4px; -webkit-border-bottom-right-radius: 4px; -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15); } dl.reflist dt { padding: 5px; } dl.reflist dd { margin: 0px 0px 10px 0px; padding: 5px; } .paramkey { text-align: right; } .paramtype { white-space: nowrap; } .paramname { color: #602020; white-space: nowrap; } .paramname em { font-style: normal; } .paramname code { line-height: 14px; } .params, .retval, .exception, .tparams { margin-left: 0px; padding-left: 0px; } .params .paramname, 
.retval .paramname { font-weight: bold; vertical-align: top; } .params .paramtype { font-style: italic; vertical-align: top; } .params .paramdir { font-family: "courier new",courier,monospace; vertical-align: top; } table.mlabels { border-spacing: 0px; } td.mlabels-left { width: 100%; padding: 0px; } td.mlabels-right { vertical-align: bottom; padding: 0px; white-space: nowrap; } span.mlabels { margin-left: 8px; } span.mlabel { background-color: #728DC1; border-top:1px solid #5373B4; border-left:1px solid #5373B4; border-right:1px solid #C4CFE5; border-bottom:1px solid #C4CFE5; text-shadow: none; color: white; margin-right: 4px; padding: 2px 3px; border-radius: 3px; font-size: 7pt; white-space: nowrap; vertical-align: middle; } /* @end */ /* these are for tree view inside a (index) page */ div.directory { margin: 10px 0px; border-top: 1px solid #9CAFD4; border-bottom: 1px solid #9CAFD4; width: 100%; } .directory table { border-collapse:collapse; } .directory td { margin: 0px; padding: 0px; vertical-align: top; } .directory td.entry { white-space: nowrap; padding-right: 6px; padding-top: 3px; } .directory td.entry a { outline:none; } .directory td.entry a img { border: none; } .directory td.desc { width: 100%; padding-left: 6px; padding-right: 6px; padding-top: 3px; border-left: 1px solid rgba(0,0,0,0.05); } .directory tr.even { padding-left: 6px; background-color: #F7F8FB; } .directory img { vertical-align: -30%; } .directory .levels { white-space: nowrap; width: 100%; text-align: right; font-size: 9pt; } .directory .levels span { cursor: pointer; padding-left: 2px; padding-right: 2px; color: #3D578C; } .arrow { color: #9CAFD4; -webkit-user-select: none; -khtml-user-select: none; -moz-user-select: none; -ms-user-select: none; user-select: none; cursor: pointer; font-size: 80%; display: inline-block; width: 16px; height: 22px; } .icon { font-family: Arial, Helvetica; font-weight: bold; font-size: 12px; height: 14px; width: 16px; display: inline-block; 
background-color: #728DC1; color: white; text-align: center; border-radius: 4px; margin-left: 2px; margin-right: 2px; } .icona { width: 24px; height: 22px; display: inline-block; } .iconfopen { width: 24px; height: 18px; margin-bottom: 4px; background-image:url('folderopen.png'); background-position: 0px -4px; background-repeat: repeat-y; vertical-align:top; display: inline-block; } .iconfclosed { width: 24px; height: 18px; margin-bottom: 4px; background-image:url('folderclosed.png'); background-position: 0px -4px; background-repeat: repeat-y; vertical-align:top; display: inline-block; } .icondoc { width: 24px; height: 18px; margin-bottom: 4px; background-image:url('doc.png'); background-position: 0px -4px; background-repeat: repeat-y; vertical-align:top; display: inline-block; } table.directory { font: 400 14px Roboto,sans-serif; } /* @end */ div.dynheader { margin-top: 8px; -webkit-touch-callout: none; -webkit-user-select: none; -khtml-user-select: none; -moz-user-select: none; -ms-user-select: none; user-select: none; } address { font-style: normal; color: #2A3D61; } table.doxtable caption { caption-side: top; } table.doxtable { border-collapse:collapse; margin-top: 4px; margin-bottom: 4px; } table.doxtable td, table.doxtable th { border: 1px solid #2D4068; padding: 3px 7px 2px; } table.doxtable th { background-color: #374F7F; color: #FFFFFF; font-size: 110%; padding-bottom: 4px; padding-top: 5px; } table.fieldtable { /*width: 100%;*/ margin-bottom: 10px; border: 1px solid #A8B8D9; border-spacing: 0px; -moz-border-radius: 4px; -webkit-border-radius: 4px; border-radius: 4px; -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px; -webkit-box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15); box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15); } .fieldtable td, .fieldtable th { padding: 3px 7px 2px; } .fieldtable td.fieldtype, .fieldtable td.fieldname { white-space: nowrap; border-right: 1px solid #A8B8D9; border-bottom: 1px solid #A8B8D9; vertical-align: top; } .fieldtable td.fieldname 
{ padding-top: 3px; } .fieldtable td.fielddoc { border-bottom: 1px solid #A8B8D9; /*width: 100%;*/ } .fieldtable td.fielddoc p:first-child { margin-top: 0px; } .fieldtable td.fielddoc p:last-child { margin-bottom: 2px; } .fieldtable tr:last-child td { border-bottom: none; } .fieldtable th { background-image:url('nav_f.png'); background-repeat:repeat-x; background-color: #E2E8F2; font-size: 90%; color: #253555; padding-bottom: 4px; padding-top: 5px; text-align:left; -moz-border-radius-topleft: 4px; -moz-border-radius-topright: 4px; -webkit-border-top-left-radius: 4px; -webkit-border-top-right-radius: 4px; border-top-left-radius: 4px; border-top-right-radius: 4px; border-bottom: 1px solid #A8B8D9; } .tabsearch { top: 0px; left: 10px; height: 36px; background-image: url('tab_b.png'); z-index: 101; overflow: hidden; font-size: 13px; } .navpath ul { font-size: 11px; background-image:url('tab_b.png'); background-repeat:repeat-x; background-position: 0 -5px; height:30px; line-height:30px; color:#8AA0CC; border:solid 1px #C2CDE4; overflow:hidden; margin:0px; padding:0px; } .navpath li { list-style-type:none; float:left; padding-left:10px; padding-right:15px; background-image:url('bc_s.png'); background-repeat:no-repeat; background-position:right; color:#364D7C; } .navpath li.navelem a { height:32px; display:block; text-decoration: none; outline: none; color: #283A5D; font-family: 'Lucida Grande',Geneva,Helvetica,Arial,sans-serif; text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9); text-decoration: none; } .navpath li.navelem a:hover { color:#6884BD; } .navpath li.footer { list-style-type:none; float:right; padding-left:10px; padding-right:15px; background-image:none; background-repeat:no-repeat; background-position:right; color:#364D7C; font-size: 8pt; } div.summary { float: right; font-size: 8pt; padding-right: 5px; width: 50%; text-align: right; } div.summary a { white-space: nowrap; } table.classindex { margin: 10px; white-space: nowrap; margin-left: 3%; margin-right: 
3%; width: 94%; border: 0; border-spacing: 0; padding: 0; } div.ingroups { font-size: 8pt; width: 50%; text-align: left; } div.ingroups a { white-space: nowrap; } div.header { background-image:url('nav_h.png'); background-repeat:repeat-x; background-color: #F9FAFC; margin: 0px; border-bottom: 1px solid #C4CFE5; } div.headertitle { padding: 5px 5px 5px 10px; } dl { padding: 0 0 0 10px; } /* dl.note, dl.warning, dl.attention, dl.pre, dl.post, dl.invariant, dl.deprecated, dl.todo, dl.test, dl.bug */ dl.section { margin-left: 0px; padding-left: 0px; } dl.note { margin-left:-7px; padding-left: 3px; border-left:4px solid; border-color: #D0C000; } dl.warning, dl.attention { margin-left:-7px; padding-left: 3px; border-left:4px solid; border-color: #FF0000; } dl.pre, dl.post, dl.invariant { margin-left:-7px; padding-left: 3px; border-left:4px solid; border-color: #00D000; } dl.deprecated { margin-left:-7px; padding-left: 3px; border-left:4px solid; border-color: #505050; } dl.todo { margin-left:-7px; padding-left: 3px; border-left:4px solid; border-color: #00C0E0; } dl.test { margin-left:-7px; padding-left: 3px; border-left:4px solid; border-color: #3030E0; } dl.bug { margin-left:-7px; padding-left: 3px; border-left:4px solid; border-color: #C08050; } dl.section dd { margin-bottom: 6px; } #projectlogo { text-align: center; vertical-align: bottom; border-collapse: separate; } #projectlogo img { border: 0px none; } #projectalign { vertical-align: middle; } #projectname { font: 300% Tahoma, Arial,sans-serif; margin: 0px; padding: 2px 0px; } #projectbrief { font: 120% Tahoma, Arial,sans-serif; margin: 0px; padding: 0px; } #projectnumber { font: 50% Tahoma, Arial,sans-serif; margin: 0px; padding: 0px; } #titlearea { padding: 0px; margin: 0px; width: 100%; border-bottom: 1px solid #5373B4; } .image { text-align: center; } .dotgraph { text-align: center; } .mscgraph { text-align: center; } .diagraph { text-align: center; } .caption { font-weight: bold; } div.zoom { border: 1px 
solid #90A5CE; } dl.citelist { margin-bottom:50px; } dl.citelist dt { color:#334975; float:left; font-weight:bold; margin-right:10px; padding:5px; } dl.citelist dd { margin:2px 0; padding:5px 0; } div.toc { padding: 14px 25px; background-color: #F4F6FA; border: 1px solid #D8DFEE; border-radius: 7px 7px 7px 7px; float: right; height: auto; margin: 0 8px 10px 10px; width: 200px; } div.toc li { background: url("bdwn.png") no-repeat scroll 0 5px transparent; font: 10px/1.2 Verdana,DejaVu Sans,Geneva,sans-serif; margin-top: 5px; padding-left: 10px; padding-top: 2px; } div.toc h3 { font: bold 12px/1.2 Arial,FreeSans,sans-serif; color: #4665A2; border-bottom: 0 none; margin: 0; } div.toc ul { list-style: none outside none; border: medium none; padding: 0px; } div.toc li.level1 { margin-left: 0px; } div.toc li.level2 { margin-left: 15px; } div.toc li.level3 { margin-left: 30px; } div.toc li.level4 { margin-left: 45px; } .inherit_header { font-weight: bold; color: gray; cursor: pointer; -webkit-touch-callout: none; -webkit-user-select: none; -khtml-user-select: none; -moz-user-select: none; -ms-user-select: none; user-select: none; } .inherit_header td { padding: 6px 0px 2px 5px; } .inherit { display: none; } tr.heading h2 { margin-top: 12px; margin-bottom: 4px; } /* tooltip related style info */ .ttc { position: absolute; display: none; } #powerTip { cursor: default; white-space: nowrap; background-color: white; border: 1px solid gray; border-radius: 4px 4px 4px 4px; box-shadow: 1px 1px 7px gray; display: none; font-size: smaller; max-width: 80%; opacity: 0.9; padding: 1ex 1em 1em; position: absolute; z-index: 2147483647; } #powerTip div.ttdoc { color: grey; font-style: italic; } #powerTip div.ttname a { font-weight: bold; } #powerTip div.ttname { font-weight: bold; } #powerTip div.ttdeci { color: #006318; } #powerTip div { margin: 0px; padding: 0px; font: 12px/16px Roboto,sans-serif; } #powerTip:before, #powerTip:after { content: ""; position: absolute; margin: 0px; } 
#powerTip.n:after, #powerTip.n:before, #powerTip.s:after, #powerTip.s:before, #powerTip.w:after, #powerTip.w:before, #powerTip.e:after, #powerTip.e:before, #powerTip.ne:after, #powerTip.ne:before, #powerTip.se:after, #powerTip.se:before, #powerTip.nw:after, #powerTip.nw:before, #powerTip.sw:after, #powerTip.sw:before { border: solid transparent; content: " "; height: 0; width: 0; position: absolute; } #powerTip.n:after, #powerTip.s:after, #powerTip.w:after, #powerTip.e:after, #powerTip.nw:after, #powerTip.ne:after, #powerTip.sw:after, #powerTip.se:after { border-color: rgba(255, 255, 255, 0); } #powerTip.n:before, #powerTip.s:before, #powerTip.w:before, #powerTip.e:before, #powerTip.nw:before, #powerTip.ne:before, #powerTip.sw:before, #powerTip.se:before { border-color: rgba(128, 128, 128, 0); } #powerTip.n:after, #powerTip.n:before, #powerTip.ne:after, #powerTip.ne:before, #powerTip.nw:after, #powerTip.nw:before { top: 100%; } #powerTip.n:after, #powerTip.ne:after, #powerTip.nw:after { border-top-color: #ffffff; border-width: 10px; margin: 0px -10px; } #powerTip.n:before { border-top-color: #808080; border-width: 11px; margin: 0px -11px; } #powerTip.n:after, #powerTip.n:before { left: 50%; } #powerTip.nw:after, #powerTip.nw:before { right: 14px; } #powerTip.ne:after, #powerTip.ne:before { left: 14px; } #powerTip.s:after, #powerTip.s:before, #powerTip.se:after, #powerTip.se:before, #powerTip.sw:after, #powerTip.sw:before { bottom: 100%; } #powerTip.s:after, #powerTip.se:after, #powerTip.sw:after { border-bottom-color: #ffffff; border-width: 10px; margin: 0px -10px; } #powerTip.s:before, #powerTip.se:before, #powerTip.sw:before { border-bottom-color: #808080; border-width: 11px; margin: 0px -11px; } #powerTip.s:after, #powerTip.s:before { left: 50%; } #powerTip.sw:after, #powerTip.sw:before { right: 14px; } #powerTip.se:after, #powerTip.se:before { left: 14px; } #powerTip.e:after, #powerTip.e:before { left: 100%; } #powerTip.e:after { border-left-color: #ffffff; 
border-width: 10px; top: 50%; margin-top: -10px; } #powerTip.e:before { border-left-color: #808080; border-width: 11px; top: 50%; margin-top: -11px; } #powerTip.w:after, #powerTip.w:before { right: 100%; } #powerTip.w:after { border-right-color: #ffffff; border-width: 10px; top: 50%; margin-top: -10px; } #powerTip.w:before { border-right-color: #808080; border-width: 11px; top: 50%; margin-top: -11px; } @media print { #top { display: none; } #side-nav { display: none; } #nav-path { display: none; } body { overflow:visible; } h1, h2, h3, h4, h5, h6 { page-break-after: avoid; } .summary { display: none; } .memitem { page-break-inside: avoid; } #doc-content { margin-left:0 !important; height:auto !important; width:auto !important; overflow:inherit; display:inline; } }
25,871
16.528455
111
css
PB-DFS
PB-DFS-master/PySCIPOpt/docs/footer.html
<!-- HTML footer for doxygen 1.8.11--> <!-- start footer part --> <!--BEGIN GENERATE_TREEVIEW--> <div id="nav-path" class="navpath"><!-- id is needed for treeview function! --> <ul> $navpath <li class="footer">$generatedby <a href="http://www.doxygen.org/index.html"> <img class="footer" src="$relpath^doxygen.png" alt="doxygen"/></a> $doxygenversion </li> </ul> </div> <!--END GENERATE_TREEVIEW--> <!--BEGIN !GENERATE_TREEVIEW--> <hr class="footer"/><address class="footer"><small> $generatedby &#160;<a href="http://www.doxygen.org/index.html"> <img class="footer" src="$relpath^doxygen.png" alt="doxygen"/> </a> $doxygenversion </small></address> <!--END !GENERATE_TREEVIEW--> </body> </html>
716
31.590909
92
html
PB-DFS
PB-DFS-master/PySCIPOpt/docs/header.html
<!-- HTML header for doxygen 1.8.11--> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/> <meta http-equiv="X-UA-Compatible" content="IE=9"/> <meta name="generator" content="Doxygen $doxygenversion"/> <!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME--> <!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME--> <link href="$relpath^tabs.css" rel="stylesheet" type="text/css"/> <script type="text/javascript" src="$relpath^jquery.js"></script> <script type="text/javascript" src="$relpath^dynsections.js"></script> $treeview $search $mathjax <link href="$relpath^$stylesheet" rel="stylesheet" type="text/css" /> $extrastylesheet </head> <body> <div id="top"><!-- do not remove this div, it is closed by doxygen! --> <!--BEGIN TITLEAREA--> <div id="titlearea"> <table cellspacing="0" cellpadding="0"> <tbody> <tr style="height: 56px;"> <!--BEGIN PROJECT_LOGO--> <td id="projectlogo"><img alt="Logo" src="$relpath^$projectlogo"/></td> <!--END PROJECT_LOGO--> <!--BEGIN PROJECT_NAME--> <td id="projectalign" style="padding-left: 0.5em;"> <div id="projectname">$projectname <!--BEGIN PROJECT_NUMBER-->&#160;<span id="projectnumber">$projectnumber</span><!--END PROJECT_NUMBER--> </div> <!--BEGIN PROJECT_BRIEF--><div id="projectbrief">$projectbrief</div><!--END PROJECT_BRIEF--> </td> <!--END PROJECT_NAME--> <!--BEGIN !PROJECT_NAME--> <!--BEGIN PROJECT_BRIEF--> <td style="padding-left: 0.5em;"> <div id="projectbrief">$projectbrief</div> </td> <!--END PROJECT_BRIEF--> <!--END !PROJECT_NAME--> <!--BEGIN DISABLE_INDEX--> <!--BEGIN SEARCHENGINE--> <td>$searchbox</td> <!--END SEARCHENGINE--> <!--END DISABLE_INDEX--> </tr> </tbody> </table> </div> <!--END TITLEAREA--> <!-- end header part -->
1,993
34.607143
121
html
PB-DFS
PB-DFS-master/PySCIPOpt/docs/maindoc.py
##@file maindoc.py #@brief Main documentation page ## @mainpage Overview # # This project provides an interface from Python to the [SCIP Optimization Suite](http://scip.zib.de). <br> # # See the [web site] (https://github.com/SCIP-Interfaces/PySCIPOpt) to download PySCIPOpt. # # @section Installation # See [INSTALL.md](INSTALL.md) for instructions. # # @section TABLEOFCONTENTS Structure of this manual # # This documentation gives an introduction to the functionality of the Python interface of the SCIP code in the following chapters # # - \ref Model Class with the most fundamental functions to create and solve a problem # - \ref examples/tutorial "Tutorials" and \ref examples/finished "Examples" to display some functionality of the interface # - @subpage EXTEND Explanations on extending the PySCIPOpt interface # # For a more detailed description on how to create a model and how to extend the interface, please have a look at the [README.md] (README.md). # ##@page EXTEND Extending the interface # PySCIPOpt already covers many of the SCIP callable library methods. You #may also extend it to increase the functionality of this interface. The #following will provide some directions on how this can be achieved: # #The two most important files in PySCIPOpt are the `scip.pxd` and #`scip.pyx`. These two files specify the public functions of SCIP that #can be accessed from your python code. # #To make PySCIPOpt aware of the public functions you would like to #access, you must add them to `scip.pxd`. There are two things that must #be done in order to properly add the functions: # # -# Ensure any `enum`s, `struct`s or SCIP variable types are included in # `scip.pxd` # -# Add the prototype of the public function you wish to access to # `scip.pxd` # #After following the previous two steps, it is then possible to create #functions in python that reference the SCIP public functions included in #`scip.pxd`. 
This is achieved by modifying the `scip.pyx` file to add the #functionality you require. # #We are always happy to accept pull request containing patches or #extensions! # #Please have a look at our [contribution guidelines](CONTRIBUTING.md).
2,177
41.705882
142
py
PB-DFS
PB-DFS-master/PySCIPOpt/examples/finished/atsp.py
##@file atsp.py #@brief solve the asymmetric traveling salesman problem """ formulations implemented: - mtz -- Miller-Tucker-Zemlin's potential formulation - mtz_strong -- Miller-Tucker-Zemlin's potential formulation with stronger constraint - scf -- single-commodity flow formulation - mcf -- multi-commodity flow formulation Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012 """ from pyscipopt import Model, quicksum, multidict def mtz(n,c): """mtz: Miller-Tucker-Zemlin's model for the (asymmetric) traveling salesman problem (potential formulation) Parameters: - n: number of nodes - c[i,j]: cost for traversing arc (i,j) Returns a model, ready to be solved. """ model = Model("atsp - mtz") x,u = {},{} for i in range(1,n+1): u[i] = model.addVar(lb=0, ub=n-1, vtype="C", name="u(%s)"%i) for j in range(1,n+1): if i != j: x[i,j] = model.addVar(vtype="B", name="x(%s,%s)"%(i,j)) for i in range(1,n+1): model.addCons(quicksum(x[i,j] for j in range(1,n+1) if j != i) == 1, "Out(%s)"%i) model.addCons(quicksum(x[j,i] for j in range(1,n+1) if j != i) == 1, "In(%s)"%i) for i in range(1,n+1): for j in range(2,n+1): if i != j: model.addCons(u[i] - u[j] + (n-1)*x[i,j] <= n-2, "MTZ(%s,%s)"%(i,j)) model.setObjective(quicksum(c[i,j]*x[i,j] for (i,j) in x), "minimize") model.data = x,u return model def mtz_strong(n,c): """mtz_strong: Miller-Tucker-Zemlin's model for the (asymmetric) traveling salesman problem (potential formulation, adding stronger constraints) Parameters: n - number of nodes c[i,j] - cost for traversing arc (i,j) Returns a model, ready to be solved. 
""" model = Model("atsp - mtz-strong") x,u = {},{} for i in range(1,n+1): u[i] = model.addVar(lb=0, ub=n-1, vtype="C", name="u(%s)"%i) for j in range(1,n+1): if i != j: x[i,j] = model.addVar(vtype="B", name="x(%s,%s)"%(i,j)) for i in range(1,n+1): model.addCons(quicksum(x[i,j] for j in range(1,n+1) if j != i) == 1, "Out(%s)"%i) model.addCons(quicksum(x[j,i] for j in range(1,n+1) if j != i) == 1, "In(%s)"%i) for i in range(1,n+1): for j in range(2,n+1): if i != j: model.addCons(u[i] - u[j] + (n-1)*x[i,j] + (n-3)*x[j,i] <= n-2, "LiftedMTZ(%s,%s)"%(i,j)) for i in range(2,n+1): model.addCons(-x[1,i] - u[i] + (n-3)*x[i,1] <= -2, name="LiftedLB(%s)"%i) model.addCons(-x[i,1] + u[i] + (n-3)*x[1,i] <= n-2, name="LiftedUB(%s)"%i) model.setObjective(quicksum(c[i,j]*x[i,j] for (i,j) in x), "minimize") model.data = x,u return model def scf(n,c): """scf: single-commodity flow formulation for the (asymmetric) traveling salesman problem Parameters: - n: number of nodes - c[i,j]: cost for traversing arc (i,j) Returns a model, ready to be solved. 
""" model = Model("atsp - scf") x,f = {},{} for i in range(1,n+1): for j in range(1,n+1): if i != j: x[i,j] = model.addVar(vtype="B", name="x(%s,%s)"%(i,j)) if i==1: f[i,j] = model.addVar(lb=0, ub=n-1, vtype="C", name="f(%s,%s)"%(i,j)) else: f[i,j] = model.addVar(lb=0, ub=n-2, vtype="C", name="f(%s,%s)"%(i,j)) for i in range(1,n+1): model.addCons(quicksum(x[i,j] for j in range(1,n+1) if j != i) == 1, "Out(%s)"%i) model.addCons(quicksum(x[j,i] for j in range(1,n+1) if j != i) == 1, "In(%s)"%i) model.addCons(quicksum(f[1,j] for j in range(2,n+1)) == n-1, "FlowOut") for i in range(2,n+1): model.addCons(quicksum(f[j,i] for j in range(1,n+1) if j != i) - \ quicksum(f[i,j] for j in range(1,n+1) if j != i) == 1, "FlowCons(%s)"%i) for j in range(2,n+1): model.addCons(f[1,j] <= (n-1)*x[1,j], "FlowUB(%s,%s)"%(1,j)) for i in range(2,n+1): if i != j: model.addCons(f[i,j] <= (n-2)*x[i,j], "FlowUB(%s,%s)"%(i,j)) model.setObjective(quicksum(c[i,j]*x[i,j] for (i,j) in x), "minimize") model.data = x,f return model def mcf(n,c): """mcf: multi-commodity flow formulation for the (asymmetric) traveling salesman problem Parameters: - n: number of nodes - c[i,j]: cost for traversing arc (i,j) Returns a model, ready to be solved. 
""" model = Model("mcf") x,f = {},{} for i in range(1,n+1): for j in range(1,n+1): if i != j: x[i,j] = model.addVar(vtype="B", name="x(%s,%s)"%(i,j)) if i != j and j != 1: for k in range(2,n+1): if i != k: f[i,j,k] = model.addVar(ub=1, vtype="C", name="f(%s,%s,%s)"%(i,j,k)) for i in range(1,n+1): model.addCons(quicksum(x[i,j] for j in range(1,n+1) if j != i) == 1, "Out(%s)"%i) model.addCons(quicksum(x[j,i] for j in range(1,n+1) if j != i) == 1, "In(%s)"%i) for k in range(2,n+1): model.addCons(quicksum(f[1,i,k] for i in range(2,n+1) if (1,i,k) in f) == 1, "FlowOut(%s)"%k) model.addCons(quicksum(f[i,k,k] for i in range(1,n+1) if (i,k,k) in f) == 1, "FlowIn(%s)"%k) for i in range(2,n+1): if i != k: model.addCons(quicksum(f[j,i,k] for j in range(1,n+1) if (j,i,k) in f) == \ quicksum(f[i,j,k] for j in range(1,n+1) if (i,j,k) in f), "FlowCons(%s,%s)"%(i,k)) for (i,j,k) in f: model.addCons(f[i,j,k] <= x[i,j], "FlowUB(%s,%s,%s)"%(i,j,k)) model.setObjective(quicksum(c[i,j]*x[i,j] for (i,j) in x), "minimize") model.data = x,f return model def sequence(arcs): """sequence: make a list of cities to visit, from set of arcs""" succ = {} for (i,j) in arcs: succ[i] = j curr = 1 # first node being visited sol = [curr] for i in range(len(arcs)-2): curr = succ[curr] sol.append(curr) return sol if __name__ == "__main__": n = 5 c = { (1,1):0, (1,2):1989, (1,3):102, (1,4):102, (1,5):103, (2,1):104, (2,2):0, (2,3):11, (2,4):104, (2,5):108, (3,1):107, (3,2):108, (3,3):0, (3,4):19, (3,5):102, (4,1):109, (4,2):102, (4,3):107, (4,4):0, (4,5):15, (5,1):13, (5,2):103, (5,3):104, (5,4):101, (5,5):0, } model = mtz(n,c) model.hideOutput() # silent mode model.optimize() cost = model.getObjVal() print() print("Miller-Tucker-Zemlin's model:") print("Optimal value:", cost) #model.printAttr("X") for v in model.getVars(): if model.getVal(v) > 0.001: print(v.name, "=", model.getVal(v)) x,u = model.data sol = [i for (p,i) in sorted([(int(model.getVal(u[i])+.5),i) for i in range(1,n+1)])] print(sol) arcs = 
[(i,j) for (i,j) in x if model.getVal(x[i,j]) > .5] sol = sequence(arcs) print(sol) # assert cost == 5 model = mtz_strong(n,c) model.hideOutput() # silent mode model.optimize() cost = model.getObjVal() print() print("Miller-Tucker-Zemlin's model with stronger constraints:") print("Optimal value:",cost) #model.printAttr("X") for v in model.getVars(): if model.getVal(v) > 0.001: print(v.name, "=", model.getVal(v)) x,u = model.data sol = [i for (p,i) in sorted([(int(model.getVal(u[i])+.5),i) for i in range(1,n+1)])] print(sol) arcs = [(i,j) for (i,j) in x if model.getVal(x[i,j]) > .5] sol = sequence(arcs) print(sol) # assert cost == 5 model = scf(n,c) model.hideOutput() # silent mode model.optimize() cost = model.getObjVal() print() print("Single-commodity flow formulation:") print("Optimal value:",cost) #model.printAttr("X") for v in model.getVars(): if model.getVal(v) > 0.001: print(v.name, "=", model.getVal(v)) x,f = model.data arcs = [(i,j) for (i,j) in x if model.getVal(x[i,j]) > .5] sol = sequence(arcs) print(sol) # assert cost == 5 model = mcf(n,c) model.hideOutput() # silent mode model.optimize() cost = model.getObjVal() print() print("Multi-commodity flow formulation:") print("Optimal value:",cost) #model.printAttr("X") for v in model.getVars(): if model.getVal(v)>0.001: print(v.name, "=", model.getVal(v)) x,f = model.data arcs = [(i,j) for (i,j) in x if model.getVal(x[i,j]) > .5] sol = sequence(arcs) print(sol) # assert cost == 5
8,763
31.579926
105
py
PB-DFS
PB-DFS-master/PySCIPOpt/examples/finished/bpp.py
##@file bpp.py #@brief use SCIP for solving the bin packing problem. """ The instance of the bin packing problem is represented by the two lists of n items of sizes and quantity s=(s_i). The bin size is B. We use Martello and Toth (1990) formulation, and suggest extensions with tie-breaking and SOS constraints. Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012 """ from pyscipopt import Model, quicksum def FFD(s,B): """First Fit Decreasing heuristics for the Bin Packing Problem. Parameters: - s: list with item widths - B: bin capacity Returns a list of lists with bin compositions. """ remain = [B] # keep list of empty space per bin sol = [[]] # a list ot items (i.e., sizes) on each used bin for item in sorted(s,reverse=True): for (j,free) in enumerate(remain): if free >= item: remain[j] -= item sol[j].append(item) break else: #does not fit in any bin sol.append([item]) remain.append(B-item) return sol def bpp(s,B): """bpp: Martello and Toth's model to solve the bin packing problem. Parameters: - s: list with item widths - B: bin capacity Returns a model, ready to be solved. 
""" n = len(s) U = len(FFD(s,B)) # upper bound of the number of bins model = Model("bpp") # setParam("MIPFocus",1) x,y = {},{} for i in range(n): for j in range(U): x[i,j] = model.addVar(vtype="B", name="x(%s,%s)"%(i,j)) for j in range(U): y[j] = model.addVar(vtype="B", name="y(%s)"%j) # assignment constraints for i in range(n): model.addCons(quicksum(x[i,j] for j in range(U)) == 1, "Assign(%s)"%i) # bin capacity constraints for j in range(U): model.addCons(quicksum(s[i]*x[i,j] for i in range(n)) <= B*y[j], "Capac(%s)"%j) # tighten assignment constraints for j in range(U): for i in range(n): model.addCons(x[i,j] <= y[j], "Strong(%s,%s)"%(i,j)) # tie breaking constraints for j in range(U-1): model.addCons(y[j] >= y[j+1],"TieBrk(%s)"%j) # SOS constraints for i in range(n): model.addConsSOS1([x[i,j] for j in range(U)]) model.setObjective(quicksum(y[j] for j in range(U)), "minimize") model.data = x,y return model def solveBinPacking(s,B): """solveBinPacking: use an IP model to solve the in Packing Problem. Parameters: - s: list with item widths - B: bin capacity Returns a solution: list of lists, each of which with the items in a roll. 
""" n = len(s) U = len(FFD(s,B)) # upper bound of the number of bins model = bpp(s,B) x,y = model.data model.optimize() bins = [[] for i in range(U)] for (i,j) in x: if model.getVal(x[i,j]) > .5: bins[j].append(s[i]) for i in range(bins.count([])): bins.remove([]) for b in bins: b.sort() bins.sort() return bins import random def DiscreteUniform(n=10,LB=1,UB=99,B=100): """DiscreteUniform: create random, uniform instance for the bin packing problem.""" B = 100 s = [0]*n for i in range(n): s[i] = random.randint(LB,UB) return s,B if __name__ == "__main__": random.seed(256) s,B = DiscreteUniform() print("items:", s) print("bin size:", B) ffd = FFD(s,B) print("\n\n\n FFD heuristic:") print("Solution:") print(ffd) print(len(ffd), "bins") print("\n\n\n IP formulation:") bins = solveBinPacking(s,B) print("Solution:") print(bins) print(len(bins), "bins")
3,682
25.883212
87
py
PB-DFS
PB-DFS-master/PySCIPOpt/examples/finished/diet.py
##@file diet.py #@brief model for the modern diet problem """ Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012 """ # todo: can we make it work as "from pyscipopt import *"? from pyscipopt import Model, quicksum, multidict def diet(F,N,a,b,c,d): """diet -- model for the modern diet problem Parameters: - F: set of foods - N: set of nutrients - a[i]: minimum intake of nutrient i - b[i]: maximum intake of nutrient i - c[j]: cost of food j - d[j][i]: amount of nutrient i in food j Returns a model, ready to be solved. """ model = Model("modern diet") # Create variables x,y,z = {},{},{} for j in F: x[j] = model.addVar(vtype="I", name="x(%s)"%j) y[j] = model.addVar(vtype="B", name="y(%s)"%j) for i in N: z[i] = model.addVar(lb=a[i], ub=b[i], name="z(%s)"%j) v = model.addVar(vtype="C", name="v") # Constraints: for i in N: model.addCons(quicksum(d[j][i]*x[j] for j in F) == z[i], name="Nutr(%s)"%i) model.addCons(quicksum(c[j]*x[j] for j in F) == v, name="Cost") for j in F: model.addCons(y[j] <= x[j], name="Eat(%s)"%j) # Objective: model.setObjective(quicksum(y[j] for j in F), "maximize") model.data = x,y,z,v return model def make_inst(): """make_inst: prepare data for the diet model""" F,c,d = multidict({ # cost # composition "QPounder" : [ 1.84, {"Cal":510, "Carbo":34, "Protein":28, "VitA":15, "VitC": 6, "Calc":30, "Iron":20}], "McLean" : [ 2.19, {"Cal":370, "Carbo":35, "Protein":24, "VitA":15, "VitC": 10, "Calc":20, "Iron":20}], "Big Mac" : [ 1.84, {"Cal":500, "Carbo":42, "Protein":25, "VitA": 6, "VitC": 2, "Calc":25, "Iron":20}], "FFilet" : [ 1.44, {"Cal":370, "Carbo":38, "Protein":14, "VitA": 2, "VitC": 0, "Calc":15, "Iron":10}], "Chicken" : [ 2.29, {"Cal":400, "Carbo":42, "Protein":31, "VitA": 8, "VitC": 15, "Calc":15, "Iron": 8}], "Fries" : [ .77, {"Cal":220, "Carbo":26, "Protein": 3, "VitA": 0, "VitC": 15, "Calc": 0, "Iron": 2}], "McMuffin" : [ 1.29, {"Cal":345, "Carbo":27, "Protein":15, "VitA": 4, "VitC": 0, "Calc":20, "Iron":15}], "1% LFMilk": [ .60, 
{"Cal":110, "Carbo":12, "Protein": 9, "VitA":10, "VitC": 4, "Calc":30, "Iron": 0}], "OrgJuice" : [ .72, {"Cal": 80, "Carbo":20, "Protein": 1, "VitA": 2, "VitC":120, "Calc": 2, "Iron": 2}], }) N,a,b = multidict({ # min,max intake "Cal" : [ 2000, None ], "Carbo" : [ 350, 375 ], "Protein" : [ 55, None ], "VitA" : [ 100, None ], "VitC" : [ 100, None ], "Calc" : [ 100, None ], "Iron" : [ 100, None ], }) return F,N,a,b,c,d if __name__ == "__main__": F,N,a,b,c,d = make_inst() for b["Cal"] in [None,3500,3000,2500]: print("\nDiet for a maximum of {0} calories".format(b["Cal"] if b["Cal"] != None else "unlimited")) model = diet(F,N,a,b,c,d) model.hideOutput() # silent mode model.optimize() print("Optimal value:",model.getObjVal()) x,y,z,v = model.data for j in x: if model.getVal(x[j]) > 0: print("{0:30s}: {1:3.1f} dishes --> {2:4.2f} added to objective".format(j,model.getVal(x[j]),model.getVal(y[j]))) print("amount spent:",model.getObjVal()) print("amount of nutrients:") for i in z: print("{0:30s}: {1:4.2f}".format(i,model.getVal(z[i])))
3,848
35.657143
129
py
PB-DFS
PB-DFS-master/PySCIPOpt/examples/finished/eoq_en.py
##@file eoq_en.py #@brief piecewise linear model to the multi-item economic ordering quantity problem. """ Approach: use a convex combination formulation. Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012 """ from pyscipopt import Model, quicksum, multidict def eoq(I,F,h,d,w,W,a0,aK,K): """eoq -- multi-item capacitated economic ordering quantity model Parameters: - I: set of items - F[i]: ordering cost for item i - h[i]: holding cost for item i - d[i]: demand for item i - w[i]: unit weight for item i - W: capacity (limit on order quantity) - a0: lower bound on the cycle time (x axis) - aK: upper bound on the cycle time (x axis) - K: number of linear pieces to use in the approximation Returns a model, ready to be solved. """ # construct points for piecewise-linear relation, store in a,b a,b = {},{} delta = float(aK-a0)/K for i in I: for k in range(K): T = a0 + delta*k a[i,k] = T # abscissa: cycle time b[i,k] = F[i]/T + h[i]*d[i]*T/2. # ordinate: (convex) cost for this cycle time model = Model("multi-item, capacitated EOQ") x,c,w_ = {},{},{} for i in I: x[i] = model.addVar(vtype="C", name="x(%s)"%i) # cycle time for item i c[i] = model.addVar(vtype="C", name="c(%s)"%i) # total cost for item i for k in range(K): w_[i,k] = model.addVar(ub=1, vtype="C", name="w(%s,%s)"%(i,k)) #todo ?? 
for i in I: model.addCons(quicksum(w_[i,k] for k in range(K)) == 1) model.addCons(quicksum(a[i,k]*w_[i,k] for k in range(K)) == x[i]) model.addCons(quicksum(b[i,k]*w_[i,k] for k in range(K)) == c[i]) model.addCons(quicksum(w[i]*d[i]*x[i] for i in I) <= W) model.setObjective(quicksum(c[i] for i in I), "minimize") model.data = x,w return model if __name__ == "__main__": # multiple item EOQ I,F,h,d,w = multidict( {1:[300,10,10,20], 2:[300,10,30,40], 3:[300,10,50,10]} ) W = 2000 K = 1000 a0,aK = 0.1,10 model = eoq(I,F,h,d,w,W,a0,aK,K) model.optimize() x,w = model.data EPS = 1.e-6 for v in x: if model.getVal(x[v]) >= EPS: print(x[v].name,"=",model.getVal(x[v])) print("Optimal value:", model.getObjVal())
2,399
30.168831
93
py
PB-DFS
PB-DFS-master/PySCIPOpt/examples/finished/even.py
##@file finished/even.py #@brief model to decide whether argument is even or odd ################################################################################ # # EVEN OR ODD? # # If a positional argument is given: # prints if the argument is even/odd/neither # else: # prints if a value is even/odd/neither per each value in a example list # # This example is made for newcomers and motivated by: # - modulus is unsupported for pyscipopt.scip.Variable and int # - variables are non-integer by default # Based on this: # https://github.com/SCIP-Interfaces/PySCIPOpt/issues/172#issuecomment-394644046 # ################################################################################ from pyscipopt import Model verbose = False sdic = {0:"even",1:"odd"} def parity(number): try: assert number == int(round(number)) m = Model() m.hideOutput() ### variables are non-negative by default since 0 is the default lb. ### To allow for negative values, give None as lower bound ### (None means -infinity as lower bound and +infinity as upper bound) x = m.addVar("x", vtype="I", lb=None, ub=None) #ub=None is default n = m.addVar("n", vtype="I", lb=None) s = m.addVar("s", vtype="B") ### CAVEAT: if number is negative, x's lb must be None ### if x is set by default as non-negative and number is negative: ### there is no feasible solution (trivial) but the program ### does not highlight which constraints conflict. m.addCons(x==number) m.addCons(s == x-2*n) m.setObjective(s) m.optimize() assert m.getStatus() == "optimal" if verbose: for v in m.getVars(): print("%s %d" % (v,m.getVal(v))) print("%d%%2 == %d?" % (m.getVal(x), m.getVal(s))) print(m.getVal(s) == m.getVal(x)%2) xval = m.getVal(x) sval = m.getVal(s) sstr = sdic[sval] print("%d is %s" % (xval, sstr)) except (AssertionError, TypeError): print("%s is neither even nor odd!" 
% number.__repr__()) if __name__ == "__main__": import sys from ast import literal_eval as leval example_values = [0, 1, 1.5, "hallo welt", 20, 25, -101, -15., -10, -int(2**31), int(2**31-1), int(2**63)-1] try: try: n = leval(sys.argv[1]) except ValueError: n = sys.argv[1] parity(n) except IndexError: for n in example_values: parity(n)
2,543
31.615385
112
py
PB-DFS
PB-DFS-master/PySCIPOpt/examples/finished/flp-benders.py
##@file flp-benders.py #@brief model for solving the capacitated facility location problem using Benders' decomposition """ minimize the total (weighted) travel cost from n customers to some facilities with fixed costs and capacities. Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012 """ from pyscipopt import Model, quicksum, multidict, SCIP_PARAMSETTING import pdb def flp(I,J,d,M,f,c): """flp -- model for the capacitated facility location problem Parameters: - I: set of customers - J: set of facilities - d[i]: demand for customer i - M[j]: capacity of facility j - f[j]: fixed cost for using a facility in point j - c[i,j]: unit cost of servicing demand point i from facility j Returns a model, ready to be solved. """ master = Model("flp-master") subprob = Model("flp-subprob") # creating the problem y = {} for j in J: y[j] = master.addVar(vtype="B", name="y(%s)"%j) master.setObjective( quicksum(f[j]*y[j] for j in J), "minimize") master.data = y # creating the subproblem x,y = {},{} for j in J: y[j] = subprob.addVar(vtype="B", name="y(%s)"%j) for i in I: x[i,j] = subprob.addVar(vtype="C", name="x(%s,%s)"%(i,j)) for i in I: subprob.addCons(quicksum(x[i,j] for j in J) == d[i], "Demand(%s)"%i) for j in M: subprob.addCons(quicksum(x[i,j] for i in I) <= M[j]*y[j], "Capacity(%s)"%i) for (i,j) in x: subprob.addCons(x[i,j] <= d[i]*y[j], "Strong(%s,%s)"%(i,j)) subprob.setObjective( quicksum(c[i,j]*x[i,j] for i in I for j in J), "minimize") subprob.data = x,y return master, subprob def make_data(): """creates example data set""" I,d = multidict({1:80, 2:270, 3:250, 4:160, 5:180}) # demand J,M,f = multidict({1:[500,1000], 2:[500,1000], 3:[500,1000]}) # capacity, fixed costs c = {(1,1):4, (1,2):6, (1,3):9, # transportation costs (2,1):5, (2,2):4, (2,3):7, (3,1):6, (3,2):3, (3,3):4, (4,1):8, (4,2):5, (4,3):3, (5,1):10, (5,2):8, (5,3):4, } return I,J,d,M,f,c if __name__ == "__main__": I,J,d,M,f,c = make_data() master, subprob = flp(I,J,d,M,f,c) # initializing the default 
Benders' decomposition with the subproblem master.setPresolve(SCIP_PARAMSETTING.OFF) master.setBoolParam("misc/allowdualreds", False) master.setBoolParam("benders/copybenders", False) master.initBendersDefault(subprob) # optimizing the problem using Benders' decomposition master.optimize() # solving the subproblems to get the best solution master.computeBestSolSubproblems() EPS = 1.e-6 y = master.data facilities = [j for j in y if master.getVal(y[j]) > EPS] x, suby = subprob.data edges = [(i,j) for (i,j) in x if subprob.getVal(x[i,j]) > EPS] print("Optimal value:", master.getObjVal()) print("Facilities at nodes:", facilities) print("Edges:", edges) master.printStatistics() # since computeBestSolSubproblems() was called above, we need to free the # subproblems. This must happen after the solution is extracted, otherwise # the solution will be lost master.freeBendersSubproblems() try: # plot the result using networkx and matplotlib import networkx as NX import matplotlib.pyplot as P P.clf() G = NX.Graph() other = [j for j in y if j not in facilities] customers = ["c%s"%i for i in d] G.add_nodes_from(facilities) G.add_nodes_from(other) G.add_nodes_from(customers) for (i,j) in edges: G.add_edge("c%s"%i,j) position = NX.drawing.layout.spring_layout(G) NX.draw(G,position,node_color="y",nodelist=facilities) NX.draw(G,position,node_color="g",nodelist=other) NX.draw(G,position,node_color="b",nodelist=customers) P.show() except ImportError: print("install 'networkx' and 'matplotlib' for plotting")
4,087
30.689922
96
py
PB-DFS
PB-DFS-master/PySCIPOpt/examples/finished/flp.py
##@file flp.py #@brief model for solving the capacitated facility location problem """ minimize the total (weighted) travel cost from n customers to some facilities with fixed costs and capacities. Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012 """ from pyscipopt import Model, quicksum, multidict def flp(I,J,d,M,f,c): """flp -- model for the capacitated facility location problem Parameters: - I: set of customers - J: set of facilities - d[i]: demand for customer i - M[j]: capacity of facility j - f[j]: fixed cost for using a facility in point j - c[i,j]: unit cost of servicing demand point i from facility j Returns a model, ready to be solved. """ model = Model("flp") x,y = {},{} for j in J: y[j] = model.addVar(vtype="B", name="y(%s)"%j) for i in I: x[i,j] = model.addVar(vtype="C", name="x(%s,%s)"%(i,j)) for i in I: model.addCons(quicksum(x[i,j] for j in J) == d[i], "Demand(%s)"%i) for j in M: model.addCons(quicksum(x[i,j] for i in I) <= M[j]*y[j], "Capacity(%s)"%i) for (i,j) in x: model.addCons(x[i,j] <= d[i]*y[j], "Strong(%s,%s)"%(i,j)) model.setObjective( quicksum(f[j]*y[j] for j in J) + quicksum(c[i,j]*x[i,j] for i in I for j in J), "minimize") model.data = x,y return model def make_data(): """creates example data set""" I,d = multidict({1:80, 2:270, 3:250, 4:160, 5:180}) # demand J,M,f = multidict({1:[500,1000], 2:[500,1000], 3:[500,1000]}) # capacity, fixed costs c = {(1,1):4, (1,2):6, (1,3):9, # transportation costs (2,1):5, (2,2):4, (2,3):7, (3,1):6, (3,2):3, (3,3):4, (4,1):8, (4,2):5, (4,3):3, (5,1):10, (5,2):8, (5,3):4, } return I,J,d,M,f,c if __name__ == "__main__": I,J,d,M,f,c = make_data() model = flp(I,J,d,M,f,c) model.optimize() EPS = 1.e-6 x,y = model.data edges = [(i,j) for (i,j) in x if model.getVal(x[i,j]) > EPS] facilities = [j for j in y if model.getVal(y[j]) > EPS] print("Optimal value:", model.getObjVal()) print("Facilities at nodes:", facilities) print("Edges:", edges) try: # plot the result using networkx and matplotlib import 
networkx as NX import matplotlib.pyplot as P P.clf() G = NX.Graph() other = [j for j in y if j not in facilities] customers = ["c%s"%i for i in d] G.add_nodes_from(facilities) G.add_nodes_from(other) G.add_nodes_from(customers) for (i,j) in edges: G.add_edge("c%s"%i,j) position = NX.drawing.layout.spring_layout(G) NX.draw(G,position,node_color="y",nodelist=facilities) NX.draw(G,position,node_color="g",nodelist=other) NX.draw(G,position,node_color="b",nodelist=customers) P.show() except ImportError: print("install 'networkx' and 'matplotlib' for plotting")
3,029
29.918367
89
py
PB-DFS
PB-DFS-master/PySCIPOpt/examples/finished/gcp.py
##@file gcp.py #@brief model for the graph coloring problem """ Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012 """ from pyscipopt import Model, quicksum, multidict def gcp(V,E,K): """gcp -- model for minimizing the number of colors in a graph Parameters: - V: set/list of nodes in the graph - E: set/list of edges in the graph - K: upper bound on the number of colors Returns a model, ready to be solved. """ model = Model("gcp") x,y = {},{} for k in range(K): y[k] = model.addVar(vtype="B", name="y(%s)"%k) for i in V: x[i,k] = model.addVar(vtype="B", name="x(%s,%s)"%(i,k)) for i in V: model.addCons(quicksum(x[i,k] for k in range(K)) == 1, "AssignColor(%s)"%i) for (i,j) in E: for k in range(K): model.addCons(x[i,k] + x[j,k] <= y[k], "NotSameColor(%s,%s,%s)"%(i,j,k)) model.setObjective(quicksum(y[k] for k in range(K)), "minimize") model.data = x return model def gcp_low(V,E,K): """gcp_low -- model for minimizing the number of colors in a graph (use colors with low indices) Parameters: - V: set/list of nodes in the graph - E: set/list of edges in the graph - K: upper bound to the number of colors Returns a model, ready to be solved. """ model = Model("gcp - low colors") x,y = {},{} for k in range(K): y[k] = model.addVar(vtype="B", name="y(%s)"%k) for i in V: x[i,k] = model.addVar(vtype="B", name="x(%s,%s)"%(i,k)) for i in V: model.addCons(quicksum(x[i,k] for k in range(K)) == 1, "AssignColor(%s)" % i) for (i,j) in E: for k in range(K): model.addCons(x[i,k] + x[j,k] <= y[k], "NotSameColor(%s,%s,%s)"%(i,j,k)) for k in range(K-1): model.addCons(y[k] >= y[k+1], "LowColor(%s)"%k) model.setObjective(quicksum(y[k] for k in range(K)), "minimize") model.data = x return model def gcp_sos(V,E,K): """gcp_sos -- model for minimizing the number of colors in a graph (use sos type 1 constraints) Parameters: - V: set/list of nodes in the graph - E: set/list of edges in the graph - K: upper bound to the number of colors Returns a model, ready to be solved. 
""" model = Model("gcp - sos constraints") x,y = {},{} for k in range(K): y[k] = model.addVar(vtype="B", name="y(%s)"%k) for i in V: x[i,k] = model.addVar(vtype="B", name="x(%s,%s)"%(i,k)) for i in V: model.addCons(quicksum(x[i,k] for k in range(K)) == 1, "AssignColor(%s)" % i) model.addConsSOS1([x[i,k] for k in range(K)]) for (i,j) in E: for k in range(K): model.addCons(x[i,k] + x[j,k] <= y[k], "NotSameColor(%s,%s,%s)"%(i,j,k)) for k in range(K-1): model.addCons(y[k] >= y[k+1], "LowColor(%s)"%k) model.setObjective(quicksum(y[k] for k in range(K)), "minimize") model.data = x return model import random def make_data(n,prob): """make_data: prepare data for a random graph Parameters: - n: number of vertices - prob: probability of existence of an edge, for each pair of vertices Returns a tuple with a list of vertices and a list edges. """ V = range(1,n+1) E = [(i,j) for i in V for j in V if i < j and random.random() < prob] return V,E if __name__ == "__main__": random.seed(1) V,E = make_data(20,.5) K = 10 # upper bound to the number of colors print("n,K=",len(V),K) model = gcp_low(V,E,K) model.optimize() print("Optimal value:", model.getObjVal()) x = model.data color = {} for i in V: for k in range(K): if model.getVal(x[i,k]) > 0.5: color[i] = k print("colors:",color) import time,sys models = [gcp,gcp_low,gcp_sos] cpu = {} N = 25 # number of observations print("#size\t%s\t%s\t%s" % tuple(m.__name__ for m in models)) for size in range(250): print(size,"\t",) K = size for prob in [0.1]: for m in models: name = m.__name__ if not (name,size-1,prob) in cpu or cpu[name,size-1,prob] < 100: #cpu.has_key((name,size-1,prob)) cpu[name,size,prob] = 0. 
for t in range(N): tinit = time.clock() random.seed(t) V,E = make_data(size,prob) model = m(V,E,K) model.hideOutput() # silent mode model.optimize() assert model.getObjVal() >= 0 and model.getObjVal() <= K tend = time.clock() cpu[name,size,prob] += tend - tinit cpu[name,size,prob] /= N else: cpu[name,size,prob] = "-" print(cpu[name,size,prob],"\t",) print() sys.stdout.flush()
5,046
29.77439
113
py
PB-DFS
PB-DFS-master/PySCIPOpt/examples/finished/gcp_fixed_k.py
##@file gcp_fixed_k.py #@brief solve the graph coloring problem with fixed-k model """ Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012 """ from pyscipopt import Model, quicksum, multidict def gcp_fixed_k(V,E,K): """gcp_fixed_k -- model for minimizing number of bad edges in coloring a graph Parameters: - V: set/list of nodes in the graph - E: set/list of edges in the graph - K: number of colors to be used Returns a model, ready to be solved. """ model = Model("gcp - fixed k") x,z = {},{} for i in V: for k in range(K): x[i,k] = model.addVar(vtype="B", name="x(%s,%s)"%(i,k)) for (i,j) in E: z[i,j] = model.addVar(vtype="B", name="z(%s,%s)"%(i,j)) for i in V: model.addCons(quicksum(x[i,k] for k in range(K)) == 1, "AssignColor(%s)" % i) for (i,j) in E: for k in range(K): model.addCons(x[i,k] + x[j,k] <= 1 + z[i,j], "BadEdge(%s,%s,%s)"%(i,j,k)) model.setObjective(quicksum(z[i,j] for (i,j) in E), "minimize") model.data = x,z return model def solve_gcp(V,E): """solve_gcp -- solve the graph coloring problem with bisection and fixed-k model Parameters: - V: set/list of nodes in the graph - E: set/list of edges in the graph Returns tuple with number of colors used, and dictionary mapping colors to vertices """ LB = 0 UB = len(V) color = {} while UB-LB > 1: K = int((UB+LB) / 2) gcp = gcp_fixed_k(V,E,K) # gcp.Params.OutputFlag = 0 # silent mode #gcp.Params.Cutoff = .1 gcp.setObjlimit(0.1) gcp.optimize() status = gcp.getStatus() if status == "optimal": x,z = gcp.data for i in V: for k in range(K): if gcp.getVal(x[i,k]) > 0.5: color[i] = k break # else: # raise "undefined color for", i UB = K else: LB = K return UB,color import random def make_data(n,prob): """make_data: prepare data for a random graph Parameters: - n: number of vertices - prob: probability of existence of an edge, for each pair of vertices Returns a tuple with a list of vertices and a list edges. 
""" V = range(1,n+1) E = [(i,j) for i in V for j in V if i < j and random.random() < prob] return V,E if __name__ == "__main__": random.seed(1) V,E = make_data(75,.25) K,color = solve_gcp(V,E) print("minimum number of colors:",K) print("solution:",color)
2,648
27.483871
87
py
PB-DFS
PB-DFS-master/PySCIPOpt/examples/finished/gpp.py
##@file gpp.py
#@brief model for the graph partitioning problem
"""
Copyright (c) by Joao Pedro PEDROSO, Masahiro MURAMATSU and Mikio KUBO, 2012
"""
from pyscipopt import Model, quicksum, multidict


def gpp(V, E):
    """gpp -- linearized model for the graph partitioning problem.

    Parameters:
        - V: set/list of nodes in the graph
        - E: set/list of edges in the graph

    Returns a model, ready to be solved.
    """
    model = Model("gpp")

    # x[i] = 1 iff node i is placed in the first half;
    # y[i,j] = 1 iff edge (i,j) crosses the partition
    x = {}
    y = {}
    for i in V:
        x[i] = model.addVar(vtype="B", name="x(%s)" % i)
    for (i, j) in E:
        y[i, j] = model.addVar(vtype="B", name="y(%s,%s)" % (i, j))

    # both halves have equal size
    model.addCons(quicksum(x[i] for i in V) == len(V) / 2, "Partition")

    # y[i,j] must be 1 whenever the endpoints disagree
    for (i, j) in E:
        model.addCons(x[i] - x[j] <= y[i, j], "Edge(%s,%s)" % (i, j))
        model.addCons(x[j] - x[i] <= y[i, j], "Edge(%s,%s)" % (j, i))

    model.setObjective(quicksum(y[i, j] for (i, j) in E), "minimize")

    model.data = x
    return model


def gpp_qo(V, E):
    """gpp_qo -- quadratic optimization model for the graph partitioning problem.

    Parameters:
        - V: set/list of nodes in the graph
        - E: set/list of edges in the graph

    Returns a model, ready to be solved.
    """
    model = Model("gpp")

    x = {}
    for i in V:
        x[i] = model.addVar(vtype="B", name="x(%s)" % i)

    model.addCons(quicksum(x[i] for i in V) == len(V) / 2, "Partition")

    # x[i]*(1-x[j]) + x[j]*(1-x[i]) equals 1 exactly when i and j are separated
    model.setObjective(
        quicksum(x[i] * (1 - x[j]) + x[j] * (1 - x[i]) for (i, j) in E),
        "minimize")

    model.data = x
    return model


def gpp_qo_ps(V, E):
    """gpp_qo_ps -- quadratic, positive semidefinite model for the graph
    partitioning problem.

    Parameters:
        - V: set/list of nodes in the graph
        - E: set/list of edges in the graph

    Returns a model, ready to be solved.
    """
    model = Model("gpp")

    x = {}
    for i in V:
        x[i] = model.addVar(vtype="B", name="x(%s)" % i)

    model.addCons(quicksum(x[i] for i in V) == len(V) / 2, "Partition")

    # (x[i]-x[j])^2 is a convex (PSD) reformulation of the cut indicator
    model.setObjective(
        quicksum((x[i] - x[j]) * (x[i] - x[j]) for (i, j) in E),
        "minimize")

    model.data = x
    return model


def gpp_soco(V, E):
    """gpp_soco -- second-order cone model for the graph partitioning problem.

    Parameters:
        - V: set/list of nodes in the graph
        - E: set/list of edges in the graph

    Returns a model, ready to be solved.
    """
    model = Model("gpp model -- soco")

    x, s, z = {}, {}, {}
    for i in V:
        x[i] = model.addVar(vtype="B", name="x(%s)" % i)
    for (i, j) in E:
        s[i, j] = model.addVar(vtype="C", name="s(%s,%s)" % (i, j))
        z[i, j] = model.addVar(vtype="C", name="z(%s,%s)" % (i, j))

    model.addCons(quicksum(x[i] for i in V) == len(V) / 2, "Partition")

    # s[i,j] bounds "same side", z[i,j] bounds "different sides";
    # exactly one of them is 1 for each edge
    for (i, j) in E:
        model.addCons((x[i] + x[j] - 1) * (x[i] + x[j] - 1) <= s[i, j],
                      "S(%s,%s)" % (i, j))
        model.addCons((x[j] - x[i]) * (x[j] - x[i]) <= z[i, j],
                      "Z(%s,%s)" % (i, j))
        model.addCons(s[i, j] + z[i, j] == 1, "P(%s,%s)" % (i, j))

    model.setObjective(quicksum(z[i, j] for (i, j) in E), "minimize")

    model.data = x, s, z
    return model


import random


def make_data(n, prob):
    """make_data: prepare data for a random graph

    Parameters:
        - n: number of vertices
        - prob: probability of existence of an edge, for each pair of vertices

    Returns a tuple with a list of vertices and a list of edges.
    """
    V = range(1, n + 1)
    E = [(i, j) for i in V for j in V if i < j and random.random() < prob]
    return V, E


if __name__ == "__main__":
    random.seed(1)
    V, E = make_data(4, .5)
    print("edges:", E)

    print("\n\n\nStandard model:")
    model = gpp(V, E)
    model.optimize()
    print("Optimal value:", model.getObjVal())
    x = model.data
    print("partition:")
    print([i for i in V if model.getVal(x[i]) >= .5])
    print([i for i in V if model.getVal(x[i]) < .5])

    print("\n\n\nQuadratic optimization")
    model = gpp_qo(V, E)
    model.optimize()
    model.writeProblem("gpp_qo.lp")
    print("Optimal value:", model.getObjVal())
    x = model.data
    print("partition:")
    print([i for i in V if model.getVal(x[i]) >= .5])
    print([i for i in V if model.getVal(x[i]) < .5])

    print("\n\n\nQuadratic optimization - positive semidefinite")
    model = gpp_qo_ps(V, E)
    model.optimize()
    model.writeProblem("gpp_qo.lp")
    print("Optimal value:", model.getObjVal())
    x = model.data
    print("partition:")
    print([i for i in V if model.getVal(x[i]) >= .5])
    print([i for i in V if model.getVal(x[i]) < .5])

    print("\n\n\nSecond order cone optimization")
    model = gpp_soco(V, E)
    model.optimize()
    model.writeProblem("tmp.lp")
    if model.getStatus() == "optimal":
        print("Optimal value:", model.getObjVal())
        x, s, z = model.data
        print("partition:")
        print([i for i in V if model.getVal(x[i]) >= .5])
        print([i for i in V if model.getVal(x[i]) < .5])
        for (i, j) in s:
            print("(%s,%s)\t%s\t%s" % (i, j, model.getVal(s[i, j]), model.getVal(z[i, j])))
5,610
30.172222
106
py
PB-DFS
PB-DFS-master/PySCIPOpt/examples/finished/kmedian.py
##@file kmedian.py
#@brief model for solving the k-median problem.
"""
minimize the total (weighted) travel cost for servicing
a set of customers from k facilities.

Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012
"""
import math
import random

from pyscipopt import Model, quicksum, multidict


def kmedian(I, J, c, k):
    """kmedian -- minimize the total cost of servicing customers from k facilities.

    Parameters:
        - I: set of customers
        - J: set of potential facilities
        - c[i,j]: cost of servicing customer i from facility j
        - k: number of facilities to be used

    Returns a model, ready to be solved.
    """
    model = Model("k-median")

    # y[j] = 1 iff facility j is opened; x[i,j] = 1 iff i is served by j
    x, y = {}, {}
    for j in J:
        y[j] = model.addVar(vtype="B", name="y(%s)" % j)
        for i in I:
            x[i, j] = model.addVar(vtype="B", name="x(%s,%s)" % (i, j))

    for i in I:
        # every customer is assigned to exactly one facility
        model.addCons(quicksum(x[i, j] for j in J) == 1, "Assign(%s)" % i)
        # "strong" linking: assignment only to open facilities
        for j in J:
            model.addCons(x[i, j] <= y[j], "Strong(%s,%s)" % (i, j))

    # exactly k facilities are opened
    model.addCons(quicksum(y[j] for j in J) == k, "Facilities")

    model.setObjective(quicksum(c[i, j] * x[i, j] for i in I for j in J),
                       "minimize")
    model.data = x, y
    return model


def distance(x1, y1, x2, y2):
    """return distance of two points"""
    return math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)


def make_data(n, m, same=True):
    """creates example data set"""
    if same == True:
        # customers and facilities share the same point set
        I = range(n)
        J = range(m)
        x = [random.random() for i in range(max(m, n))]  # positions of the points in the plane
        y = [random.random() for i in range(max(m, n))]
    else:
        I = range(n)
        J = range(n, n + m)
        x = [random.random() for i in range(n + m)]  # positions of the points in the plane
        y = [random.random() for i in range(n + m)]
    c = {}
    for i in I:
        for j in J:
            c[i, j] = distance(x[i], y[i], x[j], y[j])

    return I, J, c, x, y


if __name__ == "__main__":
    import sys

    random.seed(67)
    n = 200
    m = n
    I, J, c, x_pos, y_pos = make_data(n, m, same=True)
    k = 20
    model = kmedian(I, J, c, k)
    # model.Params.Threads = 1
    model.optimize()
    EPS = 1.e-6
    x, y = model.data
    edges = [(i, j) for (i, j) in x if model.getVal(x[i, j]) > EPS]
    facilities = [j for j in y if model.getVal(y[j]) > EPS]

    print("Optimal value:", model.getObjVal())
    print("Selected facilities:", facilities)
    print("Edges:", edges)
    print("max c:", max([c[i, j] for (i, j) in edges]))

    try:  # plot the result using networkx and matplotlib
        import networkx as NX
        import matplotlib.pyplot as P
        P.clf()
        G = NX.Graph()

        facilities = set(j for j in J if model.getVal(y[j]) > EPS)
        other = set(j for j in J if j not in facilities)
        client = set(i for i in I if i not in facilities and i not in other)
        G.add_nodes_from(facilities)
        G.add_nodes_from(client)
        G.add_nodes_from(other)

        for (i, j) in edges:
            G.add_edge(i, j)

        position = {}
        for i in range(len(x_pos)):
            position[i] = (x_pos[i], y_pos[i])

        NX.draw(G, position, with_labels=False, node_color="w", nodelist=facilities)
        NX.draw(G, position, with_labels=False, node_color="c", nodelist=other, node_size=50)
        NX.draw(G, position, with_labels=False, node_color="g", nodelist=client, node_size=50)
        P.show()
    except ImportError:
        print("install 'networkx' and 'matplotlib' for plotting")
3,491
29.902655
96
py
PB-DFS
PB-DFS-master/PySCIPOpt/examples/finished/lo_wines.py
##@file lo_wines.py
#@brief Simple SCIP example of linear programming.
"""
It solves the same instance as lo_wines_simple.py:

maximize  15x + 18y + 30z
subject to 2x +   y +   z <= 60
           x  +  2y +   z <= 60
                        z <= 30
           x, y, z >= 0

Variables correspond to the production of three types of wine blends,
made from pure-grape wines.
Constraints correspond to the inventory of pure-grape wines.

Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012
"""
from pyscipopt import Model, quicksum, SCIP_PARAMSETTING

# Initialize model (presolve off so that dual information is available)
model = Model("Wine blending")
model.setPresolve(SCIP_PARAMSETTING.OFF)

# inventory of each pure-grape wine
Inventory = {"Alfrocheiro": 60, "Baga": 60, "Castelao": 30}
Grapes = Inventory.keys()

# profit per unit of each blend
Profit = {"Dry": 15, "Medium": 18, "Sweet": 30}
Blends = Profit.keys()

# Use[grape, blend]: units of grape consumed per unit of blend
Use = {
    ("Alfrocheiro", "Dry"): 2,
    ("Alfrocheiro", "Medium"): 1,
    ("Alfrocheiro", "Sweet"): 1,
    ("Baga", "Dry"): 1,
    ("Baga", "Medium"): 2,
    ("Baga", "Sweet"): 1,
    ("Castelao", "Dry"): 0,
    ("Castelao", "Medium"): 0,
    ("Castelao", "Sweet"): 1,
}

# Create variables: production quantity of each blend
x = {}
for j in Blends:
    x[j] = model.addVar(vtype="C", name="x(%s)" % j)

# Create constraints: do not exceed the inventory of each grape
c = {}
for i in Grapes:
    c[i] = model.addCons(
        quicksum(Use[i, j] * x[j] for j in Blends) <= Inventory[i],
        name="Use(%s)" % i)

# Objective: maximize total profit
model.setObjective(quicksum(Profit[j] * x[j] for j in Blends), "maximize")

model.optimize()

if model.getStatus() == "optimal":
    print("Optimal value:", model.getObjVal())

    for j in x:
        print(x[j].name, "=", model.getVal(x[j]), " (red. cost: ", model.getVarRedcost(x[j]), ")")

    for i in c:
        try:
            dual = model.getDualsolLinear(c[i])
        except:
            dual = None
        print("dual of", c[i].name, ":", dual)
else:
    print("Problem could not be solved to optimality")
1,822
25.42029
99
py
PB-DFS
PB-DFS-master/PySCIPOpt/examples/finished/logical.py
##@file finished/logical.py
#@brief Tutorial example on how to use AND/OR/XOR constraints
from pyscipopt import Model
from pyscipopt import quicksum

"""
AND/OR/XOR CONSTRAINTS

Tutorial example on how to use AND/OR/XOR constraints.

N.B.: standard SCIP XOR constraint works differently from AND/OR by design.
The constraint is set with a boolean rhs instead of an integer resultant.
cf. http://listserv.zib.de/pipermail/scip/2018-May/003392.html
A workaround to get the resultant as variable is here proposed.
"""


def printFunc(name, m):
    """prints results"""
    print("* %s *" % name)
    objSet = bool(m.getObjective().terms.keys())
    print("* Is objective set? %s" % objSet)
    if objSet:
        print("* Sense: %s" % m.getObjectiveSense())
    for v in m.getVars():
        # "n" is only an auxiliary variable of the custom XOR model
        if v.name != "n":
            print("%s: %d" % (v, round(m.getVal(v))))
    print("\n")


# AND
model = Model()
model.hideOutput()
x = model.addVar("x", "B")
y = model.addVar("y", "B")
z = model.addVar("z", "B")
r = model.addVar("r", "B")
model.addConsAnd([x, y, z], r)
model.addCons(x == 1)
model.setObjective(r, sense="minimize")
model.optimize()
printFunc("AND", model)

# OR
model = Model()
model.hideOutput()
x = model.addVar("x", "B")
y = model.addVar("y", "B")
z = model.addVar("z", "B")
r = model.addVar("r", "B")
model.addConsOr([x, y, z], r)
model.addCons(x == 0)
model.setObjective(r, sense="maximize")
model.optimize()
printFunc("OR", model)

# XOR (r as boolean, standard)
model = Model()
model.hideOutput()
x = model.addVar("x", "B")
y = model.addVar("y", "B")
z = model.addVar("z", "B")
r = True
model.addConsXor([x, y, z], r)
model.addCons(x == 1)
model.optimize()
printFunc("Standard XOR (as boolean)", model)

# XOR (r as variable, custom): r + x + y + z must be even
model = Model()
model.hideOutput()
x = model.addVar("x", "B")
y = model.addVar("y", "B")
z = model.addVar("z", "B")
r = model.addVar("r", "B")
n = model.addVar("n", "I")  # auxiliary
model.addCons(r + quicksum([x, y, z]) == 2 * n)
model.addCons(x == 0)
model.setObjective(r, sense="maximize")
model.optimize()
printFunc("Custom XOR (as variable)", model)
2,064
23.879518
76
py
PB-DFS
PB-DFS-master/PySCIPOpt/examples/finished/lotsizing_lazy.py
##@file lotsizing_lazy.py
#@brief solve the single-item lot-sizing problem.
"""
Approaches:
    - sils: solve the problem using the standard formulation
    - sils_cut: solve the problem using cutting planes

Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012
"""
from pyscipopt import Model, Conshdlr, quicksum, multidict, SCIP_RESULT, SCIP_PRESOLTIMING, SCIP_PROPTIMING


class Conshdlr_sils(Conshdlr):
    """Constraint handler separating the (l,S) valid inequalities."""

    def addcut(self, checkonly, sol):
        """Separate violated (l,S) inequalities for the given solution.

        With checkonly=True only reports whether a violated cut exists;
        otherwise adds all violated cuts to the model and reports whether
        any were added.
        """
        D, Ts = self.data
        y, x, I = self.model.data
        cutsadded = False

        for ell in Ts:
            lhs = 0
            S, L = [], []
            # split periods 1..ell into S (setup term tighter) and L
            for t in range(1, ell + 1):
                yt = self.model.getSolVal(sol, y[t])
                xt = self.model.getSolVal(sol, x[t])
                if D[t, ell] * yt < xt:
                    S.append(t)
                    lhs += D[t, ell] * yt
                else:
                    L.append(t)
                    lhs += xt
            if lhs < D[1, ell]:
                if checkonly:
                    return True
                # add the violated (l,S) cutting plane
                self.model.addCons(
                    quicksum([x[t] for t in L])
                    + quicksum(D[t, ell] * y[t] for t in S) >= D[1, ell],
                    removable=True)
                cutsadded = True
        return cutsadded

    def conscheck(self, constraints, solution, checkintegrality, checklprows, printreason, completely):
        if not self.addcut(checkonly=True, sol=solution):
            return {"result": SCIP_RESULT.INFEASIBLE}
        else:
            return {"result": SCIP_RESULT.FEASIBLE}

    def consenfolp(self, constraints, nusefulconss, solinfeasible):
        if self.addcut(checkonly=False):
            return {"result": SCIP_RESULT.CONSADDED}
        else:
            return {"result": SCIP_RESULT.FEASIBLE}

    def conslock(self, constraint, locktype, nlockspos, nlocksneg):
        pass


def sils(T, f, c, d, h):
    """sils -- LP lotsizing for the single item lot sizing problem

    Parameters:
        - T: number of periods
        - P: set of products
        - f[t]: set-up costs (on period t)
        - c[t]: variable costs
        - d[t]: demand values
        - h[t]: holding costs

    Returns a model, ready to be solved.
    """
    model = Model("single item lotsizing")
    Ts = range(1, T + 1)
    M = sum(d[t] for t in Ts)  # big-M: total demand bounds any production
    y, x, I = {}, {}, {}
    for t in Ts:
        y[t] = model.addVar(vtype="I", ub=1, name="y(%s)" % t)   # setup indicator
        x[t] = model.addVar(vtype="C", ub=M, name="x(%s)" % t)   # production
        I[t] = model.addVar(vtype="C", name="I(%s)" % t)         # inventory
    I[0] = 0

    for t in Ts:
        # production only in setup periods
        model.addCons(x[t] <= M * y[t], "ConstrUB(%s)" % t)
        # inventory balance
        model.addCons(I[t - 1] + x[t] == I[t] + d[t], "FlowCons(%s)" % t)

    model.setObjective(
        quicksum(f[t] * y[t] + c[t] * x[t] + h[t] * I[t] for t in Ts),
        "minimize")

    model.data = y, x, I
    return model


def sils_cut(T, f, c, d, h, conshdlr):
    """solve_sils -- solve the lot sizing problem with cutting planes
       - start with a relaxed model
       - used lazy constraints to elimitate fractional setup variables with cutting planes

    Parameters:
        - T: number of periods
        - P: set of products
        - f[t]: set-up costs (on period t)
        - c[t]: variable costs
        - d[t]: demand values
        - h[t]: holding costs

    Returns the final model solved, with all necessary cuts added.
    """
    Ts = range(1, T + 1)

    model = sils(T, f, c, d, h)
    y, x, I = model.data

    # relax integer variables
    for t in Ts:
        model.chgVarType(y[t], "C")
    model.addVar(vtype="B", name="fake")  # for making the problem MIP

    # compute D[i,j] = sum_{t=i}^j d[t]
    D = {}
    for t in Ts:
        s = 0
        for j in range(t, T + 1):
            s += d[j]
            D[t, j] = s

    # include the lot sizing constraint handler
    model.includeConshdlr(conshdlr, "SILS", "Constraint handler for single item lot sizing",
                          sepapriority=0, enfopriority=-1, chckpriority=-1, sepafreq=-1, propfreq=-1,
                          eagerfreq=-1, maxprerounds=0, delaysepa=False, delayprop=False, needscons=False,
                          presoltiming=SCIP_PRESOLTIMING.FAST, proptiming=SCIP_PROPTIMING.BEFORELP)
    conshdlr.data = D, Ts

    model.data = y, x, I
    return model


def mk_example():
    """mk_example: book example for the single item lot sizing"""
    T = 5
    _, f, c, d, h = multidict({
        1: [3, 1, 5, 1],
        2: [3, 1, 7, 1],
        3: [3, 3, 3, 1],
        4: [3, 3, 6, 1],
        5: [3, 3, 4, 1],
    })
    return T, f, c, d, h


if __name__ == "__main__":
    T, f, c, d, h = mk_example()

    model = sils(T, f, c, d, h)
    y, x, I = model.data
    model.optimize()
    print("\nOptimal value [standard]:", model.getObjVal())
    print("%8s%8s%8s%8s%8s%8s%12s%12s" % ("t", "fix", "var", "h", "dem", "y", "x", "I"))
    for t in range(1, T + 1):
        print("%8d%8d%8d%8d%8d%8.1f%12.1f%12.1f" % (t, f[t], c[t], h[t], d[t], model.getVal(y[t]), model.getVal(x[t]), model.getVal(I[t])))

    conshdlr = Conshdlr_sils()
    model = sils_cut(T, f, c, d, h, conshdlr)
    model.setBoolParam("misc/allowdualreds", 0)
    model.optimize()
    y, x, I = model.data
    print("\nOptimal value [cutting planes]:", model.getObjVal())
    print("%8s%8s%8s%8s%8s%8s%12s%12s" % ("t", "fix", "var", "h", "dem", "y", "x", "I"))
    for t in range(1, T + 1):
        print("%8d%8d%8d%8d%8d%8.1f%12.1f%12.1f" % (t, f[t], c[t], h[t], d[t], model.getVal(y[t]), model.getVal(x[t]), model.getVal(I[t])))
5,578
33.226994
132
py
PB-DFS
PB-DFS-master/PySCIPOpt/examples/finished/markowitz_soco.py
##@file markowitz_soco.py
#@brief simple markowitz model for portfolio optimization.
"""
Approach: use second-order cone optimization.

Copyright (c) by Joao Pedro PEDROSO, Masahiro MURAMATSU and Mikio KUBO, 2012
"""
from pyscipopt import Model, quicksum, multidict


def markowitz(I, sigma, r, alpha):
    """markowitz -- simple markowitz model for portfolio optimization.

    Parameters:
        - I: set of items
        - sigma[i]: standard deviation of item i
        - r[i]: revenue of item i
        - alpha: acceptance threshold

    Returns a model, ready to be solved.
    """
    model = Model("markowitz")

    x = {}
    for i in I:
        x[i] = model.addVar(vtype="C", name="x(%s)" % i)  # quantity of i to buy

    # minimum expected revenue, and full investment of the budget
    model.addCons(quicksum(r[i] * x[i] for i in I) >= alpha)
    model.addCons(quicksum(x[i] for i in I) == 1)

    # set nonlinear objective: SCIP only allows linear objectives, hence an
    # auxiliary variable bounded below by the quadratic risk term
    obj = model.addVar(vtype="C", name="objective", lb=None, ub=None)
    model.addCons(quicksum(sigma[i] ** 2 * x[i] * x[i] for i in I) <= obj)
    model.setObjective(obj, "minimize")

    model.data = x
    return model


if __name__ == "__main__":
    # portfolio
    import math
    I, sigma, r = multidict(
        {1: [0.07, 1.01],
         2: [0.09, 1.05],
         3: [0.1, 1.08],
         4: [0.2, 1.10],
         5: [0.3, 1.20]}
    )
    alpha = 1.05

    model = markowitz(I, sigma, r, alpha)
    model.optimize()

    x = model.data
    EPS = 1.e-6
    print("%5s\t%8s" % ("i", "x[i]"))
    for i in I:
        print("%5s\t%8g" % (i, model.getVal(x[i])))
    print("sum:", sum(model.getVal(x[i]) for i in I))
    # BUG FIX: was a bare `print` (Python-2 statement), which in Python 3 is
    # a no-op expression; `print()` emits the intended blank line
    print()
    print("Optimal value:", model.getObjVal())
1,733
27.42623
118
py
PB-DFS
PB-DFS-master/PySCIPOpt/examples/finished/mctransp.py
##@file mctransp.py
#@brief a model for the multi-commodity transportation problem
"""
Model for solving the multi-commodity transportation problem:
minimize the total transportation cost for satisfying demand at
customers, from capacitated facilities.

Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012
"""
from pyscipopt import Model, quicksum, multidict


def mctransp(I, J, K, c, d, M):
    """mctransp -- model for solving the Multi-commodity Transportation Problem

    Parameters:
        - I: set of customers
        - J: set of facilities
        - K: set of commodities
        - c[i,j,k]: unit transportation cost on arc (i,j) for commodity k
        - d[i][k]: demand for commodity k at node i
        - M[j]: capacity

    Returns a model, ready to be solved.
    """
    model = Model("multi-commodity transportation")

    # Create variables: flow of commodity k from facility j to customer i
    x = {}
    for (i, j, k) in c:
        x[i, j, k] = model.addVar(vtype="C", name="x(%s,%s,%s)" % (i, j, k))

    # Demand constraints: each customer receives exactly its demand
    for i in I:
        for k in K:
            model.addCons(
                sum(x[i, j, k] for j in J if (i, j, k) in x) == d[i, k],
                "Demand(%s,%s)" % (i, k))

    # Capacity constraints: total outflow of a facility is limited
    for j in J:
        model.addCons(
            sum(x[i, j2, k] for (i, j2, k) in x if j2 == j) <= M[j],
            "Capacity(%s)" % j)

    # Objective: minimize total transportation cost
    model.setObjective(quicksum(c[i, j, k] * x[i, j, k] for (i, j, k) in x),
                       "minimize")

    model.data = x
    return model


def make_inst1():
    """creates example data set 1"""
    d = {(1, 1): 80, (1, 2): 85, (1, 3): 300, (1, 4): 6,  # {(customer,commodity):demand}}
         (2, 1): 270, (2, 2): 160, (2, 3): 400, (2, 4): 7,
         (3, 1): 250, (3, 2): 130, (3, 3): 350, (3, 4): 4,
         (4, 1): 160, (4, 2): 60, (4, 3): 200, (4, 4): 3,
         (5, 1): 180, (5, 2): 40, (5, 3): 150, (5, 4): 5}
    I = set([i for (i, k) in d])
    K = set([k for (i, k) in d])
    J, M = multidict({1: 3000, 2: 3000, 3: 3000})  # capacity

    produce = {1: [2, 4], 2: [1, 2, 3], 3: [2, 3, 4]}  # products that can be produced in each facility
    weight = {1: 5, 2: 2, 3: 3, 4: 4}  # {commodity: weight}
    cost = {(1, 1): 4, (1, 2): 6, (1, 3): 9,  # {(customer,factory): cost}
            (2, 1): 5, (2, 2): 4, (2, 3): 7,
            (3, 1): 6, (3, 2): 3, (3, 3): 4,
            (4, 1): 8, (4, 2): 5, (4, 3): 3,
            (5, 1): 10, (5, 2): 8, (5, 3): 4}
    c = {}
    for i in I:
        for j in J:
            for k in produce[j]:
                c[i, j, k] = cost[i, j] * weight[k]

    return I, J, K, c, d, M


def make_inst2():
    """creates example data set 2"""
    d = {(1, 1): 45,  # {(customer,commodity):demand}}
         (2, 1): 20,
         (3, 1): 30,
         (4, 1): 30}
    I = set([i for (i, k) in d])
    K = set([k for (i, k) in d])
    J, M = multidict({1: 35, 2: 50, 3: 40})  # {factory: capacity}}

    produce = {1: [1], 2: [1], 3: [1]}  # products that can be produced in each facility
    weight = {1: 1}  # {commodity: weight}
    cost = {(1, 1): 8, (1, 2): 9, (1, 3): 14,  # {(customer,factory): cost}
            (2, 1): 6, (2, 2): 12, (2, 3): 9,
            (3, 1): 10, (3, 2): 13, (3, 3): 16,
            (4, 1): 9, (4, 2): 7, (4, 3): 5}
    c = {}
    for i in I:
        for j in J:
            for k in produce[j]:
                c[i, j, k] = cost[i, j] * weight[k]

    return I, J, K, c, d, M


def make_inst3():
    """creates example data set 3"""
    d = {(1, 1): 40, (1, 2): 30, (1, 3): 10,  # {(customer,commodity):demand}}
         (2, 1): 70, (2, 2): 100, (2, 3): 100,
         (3, 1): 0, (3, 2): 0, (3, 3): 250,
         (4, 1): 60, (4, 2): 100, (4, 3): 0,
         (5, 1): 180, (5, 2): 0, (5, 3): 0}
    I = set([i for (i, k) in d])
    K = set([k for (i, k) in d])
    J, M = multidict({1: 500, 2: 500, 3: 500})  # capacity

    produce = {1: [2, 4], 2: [1, 2, 3], 3: [2, 3, 4]}  # products that can be produced in each facility
    weight = {1: 5, 2: 2, 3: 3, 4: 4}  # {commodity: weight}
    cost = {(1, 1): 4, (1, 2): 6, (1, 3): 9,  # {(customer,factory): cost}
            (2, 1): 5, (2, 2): 4, (2, 3): 7,
            (3, 1): 6, (3, 2): 3, (3, 3): 4,
            (4, 1): 8, (4, 2): 5, (4, 3): 3,
            (5, 1): 10, (5, 2): 8, (5, 3): 4}
    c = {}
    for i in I:
        for j in J:
            for k in produce[j]:
                c[i, j, k] = cost[i, j] * weight[k]

    return I, J, K, c, d, M


if __name__ == "__main__":
    I, J, K, c, d, M = make_inst3()
    model = mctransp(I, J, K, c, d, M)
    model.writeProblem("transp.lp")
    model.optimize()

    print("Optimal value:", model.getObjVal())

    EPS = 1.e-6
    x = model.data

    for (i, j, k) in x:
        if model.getVal(x[i, j, k]) > EPS:
            print("sending %10s units of %3s from plant %3s to customer %3s" % (model.getVal(x[i, j, k]), k, j, i))
4,759
31.380952
110
py
PB-DFS
PB-DFS-master/PySCIPOpt/examples/finished/mkp.py
##@file mkp.py
#@brief model for the multi-constrained knapsack problem
"""
Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012
"""
from pyscipopt import Model, quicksum, multidict


def mkp(I, J, v, a, b):
    """mkp -- model for solving the multi-constrained knapsack

    Parameters:
        - I: set of dimensions
        - J: set of items
        - v[j]: value of item j
        - a[i,j]: weight of item j on dimension i
        - b[i]: capacity of knapsack on dimension i

    Returns a model, ready to be solved.
    """
    model = Model("mkp")

    # Create Variables: x[j] = 1 iff item j is selected
    x = {}
    for j in J:
        x[j] = model.addVar(vtype="B", name="x(%s)" % j)

    # Create constraints: respect the capacity of every dimension
    for i in I:
        model.addCons(quicksum(a[i, j] * x[j] for j in J) <= b[i],
                      "Capacity(%s)" % i)

    # Objective: maximize total value of the selected items
    model.setObjective(quicksum(v[j] * x[j] for j in J), "maximize")
    model.data = x

    return model


def example():
    """creates example data set"""
    J, v = multidict({1: 16, 2: 19, 3: 23, 4: 28})
    a = {(1, 1): 2, (1, 2): 3, (1, 3): 4, (1, 4): 5,
         (2, 1): 3000, (2, 2): 3500, (2, 3): 5100, (2, 4): 7200,
         }
    I, b = multidict({1: 7, 2: 10000})
    return I, J, v, a, b


if __name__ == "__main__":
    I, J, v, a, b = example()
    model = mkp(I, J, v, a, b)
    x = model.data
    model.optimize()

    print("Optimal value:", model.getObjVal())

    EPS = 1.e-6
    # BUG FIX: the original loop rebound `v` (the value dictionary) to each
    # variable object, silently shadowing the data; use a distinct name
    for j in x:
        var = x[j]
        if model.getVal(var) > EPS:
            print(var.name, "=", model.getVal(var))
1,482
23.716667
81
py
PB-DFS
PB-DFS-master/PySCIPOpt/examples/finished/pfs.py
##@file pfs.py
#@brief model for the permutation flow shop problem
"""
Use a position index formulation for modeling the permutation flow
shop problem, with the objective of minimizing the makespan (maximum
completion time).

Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012
"""
import math
import random

from pyscipopt import Model, quicksum, multidict


def permutation_flow_shop(n, m, p):
    """gpp -- model for the graph partitioning problem

    Parameters:
        - n: number of jobs
        - m: number of machines
        - p[i,j]: processing time of job i on machine j

    Returns a model, ready to be solved.
    """
    model = Model("permutation flow shop")

    # x[j,k] = 1 iff job j occupies position k;
    # s[i,k]/f[i,k]: start/finish time of position k on machine i
    x, s, f = {}, {}, {}
    for j in range(1, n + 1):
        for k in range(1, n + 1):
            x[j, k] = model.addVar(vtype="B", name="x(%s,%s)" % (j, k))
    for i in range(1, m + 1):
        for k in range(1, n + 1):
            s[i, k] = model.addVar(vtype="C", name="start(%s,%s)" % (i, k))
            f[i, k] = model.addVar(vtype="C", name="finish(%s,%s)" % (i, k))

    for j in range(1, n + 1):
        # each job takes one position, and each position takes one job
        model.addCons(quicksum(x[j, k] for k in range(1, n + 1)) == 1, "Assign1(%s)" % (j))
        model.addCons(quicksum(x[k, j] for k in range(1, n + 1)) == 1, "Assign2(%s)" % (j))

    for i in range(1, m + 1):
        for k in range(1, n + 1):
            if k != n:
                # a machine processes positions in order
                model.addCons(f[i, k] <= s[i, k + 1], "FinishStart(%s,%s)" % (i, k))
            if i != m:
                # a job moves to the next machine only after finishing
                model.addCons(f[i, k] <= s[i + 1, k], "Machine(%s,%s)" % (i, k))
            # processing time of whichever job sits in position k
            model.addCons(
                s[i, k] + quicksum(p[i, j] * x[j, k] for j in range(1, n + 1)) <= f[i, k],
                "StartFinish(%s,%s)" % (i, k))

    # makespan = finish time of the last position on the last machine
    model.setObjective(f[m, n], "minimize")

    model.data = x, s, f
    return model


def make_data(n, m):
    """make_data: prepare matrix of m times n random processing times"""
    p = {}
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            p[i, j] = random.randint(1, 10)
    return p


def example():
    """creates example data set"""
    proc = [[2, 3, 1], [4, 2, 3], [1, 4, 1]]
    p = {}
    for i in range(3):
        for j in range(3):
            p[i + 1, j + 1] = proc[j][i]
    return p


if __name__ == "__main__":
    random.seed(1)
    n = 15
    m = 10
    p = make_data(n, m)
    # n = 3
    # m = 3
    # p = example()

    print("processing times (%s jobs, %s machines):" % (n, m))
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            # BUG FIX: was `print(p[i,j],)` followed by a bare `print`
            # (Python-2 leftovers): one value per line and a no-op.
            # Print each machine's times on one row, as intended.
            print(p[i, j], end=" ")
        print()

    model = permutation_flow_shop(n, m, p)
    # model.write("permflow.lp")
    model.optimize()
    x, s, f = model.data
    print("Optimal value:", model.getObjVal())

    # x[j,k] = 1 if j is the k-th job; extract job sequence:
    seq = [j for (k, j) in sorted([(k, j) for (j, k) in x if model.getVal(x[j, k]) > 0.5])]
    print("optimal job permutation:", seq)
3,052
27.53271
91
py
PB-DFS
PB-DFS-master/PySCIPOpt/examples/finished/piecewise.py
##@file piecewise.py
#@brief several approaches for solving problems with piecewise linear functions.
"""
Approaches:
    - mult_selection: multiple selection model
    - convex_comb_sos: model with SOS2 constraints
    - convex_comb_dis: convex combination with binary variables (disaggregated model)
    - convex_comb_dis_log: convex combination with a logarithmic number of binary variables
    - convex_comb_agg: convex combination with binary variables (aggregated model)
    - convex_comb_agg_log: convex combination with a logarithmic number of binary variables

Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012
"""
import math
import random

from pyscipopt import Model, quicksum, multidict


def mult_selection(model, a, b):
    """mult_selection -- add piecewise relation with multiple selection formulation

    Parameters:
        - model: a model where to include the piecewise linear relation
        - a[k]: x-coordinate of the k-th point in the piecewise linear relation
        - b[k]: y-coordinate of the k-th point in the piecewise linear relation

    Returns the model with the piecewise linear relation on added variables X, Y, and z.
    """
    K = len(a) - 1
    w, z = {}, {}
    for k in range(K):
        w[k] = model.addVar(lb=-model.infinity())  # do not name variables for avoiding clash
        z[k] = model.addVar(vtype="B")
    X = model.addVar(lb=a[0], ub=a[K], vtype="C")
    Y = model.addVar(lb=-model.infinity())

    # w[k] is X when segment k is selected (z[k]=1), and 0 otherwise
    for k in range(K):
        model.addCons(w[k] >= a[k] * z[k])
        model.addCons(w[k] <= a[k + 1] * z[k])

    model.addCons(quicksum(z[k] for k in range(K)) == 1)  # exactly one segment
    model.addCons(X == quicksum(w[k] for k in range(K)))

    # slope/intercept of each segment
    c = [float(b[k + 1] - b[k]) / (a[k + 1] - a[k]) for k in range(K)]
    d = [b[k] - c[k] * a[k] for k in range(K)]
    model.addCons(Y == quicksum(d[k] * z[k] + c[k] * w[k] for k in range(K)))

    return X, Y, z


def convex_comb_sos(model, a, b):
    """convex_comb_sos -- add piecewise relation with SOS2 constraints

    Parameters:
        - model: a model where to include the piecewise linear relation
        - a[k]: x-coordinate of the k-th point in the piecewise linear relation
        - b[k]: y-coordinate of the k-th point in the piecewise linear relation

    Returns the model with the piecewise linear relation on added variables X, Y, and z.
    """
    K = len(a) - 1
    z = {}
    for k in range(K + 1):
        z[k] = model.addVar(lb=0, ub=1, vtype="C")
    X = model.addVar(lb=a[0], ub=a[K], vtype="C")
    Y = model.addVar(lb=-model.infinity(), vtype="C")

    # (X,Y) is a convex combination of adjacent breakpoints (enforced by SOS2)
    model.addCons(X == quicksum(a[k] * z[k] for k in range(K + 1)))
    model.addCons(Y == quicksum(b[k] * z[k] for k in range(K + 1)))

    model.addCons(quicksum(z[k] for k in range(K + 1)) == 1)
    model.addConsSOS2([z[k] for k in range(K + 1)])

    return X, Y, z


def convex_comb_dis(model, a, b):
    """convex_comb_dis -- add piecewise relation with convex combination formulation

    Parameters:
        - model: a model where to include the piecewise linear relation
        - a[k]: x-coordinate of the k-th point in the piecewise linear relation
        - b[k]: y-coordinate of the k-th point in the piecewise linear relation

    Returns the model with the piecewise linear relation on added variables X, Y, and z.
    """
    K = len(a) - 1
    wL, wR, z = {}, {}, {}
    for k in range(K):
        wL[k] = model.addVar(lb=0, ub=1, vtype="C")
        wR[k] = model.addVar(lb=0, ub=1, vtype="C")
        z[k] = model.addVar(vtype="B")
    X = model.addVar(lb=a[0], ub=a[K], vtype="C")
    Y = model.addVar(lb=-model.infinity(), vtype="C")

    # weights of the left/right endpoint of each segment
    model.addCons(X == quicksum(a[k] * wL[k] + a[k + 1] * wR[k] for k in range(K)))
    model.addCons(Y == quicksum(b[k] * wL[k] + b[k + 1] * wR[k] for k in range(K)))
    for k in range(K):
        model.addCons(wL[k] + wR[k] == z[k])  # only the chosen segment has weight

    model.addCons(quicksum(z[k] for k in range(K)) == 1)

    return X, Y, z


def gray(i):
    """returns i^int(i/2)"""
    return i ^ (int(i / 2))


def convex_comb_dis_log(model, a, b):
    """convex_comb_dis_log -- add piecewise relation with a logarithmic number of binary variables
    using the convex combination formulation.

    Parameters:
        - model: a model where to include the piecewise linear relation
        - a[k]: x-coordinate of the k-th point in the piecewise linear relation
        - b[k]: y-coordinate of the k-th point in the piecewise linear relation

    Returns the model with the piecewise linear relation on added variables X, Y, wL and wR.
    """
    K = len(a) - 1
    G = int(math.ceil((math.log(K) / math.log(2))))  # number of required bits
    N = 1 << G  # number of required variables
    # NOTE(review): weight variables are created up to N but only indices
    # 0..K-1 appear in the constraints; the extra ones are harmless but unused
    wL, wR, z = {}, {}, {}
    for k in range(N):
        wL[k] = model.addVar(lb=0, ub=1, vtype="C")
        wR[k] = model.addVar(lb=0, ub=1, vtype="C")
    X = model.addVar(lb=a[0], ub=a[K], vtype="C")
    Y = model.addVar(lb=-model.infinity(), vtype="C")

    g = {}
    for j in range(G):
        g[j] = model.addVar(vtype="B")

    model.addCons(X == quicksum(a[k] * wL[k] + a[k + 1] * wR[k] for k in range(K)))
    model.addCons(Y == quicksum(b[k] * wL[k] + b[k + 1] * wR[k] for k in range(K)))
    model.addCons(quicksum(wL[k] + wR[k] for k in range(K)) == 1)

    # binary variables setup: bit j of the selected segment index equals g[j]
    for j in range(G):
        ones = []
        zeros = []
        for k in range(K):
            if k & (1 << j):
                ones.append(k)
            else:
                zeros.append(k)
        model.addCons(quicksum(wL[k] + wR[k] for k in ones) <= g[j])
        model.addCons(quicksum(wL[k] + wR[k] for k in zeros) <= 1 - g[j])

    return X, Y, wL, wR


def convex_comb_agg(model, a, b):
    """convex_comb_agg -- add piecewise relation convex combination formulation -- non-disaggregated.

    Parameters:
        - model: a model where to include the piecewise linear relation
        - a[k]: x-coordinate of the k-th point in the piecewise linear relation
        - b[k]: y-coordinate of the k-th point in the piecewise linear relation

    Returns the model with the piecewise linear relation on added variables X, Y, and z.
    """
    K = len(a) - 1
    w, z = {}, {}
    for k in range(K + 1):
        w[k] = model.addVar(lb=0, ub=1, vtype="C")
    for k in range(K):
        z[k] = model.addVar(vtype="B")
    X = model.addVar(lb=a[0], ub=a[K], vtype="C")
    Y = model.addVar(lb=-model.infinity(), vtype="C")

    model.addCons(X == quicksum(a[k] * w[k] for k in range(K + 1)))
    model.addCons(Y == quicksum(b[k] * w[k] for k in range(K + 1)))

    # a breakpoint may carry weight only if an adjacent segment is selected
    model.addCons(w[0] <= z[0])
    model.addCons(w[K] <= z[K - 1])
    for k in range(1, K):
        model.addCons(w[k] <= z[k - 1] + z[k])
    model.addCons(quicksum(w[k] for k in range(K + 1)) == 1)
    model.addCons(quicksum(z[k] for k in range(K)) == 1)

    return X, Y, z


def convex_comb_agg_log(model, a, b):
    """convex_comb_agg_log -- add piecewise relation with a logarithmic number of binary variables
    using the convex combination formulation -- non-disaggregated.

    Parameters:
        - model: a model where to include the piecewise linear relation
        - a[k]: x-coordinate of the k-th point in the piecewise linear relation
        - b[k]: y-coordinate of the k-th point in the piecewise linear relation

    Returns the model with the piecewise linear relation on added variables X, Y, and w.
    """
    K = len(a) - 1
    G = int(math.ceil((math.log(K) / math.log(2))))  # number of required bits
    w, g = {}, {}
    for k in range(K + 1):
        w[k] = model.addVar(lb=0, ub=1, vtype="C")
    for j in range(G):
        g[j] = model.addVar(vtype="B")
    X = model.addVar(lb=a[0], ub=a[K], vtype="C")
    Y = model.addVar(lb=-model.infinity(), vtype="C")

    model.addCons(X == quicksum(a[k] * w[k] for k in range(K + 1)))
    model.addCons(Y == quicksum(b[k] * w[k] for k in range(K + 1)))
    model.addCons(quicksum(w[k] for k in range(K + 1)) == 1)

    # binary variables setup, using a Gray code so adjacent segments
    # differ in exactly one bit
    for j in range(G):
        zeros, ones = [0], []
        for k in range(1, K + 1):
            if (1 & gray(k) >> j) == 1 and (1 & gray(k - 1) >> j) == 1:
                ones.append(k)
            if (1 & gray(k) >> j) == 0 and (1 & gray(k - 1) >> j) == 0:
                zeros.append(k)
        model.addCons(quicksum(w[k] for k in ones) <= g[j])
        model.addCons(quicksum(w[k] for k in zeros) <= 1 - g[j])

    return X, Y, w


if __name__ == "__main__":
    # random.seed(1)
    a = [-10, 10, 15, 25, 30, 35, 40, 45, 50, 55, 60, 70]
    b = [-20, -20, 15, -21, 0, 50, 18, 0, 15, 24, 10, 15]

    print("\n\n\npiecewise: multiple selection")
    model = Model("multiple selection")
    X, Y, z = mult_selection(model, a, b)  # X,Y --> piecewise linear replacement of x,f(x) based on points a,b
    # model using X and Y (and possibly other variables)
    u = model.addVar(vtype="C", name="u")
    A = model.addCons(3 * X + 4 * Y <= 250, "A")
    B = model.addCons(7 * X - 2 * Y + 3 * u == 170, "B")
    model.setObjective(2 * X + 15 * Y + 5 * u, "maximize")

    model.optimize()
    print("X:", model.getVal(X))
    print("Y:", model.getVal(Y))
    print("u:", model.getVal(u))

    print("\n\n\npiecewise: disaggregated convex combination")
    model = Model("disaggregated convex combination")
    X, Y, z = convex_comb_dis(model, a, b)
    u = model.addVar(vtype="C", name="u")
    A = model.addCons(3 * X + 4 * Y <= 250, "A")
    B = model.addCons(7 * X - 2 * Y + 3 * u == 170, "B")
    model.setObjective(2 * X + 15 * Y + 5 * u, "maximize")

    model.optimize()
    print("X:", model.getVal(X))
    print("Y:", model.getVal(Y))
    print("u:", model.getVal(u))

    print("\n\n\npiecewise: disaggregated convex combination, logarithmic number of variables")
    model = Model("disaggregated convex combination (log)")
    # BUG FIX: this section constructed the model but called convex_comb_dis
    # instead of convex_comb_dis_log (which returns X, Y, wL, wR)
    X, Y, wL, wR = convex_comb_dis_log(model, a, b)
    u = model.addVar(vtype="C", name="u")
    A = model.addCons(3 * X + 4 * Y <= 250, "A")
    B = model.addCons(7 * X - 2 * Y + 3 * u == 170, "B")
    model.setObjective(2 * X + 15 * Y + 5 * u, "maximize")

    model.optimize()
    print("X:", model.getVal(X))
    print("Y:", model.getVal(Y))
    print("u:", model.getVal(u))

    print("\n\n\npiecewise: SOS2 constraint")
    model = Model("SOS2")
    X, Y, w = convex_comb_sos(model, a, b)
    u = model.addVar(vtype="C", name="u")
    A = model.addCons(3 * X + 4 * Y <= 250, "A")
    B = model.addCons(7 * X - 2 * Y + 3 * u == 170, "B")
    model.setObjective(2 * X + 15 * Y + 5 * u, "maximize")

    model.optimize()
    print("X:", model.getVal(X))
    print("Y:", model.getVal(Y))
    print("u:", model.getVal(u))

    print("\n\n\npiecewise: aggregated convex combination")
    model = Model("aggregated convex combination")
    X, Y, z = convex_comb_agg(model, a, b)
    u = model.addVar(vtype="C", name="u")
    A = model.addCons(3 * X + 4 * Y <= 250, "A")
    B = model.addCons(7 * X - 2 * Y + 3 * u == 170, "B")
    model.setObjective(2 * X + 15 * Y + 5 * u, "maximize")

    model.optimize()
    print("X:", model.getVal(X))
    print("Y:", model.getVal(Y))
    print("u:", model.getVal(u))

    print("\n\n\npiecewise: aggregated convex combination, logarithmic number of variables")
    model = Model("aggregated convex combination (log)")
    X, Y, w = convex_comb_agg_log(model, a, b)
    u = model.addVar(vtype="C", name="u")
    A = model.addCons(3 * X + 4 * Y <= 250, "A")
    B = model.addCons(7 * X - 2 * Y + 3 * u == 170, "B")
    model.setObjective(2 * X + 15 * Y + 5 * u, "maximize")

    model.optimize()
    print("X:", model.getVal(X))
    print("Y:", model.getVal(Y))
    print("u:", model.getVal(u))
11,608
37.1875
106
py
PB-DFS
PB-DFS-master/PySCIPOpt/examples/finished/prodmix_soco.py
##@file prodmix_soco.py #@brief product mix model using soco. """ Copyright (c) by Joao Pedro PEDROSO, Masahiro MURAMATSU and Mikio KUBO, 2012 """ from pyscipopt import Model, quicksum, multidict def prodmix(I,K,a,p,epsilon,LB): """prodmix: robust production planning using soco Parameters: I - set of materials K - set of components a[i][k] - coef. matrix p[i] - price of material i LB[k] - amount needed for k Returns a model, ready to be solved. """ model = Model("robust product mix") x,rhs = {},{} for i in I: x[i] = model.addVar(vtype="C", name="x(%s)"%i) for k in K: rhs[k] = model.addVar(vtype="C", name="rhs(%s)"%k) model.addCons(quicksum(x[i] for i in I) == 1) for k in K: model.addCons(rhs[k] == -LB[k]+ quicksum(a[i,k]*x[i] for i in I) ) model.addCons(quicksum(epsilon*epsilon*x[i]*x[i] for i in I) <= rhs[k]*rhs[k]) model.setObjective(quicksum(p[i]*x[i] for i in I), "minimize") model.data = x,rhs return model def make_data(): """creates example data set""" a = { (1,1):.25, (1,2):.15, (1,3):.2, (2,1):.3, (2,2):.3, (2,3):.1, (3,1):.15, (3,2):.65, (3,3):.05, (4,1):.1, (4,2):.05, (4,3):.8 } epsilon = 0.01 I,p = multidict({1:5, 2:6, 3:8, 4:20}) K,LB = multidict({1:.2, 2:.3, 3:.2}) return I,K,a,p,epsilon,LB if __name__ == "__main__": I,K,a,p,epsilon,LB = make_data() model = prodmix(I,K,a,p,epsilon,LB) model.optimize() print("Objective value:",model.getObjVal()) x,rhs = model.data for i in I: print(i,": ",model.getVal(x[i]))
1,680
27.491525
86
py
PB-DFS
PB-DFS-master/PySCIPOpt/examples/finished/rcs.py
##@file rcs.py #@brief model for the resource constrained scheduling problem """ Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012 """ from pyscipopt import Model, quicksum, multidict def rcs(J,P,R,T,p,c,a,RUB): """rcs -- model for the resource constrained scheduling problem Parameters: - J: set of jobs - P: set of precedence constraints between jobs - R: set of resources - T: number of periods - p[j]: processing time of job j - c[j,t]: cost incurred when job j starts processing on period t. - a[j,r,t]: resource r usage for job j on period t (after job starts) - RUB[r,t]: upper bound for resource r on period t Returns a model, ready to be solved. """ model = Model("resource constrained scheduling") s,x = {},{} # s - start time variable; x=1 if job j starts on period t for j in J: s[j] = model.addVar(vtype="C", name="s(%s)"%j) for t in range(1,T-p[j]+2): x[j,t] = model.addVar(vtype="B", name="x(%s,%s)"%(j,t)) for j in J: # job execution constraints model.addCons(quicksum(x[j,t] for t in range(1,T-p[j]+2)) == 1, "ConstrJob(%s,%s)"%(j,t)) # start time constraints model.addCons(quicksum((t-1)*x[j,t] for t in range(2,T-p[j]+2)) == s[j], "ConstrJob(%s,%s)"%(j,t)) # resource upper bound constraints for t in range(1,T-p[j]+2): for r in R: model.addCons( quicksum(a[j,r,t-t_]*x[j,t_] for j in J for t_ in range(max(t-p[j]+1,1),min(t+1,T-p[j]+2))) \ <= RUB[r,t], "ResourceUB(%s)"%t) # time (precedence) constraints, i.e., s[k]-s[j] >= p[j] for (j,k) in P: model.addCons(s[k] - s[j] >= p[j], "Precedence(%s,%s)"%(j,k)) model.setObjective(quicksum(c[j,t]*x[j,t] for (j,t) in x), "minimize") model.data = x,s return model def make_1r(): """creates example data set 1""" J, p = multidict({ # jobs, processing times 1 : 1, 2 : 3, 3 : 2, 4 : 2, }) P = [(1,2), (1,3), (2,4)] R = [1] T = 6 c = {} for j in J: for t in range(1,T-p[j]+2): c[j,t] = 1*(t-1+p[j]) a = { (1,1,0):2, (2,1,0):2, (2,1,1):1, (2,1,2):1, (3,1,0):1, (3,1,1):1, (4,1,0):1, (4,1,1):2, } RUB = {(1,1):2, (1,2):2, (1,3):1, 
(1,4):2, (1,5):2, (1,6):2} return (J,P,R,T,p,c,a,RUB) def make_2r(): """creates example data set 2""" J, p = multidict({ # jobs, processing times 1 : 2, 2 : 2, 3 : 3, 4 : 2, 5 : 5, }) P = [(1,2), (1,3), (2,4)] R = [1,2] T = 6 c = {} for j in J: for t in range(1,T-p[j]+2): c[j,t] = 1*(t-1+p[j]) a = { # resource 1: (1,1,0):2, (1,1,1):2, (2,1,0):1, (2,1,1):1, (3,1,0):1, (3,1,1):1, (3,1,2):1, (4,1,0):1, (4,1,1):1, (5,1,0):0, (5,1,1):0, (5,1,2):1, (5,1,3):0, (5,1,4):0, # resource 2: (1,2,0):1, (1,2,1):0, (2,2,0):1, (2,2,1):1, (3,2,0):0, (3,2,1):0, (3,2,2):0, (4,2,0):1, (4,2,1):2, (5,2,0):1, (5,2,1):2, (5,2,2):1, (5,2,3):1, (5,2,4):1, } RUB = {(1,1):2, (1,2):2, (1,3):2, (1,4):2, (1,5):2, (1,6):2, (1,7):2, (2,1):2, (2,2):2, (2,3):2, (2,4):2, (2,5):2, (2,6):2, (2,7):2 } return (J,P,R,T,p,c,a,RUB) if __name__ == "__main__": (J,P,R,T,p,c,a,RUB) = make_2r() model = rcs(J,P,R,T,p,c,a,RUB) model.optimize() x,s = model.data print("Optimal value:",model.getObjVal()) for (j,t) in x: if model.getVal(x[j,t]) > 0.5: print(x[j,t].name,"=",model.getVal(x[j,t])) for j in s: if model.getVal(s[j]) > 0.: print(s[j].name,"=",model.getVal(s[j]))
3,840
29.484127
109
py
PB-DFS
PB-DFS-master/PySCIPOpt/examples/finished/read_tsplib.py
##@file read_tsplib.py #@brief read standard instances of the traveling salesman problem """ Functions provided: * read_tsplib - read a symmetric tsp instance * read_atsplib - asymmetric Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012 """ import gzip import math def distL2(x1,y1,x2,y2): """Compute the L2-norm (Euclidean) distance between two points. The distance is rounded to the closest integer, for compatibility with the TSPLIB convention. The two points are located on coordinates (x1,y1) and (x2,y2), sent as parameters""" xdiff = x2 - x1 ydiff = y2 - y1 return int(math.sqrt(xdiff*xdiff + ydiff*ydiff) + .5) def distL1(x1,y1,x2,y2): """Compute the L1-norm (Manhattan) distance between two points. The distance is rounded to the closest integer, for compatibility with the TSPLIB convention. The two points are located on coordinates (x1,y1) and (x2,y2), sent as parameters""" return int(abs(x2-x1) + abs(y2-y1)+.5) def distLinf(x1,y1,x2,y2): """Compute the Linfty distance between two points (see TSPLIB documentation)""" return int(max(abs(x2-x1),abs(y2-y1))) def distATT(x1,y1,x2,y2): """Compute the ATT distance between two points (see TSPLIB documentation)""" xd = x2 - x1 yd = y2 - y1 rij = math.sqrt((xd*xd + yd*yd) /10.) tij = int(rij + .5) if tij < rij: return tij + 1 else: return tij def distCEIL2D(x1,y1,x2,y2): """returns smallest integer not less than the distance of two points""" xdiff = x2 - x1 ydiff = y2 - y1 return int(math.ceil(math.sqrt(xdiff*xdiff + ydiff*ydiff))) def distGEO(x1,y1,x2,y2): print("Implementation is wrong") assert False PI = 3.141592 deg = int(x1 + .5) min_ = x1 - deg lat1 = PI * (deg + 5.*min_/3)/180. deg = int(y1 + .5) min_ = y1 - deg long1 = PI * (deg + 5.*min_/3)/180. deg = int(x2 + .5) min_ = x2 - deg lat2 = PI * (deg + 5.*min_/3)/180. deg = int(y2 + .5) min_ = y2 - deg long2 = PI * (deg + 5.*min_/3)/180. 
RRR = 6378.388 q1 = math.cos( long1 - long2 ); q2 = math.cos( lat1 - lat2 ); q3 = math.cos( lat1 + lat2 ); return int(RRR * math.acos(.5*((1.+q1)*q2 - (1.-q1)*q3)) + 1.) def read_explicit_lowerdiag(f,n): c = {} i,j = 1,1 while True: line = f.readline() for data in line.split(): c[j,i] = int(data) j += 1 if j>i: i += 1 j = 1 if i > n: return range(1,n+1),c,None,None def read_explicit_upper(f,n): c = {} i,j = 1,2 while True: line = f.readline() for data in line.split(): c[i,j] = int(data) j += 1 if j>n: i += 1 j = i+1 if i == n: return range(1,n+1),c,None,None def read_explicit_upperdiag(f,n): c = {} i,j = 1,1 while True: line = f.readline() for data in line.split(): c[i,j] = int(data) j += 1 if j>n: i += 1 j = i if i == n: return range(1,n+1),c,None,None def read_explicit_matrix(f,n): c = {} i,j = 1,1 while True: line = f.readline() for data in line.split(): if j>i: c[i,j] = int(data) j += 1 if j>n: i += 1 j = 1 if i == n: return range(1,n+1),c,None,None def read_tsplib(filename): "basic function for reading a symmetric problem in the TSPLIB format" "data is stored in an upper triangular matrix" "NOTE: some distance types are not handled yet" if filename[-3:] == ".gz": f = gzip.open(filename, "rt") else: f = open(filename) line = f.readline() while line.find("DIMENSION") == -1: line = f.readline() n = int(line.split()[-1]) while line.find("EDGE_WEIGHT_TYPE") == -1: line = f.readline() if line.find("EUC_2D") != -1: dist = distL2 elif line.find("MAN_2D") != -1: dist = distL1 elif line.find("MAX_2D") != -1: dist = distLinf elif line.find("ATT") != -1: dist = distATT elif line.find("CEIL_2D") != -1: dist = distCEIL2D # elif line.find("GEO") != -1: # print("geographic" # dist = distGEO elif line.find("EXPLICIT") != -1: while line.find("EDGE_WEIGHT_FORMAT") == -1: line = f.readline() if line.find("LOWER_DIAG_ROW") != -1: while line.find("EDGE_WEIGHT_SECTION") == -1: line = f.readline() return read_explicit_lowerdiag(f,n) if line.find("UPPER_ROW") != -1: while 
line.find("EDGE_WEIGHT_SECTION") == -1: line = f.readline() return read_explicit_upper(f,n) if line.find("UPPER_DIAG_ROW") != -1: while line.find("EDGE_WEIGHT_SECTION") == -1: line = f.readline() return read_explicit_upperdiag(f,n) if line.find("FULL_MATRIX") != -1: while line.find("EDGE_WEIGHT_SECTION") == -1: line = f.readline() return read_explicit_matrix(f,n) print("error reading line " + line) raise(Exception) else: print("cannot deal with '%s' distances" % line) raise Exception while line.find("NODE_COORD_SECTION") == -1: line = f.readline() x,y = {},{} while 1: line = f.readline() if line.find("EOF") != -1 or not line: break (i,xi,yi) = line.split() x[i] = float(xi) y[i] = float(yi) V = x.keys() c = {} # dictionary to hold n times n matrix for i in V: for j in V: c[i,j] = dist(x[i],y[i],x[j],y[j]) return V,c,x,y def read_atsplib(filename): "basic function for reading a ATSP problem on the TSPLIB format" "NOTE: only works for explicit matrices" if filename[-3:] == ".gz": f = gzip.open(filename, 'r') data = f.readlines() else: f = open(filename, 'r') data = f.readlines() for line in data: if line.find("DIMENSION") >= 0: n = int(line.split()[1]) break else: raise IOError("'DIMENSION' keyword not found in file '%s'" % filename) for line in data: if line.find("EDGE_WEIGHT_TYPE") >= 0: if line.split()[1] == "EXPLICIT": break else: raise IOError("'EDGE_WEIGHT_TYPE' is not 'EXPLICIT' in file '%s'" % filename) for k,line in enumerate(data): if line.find("EDGE_WEIGHT_SECTION") >= 0: break else: raise IOError("'EDGE_WEIGHT_SECTION' not found in file '%s'" % filename) c = {} # flatten list of distances dist = [] for line in data[k+1:]: if line.find("EOF") >= 0: break for val in line.split(): dist.append(int(val)) k = 0 for i in range(n): for j in range(n): c[i+1,j+1] = dist[k] k += 1 return n,c if __name__ == "__main__": import sys # Parse argument if len(sys.argv) < 2: print('Usage: %s instance' % sys.argv[0]) exit(1) from read_tsplib import read_tsplib V,c,x,y = 
read_tsplib(sys.argv[1]) print(len(V), "vertices,", len(c), "arcs") print("distance matrix:") for i in V: for j in V: if j > i: print(c[i,j],) elif j < i: print(c[j,i],) else: print(0,) print print
7,764
26.055749
85
py
PB-DFS
PB-DFS-master/PySCIPOpt/examples/finished/ssa.py
##@file ssa.py #@brief multi-stage (serial) safety stock allocation model """ Approach: use SOS2 constraints for modeling non-linear functions. Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012 """ from pyscipopt import Model, quicksum, multidict import math import random from piecewise import convex_comb_sos def ssa(n,h,K,f,T): """ssa -- multi-stage (serial) safety stock allocation model Parameters: - n: number of stages - h[i]: inventory cost on stage i - K: number of linear segments - f: (non-linear) cost function - T[i]: production lead time on stage i Returns the model with the piecewise linear relation on added variables x, f, and z. """ model = Model("safety stock allocation") # calculate endpoints for linear segments a,b = {},{} for i in range(1,n+1): a[i] = [k for k in range(K)] b[i] = [f(i,k) for k in range(K)] # x: net replenishment time for stage i # y: corresponding cost # s: piecewise linear segment of variable x x,y,s = {},{},{} L = {} # service time of stage i for i in range(1,n+1): x[i],y[i],s[i] = convex_comb_sos(model,a[i],b[i]) if i == 1: L[i] = model.addVar(ub=0, vtype="C", name="L[%s]"%i) else: L[i] = model.addVar(vtype="C", name="L[%s]"%i) L[n+1] = model.addVar(ub=0, vtype="C", name="L[%s]"%(n+1)) for i in range(1,n+1): # net replenishment time for each stage i model.addCons(x[i] + L[i] == T[i] + L[i+1]) model.setObjective(quicksum(h[i]*y[i] for i in range(1,n+1)), "minimize") model.data = x,s,L return model def make_data(): """creates example data set""" n = 30 # number of stages z = 1.65 # for 95% service level sigma = 100 # demand's standard deviation h = {} # inventory cost T = {} # production lead time h[n] = 1 for i in range(n-1,0,-1): h[i] = h[i+1] + random.randint(30,50) K = 0 # number of segments (=sum of processing times) for i in range(1,n+1): T[i] = random.randint(3,5) # production lead time at stage i K += T[i] return z,sigma,h,T,K,n if __name__ == "__main__": random.seed(1) z,sigma,h,T,K,n = make_data() def f(i,k): return 
sigma*z*math.sqrt(k) model = ssa(n,h,K,f,T) model.optimize() # model.write("ssa.lp") x,s,L = model.data for i in range(1,n+1): for k in range(K): if model.getVal(s[i][k]) >= 0.001: print(s[i][k].name,model.getVal(s[i][k])) print print("%10s%10s%10s%10s" % ("Period","x","L","T")) for i in range(1,n+1): print("%10s%10s%10s%10s" % (i,model.getVal(x[i]), model.getVal(L[i]), T[i])) print("Objective:",model.getObjVal())
2,828
28.164948
88
py
PB-DFS
PB-DFS-master/PySCIPOpt/examples/finished/ssp.py
##@file ssp.py #@brief model for the stable set problem """ Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012 """ from pyscipopt import Model, quicksum, multidict def ssp(V,E): """ssp -- model for the stable set problem Parameters: - V: set/list of nodes in the graph - E: set/list of edges in the graph Returns a model, ready to be solved. """ model = Model("ssp") x = {} for i in V: x[i] = model.addVar(vtype="B", name="x(%s)"%i) for (i,j) in E: model.addCons(x[i] + x[j] <= 1, "Edge(%s,%s)"%(i,j)) model.setObjective(quicksum(x[i] for i in V), "maximize") model.data = x return model import random def make_data(n,prob): """make_data: prepare data for a random graph Parameters: - n: number of vertices - prob: probability of existence of an edge, for each pair of vertices Returns a tuple with a list of vertices and a list edges. """ V = range(1,n+1) E = [(i,j) for i in V for j in V if i < j and random.random() < prob] return V,E if __name__ == "__main__": random.seed(1) V,E = make_data(100,.5) model = ssp(V,E) model.optimize() print("Optimal value:", model.getObjVal()) x = model.data print("Maximum stable set:") print([i for i in V if model.getVal(x[i]) > 0.5])
1,346
23.490909
77
py
PB-DFS
PB-DFS-master/PySCIPOpt/examples/finished/sudoku.py
##@file sudoku.py #@brief Simple example of modeling a Sudoku as a binary program #!/usr/bin/env python from pyscipopt import Model, quicksum # initial Sudoku values init = [5, 3, 0, 0, 7, 0, 0, 0, 0, 6, 0, 0, 1, 9, 5, 0, 0, 0, 0, 9, 8, 0, 0, 0, 0, 6, 0, 8, 0, 0, 0, 6, 0, 0, 0, 3, 4, 0, 0, 8, 0, 3, 0, 0, 1, 7, 0, 0, 0, 2, 0, 0, 0, 6, 0, 6, 0, 0, 0, 0, 2, 8, 0, 0, 0, 0, 4, 1, 9, 0, 0, 5, 0, 0, 0, 0, 8, 0, 0, 7, 9] m = Model() # create a binary variable for every field and value x = {} for i in range(9): for j in range(9): for k in range(9): name = str(i)+','+str(j)+','+str(k) x[i,j,k] = m.addVar(name, vtype='B') # fill in initial values for i in range(9): for j in range(9): if init[j + 9*i] != 0: m.addCons(x[i,j,init[j + 9*i]-1] == 1) # only one digit in every field for i in range(9): for j in range(9): m.addCons(quicksum(x[i,j,k] for k in range(9)) == 1) # set up row and column constraints for ind in range(9): for k in range(9): m.addCons(quicksum(x[ind,j,k] for j in range(9)) == 1) m.addCons(quicksum(x[i,ind,k] for i in range(9)) == 1) # set up square constraints for row in range(3): for col in range(3): for k in range(9): m.addCons(quicksum(x[i+3*row, j+3*col, k] for i in range(3) for j in range(3)) == 1) m.hideOutput() m.optimize() if m.getStatus() != 'optimal': print('Sudoku is not feasible!') else: print('\nSudoku solution:\n') sol = {} for i in range(9): out = '' for j in range(9): for k in range(9): if m.getVal(x[i,j,k]) == 1: sol[i,j] = k+1 out += str(sol[i,j]) + ' ' print(out)
1,802
25.514706
96
py